hexsha
stringlengths
40
40
size
int64
4
1.05M
content
stringlengths
4
1.05M
avg_line_length
float64
1.33
100
max_line_length
int64
1
1k
alphanum_fraction
float64
0.25
1
48c658d7ab96365a768bfff604dd1b0b5ee4cf50
79
use std::fs::File; fn main() { let f = File::open("hello.txt").unwrap(); }
15.8
45
0.556962
d95a75d898dbc6eab78a7f98a514d65ec678d2f3
19,721
/// Arrow schema as specified in /// https://arrow.apache.org/docs/python/api/datatypes.html /// and serialized to bytes using IPC: /// https://arrow.apache.org/docs/format/Columnar.html#serialization-and-interprocess-communication-ipc /// /// See code samples on how this message can be deserialized. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ArrowSchema { /// IPC serialized Arrow schema. #[prost(bytes, tag = "1")] pub serialized_schema: std::vec::Vec<u8>, } /// Arrow RecordBatch. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ArrowRecordBatch { /// IPC-serialized Arrow RecordBatch. #[prost(bytes, tag = "1")] pub serialized_record_batch: std::vec::Vec<u8>, } /// Contains options specific to Arrow Serialization. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ArrowSerializationOptions { /// The Arrow IPC format to use. #[prost(enumeration = "arrow_serialization_options::Format", tag = "1")] pub format: i32, } pub mod arrow_serialization_options { /// The IPC format to use when serializing Arrow streams. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum Format { /// If unspecied the IPC format as of 0.15 release will be used. Unspecified = 0, /// Use the legacy IPC message format as of Apache Arrow Release 0.14. Arrow014 = 1, /// Use the message format as of Apache Arrow Release 0.15. Arrow015 = 2, } } /// Avro schema. #[derive(Clone, PartialEq, ::prost::Message)] pub struct AvroSchema { /// Json serialized schema, as described at /// https://avro.apache.org/docs/1.8.1/spec.html. #[prost(string, tag = "1")] pub schema: std::string::String, } /// Avro rows. #[derive(Clone, PartialEq, ::prost::Message)] pub struct AvroRows { /// Binary serialized rows in a block. #[prost(bytes, tag = "1")] pub serialized_binary_rows: std::vec::Vec<u8>, } /// Information about the ReadSession. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ReadSession { /// Output only. 
Unique identifier for the session, in the form /// `projects/{project_id}/locations/{location}/sessions/{session_id}`. #[prost(string, tag = "1")] pub name: std::string::String, /// Output only. Time at which the session becomes invalid. After this time, subsequent /// requests to read this Session will return errors. The expire_time is /// automatically assigned and currently cannot be specified or updated. #[prost(message, optional, tag = "2")] pub expire_time: ::std::option::Option<::prost_types::Timestamp>, /// Immutable. Data format of the output data. #[prost(enumeration = "DataFormat", tag = "3")] pub data_format: i32, /// Immutable. Table that this ReadSession is reading from, in the form /// `projects/{project_id}/datasets/{dataset_id}/tables/{table_id} #[prost(string, tag = "6")] pub table: std::string::String, /// Optional. Any modifiers which are applied when reading from the specified table. #[prost(message, optional, tag = "7")] pub table_modifiers: ::std::option::Option<read_session::TableModifiers>, /// Optional. Read options for this session (e.g. column selection, filters). #[prost(message, optional, tag = "8")] pub read_options: ::std::option::Option<read_session::TableReadOptions>, /// Output only. A list of streams created with the session. /// /// At least one stream is created with the session. In the future, larger /// request_stream_count values *may* result in this list being unpopulated, /// in that case, the user will need to use a List method to get the streams /// instead, which is not yet available. #[prost(message, repeated, tag = "10")] pub streams: ::std::vec::Vec<ReadStream>, /// The schema for the read. If read_options.selected_fields is set, the /// schema may be different from the table schema as it will only contain /// the selected fields. #[prost(oneof = "read_session::Schema", tags = "4, 5")] pub schema: ::std::option::Option<read_session::Schema>, } pub mod read_session { /// Additional attributes when reading a table. 
#[derive(Clone, PartialEq, ::prost::Message)] pub struct TableModifiers { /// The snapshot time of the table. If not set, interpreted as now. #[prost(message, optional, tag = "1")] pub snapshot_time: ::std::option::Option<::prost_types::Timestamp>, } /// Options dictating how we read a table. #[derive(Clone, PartialEq, ::prost::Message)] pub struct TableReadOptions { /// Names of the fields in the table that should be read. If empty, all /// fields will be read. If the specified field is a nested field, all /// the sub-fields in the field will be selected. The output field order is /// unrelated to the order of fields in selected_fields. #[prost(string, repeated, tag = "1")] pub selected_fields: ::std::vec::Vec<std::string::String>, /// SQL text filtering statement, similar to a WHERE clause in a query. /// Aggregates are not supported. /// /// Examples: "int_field > 5" /// "date_field = CAST('2014-9-27' as DATE)" /// "nullable_field is not NULL" /// "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))" /// "numeric_field BETWEEN 1.0 AND 5.0" #[prost(string, tag = "2")] pub row_restriction: std::string::String, /// Optional. Options specific to the Apache Arrow output format. #[prost(message, optional, tag = "3")] pub arrow_serialization_options: ::std::option::Option<super::ArrowSerializationOptions>, } /// The schema for the read. If read_options.selected_fields is set, the /// schema may be different from the table schema as it will only contain /// the selected fields. #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Schema { /// Output only. Avro schema. #[prost(message, tag = "4")] AvroSchema(super::AvroSchema), /// Output only. Arrow schema. #[prost(message, tag = "5")] ArrowSchema(super::ArrowSchema), } } /// Information about a single stream that gets data out of the storage system. /// Most of the information about `ReadStream` instances is aggregated, making /// `ReadStream` lightweight. 
#[derive(Clone, PartialEq, ::prost::Message)] pub struct ReadStream { /// Output only. Name of the stream, in the form /// `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`. #[prost(string, tag = "1")] pub name: std::string::String, } /// Data format for input or output data. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum DataFormat { Unspecified = 0, /// Avro is a standard open source row based file format. /// See https://avro.apache.org/ for more details. Avro = 1, /// Arrow is a standard open source column-based message format. /// See https://arrow.apache.org/ for more details. Arrow = 2, } /// Request message for `CreateReadSession`. #[derive(Clone, PartialEq, ::prost::Message)] pub struct CreateReadSessionRequest { /// Required. The request project that owns the session, in the form of /// `projects/{project_id}`. #[prost(string, tag = "1")] pub parent: std::string::String, /// Required. Session to be created. #[prost(message, optional, tag = "2")] pub read_session: ::std::option::Option<ReadSession>, /// Max initial number of streams. If unset or zero, the server will /// provide a value of streams so as to produce reasonable throughput. Must be /// non-negative. The number of streams may be lower than the requested number, /// depending on the amount parallelism that is reasonable for the table. Error /// will be returned if the max count is greater than the current system /// max limit of 1,000. /// /// Streams must be read starting from offset 0. #[prost(int32, tag = "3")] pub max_stream_count: i32, } /// Request message for `ReadRows`. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ReadRowsRequest { /// Required. Stream to read rows from. #[prost(string, tag = "1")] pub read_stream: std::string::String, /// The offset requested must be less than the last row read from Read. /// Requesting a larger offset is undefined. 
If not specified, start reading /// from offset zero. #[prost(int64, tag = "2")] pub offset: i64, } /// Information on if the current connection is being throttled. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ThrottleState { /// How much this connection is being throttled. Zero means no throttling, /// 100 means fully throttled. #[prost(int32, tag = "1")] pub throttle_percent: i32, } /// Estimated stream statistics for a given Stream. #[derive(Clone, PartialEq, ::prost::Message)] pub struct StreamStats { /// Represents the progress of the current stream. #[prost(message, optional, tag = "2")] pub progress: ::std::option::Option<stream_stats::Progress>, } pub mod stream_stats { #[derive(Clone, PartialEq, ::prost::Message)] pub struct Progress { /// The fraction of rows assigned to the stream that have been processed by /// the server so far, not including the rows in the current response /// message. /// /// This value, along with `at_response_end`, can be used to interpolate /// the progress made as the rows in the message are being processed using /// the following formula: `at_response_start + (at_response_end - /// at_response_start) * rows_processed_from_response / rows_in_response`. /// /// Note that if a filter is provided, the `at_response_end` value of the /// previous response may not necessarily be equal to the /// `at_response_start` value of the current response. #[prost(double, tag = "1")] pub at_response_start: f64, /// Similar to `at_response_start`, except that this value includes the /// rows in the current response. #[prost(double, tag = "2")] pub at_response_end: f64, } } /// Response from calling `ReadRows` may include row data, progress and /// throttling information. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ReadRowsResponse { /// Number of serialized rows in the rows block. #[prost(int64, tag = "6")] pub row_count: i64, /// Statistics for the stream. 
#[prost(message, optional, tag = "2")] pub stats: ::std::option::Option<StreamStats>, /// Throttling state. If unset, the latest response still describes /// the current throttling status. #[prost(message, optional, tag = "5")] pub throttle_state: ::std::option::Option<ThrottleState>, /// Row data is returned in format specified during session creation. #[prost(oneof = "read_rows_response::Rows", tags = "3, 4")] pub rows: ::std::option::Option<read_rows_response::Rows>, } pub mod read_rows_response { /// Row data is returned in format specified during session creation. #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Rows { /// Serialized row data in AVRO format. #[prost(message, tag = "3")] AvroRows(super::AvroRows), /// Serialized row data in Arrow RecordBatch format. #[prost(message, tag = "4")] ArrowRecordBatch(super::ArrowRecordBatch), } } /// Request message for `SplitReadStream`. #[derive(Clone, PartialEq, ::prost::Message)] pub struct SplitReadStreamRequest { /// Required. Name of the stream to split. #[prost(string, tag = "1")] pub name: std::string::String, /// A value in the range (0.0, 1.0) that specifies the fractional point at /// which the original stream should be split. The actual split point is /// evaluated on pre-filtered rows, so if a filter is provided, then there is /// no guarantee that the division of the rows between the new child streams /// will be proportional to this fractional value. Additionally, because the /// server-side unit for assigning data is collections of rows, this fraction /// will always map to a data storage boundary on the server side. #[prost(double, tag = "2")] pub fraction: f64, } /// Response message for `SplitReadStream`. #[derive(Clone, PartialEq, ::prost::Message)] pub struct SplitReadStreamResponse { /// Primary stream, which contains the beginning portion of /// |original_stream|. An empty value indicates that the original stream can no /// longer be split. 
#[prost(message, optional, tag = "1")] pub primary_stream: ::std::option::Option<ReadStream>, /// Remainder stream, which contains the tail of |original_stream|. An empty /// value indicates that the original stream can no longer be split. #[prost(message, optional, tag = "2")] pub remainder_stream: ::std::option::Option<ReadStream>, } #[doc = r" Generated client implementations."] pub mod big_query_read_client { #![allow(unused_variables, dead_code, missing_docs)] use tonic::codegen::*; #[doc = " BigQuery Read API."] #[doc = ""] #[doc = " The Read API can be used to read data from BigQuery."] pub struct BigQueryReadClient<T> { inner: tonic::client::Grpc<T>, } impl<T> BigQueryReadClient<T> where T: tonic::client::GrpcService<tonic::body::BoxBody>, T::ResponseBody: Body + HttpBody + Send + 'static, T::Error: Into<StdError>, <T::ResponseBody as HttpBody>::Error: Into<StdError> + Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); Self { inner } } pub fn with_interceptor(inner: T, interceptor: impl Into<tonic::Interceptor>) -> Self { let inner = tonic::client::Grpc::with_interceptor(inner, interceptor); Self { inner } } #[doc = " Creates a new read session. A read session divides the contents of a"] #[doc = " BigQuery table into one or more streams, which can then be used to read"] #[doc = " data from the table. The read session also specifies properties of the"] #[doc = " data to be read, such as a list of columns or a push-down filter describing"] #[doc = " the rows to be returned."] #[doc = ""] #[doc = " A particular row can be read by at most one stream. When the caller has"] #[doc = " reached the end of each stream in the session, then all the data in the"] #[doc = " table has been read."] #[doc = ""] #[doc = " Data is assigned to each stream such that roughly the same number of"] #[doc = " rows can be read from each stream. 
Because the server-side unit for"] #[doc = " assigning data is collections of rows, the API does not guarantee that"] #[doc = " each stream will return the same number or rows. Additionally, the"] #[doc = " limits are enforced based on the number of pre-filtered rows, so some"] #[doc = " filters can lead to lopsided assignments."] #[doc = ""] #[doc = " Read sessions automatically expire 24 hours after they are created and do"] #[doc = " not require manual clean-up by the caller."] pub async fn create_read_session( &mut self, request: impl tonic::IntoRequest<super::CreateReadSessionRequest>, ) -> Result<tonic::Response<super::ReadSession>, tonic::Status> { self.inner.ready().await.map_err(|e| { tonic::Status::new( tonic::Code::Unknown, format!("Service was not ready: {}", e.into()), ) })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.bigquery.storage.v1beta2.BigQueryRead/CreateReadSession", ); self.inner.unary(request.into_request(), path, codec).await } #[doc = " Reads rows from the stream in the format prescribed by the ReadSession."] #[doc = " Each response contains one or more table rows, up to a maximum of 100 MiB"] #[doc = " per response; read requests which attempt to read individual rows larger"] #[doc = " than 100 MiB will fail."] #[doc = ""] #[doc = " Each request also returns a set of stream statistics reflecting the current"] #[doc = " state of the stream."] pub async fn read_rows( &mut self, request: impl tonic::IntoRequest<super::ReadRowsRequest>, ) -> Result<tonic::Response<tonic::codec::Streaming<super::ReadRowsResponse>>, tonic::Status> { self.inner.ready().await.map_err(|e| { tonic::Status::new( tonic::Code::Unknown, format!("Service was not ready: {}", e.into()), ) })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.bigquery.storage.v1beta2.BigQueryRead/ReadRows", ); self.inner 
.server_streaming(request.into_request(), path, codec) .await } #[doc = " Splits a given `ReadStream` into two `ReadStream` objects. These"] #[doc = " `ReadStream` objects are referred to as the primary and the residual"] #[doc = " streams of the split. The original `ReadStream` can still be read from in"] #[doc = " the same manner as before. Both of the returned `ReadStream` objects can"] #[doc = " also be read from, and the rows returned by both child streams will be"] #[doc = " the same as the rows read from the original stream."] #[doc = ""] #[doc = " Moreover, the two child streams will be allocated back-to-back in the"] #[doc = " original `ReadStream`. Concretely, it is guaranteed that for streams"] #[doc = " original, primary, and residual, that original[0-j] = primary[0-j] and"] #[doc = " original[j-n] = residual[0-m] once the streams have been read to"] #[doc = " completion."] pub async fn split_read_stream( &mut self, request: impl tonic::IntoRequest<super::SplitReadStreamRequest>, ) -> Result<tonic::Response<super::SplitReadStreamResponse>, tonic::Status> { self.inner.ready().await.map_err(|e| { tonic::Status::new( tonic::Code::Unknown, format!("Service was not ready: {}", e.into()), ) })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.bigquery.storage.v1beta2.BigQueryRead/SplitReadStream", ); self.inner.unary(request.into_request(), path, codec).await } } impl<T: Clone> Clone for BigQueryReadClient<T> { fn clone(&self) -> Self { Self { inner: self.inner.clone(), } } } impl<T> std::fmt::Debug for BigQueryReadClient<T> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "BigQueryReadClient {{ ... }}") } } }
47.179426
103
0.633589
1ebb52648980497aab6d777e5c795449e9993042
5,303
// This file was generated by gir (https://github.com/gtk-rs/gir) // from gir-files (https://github.com/gtk-rs/gir-files) // DO NOT EDIT use crate::Layout; use crate::LayoutLine; use crate::LayoutRun; use crate::Rectangle; use glib::translate::*; use std::mem; glib::wrapper! { #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct LayoutIter(Boxed<ffi::PangoLayoutIter>); match fn { copy => |ptr| ffi::pango_layout_iter_copy(mut_override(ptr)), free => |ptr| ffi::pango_layout_iter_free(ptr), get_type => || ffi::pango_layout_iter_get_type(), } } impl LayoutIter { pub fn at_last_line(&mut self) -> bool { unsafe { from_glib(ffi::pango_layout_iter_at_last_line( self.to_glib_none_mut().0, )) } } pub fn get_baseline(&mut self) -> i32 { unsafe { ffi::pango_layout_iter_get_baseline(self.to_glib_none_mut().0) } } pub fn get_char_extents(&mut self) -> Rectangle { unsafe { let mut logical_rect = Rectangle::uninitialized(); ffi::pango_layout_iter_get_char_extents( self.to_glib_none_mut().0, logical_rect.to_glib_none_mut().0, ); logical_rect } } pub fn get_cluster_extents(&mut self) -> (Rectangle, Rectangle) { unsafe { let mut ink_rect = Rectangle::uninitialized(); let mut logical_rect = Rectangle::uninitialized(); ffi::pango_layout_iter_get_cluster_extents( self.to_glib_none_mut().0, ink_rect.to_glib_none_mut().0, logical_rect.to_glib_none_mut().0, ); (ink_rect, logical_rect) } } pub fn get_index(&mut self) -> i32 { unsafe { ffi::pango_layout_iter_get_index(self.to_glib_none_mut().0) } } pub fn get_layout(&mut self) -> Option<Layout> { unsafe { from_glib_none(ffi::pango_layout_iter_get_layout(self.to_glib_none_mut().0)) } } pub fn get_layout_extents(&mut self) -> (Rectangle, Rectangle) { unsafe { let mut ink_rect = Rectangle::uninitialized(); let mut logical_rect = Rectangle::uninitialized(); ffi::pango_layout_iter_get_layout_extents( self.to_glib_none_mut().0, ink_rect.to_glib_none_mut().0, logical_rect.to_glib_none_mut().0, ); (ink_rect, logical_rect) } } pub fn 
get_line(&mut self) -> Option<LayoutLine> { unsafe { from_glib_none(ffi::pango_layout_iter_get_line(self.to_glib_none_mut().0)) } } pub fn get_line_extents(&mut self) -> (Rectangle, Rectangle) { unsafe { let mut ink_rect = Rectangle::uninitialized(); let mut logical_rect = Rectangle::uninitialized(); ffi::pango_layout_iter_get_line_extents( self.to_glib_none_mut().0, ink_rect.to_glib_none_mut().0, logical_rect.to_glib_none_mut().0, ); (ink_rect, logical_rect) } } pub fn get_line_readonly(&mut self) -> Option<LayoutLine> { unsafe { from_glib_none(ffi::pango_layout_iter_get_line_readonly( self.to_glib_none_mut().0, )) } } pub fn get_line_yrange(&mut self) -> (i32, i32) { unsafe { let mut y0_ = mem::MaybeUninit::uninit(); let mut y1_ = mem::MaybeUninit::uninit(); ffi::pango_layout_iter_get_line_yrange( self.to_glib_none_mut().0, y0_.as_mut_ptr(), y1_.as_mut_ptr(), ); let y0_ = y0_.assume_init(); let y1_ = y1_.assume_init(); (y0_, y1_) } } pub fn get_run(&mut self) -> Option<LayoutRun> { unsafe { from_glib_none(ffi::pango_layout_iter_get_run(self.to_glib_none_mut().0)) } } pub fn get_run_extents(&mut self) -> (Rectangle, Rectangle) { unsafe { let mut ink_rect = Rectangle::uninitialized(); let mut logical_rect = Rectangle::uninitialized(); ffi::pango_layout_iter_get_run_extents( self.to_glib_none_mut().0, ink_rect.to_glib_none_mut().0, logical_rect.to_glib_none_mut().0, ); (ink_rect, logical_rect) } } pub fn get_run_readonly(&mut self) -> Option<LayoutRun> { unsafe { from_glib_none(ffi::pango_layout_iter_get_run_readonly( self.to_glib_none_mut().0, )) } } pub fn next_char(&mut self) -> bool { unsafe { from_glib(ffi::pango_layout_iter_next_char(self.to_glib_none_mut().0)) } } pub fn next_cluster(&mut self) -> bool { unsafe { from_glib(ffi::pango_layout_iter_next_cluster( self.to_glib_none_mut().0, )) } } pub fn next_line(&mut self) -> bool { unsafe { from_glib(ffi::pango_layout_iter_next_line(self.to_glib_none_mut().0)) } } pub fn next_run(&mut self) -> bool { unsafe { 
from_glib(ffi::pango_layout_iter_next_run(self.to_glib_none_mut().0)) } } }
31.945783
95
0.578352
26b513a73a693fd0e4642f542d9414aaab8c505a
5,227
//! Provides userspace access to LEDs on a board. //! //! This allows for much more cross platform controlling of LEDs without having //! to know which of the GPIO pins exposed across the syscall interface are //! LEDs. //! //! This capsule takes an array of pins and the polarity of the LED (active high //! or active low). This allows the board to configure how the underlying GPIO //! must be controlled to turn on and off LEDs, such that the syscall driver //! interface can be agnostic to the LED polarity. //! //! Usage //! ----- //! //! ```rust //! let led_pins = static_init!( //! [(&'static sam4l::gpio::GPIOPin, capsules::led::ActivationMode); 3], //! [(&sam4l::gpio::PA[13], capsules::led::ActivationMode::ActiveLow), // Red //! (&sam4l::gpio::PA[15], capsules::led::ActivationMode::ActiveLow), // Green //! (&sam4l::gpio::PA[14], capsules::led::ActivationMode::ActiveLow)]); // Blue //! let led = static_init!( //! capsules::led::LED<'static, sam4l::gpio::GPIOPin>, //! capsules::led::LED::new(led_pins)); //! ``` //! //! Syscall Interface //! ----------------- //! //! - Stability: 2 - Stable //! //! ### Command //! //! All LED operations are synchronous, so this capsule only uses the `command` //! syscall. //! //! #### `command_num` //! //! - `0`: Return the number of LEDs on this platform. //! - `data`: Unused. //! - Return: Number of LEDs. //! - `1`: Turn the LED on. //! - `data`: The index of the LED. Starts at 0. //! - Return: `SUCCESS` if the LED index was valid, `EINVAL` otherwise. //! - `2`: Turn the LED off. //! - `data`: The index of the LED. Starts at 0. //! - Return: `SUCCESS` if the LED index was valid, `EINVAL` otherwise. //! - `3`: Toggle the on/off state of the LED. //! - `data`: The index of the LED. Starts at 0. //! - Return: `SUCCESS` if the LED index was valid, `EINVAL` otherwise. use kernel::hil; use kernel::{AppId, Driver, ReturnCode}; /// Syscall driver number. 
pub const DRIVER_NUM: usize = 0x00000002; /// Whether the LEDs are active high or active low on this platform. #[derive(Clone, Copy)] pub enum ActivationMode { ActiveHigh, ActiveLow, } /// Holds the array of GPIO pins attached to the LEDs and implements a `Driver` /// interface to control them. pub struct LED<'a, G: hil::gpio::Pin + 'a> { pins_init: &'a [(&'a G, ActivationMode)], } impl<'a, G: hil::gpio::Pin + hil::gpio::PinCtl> LED<'a, G> { pub fn new(pins_init: &'a [(&'a G, ActivationMode)]) -> LED<'a, G> { // Make all pins output and off for &(pin, mode) in pins_init.as_ref().iter() { pin.make_output(); match mode { ActivationMode::ActiveHigh => pin.clear(), ActivationMode::ActiveLow => pin.set(), } } LED { pins_init: pins_init, } } } impl<'a, G: hil::gpio::Pin + hil::gpio::PinCtl> Driver for LED<'a, G> { /// Control the LEDs. /// /// ### `command_num` /// /// - `0`: Returns the number of LEDs on the board. This will always be 0 or /// greater, and therefore also allows for checking for this driver. /// - `1`: Turn the LED at index specified by `data` on. Returns `EINVAL` if /// the LED index is not valid. /// - `2`: Turn the LED at index specified by `data` off. Returns `EINVAL` /// if the LED index is not valid. /// - `3`: Toggle the LED at index specified by `data` on or off. Returns /// `EINVAL` if the LED index is not valid. 
fn command(&self, command_num: usize, data: usize, _: usize, _: AppId) -> ReturnCode { let pins_init = self.pins_init.as_ref(); match command_num { // get number of LEDs 0 => ReturnCode::SuccessWithValue { value: pins_init.len() as usize, }, // on 1 => { if data >= pins_init.len() { ReturnCode::EINVAL /* impossible pin */ } else { let (pin, mode) = pins_init[data]; match mode { ActivationMode::ActiveHigh => pin.set(), ActivationMode::ActiveLow => pin.clear(), } ReturnCode::SUCCESS } } // off 2 => { if data >= pins_init.len() { ReturnCode::EINVAL /* impossible pin */ } else { let (pin, mode) = pins_init[data]; match mode { ActivationMode::ActiveHigh => pin.clear(), ActivationMode::ActiveLow => pin.set(), } ReturnCode::SUCCESS } } // toggle 3 => { if data >= pins_init.len() { ReturnCode::EINVAL /* impossible pin */ } else { let (pin, _) = pins_init[data]; pin.toggle(); ReturnCode::SUCCESS } } // default _ => ReturnCode::ENOSUPPORT, } } }
34.388158
90
0.527262
0114d83d56bd4f2f8d3fd09871b0f0f102a84236
1,346
/* * * * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) * * The version of the OpenAPI document: 1.0.0 * * Generated by: https://openapi-generator.tech */ #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct LolCareerStatsChampionQueueStatsResponse { #[serde(rename = "championId", skip_serializing_if = "Option::is_none")] pub champion_id: Option<i32>, #[serde(rename = "position", skip_serializing_if = "Option::is_none")] pub position: Option<crate::models::LolCareerStatsSummonersRiftPosition>, #[serde(rename = "queueType", skip_serializing_if = "Option::is_none")] pub queue_type: Option<crate::models::LolCareerStatsCareerStatsQueueType>, #[serde(rename = "rankTier", skip_serializing_if = "Option::is_none")] pub rank_tier: Option<crate::models::LolCareerStatsRankedTier>, #[serde(rename = "stats", skip_serializing_if = "Option::is_none")] pub stats: Option<serde_json::Value>, } impl LolCareerStatsChampionQueueStatsResponse { pub fn new() -> LolCareerStatsChampionQueueStatsResponse { LolCareerStatsChampionQueueStatsResponse { champion_id: None, position: None, queue_type: None, rank_tier: None, stats: None, } } }
32.829268
109
0.694651
23ba62fb2c2f1695883c1f4cf40faa39655b2edd
2,643
// Copyright 2021 Datafuse Labs. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // for settings getter setter macro_rules! apply_getter_setter_settings { ($(($NAME: expr, $TYPE: tt, $VALUE:expr, $DESC: expr)),* ) => { $( paste::paste!{ pub fn [< get_ $NAME >](&self) -> Result<$TYPE> { self.inner.[<try_get_ $TYPE:lower>]($NAME) } pub fn [< set_ $NAME >](&self, value: $TYPE) -> Result<()> { self.inner.[<try_update_ $TYPE:lower>]($NAME, value) } } )* }; } macro_rules! apply_initial_settings { ($(($NAME: expr, $TYPE: tt, $VALUE:expr, $DESC: expr)),* ) => { pub fn initial_settings(&self) -> Result<()> { paste::paste! { $( self.inner.[<try_set_ $TYPE:lower>]($NAME, $VALUE, $DESC)?; )* } Ok(()) } }; } macro_rules! apply_parse_value { ($VALUE: expr, String) => { $VALUE }; ($VALUE: expr, $TYPE: tt) => { $VALUE.parse::<$TYPE>().map_err(ErrorCode::from)? }; } macro_rules! apply_update_settings { ($(($NAME: expr, $TYPE: tt, $VALUE:expr, $DESC: expr)),* ) => { pub fn update_settings(&self, key: &str, value: String) -> Result<()> { paste::paste! { $( if (key.to_lowercase().as_str() == $NAME) { let v = apply_parse_value!{value, $TYPE}; return self.inner.[<try_update_ $TYPE:lower>]($NAME, v); } )* } Err(ErrorCode::UnknownVariable( format!("Unknown variable: {:?}", key) )) } }; } macro_rules! apply_macros { ($MACRO_A: ident, $MACRO_B: ident, $MACRO_C: ident, $(($NAME: expr, $TYPE: tt, $VALUE:expr, $DESC: expr)),* ) => { $MACRO_A! { $( ($NAME, $TYPE, $VALUE, $DESC) ), * } $MACRO_B! 
{ $( ($NAME, $TYPE, $VALUE, $DESC) ), * } $MACRO_C! { $( ($NAME, $TYPE, $VALUE, $DESC) ), * } }; }
32.62963
118
0.508513
bfb100152d91922e43fcdef8b7ab18388243bd06
3,383
/* automatically generated by rust-bindgen */ #![allow(dead_code, non_snake_case, non_camel_case_types, non_upper_case_globals)] #[repr(C)] pub struct __BindgenUnionField<T>(::std::marker::PhantomData<T>); impl <T> __BindgenUnionField<T> { #[inline] pub fn new() -> Self { __BindgenUnionField(::std::marker::PhantomData) } #[inline] pub unsafe fn as_ref(&self) -> &T { ::std::mem::transmute(self) } #[inline] pub unsafe fn as_mut(&mut self) -> &mut T { ::std::mem::transmute(self) } } impl <T> ::std::default::Default for __BindgenUnionField<T> { #[inline] fn default() -> Self { Self::new() } } impl <T> ::std::clone::Clone for __BindgenUnionField<T> { #[inline] fn clone(&self) -> Self { Self::new() } } impl <T> ::std::marker::Copy for __BindgenUnionField<T> { } impl <T> ::std::fmt::Debug for __BindgenUnionField<T> { fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { fmt.write_str("__BindgenUnionField") } } #[repr(C)] #[derive(Debug, Default, Copy)] pub struct s { pub u: s__bindgen_ty_1, } #[repr(C)] #[derive(Debug, Default, Copy)] pub struct s__bindgen_ty_1 { pub field: __BindgenUnionField<s__bindgen_ty_1_inner>, pub bindgen_union_field: u32, } #[repr(C)] #[derive(Debug, Default, Copy)] pub struct s__bindgen_ty_1_inner { pub b: ::std::os::raw::c_int, } #[test] fn bindgen_test_layout_s__bindgen_ty_1_inner() { assert_eq!(::std::mem::size_of::<s__bindgen_ty_1_inner>() , 4usize , concat ! ( "Size of: " , stringify ! ( s__bindgen_ty_1_inner ) )); assert_eq! (::std::mem::align_of::<s__bindgen_ty_1_inner>() , 4usize , concat ! ( "Alignment of " , stringify ! ( s__bindgen_ty_1_inner ) )); assert_eq! (unsafe { & ( * ( 0 as * const s__bindgen_ty_1_inner ) ) . b as * const _ as usize } , 0usize , concat ! ( "Alignment of field: " , stringify ! ( s__bindgen_ty_1_inner ) , "::" , stringify ! 
( b ) )); } impl Clone for s__bindgen_ty_1_inner { fn clone(&self) -> Self { *self } } #[test] fn bindgen_test_layout_s__bindgen_ty_1() { assert_eq!(::std::mem::size_of::<s__bindgen_ty_1>() , 4usize , concat ! ( "Size of: " , stringify ! ( s__bindgen_ty_1 ) )); assert_eq! (::std::mem::align_of::<s__bindgen_ty_1>() , 4usize , concat ! ( "Alignment of " , stringify ! ( s__bindgen_ty_1 ) )); assert_eq! (unsafe { & ( * ( 0 as * const s__bindgen_ty_1 ) ) . field as * const _ as usize } , 0usize , concat ! ( "Alignment of field: " , stringify ! ( s__bindgen_ty_1 ) , "::" , stringify ! ( field ) )); } impl Clone for s__bindgen_ty_1 { fn clone(&self) -> Self { *self } } #[test] fn bindgen_test_layout_s() { assert_eq!(::std::mem::size_of::<s>() , 4usize , concat ! ( "Size of: " , stringify ! ( s ) )); assert_eq! (::std::mem::align_of::<s>() , 4usize , concat ! ( "Alignment of " , stringify ! ( s ) )); assert_eq! (unsafe { & ( * ( 0 as * const s ) ) . u as * const _ as usize } , 0usize , concat ! ( "Alignment of field: " , stringify ! ( s ) , "::" , stringify ! ( u ) )); } impl Clone for s { fn clone(&self) -> Self { *self } }
36.376344
82
0.571682
38b5f799a32584e7db35a1be6d4aad6f9da1eaeb
18,436
// Copyright 2019 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // https://android.googlesource.com/platform/system/core/+/7b444f0/libsparse/sparse_format.h use std::collections::BTreeMap; use std::fmt::{self, Display}; use std::fs::File; use std::io::{self, ErrorKind, Read, Seek, SeekFrom}; use std::mem; use crate::DiskGetLen; use base::{ AsRawDescriptor, FileAllocate, FileReadWriteAtVolatile, FileSetLen, FileSync, PunchHole, RawDescriptor, WriteZeroesAt, }; use data_model::{DataInit, Le16, Le32, VolatileSlice}; use remain::sorted; #[sorted] #[derive(Debug)] pub enum Error { InvalidMagicHeader, InvalidSpecification(String), ReadSpecificationError(io::Error), } impl Display for Error { #[remain::check] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::Error::*; #[sorted] match self { InvalidMagicHeader => write!(f, "invalid magic header for android sparse format"), InvalidSpecification(s) => write!(f, "invalid specification: \"{}\"", s), ReadSpecificationError(e) => write!(f, "failed to read specification: \"{}\"", e), } } } pub type Result<T> = std::result::Result<T, Error>; pub const SPARSE_HEADER_MAGIC: u32 = 0xed26ff3a; const MAJOR_VERSION: u16 = 1; #[repr(C)] #[derive(Clone, Copy, Debug)] struct SparseHeader { magic: Le32, /* SPARSE_HEADER_MAGIC */ major_version: Le16, /* (0x1) - reject images with higher major versions */ minor_version: Le16, /* (0x0) - allow images with higer minor versions */ file_hdr_sz: Le16, /* 28 bytes for first revision of the file format */ chunk_hdr_size: Le16, /* 12 bytes for first revision of the file format */ blk_sz: Le32, /* block size in bytes, must be a multiple of 4 (4096) */ total_blks: Le32, /* total blocks in the non-sparse output image */ total_chunks: Le32, /* total chunks in the sparse input image */ image_checksum: Le32, /* CRC32 checksum of the original data, counting "don't care" */ /* as 0. 
Standard 802.3 polynomial, use a Public Domain */ /* table implementation */ } unsafe impl DataInit for SparseHeader {} const CHUNK_TYPE_RAW: u16 = 0xCAC1; const CHUNK_TYPE_FILL: u16 = 0xCAC2; const CHUNK_TYPE_DONT_CARE: u16 = 0xCAC3; const CHUNK_TYPE_CRC32: u16 = 0xCAC4; #[repr(C)] #[derive(Clone, Copy, Debug)] struct ChunkHeader { chunk_type: Le16, /* 0xCAC1 -> raw; 0xCAC2 -> fill; 0xCAC3 -> don't care */ reserved1: u16, chunk_sz: Le32, /* in blocks in output image */ total_sz: Le32, /* in bytes of chunk input file including chunk header and data */ } unsafe impl DataInit for ChunkHeader {} #[derive(Clone, Debug, PartialEq, Eq)] enum Chunk { Raw(u64), // Offset into the file Fill(Vec<u8>), DontCare, } #[derive(Clone, Debug, PartialEq, Eq)] struct ChunkWithSize { chunk: Chunk, expanded_size: u64, } /* Following a Raw or Fill or CRC32 chunk is data. * For a Raw chunk, it's the data in chunk_sz * blk_sz. * For a Fill chunk, it's 4 bytes of the fill data. * For a CRC32 chunk, it's 4 bytes of CRC32 */ #[derive(Debug)] pub struct AndroidSparse { file: File, total_size: u64, chunks: BTreeMap<u64, ChunkWithSize>, } fn parse_chunk<T: Read + Seek>( mut input: &mut T, chunk_hdr_size: u64, blk_sz: u64, ) -> Result<Option<ChunkWithSize>> { let current_offset = input .seek(SeekFrom::Current(0)) .map_err(Error::ReadSpecificationError)?; let chunk_header = ChunkHeader::from_reader(&mut input).map_err(Error::ReadSpecificationError)?; let chunk = match chunk_header.chunk_type.to_native() { CHUNK_TYPE_RAW => { input .seek(SeekFrom::Current( chunk_header.total_sz.to_native() as i64 - chunk_hdr_size as i64, )) .map_err(Error::ReadSpecificationError)?; Chunk::Raw(current_offset + chunk_hdr_size as u64) } CHUNK_TYPE_FILL => { if chunk_header.total_sz == chunk_hdr_size as u32 { return Err(Error::InvalidSpecification(format!( "Fill chunk did not have any data to fill" ))); } let fill_size = chunk_header.total_sz.to_native() as u64 - chunk_hdr_size as u64; let mut fill_bytes = vec![0u8; 
fill_size as usize]; input .read_exact(&mut fill_bytes) .map_err(Error::ReadSpecificationError)?; Chunk::Fill(fill_bytes) } CHUNK_TYPE_DONT_CARE => Chunk::DontCare, CHUNK_TYPE_CRC32 => return Ok(None), // TODO(schuffelen): Validate crc32s in input unknown_type => { return Err(Error::InvalidSpecification(format!( "Chunk had invalid type, was {:x}", unknown_type ))) } }; let expanded_size = chunk_header.chunk_sz.to_native() as u64 * blk_sz; Ok(Some(ChunkWithSize { chunk, expanded_size, })) } impl AndroidSparse { pub fn from_file(mut file: File) -> Result<AndroidSparse> { file.seek(SeekFrom::Start(0)) .map_err(Error::ReadSpecificationError)?; let sparse_header = SparseHeader::from_reader(&mut file).map_err(Error::ReadSpecificationError)?; if sparse_header.magic != SPARSE_HEADER_MAGIC { return Err(Error::InvalidSpecification(format!( "Header did not match magic constant. Expected {:x}, was {:x}", SPARSE_HEADER_MAGIC, sparse_header.magic.to_native() ))); } else if sparse_header.major_version != MAJOR_VERSION { return Err(Error::InvalidSpecification(format!( "Header major version did not match. 
Expected {}, was {}", MAJOR_VERSION, sparse_header.major_version.to_native(), ))); } else if (sparse_header.chunk_hdr_size.to_native() as usize) < mem::size_of::<ChunkHeader>() { return Err(Error::InvalidSpecification(format!( "Chunk header size does not fit chunk header struct, expected >={}, was {}", sparse_header.chunk_hdr_size.to_native(), mem::size_of::<ChunkHeader>() ))); } let header_size = sparse_header.chunk_hdr_size.to_native() as u64; let block_size = sparse_header.blk_sz.to_native() as u64; let chunks = (0..sparse_header.total_chunks.to_native()) .filter_map(|_| parse_chunk(&mut file, header_size, block_size).transpose()) .collect::<Result<Vec<ChunkWithSize>>>()?; let total_size = sparse_header.total_blks.to_native() as u64 * sparse_header.blk_sz.to_native() as u64; AndroidSparse::from_parts(file, total_size, chunks) } fn from_parts(file: File, size: u64, chunks: Vec<ChunkWithSize>) -> Result<AndroidSparse> { let mut chunks_map: BTreeMap<u64, ChunkWithSize> = BTreeMap::new(); let mut expanded_location: u64 = 0; for chunk_with_size in chunks { let size = chunk_with_size.expanded_size; if chunks_map .insert(expanded_location, chunk_with_size) .is_some() { return Err(Error::InvalidSpecification(format!( "Two chunks were at {}", expanded_location ))); } expanded_location += size; } let image = AndroidSparse { file, total_size: size, chunks: chunks_map, }; let calculated_len = image.get_len().map_err(Error::ReadSpecificationError)?; if calculated_len != size { return Err(Error::InvalidSpecification(format!( "Header promised size {}, chunks added up to {}", size, calculated_len ))); } Ok(image) } } impl DiskGetLen for AndroidSparse { fn get_len(&self) -> io::Result<u64> { Ok(self.total_size) } } impl FileSetLen for AndroidSparse { fn set_len(&self, _len: u64) -> io::Result<()> { Err(io::Error::new( ErrorKind::PermissionDenied, "unsupported operation", )) } } impl FileSync for AndroidSparse { fn fsync(&mut self) -> io::Result<()> { Ok(()) } } impl PunchHole 
for AndroidSparse { fn punch_hole(&mut self, _offset: u64, _length: u64) -> io::Result<()> { Err(io::Error::new( ErrorKind::PermissionDenied, "unsupported operation", )) } } impl WriteZeroesAt for AndroidSparse { fn write_zeroes_at(&mut self, _offset: u64, _length: usize) -> io::Result<usize> { Err(io::Error::new( ErrorKind::PermissionDenied, "unsupported operation", )) } } impl AsRawDescriptor for AndroidSparse { fn as_raw_descriptor(&self) -> RawDescriptor { self.file.as_raw_descriptor() } } impl FileAllocate for AndroidSparse { fn allocate(&mut self, _offset: u64, _length: u64) -> io::Result<()> { Err(io::Error::new( ErrorKind::PermissionDenied, "unsupported operation", )) } } // Performs reads up to the chunk boundary. impl FileReadWriteAtVolatile for AndroidSparse { fn read_at_volatile(&mut self, slice: VolatileSlice, offset: u64) -> io::Result<usize> { let found_chunk = self.chunks.range(..=offset).next_back(); let ( chunk_start, ChunkWithSize { chunk, expanded_size, }, ) = found_chunk.ok_or(io::Error::new( ErrorKind::UnexpectedEof, format!("no chunk for offset {}", offset), ))?; let chunk_offset = offset - chunk_start; let chunk_size = *expanded_size; let subslice = if chunk_offset + (slice.size() as u64) > chunk_size { slice .sub_slice(0, (chunk_size - chunk_offset) as usize) .map_err(|e| io::Error::new(ErrorKind::InvalidData, format!("{:?}", e)))? 
} else { slice }; match chunk { Chunk::DontCare => { subslice.write_bytes(0); Ok(subslice.size() as usize) } Chunk::Raw(file_offset) => self .file .read_at_volatile(subslice, *file_offset + chunk_offset), Chunk::Fill(fill_bytes) => { let chunk_offset_mod = chunk_offset % fill_bytes.len() as u64; let filled_memory: Vec<u8> = fill_bytes .iter() .cloned() .cycle() .skip(chunk_offset_mod as usize) .take(subslice.size() as usize) .collect(); subslice.copy_from(&filled_memory); Ok(subslice.size() as usize) } } } fn write_at_volatile(&mut self, _slice: VolatileSlice, _offset: u64) -> io::Result<usize> { Err(io::Error::new( ErrorKind::PermissionDenied, "unsupported operation", )) } } #[cfg(test)] mod tests { use super::*; use std::io::{Cursor, Write}; use tempfile::tempfile; const CHUNK_SIZE: usize = mem::size_of::<ChunkHeader>(); #[test] fn parse_raw() { let chunk_raw = ChunkHeader { chunk_type: CHUNK_TYPE_RAW.into(), reserved1: 0, chunk_sz: 1.into(), total_sz: (CHUNK_SIZE as u32 + 123).into(), }; let header_bytes = chunk_raw.as_slice(); let mut chunk_bytes: Vec<u8> = Vec::new(); chunk_bytes.extend_from_slice(header_bytes); chunk_bytes.extend_from_slice(&[0u8; 123]); let mut chunk_cursor = Cursor::new(chunk_bytes); let chunk = parse_chunk(&mut chunk_cursor, CHUNK_SIZE as u64, 123) .expect("Failed to parse") .expect("Failed to determine chunk type"); let expected_chunk = ChunkWithSize { chunk: Chunk::Raw(CHUNK_SIZE as u64), expanded_size: 123, }; assert_eq!(expected_chunk, chunk); } #[test] fn parse_dont_care() { let chunk_raw = ChunkHeader { chunk_type: CHUNK_TYPE_DONT_CARE.into(), reserved1: 0, chunk_sz: 100.into(), total_sz: (CHUNK_SIZE as u32).into(), }; let header_bytes = chunk_raw.as_slice(); let mut chunk_cursor = Cursor::new(header_bytes); let chunk = parse_chunk(&mut chunk_cursor, CHUNK_SIZE as u64, 123) .expect("Failed to parse") .expect("Failed to determine chunk type"); let expected_chunk = ChunkWithSize { chunk: Chunk::DontCare, expanded_size: 12300, }; 
assert_eq!(expected_chunk, chunk); } #[test] fn parse_fill() { let chunk_raw = ChunkHeader { chunk_type: CHUNK_TYPE_FILL.into(), reserved1: 0, chunk_sz: 100.into(), total_sz: (CHUNK_SIZE as u32 + 4).into(), }; let header_bytes = chunk_raw.as_slice(); let mut chunk_bytes: Vec<u8> = Vec::new(); chunk_bytes.extend_from_slice(header_bytes); chunk_bytes.extend_from_slice(&[123u8; 4]); let mut chunk_cursor = Cursor::new(chunk_bytes); let chunk = parse_chunk(&mut chunk_cursor, CHUNK_SIZE as u64, 123) .expect("Failed to parse") .expect("Failed to determine chunk type"); let expected_chunk = ChunkWithSize { chunk: Chunk::Fill(vec![123, 123, 123, 123]), expanded_size: 12300, }; assert_eq!(expected_chunk, chunk); } #[test] fn parse_crc32() { let chunk_raw = ChunkHeader { chunk_type: CHUNK_TYPE_CRC32.into(), reserved1: 0, chunk_sz: 0.into(), total_sz: (CHUNK_SIZE as u32 + 4).into(), }; let header_bytes = chunk_raw.as_slice(); let mut chunk_bytes: Vec<u8> = Vec::new(); chunk_bytes.extend_from_slice(header_bytes); chunk_bytes.extend_from_slice(&[123u8; 4]); let mut chunk_cursor = Cursor::new(chunk_bytes); let chunk = parse_chunk(&mut chunk_cursor, CHUNK_SIZE as u64, 123).expect("Failed to parse"); assert_eq!(None, chunk); } fn test_image(chunks: Vec<ChunkWithSize>) -> AndroidSparse { let file = tempfile().expect("failed to create tempfile"); let size = chunks.iter().map(|x| x.expanded_size).sum(); AndroidSparse::from_parts(file, size, chunks).expect("Could not create image") } #[test] fn read_dontcare() { let chunks = vec![ChunkWithSize { chunk: Chunk::DontCare, expanded_size: 100, }]; let mut image = test_image(chunks); let mut input_memory = [55u8; 100]; image .read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 0) .expect("Could not read"); let expected = [0u8; 100]; assert_eq!(&expected[..], &input_memory[..]); } #[test] fn read_fill_simple() { let chunks = vec![ChunkWithSize { chunk: Chunk::Fill(vec![10, 20]), expanded_size: 8, }]; let mut image = 
test_image(chunks); let mut input_memory = [55u8; 8]; image .read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 0) .expect("Could not read"); let expected = [10, 20, 10, 20, 10, 20, 10, 20]; assert_eq!(&expected[..], &input_memory[..]); } #[test] fn read_fill_edges() { let chunks = vec![ChunkWithSize { chunk: Chunk::Fill(vec![10, 20, 30]), expanded_size: 8, }]; let mut image = test_image(chunks); let mut input_memory = [55u8; 6]; image .read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 1) .expect("Could not read"); let expected = [20, 30, 10, 20, 30, 10]; assert_eq!(&expected[..], &input_memory[..]); } #[test] fn read_fill_offset_edges() { let chunks = vec![ ChunkWithSize { chunk: Chunk::DontCare, expanded_size: 20, }, ChunkWithSize { chunk: Chunk::Fill(vec![10, 20, 30]), expanded_size: 100, }, ]; let mut image = test_image(chunks); let mut input_memory = [55u8; 7]; image .read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 39) .expect("Could not read"); let expected = [20, 30, 10, 20, 30, 10, 20]; assert_eq!(&expected[..], &input_memory[..]); } #[test] fn read_raw() { let chunks = vec![ChunkWithSize { chunk: Chunk::Raw(0), expanded_size: 100, }]; let mut image = test_image(chunks); write!(image.file, "hello").expect("Failed to write into internal file"); let mut input_memory = [55u8; 5]; image .read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 0) .expect("Could not read"); let expected = [104, 101, 108, 108, 111]; assert_eq!(&expected[..], &input_memory[..]); } #[test] fn read_two_fills() { let chunks = vec![ ChunkWithSize { chunk: Chunk::Fill(vec![10, 20]), expanded_size: 4, }, ChunkWithSize { chunk: Chunk::Fill(vec![30, 40]), expanded_size: 4, }, ]; let mut image = test_image(chunks); let mut input_memory = [55u8; 8]; image .read_exact_at_volatile(VolatileSlice::new(&mut input_memory[..]), 0) .expect("Could not read"); let expected = [10, 20, 10, 20, 30, 40, 30, 40]; assert_eq!(&expected[..], 
&input_memory[..]); } }
34.654135
98
0.571816
ac5c370525d20a23bb7517369d6a813b97195a7a
1,127
#![allow(unused_imports, unused_labels, unused_variables, dead_code)] #[macro_use] extern crate log; extern crate resolver; extern crate env_logger; #[macro_use] extern crate tokio; #[macro_use] extern crate clap; use resolver::name_server::NameServer; use resolver::stub::Service; use resolver::config::Config; use tokio::net::UdpSocket; use tokio::net::TcpListener; use std::io; use std::env; async fn run() -> Result<(), io::Error> { try_join!( // run_udp_server("127.0.0.1:8000"), // run_tcp_server("127.0.0.1:3001") ).map(|_| ()) } fn main() -> Result<(), Box<dyn std::error::Error>> { std::env::set_var("RUST_LOG", "resolver=trace,named=trace"); env_logger::init(); println!("Example: $ dig @127.0.0.1 video.qq.com -p 3000 $ dig @127.0.0.1 www.mypce.com -p 3000 $ dig @127.0.0.1 www.gov.cn -p 3000 AAAA "); let mut rt = tokio::runtime::Runtime::new()?; println!("{:?}", "udp+tcp://127.0.0.1?domain=dns.google&tcp_port=50".parse::<NameServer>() ); let config = Config::default(); let service = rt.block_on(Service::new(config))?; Ok(()) }
21.673077
97
0.634428
792e9884dc580891c2c1ccbc2de169fffeb03899
1,703
//! Functionality for sending requests to Slack. use std::error; /// Functionality for sending authenticated and unauthenticated requests to Slack via HTTP. /// /// If you do not have a custom client to integrate with and just want to send requests, use /// the [`default_client()`] function to get a simple request sender. pub trait SlackWebRequestSender { type Error: error::Error; /// Make an API call to Slack. Takes a map of parameters that get appended to the request as query /// params. fn send(&self, method: &str, params: &[(&str, &str)]) -> Result<String, Self::Error>; } #[cfg(feature = "reqwest")] mod reqwest_support { use reqwest_ as reqwest; pub use self::reqwest::Error; use super::SlackWebRequestSender; impl SlackWebRequestSender for reqwest::blocking::Client { type Error = reqwest::Error; fn send(&self, method_url: &str, params: &[(&str, &str)]) -> Result<String, Self::Error> { let mut url = reqwest::Url::parse(method_url).expect("Unable to parse url"); url.query_pairs_mut().extend_pairs(params); self.get(url).send()?.text() } } /// Provides a default `reqwest` client to give to the API functions to send requests. /// /// # Examples /// /// ``` /// # let token = "some_token"; /// let client = slack_api::requests::default_client().unwrap(); /// let response = slack_api::channels::list(&client, &token, &Default::default()); /// ``` pub fn default_client() -> Result<reqwest::blocking::Client, reqwest::Error> { Ok(reqwest::blocking::Client::new()) } } #[cfg(feature = "reqwest")] pub use self::reqwest_support::*;
32.75
102
0.63946
1d2c0d5a5a010fbfedb4ed8c4d634887a7e571d1
1,286
use std::fs::File; use std::sync::atomic::{AtomicU32, Ordering}; use std::sync::{Arc, Weak}; use parking_lot::{Mutex, RawRwLock, RwLock}; use crate::memory_data_size::*; use crate::memory_fs::allocator::{AllocatedChunk, CHUNKS_ALLOCATOR}; use crate::memory_fs::file::internal::{FileChunk, MemoryFileInternal}; use crate::memory_fs::FILES_FLUSH_HASH_MAP; use parking_lot::lock_api::ArcRwLockWriteGuard; use std::panic::Location; use std::path::PathBuf; pub struct FlushableItem { pub underlying_file: Arc<(PathBuf, Mutex<File>)>, pub mode: FileFlushMode, } pub enum FileFlushMode { Append { chunk: ArcRwLockWriteGuard<RawRwLock, FileChunk>, }, WriteAt { buffer: AllocatedChunk, offset: u64, }, } // impl Drop for FlushableItem { // fn drop(&mut self) { // if Arc::strong_count(&self.underlying_file) == 1 { // unsafe { // FILES_FLUSH_HASH_MAP // .as_mut() // .unwrap() // .lock() // .entry(self.underlying_file.0.clone()) // .or_insert(Vec::new()) // .push(self.underlying_file.clone()) // } // } // // BUFFER_MANAGER.notify_drop() // } // }
27.361702
70
0.57776
db209dae21c0137ddeee2332a1cc7a57bbe62743
2,116
use failure::Fail; #[derive(Debug, Fail)] pub enum Error { #[fail(display = "Failed to access the data directory")] DataDirectoryAccessDenied, #[fail(display = "Another Soma instance is using the data directory")] DataDirectoryLockFailed, #[fail(display = "Failed to build docker image for a problem")] DockerBuildFailed, #[fail(display = "A repository with the same name already exists")] DuplicateRepository, #[fail(display = "Failed to detect filename from the path")] FileNameNotFound, #[fail( display = "The specified file does not exist, or you don't have enough permission to access it" )] FileUnreachable, #[fail(display = "Some entry in the manifest is invalid")] InvalidManifest, #[fail(display = "The provided repository does not contain 'soma.toml' or 'soma-list.toml'")] InvalidRepository, #[fail(display = "soma-list.toml contains a duplicate or inaccessible entry")] InvalidSomaList, #[fail( display = "The name doesn't satisfy docker name component rules, which allows lower case alphanumerics with non-boundary '_', '__', or (multiple) '-'(s)" )] InvalidName, #[fail(display = "The specified file's path contains unsupported characters")] InvalidUnicode, #[fail(display = "There is a container already running for the specified problem")] ProblemAlreadyRunning, #[fail(display = "The specified problem is not found")] ProblemNotFound, #[fail(display = "There is no container running for the specified problem")] ProblemNotRunning, #[fail(display = "The provided query returned multiple problems")] ProblemQueryAmbiguous, #[fail(display = "There is an image or an container from the repository")] RepositoryInUse, #[fail(display = "The specified repository is not found")] RepositoryNotFound, #[fail( display = "The repository contains changes that cannot be handled by update command; Please remove and add the repository manually" )] UnsupportedUpdate, } pub type Result<T> = std::result::Result<T, failure::Error>;
42.32
161
0.698015
e5e9ce8a190fe7bbd8a045f56a9b0ab72b7f9b35
464
mod coin_flip; mod control_flow_graph; mod shortest_path; pub use self::{coin_flip::*, control_flow_graph::*, shortest_path::*}; use strum::{self, EnumString, EnumVariantNames, IntoStaticStr}; pub trait ExplorationStrategy { fn choose_path(&self, branch1: u64, branch2: u64) -> u64; } #[derive(Debug, EnumString, EnumVariantNames, IntoStaticStr)] #[strum(serialize_all = "kebab_case")] pub enum ExplorationStrategyType { ShortestPaths, CoinFlip, }
24.421053
70
0.74569
f59e61fe5f673a6f49cd99cb69b1da1d1e345779
11,471
use std::collections::{HashMap, HashSet}; use failure::Error; use rstring_builder::StringBuilder; use text_reader::TextReader; use crate::types::*; pub fn token_group(grammars: &Vec<Box<TLGrammar>>) -> Result<Vec<TLTokenGroup>, Error> { let mut tokens : HashMap<String, TLTokenGroup> = HashMap::new(); let mut return_types: HashSet<String> = HashSet::new(); let mut super_types_to_types: HashMap<String, Vec<String>> = HashMap::new(); let mut token_group_type = TLTokenGroupType::Struct; for grammar in grammars { if grammar.is_group() { let group: TLGroup = grammar.to_group().expect("Impossible error"); let token_group = parse_token_group(&group, token_group_type.clone())?; if let (Some(return_type), TLTokenGroupType::Function) = (&token_group.blood, &token_group.type_) { if let Some(token) = tokens.get_mut(return_type) { (*token).is_return_type = true; } else { return_types.insert(return_type.clone()); } } if let (Some(super_type), TLTokenGroupType::Struct) = (&token_group.blood, &token_group.type_) { if let Some(child_types) = super_types_to_types.get_mut(super_type) { child_types.push(token_group.name.clone()); } else { super_types_to_types.insert(super_type.clone(), vec![token_group.name.clone()]); } } tokens.insert(token_group.name.clone(), token_group); } if grammar.is_paragraph() { let paragraph: TLParagraph = grammar.to_paragraph().expect("Impossible error"); match paragraph { TLParagraph::Functions { start, end } => { token_group_type = TLTokenGroupType::Function } } debug!("PARAGRAPH: {:?}", paragraph); } } for return_type in return_types.iter() { if let Some(token) = tokens.get_mut(return_type) { (*token).is_return_type = true; } else if let Some(child_tokens) = super_types_to_types.get(return_type) { match child_tokens.len() { 1 => { let token_name = child_tokens.first().unwrap(); if let Some(token_group) = tokens.get_mut(token_name) { (*token_group).is_return_type = true; } }, _ => debug!("Skipping candidate return type {} because it is a super type with 
multiple children.", return_type) } } } let mut arrs: Vec<(&String, &TLTokenGroup)> = tokens.iter().collect(); arrs.sort_by(|a, b| a.0.cmp(b.0)); let rets: Vec<TLTokenGroup> = arrs.into_iter() .map(|(_, token_group)| token_group.clone()) .collect(); // for x in &rets { // println!("{}", x.name); // } Ok(rets) } /// TLTokenGroup fn parse_token_group(group: &TLGroup, token_group_type: TLTokenGroupType) -> Result<TLTokenGroup, Error> { let lines = &group.lines; let mut token_group = TLTokenGroup { description_all: None, description: None, name: "".to_string(), arguments: Default::default(), type_: token_group_type, blood: None, is_return_type: false }; // description builder let mut dbuilder = StringBuilder::new(); for (ix, gl) in lines.iter().enumerate() { let text = gl.text.clone(); match gl.token { TLGroupLineToken::Trait => { return group_trait(gl); } TLGroupLineToken::Description => { dbuilder.append(text).append(' '); } TLGroupLineToken::Struct => { let name = group_name(&text)?; let blood = group_blood(&text)?; let args = group_args(gl.line, &text)?; token_group.name = name; token_group.blood = Some(blood); token_group.arguments = args; } } }; let description_all = group_description(dbuilder.string()); if let Some(dall) = &description_all { token_group.description = dall.get("description").map(|v| v.clone()); let args = &mut token_group.arguments; for tat in args { tat.description = dall.get(&tat.sign_name[..]).map(|v| v.clone()); } } token_group.description_all = description_all; Ok(token_group) } /// parse group trait fn group_trait(gl: &TLGroupLine) -> Result<TLTokenGroup, Error> { let mut token_group = TLTokenGroup { description_all: None, description: None, name: "------".to_string(), arguments: Default::default(), type_: TLTokenGroupType::Trait, blood: None, is_return_type: false }; let description_map = tl_description_map(gl.text.clone()); let name = match description_map.get("class") { Some(class) => class.clone(), None => return bail!("Syntax error 
line -> {} -> {}", gl.line, gl.text) }; let description = match description_map.get("description") { Some(class) => class.clone(), None => return bail!("Syntax error line -> {} -> {}", gl.line, gl.text) }; token_group.description_all = Some(description_map); token_group.description = Some(description); token_group.name = name; Ok(token_group) } /// parse group arguments fn group_args(line: i32, code: &String) -> Result<Vec<TLTokenArgType>, Error> { let words: Vec<&str> = code.split(" ").collect(); let mut args = vec![]; for (ix, &word) in words.iter().enumerate() { if ix == 0 { continue; } if word == "=" || word == "?" { break; } // component type defined if ix == 1 && word.starts_with("{") && word.ends_with("}") { return arg_type_define_with_component(line, code); } // struct sign if !word.contains(":") { return bail!("Syntax fail. line -> {} -> {}", line, code); } let arg_type = arg_type(line, code, word)?; args.push(arg_type); } Ok(args) } fn arg_type_define_with_component(line: i32, code: &String) -> Result<Vec<TLTokenArgType>, Error> { let mut reader = TextReader::new(code); let mut args = vec![]; while reader.has_next() { match reader.next() { Some(' ') => continue, Some('{') => { let mut end = false; let mut builder = StringBuilder::new(); while reader.has_next() { match reader.next() { Some(' ') => continue, Some('}') => { end = true; break; } Some(ch) => { builder.append(ch); } None => {} }; }; if !end { return bail!("Syntax fail. line -> {} -> {}", line, code); } let component_type_text = builder.string(); // like `t:Type` if component_type_text.is_empty() { return bail!("Syntax fail. 
line -> {} -> {}", line, code); } let arg_type = arg_type(line, code, &component_type_text)?; args.push(arg_type); } Some(ch) => {} None => {} }; } Ok(args) } // TLTokenArgType fn arg_type<S: AsRef<str>>(line: i32, code: &String, arg_text: S) -> Result<TLTokenArgType, Error> { let word = arg_text.as_ref(); let signs: Vec<&str> = word.split(":").collect(); if signs.len() != 2 { return bail!("Syntax fail. line -> {} -> {}", line, code); } let sign_name = match signs.get(0) { Some(&t) => t, None => return bail!("Syntax fail. line -> {} -> {}", line, code) }; let sign_type = match signs.get(1) { Some(&t) => t, None => return bail!("Syntax fail. line -> {} -> {}", line, code) }; // not have component type if !sign_type.contains("<") { let tat = TLTokenArgType::builder() .sign_name(sign_name) .sign_type(sign_type) .components(vec![]) .build(); // debug!("{:?}", tat); return Ok(tat); } let component_sign_type = component_sign_type(sign_type); let component_sign_components = component_sign_components(sign_type); let tat = TLTokenArgType::builder() .sign_name(sign_name) .sign_type(component_sign_type) .components(arg_component_types(component_sign_components)) .build(); // debug!("{}:{} ----> {:#?}", sign_name, sign_type, tat); Ok(tat) } fn arg_component_types<S: AsRef<str>>(sign_type: S) -> Vec<TLTokenComponentType> { let mut rets = vec![]; let sign_type = sign_type.as_ref(); // not have sub components if !sign_type.contains("<") { let component_sign_type = component_sign_type(sign_type); let tct = TLTokenComponentType::builder() .sign_type(component_sign_type) .components(vec![]) .build(); rets.push(tct); return rets; } // have sub components let mut reader = TextReader::new(sign_type); let mut builder = StringBuilder::new(); while reader.has_next() { match reader.next() { Some('<') => { let component_sign_type = component_sign_type(sign_type); let component_sign_components = component_sign_components(sign_type); let tct = TLTokenComponentType::builder() 
.sign_type(component_sign_type) .components(arg_component_types(component_sign_components)) .build(); rets.push(tct); } Some(',') => { // tl schema not support `,` , so don't have multi component type (like map<string, string>). nothing to do. } Some(ch) => { builder.append(ch); } None => {} }; }; rets } /// parse component sign type vec<vec<string>> -> vec fn component_sign_type<S: AsRef<str>>(sign_type: S) -> String { let sign_type = sign_type.as_ref(); let chs = sign_type.chars().collect::<Vec<char>>(); let ix = chs.iter().enumerate() .find(|(_, &ch)| ch == '<') .map(|(ix, _)| ix) .map_or(chs.len(), |v| v); sign_type.chars().take(ix).collect() } /// parse component sign type components vec<vec<string>> -> vec<string> fn component_sign_components<S: AsRef<str>>(sign_type: S) -> String { let sign_type = sign_type.as_ref(); let chs = sign_type.chars().collect::<Vec<char>>(); let ix = chs.iter().enumerate() .find(|(_, &ch)| ch == '<') .map(|(ix, _)| ix) .map_or(chs.len(), |v| v); sign_type.chars().skip(ix + 1).take(chs.len() - (ix + 2)).collect() } /// parse group name fn group_name(code: &String) -> Result<String, Error> { let words: Vec<&str> = code.split(" ").collect(); match words.get(0) { Some(t) => Ok(t.to_string()), None => bail!("Not found group name"), } } /// parse group blood fn group_blood(code: &String) -> Result<String, Error> { let words: Vec<&str> = code.split(" ").collect(); let mut entry = false; let mut bloods = vec![]; for word in words { if !entry && word == "=" { entry = true; continue; } if entry { bloods.push(word); } } Ok(bloods.join(" ")) } /// parse group description fn group_description(description_text: String) -> Option<HashMap<String, String>> { if description_text.is_empty() { return None; } Some(tl_description_map(description_text)) } fn tl_description_map<S: AsRef<str>>(text: S) -> HashMap<String, String> { let mut description_map = HashMap::new(); let dwords: Vec<&str> = text.as_ref().split(" ").collect(); let mut name = None; let 
mut vvec = Vec::with_capacity(dwords.len()); for word in dwords { if word.is_empty() { continue; } if !word.starts_with("@") { vvec.push(if word.starts_with("-") { word[1..].to_string() } else { word.to_string() }); continue; } if let Some(n) = name { description_map.insert(n, vvec.join(" ")); vvec.clear(); } name = Some(word[1..].to_string()); } description_map.insert(name.expect("Impossible error"), vvec.join(" ")); vvec.clear(); description_map }
30.346561
120
0.606922
0ed331c66253d0edee9b932c7a160d76dc2894e7
15,809
// Copyright 2019 Parity Technologies (UK) Ltd. // // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the "Software"), // to deal in the Software without restriction, including without limitation // the rights to use, copy, modify, merge, publish, distribute, sublicense, // and/or sell copies of the Software, and to permit persons to whom the // Software is furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. use super::ConnectResult; use crate::{ connection::{ self, handler::{THandlerError, THandlerInEvent, THandlerOutEvent}, Close, Connected, Connection, ConnectionError, ConnectionHandler, ConnectionLimit, IntoConnectionHandler, PendingConnectionError, Substream, }, muxing::StreamMuxer, Multiaddr, }; use futures::{channel::mpsc, prelude::*, stream}; use std::{pin::Pin, task::Context, task::Poll}; /// Identifier of a [`Task`] in a [`Manager`](super::Manager). #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] pub struct TaskId(pub(super) usize); /// Commands that can be sent to a [`Task`]. #[derive(Debug)] pub enum Command<T> { /// Notify the connection handler of an event. NotifyHandler(T), /// Gracefully close the connection (active close) before /// terminating the task. 
Close(Option<ConnectionLimit>), } /// Events that a task can emit to its manager. #[derive(Debug)] pub enum Event<H: IntoConnectionHandler, TE> { /// A connection to a node has succeeded. Established { id: TaskId, info: Connected }, /// A pending connection failed. Failed { id: TaskId, error: PendingConnectionError<TE>, handler: H, }, /// A node we are connected to has changed its address. AddressChange { id: TaskId, new_address: Multiaddr }, /// Notify the manager of an event from the connection. Notify { id: TaskId, event: THandlerOutEvent<H>, }, /// A connection closed, possibly due to an error. /// /// If `error` is `None`, the connection has completed /// an active orderly close. Closed { id: TaskId, error: Option<ConnectionError<THandlerError<H>>>, handler: H::Handler, }, } impl<H: IntoConnectionHandler, TE> Event<H, TE> { pub fn id(&self) -> &TaskId { match self { Event::Established { id, .. } => id, Event::Failed { id, .. } => id, Event::AddressChange { id, .. } => id, Event::Notify { id, .. } => id, Event::Closed { id, .. } => id, } } } /// A `Task` is a [`Future`] that handles a single connection. pub struct Task<F, M, H, E> where M: StreamMuxer, H: IntoConnectionHandler, H::Handler: ConnectionHandler<Substream = Substream<M>>, { /// The ID of this task. id: TaskId, /// Sender to emit events to the manager of this task. events: mpsc::Sender<Event<H, E>>, /// Receiver for commands sent by the manager of this task. commands: stream::Fuse<mpsc::Receiver<Command<THandlerInEvent<H>>>>, /// Inner state of this `Task`. state: State<F, M, H, E>, } impl<F, M, H, E> Task<F, M, H, E> where M: StreamMuxer, H: IntoConnectionHandler, H::Handler: ConnectionHandler<Substream = Substream<M>>, { /// Create a new task to connect and handle some node. 
pub fn pending( id: TaskId, events: mpsc::Sender<Event<H, E>>, commands: mpsc::Receiver<Command<THandlerInEvent<H>>>, future: F, handler: H, ) -> Self { Task { id, events, commands: commands.fuse(), state: State::Pending { future: Box::pin(future), handler, }, } } } /// The state associated with the `Task` of a connection. enum State<F, M, H, E> where M: StreamMuxer, H: IntoConnectionHandler, H::Handler: ConnectionHandler<Substream = Substream<M>>, { /// The connection is being negotiated. Pending { /// The future that will attempt to reach the node. // TODO: don't pin this Future; this requires deeper changes though future: Pin<Box<F>>, /// The intended handler for the established connection. handler: H, }, /// The connection is established. Established { connection: Connection<M, H::Handler>, /// An event to send to the `Manager`. If `None`, the `connection` /// is polled for new events in this state, otherwise the event /// must be sent to the `Manager` before the connection can be /// polled again. event: Option<Event<H, E>>, }, /// The connection is closing (active close). Closing { closing_muxer: Close<M>, handler: H::Handler, error: Option<ConnectionLimit>, }, /// The task is terminating with a final event for the `Manager`. Terminating(Event<H, E>), /// The task has finished. Done, } impl<F, M, H, E> Unpin for Task<F, M, H, E> where M: StreamMuxer, H: IntoConnectionHandler, H::Handler: ConnectionHandler<Substream = Substream<M>>, { } impl<F, M, H, E> Future for Task<F, M, H, E> where M: StreamMuxer, F: Future<Output = ConnectResult<M, E>>, H: IntoConnectionHandler, H::Handler: ConnectionHandler<Substream = Substream<M>> + Send + 'static, { type Output = (); // NOTE: It is imperative to always consume all incoming commands from // the manager first, in order to not prevent it from making progress because // it is blocked on the channel capacity. 
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { let this = &mut *self; let id = this.id; 'poll: loop { match std::mem::replace(&mut this.state, State::Done) { State::Pending { mut future, handler, } => { // Check whether the task is still registered with a `Manager` // by polling the commands channel. match this.commands.poll_next_unpin(cx) { Poll::Pending => {} Poll::Ready(None) => { // The manager has dropped the task; abort. // Don't accept any further commands and terminate the // task with a final event. this.commands.get_mut().close(); let event = Event::Failed { id, handler, error: PendingConnectionError::Aborted, }; this.state = State::Terminating(event); continue 'poll; } Poll::Ready(Some(_)) => { panic!("Task received command while the connection is pending.") } } // Check if the connection succeeded. match future.poll_unpin(cx) { Poll::Ready(Ok((info, muxer))) => { this.state = State::Established { connection: Connection::new(muxer, handler.into_handler(&info)), event: Some(Event::Established { id, info }), } } Poll::Pending => { this.state = State::Pending { future, handler }; return Poll::Pending; } Poll::Ready(Err(error)) => { // Don't accept any further commands and terminate the // task with a final event. this.commands.get_mut().close(); let event = Event::Failed { id, handler, error }; this.state = State::Terminating(event) } } } State::Established { mut connection, event, } => { // Check for commands from the `Manager`. loop { match this.commands.poll_next_unpin(cx) { Poll::Pending => break, Poll::Ready(Some(Command::NotifyHandler(event))) => { connection.inject_event(event) } Poll::Ready(Some(Command::Close(error))) => { // Don't accept any further commands. this.commands.get_mut().close(); // Discard the event, if any, and start a graceful close. 
let (handler, closing_muxer) = connection.close(); this.state = State::Closing { handler, closing_muxer, error, }; continue 'poll; } Poll::Ready(None) => { // The manager has disappeared; abort. return Poll::Ready(()); } } } if let Some(event) = event { // Send the event to the manager. match this.events.poll_ready(cx) { Poll::Pending => { this.state = State::Established { connection, event: Some(event), }; return Poll::Pending; } Poll::Ready(result) => { if result.is_ok() { if let Ok(()) = this.events.start_send(event) { this.state = State::Established { connection, event: None, }; continue 'poll; } } // The manager is no longer reachable; abort. return Poll::Ready(()); } } } else { // Poll the connection for new events. match Connection::poll(Pin::new(&mut connection), cx) { Poll::Pending => { this.state = State::Established { connection, event: None, }; return Poll::Pending; } Poll::Ready(Ok(connection::Event::Handler(event))) => { this.state = State::Established { connection, event: Some(Event::Notify { id, event }), }; } Poll::Ready(Ok(connection::Event::AddressChange(new_address))) => { this.state = State::Established { connection, event: Some(Event::AddressChange { id, new_address }), }; } Poll::Ready(Err(error)) => { // Don't accept any further commands. this.commands.get_mut().close(); let (handler, _closing_muxer) = connection.close(); // Terminate the task with the error, dropping the connection. let event = Event::Closed { id, error: Some(error), handler, }; this.state = State::Terminating(event); } } } } State::Closing { handler, error, mut closing_muxer, } => { // Try to gracefully close the connection. 
match closing_muxer.poll_unpin(cx) { Poll::Ready(Ok(())) => { let event = Event::Closed { id: this.id, error: error.map(|limit| ConnectionError::ConnectionLimit(limit)), handler, }; this.state = State::Terminating(event); } Poll::Ready(Err(e)) => { let event = Event::Closed { id: this.id, error: Some(ConnectionError::IO(e)), handler, }; this.state = State::Terminating(event); } Poll::Pending => { this.state = State::Closing { handler, error, closing_muxer, }; return Poll::Pending; } } } State::Terminating(event) => { // Try to deliver the final event. match this.events.poll_ready(cx) { Poll::Pending => { self.state = State::Terminating(event); return Poll::Pending; } Poll::Ready(result) => { if result.is_ok() { let _ = this.events.start_send(event); } return Poll::Ready(()); } } } State::Done => panic!("`Task::poll()` called after completion."), } } } }
39.921717
98
0.442912
ab1177b4a970a346c03e03d20a84a4dbc4b1062f
52
pub mod file; #[cfg(feature = "git")] pub mod git;
10.4
23
0.615385
e53a49af6ed803bb25ec5d44dcb1c0d866280bf0
757
use bevy::prelude::*; fn main() { App::new() .add_plugins(DefaultPlugins) .add_startup_system(setup) .run(); } fn setup( mut commands: Commands, asset_server: Res<AssetServer>, mut materials: ResMut<Assets<ColorMaterial>>, ) { let texture_handle = asset_server.load("branding/icon.png"); commands.spawn_bundle(OrthographicCameraBundle::new_2d()); commands.spawn_bundle(SpriteBundle { material: materials.add(texture_handle.into()), sprite: Sprite { // Flip the logo to the left flip_x: true, // And don't flip it upside-down ( the default ) flip_y: false, ..Default::default() }, ..Default::default() }); }
26.103448
64
0.59181
9c8e6643e0dee7664b1a6fbe167ca620f19fbc8f
570
// Note: If you change this test, change 'marker_trait_attr-feature-gate.rs' at the same time. // marker_trait_attr // Tracking issue: https://github.com/rust-lang/rust/issues/29864 #![feature(marker_trait_attr)] // See https://github.com/taiki-e/pin-project/issues/105#issuecomment-535355974 use std::marker::PhantomPinned; use pin_project::pin_project; #[pin_project] //~ ERROR E0119 struct Struct<T> { #[pin] f: T, } // unsound Unpin impl impl<T> Unpin for Struct<T> {} fn is_unpin<T: Unpin>() {} fn main() { is_unpin::<Struct<PhantomPinned>>() }
21.111111
94
0.701754
f8a12681375e2e50391fdc71fa75d1dcf1af5148
3,019
/* * MIT License (MIT) * Copyright (c) 2019 Activeledger * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ //! # Activeledger Rust SDK //! //! <img src="https://www.activeledger.io/wp-content/uploads/2018/09/Asset-23.png" alt="Activeledger" width="300"/> //! //! Activeledger is a powerful distributed ledger technology. //! Think about it as a single ledger, which is updated simultaneously in multiple locations. //! As the data is written to a ledger, it is approved and confirmed by all other locations. //! //! ## This Crate //! //! This crate provides Rust developers the ability to easily integrate their applications //! with Activeledger. //! //! This crate gives you access to the core essentials needed to get started. //! * Connection - Connect to an Activeledger node in a network. //! * Keys - RSA and EC key generation with data signing abilities. //! //! Integrating these into this crate makes it much quicker to bootstrap your DLT software. //! //! 
## Additional Activeledger crates //! Adhearing to the Rust mentality of keeping things small we have created other crates that can be used in conjunction //! with this one to add additional functionality. //! //! These crates are: //! * [active_events](https://github.com/activeledger/SDK-Rust-Events) - For working with server sent events. //! * [active_tx](https://github.com/activeledger/SDK-Rust-TxBuilder) - To build transactions without worrying about the JSON. //! //! ## Links //! //! [Activeledger](https://activeledger.io) //! //! [Activeledger Developers portal](https://developers.activeledger.io) //! //! [Activeledger on GitHub](https://github.com/activeledger/activeledger) //! //! [Activeledger on NPM](https://www.npmjs.com/package/@activeledger/activeledger) //! //! [This SDK on GitHub](https://github.com/activeledger/SDK-Rust) //! //! [Report Issues](https://github.com/activeledger/SDK-Rust/issues) mod connection; pub mod key; pub use connection::{error, transaction::Transaction, Connection};
43.753623
126
0.740643
5dfad989c7c9216205f1086d7db08bb854ad15ff
45
mod file; mod header; mod input; mod output;
9
11
0.733333
ef512bc3b5bde2b2a67bd06b078ee20ede6b0886
1,210
fn main() { println!("Hello, world!"); let mut s = String::from("hello world."); s.push_str("world2!"); println!("{}", s); let s1 = String::from("Rust"); let s2 = s1; //浅拷贝 //s1不再有效 //println!("s1:{}",s1); println!("s2:{}", s2); let s3 = s2.clone(); println!("s3:{}", s3); println!("s2:{}", s2); //s是非copy的,在函数体内已经释放,所以不可用 taken_ownship(s); //x是copy的,所以还可以用 let x = 5; makes_copy(x); let s4 = gives_ownship(); let s5 = String::from("hello"); let s6 = takes_and_gives_back(s5); println!("s4:{}", s4); //s5被移除作用域并丢弃 //println!("s5:{}",s5); println!("s6:{}", s6); let s7 = String::from("hello"); let (s7, len) = calculate_length(s7); println!("The length of '{}' is {}.", s7, len); } fn taken_ownship(some_string: String) { println!("{}", some_string); } fn makes_copy(some_integer: i32) { println!("{}", some_integer); } fn gives_ownship() -> String { let some_string = String::from("hello"); some_string } fn takes_and_gives_back(a_string: String) -> String { a_string } fn calculate_length(s: String) -> (String, usize) { let length = s.len(); (s, length) }
20.166667
53
0.555372
e6039c89e528fe46fed5fd91374cf59d001709ba
2,478
use diesel::prelude::*; use failure::Fallible; use super::conn::Conn; use super::schema::{posts, users}; // ==== users ==== #[derive(Debug, Queryable)] pub struct User { pub id: i32, pub username: String, pub email: String, pub password: String, } impl User { pub fn verify(&self, password: &str) -> bool { bcrypt::verify(&password, &self.password).unwrap_or(false) } pub fn create( conn: &Conn, username: String, email: String, password: String, ) -> Fallible<User> { let new_user = NewUser { username, email, password: bcrypt::hash(&password, bcrypt::DEFAULT_COST)?, }; diesel::insert_into(users::table) .values(&new_user) .get_result(conn.get()) .map_err(Into::into) } pub fn find_by_email(conn: &Conn, email: String) -> Fallible<Option<User>> { use super::schema::users::dsl; dsl::users .filter(dsl::email.eq(email)) .get_result(conn.get()) .optional() .map_err(Into::into) } pub fn find_by_id(conn: &Conn, id: i32) -> Fallible<Option<User>> { use super::schema::users::dsl; dsl::users .filter(dsl::id.eq(id)) .get_result(conn.get()) .optional() .map_err(Into::into) } pub fn all_posts(conn: &Conn, user_id: i32) -> Fallible<Vec<Post>> { use super::schema::posts::dsl; dsl::posts .filter(dsl::user_id.eq(user_id)) .load(conn.get()) .map_err(Into::into) } } #[derive(Debug, Insertable)] #[table_name = "users"] pub struct NewUser { pub username: String, pub email: String, pub password: String, } // ==== posts ==== #[derive(Debug, Queryable)] pub struct Post { pub id: i32, pub user_id: i32, pub title: String, pub body: String, pub published: bool, } impl Post { pub fn create(conn: &Conn, user_id: i32, title: String, body: String) -> Fallible<Post> { let new_post = NewPost { user_id, title, body, }; diesel::insert_into(posts::table) .values(&new_post) .get_result(conn.get()) .map_err(Into::into) } } #[derive(Debug, Insertable)] #[table_name = "posts"] pub struct NewPost { pub user_id: i32, pub title: String, pub body: String, }
23.377358
93
0.54318
e5ae91b50a056decce7e25402f4dc26b1acd02e0
7,817
#[cfg(test)] use crate::controller::migrate::Migration; use crate::controller::Event; use dataflow::prelude::*; use noria::consensus::Authority; use noria::prelude::*; use noria::SyncControllerHandle; use std::collections::HashMap; use std::ops::{Deref, DerefMut}; use std::sync::Arc; use stream_cancel::Trigger; use tokio::prelude::*; use tokio_io_pool; /// A handle to a controller that is running in the same process as this one. pub struct Handle<A: Authority + 'static> { c: Option<ControllerHandle<A>>, #[allow(dead_code)] event_tx: Option<futures::sync::mpsc::UnboundedSender<Event>>, kill: Option<Trigger>, iopool: Option<tokio_io_pool::Runtime>, } impl<A: Authority> Deref for Handle<A> { type Target = ControllerHandle<A>; fn deref(&self) -> &Self::Target { self.c.as_ref().unwrap() } } impl<A: Authority> DerefMut for Handle<A> { fn deref_mut(&mut self) -> &mut Self::Target { self.c.as_mut().unwrap() } } impl<A: Authority + 'static> Handle<A> { pub(super) fn new( authority: Arc<A>, event_tx: futures::sync::mpsc::UnboundedSender<Event>, kill: Trigger, io: tokio_io_pool::Runtime, ) -> impl Future<Item = Self, Error = failure::Error> { ControllerHandle::make(authority).map(move |c| Handle { c: Some(c), event_tx: Some(event_tx), kill: Some(kill), iopool: Some(io), }) } #[cfg(test)] fn ready<E>(self) -> impl Future<Item = Self, Error = E> { let snd = self.event_tx.clone().unwrap(); future::loop_fn((self, snd), |(this, snd)| { let (tx, rx) = futures::sync::oneshot::channel(); snd.unbounded_send(Event::IsReady(tx)).unwrap(); rx.map_err(|_| unimplemented!("worker loop went away")) .and_then(|v| { if v { future::Either::A(future::ok(future::Loop::Break(this))) } else { use std::time; future::Either::B( tokio::timer::Delay::new( time::Instant::now() + time::Duration::from_millis(50), ) .map(move |_| future::Loop::Continue((this, snd))) .map_err(|_| unimplemented!("no timer available")), ) } }) }) } #[doc(hidden)] pub fn migrate<F, T>(&mut self, f: F) -> T where F: FnOnce(&mut 
Migration) -> T + Send + 'static, T: Send + 'static, { let (ret_tx, ret_rx) = futures::sync::oneshot::channel(); let (fin_tx, fin_rx) = futures::sync::oneshot::channel(); let b = Box::new(move |m: &mut Migration| -> () { if ret_tx.send(f(m)).is_err() { unreachable!("could not return migration result"); } }); self.event_tx .clone() .unwrap() .unbounded_send(Event::ManualMigration { f: b, done: fin_tx }) .unwrap(); match fin_rx.wait() { Ok(()) => ret_rx.wait().unwrap(), Err(e) => unreachable!("{:?}", e), } } /// Install a new set of policies on the controller. #[must_use] fn set_security_config(&mut self, p: String) -> impl Future<Item = (), Error = failure::Error> { self.rpc("set_security_config", p, "failed to set security config") } /// Install a new set of policies on the controller. #[must_use] fn create_universe( &mut self, context: HashMap<String, DataType>, ) -> impl Future<Item = (), Error = failure::Error> { let mut c = self.c.clone().unwrap(); let uid = context .get("id") .expect("Universe context must have id") .clone(); self.rpc::<_, ()>( "create_universe", &context, "failed to create security universe", ) .and_then(move |_| { // Write to Context table let bname = match context.get("group") { None => format!("UserContext_{}", uid.to_string()), Some(g) => format!("GroupContext_{}_{}", g.to_string(), uid.to_string()), }; let mut fields: Vec<_> = context.keys().collect(); fields.sort(); let record: Vec<DataType> = fields .iter() .map(|&f| context.get(f).unwrap().clone()) .collect(); c.table(&bname).and_then(|table| { table .insert(record) .map_err(|e| format_err!("failed to make table: {:?}", e)) .map(|_| ()) }) }) } /// Inform the local instance that it should exit. fn shutdown(&mut self) { if let Some(io) = self.iopool.take() { drop(self.c.take()); drop(self.event_tx.take()); drop(self.kill.take()); io.shutdown_on_idle(); } } } impl<A: Authority> Drop for Handle<A> { fn drop(&mut self) { self.shutdown(); } } /// A synchronous handle to a worker. 
pub struct SyncHandle<A: Authority + 'static> { rt: Option<tokio::runtime::Runtime>, wh: Handle<A>, // this is an Option so we can drop it sh: Option<SyncControllerHandle<A, tokio::runtime::TaskExecutor>>, } impl<A: Authority> SyncHandle<A> { /// Construct a new synchronous handle on top of an existing runtime. /// /// Note that the given `Handle` must have been created through the given `Runtime`. fn from_existing(rt: tokio::runtime::Runtime, wh: Handle<A>) -> Self { let sch = wh.sync_handle(rt.executor()); SyncHandle { rt: Some(rt), wh, sh: Some(sch), } } /// Construct a new synchronous handle on top of an existing external runtime. /// /// Note that the given `Handle` must have been created through the `Runtime` backing the /// given executor. fn from_executor(ex: tokio::runtime::TaskExecutor, wh: Handle<A>) -> Self { let sch = wh.sync_handle(ex); SyncHandle { rt: None, wh, sh: Some(sch), } } /// Stash away the given runtime inside this worker handle. fn wrap_rt(&mut self, rt: tokio::runtime::Runtime) { self.rt = Some(rt); } /// Run an operation on the underlying asynchronous worker handle. 
fn on_worker<F, FF>(&mut self, f: F) -> Result<FF::Item, FF::Error> where F: FnOnce(&mut Handle<A>) -> FF, FF: IntoFuture, FF::Future: Send + 'static, FF::Item: Send + 'static, FF::Error: Send + 'static, { let fut = f(&mut self.wh); self.sh.as_mut().unwrap().run(fut) } #[cfg(test)] fn migrate<F, T>(&mut self, f: F) -> T where F: FnOnce(&mut Migration) -> T + Send + 'static, T: Send + 'static, { self.on_worker(move |w| -> Result<_, ()> { Ok(w.migrate(f)) }) .unwrap() } } impl<A: Authority> Deref for SyncHandle<A> { type Target = SyncControllerHandle<A, tokio::runtime::TaskExecutor>; fn deref(&self) -> &Self::Target { self.sh.as_ref().unwrap() } } impl<A: Authority> DerefMut for SyncHandle<A> { fn deref_mut(&mut self) -> &mut Self::Target { self.sh.as_mut().unwrap() } } impl<A: Authority + 'static> Drop for SyncHandle<A> { fn drop(&mut self) { drop(self.sh.take()); self.wh.shutdown(); if let Some(rt) = self.rt.take() { rt.shutdown_on_idle().wait().unwrap(); } } }
31.268
100
0.525905
11748e76ea0a1fe4b75bb6aa11dba347715b7bac
1,717
#[cfg(all(doc, not(feature = "std")))] extern crate alloc; use super::Endian; #[cfg(all(doc, not(feature = "std")))] use alloc::vec::Vec; /// Runtime options for /// [`BinRead::read_options()`](crate::BinRead::read_options). #[derive(Default, Clone, Copy)] pub struct ReadOptions { /// The [byte order](crate::Endian) to use when reading data. /// /// Note that if a derived type uses one of the /// [byte order directives](crate::attribute#byte-order), this option /// will be overridden by the directive. endian: Endian, /// An absolute offset added to the [`FilePtr::ptr`](crate::FilePtr::ptr) /// offset before reading the pointed-to value. offset: u64, } impl ReadOptions { /// Create a new ReadOptions with a given Endian pub fn new(endian: Endian) -> Self { Self { endian, ..Default::default() } } /// Returns the given ReadOptions but with the endian replaced pub fn with_endian(self, endian: Endian) -> Self { Self { endian, ..self } } /// The [byte order](crate::Endian) to use when reading data. /// /// Note that if a derived type uses one of the /// [byte order directives](crate::attribute#byte-order), this option /// will be overridden by the directive. pub fn endian(&self) -> Endian { self.endian } /// Returns the given ReadOptions but with the offset replaced pub fn with_offset(self, offset: u64) -> Self { Self { offset, ..self } } /// An absolute offset added to the [`FilePtr::ptr`](crate::FilePtr::ptr) /// offset before reading the pointed-to value. pub fn offset(&self) -> u64 { self.offset } }
30.122807
77
0.616773
1efa852f6b7c5f6461e39fad97c674b5a530d795
7,159
use super::*; use crate::script_tests::utils::layer1::*; use ckb_crypto::secp::{Generator, Privkey, Pubkey}; use ckb_error::assert_error_eq; use ckb_script::{ScriptError, TransactionScriptsVerifier}; use ckb_types::{ bytes::Bytes, core::{Capacity, DepType, ScriptHashType, TransactionBuilder, TransactionView}, packed::{CellDep, CellInput, CellOutput, OutPoint, Script, WitnessArgs}, prelude::*, }; use gw_generator::account_lock_manage::{secp256k1::Secp256k1Eth, LockAlgorithm}; use rand::{thread_rng, Rng}; use sha3::{Digest, Keccak256}; const ERROR_PUBKEY_BLAKE160_HASH: i8 = -31; lazy_static! { pub static ref ETH_ACCOUNT_LOCK: Bytes = Bytes::from( &include_bytes!("../../../../../godwoken-scripts/c/build/account_locks/eth-account-lock")[..] ); } fn gen_tx(dummy: &mut DummyDataLoader, lock_args: Bytes, input_data: Bytes) -> TransactionView { let mut rng = thread_rng(); // setup sighash_all dep let script_out_point = { let contract_tx_hash = { let mut buf = [0u8; 32]; rng.fill(&mut buf); buf.pack() }; OutPoint::new(contract_tx_hash.clone(), 0) }; // dep contract code let script_cell = CellOutput::new_builder() .capacity( Capacity::bytes(ETH_ACCOUNT_LOCK.len()) .expect("script capacity") .pack(), ) .build(); let script_cell_data_hash = CellOutput::calc_data_hash(&ETH_ACCOUNT_LOCK); dummy.cells.insert( script_out_point.clone(), (script_cell, ETH_ACCOUNT_LOCK.clone()), ); // setup secp256k1_data dep let secp256k1_data_out_point = { let tx_hash = { let mut buf = [0u8; 32]; rng.fill(&mut buf); buf.pack() }; OutPoint::new(tx_hash, 0) }; let secp256k1_data_cell = CellOutput::new_builder() .capacity( Capacity::bytes(SECP256K1_DATA_BIN.len()) .expect("data capacity") .pack(), ) .build(); dummy.cells.insert( secp256k1_data_out_point.clone(), (secp256k1_data_cell, SECP256K1_DATA_BIN.clone()), ); // setup default tx builder let dummy_capacity = Capacity::shannons(42); let tx_builder = TransactionBuilder::default() .cell_dep( CellDep::new_builder() .out_point(script_out_point) 
.dep_type(DepType::Code.into()) .build(), ) .cell_dep( CellDep::new_builder() .out_point(secp256k1_data_out_point) .dep_type(DepType::Code.into()) .build(), ) .output( CellOutput::new_builder() .capacity(dummy_capacity.pack()) .build(), ) .output_data(Bytes::new().pack()); let previous_tx_hash = { let mut buf = [0u8; 32]; rng.fill(&mut buf); buf.pack() }; let previous_out_point = OutPoint::new(previous_tx_hash, 0); let script = Script::new_builder() .args(lock_args.pack()) .code_hash(script_cell_data_hash.clone()) .hash_type(ScriptHashType::Data.into()) .build(); let previous_output_cell = CellOutput::new_builder() .capacity(dummy_capacity.pack()) .lock(script) .build(); dummy.cells.insert( previous_out_point.clone(), (previous_output_cell.clone(), input_data), ); tx_builder .input(CellInput::new(previous_out_point, 0)) .build() } fn sign_message(key: &Privkey, message: [u8; 32]) -> gw_types::packed::Signature { use gw_types::prelude::*; // calculate eth signing message let message = { let mut hasher = Keccak256::new(); hasher.update("\x19Ethereum Signed Message:\n32"); hasher.update(&message); let buf = hasher.finalize(); let mut signing_message = [0u8; 32]; signing_message.copy_from_slice(&buf[..]); ckb_types::H256::from(signing_message) }; let sig = key.sign_recoverable(&message).expect("sign"); let mut signature = [0u8; 65]; signature.copy_from_slice(&sig.serialize()); signature.pack() } pub fn sha3_pubkey_hash(pubkey: &Pubkey) -> Bytes { let mut hasher = Keccak256::new(); hasher.update(&pubkey.as_bytes()); let buf = hasher.finalize(); buf[12..].to_vec().into() } #[test] fn test_sign_eth_message() { let mut data_loader = DummyDataLoader::new(); let privkey = Generator::random_privkey(); let pubkey = privkey.pubkey().expect("pubkey"); let pubkey_hash = sha3_pubkey_hash(&pubkey); let mut rng = thread_rng(); let mut message = [0u8; 32]; rng.fill(&mut message); let signature = sign_message(&privkey, message); let tx = gen_tx( &mut data_loader, pubkey_hash.clone(), 
Bytes::from(message.to_vec()), ); let tx = tx .as_advanced_builder() .set_witnesses(vec![WitnessArgs::new_builder() .lock(Some(signature.as_bytes()).pack()) .build() .as_bytes() .pack()]) .build(); let resolved_tx = build_resolved_tx(&data_loader, &tx); let mut verifier = TransactionScriptsVerifier::new(&resolved_tx, &data_loader); verifier.set_debug_printer(|_script, msg| println!("[script debug] {}", msg)); let verify_result = verifier.verify(MAX_CYCLES); verify_result.expect("pass verification"); let valid = Secp256k1Eth::default() .verify_signature(pubkey_hash, signature, message.into()) .unwrap(); assert!(valid); } #[test] fn test_wrong_signature() { let mut data_loader = DummyDataLoader::new(); let privkey = Generator::random_privkey(); let pubkey = privkey.pubkey().expect("pubkey"); let pubkey_hash = sha3_pubkey_hash(&pubkey); let mut rng = thread_rng(); let mut message = [0u8; 32]; rng.fill(&mut message); let signature = { let mut wrong_message = [0u8; 32]; rng.fill(&mut wrong_message); sign_message(&privkey, wrong_message) }; let tx = gen_tx( &mut data_loader, pubkey_hash.clone(), Bytes::from(message.to_vec()), ); let tx = tx .as_advanced_builder() .set_witnesses(vec![WitnessArgs::new_builder() .lock(Some(signature.as_bytes()).pack()) .build() .as_bytes() .pack()]) .build(); let resolved_tx = build_resolved_tx(&data_loader, &tx); let mut verifier = TransactionScriptsVerifier::new(&resolved_tx, &data_loader); verifier.set_debug_printer(|_script, msg| println!("[script debug] {}", msg)); let verify_result = verifier.verify(MAX_CYCLES); let script_cell_index = 0; assert_error_eq!( verify_result.unwrap_err(), ScriptError::ValidationFailure(ERROR_PUBKEY_BLAKE160_HASH) .input_lock_script(script_cell_index) ); let valid = Secp256k1Eth::default() .verify_signature(pubkey_hash, signature, message.into()) .unwrap(); assert!(!valid); }
33.143519
101
0.607487
e841b22ee08518bd37e845cee7d363453e9c9d6f
1,599
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. //! This is a legacy module that only contains re-exports of other modules pub use crate::datasource::{provider_as_source, source_as_provider, DefaultTableSource}; pub use datafusion_expr::{ logical_plan::{ display::{GraphvizVisitor, IndentVisitor}, Aggregate, Analyze, CreateCatalog, CreateCatalogSchema, CreateExternalTable, CreateMemoryTable, CreateView, CrossJoin, DropTable, EmptyRelation, Explain, Extension, FileType, Filter, Join, JoinConstraint, JoinType, Limit, LogicalPlan, Offset, Partitioning, PlanType, PlanVisitor, Projection, Repartition, Sort, StringifiedPlan, Subquery, SubqueryAlias, TableScan, ToStringifiedPlan, Union, UserDefinedLogicalNode, Values, Window, }, TableProviderFilterPushDown, TableSource, };
48.454545
88
0.754221
e5af4c606912bf7289066464695a6875e4086bf2
3,893
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![allow(unused)] #[allow(dead_code)] use hir::map as hir_map; use rustc::dep_graph::DepKind; use rustc::hir; use rustc::hir::Ty_::*; use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE}; use rustc::ty::maps::Providers; use rustc::ty::subst::UnpackedKind; use rustc::ty::{self, CratePredicatesMap, TyCtxt}; use rustc_data_structures::sync::Lrc; use util::nodemap::FxHashMap; mod explicit; mod implicit_empty; mod implicit_infer; /// Code to write unit test for outlives. pub mod test; pub fn provide(providers: &mut Providers) { *providers = Providers { inferred_outlives_of, inferred_outlives_crate, ..*providers }; } fn inferred_outlives_of<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, item_def_id: DefId, ) -> Lrc<Vec<ty::Predicate<'tcx>>> { let id = tcx.hir .as_local_node_id(item_def_id) .expect("expected local def-id"); match tcx.hir.get(id) { hir_map::NodeItem(item) => match item.node { hir::ItemStruct(..) | hir::ItemEnum(..) | hir::ItemUnion(..) => { let crate_map = tcx.inferred_outlives_crate(LOCAL_CRATE); let dep_node = item_def_id.to_dep_node(tcx, DepKind::InferredOutlivesOf); tcx.dep_graph.read(dep_node); crate_map .predicates .get(&item_def_id) .unwrap_or(&crate_map.empty_predicate) .clone() } _ => Lrc::new(Vec::new()), }, _ => Lrc::new(Vec::new()), } } fn inferred_outlives_crate<'tcx>( tcx: TyCtxt<'_, 'tcx, 'tcx>, crate_num: CrateNum, ) -> Lrc<CratePredicatesMap<'tcx>> { // Compute a map from each struct/enum/union S to the **explicit** // outlives predicates (`T: 'a`, `'a: 'b`) that the user wrote. 
// Typically there won't be many of these, except in older code where // they were mandatory. Nonetheless, we have to ensure that every such // predicate is satisfied, so they form a kind of base set of requirements // for the type. // Compute the inferred predicates let exp = explicit::explicit_predicates(tcx, crate_num); let mut global_inferred_outlives = implicit_infer::infer_predicates(tcx, &exp); // Convert the inferred predicates into the "collected" form the // global data structure expects. // // FIXME -- consider correcting impedance mismatch in some way, // probably by updating the global data structure. let mut predicates = global_inferred_outlives .iter() .map(|(&def_id, set)| { let vec: Vec<ty::Predicate<'tcx>> = set.iter() .map( |ty::OutlivesPredicate(kind1, region2)| match kind1.unpack() { UnpackedKind::Type(ty1) => ty::Predicate::TypeOutlives(ty::Binder::bind( ty::OutlivesPredicate(ty1, region2), )), UnpackedKind::Lifetime(region1) => ty::Predicate::RegionOutlives( ty::Binder::bind(ty::OutlivesPredicate(region1, region2)), ), }, ) .collect(); (def_id, Lrc::new(vec)) }) .collect(); let empty_predicate = Lrc::new(Vec::new()); Lrc::new(ty::CratePredicatesMap { predicates, empty_predicate, }) }
34.758929
96
0.602106
ab9ae396de94c4279a6f58302702ca9559ff7af4
28,910
// Copyright 2018 Developers of the Rand project. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // https://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // // Based on jitterentropy-library, http://www.chronox.de/jent.html. // Copyright Stephan Mueller <[email protected]>, 2014 - 2017. // // With permission from Stephan Mueller to relicense the Rust translation under // the MIT license. //! Non-physical true random number generator based on timing jitter. //! //! This is a true random number generator, as opposed to pseudo-random //! generators. Random numbers generated by `JitterRng` can be seen as fresh //! entropy. A consequence is that it is orders of magnitude slower than `OsRng` //! and PRNGs (about 10<sup>3</sup>..10<sup>6</sup> slower). //! //! There are very few situations where using this RNG is appropriate. Only very //! few applications require true entropy. A normal PRNG can be statistically //! indistinguishable, and a cryptographic PRNG should also be as impossible to //! predict. //! //! Use of `JitterRng` is recommended for initializing cryptographic PRNGs when //! `OsRng` is not available. //! //! `JitterRng` can be used without the standard library, but not conveniently, //! you must provide a high-precision timer and carefully have to follow the //! instructions of [`JitterRng::new_with_timer`]. //! //! This implementation is based on [Jitterentropy] version 2.1.0. //! //! Note: There is no accurate timer available on WASM platforms, to help //! prevent fingerprinting or timing side-channel attacks. Therefore //! [`JitterRng::new()`] is not available on WASM. It is also unavailable //! with disabled `std` feature. //! //! 
[Jitterentropy]: http://www.chronox.de/jent.html #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk.png", html_favicon_url = "https://www.rust-lang.org/favicon.ico", html_root_url = "https://rust-random.github.io/rand/")] #![deny(missing_docs)] #![deny(missing_debug_implementations)] #![doc(test(attr(allow(unused_variables), deny(warnings))))] // Note: the C implementation of `Jitterentropy` relies on being compiled // without optimizations. This implementation goes through lengths to make the // compiler not optimize out code which does influence timing jitter, but is // technically dead code. #![no_std] pub extern crate rand_core; #[cfg(feature = "std")] extern crate std; #[cfg(feature = "log")] #[macro_use] extern crate log; #[cfg(any(target_os = "macos", target_os = "ios"))] extern crate libc; #[cfg(target_os = "windows")] extern crate winapi; #[cfg(not(feature = "log"))] #[macro_use] mod dummy_log; #[cfg(feature = "std")] mod platform; mod error; use rand_core::{RngCore, CryptoRng, Error, impls}; pub use error::TimerError; use core::{fmt, mem, ptr}; #[cfg(feature = "std")] use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering}; const MEMORY_BLOCKS: usize = 64; const MEMORY_BLOCKSIZE: usize = 32; const MEMORY_SIZE: usize = MEMORY_BLOCKS * MEMORY_BLOCKSIZE; /// A true random number generator based on jitter in the CPU execution time, /// and jitter in memory access time. pub struct JitterRng { data: u64, // Actual random number // Number of rounds to run the entropy collector per 64 bits rounds: u8, // Timer used by `measure_jitter` timer: fn() -> u64, // Memory for the Memory Access noise source mem_prev_index: u16, // Make `next_u32` not waste 32 bits data_half_used: bool, } // Note: `JitterRng` maintains a small 64-bit entropy pool. With every // `generate` 64 new bits should be integrated in the pool. 
If a round of // `generate` were to collect less than the expected 64 bit, then the returned // value, and the new state of the entropy pool, would be in some way related to // the initial state. It is therefore better if the initial state of the entropy // pool is different on each call to `generate`. This has a few implications: // - `generate` should be called once before using `JitterRng` to produce the // first usable value (this is done by default in `new`); // - We do not zero the entropy pool after generating a result. The reference // implementation also does not support zeroing, but recommends generating a // new value without using it if you want to protect a previously generated // 'secret' value from someone inspecting the memory; // - Implementing `Clone` seems acceptable, as it would not cause the systematic // bias a constant might cause. Only instead of one value that could be // potentially related to the same initial state, there are now two. // Entropy collector state. // These values are not necessary to preserve across runs. struct EcState { // Previous time stamp to determine the timer delta prev_time: u64, // Deltas used for the stuck test last_delta: i32, last_delta2: i32, // Memory for the Memory Access noise source mem: [u8; MEMORY_SIZE], } impl EcState { // Stuck test by checking the: // - 1st derivation of the jitter measurement (time delta) // - 2nd derivation of the jitter measurement (delta of time deltas) // - 3rd derivation of the jitter measurement (delta of delta of time // deltas) // // All values must always be non-zero. // This test is a heuristic to see whether the last measurement holds // entropy. 
fn stuck(&mut self, current_delta: i32) -> bool { let delta2 = self.last_delta - current_delta; let delta3 = delta2 - self.last_delta2; self.last_delta = current_delta; self.last_delta2 = delta2; current_delta == 0 || delta2 == 0 || delta3 == 0 } } // Custom Debug implementation that does not expose the internal state impl fmt::Debug for JitterRng { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "JitterRng {{}}") } } impl Clone for JitterRng { fn clone(&self) -> JitterRng { JitterRng { data: self.data, rounds: self.rounds, timer: self.timer, mem_prev_index: self.mem_prev_index, // The 32 bits that may still be unused from the previous round are // for the original to use, not for the clone. data_half_used: false, } } } // Initialise to zero; must be positive #[cfg(all(feature = "std", not(target_arch = "wasm32")))] static JITTER_ROUNDS: AtomicUsize = ATOMIC_USIZE_INIT; impl JitterRng { /// Create a new `JitterRng`. Makes use of `std::time` for a timer, or a /// platform-specific function with higher accuracy if necessary and /// available. /// /// During initialization CPU execution timing jitter is measured a few /// hundred times. If this does not pass basic quality tests, an error is /// returned. The test result is cached to make subsequent calls faster. #[cfg(all(feature = "std", not(target_arch = "wasm32")))] pub fn new() -> Result<JitterRng, TimerError> { if cfg!(target_arch = "wasm32") { return Err(TimerError::NoTimer); } let mut state = JitterRng::new_with_timer(platform::get_nstime); let mut rounds = JITTER_ROUNDS.load(Ordering::Relaxed) as u8; if rounds == 0 { // No result yet: run test. // This allows the timer test to run multiple times; we don't care. rounds = state.test_timer()?; JITTER_ROUNDS.store(rounds as usize, Ordering::Relaxed); info!("JitterRng: using {} rounds per u64 output", rounds); } state.set_rounds(rounds); // Fill `data` with a non-zero value. state.gen_entropy(); Ok(state) } /// Create a new `JitterRng`. 
/// A custom timer can be supplied, making it possible to use `JitterRng` in /// `no_std` environments. /// /// The timer must have nanosecond precision. /// /// This method is more low-level than `new()`. It is the responsibility of /// the caller to run [`test_timer`] before using any numbers generated with /// `JitterRng`, and optionally call [`set_rounds`]. Also it is important to /// consume at least one `u64` before using the first result to initialize /// the entropy collection pool. /// /// # Example /// /// ``` /// # use rand_jitter::rand_core::{RngCore, Error}; /// use rand_jitter::JitterRng; /// /// # fn try_inner() -> Result<(), Error> { /// fn get_nstime() -> u64 { /// use std::time::{SystemTime, UNIX_EPOCH}; /// /// let dur = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); /// // The correct way to calculate the current time is /// // `dur.as_secs() * 1_000_000_000 + dur.subsec_nanos() as u64` /// // But this is faster, and the difference in terms of entropy is /// // negligible (log2(10^9) == 29.9). /// dur.as_secs() << 30 | dur.subsec_nanos() as u64 /// } /// /// let mut rng = JitterRng::new_with_timer(get_nstime); /// let rounds = rng.test_timer()?; /// rng.set_rounds(rounds); // optional /// let _ = rng.next_u64(); /// /// // Ready for use /// let v: u64 = rng.next_u64(); /// # Ok(()) /// # } /// /// # let _ = try_inner(); /// ``` /// /// [`test_timer`]: JitterRng::test_timer /// [`set_rounds`]: JitterRng::set_rounds pub fn new_with_timer(timer: fn() -> u64) -> JitterRng { JitterRng { data: 0, rounds: 64, timer, mem_prev_index: 0, data_half_used: false, } } /// Configures how many rounds are used to generate each 64-bit value. /// This must be greater than zero, and has a big impact on performance /// and output quality. /// /// [`new_with_timer`] conservatively uses 64 rounds, but often less rounds /// can be used. 
The `test_timer()` function returns the minimum number of /// rounds required for full strength (platform dependent), so one may use /// `rng.set_rounds(rng.test_timer()?);` or cache the value. /// /// [`new_with_timer`]: JitterRng::new_with_timer pub fn set_rounds(&mut self, rounds: u8) { assert!(rounds > 0); self.rounds = rounds; } // Calculate a random loop count used for the next round of an entropy // collection, based on bits from a fresh value from the timer. // // The timer is folded to produce a number that contains at most `n_bits` // bits. // // Note: A constant should be added to the resulting random loop count to // prevent loops that run 0 times. #[inline(never)] fn random_loop_cnt(&mut self, n_bits: u32) -> u32 { let mut rounds = 0; let mut time = (self.timer)(); // Mix with the current state of the random number balance the random // loop counter a bit more. time ^= self.data; // We fold the time value as much as possible to ensure that as many // bits of the time stamp are included as possible. let folds = (64 + n_bits - 1) / n_bits; let mask = (1 << n_bits) - 1; for _ in 0..folds { rounds ^= time & mask; time >>= n_bits; } rounds as u32 } // CPU jitter noise source // Noise source based on the CPU execution time jitter // // This function injects the individual bits of the time value into the // entropy pool using an LFSR. // // The code is deliberately inefficient with respect to the bit shifting. // This function not only acts as folding operation, but this function's // execution is used to measure the CPU execution time jitter. Any change to // the loop in this function implies that careful retesting must be done. 
#[inline(never)] fn lfsr_time(&mut self, time: u64, var_rounds: bool) { fn lfsr(mut data: u64, time: u64) -> u64{ for i in 1..65 { let mut tmp = time << (64 - i); tmp >>= 64 - 1; // Fibonacci LSFR with polynomial of // x^64 + x^61 + x^56 + x^31 + x^28 + x^23 + 1 which is // primitive according to // http://poincare.matf.bg.ac.rs/~ezivkovm/publications/primpol1.pdf // (the shift values are the polynomial values minus one // due to counting bits from 0 to 63). As the current // position is always the LSB, the polynomial only needs // to shift data in from the left without wrap. data ^= tmp; data ^= (data >> 63) & 1; data ^= (data >> 60) & 1; data ^= (data >> 55) & 1; data ^= (data >> 30) & 1; data ^= (data >> 27) & 1; data ^= (data >> 22) & 1; data = data.rotate_left(1); } data } // Note: in the reference implementation only the last round effects // `self.data`, all the other results are ignored. To make sure the // other rounds are not optimised out, we first run all but the last // round on a throw-away value instead of the real `self.data`. let mut lfsr_loop_cnt = 0; if var_rounds { lfsr_loop_cnt = self.random_loop_cnt(4) }; let mut throw_away: u64 = 0; for _ in 0..lfsr_loop_cnt { throw_away = lfsr(throw_away, time); } black_box(throw_away); self.data = lfsr(self.data, time); } // Memory Access noise source // This is a noise source based on variations in memory access times // // This function performs memory accesses which will add to the timing // variations due to an unknown amount of CPU wait states that need to be // added when accessing memory. The memory size should be larger than the L1 // caches as outlined in the documentation and the associated testing. // // The L1 cache has a very high bandwidth, albeit its access rate is usually // slower than accessing CPU registers. Therefore, L1 accesses only add // minimal variations as the CPU has hardly to wait. 
Starting with L2, // significant variations are added because L2 typically does not belong to // the CPU any more and therefore a wider range of CPU wait states is // necessary for accesses. L3 and real memory accesses have even a wider // range of wait states. However, to reliably access either L3 or memory, // the `self.mem` memory must be quite large which is usually not desirable. #[inline(never)] fn memaccess(&mut self, mem: &mut [u8; MEMORY_SIZE], var_rounds: bool) { let mut acc_loop_cnt = 128; if var_rounds { acc_loop_cnt += self.random_loop_cnt(4) }; let mut index = self.mem_prev_index as usize; for _ in 0..acc_loop_cnt { // Addition of memblocksize - 1 to index with wrap around logic to // ensure that every memory location is hit evenly. // The modulus also allows the compiler to remove the indexing // bounds check. index = (index + MEMORY_BLOCKSIZE - 1) % MEMORY_SIZE; // memory access: just add 1 to one byte // memory access implies read from and write to memory location mem[index] = mem[index].wrapping_add(1); } self.mem_prev_index = index as u16; } // This is the heart of the entropy generation: calculate time deltas and // use the CPU jitter in the time deltas. The jitter is injected into the // entropy pool. // // Ensure that `ec.prev_time` is primed before using the output of this // function. This can be done by calling this function and not using its // result. fn measure_jitter(&mut self, ec: &mut EcState) -> Option<()> { // Invoke one noise source before time measurement to add variations self.memaccess(&mut ec.mem, true); // Get time stamp and calculate time delta to previous // invocation to measure the timing variations let time = (self.timer)(); // Note: wrapping_sub combined with a cast to `i64` generates a correct // delta, even in the unlikely case this is a timer that is not strictly // monotonic. 
let current_delta = time.wrapping_sub(ec.prev_time) as i64 as i32; ec.prev_time = time; // Call the next noise source which also injects the data self.lfsr_time(current_delta as u64, true); // Check whether we have a stuck measurement (i.e. does the last // measurement holds entropy?). if ec.stuck(current_delta) { return None }; // Rotate the data buffer by a prime number (any odd number would // do) to ensure that every bit position of the input time stamp // has an even chance of being merged with a bit position in the // entropy pool. We do not use one here as the adjacent bits in // successive time deltas may have some form of dependency. The // chosen value of 7 implies that the low 7 bits of the next // time delta value is concatenated with the current time delta. self.data = self.data.rotate_left(7); Some(()) } // Shuffle the pool a bit by mixing some value with a bijective function // (XOR) into the pool. // // The function generates a mixer value that depends on the bits set and // the location of the set bits in the random number generated by the // entropy source. Therefore, based on the generated random number, this // mixer value can have 2^64 different values. That mixer value is // initialized with the first two SHA-1 constants. After obtaining the // mixer value, it is XORed into the random number. // // The mixer value is not assumed to contain any entropy. But due to the // XOR operation, it can also not destroy any entropy present in the // entropy pool. #[inline(never)] fn stir_pool(&mut self) { // This constant is derived from the first two 32 bit initialization // vectors of SHA-1 as defined in FIPS 180-4 section 5.3.1 // The order does not really matter as we do not rely on the specific // numbers. We just pick the SHA-1 constants as they have a good mix of // bit set and unset. 
const CONSTANT: u64 = 0x67452301efcdab89; // The start value of the mixer variable is derived from the third // and fourth 32 bit initialization vector of SHA-1 as defined in // FIPS 180-4 section 5.3.1 let mut mixer = 0x98badcfe10325476; // This is a constant time function to prevent leaking timing // information about the random number. // The normal code is: // ``` // for i in 0..64 { // if ((self.data >> i) & 1) == 1 { mixer ^= CONSTANT; } // } // ``` // This is a bit fragile, as LLVM really wants to use branches here, and // we rely on it to not recognise the opportunity. for i in 0..64 { let apply = (self.data >> i) & 1; let mask = !apply.wrapping_sub(1); mixer ^= CONSTANT & mask; mixer = mixer.rotate_left(1); } self.data ^= mixer; } fn gen_entropy(&mut self) -> u64 { trace!("JitterRng: collecting entropy"); // Prime `ec.prev_time`, and run the noice sources to make sure the // first loop round collects the expected entropy. let mut ec = EcState { prev_time: (self.timer)(), last_delta: 0, last_delta2: 0, mem: [0; MEMORY_SIZE], }; let _ = self.measure_jitter(&mut ec); for _ in 0..self.rounds { // If a stuck measurement is received, repeat measurement // Note: we do not guard against an infinite loop, that would mean // the timer suddenly became broken. while self.measure_jitter(&mut ec).is_none() {} } // Do a single read from `self.mem` to make sure the Memory Access noise // source is not optimised out. black_box(ec.mem[0]); self.stir_pool(); self.data } /// Basic quality tests on the timer, by measuring CPU timing jitter a few /// hundred times. /// /// If succesful, this will return the estimated number of rounds necessary /// to collect 64 bits of entropy. Otherwise a [`TimerError`] with the cause /// of the failure will be returned. 
pub fn test_timer(&mut self) -> Result<u8, TimerError> { debug!("JitterRng: testing timer ..."); // We could add a check for system capabilities such as `clock_getres` // or check for `CONFIG_X86_TSC`, but it does not make much sense as the // following sanity checks verify that we have a high-resolution timer. let mut delta_sum = 0; let mut old_delta = 0; let mut time_backwards = 0; let mut count_mod = 0; let mut count_stuck = 0; let mut ec = EcState { prev_time: (self.timer)(), last_delta: 0, last_delta2: 0, mem: [0; MEMORY_SIZE], }; // TESTLOOPCOUNT needs some loops to identify edge systems. // 100 is definitely too little. const TESTLOOPCOUNT: u64 = 300; const CLEARCACHE: u64 = 100; for i in 0..(CLEARCACHE + TESTLOOPCOUNT) { // Measure time delta of core entropy collection logic let time = (self.timer)(); self.memaccess(&mut ec.mem, true); self.lfsr_time(time, true); let time2 = (self.timer)(); // Test whether timer works if time == 0 || time2 == 0 { return Err(TimerError::NoTimer); } let delta = time2.wrapping_sub(time) as i64 as i32; // Test whether timer is fine grained enough to provide delta even // when called shortly after each other -- this implies that we also // have a high resolution timer if delta == 0 { return Err(TimerError::CoarseTimer); } // Up to here we did not modify any variable that will be // evaluated later, but we already performed some work. Thus we // already have had an impact on the caches, branch prediction, // etc. with the goal to clear it to get the worst case // measurements. if i < CLEARCACHE { continue; } if ec.stuck(delta) { count_stuck += 1; } // Test whether we have an increasing timer. if !(time2 > time) { time_backwards += 1; } // Count the number of times the counter increases in steps of 100ns // or greater. 
if (delta % 100) == 0 { count_mod += 1; } // Ensure that we have a varying delta timer which is necessary for // the calculation of entropy -- perform this check only after the // first loop is executed as we need to prime the old_delta value delta_sum += (delta - old_delta).abs() as u64; old_delta = delta; } // Do a single read from `self.mem` to make sure the Memory Access noise // source is not optimised out. black_box(ec.mem[0]); // We allow the time to run backwards for up to three times. // This can happen if the clock is being adjusted by NTP operations. // If such an operation just happens to interfere with our test, it // should not fail. The value of 3 should cover the NTP case being // performed during our test run. if time_backwards > 3 { return Err(TimerError::NotMonotonic); } // Test that the available amount of entropy per round does not get to // low. We expect 1 bit of entropy per round as a reasonable minimum // (although less is possible, it means the collector loop has to run // much more often). // `assert!(delta_average >= log2(1))` // `assert!(delta_sum / TESTLOOPCOUNT >= 1)` // `assert!(delta_sum >= TESTLOOPCOUNT)` if delta_sum < TESTLOOPCOUNT { return Err(TimerError::TinyVariantions); } // Ensure that we have variations in the time stamp below 100 for at // least 10% of all checks -- on some platforms, the counter increments // in multiples of 100, but not always if count_mod > (TESTLOOPCOUNT * 9 / 10) { return Err(TimerError::CoarseTimer); } // If we have more than 90% stuck results, then this Jitter RNG is // likely to not work well. if count_stuck > (TESTLOOPCOUNT * 9 / 10) { return Err(TimerError::TooManyStuck); } // Estimate the number of `measure_jitter` rounds necessary for 64 bits // of entropy. // // We don't try very hard to come up with a good estimate of the // available bits of entropy per round here for two reasons: // 1. Simple estimates of the available bits (like Shannon entropy) are // too optimistic. // 2. 
Unless we want to waste a lot of time during intialization, there // only a small number of samples are available. // // Therefore we use a very simple and conservative estimate: // `let bits_of_entropy = log2(delta_average) / 2`. // // The number of rounds `measure_jitter` should run to collect 64 bits // of entropy is `64 / bits_of_entropy`. let delta_average = delta_sum / TESTLOOPCOUNT; if delta_average >= 16 { let log2 = 64 - delta_average.leading_zeros(); // Do something similar to roundup(64/(log2/2)): Ok( ((64u32 * 2 + log2 - 1) / log2) as u8) } else { // For values < 16 the rounding error becomes too large, use a // lookup table. // Values 0 and 1 are invalid, and filtered out by the // `delta_sum < TESTLOOPCOUNT` test above. let log2_lookup = [0, 0, 128, 81, 64, 56, 50, 46, 43, 41, 39, 38, 36, 35, 34, 33]; Ok(log2_lookup[delta_average as usize]) } } /// Statistical test: return the timer delta of one normal run of the /// `JitterRng` entropy collector. /// /// Setting `var_rounds` to `true` will execute the memory access and the /// CPU jitter noice sources a variable amount of times (just like a real /// `JitterRng` round). /// /// Setting `var_rounds` to `false` will execute the noice sources the /// minimal number of times. This can be used to measure the minimum amount /// of entropy one round of the entropy collector can collect in the worst /// case. /// /// See this crate's README on how to use `timer_stats` to test the quality /// of `JitterRng`. pub fn timer_stats(&mut self, var_rounds: bool) -> i64 { let mut mem = [0; MEMORY_SIZE]; let time = (self.timer)(); self.memaccess(&mut mem, var_rounds); self.lfsr_time(time, var_rounds); let time2 = (self.timer)(); time2.wrapping_sub(time) as i64 } } // A function that is opaque to the optimizer to assist in avoiding dead-code // elimination. Taken from `bencher`. 
fn black_box<T>(dummy: T) -> T { unsafe { let ret = ptr::read_volatile(&dummy); mem::forget(dummy); ret } } impl RngCore for JitterRng { fn next_u32(&mut self) -> u32 { // We want to use both parts of the generated entropy if self.data_half_used { self.data_half_used = false; (self.data >> 32) as u32 } else { self.data = self.next_u64(); self.data_half_used = true; self.data as u32 } } fn next_u64(&mut self) -> u64 { self.data_half_used = false; self.gen_entropy() } fn fill_bytes(&mut self, dest: &mut [u8]) { // Fill using `next_u32`. This is faster for filling small slices (four // bytes or less), while the overhead is negligible. // // This is done especially for wrappers that implement `next_u32` // themselves via `fill_bytes`. impls::fill_bytes_via_next(self, dest) } fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { Ok(self.fill_bytes(dest)) } } impl CryptoRng for JitterRng {}
40.433566
84
0.619855
71d8f8415f0cd729815b38e3c1e1f97517981005
5,013
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Format string literals. use unicode_segmentation::UnicodeSegmentation; use regex::Regex; use Indent; use config::Config; use utils::wrap_str; use MIN_STRING; pub struct StringFormat<'a> { pub opener: &'a str, pub closer: &'a str, pub line_start: &'a str, pub line_end: &'a str, pub width: usize, pub offset: Indent, pub trim_end: bool, pub config: &'a Config, } // FIXME: simplify this! pub fn rewrite_string<'a>(orig: &str, fmt: &StringFormat<'a>) -> Option<String> { // Strip line breaks. let re = Regex::new(r"([^\\](\\\\)*)\\[\n\r][:space:]*").unwrap(); let stripped_str = re.replace_all(orig, "$1"); let graphemes = UnicodeSegmentation::graphemes(&*stripped_str, false).collect::<Vec<&str>>(); let indent = fmt.offset.to_string(fmt.config); let punctuation = ":,;."; // `cur_start` is the position in `orig` of the start of the current line. let mut cur_start = 0; let mut result = String::with_capacity(stripped_str.len() .checked_next_power_of_two() .unwrap_or(usize::max_value())); result.push_str(fmt.opener); let ender_length = fmt.line_end.len(); // If we cannot put at least a single character per line, the rewrite won't // succeed. let max_chars = try_opt!(fmt.width.checked_sub(fmt.opener.len() + ender_length + 1)) + 1; // Snip a line at a time from `orig` until it is used up. Push the snippet // onto result. 'outer: loop { // `cur_end` will be where we break the line, as an offset into `orig`. // Initialised to the maximum it could be (which may be beyond `orig`). 
let mut cur_end = cur_start + max_chars; // We can fit the rest of the string on this line, so we're done. if cur_end >= graphemes.len() { let line = &graphemes[cur_start..].join(""); result.push_str(line); break 'outer; } // Push cur_end left until we reach whitespace (or the line is too small). while !graphemes[cur_end - 1].trim().is_empty() { cur_end -= 1; if cur_end < cur_start + MIN_STRING { // We couldn't find whitespace before the string got too small. // So start again at the max length and look for punctuation. cur_end = cur_start + max_chars; while !punctuation.contains(graphemes[cur_end - 1]) { cur_end -= 1; // If we can't break at whitespace or punctuation, grow the string instead. if cur_end < cur_start + MIN_STRING { cur_end = cur_start + max_chars; while !(punctuation.contains(graphemes[cur_end - 1]) || graphemes[cur_end - 1].trim().is_empty()) { if cur_end >= graphemes.len() { let line = &graphemes[cur_start..].join(""); result.push_str(line); break 'outer; } cur_end += 1; } break; } } break; } } // Make sure there is no whitespace to the right of the break. while cur_end < stripped_str.len() && graphemes[cur_end].trim().is_empty() { cur_end += 1; } // Make the current line and add it on to result. let raw_line = graphemes[cur_start..cur_end].join(""); let line = if fmt.trim_end { raw_line.trim() } else { raw_line.as_str() }; result.push_str(line); result.push_str(fmt.line_end); result.push('\n'); result.push_str(&indent); result.push_str(fmt.line_start); // The next line starts where the current line ends. cur_start = cur_end; } result.push_str(fmt.closer); wrap_str(result, fmt.config.max_width, fmt.width, fmt.offset) } #[cfg(test)] mod test { use super::{StringFormat, rewrite_string}; #[test] fn issue343() { let config = Default::default(); let fmt = StringFormat { opener: "\"", closer: "\"", line_start: " ", line_end: "\\", width: 2, offset: ::Indent::empty(), trim_end: false, config: &config, }; rewrite_string("eq_", &fmt); } }
34.572414
97
0.557351
fe1310d5df06a00c316b6229226328961427ed53
7,363
use std::collections::HashMap; use std::env; use std::fs::File; use std::io::Read; use std::path::Path; enum Section { Header, Data, Extension, } struct Fits<'a> { header: Header, data: Data<'a>, extension: Vec<Extension>, } #[derive(Default)] struct Header { data: HashMap<String, String>, } #[derive(Default)] struct Data<'t> { data: &'t [u8], } struct Extension { data: String, } struct Record { key: String, value: String, } const BLOCK_SIZE: usize = 2880; const RECORD_SIZE: usize = 80; fn main() { println!("Welcome to the FITS processing tool, built in Rust!"); let args: Vec<String> = env::args().collect(); let file_name = if args.len() > 1 { &args[1] } else { "data/HRSz0yd020fm_c2f.fits" }; let data_path = Path::new(file_name); let mut file = File::open(data_path).expect("Couldn't open data file"); let mut fits_data = Vec::new(); let bytes = file .read_to_end(&mut fits_data) .expect("Couldn't read data file"); println!( "Successfully read {} bytes from {}", bytes, data_path.display() ); let mut state = Section::Header; let mut block_index = 0; let mut fits: Fits = Fits { header: Default::default(), data: Default::default(), extension: Default::default(), }; for (current_block, chunk) in fits_data.chunks(BLOCK_SIZE).enumerate() { //println!("Processing chunk, {}", current_block); match state { Section::Header => { if String::from_utf8_lossy(chunk).contains(" END ") { fits.header = parse_header(&fits_data, block_index, current_block); state = Section::Data; block_index = current_block + 1; } } Section::Data => { if let Some(dimensionality) = get_dimensionality(&fits.header) { if dimensionality == 2 { if let Some((x, y)) = get_xy(&fits.header) { fits.data = parse_data(&fits_data, block_index, (x, y)); } } } state = Section::Extension; block_index = current_block + (fits.data.data.len() / BLOCK_SIZE) + 1; } Section::Extension => { if String::from_utf8_lossy(chunk).contains("XTENSION") { println!("[Found extension start]"); state = Section::Extension; } // @todo: it 
seems possible to hit this block and try and parse an // extension without actually finding an "XTENSION" keyword // @fix: fix this - and check if String::from_utf8_lossy(chunk).contains(" END ") { fits.extension .push(parse_extension(&fits_data, block_index, current_block)); state = Section::Header; block_index = current_block + 1; } } }; } render_data(&fits); //println!("Size of data unit: {}", fits.data.data.len()); //println!("Extension data: {}", fits.extension[0].data); } fn get_dimensionality(header: &Header) -> Option<u32> { if let Some(value) = header.data.get("NAXIS") { if let Ok(dim) = value.parse::<u32>() { Some(dim) } else { None } } else { None } } fn get_xy(header: &Header) -> Option<(u32, u32)> { let (mut x, mut y) = (0, 0); if let Some(value) = header.data.get("NAXIS1") { x = value.parse::<u32>().unwrap(); } if let Some(value) = header.data.get("NAXIS2") { y = value.parse::<u32>().unwrap(); } Some((x, y)) } // interpret data based on header values, fn render_data(fits: &Fits) { println!("[Rendering FITS data]"); let mut rendered_data: Vec<f32> = vec![0.0; fits.data.data.len() / 4]; // @todo: check BITPIX - if it's 32 bit... 
use byteorder::{BigEndian, ByteOrder}; BigEndian::read_f32_into(&fits.data.data, &mut rendered_data); // normalise and stretch the data for rendering / visualisation let mut normalised_data = Vec::new(); normalise_asinh(255.0, &rendered_data, &mut normalised_data); // write the data as a PNG write_png(&fits, &normalised_data, "data/output.png"); } fn normalise_asinh(normalise_to: f32, data: &[f32], normal_data: &mut Vec<u8>) { let mut high = 0.0; for i in data { if *i > high { high = *i; } } //println!("The largest pixel value is: {}", high); for i in data { let value = i.asinh() / high.asinh() * normalise_to; //let value = i / high * normalise_to; normal_data.push(value as u8); } } fn write_png(fits: &Fits, data: &[u8], output_path: &str) { use std::io::BufWriter; let file = File::create(output_path).expect("Couldn't create PNG file"); let buffer = BufWriter::new(file); if let Some((x, y)) = get_xy(&fits.header) { let mut encoder = png::Encoder::new(buffer, x, y); encoder.set_color(png::ColorType::Grayscale); encoder.set_depth(png::BitDepth::Eight); let mut writer = encoder.write_header().expect("Couldn't write PNG header"); if writer.write_image_data(&data).is_err() { println!("Couldn't write PNG image data"); } } } fn parse_header(fits: &[u8], last_block: usize, current_block: usize) -> Header { let mut header_records = HashMap::new(); let header_data = &fits[last_block * BLOCK_SIZE..(current_block + 1) * BLOCK_SIZE]; println!("[Found header end]"); for chunk in header_data.chunks(RECORD_SIZE) { let record_string = String::from_utf8_lossy(chunk); if let Some(Record { key, value }) = parse_record(&record_string) { //println!("{}: {}", key, value); header_records.insert(key, value); } } Header { data: header_records, } } fn parse_data(fits: &[u8], last_block: usize, (x, y): (u32, u32)) -> Data { // @Todo: work out data size by data unit let data_size = x * y * 4; let data_unit = &fits[last_block * BLOCK_SIZE..(data_size as usize + (last_block * BLOCK_SIZE))]; 
//println!("Data length: {}", data_unit.len()); assert_eq!(data_size as usize, data_unit.len()); Data { data: data_unit } } fn parse_extension(fits: &[u8], last_block: usize, current_block: usize) -> Extension { println!("[Found extension end]"); let extension_data = &fits[last_block * BLOCK_SIZE..(current_block + 1) * BLOCK_SIZE]; Extension { data: String::from_utf8_lossy(extension_data).to_string(), } } fn parse_record(record: &str) -> Option<Record> { if record.contains('=') { let records: Vec<&str> = record.splitn(2, '=').collect(); let k = records[0]; let mut v = records[1]; if v.contains('/') { let values: Vec<&str> = v.splitn(2, '/').collect(); v = values[0]; } let r = Record { key: k.trim().to_string(), value: v.trim().to_string(), }; Some(r) } else { None } }
29.102767
90
0.55385
5009d2535816f8a3b82b6355be16b790739ca832
2,471
#[macro_use] mod for_each2; #[macro_use] mod for_each3; pub(crate) use for_each2::{for_each_stride_parallel_global_unchecked2, Array2x1ForEachState}; pub(crate) use for_each3::{for_each_stride_parallel_global_unchecked3, Array3x1ForEachState}; use crate::{ArrayIndexer, Local, Stride}; use building_blocks_core::prelude::*; /// All information required to do strided iteration over an extent. #[derive(Clone)] pub struct ArrayForEach<N> { /// Extent of the iteration coordinates. pub(crate) iter_extent: ExtentN<N>, /// Shape of the array being indexed. pub(crate) array_shape: PointN<N>, /// Array-local minimum where we start indexing. pub(crate) index_min: Local<N>, } /// A 2D `ArrayForEach`. pub type Array2x1ForEach = ArrayForEach<[i32; 2]>; /// A 3D `ArrayForEach`. pub type Array3x1ForEach = ArrayForEach<[i32; 3]>; impl<N> ArrayForEach<N> where PointN<N>: IntegerPoint<N>, { #[inline] pub fn new_local_unchecked( array_shape: PointN<N>, index_min: Local<N>, iter_shape: PointN<N>, ) -> Self { Self { iter_extent: ExtentN::from_min_and_shape(index_min.0, iter_shape), array_shape, index_min, } } #[inline] pub fn new_local(array_shape: PointN<N>, iter_extent: &ExtentN<N>) -> Self { // Make sure we don't index out of array bounds. let iter_extent = iter_extent.intersection(&ExtentN::from_min_and_shape(PointN::ZERO, array_shape)); Self::new_local_unchecked(array_shape, Local(iter_extent.minimum), iter_extent.shape) } #[inline] pub fn new_global_unchecked(array_extent: &ExtentN<N>, iter_extent: ExtentN<N>) -> Self { // Translate to local coordinates. let index_min = Local(iter_extent.minimum - array_extent.minimum); Self { iter_extent, array_shape: array_extent.shape, index_min, } } #[inline] pub fn new_global(array_extent: &ExtentN<N>, iter_extent: ExtentN<N>) -> Self { // Make sure we don't index out of array bounds. 
let iter_extent = iter_extent.intersection(array_extent); Self::new_global_unchecked(array_extent, iter_extent) } } impl<N> ArrayForEach<N> where N: ArrayIndexer<N>, PointN<N>: Copy, { pub fn for_each_point_and_stride(self, f: impl FnMut(PointN<N>, Stride)) { N::for_each_point_and_stride_unchecked(self, f) } }
29.070588
94
0.66289
5bea8b2e8a503dd03384a7340ebed887d06a043c
13,155
//! Utilities for rendering nodes. use std::rc::Rc; use ahash::AHashMap; use wasm_bindgen::UnwrapThrowExt; use crate::generic_node::GenericNode; use crate::reactive::create_effect; use crate::view::{View, ViewType}; /// Insert a [`GenericNode`] under `parent` at the specified `marker`. If `initial` is `Some(_)`, /// `initial` will be replaced with the new inserted node. /// /// # Params /// * `parent` - The parent node to insert `accessor` under. /// * `accessor` - The [`View`] to be inserted. /// * `initial` - An optional initial node that is already inserted into the DOM. /// * `marker` - An optional marker node. If `marker` is `Some(_)`, `accessor` will be inserted /// directly before `marker`. If `marker` is `None`, `accessor` will be appended at the end of /// `parent`. /// * `multi` - A boolean flag indicating whether the node to be inserted is the only child of /// `parent`. Setting this to `true` will enable certain optimizations when clearing the node. /// Even if the node to be inserted is the only child of `parent`, `multi` can still be set to /// `false` but forgoes the optimizations. 
pub fn insert<G: GenericNode>( parent: &G, accessor: View<G>, initial: Option<View<G>>, marker: Option<&G>, multi: bool, ) { insert_expression(parent, &accessor, initial, marker, false, multi); } fn insert_expression<G: GenericNode>( parent: &G, value: &View<G>, mut current: Option<View<G>>, marker: Option<&G>, unwrap_fragment: bool, multi: bool, ) { while let Some(View { inner: ViewType::Dyn(f), }) = current { current = Some(f.get().as_ref().clone()); } match &value.inner { ViewType::Node(node) => { if let Some(current) = current { clean_children(parent, current.flatten(), marker, Some(node), multi); } else { parent.insert_child_before(node, marker); } } ViewType::Dyn(f) => { let parent = parent.clone(); let marker = marker.cloned(); let f = f.clone(); create_effect(move || { let mut value = f.get(); while let ViewType::Dyn(f) = &value.inner { value = f.get(); } insert_expression( &parent, &value, current.clone(), marker.as_ref(), false, multi, ); current = Some(value.as_ref().clone()); }); } ViewType::Fragment(fragment) => { let mut v = Vec::new(); // normalize_incoming_fragment will subscribe to all dynamic nodes in the function so as // to trigger the create_effect when the template changes. let dynamic = normalize_incoming_fragment(&mut v, fragment.as_ref(), unwrap_fragment); if dynamic { let parent = parent.clone(); let marker = marker.cloned(); create_effect(move || { let value = View::new_fragment(v.clone()); // This will call normalize_incoming_fragment again, but this time with the // unwrap_fragment arg set to true. 
insert_expression( &parent, &value, current.clone(), marker.as_ref(), true, false, ); current = Some(View::new_fragment( value.flatten().into_iter().map(View::new_node).collect(), )); // TODO: do not perform unnecessary flattening of template }); } else { let v = v .into_iter() .map(|x| match x.inner { ViewType::Node(node) => node, _ => unreachable!(), }) .collect::<Vec<_>>(); if v.is_empty() && current.is_some() && !multi { // Fast path when new array is empty. clean_children(parent, Vec::new(), None, None, false); } else { match current { Some(current) => match current.inner { ViewType::Node(node) => { reconcile_fragments(parent, &mut [node], &v); } ViewType::Dyn(_) => unreachable!(), ViewType::Fragment(ref fragment) => { if fragment.is_empty() { append_nodes(parent, v, marker); } else { reconcile_fragments(parent, &mut current.flatten(), &v); } } }, None => append_nodes(parent, v, marker), } } } } } } /// Cleans the children specified by `current` from `parent`. /// /// # Params /// * `parent` - The parent node from which to clean the children. /// * `current` - A [`Vec`] of [`GenericNode`]s that are to be removed. /// * `marker` - If `marker` is `None`, all the nodes from `parent` are removed regardless of /// `current`. This behavior will likely change in the future. /// * `replacement` - An optional replacement node for the removed nodes. pub fn clean_children<G: GenericNode>( parent: &G, current: Vec<G>, _marker: Option<&G>, replacement: Option<&G>, multi: bool, ) { if !multi { parent.update_inner_text(""); if let Some(replacement) = replacement { parent.append_child(replacement); } return; } for node in current { if node.parent_node().as_ref() == Some(parent) { if let Some(replacement) = replacement { parent.replace_child(&node, replacement); } else { parent.remove_child(&node); } } } } /// Appends all the nodes in `fragment` to `parent` behind `marker`. 
pub fn append_nodes<G: GenericNode>(parent: &G, fragment: Vec<G>, marker: Option<&G>) { for node in fragment { parent.insert_child_before(&node, marker); } } /// Normalizes a `Vec<Template<G>>` into a `Vec<G>`. /// /// Returns whether the normalized `Vec<G>` is dynamic (and should be rendered in an effect). /// /// # Params /// * `v` - The [`Vec`] to write the output to. /// * `fragment` - The `Vec<Template<G>>` to normalize. /// * `unwrap` - If `true`, unwraps the `fragment` without setting `dynamic` to true. In most cases, /// this should be `false`. pub fn normalize_incoming_fragment<G: GenericNode>( v: &mut Vec<View<G>>, fragment: &[View<G>], unwrap: bool, ) -> bool { let mut dynamic = false; for template in fragment { match &template.inner { ViewType::Node(_) => v.push(template.clone()), ViewType::Dyn(f) if unwrap => { let mut value = f.get().as_ref().clone(); while let ViewType::Dyn(f) = &value.inner { value = f.get().as_ref().clone(); } let fragment: Rc<Box<[View<G>]>> = match &value.inner { ViewType::Node(_) => Rc::new(Box::new([value])), ViewType::Fragment(fragment) => Rc::clone(fragment), _ => unreachable!(), }; dynamic = normalize_incoming_fragment(v, fragment.as_ref().as_ref(), false) || dynamic; } ViewType::Dyn(_) => { // Not unwrap v.push(template.clone()); dynamic = true; } ViewType::Fragment(fragment) => { dynamic = normalize_incoming_fragment(v, fragment.as_ref().as_ref(), false) || dynamic; } } } dynamic } /// Reconciles an array of nodes. /// /// # Params /// * `parent` - The parent node under which all other nodes are (direct) children. /// * `a` - The current/existing nodes that are to be diffed. /// * `b` - The new nodes that are to be inserted. After the reconciliation, all the nodes in `b` /// should be inserted under `parent`. /// /// # Panics /// Panics if `a.is_empty()`. Append nodes instead. 
pub fn reconcile_fragments<G: GenericNode>(parent: &G, a: &mut [G], b: &[G]) { debug_assert!(!a.is_empty(), "a cannot be empty"); // Sanity check: make sure all nodes in a are children of parent. #[cfg(debug_assertions)] { for (i, node) in a.iter().enumerate() { if node.parent_node().as_ref() != Some(parent) { panic!( "node {} in existing nodes Vec is not a child of parent. node = {:#?}", i, node ); } } } let b_len = b.len(); let mut a_end = a.len(); let mut b_end = b_len; let mut a_start = 0; let mut b_start = 0; let mut map = None::<AHashMap<G, usize>>; // Last node in a. let after = a[a_end - 1].next_sibling(); while a_start < a_end || b_start < b_end { if a_end == a_start { // Append. let node = if b_end < b_len { if b_start != 0 { b[b_start - 1].next_sibling() } else { Some(b[b_end - b_start].clone()) } } else { after.clone() }; while b_start < b_end { parent.insert_child_before(&b[b_start], node.as_ref()); b_start += 1; } } else if b_end == b_start { // Remove. while a_start < a_end { if map.is_none() || !map.as_ref().unwrap_throw().contains_key(&a[a_start]) { parent.remove_child(&a[a_start]); } a_start += 1; } } else if a[a_start] == b[b_start] { // Common prefix. a_start += 1; b_start += 1; } else if a[a_end - 1] == b[b_end - 1] { // Common suffix. a_end -= 1; b_end -= 1; } else if a[a_start] == b[b_end - 1] && b[b_start] == a[a_end - 1] { // Swap backwards. a_end -= 1; b_end -= 1; let node = a[a_end].next_sibling(); parent.insert_child_before(&b[b_start], a[a_start].next_sibling().as_ref()); a_start += 1; b_start += 1; parent.insert_child_before(&b[b_end], node.as_ref()); a[a_end] = b[b_end].clone(); } else { // Fallback to map. 
if map.is_none() { map = Some(AHashMap::with_capacity(b_end - b_start)); for (i, item) in b.iter().enumerate().take(b_end).skip(b_start) { map.as_mut().unwrap_throw().insert(item.clone(), i); } } let map = map.as_ref().unwrap_throw(); let index = map.get(&a[a_start]); if let Some(index) = index { if b_start < *index && *index < b_end { let mut i = a_start; let mut sequence = 1; let mut t; while i + 1 < a_end && i + 1 < b_end { i += 1; t = map.get(&a[i]); if t.is_none() || *t.unwrap_throw() != *index + sequence { break; } sequence += 1; } if sequence > *index - b_start { let node = &a[a_start]; while b_start < *index { parent.insert_child_before(&b[b_start], Some(node)); b_start += 1; } } else { parent.replace_child(&a[a_start], &b[b_start]); a_start += 1; b_start += 1; } } else { a_start += 1; } } else { parent.remove_child(&a[a_start]); a_start += 1; } } } // Sanity check: make sure all nodes in b are children of parent after reconciliation. #[cfg(debug_assertions)] { for (i, node) in b.iter().enumerate() { if node.parent_node().as_ref() != Some(parent) { panic!( "node {} in new nodes Vec is not a child of parent after reconciliation. node = {:#?}", i, node ); } } } }
35.942623
107
0.472748
d776b7b1ff7f2218125cfd67df1de906740c0131
1,253
use std::future::Future; use std::task::{Context, Poll}; use std::pin::Pin; use crate::op::Op; pub struct CompRunFuture<'s, C: Comp + ?Sized> { comp: &'s C, future: Pin<Box<C::TickFuture<'s>>>, } impl<'s, C: Comp + ?Sized> Future for CompRunFuture<'s, C> { type Output = Result<!, C::Error>; fn poll(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<Self::Output> { match self.future.as_mut().poll(ctx) { Poll::Pending => Poll::Pending, Poll::Ready(Err(err)) => Poll::Ready(Err(err)), Poll::Ready(Ok(_)) => { let this = self.get_mut(); this.future = Box::pin(this.comp.tick()); Pin::new(this).poll(ctx) } } } } pub trait Comp { type Error: std::fmt::Debug; type TickFuture<'s>: Future<Output = Result<(), Self::Error>>; fn tick(&self) -> Self::TickFuture<'_>; } pub trait CompExt: Comp { fn run(&self) -> CompRunFuture<'_, Self> { CompRunFuture { comp: self, future: Box::pin(self.tick()), } } } impl<C: Comp> CompExt for C {} pub trait CompConnector<O: Op> { type Comp: Comp; #[must_use] fn connect(&self, op: O) -> Self::Comp; }
23.641509
84
0.532322
fb55005be1582cd4e6eedcc9a66850e8aaf74abd
12,002
use crate::internal::prelude::*; use crate::CacheAndHttp; use tokio::sync::{Mutex, RwLock}; use std::{ collections::{HashMap, VecDeque}, sync::Arc, }; use futures::channel::mpsc::{self, UnboundedSender as Sender}; use super::super::super::{EventHandler, RawEventHandler}; use super::{ GatewayIntents, ShardId, ShardManagerMessage, ShardManagerMonitor, ShardQueuer, ShardQueuerMessage, ShardRunnerInfo, }; use log::info; use typemap_rev::TypeMap; #[cfg(feature = "framework")] use crate::framework::Framework; #[cfg(feature = "voice")] use crate::client::bridge::voice::ClientVoiceManager; /// A manager for handling the status of shards by starting them, restarting /// them, and stopping them when required. /// /// **Note**: The [`Client`] internally uses a shard manager. If you are using a /// Client, then you do not need to make one of these. /// /// # Examples /// /// Initialize a shard manager with a framework responsible for shards 0 through /// 2, of 5 total shards: /// /// ```rust,no_run /// # use std::error::Error; /// # /// # #[cfg(feature = "voice")] /// # use serenity::client::bridge::voice::ClientVoiceManager; /// # #[cfg(feature = "voice")] /// # use serenity::model::id::UserId; /// # #[cfg(feature = "cache")] /// # use serenity::cache::Cache; /// # /// # #[cfg(feature = "framework")] /// # async fn run() -> Result<(), Box<dyn Error>> { /// # /// use tokio::sync::{Mutex, RwLock}; /// use serenity::client::bridge::gateway::{ShardManager, ShardManagerOptions}; /// use serenity::client::{EventHandler, RawEventHandler}; /// use serenity::http::Http; /// use serenity::CacheAndHttp; /// use serenity::prelude::*; /// use serenity::framework::{Framework, StandardFramework}; /// use std::sync::Arc; /// use std::env; /// /// struct Handler; /// /// impl EventHandler for Handler { } /// impl RawEventHandler for Handler { } /// /// # let cache_and_http = Arc::new(CacheAndHttp::default()); /// # let http = &cache_and_http.http; /// let gateway_url = 
Arc::new(Mutex::new(http.get_gateway().await?.url)); /// let data = Arc::new(RwLock::new(TypeMap::new())); /// let event_handler = Arc::new(Handler) as Arc<dyn EventHandler>; /// let framework = Arc::new(Box::new(StandardFramework::new()) as Box<dyn Framework + 'static + Send + Sync>); /// /// ShardManager::new(ShardManagerOptions { /// data: &data, /// event_handler: &Some(event_handler), /// raw_event_handler: &None, /// framework: &framework, /// // the shard index to start initiating from /// shard_index: 0, /// // the number of shards to initiate (this initiates 0, 1, and 2) /// shard_init: 3, /// // the total number of shards in use /// shard_total: 5, /// # #[cfg(feature = "voice")] /// # voice_manager: &Arc::new(Mutex::new(ClientVoiceManager::new(0, UserId(0)))), /// ws_url: &gateway_url, /// # cache_and_http: &cache_and_http, /// guild_subscriptions: true, /// intents: None, /// }); /// # Ok(()) /// # } /// ``` /// /// [`Client`]: ../../struct.Client.html #[derive(Debug)] pub struct ShardManager { monitor_tx: Sender<ShardManagerMessage>, /// The shard runners currently managed. /// /// **Note**: It is highly unrecommended to mutate this yourself unless you /// need to. Instead prefer to use methods on this struct that are provided /// where possible. pub runners: Arc<Mutex<HashMap<ShardId, ShardRunnerInfo>>>, /// The index of the first shard to initialize, 0-indexed. shard_index: u64, /// The number of shards to initialize. shard_init: u64, /// The total shards in use, 1-indexed. shard_total: u64, shard_queuer: Sender<ShardQueuerMessage>, } impl ShardManager { /// Creates a new shard manager, returning both the manager and a monitor /// for usage in a separate thread. 
pub async fn new(opt: ShardManagerOptions<'_>) -> (Arc<Mutex<Self>>, ShardManagerMonitor) { let (thread_tx, thread_rx) = mpsc::unbounded(); let (shard_queue_tx, shard_queue_rx) = mpsc::unbounded(); let runners = Arc::new(Mutex::new(HashMap::new())); let (shutdown_send, shutdown_recv) = mpsc::unbounded(); let mut shard_queuer = ShardQueuer { data: Arc::clone(opt.data), event_handler: opt.event_handler.as_ref().map(|h| Arc::clone(h)), raw_event_handler: opt.raw_event_handler.as_ref().map(|rh| Arc::clone(rh)), #[cfg(feature = "framework")] framework: Arc::clone(&opt.framework), last_start: None, manager_tx: thread_tx.clone(), queue: VecDeque::new(), runners: Arc::clone(&runners), rx: shard_queue_rx, #[cfg(feature = "voice")] voice_manager: Arc::clone(opt.voice_manager), ws_url: Arc::clone(opt.ws_url), cache_and_http: Arc::clone(&opt.cache_and_http), guild_subscriptions: opt.guild_subscriptions, intents: opt.intents, shard_shutdown: shutdown_recv, }; tokio::spawn(async move { shard_queuer.run().await; }); let manager = Arc::new(Mutex::new(Self { monitor_tx: thread_tx, shard_index: opt.shard_index, shard_init: opt.shard_init, shard_queuer: shard_queue_tx, shard_total: opt.shard_total, runners, })); (Arc::clone(&manager), ShardManagerMonitor { rx: thread_rx, manager, shutdown: shutdown_send, }) } /// Returns whether the shard manager contains either an active instance of /// a shard runner responsible for the given ID. /// /// If a shard has been queued but has not yet been initiated, then this /// will return `false`. Consider double-checking [`is_responsible_for`] to /// determine whether this shard manager is responsible for the given shard. /// /// [`is_responsible_for`]: #method.is_responsible_for pub async fn has(&self, shard_id: ShardId) -> bool { self.runners.lock().await.contains_key(&shard_id) } /// Initializes all shards that the manager is responsible for. /// /// This will communicate shard boots with the [`ShardQueuer`] so that they /// are properly queued. 
/// /// [`ShardQueuer`]: struct.ShardQueuer.html pub fn initialize(&mut self) -> Result<()> { let shard_to = self.shard_index + self.shard_init; for shard_id in self.shard_index..shard_to { let shard_total = self.shard_total; self.boot([ShardId(shard_id), ShardId(shard_total)]); } Ok(()) } /// Sets the new sharding information for the manager. /// /// This will shutdown all existing shards. /// /// This will _not_ instantiate the new shards. pub async fn set_shards(&mut self, index: u64, init: u64, total: u64) { self.shutdown_all().await; self.shard_index = index; self.shard_init = init; self.shard_total = total; } /// Restarts a shard runner. /// /// This sends a shutdown signal to a shard's associated [`ShardRunner`], /// and then queues a initialization of a shard runner for the same shard /// via the [`ShardQueuer`]. /// /// # Examples /// /// Creating a client and then restarting a shard by ID: /// /// _(note: in reality this precise code doesn't have an effect since the /// shard would not yet have been initialized via [`initialize`], but the /// concept is the same)_ /// /// ```rust,no_run /// use serenity::client::bridge::gateway::ShardId; /// use serenity::client::{Client, EventHandler}; /// use std::env; /// /// struct Handler; /// /// impl EventHandler for Handler { } /// /// # async fn run() -> Result<(), Box<dyn std::error::Error>> { /// let token = std::env::var("DISCORD_TOKEN")?; /// let mut client = Client::new(&token).event_handler(Handler).await?; /// /// // restart shard ID 7 /// client.shard_manager.lock().await.restart(ShardId(7)).await; /// # Ok(()) /// # } /// ``` /// /// [`ShardQueuer`]: struct.ShardQueuer.html /// [`ShardRunner`]: struct.ShardRunner.html /// [`initialize`]: #method.initialize pub async fn restart(&mut self, shard_id: ShardId) { info!("Restarting shard {}", shard_id); self.shutdown(shard_id, 4000); let shard_total = self.shard_total; self.boot([shard_id, ShardId(shard_total)]); } /// Returns the [`ShardId`]s of the shards 
that have been instantiated and /// currently have a valid [`ShardRunner`]. /// /// [`ShardId`]: struct.ShardId.html /// [`ShardRunner`]: struct.ShardRunner.html pub async fn shards_instantiated(&self) -> Vec<ShardId> { self.runners.lock().await.keys().cloned().collect() } /// Attempts to shut down the shard runner by Id. /// /// Returns a boolean indicating whether a shard runner was present. This is /// _not_ necessary an indicator of whether the shard runner was /// successfully shut down. /// /// **Note**: If the receiving end of an mpsc channel - theoretically owned /// by the shard runner - no longer exists, then the shard runner will not /// know it should shut down. This _should never happen_. It may already be /// stopped. pub fn shutdown(&mut self, shard_id: ShardId, code: u16) { info!("Shutting down shard {}", shard_id); let _ = self.shard_queuer.unbounded_send(ShardQueuerMessage::ShutdownShard(shard_id, code)); } /// Sends a shutdown message for all shards that the manager is responsible /// for that are still known to be running. /// /// If you only need to shutdown a select number of shards, prefer looping /// over the [`shutdown`] method. /// /// [`shutdown`]: #method.shutdown pub async fn shutdown_all(&mut self) { let keys = { let runners = self.runners.lock().await; if runners.is_empty() { return; } runners.keys().cloned().collect::<Vec<_>>() }; info!("Shutting down all shards"); for shard_id in keys { self.shutdown(shard_id, 1000); } let _ = self.shard_queuer.unbounded_send(ShardQueuerMessage::Shutdown); let _ = self.monitor_tx.unbounded_send(ShardManagerMessage::ShutdownInitiated); } fn boot(&mut self, shard_info: [ShardId; 2]) { info!("Telling shard queuer to start shard {}", shard_info[0]); let msg = ShardQueuerMessage::Start(shard_info[0], shard_info[1]); let _ = self.shard_queuer.unbounded_send(msg); } } impl Drop for ShardManager { /// A custom drop implementation to clean up after the manager. 
/// /// This shuts down all active [`ShardRunner`]s and attempts to tell the /// [`ShardQueuer`] to shutdown. /// /// [`ShardQueuer`]: struct.ShardQueuer.html /// [`ShardRunner`]: struct.ShardRunner.html fn drop(&mut self) { let _ = self.shard_queuer.unbounded_send(ShardQueuerMessage::Shutdown); let _ = self.monitor_tx.unbounded_send(ShardManagerMessage::ShutdownInitiated); } } pub struct ShardManagerOptions<'a> { pub data: &'a Arc<RwLock<TypeMap>>, pub event_handler: &'a Option<Arc<dyn EventHandler>>, pub raw_event_handler: &'a Option<Arc<dyn RawEventHandler>>, #[cfg(feature = "framework")] pub framework: &'a Arc<Box<dyn Framework + Send + Sync>>, pub shard_index: u64, pub shard_init: u64, pub shard_total: u64, #[cfg(feature = "voice")] pub voice_manager: &'a Arc<Mutex<ClientVoiceManager>>, pub ws_url: &'a Arc<Mutex<String>>, pub cache_and_http: &'a Arc<CacheAndHttp>, pub guild_subscriptions: bool, pub intents: Option<GatewayIntents>, }
34.991254
111
0.621396
568cb628b9c13dde697b9d0c57e38a9a910bba40
656
fn main() { let (a, b, c) = (&mut [1], &mut [2], &mut [3]); let v: Vec<&mut [u8]> = vec![a, b, c]; let (x, y, z) = (&mut [1, 2], &mut [2, 3], &mut [3, 4]); let w: Vec<&mut [u8]> = vec![x, y, z]; println!("{:?}", v); println!("{:?}", w); // &str "hello", &String::from("hello")[..], &String::from("hello") let m = [1, 2, 3]; let n = vec![4, 5, 6]; test(&n); test(&m); let h = [9, 8, 7, 6, 5, 4]; let j: Vec<&[u8]> = h.chunks(2).collect(); println!("{:?}", j); } fn test(list: &[u8]) { println!("{:?}", list); } /* [[1], [2], [3]] [[1, 2], [2, 3], [3, 4]] [4, 5, 6] [1, 2, 3] [[9, 8], [7, 6], [5, 4]] */
18.742857
69
0.378049
33379713c89e4f81427cc4894b47a09cd0c9961d
6,105
use std::error::Error; use std::fmt; pub mod state_machine; #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] pub enum ParseError<L, T, E> { /// Generated by the parser when it encounters a token (or EOF) it did not /// expect. InvalidToken { location: L }, /// Generated by the parser when it encounters a token (or EOF) it did not /// expect. UnrecognizedToken { /// If this is `Some`, then an unexpected token of type `T` /// was observed, with a span given by the two `L` values. If /// this is `None`, then EOF was observed when it was not /// expected. token: Option<(L, T, L)>, /// The set of expected tokens: these names are taken from the /// grammar and hence may not necessarily be suitable for /// presenting to the user. expected: Vec<String>, }, /// Generated by the parser when it encounters additional, /// unexpected tokens. ExtraToken { token: (L, T, L) }, /// Custom error type. User { error: E }, } impl<L, T, E> ParseError<L, T, E> { fn map_intern<FL, LL, FT, TT, FE, EE>( self, loc_op: FL, tok_op: FT, err_op: FE, ) -> ParseError<LL, TT, EE> where FL: Fn(L) -> LL, FT: Fn(T) -> TT, FE: Fn(E) -> EE, { let maptok = |(s, t, e): (L, T, L)| (loc_op(s), tok_op(t), loc_op(e)); match self { ParseError::InvalidToken { location } => ParseError::InvalidToken { location: loc_op(location), }, ParseError::UnrecognizedToken { token, expected } => ParseError::UnrecognizedToken { token: token.map(maptok), expected: expected, }, ParseError::ExtraToken { token } => ParseError::ExtraToken { token: maptok(token), }, ParseError::User { error } => ParseError::User { error: err_op(error), }, } } pub fn map_location<F, LL>(self, op: F) -> ParseError<LL, T, E> where F: Fn(L) -> LL, { self.map_intern(op, |x| x, |x| x) } pub fn map_token<F, TT>(self, op: F) -> ParseError<L, TT, E> where F: Fn(T) -> TT, { self.map_intern(|x| x, op, |x| x) } pub fn map_error<F, EE>(self, op: F) -> ParseError<L, T, EE> where F: Fn(E) -> EE, { self.map_intern(|x| x, |x| x, op) } } impl<L, T, E> 
fmt::Display for ParseError<L, T, E> where L: fmt::Display, T: fmt::Display, E: fmt::Display, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::ParseError::*; match *self { InvalidToken { ref location } => write!(f, "Invalid token at {}", location), UnrecognizedToken { ref token, ref expected, } => { match *token { Some((ref start, ref token, ref end)) => try!(write!( f, "Unrecognized token `{}` found at {}:{}", token, start, end )), None => try!(write!(f, "Unrecognized EOF")), } if !expected.is_empty() { try!(writeln!(f, "")); for (i, e) in expected.iter().enumerate() { let sep = match i { 0 => "Expected one of", _ if i < expected.len() - 1 => ",", // Last expected message to be written _ => " or", }; try!(write!(f, "{} {}", sep, e)); } } Ok(()) } ExtraToken { token: (ref start, ref token, ref end), } => write!(f, "Extra token {} found at {}:{}", token, start, end), User { ref error } => write!(f, "{}", error), } } } impl<L, T, E> Error for ParseError<L, T, E> where L: fmt::Debug + fmt::Display, T: fmt::Debug + fmt::Display, E: fmt::Debug + fmt::Display, { fn description(&self) -> &str { "parse error" } } #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] pub struct ErrorRecovery<L, T, E> { pub error: ParseError<L, T, E>, pub dropped_tokens: Vec<(L, T, L)>, } /// Define a module using the generated parse from a `.lalrpop` file. /// /// You have to specify the name of the module and the path of the file /// generated by LALRPOP. If the input is in the root directory, you can /// omit it. /// /// # Example /// ```ignore /// // load parser in src/parser.lalrpop /// lalrpop_mod!(parser); /// /// // load parser in src/lex/parser.lalrpop /// lalrpop_mod!(parser, "/lex/parser.rs"); /// /// // define a public module /// lalrpop_mod!(pub parser); /// ``` #[macro_export] macro_rules! 
lalrpop_mod { ($(#[$attr:meta])* $modname:ident) => { lalrpop_mod!($(#[$attr])* $modname, concat!("/", stringify!($modname), ".rs")); }; ($(#[$attr:meta])* pub $modname:ident) => { lalrpop_mod!($(#[$attr])* pub $modname, concat!("/", stringify!($modname), ".rs")); }; ($(#[$attr:meta])* $modname:ident, $source:expr) => { $(#[$attr])* mod $modname { include!(concat!(env!("OUT_DIR"), $source)); } }; ($(#[$attr:meta])* pub $modname:ident, $source:expr) => { $(#[$attr])* pub mod $modname { include!(concat!(env!("OUT_DIR"), $source)); } }; } #[cfg(test)] mod tests { use super::*; #[test] fn test() { let err = ParseError::UnrecognizedToken::<i32, &str, &str> { token: Some((1, "t0", 2)), expected: vec!["t1", "t2", "t3"] .into_iter() .map(|s| s.to_string()) .collect(), }; assert_eq!( format!("{}", err), "Unrecognized token `t0` found at 1:2\n\ Expected one of t1, t2 or t3" ); } }
29.780488
96
0.482064
5b6c517ddf27ccbc8bfee402e9ca4187d99da91a
12,155
#[cfg(any( target_os = "linux", all(target_os = "macos", feature = "posix-signals-on-macos") ))] mod tests { use anyhow::Result; use rustix::io::{mprotect, MprotectFlags}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use wasmtime::unix::StoreExt; use wasmtime::*; const WAT1: &str = r#" (module (func $hostcall_read (import "" "hostcall_read") (result i32)) (func $read (export "read") (result i32) (i32.load (i32.const 0)) ) (func $read_out_of_bounds (export "read_out_of_bounds") (result i32) (i32.load (i32.mul ;; memory size in Wasm pages (memory.size) ;; Wasm page size (i32.const 65536) ) ) ) (func (export "hostcall_read") (result i32) call $hostcall_read ) (func $start (i32.store (i32.const 0) (i32.const 123)) ) (start $start) (memory (export "memory") 1 4) ) "#; const WAT2: &str = r#" (module (import "other_module" "read" (func $other_module.read (result i32))) (func $run (export "run") (result i32) call $other_module.read) ) "#; fn invoke_export(store: &mut Store<()>, instance: Instance, func_name: &str) -> Result<i32> { let ret = instance .get_typed_func::<(), i32, _>(&mut *store, func_name)? 
.call(store, ())?; Ok(ret) } // Locate "memory" export, get base address and size and set memory protection to PROT_NONE fn set_up_memory(store: &mut Store<()>, instance: Instance) -> (usize, usize) { let mem_export = instance.get_memory(&mut *store, "memory").unwrap(); let base = mem_export.data_ptr(&store); let length = mem_export.data_size(&store); // So we can later trigger SIGSEGV by performing a read unsafe { mprotect( base as *mut std::ffi::c_void, length, MprotectFlags::empty(), ) .unwrap(); } println!("memory: base={:?}, length={}", base, length); (base as usize, length) } fn handle_sigsegv( base: usize, length: usize, signum: libc::c_int, siginfo: *const libc::siginfo_t, ) -> bool { println!("Hello from instance signal handler!"); // SIGSEGV on Linux, SIGBUS on Mac if libc::SIGSEGV == signum || libc::SIGBUS == signum { let si_addr: *mut libc::c_void = unsafe { (*siginfo).si_addr() }; // Any signal from within module's memory we handle ourselves let result = (si_addr as u64) < (base as u64) + (length as u64); // Remove protections so the execution may resume unsafe { mprotect( base as *mut libc::c_void, length, MprotectFlags::READ | MprotectFlags::WRITE, ) .unwrap(); } println!("signal handled: {}", result); result } else { // Otherwise, we forward to wasmtime's signal handler. false } } fn make_externs(store: &mut Store<()>, module: &Module) -> Vec<Extern> { module .imports() .map(|import| { assert_eq!("hostcall_read", import.name()); let func = Func::wrap(&mut *store, { move |mut caller: Caller<'_, _>| { let mem = caller.get_export("memory").unwrap().into_memory().unwrap(); let memory = mem.data(&caller); i32::from_le_bytes(memory[0..4].try_into().unwrap()) } }); wasmtime::Extern::Func(func) }) .collect::<Vec<_>>() } // This test will only succeed if the SIGSEGV signal originating from the // hostcall can be handled. 
#[test] fn test_custom_signal_handler_single_instance_hostcall() -> Result<()> { let engine = Engine::default(); let mut store = Store::new(&engine, ()); let module = Module::new(&engine, WAT1)?; let externs = make_externs(&mut store, &module); let instance = Instance::new(&mut store, &module, &externs)?; let (base, length) = set_up_memory(&mut store, instance); unsafe { store.set_signal_handler(move |signum, siginfo, _| { handle_sigsegv(base, length, signum, siginfo) }); } println!("calling hostcall_read..."); let result = invoke_export(&mut store, instance, "hostcall_read").unwrap(); assert_eq!(123, result); Ok(()) } #[test] fn test_custom_signal_handler_single_instance() -> Result<()> { let engine = Engine::new(&Config::default())?; let mut store = Store::new(&engine, ()); let module = Module::new(&engine, WAT1)?; let externs = make_externs(&mut store, &module); let instance = Instance::new(&mut store, &module, &externs)?; let (base, length) = set_up_memory(&mut store, instance); unsafe { store.set_signal_handler(move |signum, siginfo, _| { handle_sigsegv(base, length, signum, siginfo) }); } // these invoke wasmtime_call_trampoline from action.rs { println!("calling read..."); let result = invoke_export(&mut store, instance, "read").expect("read succeeded"); assert_eq!(123, result); } { println!("calling read_out_of_bounds..."); let trap = invoke_export(&mut store, instance, "read_out_of_bounds") .unwrap_err() .downcast::<Trap>()?; assert!( trap.to_string() .contains("wasm trap: out of bounds memory access"), "bad trap message: {:?}", trap.to_string() ); } // these invoke wasmtime_call_trampoline from callable.rs { let read_func = instance.get_typed_func::<(), i32, _>(&mut store, "read")?; println!("calling read..."); let result = read_func .call(&mut store, ()) .expect("expected function not to trap"); assert_eq!(123i32, result); } { let read_out_of_bounds_func = instance.get_typed_func::<(), i32, _>(&mut store, "read_out_of_bounds")?; println!("calling 
read_out_of_bounds..."); let trap = read_out_of_bounds_func.call(&mut store, ()).unwrap_err(); assert!(trap .to_string() .contains("wasm trap: out of bounds memory access")); } Ok(()) } #[test] fn test_custom_signal_handler_multiple_instances() -> Result<()> { let engine = Engine::default(); let mut store = Store::new(&engine, ()); let module = Module::new(&engine, WAT1)?; // Set up multiple instances let externs = make_externs(&mut store, &module); let instance1 = Instance::new(&mut store, &module, &externs)?; let instance1_handler_triggered = Arc::new(AtomicBool::new(false)); unsafe { let (base1, length1) = set_up_memory(&mut store, instance1); store.set_signal_handler({ let instance1_handler_triggered = instance1_handler_triggered.clone(); move |_signum, _siginfo, _context| { // Remove protections so the execution may resume mprotect( base1 as *mut libc::c_void, length1, MprotectFlags::READ | MprotectFlags::WRITE, ) .unwrap(); instance1_handler_triggered.store(true, Ordering::SeqCst); println!( "Hello from instance1 signal handler! 
{}", instance1_handler_triggered.load(Ordering::SeqCst) ); true } }); } // Invoke both instances and trigger both signal handlers // First instance1 { let mut exports1 = instance1.exports(&mut store); assert!(exports1.next().is_some()); drop(exports1); println!("calling instance1.read..."); let result = invoke_export(&mut store, instance1, "read").expect("read succeeded"); assert_eq!(123, result); assert_eq!( instance1_handler_triggered.load(Ordering::SeqCst), true, "instance1 signal handler has been triggered" ); } let externs = make_externs(&mut store, &module); let instance2 = Instance::new(&mut store, &module, &externs).expect("failed to instantiate module"); let instance2_handler_triggered = Arc::new(AtomicBool::new(false)); unsafe { let (base2, length2) = set_up_memory(&mut store, instance2); store.set_signal_handler({ let instance2_handler_triggered = instance2_handler_triggered.clone(); move |_signum, _siginfo, _context| { // Remove protections so the execution may resume mprotect( base2 as *mut libc::c_void, length2, MprotectFlags::READ | MprotectFlags::WRITE, ) .unwrap(); instance2_handler_triggered.store(true, Ordering::SeqCst); println!( "Hello from instance2 signal handler! 
{}", instance2_handler_triggered.load(Ordering::SeqCst) ); true } }); } // And then instance2 { let mut exports2 = instance2.exports(&mut store); assert!(exports2.next().is_some()); drop(exports2); println!("calling instance2.read..."); let result = invoke_export(&mut store, instance2, "read").expect("read succeeded"); assert_eq!(123, result); assert_eq!( instance2_handler_triggered.load(Ordering::SeqCst), true, "instance1 signal handler has been triggered" ); } Ok(()) } #[test] fn test_custom_signal_handler_instance_calling_another_instance() -> Result<()> { let engine = Engine::default(); let mut store = Store::new(&engine, ()); // instance1 which defines 'read' let module1 = Module::new(&engine, WAT1)?; let externs = make_externs(&mut store, &module1); let instance1 = Instance::new(&mut store, &module1, &externs)?; let (base1, length1) = set_up_memory(&mut store, instance1); unsafe { store.set_signal_handler(move |signum, siginfo, _| { println!("instance1"); handle_sigsegv(base1, length1, signum, siginfo) }); } let mut instance1_exports = instance1.exports(&mut store); let instance1_read = instance1_exports.next().unwrap().clone().into_extern(); drop(instance1_exports); // instance2 which calls 'instance1.read' let module2 = Module::new(&engine, WAT2)?; let instance2 = Instance::new(&mut store, &module2, &[instance1_read])?; // since 'instance2.run' calls 'instance1.read' we need to set up the signal handler to handle // SIGSEGV originating from within the memory of instance1 unsafe { store.set_signal_handler(move |signum, siginfo, _| { handle_sigsegv(base1, length1, signum, siginfo) }); } println!("calling instance2.run"); let result = invoke_export(&mut store, instance2, "run")?; assert_eq!(123, result); Ok(()) } }
35.75
102
0.53262
e532f623b8a62d41e903a4a49220da5a4bf7d87f
306
//! Module declarations for this crate level; items from `config` are
//! re-exported here so callers can use them without the extra path segment.

pub mod attributes;
pub mod backend_window_options;
pub mod config;
pub mod file_provider;
pub mod script_var_definition;
// Test-only module, compiled only for `cargo test`.
#[cfg(test)]
mod test;
pub mod validate;
pub mod var_definition;
pub mod widget_definition;
pub mod widget_use;
pub mod window_definition;
pub mod window_geometry;

// Flatten the `config` module into this namespace.
pub use config::*;
19.125
31
0.800654
bb00f8817d5ed24b59084c1c02f74cabb8f5fe94
1,852
#[macro_use]
extern crate maplit;

use std::io::{self, Read};
use std::str::FromStr;

/// Advent of Code 2018 day 12: simulate `gen` generations of a 1-D plant-pot
/// cellular automaton, printing each generation, then print the sum of the
/// pot numbers that still contain a plant.
fn main() {
    let gen = 20;

    // Initial layout for pots 0.., padded with `gen` empty pots on each side
    // so the 5-pot window never indexes outside the buffer.
    let state: Vec<char> = "#...#####.#..##...##...#.##.#.##.###..##.##.#.#..#...###..####.#.....#..##..#.##......#####..####..."
        .chars()
        .collect();
    let mut buffer = vec!['.'; gen];
    buffer.extend(state);
    buffer.extend(vec!['.'; gen]);
    let mut state = buffer;

    // Rules: 5-pot neighborhood (LLCRR) -> next state of the center pot.
    let rules = hashmap! {
        "#.#.#" => '#',
        "..###" => '.',
        "#..#." => '#',
        ".#..." => '#',
        "..##." => '#',
        "##.#." => '#',
        "##..#" => '#',
        "####." => '#',
        "...#." => '#',
        "..#.#" => '#',
        ".####" => '#',
        "#.###" => '.',
        "...##" => '.',
        "..#.." => '.',
        "#...#" => '.',
        ".###." => '#',
        ".#.##" => '.',
        ".##.." => '#',
        "....#" => '.',
        "#..##" => '.',
        "##.##" => '#',
        "#.##." => '.',
        "#...." => '.',
        "##..." => '#',
        ".#.#." => '.',
        "###.#" => '#',
        "#####" => '#',
        "#.#.." => '.',
        "....." => '.',
        ".##.#" => '.',
        "###.." => '.',
        ".#..#" => '.',
    };

    // One generation: every pot except the two at each edge is rewritten from
    // a snapshot of the previous generation (updates must not see each other).
    let step = |state: &mut Vec<char>| {
        let snapshot = state.clone();
        for i in 2..(state.len() - 2) {
            let neighborhood: String = snapshot[i - 2..i + 3].iter().collect();
            state[i] = rules[&neighborhood[..]]
        }
    };

    for _ in 0..gen {
        step(&mut state);
        let state_str: String = state.iter().collect();
        println!("{:?}", state_str);
    }

    let state_str: String = state.iter().collect();
    println!("{:?}", state_str);

    // Pot numbers are offset by `gen` because of the left padding. Sum in
    // signed arithmetic: a plant left of pot 0 would make `i - gen` underflow
    // usize and panic in the original code.
    let mut result: i64 = 0;
    for i in 0..state.len() {
        if state[i] == '#' {
            result += i as i64 - gen as i64;
        }
    }
    println!("{}", result)
}
24.051948
163
0.265659
0ef5c8d16b0f638028c7206190b4b8f78f922e7e
607
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // pp-exact fn main() { let x = Some(3); let y = match x { Some(_) => ~"some(_)", None => ~"none" }; assert y == ~"some(_)"; }
33.722222
68
0.675453
09fc368f0f806c430150e192b65057ba7513d3be
5,047
use std::cell::Cell; use std::fmt; use std::mem::ManuallyDrop; use std::ptr; use std::sync::atomic::{AtomicPtr, Ordering}; use std::sync::Arc; use crate::task::{LocalsMap, TaskId}; use crate::utils::abort_on_panic; thread_local! { /// A pointer to the currently running task. static CURRENT: Cell<*const Task> = Cell::new(ptr::null_mut()); } /// The inner representation of a task handle. struct Inner { /// The task ID. id: TaskId, /// The optional task name. name: Option<Box<str>>, /// The map holding task-local values. locals: LocalsMap, } impl Inner { #[inline] fn new(name: Option<String>) -> Inner { Inner { id: TaskId::generate(), name: name.map(String::into_boxed_str), locals: LocalsMap::new(), } } } /// A handle to a task. pub struct Task { /// The inner representation. /// /// This pointer is lazily initialized on first use. In most cases, the inner representation is /// never touched and therefore we don't allocate it unless it's really needed. inner: AtomicPtr<Inner>, } unsafe impl Send for Task {} unsafe impl Sync for Task {} impl Task { /// Creates a new task handle. /// /// If the task is unnamed, the inner representation of the task will be lazily allocated on /// demand. #[inline] pub(crate) fn new(name: Option<String>) -> Task { let inner = match name { None => AtomicPtr::default(), Some(name) => { let raw = Arc::into_raw(Arc::new(Inner::new(Some(name)))); AtomicPtr::new(raw as *mut Inner) } }; Task { inner } } /// Gets the task's unique identifier. #[inline] pub fn id(&self) -> TaskId { self.inner().id } /// Returns the name of this task. /// /// The name is configured by [`Builder::name`] before spawning. /// /// [`Builder::name`]: struct.Builder.html#method.name pub fn name(&self) -> Option<&str> { self.inner().name.as_ref().map(|s| &**s) } /// Returns the map holding task-local values. pub(crate) fn locals(&self) -> &LocalsMap { &self.inner().locals } /// Drops all task-local values. /// /// This method is only safe to call at the end of the task. 
#[inline] pub(crate) unsafe fn drop_locals(&self) { let raw = self.inner.load(Ordering::Acquire); if let Some(inner) = raw.as_mut() { // Abort the process if dropping task-locals panics. abort_on_panic(|| { inner.locals.clear(); }); } } /// Returns the inner representation, initializing it on first use. fn inner(&self) -> &Inner { loop { let raw = self.inner.load(Ordering::Acquire); if !raw.is_null() { return unsafe { &*raw }; } let new = Arc::into_raw(Arc::new(Inner::new(None))) as *mut Inner; if self.inner.compare_and_swap(raw, new, Ordering::AcqRel) != raw { unsafe { drop(Arc::from_raw(new)); } } } } /// Set a reference to the current task. pub(crate) unsafe fn set_current<F, R>(task: *const Task, f: F) -> R where F: FnOnce() -> R, { CURRENT.with(|current| { let old_task = current.replace(task); defer! { current.set(old_task); } f() }) } /// Gets a reference to the current task. pub(crate) fn get_current<F, R>(f: F) -> Option<R> where F: FnOnce(&Task) -> R, { let res = CURRENT.try_with(|current| unsafe { current.get().as_ref().map(f) }); match res { Ok(Some(val)) => Some(val), Ok(None) | Err(_) => None, } } } impl Drop for Task { fn drop(&mut self) { // Deallocate the inner representation if it was initialized. let raw = *self.inner.get_mut(); if !raw.is_null() { unsafe { drop(Arc::from_raw(raw)); } } } } impl Clone for Task { fn clone(&self) -> Task { // We need to make sure the inner representation is initialized now so that this instance // and the clone have raw pointers that point to the same `Arc<Inner>`. let arc = unsafe { ManuallyDrop::new(Arc::from_raw(self.inner())) }; let raw = Arc::into_raw(Arc::clone(&arc)); Task { inner: AtomicPtr::new(raw as *mut Inner), } } } impl fmt::Debug for Task { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Task") .field("id", &self.id()) .field("name", &self.name()) .finish() } }
28.353933
100
0.518922
1e1a7e49e908a055ac578953bcc5a700993703ee
7,784
#[doc = "Register `sf2_if_io_dly_0` reader"] pub struct R(crate::R<SF2_IF_IO_DLY_0_SPEC>); impl core::ops::Deref for R { type Target = crate::R<SF2_IF_IO_DLY_0_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::convert::From<crate::R<SF2_IF_IO_DLY_0_SPEC>> for R { fn from(reader: crate::R<SF2_IF_IO_DLY_0_SPEC>) -> Self { R(reader) } } #[doc = "Register `sf2_if_io_dly_0` writer"] pub struct W(crate::W<SF2_IF_IO_DLY_0_SPEC>); impl core::ops::Deref for W { type Target = crate::W<SF2_IF_IO_DLY_0_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl core::convert::From<crate::W<SF2_IF_IO_DLY_0_SPEC>> for W { fn from(writer: crate::W<SF2_IF_IO_DLY_0_SPEC>) -> Self { W(writer) } } #[doc = "Field `sf2_dqs_do_dly_sel` reader - "] pub struct SF2_DQS_DO_DLY_SEL_R(crate::FieldReader<u8, u8>); impl SF2_DQS_DO_DLY_SEL_R { pub(crate) fn new(bits: u8) -> Self { SF2_DQS_DO_DLY_SEL_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for SF2_DQS_DO_DLY_SEL_R { type Target = crate::FieldReader<u8, u8>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `sf2_dqs_do_dly_sel` writer - "] pub struct SF2_DQS_DO_DLY_SEL_W<'a> { w: &'a mut W, } impl<'a> SF2_DQS_DO_DLY_SEL_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x03 << 30)) | ((value as u32 & 0x03) << 30); self.w } } #[doc = "Field `sf2_dqs_di_dly_sel` reader - "] pub struct SF2_DQS_DI_DLY_SEL_R(crate::FieldReader<u8, u8>); impl SF2_DQS_DI_DLY_SEL_R { pub(crate) fn new(bits: u8) -> Self { SF2_DQS_DI_DLY_SEL_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for SF2_DQS_DI_DLY_SEL_R { type Target = crate::FieldReader<u8, u8>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `sf2_dqs_di_dly_sel` 
writer - "] pub struct SF2_DQS_DI_DLY_SEL_W<'a> { w: &'a mut W, } impl<'a> SF2_DQS_DI_DLY_SEL_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x03 << 28)) | ((value as u32 & 0x03) << 28); self.w } } #[doc = "Field `sf2_dqs_oe_dly_sel` reader - "] pub struct SF2_DQS_OE_DLY_SEL_R(crate::FieldReader<u8, u8>); impl SF2_DQS_OE_DLY_SEL_R { pub(crate) fn new(bits: u8) -> Self { SF2_DQS_OE_DLY_SEL_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for SF2_DQS_OE_DLY_SEL_R { type Target = crate::FieldReader<u8, u8>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `sf2_dqs_oe_dly_sel` writer - "] pub struct SF2_DQS_OE_DLY_SEL_W<'a> { w: &'a mut W, } impl<'a> SF2_DQS_OE_DLY_SEL_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x03 << 26)) | ((value as u32 & 0x03) << 26); self.w } } #[doc = "Field `sf2_clk_out_dly_sel` reader - "] pub struct SF2_CLK_OUT_DLY_SEL_R(crate::FieldReader<u8, u8>); impl SF2_CLK_OUT_DLY_SEL_R { pub(crate) fn new(bits: u8) -> Self { SF2_CLK_OUT_DLY_SEL_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for SF2_CLK_OUT_DLY_SEL_R { type Target = crate::FieldReader<u8, u8>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `sf2_clk_out_dly_sel` writer - "] pub struct SF2_CLK_OUT_DLY_SEL_W<'a> { w: &'a mut W, } impl<'a> SF2_CLK_OUT_DLY_SEL_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x03 << 8)) | ((value as u32 & 0x03) << 8); self.w } } #[doc = "Field `sf2_cs_dly_sel` reader - "] pub struct SF2_CS_DLY_SEL_R(crate::FieldReader<u8, u8>); impl SF2_CS_DLY_SEL_R { pub(crate) fn new(bits: u8) -> Self { SF2_CS_DLY_SEL_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for 
SF2_CS_DLY_SEL_R { type Target = crate::FieldReader<u8, u8>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `sf2_cs_dly_sel` writer - "] pub struct SF2_CS_DLY_SEL_W<'a> { w: &'a mut W, } impl<'a> SF2_CS_DLY_SEL_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !0x03) | (value as u32 & 0x03); self.w } } impl R { #[doc = "Bits 30:31"] #[inline(always)] pub fn sf2_dqs_do_dly_sel(&self) -> SF2_DQS_DO_DLY_SEL_R { SF2_DQS_DO_DLY_SEL_R::new(((self.bits >> 30) & 0x03) as u8) } #[doc = "Bits 28:29"] #[inline(always)] pub fn sf2_dqs_di_dly_sel(&self) -> SF2_DQS_DI_DLY_SEL_R { SF2_DQS_DI_DLY_SEL_R::new(((self.bits >> 28) & 0x03) as u8) } #[doc = "Bits 26:27"] #[inline(always)] pub fn sf2_dqs_oe_dly_sel(&self) -> SF2_DQS_OE_DLY_SEL_R { SF2_DQS_OE_DLY_SEL_R::new(((self.bits >> 26) & 0x03) as u8) } #[doc = "Bits 8:9"] #[inline(always)] pub fn sf2_clk_out_dly_sel(&self) -> SF2_CLK_OUT_DLY_SEL_R { SF2_CLK_OUT_DLY_SEL_R::new(((self.bits >> 8) & 0x03) as u8) } #[doc = "Bits 0:1"] #[inline(always)] pub fn sf2_cs_dly_sel(&self) -> SF2_CS_DLY_SEL_R { SF2_CS_DLY_SEL_R::new((self.bits & 0x03) as u8) } } impl W { #[doc = "Bits 30:31"] #[inline(always)] pub fn sf2_dqs_do_dly_sel(&mut self) -> SF2_DQS_DO_DLY_SEL_W { SF2_DQS_DO_DLY_SEL_W { w: self } } #[doc = "Bits 28:29"] #[inline(always)] pub fn sf2_dqs_di_dly_sel(&mut self) -> SF2_DQS_DI_DLY_SEL_W { SF2_DQS_DI_DLY_SEL_W { w: self } } #[doc = "Bits 26:27"] #[inline(always)] pub fn sf2_dqs_oe_dly_sel(&mut self) -> SF2_DQS_OE_DLY_SEL_W { SF2_DQS_OE_DLY_SEL_W { w: self } } #[doc = "Bits 8:9"] #[inline(always)] pub fn sf2_clk_out_dly_sel(&mut self) -> SF2_CLK_OUT_DLY_SEL_W { SF2_CLK_OUT_DLY_SEL_W { w: self } } #[doc = "Bits 0:1"] #[inline(always)] pub fn sf2_cs_dly_sel(&mut self) -> SF2_CS_DLY_SEL_W { SF2_CS_DLY_SEL_W { w: self } } #[doc = "Writes raw bits to the register."] pub unsafe fn bits(&mut self, bits: u32) 
-> &mut Self { self.0.bits(bits); self } } #[doc = "sf2_if_io_dly_0.\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [sf2_if_io_dly_0](index.html) module"] pub struct SF2_IF_IO_DLY_0_SPEC; impl crate::RegisterSpec for SF2_IF_IO_DLY_0_SPEC { type Ux = u32; } #[doc = "`read()` method returns [sf2_if_io_dly_0::R](R) reader structure"] impl crate::Readable for SF2_IF_IO_DLY_0_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [sf2_if_io_dly_0::W](W) writer structure"] impl crate::Writable for SF2_IF_IO_DLY_0_SPEC { type Writer = W; } #[doc = "`reset()` method sets sf2_if_io_dly_0 to value 0"] impl crate::Resettable for SF2_IF_IO_DLY_0_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
31.901639
412
0.617292
5db217c940f0273248371bb6499b6476929721ff
4,603
//! Generates [safe prime numbers](https://www.wikiwand.com/en/Sophie_Germain_prime). use ramp::Int; use rand::rngs::OsRng; pub use common::{gen_prime, is_prime}; use common::{two, three}; use error::{Error, Result}; /// Constructs a new `SafePrime` with a size of `bit_length` bits. /// /// This will initialize an `OsRng` instance and call the /// `SafePrime::from_rng()` method. /// /// Note: the `bit_length` MUST be at least 512-bits. pub fn new(bit_length: usize) -> Result { if bit_length < 512 { Err(Error::BitLength(bit_length)) } else { let mut rngesus = OsRng::new()?; Ok(from_rng(bit_length, &mut rngesus)?) } } /// Checks if number is a safe prime pub fn is_safe_prime(candidate: &Int) -> bool { // according to https://eprint.iacr.org/2003/186.pdf // a safe prime is congruent to 2 mod 3 if (candidate % three()) == two() { if is_prime(&candidate) { // a safe prime satisfies (p-1)/2 is prime. Since a // prime is odd, We just need to divide by 2 let candidate_p = candidate >> 1; return is_prime(&candidate_p) } } false } /// Constructs a new `SafePrime` with the size of `bit_length` bits, sourced /// from an already-initialized `OsRng`. 
pub fn from_rng(bit_length: usize, mut rngesus: &mut OsRng) -> Result { if bit_length < 512 { Err(Error::BitLength(bit_length)) } else { let mut candidate: Int; loop { candidate = gen_prime(bit_length, &mut rngesus)?; if is_safe_prime(&candidate) { break; } candidate <<= 1; candidate += 1; if is_prime(&candidate) { break; } } Ok(candidate) } } #[cfg(test)] mod tests { use super::{new, is_safe_prime}; use ramp::Int; #[test] fn test_safe_prime_bit_length_too_small() { let sp = new(511); assert_eq!(false, match sp { Ok(_) => true, Err(_) => false }); } #[test] fn test_safe_prime() { let sp = new(512); assert_eq!(true, match sp { Ok(_) => true, Err(_) => false }); } #[test] fn test_is_safe_prime() { //Numbers pulled from https://github.com/mikelodder7/cunningham_chain/blob/master/findings.md //p0 is sophie german prime let p0 = Int::from_str_radix("37313426856874901938110133384605074194791927500210707276948918975046371522830901596065044944558427864187196889881993164303255749681644627614963632713725183364319410825898054225147061624559894980555489070322738683900143562848200257354774040241218537613789091499134051387344396560066242901217378861764936185029", 10).unwrap(); assert!(!is_safe_prime(&p0)); let p1 = Int::from_str_radix("74626853713749803876220266769210148389583855000421414553897837950092743045661803192130089889116855728374393779763986328606511499363289255229927265427450366728638821651796108450294123249119789961110978140645477367800287125696400514709548080482437075227578182998268102774688793120132485802434757723529872370059", 10).unwrap(); assert!(is_safe_prime(&p1)); let p2 = Int::from_str_radix("149253707427499607752440533538420296779167710000842829107795675900185486091323606384260179778233711456748787559527972657213022998726578510459854530854900733457277643303592216900588246498239579922221956281290954735600574251392801029419096160964874150455156365996536205549377586240264971604869515447059744740119", 10).unwrap(); assert!(is_safe_prime(&p2)); let p3 = 
Int::from_str_radix("298507414854999215504881067076840593558335420001685658215591351800370972182647212768520359556467422913497575119055945314426045997453157020919709061709801466914555286607184433801176492996479159844443912562581909471201148502785602058838192321929748300910312731993072411098755172480529943209739030894119489480239", 10).unwrap(); assert!(is_safe_prime(&p3)); let p4 = Int::from_str_radix("4806876214089177439121678559764069543282270755154137981051366776821330958611719328037311759924923156830623290278296826263863902327008664143707117531049168010908663795201825132050017581985031718536424081509084930569115857201636971728388275433540277562846153879803474020036767852693656753257597801227199822164846876100177774044259379232968071371318658371230787073384750022830829873718254139779006439569882904712552834431199870749249168775012460891012776977366721903", 10).unwrap(); assert!(is_safe_prime(&p4)); } }
43.018692
517
0.723224
4a141e3f26844f9ba6df31fcb47325b2b4dd41aa
2,181
use crate::value::{Primitive, UntaggedValue, Value}; use indexmap::IndexMap; use serde::{Deserialize, Serialize}; use std::fmt::Debug; /// An evaluation scope. Scopes map variable names to Values and aid in evaluating blocks and expressions. /// Additionally, holds the value for the special $it variable, a variable used to refer to the value passing /// through the pipeline at that moment #[derive(Deserialize, Serialize, Debug, Clone)] pub struct Scope { pub it: Value, pub vars: IndexMap<String, Value>, pub env: IndexMap<String, String>, } impl Scope { /// Create a new scope pub fn new(it: Value) -> Scope { Scope { it, vars: IndexMap::new(), env: IndexMap::new(), } } } impl Scope { /// Create an empty scope pub fn empty() -> Scope { Scope { it: UntaggedValue::Primitive(Primitive::Nothing).into_untagged_value(), vars: IndexMap::new(), env: IndexMap::new(), } } /// Create an empty scope, setting $it to a known Value pub fn it_value(value: Value) -> Scope { Scope { it: value, vars: IndexMap::new(), env: IndexMap::new(), } } pub fn env(env: IndexMap<String, String>) -> Scope { Scope { it: UntaggedValue::Primitive(Primitive::Nothing).into_untagged_value(), vars: IndexMap::new(), env, } } pub fn set_it(self, value: Value) -> Scope { Scope { it: value, vars: self.vars, env: self.env, } } pub fn set_var(self, name: String, value: Value) -> Scope { let mut new_vars = self.vars.clone(); new_vars.insert(name, value); Scope { it: self.it, vars: new_vars, env: self.env, } } pub fn set_env_var(self, variable: String, value: String) -> Scope { let mut new_env_vars = self.env.clone(); new_env_vars.insert(variable, value); Scope { it: self.it, vars: self.vars, env: new_env_vars, } } }
26.597561
109
0.548372
eb7b3be662f9fc229744b68b1eb0e19fc44de538
4,924
//! The parameter element definition. // Copyright (c) 2021 ShiftLeft Software // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use rust_decimal::prelude::*; use super::ElemSymbol; pub struct ElemParameter { /// Name of the parameter. name: String, /// Label of the parameter. label: String, /// Description of the parameter (i.e., help). description: String, /// Parameter symbol. symbol: ElemSymbol, } /// The parameter element implementation. impl ElemParameter { /// Create a new parameter element. /// /// # Arguments /// /// * `name_param` - Parameter name. /// * `label_param` - Parameter label. /// * `desc_param` - Parameter description. /// /// # Return /// /// * See description. pub fn new(name_param: &str, label_param: &str, desc_param: &str) -> ElemParameter { ElemParameter { name: String::from(name_param), label: String::from(label_param), description: String::from(desc_param), symbol: ElemSymbol::new(), } } /// Compare this parameter to the parameter parameter. /// /// # Arguments /// /// * `parameter` - Parameter to compare. /// /// # Return /// /// * True if equal, otherwise false. pub fn equal(&self, parameter: &ElemParameter) -> bool { if self.name != parameter.name() { return false; } true } /// Get the parameter name. /// /// # Return /// /// * See description. pub fn name(&self) -> &str { self.name.as_str() } /// Get the parameter label. /// /// # Return /// /// * See description. pub fn label(&self) -> &str { self.label.as_str() } /// Get the parameter description. /// /// # Return /// /// * See description. pub fn description(&self) -> &str { self.description.as_str() } /// Get the parameter type. /// /// # Return /// /// * See description. 
pub fn param_type(&self) -> crate::TokenType { self.symbol.sym_type() } /// Get the parameter integer. /// /// # Return /// /// * See description. pub fn param_integeri(&self) -> i32 { self.symbol.sym_integeri() } /// Get the parameter integer. /// /// # Return /// /// * See description. pub fn param_integer(&self) -> usize { self.symbol.sym_integer() } /// Get the parameter decimal. /// /// # Return /// /// * See description. pub fn param_decimal(&self) -> Decimal { self.symbol.sym_decimal() } /// Get the parameter string. /// /// # Return /// /// * See description. pub fn param_string(&self) -> &str { self.symbol.sym_string() } /// Set the parameter name. /// /// # Arguments /// /// * `name_param` - See description. pub fn set_name(&mut self, name_param: &str) { self.name = String::from(name_param); } /// Set the parameter label. /// /// # Arguments /// /// * `label_param` - See description. pub fn set_label(&mut self, label_param: &str) { self.label = String::from(label_param); } /// Set the parameter description. /// /// # Arguments /// /// * `desc_param` - See description. pub fn set_description(&mut self, desc_param: &str) { self.description = String::from(desc_param); } /// Set the parameter type. /// /// # Arguments /// /// * `type_param` - See description. pub fn set_type(&mut self, type_param: crate::TokenType) { self.symbol.set_type(type_param); } /// Set the parameter integer. /// /// # Arguments /// /// * `integer_param` - See description. pub fn set_integeri(&mut self, integer_param: i32) { self.symbol.set_integeri(integer_param); } /// Set the parameter integer. /// /// # Arguments /// /// * `integer_param` - See description. pub fn set_integer(&mut self, integer_param: usize) { self.symbol.set_integer(integer_param); } /// Set the parameter decimal. /// /// # Arguments /// /// * `float_param` - See description. pub fn set_decimal(&mut self, float_param: Decimal) { self.symbol.set_decimal(float_param); } /// Set the parameter string. 
/// /// # Arguments /// /// * `string_param` - See description. pub fn set_string(&mut self, string_param: &str) { self.symbol.set_string(string_param); } }
21.69163
88
0.551381
fcd79af11a403bd53e3aa49a4b52548b8d553f0d
293
#![deny(warnings)] #![deny(missing_docs)] //! A simple WebDav crate. #[macro_use] extern crate failure; #[macro_use] extern crate hyper; extern crate reqwest; extern crate xml; mod client; mod error; mod header; mod response; pub use client::{Client, ClientBuilder}; pub use error::Error;
14.65
40
0.730375
dd340fc866ded19300437f97dc2e8e66f85d3614
2,670
macro_rules! register { ($REGISTER:ident, $reset_value:expr, $uxx:ty, { $(#[$($attr:tt)*] $bitfield:ident @ $range:expr,)+ }) => { #[allow(non_camel_case_types)] #[derive(Clone, Copy)] pub struct $REGISTER<MODE> { bits: $uxx, _mode: ::core::marker::PhantomData<MODE>, } impl $REGISTER<crate::lowlevel::traits::Mask> { pub fn mask() -> $REGISTER<crate::lowlevel::traits::Mask> { $REGISTER { bits: 0, _mode: ::core::marker::PhantomData } } $( pub fn $bitfield(&self) -> $uxx { use crate::lowlevel::traits::OffsetSize; let size = $range.size() + 1; let offset = $range.offset(); (((1 << size) - 1) as u8) << offset } )+ } impl ::core::default::Default for $REGISTER<crate::lowlevel::traits::W> { fn default() -> Self { $REGISTER { bits: $reset_value, _mode: ::core::marker::PhantomData } } } #[allow(non_snake_case)] pub fn $REGISTER(bits: $uxx) -> $REGISTER<crate::lowlevel::traits::R> { $REGISTER { bits, _mode: ::core::marker::PhantomData } } impl $REGISTER<crate::lowlevel::traits::R> { pub fn modify(self) -> $REGISTER<crate::lowlevel::traits::W> { $REGISTER { bits: self.bits, _mode: ::core::marker::PhantomData } } $( #[$($attr)*] pub fn $bitfield(&self) -> $uxx { use crate::lowlevel::traits::OffsetSize; let offset = $range.offset(); let size = $range.size() + 1; let mask = ((1 << size) - 1) as u8; (self.bits >> offset) & mask } )+ } impl $REGISTER<crate::lowlevel::traits::W> { pub fn bits(self) -> $uxx { self.bits } $( #[$($attr)*] pub fn $bitfield(&mut self, mut bits: $uxx) -> &mut Self { use crate::lowlevel::traits::OffsetSize; let offset = $range.offset(); let size = $range.size() + 1; let mask = ((1 << size) - 1) as u8; debug_assert!(bits <= mask); bits &= mask; self.bits &= !(mask << offset); self.bits |= bits << offset; self } )+ } } }
31.785714
84
0.418352
ddfd27443f5fc7d9ccaa388ec396b87a08dca5db
2,317
use super::DeviceInterface; use crate::Error; use embedded_hal as hal; use shufflebuf::ShuffleBuf; /// This encapsulates the Serial UART peripheral /// and associated pins such as /// - DRDY: Data Ready: Sensor uses this to indicate it had data available for read pub struct SerialInterface<SER> { /// the serial port to use when communicating serial: SER, shuffler: ShuffleBuf<256>, } impl<SER, CommE> SerialInterface<SER> where SER: hal::serial::Read<u8, Error = CommE>, { pub fn new(serial_port: SER) -> Self { Self { serial: serial_port, shuffler: ShuffleBuf::default(), } } } impl<SER, CommE> DeviceInterface for SerialInterface<SER> where SER: hal::serial::Read<u8, Error = CommE>, { type InterfaceError = Error<CommE>; fn read(&mut self) -> Result<u8, Self::InterfaceError> { let (count, byte) = self.shuffler.read_one(); if count > 0 { Ok(byte) } else { let mut block_byte = [0u8; 1]; //TODO in practice this hasn't failed yet, but we should handle the error self.read_many(&mut block_byte)?; Ok(block_byte[0]) } } fn fill(&mut self) -> usize { let mut fetch_count = self.shuffler.vacant(); let mut err_count = 0; while fetch_count > 0 { let rc = self.serial.read(); match rc { Ok(byte) => { err_count = 0; //reset self.shuffler.push_one(byte); fetch_count -= 1; } Err(nb::Error::WouldBlock) => {} Err(nb::Error::Other(_)) => { // in practice this is returning Overrun a ton on stm32h7 err_count += 1; if err_count > 100 { break; } } } } self.shuffler.available() } fn read_many( &mut self, buffer: &mut [u8], ) -> Result<usize, Self::InterfaceError> { let avail = self.shuffler.available(); if avail >= buffer.len() { let final_read_count = self.shuffler.read_many(buffer); return Ok(final_read_count); } Ok(0) } }
27.583333
85
0.526543
f82495cc2ee63ccac86189764a029fe265e10baa
24,642
extern crate cargotest; extern crate hamcrest; use cargotest::support::{project, execs}; use cargotest::support::registry::Package; use hamcrest::assert_that; #[test] fn bad1() { let foo = project("foo") .file("Cargo.toml", r#" [package] name = "foo" version = "0.0.0" authors = [] "#) .file("src/lib.rs", "") .file(".cargo/config", r#" [target] nonexistent-target = "foo" "#); assert_that(foo.cargo_process("build").arg("-v") .arg("--target=nonexistent-target"), execs().with_status(101).with_stderr("\ [ERROR] expected table for configuration key `target.nonexistent-target`, \ but found string in [..]config ")); } #[test] fn bad2() { let foo = project("foo") .file("Cargo.toml", r#" [package] name = "foo" version = "0.0.0" authors = [] "#) .file("src/lib.rs", "") .file(".cargo/config", r#" [http] proxy = 3.0 "#); assert_that(foo.cargo_process("publish").arg("-v"), execs().with_status(101).with_stderr("\ [ERROR] Couldn't load Cargo configuration Caused by: failed to load TOML configuration from `[..]config` Caused by: failed to parse key `http` Caused by: failed to parse key `proxy` Caused by: found TOML configuration value of unknown type `float` ")); } #[test] fn bad3() { let foo = project("foo") .file("Cargo.toml", r#" [package] name = "foo" version = "0.0.0" authors = [] "#) .file("src/lib.rs", "") .file(".cargo/config", r#" [http] proxy = true "#); Package::new("foo", "1.0.0").publish(); assert_that(foo.cargo_process("publish").arg("-v"), execs().with_status(101).with_stderr("\ error: failed to update registry [..] 
Caused by: invalid configuration for key `http.proxy` expected a string, but found a boolean for `http.proxy` in [..]config ")); } #[test] fn bad4() { let foo = project("foo") .file(".cargo/config", r#" [cargo-new] name = false "#); assert_that(foo.cargo_process("new").arg("-v").arg("foo"), execs().with_status(101).with_stderr("\ [ERROR] Failed to create project `foo` at `[..]` Caused by: invalid configuration for key `cargo-new.name` expected a string, but found a boolean for `cargo-new.name` in [..]config ")); } #[test] fn bad5() { let foo = project("foo") .file(".cargo/config", r#" foo = "" "#) .file("foo/.cargo/config", r#" foo = 2 "#); foo.build(); assert_that(foo.cargo("new") .arg("-v").arg("foo").cwd(&foo.root().join("foo")), execs().with_status(101).with_stderr("\ [ERROR] Failed to create project `foo` at `[..]` Caused by: Couldn't load Cargo configuration Caused by: failed to merge configuration at `[..]` Caused by: failed to merge key `foo` between files: file 1: [..]foo[..]foo[..]config file 2: [..]foo[..]config Caused by: expected integer, but found string ")); } #[test] fn bad_cargo_config_jobs() { let foo = project("foo") .file("Cargo.toml", r#" [package] name = "foo" version = "0.0.0" authors = [] "#) .file("src/lib.rs", "") .file(".cargo/config", r#" [build] jobs = -1 "#); assert_that(foo.cargo_process("build").arg("-v"), execs().with_status(101).with_stderr("\ [ERROR] build.jobs must be positive, but found -1 in [..] 
")); } #[test] fn default_cargo_config_jobs() { let foo = project("foo") .file("Cargo.toml", r#" [package] name = "foo" version = "0.0.0" authors = [] "#) .file("src/lib.rs", "") .file(".cargo/config", r#" [build] jobs = 1 "#); assert_that(foo.cargo_process("build").arg("-v"), execs().with_status(0)); } #[test] fn good_cargo_config_jobs() { let foo = project("foo") .file("Cargo.toml", r#" [package] name = "foo" version = "0.0.0" authors = [] "#) .file("src/lib.rs", "") .file(".cargo/config", r#" [build] jobs = 4 "#); assert_that(foo.cargo_process("build").arg("-v"), execs().with_status(0)); } #[test] fn invalid_global_config() { let foo = project("foo") .file("Cargo.toml", r#" [package] name = "foo" version = "0.0.0" authors = [] [dependencies] foo = "0.1.0" "#) .file(".cargo/config", "4") .file("src/lib.rs", ""); assert_that(foo.cargo_process("build").arg("-v"), execs().with_status(101).with_stderr("\ [ERROR] Couldn't load Cargo configuration Caused by: could not parse TOML configuration in `[..]` Caused by: could not parse input as TOML Caused by: expected an equals, found eof at line 1 ")); } #[test] fn bad_cargo_lock() { let foo = project("foo") .file("Cargo.toml", r#" [package] name = "foo" version = "0.0.0" authors = [] "#) .file("Cargo.lock", "[[package]]\nfoo = 92") .file("src/lib.rs", ""); assert_that(foo.cargo_process("build").arg("-v"), execs().with_status(101).with_stderr("\ [ERROR] failed to parse lock file at: [..]Cargo.lock Caused by: missing field `name` for key `package` ")); } #[test] fn duplicate_packages_in_cargo_lock() { Package::new("foo", "0.1.0").publish(); let p = project("bar") .file("Cargo.toml", r#" [project] name = "bar" version = "0.0.1" authors = [] [dependencies] foo = "0.1.0" "#) .file("src/lib.rs", "") .file("Cargo.lock", r#" [root] name = "bar" version = "0.0.1" dependencies = [ "foo 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "foo" version = "0.1.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "foo" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" "#); p.build(); assert_that(p.cargo("build").arg("--verbose"), execs().with_status(101).with_stderr("\ [ERROR] failed to parse lock file at: [..] Caused by: package `foo` is specified twice in the lockfile ")); } #[test] fn bad_source_in_cargo_lock() { Package::new("foo", "0.1.0").publish(); let p = project("bar") .file("Cargo.toml", r#" [project] name = "bar" version = "0.0.1" authors = [] [dependencies] foo = "0.1.0" "#) .file("src/lib.rs", "") .file("Cargo.lock", r#" [root] name = "bar" version = "0.0.1" dependencies = [ "foo 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "foo" version = "0.1.0" source = "You shall not parse" "#); p.build(); assert_that(p.cargo("build").arg("--verbose"), execs().with_status(101).with_stderr("\ [ERROR] failed to parse lock file at: [..] Caused by: invalid source `You shall not parse` for key `package.source` ")); } #[test] fn bad_dependency_in_lockfile() { let p = project("foo") .file("Cargo.toml", r#" [project] name = "foo" version = "0.0.1" authors = [] "#) .file("src/lib.rs", "") .file("Cargo.lock", r#" [root] name = "foo" version = "0.0.1" dependencies = [ "bar 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] "#); p.build(); assert_that(p.cargo("build").arg("--verbose"), execs().with_status(101).with_stderr("\ [ERROR] failed to parse lock file at: [..] Caused by: package `bar 0.1.0 ([..])` is specified as a dependency, but is missing from the package list ")); } #[test] fn bad_git_dependency() { let foo = project("foo") .file("Cargo.toml", r#" [package] name = "foo" version = "0.0.0" authors = [] [dependencies] foo = { git = "file:.." 
} "#) .file("src/lib.rs", ""); assert_that(foo.cargo_process("build").arg("-v"), execs().with_status(101).with_stderr("\ [UPDATING] git repository `file:///` [ERROR] failed to load source for a dependency on `foo` Caused by: Unable to update file:/// Caused by: failed to clone into: [..] Caused by: [[..]] 'file:///' is not a valid local file URI ")); } #[test] fn bad_crate_type() { let foo = project("foo") .file("Cargo.toml", r#" [package] name = "foo" version = "0.0.0" authors = [] [lib] crate-type = ["bad_type", "rlib"] "#) .file("src/lib.rs", ""); assert_that(foo.cargo_process("build").arg("-v"), execs().with_status(101).with_stderr_contains("\ error: failed to run `rustc` to learn about target-specific information ")); } #[test] fn malformed_override() { let foo = project("foo") .file("Cargo.toml", r#" [package] name = "foo" version = "0.0.0" authors = [] [target.x86_64-apple-darwin.freetype] native = { foo: "bar" } "#) .file("src/lib.rs", ""); assert_that(foo.cargo_process("build"), execs().with_status(101).with_stderr("\ [ERROR] failed to parse manifest at `[..]` Caused by: could not parse input as TOML Caused by: expected a table key, found a newline at line 8 ")); } #[test] fn duplicate_binary_names() { let foo = project("foo") .file("Cargo.toml", r#" [package] name = "qqq" version = "0.1.0" authors = ["A <[email protected]>"] [[bin]] name = "e" path = "a.rs" [[bin]] name = "e" path = "b.rs" "#) .file("a.rs", r#"fn main() -> () {}"#) .file("b.rs", r#"fn main() -> () {}"#); assert_that(foo.cargo_process("build"), execs().with_status(101).with_stderr("\ [ERROR] failed to parse manifest at `[..]` Caused by: found duplicate binary name e, but all binary targets must have a unique name ")); } #[test] fn duplicate_example_names() { let foo = project("foo") .file("Cargo.toml", r#" [package] name = "qqq" version = "0.1.0" authors = ["A <[email protected]>"] [[example]] name = "ex" path = "examples/ex.rs" [[example]] name = "ex" path = "examples/ex2.rs" "#) 
.file("examples/ex.rs", r#"fn main () -> () {}"#) .file("examples/ex2.rs", r#"fn main () -> () {}"#); assert_that(foo.cargo_process("build").arg("--example").arg("ex"), execs().with_status(101).with_stderr("\ [ERROR] failed to parse manifest at `[..]` Caused by: found duplicate example name ex, but all binary targets must have a unique name ")); } #[test] fn duplicate_bench_names() { let foo = project("foo") .file("Cargo.toml", r#" [package] name = "qqq" version = "0.1.0" authors = ["A <[email protected]>"] [[bench]] name = "ex" path = "benches/ex.rs" [[bench]] name = "ex" path = "benches/ex2.rs" "#) .file("benches/ex.rs", r#"fn main () {}"#) .file("benches/ex2.rs", r#"fn main () {}"#); assert_that(foo.cargo_process("bench"), execs().with_status(101).with_stderr("\ [ERROR] failed to parse manifest at `[..]` Caused by: found duplicate bench name ex, but all binary targets must have a unique name ")); } #[test] fn duplicate_deps() { let foo = project("foo") .file("shim-bar/Cargo.toml", r#" [package] name = "bar" version = "0.0.1" authors = [] "#) .file("shim-bar/src/lib.rs", r#" pub fn a() {} "#) .file("linux-bar/Cargo.toml", r#" [package] name = "bar" version = "0.0.1" authors = [] "#) .file("linux-bar/src/lib.rs", r#" pub fn a() {} "#) .file("Cargo.toml", r#" [package] name = "qqq" version = "0.0.1" authors = [] [dependencies] bar = { path = "shim-bar" } [target.x86_64-unknown-linux-gnu.dependencies] bar = { path = "linux-bar" } "#) .file("src/main.rs", r#"fn main () {}"#); assert_that(foo.cargo_process("build"), execs().with_status(101).with_stderr("\ [ERROR] failed to parse manifest at `[..]` Caused by: Dependency 'bar' has different source paths depending on the build target. Each dependency must \ have a single canonical source path irrespective of build target. 
")); } #[test] fn duplicate_deps_diff_sources() { let foo = project("foo") .file("shim-bar/Cargo.toml", r#" [package] name = "bar" version = "0.0.1" authors = [] "#) .file("shim-bar/src/lib.rs", r#" pub fn a() {} "#) .file("linux-bar/Cargo.toml", r#" [package] name = "bar" version = "0.0.1" authors = [] "#) .file("linux-bar/src/lib.rs", r#" pub fn a() {} "#) .file("Cargo.toml", r#" [package] name = "qqq" version = "0.0.1" authors = [] [target.i686-unknown-linux-gnu.dependencies] bar = { path = "shim-bar" } [target.x86_64-unknown-linux-gnu.dependencies] bar = { path = "linux-bar" } "#) .file("src/main.rs", r#"fn main () {}"#); assert_that(foo.cargo_process("build"), execs().with_status(101).with_stderr("\ [ERROR] failed to parse manifest at `[..]` Caused by: Dependency 'bar' has different source paths depending on the build target. Each dependency must \ have a single canonical source path irrespective of build target. ")); } #[test] fn unused_keys() { let foo = project("foo") .file("Cargo.toml", r#" [package] name = "foo" version = "0.1.0" authors = [] [target.foo] bar = "3" "#) .file("src/lib.rs", ""); assert_that(foo.cargo_process("build"), execs().with_status(0).with_stderr("\ warning: unused manifest key: target.foo.bar [COMPILING] foo v0.1.0 (file:///[..]) [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] ")); } #[test] fn empty_dependencies() { let p = project("empty_deps") .file("Cargo.toml", r#" [package] name = "empty_deps" version = "0.0.0" authors = [] [dependencies] foo = {} "#) .file("src/main.rs", "fn main() {}"); Package::new("foo", "0.0.1").publish(); assert_that(p.cargo_process("build"), execs().with_status(0).with_stderr_contains("\ warning: dependency (foo) specified without providing a local path, Git repository, or version \ to use. 
This will be considered an error in future versions ")); } #[test] fn invalid_toml_historically_allowed_is_warned() { let p = project("empty_deps") .file("Cargo.toml", r#" [package] name = "empty_deps" version = "0.0.0" authors = [] "#) .file(".cargo/config", r#" [foo] bar = 2 "#) .file("src/main.rs", "fn main() {}"); assert_that(p.cargo_process("build"), execs().with_status(0).with_stderr("\ warning: TOML file found which contains invalid syntax and will soon not parse at `[..]config`. The TOML spec requires newlines after table definitions (e.g. `[a] b = 1` is invalid), but this file has a table header which does not have a newline after it. A newline needs to be added and this warning will soon become a hard error in the future. [COMPILING] empty_deps v0.0.0 ([..]) [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] ")); } #[test] fn ambiguous_git_reference() { let foo = project("foo") .file("Cargo.toml", r#" [package] name = "foo" version = "0.0.0" authors = [] [dependencies.bar] git = "https://example.com" branch = "master" tag = "some-tag" "#) .file("src/lib.rs", ""); assert_that(foo.cargo_process("build").arg("-v"), execs().with_stderr_contains("\ [WARNING] dependency (bar) specification is ambiguous. \ Only one of `branch`, `tag` or `rev` is allowed. \ This will be considered an error in future versions ")); } #[test] fn bad_source_config1() { let p = project("foo") .file("Cargo.toml", r#" [package] name = "foo" version = "0.0.0" authors = [] "#) .file("src/lib.rs", "") .file(".cargo/config", r#" [source.foo] "#); assert_that(p.cargo_process("build"), execs().with_status(101).with_stderr("\ error: no source URL specified for `source.foo`, need [..] 
")); } #[test] fn bad_source_config2() { let p = project("foo") .file("Cargo.toml", r#" [package] name = "foo" version = "0.0.0" authors = [] [dependencies] bar = "*" "#) .file("src/lib.rs", "") .file(".cargo/config", r#" [source.crates-io] registry = 'http://example.com' replace-with = 'bar' "#); assert_that(p.cargo_process("build"), execs().with_status(101).with_stderr("\ error: failed to load source for a dependency on `bar` Caused by: Unable to update registry https://[..] Caused by: could not find a configured source with the name `bar` \ when attempting to lookup `crates-io` (configuration in [..]) ")); } #[test] fn bad_source_config3() { let p = project("foo") .file("Cargo.toml", r#" [package] name = "foo" version = "0.0.0" authors = [] [dependencies] bar = "*" "#) .file("src/lib.rs", "") .file(".cargo/config", r#" [source.crates-io] registry = 'http://example.com' replace-with = 'crates-io' "#); assert_that(p.cargo_process("build"), execs().with_status(101).with_stderr("\ error: failed to load source for a dependency on `bar` Caused by: Unable to update registry https://[..] Caused by: detected a cycle of `replace-with` sources, [..] ")); } #[test] fn bad_source_config4() { let p = project("foo") .file("Cargo.toml", r#" [package] name = "foo" version = "0.0.0" authors = [] [dependencies] bar = "*" "#) .file("src/lib.rs", "") .file(".cargo/config", r#" [source.crates-io] registry = 'http://example.com' replace-with = 'bar' [source.bar] registry = 'http://example.com' replace-with = 'crates-io' "#); assert_that(p.cargo_process("build"), execs().with_status(101).with_stderr("\ error: failed to load source for a dependency on `bar` Caused by: Unable to update registry https://[..] 
Caused by: detected a cycle of `replace-with` sources, the source `crates-io` is \ eventually replaced with itself (configuration in [..]) ")); } #[test] fn bad_source_config5() { let p = project("foo") .file("Cargo.toml", r#" [package] name = "foo" version = "0.0.0" authors = [] [dependencies] bar = "*" "#) .file("src/lib.rs", "") .file(".cargo/config", r#" [source.crates-io] registry = 'http://example.com' replace-with = 'bar' [source.bar] registry = 'not a url' "#); assert_that(p.cargo_process("build"), execs().with_status(101).with_stderr("\ error: configuration key `source.bar.registry` specified an invalid URL (in [..]) Caused by: invalid url `not a url`: [..] ")); } #[test] fn both_git_and_path_specified() { let foo = project("foo") .file("Cargo.toml", r#" [package] name = "foo" version = "0.0.0" authors = [] [dependencies.bar] git = "https://example.com" path = "bar" "#) .file("src/lib.rs", ""); assert_that(foo.cargo_process("build").arg("-v"), execs().with_stderr_contains("\ [WARNING] dependency (bar) specification is ambiguous. \ Only one of `git` or `path` is allowed. \ This will be considered an error in future versions ")); } #[test] fn bad_source_config6() { let p = project("foo") .file("Cargo.toml", r#" [package] name = "foo" version = "0.0.0" authors = [] [dependencies] bar = "*" "#) .file("src/lib.rs", "") .file(".cargo/config", r#" [source.crates-io] registry = 'http://example.com' replace-with = ['not', 'a', 'string'] "#); assert_that(p.cargo_process("build"), execs().with_status(101).with_stderr("\ error: expected a string, but found a array for `source.crates-io.replace-with` in [..] ")); } #[test] fn ignored_git_revision() { let foo = project("foo") .file("Cargo.toml", r#" [package] name = "foo" version = "0.0.0" authors = [] [dependencies.bar] path = "bar" branch = "spam" "#) .file("src/lib.rs", ""); assert_that(foo.cargo_process("build").arg("-v"), execs().with_stderr_contains("\ [WARNING] key `branch` is ignored for dependency (bar). 
\ This will be considered an error in future versions")); } #[test] fn bad_source_config7() { let p = project("foo") .file("Cargo.toml", r#" [package] name = "foo" version = "0.0.0" authors = [] [dependencies] bar = "*" "#) .file("src/lib.rs", "") .file(".cargo/config", r#" [source.foo] registry = 'http://example.com' local-registry = 'file:///another/file' "#); Package::new("bar", "0.1.0").publish(); assert_that(p.cargo_process("build"), execs().with_status(101).with_stderr("\ error: more than one source URL specified for `source.foo` ")); } #[test] fn bad_dependency() { let p = project("foo") .file("Cargo.toml", r#" [package] name = "foo" version = "0.0.0" authors = [] [dependencies] bar = 3 "#) .file("src/lib.rs", ""); assert_that(p.cargo_process("build"), execs().with_status(101).with_stderr("\ error: failed to parse manifest at `[..]` Caused by: invalid type: integer `3`, expected a version string like [..] ")); } #[test] fn bad_debuginfo() { let p = project("foo") .file("Cargo.toml", r#" [package] name = "foo" version = "0.0.0" authors = [] [profile.dev] debug = 'a' "#) .file("src/lib.rs", ""); assert_that(p.cargo_process("build"), execs().with_status(101).with_stderr("\ error: failed to parse manifest at `[..]` Caused by: invalid type: string \"a\", expected a boolean or an integer for [..] ")); } #[test] fn bad_opt_level() { let p = project("foo") .file("Cargo.toml", r#" [package] name = "foo" version = "0.0.0" authors = [] build = 3 "#) .file("src/lib.rs", ""); assert_that(p.cargo_process("build"), execs().with_status(101).with_stderr("\ error: failed to parse manifest at `[..]` Caused by: invalid type: integer `3`, expected a boolean or a string for key [..] ")); }
24.158824
99
0.513351
c175d2c69016f926ff6d24cdc74e97eb408b7d67
55,041
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Implementation of the test-related targets of the build system. //! //! This file implements the various regression test suites that we execute on //! our CI. use std::env; use std::ffi::OsString; use std::iter; use std::fmt; use std::fs::{self, File}; use std::path::{PathBuf, Path}; use std::process::Command; use std::io::Read; use build_helper::{self, output}; use builder::{Kind, RunConfig, ShouldRun, Builder, Compiler, Step}; use Crate as CargoCrate; use cache::{INTERNER, Interned}; use compile; use dist; use native; use tool::{self, Tool}; use util::{self, dylib_path, dylib_path_var}; use {Build, Mode}; use toolstate::ToolState; const ADB_TEST_DIR: &str = "/data/tmp/work"; /// The two modes of the test runner; tests or benchmarks. 
#[derive(Debug, PartialEq, Eq, Hash, Copy, Clone)]
pub enum TestKind {
    /// Run `cargo test`
    Test,
    /// Run `cargo bench`
    Bench,
}

impl TestKind {
    // Return the cargo subcommand for this test kind
    fn subcommand(self) -> &'static str {
        match self {
            TestKind::Test => "test",
            TestKind::Bench => "bench",
        }
    }
}

impl fmt::Display for TestKind {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(match *self {
            TestKind::Test => "Testing",
            TestKind::Bench => "Benchmarking",
        })
    }
}

/// Runs `cmd`, returning whether it succeeded.
///
/// When `fail_fast` is disabled a failure is recorded in
/// `build.delayed_failures` (reported at the end of the build) instead of
/// aborting immediately; otherwise `build.run` aborts on failure.
fn try_run(build: &Build, cmd: &mut Command) -> bool {
    if !build.fail_fast {
        if !build.try_run(cmd) {
            let mut failures = build.delayed_failures.borrow_mut();
            failures.push(format!("{:?}", cmd));
            return false;
        }
    } else {
        build.run(cmd);
    }
    true
}

/// Same as `try_run`, but suppresses the command's output unless it fails.
fn try_run_quiet(build: &Build, cmd: &mut Command) -> bool {
    if !build.fail_fast {
        if !build.try_run_quiet(cmd) {
            let mut failures = build.delayed_failures.borrow_mut();
            failures.push(format!("{:?}", cmd));
            return false;
        }
    } else {
        build.run_quiet(cmd);
    }
    true
}

#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Linkcheck {
    // Host triple whose generated documentation is link-checked.
    host: Interned<String>,
}

impl Step for Linkcheck {
    type Output = ();
    const ONLY_HOSTS: bool = true;
    const DEFAULT: bool = true;

    /// Runs the `linkchecker` tool as compiled in `stage` by the `host` compiler.
    ///
    /// This tool in `src/tools` will verify the validity of all our links in the
    /// documentation to ensure we don't have a bunch of dead ones.
    fn run(self, builder: &Builder) {
        let build = builder.build;
        let host = self.host;

        build.info(&format!("Linkcheck ({})", host));

        // The documentation must exist before it can be checked.
        builder.default_doc(None);

        let _time = util::timeit(&build);
        try_run(build, builder.tool_cmd(Tool::Linkchecker)
                              .arg(build.out.join(host).join("doc")));
    }

    fn should_run(run: ShouldRun) -> ShouldRun {
        let builder = run.builder;
        run.path("src/tools/linkchecker").default_condition(builder.build.config.docs)
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Linkcheck { host: run.target });
    }
}

#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Cargotest {
    stage: u32,
    host: Interned<String>,
}

impl Step for Cargotest {
    type Output = ();
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/cargotest")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Cargotest {
            stage: run.builder.top_stage,
            host: run.target,
        });
    }

    /// Runs the `cargotest` tool as compiled in `stage` by the `host` compiler.
    ///
    /// This tool in `src/tools` will check out a few Rust projects and run `cargo
    /// test` to ensure that we don't regress the test suites there.
    fn run(self, builder: &Builder) {
        let build = builder.build;
        let compiler = builder.compiler(self.stage, self.host);
        builder.ensure(compile::Rustc { compiler, target: compiler.host });

        // Note that this is a short, cryptic, and not scoped directory name. This
        // is currently to minimize the length of path on Windows where we otherwise
        // quickly run into path name limit constraints.
        let out_dir = build.out.join("ct");
        t!(fs::create_dir_all(&out_dir));

        let _time = util::timeit(&build);
        let mut cmd = builder.tool_cmd(Tool::CargoTest);
        try_run(build, cmd.arg(&build.initial_cargo)
                          .arg(&out_dir)
                          .env("RUSTC", builder.rustc(compiler))
                          .env("RUSTDOC", builder.rustdoc(compiler.host)));
    }
}

#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Cargo {
    stage: u32,
    host: Interned<String>,
}

impl Step for Cargo {
    type Output = ();
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/cargo")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Cargo {
            stage: run.builder.top_stage,
            host: run.target,
        });
    }

    /// Runs `cargo test` for `cargo` packaged with Rust.
    fn run(self, builder: &Builder) {
        let build = builder.build;
        let compiler = builder.compiler(self.stage, self.host);

        builder.ensure(tool::Cargo { compiler, target: self.host });
        let mut cargo = builder.cargo(compiler, Mode::Tool, self.host, "test");
        cargo.arg("--manifest-path").arg(build.src.join("src/tools/cargo/Cargo.toml"));
        if !build.fail_fast {
            cargo.arg("--no-fail-fast");
        }

        // Don't build tests dynamically, just a pain to work with
        cargo.env("RUSTC_NO_PREFER_DYNAMIC", "1");

        // Don't run cross-compile tests, we may not have cross-compiled libstd libs
        // available.
        cargo.env("CFG_DISABLE_CROSS_TESTS", "1");

        try_run(build, cargo.env("PATH", &path_for_cargo(builder, compiler)));
    }
}

#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Rls {
    stage: u32,
    host: Interned<String>,
}

impl Step for Rls {
    type Output = ();
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/rls")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Rls {
            stage: run.builder.top_stage,
            host: run.target,
        });
    }

    /// Runs `cargo test` for the rls.
    fn run(self, builder: &Builder) {
        let build = builder.build;
        let stage = self.stage;
        let host = self.host;
        let compiler = builder.compiler(stage, host);

        builder.ensure(tool::Rls { compiler, target: self.host, extra_features: Vec::new() });
        let mut cargo = tool::prepare_tool_cargo(builder,
                                                 compiler,
                                                 host,
                                                 "test",
                                                 "src/tools/rls");

        // Don't build tests dynamically, just a pain to work with
        cargo.env("RUSTC_NO_PREFER_DYNAMIC", "1");

        builder.add_rustc_lib_path(compiler, &mut cargo);

        // Record the result in the toolstate so external-tool breakage is tracked.
        if try_run(build, &mut cargo) {
            build.save_toolstate("rls", ToolState::TestPass);
        }
    }
}

#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Rustfmt {
    stage: u32,
    host: Interned<String>,
}

impl Step for Rustfmt {
    type Output = ();
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/rustfmt")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Rustfmt {
            stage: run.builder.top_stage,
            host: run.target,
        });
    }

    /// Runs `cargo test` for rustfmt.
    fn run(self, builder: &Builder) {
        let build = builder.build;
        let stage = self.stage;
        let host = self.host;
        let compiler = builder.compiler(stage, host);

        builder.ensure(tool::Rustfmt { compiler, target: self.host, extra_features: Vec::new() });
        let mut cargo = tool::prepare_tool_cargo(builder,
                                                 compiler,
                                                 host,
                                                 "test",
                                                 "src/tools/rustfmt");

        // Don't build tests dynamically, just a pain to work with
        cargo.env("RUSTC_NO_PREFER_DYNAMIC", "1");

        builder.add_rustc_lib_path(compiler, &mut cargo);

        // Record the result in the toolstate so external-tool breakage is tracked.
        if try_run(build, &mut cargo) {
            build.save_toolstate("rustfmt", ToolState::TestPass);
        }
    }
}

#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Miri {
    stage: u32,
    host: Interned<String>,
}

impl Step for Miri {
    type Output = ();
    const ONLY_HOSTS: bool = true;
    const DEFAULT: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        // Only run by default when enabled in the build configuration.
        let test_miri = run.builder.build.config.test_miri;
        run.path("src/tools/miri").default_condition(test_miri)
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Miri {
            stage: run.builder.top_stage,
            host: run.target,
        });
    }

    /// Runs `cargo test` for miri.
    fn run(self, builder: &Builder) {
        let build = builder.build;
        let stage = self.stage;
        let host = self.host;
        let compiler = builder.compiler(stage, host);

        // Building miri may fail (it's an external tool); `None` means we
        // couldn't build it and the tests are skipped with a note below.
        let miri = builder.ensure(tool::Miri {
            compiler,
            target: self.host,
            extra_features: Vec::new(),
        });
        if let Some(miri) = miri {
            let mut cargo = builder.cargo(compiler, Mode::Tool, host, "test");
            cargo.arg("--manifest-path").arg(build.src.join("src/tools/miri/Cargo.toml"));

            // Don't build tests dynamically, just a pain to work with
            cargo.env("RUSTC_NO_PREFER_DYNAMIC", "1");
            // miri tests need to know about the stage sysroot
            cargo.env("MIRI_SYSROOT", builder.sysroot(compiler));
            cargo.env("RUSTC_TEST_SUITE", builder.rustc(compiler));
            cargo.env("RUSTC_LIB_PATH", builder.rustc_libdir(compiler));
            cargo.env("MIRI_PATH", miri);

            builder.add_rustc_lib_path(compiler, &mut cargo);

            if try_run(build, &mut cargo) {
                build.save_toolstate("miri", ToolState::TestPass);
            }
        } else {
            eprintln!("failed to test miri: could not build");
        }
    }
}

#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Clippy {
    stage: u32,
    host: Interned<String>,
}

impl Step for Clippy {
    type Output = ();
    const ONLY_HOSTS: bool = true;
    const DEFAULT: bool = false;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/clippy")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Clippy {
            stage: run.builder.top_stage,
            host: run.target,
        });
    }

    /// Runs `cargo test` for clippy.
    fn run(self, builder: &Builder) {
        let build = builder.build;
        let stage = self.stage;
        let host = self.host;
        let compiler = builder.compiler(stage, host);

        // Building clippy may fail (it's an external tool); `None` means we
        // couldn't build it and the tests are skipped with a note below.
        let clippy = builder.ensure(tool::Clippy {
            compiler,
            target: self.host,
            extra_features: Vec::new(),
        });
        if let Some(clippy) = clippy {
            let mut cargo = builder.cargo(compiler, Mode::Tool, host, "test");
            cargo.arg("--manifest-path").arg(build.src.join("src/tools/clippy/Cargo.toml"));

            // Don't build tests dynamically, just a pain to work with
            cargo.env("RUSTC_NO_PREFER_DYNAMIC", "1");
            // clippy tests need to know about the stage sysroot
            cargo.env("SYSROOT", builder.sysroot(compiler));
            cargo.env("RUSTC_TEST_SUITE", builder.rustc(compiler));
            cargo.env("RUSTC_LIB_PATH", builder.rustc_libdir(compiler));
            let host_libs = builder.stage_out(compiler, Mode::Tool).join(builder.cargo_dir());
            cargo.env("HOST_LIBS", host_libs);
            // clippy tests need to find the driver
            cargo.env("CLIPPY_DRIVER_PATH", clippy);

            builder.add_rustc_lib_path(compiler, &mut cargo);

            if try_run(build, &mut cargo) {
                build.save_toolstate("clippy-driver", ToolState::TestPass);
            }
        } else {
            eprintln!("failed to test clippy: could not build");
        }
    }
}

/// Builds the PATH value used when running Cargo's test suite: the stage
/// sysroot `bin` directory is prepended to the existing PATH.
fn path_for_cargo(builder: &Builder, compiler: Compiler) -> OsString {
    // Configure PATH to find the right rustc. NB. we have to use PATH
    // and not RUSTC because the Cargo test suite has tests that will
    // fail if rustc is not spelled `rustc`.
    let path = builder.sysroot(compiler).join("bin");
    let old_path = env::var_os("PATH").unwrap_or_default();
    env::join_paths(iter::once(path).chain(env::split_paths(&old_path))).expect("")
}

#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct RustdocTheme {
    pub compiler: Compiler,
}

impl Step for RustdocTheme {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/rustdoc-themes")
    }

    fn make_run(run: RunConfig) {
        let compiler = run.builder.compiler(run.builder.top_stage, run.host);

        run.builder.ensure(RustdocTheme {
            compiler: compiler,
        });
    }

    /// Runs the `rustdoc-themes` tool against the in-tree rustdoc themes.
    fn run(self, builder: &Builder) {
        let rustdoc = builder.rustdoc(self.compiler.host);
        let mut cmd = builder.tool_cmd(Tool::RustdocTheme);
        cmd.arg(rustdoc.to_str().unwrap())
           .arg(builder.src.join("src/librustdoc/html/static/themes").to_str().unwrap())
           .env("RUSTC_STAGE", self.compiler.stage.to_string())
           .env("RUSTC_SYSROOT", builder.sysroot(self.compiler))
           .env("RUSTDOC_LIBDIR", builder.sysroot_libdir(self.compiler, self.compiler.host))
           .env("CFG_RELEASE_CHANNEL", &builder.build.config.channel)
           .env("RUSTDOC_REAL", builder.rustdoc(self.compiler.host))
           .env("RUSTDOC_CRATE_VERSION", builder.build.rust_version())
           .env("RUSTC_BOOTSTRAP", "1");
        if let Some(linker) = builder.build.linker(self.compiler.host) {
            cmd.env("RUSTC_TARGET_LINKER", linker);
        }
        try_run(builder.build, &mut cmd);
    }
}

#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct RustdocJS {
    pub host: Interned<String>,
    pub target: Interned<String>,
}

impl Step for RustdocJS {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/test/rustdoc-js")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(RustdocJS {
            host: run.host,
            target: run.target,
        });
    }

    /// Runs the rustdoc-js test suite via nodejs; skipped when nodejs is not
    /// configured.
    fn run(self, builder: &Builder) {
        if let Some(ref nodejs) = builder.config.nodejs {
            let mut command = Command::new(nodejs);
            command.args(&["src/tools/rustdoc-js/tester.js", &*self.host]);
            // The tester needs the std docs to have been generated.
            builder.ensure(::doc::Std {
                target: self.target,
                stage: builder.top_stage,
            });
            builder.run(&mut command);
        } else {
            builder.info(&format!("No nodejs found, skipping \"src/test/rustdoc-js\" tests"));
        }
    }
}

#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Tidy;

impl Step for Tidy {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    /// Runs the `tidy` tool.
    ///
    /// This tool in `src/tools` checks up on various bits and pieces of style and
    /// otherwise just implements a few lint-like checks that are specific to the
    /// compiler itself.
    fn run(self, builder: &Builder) {
        let build = builder.build;

        let mut cmd = builder.tool_cmd(Tool::Tidy);
        cmd.arg(build.src.join("src"));
        cmd.arg(&build.initial_cargo);
        if !build.config.vendor {
            cmd.arg("--no-vendor");
        }
        if build.config.quiet_tests {
            cmd.arg("--quiet");
        }

        let _folder = build.fold_output(|| "tidy");
        builder.info(&format!("tidy check"));
        try_run(build, &mut cmd);
    }

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/tidy")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(Tidy);
    }
}

/// Returns the build-output directory for `host`'s test artifacts.
fn testdir(build: &Build, host: Interned<String>) -> PathBuf {
    build.out.join(host).join("test")
}

// Shorthand for `test!` with `default: true, host: false`.
macro_rules! default_test {
    ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr }) => {
        test!($name { path: $path, mode: $mode, suite: $suite, default: true, host: false });
    }
}

// Shorthand for `test!` with `default: true, host: true`.
macro_rules! host_test {
    ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr }) => {
        test!($name { path: $path, mode: $mode, suite: $suite, default: true, host: true });
    }
}

// Declares a compiletest-backed test step: a unit struct plus a `Step` impl
// that delegates to the `Compiletest` step with the given mode/suite.
macro_rules! test {
    ($name:ident {
        path: $path:expr,
        mode: $mode:expr,
        suite: $suite:expr,
        default: $default:expr,
        host: $host:expr
    }) => {
        #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
        pub struct $name {
            pub compiler: Compiler,
            pub target: Interned<String>,
        }

        impl Step for $name {
            type Output = ();
            const DEFAULT: bool = $default;
            const ONLY_HOSTS: bool = $host;

            fn should_run(run: ShouldRun) -> ShouldRun {
                run.path($path)
            }

            fn make_run(run: RunConfig) {
                let compiler = run.builder.compiler(run.builder.top_stage, run.host);

                run.builder.ensure($name {
                    compiler,
                    target: run.target,
                });
            }

            fn run(self, builder: &Builder) {
                builder.ensure(Compiletest {
                    compiler: self.compiler,
                    target: self.target,
                    mode: $mode,
                    suite: $suite,
                })
            }
        }
    }
}

default_test!(Ui {
    path: "src/test/ui",
    mode: "ui",
    suite: "ui"
});

default_test!(RunPass {
    path: "src/test/run-pass",
    mode: "run-pass",
    suite: "run-pass"
});

default_test!(CompileFail {
    path: "src/test/compile-fail",
    mode: "compile-fail",
    suite: "compile-fail"
});

default_test!(ParseFail {
    path: "src/test/parse-fail",
    mode: "parse-fail",
    suite: "parse-fail"
});

default_test!(RunFail {
    path: "src/test/run-fail",
    mode: "run-fail",
    suite: "run-fail"
});

default_test!(RunPassValgrind {
    path: "src/test/run-pass-valgrind",
    mode: "run-pass-valgrind",
    suite: "run-pass-valgrind"
});

default_test!(MirOpt {
    path: "src/test/mir-opt",
    mode: "mir-opt",
    suite: "mir-opt"
});

default_test!(Codegen {
    path: "src/test/codegen",
    mode: "codegen",
    suite: "codegen"
});

default_test!(CodegenUnits {
    path: "src/test/codegen-units",
    mode: "codegen-units",
    suite: "codegen-units"
});

default_test!(Incremental {
    path: "src/test/incremental",
    mode: "incremental",
    suite: "incremental"
});

default_test!(Debuginfo {
    path: "src/test/debuginfo",
    // What this runs varies depending on the native platform being apple
    mode: "debuginfo-XXX",
    suite: "debuginfo"
});

host_test!(UiFullDeps {
    path: "src/test/ui-fulldeps",
    mode: "ui",
    suite: "ui-fulldeps"
});

host_test!(RunPassFullDeps {
    path: "src/test/run-pass-fulldeps",
    mode: "run-pass",
    suite: "run-pass-fulldeps"
});

host_test!(RunFailFullDeps {
    path: "src/test/run-fail-fulldeps",
    mode: "run-fail",
    suite: "run-fail-fulldeps"
});

host_test!(CompileFailFullDeps {
    path: "src/test/compile-fail-fulldeps",
    mode: "compile-fail",
    suite: "compile-fail-fulldeps"
});

host_test!(IncrementalFullDeps {
    path: "src/test/incremental-fulldeps",
    mode: "incremental",
    suite: "incremental-fulldeps"
});

host_test!(Rustdoc {
    path: "src/test/rustdoc",
    mode: "rustdoc",
    suite: "rustdoc"
});

test!(Pretty {
    path: "src/test/pretty",
    mode: "pretty",
    suite: "pretty",
    default: false,
    host: true
});

test!(RunPassPretty {
    path: "src/test/run-pass/pretty",
    mode: "pretty",
    suite: "run-pass",
    default: false,
    host: true
});

test!(RunFailPretty {
    path: "src/test/run-fail/pretty",
    mode: "pretty",
    suite: "run-fail",
    default: false,
    host: true
});

test!(RunPassValgrindPretty {
    path: "src/test/run-pass-valgrind/pretty",
    mode: "pretty",
    suite: "run-pass-valgrind",
    default: false,
    host: true
});

test!(RunPassFullDepsPretty {
    path: "src/test/run-pass-fulldeps/pretty",
    mode: "pretty",
    suite: "run-pass-fulldeps",
    default: false,
    host: true
});

test!(RunFailFullDepsPretty {
    path: "src/test/run-fail-fulldeps/pretty",
    mode: "pretty",
    suite: "run-fail-fulldeps",
    default: false,
    host: true
});

default_test!(RunMake {
    path: "src/test/run-make",
    mode: "run-make",
    suite: "run-make"
});

host_test!(RunMakeFullDeps {
    path: "src/test/run-make-fulldeps",
    mode: "run-make",
    suite: "run-make-fulldeps"
});

#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
struct Compiletest {
    compiler: Compiler,
    target: Interned<String>,
    mode: &'static str,
    suite: &'static str,
}

impl Step for Compiletest {
    type Output = ();

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.never()
    }

    /// Executes the `compiletest` tool to run a suite of tests.
    ///
    /// Compiles all tests with `compiler` for `target` with the specified
    /// compiletest `mode` and `suite` arguments. For example `mode` can be
    /// "run-pass" or `suite` can be something like `debuginfo`.
    fn run(self, builder: &Builder) {
        let build = builder.build;
        let compiler = self.compiler;
        let target = self.target;
        let mode = self.mode;
        let suite = self.suite;

        // Skip codegen tests if they aren't enabled in configuration.
        if !build.config.codegen_tests && suite == "codegen" {
            return;
        }

        if suite == "debuginfo" {
            // Skip debuginfo tests on MSVC
            if build.build.contains("msvc") {
                return;
            }

            // The placeholder mode resolves to lldb on apple hosts and gdb
            // elsewhere by re-running this step with a concrete mode.
            if mode == "debuginfo-XXX" {
                return if build.build.contains("apple") {
                    builder.ensure(Compiletest {
                        mode: "debuginfo-lldb",
                        ..self
                    });
                } else {
                    builder.ensure(Compiletest {
                        mode: "debuginfo-gdb",
                        ..self
                    });
                };
            }

            builder.ensure(dist::DebuggerScripts {
                sysroot: builder.sysroot(compiler),
                host: target
            });
        }

        if suite.ends_with("fulldeps") ||
            // FIXME: Does pretty need librustc compiled? Note that there are
            // fulldeps test suites with mode = pretty as well.
            mode == "pretty" ||
            mode == "rustdoc" {
            builder.ensure(compile::Rustc { compiler, target });
        }

        builder.ensure(compile::Test { compiler, target });
        builder.ensure(native::TestHelpers { target });
        builder.ensure(RemoteCopyLibs { compiler, target });

        let mut cmd = builder.tool_cmd(Tool::Compiletest);

        // compiletest currently has... a lot of arguments, so let's just pass all
        // of them!

        cmd.arg("--compile-lib-path").arg(builder.rustc_libdir(compiler));
        cmd.arg("--run-lib-path").arg(builder.sysroot_libdir(compiler, target));
        cmd.arg("--rustc-path").arg(builder.rustc(compiler));

        // Avoid depending on rustdoc when we don't need it.
        if mode == "rustdoc" || (mode == "run-make" && suite.ends_with("fulldeps")) {
            cmd.arg("--rustdoc-path").arg(builder.rustdoc(compiler.host));
        }

        cmd.arg("--src-base").arg(build.src.join("src/test").join(suite));
        cmd.arg("--build-base").arg(testdir(build, compiler.host).join(suite));
        cmd.arg("--stage-id").arg(format!("stage{}-{}", compiler.stage, target));
        cmd.arg("--mode").arg(mode);
        cmd.arg("--target").arg(target);
        cmd.arg("--host").arg(&*compiler.host);
        cmd.arg("--llvm-filecheck").arg(build.llvm_filecheck(build.build));

        if let Some(ref nodejs) = build.config.nodejs {
            cmd.arg("--nodejs").arg(nodejs);
        }

        // rustc flags shared by host and target compilations of the tests.
        let mut flags = vec!["-Crpath".to_string()];
        if build.config.rust_optimize_tests {
            flags.push("-O".to_string());
        }
        if build.config.rust_debuginfo_tests {
            flags.push("-g".to_string());
        }
        flags.push("-Zmiri -Zunstable-options".to_string());
        flags.push(build.config.cmd.rustc_args().join(" "));

        if let Some(linker) = build.linker(target) {
            cmd.arg("--linker").arg(linker);
        }

        let hostflags = flags.clone();
        cmd.arg("--host-rustcflags").arg(hostflags.join(" "));

        let mut targetflags = flags.clone();
        targetflags.push(format!("-Lnative={}",
                                 build.test_helpers_out(target).display()));
        cmd.arg("--target-rustcflags").arg(targetflags.join(" "));

        cmd.arg("--docck-python").arg(build.python());

        if build.build.ends_with("apple-darwin") {
            // Force /usr/bin/python on macOS for LLDB tests because we're loading the
            // LLDB plugin's compiled module which only works with the system python
            // (namely not Homebrew-installed python)
            cmd.arg("--lldb-python").arg("/usr/bin/python");
        } else {
            cmd.arg("--lldb-python").arg(build.python());
        }

        if let Some(ref gdb) = build.config.gdb {
            cmd.arg("--gdb").arg(gdb);
        }
        if let Some(ref vers) = build.lldb_version {
            cmd.arg("--lldb-version").arg(vers);
        }
        if let Some(ref dir) = build.lldb_python_dir {
            cmd.arg("--lldb-python-dir").arg(dir);
        }

        cmd.args(&build.config.cmd.test_args());

        if build.is_verbose() {
            cmd.arg("--verbose");
        }

        if build.config.quiet_tests {
            cmd.arg("--quiet");
        }

        if build.config.llvm_enabled {
            let llvm_config = builder.ensure(native::Llvm {
                target: build.config.build,
                emscripten: false,
            });
            if !build.config.dry_run {
                let llvm_version = output(Command::new(&llvm_config).arg("--version"));
                cmd.arg("--llvm-version").arg(llvm_version);
            }
            if !build.is_rust_llvm(target) {
                cmd.arg("--system-llvm");
            }

            // Only pass correct values for these flags for the `run-make` suite as it
            // requires that a C++ compiler was configured which isn't always the case.
            if !build.config.dry_run && suite == "run-make-fulldeps" {
                let llvm_components = output(Command::new(&llvm_config).arg("--components"));
                let llvm_cxxflags = output(Command::new(&llvm_config).arg("--cxxflags"));
                cmd.arg("--cc").arg(build.cc(target))
                   .arg("--cxx").arg(build.cxx(target).unwrap())
                   .arg("--cflags").arg(build.cflags(target).join(" "))
                   .arg("--llvm-components").arg(llvm_components.trim())
                   .arg("--llvm-cxxflags").arg(llvm_cxxflags.trim());
                if let Some(ar) = build.ar(target) {
                    cmd.arg("--ar").arg(ar);
                }
            }
        }
        if suite == "run-make-fulldeps" && !build.config.llvm_enabled {
            builder.info(
                &format!("Ignoring run-make test suite as they generally don't work without LLVM"));
            return;
        }

        // compiletest requires these flags to be present on every invocation,
        // so pass empty values for any suite that doesn't use them.
        if suite != "run-make-fulldeps" {
            cmd.arg("--cc").arg("")
               .arg("--cxx").arg("")
               .arg("--cflags").arg("")
               .arg("--llvm-components").arg("")
               .arg("--llvm-cxxflags").arg("");
        }

        if build.remote_tested(target) {
            cmd.arg("--remote-test-client").arg(builder.tool_exe(Tool::RemoteTestClient));
        }

        // Running a C compiler on MSVC requires a few env vars to be set, to be
        // sure to set them here.
        //
        // Note that if we encounter `PATH` we make sure to append to our own `PATH`
        // rather than stomp over it.
        if target.contains("msvc") {
            for &(ref k, ref v) in build.cc[&target].env() {
                if k != "PATH" {
                    cmd.env(k, v);
                }
            }
        }
        cmd.env("RUSTC_BOOTSTRAP", "1");
        build.add_rust_test_threads(&mut cmd);

        if build.config.sanitizers {
            cmd.env("SANITIZER_SUPPORT", "1");
        }

        if build.config.profiler {
            cmd.env("PROFILER_SUPPORT", "1");
        }

        cmd.env("RUST_TEST_TMPDIR", build.out.join("tmp"));

        cmd.arg("--adb-path").arg("adb");
        cmd.arg("--adb-test-dir").arg(ADB_TEST_DIR);
        if target.contains("android") {
            // Assume that cc for this target comes from the android sysroot
            cmd.arg("--android-cross-path")
               .arg(build.cc(target).parent().unwrap().parent().unwrap());
        } else {
            cmd.arg("--android-cross-path").arg("");
        }

        build.ci_env.force_coloring_in_ci(&mut cmd);

        let _folder = build.fold_output(|| format!("test_{}", suite));
        builder.info(&format!("Check compiletest suite={} mode={} ({} -> {})",
                 suite, mode, &compiler.host, target));
        let _time = util::timeit(&build);
        try_run(build, &mut cmd);
    }
}

#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
struct DocTest {
    compiler: Compiler,
    // Directory (relative to the source root) to scan for `*.md` files.
    path: &'static str,
    // Name used for output folding and toolstate reporting.
    name: &'static str,
    // Whether this documentation lives in an external (submodule) book, in
    // which case failures are recorded in the toolstate instead of aborting.
    is_ext_doc: bool,
}

impl Step for DocTest {
    type Output = ();
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.never()
    }

    /// Run `rustdoc --test` for all documentation in `src/doc`.
    ///
    /// This will run all tests in our markdown documentation (e.g. the book)
    /// located in `src/doc`. The `rustdoc` that's run is the one that sits next to
    /// `compiler`.
    fn run(self, builder: &Builder) {
        let build = builder.build;
        let compiler = self.compiler;
        builder.ensure(compile::Test { compiler, target: compiler.host });

        // Do a breadth-first traversal of the `src/doc` directory and just run
        // tests for all files that end in `*.md`
        let mut stack = vec![build.src.join(self.path)];
        let _time = util::timeit(&build);
        let _folder = build.fold_output(|| format!("test_{}", self.name));

        let mut files = Vec::new();
        while let Some(p) = stack.pop() {
            if p.is_dir() {
                stack.extend(t!(p.read_dir()).map(|p| t!(p).path()));
                continue
            }

            if p.extension().and_then(|s| s.to_str()) != Some("md") {
                continue;
            }

            // The nostarch directory in the book is for no starch, and so isn't
            // guaranteed to build. We don't care if it doesn't build, so skip it.
            if p.to_str().map_or(false, |p| p.contains("nostarch")) {
                continue;
            }

            files.push(p);
        }

        // Sort for a deterministic test order across runs.
        files.sort();

        for file in files {
            let test_result = markdown_test(builder, compiler, &file);
            if self.is_ext_doc {
                let toolstate = if test_result {
                    ToolState::TestPass
                } else {
                    ToolState::TestFail
                };
                build.save_toolstate(self.name, toolstate);
            }
        }
    }
}

// Declares one `Step` per book; each delegates to `DocTest` for the book's
// directory. `default=false` marks external books (failures go to toolstate).
macro_rules! test_book {
    ($($name:ident, $path:expr, $book_name:expr, default=$default:expr;)+) => {
        $(
            #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
            pub struct $name {
                compiler: Compiler,
            }

            impl Step for $name {
                type Output = ();
                const DEFAULT: bool = $default;
                const ONLY_HOSTS: bool = true;

                fn should_run(run: ShouldRun) -> ShouldRun {
                    run.path($path)
                }

                fn make_run(run: RunConfig) {
                    run.builder.ensure($name {
                        compiler: run.builder.compiler(run.builder.top_stage, run.host),
                    });
                }

                fn run(self, builder: &Builder) {
                    builder.ensure(DocTest {
                        compiler: self.compiler,
                        path: $path,
                        name: $book_name,
                        is_ext_doc: !$default,
                    });
                }
            }
        )+
    }
}

test_book!(
    Nomicon, "src/doc/nomicon", "nomicon", default=false;
    Reference, "src/doc/reference", "reference", default=false;
    RustdocBook, "src/doc/rustdoc", "rustdoc", default=true;
    RustByExample, "src/doc/rust-by-example", "rust-by-example", default=false;
    TheBook, "src/doc/book", "book", default=false;
    UnstableBook, "src/doc/unstable-book", "unstable-book", default=true;
);

#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct ErrorIndex {
    compiler: Compiler,
}

impl Step for ErrorIndex {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/tools/error_index_generator")
    }

    fn make_run(run: RunConfig) {
        run.builder.ensure(ErrorIndex {
            compiler: run.builder.compiler(run.builder.top_stage, run.host),
        });
    }

    /// Run the error index generator tool to execute the tests located in the error
    /// index.
    ///
    /// The `error_index_generator` tool lives in `src/tools` and is used to
    /// generate a markdown file from the error indexes of the code base which is
    /// then passed to `rustdoc --test`.
    fn run(self, builder: &Builder) {
        let build = builder.build;
        let compiler = self.compiler;

        builder.ensure(compile::Std { compiler, target: compiler.host });

        let dir = testdir(build, compiler.host);
        t!(fs::create_dir_all(&dir));
        let output = dir.join("error-index.md");

        let mut tool = builder.tool_cmd(Tool::ErrorIndex);
        tool.arg("markdown")
            .arg(&output)
            .env("CFG_BUILD", &build.build)
            .env("RUSTC_ERROR_METADATA_DST", build.extended_error_dir());

        let _folder = build.fold_output(|| "test_error_index");
        build.info(&format!("Testing error-index stage{}", compiler.stage));
        let _time = util::timeit(&build);
        build.run(&mut tool);
        markdown_test(builder, compiler, &output);
    }
}

/// Runs `rustdoc --test` on a single markdown file, returning whether the
/// doc tests passed. Files without any code fences are trivially successful;
/// an unreadable file falls through to rustdoc, which reports the error.
fn markdown_test(builder: &Builder, compiler: Compiler, markdown: &Path) -> bool {
    let build = builder.build;
    match File::open(markdown) {
        Ok(mut file) => {
            let mut contents = String::new();
            t!(file.read_to_string(&mut contents));
            if !contents.contains("```") {
                return true;
            }
        }
        Err(_) => {},
    }

    build.info(&format!("doc tests for: {}", markdown.display()));
    let mut cmd = builder.rustdoc_cmd(compiler.host);
    build.add_rust_test_threads(&mut cmd);
    cmd.arg("--test");
    cmd.arg(markdown);
    cmd.env("RUSTC_BOOTSTRAP", "1");

    let test_args = build.config.cmd.test_args().join(" ");
    cmd.arg("--test-args").arg(test_args);

    if build.config.quiet_tests {
        try_run_quiet(build, &mut cmd)
    } else {
        try_run(build, &mut cmd)
    }
}

#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct CrateLibrustc {
    compiler: Compiler,
    target: Interned<String>,
    test_kind: TestKind,
    krate: Interned<String>,
}

impl Step for CrateLibrustc {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.krate("rustc-main")
    }

    fn make_run(run: RunConfig) {
        let builder = run.builder;
        let compiler = builder.compiler(builder.top_stage, run.host);

        for krate in builder.in_tree_crates("rustc-main") {
            if run.path.ends_with(&krate.path) {
                let test_kind = if builder.kind == Kind::Test {
                    TestKind::Test
                } else if builder.kind == Kind::Bench {
                    TestKind::Bench
                } else {
                    panic!("unexpected builder.kind in crate: {:?}", builder.kind);
                };

                builder.ensure(CrateLibrustc {
                    compiler,
                    target: run.target,
                    test_kind,
                    krate: krate.name,
                });
            }
        }
    }

    /// Delegates to the generic `Crate` step with `Mode::Librustc`.
    fn run(self, builder: &Builder) {
        builder.ensure(Crate {
            compiler: self.compiler,
            target: self.target,
            mode: Mode::Librustc,
            test_kind: self.test_kind,
            krate: self.krate,
        });
    }
}

#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct CrateNotDefault {
    compiler: Compiler,
    target: Interned<String>,
    test_kind: TestKind,
    krate: &'static str,
}

impl Step for CrateNotDefault {
    type Output = ();

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.path("src/liballoc_jemalloc")
            .path("src/librustc_asan")
            .path("src/librustc_lsan")
            .path("src/librustc_msan")
            .path("src/librustc_tsan")
    }

    fn make_run(run: RunConfig) {
        let builder = run.builder;
        let compiler = builder.compiler(builder.top_stage, run.host);

        let test_kind = if builder.kind == Kind::Test {
            TestKind::Test
        } else if builder.kind == Kind::Bench {
            TestKind::Bench
        } else {
            panic!("unexpected builder.kind in crate: {:?}", builder.kind);
        };

        builder.ensure(CrateNotDefault {
            compiler,
            target: run.target,
            test_kind,
            // Map the requested source path back to its crate name.
            krate: match run.path {
                _ if run.path.ends_with("src/liballoc_jemalloc") => "alloc_jemalloc",
                _ if run.path.ends_with("src/librustc_asan") => "rustc_asan",
                _ if run.path.ends_with("src/librustc_lsan") => "rustc_lsan",
                _ if run.path.ends_with("src/librustc_msan") => "rustc_msan",
                _ if run.path.ends_with("src/librustc_tsan") => "rustc_tsan",
                _ => panic!("unexpected path {:?}", run.path),
            },
        });
    }

    /// Delegates to the generic `Crate` step with `Mode::Libstd`.
    fn run(self, builder: &Builder) {
        builder.ensure(Crate {
            compiler: self.compiler,
            target: self.target,
            mode: Mode::Libstd,
            test_kind: self.test_kind,
            krate: INTERNER.intern_str(self.krate),
        });
    }
}

#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct Crate {
    compiler: Compiler,
    target: Interned<String>,
    mode: Mode,
    test_kind: TestKind,
    krate: Interned<String>,
}
impl Step for Crate {
    type Output = ();
    const DEFAULT: bool = true;

    fn should_run(mut run: ShouldRun) -> ShouldRun {
        let builder = run.builder;
        run = run.krate("test");
        // Register paths for all local std-DAG crates except the allocator
        // and sanitizer shims, which are tested via `CrateNotDefault`.
        for krate in run.builder.in_tree_crates("std") {
            if krate.is_local(&run.builder) &&
                !krate.name.contains("jemalloc") &&
                !(krate.name.starts_with("rustc_") && krate.name.ends_with("san")) &&
                krate.name != "dlmalloc" {
                run = run.path(krate.local_path(&builder).to_str().unwrap());
            }
        }
        run
    }

    fn make_run(run: RunConfig) {
        let builder = run.builder;
        let compiler = builder.compiler(builder.top_stage, run.host);

        let make = |mode: Mode, krate: &CargoCrate| {
            let test_kind = if builder.kind == Kind::Test {
                TestKind::Test
            } else if builder.kind == Kind::Bench {
                TestKind::Bench
            } else {
                panic!("unexpected builder.kind in crate: {:?}", builder.kind);
            };

            builder.ensure(Crate {
                compiler,
                target: run.target,
                mode,
                test_kind,
                krate: krate.name,
            });
        };

        for krate in builder.in_tree_crates("std") {
            if run.path.ends_with(&krate.local_path(&builder)) {
                make(Mode::Libstd, krate);
            }
        }
        for krate in builder.in_tree_crates("test") {
            if run.path.ends_with(&krate.local_path(&builder)) {
                make(Mode::Libtest, krate);
            }
        }
    }

    /// Run all unit tests plus documentation tests for a given crate defined
    /// by a `Cargo.toml` (single manifest)
    ///
    /// This is what runs tests for crates like the standard library, compiler, etc.
    /// It essentially is the driver for running `cargo test`.
    ///
    /// Currently this runs all tests for a DAG by passing a bunch of `-p foo`
    /// arguments, and those arguments are discovered from `cargo metadata`.
    fn run(self, builder: &Builder) {
        let build = builder.build;
        let compiler = self.compiler;
        let target = self.target;
        let mode = self.mode;
        let test_kind = self.test_kind;
        let krate = self.krate;

        builder.ensure(compile::Test { compiler, target });
        builder.ensure(RemoteCopyLibs { compiler, target });

        // If we're not doing a full bootstrap but we're testing a stage2 version of
        // libstd, then what we're actually testing is the libstd produced in
        // stage1. Reflect that here by updating the compiler that we're working
        // with automatically.
        let compiler = if build.force_use_stage1(compiler, target) {
            builder.compiler(1, compiler.host)
        } else {
            compiler.clone()
        };

        let mut cargo = builder.cargo(compiler, mode, target, test_kind.subcommand());
        match mode {
            Mode::Libstd => {
                compile::std_cargo(builder, &compiler, target, &mut cargo);
            }
            Mode::Libtest => {
                compile::test_cargo(build, &compiler, target, &mut cargo);
            }
            Mode::Librustc => {
                builder.ensure(compile::Rustc { compiler, target });
                compile::rustc_cargo(build, &mut cargo);
            }
            _ => panic!("can only test libraries"),
        };

        // Build up the base `cargo test` command.
        //
        // Pass in some standard flags then iterate over the graph we've discovered
        // in `cargo metadata` with the maps above and figure out what `-p`
        // arguments need to get passed.
        if test_kind.subcommand() == "test" && !build.fail_fast {
            cargo.arg("--no-fail-fast");
        }
        if build.doc_tests {
            cargo.arg("--doc");
        }

        cargo.arg("-p").arg(krate);

        // The tests are going to run with the *target* libraries, so we need to
        // ensure that those libraries show up in the LD_LIBRARY_PATH equivalent.
        //
        // Note that to run the compiler we need to run with the *host* libraries,
        // but our wrapper scripts arrange for that to be the case anyway.
        let mut dylib_path = dylib_path();
        dylib_path.insert(0, PathBuf::from(&*builder.sysroot_libdir(compiler, target)));
        cargo.env(dylib_path_var(), env::join_paths(&dylib_path).unwrap());

        cargo.arg("--");
        cargo.args(&build.config.cmd.test_args());

        if build.config.quiet_tests {
            cargo.arg("--quiet");
        }

        // Emulated/remote targets need a runner wired in via Cargo's
        // CARGO_TARGET_<triple>_RUNNER environment variable.
        if target.contains("emscripten") {
            cargo.env(format!("CARGO_TARGET_{}_RUNNER", envify(&target)),
                      build.config.nodejs.as_ref().expect("nodejs not configured"));
        } else if target.starts_with("wasm32") {
            // Warn about running tests without the `wasm_syscall` feature enabled.
            // The javascript shim implements the syscall interface so that test
            // output can be correctly reported.
            if !build.config.wasm_syscall {
                build.info(&format!("Libstd was built without `wasm_syscall` feature enabled: \
                          test output may not be visible."));
            }

            // On the wasm32-unknown-unknown target we're using LTO which is
            // incompatible with `-C prefer-dynamic`, so disable that here
            cargo.env("RUSTC_NO_PREFER_DYNAMIC", "1");

            let node = build.config.nodejs.as_ref()
                .expect("nodejs not configured");
            let runner = format!("{} {}/src/etc/wasm32-shim.js",
                                 node.display(),
                                 build.src.display());
            cargo.env(format!("CARGO_TARGET_{}_RUNNER", envify(&target)), &runner);
        } else if build.remote_tested(target) {
            cargo.env(format!("CARGO_TARGET_{}_RUNNER", envify(&target)),
                      format!("{} run",
                              builder.tool_exe(Tool::RemoteTestClient).display()));
        }

        let _folder = build.fold_output(|| {
            format!("{}_stage{}-{}", test_kind.subcommand(), compiler.stage, krate)
        });
        build.info(&format!("{} {} stage{} ({} -> {})", test_kind, krate, compiler.stage,
                &compiler.host, target));
        let _time = util::timeit(&build);
        try_run(build, &mut cargo);
    }
}

#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct CrateRustdoc {
    host: Interned<String>,
    test_kind: TestKind,
}

impl Step for CrateRustdoc {
    type Output = ();
    const DEFAULT: bool = true;
    const ONLY_HOSTS: bool = true;

    fn should_run(run: ShouldRun) -> ShouldRun {
        run.paths(&["src/librustdoc", "src/tools/rustdoc"])
    }

    fn make_run(run: RunConfig) {
        let builder = run.builder;

        let test_kind = if builder.kind == Kind::Test {
            TestKind::Test
        } else if builder.kind == Kind::Bench {
            TestKind::Bench
        } else {
            panic!("unexpected builder.kind in crate: {:?}", builder.kind);
        };

        builder.ensure(CrateRustdoc {
            host: run.host,
            test_kind,
        });
    }

    /// Runs `cargo test` (or `bench`) for the rustdoc crate.
    fn run(self, builder: &Builder) {
        let build = builder.build;
        let test_kind = self.test_kind;

        let compiler = builder.compiler(builder.top_stage, self.host);
        let target = compiler.host;

        let mut cargo = tool::prepare_tool_cargo(builder,
                                                 compiler,
                                                 target,
                                                 test_kind.subcommand(),
                                                 "src/tools/rustdoc");
        if test_kind.subcommand() == "test" && !build.fail_fast {
            cargo.arg("--no-fail-fast");
        }

        cargo.arg("-p").arg("rustdoc:0.0.0");

        cargo.arg("--");
        cargo.args(&build.config.cmd.test_args());

        if build.config.quiet_tests {
            cargo.arg("--quiet");
        }

        let _folder = build.fold_output(|| {
            format!("{}_stage{}-rustdoc", test_kind.subcommand(), compiler.stage)
        });
        build.info(&format!("{} rustdoc stage{} ({} -> {})", test_kind, compiler.stage,
                &compiler.host, target));
        let _time = util::timeit(&build);

        try_run(build, &mut cargo);
    }
}

/// Converts a target triple into the form Cargo uses in environment variable
/// names: `-` becomes `_` and all characters are uppercased.
fn envify(s: &str) -> String {
    s.chars().map(|c| {
        match c {
            '-' => '_',
            c => c,
        }
    }).flat_map(|c| c.to_uppercase()).collect()
}

/// Some test suites are run inside emulators or on remote devices, and most
/// of our test binaries are linked dynamically which means we need to ship
/// the standard library and such to the emulator ahead of time. This step
/// represents this and is a dependency of all test suites.
///
/// Most of the time this is a noop. For some steps such as shipping data to
/// QEMU we have to build our own tools so we've got conditional dependencies
/// on those programs as well. Note that the remote test client is built for
/// the build target (us) and the server is built for the target.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub struct RemoteCopyLibs { compiler: Compiler, target: Interned<String>, } impl Step for RemoteCopyLibs { type Output = (); fn should_run(run: ShouldRun) -> ShouldRun { run.never() } fn run(self, builder: &Builder) { let build = builder.build; let compiler = self.compiler; let target = self.target; if !build.remote_tested(target) { return } builder.ensure(compile::Test { compiler, target }); build.info(&format!("REMOTE copy libs to emulator ({})", target)); t!(fs::create_dir_all(build.out.join("tmp"))); let server = builder.ensure(tool::RemoteTestServer { compiler, target }); // Spawn the emulator and wait for it to come online let tool = builder.tool_exe(Tool::RemoteTestClient); let mut cmd = Command::new(&tool); cmd.arg("spawn-emulator") .arg(target) .arg(&server) .arg(build.out.join("tmp")); if let Some(rootfs) = build.qemu_rootfs(target) { cmd.arg(rootfs); } build.run(&mut cmd); // Push all our dylibs to the emulator for f in t!(builder.sysroot_libdir(compiler, target).read_dir()) { let f = t!(f); let name = f.file_name().into_string().unwrap(); if util::is_dylib(&name) { build.run(Command::new(&tool) .arg("push") .arg(f.path())); } } } } #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub struct Distcheck; impl Step for Distcheck { type Output = (); fn should_run(run: ShouldRun) -> ShouldRun { run.path("distcheck") } fn make_run(run: RunConfig) { run.builder.ensure(Distcheck); } /// Run "distcheck", a 'make check' from a tarball fn run(self, builder: &Builder) { let build = builder.build; build.info(&format!("Distcheck")); let dir = build.out.join("tmp").join("distcheck"); let _ = fs::remove_dir_all(&dir); t!(fs::create_dir_all(&dir)); // Guarantee that these are built before we begin running. 
builder.ensure(dist::PlainSourceTarball); builder.ensure(dist::Src); let mut cmd = Command::new("tar"); cmd.arg("-xzf") .arg(builder.ensure(dist::PlainSourceTarball)) .arg("--strip-components=1") .current_dir(&dir); build.run(&mut cmd); build.run(Command::new("./configure") .args(&build.config.configure_args) .arg("--enable-vendor") .current_dir(&dir)); build.run(Command::new(build_helper::make(&build.build)) .arg("check") .current_dir(&dir)); // Now make sure that rust-src has all of libstd's dependencies build.info(&format!("Distcheck rust-src")); let dir = build.out.join("tmp").join("distcheck-src"); let _ = fs::remove_dir_all(&dir); t!(fs::create_dir_all(&dir)); let mut cmd = Command::new("tar"); cmd.arg("-xzf") .arg(builder.ensure(dist::Src)) .arg("--strip-components=1") .current_dir(&dir); build.run(&mut cmd); let toml = dir.join("rust-src/lib/rustlib/src/rust/src/libstd/Cargo.toml"); build.run(Command::new(&build.initial_cargo) .arg("generate-lockfile") .arg("--manifest-path") .arg(&toml) .current_dir(&dir)); } } #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub struct Bootstrap; impl Step for Bootstrap { type Output = (); const DEFAULT: bool = true; const ONLY_HOSTS: bool = true; /// Test the build system itself fn run(self, builder: &Builder) { let build = builder.build; let mut cmd = Command::new(&build.initial_cargo); cmd.arg("test") .current_dir(build.src.join("src/bootstrap")) .env("RUSTFLAGS", "-Cdebuginfo=2") .env("CARGO_TARGET_DIR", build.out.join("bootstrap")) .env("RUSTC_BOOTSTRAP", "1") .env("RUSTC", &build.initial_rustc); if let Some(flags) = option_env!("RUSTFLAGS") { // Use the same rustc flags for testing as for "normal" compilation, // so that Cargo doesn’t recompile the entire dependency graph every time: // https://github.com/rust-lang/rust/issues/49215 cmd.env("RUSTFLAGS", flags); } if !build.fail_fast { cmd.arg("--no-fail-fast"); } cmd.arg("--").args(&build.config.cmd.test_args()); try_run(build, &mut cmd); } fn should_run(run: 
ShouldRun) -> ShouldRun { run.path("src/bootstrap") } fn make_run(run: RunConfig) { run.builder.ensure(Bootstrap); } }
31.742215
100
0.556113
29060cf6a9406edb0192901a684a8420faeb7fa4
5,132
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 // // Portions Copyright 2017 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the THIRD-PARTY file. //! Macros and wrapper functions for dealing with ioctls. use libc; use std::os::raw::{c_int, c_uint, c_ulong, c_void}; use std::os::unix::io::AsRawFd; /// Raw macro to declare a function that returns an ioctl number. #[macro_export] macro_rules! ioctl_ioc_nr { ($name:ident, $dir:expr, $ty:expr, $nr:expr, $size:expr) => { #[allow(non_snake_case)] pub fn $name() -> ::std::os::raw::c_ulong { (($dir << $crate::ioctl::_IOC_DIRSHIFT) | ($ty << $crate::ioctl::_IOC_TYPESHIFT) | ($nr << $crate::ioctl::_IOC_NRSHIFT) | ($size << $crate::ioctl::_IOC_SIZESHIFT)) as ::std::os::raw::c_ulong } }; } /// Declare an ioctl that transfers no data. #[macro_export] macro_rules! ioctl_io_nr { ($name:ident, $ty:expr, $nr:expr) => { ioctl_ioc_nr!($name, $crate::ioctl::_IOC_NONE, $ty, $nr, 0); }; } /// Declare an ioctl that reads data. #[macro_export] macro_rules! ioctl_ior_nr { ($name:ident, $ty:expr, $nr:expr, $size:ty) => { ioctl_ioc_nr!( $name, $crate::ioctl::_IOC_READ, $ty, $nr, ::std::mem::size_of::<$size>() as u32 ); }; } /// Declare an ioctl that writes data. #[macro_export] macro_rules! ioctl_iow_nr { ($name:ident, $ty:expr, $nr:expr, $size:ty) => { ioctl_ioc_nr!( $name, $crate::ioctl::_IOC_WRITE, $ty, $nr, ::std::mem::size_of::<$size>() as u32 ); }; } /// Declare an ioctl that reads and writes data. #[macro_export] macro_rules! 
ioctl_iowr_nr { ($name:ident, $ty:expr, $nr:expr, $size:ty) => { ioctl_ioc_nr!( $name, $crate::ioctl::_IOC_READ | $crate::ioctl::_IOC_WRITE, $ty, $nr, ::std::mem::size_of::<$size>() as u32 ); }; } pub const _IOC_NRBITS: c_uint = 8; pub const _IOC_TYPEBITS: c_uint = 8; pub const _IOC_SIZEBITS: c_uint = 14; pub const _IOC_DIRBITS: c_uint = 2; pub const _IOC_NRMASK: c_uint = 255; pub const _IOC_TYPEMASK: c_uint = 255; pub const _IOC_SIZEMASK: c_uint = 16383; pub const _IOC_DIRMASK: c_uint = 3; pub const _IOC_NRSHIFT: c_uint = 0; pub const _IOC_TYPESHIFT: c_uint = 8; pub const _IOC_SIZESHIFT: c_uint = 16; pub const _IOC_DIRSHIFT: c_uint = 30; pub const _IOC_NONE: c_uint = 0; pub const _IOC_WRITE: c_uint = 1; pub const _IOC_READ: c_uint = 2; pub const IOC_IN: c_uint = 1073741824; pub const IOC_OUT: c_uint = 2147483648; pub const IOC_INOUT: c_uint = 3221225472; pub const IOCSIZE_MASK: c_uint = 1073676288; pub const IOCSIZE_SHIFT: c_uint = 16; // The type of the `req` parameter is different for the `musl` library. This will enable // successful build for other non-musl libraries. #[cfg(target_env = "musl")] type IoctlRequest = c_int; #[cfg(not(target_env = "musl"))] type IoctlRequest = c_ulong; /// Run an ioctl with no arguments. pub unsafe fn ioctl<F: AsRawFd>(fd: &F, req: c_ulong) -> c_int { libc::ioctl(fd.as_raw_fd(), req as IoctlRequest, 0) } /// Run an ioctl with a single value argument. pub unsafe fn ioctl_with_val<F: AsRawFd>(fd: &F, req: c_ulong, arg: c_ulong) -> c_int { libc::ioctl(fd.as_raw_fd(), req as IoctlRequest, arg) } /// Run an ioctl with an immutable reference. pub unsafe fn ioctl_with_ref<F: AsRawFd, T>(fd: &F, req: c_ulong, arg: &T) -> c_int { libc::ioctl( fd.as_raw_fd(), req as IoctlRequest, arg as *const T as *const c_void, ) } /// Run an ioctl with a mutable reference. 
pub unsafe fn ioctl_with_mut_ref<F: AsRawFd, T>(fd: &F, req: c_ulong, arg: &mut T) -> c_int { libc::ioctl( fd.as_raw_fd(), req as IoctlRequest, arg as *mut T as *mut c_void, ) } /// Run an ioctl with a raw pointer. pub unsafe fn ioctl_with_ptr<F: AsRawFd, T>(fd: &F, req: c_ulong, arg: *const T) -> c_int { libc::ioctl(fd.as_raw_fd(), req as IoctlRequest, arg as *const c_void) } /// Run an ioctl with a mutable raw pointer. pub unsafe fn ioctl_with_mut_ptr<F: AsRawFd, T>(fd: &F, req: c_ulong, arg: *mut T) -> c_int { libc::ioctl(fd.as_raw_fd(), req as IoctlRequest, arg as *mut c_void) } #[cfg(test)] mod tests { const TUNTAP: ::std::os::raw::c_uint = 0x54; const KVMIO: ::std::os::raw::c_uint = 0xAE; ioctl_io_nr!(KVM_CREATE_VM, KVMIO, 0x01); ioctl_ior_nr!(TUNGETFEATURES, TUNTAP, 0xcf, ::std::os::raw::c_uint); ioctl_iow_nr!(TUNSETQUEUE, TUNTAP, 0xd9, ::std::os::raw::c_int); ioctl_iowr_nr!(KVM_GET_MSR_INDEX_LIST, KVMIO, 0x2, ::std::os::raw::c_int); #[test] fn ioctl_macros() { assert_eq!(0x0000AE01, KVM_CREATE_VM()); assert_eq!(0x800454CF, TUNGETFEATURES()); assert_eq!(0x400454D9, TUNSETQUEUE()); assert_eq!(0xC004AE02, KVM_GET_MSR_INDEX_LIST()); } }
31.875776
93
0.62841
56f2383ea668bc85bcd8b9fc67b1c79a7b2b5cbf
21,659
use crate::headers::CONTENT_MD5; use crate::{ core::{ConnectionString, No}, shared_access_signature::account_sas::{ AccountSharedAccessSignatureBuilder, ClientAccountSharedAccessSignature, }, }; use azure_core::headers::*; use azure_core::HttpClient; use bytes::Bytes; use http::{ header::*, method::Method, request::{Builder, Request}, }; use ring::hmac; use std::sync::Arc; use url::Url; /// The well-known account used by Azurite and the legacy Azure Storage Emulator. /// https://docs.microsoft.com/azure/storage/common/storage-use-azurite#well-known-storage-account-and-key pub const EMULATOR_ACCOUNT: &str = "devstoreaccount1"; /// The well-known account key used by Azurite and the legacy Azure Storage Emulator. /// https://docs.microsoft.com/azure/storage/common/storage-use-azurite#well-known-storage-account-and-key pub const EMULATOR_ACCOUNT_KEY: &str = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="; const HEADER_VERSION: &str = "x-ms-version"; const AZURE_VERSION: &str = "2019-12-12"; #[derive(Debug, Clone, PartialEq, Eq)] pub enum StorageCredentials { Key(String, String), SASToken(Vec<(String, String)>), BearerToken(String), } #[derive(Debug, Clone, Copy)] pub enum ServiceType { Blob, // Queue, // File, Table, } #[derive(Debug, Clone)] pub struct StorageAccountClient { storage_credentials: StorageCredentials, http_client: Arc<dyn HttpClient>, blob_storage_url: Url, table_storage_url: Url, queue_storage_url: Url, queue_storage_secondary_url: Url, filesystem_url: Url, account: String, } fn get_sas_token_parms(sas_token: &str) -> Result<Vec<(String, String)>, url::ParseError> { // Any base url will do: we just need to parse the SAS token // to get its query pairs. let base_url = Url::parse("https://blob.core.windows.net")?; let url = Url::options().base_url(Some(&base_url)); // this code handles the leading ? 
// we support both with or without let url = if sas_token.starts_with('?') { url.parse(sas_token) } else { url.parse(&format!("?{}", sas_token)) }?; Ok(url .query_pairs() .map(|p| (String::from(p.0), String::from(p.1))) .collect()) } impl StorageAccountClient { pub fn new_access_key<A, K>(http_client: Arc<dyn HttpClient>, account: A, key: K) -> Arc<Self> where A: Into<String>, K: Into<String>, { let account = account.into(); Arc::new(Self { blob_storage_url: Url::parse(&format!("https://{}.blob.core.windows.net", &account)) .unwrap(), table_storage_url: Url::parse(&format!("https://{}.table.core.windows.net", &account)) .unwrap(), queue_storage_url: Url::parse(&format!("https://{}.queue.core.windows.net", &account)) .unwrap(), queue_storage_secondary_url: Url::parse(&format!( "https://{}-secondary.queue.core.windows.net", &account )) .unwrap(), filesystem_url: Url::parse(&format!("https://{}.dfs.core.windows.net", &account)) .unwrap(), storage_credentials: StorageCredentials::Key(account.clone(), key.into()), http_client, account, }) } /// Create a new client for customized emulator endpoints. pub fn new_emulator( http_client: Arc<dyn HttpClient>, blob_storage_url: &Url, table_storage_url: &Url, queue_storage_url: &Url, filesystem_url: &Url, ) -> Arc<Self> { Self::new_emulator_with_account( http_client, blob_storage_url, table_storage_url, queue_storage_url, filesystem_url, EMULATOR_ACCOUNT, EMULATOR_ACCOUNT_KEY, ) } /// Create a new client using the default HttpClient and the default emulator endpoints. 
pub fn new_emulator_default() -> Arc<Self> { let http_client = azure_core::new_http_client(); let blob_storage_url = Url::parse("http://127.0.0.1:10000").unwrap(); let queue_storage_url = Url::parse("http://127.0.0.1:10001").unwrap(); let table_storage_url = Url::parse("http://127.0.0.1:10002").unwrap(); let filesystem_url = Url::parse("http://127.0.0.1:10004").unwrap(); Self::new_emulator( http_client, &blob_storage_url, &table_storage_url, &queue_storage_url, &filesystem_url, ) } pub fn new_emulator_with_account<A, K>( http_client: Arc<dyn HttpClient>, blob_storage_url: &Url, table_storage_url: &Url, queue_storage_url: &Url, filesystem_url: &Url, account: A, key: K, ) -> Arc<Self> where A: Into<String>, K: Into<String>, { let account = account.into(); let blob_storage_url = Url::parse(&format!("{}{}", blob_storage_url.as_str(), account)).unwrap(); let table_storage_url = Url::parse(&format!("{}{}", table_storage_url.as_str(), account)).unwrap(); let queue_storage_url = Url::parse(&format!("{}{}", queue_storage_url.as_str(), account)).unwrap(); let filesystem_url = Url::parse(&format!("{}{}", filesystem_url.as_str(), account)).unwrap(); Arc::new(Self { blob_storage_url, table_storage_url, queue_storage_url: queue_storage_url.clone(), queue_storage_secondary_url: queue_storage_url, filesystem_url, storage_credentials: StorageCredentials::Key(account.clone(), key.into()), http_client, account, }) } pub fn new_sas_token<A, S>( http_client: Arc<dyn HttpClient>, account: A, sas_token: S, ) -> Result<Arc<Self>, url::ParseError> where A: Into<String>, S: AsRef<str>, { let account = account.into(); Ok(Arc::new(Self { blob_storage_url: Url::parse(&format!("https://{}.blob.core.windows.net", &account))?, table_storage_url: Url::parse(&format!("https://{}.table.core.windows.net", &account))?, queue_storage_url: Url::parse(&format!("https://{}.queue.core.windows.net", &account))?, queue_storage_secondary_url: Url::parse(&format!( "https://{}-secondary.queue.core.windows.net", 
&account ))?, filesystem_url: Url::parse(&format!("https://{}.dfs.core.windows.net", &account))?, storage_credentials: StorageCredentials::SASToken(get_sas_token_parms( sas_token.as_ref(), )?), http_client, account, })) } pub fn new_bearer_token<A, BT>( http_client: Arc<dyn HttpClient>, account: A, bearer_token: BT, ) -> Arc<Self> where A: Into<String>, BT: Into<String>, { let account = account.into(); let bearer_token = bearer_token.into(); Arc::new(Self { blob_storage_url: Url::parse(&format!("https://{}.blob.core.windows.net", &account)) .unwrap(), table_storage_url: Url::parse(&format!("https://{}.table.core.windows.net", &account)) .unwrap(), queue_storage_url: Url::parse(&format!("https://{}.queue.core.windows.net", &account)) .unwrap(), queue_storage_secondary_url: Url::parse(&format!( "https://{}-secondary.queue.core.windows.net", &account )) .unwrap(), filesystem_url: Url::parse(&format!("https://{}.dfs.core.windows.net", &account)) .unwrap(), storage_credentials: StorageCredentials::BearerToken(bearer_token), http_client, account, }) } pub fn new_connection_string( http_client: Arc<dyn HttpClient>, connection_string: &str, ) -> crate::Result<Arc<Self>> { match ConnectionString::new(connection_string)? { ConnectionString { account_name: Some(account), account_key: Some(_), sas: Some(sas_token), blob_endpoint, table_endpoint, queue_endpoint, file_endpoint, .. } => { log::warn!("Both account key and SAS defined in connection string. 
Using only the provided SAS."); Ok(Arc::new(Self { storage_credentials: StorageCredentials::SASToken(get_sas_token_parms( sas_token, )?), blob_storage_url: get_endpoint_uri(blob_endpoint, account, "blob")?, table_storage_url: get_endpoint_uri(table_endpoint, account, "table")?, queue_storage_url: get_endpoint_uri(queue_endpoint, account, "queue")?, queue_storage_secondary_url: get_endpoint_uri(queue_endpoint, &format!("{}-secondary", account), "queue")?, filesystem_url: get_endpoint_uri(file_endpoint, account, "dfs")?, http_client, account: account.to_string(), })) } ConnectionString { account_name: Some(account), sas: Some(sas_token), blob_endpoint, table_endpoint, queue_endpoint, file_endpoint, .. } => Ok(Arc::new(Self { storage_credentials: StorageCredentials::SASToken(get_sas_token_parms(sas_token)?), blob_storage_url: get_endpoint_uri(blob_endpoint, account, "blob")?, table_storage_url: get_endpoint_uri(table_endpoint, account, "table")?, queue_storage_url: get_endpoint_uri(queue_endpoint, account, "queue")?, queue_storage_secondary_url: get_endpoint_uri(queue_endpoint, &format!("{}-secondary", account), "queue")?, filesystem_url: get_endpoint_uri(file_endpoint, account, "dfs")?, http_client, account: account.to_string(), })), ConnectionString { account_name: Some(account), account_key: Some(key), blob_endpoint, table_endpoint, queue_endpoint, file_endpoint, .. 
} => Ok(Arc::new(Self { storage_credentials: StorageCredentials::Key(account.to_owned(), key.to_owned()), blob_storage_url: get_endpoint_uri(blob_endpoint, account, "blob")?, table_storage_url: get_endpoint_uri(table_endpoint, account, "table")?, queue_storage_url: get_endpoint_uri(queue_endpoint, account, "queue")?, queue_storage_secondary_url: get_endpoint_uri(queue_endpoint, &format!("{}-secondary", account), "queue")?, filesystem_url: get_endpoint_uri(file_endpoint, account, "dfs")?, http_client, account: account.to_string(), })), _ => { Err(crate::Error::GenericErrorWithText( "Could not create a storage client from the provided connection string. Please validate that you have specified the account name and means of authentication (key, SAS, etc.)." .to_owned(), )) } } } pub fn http_client(&self) -> &dyn HttpClient { self.http_client.as_ref() } pub fn blob_storage_url(&self) -> &Url { &self.blob_storage_url } pub fn table_storage_url(&self) -> &Url { &self.table_storage_url } pub fn queue_storage_url(&self) -> &Url { &self.queue_storage_url } pub fn queue_storage_secondary_url(&self) -> &Url { &self.queue_storage_secondary_url } pub fn filesystem_url(&self) -> &Url { &self.filesystem_url } pub fn account(&self) -> &str { &self.account } pub fn storage_credentials(&self) -> &StorageCredentials { &self.storage_credentials } pub fn prepare_request( &self, url: &str, method: &Method, http_header_adder: &dyn Fn(Builder) -> Builder, service_type: ServiceType, request_body: Option<Bytes>, ) -> crate::Result<(Request<Bytes>, url::Url)> { let dt = chrono::Utc::now(); let time = format!("{}", dt.format("%a, %d %h %Y %T GMT")); let mut url = url::Url::parse(url)?; // if we have a SAS token (in form of query pairs), let's add it to the url here if let StorageCredentials::SASToken(query_pairs) = &self.storage_credentials { for (k, v) in query_pairs { url.query_pairs_mut().append_pair(k, v); } } let mut request = Request::builder(); request = 
request.method(method).uri(url.as_str()); // let's add content length to avoid "chunking" errors. request = match request_body { Some(ref b) => request.header(CONTENT_LENGTH, &b.len().to_string() as &str), None => request.header(CONTENT_LENGTH, "0"), }; // This will give the caller the ability to add custom headers. // The closure is needed to because request.headers_mut().set_raw(...) requires // a Cow with 'static lifetime... request = http_header_adder(request); request = request .header(MS_DATE, time) .header(HEADER_VERSION, AZURE_VERSION); // We sign the request only if it is not already signed (with the signature of an // SAS token for example) let request = match &self.storage_credentials { StorageCredentials::Key(account, key) => { if !url.query_pairs().any(|(k, _)| k == "sig") { let auth = generate_authorization( request.headers_ref().unwrap(), &url, method, account, key, service_type, ); request.header(AUTHORIZATION, auth) } else { request } } StorageCredentials::SASToken(_query_pairs) => { // no headers to add here, the authentication // is in the URL request } StorageCredentials::BearerToken(token) => { request.header(AUTHORIZATION, format!("Bearer {}", token)) } }; let request = if let Some(request_body) = request_body { request.body(request_body) } else { request.body(azure_core::EMPTY_BODY) }?; debug!("using request == {:#?}", request); Ok((request, url)) } } impl ClientAccountSharedAccessSignature for StorageAccountClient { fn shared_access_signature( &self, ) -> Result<AccountSharedAccessSignatureBuilder<No, No, No, No>, crate::Error> { match self.storage_credentials { StorageCredentials::Key(ref account, ref key) => { Ok(AccountSharedAccessSignatureBuilder::new(account, key)) } _ => Err(crate::Error::OperationNotSupported( "Shared access signature generation".to_owned(), "SAS can be generated only from key and account clients".to_owned(), )), } } } fn generate_authorization( h: &HeaderMap, u: &url::Url, method: &Method, account: &str, key: &str, 
service_type: ServiceType, ) -> String { let str_to_sign = string_to_sign(h, u, method, account, service_type); // debug!("\nstr_to_sign == {:?}\n", str_to_sign); // debug!("str_to_sign == {}", str_to_sign); let auth = encode_str_to_sign(&str_to_sign, key); // debug!("auth == {:?}", auth); format!("SharedKey {}:{}", account, auth) } fn encode_str_to_sign(str_to_sign: &str, hmac_key: &str) -> String { let key = hmac::Key::new(ring::hmac::HMAC_SHA256, &base64::decode(hmac_key).unwrap()); let sig = hmac::sign(&key, str_to_sign.as_bytes()); // let res = hmac.result(); // debug!("{:?}", res.code()); base64::encode(sig.as_ref()) } fn add_if_exists<K: AsHeaderName>(h: &HeaderMap, key: K) -> &str { match h.get(key) { Some(ce) => ce.to_str().unwrap(), None => "", } } #[allow(unknown_lints)] fn string_to_sign( h: &HeaderMap, u: &url::Url, method: &Method, account: &str, service_type: ServiceType, ) -> String { match service_type { ServiceType::Table => { format!( "{}\n{}\n{}\n{}\n{}", method.as_str(), add_if_exists(h, CONTENT_MD5), add_if_exists(h, CONTENT_TYPE), add_if_exists(h, MS_DATE), canonicalized_resource_table(account, u) ) } _ => { // content lenght must only be specified if != 0 // this is valid from 2015-02-21 let cl = h .get(CONTENT_LENGTH) .map(|s| if s == "0" { "" } else { s.to_str().unwrap() }) .unwrap_or(""); format!( "{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}\n{}{}", method.as_str(), add_if_exists(h, CONTENT_ENCODING), add_if_exists(h, CONTENT_LANGUAGE), cl, add_if_exists(h, CONTENT_MD5), add_if_exists(h, CONTENT_TYPE), add_if_exists(h, DATE), add_if_exists(h, IF_MODIFIED_SINCE), add_if_exists(h, IF_MATCH), add_if_exists(h, IF_NONE_MATCH), add_if_exists(h, IF_UNMODIFIED_SINCE), add_if_exists(h, RANGE), canonicalize_header(h), canonicalized_resource(account, u) ) } } // expected // GET\n /*HTTP Verb*/ // \n /*Content-Encoding*/ // \n /*Content-Language*/ // \n /*Content-Length (include value when zero)*/ // \n /*Content-MD5*/ // \n /*Content-Type*/ // \n 
/*Date*/ // \n /*If-Modified-Since */ // \n /*If-Match*/ // \n /*If-None-Match*/ // \n /*If-Unmodified-Since*/ // \n /*Range*/ // x-ms-date:Sun, 11 Oct 2009 21:49:13 GMT\nx-ms-version:2009-09-19\n // /*CanonicalizedHeaders*/ // /myaccount /mycontainer\ncomp:metadata\nrestype:container\ntimeout:20 // /*CanonicalizedResource*/ // // } fn canonicalize_header(h: &HeaderMap) -> String { let mut v_headers = h .iter() .filter(|(k, _v)| k.as_str().starts_with("x-ms")) .map(|(k, _)| k.as_str()) .collect::<Vec<_>>(); v_headers.sort_unstable(); let mut can = String::new(); for header_name in v_headers { let s = h.get(header_name).unwrap().to_str().unwrap(); can = can + header_name + ":" + s + "\n"; } can } // For table fn canonicalized_resource_table(account: &str, u: &url::Url) -> String { format!("/{}{}", account, u.path()) } fn canonicalized_resource(account: &str, u: &url::Url) -> String { let mut can_res: String = String::new(); can_res += "/"; can_res += account; let paths = u.path_segments().unwrap(); for p in paths { can_res.push('/'); can_res.push_str(&*p); } can_res += "\n"; // query parameters let query_pairs = u.query_pairs(); //.into_owned(); { let mut qps = Vec::new(); { for (q, _p) in query_pairs { trace!("adding to qps {:?}", q); // add only once if !(qps.iter().any(|x: &String| x == q.as_ref())) { qps.push(q.into_owned()); } } } qps.sort(); for qparam in qps { // find correct parameter let ret = lexy_sort(&query_pairs, &qparam); // debug!("adding to can_res {:?}", ret); can_res = can_res + &qparam.to_lowercase() + ":"; for (i, item) in ret.iter().enumerate() { if i > 0 { can_res += "," } can_res += item; } can_res += "\n"; } }; can_res[0..can_res.len() - 1].to_owned() } fn lexy_sort<'a>( vec: &'a url::form_urlencoded::Parse, query_param: &str, ) -> Vec<std::borrow::Cow<'a, str>> { let mut v_values = Vec::new(); for item in vec.filter(|x| x.0 == *query_param) { v_values.push(item.1) } v_values.sort(); v_values } fn get_endpoint_uri<URL>( url: Option<URL>, 
account: &str, endpoint_type: &str, ) -> Result<url::Url, url::ParseError> where URL: AsRef<str>, { Ok(match url { Some(value) => url::Url::parse(value.as_ref())?, None => url::Url::parse(&format!( "https://{}.{}.core.windows.net", account, endpoint_type ))?, }) }
33.476043
195
0.546932
7922cd6994693da7b07db484056f8f3d87508056
2,472
#![cfg(min_const_generics)] //! Parallel iterator types for [arrays] (`[T; N]`) //! //! You will rarely need to interact with this module directly unless you need //! to name one of the iterator types. //! //! Everything in this module requires const generics, stabilized in Rust 1.51. //! //! [arrays]: https://doc.rust-lang.org/std/primitive.array.html use crate::iter::plumbing::*; use crate::iter::*; use crate::slice::{Iter, IterMut}; use crate::vec::DrainProducer; use std::mem::ManuallyDrop; /// This implementation requires const generics, stabilized in Rust 1.51. impl<'data, T: Sync + 'data, const N: usize> IntoParallelIterator for &'data [T; N] { type Item = &'data T; type Iter = Iter<'data, T>; fn into_par_iter(self) -> Self::Iter { <&[T]>::into_par_iter(self) } } /// This implementation requires const generics, stabilized in Rust 1.51. impl<'data, T: Send + 'data, const N: usize> IntoParallelIterator for &'data mut [T; N] { type Item = &'data mut T; type Iter = IterMut<'data, T>; fn into_par_iter(self) -> Self::Iter { <&mut [T]>::into_par_iter(self) } } /// This implementation requires const generics, stabilized in Rust 1.51. impl<T: Send, const N: usize> IntoParallelIterator for [T; N] { type Item = T; type Iter = IntoIter<T, N>; fn into_par_iter(self) -> Self::Iter { IntoIter { array: self } } } /// Parallel iterator that moves out of an array. 
#[derive(Debug, Clone)] pub struct IntoIter<T: Send, const N: usize> { array: [T; N], } impl<T: Send, const N: usize> ParallelIterator for IntoIter<T, N> { type Item = T; fn drive_unindexed<C>(self, consumer: C) -> C::Result where C: UnindexedConsumer<Self::Item>, { bridge(self, consumer) } fn opt_len(&self) -> Option<usize> { Some(N) } } impl<T: Send, const N: usize> IndexedParallelIterator for IntoIter<T, N> { fn drive<C>(self, consumer: C) -> C::Result where C: Consumer<Self::Item>, { bridge(self, consumer) } fn len(&self) -> usize { N } fn with_producer<CB>(self, callback: CB) -> CB::Output where CB: ProducerCallback<Self::Item>, { unsafe { // Drain every item, and then the local array can just fall out of scope. let mut array = ManuallyDrop::new(self.array); callback.callback(DrainProducer::new(&mut *array)) } } }
27.164835
89
0.618528
ab6fb26fae264c64a4d188b848cb22f7852b1152
817
extern crate rand; use std::io; use std::cmp::Ordering; use rand::Rng; fn main() { println!("Guess the number!"); let secret_number = rand::thread_rng().gen_range(1, 101); loop { println!("Please input your guess."); let mut guess = String::new(); io::stdin().read_line(&mut guess) .expect("Failed to read line"); let guess: u32 = match guess.trim().parse() { Ok(num) => num, Err(_) => continue, }; println!("You guessed: {}", guess); match guess.cmp(&secret_number) { Ordering::Less => println!("Too small!"), Ordering::Greater => println!("Too big!"), Ordering::Equal => { println!("You win!"); break; } } } }
22.081081
61
0.49082
e80a133f6fb91fad439995ec5203ddea74b590a8
10,457
use crate::{dep_types::Req, util}; use crossterm::Color; use regex::Regex; use std::collections::HashMap; use std::{env, fs, path::PathBuf, process::Command}; // https://packaging.python.org/tutorials/packaging-projects/ /// Serialize to a python list of strings. fn serialize_py_list(items: &[String], indent_level: u8) -> String { let mut pad = "".to_string(); for _ in 0..indent_level { pad.push_str(" "); } let mut result = "[\n".to_string(); for item in items.iter() { result.push_str(&format!("{} \"{}\",\n", &pad, item)); } result.push_str(&pad); result.push(']'); result } /// Serialize to a Python dict of lists of strings. fn _serialize_py_dict(hm: &HashMap<String, Vec<String>>) -> String { let mut result = "{\n".to_string(); for (key, val) in hm.iter() { result.push_str(&format!(" \"{}\": {}\n", key, serialize_py_list(val, 0))); } result.push('}'); result } /// Serialize to a Python dict of strings. //fn serialize_scripts(hm: &HashMap<String, String>) -> String { // let mut result = "{\n".to_string(); // // for (key, val) in hm.iter() { // result.push_str(&format!(" \"{}\": {}\n", key, serialize_py_list(val))); // } // result.push('}'); // result //} ///// A different format, as used in console_scripts //fn serialize_py_dict2(hm: &HashMap<String, String>) -> String { // let mut result = "{\n".to_string(); // for (key, val) in hm.iter() { // result.push_str(&format!(" \"{}\": {}\n", key, serialize_py_list(val))); // } // result.push('}'); // result //} fn cfg_to_setup(cfg: &crate::Config) -> String { let cfg = cfg.clone(); let version = match cfg.version { Some(v) => v.to_string(), None => "".into(), }; let mut keywords = String::new(); for (i, kw) in cfg.keywords.iter().enumerate() { if i != 0 { keywords.push_str(" "); } keywords.push_str(kw); } let author_re = Regex::new(r"^(.*?)\s*(?:<(.*?)>)?\s*$").unwrap(); let mut author = "".to_string(); let mut author_email = "".to_string(); if let Some(first) = cfg.authors.get(0) { let caps = if let Some(c) = 
author_re.captures(first) { c } else { util::abort(&format!( "Problem parsing the `authors` field in `pyproject.toml`: {:?}", &cfg.authors )); unreachable!() }; author = caps.get(1).unwrap().as_str().to_owned(); author_email = caps.get(2).unwrap().as_str().to_owned(); } let deps: Vec<String> = cfg.reqs.iter().map(Req::to_setup_py_string).collect(); // todo: Entry pts! format!( r#"import setuptools with open("{}", "r") as fh: long_description = fh.read() setuptools.setup( name="{}", version="{}", author="{}", author_email="{}", license="{}", description="{}", long_description=long_description, long_description_content_type="text/markdown", url="{}", packages=setuptools.find_packages(), keywords="{}", classifiers={}, python_requires="{}", install_requires={}, ) "#, // entry_points={{ // "console_scripts": , // }}, cfg.readme_filename.unwrap_or_else(|| "README.md".into()), cfg.name.unwrap_or_else(|| "".into()), version, author, author_email, cfg.license.unwrap_or_else(|| "".into()), cfg.description.unwrap_or_else(|| "".into()), cfg.homepage.unwrap_or_else(|| "".into()), keywords, serialize_py_list(&cfg.classifiers, 1), // serialize_py_list(&cfg.console_scripts), cfg.python_requires.unwrap_or_else(|| "".into()), serialize_py_list(&deps, 1), // todo: // extras_require="{}", // match cfg.extras { // Some(e) => serialize_py_dict(&e), // None => "".into(), // } ) } /// Creates a temporary file which imitates setup.py fn create_dummy_setup(cfg: &crate::Config, filename: &str) { fs::write(filename, cfg_to_setup(cfg)).expect("Problem writing dummy setup.py"); if util::wait_for_dirs(&[env::current_dir() .expect("Problem finding current dir") .join(filename)]) .is_err() { util::abort("Problem waiting for setup.py to be created.") }; } pub(crate) fn build( lockpacks: &[crate::dep_types::LockPackage], bin_path: &PathBuf, lib_path: &PathBuf, cfg: &crate::Config, _extras: &[String], ) { for lp in lockpacks.iter() { if lp.rename.is_some() { // if lockpacks.iter().any(|lp| 
lp.rename.is_some()) { util::abort(&format!( "{} is installed with multiple versions. We can't create a package that \ relies on multiple versions of a dependency - \ this would cause this package not work work correctly if not used with pyflow.", lp.name )) } } // todo: Install twine and its dependencies directly. This is the only place we need pip currently. let dummy_setup_fname = "setup_temp_pyflow.py"; Command::new(bin_path.join("python")) .args(&["-m", "pip", "install", "twine"]) .status() .expect("Problem installing Twine"); create_dummy_setup(cfg, dummy_setup_fname); util::set_pythonpath(&[lib_path.to_owned()]); println!("🛠️️ Building the package..."); Command::new(bin_path.join("python")) .args(&[dummy_setup_fname, "sdist", "bdist_wheel"]) .status() .expect("Problem building"); util::print_color("Build complete.", Color::Green); if fs::remove_file(dummy_setup_fname).is_err() { println!("Problem removing temporary setup file while building ") }; } pub(crate) fn publish(bin_path: &PathBuf, cfg: &crate::Config) { let repo_url = match cfg.package_url.clone() { Some(pu) => { let mut r = pu; if !r.ends_with('/') { r.push('/'); } r } None => "https://test.pypi.org/legacy/".to_string(), }; println!("Uploading to {}", repo_url); Command::new(bin_path.join("twine")) .args(&["upload", "--repository-url", &repo_url, "dist/*"]) .status() .expect("Problem publishing"); } #[cfg(test)] pub mod test { use super::*; use crate::dep_types::{ Constraint, Req, ReqType::{Caret, Exact}, Version, }; #[test] fn setup_creation() { let mut scripts = HashMap::new(); scripts.insert("activate".into(), "jeejah:activate".into()); let cfg = crate::Config { name: Some("everythingkiller".into()), py_version: Some(Version::new_short(3, 6)), version: Some(Version::new_short(0, 1)), authors: vec!["Fraa Erasmas <[email protected]>".into()], homepage: Some("https://everything.math".into()), description: Some("Small, but packs a punch!".into()), repository: 
Some("https://github.com/raz/everythingkiller".into()), license: Some("MIT".into()), keywords: vec!["nanotech".into(), "weapons".into()], classifiers: vec![ "Topic :: System :: Hardware".into(), "Topic :: Scientific/Engineering :: Human Machine Interfaces".into(), ], python_requires: Some(">=3.6".into()), package_url: Some("https://upload.pypi.org/legacy/".into()), scripts, readme_filename: Some("README.md".into()), reqs: vec![ Req::new( "numpy".into(), vec![Constraint::new(Caret, Version::new(1, 16, 4))], ), Req::new( "manimlib".into(), vec![Constraint::new(Exact, Version::new(0, 1, 8))], ), Req::new( "ipython".into(), vec![Constraint::new(Caret, Version::new(7, 7, 0))], ), ], dev_reqs: vec![Req::new( "black".into(), vec![Constraint::new(Caret, Version::new(18, 0, 0))], )], extras: HashMap::new(), repo_url: None, }; let expected = r#"import setuptools with open("README.md", "r") as fh: long_description = fh.read() setuptools.setup( name="everythingkiller", version="0.1.0", author="Fraa Erasmas", author_email="[email protected]", license="MIT", description="Small, but packs a punch!", long_description=long_description, long_description_content_type="text/markdown", url="https://everything.math", packages=setuptools.find_packages(), keywords="nanotech weapons", classifiers=[ "Topic :: System :: Hardware", "Topic :: Scientific/Engineering :: Human Machine Interfaces", ], python_requires=">=3.6", install_requires=[ "numpy>=1.16.4", "manimlib==0.1.8", "ipython>=7.7.0", ], ) "#; assert_eq!(expected, &cfg_to_setup(&cfg)); } #[test] fn py_list() { let expected = r#"[ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ]"#; let actual = serialize_py_list( &vec![ "Programming Language :: Python :: 3".into(), "License :: OSI Approved :: MIT License".into(), "Operating System :: OS Independent".into(), ], 0, ); assert_eq!(expected, actual); } // todo: Re-impl if you end up using this // #[test] // fn py_dict() 
{ // let expected = r#"{ // "PDF": [ // "ReportLab>=1.2", // "RXP" // ], // "reST": [ // "docutils>=0.3" // ], // }"#; // // let mut data = HashMap::new(); // data.insert("PDF".into(), vec!["ReportLab>=1.2".into(), "RXP".into()]); // data.insert("reST".into(), vec!["docutils>=0.3".into()]); // // assert_eq!(expected, serialize_py_dict(&data)); // } }
30.665689
103
0.52778
e66ba98c0288076f7ae1de3521ecb5b72fd7ffce
64,317
//! Generate Rust bindings for C and C++ libraries. //! //! Provide a C/C++ header file, receive Rust FFI code to call into C/C++ //! functions and use types defined in the header. //! //! See the [`Builder`](./struct.Builder.html) struct for usage. //! //! See the [Users Guide](https://rust-lang-nursery.github.io/rust-bindgen/) for //! additional documentation. #![deny(missing_docs)] #![deny(warnings)] #![deny(unused_extern_crates)] // To avoid rather annoying warnings when matching with CXCursor_xxx as a // constant. #![allow(non_upper_case_globals)] // `quote!` nests quite deeply. #![recursion_limit="128"] extern crate cexpr; #[macro_use] #[allow(unused_extern_crates)] extern crate cfg_if; extern crate clang_sys; #[macro_use] extern crate lazy_static; extern crate peeking_take_while; #[macro_use] extern crate quote; extern crate proc_macro2; extern crate regex; extern crate which; #[cfg(feature = "logging")] #[macro_use] extern crate log; #[cfg(not(feature = "logging"))] #[macro_use] mod log_stubs; #[macro_use] mod extra_assertions; // A macro to declare an internal module for which we *must* provide // documentation for. If we are building with the "testing_only_docs" feature, // then the module is declared public, and our `#![deny(missing_docs)]` pragma // applies to it. This feature is used in CI, so we won't let anything slip by // undocumented. Normal builds, however, will leave the module private, so that // we don't expose internals to library consumers. macro_rules! doc_mod { ($m:ident, $doc_mod_name:ident) => { cfg_if! { if #[cfg(feature = "testing_only_docs")] { pub mod $doc_mod_name { //! Autogenerated documentation module. 
pub use super::$m::*; } } else { } } }; } mod clang; mod codegen; mod features; mod ir; mod parse; mod regex_set; mod time; pub mod callbacks; doc_mod!(clang, clang_docs); doc_mod!(features, features_docs); doc_mod!(ir, ir_docs); doc_mod!(parse, parse_docs); doc_mod!(regex_set, regex_set_docs); pub use features::{LATEST_STABLE_RUST, RUST_TARGET_STRINGS, RustTarget}; use features::RustFeatures; use ir::context::{BindgenContext, ItemId}; use ir::item::Item; use parse::{ClangItemParser, ParseError}; use regex_set::RegexSet; pub use codegen::EnumVariation; use std::borrow::Cow; use std::collections::HashMap; use std::fs::{File, OpenOptions}; use std::io::{self, Write}; use std::iter; use std::path::{Path, PathBuf}; use std::process::{Command, Stdio}; use std::sync::Arc; fn args_are_cpp(clang_args: &[String]) -> bool { return clang_args .windows(2) .any(|w| w[0] == "-x=c++" || w[1] == "-x=c++" || w == &["-x", "c++"]); } /// A type used to indicate which kind of items do we have to generate. /// /// TODO(emilio): Use `bitflags!` #[derive(Debug, Clone)] pub struct CodegenConfig { /// Whether to generate functions. pub functions: bool, /// Whether to generate types. pub types: bool, /// Whether to generate constants. pub vars: bool, /// Whether to generate methods. pub methods: bool, /// Whether to generate constructors. pub constructors: bool, /// Whether to generate destructors. pub destructors: bool, } impl CodegenConfig { /// Generate all kinds of items. pub fn all() -> Self { CodegenConfig { functions: true, types: true, vars: true, methods: true, constructors: true, destructors: true, } } /// Generate nothing. pub fn nothing() -> Self { CodegenConfig { functions: false, types: false, vars: false, methods: false, constructors: false, destructors: false, } } } impl Default for CodegenConfig { fn default() -> Self { CodegenConfig::all() } } /// Configure and generate Rust bindings for a C/C++ header. /// /// This is the main entry point to the library. 
/// /// ```ignore /// use bindgen::builder; /// /// // Configure and generate bindings. /// let bindings = builder().header("path/to/input/header") /// .whitelisted_type("SomeCoolClass") /// .whitelisted_function("do_some_cool_thing") /// .generate()?; /// /// // Write the generated bindings to an output file. /// bindings.write_to_file("path/to/output.rs")?; /// ``` /// /// # Enums /// /// Bindgen can map C/C++ enums into Rust in different ways. The way bindgen maps enums depends on /// the pattern passed to several methods: /// /// 1. [`constified_enum_module()`](#method.constified_enum_module) /// 2. [`bitfield_enum()`](#method.bitfield_enum) /// 3. [`rustified_enum()`](#method.rustified_enum) /// /// For each C enum, bindgen tries to match the pattern in the following order: /// /// 1. Constified enum module /// 2. Bitfield enum /// 3. Rustified enum /// /// If none of the above patterns match, then bindgen will generate a set of Rust constants. #[derive(Debug, Default)] pub struct Builder { options: BindgenOptions, input_headers: Vec<String>, // Tuples of unsaved file contents of the form (name, contents). input_header_contents: Vec<(String, String)>, } /// Construct a new [`Builder`](./struct.Builder.html). pub fn builder() -> Builder { Default::default() } impl Builder { /// Generates the command line flags use for creating `Builder`. 
pub fn command_line_flags(&self) -> Vec<String> { let mut output_vector: Vec<String> = Vec::new(); if let Some(header) = self.input_headers.last().cloned() { // Positional argument 'header' output_vector.push(header); } output_vector.push("--rust-target".into()); output_vector.push(self.options.rust_target.into()); if self.options.default_enum_style != Default::default() { output_vector.push("--default-enum-variant=".into()); output_vector.push(match self.options.default_enum_style { codegen::EnumVariation::Rust => "rust", codegen::EnumVariation::Bitfield => "bitfield", codegen::EnumVariation::Consts => "consts", codegen::EnumVariation::ModuleConsts => "moduleconsts", }.into()) } self.options .bitfield_enums .get_items() .iter() .map(|item| { output_vector.push("--bitfield-enum".into()); output_vector.push( item.trim_left_matches("^") .trim_right_matches("$") .into(), ); }) .count(); self.options .rustified_enums .get_items() .iter() .map(|item| { output_vector.push("--rustified-enum".into()); output_vector.push( item.trim_left_matches("^") .trim_right_matches("$") .into(), ); }) .count(); self.options .constified_enum_modules .get_items() .iter() .map(|item| { output_vector.push("--constified-enum-module".into()); output_vector.push( item.trim_left_matches("^") .trim_right_matches("$") .into(), ); }) .count(); self.options .constified_enums .get_items() .iter() .map(|item| { output_vector.push("--constified-enum".into()); output_vector.push( item.trim_left_matches("^") .trim_right_matches("$") .into(), ); }) .count(); self.options .blacklisted_types .get_items() .iter() .map(|item| { output_vector.push("--blacklist-type".into()); output_vector.push( item.trim_left_matches("^") .trim_right_matches("$") .into(), ); }) .count(); if !self.options.layout_tests { output_vector.push("--no-layout-tests".into()); } if self.options.impl_debug { output_vector.push("--impl-debug".into()); } if self.options.impl_partialeq { output_vector.push("--impl-partialeq".into()); } if 
!self.options.derive_copy { output_vector.push("--no-derive-copy".into()); } if !self.options.derive_debug { output_vector.push("--no-derive-debug".into()); } if !self.options.derive_default { output_vector.push("--no-derive-default".into()); } else { output_vector.push("--with-derive-default".into()); } if self.options.derive_hash { output_vector.push("--with-derive-hash".into()); } if self.options.derive_partialord { output_vector.push("--with-derive-partialord".into()); } if self.options.derive_ord { output_vector.push("--with-derive-ord".into()); } if self.options.derive_partialeq { output_vector.push("--with-derive-partialeq".into()); } if self.options.derive_eq { output_vector.push("--with-derive-eq".into()); } if self.options.time_phases { output_vector.push("--time-phases".into()); } if !self.options.generate_comments { output_vector.push("--no-doc-comments".into()); } if !self.options.whitelist_recursively { output_vector.push("--no-recursive-whitelist".into()); } if self.options.objc_extern_crate { output_vector.push("--objc-extern-crate".into()); } if self.options.builtins { output_vector.push("--builtins".into()); } if let Some(ref prefix) = self.options.ctypes_prefix { output_vector.push("--ctypes-prefix".into()); output_vector.push(prefix.clone()); } if self.options.emit_ast { output_vector.push("--emit-clang-ast".into()); } if self.options.emit_ir { output_vector.push("--emit-ir".into()); } if let Some(ref graph) = self.options.emit_ir_graphviz { output_vector.push("--emit-ir-graphviz".into()); output_vector.push(graph.clone()) } if self.options.enable_cxx_namespaces { output_vector.push("--enable-cxx-namespaces".into()); } if self.options.disable_name_namespacing { output_vector.push("--disable-name-namespacing".into()); } if !self.options.codegen_config.functions { output_vector.push("--ignore-functions".into()); } output_vector.push("--generate".into()); //Temporary placeholder for below 4 options let mut options: Vec<String> = Vec::new(); if 
self.options.codegen_config.functions { options.push("function".into()); } if self.options.codegen_config.types { options.push("types".into()); } if self.options.codegen_config.vars { options.push("vars".into()); } if self.options.codegen_config.methods { options.push("methods".into()); } if self.options.codegen_config.constructors { options.push("constructors".into()); } if self.options.codegen_config.destructors { options.push("destructors".into()); } output_vector.push(options.join(",")); if !self.options.codegen_config.methods { output_vector.push("--ignore-methods".into()); } if !self.options.convert_floats { output_vector.push("--no-convert-floats".into()); } if !self.options.prepend_enum_name { output_vector.push("--no-prepend-enum-name".into()); } self.options .opaque_types .get_items() .iter() .map(|item| { output_vector.push("--opaque-type".into()); output_vector.push( item.trim_left_matches("^") .trim_right_matches("$") .into(), ); }) .count(); self.options .raw_lines .iter() .map(|item| { output_vector.push("--raw-line".into()); output_vector.push( item.trim_left_matches("^") .trim_right_matches("$") .into(), ); }) .count(); if self.options.use_core { output_vector.push("--use-core".into()); } if self.options.conservative_inline_namespaces { output_vector.push("--conservative-inline-namespaces".into()); } self.options .whitelisted_functions .get_items() .iter() .map(|item| { output_vector.push("--whitelist-function".into()); output_vector.push( item.trim_left_matches("^") .trim_right_matches("$") .into(), ); }) .count(); self.options .whitelisted_types .get_items() .iter() .map(|item| { output_vector.push("--whitelist-type".into()); output_vector.push( item.trim_left_matches("^") .trim_right_matches("$") .into(), ); }) .count(); self.options .whitelisted_vars .get_items() .iter() .map(|item| { output_vector.push("--whitelist-var".into()); output_vector.push( item.trim_left_matches("^") .trim_right_matches("$") .into(), ); }) .count(); 
output_vector.push("--".into()); if !self.options.clang_args.is_empty() { output_vector.extend(self.options.clang_args.iter().cloned()); } if self.input_headers.len() > 1 { output_vector.extend( self.input_headers[..self.input_headers.len() - 1] .iter() .cloned(), ); } if !self.options.rustfmt_bindings { output_vector.push("--no-rustfmt-bindings".into()); } if let Some(path) = self.options .rustfmt_configuration_file .as_ref() .and_then(|f| f.to_str()) { output_vector.push("--rustfmt-configuration-file".into()); output_vector.push(path.into()); } self.options .no_partialeq_types .get_items() .iter() .map(|item| { output_vector.push("--no-partialeq".into()); output_vector.push( item.trim_left_matches("^") .trim_right_matches("$") .into(), ); }) .count(); self.options .no_copy_types .get_items() .iter() .map(|item| { output_vector.push("--no-copy".into()); output_vector.push( item.trim_left_matches("^") .trim_right_matches("$") .into(), ); }) .count(); self.options .no_hash_types .get_items() .iter() .map(|item| { output_vector.push("--no-hash".into()); output_vector.push( item.trim_left_matches("^") .trim_right_matches("$") .into(), ); }) .count(); output_vector } /// Add an input C/C++ header to generate bindings for. /// /// This can be used to generate bindings to a single header: /// /// ```ignore /// let bindings = bindgen::Builder::default() /// .header("input.h") /// .generate() /// .unwrap(); /// ``` /// /// Or you can invoke it multiple times to generate bindings to multiple /// headers: /// /// ```ignore /// let bindings = bindgen::Builder::default() /// .header("first.h") /// .header("second.h") /// .header("third.h") /// .generate() /// .unwrap(); /// ``` pub fn header<T: Into<String>>(mut self, header: T) -> Builder { self.input_headers.push(header.into()); self } /// Add `contents` as an input C/C++ header named `name`. /// /// The file `name` will be added to the clang arguments. 
pub fn header_contents(mut self, name: &str, contents: &str) -> Builder { self.input_header_contents.push( (name.into(), contents.into()), ); self } /// Specify the rust target /// /// The default is the latest stable Rust version pub fn rust_target(mut self, rust_target: RustTarget) -> Self { self.options.set_rust_target(rust_target); self } /// Disable support for native Rust unions, if supported. pub fn disable_untagged_union(mut self) -> Self { self.options.rust_features.untagged_union = false; self } /// Set the output graphviz file. pub fn emit_ir_graphviz<T: Into<String>>(mut self, path: T) -> Builder { let path = path.into(); self.options.emit_ir_graphviz = Some(path); self } /// Whether the generated bindings should contain documentation comments or /// not. /// /// This ideally will always be true, but it may need to be false until we /// implement some processing on comments to work around issues as described /// in: /// /// https://github.com/rust-lang-nursery/rust-bindgen/issues/426 pub fn generate_comments(mut self, doit: bool) -> Self { self.options.generate_comments = doit; self } /// Whether to whitelist recursively or not. Defaults to true. /// /// Given that we have explicitly whitelisted the "initiate_dance_party" /// function in this C header: /// /// ```c /// typedef struct MoonBoots { /// int bouncy_level; /// } MoonBoots; /// /// void initiate_dance_party(MoonBoots* boots); /// ``` /// /// We would normally generate bindings to both the `initiate_dance_party` /// function and the `MoonBoots` struct that it transitively references. By /// configuring with `whitelist_recursively(false)`, `bindgen` will not emit /// bindings for anything except the explicitly whitelisted items, and there /// would be no emitted struct definition for `MoonBoots`. However, the /// `initiate_dance_party` function would still reference `MoonBoots`! 
/// /// **Disabling this feature will almost certainly cause `bindgen` to emit /// bindings that will not compile!** If you disable this feature, then it /// is *your* responsibility to provide definitions for every type that is /// referenced from an explicitly whitelisted item. One way to provide the /// definitions is by using the [`Builder::raw_line`](#method.raw_line) /// method, another would be to define them in Rust and then `include!(...)` /// the bindings immediately afterwards. pub fn whitelist_recursively(mut self, doit: bool) -> Self { self.options.whitelist_recursively = doit; self } /// Generate `#[macro_use] extern crate objc;` instead of `use objc;` /// in the prologue of the files generated from objective-c files pub fn objc_extern_crate(mut self, doit: bool) -> Self { self.options.objc_extern_crate = doit; self } /// Whether to use the clang-provided name mangling. This is true by default /// and probably needed for C++ features. /// /// However, some old libclang versions seem to return incorrect results in /// some cases for non-mangled functions, see [1], so we allow disabling it. /// /// [1]: https://github.com/rust-lang-nursery/rust-bindgen/issues/528 pub fn trust_clang_mangling(mut self, doit: bool) -> Self { self.options.enable_mangling = doit; self } /// Hide the given type from the generated bindings. Regular expressions are /// supported. #[deprecated(note = "Use blacklist_type instead")] pub fn hide_type<T: AsRef<str>>(self, arg: T) -> Builder { self.blacklist_type(arg) } /// Hide the given type from the generated bindings. Regular expressions are /// supported. pub fn blacklist_type<T: AsRef<str>>(mut self, arg: T) -> Builder { self.options.blacklisted_types.insert(arg); self } /// Treat the given type as opaque in the generated bindings. Regular /// expressions are supported. 
pub fn opaque_type<T: AsRef<str>>(mut self, arg: T) -> Builder { self.options.opaque_types.insert(arg); self } /// Whitelist the given type so that it (and all types that it transitively /// refers to) appears in the generated bindings. Regular expressions are /// supported. #[deprecated(note = "use whitelist_type instead")] pub fn whitelisted_type<T: AsRef<str>>(self, arg: T) -> Builder { self.whitelist_type(arg) } /// Whitelist the given type so that it (and all types that it transitively /// refers to) appears in the generated bindings. Regular expressions are /// supported. pub fn whitelist_type<T: AsRef<str>>(mut self, arg: T) -> Builder { self.options.whitelisted_types.insert(arg); self } /// Whitelist the given function so that it (and all types that it /// transitively refers to) appears in the generated bindings. Regular /// expressions are supported. pub fn whitelist_function<T: AsRef<str>>(mut self, arg: T) -> Builder { self.options.whitelisted_functions.insert(arg); self } /// Whitelist the given function. /// /// Deprecated: use whitelist_function instead. #[deprecated(note = "use whitelist_function instead")] pub fn whitelisted_function<T: AsRef<str>>(self, arg: T) -> Builder { self.whitelist_function(arg) } /// Whitelist the given variable so that it (and all types that it /// transitively refers to) appears in the generated bindings. Regular /// expressions are supported. pub fn whitelist_var<T: AsRef<str>>(mut self, arg: T) -> Builder { self.options.whitelisted_vars.insert(arg); self } /// Whitelist the given variable. /// /// Deprecated: use whitelist_var instead. 
#[deprecated(note = "use whitelist_var instead")] pub fn whitelisted_var<T: AsRef<str>>(self, arg: T) -> Builder { self.whitelist_var(arg) } /// Set the default style of code to generate for enums pub fn default_enum_style(mut self, arg: codegen::EnumVariation) -> Builder { self.options.default_enum_style = arg; self } /// Mark the given enum (or set of enums, if using a pattern) as being /// bitfield-like. Regular expressions are supported. /// /// This makes bindgen generate a type that isn't a rust `enum`. Regular /// expressions are supported. pub fn bitfield_enum<T: AsRef<str>>(mut self, arg: T) -> Builder { self.options.bitfield_enums.insert(arg); self } /// Mark the given enum (or set of enums, if using a pattern) as a Rust /// enum. /// /// This makes bindgen generate enums instead of constants. Regular /// expressions are supported. /// /// **Use this with caution.** You should not be using Rust enums unless /// you have complete control of the C/C++ code that you're binding to. /// Take a look at https://github.com/rust-lang/rust/issues/36927 for /// more information. pub fn rustified_enum<T: AsRef<str>>(mut self, arg: T) -> Builder { self.options.rustified_enums.insert(arg); self } /// Mark the given enum (or set of enums, if using a pattern) as a set of /// constants that are not to be put into a module. pub fn constified_enum<T: AsRef<str>>(mut self, arg: T) -> Builder { self.options.constified_enums.insert(arg); self } /// Mark the given enum (or set of enums, if using a pattern) as a set of /// constants that should be put into a module. /// /// This makes bindgen generate modules containing constants instead of /// just constants. Regular expressions are supported. pub fn constified_enum_module<T: AsRef<str>>(mut self, arg: T) -> Builder { self.options.constified_enum_modules.insert(arg); self } /// Add a string to prepend to the generated bindings. The string is passed /// through without any modification. 
pub fn raw_line<T: Into<String>>(mut self, arg: T) -> Self { self.options.raw_lines.push(arg.into()); self } /// Add a given line to the beginning of module `mod`. pub fn module_raw_line<T, U>(mut self, mod_: T, line: U) -> Self where T: Into<String>, U: Into<String>, { self.options .module_lines .entry(mod_.into()) .or_insert_with(Vec::new) .push(line.into()); self } /// Add a given set of lines to the beginning of module `mod`. pub fn module_raw_lines<T, I>(mut self, mod_: T, lines: I) -> Self where T: Into<String>, I: IntoIterator, I::Item: Into<String>, { self.options .module_lines .entry(mod_.into()) .or_insert_with(Vec::new) .extend(lines.into_iter().map(Into::into)); self } /// Add an argument to be passed straight through to clang. pub fn clang_arg<T: Into<String>>(mut self, arg: T) -> Builder { self.options.clang_args.push(arg.into()); self } /// Add arguments to be passed straight through to clang. pub fn clang_args<I>(mut self, iter: I) -> Builder where I: IntoIterator, I::Item: AsRef<str>, { for arg in iter { self = self.clang_arg(arg.as_ref()) } self } /// Emit bindings for builtin definitions (for example `__builtin_va_list`) /// in the generated Rust. pub fn emit_builtins(mut self) -> Builder { self.options.builtins = true; self } /// Avoid converting floats to `f32`/`f64` by default. pub fn no_convert_floats(mut self) -> Self { self.options.convert_floats = false; self } /// Set whether layout tests should be generated. pub fn layout_tests(mut self, doit: bool) -> Self { self.options.layout_tests = doit; self } /// Set whether `Debug` should be implemented, if it can not be derived automatically. pub fn impl_debug(mut self, doit: bool) -> Self { self.options.impl_debug = doit; self } /// Set whether `PartialEq` should be implemented, if it can not be derived automatically. pub fn impl_partialeq(mut self, doit: bool) -> Self { self.options.impl_partialeq = doit; self } /// Set whether `Copy` should be derived by default. 
pub fn derive_copy(mut self, doit: bool) -> Self { self.options.derive_copy = doit; self } /// Set whether `Debug` should be derived by default. pub fn derive_debug(mut self, doit: bool) -> Self { self.options.derive_debug = doit; self } /// Set whether `Default` should be derived by default. pub fn derive_default(mut self, doit: bool) -> Self { self.options.derive_default = doit; self } /// Set whether `Hash` should be derived by default. pub fn derive_hash(mut self, doit: bool) -> Self { self.options.derive_hash = doit; self } /// Set whether `PartialOrd` should be derived by default. /// If we don't compute partialord, we also cannot compute /// ord. Set the derive_ord to `false` when doit is `false`. pub fn derive_partialord(mut self, doit: bool) -> Self { self.options.derive_partialord = doit; if !doit { self.options.derive_ord = false; } self } /// Set whether `Ord` should be derived by default. /// We can't compute `Ord` without computing `PartialOrd`, /// so we set the same option to derive_partialord. pub fn derive_ord(mut self, doit: bool) -> Self { self.options.derive_ord = doit; self.options.derive_partialord = doit; self } /// Set whether `PartialEq` should be derived by default. /// /// If we don't derive `PartialEq`, we also cannot derive `Eq`, so deriving /// `Eq` is also disabled when `doit` is `false`. pub fn derive_partialeq(mut self, doit: bool) -> Self { self.options.derive_partialeq = doit; if !doit { self.options.derive_eq = false; } self } /// Set whether `Eq` should be derived by default. /// /// We can't derive `Eq` without also deriving `PartialEq`, so we also /// enable deriving `PartialEq` when `doit` is `true`. pub fn derive_eq(mut self, doit: bool) -> Self { self.options.derive_eq = doit; if doit { self.options.derive_partialeq = doit; } self } /// Set whether or not to time bindgen phases, and print information to /// stderr. 
pub fn time_phases(mut self, doit: bool) -> Self { self.options.time_phases = doit; self } /// Emit Clang AST. pub fn emit_clang_ast(mut self) -> Builder { self.options.emit_ast = true; self } /// Emit IR. pub fn emit_ir(mut self) -> Builder { self.options.emit_ir = true; self } /// Enable C++ namespaces. pub fn enable_cxx_namespaces(mut self) -> Builder { self.options.enable_cxx_namespaces = true; self } /// Disable name auto-namespacing. /// /// By default, bindgen mangles names like `foo::bar::Baz` to look like /// `foo_bar_Baz` instead of just `Baz`. /// /// This method disables that behavior. /// /// Note that this intentionally does not change the names used for /// whitelisting and blacklisting, which should still be mangled with the /// namespaces. /// /// Note, also, that this option may cause bindgen to generate duplicate /// names. pub fn disable_name_namespacing(mut self) -> Builder { self.options.disable_name_namespacing = true; self } /// Treat inline namespaces conservatively. /// /// This is tricky, because in C++ is technically legal to override an item /// defined in an inline namespace: /// /// ```cpp /// inline namespace foo { /// using Bar = int; /// } /// using Bar = long; /// ``` /// /// Even though referencing `Bar` is a compiler error. /// /// We want to support this (arguably esoteric) use case, but we don't want /// to make the rest of bindgen users pay an usability penalty for that. /// /// To support this, we need to keep all the inline namespaces around, but /// then bindgen usage is a bit more difficult, because you cannot /// reference, e.g., `std::string` (you'd need to use the proper inline /// namespace). /// /// We could complicate a lot of the logic to detect name collisions, and if /// not detected generate a `pub use inline_ns::*` or something like that. 
/// /// That's probably something we can do if we see this option is needed in a /// lot of cases, to improve it's usability, but my guess is that this is /// not going to be too useful. pub fn conservative_inline_namespaces(mut self) -> Builder { self.options.conservative_inline_namespaces = true; self } /// Whether inline functions should be generated or not. /// /// Note that they will usually not work. However you can use /// `-fkeep-inline-functions` or `-fno-inline-functions` if you are /// responsible of compiling the library to make them callable. pub fn generate_inline_functions(mut self, doit: bool) -> Self { self.options.generate_inline_functions = doit; self } /// Ignore functions. pub fn ignore_functions(mut self) -> Builder { self.options.codegen_config.functions = false; self } /// Ignore methods. pub fn ignore_methods(mut self) -> Builder { self.options.codegen_config.methods = false; self } /// Avoid generating any unstable Rust, such as Rust unions, in the generated bindings. #[deprecated(note = "please use `rust_target` instead")] pub fn unstable_rust(self, doit: bool) -> Self { let rust_target = if doit { RustTarget::Nightly } else { LATEST_STABLE_RUST }; self.rust_target(rust_target) } /// Use core instead of libstd in the generated bindings. pub fn use_core(mut self) -> Builder { self.options.use_core = true; self } /// Use the given prefix for the raw types instead of `::std::os::raw`. pub fn ctypes_prefix<T: Into<String>>(mut self, prefix: T) -> Builder { self.options.ctypes_prefix = Some(prefix.into()); self } /// Allows configuring types in different situations, see the /// [`ParseCallbacks`](./callbacks/trait.ParseCallbacks.html) documentation. pub fn parse_callbacks( mut self, cb: Box<callbacks::ParseCallbacks>, ) -> Self { self.options.parse_callbacks = Some(cb); self } /// Choose what to generate using a /// [`CodegenConfig`](./struct.CodegenConfig.html). 
pub fn with_codegen_config(mut self, config: CodegenConfig) -> Self { self.options.codegen_config = config; self } /// Prepend the enum name to constant or bitfield variants. pub fn prepend_enum_name(mut self, doit: bool) -> Self { self.options.prepend_enum_name = doit; self } /// Set whether rustfmt should format the generated bindings. pub fn rustfmt_bindings(mut self, doit: bool) -> Self { self.options.rustfmt_bindings = doit; self } /// Set the absolute path to the rustfmt configuration file, if None, the standard rustfmt /// options are used. pub fn rustfmt_configuration_file(mut self, path: Option<PathBuf>) -> Self { self = self.rustfmt_bindings(true); self.options.rustfmt_configuration_file = path; self } /// Sets an explicit path to rustfmt, to be used when rustfmt is enabled. pub fn with_rustfmt<P: Into<PathBuf>>(mut self, path: P) -> Self { self.options.rustfmt_path = Some(path.into()); self } /// Generate the Rust bindings using the options built up thus far. pub fn generate(mut self) -> Result<Bindings, ()> { self.options.input_header = self.input_headers.pop(); self.options.clang_args.extend( self.input_headers .drain(..) .flat_map(|header| { iter::once("-include".into()).chain(iter::once(header)) }), ); self.options.input_unsaved_files.extend( self.input_header_contents.drain(..).map(|(name, contents)| { clang::UnsavedFile::new(&name, &contents) }), ); Bindings::generate(self.options) } /// Preprocess and dump the input header files to disk. /// /// This is useful when debugging bindgen, using C-Reduce, or when filing /// issues. 
The resulting file will be named something like `__bindgen.i` or /// `__bindgen.ii` pub fn dump_preprocessed_input(&self) -> io::Result<()> { fn check_is_cpp(name_file: &str) -> bool { name_file.ends_with(".hpp") || name_file.ends_with(".hxx") || name_file.ends_with(".hh") || name_file.ends_with(".h++") } let clang = clang_sys::support::Clang::find(None, &[]).ok_or_else(|| { io::Error::new(io::ErrorKind::Other, "Cannot find clang executable") })?; // The contents of a wrapper file that includes all the input header // files. let mut wrapper_contents = String::new(); // Whether we are working with C or C++ inputs. let mut is_cpp = args_are_cpp(&self.options.clang_args); // For each input header, add `#include "$header"`. for header in &self.input_headers { is_cpp |= check_is_cpp(header); wrapper_contents.push_str("#include \""); wrapper_contents.push_str(header); wrapper_contents.push_str("\"\n"); } // For each input header content, add a prefix line of `#line 0 "$name"` // followed by the contents. 
for &(ref name, ref contents) in &self.input_header_contents { is_cpp |= check_is_cpp(name); wrapper_contents.push_str("#line 0 \""); wrapper_contents.push_str(name); wrapper_contents.push_str("\"\n"); wrapper_contents.push_str(contents); } let wrapper_path = PathBuf::from(if is_cpp { "__bindgen.cpp" } else { "__bindgen.c" }); { let mut wrapper_file = File::create(&wrapper_path)?; wrapper_file.write(wrapper_contents.as_bytes())?; } let mut cmd = Command::new(&clang.path); cmd.arg("-save-temps") .arg("-E") .arg("-C") .arg("-c") .arg(&wrapper_path) .stdout(Stdio::piped()); for a in &self.options.clang_args { cmd.arg(a); } let mut child = cmd.spawn()?; let mut preprocessed = child.stdout.take().unwrap(); let mut file = File::create(if is_cpp { "__bindgen.ii" } else { "__bindgen.i" })?; io::copy(&mut preprocessed, &mut file)?; if child.wait()?.success() { Ok(()) } else { Err(io::Error::new( io::ErrorKind::Other, "clang exited with non-zero status", )) } } /// Don't derive `PartialEq` for a given type. Regular /// expressions are supported. pub fn no_partialeq<T: Into<String>>(mut self, arg: T) -> Builder { self.options.no_partialeq_types.insert(arg.into()); self } /// Don't derive `Copy` for a given type. Regular /// expressions are supported. pub fn no_copy<T: Into<String>>(mut self, arg: T) -> Self { self.options.no_copy_types.insert(arg.into()); self } /// Don't derive `Hash` for a given type. Regular /// expressions are supported. pub fn no_hash<T: Into<String>>(mut self, arg: T) -> Builder { self.options.no_hash_types.insert(arg.into()); self } } /// Configuration options for generated bindings. #[derive(Debug)] struct BindgenOptions { /// The set of types that have been blacklisted and should not appear /// anywhere in the generated code. blacklisted_types: RegexSet, /// The set of types that should be treated as opaque structures in the /// generated code. opaque_types: RegexSet, /// The explicit rustfmt path. 
rustfmt_path: Option<PathBuf>, /// The set of types that we should have bindings for in the generated /// code. /// /// This includes all types transitively reachable from any type in this /// set. One might think of whitelisted types/vars/functions as GC roots, /// and the generated Rust code as including everything that gets marked. whitelisted_types: RegexSet, /// Whitelisted functions. See docs for `whitelisted_types` for more. whitelisted_functions: RegexSet, /// Whitelisted variables. See docs for `whitelisted_types` for more. whitelisted_vars: RegexSet, /// The default style of code to generate for enums default_enum_style: codegen::EnumVariation, /// The enum patterns to mark an enum as bitfield. bitfield_enums: RegexSet, /// The enum patterns to mark an enum as a Rust enum. rustified_enums: RegexSet, /// The enum patterns to mark an enum as a module of constants. constified_enum_modules: RegexSet, /// The enum patterns to mark an enum as a set of constants. constified_enums: RegexSet, /// Whether we should generate builtins or not. builtins: bool, /// True if we should dump the Clang AST for debugging purposes. emit_ast: bool, /// True if we should dump our internal IR for debugging purposes. emit_ir: bool, /// Output graphviz dot file. emit_ir_graphviz: Option<String>, /// True if we should emulate C++ namespaces with Rust modules in the /// generated bindings. enable_cxx_namespaces: bool, /// True if we should avoid mangling names with namespaces. disable_name_namespacing: bool, /// True if we should generate layout tests for generated structures. layout_tests: bool, /// True if we should implement the Debug trait for C/C++ structures and types /// that do not support automatically deriving Debug. impl_debug: bool, /// True if we should implement the PartialEq trait for C/C++ structures and types /// that do not support automatically deriving PartialEq. 
impl_partialeq: bool, /// True if we should derive Copy trait implementations for C/C++ structures /// and types. derive_copy: bool, /// True if we should derive Debug trait implementations for C/C++ structures /// and types. derive_debug: bool, /// True if we should derive Default trait implementations for C/C++ structures /// and types. derive_default: bool, /// True if we should derive Hash trait implementations for C/C++ structures /// and types. derive_hash: bool, /// True if we should derive PartialOrd trait implementations for C/C++ structures /// and types. derive_partialord: bool, /// True if we should derive Ord trait implementations for C/C++ structures /// and types. derive_ord: bool, /// True if we should derive PartialEq trait implementations for C/C++ structures /// and types. derive_partialeq: bool, /// True if we should derive Eq trait implementations for C/C++ structures /// and types. derive_eq: bool, /// True if we should avoid using libstd to use libcore instead. use_core: bool, /// An optional prefix for the "raw" types, like `c_int`, `c_void`... ctypes_prefix: Option<String>, /// Whether to time the bindgen phases. time_phases: bool, /// True if we should generate constant names that are **directly** under /// namespaces. namespaced_constants: bool, /// True if we should use MSVC name mangling rules. msvc_mangling: bool, /// Whether we should convert float types to f32/f64 types. convert_floats: bool, /// The set of raw lines to prepend to the top-level module of generated /// Rust code. raw_lines: Vec<String>, /// The set of raw lines to prepend to each of the modules. /// /// This only makes sense if the `enable_cxx_namespaces` option is set. module_lines: HashMap<String, Vec<String>>, /// The set of arguments to pass straight through to Clang. clang_args: Vec<String>, /// The input header file. input_header: Option<String>, /// Unsaved files for input. 
input_unsaved_files: Vec<clang::UnsavedFile>, /// A user-provided visitor to allow customizing different kinds of /// situations. parse_callbacks: Option<Box<callbacks::ParseCallbacks>>, /// Which kind of items should we generate? By default, we'll generate all /// of them. codegen_config: CodegenConfig, /// Whether to treat inline namespaces conservatively. /// /// See the builder method description for more details. conservative_inline_namespaces: bool, /// Whether to keep documentation comments in the generated output. See the /// documentation for more details. generate_comments: bool, /// Whether to generate inline functions. Defaults to false. generate_inline_functions: bool, /// Whether to whitelist types recursively. Defaults to true. whitelist_recursively: bool, /// Instead of emitting 'use objc;' to files generated from objective c files, /// generate '#[macro_use] extern crate objc;' objc_extern_crate: bool, /// Whether to use the clang-provided name mangling. This is true and /// probably needed for C++ features. /// /// However, some old libclang versions seem to return incorrect results in /// some cases for non-mangled functions, see [1], so we allow disabling it. /// /// [1]: https://github.com/rust-lang-nursery/rust-bindgen/issues/528 enable_mangling: bool, /// Whether to prepend the enum name to bitfield or constant variants. prepend_enum_name: bool, /// Version of the Rust compiler to target rust_target: RustTarget, /// Features to enable, derived from `rust_target` rust_features: RustFeatures, /// Whether rustfmt should format the generated bindings. rustfmt_bindings: bool, /// The absolute path to the rustfmt configuration file, if None, the standard rustfmt /// options are used. rustfmt_configuration_file: Option<PathBuf>, /// The set of types that we should not derive `PartialEq` for. no_partialeq_types: RegexSet, /// The set of types that we should not derive `Copy` for. 
no_copy_types: RegexSet, /// The set of types that we should not derive `Hash` for. no_hash_types: RegexSet, } /// TODO(emilio): This is sort of a lie (see the error message that results from /// removing this), but since we don't share references across panic boundaries /// it's ok. impl ::std::panic::UnwindSafe for BindgenOptions {} impl BindgenOptions { fn build(&mut self) { self.whitelisted_vars.build(); self.whitelisted_types.build(); self.whitelisted_functions.build(); self.blacklisted_types.build(); self.opaque_types.build(); self.bitfield_enums.build(); self.constified_enums.build(); self.constified_enum_modules.build(); self.rustified_enums.build(); self.no_partialeq_types.build(); self.no_copy_types.build(); self.no_hash_types.build(); } /// Update rust target version pub fn set_rust_target(&mut self, rust_target: RustTarget) { self.rust_target = rust_target; // Keep rust_features synced with rust_target self.rust_features = rust_target.into(); } /// Get features supported by target Rust version pub fn rust_features(&self) -> RustFeatures { self.rust_features } } impl Default for BindgenOptions { fn default() -> BindgenOptions { let rust_target = RustTarget::default(); BindgenOptions { rust_target: rust_target, rust_features: rust_target.into(), blacklisted_types: Default::default(), opaque_types: Default::default(), rustfmt_path: Default::default(), whitelisted_types: Default::default(), whitelisted_functions: Default::default(), whitelisted_vars: Default::default(), default_enum_style: Default::default(), bitfield_enums: Default::default(), rustified_enums: Default::default(), constified_enums: Default::default(), constified_enum_modules: Default::default(), builtins: false, emit_ast: false, emit_ir: false, emit_ir_graphviz: None, layout_tests: true, impl_debug: false, impl_partialeq: false, derive_copy: true, derive_debug: true, derive_default: false, derive_hash: false, derive_partialord: false, derive_ord: false, derive_partialeq: false, derive_eq: 
false, enable_cxx_namespaces: false, disable_name_namespacing: false, use_core: false, ctypes_prefix: None, namespaced_constants: true, msvc_mangling: false, convert_floats: true, raw_lines: vec![], module_lines: HashMap::default(), clang_args: vec![], input_header: None, input_unsaved_files: vec![], parse_callbacks: None, codegen_config: CodegenConfig::all(), conservative_inline_namespaces: false, generate_comments: true, generate_inline_functions: false, whitelist_recursively: true, objc_extern_crate: false, enable_mangling: true, prepend_enum_name: true, time_phases: false, rustfmt_bindings: true, rustfmt_configuration_file: None, no_partialeq_types: Default::default(), no_copy_types: Default::default(), no_hash_types: Default::default(), } } } fn ensure_libclang_is_loaded() { if clang_sys::is_loaded() { return; } // XXX (issue #350): Ensure that our dynamically loaded `libclang` // doesn't get dropped prematurely, nor is loaded multiple times // across different threads. lazy_static! { static ref LIBCLANG: Arc<clang_sys::SharedLibrary> = { clang_sys::load().expect("Unable to find libclang"); clang_sys::get_library() .expect("We just loaded libclang and it had better still be \ here!") }; } clang_sys::set_library(Some(LIBCLANG.clone())); } /// Generated Rust bindings. #[derive(Debug)] pub struct Bindings { options: BindgenOptions, module: quote::Tokens, } impl Bindings { /// Generate bindings for the given options. pub(crate) fn generate( mut options: BindgenOptions, ) -> Result<Bindings, ()> { ensure_libclang_is_loaded(); options.build(); // Filter out include paths and similar stuff, so we don't incorrectly // promote them to `-isystem`. let clang_args_for_clang_sys = { let mut last_was_include_prefix = false; options.clang_args.iter().filter(|arg| { if last_was_include_prefix { last_was_include_prefix = false; return false; } let arg = &**arg; // https://clang.llvm.org/docs/ClangCommandLineReference.html // -isystem and -isystem-after are harmless. 
if arg == "-I" || arg == "--include-directory" { last_was_include_prefix = true; return false; } if arg.starts_with("-I") || arg.starts_with("--include-directory=") { return false; } true }).cloned().collect::<Vec<_>>() }; // TODO: Make this path fixup configurable? if let Some(clang) = clang_sys::support::Clang::find( None, &clang_args_for_clang_sys, ) { // If --target is specified, assume caller knows what they're doing // and don't mess with include paths for them let has_target_arg = options .clang_args .iter() .rposition(|arg| arg.starts_with("--target")) .is_some(); if !has_target_arg { // TODO: distinguish C and C++ paths? C++'s should be enough, I // guess. // Whether we are working with C or C++ inputs. let is_cpp = args_are_cpp(&options.clang_args); let search_paths = if is_cpp { clang.cpp_search_paths } else { clang.c_search_paths }; if let Some(search_paths) = search_paths { for path in search_paths.into_iter() { if let Ok(path) = path.into_os_string().into_string() { options.clang_args.push("-isystem".to_owned()); options.clang_args.push(path); } } } } } #[cfg(unix)] fn can_read(perms: &std::fs::Permissions) -> bool { use std::os::unix::fs::PermissionsExt; perms.mode() & 0o444 > 0 } #[cfg(not(unix))] fn can_read(_: &std::fs::Permissions) -> bool { true } if let Some(h) = options.input_header.as_ref() { if let Ok(md) = std::fs::metadata(h) { if md.is_dir() { eprintln!("error: '{}' is a folder", h); return Err(()); } if !can_read(&md.permissions()) { eprintln!("error: insufficient permissions to read '{}'", h); return Err(()); } options.clang_args.push(h.clone()) } else { eprintln!("error: header '{}' does not exist.", h); return Err(()); } } for f in options.input_unsaved_files.iter() { options.clang_args.push(f.name.to_str().unwrap().to_owned()) } let time_phases = options.time_phases; let mut context = BindgenContext::new(options); { let _t = time::Timer::new("parse") .with_output(time_phases); parse(&mut context)?; } let (items, options) = 
codegen::codegen(context); Ok(Bindings { options: options, module: quote! { #( #items )* } }) } /// Convert these bindings into source text (with raw lines prepended). pub fn to_string(&self) -> String { let mut bytes = vec![]; self.write(Box::new(&mut bytes) as Box<Write>) .expect("writing to a vec cannot fail"); String::from_utf8(bytes) .expect("we should only write bindings that are valid utf-8") } /// Write these bindings as source text to a file. pub fn write_to_file<P: AsRef<Path>>(&self, path: P) -> io::Result<()> { let file = OpenOptions::new() .write(true) .truncate(true) .create(true) .open(path.as_ref())?; self.write(Box::new(file))?; Ok(()) } /// Write these bindings as source text to the given `Write`able. pub fn write<'a>(&self, mut writer: Box<Write + 'a>) -> io::Result<()> { writer.write( "/* automatically generated by rust-bindgen */\n\n".as_bytes(), )?; for line in self.options.raw_lines.iter() { writer.write(line.as_bytes())?; writer.write("\n".as_bytes())?; } if !self.options.raw_lines.is_empty() { writer.write("\n".as_bytes())?; } let bindings = self.module.to_string(); match self.rustfmt_generated_string(&bindings) { Ok(rustfmt_bindings) => { writer.write(rustfmt_bindings.as_bytes())?; }, Err(err) => { eprintln!("{:?}", err); writer.write(bindings.as_bytes())?; }, } Ok(()) } /// Checks if rustfmt_bindings is set and runs rustfmt on the string fn rustfmt_generated_string<'a>( &self, source: &'a str, ) -> io::Result<Cow<'a, str>> { let _t = time::Timer::new("rustfmt_generated_string") .with_output(self.options.time_phases); if !self.options.rustfmt_bindings { return Ok(Cow::Borrowed(source)); } let rustfmt = match self.options.rustfmt_path { Some(ref p) => Cow::Borrowed(p), None => { let path = which::which("rustfmt") .map_err(|e| { io::Error::new(io::ErrorKind::Other, e.to_owned()) })?; Cow::Owned(path) } }; let mut cmd = Command::new(&*rustfmt); cmd .stdin(Stdio::piped()) .stdout(Stdio::piped()); if let Some(path) = self.options 
.rustfmt_configuration_file .as_ref() .and_then(|f| f.to_str()) { cmd.args(&["--config-path", path]); } let mut child = cmd.spawn()?; let mut child_stdin = child.stdin.take().unwrap(); let mut child_stdout = child.stdout.take().unwrap(); let source = source.to_owned(); // Write to stdin in a new thread, so that we can read from stdout on this // thread. This keeps the child from blocking on writing to its stdout which // might block us from writing to its stdin. let stdin_handle = ::std::thread::spawn(move || { let _ = child_stdin.write_all(source.as_bytes()); source }); let mut output = vec![]; io::copy(&mut child_stdout, &mut output)?; let status = child.wait()?; let source = stdin_handle.join() .expect("The thread writing to rustfmt's stdin doesn't do \ anything that could panic"); match String::from_utf8(output) { Ok(bindings) => { match status.code() { Some(0) => Ok(Cow::Owned(bindings)), Some(2) => Err(io::Error::new( io::ErrorKind::Other, "Rustfmt parsing errors.".to_string(), )), Some(3) => { warn!("Rustfmt could not format some lines."); Ok(Cow::Owned(bindings)) } _ => Err(io::Error::new( io::ErrorKind::Other, "Internal rustfmt error".to_string(), )), } }, _ => Ok(Cow::Owned(source)) } } } /// Determines whether the given cursor is in any of the files matched by the /// options. fn filter_builtins(ctx: &BindgenContext, cursor: &clang::Cursor) -> bool { ctx.options().builtins || !cursor.is_builtin() } /// Parse one `Item` from the Clang cursor. fn parse_one( ctx: &mut BindgenContext, cursor: clang::Cursor, parent: Option<ItemId>, ) -> clang_sys::CXChildVisitResult { if !filter_builtins(ctx, &cursor) { return CXChildVisit_Continue; } use clang_sys::CXChildVisit_Continue; match Item::parse(cursor, parent, ctx) { Ok(..) => {} Err(ParseError::Continue) => {} Err(ParseError::Recurse) => { cursor.visit(|child| parse_one(ctx, child, parent)); } } CXChildVisit_Continue } /// Parse the Clang AST into our `Item` internal representation. 
fn parse(context: &mut BindgenContext) -> Result<(), ()> { use clang_sys::*; let mut any_error = false; for d in context.translation_unit().diags().iter() { let msg = d.format(); let is_err = d.severity() >= CXDiagnostic_Error; eprintln!("{}, err: {}", msg, is_err); any_error |= is_err; } if any_error { return Err(()); } let cursor = context.translation_unit().cursor(); if context.options().emit_ast { fn dump_if_not_builtin(cur: &clang::Cursor) -> CXChildVisitResult { if !cur.is_builtin() { clang::ast_dump(&cur, 0) } else { CXChildVisit_Continue } } cursor.visit(|cur| dump_if_not_builtin(&cur)); } let root = context.root_module(); context.with_module(root, |context| { cursor.visit(|cursor| parse_one(context, cursor, None)) }); assert!( context.current_module() == context.root_module(), "How did this happen?" ); Ok(()) } /// Extracted Clang version data #[derive(Debug)] pub struct ClangVersion { /// Major and minor semver, if parsing was successful pub parsed: Option<(u32, u32)>, /// full version string pub full: String, } /// Get the major and the minor semver numbers of Clang's version pub fn clang_version() -> ClangVersion { if !clang_sys::is_loaded() { // TODO(emilio): Return meaningful error (breaking). clang_sys::load().expect("Unable to find libclang"); } let raw_v: String = clang::extract_clang_version(); let split_v: Option<Vec<&str>> = raw_v.split_whitespace().nth(2).map(|v| { v.split('.').collect() }); match split_v { Some(v) => { if v.len() >= 2 { let maybe_major = v[0].parse::<u32>(); let maybe_minor = v[1].parse::<u32>(); match (maybe_major, maybe_minor) { (Ok(major), Ok(minor)) => { return ClangVersion { parsed: Some((major, minor)), full: raw_v.clone(), } } _ => {} } } } None => {} }; ClangVersion { parsed: None, full: raw_v.clone(), } } /// Test command_line_flag function. 
#[test] fn commandline_flag_unit_test_function() { //Test 1 let bindings = ::builder(); let command_line_flags = bindings.command_line_flags(); let test_cases = vec![ "--rust-target", "--no-derive-default", "--generate", "function,types,vars,methods,constructors,destructors", ].iter() .map(|&x| x.into()) .collect::<Vec<String>>(); assert!(test_cases.iter().all( |ref x| command_line_flags.contains(x), )); //Test 2 let bindings = ::builder() .header("input_header") .whitelist_type("Distinct_Type") .whitelist_function("safe_function"); let command_line_flags = bindings.command_line_flags(); let test_cases = vec![ "--rust-target", "input_header", "--no-derive-default", "--generate", "function,types,vars,methods,constructors,destructors", "--whitelist-type", "Distinct_Type", "--whitelist-function", "safe_function", ].iter() .map(|&x| x.into()) .collect::<Vec<String>>(); println!("{:?}", command_line_flags); assert!(test_cases.iter().all( |ref x| command_line_flags.contains(x), )); }
32.110334
98
0.572119
8973f30d6b2d75ad81661b3c28517f84802528c0
18,699
use std::sync::{Arc, Mutex, RwLock}; use std::sync::mpsc::{Sender, Receiver, channel}; use std::io::{Write, stdin}; use termion::event::{Event, Key, MouseEvent}; use termion::input::TermRead; use async_value::{Async, Stale}; use crate::coordinates::{Coordinates, Position, Size}; use crate::fail::{HResult, HError, ErrorLog}; use crate::minibuffer::MiniBuffer; use crate::term; use crate::term::{Screen, ScreenExt}; use crate::dirty::{Dirtyable, DirtyBit}; use crate::signal_notify::{notify, Signal}; use crate::config::Config; #[derive(Debug)] pub enum Events { InputEvent(Event), WidgetReady, TerminalResized, InputUpdated(String), ExclusiveEvent(Option<Mutex<Option<Sender<Events>>>>), InputEnabled(bool), RequestInput, Status(String), ConfigLoaded, } impl PartialEq for WidgetCore { fn eq(&self, other: &WidgetCore) -> bool { if self.coordinates == other.coordinates { true } else { false } } } impl std::fmt::Debug for WidgetCore { fn fmt(&self, formatter: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> { let output = format!("{:?}{:?}{:?}", self.coordinates, self.minibuffer, self.status_bar_content); formatter.write_str(&output) } } #[derive(Clone)] pub struct WidgetCore { pub screen: Screen, pub coordinates: Coordinates, pub minibuffer: Arc<Mutex<Option<MiniBuffer>>>, pub event_sender: Arc<Mutex<Sender<Events>>>, event_receiver: Arc<Mutex<Option<Receiver<Events>>>>, pub status_bar_content: Arc<Mutex<Option<String>>>, term_size: (usize, usize), dirty: DirtyBit, pub config: Arc<RwLock<Async<Config>>> } impl WidgetCore { pub fn new() -> HResult<WidgetCore> { let screen = Screen::new()?; let (xsize, ysize) = screen.size()?; let coords = Coordinates::new_at(term::xsize(), term::ysize() - 2, 1, 2); let (sender, receiver) = channel(); let status_bar_content = Arc::new(Mutex::new(None)); let mut config = Async::new(move |_| Ok(Config::load()?)); let confsender = sender.clone(); config.on_ready(move |_, _| { confsender.send(Events::ConfigLoaded).ok(); Ok(()) }).log(); 
config.run().log(); let core = WidgetCore { screen: screen, coordinates: coords, minibuffer: Arc::new(Mutex::new(None)), event_sender: Arc::new(Mutex::new(sender)), event_receiver: Arc::new(Mutex::new(Some(receiver))), status_bar_content: status_bar_content, term_size: (xsize, ysize), dirty: DirtyBit::new(), config: Arc::new(RwLock::new(config)) }; let minibuffer = MiniBuffer::new(&core); *core.minibuffer.lock().unwrap() = Some(minibuffer); Ok(core) } pub fn get_sender(&self) -> Sender<Events> { self.event_sender.lock().unwrap().clone() } pub fn draw_status(&self) -> HResult<()> { let xsize = term::xsize_u(); let status = match self.status_bar_content.lock()?.as_ref() { Some(status) => status.to_string(), None => "".to_string(), }; let sized_status = term::sized_string_u(&status, xsize); self.write_to_screen( &format!( "{}{}{}", term::move_bottom(), term::status_bg(), sized_status )).log(); Ok(()) } pub fn show_status(&self, status: &str) -> HResult<()> { HError::log::<()>(status).ok(); { let mut status_content = self.status_bar_content.lock()?; *status_content = Some(status.to_string()); } self.draw_status()?; Ok(()) } pub fn clear_status(&self) -> HResult<()> { if self.status_bar_content.lock()?.take().is_some() { self.draw_status().log(); } Ok(()) } pub fn minibuffer(&self, query: &str) -> HResult<String> { let answer = self.minibuffer .lock()? .as_mut()? .query(query, false); let mut screen = self.screen()?; screen.cursor_hide().log(); answer } pub fn minibuffer_continuous(&self, query: &str) -> HResult<String> { let answer = self.minibuffer .lock()? .as_mut()? 
.query(query, true); let mut screen = self.screen()?; screen.cursor_hide().log(); answer } pub fn screen(&self) -> HResult<Screen> { Ok(self.screen.clone()) } pub fn clear(&self) -> HResult<()> { let clearlist = self.get_clearlist()?; self.write_to_screen(&clearlist) } pub fn get_clearlist(&self) -> HResult<String> { let (xpos, ypos) = self.coordinates.u16position(); let (xsize, ysize) = self.coordinates.u16size(); let endpos = ypos + ysize; Ok((ypos..endpos) .map(|line| { format!( "{}{}{:xsize$}", crate::term::reset(), crate::term::goto_xy(xpos, line), " ", xsize = xsize as usize ) }) .collect()) } pub fn write_to_screen(&self, s: &str) -> HResult<()> { let mut screen = self.screen()?; screen.write_str(s) } pub fn config(&self) -> Config { self.get_conf() .unwrap_or_else(|_| Config::new()) } fn get_conf(&self) -> HResult<Config> { let conf = self.config .read()? .get()? .clone(); Ok(conf) } } impl Dirtyable for WidgetCore { fn is_dirty(&self) -> bool { self.dirty.is_dirty() } fn set_dirty(&mut self) { self.dirty.set_dirty(); } fn set_clean(&mut self) { self.dirty.set_clean(); } } pub trait Widget { fn get_core(&self) -> HResult<&WidgetCore>; // { // Err(HError::NoWidgetCoreError(Backtrace::new())) // } fn get_core_mut(&mut self) -> HResult<&mut WidgetCore> ;// { // Err(HError::NoWidgetCoreError(Backtrace::new())) // } fn get_coordinates(&self) -> HResult<&Coordinates> { Ok(&self.get_core()?.coordinates) } fn set_coordinates(&mut self, coordinates: &Coordinates) -> HResult<()> { let core = &mut self.get_core_mut()?; if &core.coordinates != coordinates { core.coordinates = coordinates.clone(); core.set_dirty(); } Ok(()) } fn render_header(&self) -> HResult<String> { Err(HError::NoHeaderError) } fn render_footer(&self) -> HResult<String> { Err(HError::NoHeaderError) } fn refresh(&mut self) -> HResult<()>; fn get_drawlist(&self) -> HResult<String>; fn after_draw(&self) -> HResult<()> { Ok(()) } fn config_loaded(&mut self) -> HResult<()> { Ok(()) } fn on_event(&mut 
self, event: Event) -> HResult<()> { self.get_core()?.clear_status().log(); match event { Event::Key(key) => self.on_key(key), Event::Mouse(button) => self.on_mouse(button), Event::Unsupported(wtf) => self.on_wtf(wtf), } } fn on_key(&mut self, key: Key) -> HResult<()> { match key { _ => { self.bad(Event::Key(key))? }, } Ok(()) } fn on_mouse(&mut self, event: MouseEvent) -> HResult<()> { match event { _ => { self.bad(Event::Mouse(event)).unwrap() }, } Ok(()) } fn on_wtf(&mut self, event: Vec<u8>) -> HResult<()> { match event { _ => { self.bad(Event::Unsupported(event)).unwrap() }, } Ok(()) } fn bad(&mut self, event: Event) -> HResult<()> { self.get_core()?.show_status(&format!("Stop it!! {:?} does nothing!", event)).log(); if let Event::Key(key) = event { HError::undefined_key(key) } else { Ok(()) } } fn get_header_drawlist(&mut self) -> HResult<String> { Ok(format!( "{}{}{:xsize$}{}{}", crate::term::goto_xy(1, 1), crate::term::header_color(), " ", crate::term::goto_xy(1, 1), self.render_header()?, xsize = self.get_coordinates()?.xsize() as usize )) } fn get_footer_drawlist(&mut self) -> HResult<String> { let xsize = self.get_coordinates()?.xsize(); let ypos = crate::term::ysize(); Ok(format!( "{}{}{:xsize$}{}{}", crate::term::goto_xy(1, ypos), crate::term::header_color(), " ", crate::term::goto_xy(1, ypos), self.render_footer()?, xsize = xsize as usize)) } fn get_redraw_empty_list(&self, lines: usize) -> HResult<String> { let (xpos, ypos) = self.get_coordinates()?.u16position(); let (xsize, ysize) = self.get_coordinates()?.u16size(); let start_y = lines + ypos as usize; Ok((start_y..(ysize + 2) as usize) .map(|i| { format!( "{}{:xsize$}", crate::term::goto_xy(xpos, i as u16), " ", xsize = xsize as usize ) }) .collect()) } fn popup(&mut self) -> HResult<()> { // Image will draw over widget otherwise if self.get_core()?.config().graphics == "kitty" { let ypos = self.get_coordinates()?.ypos(); print!("\x1b_Ga=d,d=y,y={}\x1b\\", ypos+1); } let result = 
self.run_widget(); self.get_core()?.clear().log(); self.get_core()?.get_sender().send(Events::ExclusiveEvent(None))?; result } fn popup_finnished(&self) -> HResult<()> { HError::popup_finnished() } fn run_widget(&mut self) -> HResult<()> { let (tx_event, rx_event) = channel(); self.get_core()? .get_sender() .send(Events::ExclusiveEvent(Some(Mutex::new(Some(tx_event)))))?; self.get_core()?.get_sender().send(Events::RequestInput)?; self.get_core()?.clear()?; self.refresh().log(); self.draw()?; for event in rx_event.iter() { match event { Events::InputEvent(input) => { match self.on_event(input) { err @ Err(HError::PopupFinnished) | err @ Err(HError::Quit) | err @ Err(HError::MiniBufferCancelledInput) => err?, err @ Err(HError::MiniBufferInputUpdated(_)) => err?, err @ Err(HError::WidgetResizedError) => err?, err @ Err(_) => err.log(), Ok(_) => {} } self.get_core()?.get_sender().send(Events::RequestInput)?; } Events::WidgetReady => { self.refresh().log(); self.draw().log(); } Events::Status(status) => { self.get_core()?.show_status(&status).log(); } Events::TerminalResized => { self.get_core()?.screen()?.clear().log(); match self.resize() { err @ Err(HError::TerminalResizedError) => err?, _ => {} } } Events::InputUpdated(input) => { HError::input_updated(input)? 
} Events::ConfigLoaded => { self.get_core_mut()?.config.write()?.pull_async()?; } _ => {} } self.refresh().log(); self.draw().log(); self.after_draw().log(); } Ok(()) } fn animate_slide_up(&mut self, animator: Option<&Stale>) -> HResult<()> { if !self.get_core()?.config().animate() { return Ok(()); } let coords = self.get_coordinates()?.clone(); let xpos = coords.position().x(); let ypos = coords.position().y(); let xsize = coords.xsize(); let ysize = coords.ysize(); let clear = self.get_core()?.get_clearlist()?; let animation_hz = self.get_core()?.config().animation_refresh_frequency as u64; let pause_millis = 1000/animation_hz; const ANIMATION_DURATION_MILLIS: u64 = 64; let number_of_frames= (ANIMATION_DURATION_MILLIS/pause_millis) as u16; let pause = std::time::Duration::from_millis(pause_millis); if let Some(ref animator) = animator { if animator.is_stale()? { return Ok(()) } } self.get_core()?.write_to_screen(&clear).log(); for i in (0..number_of_frames).rev() { if let Some(ref animator) = animator { if animator.is_stale()? { self.set_coordinates(&coords).log(); return Ok(()) } } let ani_coords = Coordinates { size: Size((xsize,ysize-i)), position: Position ((xpos, ypos+i)) }; self.set_coordinates(&ani_coords).log(); let buffer = self.get_drawlist()?; if !animator.as_ref()?.is_stale()? 
{ self.get_core()?.write_to_screen(&buffer).log(); } std::thread::sleep(pause); } Ok(()) } fn draw(&mut self) -> HResult<()> { let output = self.get_drawlist().unwrap_or("".to_string()) + &self.get_header_drawlist().unwrap_or("".to_string()) + &self.get_footer_drawlist().unwrap_or("".to_string()); self.get_core()?.write_to_screen(&output).log(); self.get_core()?.screen()?.flush().ok(); Ok(()) } fn handle_input(&mut self) -> HResult<()> { let (tx_internal_event, rx_internal_event) = channel(); let rx_global_event = self.get_core()?.event_receiver.lock()?.take()?; dispatch_events(tx_internal_event, rx_global_event, self.get_core()?.screen()?); for event in rx_internal_event.iter() { match event { Events::InputEvent(event) => { match self.on_event(event) { Err(HError::Quit) => { HError::quit()?; }, _ => {} } self.get_core()?.get_sender().send(Events::RequestInput)?; } Events::Status(status) => { self.get_core()?.show_status(&status).log(); } Events::TerminalResized => { self.get_core()?.screen()?.clear().log(); } Events::ConfigLoaded => { self.get_core_mut()?.config.write()?.pull_async().ok(); self.config_loaded().log(); } _ => {} } self.resize().log(); if self.get_core()?.screen()?.is_resized()? 
{ self.get_core()?.screen()?.take_size().ok(); } self.refresh().ok(); self.draw().ok(); } Ok(()) } fn resize(&mut self) -> HResult<()> { if let Ok(true) = self.get_core()?.screen()?.is_resized() { let (xsize, ysize) = self.get_core()?.screen()?.get_size()?; let mut coords = self.get_core()?.coordinates.clone(); coords.set_size_u(xsize, ysize-2); self.set_coordinates(&coords)?; } Ok(()) } } fn dispatch_events(tx_internal: Sender<Events>, rx_global: Receiver<Events>, screen: Screen) { let (tx_event, rx_event) = channel(); let (tx_input_req, rx_input_req) = channel(); input_thread(tx_event.clone(), rx_input_req); event_thread(rx_global, tx_event.clone()); signal_thread(tx_event.clone()); std::thread::spawn(move || { let mut tx_exclusive_event: Option<Sender<Events>> = None; let mut input_enabled = true; for event in rx_event.iter() { match &event { Events::ExclusiveEvent(tx_event) => { tx_exclusive_event = match tx_event { Some(locked_sender) => locked_sender.lock().unwrap().take(), None => None } } Events::InputEnabled(state) => { input_enabled = *state; continue; } Events::RequestInput => { if input_enabled { tx_input_req.send(()).unwrap(); } continue; } Events::TerminalResized => { if let Ok(size) = term::size() { screen.set_size(size).log(); } } _ => {} } if let Some(tx_exclusive) = &tx_exclusive_event { tx_exclusive.send(event).ok(); } else { tx_internal.send(event).ok(); } } }); } fn event_thread(rx_global: Receiver<Events>, tx: Sender<Events>) { std::thread::spawn(move || { for event in rx_global.iter() { tx.send(event).unwrap(); } }); } fn input_thread(tx: Sender<Events>, rx_input_request: Receiver<()>) { std::thread::spawn(move || { for input in stdin().events() { input.map(|input| { tx.send(Events::InputEvent(input)).unwrap(); rx_input_request.recv().unwrap(); term::flush_stdin(); }).map_err(|e| HError::from(e)).log(); } }); } fn signal_thread(tx: Sender<Events>) { std::thread::spawn(move || { let rx = notify(&[Signal::WINCH]); for _ in rx.iter() { 
tx.send(Events::TerminalResized).unwrap(); } }); }
31.747029
88
0.487459
14392c2bab6e078623ee34d200969afbc3bfae45
276
extern crate tar; use std::fs::File; use tar::Builder; fn main() { let file = File::create("foo.tar").unwrap(); let mut a = Builder::new(file); a.append_path("README.md").unwrap(); a.append_file("lib.rs", &mut File::open("src/lib.rs").unwrap()).unwrap(); }
21.230769
77
0.612319
2f1461db4b9c55261c23a99d0656ee407dc39605
1,946
use crate::{radio::StyleSheet, Primitive, Renderer}; use iced_native::{mouse, radio, Background, Color, Rectangle}; const SIZE: f32 = 28.0; const DOT_SIZE: f32 = SIZE / 2.0; impl radio::Renderer for Renderer { type Style = Box<dyn StyleSheet>; fn default_size(&self) -> u32 { SIZE as u32 } fn draw( &mut self, bounds: Rectangle, is_selected: bool, is_mouse_over: bool, (label, _): Self::Output, style_sheet: &Self::Style, ) -> Self::Output { let style = if is_mouse_over { style_sheet.hovered() } else { style_sheet.active() }; let radio = Primitive::Quad { bounds, background: style.background, border_radius: (SIZE / 2.0) as u16, border_width: style.border_width, border_color: style.border_color, }; ( Primitive::Group { primitives: if is_selected { let radio_circle = Primitive::Quad { bounds: Rectangle { x: bounds.x + DOT_SIZE / 2.0, y: bounds.y + DOT_SIZE / 2.0, width: bounds.width - DOT_SIZE, height: bounds.height - DOT_SIZE, }, background: Background::Color(style.dot_color), border_radius: (DOT_SIZE / 2.0) as u16, border_width: 0, border_color: Color::TRANSPARENT, }; vec![radio, radio_circle, label] } else { vec![radio, label] }, }, if is_mouse_over { mouse::Interaction::Pointer } else { mouse::Interaction::default() }, ) } }
29.938462
71
0.457862
f5819b16ba62d3c161d240be3869f720a6735ab1
1,067
// Copyright 2022 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use {argh::FromArgs, ffx_core::ffx_command}; #[ffx_command()] #[derive(FromArgs, Debug, PartialEq)] #[argh( subcommand, name = "doctor", description = "Perform diagnostic checks on a component at runtime.", example = "To run diagnostics: $ ffx component doctor /core/appmgr This will run the following checks: * Verify that the component's lists of outgoing and exposed capabilities match", note = "When using the `doctor` subcommand, the following diagnostic checks are ran on a component: 1- Check that the lists of `outgoing` and `exposed` capabilities match All the checks are ran consecutively. In the case of a check failing, the following checks WILL be ran and the command will return an error code afterwards. " )] pub struct DoctorCommand { #[argh(positional)] /// the component's moniker. Example: `/core/appmgr`. pub moniker: Vec<String>, }
33.34375
103
0.723524
90aa6c89e5844378c14aa9f7b7f1e73c4f08e679
1,453
use amethyst::derive::SystemDesc; use amethyst::ecs::{Join, Read, ReadStorage, System, SystemData, WriteStorage}; use amethyst::input::{InputHandler, StringBindings}; use crate::components::{ControlState, LocalPlayer}; #[derive(SystemDesc)] pub struct LocalPlayerSystem { jump_held: bool, } impl<'s> System<'s> for LocalPlayerSystem { type SystemData = ( ReadStorage<'s, LocalPlayer>, WriteStorage<'s, ControlState>, Read<'s, InputHandler<StringBindings>>, ); fn run(&mut self, (local_players, mut control_states, input): Self::SystemData) { for (_, control_state) in (&local_players, &mut control_states).join() { control_state.clear(); let jump = if input.action_is_down("jump").unwrap_or(false) { if self.jump_held { false } else { self.jump_held = true; true } } else { self.jump_held = false; false }; let up = input.action_is_down("up").unwrap_or(false); let down = input.action_is_down("down").unwrap_or(false); let left = input.action_is_down("left").unwrap_or(false); let right = input.action_is_down("right").unwrap_or(false); control_state.set_all( up && !down, down && !up, left && !right, right && !left, jump, ); break; } } } impl Default for LocalPlayerSystem { fn default() -> Self { Self { jump_held: false } } }
25.051724
83
0.616655
db7481f136c3bff6db1bfdbf41c4605ad676a1f8
6,251
#![allow(unused_imports)] use super::*; use wasm_bindgen::prelude::*; #[wasm_bindgen] extern "C" { # [ wasm_bindgen ( extends = :: js_sys :: Object , js_name = FakePluginTagInit ) ] #[derive(Debug, Clone, PartialEq, Eq)] #[doc = "The `FakePluginTagInit` dictionary."] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `FakePluginTagInit`*"] pub type FakePluginTagInit; } impl FakePluginTagInit { #[doc = "Construct a new `FakePluginTagInit`."] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `FakePluginTagInit`*"] pub fn new(handler_uri: &str, mime_entries: &::wasm_bindgen::JsValue) -> Self { #[allow(unused_mut)] let mut ret: Self = ::wasm_bindgen::JsCast::unchecked_into(::js_sys::Object::new()); ret.handler_uri(handler_uri); ret.mime_entries(mime_entries); ret } #[doc = "Change the `description` field of this object."] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `FakePluginTagInit`*"] pub fn description(&mut self, val: &str) -> &mut Self { use wasm_bindgen::JsValue; let r = ::js_sys::Reflect::set( self.as_ref(), &JsValue::from("description"), &JsValue::from(val), ); debug_assert!( r.is_ok(), "setting properties should never fail on our dictionary objects" ); let _ = r; self } #[doc = "Change the `fileName` field of this object."] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `FakePluginTagInit`*"] pub fn file_name(&mut self, val: &str) -> &mut Self { use wasm_bindgen::JsValue; let r = ::js_sys::Reflect::set( self.as_ref(), &JsValue::from("fileName"), &JsValue::from(val), ); debug_assert!( r.is_ok(), "setting properties should never fail on our dictionary objects" ); let _ = r; self } #[doc = "Change the `fullPath` field of this object."] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `FakePluginTagInit`*"] pub fn full_path(&mut self, val: &str) -> &mut Self { use 
wasm_bindgen::JsValue; let r = ::js_sys::Reflect::set( self.as_ref(), &JsValue::from("fullPath"), &JsValue::from(val), ); debug_assert!( r.is_ok(), "setting properties should never fail on our dictionary objects" ); let _ = r; self } #[doc = "Change the `handlerURI` field of this object."] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `FakePluginTagInit`*"] pub fn handler_uri(&mut self, val: &str) -> &mut Self { use wasm_bindgen::JsValue; let r = ::js_sys::Reflect::set( self.as_ref(), &JsValue::from("handlerURI"), &JsValue::from(val), ); debug_assert!( r.is_ok(), "setting properties should never fail on our dictionary objects" ); let _ = r; self } #[doc = "Change the `mimeEntries` field of this object."] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `FakePluginTagInit`*"] pub fn mime_entries(&mut self, val: &::wasm_bindgen::JsValue) -> &mut Self { use wasm_bindgen::JsValue; let r = ::js_sys::Reflect::set( self.as_ref(), &JsValue::from("mimeEntries"), &JsValue::from(val), ); debug_assert!( r.is_ok(), "setting properties should never fail on our dictionary objects" ); let _ = r; self } #[doc = "Change the `name` field of this object."] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `FakePluginTagInit`*"] pub fn name(&mut self, val: &str) -> &mut Self { use wasm_bindgen::JsValue; let r = ::js_sys::Reflect::set(self.as_ref(), &JsValue::from("name"), &JsValue::from(val)); debug_assert!( r.is_ok(), "setting properties should never fail on our dictionary objects" ); let _ = r; self } #[doc = "Change the `niceName` field of this object."] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `FakePluginTagInit`*"] pub fn nice_name(&mut self, val: &str) -> &mut Self { use wasm_bindgen::JsValue; let r = ::js_sys::Reflect::set( self.as_ref(), &JsValue::from("niceName"), &JsValue::from(val), ); debug_assert!( r.is_ok(), "setting 
properties should never fail on our dictionary objects" ); let _ = r; self } #[doc = "Change the `sandboxScript` field of this object."] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `FakePluginTagInit`*"] pub fn sandbox_script(&mut self, val: &str) -> &mut Self { use wasm_bindgen::JsValue; let r = ::js_sys::Reflect::set( self.as_ref(), &JsValue::from("sandboxScript"), &JsValue::from(val), ); debug_assert!( r.is_ok(), "setting properties should never fail on our dictionary objects" ); let _ = r; self } #[doc = "Change the `version` field of this object."] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `FakePluginTagInit`*"] pub fn version(&mut self, val: &str) -> &mut Self { use wasm_bindgen::JsValue; let r = ::js_sys::Reflect::set( self.as_ref(), &JsValue::from("version"), &JsValue::from(val), ); debug_assert!( r.is_ok(), "setting properties should never fail on our dictionary objects" ); let _ = r; self } }
35.925287
100
0.552552
39019eea401995c1f9bac397c609c62205284599
26,743
#[doc = r" Value read from the register"] pub struct R { bits: u32, } #[doc = r" Value to write to the register"] pub struct W { bits: u32, } impl super::DMASR { #[doc = r" Modifies the contents of the register"] #[inline] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); let r = R { bits: bits }; let mut w = W { bits: bits }; f(&r, &mut w); self.register.set(w.bits); } #[doc = r" Reads the contents of the register"] #[inline] pub fn read(&self) -> R { R { bits: self.register.get(), } } #[doc = r" Writes to the register"] #[inline] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } #[doc = r" Writes the reset value to the register"] #[inline] pub fn reset(&self) { self.write(|w| w) } } #[doc = r" Value of the field"] pub struct TSR { bits: bool, } impl TSR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct TPSSR { bits: bool, } impl TPSSR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct TBUSR { bits: bool, } impl TBUSR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn 
bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct TJTSR { bits: bool, } impl TJTSR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct ROSR { bits: bool, } impl ROSR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct TUSR { bits: bool, } impl TUSR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct RSR { bits: bool, } impl RSR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct RBUSR { bits: bool, } impl RBUSR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { 
self.bit() } } #[doc = r" Value of the field"] pub struct RPSSR { bits: bool, } impl RPSSR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct PWTSR { bits: bool, } impl PWTSR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct ETSR { bits: bool, } impl ETSR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct FBESR { bits: bool, } impl FBESR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct ERSR { bits: bool, } impl ERSR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" 
Value of the field"] pub struct AISR { bits: bool, } impl AISR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct NISR { bits: bool, } impl NISR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct RPSR { bits: u8, } impl RPSR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { self.bits } } #[doc = r" Value of the field"] pub struct TPSR { bits: u8, } impl TPSR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { self.bits } } #[doc = r" Value of the field"] pub struct EBSR { bits: u8, } impl EBSR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { self.bits } } #[doc = r" Value of the field"] pub struct MMCSR { bits: bool, } impl MMCSR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct PMTSR { bits: bool, } impl PMTSR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit 
is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct TSTSR { bits: bool, } impl TSTSR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Proxy"] pub struct _TSW<'a> { w: &'a mut W, } impl<'a> _TSW<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 0; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _TPSSW<'a> { w: &'a mut W, } impl<'a> _TPSSW<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 1; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _TBUSW<'a> { w: &'a mut W, } impl<'a> _TBUSW<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 2; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub 
struct _TJTSW<'a> { w: &'a mut W, } impl<'a> _TJTSW<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 3; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _ROSW<'a> { w: &'a mut W, } impl<'a> _ROSW<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 4; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _TUSW<'a> { w: &'a mut W, } impl<'a> _TUSW<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 5; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _RSW<'a> { w: &'a mut W, } impl<'a> _RSW<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 6; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" 
Proxy"] pub struct _RBUSW<'a> { w: &'a mut W, } impl<'a> _RBUSW<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 7; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _RPSSW<'a> { w: &'a mut W, } impl<'a> _RPSSW<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 8; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _PWTSW<'a> { w: &'a mut W, } impl<'a> _PWTSW<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 9; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _ETSW<'a> { w: &'a mut W, } impl<'a> _ETSW<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 10; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w 
} } #[doc = r" Proxy"] pub struct _FBESW<'a> { w: &'a mut W, } impl<'a> _FBESW<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 13; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _ERSW<'a> { w: &'a mut W, } impl<'a> _ERSW<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 14; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _AISW<'a> { w: &'a mut W, } impl<'a> _AISW<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 15; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _NISW<'a> { w: &'a mut W, } impl<'a> _NISW<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 16; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << 
OFFSET; self.w } } impl R { #[doc = r" Value of the register as raw bits"] #[inline] pub fn bits(&self) -> u32 { self.bits } #[doc = "Bit 0 - TS"] #[inline] pub fn ts(&self) -> TSR { let bits = { const MASK: bool = true; const OFFSET: u8 = 0; ((self.bits >> OFFSET) & MASK as u32) != 0 }; TSR { bits } } #[doc = "Bit 1 - TPSS"] #[inline] pub fn tpss(&self) -> TPSSR { let bits = { const MASK: bool = true; const OFFSET: u8 = 1; ((self.bits >> OFFSET) & MASK as u32) != 0 }; TPSSR { bits } } #[doc = "Bit 2 - TBUS"] #[inline] pub fn tbus(&self) -> TBUSR { let bits = { const MASK: bool = true; const OFFSET: u8 = 2; ((self.bits >> OFFSET) & MASK as u32) != 0 }; TBUSR { bits } } #[doc = "Bit 3 - TJTS"] #[inline] pub fn tjts(&self) -> TJTSR { let bits = { const MASK: bool = true; const OFFSET: u8 = 3; ((self.bits >> OFFSET) & MASK as u32) != 0 }; TJTSR { bits } } #[doc = "Bit 4 - ROS"] #[inline] pub fn ros(&self) -> ROSR { let bits = { const MASK: bool = true; const OFFSET: u8 = 4; ((self.bits >> OFFSET) & MASK as u32) != 0 }; ROSR { bits } } #[doc = "Bit 5 - TUS"] #[inline] pub fn tus(&self) -> TUSR { let bits = { const MASK: bool = true; const OFFSET: u8 = 5; ((self.bits >> OFFSET) & MASK as u32) != 0 }; TUSR { bits } } #[doc = "Bit 6 - RS"] #[inline] pub fn rs(&self) -> RSR { let bits = { const MASK: bool = true; const OFFSET: u8 = 6; ((self.bits >> OFFSET) & MASK as u32) != 0 }; RSR { bits } } #[doc = "Bit 7 - RBUS"] #[inline] pub fn rbus(&self) -> RBUSR { let bits = { const MASK: bool = true; const OFFSET: u8 = 7; ((self.bits >> OFFSET) & MASK as u32) != 0 }; RBUSR { bits } } #[doc = "Bit 8 - RPSS"] #[inline] pub fn rpss(&self) -> RPSSR { let bits = { const MASK: bool = true; const OFFSET: u8 = 8; ((self.bits >> OFFSET) & MASK as u32) != 0 }; RPSSR { bits } } #[doc = "Bit 9 - PWTS"] #[inline] pub fn pwts(&self) -> PWTSR { let bits = { const MASK: bool = true; const OFFSET: u8 = 9; ((self.bits >> OFFSET) & MASK as u32) != 0 }; PWTSR { bits } } #[doc = "Bit 10 - ETS"] 
#[inline] pub fn ets(&self) -> ETSR { let bits = { const MASK: bool = true; const OFFSET: u8 = 10; ((self.bits >> OFFSET) & MASK as u32) != 0 }; ETSR { bits } } #[doc = "Bit 13 - FBES"] #[inline] pub fn fbes(&self) -> FBESR { let bits = { const MASK: bool = true; const OFFSET: u8 = 13; ((self.bits >> OFFSET) & MASK as u32) != 0 }; FBESR { bits } } #[doc = "Bit 14 - ERS"] #[inline] pub fn ers(&self) -> ERSR { let bits = { const MASK: bool = true; const OFFSET: u8 = 14; ((self.bits >> OFFSET) & MASK as u32) != 0 }; ERSR { bits } } #[doc = "Bit 15 - AIS"] #[inline] pub fn ais(&self) -> AISR { let bits = { const MASK: bool = true; const OFFSET: u8 = 15; ((self.bits >> OFFSET) & MASK as u32) != 0 }; AISR { bits } } #[doc = "Bit 16 - NIS"] #[inline] pub fn nis(&self) -> NISR { let bits = { const MASK: bool = true; const OFFSET: u8 = 16; ((self.bits >> OFFSET) & MASK as u32) != 0 }; NISR { bits } } #[doc = "Bits 17:19 - RPS"] #[inline] pub fn rps(&self) -> RPSR { let bits = { const MASK: u8 = 7; const OFFSET: u8 = 17; ((self.bits >> OFFSET) & MASK as u32) as u8 }; RPSR { bits } } #[doc = "Bits 20:22 - TPS"] #[inline] pub fn tps(&self) -> TPSR { let bits = { const MASK: u8 = 7; const OFFSET: u8 = 20; ((self.bits >> OFFSET) & MASK as u32) as u8 }; TPSR { bits } } #[doc = "Bits 23:25 - EBS"] #[inline] pub fn ebs(&self) -> EBSR { let bits = { const MASK: u8 = 7; const OFFSET: u8 = 23; ((self.bits >> OFFSET) & MASK as u32) as u8 }; EBSR { bits } } #[doc = "Bit 27 - MMCS"] #[inline] pub fn mmcs(&self) -> MMCSR { let bits = { const MASK: bool = true; const OFFSET: u8 = 27; ((self.bits >> OFFSET) & MASK as u32) != 0 }; MMCSR { bits } } #[doc = "Bit 28 - PMTS"] #[inline] pub fn pmts(&self) -> PMTSR { let bits = { const MASK: bool = true; const OFFSET: u8 = 28; ((self.bits >> OFFSET) & MASK as u32) != 0 }; PMTSR { bits } } #[doc = "Bit 29 - TSTS"] #[inline] pub fn tsts(&self) -> TSTSR { let bits = { const MASK: bool = true; const OFFSET: u8 = 29; ((self.bits >> OFFSET) & MASK as 
u32) != 0 }; TSTSR { bits } } } impl W { #[doc = r" Reset value of the register"] #[inline] pub fn reset_value() -> W { W { bits: 0 } } #[doc = r" Writes raw bits to the register"] #[inline] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } #[doc = "Bit 0 - TS"] #[inline] pub fn ts(&mut self) -> _TSW { _TSW { w: self } } #[doc = "Bit 1 - TPSS"] #[inline] pub fn tpss(&mut self) -> _TPSSW { _TPSSW { w: self } } #[doc = "Bit 2 - TBUS"] #[inline] pub fn tbus(&mut self) -> _TBUSW { _TBUSW { w: self } } #[doc = "Bit 3 - TJTS"] #[inline] pub fn tjts(&mut self) -> _TJTSW { _TJTSW { w: self } } #[doc = "Bit 4 - ROS"] #[inline] pub fn ros(&mut self) -> _ROSW { _ROSW { w: self } } #[doc = "Bit 5 - TUS"] #[inline] pub fn tus(&mut self) -> _TUSW { _TUSW { w: self } } #[doc = "Bit 6 - RS"] #[inline] pub fn rs(&mut self) -> _RSW { _RSW { w: self } } #[doc = "Bit 7 - RBUS"] #[inline] pub fn rbus(&mut self) -> _RBUSW { _RBUSW { w: self } } #[doc = "Bit 8 - RPSS"] #[inline] pub fn rpss(&mut self) -> _RPSSW { _RPSSW { w: self } } #[doc = "Bit 9 - PWTS"] #[inline] pub fn pwts(&mut self) -> _PWTSW { _PWTSW { w: self } } #[doc = "Bit 10 - ETS"] #[inline] pub fn ets(&mut self) -> _ETSW { _ETSW { w: self } } #[doc = "Bit 13 - FBES"] #[inline] pub fn fbes(&mut self) -> _FBESW { _FBESW { w: self } } #[doc = "Bit 14 - ERS"] #[inline] pub fn ers(&mut self) -> _ERSW { _ERSW { w: self } } #[doc = "Bit 15 - AIS"] #[inline] pub fn ais(&mut self) -> _AISW { _AISW { w: self } } #[doc = "Bit 16 - NIS"] #[inline] pub fn nis(&mut self) -> _NISW { _NISW { w: self } } }
24.179928
59
0.476685
56fda0a7d2fb89807c9493307b26d869f365ef0f
7,730
use crate::{loans::Timestamp, rate_model::*};
use codec::Codec;
use frame_support::{pallet_prelude::*, sp_runtime::Perquintill, sp_std::vec::Vec};
use scale_info::TypeInfo;
use sp_runtime::Percent;

/// Amount of collateral, expressed in the `Lending` implementation's balance type.
pub type CollateralLpAmountOf<T> = <T as Lending>::Balance;
/// Amount of the borrow asset, expressed in the `Lending` implementation's balance type.
pub type BorrowAmountOf<T> = <T as Lending>::Balance;

/// Parameters supplied by the caller when creating a new market.
#[derive(Encode, Decode, Default, TypeInfo)]
pub struct MarketConfigInput<AccountId>
where
    AccountId: core::cmp::Ord,
{
    // NOTE(review): exact semantics of this fraction (reserve factor?) are not
    // visible here — confirm against the pallet implementation.
    pub reserved: Perquintill,
    /// Manager account; can pause borrow & deposits of assets.
    pub manager: AccountId,
    /// Collateral factor used in the borrow-limit / withdrawal formulas
    /// (see `withdraw_collateral` and `get_borrow_limit`).
    pub collateral_factor: NormalizedCollateralFactor,
    /// Percent threshold for warning about under-collateralized positions.
    pub under_collaterized_warn_percent: Percent,
}

/// Fully-resolved configuration stored for an existing market.
#[derive(Encode, Decode, Default, TypeInfo)]
pub struct MarketConfig<VaultId, AssetId, AccountId> {
    pub manager: AccountId,
    /// Vault holding the borrowable asset.
    pub borrow: VaultId,
    /// Asset accepted as collateral.
    pub collateral: AssetId,
    pub collateral_factor: NormalizedCollateralFactor,
    pub interest_rate_model: InterestRateModel,
    pub under_collaterized_warn_percent: Percent,
}

/// Basic lending with no wrapper (liquidity) token of its own.
/// Users deposit borrow and collateral assets via `Vault`.
/// `Liquidation` is a separate trait.
/// Based on Blacksmith (Warp v2) IBSLendingPair.sol and Parallel Finance.
/// Fees are withdrawn to the vault.
/// Lenders are rewarded via the vault.
pub trait Lending {
    type AssetId;
    type VaultId: Codec;
    /// (deposit VaultId, collateral VaultId) <-> MarketId
    type MarketId: Codec;
    type AccountId: core::cmp::Ord + Codec;
    type Balance;
    type BlockNumber;

    /// Generates the underlying owned vault that will hold borrowable asset (may be shared with
    /// specific set of defined collaterals). Creates market for new pair in specified vault. If
    /// market exists under specified manager, updates its parameters.
    /// `deposit` - asset users want to borrow. `collateral` - asset users will put as collateral.
    /// ```svgbob
    /// -----------
    /// | vault | I
    /// -----------
    /// |
    /// -------------
    /// | strategy | P
    /// -------------
    /// | M
    /// | -------------------
    /// | | --------- |
    /// -----------------------> | | |
    /// | | vault | |
    /// -----------------------> | | |
    /// | | --------- |
    /// | -------------------
    /// |
    /// -------------
    /// | strategy | Q
    /// -------------
    /// |
    /// ----------
    /// | vault | J
    /// ----------
    /// ```
    /// Let's assume a group of users X want to use a strategy P
    /// and a group of users Y want to use a strategy Q:
    /// Assuming both groups are interested in lending an asset A, they can create two vaults I and
    /// J. They would deposit in I and J, then set P and respectively Q as their strategy.
    /// Now imagine that our lending market M has a good APY, both strategy P and Q
    /// could decide to allocate a share for it, transferring from I and J to the borrow asset vault
    /// of M. Their allocated share could differ because of the strategies being different,
    /// but the lending Market would have all the lendable funds in a single vault.
    fn create(
        borrow_asset: Self::AssetId,
        collateral_asset_vault: Self::AssetId,
        config: MarketConfigInput<Self::AccountId>,
        interest_rate_model: &InterestRateModel,
    ) -> Result<(Self::MarketId, Self::VaultId), DispatchError>;

    /// AccountId of the market instance.
    fn account_id(market_id: &Self::MarketId) -> Self::AccountId;

    /// Deposit collateral in order to borrow.
    fn deposit_collateral(
        market_id: &Self::MarketId,
        account_id: &Self::AccountId,
        amount: CollateralLpAmountOf<Self>,
    ) -> Result<(), DispatchError>;

    /// Withdraw a part/total of previously deposited collateral.
    /// In practice, a user with outstanding borrows will not withdraw everything, because it
    /// would probably result in quick liquidation.
    /// ```python
    /// withdrawable = total_collateral - total_borrows
    /// withdrawable = collateral_balance * collateral_price - borrower_balance_with_interest *
    ///     borrow_price * collateral_factor
    /// ```
    fn withdraw_collateral(
        market_id: &Self::MarketId,
        account: &Self::AccountId,
        amount: CollateralLpAmountOf<Self>,
    ) -> Result<(), DispatchError>;

    /// Get all existing markets for current deposit.
    fn get_markets_for_borrow(vault: Self::VaultId) -> Vec<Self::MarketId>;

    #[allow(clippy::type_complexity)]
    fn get_all_markets(
    ) -> Vec<(Self::MarketId, MarketConfig<Self::VaultId, Self::AssetId, Self::AccountId>)>;

    /// `amount_to_borrow` is the amount of the borrow asset lending's vault shares the user wants
    /// to borrow. Normalizes amounts for calculations.
    /// Borrows as exact amount as possible with some inaccuracies for oracle price based
    /// normalization. If there is not enough collateral or borrow amounts - fails.
    fn borrow(
        market_id: &Self::MarketId,
        debt_owner: &Self::AccountId,
        amount_to_borrow: BorrowAmountOf<Self>,
    ) -> Result<(), DispatchError>;

    /// `from` repays some of `beneficiary` debts.
    /// - `market_id`: the market on which to be repaid.
    /// - `repay_amount`: the amount to be repaid in underlying.
    /// Interest will be repaid first and then remaining amount from `repay_amount` will be used to
    /// repay principal value.
    fn repay_borrow(
        market_id: &Self::MarketId,
        from: &Self::AccountId,
        beneficiary: &Self::AccountId,
        repay_amount: Option<BorrowAmountOf<Self>>,
    ) -> Result<(), DispatchError>;

    /// Total debt principals (does not include interest).
    fn total_borrows(market_id: &Self::MarketId) -> Result<Self::Balance, DispatchError>;

    /// Floored down to zero.
    fn total_interest(market_id: &Self::MarketId) -> Result<Self::Balance, DispatchError>;

    /// ```python
    /// delta_interest_rate = delta_time / period_interest_rate
    /// debt_delta = debt_principal * delta_interest_rate
    /// new_accrued_debt = accrued_debt + debt_delta
    /// total_debt = debt_principal + new_accrued_debt
    /// ```
    fn accrue_interest(market_id: &Self::MarketId, now: Timestamp) -> Result<(), DispatchError>;

    fn total_cash(market_id: &Self::MarketId) -> Result<Self::Balance, DispatchError>;

    /// utilization_ratio = total_borrows / (total_cash + total_borrows).
    /// Utilization ratio is 0 when there are no borrows.
    fn calc_utilization_ratio(
        cash: &Self::Balance,
        borrows: &Self::Balance,
    ) -> Result<Percent, DispatchError>;

    /// Borrow asset amount the account should repay to be debt free for a specific market pair.
    /// Calculate account's borrow balance using the borrow index at the start of block time.
    /// ```python
    /// new_borrow_balance = principal * (market_borrow_index / borrower_borrow_index)
    /// ```
    fn borrow_balance_current(
        market_id: &Self::MarketId,
        account: &Self::AccountId,
    ) -> Result<Option<BorrowAmountOf<Self>>, DispatchError>;

    fn collateral_of_account(
        market_id: &Self::MarketId,
        account: &Self::AccountId,
    ) -> Result<CollateralLpAmountOf<Self>, DispatchError>;

    /// Borrower shouldn't borrow more than his total collateral value.
    fn collateral_required(
        market_id: &Self::MarketId,
        borrow_amount: Self::Balance,
    ) -> Result<Self::Balance, DispatchError>;

    /// Returns the borrow limit for an account.
    /// Calculation uses indexes from start of block time.
    /// Depends on overall collateral put by user into vault.
    /// This borrow limit of a specific user depends only on prices and the user's collateral, not
    /// on the state of the vault.
    /// ```python
    /// collateral_balance * collateral_price / collateral_factor - borrower_balance_with_interest * borrow_price
    /// ```
    fn get_borrow_limit(
        market_id: &Self::MarketId,
        account: &Self::AccountId,
    ) -> Result<Self::Balance, DispatchError>;
}
38.65
110
0.67956
23efc6acc8de310175130c4b81e14eb3baa41efe
2,351
// Copyright 2020 Datafuse Labs. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use common_base::tokio; use common_exception::Result; use common_planners::*; use futures::TryStreamExt; use pretty_assertions::assert_eq; use crate::interpreters::*; use crate::sql::*; #[tokio::test] async fn interpreter_describe_table_test() -> Result<()> { let ctx = crate::tests::try_create_context()?; // Create table. { if let PlanNode::CreateTable(plan) = PlanParser::create(ctx.clone()) .build_from_sql("create table default.a(a bigint, b int, c varchar(255), d smallint, e Date ) Engine = Null")? { let executor = CreateTableInterpreter::try_create(ctx.clone(), plan.clone())?; let _ = executor.execute(None).await?; } } // describe table. { if let PlanNode::DescribeTable(plan) = PlanParser::create(ctx.clone()).build_from_sql("describe a")? { let executor = DescribeTableInterpreter::try_create(ctx.clone(), plan.clone())?; assert_eq!(executor.name(), "DescribeTableInterpreter"); let stream = executor.execute(None).await?; let result = stream.try_collect::<Vec<_>>().await?; let expected = vec![ "+-------+--------+------+", "| Field | Type | Null |", "+-------+--------+------+", "| a | Int64 | NO |", "| b | Int32 | NO |", "| c | String | NO |", "| d | Int16 | NO |", "| e | Date16 | NO |", "+-------+--------+------+", ]; common_datablocks::assert_blocks_sorted_eq(expected, result.as_slice()); } else { panic!() } } Ok(()) }
35.089552
122
0.556784
38598df93b11376ffe12151cc243d926723d468e
64
pub fn say_hello(name: &str) { println!("Hello, {name}"); }
16
30
0.578125
0316a66a7886b3b1dc596bd5b0dd42b4d212e5c5
1,181
use crate::props; use serde_derive::{Serialize, Deserialize}; #[derive(Serialize, Deserialize, Clone, Default, Eq, PartialEq, Debug)] #[serde(rename_all = "camelCase")] pub struct FactSet { #[serde(skip_serializing_if = "Vec::is_empty", default)] pub facts: Vec<Fact>, #[serde(skip_serializing_if = "Option::is_none", default)] pub id: Option<String>, #[serde(skip_serializing_if = "props::Spacing::is_default", default)] pub spacing: props::Spacing, #[serde(default)] pub seperator: bool, } fn print_fact_type<S>(_:&(), serializer:S) -> Result<S::Ok, S::Error> where S: serde::ser::Serializer { serializer.serialize_str("Fact") } #[derive(Serialize, Deserialize, Clone, Default, Eq, PartialEq, Debug)] #[serde(rename_all = "camelCase")] pub struct Fact { #[serde(rename="type", serialize_with = "print_fact_type", skip_deserializing)] _type: (), #[serde(default)] pub title: String, #[serde(default)] pub value: String, } impl Fact { pub fn new (title: &str, value: &str) -> Fact { Fact { title: title.into(), value: value.into(), _type: () } } }
24.604167
83
0.628281
0aeb24fc634b8d3bd860cd2ac3d29c3067daca48
2,603
use instruction_def::*; use test::run_test; use Operand::*; use Reg::*; use RegScale::*; use RegType::*; use {BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode}; #[test] fn sha256rnds2_1() { run_test( &Instruction { mnemonic: Mnemonic::SHA256RNDS2, operand1: Some(Direct(XMM0)), operand2: Some(Direct(XMM3)), operand3: Some(Direct(XMM0)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None, }, &[15, 56, 203, 195], OperandSize::Dword, ) } #[test] fn sha256rnds2_2() { run_test( &Instruction { mnemonic: Mnemonic::SHA256RNDS2, operand1: Some(Direct(XMM5)), operand2: Some(IndirectScaledIndexed( EDI, EDX, Eight, Some(OperandSize::Xmmword), None, )), operand3: Some(Direct(XMM0)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None, }, &[15, 56, 203, 44, 215], OperandSize::Dword, ) } #[test] fn sha256rnds2_3() { run_test( &Instruction { mnemonic: Mnemonic::SHA256RNDS2, operand1: Some(Direct(XMM5)), operand2: Some(Direct(XMM4)), operand3: Some(Direct(XMM0)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None, }, &[15, 56, 203, 236], OperandSize::Qword, ) } #[test] fn sha256rnds2_4() { run_test( &Instruction { mnemonic: Mnemonic::SHA256RNDS2, operand1: Some(Direct(XMM3)), operand2: Some(IndirectScaledIndexedDisplaced( RBX, RDX, Eight, 1481418717, Some(OperandSize::Xmmword), None, )), operand3: Some(Direct(XMM0)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None, }, &[15, 56, 203, 156, 211, 221, 167, 76, 88], OperandSize::Qword, ) }
24.790476
95
0.472148
f7e27f1902f695ab553ebb846b8a6cf42d796613
2,266
use std::convert::TryFrom; use nom::combinator::map_res; use nom::number::complete::le_i8; use crate::ParseResult; #[derive(Debug, Clone, Copy, Eq, PartialEq)] #[repr(i8)] pub enum Flag { Unknown = -1, None = 0, Green = 1, Blue = 2, Yellow = 3, Red = 4, } #[non_exhaustive] pub struct InvalidFlag(()); impl InvalidFlag { fn new() -> Self { InvalidFlag(()) } } impl TryFrom<i8> for Flag { type Error = InvalidFlag; fn try_from(item: i8) -> Result<Self, Self::Error> { match item { -1 => Ok(Flag::Unknown), 0 => Ok(Flag::None), 1 => Ok(Flag::Green), 2 => Ok(Flag::Blue), 3 => Ok(Flag::Yellow), 4 => Ok(Flag::Red), _ => Err(InvalidFlag::new()), } } } impl Flag { pub fn parse(input: &[u8]) -> ParseResult<Self> { map_res(le_i8, Flag::try_from)(input) } } #[cfg(test)] mod tests { use super::*; use nom::error::ErrorKind; use nom::Err; #[test] fn test_parse() { let packet = (-1i8).to_le_bytes(); let result = Flag::parse(&packet[..]); assert_eq!(result, Ok((&[][..], Flag::Unknown))); let packet = 0i8.to_le_bytes(); let result = Flag::parse(&packet[..]); assert_eq!(result, Ok((&[][..], Flag::None))); let packet = 1i8.to_le_bytes(); let result = Flag::parse(&packet[..]); assert_eq!(result, Ok((&[][..], Flag::Green))); let packet = 2i8.to_le_bytes(); let result = Flag::parse(&packet[..]); assert_eq!(result, Ok((&[][..], Flag::Blue))); let packet = 3i8.to_le_bytes(); let result = Flag::parse(&packet[..]); assert_eq!(result, Ok((&[][..], Flag::Yellow))); let packet = 4i8.to_le_bytes(); let result = Flag::parse(&packet[..]); assert_eq!(result, Ok((&[][..], Flag::Red))); let packet = (-2i8).to_le_bytes(); let result = Flag::parse(&packet[..]); assert_eq!(result, Err(Err::Error((&packet[..], ErrorKind::MapRes)))); let packet = 5i8.to_le_bytes(); let result = Flag::parse(&packet[..]); assert_eq!(result, Err(Err::Error((&packet[..], ErrorKind::MapRes)))); } }
24.901099
78
0.518094
62df646a7360d7f43baecf92decac1526e4d1a89
1,246
/* * Copyright (c) Meta Platforms, Inc. and affiliates. * * This source code is licensed under both the MIT license found in the * LICENSE-MIT file in the root directory of this source tree and the Apache * License, Version 2.0 found in the LICENSE-APACHE file in the root directory * of this source tree. */ use anyhow::{Error, Result}; use futures_ext::BoxFuture; use std::time::Duration; use super::lrucache::VolatileLruCachePool; pub fn get_cached_or_fill<T, F>( _cache_pool: &VolatileLruCachePool, _cache_key: String, fetch: F, ) -> BoxFuture<Option<T>, Error> where T: abomonation::Abomonation + Clone + Send + 'static, F: FnOnce() -> BoxFuture<Option<T>, Error>, { fetch() } pub fn get_cached<T>(_cache_pool: &VolatileLruCachePool, _cache_key: &String) -> Result<Option<T>> where T: abomonation::Abomonation + Clone + Send + 'static, { Ok(None) } /// Returns `false` if the entry could not be inserted (e.g. another entry with the same /// key was inserted first) pub fn set_cached<T>( _cache_pool: &VolatileLruCachePool, _cache_key: &String, _entry: &T, _ttl: Option<Duration>, ) -> Result<bool> where T: abomonation::Abomonation + Clone + Send + 'static, { Ok(false) }
25.958333
98
0.691011
61da16cebad3e752fd0ffced9e89898ed3c139ae
24,602
#![allow(missing_docs, trivial_casts, unused_variables, unused_mut, unused_imports, unused_extern_crates, non_camel_case_types)] use async_trait::async_trait; use futures::Stream; use std::error::Error; use std::task::{Poll, Context}; use swagger::{ApiError, ContextWrapper}; use serde::{Serialize, Deserialize}; type ServiceError = Box<dyn Error + Send + Sync + 'static>; pub const BASE_PATH: &'static str = ""; pub const API_VERSION: &'static str = "1.0.7"; #[derive(Debug, PartialEq, Serialize, Deserialize)] #[must_use] pub enum AnyOfGetResponse { /// Success Success (models::AnyOfObject) , /// AlternateSuccess AlternateSuccess (models::Model12345AnyOfObject) , /// AnyOfSuccess AnyOfSuccess (models::AnyOfGet202Response) } #[derive(Debug, PartialEq, Serialize, Deserialize)] pub enum CallbackWithHeaderPostResponse { /// OK OK } #[derive(Debug, PartialEq, Serialize, Deserialize)] pub enum ComplexQueryParamGetResponse { /// Success Success } #[derive(Debug, PartialEq, Serialize, Deserialize)] pub enum EnumInPathPathParamGetResponse { /// Success Success } #[derive(Debug, PartialEq, Serialize, Deserialize)] pub enum JsonComplexQueryParamGetResponse { /// Success Success } #[derive(Debug, PartialEq, Serialize, Deserialize)] pub enum MandatoryRequestHeaderGetResponse { /// Success Success } #[derive(Debug, PartialEq, Serialize, Deserialize)] pub enum MergePatchJsonGetResponse { /// merge-patch+json-encoded response Merge (models::AnotherXmlObject) } #[derive(Debug, PartialEq, Serialize, Deserialize)] #[must_use] pub enum MultigetGetResponse { /// JSON rsp JSONRsp (models::AnotherXmlObject) , /// XML rsp XMLRsp (models::MultigetGet201Response) , /// octet rsp OctetRsp (swagger::ByteArray) , /// string rsp StringRsp (String) , /// Duplicate Response long text. One. DuplicateResponseLongText (models::AnotherXmlObject) , /// Duplicate Response long text. Two. DuplicateResponseLongText_2 (models::AnotherXmlObject) , /// Duplicate Response long text. Three. 
DuplicateResponseLongText_3 (models::AnotherXmlObject) } #[derive(Debug, PartialEq, Serialize, Deserialize)] pub enum MultipleAuthSchemeGetResponse { /// Check that limiting to multiple required auth schemes works CheckThatLimitingToMultipleRequiredAuthSchemesWorks } #[derive(Debug, PartialEq, Serialize, Deserialize)] pub enum OneOfGetResponse { /// Success Success (models::OneOfGet200Response) } #[derive(Debug, PartialEq, Serialize, Deserialize)] pub enum OverrideServerGetResponse { /// Success. Success } #[derive(Debug, PartialEq, Serialize, Deserialize)] pub enum ParamgetGetResponse { /// JSON rsp JSONRsp (models::AnotherXmlObject) } #[derive(Debug, PartialEq, Serialize, Deserialize)] pub enum ReadonlyAuthSchemeGetResponse { /// Check that limiting to a single required auth scheme works CheckThatLimitingToASingleRequiredAuthSchemeWorks } #[derive(Debug, PartialEq, Serialize, Deserialize)] pub enum RegisterCallbackPostResponse { /// OK OK } #[derive(Debug, PartialEq, Serialize, Deserialize)] pub enum RequiredOctetStreamPutResponse { /// OK OK } #[derive(Debug, PartialEq, Serialize, Deserialize)] #[must_use] pub enum ResponsesWithHeadersGetResponse { /// Success Success { body: String, success_info: String , bool_header: Option< bool > , object_header: Option< models::ObjectHeader > } , /// Precondition Failed PreconditionFailed { further_info: Option< String > , failure_info: Option< String > } } #[derive(Debug, PartialEq, Serialize, Deserialize)] #[must_use] pub enum Rfc7807GetResponse { /// OK OK (models::ObjectWithArrayOfObjects) , /// NotFound NotFound (models::ObjectWithArrayOfObjects) , /// NotAcceptable NotAcceptable (models::ObjectWithArrayOfObjects) } #[derive(Debug, PartialEq, Serialize, Deserialize)] pub enum UntypedPropertyGetResponse { /// Check that untyped properties works CheckThatUntypedPropertiesWorks } #[derive(Debug, PartialEq, Serialize, Deserialize)] pub enum UuidGetResponse { /// Duplicate Response long text. One. 
DuplicateResponseLongText (uuid::Uuid) } #[derive(Debug, PartialEq, Serialize, Deserialize)] #[must_use] pub enum XmlExtraPostResponse { /// OK OK , /// Bad Request BadRequest } #[derive(Debug, PartialEq, Serialize, Deserialize)] #[must_use] pub enum XmlOtherPostResponse { /// OK OK (models::AnotherXmlObject) , /// Bad Request BadRequest } #[derive(Debug, PartialEq, Serialize, Deserialize)] #[must_use] pub enum XmlOtherPutResponse { /// OK OK , /// Bad Request BadRequest } #[derive(Debug, PartialEq, Serialize, Deserialize)] #[must_use] pub enum XmlPostResponse { /// OK OK , /// Bad Request BadRequest } #[derive(Debug, PartialEq, Serialize, Deserialize)] #[must_use] pub enum XmlPutResponse { /// OK OK , /// Bad Request BadRequest } #[derive(Debug, PartialEq, Serialize, Deserialize)] pub enum CreateRepoResponse { /// Success Success } #[derive(Debug, PartialEq, Serialize, Deserialize)] pub enum GetRepoInfoResponse { /// OK OK (String) } /// API #[async_trait] pub trait Api<C: Send + Sync> { fn poll_ready(&self, _cx: &mut Context) -> Poll<Result<(), Box<dyn Error + Send + Sync + 'static>>> { Poll::Ready(Ok(())) } async fn any_of_get( &self, any_of: Option<&Vec<models::AnyOfObject>>, context: &C) -> Result<AnyOfGetResponse, ApiError>; async fn callback_with_header_post( &self, url: String, context: &C) -> Result<CallbackWithHeaderPostResponse, ApiError>; async fn complex_query_param_get( &self, list_of_strings: Option<&Vec<models::StringObject>>, context: &C) -> Result<ComplexQueryParamGetResponse, ApiError>; async fn enum_in_path_path_param_get( &self, path_param: models::StringEnum, context: &C) -> Result<EnumInPathPathParamGetResponse, ApiError>; async fn json_complex_query_param_get( &self, list_of_strings: Option<&Vec<models::StringObject>>, context: &C) -> Result<JsonComplexQueryParamGetResponse, ApiError>; async fn mandatory_request_header_get( &self, x_header: String, context: &C) -> Result<MandatoryRequestHeaderGetResponse, ApiError>; async fn 
merge_patch_json_get( &self, context: &C) -> Result<MergePatchJsonGetResponse, ApiError>; /// Get some stuff. async fn multiget_get( &self, context: &C) -> Result<MultigetGetResponse, ApiError>; async fn multiple_auth_scheme_get( &self, context: &C) -> Result<MultipleAuthSchemeGetResponse, ApiError>; async fn one_of_get( &self, context: &C) -> Result<OneOfGetResponse, ApiError>; async fn override_server_get( &self, context: &C) -> Result<OverrideServerGetResponse, ApiError>; /// Get some stuff with parameters. async fn paramget_get( &self, uuid: Option<uuid::Uuid>, some_object: Option<models::ObjectParam>, some_list: Option<models::MyIdList>, context: &C) -> Result<ParamgetGetResponse, ApiError>; async fn readonly_auth_scheme_get( &self, context: &C) -> Result<ReadonlyAuthSchemeGetResponse, ApiError>; async fn register_callback_post( &self, url: String, context: &C) -> Result<RegisterCallbackPostResponse, ApiError>; async fn required_octet_stream_put( &self, body: swagger::ByteArray, context: &C) -> Result<RequiredOctetStreamPutResponse, ApiError>; async fn responses_with_headers_get( &self, context: &C) -> Result<ResponsesWithHeadersGetResponse, ApiError>; async fn rfc7807_get( &self, context: &C) -> Result<Rfc7807GetResponse, ApiError>; async fn untyped_property_get( &self, object_untyped_props: Option<models::ObjectUntypedProps>, context: &C) -> Result<UntypedPropertyGetResponse, ApiError>; async fn uuid_get( &self, context: &C) -> Result<UuidGetResponse, ApiError>; async fn xml_extra_post( &self, duplicate_xml_object: Option<models::DuplicateXmlObject>, context: &C) -> Result<XmlExtraPostResponse, ApiError>; async fn xml_other_post( &self, another_xml_object: Option<models::AnotherXmlObject>, context: &C) -> Result<XmlOtherPostResponse, ApiError>; async fn xml_other_put( &self, another_xml_array: Option<models::AnotherXmlArray>, context: &C) -> Result<XmlOtherPutResponse, ApiError>; /// Post an array async fn xml_post( &self, xml_array: 
Option<models::XmlArray>, context: &C) -> Result<XmlPostResponse, ApiError>; async fn xml_put( &self, xml_object: Option<models::XmlObject>, context: &C) -> Result<XmlPutResponse, ApiError>; async fn create_repo( &self, object_param: models::ObjectParam, context: &C) -> Result<CreateRepoResponse, ApiError>; async fn get_repo_info( &self, repo_id: String, context: &C) -> Result<GetRepoInfoResponse, ApiError>; } /// API where `Context` isn't passed on every API call #[async_trait] pub trait ApiNoContext<C: Send + Sync> { fn poll_ready(&self, _cx: &mut Context) -> Poll<Result<(), Box<dyn Error + Send + Sync + 'static>>>; fn context(&self) -> &C; async fn any_of_get( &self, any_of: Option<&Vec<models::AnyOfObject>>, ) -> Result<AnyOfGetResponse, ApiError>; async fn callback_with_header_post( &self, url: String, ) -> Result<CallbackWithHeaderPostResponse, ApiError>; async fn complex_query_param_get( &self, list_of_strings: Option<&Vec<models::StringObject>>, ) -> Result<ComplexQueryParamGetResponse, ApiError>; async fn enum_in_path_path_param_get( &self, path_param: models::StringEnum, ) -> Result<EnumInPathPathParamGetResponse, ApiError>; async fn json_complex_query_param_get( &self, list_of_strings: Option<&Vec<models::StringObject>>, ) -> Result<JsonComplexQueryParamGetResponse, ApiError>; async fn mandatory_request_header_get( &self, x_header: String, ) -> Result<MandatoryRequestHeaderGetResponse, ApiError>; async fn merge_patch_json_get( &self, ) -> Result<MergePatchJsonGetResponse, ApiError>; /// Get some stuff. async fn multiget_get( &self, ) -> Result<MultigetGetResponse, ApiError>; async fn multiple_auth_scheme_get( &self, ) -> Result<MultipleAuthSchemeGetResponse, ApiError>; async fn one_of_get( &self, ) -> Result<OneOfGetResponse, ApiError>; async fn override_server_get( &self, ) -> Result<OverrideServerGetResponse, ApiError>; /// Get some stuff with parameters. 
async fn paramget_get( &self, uuid: Option<uuid::Uuid>, some_object: Option<models::ObjectParam>, some_list: Option<models::MyIdList>, ) -> Result<ParamgetGetResponse, ApiError>; async fn readonly_auth_scheme_get( &self, ) -> Result<ReadonlyAuthSchemeGetResponse, ApiError>; async fn register_callback_post( &self, url: String, ) -> Result<RegisterCallbackPostResponse, ApiError>; async fn required_octet_stream_put( &self, body: swagger::ByteArray, ) -> Result<RequiredOctetStreamPutResponse, ApiError>; async fn responses_with_headers_get( &self, ) -> Result<ResponsesWithHeadersGetResponse, ApiError>; async fn rfc7807_get( &self, ) -> Result<Rfc7807GetResponse, ApiError>; async fn untyped_property_get( &self, object_untyped_props: Option<models::ObjectUntypedProps>, ) -> Result<UntypedPropertyGetResponse, ApiError>; async fn uuid_get( &self, ) -> Result<UuidGetResponse, ApiError>; async fn xml_extra_post( &self, duplicate_xml_object: Option<models::DuplicateXmlObject>, ) -> Result<XmlExtraPostResponse, ApiError>; async fn xml_other_post( &self, another_xml_object: Option<models::AnotherXmlObject>, ) -> Result<XmlOtherPostResponse, ApiError>; async fn xml_other_put( &self, another_xml_array: Option<models::AnotherXmlArray>, ) -> Result<XmlOtherPutResponse, ApiError>; /// Post an array async fn xml_post( &self, xml_array: Option<models::XmlArray>, ) -> Result<XmlPostResponse, ApiError>; async fn xml_put( &self, xml_object: Option<models::XmlObject>, ) -> Result<XmlPutResponse, ApiError>; async fn create_repo( &self, object_param: models::ObjectParam, ) -> Result<CreateRepoResponse, ApiError>; async fn get_repo_info( &self, repo_id: String, ) -> Result<GetRepoInfoResponse, ApiError>; } /// Trait to extend an API to make it easy to bind it to a context. pub trait ContextWrapperExt<C: Send + Sync> where Self: Sized { /// Binds this API to a context. 
fn with_context(self: Self, context: C) -> ContextWrapper<Self, C>; } impl<T: Api<C> + Send + Sync, C: Clone + Send + Sync> ContextWrapperExt<C> for T { fn with_context(self: T, context: C) -> ContextWrapper<T, C> { ContextWrapper::<T, C>::new(self, context) } } #[async_trait] impl<T: Api<C> + Send + Sync, C: Clone + Send + Sync> ApiNoContext<C> for ContextWrapper<T, C> { fn poll_ready(&self, cx: &mut Context) -> Poll<Result<(), ServiceError>> { self.api().poll_ready(cx) } fn context(&self) -> &C { ContextWrapper::context(self) } async fn any_of_get( &self, any_of: Option<&Vec<models::AnyOfObject>>, ) -> Result<AnyOfGetResponse, ApiError> { let context = self.context().clone(); self.api().any_of_get(any_of, &context).await } async fn callback_with_header_post( &self, url: String, ) -> Result<CallbackWithHeaderPostResponse, ApiError> { let context = self.context().clone(); self.api().callback_with_header_post(url, &context).await } async fn complex_query_param_get( &self, list_of_strings: Option<&Vec<models::StringObject>>, ) -> Result<ComplexQueryParamGetResponse, ApiError> { let context = self.context().clone(); self.api().complex_query_param_get(list_of_strings, &context).await } async fn enum_in_path_path_param_get( &self, path_param: models::StringEnum, ) -> Result<EnumInPathPathParamGetResponse, ApiError> { let context = self.context().clone(); self.api().enum_in_path_path_param_get(path_param, &context).await } async fn json_complex_query_param_get( &self, list_of_strings: Option<&Vec<models::StringObject>>, ) -> Result<JsonComplexQueryParamGetResponse, ApiError> { let context = self.context().clone(); self.api().json_complex_query_param_get(list_of_strings, &context).await } async fn mandatory_request_header_get( &self, x_header: String, ) -> Result<MandatoryRequestHeaderGetResponse, ApiError> { let context = self.context().clone(); self.api().mandatory_request_header_get(x_header, &context).await } async fn merge_patch_json_get( &self, ) -> 
Result<MergePatchJsonGetResponse, ApiError> { let context = self.context().clone(); self.api().merge_patch_json_get(&context).await } /// Get some stuff. async fn multiget_get( &self, ) -> Result<MultigetGetResponse, ApiError> { let context = self.context().clone(); self.api().multiget_get(&context).await } async fn multiple_auth_scheme_get( &self, ) -> Result<MultipleAuthSchemeGetResponse, ApiError> { let context = self.context().clone(); self.api().multiple_auth_scheme_get(&context).await } async fn one_of_get( &self, ) -> Result<OneOfGetResponse, ApiError> { let context = self.context().clone(); self.api().one_of_get(&context).await } async fn override_server_get( &self, ) -> Result<OverrideServerGetResponse, ApiError> { let context = self.context().clone(); self.api().override_server_get(&context).await } /// Get some stuff with parameters. async fn paramget_get( &self, uuid: Option<uuid::Uuid>, some_object: Option<models::ObjectParam>, some_list: Option<models::MyIdList>, ) -> Result<ParamgetGetResponse, ApiError> { let context = self.context().clone(); self.api().paramget_get(uuid, some_object, some_list, &context).await } async fn readonly_auth_scheme_get( &self, ) -> Result<ReadonlyAuthSchemeGetResponse, ApiError> { let context = self.context().clone(); self.api().readonly_auth_scheme_get(&context).await } async fn register_callback_post( &self, url: String, ) -> Result<RegisterCallbackPostResponse, ApiError> { let context = self.context().clone(); self.api().register_callback_post(url, &context).await } async fn required_octet_stream_put( &self, body: swagger::ByteArray, ) -> Result<RequiredOctetStreamPutResponse, ApiError> { let context = self.context().clone(); self.api().required_octet_stream_put(body, &context).await } async fn responses_with_headers_get( &self, ) -> Result<ResponsesWithHeadersGetResponse, ApiError> { let context = self.context().clone(); self.api().responses_with_headers_get(&context).await } async fn rfc7807_get( &self, ) -> 
Result<Rfc7807GetResponse, ApiError> { let context = self.context().clone(); self.api().rfc7807_get(&context).await } async fn untyped_property_get( &self, object_untyped_props: Option<models::ObjectUntypedProps>, ) -> Result<UntypedPropertyGetResponse, ApiError> { let context = self.context().clone(); self.api().untyped_property_get(object_untyped_props, &context).await } async fn uuid_get( &self, ) -> Result<UuidGetResponse, ApiError> { let context = self.context().clone(); self.api().uuid_get(&context).await } async fn xml_extra_post( &self, duplicate_xml_object: Option<models::DuplicateXmlObject>, ) -> Result<XmlExtraPostResponse, ApiError> { let context = self.context().clone(); self.api().xml_extra_post(duplicate_xml_object, &context).await } async fn xml_other_post( &self, another_xml_object: Option<models::AnotherXmlObject>, ) -> Result<XmlOtherPostResponse, ApiError> { let context = self.context().clone(); self.api().xml_other_post(another_xml_object, &context).await } async fn xml_other_put( &self, another_xml_array: Option<models::AnotherXmlArray>, ) -> Result<XmlOtherPutResponse, ApiError> { let context = self.context().clone(); self.api().xml_other_put(another_xml_array, &context).await } /// Post an array async fn xml_post( &self, xml_array: Option<models::XmlArray>, ) -> Result<XmlPostResponse, ApiError> { let context = self.context().clone(); self.api().xml_post(xml_array, &context).await } async fn xml_put( &self, xml_object: Option<models::XmlObject>, ) -> Result<XmlPutResponse, ApiError> { let context = self.context().clone(); self.api().xml_put(xml_object, &context).await } async fn create_repo( &self, object_param: models::ObjectParam, ) -> Result<CreateRepoResponse, ApiError> { let context = self.context().clone(); self.api().create_repo(object_param, &context).await } async fn get_repo_info( &self, repo_id: String, ) -> Result<GetRepoInfoResponse, ApiError> { let context = self.context().clone(); self.api().get_repo_info(repo_id, 
&context).await } } #[derive(Debug, PartialEq, Serialize, Deserialize)] pub enum CallbackCallbackWithHeaderPostResponse { /// OK OK } #[derive(Debug, PartialEq, Serialize, Deserialize)] pub enum CallbackCallbackPostResponse { /// OK OK } /// Callback API #[async_trait] pub trait CallbackApi<C: Send + Sync> { fn poll_ready(&self, _cx: &mut Context) -> Poll<Result<(), Box<dyn Error + Send + Sync + 'static>>> { Poll::Ready(Ok(())) } async fn callback_callback_with_header_post( &self, callback_request_query_url: String, information: Option<String>, context: &C) -> Result<CallbackCallbackWithHeaderPostResponse, ApiError>; async fn callback_callback_post( &self, callback_request_query_url: String, context: &C) -> Result<CallbackCallbackPostResponse, ApiError>; } /// Callback API without a `Context` #[async_trait] pub trait CallbackApiNoContext<C: Send + Sync> { fn poll_ready(&self, _cx: &mut Context) -> Poll<Result<(), Box<dyn Error + Send + Sync + 'static>>>; fn context(&self) -> &C; async fn callback_callback_with_header_post( &self, callback_request_query_url: String, information: Option<String>, ) -> Result<CallbackCallbackWithHeaderPostResponse, ApiError>; async fn callback_callback_post( &self, callback_request_query_url: String, ) -> Result<CallbackCallbackPostResponse, ApiError>; } pub trait CallbackContextWrapperExt<C: Send + Sync> where Self: Sized { /// Binds this API to a context. 
fn with_context(self: Self, context: C) -> ContextWrapper<Self, C>; } impl<T: CallbackApi<C> + Send + Sync, C: Clone + Send + Sync> CallbackContextWrapperExt<C> for T { fn with_context(self: T, context: C) -> ContextWrapper<T, C> { ContextWrapper::<T, C>::new(self, context) } } #[async_trait] impl<T: CallbackApi<C> + Send + Sync, C: Clone + Send + Sync> CallbackApiNoContext<C> for ContextWrapper<T, C> { fn poll_ready(&self, cx: &mut Context) -> Poll<Result<(), ServiceError>> { self.api().poll_ready(cx) } fn context(&self) -> &C { ContextWrapper::context(self) } async fn callback_callback_with_header_post( &self, callback_request_query_url: String, information: Option<String>, ) -> Result<CallbackCallbackWithHeaderPostResponse, ApiError> { let context = self.context().clone(); self.api().callback_callback_with_header_post( callback_request_query_url, information, &context).await } async fn callback_callback_post( &self, callback_request_query_url: String, ) -> Result<CallbackCallbackPostResponse, ApiError> { let context = self.context().clone(); self.api().callback_callback_post( callback_request_query_url, &context).await } } #[cfg(feature = "client")] pub mod client; // Re-export Client as a top-level name #[cfg(feature = "client")] pub use client::Client; #[cfg(feature = "server")] pub mod server; // Re-export router() as a top-level name #[cfg(feature = "server")] pub use self::server::Service; #[cfg(any(feature = "client", feature = "server"))] pub mod context; pub mod models; #[cfg(any(feature = "client", feature = "server"))] pub(crate) mod header;
26.654388
128
0.639013
1e7dd615a1f7ddb2448282f3b09ae18ca6cb32bb
6,345
//! Keymap macro // Keymap macro implementation. // // Token-tree muncher: { rest } ( visible ) ( modifiers ) ( keys ) [ data ] // // Consumes definition from 'rest'. Modifiers are accumulated in 'modifiers'. Key definitions are // accumulated in 'keys'. Bindings are accumulated in 'data'. macro_rules! keymap_impl { // Base case: generate keymap data. ( {} ( $visible:literal ) () () $data:tt ) => { pub(crate) static KEYMAP: $crate::keymaps::KeymapData = &$data; }; // , (consume comma between keys) ( { , $( $rest:tt )* } ( $visible:literal ) ( ) ( $( $keys:tt )* ) [ $( $data:tt )* ] ) => { keymap_impl! { { $( $rest )* } ( $visible ) ( ) ( $( $keys )* ) [ $( $data )* ] } }; // => Binding (termination) ( { => $action:ident $( ( $( $action_params:tt )* ) )? ; $( $rest:tt )* } ( $visible:literal ) ( ) ( ) [ $( $data:tt )* ] ) => { keymap_impl! { { $( $rest )* } ( $visible ) ( ) ( ) [ $( $data )* ] } }; // => Binding (assign key) ( { => $action:ident $( ( $( $action_params:tt )* ) )? ; $( $rest:tt )* } ( $visible:literal ) ( ) ( $key:tt $key_visible:literal $( $keys:tt )* ) [ $( $data:tt )* ] ) => { keymap_impl! { { => $action $( ( $( $action_params )* ) )? ; $( $rest )* } ( $visible ) ( ) ( $( $keys )* ) [ $( $data )* ( $key, $crate::bindings::BindingConfig { binding: $crate::bindings::Binding::Action( $crate::action::Action::$action $( ( $( $action_params )* ) )? ), visible: $key_visible, }, ), ] } }; // CTRL ( { CTRL $( $rest:tt )* } ( $visible:literal ) ( $( $modifier:ident )* ) ( $( $keys:tt )* ) [ $( $data:tt )* ] ) => { keymap_impl! { { $( $rest )* } ( $visible ) ( $( $modifier )* CTRL ) ( $( $keys )* ) [ $( $data )* ] } }; // SHIFT ( { SHIFT $( $rest:tt )* } ( $visible:literal ) ( $( $modifier:ident )* ) ( $( $keys:tt )* ) [ $( $data:tt )* ] ) => { keymap_impl! 
{ { $( $rest )* } ( $visible ) ( $( $modifier )* SHIFT ) ( $( $keys )* ) [ $( $data )* ] } }; // ALT ( { ALT $( $rest:tt )* } ( $visible:literal ) ( $( $modifier:ident )* ) ( $( $keys:tt )* ) [ $( $data:tt )* ] ) => { keymap_impl! { { $( $rest )* } ( $visible ) ( $( $modifier )* ALT ) ( $( $keys )* ) [ $( $data )* ] } }; // SUPER ( { SUPER $( $rest:tt )* } ( $visible:literal ) ( $( $modifier:ident )* ) ( $( $keys:tt )* ) [ $( $data:tt )* ] ) => { keymap_impl! { { $( $rest )* } ( $visible ) ( $( $modifier )* SUPER ) ( $( $keys )* ) [ $( $data )* ] } }; // Character key (e.g. 'c') ( { $key:literal $( $rest:tt )* } ( $visible:literal ) ( $( $modifier:ident )* ) ( $( $keys:tt )* ) [ $( $data:tt )* ] ) => { keymap_impl! { { $( $rest )* } ( true ) ( ) ( $( $keys )* ( termwiz::input::Modifiers::from_bits_truncate( $( termwiz::input::Modifiers::$modifier.bits() | )* termwiz::input::Modifiers::NONE.bits() ), termwiz::input::KeyCode::Char($key), ) $visible ) [ $( $data )* ] } }; // F <number> ( { F $num:literal $( $rest:tt )* } ( $visible:literal ) ( $( $modifier:ident )* ) ( $( $keys:tt )* ) [ $( $data:tt )* ] ) => { keymap_impl! { { $( $rest )* } ( true ) ( ) ( $( $keys )* ( termwiz::input::Modifiers::from_bits_truncate( $( termwiz::input::Modifiers::$modifier.bits() | )* termwiz::input::Modifiers::NONE.bits() ), termwiz::input::KeyCode::Function($num), ) $visible ) [ $( $data )* ] } }; // KeyCode ( { $key:ident $( $rest:tt )* } ( $visible:literal ) ( $( $modifier:ident )* ) ( $( $keys:tt )* ) [ $( $data:tt )* ] ) => { keymap_impl! { { $( $rest )* } ( true ) ( ) ( $( $keys )* ( termwiz::input::Modifiers::from_bits_truncate( $( termwiz::input::Modifiers::$modifier.bits() | )* termwiz::input::Modifiers::NONE.bits() ), termwiz::input::KeyCode::$key, ) $visible ) [ $( $data )* ] } }; // ( hidden binding ) ( { ( $( $bind:tt )* ) $( $rest:tt )* } ( $visible:literal ) ( $( $modifier:ident )* ) ( $( $keys:tt )* ) [ $( $data:tt )* ] ) => { keymap_impl! 
{ { $( $bind )* $( $rest )* } ( false ) ( $( $modifier )* ) ( $( $keys )* ) [ $( $data )* ] } }; } macro_rules! keymap { ( $( $all:tt )* ) => { keymap_impl! { { $( $all )* } (true) () () [] } }; }
24.593023
99
0.30922
1db197c6c3a7946642d064cbcb85d946e45a7907
1,413
mod interaction; mod message; use std::{collections::BTreeMap, fmt::Write}; use crate::{ core::{commands::CommandDataCompact, Context}, embeds::EmbedBuilder, util::{constants::RED, MessageBuilder, MessageExt}, BotResult, }; pub use self::{ interaction::{define_help, handle_autocomplete, handle_menu_select, slash_help}, message::{failed_help, help, help_command}, }; async fn failed_message_( ctx: &Context, data: CommandDataCompact, dists: BTreeMap<usize, &'static str>, ) -> BotResult<()> { // Needs tighter scope for some reason or tokio complains about something being not `Send` let content = { let mut names = dists.iter().take(5).map(|(_, &name)| name); if let Some(name) = names.next() { let count = dists.len().min(5); let mut content = String::with_capacity(14 + count * (5 + 2) + (count - 1) * 2); content.push_str("Did you mean "); write!(content, "`{name}`")?; for name in names { write!(content, ", `{name}`")?; } content.push('?'); content } else { "There is no such command".to_owned() } }; let embed = EmbedBuilder::new().description(content).color(RED).build(); let builder = MessageBuilder::new().embed(embed); data.create_message(ctx, builder).await?; Ok(()) }
27.173077
94
0.585987
c109dae0e5b7fb82e082c95e04d517113fad34d9
12,942
use futures::future::Future; use bytes::Bytes; use grpc::*; use chrono::*; use std::time::SystemTime; use test_grpc::TestServiceClient; use empty::Empty; use messages::SimpleRequest; use messages::Payload; use messages::StreamingInputCallRequest; use messages::StreamingOutputCallRequest; use messages::ResponseParameters; fn empty_unary(client: TestServiceClient) { client .empty_call(grpc::RequestOptions::new(), Empty::new()) .wait_drop_metadata() .expect("failed to get EmptyUnary result"); println!("{} EmptyUnary done", Local::now().to_rfc3339()); } // https://github.com/grpc/grpc/blob/master/doc/interop-test-descriptions.md#cacheable_unary fn cacheable_unary(client: TestServiceClient) { let mut request = SimpleRequest::new(); request.set_payload({ let mut payload = Payload::new(); payload.set_body(format!("{}", SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_nanos()).into_bytes()); payload }); let mut options = RequestOptions::new(); options.metadata.add(MetadataKey::from("x-user-ip"), "1.2.3.4".into()); options.cachable = true; let (_, r1, _) = client.cacheable_unary_call(options.clone(), request.clone()) .wait().expect("call"); let (_, r2, _) = client.cacheable_unary_call(options, request) .wait().expect("call"); assert_eq!(r1.get_payload(), r2.get_payload()); } // https://github.com/grpc/grpc/blob/master/doc/interop-test-descriptions.md#large_unary fn large_unary(client: TestServiceClient) { let mut payload = Payload::new(); payload.set_body(vec![0; 271828]); let mut request = SimpleRequest::new(); request.set_payload(payload); request.set_response_size(314159); let response = client .unary_call(grpc::RequestOptions::new(), request) .wait_drop_metadata() .expect("expected full frame"); assert_eq!(314159, response.get_payload().body.len()); println!("{} LargeUnary done", Local::now().to_rfc3339()); } // https://github.com/grpc/grpc/blob/master/doc/interop-test-descriptions.md#client_compressed_unary fn client_compressed_unary(_client: 
TestServiceClient) { unimplemented!() } // https://github.com/grpc/grpc/blob/master/doc/interop-test-descriptions.md#server_compressed_unary fn server_compressed_unary(_client: TestServiceClient) { unimplemented!() } // https://github.com/grpc/grpc/blob/master/doc/interop-test-descriptions.md#client_streaming fn client_streaming(client: TestServiceClient) { let (mut req, resp) = client .streaming_input_call( grpc::RequestOptions::new(), ).wait().expect("expected response"); for size in [27182, 8, 1828, 45904].iter() { let mut request = StreamingInputCallRequest::new(); let mut payload = Payload::new(); payload.set_body(vec![0; size.to_owned()]); request.set_payload(payload); req.block_wait().expect("block_wait"); req.send_data(request).expect("send_data"); } req.finish().expect("finish"); let resp = resp.wait_drop_metadata().expect("wait_drop_metadata"); assert_eq!(resp.aggregated_payload_size, 74922); println!("{} ClientStreaming done", Local::now().to_rfc3339()); } // https://github.com/grpc/grpc/blob/master/doc/interop-test-descriptions.md#client_streaming fn client_compressed_streaming(_client: TestServiceClient) { unimplemented!() } // https://github.com/grpc/grpc/blob/master/doc/interop-test-descriptions.md#server_streaming // TODO: test fails with an assertion error, we never see a response from the client. // fails with 'expected a response: Other("partial frame")' fn server_streaming(client: TestServiceClient) { let mut req = StreamingOutputCallRequest::new(); let mut params = Vec::new(); for &size in [31415, 9, 2653, 58979].iter() { let mut rp = ResponseParameters::new(); rp.set_size(size as i32); params.push(rp); } req.set_response_parameters(::protobuf::RepeatedField::from_vec(params)); let response_stream = client .streaming_output_call(grpc::RequestOptions::new(), req) .wait_drop_metadata(); let mut response_sizes = Vec::new(); { // this scope is to satisfy the borrow checker. 
let bar = response_stream.map(|response| { response_sizes.push( response .expect("expected a response") .get_payload() .body .len(), ); }); assert!(bar.count() == 4); } assert!(response_sizes.len() == 4); assert!(response_sizes[0] == 31415); assert!(response_sizes[1] == 9); assert!(response_sizes[2] == 2653); assert!(response_sizes[3] == 58979); println!("{} ServerStreaming done", Local::now().to_rfc3339()); } // https://github.com/grpc/grpc/blob/master/doc/interop-test-descriptions.md#server_streaming fn server_compressed_streaming(_client: TestServiceClient) { unimplemented!() } // https://github.com/grpc/grpc/blob/master/doc/interop-test-descriptions.md#ping_pong fn ping_pong(client: TestServiceClient) { let (mut req, resp) = client .full_duplex_call( grpc::RequestOptions::new(), ).wait().expect("start request"); let mut resp = resp.wait_drop_metadata(); for &(size, body_len) in [(31415, 27182), (9, 8), (2653, 1828), (58979, 45904)].iter() { let mut req_m = StreamingOutputCallRequest::new(); let mut params = ResponseParameters::new(); params.set_size(size); req_m.set_response_parameters(::protobuf::RepeatedField::from_vec(vec![params])); let mut payload = Payload::new(); payload.set_body(vec![0; body_len]); req_m.set_payload(payload); req.block_wait().expect("block_wait"); req.send_data(req_m).expect("send_data"); let resp_m = resp.next().expect("next").unwrap(); assert_eq!(size as usize, resp_m.payload.get_ref().body.len()); } req.finish().expect("finish"); assert!(resp.next().is_none()); println!("{} PingPong done", Local::now().to_rfc3339()); } // https://github.com/grpc/grpc/blob/master/doc/interop-test-descriptions.md#empty_stream fn empty_stream(client: TestServiceClient) { let (mut req, resp) = client .full_duplex_call(grpc::RequestOptions::new()).wait().expect("wait"); req.finish().expect("finish"); let resp: Vec<_> = resp.wait_drop_metadata().collect(); assert!(resp.len() == 0); println!("{} EmptyStream done", Local::now().to_rfc3339()); } // 
https://github.com/grpc/grpc/blob/master/doc/interop-test-descriptions.md#compute_engine_creds fn compute_engine_creds(_client: TestServiceClient) { unimplemented!() } // https://github.com/grpc/grpc/blob/master/doc/interop-test-descriptions.md#jwt_token_creds fn jwt_token_creds(_client: TestServiceClient) { unimplemented!() } // https://github.com/grpc/grpc/blob/master/doc/interop-test-descriptions.md#oauth2_auth_token fn oauth2_auth_token(_client: TestServiceClient) { unimplemented!() } // https://github.com/grpc/grpc/blob/master/doc/interop-test-descriptions.md#per_rpc_creds fn per_rpc_creds(_client: TestServiceClient) { unimplemented!() } // https://github.com/grpc/grpc/blob/master/doc/interop-test-descriptions.md#google_default_credentials fn google_default_credentials(_client: TestServiceClient) { unimplemented!() } // https://github.com/grpc/grpc/blob/master/doc/interop-test-descriptions.md#custom_metadata fn custom_metadata(client: TestServiceClient) { fn make_options() -> grpc::RequestOptions { // The client attaches custom metadata with the following keys and values: // key: "x-grpc-test-echo-initial", value: "test_initial_metadata_value" // key: "x-grpc-test-echo-initial", value: "test_initial_metadata_value" let mut options = grpc::RequestOptions::new(); options.metadata.add( MetadataKey::from("x-grpc-test-echo-initial"), Bytes::from("test_initial_metadata_value"), ); options.metadata.add( MetadataKey::from("x-grpc-test-echo-trailing-bin"), Bytes::from(&b"\xab\xab\xab"[..]), ); options } fn assert_result_metadata(initial: grpc::Metadata, trailing: grpc::Metadata) { assert_eq!( Some(&b"test_initial_metadata_value"[..]), initial.get("x-grpc-test-echo-initial") ); assert_eq!( Some(&b"\xab\xab\xab"[..]), trailing.get("x-grpc-test-echo-trailing-bin") ); } { // to a UnaryCall with request: // { // response_size: 314159 // payload:{ // body: 271828 bytes of zeros // } // } let mut req = SimpleRequest::new(); req.set_response_size(314159); let mut payload = 
Payload::new(); payload.set_body(vec![0; 271828]); req.set_payload(payload); let (initial, _result, trailing) = client .unary_call(make_options(), req) .wait() .expect("UnaryCall"); assert_result_metadata(initial, trailing); } { // to a FullDuplexCall with request: // { // response_parameters:{ // size: 314159 // } // payload:{ // body: 271828 bytes of zeros // } // } let mut req = StreamingOutputCallRequest::new(); { let mut rp = ResponseParameters::new(); rp.set_size(314159); req.mut_response_parameters().push(rp); } { let mut p = Payload::new(); p.set_body(vec![0; 271828]); req.set_payload(p); } let (mut req1, resp) = client .full_duplex_call(make_options()).wait().expect("start request"); req1.send_data(req).expect("send_data"); req1.finish().expect("finish"); let (initial, _, trailing) = resp.collect().wait().expect("collect"); assert_result_metadata(initial, trailing); } } // https://github.com/grpc/grpc/blob/master/doc/interop-test-descriptions.md#status_code_and_message fn status_code_and_message(_client: TestServiceClient) { unimplemented!() } // https://github.com/grpc/grpc/blob/master/doc/interop-test-descriptions.md#special_status_message fn special_status_message(_client: TestServiceClient) { unimplemented!() } // https://github.com/grpc/grpc/blob/master/doc/interop-test-descriptions.md#unimplemented_method fn unimplemented_method(_client: TestServiceClient) { unimplemented!() } // https://github.com/grpc/grpc/blob/master/doc/interop-test-descriptions.md#unimplemented_service fn unimplemented_service(_client: TestServiceClient) { unimplemented!() } // https://github.com/grpc/grpc/blob/master/doc/interop-test-descriptions.md#cancel_after_begin fn cancel_after_begin(client: TestServiceClient) { let (req, resp) = client.streaming_input_call(RequestOptions::new()).wait().expect("start"); drop(req); // TODO: hangs match resp.wait() { Ok(_) => panic!("expecting err"), Err(_) => unimplemented!(), } } // 
https://github.com/grpc/grpc/blob/master/doc/interop-test-descriptions.md#cancel_after_first_response fn cancel_after_first_response(_client: TestServiceClient) { unimplemented!() } // https://github.com/grpc/grpc/blob/master/doc/interop-test-descriptions.md#timeout_on_sleeping_server fn timeout_on_sleeping_server(_client: TestServiceClient) { unimplemented!() } pub static TESTS: &[(&str, fn(TestServiceClient))] = &[ ("empty_unary", empty_unary), ("cacheable_unary", cacheable_unary), ("large_unary", large_unary), ("client_compressed_unary", client_compressed_unary), ("server_compressed_unary", server_compressed_unary), ("client_streaming", client_streaming), ("client_compressed_streaming", client_compressed_streaming), ("server_streaming", server_streaming), ("server_compressed_streaming", server_compressed_streaming), ("ping_pong", ping_pong), ("empty_stream", empty_stream), ("compute_engine_creds", compute_engine_creds), ("jwt_token_creds", jwt_token_creds), ("oauth2_auth_token", oauth2_auth_token), ("per_rpc_creds", per_rpc_creds), ("google_default_credentials", google_default_credentials), ("custom_metadata", custom_metadata), ("status_code_and_message", status_code_and_message), ("special_status_message", special_status_message), ("unimplemented_method", unimplemented_method), ("unimplemented_service", unimplemented_service), ("cancel_after_begin", cancel_after_begin), ("cancel_after_first_response", cancel_after_first_response), ("timeout_on_sleeping_server", timeout_on_sleeping_server), ];
36.050139
131
0.666744
50391a2d0e5658924823d3b5336a6d59f93c11aa
193
mod config; mod data_structures; mod github; pub use config::config_github; pub use data_structures::*; pub use github::repo; pub use github::check_merge_methods; pub use github::check_states;
21.444444
36
0.792746
0e3fe0c26106a407ce33f2aaa86320d67e3e7cd5
1,058
#![deny(missing_docs)] //! A library for creating retro computing platforms //! //! # Introduction //! `melon` is like a virtual 16bit CPU. When building a retro computing platform e.g. a gaming //! console or old computer architecture, `melon` takes care of handling basic parts like stack //! management, calls, memory management and exception handling. Its most common interface, the //! [System][system] trait makes it possible to not only implement the CPU into any platform but //! makes it also really easy to extend its functionality. //! //! The [Program][program] struct takes care of loading and saving programs written for an //! implementation of the `melon` backend. `melon` roms are gzipped msgpack files. //! //! [system]: trait.System.html //! [program]: struct.Program.html #[macro_use] extern crate failure; mod consts; mod debugger; mod instruction; mod program; mod system; pub mod typedef; mod vm; pub use crate::debugger::*; pub use crate::instruction::*; pub use crate::program::*; pub use crate::system::*; pub use crate::vm::*;
31.117647
96
0.732514
fb5b0cbde345dc81a3d45e8e4ec66076e4dc3694
3,441
// Copyright 2019-2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Flash storage abstraction. /// Represents a byte position in a storage. #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub struct StorageIndex { pub page: usize, pub byte: usize, } /// Represents a possible storage error. #[derive(Debug, PartialEq, Eq)] pub enum StorageError { /// Arguments are not correctly aligned. NotAligned, /// Arguments are out of bounds. OutOfBounds, /// Implementation-specific error. CustomError, } pub type StorageResult<T> = Result<T, StorageError>; /// Abstracts a flash storage. pub trait Storage { /// The size of a word in bytes. /// /// A word is the smallest unit of writable flash. fn word_size(&self) -> usize; /// The size of a page in bytes. /// /// A page is the smallest unit of erasable flash. fn page_size(&self) -> usize; /// The number of pages in the storage. fn num_pages(&self) -> usize; /// Maximum number of times a word can be written between page erasures. fn max_word_writes(&self) -> usize; /// Maximum number of times a page can be erased. fn max_page_erases(&self) -> usize; /// Reads a byte slice from the storage. /// /// The `index` must designate `length` bytes in the storage. fn read_slice(&self, index: StorageIndex, length: usize) -> StorageResult<&[u8]>; /// Writes a word slice to the storage. /// /// The following pre-conditions must hold: /// - The `index` must designate `value.len()` bytes in the storage. 
/// - Both `index` and `value.len()` must be word-aligned. /// - The written words should not have been written [too many](Self::max_word_writes) times /// since the last page erasure. fn write_slice(&mut self, index: StorageIndex, value: &[u8]) -> StorageResult<()>; /// Erases a page of the storage. /// /// The `page` must be in the storage, i.e. less than [`Storage::num_pages`]. And the page /// should not have been erased [too many](Self::max_page_erases) times. fn erase_page(&mut self, page: usize) -> StorageResult<()>; } impl StorageIndex { /// Whether a slice fits in a storage page. fn is_valid(self, length: usize, storage: &impl Storage) -> bool { let page_size = storage.page_size(); self.page < storage.num_pages() && length <= page_size && self.byte <= page_size - length } /// Returns the range of a valid slice. /// /// The range starts at `self` with `length` bytes. pub fn range( self, length: usize, storage: &impl Storage, ) -> StorageResult<core::ops::Range<usize>> { if self.is_valid(length, storage) { let start = self.page * storage.page_size() + self.byte; Ok(start..start + length) } else { Err(StorageError::OutOfBounds) } } }
33.086538
97
0.646324
67741cf79e92705f18f7d4f337907b7087209684
6,046
// Generated from definition io.k8s.api.core.v1.PodAntiAffinity /// Pod anti affinity is a group of inter pod anti affinity scheduling rules. #[derive(Clone, Debug, Default, PartialEq)] pub struct PodAntiAffinity { /// The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. pub preferred_during_scheduling_ignored_during_execution: Option<Vec<crate::v1_12::api::core::v1::WeightedPodAffinityTerm>>, /// If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. 
pub required_during_scheduling_ignored_during_execution: Option<Vec<crate::v1_12::api::core::v1::PodAffinityTerm>>, } impl<'de> serde::Deserialize<'de> for PodAntiAffinity { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> { #[allow(non_camel_case_types)] enum Field { Key_preferred_during_scheduling_ignored_during_execution, Key_required_during_scheduling_ignored_during_execution, Other, } impl<'de> serde::Deserialize<'de> for Field { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> { struct Visitor; impl<'de> serde::de::Visitor<'de> for Visitor { type Value = Field; fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "field identifier") } fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: serde::de::Error { Ok(match v { "preferredDuringSchedulingIgnoredDuringExecution" => Field::Key_preferred_during_scheduling_ignored_during_execution, "requiredDuringSchedulingIgnoredDuringExecution" => Field::Key_required_during_scheduling_ignored_during_execution, _ => Field::Other, }) } } deserializer.deserialize_identifier(Visitor) } } struct Visitor; impl<'de> serde::de::Visitor<'de> for Visitor { type Value = PodAntiAffinity; fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "struct PodAntiAffinity") } fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: serde::de::MapAccess<'de> { let mut value_preferred_during_scheduling_ignored_during_execution: Option<Vec<crate::v1_12::api::core::v1::WeightedPodAffinityTerm>> = None; let mut value_required_during_scheduling_ignored_during_execution: Option<Vec<crate::v1_12::api::core::v1::PodAffinityTerm>> = None; while let Some(key) = serde::de::MapAccess::next_key::<Field>(&mut map)? 
{ match key { Field::Key_preferred_during_scheduling_ignored_during_execution => value_preferred_during_scheduling_ignored_during_execution = serde::de::MapAccess::next_value(&mut map)?, Field::Key_required_during_scheduling_ignored_during_execution => value_required_during_scheduling_ignored_during_execution = serde::de::MapAccess::next_value(&mut map)?, Field::Other => { let _: serde::de::IgnoredAny = serde::de::MapAccess::next_value(&mut map)?; }, } } Ok(PodAntiAffinity { preferred_during_scheduling_ignored_during_execution: value_preferred_during_scheduling_ignored_during_execution, required_during_scheduling_ignored_during_execution: value_required_during_scheduling_ignored_during_execution, }) } } deserializer.deserialize_struct( "PodAntiAffinity", &[ "preferredDuringSchedulingIgnoredDuringExecution", "requiredDuringSchedulingIgnoredDuringExecution", ], Visitor, ) } } impl serde::Serialize for PodAntiAffinity { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer { let mut state = serializer.serialize_struct( "PodAntiAffinity", self.preferred_during_scheduling_ignored_during_execution.as_ref().map_or(0, |_| 1) + self.required_during_scheduling_ignored_during_execution.as_ref().map_or(0, |_| 1), )?; if let Some(value) = &self.preferred_during_scheduling_ignored_during_execution { serde::ser::SerializeStruct::serialize_field(&mut state, "preferredDuringSchedulingIgnoredDuringExecution", value)?; } if let Some(value) = &self.required_during_scheduling_ignored_during_execution { serde::ser::SerializeStruct::serialize_field(&mut state, "requiredDuringSchedulingIgnoredDuringExecution", value)?; } serde::ser::SerializeStruct::end(state) } }
59.861386
638
0.659775
1ef01564a41d9770f9de94b1a77b2b2b9e86935e
2,062
mod convert; use tonic::{transport::Server, Request, Response, Status}; use uuid::Uuid; use crate::{config::ServerConfig, connector::Connectors, db::Db}; use ::shared::proto::{ self, gateway::*, lnctl_gateway_server::{LnctlGateway, LnctlGatewayServer}, }; struct GatewayServer { id: Uuid, connectors: Connectors, db: Db, } impl GatewayServer { pub fn new(id: Uuid, connectors: Connectors, db: Db) -> Self { Self { id, connectors, db } } } #[tonic::async_trait] impl LnctlGateway for GatewayServer { async fn get_status( &self, _request: Request<GetStatusRequest>, ) -> Result<Response<GetStatusResponse>, Status> { let connectors = self.connectors.read().await; let ret = GetStatusResponse { gateway_id: self.id.to_string(), connectors: connectors .iter() .map(|(id, con)| ConnectorInfo { id: id.to_string(), monitored_node_id: con.monitored_node_id.to_string(), r#type: con.r#type.clone(), }) .collect(), }; Ok(Response::new(ret)) } async fn list_monitored_channel_snapshots( &self, request: Request<ListMonitoredChannelSnapshotsRequest>, ) -> Result<Response<ListMonitoredChannelSnapshotsResponse>, Status> { let request = request.into_inner(); let snapshots = self .db .list_monitored_channel_snapshots(request.short_channel_id)?; Ok(Response::new(ListMonitoredChannelSnapshotsResponse { snapshots, })) } } pub(crate) async fn run_server( config: ServerConfig, id: Uuid, connectors: Connectors, db: Db, ) -> anyhow::Result<()> { println!("Listening on port {}", config.port); Server::builder() .add_service(LnctlGatewayServer::new(GatewayServer::new( id, connectors, db, ))) .serve(([0, 0, 0, 0], config.port).into()) .await?; Ok(()) }
27.493333
74
0.587779
1a96bb00046d2c5c844b2fc8e05296fe0f3793d7
1,041
//! Tests auto-converted from "sass-spec/spec/libsass-closed-issues/issue_701.hrx" #[test] fn test() { assert_eq!( crate::rsass( ".test-1 {\ \n content: null;\ \n content: inspect(null);\ \n content: inspect(false);\ \n content: inspect(true);\ \n content: inspect(42);\ \n content: inspect(42.3);\ \n content: inspect(42px);\ \n content: inspect(\"string\");\ \n $list: 1, 2, 3;\ \n content: inspect($list);\ \n $map: ( a: 1, b: 2, c: 3 );\ \n content: inspect($map);\ \n}\ \n" ) .unwrap(), ".test-1 {\ \n content: null;\ \n content: false;\ \n content: true;\ \n content: 42;\ \n content: 42.3;\ \n content: 42px;\ \n content: \"string\";\ \n content: 1, 2, 3;\ \n content: (a: 1, b: 2, c: 3);\ \n}\ \n" ); }
27.394737
82
0.40634
d707634fd85031842ead4b3326e4999b3db5c5f8
8,547
use crate::{App, Plugin}; use bevy_utils::{tracing::debug, tracing::warn, HashMap}; use std::any::TypeId; /// Combines multiple [`Plugin`]s into a single unit. pub trait PluginGroup { /// Configures the [`Plugin`]s that are to be added. fn build(&mut self, group: &mut PluginGroupBuilder); } struct PluginEntry { plugin: Box<dyn Plugin>, enabled: bool, } /// Facilitates the creation and configuration of a [`PluginGroup`]. /// Provides a build ordering to ensure that [`Plugin`]s which produce/require a [`Resource`](bevy_ecs::system::Resource) /// are built before/after dependent/depending [`Plugin`]s. [`Plugin`]s inside the group /// can be disabled, enabled or reordered. #[derive(Default)] pub struct PluginGroupBuilder { plugins: HashMap<TypeId, PluginEntry>, order: Vec<TypeId>, } impl PluginGroupBuilder { /// Finds the index of a target [`Plugin`]. Panics if the target's [`TypeId`] is not found. fn index_of<Target: Plugin>(&mut self) -> usize { let index = self .order .iter() .position(|&ty| ty == TypeId::of::<Target>()); match index { Some(i) => i, None => panic!( "Plugin does not exist in group: {}.", std::any::type_name::<Target>() ), } } // Insert the new plugin as enabled, and removes its previous ordering if it was // already present fn upsert_plugin_state<T: Plugin>(&mut self, plugin: T, added_at_index: usize) { if let Some(entry) = self.plugins.insert( TypeId::of::<T>(), PluginEntry { plugin: Box::new(plugin), enabled: true, }, ) { if entry.enabled { warn!( "You are replacing plugin '{}' that was not disabled.", entry.plugin.name() ); } if let Some(to_remove) = self .order .iter() .enumerate() .find(|(i, ty)| *i != added_at_index && **ty == TypeId::of::<T>()) .map(|(i, _)| i) { self.order.remove(to_remove); } } } /// Adds the plugin [`Plugin`] at the end of this [`PluginGroupBuilder`]. If the plugin was /// already in the group, it is removed from its previous place. 
pub fn add<T: Plugin>(&mut self, plugin: T) -> &mut Self { let target_index = self.order.len(); self.order.push(TypeId::of::<T>()); self.upsert_plugin_state(plugin, target_index); self } /// Adds a [`Plugin`] in this [`PluginGroupBuilder`] before the plugin of type `Target`. /// If the plugin was already the group, it is removed from its previous place. There must /// be a plugin of type `Target` in the group or it will panic. pub fn add_before<Target: Plugin, T: Plugin>(&mut self, plugin: T) -> &mut Self { let target_index = self.index_of::<Target>(); self.order.insert(target_index, TypeId::of::<T>()); self.upsert_plugin_state(plugin, target_index); self } /// Adds a [`Plugin`] in this [`PluginGroupBuilder`] after the plugin of type `Target`. /// If the plugin was already the group, it is removed from its previous place. There must /// be a plugin of type `Target` in the group or it will panic. pub fn add_after<Target: Plugin, T: Plugin>(&mut self, plugin: T) -> &mut Self { let target_index = self.index_of::<Target>() + 1; self.order.insert(target_index, TypeId::of::<T>()); self.upsert_plugin_state(plugin, target_index); self } /// Enables a [`Plugin`]. /// /// [`Plugin`]s within a [`PluginGroup`] are enabled by default. This function is used to /// opt back in to a [`Plugin`] after [disabling](Self::disable) it. If there are no plugins /// of type `T` in this group, it will panic. pub fn enable<T: Plugin>(&mut self) -> &mut Self { let mut plugin_entry = self .plugins .get_mut(&TypeId::of::<T>()) .expect("Cannot enable a plugin that does not exist."); plugin_entry.enabled = true; self } /// Disables a [`Plugin`], preventing it from being added to the [`App`] with the rest of the /// [`PluginGroup`]. The disabled [`Plugin`] keeps its place in the [`PluginGroup`], so it can /// still be used for ordering with [`add_before`](Self::add_before) or /// [`add_after`](Self::add_after), or it can be [re-enabled](Self::enable). 
If there are no /// plugins of type `T` in this group, it will panic. pub fn disable<T: Plugin>(&mut self) -> &mut Self { let mut plugin_entry = self .plugins .get_mut(&TypeId::of::<T>()) .expect("Cannot disable a plugin that does not exist."); plugin_entry.enabled = false; self } /// Consumes the [`PluginGroupBuilder`] and [builds](Plugin::build) the contained [`Plugin`]s /// in the order specified. pub fn finish(self, app: &mut App) { for ty in &self.order { if let Some(entry) = self.plugins.get(ty) { if entry.enabled { debug!("added plugin: {}", entry.plugin.name()); entry.plugin.build(app); } } } } } #[cfg(test)] mod tests { use super::PluginGroupBuilder; use crate::{App, Plugin}; struct PluginA; impl Plugin for PluginA { fn build(&self, _: &mut App) {} } struct PluginB; impl Plugin for PluginB { fn build(&self, _: &mut App) {} } struct PluginC; impl Plugin for PluginC { fn build(&self, _: &mut App) {} } #[test] fn basic_ordering() { let mut group = PluginGroupBuilder::default(); group.add(PluginA); group.add(PluginB); group.add(PluginC); assert_eq!( group.order, vec![ std::any::TypeId::of::<PluginA>(), std::any::TypeId::of::<PluginB>(), std::any::TypeId::of::<PluginC>(), ] ); } #[test] fn add_after() { let mut group = PluginGroupBuilder::default(); group.add(PluginA); group.add(PluginB); group.add_after::<PluginA, PluginC>(PluginC); assert_eq!( group.order, vec![ std::any::TypeId::of::<PluginA>(), std::any::TypeId::of::<PluginC>(), std::any::TypeId::of::<PluginB>(), ] ); } #[test] fn add_before() { let mut group = PluginGroupBuilder::default(); group.add(PluginA); group.add(PluginB); group.add_before::<PluginB, PluginC>(PluginC); assert_eq!( group.order, vec![ std::any::TypeId::of::<PluginA>(), std::any::TypeId::of::<PluginC>(), std::any::TypeId::of::<PluginB>(), ] ); } #[test] fn readd() { let mut group = PluginGroupBuilder::default(); group.add(PluginA); group.add(PluginB); group.add(PluginC); group.add(PluginB); assert_eq!( group.order, vec![ 
std::any::TypeId::of::<PluginA>(), std::any::TypeId::of::<PluginC>(), std::any::TypeId::of::<PluginB>(), ] ); } #[test] fn readd_after() { let mut group = PluginGroupBuilder::default(); group.add(PluginA); group.add(PluginB); group.add(PluginC); group.add_after::<PluginA, PluginC>(PluginC); assert_eq!( group.order, vec![ std::any::TypeId::of::<PluginA>(), std::any::TypeId::of::<PluginC>(), std::any::TypeId::of::<PluginB>(), ] ); } #[test] fn readd_before() { let mut group = PluginGroupBuilder::default(); group.add(PluginA); group.add(PluginB); group.add(PluginC); group.add_before::<PluginB, PluginC>(PluginC); assert_eq!( group.order, vec![ std::any::TypeId::of::<PluginA>(), std::any::TypeId::of::<PluginC>(), std::any::TypeId::of::<PluginB>(), ] ); } }
32.011236
121
0.530245
fb09118035c7e3cea4d962cb7f87689005ebe555
21,320
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! This is an Earley-like parser, without support for in-grammar nonterminals, //! only by calling out to the main rust parser for named nonterminals (which it //! commits to fully when it hits one in a grammar). This means that there are no //! completer or predictor rules, and therefore no need to store one column per //! token: instead, there's a set of current Earley items and a set of next //! ones. Instead of NTs, we have a special case for Kleene star. The big-O, in //! pathological cases, is worse than traditional Earley parsing, but it's an //! easier fit for Macro-by-Example-style rules, and I think the overhead is //! lower. (In order to prevent the pathological case, we'd need to lazily //! construct the resulting `NamedMatch`es at the very end. It'd be a pain, //! and require more memory to keep around old items, but it would also save //! overhead) //! //! Quick intro to how the parser works: //! //! A 'position' is a dot in the middle of a matcher, usually represented as a //! dot. For example `· a $( a )* a b` is a position, as is `a $( · a )* a b`. //! //! The parser walks through the input a character at a time, maintaining a list //! of items consistent with the current position in the input string: `cur_eis`. //! //! As it processes them, it fills up `eof_eis` with items that would be valid if //! the macro invocation is now over, `bb_eis` with items that are waiting on //! a Rust nonterminal like `$e:expr`, and `next_eis` with items that are waiting //! on a particular token. 
Most of the logic concerns moving the · through the //! repetitions indicated by Kleene stars. It only advances or calls out to the //! real Rust parser when no `cur_eis` items remain //! //! Example: Start parsing `a a a a b` against [· a $( a )* a b]. //! //! Remaining input: `a a a a b` //! next_eis: [· a $( a )* a b] //! //! - - - Advance over an `a`. - - - //! //! Remaining input: `a a a b` //! cur: [a · $( a )* a b] //! Descend/Skip (first item). //! next: [a $( · a )* a b] [a $( a )* · a b]. //! //! - - - Advance over an `a`. - - - //! //! Remaining input: `a a b` //! cur: [a $( a · )* a b] next: [a $( a )* a · b] //! Finish/Repeat (first item) //! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b] //! //! - - - Advance over an `a`. - - - (this looks exactly like the last step) //! //! Remaining input: `a b` //! cur: [a $( a · )* a b] next: [a $( a )* a · b] //! Finish/Repeat (first item) //! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b] //! //! - - - Advance over an `a`. - - - (this looks exactly like the last step) //! //! Remaining input: `b` //! cur: [a $( a · )* a b] next: [a $( a )* a · b] //! Finish/Repeat (first item) //! next: [a $( a )* · a b] [a $( · a )* a b] //! //! - - - Advance over a `b`. - - - //! //! Remaining input: `` //! eof: [a $( a )* a b ·] pub use self::NamedMatch::*; pub use self::ParseResult::*; use self::TokenTreeOrTokenTreeVec::*; use ast; use ast::{TokenTree, Name, Ident}; use codemap::{BytePos, mk_sp, Span, Spanned}; use codemap; use parse::lexer::*; //resolve bug? use parse::ParseSess; use parse::parser::{LifetimeAndTypesWithoutColons, Parser}; use parse::token::{DocComment, MatchNt, SubstNt}; use parse::token::{Token, Nonterminal}; use parse::token; use print::pprust; use ptr::P; use std::mem; use std::rc::Rc; use std::collections::HashMap; use std::collections::hash_map::Entry::{Vacant, Occupied}; // To avoid costly uniqueness checks, we require that `MatchSeq` always has // a nonempty body. 
#[derive(Clone)] enum TokenTreeOrTokenTreeVec { Tt(ast::TokenTree), TtSeq(Rc<Vec<ast::TokenTree>>), } impl TokenTreeOrTokenTreeVec { fn len(&self) -> usize { match *self { TtSeq(ref v) => v.len(), Tt(ref tt) => tt.len(), } } fn get_tt(&self, index: usize) -> TokenTree { match *self { TtSeq(ref v) => v[index].clone(), Tt(ref tt) => tt.get_tt(index), } } } /// an unzipping of `TokenTree`s #[derive(Clone)] struct MatcherTtFrame { elts: TokenTreeOrTokenTreeVec, idx: usize, } #[derive(Clone)] pub struct MatcherPos { stack: Vec<MatcherTtFrame>, top_elts: TokenTreeOrTokenTreeVec, sep: Option<Token>, idx: usize, up: Option<Box<MatcherPos>>, matches: Vec<Vec<Rc<NamedMatch>>>, match_lo: usize, match_cur: usize, match_hi: usize, sp_lo: BytePos, } pub fn count_names(ms: &[TokenTree]) -> usize { ms.iter().fold(0, |count, elt| { count + match *elt { TokenTree::Sequence(_, ref seq) => { seq.num_captures } TokenTree::Delimited(_, ref delim) => { count_names(&delim.tts) } TokenTree::Token(_, MatchNt(..)) => { 1 } TokenTree::Token(_, _) => 0, } }) } pub fn initial_matcher_pos(ms: Rc<Vec<TokenTree>>, sep: Option<Token>, lo: BytePos) -> Box<MatcherPos> { let match_idx_hi = count_names(&ms[..]); let matches: Vec<_> = (0..match_idx_hi).map(|_| Vec::new()).collect(); Box::new(MatcherPos { stack: vec![], top_elts: TtSeq(ms), sep: sep, idx: 0, up: None, matches: matches, match_lo: 0, match_cur: 0, match_hi: match_idx_hi, sp_lo: lo }) } /// NamedMatch is a pattern-match result for a single token::MATCH_NONTERMINAL: /// so it is associated with a single ident in a parse, and all /// `MatchedNonterminal`s in the NamedMatch have the same nonterminal type /// (expr, item, etc). Each leaf in a single NamedMatch corresponds to a /// single token::MATCH_NONTERMINAL in the TokenTree that produced it. /// /// The in-memory structure of a particular NamedMatch represents the match /// that occurred when a particular subset of a matcher was applied to a /// particular token tree. 
/// /// The width of each MatchedSeq in the NamedMatch, and the identity of the /// `MatchedNonterminal`s, will depend on the token tree it was applied to: /// each MatchedSeq corresponds to a single TTSeq in the originating /// token tree. The depth of the NamedMatch structure will therefore depend /// only on the nesting depth of `ast::TTSeq`s in the originating /// token tree it was derived from. pub enum NamedMatch { MatchedSeq(Vec<Rc<NamedMatch>>, codemap::Span), MatchedNonterminal(Nonterminal) } pub fn nameize(p_s: &ParseSess, ms: &[TokenTree], res: &[Rc<NamedMatch>]) -> ParseResult<HashMap<Name, Rc<NamedMatch>>> { fn n_rec(p_s: &ParseSess, m: &TokenTree, res: &[Rc<NamedMatch>], ret_val: &mut HashMap<Name, Rc<NamedMatch>>, idx: &mut usize) -> Result<(), (codemap::Span, String)> { match *m { TokenTree::Sequence(_, ref seq) => { for next_m in &seq.tts { try!(n_rec(p_s, next_m, res, ret_val, idx)) } } TokenTree::Delimited(_, ref delim) => { for next_m in &delim.tts { try!(n_rec(p_s, next_m, res, ret_val, idx)); } } TokenTree::Token(sp, MatchNt(bind_name, _, _, _)) => { match ret_val.entry(bind_name.name) { Vacant(spot) => { spot.insert(res[*idx].clone()); *idx += 1; } Occupied(..) => { return Err((sp, format!("duplicated bind name: {}", bind_name))) } } } TokenTree::Token(sp, SubstNt(..)) => { return Err((sp, "missing fragment specifier".to_string())) } TokenTree::Token(_, _) => (), } Ok(()) } let mut ret_val = HashMap::new(); let mut idx = 0; for m in ms { match n_rec(p_s, m, res, &mut ret_val, &mut idx) { Ok(_) => {}, Err((sp, msg)) => return Error(sp, msg), } } Success(ret_val) } pub enum ParseResult<T> { Success(T), /// Arm failed to match Failure(codemap::Span, String), /// Fatal error (malformed macro?). Abort compilation. 
Error(codemap::Span, String) } pub type NamedParseResult = ParseResult<HashMap<Name, Rc<NamedMatch>>>; pub type PositionalParseResult = ParseResult<Vec<Rc<NamedMatch>>>; /// Perform a token equality check, ignoring syntax context (that is, an /// unhygienic comparison) pub fn token_name_eq(t1 : &Token, t2 : &Token) -> bool { match (t1,t2) { (&token::Ident(id1,_),&token::Ident(id2,_)) | (&token::Lifetime(id1),&token::Lifetime(id2)) => id1.name == id2.name, _ => *t1 == *t2 } } pub fn parse(sess: &ParseSess, cfg: ast::CrateConfig, mut rdr: TtReader, ms: &[TokenTree]) -> NamedParseResult { let mut cur_eis = Vec::new(); cur_eis.push(initial_matcher_pos(Rc::new(ms.iter() .cloned() .collect()), None, rdr.peek().sp.lo)); loop { let mut bb_eis = Vec::new(); // black-box parsed by parser.rs let mut next_eis = Vec::new(); // or proceed normally let mut eof_eis = Vec::new(); let TokenAndSpan { tok, sp } = rdr.peek(); /* we append new items to this while we go */ loop { let mut ei = match cur_eis.pop() { None => break, /* for each Earley Item */ Some(ei) => ei, }; // When unzipped trees end, remove them while ei.idx >= ei.top_elts.len() { match ei.stack.pop() { Some(MatcherTtFrame { elts, idx }) => { ei.top_elts = elts; ei.idx = idx + 1; } None => break } } let idx = ei.idx; let len = ei.top_elts.len(); /* at end of sequence */ if idx >= len { // can't move out of `match`es, so: if ei.up.is_some() { // hack: a matcher sequence is repeating iff it has a // parent (the top level is just a container) // disregard separator, try to go up // (remove this condition to make trailing seps ok) if idx == len { // pop from the matcher position let mut new_pos = ei.up.clone().unwrap(); // update matches (the MBE "parse tree") by appending // each tree as a subtree. // I bet this is a perf problem: we're preemptively // doing a lot of array work that will get thrown away // most of the time. 
// Only touch the binders we have actually bound for idx in ei.match_lo..ei.match_hi { let sub = (ei.matches[idx]).clone(); (&mut new_pos.matches[idx]) .push(Rc::new(MatchedSeq(sub, mk_sp(ei.sp_lo, sp.hi)))); } new_pos.match_cur = ei.match_hi; new_pos.idx += 1; cur_eis.push(new_pos); } // can we go around again? // the *_t vars are workarounds for the lack of unary move match ei.sep { Some(ref t) if idx == len => { // we need a separator // i'm conflicted about whether this should be hygienic.... // though in this case, if the separators are never legal // idents, it shouldn't matter. if token_name_eq(&tok, t) { //pass the separator let mut ei_t = ei.clone(); // ei_t.match_cur = ei_t.match_lo; ei_t.idx += 1; next_eis.push(ei_t); } } _ => { // we don't need a separator let mut ei_t = ei; ei_t.match_cur = ei_t.match_lo; ei_t.idx = 0; cur_eis.push(ei_t); } } } else { eof_eis.push(ei); } } else { match ei.top_elts.get_tt(idx) { /* need to descend into sequence */ TokenTree::Sequence(sp, seq) => { if seq.op == ast::ZeroOrMore { let mut new_ei = ei.clone(); new_ei.match_cur += seq.num_captures; new_ei.idx += 1; //we specifically matched zero repeats. for idx in ei.match_cur..ei.match_cur + seq.num_captures { (&mut new_ei.matches[idx]).push(Rc::new(MatchedSeq(vec![], sp))); } cur_eis.push(new_ei); } let matches: Vec<_> = (0..ei.matches.len()) .map(|_| Vec::new()).collect(); let ei_t = ei; cur_eis.push(Box::new(MatcherPos { stack: vec![], sep: seq.separator.clone(), idx: 0, matches: matches, match_lo: ei_t.match_cur, match_cur: ei_t.match_cur, match_hi: ei_t.match_cur + seq.num_captures, up: Some(ei_t), sp_lo: sp.lo, top_elts: Tt(TokenTree::Sequence(sp, seq)), })); } TokenTree::Token(_, MatchNt(..)) => { // Built-in nonterminals never start with these tokens, // so we can eliminate them from consideration. 
match tok { token::CloseDelim(_) => {}, _ => bb_eis.push(ei), } } TokenTree::Token(sp, SubstNt(..)) => { return Error(sp, "missing fragment specifier".to_string()) } seq @ TokenTree::Delimited(..) | seq @ TokenTree::Token(_, DocComment(..)) => { let lower_elts = mem::replace(&mut ei.top_elts, Tt(seq)); let idx = ei.idx; ei.stack.push(MatcherTtFrame { elts: lower_elts, idx: idx, }); ei.idx = 0; cur_eis.push(ei); } TokenTree::Token(_, ref t) => { let mut ei_t = ei.clone(); if token_name_eq(t,&tok) { ei_t.idx += 1; next_eis.push(ei_t); } } } } } /* error messages here could be improved with links to orig. rules */ if token_name_eq(&tok, &token::Eof) { if eof_eis.len() == 1 { let mut v = Vec::new(); for dv in &mut (&mut eof_eis[0]).matches { v.push(dv.pop().unwrap()); } return nameize(sess, ms, &v[..]); } else if eof_eis.len() > 1 { return Error(sp, "ambiguity: multiple successful parses".to_string()); } else { return Failure(sp, "unexpected end of macro invocation".to_string()); } } else { if (!bb_eis.is_empty() && !next_eis.is_empty()) || bb_eis.len() > 1 { let nts = bb_eis.iter().map(|ei| match ei.top_elts.get_tt(ei.idx) { TokenTree::Token(_, MatchNt(bind, name, _, _)) => { format!("{} ('{}')", name, bind) } _ => panic!() }).collect::<Vec<String>>().join(" or "); return Error(sp, format!( "local ambiguity: multiple parsing options: {}", match next_eis.len() { 0 => format!("built-in NTs {}.", nts), 1 => format!("built-in NTs {} or 1 other option.", nts), n => format!("built-in NTs {} or {} other options.", nts, n), } )) } else if bb_eis.is_empty() && next_eis.is_empty() { return Failure(sp, format!("no rules expected the token `{}`", pprust::token_to_string(&tok))); } else if !next_eis.is_empty() { /* Now process the next token */ while !next_eis.is_empty() { cur_eis.push(next_eis.pop().unwrap()); } rdr.next_token(); } else /* bb_eis.len() == 1 */ { let mut rust_parser = Parser::new(sess, cfg.clone(), Box::new(rdr.clone())); let mut ei = bb_eis.pop().unwrap(); match 
ei.top_elts.get_tt(ei.idx) { TokenTree::Token(span, MatchNt(_, ident, _, _)) => { let match_cur = ei.match_cur; (&mut ei.matches[match_cur]).push(Rc::new(MatchedNonterminal( parse_nt(&mut rust_parser, span, &ident.name.as_str())))); ei.idx += 1; ei.match_cur += 1; } _ => panic!() } cur_eis.push(ei); for _ in 0..rust_parser.tokens_consumed { let _ = rdr.next_token(); } } } assert!(!cur_eis.is_empty()); } } pub fn parse_nt(p: &mut Parser, sp: Span, name: &str) -> Nonterminal { match name { "tt" => { p.quote_depth += 1; //but in theory, non-quoted tts might be useful let res = token::NtTT(P(panictry!(p.parse_token_tree()))); p.quote_depth -= 1; return res; } _ => {} } // check at the beginning and the parser checks after each bump panictry!(p.check_unknown_macro_variable()); match name { "item" => match panictry!(p.parse_item()) { Some(i) => token::NtItem(i), None => panic!(p.fatal("expected an item keyword")) }, "block" => token::NtBlock(panictry!(p.parse_block())), "stmt" => match panictry!(p.parse_stmt()) { Some(s) => token::NtStmt(s), None => panic!(p.fatal("expected a statement")) }, "pat" => token::NtPat(panictry!(p.parse_pat())), "expr" => token::NtExpr(panictry!(p.parse_expr())), "ty" => token::NtTy(panictry!(p.parse_ty())), // this could be handled like a token, since it is one "ident" => match p.token { token::Ident(sn,b) => { panictry!(p.bump()); token::NtIdent(Box::new(Spanned::<Ident>{node: sn, span: p.span}),b) } _ => { let token_str = pprust::token_to_string(&p.token); panic!(p.fatal(&format!("expected ident, found {}", &token_str[..]))) } }, "path" => { token::NtPath(Box::new(panictry!(p.parse_path(LifetimeAndTypesWithoutColons)))) }, "meta" => token::NtMeta(panictry!(p.parse_meta_item())), _ => { panic!(p.span_fatal_help(sp, &format!("invalid fragment specifier `{}`", name), "valid fragment specifiers are `ident`, `block`, \ `stmt`, `expr`, `pat`, `ty`, `path`, `meta`, `tt` \ and `item`")) } } }
38.623188
99
0.479925
d9c728a0a8d319d807a6ad1a240b9d8b2a9ff53b
1,997
use std::rc::Rc; use std::fmt; use std::collections::HashMap; use super::*; #[derive(Debug, Clone, PartialEq)] pub enum StatementNode { Expression(Expression), Table(Expression), Assignment(Expression, Expression), } #[derive(Debug, Clone, PartialEq)] pub struct Statement { pub node: StatementNode, pub pos: Pos, } impl Statement { pub fn new(node: StatementNode, pos: Pos) -> Self { Statement { node, pos, } } } #[derive(Debug, Clone, PartialEq)] pub enum ExpressionNode { Number(f64), Text(String), Char(char), Bool(bool), Identifier(String), Binary(Rc<Expression>, Operator, Rc<Expression>), EOF, } #[derive(Debug, Clone, PartialEq)] pub struct Expression { pub node: ExpressionNode, pub pos: Pos, } impl Expression { pub fn new(node: ExpressionNode, pos: Pos) -> Self { Expression { node, pos, } } } #[derive(Debug, Clone, PartialEq)] pub enum Operator { Add, Sub, Mul, Div, Mod, Pow, Eq, Lt, Gt, NEq, LtEq, GtEq, } impl Operator { pub fn from_str(operator: &str) -> Option<(Operator, u8)> { use self::Operator::*; let op_prec = match operator { "==" => (Eq, 1), "<" => (Lt, 1), ">" => (Gt, 1), "!=" => (NEq, 1), "<=" => (LtEq, 1), ">=" => (GtEq, 1), "+" => (Add, 2), "-" => (Sub, 2), "*" => (Mul, 3), "/" => (Div, 3), "%" => (Mod, 3), "^" => (Pow, 4), _ => return None, }; Some(op_prec) } } impl fmt::Display for Operator { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::Operator::*; let t = match *self { Add => "+", Sub => "-", Pow => "^", Mul => "*", Div => "/", Mod => "%", Eq => "==", Lt => "<", Gt => ">", NEq => "!=", LtEq => "<=", GtEq => ">=", }; write!(f, "{}", t) } }
16.641667
61
0.470205
91c04bc15fa03770b1c4746a5945d592e342cbcf
1,173
use std::fs; fn main() { let filename = "data.txt"; println!("In file {}", filename); let contents = fs::read_to_string(filename) .expect("Something went wrong reading the file"); let lines: Vec<String> = contents.split("\n").map(|line| line.to_string()).collect(); let mut valid_password_count = 0; lines.iter().for_each(|line| { let parts: Vec<&str> = line.split(":").collect(); let (rule, password) = (parts[0].trim(), parts[1].trim()); let rule_parts: Vec<&str> = rule.split(" ").collect(); let letter: char = rule_parts[1].trim().chars().next().unwrap(); let bounds: Vec<u32> = rule_parts[0].split("-").map(|digits| digits.parse().unwrap() ).collect(); let lower_bound = bounds[0]; let upper_bound = bounds[1]; let instances_of_letter: usize = password.chars().filter(|char| { *char == letter}).count(); if instances_of_letter >= lower_bound as usize && instances_of_letter <= upper_bound as usize { valid_password_count += 1; println!("{}", line) } }); println!("There are {} valid passwords", valid_password_count); }
40.448276
105
0.599318
0a39f4150704fb168d33408460d00a0a7c8418a1
209
use quote::quote; fn main() { let nonrep = ""; // Without some protection against repetitions with no iterator somewhere // inside, this would loop infinitely. quote!(#(#nonrep #nonrep)*); }
20.9
77
0.650718
eb69de5abaf89f90c018b38b18f1c1e6b035f41e
7,524
use crate::{ update_asset_storage_system, Asset, AssetLoader, AssetServer, AssetStage, Handle, HandleId, RefChange, }; use bevy_app::{prelude::Events, AppBuilder}; use bevy_ecs::{ system::{IntoSystem, ResMut}, world::FromWorld, }; use bevy_utils::HashMap; use crossbeam_channel::Sender; use std::fmt::Debug; /// Events that happen on assets of type `T` pub enum AssetEvent<T: Asset> { Created { handle: Handle<T> }, Modified { handle: Handle<T> }, Removed { handle: Handle<T> }, } impl<T: Asset> Debug for AssetEvent<T> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { AssetEvent::Created { handle } => f .debug_struct(&format!( "AssetEvent<{}>::Created", std::any::type_name::<T>() )) .field("handle", &handle.id) .finish(), AssetEvent::Modified { handle } => f .debug_struct(&format!( "AssetEvent<{}>::Modified", std::any::type_name::<T>() )) .field("handle", &handle.id) .finish(), AssetEvent::Removed { handle } => f .debug_struct(&format!( "AssetEvent<{}>::Removed", std::any::type_name::<T>() )) .field("handle", &handle.id) .finish(), } } } /// Stores Assets of a given type and tracks changes to them. 
#[derive(Debug)] pub struct Assets<T: Asset> { assets: HashMap<HandleId, T>, events: Events<AssetEvent<T>>, pub(crate) ref_change_sender: Sender<RefChange>, } impl<T: Asset> Assets<T> { pub(crate) fn new(ref_change_sender: Sender<RefChange>) -> Self { Assets { assets: HashMap::default(), events: Events::default(), ref_change_sender, } } pub fn add(&mut self, asset: T) -> Handle<T> { let id = HandleId::random::<T>(); self.assets.insert(id, asset); self.events.send(AssetEvent::Created { handle: Handle::weak(id), }); self.get_handle(id) } pub fn set<H: Into<HandleId>>(&mut self, handle: H, asset: T) -> Handle<T> { let id: HandleId = handle.into(); if self.assets.insert(id, asset).is_some() { self.events.send(AssetEvent::Modified { handle: Handle::weak(id), }); } else { self.events.send(AssetEvent::Created { handle: Handle::weak(id), }); } self.get_handle(id) } pub fn set_untracked<H: Into<HandleId>>(&mut self, handle: H, asset: T) { let id: HandleId = handle.into(); if self.assets.insert(id, asset).is_some() { self.events.send(AssetEvent::Modified { handle: Handle::weak(id), }); } else { self.events.send(AssetEvent::Created { handle: Handle::weak(id), }); } } pub fn get<H: Into<HandleId>>(&self, handle: H) -> Option<&T> { self.assets.get(&handle.into()) } pub fn contains<H: Into<HandleId>>(&self, handle: H) -> bool { self.assets.contains_key(&handle.into()) } pub fn get_mut<H: Into<HandleId>>(&mut self, handle: H) -> Option<&mut T> { let id: HandleId = handle.into(); self.events.send(AssetEvent::Modified { handle: Handle::weak(id), }); self.assets.get_mut(&id) } pub fn get_handle<H: Into<HandleId>>(&self, handle: H) -> Handle<T> { Handle::strong(handle.into(), self.ref_change_sender.clone()) } pub fn get_or_insert_with<H: Into<HandleId>>( &mut self, handle: H, insert_fn: impl FnOnce() -> T, ) -> &mut T { let mut event = None; let id: HandleId = handle.into(); let borrowed = self.assets.entry(id).or_insert_with(|| { event = Some(AssetEvent::Created { handle: 
Handle::weak(id), }); insert_fn() }); if let Some(event) = event { self.events.send(event); } borrowed } pub fn iter(&self) -> impl Iterator<Item = (HandleId, &T)> { self.assets.iter().map(|(k, v)| (*k, v)) } pub fn ids(&self) -> impl Iterator<Item = HandleId> + '_ { self.assets.keys().cloned() } pub fn remove<H: Into<HandleId>>(&mut self, handle: H) -> Option<T> { let id: HandleId = handle.into(); let asset = self.assets.remove(&id); if asset.is_some() { self.events.send(AssetEvent::Removed { handle: Handle::weak(id), }); } asset } /// Clears the inner asset map, removing all key-value pairs. /// /// Keeps the allocated memory for reuse. pub fn clear(&mut self) { self.assets.clear() } /// Reserves capacity for at least additional more elements to be inserted into the assets. /// /// The collection may reserve more space to avoid frequent reallocations. pub fn reserve(&mut self, additional: usize) { self.assets.reserve(additional) } /// Shrinks the capacity of the asset map as much as possible. /// /// It will drop down as much as possible while maintaining the internal rules and possibly /// leaving some space in accordance with the resize policy. 
pub fn shrink_to_fit(&mut self) { self.assets.shrink_to_fit() } pub fn asset_event_system( mut events: ResMut<Events<AssetEvent<T>>>, mut assets: ResMut<Assets<T>>, ) { events.extend(assets.events.drain()) } pub fn len(&self) -> usize { self.assets.len() } pub fn is_empty(&self) -> bool { self.assets.is_empty() } } /// [AppBuilder] extension methods for adding new asset types pub trait AddAsset { fn add_asset<T>(&mut self) -> &mut Self where T: Asset; fn init_asset_loader<T>(&mut self) -> &mut Self where T: AssetLoader + FromWorld; fn add_asset_loader<T>(&mut self, loader: T) -> &mut Self where T: AssetLoader; } impl AddAsset for AppBuilder { fn add_asset<T>(&mut self) -> &mut Self where T: Asset, { let assets = { let asset_server = self.world().get_resource::<AssetServer>().unwrap(); asset_server.register_asset_type::<T>() }; self.insert_resource(assets) .add_system_to_stage( AssetStage::AssetEvents, Assets::<T>::asset_event_system.system(), ) .add_system_to_stage( AssetStage::LoadAssets, update_asset_storage_system::<T>.system(), ) .register_type::<Handle<T>>() .add_event::<AssetEvent<T>>() } fn init_asset_loader<T>(&mut self) -> &mut Self where T: AssetLoader + FromWorld, { let result = T::from_world(self.world_mut()); self.add_asset_loader(result) } fn add_asset_loader<T>(&mut self, loader: T) -> &mut Self where T: AssetLoader, { self.world_mut() .get_resource_mut::<AssetServer>() .expect("AssetServer does not exist. Consider adding it as a resource.") .add_loader(loader); self } }
29.505882
95
0.539474
e6829a27e2caec5f1ea487c0b255943e01cb58cd
2,633
use na::{self, RealField, Vector3}; use crate::math::{Isometry, Point}; use crate::query::{Ray, RayCast, RayIntersection}; use crate::shape::{FeatureId, Triangle}; impl<N: RealField + Copy> RayCast<N> for Triangle<N> { #[inline] fn toi_and_normal_with_ray( &self, m: &Isometry<N>, ray: &Ray<N>, max_toi: N, _: bool, ) -> Option<RayIntersection<N>> { let ls_ray = ray.inverse_transform_by(m); let mut inter = ray_intersection_with_triangle(&self.a, &self.b, &self.c, &ls_ray)?.0; if inter.toi <= max_toi { inter.normal = m * inter.normal; Some(inter) } else { None } } } /// Computes the intersection between a triangle and a ray. /// /// If an intersection is found, the time of impact, the normal and the barycentric coordinates of /// the intersection point are returned. pub fn ray_intersection_with_triangle<N: RealField + Copy>( a: &Point<N>, b: &Point<N>, c: &Point<N>, ray: &Ray<N>, ) -> Option<(RayIntersection<N>, Vector3<N>)> { let ab = *b - *a; let ac = *c - *a; // normal let n = ab.cross(&ac); let d = n.dot(&ray.dir); // the normal and the ray direction are parallel if d.is_zero() { return None; } let ap = ray.origin - *a; let t = ap.dot(&n); // the ray does not intersect the plane defined by the triangle if (t < na::zero() && d < na::zero()) || (t > na::zero() && d > na::zero()) { return None; } let fid = if d < N::zero() { 0 } else { 1 }; let d = d.abs(); // // intersection: compute barycentric coordinates // let e = -ray.dir.cross(&ap); let mut v; let mut w; let toi; let normal; if t < na::zero() { v = -ac.dot(&e); if v < na::zero() || v > d { return None; } w = ab.dot(&e); if w < na::zero() || v + w > d { return None; } let invd = na::one::<N>() / d; toi = -t * invd; normal = -n.normalize(); v = v * invd; w = w * invd; } else { v = ac.dot(&e); if v < na::zero() || v > d { return None; } w = -ab.dot(&e); if w < na::zero() || v + w > d { return None; } let invd = na::one::<N>() / d; toi = t * invd; normal = n.normalize(); v = v * invd; w = w * invd; } Some(( 
RayIntersection::new(toi, normal, FeatureId::Face(fid)), Vector3::new(-v - w + na::one(), v, w), )) }
22.895652
98
0.488036
56e8fb6f0fb0ae391a4ae1764400621930991f29
10,087
use std::fmt; use std::future::Future; use std::pin::Pin; use std::sync::Arc; use std::task::{Context as FutContext, Poll}; use std::time::Duration; use futures::future::BoxFuture; use futures::stream::{Stream, StreamExt}; use tokio::sync::mpsc::{ unbounded_channel, UnboundedReceiver as Receiver, UnboundedSender as Sender, }; use tokio::time::{sleep, Sleep}; use crate::client::bridge::gateway::ShardMessenger; use crate::collector::LazyArc; use crate::model::channel::Message; macro_rules! impl_message_collector { ($($name:ident;)*) => { $( impl $name { /// Limits how many messages will attempt to be filtered. /// /// The filter checks whether the message has been sent /// in the right guild, channel, and by the right author. #[must_use] pub fn filter_limit(mut self, limit: u32) -> Self { self.filter.as_mut().unwrap().filter_limit = Some(limit); self } /// Sets a filter function where messages passed to the `function` must /// return `true`, otherwise the message won't be collected and failed the filter /// process. /// This is the last instance to pass for a message to count as *collected*. /// /// This function is intended to be a message content filter. #[must_use] pub fn filter<F: Fn(&Arc<Message>) -> bool + 'static + Send + Sync>(mut self, function: F) -> Self { self.filter.as_mut().unwrap().filter = Some(Arc::new(function)); self } /// Sets the required author ID of a message. /// If a message does not meet this ID, it won't be received. #[must_use] pub fn author_id(mut self, author_id: impl Into<u64>) -> Self { self.filter.as_mut().unwrap().author_id = Some(author_id.into()); self } /// Sets the required channel ID of a message. /// If a message does not meet this ID, it won't be received. #[must_use] pub fn channel_id(mut self, channel_id: impl Into<u64>) -> Self { self.filter.as_mut().unwrap().channel_id = Some(channel_id.into()); self } /// Sets the required guild ID of a message. /// If a message does not meet this ID, it won't be received. 
#[must_use] pub fn guild_id(mut self, guild_id: impl Into<u64>) -> Self { self.filter.as_mut().unwrap().guild_id = Some(guild_id.into()); self } /// Sets a `duration` for how long the collector shall receive /// messages. #[must_use] pub fn timeout(mut self, duration: Duration) -> Self { self.timeout = Some(Box::pin(sleep(duration))); self } } )* } } /// Filters events on the shard's end and sends them to the collector. #[derive(Clone, Debug)] pub struct MessageFilter { filtered: u32, collected: u32, options: FilterOptions, sender: Sender<Arc<Message>>, } impl MessageFilter { /// Creates a new filter fn new(options: FilterOptions) -> (Self, Receiver<Arc<Message>>) { let (sender, receiver) = unbounded_channel(); let filter = Self { filtered: 0, collected: 0, sender, options, }; (filter, receiver) } /// Sends a `message` to the consuming collector if the `message` conforms /// to the constraints and the limits are not reached yet. pub(crate) fn send_message(&mut self, message: &mut LazyArc<'_, Message>) -> bool { if self.is_passing_constraints(message) { // TODO: On next branch, switch filter arg to &T so this as_arc() call can be removed. if self.options.filter.as_ref().map_or(true, |f| f(&message.as_arc())) { self.collected += 1; if self.sender.send(message.as_arc()).is_err() { return false; } } } self.filtered += 1; self.is_within_limits() && !self.sender.is_closed() } /// Checks if the `message` passes set constraints. /// Constraints are optional, as it is possible to limit messages to /// be sent by a specific author or in a specific guild. fn is_passing_constraints(&self, message: &Message) -> bool { self.options.guild_id.map_or(true, |g| Some(g) == message.guild_id.map(|g| g.0)) && self.options.channel_id.map_or(true, |g| g == message.channel_id.0) && self.options.author_id.map_or(true, |g| g == message.author.id.0) } /// Checks if the filter is within set receive and collect limits. 
/// A message is considered *received* even when it does not meet the /// constraints. fn is_within_limits(&self) -> bool { self.options.filter_limit.as_ref().map_or(true, |limit| self.filtered < *limit) && self.options.collect_limit.as_ref().map_or(true, |limit| self.collected < *limit) } } #[derive(Clone, Default)] struct FilterOptions { filter_limit: Option<u32>, collect_limit: Option<u32>, filter: Option<Arc<dyn Fn(&Arc<Message>) -> bool + 'static + Send + Sync>>, channel_id: Option<u64>, guild_id: Option<u64>, author_id: Option<u64>, } // Implement the common setters for all message collector types. impl_message_collector! { CollectReply; MessageCollectorBuilder; } /// Future building a stream of messages. pub struct MessageCollectorBuilder { filter: Option<FilterOptions>, shard: Option<ShardMessenger>, timeout: Option<Pin<Box<Sleep>>>, } impl MessageCollectorBuilder { /// A future that builds a [`MessageCollector`] based on the settings. pub fn new(shard_messenger: impl AsRef<ShardMessenger>) -> Self { Self { filter: Some(FilterOptions::default()), shard: Some(shard_messenger.as_ref().clone()), timeout: None, } } /// Limits how many messages can be collected. /// /// A message is considered *collected*, if the message /// passes all the requirements. #[allow(clippy::unwrap_used)] #[must_use] pub fn collect_limit(mut self, limit: u32) -> Self { self.filter.as_mut().unwrap().collect_limit = Some(limit); self } /// Use the given configuration to build the [`MessageCollector`]. 
#[allow(clippy::unwrap_used)] #[must_use] pub fn build(self) -> MessageCollector { let shard_messenger = self.shard.unwrap(); let (filter, receiver) = MessageFilter::new(self.filter.unwrap()); let timeout = self.timeout; shard_messenger.set_message_filter(filter); MessageCollector { receiver: Box::pin(receiver), timeout, } } } pub struct CollectReply { filter: Option<FilterOptions>, shard: Option<ShardMessenger>, timeout: Option<Pin<Box<Sleep>>>, fut: Option<BoxFuture<'static, Option<Arc<Message>>>>, } impl CollectReply { pub fn new(shard_messenger: impl AsRef<ShardMessenger>) -> Self { Self { filter: Some(FilterOptions::default()), shard: Some(shard_messenger.as_ref().clone()), timeout: None, fut: None, } } } impl Future for CollectReply { type Output = Option<Arc<Message>>; #[allow(clippy::unwrap_used)] fn poll(mut self: Pin<&mut Self>, ctx: &mut FutContext<'_>) -> Poll<Self::Output> { if self.fut.is_none() { let shard_messenger = self.shard.take().unwrap(); let (filter, receiver) = MessageFilter::new(self.filter.take().unwrap()); let timeout = self.timeout.take(); self.fut = Some(Box::pin(async move { shard_messenger.set_message_filter(filter); MessageCollector { receiver: Box::pin(receiver), timeout, } .next() .await })); } self.fut.as_mut().unwrap().as_mut().poll(ctx) } } impl fmt::Debug for FilterOptions { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("MessageFilter") .field("collect_limit", &self.collect_limit) .field("filter", &"Option<Arc<dyn Fn(&Arc<Message>) -> bool + 'static + Send + Sync>>") .field("channel_id", &self.channel_id) .field("guild_id", &self.guild_id) .field("author_id", &self.author_id) .finish() } } /// A message collector receives messages matching the given filter for a /// set duration. pub struct MessageCollector { receiver: Pin<Box<Receiver<Arc<Message>>>>, timeout: Option<Pin<Box<Sleep>>>, } impl MessageCollector { /// Stops collecting, this will implicitly be done once the /// collector drops. 
/// In case the drop does not appear until later, it is preferred to /// stop the collector early. pub fn stop(mut self) { self.receiver.close(); } } impl Stream for MessageCollector { type Item = Arc<Message>; fn poll_next(mut self: Pin<&mut Self>, ctx: &mut FutContext<'_>) -> Poll<Option<Self::Item>> { if let Some(ref mut timeout) = self.timeout { match timeout.as_mut().poll(ctx) { Poll::Ready(_) => { return Poll::Ready(None); }, Poll::Pending => (), } } self.receiver.as_mut().poll_recv(ctx) } } impl Drop for MessageCollector { fn drop(&mut self) { self.receiver.close(); } }
33.072131
116
0.569049
0399e5d9ccb5e6e2524415ae21276a735de835be
8,595
use proc_macro2::{Span, TokenStream}; use serde::Deserialize; use std::{fs, path::PathBuf}; use syn::{ spanned::Spanned, AttributeArgs, Error, Ident, ItemEnum, Lit, Meta, MetaNameValue, NestedMeta, Path, Result, Variant, }; #[derive(Debug)] pub struct Passes { pub base_enum: ItemEnum, pub config: NanopassConfig, pub passes: Vec<Pass>, } impl Passes { pub fn parse_from_attr(args: AttributeArgs, base_enum: ItemEnum) -> Result<(Self, Span)> { let (file_path, file_span): (String, Span) = args .into_iter() .find_map(|arg| match arg { NestedMeta::Meta(Meta::NameValue(MetaNameValue { path, lit: Lit::Str(name), .. })) if path.is_ident("file") => Some((name.value(), name.span())), _ => None, }) .ok_or_else(|| Error::new(base_enum.span(), "Missing the `file` attribute"))?; let manifest_dir: PathBuf = std::env::var("CARGO_MANIFEST_DIR") .expect("Cargo should set CARGO_MANIFEST DIR") .into(); let toml = fs::read_to_string(manifest_dir.join(&file_path)).map_err(|err| { Error::new( file_span, format!( "Failed to read '{}' in the '{}' directory: {:?}", &file_path, manifest_dir.display(), err, ), ) })?; let raw::RawNanopass { config, passes } = toml::from_str(&toml) .map_err(|err| Error::new(file_span, format!("Toml error: {}", err)))?; let passes = passes .into_iter() .map(|pass| pass.parse()) .collect::<Result<_>>()?; Ok(( Self { base_enum, config, passes, }, file_span, )) } } #[derive(Debug)] pub struct Pass { pub name: String, pub description: Option<String>, pub function_name: Ident, pub function_vis: TokenStream, pub function_context: Context, pub input_enum: Ident, pub output_enum: Option<Ident>, pub transformations: Vec<Transformation>, } #[derive(Debug)] pub enum Context { None, Mutable(Path), Immutable(Path), } impl Context { pub fn is_none(&self) -> bool { matches!(self, Self::None) } } #[derive(Debug)] pub struct Transformation { pub input_variant: Ident, pub operation: Operation, pub user_function: Path, } impl Transformation { pub fn is_scan(&self) -> bool { 
self.operation.is_scan() } } #[derive(Debug)] pub enum Operation { Create(Variant), Replace(Variant), Merge(Ident), Delete, Scan, } impl Operation { pub fn is_scan(&self) -> bool { matches!(self, Self::Scan) } } #[derive(Debug, Deserialize)] #[serde(rename_all = "snake_case")] pub struct NanopassConfig { pub logging: bool, } mod raw { use super::{Context, NanopassConfig, Operation, Pass, Transformation}; use serde::Deserialize; use std::fmt::Display; use syn::{parse::Parse, Error, Result}; fn maybe_parse<T: Parse, M: Display>(input: Option<&str>, msg: M) -> Result<Option<T>> { input .map_or_else(|| Ok(None), |out| syn::parse_str(&out).map(Some)) .map_err(|err| annotate_error(err, msg)) } fn annotate_error<M: Display>(orig: Error, msg: M) -> Error { let mut err = Error::new(orig.span(), msg); err.combine(orig); err } #[derive(Deserialize)] #[serde(rename = "Nanopass", rename_all = "snake_case")] pub struct RawNanopass { pub config: NanopassConfig, pub passes: Vec<RawPass>, } #[derive(Deserialize)] #[serde(rename = "Context")] enum RawContext { Immutable(String), Mutable(String), None, } impl RawContext { fn parse(self) -> Result<Context> { let ctx = match self { Self::Immutable(ctx) => { let path = syn::parse_str(&ctx).map_err(|err| { annotate_error(err, "Context.Immutable must be a valid Rust path") })?; Context::Immutable(path) } Self::Mutable(ctx) => { let path = syn::parse_str(&ctx).map_err(|err| { annotate_error(err, "Context.Mutable must be a valid Rust path") })?; Context::Mutable(path) } Self::None => Context::None, }; Ok(ctx) } } #[derive(Deserialize)] #[serde(rename = "Pass", rename_all = "snake_case")] pub struct RawPass { name: String, description: Option<String>, function_name: String, function_vis: String, function_context: RawContext, input_enum: String, output_enum: Option<String>, transformations: Vec<RawTransformation>, } impl RawPass { pub fn parse(self) -> Result<Pass> { let function_name = syn::parse_str(&self.function_name).map_err(|err| { 
annotate_error(err, "Pass.functionName must be a valid Rust ident") })?; let input_enum = syn::parse_str(&self.input_enum) .map_err(|err| annotate_error(err, "Pass.inputEnum must be a valid Rust ident"))?; let output_enum = maybe_parse( self.output_enum.as_deref(), "Pass.outputEnum must be a valid Rust ident", )?; let transformations = self .transformations .into_iter() .map(|transformation| transformation.parse()) .collect::<Result<Vec<Transformation>>>()?; Ok(Pass { name: self.name, description: self.description, function_name, function_vis: self.function_vis.parse()?, function_context: self.function_context.parse()?, input_enum, output_enum, transformations, }) } } #[derive(Deserialize)] #[serde(rename = "Operation")] enum RawOperation { Create(String), Merge(String), Replace(String), Delete, Scan, } impl RawOperation { fn parse(self) -> Result<Operation> { let op = match self { Self::Create(variant) => { let variant = syn::parse_str(&variant).map_err(|err| { annotate_error(err, "Operation.Create must be a valid Rust variant") })?; Operation::Create(variant) } Self::Merge(variant) => { let variant = syn::parse_str(&variant).map_err(|err| { annotate_error(err, "Operation.Merge must be a valid Rust variant") })?; Operation::Merge(variant) } Self::Replace(variant) => { let variant = syn::parse_str(&variant).map_err(|err| { annotate_error(err, "Operation.Replace must be a valid Rust variant") })?; Operation::Replace(variant) } Self::Delete => Operation::Delete, Self::Scan => Operation::Scan, }; Ok(op) } } #[derive(Deserialize)] #[serde(rename = "Transformation", rename_all = "snake_case")] struct RawTransformation { input_variant: String, operation: RawOperation, user_function: String, } impl RawTransformation { fn parse(self) -> Result<Transformation> { let input_variant = syn::parse_str(&self.input_variant).map_err(|err| { annotate_error( err, "Transformation.inputVariant must be a valid Rust ident", ) })?; let user_function = 
syn::parse_str(&self.user_function).map_err(|err| { annotate_error(err, "Transformation.userFunction must be a valid Rust path") })?; Ok(Transformation { input_variant, operation: self.operation.parse()?, user_function, }) } } }
28.65
98
0.516928
753b23d62ba91d5f80db979d9a3e1248a128566a
76,883
// Copyright (c) The Libra Core Contributors // SPDX-License-Identifier: Apache-2.0 use crate::{ loaded_data::{struct_def::StructDef, types::Type}, native_functions::dispatch::{native_gas, NativeResult}, }; use libra_types::{ account_address::{AccountAddress, ADDRESS_LENGTH}, byte_array::ByteArray, language_storage::TypeTag, vm_error::{sub_status::NFE_VECTOR_ERROR_BASE, StatusCode, VMStatus}, }; use std::{ cell::{Ref, RefCell, RefMut}, collections::VecDeque, fmt::{self, Debug, Display}, iter, mem::size_of, ops::Add, rc::Rc, }; use vm::{ errors::*, file_format::SignatureToken, gas_schedule::{ words_in, AbstractMemorySize, CostTable, GasAlgebra, GasCarrier, NativeCostIndex, CONST_SIZE, REFERENCE_SIZE, STRUCT_SIZE, }, }; /*************************************************************************************** * * Internal Types * * Internal representation of the Move value calculus. These types are abstractions * over the concrete Move concepts and may carry additonal information that is not * defined by the language, but required by the implementation. * **************************************************************************************/ /// Runtime representation of a Move value. #[derive(Debug)] enum ValueImpl { Invalid, U8(u8), U64(u64), U128(u128), Bool(bool), Address(AccountAddress), ByteArray(ByteArray), Container(Rc<RefCell<Container>>), ContainerRef(ContainerRef), IndexedRef(IndexedRef), } /// A container is a collection of values. It is used to represent data structures like a /// Move vector or struct. /// /// There is one general container that can be used to store an array of any values, same /// type or not, and a few specialized flavors to offer compact memory layout for small /// primitive types. /// /// Except when not owned by the VM stack, a container always lives inside an Rc<RefCell<>>, /// making it possible to be shared by references. 
#[derive(Debug)] enum Container { General(Vec<ValueImpl>), U8(Vec<u8>), U64(Vec<u64>), U128(Vec<u128>), Bool(Vec<bool>), } /// A ContainerRef is a direct reference to a container, which could live either in the frame /// or in global storage. In the latter case, it also keeps a status flag indicating whether /// the container has been possibly modified. #[derive(Debug)] enum ContainerRef { Local(Rc<RefCell<Container>>), Global { status: Rc<RefCell<GlobalDataStatus>>, container: Rc<RefCell<Container>>, }, } /// Status for global (on-chain) data: /// Clean - the data was only read. /// Dirty - the data was possibly modified. #[derive(Debug, Clone, Copy)] enum GlobalDataStatus { Clean, Dirty, } /// A Move reference pointing to an element in a container. #[derive(Debug)] struct IndexedRef { idx: usize, container_ref: ContainerRef, } /// An umbrella enum for references. It is used to hide the internals of the public type /// Reference. #[derive(Debug)] enum ReferenceImpl { IndexedRef(IndexedRef), ContainerRef(ContainerRef), } /*************************************************************************************** * * Public Types * * Types visible from outside the module. They are almost exclusively wrappers around * the internal representation, acting as public interfaces. The methods they provide * closely resemble the Move concepts their names suggest: move_local, borrow_field, * pack, unpack, etc. * * They are opaque to an external caller by design -- no knowledge about the internal * representation is given and they can only be manipulated via the public methods, * which is to ensure no arbitratry invalid states can be created unless some crucial * internal invariants are violated. * **************************************************************************************/ /// A reference to a Move struct that allows you to take a reference to one of its fields. 
#[derive(Debug)] pub struct StructRef(ContainerRef); /// A generic Move reference that offers two functinalities: read_ref & write_ref. #[derive(Debug)] pub struct Reference(ReferenceImpl); /// A Move value -- a wrapper around `ValueImpl` which can be created only through valid /// means. #[derive(Debug)] pub struct Value(ValueImpl); /// The locals for a function frame. It allows values to be read, written or taken /// reference from. #[derive(Debug)] pub struct Locals(Rc<RefCell<Container>>); /// An integer value in Move. #[derive(Debug)] pub enum IntegerValue { U8(u8), U64(u64), U128(u128), } /// A Move struct. #[derive(Debug)] pub struct Struct(Container); /// A special value that lives in global storage. /// /// Callers are allowed to take global references from a `GlobalValue`. A global value also contains /// an internal flag, indicating whether the value has potentially been modified or not. /// /// For any given value in storage, only one `GlobalValue` may exist to represent it at any time. /// This means that: /// * `GlobalValue` **does not** and **cannot** implement `Clone`! /// * a borrowed reference through `borrow_global` is represented through a `&GlobalValue`. /// * `borrow_global_mut` is also represented through a `&GlobalValue` -- the bytecode verifier /// enforces mutability restrictions. /// * `move_from` is represented through an owned `GlobalValue`. #[derive(Debug)] pub struct GlobalValue { status: Rc<RefCell<GlobalDataStatus>>, container: Rc<RefCell<Container>>, } /*************************************************************************************** * * Misc * * Miscellaneous helper functions. 
 *
 **************************************************************************************/

impl Container {
    /// Number of elements stored, regardless of the container flavor.
    fn len(&self) -> usize {
        use Container::*;

        match self {
            General(v) => v.len(),
            U8(v) => v.len(),
            U64(v) => v.len(),
            U128(v) => v.len(),
            Bool(v) => v.len(),
        }
    }
}

impl ValueImpl {
    /// Wrap a container in the shared (Rc<RefCell<..>>) form used by `ValueImpl::Container`.
    fn new_container(container: Container) -> Self {
        Self::Container(Rc::new(RefCell::new(container)))
    }
}

impl Value {
    /// Check whether this value is an acceptable argument for a transaction script
    /// parameter declared with signature `sig`. Only primitives and vector<u8> pass.
    pub fn is_valid_script_arg(&self, sig: &SignatureToken) -> bool {
        match (sig, &self.0) {
            (SignatureToken::U8, ValueImpl::U8(_)) => true,
            (SignatureToken::U64, ValueImpl::U64(_)) => true,
            (SignatureToken::U128, ValueImpl::U128(_)) => true,
            (SignatureToken::Bool, ValueImpl::Bool(_)) => true,
            (SignatureToken::Address, ValueImpl::Address(_)) => true,
            (SignatureToken::ByteArray, ValueImpl::ByteArray(_)) => true,
            (SignatureToken::Vector(ty), ValueImpl::Container(r)) => match (&**ty, &*r.borrow()) {
                (SignatureToken::U8, Container::U8(_)) => true,
                _ => false,
            },
            _ => false,
        }
    }
}

/***************************************************************************************
 *
 * Borrows (Internal)
 *
 * Helper functions to handle Rust borrows. When borrowing from a RefCell, we want
 * to return an error instead of panicking.
 *
 **************************************************************************************/

/// Take sole ownership of the contents of an `Rc<RefCell<T>>`, returning an error
/// (instead of panicking) if another strong reference is still alive.
fn take_unique_ownership<T: Debug>(r: Rc<RefCell<T>>) -> VMResult<T> {
    match Rc::try_unwrap(r) {
        Ok(cell) => Ok(cell.into_inner()),
        Err(r) => Err(VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
            .with_message(format!("moving value {:?} with dangling references", r))),
    }
}

impl ContainerRef {
    /// Immutably borrow the referenced container, local or global alike.
    fn borrow(&self) -> Ref<Container> {
        match self {
            Self::Local(container)
            | Self::Global {
                container, ..
            } => container.borrow(),
        }
    }

    /// Mutably borrow the referenced container. A mutable borrow of global data
    /// marks it Dirty so it gets written back at the end of the transaction.
    fn borrow_mut(&self) -> RefMut<Container> {
        match self {
            Self::Local(container) => container.borrow_mut(),
            Self::Global { container, status } => {
                *status.borrow_mut() = GlobalDataStatus::Dirty;
                container.borrow_mut()
            }
        }
    }
}

/***************************************************************************************
 *
 * Reference Conversions (Internal)
 *
 * Helpers to obtain a Rust reference to a value via a VM reference. Required for
 * equalities.
 *
 **************************************************************************************/

/// Internal trait: view a `ValueImpl` as a Rust `&T`, erroring on a type mismatch.
trait VMValueRef<T> {
    fn value_ref(&self) -> VMResult<&T>;
}

// Generates a `VMValueRef<$ty>` impl that succeeds only for the `$tc` variant.
macro_rules! impl_vm_value_ref {
    ($ty: ty, $tc: ident) => {
        impl VMValueRef<$ty> for ValueImpl {
            fn value_ref(&self) -> VMResult<&$ty> {
                match self {
                    ValueImpl::$tc(x) => Ok(x),
                    _ => Err(
                        VMStatus::new(StatusCode::INTERNAL_TYPE_ERROR).with_message(format!(
                            "cannot take {:?} as &{}",
                            self,
                            stringify!($ty)
                        )),
                    ),
                }
            }
        }
    };
}

impl_vm_value_ref!(u8, U8);
impl_vm_value_ref!(u64, U64);
impl_vm_value_ref!(u128, U128);
impl_vm_value_ref!(bool, Bool);
impl_vm_value_ref!(AccountAddress, Address);
impl_vm_value_ref!(ByteArray, ByteArray);

impl ValueImpl {
    /// Turbofish-friendly wrapper over `VMValueRef::value_ref`.
    fn as_value_ref<T>(&self) -> VMResult<&T>
    where
        Self: VMValueRef<T>,
    {
        VMValueRef::value_ref(self)
    }
}

/***************************************************************************************
 *
 * Copy Value
 *
 * Implementation of Move copy. Extra care needs to be taken when copying references.
 * It is intentional we avoid implementing the standard library trait Clone, to prevent
 * surprising behaviors from happening.
* **************************************************************************************/ impl ValueImpl { fn copy_value(&self) -> Self { use ValueImpl::*; match self { Invalid => Invalid, U8(x) => U8(*x), U64(x) => U64(*x), U128(x) => U128(*x), Bool(x) => Bool(*x), Address(x) => Address(*x), ByteArray(x) => ByteArray(x.clone()), ContainerRef(r) => ContainerRef(r.copy_value()), IndexedRef(r) => IndexedRef(r.copy_value()), // When cloning a container, we need to make sure we make a deep // copy of the data instead of a shallow copy of the Rc. Container(c) => Container(Rc::new(RefCell::new(c.borrow().copy_value()))), } } } impl Container { fn copy_value(&self) -> Self { use Container::*; match self { General(v) => General(v.iter().map(|x| x.copy_value()).collect()), U8(v) => U8(v.clone()), U64(v) => U64(v.clone()), U128(v) => U128(v.clone()), Bool(v) => Bool(v.clone()), } } } impl IndexedRef { fn copy_value(&self) -> Self { Self { idx: self.idx, container_ref: self.container_ref.copy_value(), } } } impl ContainerRef { fn copy_value(&self) -> Self { match self { Self::Local(container) => Self::Local(Rc::clone(container)), Self::Global { status, container } => Self::Global { status: Rc::clone(status), container: Rc::clone(container), }, } } } impl Value { pub fn copy_value(&self) -> Self { Self(self.0.copy_value()) } } /*************************************************************************************** * * Equality * * Equality tests of Move values. Errors are raised when types mismatch. * * It is intented to NOT use or even implement the standard library traits Eq and * Partial Eq due to: * 1. They do not allow errors to be returned. * 2. They can be invoked without the user being noticed thanks to operator * overloading. * * Eq and Partial Eq must also NOT be derived for the reasons above plus that the * derived implementation differs from the semantics we want. 
 *
 **************************************************************************************/

impl ValueImpl {
    /// Structural equality. Comparing values of mismatching runtime types is an
    /// error, not `false`.
    fn equals(&self, other: &Self) -> VMResult<bool> {
        use ValueImpl::*;

        let res = match (self, other) {
            (U8(l), U8(r)) => l == r,
            (U64(l), U64(r)) => l == r,
            (U128(l), U128(r)) => l == r,
            (Bool(l), Bool(r)) => l == r,
            (ByteArray(l), ByteArray(r)) => l == r,
            (Address(l), Address(r)) => l == r,

            (Container(l), Container(r)) => l.borrow().equals(&*r.borrow())?,

            (ContainerRef(l), ContainerRef(r)) => l.equals(r)?,
            (IndexedRef(l), IndexedRef(r)) => l.equals(r)?,

            _ => {
                return Err(VMStatus::new(StatusCode::INTERNAL_TYPE_ERROR)
                    .with_message(format!("cannot compare values: {:?}, {:?}", self, other)))
            }
        };

        Ok(res)
    }
}

impl Container {
    /// Element-wise equality. Only containers of the same flavor compare;
    /// mismatching flavors (General vs specialized included) are a type error.
    fn equals(&self, other: &Self) -> VMResult<bool> {
        use Container::*;

        let res = match (self, other) {
            (General(l), General(r)) => {
                if l.len() != r.len() {
                    return Ok(false);
                }
                for (v1, v2) in l.iter().zip(r.iter()) {
                    if !v1.equals(v2)? {
                        return Ok(false);
                    }
                }
                true
            }
            (U8(l), U8(r)) => l == r,
            (U64(l), U64(r)) => l == r,
            (U128(l), U128(r)) => l == r,
            (Bool(l), Bool(r)) => l == r,

            _ => {
                return Err(VMStatus::new(StatusCode::INTERNAL_TYPE_ERROR).with_message(
                    format!("cannot compare container values: {:?}, {:?}", self, other),
                ))
            }
        };

        Ok(res)
    }
}

impl ContainerRef {
    /// Compare the referenced containers (structural, not reference identity).
    fn equals(&self, other: &Self) -> VMResult<bool> {
        self.borrow().equals(&*other.borrow())
    }
}

impl IndexedRef {
    /// Compare the two referenced elements. The elements may live in containers of
    /// different flavors (e.g. a u8 inside a General vs a specialized U8 vector),
    /// hence the explicit cross-flavor combinations below.
    fn equals(&self, other: &Self) -> VMResult<bool> {
        use Container::*;

        let res = match (
            &*self.container_ref.borrow(),
            &*other.container_ref.borrow(),
        ) {
            (General(v1), General(v2)) => v1[self.idx].equals(&v2[other.idx])?,
            (U8(v1), U8(v2)) => v1[self.idx] == v2[other.idx],
            (U64(v1), U64(v2)) => v1[self.idx] == v2[other.idx],
            (U128(v1), U128(v2)) => v1[self.idx] == v2[other.idx],
            (Bool(v1), Bool(v2)) => v1[self.idx] == v2[other.idx],

            // Equality between a generic and a specialized container.
            (General(v1), U8(v2)) => *v1[self.idx].as_value_ref::<u8>()? == v2[other.idx],
            (U8(v1), General(v2)) => v1[self.idx] == *v2[other.idx].as_value_ref::<u8>()?,

            (General(v1), U64(v2)) => *v1[self.idx].as_value_ref::<u64>()? == v2[other.idx],
            (U64(v1), General(v2)) => v1[self.idx] == *v2[other.idx].as_value_ref::<u64>()?,

            (General(v1), U128(v2)) => *v1[self.idx].as_value_ref::<u128>()? == v2[other.idx],
            (U128(v1), General(v2)) => v1[self.idx] == *v2[other.idx].as_value_ref::<u128>()?,

            (General(v1), Bool(v2)) => *v1[self.idx].as_value_ref::<bool>()? == v2[other.idx],
            (Bool(v1), General(v2)) => v1[self.idx] == *v2[other.idx].as_value_ref::<bool>()?,

            // All other combinations are illegal.
            _ => {
                return Err(VMStatus::new(StatusCode::INTERNAL_TYPE_ERROR)
                    .with_message(format!("cannot compare references {:?}, {:?}", self, other)))
            }
        };

        Ok(res)
    }
}

impl Value {
    /// Public structural-equality entry point; errors on type mismatch.
    pub fn equals(&self, other: &Self) -> VMResult<bool> {
        self.0.equals(&other.0)
    }
}

/***************************************************************************************
 *
 * Read Ref
 *
 * Implementation of the Move operation read ref.
 *
 **************************************************************************************/

impl ContainerRef {
    /// ReadRef on a container reference: deep-copy the whole referenced container
    /// into a fresh value.
    fn read_ref(self) -> VMResult<Value> {
        Ok(Value(ValueImpl::new_container(self.borrow().copy_value())))
    }
}

impl IndexedRef {
    /// ReadRef on an element reference: copy just the referenced element.
    fn read_ref(self) -> VMResult<Value> {
        use Container::*;

        let res = match &*self.container_ref.borrow() {
            General(v) => v[self.idx].copy_value(),
            U8(v) => ValueImpl::U8(v[self.idx]),
            U64(v) => ValueImpl::U64(v[self.idx]),
            U128(v) => ValueImpl::U128(v[self.idx]),
            Bool(v) => ValueImpl::Bool(v[self.idx]),
        };

        Ok(Value(res))
    }
}

impl ReferenceImpl {
    /// Dispatch ReadRef to the concrete reference kind.
    fn read_ref(self) -> VMResult<Value> {
        match self {
            Self::ContainerRef(r) => r.read_ref(),
            Self::IndexedRef(r) => r.read_ref(),
        }
    }
}

impl StructRef {
    /// Public ReadRef for struct references.
    pub fn read_ref(self) -> VMResult<Value> {
        self.0.read_ref()
    }
}

impl Reference {
    /// Public ReadRef for generic references.
    pub fn read_ref(self) -> VMResult<Value> {
        self.0.read_ref()
    }
}

/***************************************************************************************
 *
 * Write Ref
 *
 * Implementation of the Move operation write ref.
 *
 **************************************************************************************/

impl ContainerRef {
    /// WriteRef on a container reference: the new value must itself be a container;
    /// its contents replace the referenced container in place (marking globals dirty
    /// via borrow_mut).
    fn write_ref(self, v: Value) -> VMResult<()> {
        match v.0 {
            ValueImpl::Container(r) => {
                *self.borrow_mut() = take_unique_ownership(r)? // TODO: can we simply take the Rc?
            }
            _ => {
                return Err(
                    VMStatus::new(StatusCode::INTERNAL_TYPE_ERROR).with_message(format!(
                        "cannot write value {:?} to container ref {:?}",
                        v, self
                    )),
                )
            }
        }
        Ok(())
    }
}

impl IndexedRef {
    /// WriteRef on an element reference: only primitive values may be written and
    /// the value's runtime type must match the container flavor.
    fn write_ref(self, x: Value) -> VMResult<()> {
        // References, invalid slots and whole containers cannot be stored through an
        // element reference.
        match &x.0 {
            ValueImpl::IndexedRef(_)
            | ValueImpl::ContainerRef(_)
            | ValueImpl::Invalid
            | ValueImpl::Container(_) => {
                return Err(
                    VMStatus::new(StatusCode::INTERNAL_TYPE_ERROR).with_message(format!(
                        "cannot write value {:?} to indexed ref {:?}",
                        x, self
                    )),
                )
            }
            _ => (),
        }

        match (&mut *self.container_ref.borrow_mut(), &x.0) {
            // A General container accepts any (already vetted) primitive.
            (Container::General(v), _) => {
                v[self.idx] = x.0;
            }
            (Container::U8(v), ValueImpl::U8(x)) => v[self.idx] = *x,
            (Container::U64(v), ValueImpl::U64(x)) => v[self.idx] = *x,
            (Container::U128(v), ValueImpl::U128(x)) => v[self.idx] = *x,
            (Container::Bool(v), ValueImpl::Bool(x)) => v[self.idx] = *x,
            _ => {
                return Err(
                    VMStatus::new(StatusCode::INTERNAL_TYPE_ERROR).with_message(format!(
                        "cannot write value {:?} to indexed ref {:?}",
                        x, self
                    )),
                )
            }
        }
        Ok(())
    }
}

impl ReferenceImpl {
    /// Dispatch WriteRef to the concrete reference kind.
    fn write_ref(self, x: Value) -> VMResult<()> {
        match self {
            Self::ContainerRef(r) => r.write_ref(x),
            Self::IndexedRef(r) => r.write_ref(x),
        }
    }
}

impl Reference {
    /// Public WriteRef entry point.
    pub fn write_ref(self, x: Value) -> VMResult<()> {
        self.0.write_ref(x)
    }
}

/***************************************************************************************
 *
 * Borrows (Move)
 *
 * Implementation of borrowing in Move: borrow field, borrow local and infrastructure
 * to support borrowing an element from a vector.
 *
 **************************************************************************************/

impl ContainerRef {
    /// Borrow the `idx`-th element of the referenced container, producing either a
    /// nested ContainerRef (when the element is itself a container) or an IndexedRef.
    /// Borrows taken through a Global reference propagate the status handle so later
    /// mutations mark the root dirty.
    fn borrow_elem(&self, idx: usize) -> VMResult<ValueImpl> {
        let r = self.borrow();

        if idx >= r.len() {
            return Err(
                VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR).with_message(format!(
                    "index out of bounds when borrowing container element: got: {}, len: {}",
                    idx,
                    r.len()
                )),
            );
        }

        let res = match &*r {
            Container::General(v) => match &v[idx] {
                // TODO: check for the impossible combinations.
                ValueImpl::Container(container) => {
                    let r = match self {
                        Self::Local(_) => Self::Local(Rc::clone(container)),
                        Self::Global { status, .. } => Self::Global {
                            status: Rc::clone(status),
                            container: Rc::clone(container),
                        },
                    };
                    ValueImpl::ContainerRef(r)
                }
                _ => ValueImpl::IndexedRef(IndexedRef {
                    idx,
                    container_ref: self.copy_value(),
                }),
            },
            // Specialized containers only hold primitives, so always an IndexedRef.
            _ => ValueImpl::IndexedRef(IndexedRef {
                idx,
                container_ref: self.copy_value(),
            }),
        };

        Ok(res)
    }
}

impl StructRef {
    /// BorrowField: take a reference to the `idx`-th field of the referenced struct.
    pub fn borrow_field(&self, idx: usize) -> VMResult<Value> {
        Ok(Value(self.0.borrow_elem(idx)?))
    }
}

impl Locals {
    /// BorrowLoc: take a reference to the local at `idx`. Container locals yield a
    /// ContainerRef; primitive locals yield an IndexedRef into the locals frame.
    pub fn borrow_loc(&self, idx: usize) -> VMResult<Value> {
        // TODO: this is very similar to SharedContainer::borrow_elem. Find a way to
        // reuse that code?

        let r = self.0.borrow();

        if idx >= r.len() {
            return Err(
                VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR).with_message(format!(
                    "index out of bounds when borrowing local: got: {}, len: {}",
                    idx,
                    r.len()
                )),
            );
        }

        match &*r {
            Container::General(v) => match &v[idx] {
                ValueImpl::Container(r) => Ok(Value(ValueImpl::ContainerRef(ContainerRef::Local(
                    Rc::clone(r),
                )))),

                ValueImpl::U8(_)
                | ValueImpl::U64(_)
                | ValueImpl::U128(_)
                | ValueImpl::Bool(_)
                | ValueImpl::Address(_)
                | ValueImpl::ByteArray(_) => Ok(Value(ValueImpl::IndexedRef(IndexedRef {
                    container_ref: ContainerRef::Local(Rc::clone(&self.0)),
                    idx,
                }))),

                // Invalid (moved-from) locals and locals already holding references
                // cannot be borrowed.
                ValueImpl::ContainerRef(_) | ValueImpl::Invalid | ValueImpl::IndexedRef(_) => {
                    Err(VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
                        .with_message(format!("cannot borrow local {:?}", &v[idx])))
                }
            },

            v => Err(VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
                .with_message(format!("bad container for locals: {:?}", v))),
        }
    }
}

/***************************************************************************************
 *
 * Locals
 *
 * Public APIs for Locals to support reading, writing and moving of values.
 * **************************************************************************************/
impl Locals {
    /// Creates `n` local slots, all initialized to `Invalid` (uninitialized).
    pub fn new(n: usize) -> Self {
        Self(Rc::new(RefCell::new(Container::General(
            iter::repeat_with(|| ValueImpl::Invalid).take(n).collect(),
        ))))
    }

    /// Returns a copy of the local at `idx`. Copying an `Invalid` slot or indexing
    /// out of bounds is an error.
    pub fn copy_loc(&self, idx: usize) -> VMResult<Value> {
        let r = self.0.borrow();
        let v = match &*r {
            Container::General(v) => v,
            // Locals are constructed as General in `new`; anything else is a bug.
            _ => unreachable!(),
        };
        match v.get(idx) {
            Some(ValueImpl::Invalid) => {
                Err(VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
                    .with_message(format!("cannot copy invalid value at index {}", idx)))
            }
            Some(v) => Ok(Value(v.copy_value())),
            None => Err(
                VMStatus::new(StatusCode::VERIFIER_INVARIANT_VIOLATION).with_message(format!(
                    "local index out of bounds: got {}, len: {}",
                    idx,
                    v.len()
                )),
            ),
        }
    }

    /// Replaces the local at `idx` with `x`, returning the old value. Refuses to move
    /// out a container that still has outstanding references (strong count > 1),
    /// which would leave those references dangling.
    fn swap_loc(&mut self, idx: usize, x: Value) -> VMResult<Value> {
        let mut r = self.0.borrow_mut();
        let v = match &mut *r {
            Container::General(v) => v,
            _ => unreachable!(),
        };
        match v.get_mut(idx) {
            Some(v) => {
                if let ValueImpl::Container(r) = v {
                    if Rc::strong_count(r) > 1 {
                        return Err(VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
                            .with_message(
                                "moving container with dangling references".to_string(),
                            ));
                    }
                }
                Ok(Value(std::mem::replace(v, x.0)))
            }
            None => Err(
                VMStatus::new(StatusCode::VERIFIER_INVARIANT_VIOLATION).with_message(format!(
                    "local index out of bounds: got {}, len: {}",
                    idx,
                    v.len()
                )),
            ),
        }
    }

    /// Moves the local at `idx` out, leaving `Invalid` behind. Moving an already
    /// `Invalid` slot is an error.
    pub fn move_loc(&mut self, idx: usize) -> VMResult<Value> {
        match self.swap_loc(idx, Value(ValueImpl::Invalid))? {
            Value(ValueImpl::Invalid) => {
                Err(VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
                    .with_message(format!("cannot move invalid value at index {}", idx)))
            }
            v => Ok(v),
        }
    }

    /// Stores `x` into the local at `idx`, dropping the previous value.
    pub fn store_loc(&mut self, idx: usize, x: Value) -> VMResult<()> {
        self.swap_loc(idx, x)?;
        Ok(())
    }
}

/***************************************************************************************
 *
 * Public Value Constructors
 *
 * Constructors to allow values to be created outside this module.
 *
 **************************************************************************************/
impl Value {
    /// Wraps a `u8` as a VM value.
    pub fn u8(x: u8) -> Self {
        Self(ValueImpl::U8(x))
    }

    /// Wraps a `u64` as a VM value.
    pub fn u64(x: u64) -> Self {
        Self(ValueImpl::U64(x))
    }

    /// Wraps a `u128` as a VM value.
    pub fn u128(x: u128) -> Self {
        Self(ValueImpl::U128(x))
    }

    /// Wraps a `bool` as a VM value.
    pub fn bool(x: bool) -> Self {
        Self(ValueImpl::Bool(x))
    }

    /// Wraps a `ByteArray` as a VM value.
    pub fn byte_array(x: ByteArray) -> Self {
        Self(ValueImpl::ByteArray(x))
    }

    /// Wraps an `AccountAddress` as a VM value.
    pub fn address(x: AccountAddress) -> Self {
        Self(ValueImpl::Address(x))
    }

    /// Wraps a `Struct` as a VM value (takes ownership of its container).
    pub fn struct_(s: Struct) -> Self {
        Self(ValueImpl::new_container(s.0))
    }

    // TODO: consider whether we want to replace this with fn vector(v: Vec<Value>).
    /// Creates a `vector<u8>` VM value from raw bytes.
    pub fn vector_u8(v: Vec<u8>) -> Self {
        Self(ValueImpl::new_container(Container::U8(v)))
    }
}

/***************************************************************************************
 *
 * Casting
 *
 * Due to the public value types being opaque to an external user, the following
 * public APIs are required to enable conversion between types in order to gain access
 * to specific operations certain more refined types offer.
 * For example, one must convert a `Value` to a `Struct` before unpack can be called.
 *
 * It is expected that the caller will keep track of the invariants and guarantee
 * the conversion will succeed. An error will be raised in case of a violation.
 *
 **************************************************************************************/
/// Fallible conversion from an opaque VM value into a more refined type `T`.
pub trait VMValueCast<T> {
    fn cast(self) -> VMResult<T>;
}

// Generates a `VMValueCast<$ty>` impl for `Value` that succeeds only on the
// matching `ValueImpl::$tc` variant.
macro_rules! impl_vm_value_cast {
    ($ty: ty, $tc: ident) => {
        impl VMValueCast<$ty> for Value {
            fn cast(self) -> VMResult<$ty> {
                match self.0 {
                    ValueImpl::$tc(x) => Ok(x),
                    v => Err(
                        VMStatus::new(StatusCode::INTERNAL_TYPE_ERROR).with_message(format!(
                            "cannot cast {:?} to {}",
                            v,
                            stringify!($ty)
                        )),
                    ),
                }
            }
        }
    };
}

impl_vm_value_cast!(u8, U8);
impl_vm_value_cast!(u64, U64);
impl_vm_value_cast!(u128, U128);
impl_vm_value_cast!(bool, Bool);
impl_vm_value_cast!(AccountAddress, Address);
impl_vm_value_cast!(ByteArray, ByteArray);
impl_vm_value_cast!(ContainerRef, ContainerRef);
impl_vm_value_cast!(IndexedRef, IndexedRef);

impl VMValueCast<IntegerValue> for Value {
    /// Casts any of the three integer variants into an `IntegerValue`.
    fn cast(self) -> VMResult<IntegerValue> {
        match self.0 {
            ValueImpl::U8(x) => Ok(IntegerValue::U8(x)),
            ValueImpl::U64(x) => Ok(IntegerValue::U64(x)),
            ValueImpl::U128(x) => Ok(IntegerValue::U128(x)),
            v => Err(VMStatus::new(StatusCode::INTERNAL_TYPE_ERROR)
                .with_message(format!("cannot cast {:?} to integer", v,))),
        }
    }
}

impl VMValueCast<Reference> for Value {
    /// Casts either reference variant into a unified `Reference`.
    fn cast(self) -> VMResult<Reference> {
        match self.0 {
            ValueImpl::ContainerRef(r) => Ok(Reference(ReferenceImpl::ContainerRef(r))),
            ValueImpl::IndexedRef(r) => Ok(Reference(ReferenceImpl::IndexedRef(r))),
            v => Err(VMStatus::new(StatusCode::INTERNAL_TYPE_ERROR)
                .with_message(format!("cannot cast {:?} to reference", v,))),
        }
    }
}

impl VMValueCast<Container> for Value {
    /// Takes sole ownership of the underlying container; fails if other
    /// references to it are still alive.
    fn cast(self) -> VMResult<Container> {
        match self.0 {
            ValueImpl::Container(r) => take_unique_ownership(r),
            v => Err(VMStatus::new(StatusCode::INTERNAL_TYPE_ERROR)
                .with_message(format!("cannot cast {:?} to container", v,))),
        }
    }
}

impl VMValueCast<Struct> for Value {
    /// Casts a container value into a `Struct`, requiring unique ownership.
    fn cast(self) -> VMResult<Struct> {
        match self.0 {
            ValueImpl::Container(r) => Ok(Struct(take_unique_ownership(r)?)),
            v => Err(VMStatus::new(StatusCode::INTERNAL_TYPE_ERROR)
                .with_message(format!("cannot cast {:?} to struct", v,))),
        }
    }
}

impl VMValueCast<StructRef> for Value {
    /// Casts a container reference into a `StructRef` (delegates to the
    /// `ContainerRef` cast).
    fn cast(self) -> VMResult<StructRef> {
        Ok(StructRef(VMValueCast::cast(self)?))
    }
}

impl VMValueCast<Vec<u8>> for Value {
    /// Extracts the raw bytes of a `vector<u8>` value, requiring unique ownership.
    fn cast(self) -> VMResult<Vec<u8>> {
        match self.0 {
            ValueImpl::Container(r) => match take_unique_ownership(r)? {
                Container::U8(v) => Ok(v),
                v => Err(VMStatus::new(StatusCode::INTERNAL_TYPE_ERROR)
                    .with_message(format!("cannot cast {:?} to vector<u8>", v,))),
            },
            v => Err(VMStatus::new(StatusCode::INTERNAL_TYPE_ERROR)
                .with_message(format!("cannot cast {:?} to vector<u8>", v,))),
        }
    }
}

impl Value {
    /// Convenience wrapper: `v.value_as::<T>()` instead of `VMValueCast::cast(v)`.
    pub fn value_as<T>(self) -> VMResult<T>
    where
        Self: VMValueCast<T>,
    {
        VMValueCast::cast(self)
    }
}

impl VMValueCast<u8> for IntegerValue {
    /// Exact-variant cast: succeeds only for `U8` (no numeric conversion).
    fn cast(self) -> VMResult<u8> {
        match self {
            Self::U8(x) => Ok(x),
            v => Err(VMStatus::new(StatusCode::INTERNAL_TYPE_ERROR)
                .with_message(format!("cannot cast {:?} to u8", v,))),
        }
    }
}

impl VMValueCast<u64> for IntegerValue {
    /// Exact-variant cast: succeeds only for `U64` (no numeric conversion).
    fn cast(self) -> VMResult<u64> {
        match self {
            Self::U64(x) => Ok(x),
            v => Err(VMStatus::new(StatusCode::INTERNAL_TYPE_ERROR)
                .with_message(format!("cannot cast {:?} to u64", v,))),
        }
    }
}

impl VMValueCast<u128> for IntegerValue {
    /// Exact-variant cast: succeeds only for `U128` (no numeric conversion).
    fn cast(self) -> VMResult<u128> {
        match self {
            Self::U128(x) => Ok(x),
            v => Err(VMStatus::new(StatusCode::INTERNAL_TYPE_ERROR)
                .with_message(format!("cannot cast {:?} to u128", v,))),
        }
    }
}

impl IntegerValue {
    /// Convenience wrapper mirroring `Value::value_as`.
    pub fn value_as<T>(self) -> VMResult<T>
    where
        Self: VMValueCast<T>,
    {
        VMValueCast::cast(self)
    }
}

/***************************************************************************************
 *
 * Integer Operations
 *
 * Arithmetic operations and conversions for integer values.
 * **************************************************************************************/
impl IntegerValue {
    /// Checked addition; `ARITHMETIC_ERROR` on overflow, `INTERNAL_TYPE_ERROR`
    /// if the operands are different integer widths.
    pub fn add_checked(self, other: Self) -> VMResult<Self> {
        use IntegerValue::*;
        let res = match (self, other) {
            (U8(l), U8(r)) => u8::checked_add(l, r).map(IntegerValue::U8),
            (U64(l), U64(r)) => u64::checked_add(l, r).map(IntegerValue::U64),
            (U128(l), U128(r)) => u128::checked_add(l, r).map(IntegerValue::U128),
            (l, r) => {
                let msg = format!("Cannot add {:?} and {:?}", l, r);
                return Err(VMStatus::new(StatusCode::INTERNAL_TYPE_ERROR).with_message(msg));
            }
        };
        res.ok_or_else(|| VMStatus::new(StatusCode::ARITHMETIC_ERROR))
    }

    /// Checked subtraction; `ARITHMETIC_ERROR` on underflow.
    pub fn sub_checked(self, other: Self) -> VMResult<Self> {
        use IntegerValue::*;
        let res = match (self, other) {
            (U8(l), U8(r)) => u8::checked_sub(l, r).map(IntegerValue::U8),
            (U64(l), U64(r)) => u64::checked_sub(l, r).map(IntegerValue::U64),
            (U128(l), U128(r)) => u128::checked_sub(l, r).map(IntegerValue::U128),
            (l, r) => {
                let msg = format!("Cannot sub {:?} from {:?}", r, l);
                return Err(VMStatus::new(StatusCode::INTERNAL_TYPE_ERROR).with_message(msg));
            }
        };
        res.ok_or_else(|| VMStatus::new(StatusCode::ARITHMETIC_ERROR))
    }

    /// Checked multiplication; `ARITHMETIC_ERROR` on overflow.
    pub fn mul_checked(self, other: Self) -> VMResult<Self> {
        use IntegerValue::*;
        let res = match (self, other) {
            (U8(l), U8(r)) => u8::checked_mul(l, r).map(IntegerValue::U8),
            (U64(l), U64(r)) => u64::checked_mul(l, r).map(IntegerValue::U64),
            (U128(l), U128(r)) => u128::checked_mul(l, r).map(IntegerValue::U128),
            (l, r) => {
                let msg = format!("Cannot mul {:?} and {:?}", l, r);
                return Err(VMStatus::new(StatusCode::INTERNAL_TYPE_ERROR).with_message(msg));
            }
        };
        res.ok_or_else(|| VMStatus::new(StatusCode::ARITHMETIC_ERROR))
    }

    /// Checked division; `ARITHMETIC_ERROR` on division by zero.
    pub fn div_checked(self, other: Self) -> VMResult<Self> {
        use IntegerValue::*;
        let res = match (self, other) {
            (U8(l), U8(r)) => u8::checked_div(l, r).map(IntegerValue::U8),
            (U64(l), U64(r)) => u64::checked_div(l, r).map(IntegerValue::U64),
            (U128(l), U128(r)) => u128::checked_div(l, r).map(IntegerValue::U128),
            (l, r) => {
                let msg = format!("Cannot div {:?} by {:?}", l, r);
                return Err(VMStatus::new(StatusCode::INTERNAL_TYPE_ERROR).with_message(msg));
            }
        };
        res.ok_or_else(|| VMStatus::new(StatusCode::ARITHMETIC_ERROR))
    }

    /// Checked remainder; `ARITHMETIC_ERROR` on division by zero.
    pub fn rem_checked(self, other: Self) -> VMResult<Self> {
        use IntegerValue::*;
        let res = match (self, other) {
            (U8(l), U8(r)) => u8::checked_rem(l, r).map(IntegerValue::U8),
            (U64(l), U64(r)) => u64::checked_rem(l, r).map(IntegerValue::U64),
            (U128(l), U128(r)) => u128::checked_rem(l, r).map(IntegerValue::U128),
            (l, r) => {
                let msg = format!("Cannot rem {:?} by {:?}", l, r);
                return Err(VMStatus::new(StatusCode::INTERNAL_TYPE_ERROR).with_message(msg));
            }
        };
        res.ok_or_else(|| VMStatus::new(StatusCode::ARITHMETIC_ERROR))
    }

    /// Bitwise OR; operands must have the same width.
    pub fn bit_or(self, other: Self) -> VMResult<Self> {
        use IntegerValue::*;
        Ok(match (self, other) {
            (U8(l), U8(r)) => IntegerValue::U8(l | r),
            (U64(l), U64(r)) => IntegerValue::U64(l | r),
            (U128(l), U128(r)) => IntegerValue::U128(l | r),
            (l, r) => {
                let msg = format!("Cannot bit_or {:?} and {:?}", l, r);
                return Err(VMStatus::new(StatusCode::INTERNAL_TYPE_ERROR).with_message(msg));
            }
        })
    }

    /// Bitwise AND; operands must have the same width.
    pub fn bit_and(self, other: Self) -> VMResult<Self> {
        use IntegerValue::*;
        Ok(match (self, other) {
            (U8(l), U8(r)) => IntegerValue::U8(l & r),
            (U64(l), U64(r)) => IntegerValue::U64(l & r),
            (U128(l), U128(r)) => IntegerValue::U128(l & r),
            (l, r) => {
                let msg = format!("Cannot bit_and {:?} and {:?}", l, r);
                return Err(VMStatus::new(StatusCode::INTERNAL_TYPE_ERROR).with_message(msg));
            }
        })
    }

    /// Bitwise XOR; operands must have the same width.
    pub fn bit_xor(self, other: Self) -> VMResult<Self> {
        use IntegerValue::*;
        Ok(match (self, other) {
            (U8(l), U8(r)) => IntegerValue::U8(l ^ r),
            (U64(l), U64(r)) => IntegerValue::U64(l ^ r),
            (U128(l), U128(r)) => IntegerValue::U128(l ^ r),
            (l, r) => {
                let msg = format!("Cannot bit_xor {:?} and {:?}", l, r);
                return Err(VMStatus::new(StatusCode::INTERNAL_TYPE_ERROR).with_message(msg));
            }
        })
    }

    /// Left shift; shifting by the full bit width or more is an `ARITHMETIC_ERROR`
    /// (Rust's `<<` would panic/UB on an over-wide shift, so guard first).
    pub fn shl_checked(self, n_bits: u8) -> VMResult<Self> {
        use IntegerValue::*;
        Ok(match self {
            U8(x) => {
                if n_bits >= 8 {
                    return Err(VMStatus::new(StatusCode::ARITHMETIC_ERROR));
                }
                IntegerValue::U8(x << n_bits)
            }
            U64(x) => {
                if n_bits >= 64 {
                    return Err(VMStatus::new(StatusCode::ARITHMETIC_ERROR));
                }
                IntegerValue::U64(x << n_bits)
            }
            U128(x) => {
                if n_bits >= 128 {
                    return Err(VMStatus::new(StatusCode::ARITHMETIC_ERROR));
                }
                IntegerValue::U128(x << n_bits)
            }
        })
    }

    /// Right shift; same width guard as `shl_checked`.
    pub fn shr_checked(self, n_bits: u8) -> VMResult<Self> {
        use IntegerValue::*;
        Ok(match self {
            U8(x) => {
                if n_bits >= 8 {
                    return Err(VMStatus::new(StatusCode::ARITHMETIC_ERROR));
                }
                IntegerValue::U8(x >> n_bits)
            }
            U64(x) => {
                if n_bits >= 64 {
                    return Err(VMStatus::new(StatusCode::ARITHMETIC_ERROR));
                }
                IntegerValue::U64(x >> n_bits)
            }
            U128(x) => {
                if n_bits >= 128 {
                    return Err(VMStatus::new(StatusCode::ARITHMETIC_ERROR));
                }
                IntegerValue::U128(x >> n_bits)
            }
        })
    }

    /// Less-than comparison; operands must have the same width.
    pub fn lt(self, other: Self) -> VMResult<bool> {
        use IntegerValue::*;
        Ok(match (self, other) {
            (U8(l), U8(r)) => l < r,
            (U64(l), U64(r)) => l < r,
            (U128(l), U128(r)) => l < r,
            (l, r) => {
                let msg = format!(
                    "Cannot compare {:?} and {:?}: incompatible integer types",
                    l, r
                );
                return Err(VMStatus::new(StatusCode::INTERNAL_TYPE_ERROR).with_message(msg));
            }
        })
    }

    /// Less-than-or-equal comparison; operands must have the same width.
    pub fn le(self, other: Self) -> VMResult<bool> {
        use IntegerValue::*;
        Ok(match (self, other) {
            (U8(l), U8(r)) => l <= r,
            (U64(l), U64(r)) => l <= r,
            (U128(l), U128(r)) => l <= r,
            (l, r) => {
                let msg = format!(
                    "Cannot compare {:?} and {:?}: incompatible integer types",
                    l, r
                );
                return Err(VMStatus::new(StatusCode::INTERNAL_TYPE_ERROR).with_message(msg));
            }
        })
    }

    /// Greater-than comparison; operands must have the same width.
    pub fn gt(self, other: Self) -> VMResult<bool> {
        use IntegerValue::*;
        Ok(match (self, other) {
            (U8(l), U8(r)) => l > r,
            (U64(l), U64(r)) => l > r,
            (U128(l), U128(r)) => l > r,
            (l, r) => {
                let msg = format!(
                    "Cannot compare {:?} and {:?}: incompatible integer types",
                    l, r
                );
                return Err(VMStatus::new(StatusCode::INTERNAL_TYPE_ERROR).with_message(msg));
            }
        })
    }

    /// Greater-than-or-equal comparison; operands must have the same width.
    pub fn ge(self, other: Self) -> VMResult<bool> {
        use IntegerValue::*;
        Ok(match (self, other) {
            (U8(l), U8(r)) => l >= r,
            (U64(l), U64(r)) => l >= r,
            (U128(l), U128(r)) => l >= r,
            (l, r) => {
                let msg = format!(
                    "Cannot compare {:?} and {:?}: incompatible integer types",
                    l, r
                );
                return Err(VMStatus::new(StatusCode::INTERNAL_TYPE_ERROR).with_message(msg));
            }
        })
    }

    /// Converts back into an opaque `Value` of the matching width.
    pub fn into_value(self) -> Value {
        use IntegerValue::*;
        match self {
            U8(x) => Value::u8(x),
            U64(x) => Value::u64(x),
            U128(x) => Value::u128(x),
        }
    }
}

impl IntegerValue {
    /// Narrowing cast to u8; `ARITHMETIC_ERROR` if the value does not fit.
    pub fn cast_u8(self) -> VMResult<u8> {
        use IntegerValue::*;
        match self {
            U8(x) => Ok(x),
            U64(x) => {
                if x > (std::u8::MAX as u64) {
                    Err(VMStatus::new(StatusCode::ARITHMETIC_ERROR)
                        .with_message(format!("Cannot cast u64({}) to u8", x)))
                } else {
                    Ok(x as u8)
                }
            }
            U128(x) => {
                if x > (std::u8::MAX as u128) {
                    Err(VMStatus::new(StatusCode::ARITHMETIC_ERROR)
                        .with_message(format!("Cannot cast u128({}) to u8", x)))
                } else {
                    Ok(x as u8)
                }
            }
        }
    }

    /// Narrowing cast to u64; `ARITHMETIC_ERROR` if the value does not fit.
    pub fn cast_u64(self) -> VMResult<u64> {
        use IntegerValue::*;
        match self {
            U8(x) => Ok(x as u64),
            U64(x) => Ok(x),
            U128(x) => {
                if x > (std::u64::MAX as u128) {
                    Err(VMStatus::new(StatusCode::ARITHMETIC_ERROR)
                        .with_message(format!("Cannot cast u128({}) to u64", x)))
                } else {
                    Ok(x as u64)
                }
            }
        }
    }

    /// Widening cast to u128; always succeeds.
    pub fn cast_u128(self) -> VMResult<u128> {
        use IntegerValue::*;
        Ok(match self {
            U8(x) => x as u128,
            U64(x) => x as u128,
            U128(x) => x,
        })
    }
}

/***************************************************************************************
 *
 * Vector
 *
 * Native function imeplementations of the Vector module.
 *
 * TODO: split the code into two parts:
 *       1) Internal vector APIs that define & implements the core operations
 *          (and operations only).
 *       2) Native function adapters that the dispatcher can call into. These will
 *          check if arguments are valid and deal with gas metering.
 * **************************************************************************************/
pub mod vector {
    use super::*;

    // Sub-status codes surfaced to Move code on native-function failure.
    pub const INDEX_OUT_OF_BOUNDS: u64 = NFE_VECTOR_ERROR_BASE + 1;
    pub const POP_EMPTY_VEC: u64 = NFE_VECTOR_ERROR_BASE + 2;
    pub const DESTROY_NON_EMPTY_VEC: u64 = NFE_VECTOR_ERROR_BASE + 3;

    // Verifies the number of type arguments / arguments a native was invoked with;
    // a mismatch indicates a dispatcher bug, hence UNREACHABLE.
    macro_rules! ensure_len {
        ($v: expr, $expected_len: expr, $type: expr, $fn: expr) => {{
            let actual_len = $v.len();
            let expected_len = $expected_len;
            if actual_len != expected_len {
                let msg = format!(
                    "wrong number of {} for {} expected {} found {}",
                    ($type),
                    ($fn),
                    expected_len,
                    actual_len,
                );
                return Err(VMStatus::new(StatusCode::UNREACHABLE).with_message(msg));
            }
        }};
    }

    // Pops the next argument off the front of the queue and casts it.
    macro_rules! pop_arg_front {
        ($arguments:ident, $t:ty) => {
            $arguments.pop_front().unwrap().value_as::<$t>()?
        };
    }

    // The declared element type and the container's runtime representation
    // disagree — an invariant violation, not a user error.
    macro_rules! err_vector_elem_ty_mismatch {
        ($tag: expr, $val: expr) => {{
            return Err(
                VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR).with_message(format!(
                    "vector elem type mismatch -- expected {:?}, got {:?}",
                    $tag, $val
                )),
            );
        }};
    }

    /// Native `Vector::empty<T>()`: creates an empty vector, picking the
    /// specialized container for primitive element types.
    pub fn native_empty(
        ty_args: Vec<TypeTag>,
        args: VecDeque<Value>,
        cost_table: &CostTable,
    ) -> VMResult<NativeResult> {
        ensure_len!(ty_args, 1, "type arguments", "empty");
        ensure_len!(args, 0, "arguments", "empty");

        let cost = native_gas(cost_table, NativeCostIndex::EMPTY, 1);
        let container = match &ty_args[0] {
            TypeTag::U8 => Container::U8(vec![]),
            TypeTag::U64 => Container::U64(vec![]),
            TypeTag::U128 => Container::U128(vec![]),
            TypeTag::Bool => Container::Bool(vec![]),
            // Structs, addresses, byte arrays etc. use the general representation.
            _ => Container::General(vec![]),
        };

        Ok(NativeResult::ok(
            cost,
            vec![Value(ValueImpl::new_container(container))],
        ))
    }

    /// Native `Vector::length<T>(&v)`: returns the number of elements as u64.
    pub fn native_length(
        ty_args: Vec<TypeTag>,
        mut args: VecDeque<Value>,
        cost_table: &CostTable,
    ) -> VMResult<NativeResult> {
        ensure_len!(ty_args, 1, "type arguments", "length");
        ensure_len!(args, 1, "arguments", "length");

        let cost = native_gas(cost_table, NativeCostIndex::LENGTH, 1);
        let r = pop_arg_front!(args, ContainerRef);
        let v = r.borrow();

        let len = match (&ty_args[0], &*v) {
            (TypeTag::U8, Container::U8(v)) => v.len(),
            (TypeTag::U64, Container::U64(v)) => v.len(),
            (TypeTag::U128, Container::U128(v)) => v.len(),
            (TypeTag::Bool, Container::Bool(v)) => v.len(),
            (TypeTag::Struct(_), Container::General(v))
            | (TypeTag::ByteArray, Container::General(v))
            | (TypeTag::Address, Container::General(v)) => v.len(),
            (tag, v) => err_vector_elem_ty_mismatch!(tag, v),
        };

        Ok(NativeResult::ok(cost, vec![Value::u64(len as u64)]))
    }

    /// Native `Vector::push_back<T>(&mut v, e)`: appends `e`. Gas is charged
    /// proportionally to the size of the pushed element.
    pub fn native_push_back(
        ty_args: Vec<TypeTag>,
        mut args: VecDeque<Value>,
        cost_table: &CostTable,
    ) -> VMResult<NativeResult> {
        ensure_len!(ty_args, 1, "type arguments", "push back");
        ensure_len!(args, 2, "arguments", "push back");

        let r = pop_arg_front!(args, ContainerRef);
        let mut v = r.borrow_mut();
        let e = args.pop_front().unwrap();
        let cost = cost_table
            .native_cost(NativeCostIndex::PUSH_BACK)
            .total()
            .mul(e.size());

        match (&ty_args[0], &mut *v) {
            (TypeTag::U8, Container::U8(v)) => v.push(e.value_as()?),
            (TypeTag::U64, Container::U64(v)) => v.push(e.value_as()?),
            (TypeTag::U128, Container::U128(v)) => v.push(e.value_as()?),
            (TypeTag::Bool, Container::Bool(v)) => v.push(e.value_as()?),
            (TypeTag::Struct(_), Container::General(v))
            | (TypeTag::ByteArray, Container::General(v))
            | (TypeTag::Address, Container::General(v)) => v.push(e.0),
            (tag, v) => err_vector_elem_ty_mismatch!(tag, v),
        }

        Ok(NativeResult::ok(cost, vec![]))
    }

    /// Native `Vector::borrow<T>(&v, i)` / `borrow_mut`: returns a reference to
    /// element `i`, or the `INDEX_OUT_OF_BOUNDS` sub-status on a bad index.
    pub fn native_borrow(
        ty_args: Vec<TypeTag>,
        mut args: VecDeque<Value>,
        cost_table: &CostTable,
    ) -> VMResult<NativeResult> {
        ensure_len!(ty_args, 1, "type arguments", "borrow");
        ensure_len!(args, 2, "arguments", "borrow");

        let cost = native_gas(cost_table, NativeCostIndex::BORROW, 1);
        let r = pop_arg_front!(args, ContainerRef);
        let v = r.borrow();
        let idx = pop_arg_front!(args, u64) as usize;

        // TODO: check if the type tag matches the real type?
        if idx >= v.len() {
            return Ok(NativeResult::err(
                cost,
                VMStatus::new(StatusCode::NATIVE_FUNCTION_ERROR)
                    .with_sub_status(INDEX_OUT_OF_BOUNDS),
            ));
        }
        let v = Value(r.borrow_elem(idx)?);

        Ok(NativeResult::ok(cost, vec![v]))
    }

    /// Native `Vector::pop_back<T>(&mut v)`: removes and returns the last element,
    /// or the `POP_EMPTY_VEC` sub-status if the vector is empty.
    pub fn native_pop(
        ty_args: Vec<TypeTag>,
        mut args: VecDeque<Value>,
        cost_table: &CostTable,
    ) -> VMResult<NativeResult> {
        ensure_len!(ty_args, 1, "type arguments", "pop");
        ensure_len!(args, 1, "arguments", "pop");

        let cost = native_gas(cost_table, NativeCostIndex::POP_BACK, 1);
        let r = pop_arg_front!(args, ContainerRef);
        let mut v = r.borrow_mut();

        macro_rules! err_pop_empty_vec {
            () => {
                return Ok(NativeResult::err(
                    cost,
                    VMStatus::new(StatusCode::NATIVE_FUNCTION_ERROR).with_sub_status(POP_EMPTY_VEC),
                ));
            };
        }

        let res = match (&ty_args[0], &mut *v) {
            (TypeTag::U8, Container::U8(v)) => match v.pop() {
                Some(x) => Value::u8(x),
                None => err_pop_empty_vec!(),
            },
            (TypeTag::U64, Container::U64(v)) => match v.pop() {
                Some(x) => Value::u64(x),
                None => err_pop_empty_vec!(),
            },
            (TypeTag::U128, Container::U128(v)) => match v.pop() {
                Some(x) => Value::u128(x),
                None => err_pop_empty_vec!(),
            },
            (TypeTag::Bool, Container::Bool(v)) => match v.pop() {
                Some(x) => Value::bool(x),
                None => err_pop_empty_vec!(),
            },
            (TypeTag::Struct(_), Container::General(v))
            | (TypeTag::ByteArray, Container::General(v))
            | (TypeTag::Address, Container::General(v)) => match v.pop() {
                Some(x) => Value(x),
                None => err_pop_empty_vec!(),
            },
            (tag, v) => err_vector_elem_ty_mismatch!(tag, v),
        };

        Ok(NativeResult::ok(cost, vec![res]))
    }

    /// Native `Vector::destroy_empty<T>(v)`: consumes the vector, failing with the
    /// `DESTROY_NON_EMPTY_VEC` sub-status if it still contains elements.
    pub fn native_destroy_empty(
        ty_args: Vec<TypeTag>,
        mut args: VecDeque<Value>,
        cost_table: &CostTable,
    ) -> VMResult<NativeResult> {
        ensure_len!(ty_args, 1, "type arguments", "destroy empty");
        ensure_len!(args, 1, "arguments", "destroy empty");

        let cost = native_gas(cost_table, NativeCostIndex::DESTROY_EMPTY, 1);
        let v = args.pop_front().unwrap().value_as::<Container>()?;

        let is_empty = match (&ty_args[0], &v) {
            (TypeTag::U8, Container::U8(v)) => v.is_empty(),
            (TypeTag::U64, Container::U64(v)) => v.is_empty(),
            (TypeTag::U128, Container::U128(v)) => v.is_empty(),
            (TypeTag::Bool, Container::Bool(v)) => v.is_empty(),
            (TypeTag::Struct(_), Container::General(v))
            | (TypeTag::ByteArray, Container::General(v))
            | (TypeTag::Address, Container::General(v)) => v.is_empty(),
            (tag, v) => err_vector_elem_ty_mismatch!(tag, v),
        };

        if is_empty {
            Ok(NativeResult::ok(cost, vec![]))
        } else {
            Ok(NativeResult::err(
                cost,
                VMStatus::new(StatusCode::NATIVE_FUNCTION_ERROR)
                    .with_sub_status(DESTROY_NON_EMPTY_VEC),
            ))
        }
    }

    /// Native `Vector::swap<T>(&mut v, i, j)`: swaps two elements in place, or the
    /// `INDEX_OUT_OF_BOUNDS` sub-status if either index is out of range.
    pub fn native_swap(
        ty_args: Vec<TypeTag>,
        mut args: VecDeque<Value>,
        cost_table: &CostTable,
    ) -> VMResult<NativeResult> {
        ensure_len!(ty_args, 1, "type arguments", "swap");
        ensure_len!(args, 3, "arguments", "swap");

        let cost = native_gas(cost_table, NativeCostIndex::SWAP, 1);
        let r = pop_arg_front!(args, ContainerRef);
        let mut v = r.borrow_mut();
        let idx1 = pop_arg_front!(args, u64) as usize;
        let idx2 = pop_arg_front!(args, u64) as usize;

        macro_rules! swap {
            ($v: ident) => {{
                if idx1 >= $v.len() || idx2 >= $v.len() {
                    return Ok(NativeResult::err(
                        cost,
                        VMStatus::new(StatusCode::NATIVE_FUNCTION_ERROR)
                            .with_sub_status(INDEX_OUT_OF_BOUNDS),
                    ));
                }
                $v.swap(idx1, idx2);
            }};
        }

        match (&ty_args[0], &mut *v) {
            (TypeTag::U8, Container::U8(v)) => swap!(v),
            (TypeTag::U64, Container::U64(v)) => swap!(v),
            (TypeTag::U128, Container::U128(v)) => swap!(v),
            (TypeTag::Bool, Container::Bool(v)) => swap!(v),
            (TypeTag::Struct(_), Container::General(v))
            | (TypeTag::Address, Container::General(v))
            | (TypeTag::ByteArray, Container::General(v)) => swap!(v),
            (tag, v) => err_vector_elem_ty_mismatch!(tag, v),
        }

        Ok(NativeResult::ok(cost, vec![]))
    }
}

/***************************************************************************************
 *
 * Gas
 *
 * Abstract memory sizes of the VM values.
* **************************************************************************************/ impl Container { fn size(&self) -> AbstractMemorySize<GasCarrier> { match self { Self::General(v) => v .iter() .fold(STRUCT_SIZE, |acc, v| acc.map2(v.size(), Add::add)), Self::U8(v) => AbstractMemorySize::new((v.len() * size_of::<u8>()) as u64), Self::U64(v) => AbstractMemorySize::new((v.len() * size_of::<u64>()) as u64), Self::U128(v) => AbstractMemorySize::new((v.len() * size_of::<u128>()) as u64), Self::Bool(v) => AbstractMemorySize::new((v.len() * size_of::<bool>()) as u64), } } } impl ContainerRef { fn size(&self) -> AbstractMemorySize<GasCarrier> { words_in(REFERENCE_SIZE) } } impl IndexedRef { fn size(&self) -> AbstractMemorySize<GasCarrier> { words_in(REFERENCE_SIZE) } } impl ValueImpl { fn size(&self) -> AbstractMemorySize<GasCarrier> { use ValueImpl::*; match self { Invalid | U8(_) | U64(_) | U128(_) | Bool(_) => CONST_SIZE, Address(_) => AbstractMemorySize::new(ADDRESS_LENGTH as u64), ByteArray(key) => AbstractMemorySize::new(key.len() as u64), ContainerRef(r) => r.size(), IndexedRef(r) => r.size(), // TODO: in case the borrow fails the VM will panic. Container(r) => r.borrow().size(), } } } impl Struct { pub fn size(&self) -> AbstractMemorySize<GasCarrier> { self.0.size() } } impl Value { pub fn size(&self) -> AbstractMemorySize<GasCarrier> { self.0.size() } } impl ReferenceImpl { fn size(&self) -> AbstractMemorySize<GasCarrier> { match self { Self::ContainerRef(r) => r.size(), Self::IndexedRef(r) => r.size(), } } } impl Reference { pub fn size(&self) -> AbstractMemorySize<GasCarrier> { self.0.size() } } impl GlobalValue { pub fn size(&self) -> AbstractMemorySize<GasCarrier> { // TODO: should it be self.container.borrow().size() words_in(REFERENCE_SIZE) } } /*************************************************************************************** * * Struct Operations * * Public APIs for Struct. 
 * **************************************************************************************/
impl Struct {
    /// Packs an ordered sequence of field values into a struct.
    pub fn pack<I: IntoIterator<Item = Value>>(vals: I) -> Self {
        Self(Container::General(vals.into_iter().map(|v| v.0).collect()))
    }

    /// Unpacks the struct into an iterator over its field values. Structs are
    /// always backed by a General container; any other representation is an
    /// invariant violation.
    pub fn unpack(self) -> VMResult<impl Iterator<Item = Value>> {
        match self.0 {
            Container::General(v) => Ok(v.into_iter().map(Value)),
            Container::U8(_) | Container::U64(_) | Container::U128(_) | Container::Bool(_) => {
                Err(VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
                    .with_message("not a struct".to_string()))
            }
        }
    }
}

/***************************************************************************************
 *
 * Global Value Operations
 *
 * Public APIs for GlobalValue. They allow global values to be created from external
 * source (a.k.a. storage), and references to be taken from them. At the end of the
 * transaction execution the dirty ones can be identified and wrote back to storage.
 *
 **************************************************************************************/
impl GlobalValue {
    /// Wraps a container value loaded from storage, starting in the `Clean` state.
    /// Only container values (structs/resources) may become globals.
    pub fn new(v: Value) -> VMResult<Self> {
        match v.0 {
            ValueImpl::Container(container) => {
                // TODO: check strong count?
                Ok(Self {
                    status: Rc::new(RefCell::new(GlobalDataStatus::Clean)),
                    container,
                })
            }
            v => Err(VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
                .with_message(format!("cannot create global ref from {:?}", v))),
        }
    }

    /// Takes a reference to the global; writes through it share this value's
    /// dirty-status cell, so they mark the global dirty.
    pub fn borrow_global(&self) -> VMResult<Value> {
        Ok(Value(ValueImpl::ContainerRef(ContainerRef::Global {
            status: Rc::clone(&self.status),
            container: Rc::clone(&self.container),
        })))
    }

    /// Explicitly marks the global as modified.
    pub fn mark_dirty(&self) -> VMResult<()> {
        *self.status.borrow_mut() = GlobalDataStatus::Dirty;
        Ok(())
    }

    /// Returns true iff the global has not been modified this transaction.
    pub fn is_clean(&self) -> VMResult<bool> {
        match &*self.status.borrow() {
            GlobalDataStatus::Clean => Ok(true),
            _ => Ok(false),
        }
    }

    /// Returns true iff the global has been modified and needs a write-back.
    pub fn is_dirty(&self) -> VMResult<bool> {
        match &*self.status.borrow() {
            GlobalDataStatus::Dirty => Ok(true),
            _ => Ok(false),
        }
    }

    /// Consumes the global and returns the owned struct; fails if references to
    /// the container are still outstanding.
    pub fn into_owned_struct(self) -> VMResult<Struct> {
        Ok(Struct(take_unique_ownership(self.container)?))
    }
}

/***************************************************************************************
 *
 * Display
 *
 * Implementation of the Display trait for VM Values. These are supposed to be more
 * friendly & readable than the generated Debug dump.
* **************************************************************************************/ impl Display for ValueImpl { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Self::Invalid => write!(f, "Invalid"), Self::U8(x) => write!(f, "U8({})", x), Self::U64(x) => write!(f, "U64({})", x), Self::U128(x) => write!(f, "U128({})", x), Self::Bool(x) => write!(f, "{}", x), Self::Address(addr) => write!(f, "Address({})", addr.short_str()), Self::ByteArray(x) => write!(f, "ByteArray({})", x), Self::Container(r) => write!(f, "Container({})", &*r.borrow()), Self::ContainerRef(r) => write!(f, "{}", r), Self::IndexedRef(r) => write!(f, "{}", r), } } } fn display_list_of_items<T, I>(items: I, f: &mut fmt::Formatter) -> fmt::Result where T: Display, I: IntoIterator<Item = T>, { write!(f, "[")?; let mut items = items.into_iter(); if let Some(x) = items.next() { write!(f, "{}", x)?; for x in items { write!(f, ", {}", x)?; } } write!(f, "]") } impl Display for ContainerRef { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { // TODO: this could panic. match self { Self::Local(r) => write!(f, "({}, {})", Rc::strong_count(r), &*r.borrow()), Self::Global { status, container } => write!( f, "({:?}, {}, {})", &*status.borrow(), Rc::strong_count(container), &*container.borrow() ), } } } impl Display for IndexedRef { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}<{}>", self.container_ref, self.idx) } } impl Display for Container { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Self::General(v) => display_list_of_items(v, f), Self::U8(v) => display_list_of_items(v, f), Self::U64(v) => display_list_of_items(v, f), Self::U128(v) => display_list_of_items(v, f), Self::Bool(v) => display_list_of_items(v, f), } } } impl Display for Value { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { Display::fmt(&self.0, f) } } impl Display for Locals { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { // TODO: this could panic. 
match &*self.0.borrow() { Container::General(v) => write!( f, "{}", v.iter() .enumerate() .map(|(idx, val)| format!("[{}] {}", idx, val)) .collect::<Vec<_>>() .join("\n") ), _ => unreachable!(), } } } /*************************************************************************************** * * Serialization & Deserialization * * LCS implementation for VM values. Note although values are represented as Rust * enums that carry type info in the tags, we should NOT rely on them for * serialization: * 1) Depending on the specific internal representation, it may be impossible to * reconstruct the layout from a value. For example, one cannot tell if a general * container is a struct or a value. * 2) Even if 1) is not a problem at a certain time, we may change to a different * internal representation that breaks the 1-1 mapping. Extremely speaking, if * we switch to untagged unions one day, none of the type info will be carried * by the value. * * Therefore the appropriate & robust way to implement serialization & deserialization * is to involve an explicit representation of the type layout. 
* **************************************************************************************/ use serde::{ de::Error as DeError, ser::{Error as SerError, SerializeSeq, SerializeTuple}, Deserialize, }; impl Value { pub fn simple_deserialize(blob: &[u8], layout: Type) -> VMResult<Value> { lcs::from_bytes_seed(&layout, blob) .map_err(|e| VMStatus::new(StatusCode::INVALID_DATA).with_message(e.to_string())) } pub fn simple_serialize(&self, layout: &Type) -> Option<Vec<u8>> { lcs::to_bytes(&AnnotatedValue { layout, val: &self.0, }) .ok() } } impl Struct { pub fn simple_serialize(&self, layout: &StructDef) -> Option<Vec<u8>> { lcs::to_bytes(&AnnotatedValue { layout, val: &self.0, }) .ok() } } struct AnnotatedValue<'a, 'b, T1, T2> { layout: &'a T1, val: &'b T2, } impl<'a, 'b> serde::Serialize for AnnotatedValue<'a, 'b, Type, ValueImpl> { fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> { macro_rules! serialize_vec { ($tc: ident, $layout: expr, $v: expr) => {{ match $layout { Type::$tc => (), _ => { return Err(S::Error::custom( VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR) .with_message( "cannot serialize vector -- element type mismatch".to_string(), ), )) } } let mut t = serializer.serialize_seq(Some($v.len()))?; for val in $v { t.serialize_element(val)?; } t.end() }}; } match (self.layout, self.val) { (Type::U8, ValueImpl::U8(x)) => serializer.serialize_u8(*x), (Type::U64, ValueImpl::U64(x)) => serializer.serialize_u64(*x), (Type::U128, ValueImpl::U128(x)) => serializer.serialize_u128(*x), (Type::Bool, ValueImpl::Bool(x)) => serializer.serialize_bool(*x), (Type::Address, ValueImpl::Address(x)) => x.serialize(serializer), (Type::ByteArray, ValueImpl::ByteArray(x)) => x.serialize(serializer), (Type::Struct(layout), ValueImpl::Container(r)) => { let r = r.borrow(); (AnnotatedValue { layout, val: &*r }).serialize(serializer) } (Type::Vector(layout), ValueImpl::Container(r)) => { let layout = &**layout; match (layout, &*r.borrow()) 
{ (Type::Vector(_), Container::General(v)) | (Type::Struct(_), Container::General(v)) | (Type::Address, Container::General(v)) | (Type::ByteArray, Container::General(v)) => { let mut t = serializer.serialize_seq(Some(v.len()))?; for val in v { t.serialize_element(&AnnotatedValue { layout, val })?; } t.end() } (Type::U8, Container::U8(v)) => serialize_vec!(U8, layout, v), (Type::U64, Container::U64(v)) => serialize_vec!(U64, layout, v), (Type::U128, Container::U128(v)) => serialize_vec!(U128, layout, v), (Type::Bool, Container::Bool(v)) => serialize_vec!(Bool, layout, v), (layout, container) => Err(S::Error::custom( VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR).with_message( format!("cannot serialize container {:?} as {:?}", container, layout), ), )), } } (layout, val) => Err(S::Error::custom( VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR) .with_message(format!("cannot serialize value {:?} as {:?}", val, layout)), )), } } } impl<'a, 'b> serde::Serialize for AnnotatedValue<'a, 'b, StructDef, Container> { fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> { match (self.layout, self.val) { (StructDef::Struct(inner), Container::General(v)) => { if inner.field_definitions().len() != v.len() { return Err(S::Error::custom( VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR).with_message( format!( "cannot serialize struct value {:?} as {:?} -- number of fields mismatch", self.val, self.layout ), ), )); } let mut t = serializer.serialize_tuple(v.len())?; for (layout, val) in inner.field_definitions().iter().zip(v.iter()) { t.serialize_element(&AnnotatedValue { layout, val })?; } t.end() } (val, layout) => Err(S::Error::custom( VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR).with_message(format!( "cannot serialize container value {:?} as {:?}", val, layout )), )), } } } impl<'d> serde::de::DeserializeSeed<'d> for &Type { type Value = Value; fn deserialize<D: serde::de::Deserializer<'d>>( self, 
deserializer: D, ) -> Result<Self::Value, D::Error> { match self { Type::Bool => bool::deserialize(deserializer).map(Value::bool), Type::U8 => u8::deserialize(deserializer).map(Value::u8), Type::U64 => u64::deserialize(deserializer).map(Value::u64), Type::U128 => u128::deserialize(deserializer).map(Value::u128), Type::ByteArray => ByteArray::deserialize(deserializer).map(Value::byte_array), Type::Address => AccountAddress::deserialize(deserializer).map(Value::address), Type::Vector(layout) => { struct GeneralVectorVisitor<'a>(&'a Type); impl<'d, 'a> serde::de::Visitor<'d> for GeneralVectorVisitor<'a> { type Value = Container; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("Vector") } fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error> where A: serde::de::SeqAccess<'d>, { let mut vals = Vec::new(); while let Some(elem) = seq.next_element_seed(self.0)? { vals.push(elem.0) } Ok(Container::General(vals)) } } macro_rules! deserialize_specialized_vec { ($tc: ident, $tc2: ident, $ty: ident) => {{ struct $tc; impl<'d> serde::de::Visitor<'d> for $tc { type Value = Vec<$ty>; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str(stringify!($ty)) } fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error> where A: serde::de::SeqAccess<'d>, { let mut vals = Vec::new(); while let Some(elem) = seq.next_element::<$ty>()? 
{ vals.push(elem) } Ok(vals) } } Ok(Value(ValueImpl::Container(Rc::new(RefCell::new( Container::$tc2(deserializer.deserialize_seq($tc)?), ))))) }}; } match &**layout { Type::U8 => deserialize_specialized_vec!(U8VectorVisitor, U8, u8), Type::U64 => deserialize_specialized_vec!(U64VectorVisitor, U64, u64), Type::U128 => deserialize_specialized_vec!(U128VectorVisitor, U128, u128), Type::Bool => deserialize_specialized_vec!(BoolVectorVisitor, Bool, bool), layout => Ok(Value(ValueImpl::Container(Rc::new(RefCell::new( deserializer.deserialize_seq(GeneralVectorVisitor(layout))?, ))))), } } Type::Struct(layout) => layout.deserialize(deserializer), Type::Reference(_) | Type::MutableReference(_) | Type::TypeVariable(_) => { Err(D::Error::custom( VMStatus::new(StatusCode::INVALID_DATA) .with_message(format!("Value type {:?} not possible", self)), )) } } } } impl<'d> serde::de::DeserializeSeed<'d> for &StructDef { type Value = Value; fn deserialize<D: serde::de::Deserializer<'d>>( self, deserializer: D, ) -> Result<Self::Value, D::Error> { match self { StructDef::Struct(inner) => { struct StructVisitor<'a>(&'a [Type]); impl<'d, 'a> serde::de::Visitor<'d> for StructVisitor<'a> { type Value = Struct; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("Struct") } fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error> where A: serde::de::SeqAccess<'d>, { let mut val = Vec::new(); for (i, field_type) in self.0.iter().enumerate() { if let Some(elem) = seq.next_element_seed(field_type)? 
{ val.push(elem) } else { return Err(A::Error::invalid_length(i, &self)); } } Ok(Struct::pack(val)) } } let field_layouts = inner.field_definitions(); Ok(Value::struct_(deserializer.deserialize_tuple( field_layouts.len(), StructVisitor(field_layouts), )?)) } StructDef::Native(_) => unreachable!("we do not have any native structs right now"), } } } /*************************************************************************************** * * Prop Testing * * Random generation of values that fit into a given layout. * **************************************************************************************/ #[cfg(feature = "fuzzing")] pub mod prop { use super::*; use proptest::{collection::vec, prelude::*}; pub fn value_strategy_with_layout(layout: &Type) -> impl Strategy<Value = Value> { match layout { Type::U8 => any::<u8>().prop_map(Value::u8).boxed(), Type::U64 => any::<u64>().prop_map(Value::u64).boxed(), Type::U128 => any::<u128>().prop_map(Value::u128).boxed(), Type::Bool => any::<bool>().prop_map(Value::bool).boxed(), Type::Address => any::<AccountAddress>().prop_map(Value::address).boxed(), Type::ByteArray => any::<ByteArray>().prop_map(Value::byte_array).boxed(), Type::Vector(layout) => match &**layout { Type::U8 => vec(any::<u8>(), 0..10) .prop_map(|vals| Value(ValueImpl::new_container(Container::U8(vals)))) .boxed(), Type::U64 => vec(any::<u64>(), 0..10) .prop_map(|vals| Value(ValueImpl::new_container(Container::U64(vals)))) .boxed(), Type::U128 => vec(any::<u128>(), 0..10) .prop_map(|vals| Value(ValueImpl::new_container(Container::U128(vals)))) .boxed(), Type::Bool => vec(any::<bool>(), 0..10) .prop_map(|vals| Value(ValueImpl::new_container(Container::Bool(vals)))) .boxed(), layout => vec(value_strategy_with_layout(layout), 0..10) .prop_map(|vals| { Value(ValueImpl::new_container(Container::General( vals.into_iter().map(|val| val.0).collect(), ))) }) .boxed(), }, Type::Struct(StructDef::Struct(inner)) => inner .field_definitions() .iter() .map(|layout| 
value_strategy_with_layout(layout)) .collect::<Vec<_>>() .prop_map(|vals| { Value(ValueImpl::new_container(Container::General( vals.into_iter().map(|val| val.0).collect(), ))) }) .boxed(), Type::Struct(StructDef::Native(_)) => { unreachable!("we do not have any native structs now") } Type::Reference(..) | Type::MutableReference(..) => { panic!("cannot generate references for prop tests") } Type::TypeVariable(..) => panic!("cannot generate type variables for prop tests"), } } pub fn layout_and_value_strategy() -> impl Strategy<Value = (Type, Value)> { any::<Type>().no_shrink().prop_flat_map(|layout| { let value_strategy = value_strategy_with_layout(&layout); (Just(layout), value_strategy) }) } }
34.338097
106
0.482057
64ad50c78433944ac4eb8c683f2be4ddda93df3e
4,391
use std::{fmt::Debug, io::Read}; #[derive(Clone, Copy, PartialEq, Eq)] enum Item { Open, Close, Value(u8), } impl Debug for Item { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::Open => write!(f, "["), Self::Close => write!(f, "]"), Self::Value(v) => write!(f, "{}", v), } } } fn parse_input() -> Vec<Vec<Item>> { let mut input = String::new(); std::io::stdin().read_to_string(&mut input).unwrap(); input .trim() .split('\n') .map(|line| { line.chars() .filter_map(|c| match c { '0'..='9' => Some(Item::Value(c.to_digit(10).unwrap() as u8)), '[' => Some(Item::Open), ']' => Some(Item::Close), _ => None, }) .collect() }) .collect() } fn split(seq: &[Item]) -> Vec<Item> { let mut new_seq = vec![]; let mut changed = false; for &item in seq { if let Item::Value(v) = item { if !changed && v >= 10 { new_seq.push(Item::Open); new_seq.push(Item::Value(v / 2)); new_seq.push(Item::Value(v - v / 2)); new_seq.push(Item::Close); changed = true; continue; } } new_seq.push(item); } new_seq } fn explode(seq: &[Item]) -> Vec<Item> { let mut new_seq = vec![]; let mut depth = 0; let mut to_add = None; let mut seq_iter = seq.iter(); while let Some(&item) = seq_iter.next() { match item { Item::Open => { depth += 1; new_seq.push(item); } Item::Close => { depth -= 1; new_seq.push(item); } Item::Value(v) => { if let Some(a) = to_add { new_seq.push(Item::Value(v + a)); break; } else if depth == 5 { new_seq.pop(); // remove opening bracket depth -= 1; let mut stack = vec![]; while let Some(p) = new_seq.pop() { if let Item::Value(n) = p { new_seq.push(Item::Value(n + v)); break; } else { stack.push(p); } } while let Some(p) = stack.pop() { new_seq.push(p); } new_seq.push(Item::Value(0)); let next = seq_iter.next().unwrap(); if let Item::Value(n) = next { to_add = Some(n); } else { unreachable!() } seq_iter.next(); // skip over closing bracket. 
} else { new_seq.push(item); } } } } new_seq.extend(seq_iter); new_seq } fn reduce(seq: &[Item]) -> Vec<Item> { let r1 = explode(seq); if r1 != seq { reduce(&r1) } else { let r2 = split(seq); if r2 != seq { reduce(&r2) } else { seq.to_vec() } } } fn magnitude(seq: &[Item]) -> i64 { if seq.len() == 1 { if let Item::Value(n) = seq[0] { return n as i64; } unreachable!() } let mut count = 0; let mut idx = None; for (i, &item) in seq.iter().enumerate().skip(1) { match item { Item::Open => count += 1, Item::Close => count -= 1, Item::Value(_) => (), } if count == 0 { idx = Some(i + 1); break; } } let idx = idx.unwrap(); 3 * magnitude(&seq[1..idx]) + 2 * magnitude(&seq[idx..seq.len() - 1]) } fn main() { let mut input = parse_input(); let mut seq = input.remove(0); for line in input { let mut new_seq = vec![Item::Open]; new_seq.extend(seq); new_seq.extend(line); new_seq.push(Item::Close); seq = reduce(&new_seq); } let m = magnitude(&seq); println!("{}", m); }
26.293413
82
0.393532
d9e07d0b887c813284598bae382a509a14ce531e
5,939
use crate::{LogicalAddr, Profile, Receiver, ReceiverStream, Target}; use futures::{prelude::*, ready}; use indexmap::IndexSet; use linkerd_addr::NameAddr; use linkerd_error::Error; use linkerd_proxy_api_resolve::ConcreteAddr; use linkerd_stack::{layer, NewService, Param}; use rand::distributions::{Distribution, WeightedIndex}; use rand::{rngs::SmallRng, thread_rng, SeedableRng}; use std::{ marker::PhantomData, pin::Pin, task::{Context, Poll}, }; use tower::ready_cache::ReadyCache; use tracing::{debug, trace}; pub fn layer<N, S, Req>() -> impl layer::Layer<N, Service = NewSplit<N, S, Req>> + Clone { // This RNG doesn't need to be cryptographically secure. Small and fast is // preferable. layer::mk(move |inner| NewSplit { inner, _service: PhantomData, }) } #[derive(Debug)] pub struct NewSplit<N, S, Req> { inner: N, _service: PhantomData<fn(Req) -> S>, } pub struct Split<T, N, S, Req> { rng: SmallRng, rx: ReceiverStream, target: T, new_service: N, distribution: WeightedIndex<u32>, addrs: IndexSet<NameAddr>, services: ReadyCache<NameAddr, S, Req>, } // === impl NewSplit === impl<N: Clone, S, Req> Clone for NewSplit<N, S, Req> { fn clone(&self) -> Self { Self { inner: self.inner.clone(), _service: self._service, } } } impl<T, N, S, Req> NewService<T> for NewSplit<N, S, Req> where T: Clone + Param<LogicalAddr> + Param<Receiver>, N: NewService<(ConcreteAddr, T), Service = S> + Clone, S: tower::Service<Req>, S::Error: Into<Error>, { type Service = Split<T, N, S, Req>; fn new_service(&mut self, target: T) -> Self::Service { let rx: Receiver = target.param(); let mut targets = rx.targets(); if targets.is_empty() { let LogicalAddr(addr) = target.param(); targets.push(Target { addr, weight: 1 }) } trace!(?targets, "Building split service"); let mut addrs = IndexSet::with_capacity(targets.len()); let mut weights = Vec::with_capacity(targets.len()); let mut services = ReadyCache::default(); let mut new_service = self.inner.clone(); for Target { weight, addr } in 
targets.into_iter() { services.push( addr.clone(), new_service.new_service((ConcreteAddr(addr.clone()), target.clone())), ); addrs.insert(addr); weights.push(weight); } Split { rx: rx.into(), target, new_service, services, addrs, distribution: WeightedIndex::new(weights).unwrap(), rng: SmallRng::from_rng(&mut thread_rng()).expect("RNG must initialize"), } } } // === impl Split === impl<T, N, S, Req> tower::Service<Req> for Split<T, N, S, Req> where Req: Send + 'static, T: Clone + Param<LogicalAddr>, N: NewService<(ConcreteAddr, T), Service = S> + Clone, S: tower::Service<Req> + Send + 'static, S::Response: Send + 'static, S::Error: Into<Error>, S::Future: Send, { type Response = S::Response; type Error = Error; type Future = Pin<Box<dyn Future<Output = Result<S::Response, Error>> + Send + 'static>>; fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { let mut update = None; while let Poll::Ready(Some(up)) = self.rx.poll_next_unpin(cx) { update = Some(up); } // Every time the profile updates, rebuild the distribution, reusing // services that existed in the prior state. if let Some(Profile { mut targets, .. }) = update { if targets.is_empty() { let LogicalAddr(addr) = self.target.param(); targets.push(Target { addr, weight: 1 }) } debug!(?targets, "Updating"); // Replace the old set of addresses with an empty set. The // prior set is used to determine whether a new service // needs to be created and what stale services should be // removed. let mut prior_addrs = std::mem::replace(&mut self.addrs, IndexSet::with_capacity(targets.len())); let mut weights = Vec::with_capacity(targets.len()); // Create an updated distribution and set of services. for Target { weight, addr } in targets.into_iter() { // Reuse the prior services whenever possible. 
if !prior_addrs.remove(&addr) { debug!(%addr, "Creating target"); let svc = self .new_service .new_service((ConcreteAddr(addr.clone()), self.target.clone())); self.services.push(addr.clone(), svc); } else { trace!(%addr, "Target already exists"); } self.addrs.insert(addr); weights.push(weight); } self.distribution = WeightedIndex::new(weights).unwrap(); // Remove all prior services that did not exist in the new // set of targets. for addr in prior_addrs.into_iter() { self.services.evict(&addr); } } // Wait for all target services to be ready. If any services fail, then // the whole service fails. Poll::Ready(ready!(self.services.poll_pending(cx)).map_err(Into::into)) } fn call(&mut self, req: Req) -> Self::Future { let idx = if self.addrs.len() == 1 { 0 } else { self.distribution.sample(&mut self.rng) }; let addr = self.addrs.get_index(idx).expect("invalid index"); trace!(?addr, "Dispatching"); Box::pin(self.services.call_ready(addr, req).err_into::<Error>()) } }
33.553672
93
0.565078
3340c9540290600b2da05f8bc8cd366ca850a2c0
517
use crate::error::*; use hyena_common::libc::cvt_r; use libc::{fallocate, FALLOC_FL_KEEP_SIZE, FALLOC_FL_PUNCH_HOLE}; use std::os::unix::io::AsRawFd; pub fn punch_hole<F: AsRawFd>(file: &F, size: usize) -> Result<()> { let fd = file.as_raw_fd(); // punch one enormous hole :) unsafe { cvt_r(|| { fallocate( fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE, 0, size as i64, ) })?; } Ok(()) }
21.541667
68
0.516441
56eeeb99856c5c28d05855dfcfa98a48ed773d89
823
// This file is part of security-keys-rust. It is subject to the license terms in the COPYRIGHT file found in the top-level directory of this distribution and at https://raw.githubusercontent.com/raphaelcohn/security-keys-rust/master/COPYRIGHT. No part of security-keys-rust, including this file, may be copied, modified, propagated, or distributed except according to the terms contained in the COPYRIGHT file. // Copyright © 2021 The developers of security-keys-rust. See the COPYRIGHT file in the top-level directory of this distribution and at https://raw.githubusercontent.com/raphaelcohn/security-keys-rust/master/COPYRIGHT. #[cfg(not(any(target_os = "ios", target_os = "macos")))] pub(in crate::pcsc) type DWORD = c_ulong; #[cfg(any(target_os = "ios", target_os = "macos"))] pub(in crate::pcsc) type DWORD = u32;
82.3
411
0.770352
f8f3ac66399b03fdfcf36d8adfecacba7cd6316d
2,346
//! ```elixir //! {:ok, document} = Lumen.Web.Document.new() //! {:ok, reference_child} = Lumen.Web.Document.create_element(document, "table") //! {:ok, parent} = Lumen.Web.Document.create_element(parent_document, "div") //! :ok = Lumen.Web.Node.append_child(document, parent) //! :ok = Lumen.Web.Node.append_child(parent, reference_child) //! {:ok, new_child} = Lumen.Web.Document.create_element(document, "ul"); //! {:ok, inserted_child} = Lumen.Web.insert_before(parent, new_child, reference_child) //! ``` #[path = "with_reference_child_inserts_before_reference_child/label_1.rs"] pub mod label_1; #[path = "with_reference_child_inserts_before_reference_child/label_2.rs"] pub mod label_2; #[path = "with_reference_child_inserts_before_reference_child/label_3.rs"] pub mod label_3; #[path = "with_reference_child_inserts_before_reference_child/label_4.rs"] pub mod label_4; #[path = "with_reference_child_inserts_before_reference_child/label_5.rs"] pub mod label_5; #[path = "with_reference_child_inserts_before_reference_child/label_6.rs"] pub mod label_6; use liblumen_alloc::erts::process::Process; use liblumen_alloc::erts::term::prelude::*; use liblumen_web::document; #[native_implemented::function(Elixir.Lumen.Web.Node.InsertBefore3:with_reference_child_inserts_before_reference_child/0)] fn result(process: &Process) -> Term { // ```elixir // # pushed to stack: () // # returned from call: N/A // # full stack: () // # returns: {:ok, parent_document} // ``` process.queue_frame_with_arguments(document::new_0::frame().with_arguments(false, &[])); // ```elixir // # label 1 // # pushed to stack: () // # returned form call: {:ok, document} // # full stack: ({:ok, document}) // # returns: {:ok, old_child} // {:ok, reference_child} = Lumen.Web.Document.create_element(document, "table") // {:ok, parent} = Lumen.Web.Document.create_element(parent_document, "div") // :ok = Lumen.Web.Node.append_child(document, parent) // :ok = Lumen.Web.Node.append_child(parent, reference_child) // 
{:ok, new_child} = Lumen.Web.Document.create_element(document, "ul"); // {:ok, inserted_child} = Lumen.Web.insert_before(parent, new_child, reference_child) // ``` process.queue_frame_with_arguments(label_1::frame().with_arguments(true, &[])); Term::NONE }
42.654545
122
0.708866
0318c483959f2182a47f996dba7f88f6d584fc04
5,697
use rustc_ast::{ast, attr}; use rustc_errors::Applicability; use rustc_session::Session; use rustc_span::sym; use std::str::FromStr; /// Deprecation status of attributes known by Clippy. #[allow(dead_code)] pub enum DeprecationStatus { /// Attribute is deprecated Deprecated, /// Attribute is deprecated and was replaced by the named attribute Replaced(&'static str), None, } pub const BUILTIN_ATTRIBUTES: &[(&str, DeprecationStatus)] = &[ ("author", DeprecationStatus::None), ("cognitive_complexity", DeprecationStatus::None), ( "cyclomatic_complexity", DeprecationStatus::Replaced("cognitive_complexity"), ), ("dump", DeprecationStatus::None), ("msrv", DeprecationStatus::None), ]; pub struct LimitStack { stack: Vec<u64>, } impl Drop for LimitStack { fn drop(&mut self) { assert_eq!(self.stack.len(), 1); } } impl LimitStack { #[must_use] pub fn new(limit: u64) -> Self { Self { stack: vec![limit] } } pub fn limit(&self) -> u64 { *self.stack.last().expect("there should always be a value in the stack") } pub fn push_attrs(&mut self, sess: &Session, attrs: &[ast::Attribute], name: &'static str) { let stack = &mut self.stack; parse_attrs(sess, attrs, name, |val| stack.push(val)); } pub fn pop_attrs(&mut self, sess: &Session, attrs: &[ast::Attribute], name: &'static str) { let stack = &mut self.stack; parse_attrs(sess, attrs, name, |val| assert_eq!(stack.pop(), Some(val))); } } pub fn get_attr<'a>( sess: &'a Session, attrs: &'a [ast::Attribute], name: &'static str, ) -> impl Iterator<Item = &'a ast::Attribute> { attrs.iter().filter(move |attr| { let attr = if let ast::AttrKind::Normal(ref attr, _) = attr.kind { attr } else { return false; }; let attr_segments = &attr.path.segments; if attr_segments.len() == 2 && attr_segments[0].ident.name == sym::clippy { BUILTIN_ATTRIBUTES .iter() .find_map(|&(builtin_name, ref deprecation_status)| { if attr_segments[1].ident.name.as_str() == builtin_name { Some(deprecation_status) } else { None } }) .map_or_else( || { 
sess.span_err(attr_segments[1].ident.span, "usage of unknown attribute"); false }, |deprecation_status| { let mut diag = sess.struct_span_err(attr_segments[1].ident.span, "usage of deprecated attribute"); match *deprecation_status { DeprecationStatus::Deprecated => { diag.emit(); false }, DeprecationStatus::Replaced(new_name) => { diag.span_suggestion( attr_segments[1].ident.span, "consider using", new_name.to_string(), Applicability::MachineApplicable, ); diag.emit(); false }, DeprecationStatus::None => { diag.cancel(); attr_segments[1].ident.name.as_str() == name }, } }, ) } else { false } }) } fn parse_attrs<F: FnMut(u64)>(sess: &Session, attrs: &[ast::Attribute], name: &'static str, mut f: F) { for attr in get_attr(sess, attrs, name) { if let Some(ref value) = attr.value_str() { if let Ok(value) = FromStr::from_str(&value.as_str()) { f(value); } else { sess.span_err(attr.span, "not a number"); } } else { sess.span_err(attr.span, "bad clippy attribute"); } } } pub fn get_unique_inner_attr(sess: &Session, attrs: &[ast::Attribute], name: &'static str) -> Option<ast::Attribute> { let mut unique_attr = None; for attr in get_attr(sess, attrs, name) { match attr.style { ast::AttrStyle::Inner if unique_attr.is_none() => unique_attr = Some(attr.clone()), ast::AttrStyle::Inner => { sess.struct_span_err(attr.span, &format!("`{}` is defined multiple times", name)) .span_note(unique_attr.as_ref().unwrap().span, "first definition found here") .emit(); }, ast::AttrStyle::Outer => { sess.span_err(attr.span, &format!("`{}` cannot be an outer attribute", name)); }, } } unique_attr } /// Return true if the attributes contain any of `proc_macro`, /// `proc_macro_derive` or `proc_macro_attribute`, false otherwise pub fn is_proc_macro(sess: &Session, attrs: &[ast::Attribute]) -> bool { attrs.iter().any(|attr| sess.is_proc_macro_attr(attr)) } /// Return true if the attributes contain `#[doc(hidden)]` pub fn is_doc_hidden(attrs: &[ast::Attribute]) -> bool { attrs .iter() 
.filter(|attr| attr.has_name(sym::doc)) .filter_map(ast::Attribute::meta_item_list) .any(|l| attr::list_contains_name(&l, sym::hidden)) }
35.60625
118
0.508513
5645e63daa21359172f4393cc37c4a4aee5f2afe
7,922
use cosmwasm_std::testing::{MockApi, MockQuerier, MockStorage, MOCK_CONTRACT_ADDR}; use cosmwasm_std::{ from_binary, from_slice, to_binary, Coin, ContractResult, Decimal, OwnedDeps, Querier, QuerierResult, QueryRequest, SystemError, SystemResult, Uint128, WasmQuery, }; use std::collections::HashMap; use cw20::{BalanceResponse as Cw20BalanceResponse, Cw20QueryMsg, TokenInfoResponse}; use terra_cosmwasm::{TaxCapResponse, TaxRateResponse, TerraQuery, TerraQueryWrapper, TerraRoute}; /// mock_dependencies is a drop-in replacement for cosmwasm_std::testing::mock_dependencies /// this uses our CustomQuerier. pub fn mock_dependencies( contract_balance: &[Coin], ) -> OwnedDeps<MockStorage, MockApi, WasmMockQuerier> { let custom_querier: WasmMockQuerier = WasmMockQuerier::new(MockQuerier::new(&[(MOCK_CONTRACT_ADDR, contract_balance)])); OwnedDeps { storage: MockStorage::default(), api: MockApi::default(), querier: custom_querier, } } pub struct WasmMockQuerier { base: MockQuerier<TerraQueryWrapper>, token_querier: TokenQuerier, tax_querier: TaxQuerier, } #[derive(Clone, Default)] pub struct TokenQuerier { // this lets us iterate over all pairs that match the first string balances: HashMap<String, HashMap<String, Uint128>>, } impl TokenQuerier { pub fn new(balances: &[(&String, &[(&String, &Uint128)])]) -> Self { TokenQuerier { balances: balances_to_map(balances), } } } pub(crate) fn balances_to_map( balances: &[(&String, &[(&String, &Uint128)])], ) -> HashMap<String, HashMap<String, Uint128>> { let mut balances_map: HashMap<String, HashMap<String, Uint128>> = HashMap::new(); for (contract_addr, balances) in balances.iter() { let mut contract_balances_map: HashMap<String, Uint128> = HashMap::new(); for (addr, balance) in balances.iter() { contract_balances_map.insert(addr.to_string(), **balance); } balances_map.insert(contract_addr.to_string(), contract_balances_map); } balances_map } #[derive(Clone, Default)] pub struct TaxQuerier { rate: Decimal, // this lets us iterate 
over all pairs that match the first string caps: HashMap<String, Uint128>, } impl Querier for WasmMockQuerier { fn raw_query(&self, bin_request: &[u8]) -> QuerierResult { // MockQuerier doesn't support Custom, so we ignore it completely here let request: QueryRequest<TerraQueryWrapper> = match from_slice(bin_request) { Ok(v) => v, Err(e) => { return SystemResult::Err(SystemError::InvalidRequest { error: format!("Parsing query request: {}", e), request: bin_request.into(), }) } }; self.handle_query(&request) } } impl WasmMockQuerier { pub fn handle_query(&self, request: &QueryRequest<TerraQueryWrapper>) -> QuerierResult { match &request { QueryRequest::Custom(TerraQueryWrapper { route, query_data }) => { if route == &TerraRoute::Treasury { match query_data { TerraQuery::TaxRate {} => { let res = TaxRateResponse { rate: self.tax_querier.rate, }; SystemResult::Ok(ContractResult::from(to_binary(&res))) } TerraQuery::TaxCap { denom } => { let cap = self .tax_querier .caps .get(denom) .copied() .unwrap_or_default(); let res = TaxCapResponse { cap }; SystemResult::Ok(ContractResult::from(to_binary(&res))) } _ => panic!("DO NOT ENTER HERE"), } } else { panic!("DO NOT ENTER HERE") } } QueryRequest::Wasm(WasmQuery::Smart { contract_addr, msg }) => { match from_binary(msg).unwrap() { Cw20QueryMsg::TokenInfo {} => { let balances: &HashMap<String, Uint128> = match self.token_querier.balances.get(contract_addr) { Some(balances) => balances, None => { return SystemResult::Err(SystemError::InvalidRequest { error: format!( "No balance info exists for the contract {}", contract_addr ), request: msg.as_slice().into(), }) } }; let mut total_supply = Uint128::zero(); for balance in balances { total_supply += *balance.1; } SystemResult::Ok(ContractResult::Ok( to_binary(&TokenInfoResponse { name: "mAAPL".to_string(), symbol: "mAAPL".to_string(), decimals: 6, total_supply, }) .unwrap(), )) } Cw20QueryMsg::Balance { address } => { let balances: &HashMap<String, Uint128> = match 
self.token_querier.balances.get(contract_addr) { Some(balances) => balances, None => { return SystemResult::Err(SystemError::InvalidRequest { error: format!( "No balance info exists for the contract {}", contract_addr ), request: msg.as_slice().into(), }) } }; let balance = match balances.get(&address) { Some(v) => *v, None => { return SystemResult::Ok(ContractResult::Ok( to_binary(&Cw20BalanceResponse { balance: Uint128::zero(), }) .unwrap(), )); } }; SystemResult::Ok(ContractResult::Ok( to_binary(&Cw20BalanceResponse { balance }).unwrap(), )) } _ => panic!("DO NOT ENTER HERE"), } } _ => self.base.handle_query(request), } } } impl WasmMockQuerier { pub fn new(base: MockQuerier<TerraQueryWrapper>) -> Self { WasmMockQuerier { base, token_querier: TokenQuerier::default(), tax_querier: TaxQuerier::default(), } } // configure the mint whitelist mock querier pub fn with_token_balances(&mut self, balances: &[(&String, &[(&String, &Uint128)])]) { self.token_querier = TokenQuerier::new(balances); } }
40.010101
97
0.465413
e6afa2cf13632d447bdcae0a6ced3bbb83b78d03
5,951
//! An atom containing a series of other atoms.
//!
//! This atom is just like a [sequence](../sequence/index.html), only without time stamps: It contains multiple arbitrary atoms which you can either iterate through or write in sequence.
//!
//! # Example
//! ```
//! use lv2_core::prelude::*;
//! use lv2_urid::prelude::*;
//! use lv2_atom::prelude::*;
//! use lv2_atom::tuple::{TupleIterator, TupleWriter};
//!
//! #[derive(PortContainer)]
//! struct MyPorts {
//!     input: InputPort<AtomPort>,
//!     output: OutputPort<AtomPort>,
//! }
//!
//! fn run(ports: &mut MyPorts, urids: &AtomURIDCache) {
//!     let input: TupleIterator = ports.input.read(urids.tuple, ()).unwrap();
//!     let mut output: TupleWriter = ports.output.init(urids.tuple, ()).unwrap();
//!     for atom in input {
//!         if let Some(integer) = atom.read(urids.int, ()) {
//!             output.init(urids.int, integer * 2).unwrap();
//!         } else {
//!             output.init(urids.int, -1).unwrap();
//!         }
//!     }
//! }
//! ```
//!
//! # Specification
//!
//! [http://lv2plug.in/ns/ext/atom/atom.html#Tuple](http://lv2plug.in/ns/ext/atom/atom.html#Tuple)
use crate::space::*;
use crate::*;
use core::prelude::*;
use urid::prelude::*;

/// An atom containing a series of other atoms.
///
/// [See also the module documentation.](index.html)
pub struct Tuple;

unsafe impl UriBound for Tuple {
    const URI: &'static [u8] = sys::LV2_ATOM__Tuple;
}

impl<'a, 'b> Atom<'a, 'b> for Tuple
where
    'a: 'b,
{
    type ReadParameter = ();
    type ReadHandle = TupleIterator<'a>;
    type WriteParameter = ();
    type WriteHandle = TupleWriter<'a, 'b>;

    // Reading needs no extra state: the iterator simply walks the body space.
    fn read(body: Space<'a>, _: ()) -> Option<TupleIterator<'a>> {
        Some(TupleIterator { space: body })
    }

    // Writing wraps the framed space; children are appended via `TupleWriter::init`.
    fn init(frame: FramedMutSpace<'a, 'b>, _: ()) -> Option<TupleWriter<'a, 'b>> {
        Some(TupleWriter { frame })
    }
}

/// An iterator over all atoms in a tuple.
///
/// The item of this iterator is simply the space a single atom occupies.
pub struct TupleIterator<'a> {
    // Remaining, not-yet-consumed body space of the tuple.
    space: Space<'a>,
}

impl<'a> Iterator for TupleIterator<'a> {
    type Item = UnidentifiedAtom<'a>;

    fn next(&mut self) -> Option<UnidentifiedAtom<'a>> {
        // `split_atom` peels one atom off the front; `None` ends iteration
        // when the space is exhausted (or malformed).
        let (atom, space) = self.space.split_atom()?;
        self.space = space;
        Some(UnidentifiedAtom::new(atom))
    }
}

/// The writing handle to add atoms to a tuple.
pub struct TupleWriter<'a, 'b> {
    frame: FramedMutSpace<'a, 'b>,
}

impl<'a, 'b> TupleWriter<'a, 'b> {
    /// Initialize a new tuple element.
    ///
    /// Creates a child atom frame inside the tuple's frame and lets the child
    /// atom type `A` initialize itself there; returns the child's write handle.
    pub fn init<'c, A: Atom<'a, 'c>>(
        &'c mut self,
        child_urid: URID<A>,
        child_parameter: A::WriteParameter,
    ) -> Option<A::WriteHandle> {
        let child_frame = (&mut self.frame as &mut dyn MutSpace).create_atom_frame(child_urid)?;
        A::init(child_frame, child_parameter)
    }
}

#[cfg(test)]
mod tests {
    use crate::prelude::*;
    use crate::space::*;
    use std::mem::size_of;
    use urid::mapper::*;
    use urid::prelude::*;

    // Round-trips a tuple containing a 9-element Int vector followed by a
    // single Int, then verifies the raw memory layout and re-reads it.
    #[test]
    fn test_tuple() {
        let mut mapper = Box::pin(HashURIDMapper::new());
        let interface = mapper.as_mut().make_map_interface();
        let map = Map::new(&interface);
        let urids = crate::AtomURIDCache::from_map(&map).unwrap();

        let mut raw_space: Box<[u8]> = Box::new([0; 256]);

        // writing
        {
            let mut space = RootMutSpace::new(raw_space.as_mut());
            let frame = (&mut space as &mut dyn MutSpace)
                .create_atom_frame(urids.tuple)
                .unwrap();
            let mut writer = Tuple::init(frame, ()).unwrap();
            {
                let mut vector_writer =
                    writer.init::<Vector<Int>>(urids.vector, urids.int).unwrap();
                vector_writer.append(&[17; 9]).unwrap();
            }
            writer.init::<Int>(urids.int, 42).unwrap();
        }

        // verifying
        {
            let (atom, space) = raw_space.split_at(size_of::<sys::LV2_Atom>());
            let atom = unsafe { &*(atom.as_ptr() as *const sys::LV2_Atom) };
            assert_eq!(atom.type_, urids.tuple);
            // The `+ 4` accounts for padding after the 9 i32 vector items so
            // the following Int atom is 64-bit aligned.
            assert_eq!(
                atom.size as usize,
                size_of::<sys::LV2_Atom_Vector>()
                    + size_of::<i32>() * 9
                    + 4
                    + size_of::<sys::LV2_Atom_Int>()
            );

            let (vector, space) = space.split_at(size_of::<sys::LV2_Atom_Vector>());
            let vector = unsafe { &*(vector.as_ptr() as *const sys::LV2_Atom_Vector) };
            assert_eq!(vector.atom.type_, urids.vector);
            assert_eq!(
                vector.atom.size as usize,
                size_of::<sys::LV2_Atom_Vector_Body>() + size_of::<i32>() * 9
            );
            assert_eq!(vector.body.child_size as usize, size_of::<i32>());
            assert_eq!(vector.body.child_type, urids.int);

            let (vector_items, space) = space.split_at(size_of::<i32>() * 9);
            let vector_items =
                unsafe { std::slice::from_raw_parts(vector_items.as_ptr() as *const i32, 9) };
            assert_eq!(vector_items, &[17; 9]);

            // Skip the 4 padding bytes noted above.
            let (_, space) = space.split_at(4);

            let (int, _) = space.split_at(size_of::<sys::LV2_Atom_Int>());
            let int = unsafe { &*(int.as_ptr() as *const sys::LV2_Atom_Int) };
            assert_eq!(int.atom.type_, urids.int);
            assert_eq!(int.atom.size as usize, size_of::<i32>());
            assert_eq!(int.body, 42);
        }

        // reading
        {
            let space = Space::from_slice(raw_space.as_ref());
            let (body, _) = space.split_atom_body(urids.tuple).unwrap();

            let items: Vec<UnidentifiedAtom> = Tuple::read(body, ()).unwrap().collect();
            assert_eq!(items[0].read(urids.vector, urids.int).unwrap(), [17; 9]);
            assert_eq!(items[1].read(urids.int, ()).unwrap(), 42);
        }
    }
}
33.432584
186
0.558226
22f99678c7c5440493f9b4829f359e23dbc5ad20
47,819
// Nightly features used throughout the crate (this crate requires a nightly
// toolchain of its era).
#![feature(
    drain_filter,
    exact_size_is_empty,
    option_expect_none,
    duration_zero,
    clamp
)]
#![deny(broken_intra_doc_links)]

use {
    anyhow::*,
    crossbeam_channel::{Receiver, Sender},
    derivative::*,
    hashbrown::HashMap,
    nalgebra as na,
    rlua::prelude::*,
    serde::{Deserialize, Serialize},
    smallvec::SmallVec,
    std::{
        any::Any,
        cmp::Ordering,
        collections::BinaryHeap,
        error::Error as StdError,
        fmt,
        io::{Read, Write},
        iter,
    },
    string_cache::DefaultAtom,
    thunderdome::{Arena, Index},
};

// Interned string type used for event names and similar small identifiers.
pub type Atom = DefaultAtom;

pub mod api;
pub mod assets;
pub mod chunked_grid;
pub mod components;
pub mod conf;
pub mod dependency_graph;
pub mod dispatcher;
pub mod ecs;
pub mod event;
pub mod filesystem;
pub mod graphics;
pub mod hierarchy;
pub mod input;
pub mod math;
pub mod path_clean;
pub mod persist;
pub mod resources;
pub mod scene;
pub mod sprite;
pub mod systems;
pub mod tiled;
pub mod timer;
pub mod transform;
pub mod vfs;

/// One-stop `use sludge::prelude::*;` for downstream crates.
pub mod prelude {
    pub use anyhow::*;
    pub use inventory;
    pub use rlua::prelude::*;

    pub use crate::{
        api::LuaEntity,
        ecs::*,
        math::*,
        resources::{BorrowExt, OwnedResources, Resources, SharedResources, UnifiedResources},
        Scheduler, SludgeLuaContextExt, SludgeResultExt, Space, System,
    };

    pub use sludge_macros::*;
}

#[doc(hidden)]
pub use {anyhow, inventory, nalgebra, ncollide2d, rlua, rlua_serde, serde, sludge_macros::*};

// Items re-exported under a stable path for the procedural macros to reference.
#[doc(hidden)]
pub mod sludge {
    #[doc(hidden)]
    pub use {
        crate::ecs::{Entity, FlaggedComponent, ScContext, SmartComponent},
        inventory,
        std::any::TypeId,
    };
}

#[doc(hidden)]
pub use crate::sludge::*;

use crate::{api::EntityUserDataRegistry, dispatcher::Dispatcher, ecs::World, resources::*};

/// Extension trait for `Result`: log an error (without consuming the result)
/// and pass it through, so failures can be recorded mid-chain.
pub trait SludgeResultExt: Sized {
    type Ok;
    type Err;

    /// Log the `Err` variant (if any) at `level` under `target`, then return
    /// `self` unchanged.
    fn log_err(self, target: &str, level: log::Level) -> Self
    where
        Self::Err: fmt::Display;

    /// Convenience: `log_err` at `Warn` level.
    fn log_warn_err(self, target: &str) -> Self
    where
        Self::Err: fmt::Display,
    {
        self.log_err(target, log::Level::Warn)
    }

    /// Convenience: `log_err` at `Error` level.
    fn log_error_err(self, target: &str) -> Self
    where
        Self::Err: fmt::Display,
    {
        self.log_err(target, log::Level::Error)
    }
}

impl<T, E: fmt::Debug> SludgeResultExt for Result<T, E> {
    type Ok = T;
    type Err = E;

    #[track_caller]
    fn log_err(self, target: &str, level: log::Level) -> Self
    where
        E: fmt::Display,
    {
        if let Err(ref e) = &self {
            log::log!(target: target, level, "{:?}", e);
        }

        self
    }
}

// Registry key under which the `UnifiedResources` handle is stashed inside the
// Lua state, so any `LuaContext` can get back at the host resources.
const RESOURCES_REGISTRY_KEY: &'static str = "sludge.resources";

/// Extension trait adding sludge's resource access and thread-scheduling
/// operations directly onto `LuaContext`.
pub trait SludgeLuaContextExt<'lua>: Sized {
    /// Fetch the `UnifiedResources` handle stored in this Lua state's registry.
    fn resources(self) -> UnifiedResources<'static>;

    /// Spawn a coroutine on the scheduler queue; see `SchedulerQueue::spawn`.
    fn spawn<T, U>(self, task: T, args: U) -> LuaResult<LuaThread<'lua>>
    where
        T: ToLua<'lua>,
        U: ToLuaMulti<'lua>;

    /// Broadcast a named event; see `SchedulerQueue::broadcast`.
    fn broadcast<S, T>(self, event_name: S, args: T) -> LuaResult<()>
    where
        S: AsRef<str>,
        T: ToLuaMulti<'lua>;

    /// Wake one specific thread; see `SchedulerQueue::notify`.
    fn notify<T>(self, thread: LuaThread<'lua>, args: T) -> LuaResult<()>
    where
        T: ToLuaMulti<'lua>;

    /// Signal a thread to terminate; see `SchedulerQueue::kill`.
    fn kill<T>(self, thread: LuaThread<'lua>, args: T) -> LuaResult<()>
    where
        T: ToLuaMulti<'lua>;
}

impl<'lua> SludgeLuaContextExt<'lua> for LuaContext<'lua> {
    fn resources(self) -> UnifiedResources<'static> {
        // Panics if the registry entry is missing — a `Space` always installs
        // it at construction, so absence is a programmer error.
        self.named_registry_value::<_, UnifiedResources>(RESOURCES_REGISTRY_KEY)
            .with_context(|| anyhow!("error while extracing resources from Lua registry"))
            .unwrap()
    }

    fn spawn<T, U>(self, task: T, args: U) -> LuaResult<LuaThread<'lua>>
    where
        T: ToLua<'lua>,
        U: ToLuaMulti<'lua>,
    {
        self.fetch_one::<SchedulerQueue>()?
            .borrow()
            .spawn(self, task, args)
    }

    fn broadcast<S: AsRef<str>, T: ToLuaMulti<'lua>>(
        self,
        event_name: S,
        args: T,
    ) -> LuaResult<()> {
        self.fetch_one::<SchedulerQueue>()?
            .borrow()
            .broadcast(self, event_name, args)
    }

    fn notify<T: ToLuaMulti<'lua>>(self, thread: LuaThread<'lua>, args: T) -> LuaResult<()> {
        self.fetch_one::<SchedulerQueue>()?
            .borrow()
            .notify(self, thread, args)
    }

    fn kill<T: ToLuaMulti<'lua>>(self, thread: LuaThread<'lua>, args: T) -> LuaResult<()> {
        self.fetch_one::<SchedulerQueue>()?
            .borrow()
            .kill(self, thread, args)
    }
}

// `LuaContext` acts as a `Resources` view by proxying to the registry-stored
// handle; direct whole-set borrows are deliberately unsupported.
impl<'lua> Resources<'static> for LuaContext<'lua> {
    fn borrow(&self) -> atomic_refcell::AtomicRef<OwnedResources<'static>> {
        unimplemented!("unimplementable for LuaContext");
    }

    fn borrow_mut(&self) -> atomic_refcell::AtomicRefMut<OwnedResources<'static>> {
        unimplemented!("unimplementable for LuaContext");
    }

    fn fetch_one<T: Fetchable>(&self) -> Result<Shared<'static, T>, resources::NotFound> {
        self.resources().fetch_one()
    }

    fn fetch<T: FetchAll<'static>>(&self) -> Result<T::Fetched, resources::NotFound> {
        self.resources().fetch::<T>()
    }
}

/// A unit of work run by the `Dispatcher`: optional one-time `init`, then
/// `update` every dispatch.
pub trait System {
    /// One-time setup hook; default implementation does nothing.
    fn init(
        &self,
        _lua: LuaContext,
        _local: &mut OwnedResources,
        _global: Option<&SharedResources>,
    ) -> Result<()> {
        Ok(())
    }

    /// Called once per dispatch with the combined local/global resources.
    fn update(&self, lua: LuaContext, resources: &UnifiedResources) -> Result<()>;
}

/// Top-level container tying together a Lua state, its resources, and the
/// built-in maintenance systems (world events, hierarchy, transforms).
#[derive(Derivative)]
#[derivative(Debug)]
pub struct Space {
    #[derivative(Debug = "ignore")]
    lua: Lua,
    #[derivative(Debug = "ignore")]
    resources: UnifiedResources<'static>,
    #[derivative(Debug = "ignore")]
    maintainers: Dispatcher<'static>,
}

impl Space {
    /// Create a `Space` with a fresh, empty set of global resources.
    pub fn new() -> Result<Self> {
        Self::with_global_resources(SharedResources::new())
    }

    /// Create a `Space` sharing the given global resources: builds the Lua
    /// state (with the Eris stdlib for persistence), seeds local resources
    /// (World, Scheduler + queue handle, userdata registry), installs the
    /// resource handle + API into Lua, registers the built-in maintenance
    /// systems, and runs one maintain pass.
    pub fn with_global_resources(global: SharedResources<'static>) -> Result<Self> {
        use rlua::StdLib;
        let lua = Lua::new_with(
            StdLib::BASE
                | StdLib::COROUTINE
                | StdLib::TABLE
                | StdLib::STRING
                | StdLib::UTF8
                | StdLib::MATH
                | StdLib::ERIS,
        );
        let mut local = OwnedResources::new();

        local.insert(World::new());
        let scheduler = lua.context(Scheduler::new)?;
        let queue_handle = scheduler.queue().clone();
        local.insert(scheduler);
        local.insert(queue_handle);
        local.insert(EntityUserDataRegistry::new());

        let local = SharedResources::from(local);
        let resources = UnifiedResources { local, global };

        lua.context(|lua_ctx| -> Result<_> {
            lua_ctx.set_named_registry_value(RESOURCES_REGISTRY_KEY, resources.clone())?;
            crate::api::load(lua_ctx)?;
            Ok(())
        })?;

        let mut this = Self {
            lua,
            resources,
            maintainers: Dispatcher::new(),
        };

        // Built-in systems: hierarchy depends on world events; transforms on both.
        this.register(crate::systems::WorldEventSystem, "WorldEvent", &[])?;
        this.register(
            crate::systems::DefaultHierarchySystem::new(),
            "Hierarchy",
            &["WorldEvent"],
        )?;
        this.register(
            crate::systems::DefaultTransformSystem::new(),
            "Transform",
            &["WorldEvent", "Hierarchy"],
        )?;

        let resources = &this.resources;
        let maintainers = &mut this.maintainers;
        this.lua.context(|lua| {
            maintainers.refresh(
                lua,
                &mut resources.local.borrow_mut(),
                Some(&resources.global),
            )
        })?;

        this.maintain()?;

        Ok(this)
    }

    /// Register a maintenance system under `name`, running after `deps`.
    pub fn register<S>(&mut self, system: S, name: &str, deps: &[&str]) -> Result<()>
    where
        S: System + 'static,
    {
        self.maintainers.register(system, name, deps)
    }

    /// Run one pass of the registered maintenance systems.
    pub fn maintain(&mut self) -> Result<()> {
        let Self {
            lua,
            maintainers,
            resources,
        } = self;
        lua.context(|lua| maintainers.update(lua, resources))
    }

    pub fn fetch<T: FetchAll<'static>>(&self) -> Result<T::Fetched, NotFound> {
        self.resources.fetch::<T>()
    }

    pub fn fetch_one<T: Any + Send + Sync>(&self) -> Result<Shared<'static, T>, NotFound> {
        self.resources.fetch_one()
    }

    pub fn resources(&self) -> &UnifiedResources<'static> {
        &self.resources
    }

    pub fn lua(&self) -> &Lua {
        &self.lua
    }

    /// Refresh an *external* dispatcher against this space's resources.
    pub fn refresh(&self, dispatcher: &mut Dispatcher) -> Result<()> {
        let local_resources = &mut *self.resources.local.borrow_mut();
        let global_resources = &self.resources.global;
        self.lua
            .context(|lua| dispatcher.refresh(lua, local_resources, Some(global_resources)))
    }

    /// Run an *external* dispatcher with this space's Lua state and resources.
    pub fn dispatch(&self, dispatcher: &mut Dispatcher) -> Result<()> {
        self.lua
            .context(|lua| dispatcher.update(lua, &self.resources))
    }

    #[inline]
    pub fn world(&self) -> Result<Shared<'static, World>, NotFound> {
        self.fetch_one()
    }

    #[inline]
    pub fn scheduler(&self) -> Result<Shared<'static, Scheduler>, NotFound> {
        self.fetch_one()
    }

    /// Serialize this space's state via the `persist` module.
    pub fn save<W: Write>(&self, writer: W) -> Result<()> {
        self.lua.context(|lua| persist::persist(lua, self, writer))
    }

    /// Restore this space's state via the `persist` module.
    pub fn load<R: Read>(&self, reader: R) -> Result<()> {
        self.lua
            .context(|lua| persist::unpersist(lua, self, reader))
    }
}

// (doc comment for `Wakeup` continues on the following chunk line)
/// A pending
wake-up for a thread, living in the scheduler's queue. This /// can represent a thread which is scheduled for a given tick, or a thread /// which was waiting for an event which was previously broadcast this tick /// and is ready to be run. /// /// A given thread may have multiple wake-ups pointing to it in the scheduler's /// queue at any time, for example if it's waiting on two events which are /// both broadcast on the same update, or if an event is broadcast and then /// the thread is notified. The behavior of the thread, whether it's woken /// multiple times or only once, depends on the behavior of the type of /// wakeups involved: a `Notify` wakeup will not invalidate other wakeups, /// but a `Broadcast` or `Timed` wakeup will invalidate other `Broadcast` /// or `Timed` wakeups (but *not* a `Notify` wakeup.) One way to think about /// it is that any number of `Broadcast` or `Timed` wakeups targeting a /// specific thread will wake up a thread at most once on a given update, /// while `Notify` wakeups will resume the target thread no matter what happens /// before or after (unless the thread throws an error and dies or something.) /// /// An event wakeup will always appear as if it's scheduled for tick 0, and /// as such will always be at the front of the priority queue. /// /// Wakeups may not point to a valid thread. When a thread is resumed, all /// previous indices referring to it become invalidated. Popping a wakeup /// which no longer has a valid thread is not an error, but simply to be /// ignored. #[derive(Debug)] pub enum Wakeup { Call { thread: Index, args: Option<Index>, }, Notify { thread: Index, args: Option<Index>, }, Kill { thread: Index, args: Option<Index>, }, Broadcast { thread: Index, name: EventName, args: Option<Index>, }, Timed { thread: Index, scheduled_for: u64, }, } impl Wakeup { pub fn scheduled_for(&self) -> u64 { match self { Self::Call { .. } | Self::Notify { .. } | Self::Kill { .. } | Self::Broadcast { .. 
} => 0, Self::Timed { scheduled_for, .. } => *scheduled_for, } } pub fn thread(&self) -> Index { match self { Self::Call { thread, .. } | Self::Notify { thread, .. } | Self::Kill { thread, .. } | Self::Broadcast { thread, .. } | Self::Timed { thread, .. } => *thread, } } } impl PartialEq for Wakeup { fn eq(&self, rhs: &Self) -> bool { self.scheduled_for() == rhs.scheduled_for() && self.thread() == rhs.thread() } } impl Eq for Wakeup {} impl PartialOrd for Wakeup { fn partial_cmp(&self, rhs: &Self) -> Option<Ordering> { Some(self.cmp(rhs)) } } /// We want wakeups with *lesser* wakeup times to be "greater" than wakups with later /// times, so that the stdlib `BinaryHeap` (which is a max-heap) gives us the proper /// result. // FIXME(sleffy): this is fucking horrid, dude. impl Ord for Wakeup { fn cmp(&self, rhs: &Self) -> Ordering { if matches!(self, Self::Call{..}) || matches!(rhs, Self::Call{..}) { if matches!(self, Self::Call{..}) && matches!(rhs, Self::Call{..}) { return Ordering::Equal; } else if matches!(self, Self::Call{..}) { return Ordering::Greater; } else if matches!(rhs, Self::Call{..}) { return Ordering::Less; } } else if matches!(self, Self::Kill{..}) || matches!(rhs, Self::Kill{..}) { if matches!(self, Self::Kill{..}) && matches!(rhs, Self::Kill{..}) { return Ordering::Equal; } else if matches!(self, Self::Kill{..}) { return Ordering::Greater; } else if matches!(rhs, Self::Kill{..}) { return Ordering::Less; } } else if matches!(self, Self::Notify{..}) || matches!(rhs, Self::Notify{..}) { if matches!(self, Self::Notify{..}) && matches!(rhs, Self::Notify{..}) { return Ordering::Equal; } else if matches!(self, Self::Notify{..}) { return Ordering::Greater; } else if matches!(rhs, Self::Notify{..}) { return Ordering::Less; } } self.scheduled_for() .cmp(&rhs.scheduled_for()) .reverse() .then_with(|| self.thread().cmp(&rhs.thread())) } } /// The type of an event name. Internally, it's implemented as an interned string. 
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
pub struct EventName(Atom);

// Registry keys for the Lua values passed along with an event; small-vector
// optimized for the common few-argument case.
pub type EventArgs = SmallVec<[LuaRegistryKey; 3]>;

/// The type of an event to be sent into a scheduler's queue.
///
/// You shouldn't usually need to construct this type by hand; there are convenience
/// methods which will both construct the `Event` and push it onto the queue by
/// themselves.
#[derive(Debug)]
pub enum Event {
    Broadcast {
        name: EventName,
        args: Option<EventArgs>,
    },
    Notify {
        thread: LuaRegistryKey,
        args: Option<EventArgs>,
    },
    Kill {
        thread: LuaRegistryKey,
        args: Option<EventArgs>,
    },
    Call {
        thread: LuaRegistryKey,
        args: Option<EventArgs>,
    },
}

/// The `SchedulerQueue` is one half of a concurrent MPSC queue corresponding to
/// a specific `Scheduler`. It can be cheaply cloned and send to other threads
/// or into the Lua state for use inside userdata.
#[derive(Debug, Clone)]
pub struct SchedulerQueue {
    spawn: Sender<LuaRegistryKey>,
    event: Sender<Event>,
}

impl SchedulerQueue {
    /// Push an already encoded `Event` into the event queue.
    ///
    /// If you don't have an `Event` at hand for some reason or another,
    /// you can use [`broadcast`](SchedulerQueue::broadcast) or
    /// [`notify`](SchedulerQueue::notify) for a simpler and more convenient
    /// API.
    pub fn push_event(&self, event: Event) {
        // NOTE(review): the channels are created with `bounded(CHANNEL_BOUND)`,
        // so `try_send` *can* fail when full despite this message — TODO
        // confirm the intended capacity semantics.
        self.event
            .try_send(event)
            .expect("unbounded channel should never fail to send");
    }

    /// Push a Lua thread which is already encoded into a registry key into
    /// the spawn queue.
    ///
    /// If you don't have a registry key handy or you're working in a Lua
    /// context, there's the more convenient [`spawn`](SchedulerQueue::spawn)
    /// method. Most of the time that's probably what you'll want.
    pub fn push_spawn(&self, spawn: LuaRegistryKey) {
        self.spawn
            .try_send(spawn)
            .expect("unbounded channel should never fail to send");
    }

    /// Spawn a Lua thread, pushing it into the scheduler's queue.
    ///
    /// This method will look at the input type and coerce it into
    /// being a Lua thread, and the resulting thread value will
    /// be returned. It will successfully convert either already
    /// constructed threads or functions, where in the latter case
    /// the function is very simply turned into a thread using a
    /// method equivalent to the basic Lua `coroutine.create`
    /// API function.
    pub fn spawn<'lua, T, U>(
        &self,
        lua: LuaContext<'lua>,
        task: T,
        args: U,
    ) -> LuaResult<LuaThread<'lua>>
    where
        T: ToLua<'lua>,
        U: ToLuaMulti<'lua>,
    {
        let thread = match task.to_lua(lua)? {
            LuaValue::Function(f) => lua.create_thread(f)?,
            LuaValue::Thread(th) => th,
            _ => {
                return Err(LuaError::FromLuaConversionError {
                    to: "thread or function",
                    from: "lua value",
                    message: None,
                })
            }
        };

        // Register the thread with the scheduler, then immediately queue a
        // `Call` so it gets its first resume with `args`.
        let key = lua.create_registry_value(thread.clone())?;
        self.push_spawn(key);
        self.call(lua, thread.clone(), args)?;

        Ok(thread)
    }

    /// Broadcast an event to all threads waiting for it.
    ///
    /// Events have string names and take any number of arguments.
    /// The arguments are stored into the Lua registry for safety
    /// and subsequently pushed into the event queue as an `Event::Broadcast`.
    ///
    /// When threads waiting on events are resumed, the event's
    /// arguments are returned from the yield call which caused the thread
    /// to wait. The same values are returned from *every* yield call
    /// which waits on the same event, so any changes to the yielded
    /// arguments will be seen by other threads waiting on the same
    /// event during the same broadcast, in arbitrary order.
    pub fn broadcast<'lua, S: AsRef<str>, T: ToLuaMulti<'lua>>(
        &self,
        lua: LuaContext<'lua>,
        event_name: S,
        args: T,
    ) -> LuaResult<()> {
        let args = args.to_lua_multi(lua)?;
        let event = Event::Broadcast {
            name: EventName(Atom::from(event_name.as_ref())),
            // Empty argument lists are encoded as `None` so the scheduler can
            // skip registry round-trips entirely.
            args: if args.is_empty() {
                None
            } else {
                Some(
                    args.into_iter()
                        .map(|v| lua.create_registry_value(v))
                        .collect::<LuaResult<_>>()?,
                )
            },
        };
        self.push_event(event);
        Ok(())
    }

    /// Notify a single specific thread to continue execution the next
    /// time the scheduler is updated.
    ///
    /// This function will wake a thread *regardless* of whether it has
    /// previously been woken on a given scheduler update. If called
    /// multiple times with same *or* different arguments, it will wake
    /// that thread as many times as it is called; unlike for event
    /// broadcasts and "sleep" calls/timed wakeups, notify has *no*
    /// protection against double-waking (waking the same thread twice
    /// when it needed to only be woken once on an update.) A notified
    /// thread will have any timed wakeups or events it was waiting on
    /// invalidated, and will not subsequently be woken by those events
    /// unless the next yield also requests it.
    pub fn notify<'lua, T: ToLuaMulti<'lua>>(
        &self,
        lua: LuaContext<'lua>,
        thread: LuaThread<'lua>,
        args: T,
    ) -> LuaResult<()> {
        let args = args.to_lua_multi(lua)?;
        let thread = lua.create_registry_value(thread)?;
        let event = Event::Notify {
            thread,
            args: if args.is_empty() {
                None
            } else {
                Some(
                    args.into_iter()
                        .map(|v| lua.create_registry_value(v))
                        .collect::<LuaResult<_>>()?,
                )
            },
        };
        self.push_event(event);
        Ok(())
    }

    // Internal: queue the initial resume for a freshly spawned thread.
    fn call<'lua, T: ToLuaMulti<'lua>>(
        &self,
        lua: LuaContext<'lua>,
        thread: LuaThread<'lua>,
        args: T,
    ) -> LuaResult<()> {
        let args = args.to_lua_multi(lua)?;
        let thread = lua.create_registry_value(thread)?;
        let event = Event::Call {
            thread,
            args: if args.is_empty() {
                None
            } else {
                Some(
                    args.into_iter()
                        .map(|v| lua.create_registry_value(v))
                        .collect::<LuaResult<_>>()?,
                )
            },
        };
        self.push_event(event);
        Ok(())
    }

    /// Send a "kill" signal to a thread, allowing it to resume once more
    /// before it's terminated.
    pub fn kill<'lua, T: ToLuaMulti<'lua>>(
        &self,
        lua: LuaContext<'lua>,
        thread: LuaThread<'lua>,
        args: T,
    ) -> LuaResult<()> {
        let args = args.to_lua_multi(lua)?;
        let thread = lua.create_registry_value(thread)?;
        let event = Event::Kill {
            thread,
            args: if args.is_empty() {
                None
            } else {
                Some(
                    args.into_iter()
                        .map(|v| lua.create_registry_value(v))
                        .collect::<LuaResult<_>>()?,
                )
            },
        };
        self.push_event(event);
        Ok(())
    }
}

/// The scheduler controls the execution of Lua "threads", under a cooperative
/// concurrency model. It is a priority queue of coroutines to be resumed,
/// ordered by how soon they should be woken. It also supports waking threads
/// via string-keyed events, with Lua-valued arguments for event broadcasts.
///
/// # Scenes, `Space`s and the `Scheduler`
///
/// By default, all `Space`s are initialized with a `Scheduler` in their local
/// resources. This `Scheduler` is expected to be used for scripting purposes
/// irrespective of whatever state the user's application is in; it can be updated
/// at the user's discretion in their main loop, or simply not used at all. However,
/// it should be noted that the main `Scheduler` in the space's resources is what
/// is manipulated by the `sludge.thread` Lua API's `spawn`, `broadcast`, `notify`,
/// and `kill`. methods.
///
/// Sometimes, it may be useful to create secondary schedulers, for example in
/// order to script events that have to be individually paused and stopped from
/// updating for some purpose. For example, during a bossfight, a boss shouldn't
/// have its time "advance" at all, and so if its AI is scripted using threading
/// and the scheduler, the scheduler somehow needs to be prevented from updating
/// during that time. For that purpose it is useful to create a scheduler which is
/// used to schedule *only* combat-related threads, so that the space's scheduler
/// can always be updated and the combat scheduler can be paused during a scripted
/// event or otherwise.
///
/// # Persistence and the `Scheduler`
///
/// In order to robustly save/load the state of a `Space`, it is necessary to
/// persist/load the scheduler itself. There are a few things to note about this.
///
/// Persistence of Lua values is implemented through Eris, which is capable of
/// robustly serializing *any* pure Lua value, up to and including coroutines
/// and closures. Userdata cannot be persisted, and is serialized through a sort
/// of bridging which persists userdata objects as closures which reconstruct
/// equivalent objects.
///
/// It is not possible for Eris to persist the currently running thread. As a
/// corollary, it seems like a good idea for serialization to be forced only
/// outside of Lua, and provide in Lua only an API which *requests* serialization
/// asynchronously.
///
/// Persisting a `Space`'s state involves serializing data from the ECS, among
/// other sources. The ECS is particularly troublesome because it references through
/// indices which are not stable across instances of a program. As a result,
/// we must leverage Eris's "permanents" table, which allows for custom handling
/// of non-trivial data on a per-value basis. The permanents table will have
/// to be generated separately, and will contain all userdata and bound functions
/// from Sludge's API as well as mappings from userdata to tables containing the
/// necessary data to reconstruct them.
///
/// The scheduler itself can be represented purely in Lua. In order to serialize
/// it, it may be beneficial to convert the scheduler to a Lua representation to
/// be bundled alongside all other Lua data and then serialized in the context of
/// the permanents table. Whether it should be legal to serialize a scheduler
/// with pending non-timed wakeups is an unanswered question. If the answer is "yes"
/// then it actually does become possible to serialize "synchronously" from Lua
/// by setting a flag, yielding from the requesting thread, breaking from the
/// scheduler, and then immediately serializing the resulting state, with the
/// requesting thread given a special wakeup priority.
#[derive(Debug)]
pub struct Scheduler {
    /// Priority queue of scheduled threads, ordered by wakeup.
    queue: BinaryHeap<Wakeup>,

    /// Hashmap of threads which aren't currently scheduled. These
    /// will be woken when the scheduler is notified of an event,
    /// and added to the queue with `wakeup == 0`.
    waiting: HashMap<EventName, Vec<Index>>,

    /// The generational arena allows us to ensure that threads that
    /// are waiting for multiple events and also possibly a timer don't
    /// get woken up multiple times.
    threads: Arena<LuaRegistryKey>,

    /// On the Lua side, this table maps threads (coroutines) to slots
    /// in the `threads` arena, *not* generational indices, so that
    /// they're always valid indices as long as the thread is alive.
    ///
    /// Useful for waking threads "by name" (by the coroutine/thread
    /// key itself.)
    slots: LuaRegistryKey,

    /// `EventArgs` are bundles of Lua multivalues, and having them in
    /// an arena means they can be 1.) shared between different `Wakeup`s
    /// and 2.) we clear the entire arena all in one go later!
    event_args: Arena<EventArgs>,

    /// Receiving half of the shared channel for sending events to wake up
    /// sleeping threads.
    event_receiver: Receiver<Event>,

    /// Receiving half of the shared channel for sending new threads to be
    /// scheduled.
    spawn_receiver: Receiver<LuaRegistryKey>,

    /// Sending halves of the shared channels for sending events/new threads.
    senders: SchedulerQueue,

    /// "Discrete" time in "ticks" (60ths of a second, 60FPS)
    discrete: u64,

    /// "Continuous" time used to convert from seconds to ticks
    /// (stored in 60ths of a second, "consumed" and converted
    /// to discrete time on update, used to measure how many ticks
    /// to run per a given update)
    continuous: f32,
}

impl Scheduler {
    // Capacity of the spawn/event channels handed out via `SchedulerQueue`.
    const CHANNEL_BOUND: usize = 4096;

    /// Construct a new scheduler in the given Lua context. Schedulers are tied
    /// to a given Lua state and cannot be moved from one to another; they store
    /// a significant amount of state in the registry of their bound Lua state.
    pub fn new(lua: LuaContext) -> Result<Self> {
        let (spawn_sender, spawn_channel) = crossbeam_channel::bounded(Self::CHANNEL_BOUND);
        let (event_sender, event_channel) = crossbeam_channel::bounded(Self::CHANNEL_BOUND);

        let senders = SchedulerQueue {
            spawn: spawn_sender,
            event: event_sender,
        };

        // The slots table lives in the Lua registry; see the `slots` field doc.
        let slots = lua.create_registry_value(lua.create_table()?)?;

        Ok(Self {
            queue: BinaryHeap::new(),
            waiting: HashMap::new(),
            threads: Arena::new(),
            slots,
            event_args: Arena::new(),
            event_receiver: event_channel,
            spawn_receiver: spawn_channel,
            senders,
            discrete: 0,
            continuous: 0.,
        })
    }

    /// Check to see if the scheduler is "idle", meaning that if `update` were to run
    /// and step the scheduler forward, no threads would be resumed on that step.
    ///
    /// The scheduler is considered idle only if no events are waiting to be resumed
    /// on the current step and there are no events or threads to be spawned waiting in
    /// its queue.
    pub fn is_idle(&self) -> bool {
        let nothing_in_queue =
            self.queue.is_empty() || self.queue.peek().unwrap().scheduled_for() > self.discrete;
        let no_pending_events = self.spawn_receiver.is_empty() && self.event_receiver.is_empty();
        nothing_in_queue && no_pending_events
    }

    /// Returns a reference to the scheduler's queue handle, for spawning threads and
    /// events.
    pub fn queue(&self) -> &SchedulerQueue {
        &self.senders
    }

    /// Drains the spawn channel, pushing new threads onto the scheduler's heap with a wakeup
    /// time of 0 (so that they're immediately resumed on the next run through the queue)
    /// and inserting them into the reverse-lookup table (slots).
    pub(crate) fn queue_all_spawned<'lua>(
        &mut self,
        lua: LuaContext<'lua>,
        slots: &LuaTable<'lua>,
    ) -> Result<()> {
        for key in self.spawn_receiver.try_iter() {
            // A registry key that no longer decodes to a thread is logged and
            // skipped rather than aborting the whole drain.
            let thread = match lua.registry_value::<LuaThread>(&key) {
                Ok(t) => t,
                Err(e) => {
                    let c = anyhow!("failed to spawn thread: failed to extract Lua thread from registry key `{:?}`", key);
                    let err = Error::from(e).context(c);
                    log::error!("error queuing thread: {:#?}", err);
                    continue;
                }
            };
            let index = self.threads.insert(key);
            // Record the arena *slot* (stable for the thread's lifetime) in
            // the Lua-side reverse-lookup table.
            slots.set(thread, index.slot())?;

            // NOTE(review): the explicit timed wakeup is disabled — spawned
            // threads appear to get their first resume via the `Call` event
            // queued by `SchedulerQueue::spawn` instead; TODO confirm.
            // self.queue.push(Wakeup::Timed {
            //     thread: index,
            //     scheduled_for: 0,
            // });
        }

        Ok(())
    }

    /// Drains the event channel and adds relevant `Wakeup`s to the queue.
    pub(crate) fn poll_events_and_queue_all_notified<'lua>(
        &mut self,
        lua: LuaContext<'lua>,
        slots: &LuaTable<'lua>,
    ) -> Result<()> {
        let Self {
            queue,
            threads,
            waiting,
            event_args,
            event_receiver: event_channel,
            ..
        } = self;

        for event in event_channel.try_iter() {
            match event {
                Event::Broadcast { name, args } => {
                    // Stash the argument bundle once; every woken thread's
                    // `Wakeup` shares the same `event_args` index.
                    let event_index = args.map(|args| event_args.insert(args));
                    if let Some(running_threads) = waiting.get_mut(&name) {
                        for index in running_threads.drain(..) {
                            // `None` will get returned here if the thread's already been rescheduled.
                            // `threads.increment_gen` invalidates all of the indices which previously
                            // pointed to this thread.
                            if let Some(new_index) = threads.invalidate(index) {
                                queue.push(Wakeup::Broadcast {
                                    thread: new_index,
                                    name: name.clone(),
                                    args: event_index,
                                });
                            }
                        }
                    }
                }
                Event::Notify { thread, args } => {
                    let event_index = args.map(|args| event_args.insert(args));
                    let value = lua.registry_value(&thread)?;
                    let maybe_slot = slots.get::<LuaThread, Option<u32>>(value)?;
                    // Thread may have died by the time we get around to notifying it.
                    if let Some(slot) = maybe_slot {
                        let index = threads.contains_slot(slot).unwrap();
                        queue.push(Wakeup::Notify {
                            thread: threads.invalidate(index).unwrap(),
                            args: event_index,
                        });
                    }
                }
                Event::Kill { thread, args } => {
                    let event_index = args.map(|args| event_args.insert(args));
                    let value = lua.registry_value(&thread)?;
                    let maybe_slot = slots.get::<LuaThread, Option<u32>>(value)?;
                    // Thread may have died by the time we get around to notifying it.
                    if let Some(slot) = maybe_slot {
                        let index = threads.contains_slot(slot).unwrap();
                        queue.push(Wakeup::Kill {
                            thread: threads.invalidate(index).unwrap(),
                            args: event_index,
                        });
                    }
                }
                Event::Call { thread, args } => {
                    let event_index = args.map(|args| event_args.insert(args));
                    let value = lua.registry_value(&thread)?;
                    let maybe_slot = slots.get::<LuaThread, Option<u32>>(value)?;
                    // Thread may have died by the time we get around to notifying it.
                    if let Some(slot) = maybe_slot {
                        let index = threads.contains_slot(slot).unwrap();
                        queue.push(Wakeup::Call {
                            thread: threads.invalidate(index).unwrap(),
                            args: event_index,
                        });
                    }
                }
            }
        }

        Ok(())
    }

    /// Resume threads at the top of the heap until the heap contains only
    /// idle threads which do not want to be run on the current step.
    ///
    /// Threads which are woken are popped from the queue and then reinserted
    /// with a fresh `Wakeup` depending on what their yield value requests.
    pub(crate) fn run_all_queued<'lua>(
        &mut self,
        lua: LuaContext<'lua>,
        slots: &LuaTable<'lua>,
    ) -> Result<()> {
        while let Some(top) = self.queue.peek() {
            // If this thread isn't ready to wake up on this tick, then
            // none of the other threads in this queue are.
            if top.scheduled_for() > self.discrete {
                break;
            }

            let sleeping = self.queue.pop().unwrap();

            // Stale wakeups (invalidated index) are silently dropped here.
            if let Some(key) = self.threads.get(sleeping.thread()) {
                let thread = lua.registry_value::<LuaThread>(key)?;

                // Resume the thread with arguments shaped by the wakeup kind:
                //   Call      -> raw args
                //   Notify    -> (true, args...)
                //   Kill      -> (false, args...)
                //   Timed     -> (true)
                //   Broadcast -> (true, event_name, args...)
                let resumed = match &sleeping {
                    Wakeup::Call {
                        args: Some(args), ..
                    } => {
                        let args_unpacked = self.event_args[*args]
                            .iter()
                            .map(|key| lua.registry_value(key))
                            .collect::<Result<LuaMultiValue, _>>();
                        args_unpacked.and_then(|xs| thread.resume::<_, LuaMultiValue>(xs))
                    }
                    Wakeup::Call { args: None, .. } => thread.resume::<_, LuaMultiValue>(()),
                    Wakeup::Notify {
                        args: Some(args), ..
                    }
                    | Wakeup::Kill {
                        args: Some(args), ..
                    } => {
                        let args_unpacked = iter::once(Ok(LuaValue::Boolean(
                            matches!(sleeping, Wakeup::Notify { .. }),
                        )))
                        .chain(
                            self.event_args[*args]
                                .iter()
                                .map(|key| lua.registry_value(key)),
                        )
                        .collect::<Result<LuaMultiValue, _>>();
                        args_unpacked.and_then(|xs| thread.resume::<_, LuaMultiValue>(xs))
                    }
                    Wakeup::Notify { args: None, .. } | Wakeup::Kill { args: None, .. } => {
                        thread.resume::<_, LuaMultiValue>(matches!(sleeping, Wakeup::Notify { .. }))
                    }
                    Wakeup::Timed { .. } => thread.resume::<_, LuaMultiValue>(true),
                    Wakeup::Broadcast {
                        name,
                        args: Some(args),
                        ..
                    } => {
                        let args_unpacked = iter::once(Ok(LuaValue::Boolean(true)))
                            .chain(iter::once(
                                lua.create_string(name.0.as_ref()).map(LuaValue::String),
                            ))
                            .chain(
                                self.event_args[*args]
                                    .iter()
                                    .map(|key| lua.registry_value(key)),
                            )
                            .collect::<Result<LuaMultiValue, _>>();
                        args_unpacked.and_then(|xs| thread.resume::<_, LuaMultiValue>(xs))
                    }
                    Wakeup::Broadcast {
                        name, args: None, ..
                    } => thread.resume::<_, LuaMultiValue>((true, name.0.as_ref())),
                };

                let status = thread.status();
                match resumed {
                    // Thread yielded normally and is not being killed:
                    // reschedule it according to its yielded values.
                    Ok(mv) if status == LuaThreadStatus::Resumable
                        && !matches!(sleeping, Wakeup::Kill { .. }) =>
                    {
                        let new_index = self.threads.invalidate(sleeping.thread()).unwrap();

                        // Take the yielded values provided by the coroutine and turn
                        // them into events/wakeup times.
                        //
                        // If no values are provided, the thread will sleep until it is directly woken
                        // by a `Notify`.
                        for value in mv.into_iter() {
                            match value {
                                // If we see an integer, then treat it as ticks-until-next-wake.
                                LuaValue::Integer(i) => {
                                    self.queue.push(Wakeup::Timed {
                                        thread: new_index,
                                        // Threads aren't allowed to yield and resume on the same tick
                                        // forever.
                                        scheduled_for: self.discrete + na::max(i, 1) as u64,
                                    });
                                }
                                // If we see a float, then round it and treat it as ticks-until-next-wake.
                                LuaValue::Number(f) => {
                                    let i = f as i64;
                                    self.queue.push(Wakeup::Timed {
                                        thread: new_index,
                                        // Threads aren't allowed to yield and resume on the same tick
                                        // forever.
                                        scheduled_for: self.discrete + na::max(i, 1) as u64,
                                    });
                                }
                                // If we see a string, then treat it as an event which the thread
                                // wants to listen for.
                                LuaValue::String(lua_str) => {
                                    if let Ok(s) = lua_str.to_str() {
                                        let threads = self
                                            .waiting
                                            .entry(EventName(Atom::from(s)))
                                            .or_default();
                                        // Keep the waiter list sorted and free
                                        // of duplicates while replacing the
                                        // now-stale old index with the new one.
                                        match threads.binary_search(&sleeping.thread()) {
                                            Ok(i) => threads[i] = new_index,
                                            Err(i) if threads.get(i) != Some(&new_index) => {
                                                threads.insert(i, new_index)
                                            }
                                            _ => {}
                                        }
                                    }
                                }
                                other => {
                                    log::error!("unknown yield return value {:?}", other);
                                }
                            }
                        }
                    }
                    // Thread finished, or was resumed by a `Kill`: tear it down.
                    Ok(_) => {
                        // The `sludge.thread.yield` function is written to cause a graceful
                        // exit when passed a nil first value. We do this in order for the
                        // thread to present a "dead" or "finished" state if checked.
                        if status == LuaThreadStatus::Resumable
                            && !thread.resume::<_, ()>(()).is_err()
                        {
                            log::warn!("killed Lua thread returned non-error value");
                        }
                        slots.set(thread, LuaValue::Nil)?;
                        self.threads.remove(sleeping.thread());
                    }
                    // Thread errored: remove it and log the failure.
                    Err(lua_error) => {
                        slots.set(thread, LuaValue::Nil)?;
                        self.threads.remove(sleeping.thread());
                        match lua_error.source() {
                            Some(src) => log::error!(
                                "fatal error in Lua thread {:?}: {}",
                                sleeping.thread(),
                                src
                            ),
                            None => log::error!(
                                "fatal error in Lua thread {:?}: {}",
                                sleeping.thread(),
                                lua_error
                            ),
                        }
                    }
                }
            }
        }

        Ok(())
    }

    // (doc comment for the next method continues past this chunk)
    /// Run the scheduler for `dt` steps.
    ///
    /// The scheduler contains a very simple internal timestep which simply waits
    /// for accumulated time to be greater than 0., and steps the scheduler forward
    /// repeatedly, subtracting 1.
from the accumulated time until it's equal to /// or less than zero. /// /// To prevent infinite loops of threads spawning and yielding themselves, the /// scheduler internally has a loop cap which counts how many times the scheduler /// is resumed from the top (all queued events spawned and then spawn and event /// queues drained) on a given step. Once the loop cap is reached, the scheduler /// will emit a warning to the logger and it will halt the update for the given /// tick. This is not a panacea, and any time you see a warning that the loop /// cap was exceeded, it's a good idea to look back at your code and check to /// ensure there's nothing infinitely spawning/waking itself. The loop cap is /// currently hardcoded to 8, but may be made parameterizable in the future. pub fn update(&mut self, lua: LuaContext, dt: f32) -> Result<()> { let old_queue = lua.named_registry_value::<_, Option<LuaValue>>(api::SCHEDULER_QUEUE_REGISTRY_KEY)?; lua.set_named_registry_value(api::SCHEDULER_QUEUE_REGISTRY_KEY, self.senders.clone())?; let mut block = move || -> Result<()> { self.continuous += dt; let slots = lua.registry_value(&self.slots)?; while self.continuous > 0. { // Our core update step consists of two steps: // 1. Run all threads scheduled to run on or before the current tick. // 2. Check for threads spawned/woken by newly run threads. If there are new // threads to be run immediately, go to step 1. // // `LOOP_CAP` is our limit on how many times we go to step 1 in a given // tick. This stops us from hitting an infinitely spawning loop. 
const LOOP_CAP: usize = 8; for i in 0..LOOP_CAP { self.run_all_queued(lua, &slots)?; self.event_args.clear(); self.queue_all_spawned(lua, &slots)?; self.poll_events_and_queue_all_notified(lua, &slots)?; if self.is_idle() { break; } else if i == LOOP_CAP - 1 { log::warn!("trampoline loop cap exceeded"); } } self.continuous -= 1.; self.discrete += 1; } Ok(()) }; let result = block(); lua.expire_registry_values(); lua.set_named_registry_value(api::SCHEDULER_QUEUE_REGISTRY_KEY, old_queue)?; result } } impl LuaUserData for Scheduler { fn add_methods<'lua, T: LuaUserDataMethods<'lua, Self>>(methods: &mut T) { methods.add_method( "spawn", |lua, this, (task, args): (LuaValue, LuaMultiValue)| { this.queue().spawn(lua, task, args).to_lua_err() }, ); methods.add_method( "broadcast", |lua, this, (event_name, args): (LuaString, LuaMultiValue)| { this.queue() .broadcast(lua, event_name.to_str()?, args) .to_lua_err() }, ); methods.add_method( "notify", |lua, this, (thread, args): (LuaThread, LuaMultiValue)| { this.queue().notify(lua, thread, args).to_lua_err() }, ); methods.add_method( "kill", |lua, this, (thread, args): (LuaThread, LuaMultiValue)| { this.queue().kill(lua, thread, args).to_lua_err() }, ); methods.add_method_mut("update", |lua, this, ()| this.update(lua, 1.).to_lua_err()); methods.add_method("queue", |_lua, this, ()| Ok(this.queue().clone())); } } impl LuaUserData for SchedulerQueue { fn add_methods<'lua, T: LuaUserDataMethods<'lua, Self>>(methods: &mut T) { methods.add_method( "spawn", |lua, this, (task, args): (LuaValue, LuaMultiValue)| { this.spawn(lua, task, args).to_lua_err() }, ); methods.add_method( "broadcast", |lua, this, (event_name, args): (LuaString, LuaMultiValue)| { this.broadcast(lua, event_name.to_str()?, args).to_lua_err() }, ); methods.add_method( "notify", |lua, this, (thread, args): (LuaThread, LuaMultiValue)| { this.notify(lua, thread, args).to_lua_err() }, ); methods.add_method( "kill", |lua, this, (thread, args): (LuaThread, LuaMultiValue)| 
{ this.kill(lua, thread, args).to_lua_err() }, ); } }
36.812163
122
0.535227
ab7c7c6a64664d820795db12f14480d24d3a6374
2,817
// Copyright (C) 2020 Miklos Maroti // Licensed under the MIT license (see LICENSE) use crate::*; /// A quotient ring of an Euclidean domain by a principal ideal. #[derive(Clone, Debug)] pub struct QuotientRing<A> where A: EuclideanDomain, { base: A, modulo: A::Elem, } impl<A> QuotientRing<A> where A: EuclideanDomain, { /// Creates a new quotient ring from the given Euclidean domain and /// one of its element. pub fn new(base: A, modulo: A::Elem) -> Self { assert!(base.contains(&modulo)); QuotientRing { base, modulo } } /// Returns the base ring from which this ring was constructed. pub fn base(&self) -> &A { &self.base } /// Returns the modulo element from which this ring was constructed. pub fn modulo(&self) -> &A::Elem { &self.modulo } } impl<A> Domain for QuotientRing<A> where A: EuclideanDomain, { type Elem = A::Elem; fn contains(&self, elem: &Self::Elem) -> bool { self.base.reduced(elem, &self.modulo) } fn equals(&self, elem1: &Self::Elem, elem2: &Self::Elem) -> bool { self.base.equals(elem1, elem2) } } impl<A> Semigroup for QuotientRing<A> where A: EuclideanDomain, { fn mul(&self, elem1: &Self::Elem, elem2: &Self::Elem) -> Self::Elem { self.base.rem(&self.base.mul(elem1, elem2), &self.modulo) } } impl<A> Monoid for QuotientRing<A> where A: EuclideanDomain, { fn one(&self) -> Self::Elem { self.base.one() } fn is_one(&self, elem: &Self::Elem) -> bool { self.base.is_one(elem) } fn try_inv(&self, elem: &Self::Elem) -> Option<Self::Elem> { let (g, _, r) = self.base.extended_gcd(&self.modulo, elem); self.base.try_inv(&g).map(|a| self.mul(&a, &r)) } } impl<A> AbelianGroup for QuotientRing<A> where A: EuclideanDomain, { fn zero(&self) -> Self::Elem { self.base.zero() } fn neg(&self, elem: &Self::Elem) -> Self::Elem { self.base.rem(&self.base.neg(elem), &self.modulo) } fn add(&self, elem1: &Self::Elem, elem2: &Self::Elem) -> Self::Elem { self.base.rem(&self.base.add(elem1, elem2), &self.modulo) } } impl<A> UnitaryRing for QuotientRing<A> where A: EuclideanDomain {} 
#[cfg(test)]
mod tests {
    use super::*;

    /// Verifies that Z/1584 (1584 = 16 * 9 * 11) has exactly
    /// phi(1584) = phi(16) * phi(9) * phi(11) = 8 * 6 * 10 units,
    /// and that every inverse produced by `try_inv` is genuine.
    #[test]
    fn zstar_1584() {
        let ring = QuotientRing::new(I32, 1584); // 16 * 9 *11

        let units = (0..1584)
            .filter(|a| {
                // Every canonical representative must belong to the ring.
                assert!(ring.contains(a));
                *a != 0
            })
            .filter_map(|a| ring.try_inv(&a).map(|b| (a, b)))
            .inspect(|(a, b)| {
                // The inverse must live in the ring and multiply to one.
                assert!(ring.contains(b));
                assert!(ring.is_one(&ring.mul(a, b)));
            })
            .count();

        assert_eq!(units, 8 * 6 * 10);
    }
}
23.280992
73
0.56017