hexsha
stringlengths
40
40
size
int64
4
1.05M
content
stringlengths
4
1.05M
avg_line_length
float64
1.33
100
max_line_length
int64
1
1k
alphanum_fraction
float64
0.25
1
283b34d583fc5b7027ee1ebc5ba6ab788ece106d
4,735
//! RPC request support. use http; use hyper; use prost; use futures::prelude::*; use super::frame; use futures::{future, stream}; use super::future::*; use super::metadata::*; use super::status::*; /// Common trait for RPC request types. pub trait Request { /// Converts an HTTP request into a future that produces an instance of this type. fn from_http_request(request: hyper::Request<hyper::Body>) -> GrpcFuture<Self>; /// Converts an instance of this type into a future that produces an HTTP request to be sent to /// the specified URI. fn into_http_request(self, uri: hyper::Uri) -> GrpcFuture<hyper::Request<hyper::Body>>; } /// RPC request containing a single message. #[derive(Default)] pub struct UnaryRequest<M> where M: prost::Message + Default + 'static, { pub metadata: Metadata, pub data: M, } impl<M> Request for UnaryRequest<M> where M: prost::Message + Default + 'static, { fn from_http_request(request: hyper::Request<hyper::Body>) -> GrpcFuture<Self> { Box::new(future::lazy(move || { let metadata = Metadata::from_header_map(request.headers()); SingleItem::new(message_stream(request.into_body())).and_then(move |message_opt| { match message_opt { Some(data) => Ok(Self { metadata, data }), None => Err(Status::new( StatusCode::Unimplemented, Some("expected single-message body, received no messages"), )), } }) })) } fn into_http_request(self, uri: hyper::Uri) -> GrpcFuture<hyper::Request<hyper::Body>> { Box::new(future::lazy(move || { let mut data = Vec::new(); frame::encode(&self.data, &mut data)?; request_builder(uri, self.metadata).and_then(|mut builder| { builder .body(hyper::Body::from(data)) .map_err(|e| Status::from_display(StatusCode::Internal, e)) }) })) } } impl<M> From<M> for UnaryRequest<M> where M: prost::Message + Default + 'static, { fn from(data: M) -> Self { Self { metadata: Metadata::default(), data, } } } /// RPC request containing a message stream. pub struct StreamingRequest<M> where M: prost::Message + Default + 'static, { pub metadata: Metadata, pub data: GrpcStream<M>, } impl<M> Default for StreamingRequest<M> where M: prost::Message + Default + 'static, { fn default() -> Self { Self { metadata: Metadata::default(), data: Box::new(stream::empty()), } } } impl<M> Request for StreamingRequest<M> where M: prost::Message + Default + 'static, { fn from_http_request(request: hyper::Request<hyper::Body>) -> GrpcFuture<Self> { Box::new(future::lazy(move || { Ok(Self { metadata: Metadata::from_header_map(request.headers()), data: Box::new(message_stream(request.into_body())), }) })) } fn into_http_request(self, uri: hyper::Uri) -> GrpcFuture<hyper::Request<hyper::Body>> { Box::new(future::lazy(move || { let data_stream = self.data.and_then(|message| { let mut data = Vec::new(); frame::encode(&message, &mut data)?; Ok(hyper::Chunk::from(data)) }); request_builder(uri, self.metadata).and_then(|mut builder| { builder .body(hyper::Body::wrap_stream(data_stream)) .map_err(|e| Status::from_display(StatusCode::Internal, e)) }) })) } } impl<M> From<GrpcStream<M>> for StreamingRequest<M> where M: prost::Message + Default + 'static, { fn from(data: GrpcStream<M>) -> Self { Self { metadata: Metadata::default(), data, } } } /// Returns a builder for a [`hyper::Request`] for the specified URI and metadata, with standard /// settings for gRPC use. 
/// /// [`hyper::Request`]: https://docs.rs/hyper/0.12/hyper/struct.Request.html fn request_builder(uri: hyper::Uri, metadata: Metadata) -> Result<http::request::Builder, Status> { let mut builder = hyper::Request::post(uri); builder .version(http::Version::HTTP_2) .header(http::header::TE, "trailers") .header(http::header::CONTENT_TYPE, "application/grpc") .header( http::header::USER_AGENT, format!("{}/{}", env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION")).as_str(), ); metadata.append_to_headers(&mut builder)?; Ok(builder) }
28.871951
99
0.574234
76e5fd2026991698ba3ab138e9d9b498a34968ca
2,554
//! @ \TeX\ always knows at least one font, namely the null font. It has no //! characters, and its seven parameters are all equal to zero. // // @<Initialize table...@>= #[allow(unused_variables)] pub(crate) macro Initialize_table_entries_done_by_initex_only_0552($globals:expr) {{ let globals = &mut *$globals; // font_ptr:=null_font; fmem_ptr:=7; globals.font_ptr = null_font; globals.fmem_ptr = 7.into(); // font_name[null_font]:="nullfont"; font_area[null_font]:=""; globals.font_name[null_font] = crate::strpool_str!("nullfont"); globals.font_area[null_font] = crate::strpool_str!(""); // hyphen_char[null_font]:="-"; skew_char[null_font]:=-1; globals.hyphen_char[null_font] = b'-' as _; globals.skew_char[null_font] = -1; // bchar_label[null_font]:=non_address; globals.bchar_label[null_font] = non_address; // font_bchar[null_font]:=non_char; font_false_bchar[null_font]:=non_char; globals.font_bchar[null_font] = non_char; globals.font_false_bchar[null_font] = non_char; // font_bc[null_font]:=1; font_ec[null_font]:=0; globals.font_bc[null_font] = 1.into(); globals.font_ec[null_font] = 0.into(); // font_size[null_font]:=0; font_dsize[null_font]:=0; globals.font_size[null_font] = scaled::zero(); globals.font_dsize[null_font] = scaled::zero(); // char_base[null_font]:=0; width_base[null_font]:=0; globals.char_base[null_font] = 0; globals.width_base[null_font] = 0; // height_base[null_font]:=0; depth_base[null_font]:=0; globals.height_base[null_font] = 0; globals.depth_base[null_font] = 0; // italic_base[null_font]:=0; lig_kern_base[null_font]:=0; globals.italic_base[null_font] = 0; globals.lig_kern_base[null_font] = 0; // kern_base[null_font]:=0; exten_base[null_font]:=0; globals.kern_base[null_font] = 0; globals.exten_base[null_font] = 0; // font_glue[null_font]:=null; font_params[null_font]:=7; globals.font_glue[null_font] = null; globals.font_params[null_font] = 7.into(); // param_base[null_font]:=-1; globals.param_base[null_font] = -1; // for k:=0 to 6 do font_info[k].sc:=0; for k in 0..=6 { globals.font_info[k][MEMORY_WORD_SC] = scaled::zero(); } }} use crate::section_0004::TeXGlobals; use crate::section_0101::scaled; use crate::section_0101::MEMORY_WORD_SC; use crate::section_0115::null; use crate::section_0232::null_font; use crate::section_0548::internal_font_number; use crate::section_0549::non_address; use crate::section_0549::non_char;
43.288136
84
0.696554
1dd46efd8003c70418c52a322ded6217a463b47b
1,009
// option2.rs // Make me compile! Execute `rustlings hint option2` for hints fn main() { let optional_value = Some(String::from("rustlings")); // TODO: Make this an if let statement whose value is "Some" type if let value = optional_value { println!("the value of optional value is: {:?}", Some(value)); } else { println!("The optional value doesn't contain anything!"); } let mut optional_values_vec: Vec<Option<i8>> = Vec::new(); for x in 1..10 { optional_values_vec.push(Some(x)); } // TODO: make this a while let statement - remember that vector.pop also adds another layer of Option<T> // You can stack `Option<T>`'s into while let and if let while let value = optional_values_vec.pop() { match value { Some(value) => println!("current value: {:?}", value), None => { println!("get to the vec end"); break; }, } } }
33.633333
109
0.564916
d9a7a7726ed3417c6275530efdfb08c80a02531b
7,738
use derivative::Derivative; use futures::{stream::BoxStream, Stream, StreamExt}; use kube::{ api::{ListParams, Meta, WatchEvent}, Api, }; use serde::de::DeserializeOwned; use smallvec::SmallVec; use snafu::{Backtrace, ResultExt, Snafu}; use std::clone::Clone; #[derive(Snafu, Debug)] pub enum Error { #[snafu(display("failed to perform initial object list: {}", source))] InitialListFailed { source: kube::Error, backtrace: Backtrace, }, #[snafu(display("failed to start watching object: {}", source))] WatchStartFailed { source: kube::Error, backtrace: Backtrace, }, #[snafu(display("error returned by apiserver during watch: {}", source))] WatchError { source: kube::error::ErrorResponse, backtrace: Backtrace, }, #[snafu(display("watch stream failed: {}", source))] WatchFailed { source: kube::Error, backtrace: Backtrace, }, } pub type Result<T, E = Error> = std::result::Result<T, E>; #[derive(Debug, Clone)] /// Watch events returned from the `Watcher` pub enum Event<K> { /// A resource was added or modified Applied(K), /// A resource was deleted /// /// NOTE: This should not be used for managing persistent state elsewhere, since /// events may be lost if the watcher is unavailable. Use Finalizers instead. Deleted(K), /// The watch stream was restarted, so `Deleted` events may have been missed /// /// Should be used as a signal to replace the store contents atomically. Restarted(Vec<K>), } impl<K> Event<K> { /// Flattens out all objects that were added or modified in the event. /// /// `Deleted` objects are ignored, all objects mentioned by `Restarted` events are /// emitted individually. pub fn into_iter_applied(self) -> impl Iterator<Item = K> { match self { Event::Applied(obj) => SmallVec::from_buf([obj]), Event::Deleted(_) => SmallVec::new(), Event::Restarted(objs) => SmallVec::from_vec(objs), } .into_iter() } /// Flattens out all objects that were added, modified, or deleted in the event. /// /// Note that `Deleted` events may be missed when restarting the stream. Use finalizers /// or owner references instead if you care about cleaning up external resources after /// deleted objects. pub fn into_iter_touched(self) -> impl Iterator<Item = K> { match self { Event::Applied(obj) | Event::Deleted(obj) => SmallVec::from_buf([obj]), Event::Restarted(objs) => SmallVec::from_vec(objs), } .into_iter() } } #[derive(Derivative)] #[derivative(Debug)] /// The internal finite state machine driving the [`Watcher`](struct.Watcher.html) enum State<K: Meta + Clone> { /// The Watcher is empty, and the next poll() will start the initial LIST to get all existing objects Empty, /// The initial LIST was successful, so we should move on to starting the actual watch. InitListed { resource_version: String }, /// The watch is in progress, from this point we just return events from the server. /// /// If the connection is disrupted then we propagate the error but try to restart the watch stream by /// returning to the `InitListed` state. /// If we fall out of the K8s watch window then we propagate the error and fall back doing a re-list /// with `Empty`. Watching { resource_version: String, #[derivative(Debug = "ignore")] stream: BoxStream<'static, kube::Result<WatchEvent<K>>>, }, } /// Progresses the watcher a single step, returning (event, state) /// /// This function should be trampolined: if event == `None` /// then the function should be called again until it returns a Some. 
async fn step_trampolined<K: Meta + Clone + DeserializeOwned + Send + 'static>( api: &Api<K>, list_params: &ListParams, state: State<K>, ) -> (Option<Result<Event<K>>>, State<K>) { match state { State::Empty => match api.list(&list_params).await { Ok(list) => (Some(Ok(Event::Restarted(list.items))), State::InitListed { resource_version: list.metadata.resource_version.unwrap(), }), Err(err) => (Some(Err(err).context(InitialListFailed)), State::Empty), }, State::InitListed { resource_version } => match api.watch(&list_params, &resource_version).await { Ok(stream) => (None, State::Watching { resource_version, stream: stream.boxed(), }), Err(err) => (Some(Err(err).context(WatchStartFailed)), State::InitListed { resource_version, }), }, State::Watching { resource_version, mut stream, } => match stream.next().await { Some(Ok(WatchEvent::Added(obj))) | Some(Ok(WatchEvent::Modified(obj))) => { let resource_version = obj.resource_ver().unwrap(); (Some(Ok(Event::Applied(obj))), State::Watching { resource_version, stream, }) } Some(Ok(WatchEvent::Deleted(obj))) => { let resource_version = obj.resource_ver().unwrap(); (Some(Ok(Event::Deleted(obj))), State::Watching { resource_version, stream, }) } Some(Ok(WatchEvent::Bookmark(bm))) => (None, State::Watching { resource_version: bm.metadata.resource_version, stream, }), Some(Ok(WatchEvent::Error(err))) => { // HTTP GONE, means we have desynced and need to start over and re-list :( let new_state = if err.code == 410 { State::Empty } else { State::Watching { resource_version, stream, } }; (Some(Err(err).context(WatchError)), new_state) } Some(Err(err)) => (Some(Err(err).context(WatchFailed)), State::Watching { resource_version, stream, }), None => (None, State::InitListed { resource_version }), }, } } /// Trampoline helper for `step_trampolined` async fn step<K: Meta + Clone + DeserializeOwned + Send + 'static>( api: &Api<K>, list_params: &ListParams, mut state: State<K>, ) -> (Result<Event<K>>, State<K>) { loop { match step_trampolined(&api, &list_params, state).await { (Some(result), new_state) => return (result, new_state), (None, new_state) => state = new_state, } } } /// Watches a Kubernetes Resource for changes /// /// Errors are propagated to the client as `Err`. Tries to recover (by reconnecting and resyncing as required) /// if polled again after an error. /// /// # Migration from `kube::runtime` /// /// This is similar to the legacy `kube::runtime::Informer`, or the watching half of client-go's `Reflector`. /// Renamed to avoid confusion with client-go's `Informer` (which watches a `Reflector` for updates, rather /// the Kubernetes API). pub fn watcher<K: Meta + Clone + DeserializeOwned + Send + 'static>( api: Api<K>, list_params: ListParams, ) -> impl Stream<Item = Result<Event<K>>> + Send { futures::stream::unfold( (api, list_params, State::Empty), |(api, list_params, state)| async { let (event, state) = step(&api, &list_params, state).await; Some((event, (api, list_params, state))) }, ) }
37.563107
110
0.588912
9c98dc1e84fa5e0a7b8a4c0f09562453297b3979
1,223
use std::io; use sdl2::video::WindowBuildError; use sdl2::IntegerOrSdlError; use sdl2::render::TextureValueError; use sdl2::render::TargetRenderError; use sdl2::render::UpdateTextureError; #[derive(Debug)] pub enum Error { Io(io::Error), Text(String), WindowBuildError, IntegerOrSdlError, TextureValueError, TargetRenderError, UpdateTextureError, } impl From<io::Error> for Error { fn from(err: io::Error) -> Error { Error::Io(err) } } impl From<String> for Error { fn from(err: String) -> Error { Error::Text(err) } } impl From<WindowBuildError> for Error { fn from(_: WindowBuildError) -> Error { Error::WindowBuildError } } impl From<IntegerOrSdlError> for Error { fn from(_: IntegerOrSdlError) -> Error { Error::IntegerOrSdlError } } impl From<TextureValueError> for Error { fn from(_: TextureValueError) -> Error { Error::TextureValueError } } impl From<TargetRenderError> for Error { fn from(_: TargetRenderError) -> Error { Error::TargetRenderError } } impl From<UpdateTextureError> for Error { fn from(_: UpdateTextureError) -> Error { Error::UpdateTextureError } }
20.04918
45
0.659035
8abea75a5f4b74fed1d5d7a6513d0f37c3ed752c
1,690
#[allow(unused_imports)] use serde_json::Value; #[allow(unused_imports)] use std::borrow::Borrow; #[allow(unused_imports)] use super::*; #[derive(Debug, Default, Serialize, Deserialize, PartialEq)] pub struct r#GroupProfile { #[serde(rename = "description", skip_serializing_if = "Option::is_none")] r#description: Option<String>, #[serde(rename = "name", skip_serializing_if = "Option::is_none")] r#name: Option<String>, } impl r#GroupProfile { pub fn new( ) -> Self { Self { r#description: None, r#name: None, } } pub fn set_description(&mut self, r#description: String) { self.r#description = Some(r#description); } pub fn with_description(mut self, r#description: String) -> Self { self.r#description = Some(r#description); self } pub fn with_option_description(mut self, r#description: Option<String>) -> Self { self.r#description = r#description; self } pub fn r#description(&self) -> Option<&str> { self.r#description.as_ref().map(|x| x.borrow()) } pub fn reset_description(&mut self) { self.r#description = None; } pub fn set_name(&mut self, r#name: String) { self.r#name = Some(r#name); } pub fn with_name(mut self, r#name: String) -> Self { self.r#name = Some(r#name); self } pub fn with_option_name(mut self, r#name: Option<String>) -> Self { self.r#name = r#name; self } pub fn r#name(&self) -> Option<&str> { self.r#name.as_ref().map(|x| x.borrow()) } pub fn reset_name(&mut self) { self.r#name = None; } }
23.802817
85
0.590533
d60bdb6899c574a11ba09496d1bbf188e7b7d54d
5,007
//! A simple GVN pass. use cursor::{Cursor, FuncCursor}; use dominator_tree::DominatorTree; use ir::{Function, Inst, InstructionData, Opcode, Type}; use scoped_hash_map::ScopedHashMap; use std::cell::{Ref, RefCell}; use std::hash::{Hash, Hasher}; use std::vec::Vec; use timing; /// Test whether the given opcode is unsafe to even consider for GVN. fn trivially_unsafe_for_gvn(opcode: Opcode) -> bool { opcode.is_call() || opcode.is_branch() || opcode.is_terminator() || opcode.is_return() || opcode.can_trap() || opcode.other_side_effects() || opcode.can_store() || opcode.writes_cpu_flags() } /// Test that, if the specified instruction is a load, it doesn't have the `readonly` memflag. fn is_load_and_not_readonly(inst_data: &InstructionData) -> bool { match *inst_data { InstructionData::Load { flags, .. } | InstructionData::LoadComplex { flags, .. } => { !flags.readonly() } _ => inst_data.opcode().can_load(), } } /// Wrapper around `InstructionData` which implements `Eq` and `Hash` #[derive(Clone)] struct HashKey<'a, 'f: 'a> { inst: InstructionData, ty: Type, pos: &'a RefCell<FuncCursor<'f>>, } impl<'a, 'f: 'a> Hash for HashKey<'a, 'f> { fn hash<H: Hasher>(&self, state: &mut H) { let pool = &self.pos.borrow().func.dfg.value_lists; self.inst.hash(state, pool); self.ty.hash(state); } } impl<'a, 'f: 'a> PartialEq for HashKey<'a, 'f> { fn eq(&self, other: &Self) -> bool { let pool = &self.pos.borrow().func.dfg.value_lists; self.inst.eq(&other.inst, pool) && self.ty == other.ty } } impl<'a, 'f: 'a> Eq for HashKey<'a, 'f> {} /// Perform simple GVN on `func`. /// pub fn do_simple_gvn(func: &mut Function, domtree: &mut DominatorTree) { let _tt = timing::gvn(); debug_assert!(domtree.is_valid()); // Visit EBBs in a reverse post-order. // // The RefCell here is a bit ugly since the HashKeys in the ScopedHashMap // need a reference to the function. let pos = RefCell::new(FuncCursor::new(func)); let mut visible_values: ScopedHashMap<HashKey, Inst> = ScopedHashMap::new(); let mut scope_stack: Vec<Inst> = Vec::new(); for &ebb in domtree.cfg_postorder().iter().rev() { { // Pop any scopes that we just exited. let layout = &pos.borrow().func.layout; loop { if let Some(current) = scope_stack.last() { if domtree.dominates(*current, ebb, layout) { break; } } else { break; } scope_stack.pop(); visible_values.decrement_depth(); } // Push a scope for the current block. scope_stack.push(layout.first_inst(ebb).unwrap()); visible_values.increment_depth(); } pos.borrow_mut().goto_top(ebb); while let Some(inst) = { let mut pos = pos.borrow_mut(); pos.next_inst() } { // Resolve aliases, particularly aliases we created earlier. pos.borrow_mut().func.dfg.resolve_aliases_in_arguments(inst); let func = Ref::map(pos.borrow(), |pos| &pos.func); let opcode = func.dfg[inst].opcode(); if opcode.is_branch() && !opcode.is_terminator() { scope_stack.push(func.layout.next_inst(inst).unwrap()); visible_values.increment_depth(); } if trivially_unsafe_for_gvn(opcode) { continue; } // These are split up to separate concerns. if is_load_and_not_readonly(&func.dfg[inst]) { continue; } let ctrl_typevar = func.dfg.ctrl_typevar(inst); let key = HashKey { inst: func.dfg[inst].clone(), ty: ctrl_typevar, pos: &pos, }; use scoped_hash_map::Entry::*; match visible_values.entry(key) { Occupied(entry) => { debug_assert!(domtree.dominates(*entry.get(), inst, &func.layout)); // If the redundant instruction is representing the current // scope, pick a new representative. 
let old = scope_stack.last_mut().unwrap(); if *old == inst { *old = func.layout.next_inst(inst).unwrap(); } // Replace the redundant instruction and remove it. drop(func); let mut pos = pos.borrow_mut(); pos.func.dfg.replace_with_aliases(inst, *entry.get()); pos.remove_inst_and_step_back(); } Vacant(entry) => { entry.insert(inst); } } } } }
34.061224
94
0.540643
50c82fe63e3abfdbcc5aa385c6282c680911d128
193
mod api; mod dispath_request; mod method; mod request; pub use api::API; pub use dispath_request::{DispatchRequest, GetRequest, PostRequest}; pub use method::Method; pub use request::Request;
19.3
68
0.777202
bb31f072c638517c0fbfd9821cd3017e32876447
768
// traits2.rs // // Your task is to implement the trait // `AppendBar' for a vector of strings. // // To implement this trait, consider for // a moment what it means to 'append "Bar"' // to a vector of strings. // // No boiler plate code this time, // you can do this! trait AppendBar { fn append_bar(self) -> Self; } //TODO: Add your code here impl AppendBar for Vec<String> { fn append_bar(mut self) -> Self { self.push("Bar".to_string()); return self; } } #[cfg(test)] mod tests { use super::*; #[test] fn is_vec_pop_eq_bar() { let mut foo = vec![String::from("Foo")].append_bar(); assert_eq!(foo.pop().unwrap(), String::from("Bar")); assert_eq!(foo.pop().unwrap(), String::from("Foo")); } }
20.756757
61
0.60026
09a7464253d392d8e2dc057c60845a9f426f02f5
1,357
extern crate px4_ulog; use px4_ulog::models::ULogData; use px4_ulog::parser::dataset::*; use std::collections::HashSet; use std::fs::File; fn main() { let mut args = std::env::args(); let cmd = args.next(); if let Some(filename) = args.next() { let mut log_file = File::open(&filename).unwrap(); if let Some(dataset_name) = args.next() { let datasets: Vec<ULogData> = log_file.get_dataset(&dataset_name).unwrap().collect(); println!("Measurements: {}", datasets.len()); let filters = args.collect::<HashSet<String>>(); for dataset in datasets.iter() { println!("--------------------------"); for item in dataset.iter() { if filters.len() == 0 || filters.contains(item.name()) { println!("{} at {}: {:?}", item.name(), item.index(), item.data()); } } } } else { let messages = log_file.get_message_names().unwrap(); println!("Messages: {}", messages.len()); for msg in messages { println!("{}", msg); } } } else { eprintln!( "usage: {} log-file.ulg [dataset] [list of filters]", cmd.unwrap_or("px4-ulog".to_string()) ); } }
31.55814
97
0.484893
f88d7d07fa562c6fc724d28b4fb9d2e97ad63725
8,236
extern crate redis; use self::redis::aio::ConnectionManager; use self::redis::ConnectionInfo; use crate::counter::Counter; use crate::limit::{Limit, Namespace}; use crate::storage::keys::*; use crate::storage::redis::scripts::{SCRIPT_DELETE_LIMIT, SCRIPT_UPDATE_COUNTER}; use crate::storage::{AsyncStorage, Authorization, StorageErr}; use async_trait::async_trait; use redis::AsyncCommands; use std::collections::HashSet; use std::str::FromStr; use std::time::Duration; // Note: this implementation does no guarantee exact limits. Ensuring that we // never go over the limits would hurt performance. This implementation // sacrifices a bit of accuracy to be more performant. // TODO: the code of this implementation is almost identical to the blocking // one. The only exception is that the functions defined are "async" and all the // calls to the client need to include ".await". We'll need to think about how // to remove this duplication. #[derive(Clone)] pub struct AsyncRedisStorage { conn_manager: ConnectionManager, } #[async_trait] impl AsyncStorage for AsyncRedisStorage { async fn get_namespaces(&self) -> Result<HashSet<Namespace>, StorageErr> { let mut con = self.conn_manager.clone(); let namespaces = con .smembers::<String, HashSet<String>>(key_for_namespaces_set()) .await?; Ok(namespaces.iter().map(|ns| ns.parse().unwrap()).collect()) } async fn add_limit(&self, limit: &Limit) -> Result<(), StorageErr> { let mut con = self.conn_manager.clone(); let set_key = key_for_limits_of_namespace(limit.namespace()); let serialized_limit = serde_json::to_string(limit).unwrap(); redis::pipe() .atomic() .sadd::<String, String>(set_key, serialized_limit) .sadd::<String, &str>(key_for_namespaces_set(), limit.namespace().as_ref()) .query_async(&mut con) .await?; Ok(()) } async fn get_limits(&self, namespace: &Namespace) -> Result<HashSet<Limit>, StorageErr> { let mut con = self.conn_manager.clone(); let set_key = key_for_limits_of_namespace(namespace); let limits: HashSet<Limit> = con .smembers::<String, HashSet<String>>(set_key) .await? .iter() .map(|limit_json| serde_json::from_str(limit_json).unwrap()) .collect(); Ok(limits) } async fn delete_limit(&self, limit: &Limit) -> Result<(), StorageErr> { let mut con = self.conn_manager.clone(); self.delete_counters_associated_with_limit(limit).await?; con.del(key_for_counters_of_limit(limit)).await?; let serialized_limit = serde_json::to_string(limit).unwrap(); redis::Script::new(SCRIPT_DELETE_LIMIT) .key(key_for_limits_of_namespace(limit.namespace())) .key(key_for_namespaces_set()) .arg(serialized_limit) .arg(limit.namespace().as_ref()) .invoke_async::<_, _>(&mut con) .await?; Ok(()) } async fn delete_limits(&self, namespace: &Namespace) -> Result<(), StorageErr> { let mut con = self.conn_manager.clone(); self.delete_counters_of_namespace(namespace).await?; for limit in self.get_limits(namespace).await? { con.del(key_for_counters_of_limit(&limit)).await?; } let set_key = key_for_limits_of_namespace(namespace); con.del(set_key).await?; Ok(()) } async fn is_within_limits(&self, counter: &Counter, delta: i64) -> Result<bool, StorageErr> { let mut con = self.conn_manager.clone(); match con .get::<String, Option<i64>>(key_for_counter(counter)) .await? 
{ Some(val) => Ok(val - delta >= 0), None => Ok(counter.max_value() - delta >= 0), } } async fn update_counter(&self, counter: &Counter, delta: i64) -> Result<(), StorageErr> { let mut con = self.conn_manager.clone(); redis::Script::new(SCRIPT_UPDATE_COUNTER) .key(key_for_counter(counter)) .key(key_for_counters_of_limit(counter.limit())) .arg(counter.max_value()) .arg(counter.seconds()) .arg(delta) .invoke_async::<_, _>(&mut con) .await?; Ok(()) } async fn check_and_update<'c>( &self, counters: &HashSet<&'c Counter>, delta: i64, ) -> Result<Authorization<'c>, StorageErr> { let mut con = self.conn_manager.clone(); let counter_keys: Vec<String> = counters .iter() .map(|counter| key_for_counter(counter)) .collect(); let counter_vals: Vec<Option<i64>> = redis::cmd("MGET") .arg(counter_keys) .query_async(&mut con) .await?; for (i, counter) in counters.iter().enumerate() { match counter_vals[i] { Some(val) => { if val - delta < 0 { return Ok(Authorization::Limited(counter)); } } None => { if counter.max_value() - delta < 0 { return Ok(Authorization::Limited(counter)); } } } } // TODO: this can be optimized by using pipelines with multiple updates for counter in counters { self.update_counter(counter, delta).await? } Ok(Authorization::Ok) } async fn get_counters(&self, namespace: &Namespace) -> Result<HashSet<Counter>, StorageErr> { let mut res = HashSet::new(); let mut con = self.conn_manager.clone(); for limit in self.get_limits(namespace).await? { let counter_keys = con .smembers::<String, HashSet<String>>(key_for_counters_of_limit(&limit)) .await?; for counter_key in counter_keys { let mut counter: Counter = counter_from_counter_key(&counter_key); // If the key does not exist, it means that the counter expired, // so we don't have to return it. // TODO: we should delete the counter from the set of counters // associated with the limit taking into account that we should // do the "get" + "delete if none" atomically. // This does not cause any bugs, but consumes memory // unnecessarily. if let Some(val) = con.get::<String, Option<i64>>(counter_key.clone()).await? { counter.set_remaining(val); let ttl = con.ttl(&counter_key).await?; counter.set_expires_in(Duration::from_secs(ttl)); res.insert(counter); } } } Ok(res) } async fn clear(&self) -> Result<(), StorageErr> { let mut con = self.conn_manager.clone(); redis::cmd("FLUSHDB").query_async(&mut con).await?; Ok(()) } } impl AsyncRedisStorage { pub async fn new(redis_url: &str) -> Self { Self { conn_manager: ConnectionManager::new( redis::Client::open(ConnectionInfo::from_str(redis_url).unwrap()).unwrap(), ) .await .unwrap(), } } pub fn new_with_conn_manager(conn_manager: ConnectionManager) -> Self { Self { conn_manager } } async fn delete_counters_of_namespace(&self, namespace: &Namespace) -> Result<(), StorageErr> { for limit in self.get_limits(namespace).await? { self.delete_counters_associated_with_limit(&limit).await? } Ok(()) } async fn delete_counters_associated_with_limit(&self, limit: &Limit) -> Result<(), StorageErr> { let mut con = self.conn_manager.clone(); let counter_keys = con .smembers::<String, HashSet<String>>(key_for_counters_of_limit(limit)) .await?; for counter_key in counter_keys { con.del(counter_key).await?; } Ok(()) } }
32.812749
100
0.581472
ff801b67045c53335c7c48470ff04639c1b9111a
4,098
use std::borrow::Cow; use std::fs; use std::io::{self, BufRead, BufReader}; use std::path::PathBuf; use std::process; use structopt::StructOpt; use sudachi::prelude::*; #[cfg(feature = "bake_dictionary")] const BAKED_DICTIONARY_BYTES: &[u8] = include_bytes!(env!("SUDACHI_DICT_PATH")); /// A Japanese tokenizer #[derive(StructOpt)] #[structopt(name = "sudachi", author = "")] struct Cli { /// Input text file: If not present, read from STDIN #[structopt(parse(from_os_str))] file: Option<PathBuf>, /// Split unit: "A" (short), "B" (middle), or "C" (Named Entity) #[structopt(short = "m", long = "mode", default_value = "C")] mode: String, /// Prints all fields #[structopt(short = "a", long = "all")] print_all: bool, /// Outputs only surface form #[structopt(short = "w", long = "wakati")] wakati: bool, /// Debug mode: Dumps lattice #[structopt(short = "d", long = "debug")] enable_debug: bool, // Dictionary is optional if baked in /// Path to sudachi dictionary #[cfg(feature = "bake_dictionary")] #[structopt(short = "l", long = "dict")] dictionary_path: Option<PathBuf>, // Dictionary is not baked in, so it must be specified /// Path to sudachi dictionary #[cfg(not(feature = "bake_dictionary"))] #[structopt(short = "l", long = "dict")] dictionary_path: PathBuf, } fn get_dictionary_bytes(args: &Cli) -> Cow<'static, [u8]> { let dictionary_path = { cfg_if::cfg_if! { if #[cfg(feature="bake_dictionary")] { if let Some(dictionary_path) = &args.dictionary_path { dictionary_path } else { return Cow::Borrowed(BAKED_DICTIONARY_BYTES); } } else { &args.dictionary_path } } }; let storage_buf = dictionary_bytes_from_path(&dictionary_path) .expect("Failed to get dictionary bytes from file"); Cow::Owned(storage_buf) } fn main() { let args = Cli::from_args(); let mode = match args.mode.as_str().parse() { Ok(mode) => mode, Err(err) => { eprintln!("Invalid mode: {}", err); process::exit(1); } }; let print_all = args.print_all; let wakati = args.wakati; let enable_debug = args.enable_debug; // load and parse dictionary binary to create a tokenizer let dictionary_bytes = get_dictionary_bytes(&args); let tokenizer = Tokenizer::from_dictionary_bytes(&dictionary_bytes) .expect("Failed to create Tokenizer from dictionary bytes"); // input: stdin or file let reader: Box<dyn BufRead> = match args.file { None => Box::new(BufReader::new(io::stdin())), Some(input_path) => Box::new(BufReader::new( fs::File::open(&input_path) .unwrap_or_else(|_| panic!("Failed to open file {:?}", &input_path)), )), }; for line in reader.lines() { let input = line.expect("Failed to reead line").to_string(); let morpheme_list = tokenizer .tokenize(&input, mode, enable_debug) .expect("failed to tokenize input"); if wakati { let surface_list = morpheme_list .iter() .map(|m| m.surface().to_string()) .collect::<Vec<_>>(); println!("{}", surface_list.join(" ")); } else { for morpheme in morpheme_list { print!( "{}\t{}\t{}", morpheme.surface(), morpheme.pos().expect("Missing part of speech").join(","), morpheme.normalized_form(), ); if print_all { print!( "\t{}\t{}", morpheme.dictionary_form(), morpheme.reading_form(), // TODO: is_oov ); } println!(); } println!("EOS"); } } }
30.81203
85
0.537335
1c0605888aad5e41cce8e3342b9c0ad32f32eebe
40
pub mod fft; use fft::*; pub mod plot;
8
13
0.625
48379601414f34a039339394de4479683516afcd
220
fn main() { // `n` will take the values: 1, 2, ..., 100 in each iteration for n in 1..101 { if n % 2 == 0 { println!("even"); } else { println!("{}", n); } } }
20
65
0.372727
33b4957f75872a0aee22246eeacb3c42508d9fa1
1,589
//! Crate name parsing. use anyhow::Context as _; use super::Dependency; use super::RegistrySource; use crate::util::validate_package_name; use crate::CargoResult; /// User-specified crate /// /// This can be a /// - Name (e.g. `docopt`) /// - Name and a version req (e.g. `docopt@^0.8`) /// - Path #[derive(Debug)] pub struct CrateSpec { /// Crate name name: String, /// Optional version requirement version_req: Option<String>, } impl CrateSpec { /// Convert a string to a `Crate` pub fn resolve(pkg_id: &str) -> CargoResult<Self> { let (name, version) = pkg_id .split_once('@') .map(|(n, v)| (n, Some(v))) .unwrap_or((pkg_id, None)); validate_package_name(name, "dependency name", "")?; if let Some(version) = version { semver::VersionReq::parse(version) .with_context(|| format!("invalid version requirement `{version}`"))?; } let id = Self { name: name.to_owned(), version_req: version.map(|s| s.to_owned()), }; Ok(id) } /// Generate a dependency entry for this crate specifier pub fn to_dependency(&self) -> CargoResult<Dependency> { let mut dep = Dependency::new(self.name()); if let Some(version_req) = self.version_req() { dep = dep.set_source(RegistrySource::new(version_req)); } Ok(dep) } pub fn name(&self) -> &str { &self.name } pub fn version_req(&self) -> Option<&str> { self.version_req.as_deref() } }
24.828125
86
0.569541
50830fa69f96562c362f124785c5dde9314a948a
1,109
/// If this is a valid NUL terminated C string, this function will return /// it as a Rust str reference. If there is no NUL terminator, this will just consider /// the entire array as the C string. /// /// **NOTE** This function is only for debug purposes and may allocate if invalud UTF-8 sequences /// are present in the string. pub fn cstr_display<'s>(full_arr: &'s [libc::c_char]) -> std::borrow::Cow<'s, str> { let byte_slice = into_byte_slice(full_arr); if let Ok(cstr) = std::ffi::CStr::from_bytes_with_nul(byte_slice) { cstr.to_string_lossy() } else { // if there is no nul terminator, just use the entire slice: String::from_utf8_lossy(byte_slice) } } /// Converts slice of any sized type into a slice of bytes. pub fn into_byte_slice<T: Sized>(orig: &[T]) -> &[u8] { // FIXME I don't think the behavior here is undefined since u8 should have an alignment of 1, but // I might be wrong :P let byte_len = orig.len() * std::mem::size_of::<T>(); let ptr = orig.as_ptr() as *const u8; unsafe { std::slice::from_raw_parts(ptr, byte_len) } }
41.074074
101
0.669071
39cc8bced5005f793b4222bd3ac73f84f37db973
25,792
#[doc = "Register `TCD0_CSR` reader"] pub struct R(crate::R<TCD0_CSR_SPEC>); impl core::ops::Deref for R { type Target = crate::R<TCD0_CSR_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<TCD0_CSR_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<TCD0_CSR_SPEC>) -> Self { R(reader) } } #[doc = "Register `TCD0_CSR` writer"] pub struct W(crate::W<TCD0_CSR_SPEC>); impl core::ops::Deref for W { type Target = crate::W<TCD0_CSR_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<TCD0_CSR_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<TCD0_CSR_SPEC>) -> Self { W(writer) } } #[doc = "Channel Start\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum START_A { #[doc = "0: The channel is not explicitly started."] START_0 = 0, #[doc = "1: The channel is explicitly started via a software initiated service request."] START_1 = 1, } impl From<START_A> for bool { #[inline(always)] fn from(variant: START_A) -> Self { variant as u8 != 0 } } #[doc = "Field `START` reader - Channel Start"] pub struct START_R(crate::FieldReader<bool, START_A>); impl START_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { START_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> START_A { match self.bits { false => START_A::START_0, true => START_A::START_1, } } #[doc = "Checks if the value of the field is `START_0`"] #[inline(always)] pub fn is_start_0(&self) -> bool { **self == START_A::START_0 } #[doc = "Checks if the value of the field is `START_1`"] #[inline(always)] pub fn is_start_1(&self) -> bool { **self == START_A::START_1 } } impl core::ops::Deref for START_R { type Target = crate::FieldReader<bool, START_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `START` writer - Channel Start"] pub struct START_W<'a> { w: &'a mut W, } impl<'a> START_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: START_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "The channel is not explicitly started."] #[inline(always)] pub fn start_0(self) -> &'a mut W { self.variant(START_A::START_0) } #[doc = "The channel is explicitly started via a software initiated service request."] #[inline(always)] pub fn start_1(self) -> &'a mut W { self.variant(START_A::START_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | (value as u16 & 0x01); self.w } } #[doc = "Enable an interrupt when major iteration count completes.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum INTMAJOR_A { #[doc = "0: The end-of-major loop interrupt is disabled."] INTMAJOR_0 = 0, #[doc = "1: The end-of-major loop interrupt is enabled."] INTMAJOR_1 = 1, } impl From<INTMAJOR_A> for bool { #[inline(always)] fn from(variant: INTMAJOR_A) -> Self { variant as u8 != 0 } } #[doc = "Field `INTMAJOR` reader - Enable an interrupt when major iteration count completes."] pub struct INTMAJOR_R(crate::FieldReader<bool, INTMAJOR_A>); impl INTMAJOR_R { #[inline(always)] 
pub(crate) fn new(bits: bool) -> Self { INTMAJOR_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> INTMAJOR_A { match self.bits { false => INTMAJOR_A::INTMAJOR_0, true => INTMAJOR_A::INTMAJOR_1, } } #[doc = "Checks if the value of the field is `INTMAJOR_0`"] #[inline(always)] pub fn is_intmajor_0(&self) -> bool { **self == INTMAJOR_A::INTMAJOR_0 } #[doc = "Checks if the value of the field is `INTMAJOR_1`"] #[inline(always)] pub fn is_intmajor_1(&self) -> bool { **self == INTMAJOR_A::INTMAJOR_1 } } impl core::ops::Deref for INTMAJOR_R { type Target = crate::FieldReader<bool, INTMAJOR_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `INTMAJOR` writer - Enable an interrupt when major iteration count completes."] pub struct INTMAJOR_W<'a> { w: &'a mut W, } impl<'a> INTMAJOR_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: INTMAJOR_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "The end-of-major loop interrupt is disabled."] #[inline(always)] pub fn intmajor_0(self) -> &'a mut W { self.variant(INTMAJOR_A::INTMAJOR_0) } #[doc = "The end-of-major loop interrupt is enabled."] #[inline(always)] pub fn intmajor_1(self) -> &'a mut W { self.variant(INTMAJOR_A::INTMAJOR_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 1)) | ((value as u16 & 0x01) << 1); self.w } } #[doc = "Enable an interrupt when major counter is half complete.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum INTHALF_A { #[doc = "0: The half-point interrupt is disabled."] INTHALF_0 = 0, #[doc = "1: The half-point interrupt is enabled."] INTHALF_1 = 1, } impl From<INTHALF_A> for bool { #[inline(always)] fn from(variant: INTHALF_A) -> Self { variant as u8 != 0 } } #[doc = "Field `INTHALF` reader - Enable an interrupt when major counter is half complete."] pub struct INTHALF_R(crate::FieldReader<bool, INTHALF_A>); impl INTHALF_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { INTHALF_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> INTHALF_A { match self.bits { false => INTHALF_A::INTHALF_0, true => INTHALF_A::INTHALF_1, } } #[doc = "Checks if the value of the field is `INTHALF_0`"] #[inline(always)] pub fn is_inthalf_0(&self) -> bool { **self == INTHALF_A::INTHALF_0 } #[doc = "Checks if the value of the field is `INTHALF_1`"] #[inline(always)] pub fn is_inthalf_1(&self) -> bool { **self == INTHALF_A::INTHALF_1 } } impl core::ops::Deref for INTHALF_R { type Target = crate::FieldReader<bool, INTHALF_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `INTHALF` writer - Enable an interrupt when major counter is half complete."] pub struct INTHALF_W<'a> { w: &'a mut W, } impl<'a> INTHALF_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: INTHALF_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "The half-point interrupt is disabled."] #[inline(always)] pub fn inthalf_0(self) -> &'a mut W { self.variant(INTHALF_A::INTHALF_0) } #[doc = "The half-point interrupt is enabled."] #[inline(always)] pub 
fn inthalf_1(self) -> &'a mut W { self.variant(INTHALF_A::INTHALF_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 2)) | ((value as u16 & 0x01) << 2); self.w } } #[doc = "Disable Request\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum DREQ_A { #[doc = "0: The channel's ERQ bit is not affected."] DREQ_0 = 0, #[doc = "1: The channel's ERQ bit is cleared when the major loop is complete."] DREQ_1 = 1, } impl From<DREQ_A> for bool { #[inline(always)] fn from(variant: DREQ_A) -> Self { variant as u8 != 0 } } #[doc = "Field `DREQ` reader - Disable Request"] pub struct DREQ_R(crate::FieldReader<bool, DREQ_A>); impl DREQ_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { DREQ_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> DREQ_A { match self.bits { false => DREQ_A::DREQ_0, true => DREQ_A::DREQ_1, } } #[doc = "Checks if the value of the field is `DREQ_0`"] #[inline(always)] pub fn is_dreq_0(&self) -> bool { **self == DREQ_A::DREQ_0 } #[doc = "Checks if the value of the field is `DREQ_1`"] #[inline(always)] pub fn is_dreq_1(&self) -> bool { **self == DREQ_A::DREQ_1 } } impl core::ops::Deref for DREQ_R { type Target = crate::FieldReader<bool, DREQ_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `DREQ` writer - Disable Request"] pub struct DREQ_W<'a> { w: &'a mut W, } impl<'a> DREQ_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: DREQ_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "The channel's ERQ bit is not affected."] #[inline(always)] pub fn dreq_0(self) -> &'a mut W { self.variant(DREQ_A::DREQ_0) } #[doc = "The channel's ERQ bit is cleared when the major loop is complete."] #[inline(always)] pub fn dreq_1(self) -> &'a mut W { self.variant(DREQ_A::DREQ_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 3)) | ((value as u16 & 0x01) << 3); self.w } } #[doc = "Enable Scatter/Gather Processing\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum ESG_A { #[doc = "0: The current channel's TCD is normal format."] ESG_0 = 0, #[doc = "1: The current channel's TCD specifies a scatter gather format. 
The DLASTSGA field provides a memory pointer to the next TCD to be loaded into this channel after the major loop completes its execution."] ESG_1 = 1, } impl From<ESG_A> for bool { #[inline(always)] fn from(variant: ESG_A) -> Self { variant as u8 != 0 } } #[doc = "Field `ESG` reader - Enable Scatter/Gather Processing"] pub struct ESG_R(crate::FieldReader<bool, ESG_A>); impl ESG_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { ESG_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> ESG_A { match self.bits { false => ESG_A::ESG_0, true => ESG_A::ESG_1, } } #[doc = "Checks if the value of the field is `ESG_0`"] #[inline(always)] pub fn is_esg_0(&self) -> bool { **self == ESG_A::ESG_0 } #[doc = "Checks if the value of the field is `ESG_1`"] #[inline(always)] pub fn is_esg_1(&self) -> bool { **self == ESG_A::ESG_1 } } impl core::ops::Deref for ESG_R { type Target = crate::FieldReader<bool, ESG_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `ESG` writer - Enable Scatter/Gather Processing"] pub struct ESG_W<'a> { w: &'a mut W, } impl<'a> ESG_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: ESG_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "The current channel's TCD is normal format."] #[inline(always)] pub fn esg_0(self) -> &'a mut W { self.variant(ESG_A::ESG_0) } #[doc = "The current channel's TCD specifies a scatter gather format. The DLASTSGA field provides a memory pointer to the next TCD to be loaded into this channel after the major loop completes its execution."] #[inline(always)] pub fn esg_1(self) -> &'a mut W { self.variant(ESG_A::ESG_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 4)) | ((value as u16 & 0x01) << 4); self.w } } #[doc = "Enable channel-to-channel linking on major loop complete\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum MAJORELINK_A { #[doc = "0: The channel-to-channel linking is disabled."] MAJORELINK_0 = 0, #[doc = "1: The channel-to-channel linking is enabled."] MAJORELINK_1 = 1, } impl From<MAJORELINK_A> for bool { #[inline(always)] fn from(variant: MAJORELINK_A) -> Self { variant as u8 != 0 } } #[doc = "Field `MAJORELINK` reader - Enable channel-to-channel linking on major loop complete"] pub struct MAJORELINK_R(crate::FieldReader<bool, MAJORELINK_A>); impl MAJORELINK_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { MAJORELINK_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> MAJORELINK_A { match self.bits { false => MAJORELINK_A::MAJORELINK_0, true => MAJORELINK_A::MAJORELINK_1, } } #[doc = "Checks if the value of the field is `MAJORELINK_0`"] #[inline(always)] pub fn is_majorelink_0(&self) -> bool { **self == MAJORELINK_A::MAJORELINK_0 } #[doc = "Checks if the value of the field is `MAJORELINK_1`"] #[inline(always)] pub fn is_majorelink_1(&self) -> bool { **self == MAJORELINK_A::MAJORELINK_1 } } impl core::ops::Deref for MAJORELINK_R { type Target = crate::FieldReader<bool, MAJORELINK_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `MAJORELINK` 
writer - Enable channel-to-channel linking on major loop complete"] pub struct MAJORELINK_W<'a> { w: &'a mut W, } impl<'a> MAJORELINK_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: MAJORELINK_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "The channel-to-channel linking is disabled."] #[inline(always)] pub fn majorelink_0(self) -> &'a mut W { self.variant(MAJORELINK_A::MAJORELINK_0) } #[doc = "The channel-to-channel linking is enabled."] #[inline(always)] pub fn majorelink_1(self) -> &'a mut W { self.variant(MAJORELINK_A::MAJORELINK_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 5)) | ((value as u16 & 0x01) << 5); self.w } } #[doc = "Field `ACTIVE` reader - Channel Active"] pub struct ACTIVE_R(crate::FieldReader<bool, bool>); impl ACTIVE_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { ACTIVE_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for ACTIVE_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `DONE` reader - Channel Done"] pub struct DONE_R(crate::FieldReader<bool, bool>); impl DONE_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { DONE_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for DONE_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `DONE` writer - Channel Done"] pub struct DONE_W<'a> { w: &'a mut W, } impl<'a> DONE_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 7)) | ((value as u16 & 0x01) << 7); self.w } } #[doc = "Field `MAJORLINKCH` reader - Major Loop Link Channel Number"] pub struct MAJORLINKCH_R(crate::FieldReader<u8, u8>); impl MAJORLINKCH_R { #[inline(always)] pub(crate) fn new(bits: u8) -> Self { MAJORLINKCH_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for MAJORLINKCH_R { type Target = crate::FieldReader<u8, u8>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `MAJORLINKCH` writer - Major Loop Link Channel Number"] pub struct MAJORLINKCH_W<'a> { w: &'a mut W, } impl<'a> MAJORLINKCH_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x0f << 8)) | ((value as u16 & 0x0f) << 8); self.w } } #[doc = "Bandwidth Control\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u8)] pub enum BWC_A { #[doc = "0: No eDMA engine stalls."] BWC_0 = 0, #[doc = "2: eDMA engine stalls for 4 cycles after each R/W."] BWC_2 = 2, #[doc = "3: eDMA engine stalls for 8 cycles after each R/W."] BWC_3 = 3, } impl From<BWC_A> for u8 { #[inline(always)] fn from(variant: BWC_A) -> Self { variant as _ } } #[doc = "Field `BWC` reader - Bandwidth Control"] pub struct BWC_R(crate::FieldReader<u8, BWC_A>); impl BWC_R { #[inline(always)] pub(crate) fn new(bits: u8) -> Self { 
BWC_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> Option<BWC_A> { match self.bits { 0 => Some(BWC_A::BWC_0), 2 => Some(BWC_A::BWC_2), 3 => Some(BWC_A::BWC_3), _ => None, } } #[doc = "Checks if the value of the field is `BWC_0`"] #[inline(always)] pub fn is_bwc_0(&self) -> bool { **self == BWC_A::BWC_0 } #[doc = "Checks if the value of the field is `BWC_2`"] #[inline(always)] pub fn is_bwc_2(&self) -> bool { **self == BWC_A::BWC_2 } #[doc = "Checks if the value of the field is `BWC_3`"] #[inline(always)] pub fn is_bwc_3(&self) -> bool { **self == BWC_A::BWC_3 } } impl core::ops::Deref for BWC_R { type Target = crate::FieldReader<u8, BWC_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `BWC` writer - Bandwidth Control"] pub struct BWC_W<'a> { w: &'a mut W, } impl<'a> BWC_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: BWC_A) -> &'a mut W { unsafe { self.bits(variant.into()) } } #[doc = "No eDMA engine stalls."] #[inline(always)] pub fn bwc_0(self) -> &'a mut W { self.variant(BWC_A::BWC_0) } #[doc = "eDMA engine stalls for 4 cycles after each R/W."] #[inline(always)] pub fn bwc_2(self) -> &'a mut W { self.variant(BWC_A::BWC_2) } #[doc = "eDMA engine stalls for 8 cycles after each R/W."] #[inline(always)] pub fn bwc_3(self) -> &'a mut W { self.variant(BWC_A::BWC_3) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x03 << 14)) | ((value as u16 & 0x03) << 14); self.w } } impl R { #[doc = "Bit 0 - Channel Start"] #[inline(always)] pub fn start(&self) -> START_R { START_R::new((self.bits & 0x01) != 0) } #[doc = "Bit 1 - Enable an interrupt when major iteration count completes."] #[inline(always)] pub fn intmajor(&self) -> INTMAJOR_R { INTMAJOR_R::new(((self.bits >> 1) & 0x01) != 0) } #[doc = "Bit 2 - Enable an interrupt when major counter is half complete."] #[inline(always)] pub fn inthalf(&self) -> INTHALF_R { INTHALF_R::new(((self.bits >> 2) & 0x01) != 0) } #[doc = "Bit 3 - Disable Request"] #[inline(always)] pub fn dreq(&self) -> DREQ_R { DREQ_R::new(((self.bits >> 3) & 0x01) != 0) } #[doc = "Bit 4 - Enable Scatter/Gather Processing"] #[inline(always)] pub fn esg(&self) -> ESG_R { ESG_R::new(((self.bits >> 4) & 0x01) != 0) } #[doc = "Bit 5 - Enable channel-to-channel linking on major loop complete"] #[inline(always)] pub fn majorelink(&self) -> MAJORELINK_R { MAJORELINK_R::new(((self.bits >> 5) & 0x01) != 0) } #[doc = "Bit 6 - Channel Active"] #[inline(always)] pub fn active(&self) -> ACTIVE_R { ACTIVE_R::new(((self.bits >> 6) & 0x01) != 0) } #[doc = "Bit 7 - Channel Done"] #[inline(always)] pub fn done(&self) -> DONE_R { DONE_R::new(((self.bits >> 7) & 0x01) != 0) } #[doc = "Bits 8:11 - Major Loop Link Channel Number"] #[inline(always)] pub fn majorlinkch(&self) -> MAJORLINKCH_R { MAJORLINKCH_R::new(((self.bits >> 8) & 0x0f) as u8) } #[doc = "Bits 14:15 - Bandwidth Control"] #[inline(always)] pub fn bwc(&self) -> BWC_R { BWC_R::new(((self.bits >> 14) & 0x03) as u8) } } impl W { #[doc = "Bit 0 - Channel Start"] #[inline(always)] pub fn start(&mut self) -> START_W { START_W { w: self } } #[doc = "Bit 1 - Enable an interrupt when major iteration count completes."] #[inline(always)] pub fn intmajor(&mut self) -> INTMAJOR_W { INTMAJOR_W { w: self } } #[doc = "Bit 2 - Enable an interrupt when major counter is half complete."] #[inline(always)] pub fn 
inthalf(&mut self) -> INTHALF_W { INTHALF_W { w: self } } #[doc = "Bit 3 - Disable Request"] #[inline(always)] pub fn dreq(&mut self) -> DREQ_W { DREQ_W { w: self } } #[doc = "Bit 4 - Enable Scatter/Gather Processing"] #[inline(always)] pub fn esg(&mut self) -> ESG_W { ESG_W { w: self } } #[doc = "Bit 5 - Enable channel-to-channel linking on major loop complete"] #[inline(always)] pub fn majorelink(&mut self) -> MAJORELINK_W { MAJORELINK_W { w: self } } #[doc = "Bit 7 - Channel Done"] #[inline(always)] pub fn done(&mut self) -> DONE_W { DONE_W { w: self } } #[doc = "Bits 8:11 - Major Loop Link Channel Number"] #[inline(always)] pub fn majorlinkch(&mut self) -> MAJORLINKCH_W { MAJORLINKCH_W { w: self } } #[doc = "Bits 14:15 - Bandwidth Control"] #[inline(always)] pub fn bwc(&mut self) -> BWC_W { BWC_W { w: self } } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u16) -> &mut Self { self.0.bits(bits); self } } #[doc = "TCD Control and Status\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [tcd0_csr](index.html) module"] pub struct TCD0_CSR_SPEC; impl crate::RegisterSpec for TCD0_CSR_SPEC { type Ux = u16; } #[doc = "`read()` method returns [tcd0_csr::R](R) reader structure"] impl crate::Readable for TCD0_CSR_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [tcd0_csr::W](W) writer structure"] impl crate::Writable for TCD0_CSR_SPEC { type Writer = W; } #[doc = "`reset()` method sets TCD0_CSR to value 0"] impl crate::Resettable for TCD0_CSR_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
30.925659
411
0.578241
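The svd2rust-style accessors in the TCD0_CSR file above are normally driven through the closure-based `read`/`modify` API named in the register's doc comment. A minimal, hypothetical sketch follows; the peripheral type name `DMA`, its lowercase `tcd0_csr` register field, and the `set_bit()`/`bit_is_set()` helpers on the single-bit proxies are assumptions (they are defined outside this excerpt, following the usual svd2rust conventions), not part of the shown file.

// Hypothetical usage sketch for the generated TCD0_CSR API.
// `DMA` stands for whatever peripheral struct in the device crate owns this
// register; `set_bit()` / `bit_is_set()` are the standard svd2rust helpers
// assumed to exist on the single-bit reader/writer proxies.
fn start_channel_with_stall(dma: &DMA) {
    // Read-modify-write: request a 4-cycle stall after each R/W (BWC_2)
    // and set the START bit in a single access.
    dma.tcd0_csr.modify(|_, w| w.bwc().bwc_2().start().set_bit());
    // Spin until the eDMA engine reports the channel as done.
    while !dma.tcd0_csr.read().done().bit_is_set() {}
}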
5b0a539a8944dbcb38ca0b84cf52f5f8433c43c3
3,456
use anyhow::Context; use crate::{ nlas::nsid::Nla, traits::{Emitable, Parseable}, DecodeError, NsidHeader, NsidMessageBuffer, }; #[derive(Debug, PartialEq, Eq, Clone, Default)] pub struct NsidMessage { pub header: NsidHeader, pub nlas: Vec<Nla>, } impl<'a, T: AsRef<[u8]> + 'a> Parseable<NsidMessageBuffer<&'a T>> for NsidMessage { fn parse(buf: &NsidMessageBuffer<&'a T>) -> Result<Self, DecodeError> { Ok(Self { header: NsidHeader::parse(buf).context("failed to parse nsid message header")?, nlas: Vec::<Nla>::parse(buf).context("failed to parse nsid message NLAs")?, }) } } impl<'a, T: AsRef<[u8]> + 'a> Parseable<NsidMessageBuffer<&'a T>> for Vec<Nla> { fn parse(buf: &NsidMessageBuffer<&'a T>) -> Result<Self, DecodeError> { let mut nlas = vec![]; for nla_buf in buf.nlas() { nlas.push(Nla::parse(&nla_buf?)?); } Ok(nlas) } } impl Emitable for NsidMessage { fn buffer_len(&self) -> usize { self.header.buffer_len() + self.nlas.as_slice().buffer_len() } fn emit(&self, buffer: &mut [u8]) { self.header.emit(buffer); self.nlas .as_slice() .emit(&mut buffer[self.header.buffer_len()..]); } } #[cfg(test)] mod test { use crate::{ nlas::nsid::Nla, traits::ParseableParametrized, NetlinkBuffer, NsidHeader, NsidMessage, RtnlMessage, RtnlMessageBuffer, NETNSA_NSID_NOT_ASSIGNED, RTM_GETNSID, RTM_NEWNSID, }; #[rustfmt::skip] #[test] fn get_ns_id_request() { let data = vec![ 0x1c, 0x00, 0x00, 0x00, // length = 28 0x5a, 0x00, // message type = 90 = RTM_GETNSID 0x01, 0x00, // flags 0x00, 0x00, 0x00, 0x00, // seq number 0x00, 0x00, 0x00, 0x00, // pid // GETNSID message 0x00, // rtgen family 0x00, 0x00, 0x00, // padding // NLA 0x08, 0x00, // length = 8 0x03, 0x00, // type = 3 (Fd) 0x04, 0x00, 0x00, 0x00 // 4 ]; let expected = RtnlMessage::GetNsId(NsidMessage { header: NsidHeader { rtgen_family: 0 }, nlas: vec![Nla::Fd(4)], }); let actual = RtnlMessage::parse_with_param(&RtnlMessageBuffer::new(&NetlinkBuffer::new(&data).payload()), RTM_GETNSID).unwrap(); assert_eq!(expected, actual); } #[rustfmt::skip] #[test] fn get_ns_id_response() { let data = vec![ 0x1c, 0x00, 0x00, 0x00, // length = 28 0x58, 0x00, // message type = RTM_NEWNSID 0x00, 0x00, // flags 0x00, 0x00, 0x00, 0x00, // seq number 0x76, 0x12, 0x00, 0x00, // pid // NETNSID message 0x00, // rtgen family 0x00, 0x00, 0x00, // padding // NLA 0x08, 0x00, // length 0x01, 0x00, // type = NETNSA_NSID 0xff, 0xff, 0xff, 0xff // -1 ]; let expected = RtnlMessage::NewNsId(NsidMessage { header: NsidHeader { rtgen_family: 0 }, nlas: vec![Nla::Id(NETNSA_NSID_NOT_ASSIGNED)], }); let nl_buffer = NetlinkBuffer::new(&data).payload(); let rtnl_buffer = RtnlMessageBuffer::new(&nl_buffer); let actual = RtnlMessage::parse_with_param(&rtnl_buffer, RTM_NEWNSID).unwrap(); assert_eq!(expected, actual); } }
32
136
0.554398
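The Emitable/Parseable pair in the nsid message file above can also be exercised as a round trip. The sketch below is written as an extra test that could sit next to the existing ones in that module; it only uses items the file already imports, except `NsidMessageBuffer::new`, which is assumed to follow the same constructor convention as the other buffer types used in the tests.

#[cfg(test)]
mod roundtrip_test {
    use crate::{
        nlas::nsid::Nla,
        traits::{Emitable, Parseable},
        NsidHeader, NsidMessage, NsidMessageBuffer,
    };

    #[test]
    fn fd_request_roundtrip() {
        // Build a message, emit it into a byte buffer, then parse it back.
        let msg = NsidMessage {
            header: NsidHeader { rtgen_family: 0 },
            nlas: vec![Nla::Fd(4)],
        };
        let mut buf = vec![0u8; msg.buffer_len()];
        msg.emit(&mut buf);
        // `new` (unchecked) is assumed here; `new_checked` would also work.
        let parsed = NsidMessage::parse(&NsidMessageBuffer::new(&buf)).unwrap();
        assert_eq!(parsed, msg);
    }
}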
5007d585fb92b94d05b1027899af5c3279b3c7e1
830
// Copyright (c) The Libra Core Contributors // SPDX-License-Identifier: Apache-2.0 use move_vm_types::{ gas_schedule::NativeCostIndex, loaded_data::runtime_types::Type, natives::function::{native_gas, NativeContext, NativeResult}, values::{values_impl::SignerRef, Value}, }; use std::collections::VecDeque; use vm::errors::VMResult; pub fn native_borrow_address( context: &impl NativeContext, _ty_args: Vec<Type>, mut arguments: VecDeque<Value>, ) -> VMResult<NativeResult> { debug_assert!(_ty_args.is_empty()); debug_assert!(arguments.len() == 1); let signer_reference = pop_arg!(arguments, SignerRef); let cost = native_gas(context.cost_table(), NativeCostIndex::SIGNER_BORROW, 1); Ok(NativeResult::ok( cost, vec![signer_reference.borrow_signer()?], )) }
28.62069
83
0.7
6702e253920927b2c97dffd5c38ee7afdb261999
65,840
// Copyright 2013-2019, The Gtk-rs Project Developers. // See the COPYRIGHT file at the top-level directory of this distribution. // Licensed under the MIT license, see the LICENSE file or <https://opensource.org/licenses/MIT> #![allow(non_camel_case_types)] #![cfg_attr(feature = "cargo-clippy", allow(unreadable_literal, write_literal))] extern crate libc; #[cfg(feature = "use_glib")] extern crate glib_sys as glib_ffi; #[cfg(any(feature = "xlib", feature = "dox"))] extern crate x11; #[cfg(all(windows, feature = "win32-surface"))] extern crate winapi as winapi_orig; #[cfg(all(windows, feature = "win32-surface"))] pub mod winapi { pub use winapi_orig::shared::windef::HDC; } #[cfg(all(feature = "dox", not(all(windows, feature = "win32-surface"))))] pub mod winapi { use libc::c_void; #[repr(C)] pub struct HDC(c_void); } use libc::{c_char, c_double, c_int, c_uchar, c_uint, c_ulong, c_void}; #[cfg(any(feature = "xlib", feature = "dox"))] use x11::xlib; pub type cairo_antialias_t = c_int; pub type cairo_content_t = c_int; pub type cairo_device_type_t = c_int; pub type cairo_extend_t = c_int; pub type cairo_fill_rule_t = c_int; pub type cairo_filter_t = c_int; pub type cairo_font_slant_t = c_int; pub type cairo_font_type_t = c_int; pub type cairo_font_weight_t = c_int; pub type cairo_format_t = c_int; pub type cairo_ft_synthesize_t = c_uint; pub type cairo_hint_metrics_t = c_int; pub type cairo_hint_style_t = c_int; pub type cairo_line_cap_t = c_int; pub type cairo_line_join_t = c_int; pub type cairo_operator_t = c_int; pub type cairo_pattern_type_t = c_int; pub type cairo_path_data_type_t = c_int; pub type cairo_region_overlap_t = c_int; #[cfg(any(feature = "script", feature = "dox"))] pub type cairo_script_mode_t = c_int; pub type cairo_status_t = c_int; pub type cairo_subpixel_order_t = c_int; pub type cairo_surface_type_t = c_int; #[cfg(any(all(feature = "svg", feature = "v1_16"), feature = "dox"))] pub type cairo_svg_unit_t = c_int; pub type cairo_text_cluster_flags_t = c_int; #[cfg(any(all(feature = "pdf", feature = "v1_16"), feature = "dox"))] pub type cairo_pdf_outline_flags_t = c_int; #[cfg(any(all(feature = "pdf", feature = "v1_16"), feature = "dox"))] pub type cairo_pdf_metadata_t = c_int; #[cfg(any(feature = "pdf", feature = "dox"))] pub type cairo_pdf_version_t = c_int; #[cfg(any(feature = "svg", feature = "dox"))] pub type cairo_svg_version_t = c_int; #[cfg(any(feature = "ps", feature = "dox"))] pub type cairo_ps_level_t = c_int; pub type cairo_mesh_corner_t = c_uint; macro_rules! 
debug_impl { ($name:ty) => { impl ::std::fmt::Debug for $name { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { write!(f, "{} @ {:?}", stringify!($name), self as *const _) } } }; } #[repr(C)] pub struct cairo_t(c_void); debug_impl!(cairo_t); #[repr(C)] pub struct cairo_surface_t(c_void); debug_impl!(cairo_surface_t); #[repr(C)] pub struct cairo_device_t(c_void); debug_impl!(cairo_device_t); #[repr(C)] pub struct cairo_pattern_t(c_void); #[cfg(any(feature = "xcb", feature = "dox"))] #[repr(C)] pub struct xcb_connection_t(c_void); #[cfg(any(feature = "xcb", feature = "dox"))] debug_impl!(xcb_connection_t); #[cfg(any(feature = "xcb", feature = "dox"))] pub type xcb_drawable_t = u32; #[cfg(any(feature = "xcb", feature = "dox"))] pub type xcb_pixmap_t = u32; #[cfg(any(feature = "xcb", feature = "dox"))] #[repr(C)] pub struct xcb_visualtype_t(c_void); #[cfg(any(feature = "xcb", feature = "dox"))] debug_impl!(xcb_visualtype_t); #[cfg(any(feature = "xcb", feature = "dox"))] #[repr(C)] pub struct xcb_screen_t(c_void); #[cfg(any(feature = "xcb", feature = "dox"))] debug_impl!(xcb_screen_t); #[cfg(any(feature = "xcb", feature = "dox"))] #[repr(C)] pub struct xcb_render_pictforminfo_t(c_void); #[cfg(any(feature = "xcb", feature = "dox"))] debug_impl!(xcb_render_pictforminfo_t); #[repr(C)] #[derive(Clone, Copy, Debug, PartialEq)] pub struct cairo_rectangle_t { pub x: f64, pub y: f64, pub width: f64, pub height: f64, } #[repr(C)] #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct cairo_rectangle_int_t { pub x: i32, pub y: i32, pub width: i32, pub height: i32, } #[repr(C)] #[derive(Debug, Clone, Copy)] pub struct cairo_rectangle_list_t { pub status: cairo_status_t, pub rectangles: *mut cairo_rectangle_t, pub num_rectangles: c_int, } #[repr(C)] #[derive(Debug, Clone, Copy)] pub struct cairo_path_t { pub status: cairo_status_t, pub data: *mut cairo_path_data, pub num_data: c_int, } #[repr(C)] #[derive(Debug, Clone, Copy)] pub struct cairo_path_data_header { pub data_type: cairo_path_data_type_t, pub length: c_int, } #[repr(C)] #[derive(Clone, Copy)] pub union cairo_path_data { pub header: cairo_path_data_header, pub point: [f64; 2], } #[repr(C)] pub struct cairo_glyph_t(c_void); debug_impl!(cairo_glyph_t); #[repr(C)] pub struct cairo_region_t(c_void); debug_impl!(cairo_region_t); #[repr(C)] pub struct cairo_font_face_t(c_void); debug_impl!(cairo_font_face_t); #[repr(C)] pub struct cairo_scaled_font_t(c_void); debug_impl!(cairo_scaled_font_t); #[repr(C)] pub struct cairo_font_options_t(c_void); debug_impl!(cairo_font_options_t); #[repr(C)] #[derive(Clone, Copy, Debug)] pub struct FontExtents { pub ascent: c_double, pub descent: c_double, pub height: c_double, pub max_x_advance: c_double, pub max_y_advance: c_double, } #[repr(C)] #[derive(Clone, Copy, Debug)] pub struct Glyph { pub index: c_ulong, pub x: c_double, pub y: c_double, } #[repr(C)] #[derive(Clone, Copy, Debug)] pub struct TextCluster { pub num_bytes: c_int, pub num_glyphs: c_int, } #[repr(C)] #[derive(Clone, Copy, Debug)] pub struct TextExtents { pub x_bearing: c_double, pub y_bearing: c_double, pub width: c_double, pub height: c_double, pub x_advance: c_double, pub y_advance: c_double, } #[repr(C)] #[derive(Debug, Clone, Copy, PartialEq)] pub struct Matrix { pub xx: c_double, pub yx: c_double, pub xy: c_double, pub yy: c_double, pub x0: c_double, pub y0: c_double, } impl ::std::fmt::Display for Matrix { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { write!(f, "Matrix") } } #[repr(C)] 
#[derive(Clone, Copy, Default, Debug)] pub struct cairo_user_data_key_t { pub unused: c_int, } #[repr(C)] #[derive(Debug, Clone, Copy)] pub struct cairo_bool_t { value: c_int, } impl cairo_bool_t { pub fn as_bool(self) -> bool { self.value != 0 } } impl From<bool> for cairo_bool_t { fn from(b: bool) -> cairo_bool_t { let value = if b { 1 } else { 0 }; cairo_bool_t { value } } } pub type CGContextRef = *mut c_void; pub type cairo_destroy_func_t = Option<unsafe extern "C" fn(*mut c_void)>; pub type cairo_read_func_t = Option<unsafe extern "C" fn(*mut c_void, *mut c_uchar, c_uint) -> cairo_status_t>; pub type cairo_write_func_t = Option<unsafe extern "C" fn(*mut c_void, *mut c_uchar, c_uint) -> cairo_status_t>; #[cfg(any(feature = "freetype", feature = "dox"))] pub type FT_Face = *mut c_void; #[cfg(any(feature = "freetype", feature = "dox"))] pub type FcPattern = c_void; extern "C" { // CAIRO CONTEXT pub fn cairo_create(target: *mut cairo_surface_t) -> *mut cairo_t; pub fn cairo_reference(cr: *mut cairo_t) -> *mut cairo_t; pub fn cairo_destroy(cr: *mut cairo_t); pub fn cairo_status(cr: *mut cairo_t) -> cairo_status_t; pub fn cairo_save(cr: *mut cairo_t); pub fn cairo_restore(cr: *mut cairo_t); pub fn cairo_get_target(cr: *mut cairo_t) -> *mut cairo_surface_t; pub fn cairo_push_group(cr: *mut cairo_t); pub fn cairo_push_group_with_content(cr: *mut cairo_t, content: cairo_content_t); pub fn cairo_pop_group(cr: *mut cairo_t) -> *mut cairo_pattern_t; pub fn cairo_pop_group_to_source(cr: *mut cairo_t); pub fn cairo_get_group_target(cr: *mut cairo_t) -> *mut cairo_surface_t; pub fn cairo_set_source_rgb(cr: *mut cairo_t, red: c_double, green: c_double, blue: c_double); pub fn cairo_set_source_rgba( cr: *mut cairo_t, red: c_double, green: c_double, blue: c_double, alpha: c_double, ); pub fn cairo_set_source(cr: *mut cairo_t, source: *mut cairo_pattern_t); pub fn cairo_set_source_surface( cr: *mut cairo_t, surface: *mut cairo_surface_t, x: c_double, y: c_double, ); pub fn cairo_get_source(cr: *mut cairo_t) -> *mut cairo_pattern_t; pub fn cairo_set_antialias(cr: *mut cairo_t, antialias: cairo_antialias_t); pub fn cairo_get_antialias(cr: *mut cairo_t) -> cairo_antialias_t; pub fn cairo_set_dash( cr: *mut cairo_t, dashes: *const c_double, num_dashes: c_int, offset: c_double, ); pub fn cairo_get_dash_count(cr: *mut cairo_t) -> c_int; pub fn cairo_get_dash(cr: *mut cairo_t, dashes: *mut c_double, offset: *mut c_double); pub fn cairo_set_fill_rule(cr: *mut cairo_t, fill_rule: cairo_fill_rule_t); pub fn cairo_get_fill_rule(cr: *mut cairo_t) -> cairo_fill_rule_t; pub fn cairo_set_line_cap(cr: *mut cairo_t, line_cap: cairo_line_cap_t); pub fn cairo_get_line_cap(cr: *mut cairo_t) -> cairo_line_cap_t; pub fn cairo_set_line_join(cr: *mut cairo_t, line_join: cairo_line_join_t); pub fn cairo_get_line_join(cr: *mut cairo_t) -> cairo_line_join_t; pub fn cairo_set_line_width(cr: *mut cairo_t, width: c_double); pub fn cairo_get_line_width(cr: *mut cairo_t) -> c_double; pub fn cairo_set_miter_limit(cr: *mut cairo_t, limit: c_double); pub fn cairo_get_miter_limit(cr: *mut cairo_t) -> c_double; pub fn cairo_set_operator(cr: *mut cairo_t, op: cairo_operator_t); pub fn cairo_get_operator(cr: *mut cairo_t) -> cairo_operator_t; pub fn cairo_set_tolerance(cr: *mut cairo_t, tolerance: c_double); pub fn cairo_get_tolerance(cr: *mut cairo_t) -> c_double; pub fn cairo_clip(cr: *mut cairo_t); pub fn cairo_clip_preserve(cr: *mut cairo_t); pub fn cairo_clip_extents( cr: *mut cairo_t, x1: *mut c_double, y1: *mut c_double, x2: *mut 
c_double, y2: *mut c_double, ); pub fn cairo_in_clip(cr: *mut cairo_t, x: c_double, y: c_double) -> cairo_bool_t; pub fn cairo_reset_clip(cr: *mut cairo_t); pub fn cairo_rectangle_list_destroy(rectangle_list: *mut cairo_rectangle_list_t); pub fn cairo_copy_clip_rectangle_list(cr: *mut cairo_t) -> *mut cairo_rectangle_list_t; pub fn cairo_fill(cr: *mut cairo_t); pub fn cairo_fill_preserve(cr: *mut cairo_t); pub fn cairo_fill_extents( cr: *mut cairo_t, x1: *mut c_double, y1: *mut c_double, x2: *mut c_double, y2: *mut c_double, ); pub fn cairo_in_fill(cr: *mut cairo_t, x: c_double, y: c_double) -> cairo_bool_t; pub fn cairo_mask(cr: *mut cairo_t, pattern: *mut cairo_pattern_t); pub fn cairo_mask_surface( cr: *mut cairo_t, surface: *mut cairo_surface_t, surface_x: c_double, surface_y: c_double, ); pub fn cairo_paint(cr: *mut cairo_t); pub fn cairo_paint_with_alpha(cr: *mut cairo_t, alpha: c_double); pub fn cairo_stroke(cr: *mut cairo_t); pub fn cairo_stroke_preserve(cr: *mut cairo_t); pub fn cairo_stroke_extents( cr: *mut cairo_t, x1: *mut c_double, y1: *mut c_double, x2: *mut c_double, y2: *mut c_double, ); pub fn cairo_in_stroke(cr: *mut cairo_t, x: c_double, y: c_double) -> cairo_bool_t; pub fn cairo_copy_page(cr: *mut cairo_t); pub fn cairo_show_page(cr: *mut cairo_t); pub fn cairo_get_reference_count(cr: *mut cairo_t) -> c_uint; #[cfg(any(feature = "v1_16", feature = "dox"))] pub fn cairo_tag_begin(cr: *mut cairo_t, tag_name: *const c_char, attributes: *const c_char); #[cfg(any(feature = "v1_16", feature = "dox"))] pub fn cairo_tag_end(cr: *mut cairo_t, tag_name: *const c_char); // CAIRO UTILS pub fn cairo_status_to_string(status: cairo_status_t) -> *const c_char; pub fn cairo_debug_reset_static_data(); pub fn cairo_version() -> c_int; pub fn cairo_version_string() -> *const c_char; // CAIRO PATHS pub fn cairo_copy_path(cr: *mut cairo_t) -> *mut cairo_path_t; pub fn cairo_copy_path_flat(cr: *mut cairo_t) -> *mut cairo_path_t; pub fn cairo_path_destroy(path: *mut cairo_path_t); pub fn cairo_append_path(cr: *mut cairo_t, path: *mut cairo_path_t); pub fn cairo_has_current_point(cr: *mut cairo_t) -> cairo_bool_t; pub fn cairo_get_current_point(cr: *mut cairo_t, x: *mut c_double, y: *mut c_double); pub fn cairo_new_path(cr: *mut cairo_t); pub fn cairo_new_sub_path(cr: *mut cairo_t); pub fn cairo_close_path(cr: *mut cairo_t); pub fn cairo_arc( cr: *mut cairo_t, xc: c_double, yc: c_double, radius: c_double, angle1: c_double, angle2: c_double, ); pub fn cairo_arc_negative( cr: *mut cairo_t, xc: c_double, yc: c_double, radius: c_double, angle1: c_double, angle2: c_double, ); pub fn cairo_curve_to( cr: *mut cairo_t, x1: c_double, y1: c_double, x2: c_double, y2: c_double, x3: c_double, y3: c_double, ); pub fn cairo_line_to(cr: *mut cairo_t, x: c_double, y: c_double); pub fn cairo_move_to(cr: *mut cairo_t, x: c_double, y: c_double); pub fn cairo_rectangle( cr: *mut cairo_t, x: c_double, y: c_double, width: c_double, height: c_double, ); pub fn cairo_glyph_path(cr: *mut cairo_t, glyphs: *const Glyph, num_glyphs: c_int); pub fn cairo_text_path(cr: *mut cairo_t, utf8: *const c_char); pub fn cairo_rel_curve_to( cr: *mut cairo_t, dx1: c_double, dy1: c_double, dx2: c_double, dy2: c_double, dx3: c_double, dy3: c_double, ); pub fn cairo_rel_line_to(cr: *mut cairo_t, dx: c_double, dy: c_double); pub fn cairo_rel_move_to(cr: *mut cairo_t, dx: c_double, dy: c_double); pub fn cairo_path_extents( cr: *mut cairo_t, x1: *mut c_double, y1: *mut c_double, x2: *mut c_double, y2: *mut c_double, ); // CAIRO 
TRANSFORMATIONS pub fn cairo_translate(cr: *mut cairo_t, tx: c_double, ty: c_double); pub fn cairo_scale(cr: *mut cairo_t, sx: c_double, sy: c_double); pub fn cairo_rotate(cr: *mut cairo_t, angle: c_double); pub fn cairo_transform(cr: *mut cairo_t, matrix: *const Matrix); pub fn cairo_set_matrix(cr: *mut cairo_t, matrix: *const Matrix); pub fn cairo_get_matrix(cr: *mut cairo_t, matrix: *mut Matrix); pub fn cairo_identity_matrix(cr: *mut cairo_t); pub fn cairo_user_to_device(cr: *mut cairo_t, x: *mut c_double, y: *mut c_double); pub fn cairo_user_to_device_distance(cr: *mut cairo_t, dx: *mut c_double, dy: *mut c_double); pub fn cairo_device_to_user(cr: *mut cairo_t, x: *mut c_double, y: *mut c_double); pub fn cairo_device_to_user_distance(cr: *mut cairo_t, dx: *mut c_double, dy: *mut c_double); // CAIRO PATTERNS pub fn cairo_pattern_add_color_stop_rgb( pattern: *mut cairo_pattern_t, offset: c_double, red: c_double, green: c_double, blue: c_double, ); pub fn cairo_pattern_add_color_stop_rgba( pattern: *mut cairo_pattern_t, offset: c_double, red: c_double, green: c_double, blue: c_double, alpha: c_double, ); pub fn cairo_pattern_get_color_stop_count( pattern: *mut cairo_pattern_t, count: *mut c_int, ) -> cairo_status_t; pub fn cairo_pattern_get_color_stop_rgba( pattern: *mut cairo_pattern_t, index: c_int, offset: *mut c_double, red: *mut c_double, green: *mut c_double, blue: *mut c_double, alpha: *mut c_double, ) -> cairo_status_t; pub fn cairo_pattern_create_rgb( red: c_double, green: c_double, blue: c_double, ) -> *mut cairo_pattern_t; pub fn cairo_pattern_create_rgba( red: c_double, green: c_double, blue: c_double, alpha: c_double, ) -> *mut cairo_pattern_t; pub fn cairo_pattern_get_rgba( pattern: *mut cairo_pattern_t, red: *mut c_double, green: *mut c_double, blue: *mut c_double, alpha: *mut c_double, ) -> cairo_status_t; pub fn cairo_pattern_create_for_surface(surface: *mut cairo_surface_t) -> *mut cairo_pattern_t; pub fn cairo_pattern_get_surface( pattern: *mut cairo_pattern_t, surface: *mut *mut cairo_surface_t, ) -> cairo_status_t; pub fn cairo_pattern_create_linear( x0: c_double, y0: c_double, x1: c_double, y1: c_double, ) -> *mut cairo_pattern_t; pub fn cairo_pattern_get_linear_points( pattern: *mut cairo_pattern_t, x0: *mut c_double, y0: *mut c_double, x1: *mut c_double, y1: *mut c_double, ) -> cairo_status_t; pub fn cairo_pattern_create_radial( cx0: c_double, cy0: c_double, radius0: c_double, cx1: c_double, cy1: c_double, radius1: c_double, ) -> *mut cairo_pattern_t; pub fn cairo_pattern_get_radial_circles( pattern: *mut cairo_pattern_t, x0: *mut c_double, y0: *mut c_double, r0: *mut c_double, x1: *mut c_double, y1: *mut c_double, r1: *mut c_double, ) -> cairo_status_t; pub fn cairo_pattern_create_mesh() -> *mut cairo_pattern_t; pub fn cairo_mesh_pattern_begin_patch(pattern: *mut cairo_pattern_t); pub fn cairo_mesh_pattern_end_patch(pattern: *mut cairo_pattern_t); pub fn cairo_mesh_pattern_move_to(pattern: *mut cairo_pattern_t, x: c_double, y: c_double); pub fn cairo_mesh_pattern_line_to(pattern: *mut cairo_pattern_t, x: c_double, y: c_double); pub fn cairo_mesh_pattern_curve_to( pattern: *mut cairo_pattern_t, x1: c_double, y1: c_double, x2: c_double, y2: c_double, x3: c_double, y3: c_double, ); pub fn cairo_mesh_pattern_set_control_point( pattern: *mut cairo_pattern_t, point_num: cairo_mesh_corner_t, x: c_double, y: c_double, ); pub fn cairo_mesh_pattern_set_corner_color_rgb( pattern: *mut cairo_pattern_t, corner_num: cairo_mesh_corner_t, red: c_double, green: c_double, blue: 
c_double, ); pub fn cairo_mesh_pattern_set_corner_color_rgba( pattern: *mut cairo_pattern_t, corner_num: cairo_mesh_corner_t, red: c_double, green: c_double, blue: c_double, alpha: c_double, ); pub fn cairo_mesh_pattern_get_patch_count( pattern: *mut cairo_pattern_t, count: *mut c_uint, ) -> cairo_status_t; pub fn cairo_mesh_pattern_get_path( pattern: *mut cairo_pattern_t, patch_num: c_uint, ) -> *mut cairo_path_t; pub fn cairo_mesh_pattern_get_control_point( pattern: *mut cairo_pattern_t, patch_num: c_uint, point_num: cairo_mesh_corner_t, x: *mut c_double, y: *mut c_double, ) -> cairo_status_t; pub fn cairo_mesh_pattern_get_corner_color_rgba( pattern: *mut cairo_pattern_t, patch_num: c_uint, corner_num: cairo_mesh_corner_t, red: *mut c_double, green: *mut c_double, blue: *mut c_double, alpha: *mut c_double, ) -> cairo_status_t; pub fn cairo_pattern_reference(pattern: *mut cairo_pattern_t) -> *mut cairo_pattern_t; pub fn cairo_pattern_destroy(pattern: *mut cairo_pattern_t); pub fn cairo_pattern_status(pattern: *mut cairo_pattern_t) -> cairo_status_t; pub fn cairo_pattern_set_extend(pattern: *mut cairo_pattern_t, extend: cairo_extend_t); pub fn cairo_pattern_get_extend(pattern: *mut cairo_pattern_t) -> cairo_extend_t; pub fn cairo_pattern_set_filter(pattern: *mut cairo_pattern_t, filter: cairo_filter_t); pub fn cairo_pattern_get_filter(pattern: *mut cairo_pattern_t) -> cairo_filter_t; pub fn cairo_pattern_set_matrix(pattern: *mut cairo_pattern_t, matrix: *const Matrix); pub fn cairo_pattern_get_matrix(pattern: *mut cairo_pattern_t, matrix: *mut Matrix); pub fn cairo_pattern_get_type(pattern: *mut cairo_pattern_t) -> cairo_pattern_type_t; pub fn cairo_pattern_get_reference_count(pattern: *mut cairo_pattern_t) -> c_uint; pub fn cairo_pattern_set_user_data( pattern: *mut cairo_pattern_t, key: *const cairo_user_data_key_t, user_data: *mut c_void, destroy: cairo_destroy_func_t, ) -> cairo_status_t; pub fn cairo_pattern_get_user_data( pattern: *mut cairo_pattern_t, key: *const cairo_user_data_key_t, ) -> *mut c_void; // CAIRO REGIONS pub fn cairo_region_create() -> *mut cairo_region_t; pub fn cairo_region_create_rectangle( rectangle: *mut cairo_rectangle_int_t, ) -> *mut cairo_region_t; pub fn cairo_region_create_rectangles( rects: *mut cairo_rectangle_int_t, count: c_int, ) -> *mut cairo_region_t; pub fn cairo_region_copy(original: *mut cairo_region_t) -> *mut cairo_region_t; pub fn cairo_region_reference(region: *mut cairo_region_t) -> *mut cairo_region_t; pub fn cairo_region_destroy(region: *mut cairo_region_t); pub fn cairo_region_status(region: *mut cairo_region_t) -> cairo_status_t; pub fn cairo_region_get_extents( region: *mut cairo_region_t, extents: *mut cairo_rectangle_int_t, ); pub fn cairo_region_num_rectangles(region: *mut cairo_region_t) -> c_int; pub fn cairo_region_get_rectangle( region: *mut cairo_region_t, nth: c_int, rectangle: *mut cairo_rectangle_int_t, ); pub fn cairo_region_is_empty(region: *mut cairo_region_t) -> cairo_bool_t; pub fn cairo_region_contains_point( region: *mut cairo_region_t, x: c_int, y: c_int, ) -> cairo_bool_t; pub fn cairo_region_contains_rectangle( region: *mut cairo_region_t, rectangle: *mut cairo_rectangle_int_t, ) -> cairo_region_overlap_t; pub fn cairo_region_equal(a: *mut cairo_region_t, b: *mut cairo_region_t) -> cairo_bool_t; pub fn cairo_region_translate(region: *mut cairo_region_t, dx: c_int, dy: c_int); pub fn cairo_region_intersect( dst: *mut cairo_region_t, other: *mut cairo_region_t, ) -> cairo_status_t; pub fn 
cairo_region_intersect_rectangle( dst: *mut cairo_region_t, rectangle: *mut cairo_rectangle_int_t, ) -> cairo_status_t; pub fn cairo_region_subtract( dst: *mut cairo_region_t, other: *mut cairo_region_t, ) -> cairo_status_t; pub fn cairo_region_subtract_rectangle( dst: *mut cairo_region_t, rectangle: *mut cairo_rectangle_int_t, ) -> cairo_status_t; pub fn cairo_region_union( dst: *mut cairo_region_t, other: *mut cairo_region_t, ) -> cairo_status_t; pub fn cairo_region_union_rectangle( dst: *mut cairo_region_t, rectangle: *mut cairo_rectangle_int_t, ) -> cairo_status_t; pub fn cairo_region_xor(dst: *mut cairo_region_t, other: *mut cairo_region_t) -> cairo_status_t; pub fn cairo_region_xor_rectangle( dst: *mut cairo_region_t, rectangle: *mut cairo_rectangle_int_t, ) -> cairo_status_t; // text pub fn cairo_select_font_face( cr: *mut cairo_t, family: *const c_char, slant: cairo_font_slant_t, weight: cairo_font_weight_t, ); pub fn cairo_set_font_size(cr: *mut cairo_t, size: c_double); pub fn cairo_set_font_matrix(cr: *mut cairo_t, matrix: *const Matrix); pub fn cairo_get_font_matrix(cr: *mut cairo_t, matrix: *mut Matrix); pub fn cairo_set_font_options(cr: *mut cairo_t, options: *const cairo_font_options_t); pub fn cairo_get_font_options(cr: *mut cairo_t, options: *mut cairo_font_options_t); pub fn cairo_set_font_face(cr: *mut cairo_t, font_face: *mut cairo_font_face_t); pub fn cairo_get_font_face(cr: *mut cairo_t) -> *mut cairo_font_face_t; pub fn cairo_set_scaled_font(cr: *mut cairo_t, scaled_font: *mut cairo_scaled_font_t); pub fn cairo_get_scaled_font(cr: *mut cairo_t) -> *mut cairo_scaled_font_t; pub fn cairo_show_text(cr: *mut cairo_t, utf8: *const c_char); pub fn cairo_show_glyphs(cr: *mut cairo_t, glyphs: *const Glyph, num_glyphs: c_int); pub fn cairo_show_text_glyphs( cr: *mut cairo_t, utf8: *const c_char, utf8_len: c_int, glyphs: *const Glyph, num_glyphs: c_int, clusters: *const TextCluster, num_clusters: c_int, cluster_flags: cairo_text_cluster_flags_t, ); pub fn cairo_font_extents(cr: *mut cairo_t, extents: *mut FontExtents); pub fn cairo_text_extents(cr: *mut cairo_t, utf8: *const c_char, extents: *mut TextExtents); pub fn cairo_glyph_extents( cr: *mut cairo_t, glyphs: *const Glyph, num_glyphs: c_int, extents: *mut TextExtents, ); pub fn cairo_toy_font_face_create( family: *const c_char, slant: cairo_font_slant_t, weight: cairo_font_weight_t, ) -> *mut cairo_font_face_t; pub fn cairo_toy_font_face_get_family(font_face: *mut cairo_font_face_t) -> *const c_char; pub fn cairo_toy_font_face_get_slant(font_face: *mut cairo_font_face_t) -> cairo_font_slant_t; pub fn cairo_toy_font_face_get_weight(font_face: *mut cairo_font_face_t) -> cairo_font_weight_t; pub fn cairo_glyph_allocate(num_glyphs: c_int) -> *mut Glyph; pub fn cairo_glyph_free(glyphs: *mut Glyph); pub fn cairo_text_cluster_allocate(num_clusters: c_int) -> *mut TextCluster; pub fn cairo_text_cluster_free(clusters: *mut TextCluster); #[cfg(any(feature = "freetype", feature = "dox"))] pub fn cairo_ft_font_face_create_for_ft_face( face: FT_Face, load_flags: c_int, ) -> *mut cairo_font_face_t; #[cfg(any(feature = "freetype", feature = "dox"))] pub fn cairo_ft_font_face_create_for_pattern(pattern: *mut FcPattern) -> *mut cairo_font_face_t; #[cfg(any(feature = "freetype", feature = "dox"))] pub fn cairo_ft_font_option_substitute( options: *const cairo_font_options_t, pattern: *mut FcPattern, ); #[cfg(any(feature = "freetype", feature = "dox"))] pub fn cairo_ft_scaled_font_lock_face(scaled_font: *mut cairo_scaled_font_t) -> FT_Face; 
#[cfg(any(feature = "freetype", feature = "dox"))] pub fn cairo_ft_scaled_font_unlock_face(scaled_font: *mut cairo_scaled_font_t); #[cfg(any(feature = "freetype", feature = "dox"))] pub fn cairo_ft_font_face_get_synthesize( font_face: *mut cairo_font_face_t, ) -> cairo_ft_synthesize_t; #[cfg(any(feature = "freetype", feature = "dox"))] pub fn cairo_ft_font_face_set_synthesize( font_face: *mut cairo_font_face_t, synth_flags: cairo_ft_synthesize_t, ); #[cfg(any(feature = "freetype", feature = "dox"))] pub fn cairo_ft_font_face_unset_synthesize( font_face: *mut cairo_font_face_t, synth_flags: cairo_ft_synthesize_t, ); // CAIRO RASTER //pub fn cairo_pattern_create_raster_source(user_data: *mut void, content: Content, width: c_int, height: c_int) -> *mut cairo_pattern_t; //pub fn cairo_raster_source_pattern_set_callback_data(pattern: *mut cairo_pattern_t, data: *mut void); //pub fn cairo_raster_source_pattern_get_callback_data(pattern: *mut cairo_pattern_t) -> *mut void; /* FIXME how do we do these _func_t types? pub fn cairo_raster_source_pattern_set_acquire(pattern: *mut cairo_pattern_t, acquire: cairo_raster_source_acquire_func_t, release: cairo_raster_source_release_func_t); pub fn cairo_raster_source_pattern_get_acquire(pattern: *mut cairo_pattern_t, acquire: *mut cairo_raster_source_acquire_func_t, release: *mut cairo_raster_source_release_func_t); pub fn cairo_raster_source_pattern_set_snapshot(pattern: *mut cairo_pattern_t, snapshot: cairo_raster_source_snapshot_func_t); pub fn cairo_raster_source_pattern_get_snapshot(pattern: *mut cairo_pattern_t) -> cairo_raster_source_snapshot_func_t; pub fn cairo_raster_source_pattern_set_copy(pattern: *mut cairo_pattern_t, copy: cairo_raster_source_copy_func_t); pub fn cairo_raster_source_pattern_get_copy(pattern: *mut cairo_pattern_t) -> cairo_raster_source_copy_func_t; pub fn cairo_raster_source_pattern_set_finish(pattern: *mut cairo_pattern_t, finish: cairo_raster_source_finish_func_t); pub fn cairo_raster_source_pattern_get_finish(pattern: *mut cairo_pattern_t) -> cairo_raster_source_finish_func_t; */ //cairo_surface_t (*mut cairo_raster_source_acquire_func_t) // (pattern: *mut cairo_pattern_t, callback_data: *mut void, target: *mut cairo_surface_t, extents: *mut cairo_rectangle_int_t); //void (*mut cairo_raster_source_release_func_t) // (pattern: *mut cairo_pattern_t, callback_data: *mut void, surface: *mut cairo_surface_t); //Status (*mut cairo_raster_source_snapshot_func_t) // (pattern: *mut cairo_pattern_t, callback_data: *mut void); //Status (*mut cairo_raster_source_copy_func_t) (pattern: *mut cairo_pattern_t, callback_data: *mut void, other: *mut cairo_pattern_t); //void (*mut cairo_raster_source_finish_func_t) // (pattern: *mut cairo_pattern_t, callback_data: *mut void); //CAIRO FONT pub fn cairo_font_face_reference(font_face: *mut cairo_font_face_t) -> *mut cairo_font_face_t; pub fn cairo_font_face_destroy(font_face: *mut cairo_font_face_t); pub fn cairo_font_face_status(font_face: *mut cairo_font_face_t) -> cairo_status_t; pub fn cairo_font_face_get_type(font_face: *mut cairo_font_face_t) -> cairo_font_type_t; pub fn cairo_font_face_get_reference_count(font_face: *mut cairo_font_face_t) -> c_uint; pub fn cairo_font_face_set_user_data( font_face: *mut cairo_font_face_t, key: *const cairo_user_data_key_t, user_data: *mut c_void, destroy: cairo_destroy_func_t, ) -> cairo_status_t; pub fn cairo_font_face_get_user_data( font_face: *mut cairo_font_face_t, key: *const cairo_user_data_key_t, ) -> *mut c_void; // CAIRO SCALED FONT pub fn 
cairo_scaled_font_create( font_face: *mut cairo_font_face_t, font_matrix: *const Matrix, ctm: *const Matrix, options: *const cairo_font_options_t, ) -> *mut cairo_scaled_font_t; pub fn cairo_scaled_font_reference( scaled_font: *mut cairo_scaled_font_t, ) -> *mut cairo_scaled_font_t; pub fn cairo_scaled_font_destroy(scaled_font: *mut cairo_scaled_font_t); pub fn cairo_scaled_font_status(scaled_font: *mut cairo_scaled_font_t) -> cairo_status_t; pub fn cairo_scaled_font_extents( scaled_font: *mut cairo_scaled_font_t, extents: *mut FontExtents, ); pub fn cairo_scaled_font_text_extents( scaled_font: *mut cairo_scaled_font_t, utf8: *const c_char, extents: *mut TextExtents, ); pub fn cairo_scaled_font_glyph_extents( scaled_font: *mut cairo_scaled_font_t, glyphs: *const Glyph, num_glyphs: c_int, extents: *mut TextExtents, ); pub fn cairo_scaled_font_text_to_glyphs( scaled_font: *mut cairo_scaled_font_t, x: c_double, y: c_double, utf8: *const c_char, utf8_len: c_int, glyphs: *mut *mut Glyph, num_glyphs: *mut c_int, clusters: *mut *mut TextCluster, num_clusters: *mut c_int, cluster_flags: *mut cairo_text_cluster_flags_t, ) -> cairo_status_t; pub fn cairo_scaled_font_get_font_face( scaled_font: *mut cairo_scaled_font_t, ) -> *mut cairo_font_face_t; pub fn cairo_scaled_font_get_font_options( scaled_font: *mut cairo_scaled_font_t, options: *mut cairo_font_options_t, ); pub fn cairo_scaled_font_get_font_matrix( scaled_font: *mut cairo_scaled_font_t, font_matrix: *mut Matrix, ); pub fn cairo_scaled_font_get_ctm(scaled_font: *mut cairo_scaled_font_t, ctm: *mut Matrix); pub fn cairo_scaled_font_get_scale_matrix( scaled_font: *mut cairo_scaled_font_t, scale_matrix: *mut Matrix, ); pub fn cairo_scaled_font_get_type(scaled_font: *mut cairo_scaled_font_t) -> cairo_font_type_t; pub fn cairo_scaled_font_get_reference_count(font_face: *mut cairo_scaled_font_t) -> c_uint; pub fn cairo_scaled_font_set_user_data( scaled_font: *mut cairo_scaled_font_t, key: *const cairo_user_data_key_t, user_data: *mut c_void, destroy: cairo_destroy_func_t, ) -> cairo_status_t; pub fn cairo_scaled_font_get_user_data( scaled_font: *mut cairo_scaled_font_t, key: *const cairo_user_data_key_t, ) -> *mut c_void; // CAIRO FONT OPTIONS pub fn cairo_font_options_create() -> *mut cairo_font_options_t; pub fn cairo_font_options_copy( original: *const cairo_font_options_t, ) -> *mut cairo_font_options_t; pub fn cairo_font_options_destroy(options: *mut cairo_font_options_t); pub fn cairo_font_options_status(options: *mut cairo_font_options_t) -> cairo_status_t; pub fn cairo_font_options_merge( options: *mut cairo_font_options_t, other: *const cairo_font_options_t, ); pub fn cairo_font_options_hash(options: *const cairo_font_options_t) -> c_ulong; pub fn cairo_font_options_equal( options: *const cairo_font_options_t, other: *const cairo_font_options_t, ) -> cairo_bool_t; pub fn cairo_font_options_set_antialias( options: *mut cairo_font_options_t, antialias: cairo_antialias_t, ); pub fn cairo_font_options_get_antialias( options: *const cairo_font_options_t, ) -> cairo_antialias_t; pub fn cairo_font_options_set_subpixel_order( options: *mut cairo_font_options_t, subpixel_order: cairo_subpixel_order_t, ); pub fn cairo_font_options_get_subpixel_order( options: *const cairo_font_options_t, ) -> cairo_subpixel_order_t; pub fn cairo_font_options_set_hint_style( options: *mut cairo_font_options_t, hint_style: cairo_hint_style_t, ); pub fn cairo_font_options_get_hint_style( options: *const cairo_font_options_t, ) -> cairo_hint_style_t; pub fn 
cairo_font_options_set_hint_metrics( options: *mut cairo_font_options_t, hint_metrics: cairo_hint_metrics_t, ); pub fn cairo_font_options_get_hint_metrics( options: *const cairo_font_options_t, ) -> cairo_hint_metrics_t; #[cfg(any(feature = "v1_16", feature = "dox"))] pub fn cairo_font_options_get_variations(options: *mut cairo_font_options_t) -> *const c_char; #[cfg(any(feature = "v1_16", feature = "dox"))] pub fn cairo_font_options_set_variations( options: *mut cairo_font_options_t, variations: *const c_char, ); // CAIRO MATRIX pub fn cairo_matrix_multiply(matrix: *mut Matrix, left: *const Matrix, right: *const Matrix); pub fn cairo_matrix_init( matrix: *mut Matrix, xx: f64, yx: f64, xy: f64, yy: f64, x0: f64, y0: f64, ); pub fn cairo_matrix_init_identity(matrix: *mut Matrix); pub fn cairo_matrix_translate(matrix: *mut Matrix, tx: f64, ty: f64); pub fn cairo_matrix_scale(matrix: *mut Matrix, sx: f64, sy: f64); pub fn cairo_matrix_rotate(matrix: *mut Matrix, angle: f64); pub fn cairo_matrix_invert(matrix: *mut Matrix) -> cairo_status_t; pub fn cairo_matrix_transform_distance(matrix: *const Matrix, dx: *mut f64, dy: *mut f64); pub fn cairo_matrix_transform_point(matrix: *const Matrix, x: *mut f64, y: *mut f64); // CAIRO SURFACE pub fn cairo_surface_destroy(surface: *mut cairo_surface_t); pub fn cairo_surface_flush(surface: *mut cairo_surface_t); pub fn cairo_surface_finish(surface: *mut cairo_surface_t); pub fn cairo_surface_status(surface: *mut cairo_surface_t) -> cairo_status_t; pub fn cairo_surface_get_type(surface: *mut cairo_surface_t) -> cairo_surface_type_t; pub fn cairo_surface_reference(surface: *mut cairo_surface_t) -> *mut cairo_surface_t; pub fn cairo_surface_get_user_data( surface: *mut cairo_surface_t, key: *const cairo_user_data_key_t, ) -> *mut c_void; pub fn cairo_surface_set_user_data( surface: *mut cairo_surface_t, key: *const cairo_user_data_key_t, user_data: *mut c_void, destroy: cairo_destroy_func_t, ) -> cairo_status_t; pub fn cairo_surface_get_reference_count(surface: *mut cairo_surface_t) -> c_uint; pub fn cairo_surface_mark_dirty(surface: *mut cairo_surface_t); pub fn cairo_surface_mark_dirty_rectangle( surface: *mut cairo_surface_t, x: c_int, y: c_int, width: c_int, height: c_int, ); pub fn cairo_surface_create_similar( surface: *mut cairo_surface_t, content: cairo_content_t, width: c_int, height: c_int, ) -> *mut cairo_surface_t; pub fn cairo_surface_create_for_rectangle( surface: *mut cairo_surface_t, x: c_double, y: c_double, width: c_double, height: c_double, ) -> *mut cairo_surface_t; pub fn cairo_surface_get_mime_data( surface: *mut cairo_surface_t, mime_type: *const c_char, data: *const *mut u8, length: *mut c_ulong, ); pub fn cairo_surface_set_mime_data( surface: *mut cairo_surface_t, mime_type: *const c_char, data: *const u8, length: c_ulong, destroy: cairo_destroy_func_t, closure: *const u8, ) -> cairo_status_t; pub fn cairo_surface_supports_mime_type( surface: *mut cairo_surface_t, mime_type: *const c_char, ) -> cairo_bool_t; pub fn cairo_surface_get_device(surface: *mut cairo_surface_t) -> *mut cairo_device_t; pub fn cairo_surface_set_device_offset( surface: *mut cairo_surface_t, x_offset: c_double, y_offset: c_double, ); pub fn cairo_surface_get_device_offset( surface: *mut cairo_surface_t, x_offset: *mut c_double, y_offset: *mut c_double, ); #[cfg(any(feature = "v1_14", feature = "dox"))] pub fn cairo_surface_get_device_scale( surface: *mut cairo_surface_t, x_scale: *mut c_double, y_scale: *mut c_double, ); #[cfg(any(feature = "v1_14", feature = 
"dox"))] pub fn cairo_surface_set_device_scale( surface: *mut cairo_surface_t, x_scale: c_double, y_scale: c_double, ); pub fn cairo_surface_get_fallback_resolution( surface: *mut cairo_surface_t, x_pixels_per_inch: *mut c_double, y_pixels_per_inch: *mut c_double, ); pub fn cairo_surface_set_fallback_resolution( surface: *mut cairo_surface_t, x_pixels_per_inch: c_double, x_pixels_per_inch: c_double, ); pub fn cairo_recording_surface_get_extents( surface: *mut cairo_surface_t, extents: *mut cairo_rectangle_t, ) -> cairo_bool_t; pub fn cairo_recording_surface_create( content: cairo_content_t, extents: *const cairo_rectangle_t, ) -> *mut cairo_surface_t; pub fn cairo_recording_surface_ink_extents( surface: *mut cairo_surface_t, x0: *mut c_double, y0: *mut c_double, width: *mut c_double, height: *mut c_double, ); pub fn cairo_surface_create_similar_image( other: *mut cairo_surface_t, format: cairo_format_t, width: c_int, height: c_int, ) -> *mut cairo_surface_t; pub fn cairo_surface_map_to_image( surface: *mut cairo_surface_t, extents: *const cairo_rectangle_int_t, ) -> *mut cairo_surface_t; pub fn cairo_surface_unmap_image(surface: *mut cairo_surface_t, image: *mut cairo_surface_t); // CAIRO IMAGE SURFACE pub fn cairo_image_surface_create( format: cairo_format_t, width: c_int, height: c_int, ) -> *mut cairo_surface_t; pub fn cairo_image_surface_create_for_data( data: *mut u8, format: cairo_format_t, width: c_int, height: c_int, stride: c_int, ) -> *mut cairo_surface_t; pub fn cairo_image_surface_get_data(surface: *mut cairo_surface_t) -> *mut u8; pub fn cairo_image_surface_get_format(surface: *mut cairo_surface_t) -> cairo_format_t; pub fn cairo_image_surface_get_height(surface: *mut cairo_surface_t) -> c_int; pub fn cairo_image_surface_get_stride(surface: *mut cairo_surface_t) -> c_int; pub fn cairo_image_surface_get_width(surface: *mut cairo_surface_t) -> c_int; pub fn cairo_format_stride_for_width(format: cairo_format_t, width: c_int) -> c_int; #[cfg(any(feature = "png", feature = "dox"))] pub fn cairo_image_surface_create_from_png_stream( read_func: cairo_read_func_t, closure: *mut c_void, ) -> *mut cairo_surface_t; #[cfg(any(feature = "png", feature = "dox"))] pub fn cairo_surface_write_to_png_stream( surface: *mut cairo_surface_t, write_func: cairo_write_func_t, closure: *mut c_void, ) -> cairo_status_t; // CAIRO PDF #[cfg(any(feature = "pdf", feature = "dox"))] pub fn cairo_pdf_surface_create( filename: *const c_char, width_in_points: c_double, height_in_points: c_double, ) -> *mut cairo_surface_t; #[cfg(any(feature = "pdf", feature = "dox"))] pub fn cairo_pdf_surface_create_for_stream( write_func: cairo_write_func_t, closure: *mut c_void, width_in_points: c_double, height_in_points: c_double, ) -> *mut cairo_surface_t; #[cfg(any(feature = "pdf", feature = "dox"))] pub fn cairo_pdf_surface_restrict_to_version( surface: *mut cairo_surface_t, version: cairo_pdf_version_t, ); #[cfg(any(feature = "pdf", feature = "dox"))] pub fn cairo_pdf_get_versions( versions: *mut *mut cairo_pdf_version_t, num_versions: *mut c_int, ); #[cfg(any(feature = "pdf", feature = "dox"))] pub fn cairo_pdf_version_to_string(version: cairo_pdf_version_t) -> *const c_char; #[cfg(any(feature = "pdf", feature = "dox"))] pub fn cairo_pdf_surface_set_size( surface: *mut cairo_surface_t, width_in_points: f64, height_in_points: f64, ); #[cfg(any(all(feature = "pdf", feature = "v1_16"), feature = "dox"))] pub fn cairo_pdf_surface_add_outline( surface: *mut cairo_surface_t, parent_id: c_int, utf8: *const c_char, 
link_attribs: *const c_char, flags: cairo_pdf_outline_flags_t, ) -> c_int; #[cfg(any(all(feature = "pdf", feature = "v1_16"), feature = "dox"))] pub fn cairo_pdf_surface_set_metadata( surface: *mut cairo_surface_t, metadata: cairo_pdf_metadata_t, utf8: *const c_char, ); #[cfg(any(all(feature = "pdf", feature = "v1_16"), feature = "dox"))] pub fn cairo_pdf_surface_set_page_label(surface: *mut cairo_surface_t, utf8: *const c_char); #[cfg(any(all(feature = "pdf", feature = "v1_16"), feature = "dox"))] pub fn cairo_pdf_surface_set_thumbnail_size( surface: *mut cairo_surface_t, width: c_int, height: c_int, ); // CAIRO SVG #[cfg(any(feature = "svg", feature = "dox"))] pub fn cairo_svg_surface_create( filename: *const c_char, width_in_points: c_double, height_in_points: c_double, ) -> *mut cairo_surface_t; #[cfg(any(feature = "svg", feature = "dox"))] pub fn cairo_svg_surface_create_for_stream( write_func: cairo_write_func_t, closure: *mut c_void, width_in_points: c_double, height_in_points: c_double, ) -> *mut cairo_surface_t; #[cfg(any(feature = "svg", feature = "dox"))] pub fn cairo_svg_surface_restrict_to_version( surface: *mut cairo_surface_t, version: cairo_svg_version_t, ); #[cfg(any(all(feature = "svg", feature = "v1_16"), feature = "dox"))] pub fn cairo_svg_surface_get_document_unit(surface: *const cairo_surface_t) -> cairo_svg_unit_t; #[cfg(any(all(feature = "svg", feature = "v1_16"), feature = "dox"))] pub fn cairo_svg_surface_set_document_unit( surface: *mut cairo_surface_t, unit: cairo_svg_unit_t, ); #[cfg(any(feature = "svg", feature = "dox"))] pub fn cairo_svg_get_versions( versions: *mut *mut cairo_svg_version_t, num_versions: *mut c_int, ); #[cfg(any(feature = "svg", feature = "dox"))] pub fn cairo_svg_version_to_string(version: cairo_svg_version_t) -> *const c_char; // CAIRO PS #[cfg(any(feature = "ps", feature = "dox"))] pub fn cairo_ps_surface_create( filename: *const c_char, width_in_points: c_double, height_in_points: c_double, ) -> *mut cairo_surface_t; #[cfg(any(feature = "ps", feature = "dox"))] pub fn cairo_ps_surface_create_for_stream( write_func: cairo_write_func_t, closure: *mut c_void, width_in_points: c_double, height_in_points: c_double, ) -> *mut cairo_surface_t; #[cfg(any(feature = "ps", feature = "dox"))] pub fn cairo_ps_surface_restrict_to_level( surface: *mut cairo_surface_t, version: cairo_ps_level_t, ); #[cfg(any(feature = "ps", feature = "dox"))] pub fn cairo_ps_get_levels(levels: *mut *mut cairo_ps_level_t, num_levels: *mut c_int); #[cfg(any(feature = "ps", feature = "dox"))] pub fn cairo_ps_level_to_string(level: cairo_ps_level_t) -> *const c_char; #[cfg(any(feature = "ps", feature = "dox"))] pub fn cairo_ps_surface_set_eps(surface: *mut cairo_surface_t, eps: cairo_bool_t); #[cfg(any(feature = "ps", feature = "dox"))] pub fn cairo_ps_surface_get_eps(surface: *mut cairo_surface_t) -> cairo_bool_t; #[cfg(any(feature = "ps", feature = "dox"))] pub fn cairo_ps_surface_set_size( surface: *mut cairo_surface_t, width_in_points: f64, height_in_points: f64, ); #[cfg(any(feature = "ps", feature = "dox"))] pub fn cairo_ps_surface_dsc_begin_setup(surface: *mut cairo_surface_t); #[cfg(any(feature = "ps", feature = "dox"))] pub fn cairo_ps_surface_dsc_begin_page_setup(surface: *mut cairo_surface_t); #[cfg(any(feature = "ps", feature = "dox"))] pub fn cairo_ps_surface_dsc_comment(surface: *mut cairo_surface_t, comment: *const c_char); // CAIRO XCB #[cfg(any(feature = "xcb", feature = "dox"))] pub fn cairo_xcb_surface_create( connection: *mut xcb_connection_t, drawable: 
xcb_drawable_t, visual: *mut xcb_visualtype_t, width: c_int, height: c_int, ) -> *mut cairo_surface_t; #[cfg(any(feature = "xcb", feature = "dox"))] pub fn cairo_xcb_surface_create_for_bitmap( connection: *mut xcb_connection_t, screen: *mut xcb_screen_t, bitmap: xcb_pixmap_t, width: c_int, height: c_int, ) -> *mut cairo_surface_t; #[cfg(any(feature = "xcb", feature = "dox"))] pub fn cairo_xcb_surface_create_with_xrender_format( connection: *mut xcb_connection_t, screen: *mut xcb_screen_t, drawable: xcb_drawable_t, format: *mut xcb_render_pictforminfo_t, width: c_int, height: c_int, ) -> *mut cairo_surface_t; #[cfg(any(feature = "xcb", feature = "dox"))] pub fn cairo_xcb_surface_set_size(surface: *mut cairo_surface_t, width: c_int, height: c_int); #[cfg(any(feature = "xcb", feature = "dox"))] pub fn cairo_xcb_surface_set_drawable( surface: *mut cairo_surface_t, drawable: xcb_drawable_t, width: c_int, height: c_int, ); #[cfg(any(feature = "xcb", feature = "dox"))] pub fn cairo_xcb_device_get_connection(device: *mut cairo_device_t) -> *mut xcb_connection_t; #[cfg(any(feature = "xcb", feature = "dox"))] pub fn cairo_xcb_device_debug_cap_xrender_version( device: *mut cairo_device_t, major_version: c_int, minor_version: c_int, ); #[cfg(any(feature = "xcb", feature = "dox"))] pub fn cairo_xcb_device_debug_cap_xshm_version( device: *mut cairo_device_t, major_version: c_int, minor_version: c_int, ); #[cfg(any(feature = "xcb", feature = "dox"))] pub fn cairo_xcb_device_debug_get_precision(device: *mut cairo_device_t) -> c_int; #[cfg(any(feature = "xcb", feature = "dox"))] pub fn cairo_xcb_device_debug_set_precision(device: *mut cairo_device_t, precision: c_int); // CAIRO XLIB SURFACE #[cfg(any(feature = "xlib", feature = "dox"))] pub fn cairo_xlib_surface_create( dpy: *mut xlib::Display, drawable: xlib::Drawable, visual: *mut xlib::Visual, width: c_int, height: c_int, ) -> *mut cairo_surface_t; #[cfg(any(feature = "xlib", feature = "dox"))] pub fn cairo_xlib_surface_create_for_bitmap( dpy: *mut xlib::Display, bitmap: xlib::Pixmap, screen: *mut xlib::Screen, width: c_int, height: c_int, ) -> *mut cairo_surface_t; #[cfg(any(feature = "xlib", feature = "dox"))] pub fn cairo_xlib_surface_set_size(surface: *mut cairo_surface_t, width: c_int, height: c_int); #[cfg(any(feature = "xlib", feature = "dox"))] pub fn cairo_xlib_surface_set_drawable( surface: *mut cairo_surface_t, drawable: xlib::Drawable, width: c_int, height: c_int, ); #[cfg(any(feature = "xlib", feature = "dox"))] pub fn cairo_xlib_surface_get_display(surface: *mut cairo_surface_t) -> *mut xlib::Display; #[cfg(any(feature = "xlib", feature = "dox"))] pub fn cairo_xlib_surface_get_drawable(surface: *mut cairo_surface_t) -> xlib::Drawable; #[cfg(any(feature = "xlib", feature = "dox"))] pub fn cairo_xlib_surface_get_screen(surface: *mut cairo_surface_t) -> *mut xlib::Screen; #[cfg(any(feature = "xlib", feature = "dox"))] pub fn cairo_xlib_surface_get_visual(surface: *mut cairo_surface_t) -> *mut xlib::Visual; #[cfg(any(feature = "xlib", feature = "dox"))] pub fn cairo_xlib_surface_get_depth(surface: *mut cairo_surface_t) -> c_int; #[cfg(any(feature = "xlib", feature = "dox"))] pub fn cairo_xlib_surface_get_width(surface: *mut cairo_surface_t) -> c_int; #[cfg(any(feature = "xlib", feature = "dox"))] pub fn cairo_xlib_surface_get_height(surface: *mut cairo_surface_t) -> c_int; #[cfg(any(feature = "xlib", feature = "dox"))] pub fn cairo_xlib_device_debug_cap_xrender_version( device: *mut cairo_device_t, major_version: c_int, minor_version: c_int, 
); #[cfg(any(feature = "xlib", feature = "dox"))] pub fn cairo_xlib_device_debug_get_precision(device: *mut cairo_device_t) -> c_int; #[cfg(any(feature = "xlib", feature = "dox"))] pub fn cairo_xlib_device_debug_set_precision(device: *mut cairo_device_t, precision: c_int); // CAIRO WINDOWS SURFACE #[cfg(any(all(windows, feature = "win32-surface"), feature = "dox"))] pub fn cairo_win32_surface_create(hdc: winapi::HDC) -> *mut cairo_surface_t; #[cfg(any( all(windows, feature = "win32-surface", feature = "v1_14"), feature = "dox" ))] pub fn cairo_win32_surface_create_with_format( hdc: winapi::HDC, format: cairo_format_t, ) -> *mut cairo_surface_t; #[cfg(any(all(windows, feature = "win32-surface"), feature = "dox"))] pub fn cairo_win32_surface_create_with_dib( format: cairo_format_t, width: c_int, height: c_int, ) -> *mut cairo_surface_t; #[cfg(any(all(windows, feature = "win32-surface"), feature = "dox"))] pub fn cairo_win32_surface_create_with_ddb( hdc: winapi::HDC, format: cairo_format_t, width: c_int, height: c_int, ) -> *mut cairo_surface_t; #[cfg(any(all(windows, feature = "win32-surface"), feature = "dox"))] pub fn cairo_win32_printing_surface_create(hdc: winapi::HDC) -> *mut cairo_surface_t; #[cfg(any(all(windows, feature = "win32-surface"), feature = "dox"))] pub fn cairo_win32_surface_get_dc(surface: *mut cairo_surface_t) -> winapi::HDC; #[cfg(any(all(windows, feature = "win32-surface"), feature = "dox"))] pub fn cairo_win32_surface_get_image(surface: *mut cairo_surface_t) -> *mut cairo_surface_t; #[cfg(any(target_os = "macos", target_os = "ios", feature = "dox"))] pub fn cairo_quartz_surface_create( format: cairo_format_t, width: c_uint, height: c_uint, ) -> *mut cairo_surface_t; #[cfg(any(target_os = "macos", target_os = "ios", feature = "dox"))] pub fn cairo_quartz_surface_create_for_cg_context( cg_context: CGContextRef, width: c_uint, height: c_uint, ) -> *mut cairo_surface_t; #[cfg(any(target_os = "macos", target_os = "ios", feature = "dox"))] pub fn cairo_quartz_surface_get_cg_context(surface: *mut cairo_surface_t) -> CGContextRef; // CAIRO SCRIPT #[cfg(any(feature = "script", feature = "dox"))] pub fn cairo_script_create(filename: *const c_char) -> *mut cairo_device_t; #[cfg(any(feature = "script", feature = "dox"))] pub fn cairo_script_create_for_stream( write_func: cairo_write_func_t, closure: *mut c_void, ) -> cairo_status_t; #[cfg(any(feature = "script", feature = "dox"))] pub fn cairo_script_from_recording_surface( script: *mut cairo_device_t, surface: *mut cairo_surface_t, ) -> cairo_status_t; #[cfg(any(feature = "script", feature = "dox"))] pub fn cairo_script_get_mode(script: *mut cairo_device_t) -> cairo_script_mode_t; #[cfg(any(feature = "script", feature = "dox"))] pub fn cairo_script_set_mode(script: *mut cairo_device_t, mode: cairo_script_mode_t); #[cfg(any(feature = "script", feature = "dox"))] pub fn cairo_script_surface_create( script: *mut cairo_device_t, content: cairo_content_t, width: c_double, height: c_double, ) -> *mut cairo_surface_t; #[cfg(any(feature = "script", feature = "dox"))] pub fn cairo_script_surface_create_for_target( script: *mut cairo_device_t, target: *mut cairo_surface_t, ) -> *mut cairo_surface_t; #[cfg(any(feature = "script", feature = "dox"))] pub fn cairo_script_write_comment( script: *mut cairo_device_t, comment: *const c_char, len: c_int, ); pub fn cairo_device_destroy(device: *mut cairo_device_t); pub fn cairo_device_status(device: *mut cairo_device_t) -> cairo_status_t; pub fn cairo_device_finish(device: *mut cairo_device_t); pub 
fn cairo_device_flush(device: *mut cairo_device_t); pub fn cairo_device_get_type(device: *mut cairo_device_t) -> cairo_device_type_t; pub fn cairo_device_reference(device: *mut cairo_device_t) -> *mut cairo_device_t; pub fn cairo_device_get_reference_count(device: *mut cairo_device_t) -> c_uint; pub fn cairo_device_set_user_data( device: *mut cairo_device_t, key: *const cairo_user_data_key_t, user_data: *mut c_void, destroy: cairo_destroy_func_t, ) -> cairo_status_t; pub fn cairo_device_get_user_data( device: *mut cairo_device_t, key: *const cairo_user_data_key_t, ) -> *mut c_void; pub fn cairo_device_acquire(device: *mut cairo_device_t) -> cairo_status_t; pub fn cairo_device_release(device: *mut cairo_device_t); pub fn cairo_device_observer_elapsed(device: *mut cairo_device_t) -> c_double; pub fn cairo_device_observer_fill_elapsed(device: *mut cairo_device_t) -> c_double; pub fn cairo_device_observer_glyphs_elapsed(device: *mut cairo_device_t) -> c_double; pub fn cairo_device_observer_mask_elapsed(device: *mut cairo_device_t) -> c_double; pub fn cairo_device_observer_paint_elapsed(device: *mut cairo_device_t) -> c_double; pub fn cairo_device_observer_stroke_elapsed(device: *mut cairo_device_t) -> c_double; pub fn cairo_device_observer_print( device: *mut cairo_device_t, write_func: cairo_write_func_t, closure: *mut c_void, ) -> cairo_status_t; } #[cfg(feature = "use_glib")] pub mod gobject; pub const STATUS_SUCCESS: i32 = 0; pub const STATUS_NO_MEMORY: i32 = 1; pub const STATUS_INVALID_RESTORE: i32 = 2; pub const STATUS_INVALID_POP_GROUP: i32 = 3; pub const STATUS_NO_CURRENT_POINT: i32 = 4; pub const STATUS_INVALID_MATRIX: i32 = 5; pub const STATUS_INVALID_STATUS: i32 = 6; pub const STATUS_NULL_POINTER: i32 = 7; pub const STATUS_INVALID_STRING: i32 = 8; pub const STATUS_INVALID_PATH_DATA: i32 = 9; pub const STATUS_READ_ERROR: i32 = 10; pub const STATUS_WRITE_ERROR: i32 = 11; pub const STATUS_SURFACE_FINISHED: i32 = 12; pub const STATUS_SURFACE_TYPE_MISMATCH: i32 = 13; pub const STATUS_PATTERN_TYPE_MISMATCH: i32 = 14; pub const STATUS_INVALID_CONTENT: i32 = 15; pub const STATUS_INVALID_FORMAT: i32 = 16; pub const STATUS_INVALID_VISUAL: i32 = 17; pub const STATUS_FILE_NOT_FOUND: i32 = 18; pub const STATUS_INVALID_DASH: i32 = 19; pub const STATUS_INVALID_DSC_COMMENT: i32 = 20; pub const STATUS_INVALID_INDEX: i32 = 21; pub const STATUS_CLIP_NOT_REPRESENTABLE: i32 = 22; pub const STATUS_TEMP_FILE_ERROR: i32 = 23; pub const STATUS_INVALID_STRIDE: i32 = 24; pub const STATUS_FONT_TYPE_MISMATCH: i32 = 25; pub const STATUS_USER_FONT_IMMUTABLE: i32 = 26; pub const STATUS_USER_FONT_ERROR: i32 = 27; pub const STATUS_NEGATIVE_COUNT: i32 = 28; pub const STATUS_INVALID_CLUSTERS: i32 = 29; pub const STATUS_INVALID_SLANT: i32 = 30; pub const STATUS_INVALID_WEIGHT: i32 = 31; pub const STATUS_INVALID_SIZE: i32 = 32; pub const STATUS_USER_FONT_NOT_IMPLEMENTED: i32 = 33; pub const STATUS_DEVICE_TYPE_MISMATCH: i32 = 34; pub const STATUS_DEVICE_ERROR: i32 = 35; pub const STATUS_INVALID_MESH_CONSTRUCTION: i32 = 36; pub const STATUS_DEVICE_FINISHED: i32 = 37; pub const STATUS_J_BIG2_GLOBAL_MISSING: i32 = 38; pub const STATUS_PNG_ERROR: i32 = 39; pub const STATUS_FREETYPE_ERROR: i32 = 40; pub const STATUS_WIN32_GDI_ERROR: i32 = 41; pub const STATUS_LAST_STATUS: i32 = 42; pub const ANTIALIAS_DEFAULT: i32 = 0; pub const ANTIALIAS_NONE: i32 = 1; pub const ANTIALIAS_GRAY: i32 = 2; pub const ANTIALIAS_SUBPIXEL: i32 = 3; pub const ANTIALIAS_FAST: i32 = 4; pub const ANTIALIAS_GOOD: i32 = 5; pub const ANTIALIAS_BEST: i32 = 
6; pub const FILL_RULE_WINDING: i32 = 0; pub const FILL_RULE_EVEN_ODD: i32 = 1; pub const LINE_CAP_BUTT: i32 = 0; pub const LINE_CAP_ROUND: i32 = 1; pub const LINE_CAP_SQUARE: i32 = 2; pub const LINE_JOIN_MITER: i32 = 0; pub const LINE_JOIN_ROUND: i32 = 1; pub const LINE_JOIN_BEVEL: i32 = 2; pub const OPERATOR_CLEAR: i32 = 0; pub const OPERATOR_SOURCE: i32 = 1; pub const OPERATOR_OVER: i32 = 2; pub const OPERATOR_IN: i32 = 3; pub const OPERATOR_OUT: i32 = 4; pub const OPERATOR_ATOP: i32 = 5; pub const OPERATOR_DEST: i32 = 6; pub const OPERATOR_DEST_OVER: i32 = 7; pub const OPERATOR_DEST_IN: i32 = 8; pub const OPERATOR_DEST_OUT: i32 = 9; pub const OPERATOR_DEST_ATOP: i32 = 10; pub const OPERATOR_XOR: i32 = 11; pub const OPERATOR_ADD: i32 = 12; pub const OPERATOR_SATURATE: i32 = 13; pub const OPERATOR_MULTIPLY: i32 = 14; pub const OPERATOR_SCREEN: i32 = 15; pub const OPERATOR_OVERLAY: i32 = 16; pub const OPERATOR_DARKEN: i32 = 17; pub const OPERATOR_LIGHTEN: i32 = 18; pub const OPERATOR_COLOR_DODGE: i32 = 19; pub const OPERATOR_COLOR_BURN: i32 = 20; pub const OPERATOR_HARD_LIGHT: i32 = 21; pub const OPERATOR_SOFT_LIGHT: i32 = 22; pub const OPERATOR_DIFFERENCE: i32 = 23; pub const OPERATOR_EXCLUSION: i32 = 24; pub const OPERATOR_HSL_HUE: i32 = 25; pub const OPERATOR_HSL_SATURATION: i32 = 26; pub const OPERATOR_HSL_COLOR: i32 = 27; pub const OPERATOR_HSL_LUMINOSITY: i32 = 28; pub const PATH_DATA_TYPE_MOVE_TO: i32 = 0; pub const PATH_DATA_TYPE_LINE_TO: i32 = 1; pub const PATH_DATA_TYPE_CURVE_TO: i32 = 2; pub const PATH_DATA_TYPE_CLOSE_PATH: i32 = 3; pub const CONTENT_COLOR: i32 = 0x1000; pub const CONTENT_ALPHA: i32 = 0x2000; pub const CONTENT_COLOR_ALPHA: i32 = 0x3000; pub const EXTEND_NONE: i32 = 0; pub const EXTEND_REPEAT: i32 = 1; pub const EXTEND_REFLECT: i32 = 2; pub const EXTEND_PAD: i32 = 3; pub const FILTER_FAST: i32 = 0; pub const FILTER_GOOD: i32 = 1; pub const FILTER_BEST: i32 = 2; pub const FILTER_NEAREST: i32 = 3; pub const FILTER_BILINEAR: i32 = 4; pub const FILTER_GAUSSIAN: i32 = 5; pub const PATTERN_TYPE_SOLID: i32 = 0; pub const PATTERN_TYPE_SURFACE: i32 = 1; pub const PATTERN_TYPE_LINEAR_GRADIENT: i32 = 2; pub const PATTERN_TYPE_RADIAL_GRADIENT: i32 = 3; pub const PATTERN_TYPE_MESH: i32 = 4; pub const PATTERN_TYPE_RASTER_SOURCE: i32 = 5; pub const FONT_SLANT_NORMAL: i32 = 0; pub const FONT_SLANT_ITALIC: i32 = 1; pub const FONT_SLANT_OBLIQUE: i32 = 2; pub const FONT_WEIGHT_NORMAL: i32 = 0; pub const FONT_WEIGHT_BOLD: i32 = 1; pub const TEXT_CLUSTER_FLAGS_NONE: i32 = 0x00000000; pub const TEXT_CLUSTER_FLAGS_BACKWARD: i32 = 0x00000001; pub const FONT_TYPE_FONT_TYPE_TOY: i32 = 0; pub const FONT_TYPE_FONT_TYPE_FT: i32 = 1; pub const FONT_TYPE_FONT_TYPE_WIN32: i32 = 2; pub const FONT_TYPE_FONT_TYPE_QUARTZ: i32 = 3; pub const FONT_TYPE_FONT_TYPE_USER: i32 = 4; pub const SUBPIXEL_ORDER_DEFAULT: i32 = 0; pub const SUBPIXEL_ORDER_RGB: i32 = 1; pub const SUBPIXEL_ORDER_BGR: i32 = 2; pub const SUBPIXEL_ORDER_VRGB: i32 = 3; pub const SUBPIXEL_ORDER_VBGR: i32 = 4; pub const HINT_STYLE_DEFAULT: i32 = 0; pub const HINT_STYLE_NONE: i32 = 1; pub const HINT_STYLE_SLIGHT: i32 = 2; pub const HINT_STYLE_MEDIUM: i32 = 3; pub const HINT_STYLE_FULL: i32 = 4; pub const HINT_METRICS_DEFAULT: i32 = 0; pub const HINT_METRICS_OFF: i32 = 1; pub const HINT_METRICS_ON: i32 = 2; pub const SURFACE_TYPE_IMAGE: i32 = 0; pub const SURFACE_TYPE_PDF: i32 = 1; pub const SURFACE_TYPE_PS: i32 = 2; pub const SURFACE_TYPE_XLIB: i32 = 3; pub const SURFACE_TYPE_XCB: i32 = 4; pub const SURFACE_TYPE_GLITZ: i32 = 5; pub 
const SURFACE_TYPE_QUARTZ: i32 = 6; pub const SURFACE_TYPE_WIN32: i32 = 7; pub const SURFACE_TYPE_BE_OS: i32 = 8; pub const SURFACE_TYPE_DIRECT_FB: i32 = 9; pub const SURFACE_TYPE_SVG: i32 = 10; pub const SURFACE_TYPE_OS2: i32 = 11; pub const SURFACE_TYPE_WIN32_PRINTING: i32 = 12; pub const SURFACE_TYPE_QUARTZ_IMAGE: i32 = 13; pub const SURFACE_TYPE_SCRIPT: i32 = 14; pub const SURFACE_TYPE_QT: i32 = 15; pub const SURFACE_TYPE_RECORDING: i32 = 16; pub const SURFACE_TYPE_VG: i32 = 17; pub const SURFACE_TYPE_GL: i32 = 18; pub const SURFACE_TYPE_DRM: i32 = 19; pub const SURFACE_TYPE_TEE: i32 = 20; pub const SURFACE_TYPE_XML: i32 = 21; pub const SURFACE_TYPE_SKIA: i32 = 22; pub const SURFACE_TYPE_SUBSURFACE: i32 = 23; pub const SURFACE_TYPE_COGL: i32 = 24; pub const SVG_UNIT_USER: i32 = 0; pub const SVG_UNIT_EM: i32 = 1; pub const SVG_UNIT_EX: i32 = 2; pub const SVG_UNIT_PX: i32 = 3; pub const SVG_UNIT_IN: i32 = 4; pub const SVG_UNIT_CM: i32 = 5; pub const SVG_UNIT_MM: i32 = 6; pub const SVG_UNIT_PT: i32 = 7; pub const SVG_UNIT_PC: i32 = 8; pub const SVG_UNIT_PERCENT: i32 = 9; pub const FORMAT_INVALID: i32 = -1; pub const FORMAT_A_RGB32: i32 = 0; pub const FORMAT_RGB24: i32 = 1; pub const FORMAT_A8: i32 = 2; pub const FORMAT_A1: i32 = 3; pub const FORMAT_RGB16_565: i32 = 4; pub const FORMAT_RGB30: i32 = 5; pub const REGION_OVERLAP_IN: i32 = 0; pub const REGION_OVERLAP_OUT: i32 = 1; pub const REGION_OVERLAP_PART: i32 = 2; pub const PDF_OUTLINE_FLAG_OPEN: i32 = 0x1; pub const PDF_OUTLINE_FLAG_BOLD: i32 = 0x2; pub const PDF_OUTLINE_FLAG_ITALIC: i32 = 0x4; pub const PDF_METADATA_TITLE: i32 = 0; pub const PDF_METADATA_AUTHOR: i32 = 1; pub const PDF_METADATA_SUBJECT: i32 = 2; pub const PDF_METADATA_KEYWORDS: i32 = 3; pub const PDF_METADATA_CREATOR: i32 = 4; pub const PDF_METADATA_CREATE_DATE: i32 = 5; pub const PDF_METADATA_MOD_DATE: i32 = 6; pub const PDF_VERSION__1_4: i32 = 0; pub const PDF_VERSION__1_5: i32 = 1; pub const SVG_VERSION__1_1: i32 = 0; pub const SVG_VERSION__1_2: i32 = 1; pub const PS_LEVEL__2: i32 = 0; pub const PS_LEVEL__3: i32 = 1; pub const MESH_CORNER_MESH_CORNER0: u32 = 0; pub const MESH_CORNER_MESH_CORNER1: u32 = 1; pub const MESH_CORNER_MESH_CORNER2: u32 = 2; pub const MESH_CORNER_MESH_CORNER3: u32 = 3; pub const CAIRO_FT_SYNTHESIZE_BOLD: u32 = 1; pub const CAIRO_FT_SYNTHESIZE_OBLIQUE: u32 = 2; pub const CAIRO_SCRIPT_MODE_ASCII: i32 = 0; pub const CAIRO_SCRIPT_MODE_BINARY: i32 = 1; pub const CAIRO_DEVICE_TYPE_DRM: i32 = 0; pub const CAIRO_DEVICE_TYPE_GL: i32 = 1; pub const CAIRO_DEVICE_TYPE_SCRIPT: i32 = 2; pub const CAIRO_DEVICE_TYPE_XCB: i32 = 3; pub const CAIRO_DEVICE_TYPE_XLIB: i32 = 4; pub const CAIRO_DEVICE_TYPE_XML: i32 = 5; pub const CAIRO_DEVICE_TYPE_COGL: i32 = 6; pub const CAIRO_DEVICE_TYPE_WIN32: i32 = 7; pub const CAIRO_DEVICE_TYPE_INVALID: i32 = -1;
39.28401
187
0.685556
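// A hedged sketch, not part of the generated bindings above: the acquire/flush/release
// discipline cairo expects before a thread touches a cairo_device_t through the raw
// `cairo_device_*` functions. It assumes `device` is a valid, non-null pointer obtained
// elsewhere, and that `cairo_status_t` is this crate's plain i32 status alias, as the
// STATUS_* constants above suggest.
pub unsafe fn flush_device(device: *mut cairo_device_t) -> cairo_status_t {
    // Every successful cairo_device_acquire must be balanced by cairo_device_release.
    let status = cairo_device_acquire(device);
    if status != STATUS_SUCCESS {
        return status;
    }
    cairo_device_flush(device);
    cairo_device_release(device);
    STATUS_SUCCESS
}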
c1a9636c415d574c9e68b159ced4b4a6a29dbed0
183
#[derive(Debug, Clone)] pub struct Memory { pub items: Vec<u64>, } impl Memory { pub fn new() -> Memory { Memory { items: vec![0; 100], } } }
14.076923
32
0.47541
fc1380c50d2d1daa0c6af4f531214865a9de0d4e
10,110
use crate::result::*; use crate::service::applet; use crate::service::hid; use crate::service::hid::IAppletResource; use crate::service::hid::IHidServer; use crate::ipc::sf; use crate::svc; use crate::mem; use crate::vmem; use crate::service; use core::mem as cmem; bit_enum! { Key (u64) { A = bit!(0), B = bit!(1), X = bit!(2), Y = bit!(3), LStick = bit!(4), RStick = bit!(5), L = bit!(6), R = bit!(7), ZL = bit!(8), ZR = bit!(9), Plus = bit!(10), Minus = bit!(11), Left = bit!(12), Right = bit!(13), Up = bit!(14), Down = bit!(15), LStickLeft = bit!(16), LStickUp = bit!(17), LStickRight = bit!(18), LStickDown = bit!(19), RStickLeft = bit!(20), RStickUp = bit!(21), RStickRight = bit!(22), RStickDown = bit!(23), SLLeft = bit!(24), SRLeft = bit!(25), SLRight = bit!(26), SRRight = bit!(27), Touch = bit!(28) } } #[derive(Copy, Clone, PartialEq, Eq, Debug, Default)] #[repr(C)] pub struct TouchData { pub timestamp: u64, pub pad: u32, pub index: u32, pub x: u32, pub y: u32, pub diameter_x: u32, pub diameter_y: u32, pub angle: u32, pub pad_2: u32 } #[derive(Copy, Clone, PartialEq, Eq, Debug, Default)] #[repr(C)] pub struct TouchEntry { pub timestamp: u64, pub count: u64, pub touches: [TouchData; 16], pub pad: u64 } #[derive(Copy, Clone, PartialEq, Eq, Debug, Default)] #[repr(C)] pub struct TouchState { pub timestamp_ticks: u64, pub entry_count: u64, pub latest_index: u64, pub max_index: u64, pub timestamp: u64, pub entries: [TouchEntry; 17] } #[derive(Copy, Clone, PartialEq, Eq, Debug, Default)] #[repr(C)] pub struct JoystickPosition { pub x: u32, pub y: u32 } bit_enum! { ConnectionState (u64) { None = 0, Connected = bit!(0), Wired = bit!(1) } } #[derive(Copy, Clone, PartialEq, Eq, Debug, Default)] #[repr(C)] pub struct ControllerStateEntry { pub timestamp: u64, pub timestamp_2: u64, pub button_state: u64, pub left_position: JoystickPosition, pub right_position: JoystickPosition, pub connection_state: ConnectionState } #[derive(Copy, Clone, PartialEq, Eq, Debug, Default)] #[repr(C)] pub struct ControllerState { pub timestamp: u64, pub entry_count: u64, pub latest_index: u64, pub max_index: u64, pub entries: [ControllerStateEntry; 17] } #[derive(Copy, Clone, PartialEq, Eq, Debug, Default)] #[repr(C)] pub struct ControllerMacAddress { pub address: [u8; 0x10] } #[derive(Copy, Clone, PartialEq, Eq, Debug, Default)] #[repr(C)] pub struct ControllerColor { pub body: u32, pub buttons: u32 } #[derive(Copy, Clone)] #[repr(C)] pub struct ControllerData { pub status: u32, pub is_joycon_half: bool, pub pad: [u8; 3], pub color_descriptor_single: u32, pub color_single: ControllerColor, pub color_descriptor_split: u32, pub color_right: ControllerColor, pub color_left: ControllerColor, pub pro_controller_state: ControllerState, pub handheld_state: ControllerState, pub joined_state: ControllerState, pub left_state: ControllerState, pub right_state: ControllerState, pub main_no_analog_state: ControllerState, pub main_state: ControllerState, pub unk: [u8; 0x2A78], pub mac_addresses: [ControllerMacAddress; 2], pub unk_2: [u8; 0xE10] } #[derive(Copy, Clone)] #[repr(C)] pub struct SharedMemoryData { pub header: [u8; 0x400], pub touch_state: TouchState, pub pad: [u8; 0x3C0], pub mouse: [u8; 0x400], pub keyboard: [u8; 0x400], pub unk: [u8; 0x400], pub unk_2: [u8; 0x400], pub unk_3: [u8; 0x400], pub unk_4: [u8; 0x400], pub unk_5: [u8; 0x200], pub unk_6: [u8; 0x200], pub unk_7: [u8; 0x200], pub unk_8: [u8; 0x800], pub controller_serials: [u8; 0x4000], pub controllers: [ControllerData; 10], pub unk_9: [u8; 0x4600] } pub struct 
Player { controller: hid::ControllerId, data: *const ControllerData, prev_button_state: u64 } impl Player { pub fn new(controller: hid::ControllerId, data: *const ControllerData) -> Self { Self { controller: controller, data: data, prev_button_state: 0 } } fn get_latest_state_entry(&self) -> *const ControllerStateEntry { unsafe { &(*self.data).main_state.entries[(*self.data).main_state.latest_index as usize] } } fn get_button_state(&self) -> u64 { let last_entry = self.get_latest_state_entry(); unsafe { (*last_entry).button_state } } pub fn get_button_state_held(&mut self) -> Key { let button_state = self.get_button_state(); self.prev_button_state = button_state; Key(button_state) } pub fn get_button_state_down(&mut self) -> Key { let button_state = self.get_button_state(); let down_state = (!self.prev_button_state) & button_state; self.prev_button_state = button_state; Key(down_state) } pub fn get_button_state_up(&mut self) -> Key { let button_state = self.get_button_state(); let up_state = self.prev_button_state & (!button_state); self.prev_button_state = button_state; Key(up_state) } pub fn get_controller(&self) -> hid::ControllerId { self.controller } } #[allow(dead_code)] pub struct InputContext { hid_service: mem::Shared<hid::HidServer>, applet_resource: mem::Shared<hid::AppletResource>, shared_mem_handle: svc::Handle, aruid: applet::AppletResourceUserId, shared_mem_data: *const SharedMemoryData } macro_rules! set_all_controllers_mode_dual_impl { (? $srv:expr, $process_id:expr, $( $id:expr ),*) => { $( $srv.get().set_npad_joy_assignment_mode_dual($process_id, $id)?; )* }; ($srv:expr, $process_id:expr, $( $id:expr ),*) => { $( let _ = $srv.get().set_npad_joy_assignment_mode_dual($process_id, $id); )* }; } #[allow(unreachable_patterns)] fn get_index_for_controller(controller: hid::ControllerId) -> Result<usize> { match controller { hid::ControllerId::Player1 | hid::ControllerId::Player2 | hid::ControllerId::Player3 | hid::ControllerId::Player4 | hid::ControllerId::Player5 | hid::ControllerId::Player6 | hid::ControllerId::Player7 | hid::ControllerId::Player8 => Ok(controller as usize), hid::ControllerId::Handheld => Ok(8), _ => Err(ResultCode::new(0xBAAF)) } } impl InputContext { pub fn new(aruid: applet::AppletResourceUserId, supported_tags: hid::NpadStyleTag, controllers: &[hid::ControllerId]) -> Result<Self> { let hid_srv = service::new_service_object::<hid::HidServer>()?; let hid_process_id = sf::ProcessId::from(aruid); let applet_res = hid_srv.get().create_applet_resource(hid_process_id)?.to::<hid::AppletResource>(); let shmem_handle = applet_res.get().get_shared_memory_handle()?; let shmem_size = cmem::size_of::<SharedMemoryData>(); let shmem_address = vmem::allocate(shmem_size)?; svc::map_shared_memory(shmem_handle.handle, shmem_address, shmem_size, svc::MemoryPermission::Read())?; hid_srv.get().activate_npad(hid_process_id)?; hid_srv.get().set_supported_npad_style_set(hid_process_id, supported_tags)?; hid_srv.get().set_supported_npad_id_type(hid_process_id, sf::Buffer::from_array(controllers))?; hid_srv.get().activate_npad(hid_process_id)?; set_all_controllers_mode_dual_impl!(? 
hid_srv, hid_process_id, hid::ControllerId::Player1, hid::ControllerId::Player2, hid::ControllerId::Player3, hid::ControllerId::Player4, hid::ControllerId::Player5, hid::ControllerId::Player6, hid::ControllerId::Player7, hid::ControllerId::Player8, hid::ControllerId::Handheld); Ok(Self { hid_service: hid_srv, applet_resource: applet_res, shared_mem_handle: shmem_handle.handle, aruid: aruid, shared_mem_data: shmem_address as *const SharedMemoryData }) } pub fn is_controller_connected(&mut self, controller: hid::ControllerId) -> bool { if let Ok(index) = get_index_for_controller(controller) { let controller_data = unsafe { &(*self.shared_mem_data).controllers[index] }; let last_entry = controller_data.main_state.entries[controller_data.main_state.latest_index as usize]; last_entry.connection_state.contains(ConnectionState::Connected()) } else { false } } pub fn get_player(&mut self, controller: hid::ControllerId) -> Result<Player> { let index = get_index_for_controller(controller)?; let controller_data: *const ControllerData = unsafe { &(*self.shared_mem_data).controllers[index] }; Ok(Player::new(controller, controller_data)) } pub fn get_touch_data(&mut self, touch_index: u32) -> Result<TouchData> { unsafe { let touch_entry: *const TouchEntry = &(*self.shared_mem_data).touch_state.entries[(*self.shared_mem_data).touch_state.latest_index as usize]; result_return_unless!((touch_index as u64) < (*touch_entry).count, 0xBAEEF); Ok((*touch_entry).touches[touch_index as usize]) } } } impl Drop for InputContext { fn drop(&mut self) { let hid_process_id = sf::ProcessId::from(self.aruid); set_all_controllers_mode_dual_impl!(self.hid_service, hid_process_id, hid::ControllerId::Player1, hid::ControllerId::Player2, hid::ControllerId::Player3, hid::ControllerId::Player4, hid::ControllerId::Player5, hid::ControllerId::Player6, hid::ControllerId::Player7, hid::ControllerId::Player8, hid::ControllerId::Handheld); let _ = self.hid_service.get().deactivate_npad(hid_process_id); let _ = svc::unmap_shared_memory(self.shared_mem_handle, self.shared_mem_data as *mut u8, cmem::size_of::<SharedMemoryData>()); let _ = svc::close_handle(self.shared_mem_handle); } }
33.58804
331
0.653215
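// A hedged usage sketch for the InputContext/Player API above (not part of the module):
// create the context once, then poll Player1 each frame. The applet resource user id and
// the supported NpadStyleTag value are assumed to be provided by the application, and the
// controller list below is just an illustrative choice.
fn poll_player1(aruid: applet::AppletResourceUserId, supported_tags: hid::NpadStyleTag) -> Result<()> {
    let controllers = [hid::ControllerId::Player1, hid::ControllerId::Handheld];
    let mut input = InputContext::new(aruid, supported_tags, &controllers)?;
    if input.is_controller_connected(hid::ControllerId::Player1) {
        let mut player = input.get_player(hid::ControllerId::Player1)?;
        // Keys that went down since the previous poll of this Player.
        let down = player.get_button_state_down();
        if down.contains(Key::A()) {
            // React to A being pressed this frame.
        }
    }
    Ok(())
}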
016b5fbb9625c12db721c3064393ad2bc7684367
15,427
use crate::collection::CollectionName; use crate::collection::{Collection, IndexingPolicy, PartitionKey}; use crate::prelude::*; use crate::responses::CreateCollectionResponse; use crate::{Offer, ResourceType}; use azure_core::errors::{check_status_extract_headers_and_body, AzureError}; use azure_core::prelude::*; use azure_core::{No, ToAssign, Yes}; use hyper::StatusCode; use std::convert::TryInto; use std::marker::PhantomData; #[derive(Debug, Clone)] pub struct CreateCollectionBuilder< 'a, C, OfferSet, CollectionNameSet, IndexingPolicySet, PartitionKeySet, > where OfferSet: ToAssign, CollectionNameSet: ToAssign, IndexingPolicySet: ToAssign, PartitionKeySet: ToAssign, C: CosmosClient, { database_client: &'a dyn DatabaseClient<C>, p_offer: PhantomData<OfferSet>, p_collection_name: PhantomData<CollectionNameSet>, p_indexing_policy: PhantomData<IndexingPolicySet>, p_partition_key: PhantomData<PartitionKeySet>, offer: Option<Offer>, collection_name: Option<&'a dyn CollectionName>, indexing_policy: Option<&'a IndexingPolicy>, partition_key: Option<&'a PartitionKey>, user_agent: Option<&'a str>, activity_id: Option<&'a str>, consistency_level: Option<ConsistencyLevel<'a>>, } impl<'a, C> CreateCollectionBuilder<'a, C, No, No, No, No> where C: CosmosClient, { #[inline] pub(crate) fn new( database_client: &'a dyn DatabaseClient<C>, ) -> CreateCollectionBuilder<'a, C, No, No, No, No> { CreateCollectionBuilder { database_client, p_offer: PhantomData {}, offer: None, p_collection_name: PhantomData {}, collection_name: None, p_indexing_policy: PhantomData {}, indexing_policy: None, p_partition_key: PhantomData {}, partition_key: None, user_agent: None, activity_id: None, consistency_level: None, } } } impl<'a, C, OfferSet, CollectionNameSet, IndexingPolicySet, PartitionKeySet> DatabaseClientRequired<'a, C> for CreateCollectionBuilder< 'a, C, OfferSet, CollectionNameSet, IndexingPolicySet, PartitionKeySet, > where OfferSet: ToAssign, CollectionNameSet: ToAssign, IndexingPolicySet: ToAssign, PartitionKeySet: ToAssign, C: CosmosClient, { #[inline] fn database_client(&self) -> &'a dyn DatabaseClient<C> { self.database_client } } //get mandatory no traits methods //set mandatory no traits methods impl<'a, C, CollectionNameSet, IndexingPolicySet, PartitionKeySet> OfferRequired for CreateCollectionBuilder<'a, C, Yes, CollectionNameSet, IndexingPolicySet, PartitionKeySet> where CollectionNameSet: ToAssign, IndexingPolicySet: ToAssign, PartitionKeySet: ToAssign, C: CosmosClient, { #[inline] fn offer(&self) -> Offer { self.offer.unwrap() } } impl<'a, C, OfferSet, IndexingPolicySet, PartitionKeySet> CollectionNameRequired<'a> for CreateCollectionBuilder<'a, C, OfferSet, Yes, IndexingPolicySet, PartitionKeySet> where OfferSet: ToAssign, IndexingPolicySet: ToAssign, PartitionKeySet: ToAssign, C: CosmosClient, { #[inline] fn collection_name(&self) -> &'a dyn CollectionName { self.collection_name.unwrap() } } impl<'a, C, OfferSet, CollectionNameSet, PartitionKeySet> IndexingPolicyRequired<'a> for CreateCollectionBuilder<'a, C, OfferSet, CollectionNameSet, Yes, PartitionKeySet> where OfferSet: ToAssign, CollectionNameSet: ToAssign, PartitionKeySet: ToAssign, C: CosmosClient, { #[inline] fn indexing_policy(&self) -> &'a IndexingPolicy { self.indexing_policy.unwrap() } } impl<'a, C, OfferSet, CollectionNameSet, IndexingPolicySet> PartitionKeyRequired<'a> for CreateCollectionBuilder<'a, C, OfferSet, CollectionNameSet, IndexingPolicySet, Yes> where OfferSet: ToAssign, CollectionNameSet: ToAssign, IndexingPolicySet: 
ToAssign, C: CosmosClient, { #[inline] fn partition_key(&self) -> &'a PartitionKey { self.partition_key.unwrap() } } impl<'a, C, OfferSet, CollectionNameSet, IndexingPolicySet, PartitionKeySet> UserAgentOption<'a> for CreateCollectionBuilder< 'a, C, OfferSet, CollectionNameSet, IndexingPolicySet, PartitionKeySet, > where OfferSet: ToAssign, CollectionNameSet: ToAssign, IndexingPolicySet: ToAssign, PartitionKeySet: ToAssign, C: CosmosClient, { #[inline] fn user_agent(&self) -> Option<&'a str> { self.user_agent } } impl<'a, C, OfferSet, CollectionNameSet, IndexingPolicySet, PartitionKeySet> ActivityIdOption<'a> for CreateCollectionBuilder< 'a, C, OfferSet, CollectionNameSet, IndexingPolicySet, PartitionKeySet, > where OfferSet: ToAssign, CollectionNameSet: ToAssign, IndexingPolicySet: ToAssign, PartitionKeySet: ToAssign, C: CosmosClient, { #[inline] fn activity_id(&self) -> Option<&'a str> { self.activity_id } } impl<'a, C, OfferSet, CollectionNameSet, IndexingPolicySet, PartitionKeySet> ConsistencyLevelOption<'a> for CreateCollectionBuilder< 'a, C, OfferSet, CollectionNameSet, IndexingPolicySet, PartitionKeySet, > where OfferSet: ToAssign, CollectionNameSet: ToAssign, IndexingPolicySet: ToAssign, PartitionKeySet: ToAssign, C: CosmosClient, { #[inline] fn consistency_level(&self) -> Option<ConsistencyLevel<'a>> { self.consistency_level.clone() } } impl<'a, C, CollectionNameSet, IndexingPolicySet, PartitionKeySet> OfferSupport for CreateCollectionBuilder<'a, C, No, CollectionNameSet, IndexingPolicySet, PartitionKeySet> where CollectionNameSet: ToAssign, IndexingPolicySet: ToAssign, PartitionKeySet: ToAssign, C: CosmosClient, { type O = CreateCollectionBuilder<'a, C, Yes, CollectionNameSet, IndexingPolicySet, PartitionKeySet>; #[inline] fn with_offer(self, offer: Offer) -> Self::O { CreateCollectionBuilder { database_client: self.database_client, p_offer: PhantomData {}, p_collection_name: PhantomData {}, p_indexing_policy: PhantomData {}, p_partition_key: PhantomData {}, offer: Some(offer), collection_name: self.collection_name, indexing_policy: self.indexing_policy, partition_key: self.partition_key, user_agent: self.user_agent, activity_id: self.activity_id, consistency_level: self.consistency_level, } } } impl<'a, C, OfferSet, IndexingPolicySet, PartitionKeySet> CollectionNameSupport<'a> for CreateCollectionBuilder<'a, C, OfferSet, No, IndexingPolicySet, PartitionKeySet> where OfferSet: ToAssign, IndexingPolicySet: ToAssign, PartitionKeySet: ToAssign, C: CosmosClient, { type O = CreateCollectionBuilder<'a, C, OfferSet, Yes, IndexingPolicySet, PartitionKeySet>; #[inline] fn with_collection_name(self, collection_name: &'a dyn CollectionName) -> Self::O { CreateCollectionBuilder { database_client: self.database_client, p_offer: PhantomData {}, p_collection_name: PhantomData {}, p_indexing_policy: PhantomData {}, p_partition_key: PhantomData {}, offer: self.offer, collection_name: Some(collection_name), indexing_policy: self.indexing_policy, partition_key: self.partition_key, user_agent: self.user_agent, activity_id: self.activity_id, consistency_level: self.consistency_level, } } } impl<'a, C, OfferSet, CollectionNameSet, PartitionKeySet> IndexingPolicySupport<'a> for CreateCollectionBuilder<'a, C, OfferSet, CollectionNameSet, No, PartitionKeySet> where OfferSet: ToAssign, CollectionNameSet: ToAssign, PartitionKeySet: ToAssign, C: CosmosClient, { type O = CreateCollectionBuilder<'a, C, OfferSet, CollectionNameSet, Yes, PartitionKeySet>; #[inline] fn with_indexing_policy(self, indexing_policy: 
&'a IndexingPolicy) -> Self::O { CreateCollectionBuilder { database_client: self.database_client, p_offer: PhantomData {}, p_collection_name: PhantomData {}, p_indexing_policy: PhantomData {}, p_partition_key: PhantomData {}, offer: self.offer, collection_name: self.collection_name, indexing_policy: Some(indexing_policy), partition_key: self.partition_key, user_agent: self.user_agent, activity_id: self.activity_id, consistency_level: self.consistency_level, } } } impl<'a, C, OfferSet, CollectionNameSet, IndexingPolicySet> PartitionKeySupport<'a> for CreateCollectionBuilder<'a, C, OfferSet, CollectionNameSet, IndexingPolicySet, No> where OfferSet: ToAssign, CollectionNameSet: ToAssign, IndexingPolicySet: ToAssign, C: CosmosClient, { type O = CreateCollectionBuilder<'a, C, OfferSet, CollectionNameSet, IndexingPolicySet, Yes>; #[inline] fn with_partition_key(self, partition_key: &'a PartitionKey) -> Self::O { CreateCollectionBuilder { database_client: self.database_client, p_offer: PhantomData {}, p_collection_name: PhantomData {}, p_indexing_policy: PhantomData {}, p_partition_key: PhantomData {}, offer: self.offer, collection_name: self.collection_name, indexing_policy: self.indexing_policy, partition_key: Some(partition_key), user_agent: self.user_agent, activity_id: self.activity_id, consistency_level: self.consistency_level, } } } impl<'a, C, OfferSet, CollectionNameSet, IndexingPolicySet, PartitionKeySet> UserAgentSupport<'a> for CreateCollectionBuilder< 'a, C, OfferSet, CollectionNameSet, IndexingPolicySet, PartitionKeySet, > where OfferSet: ToAssign, CollectionNameSet: ToAssign, IndexingPolicySet: ToAssign, PartitionKeySet: ToAssign, C: CosmosClient, { type O = CreateCollectionBuilder< 'a, C, OfferSet, CollectionNameSet, IndexingPolicySet, PartitionKeySet, >; #[inline] fn with_user_agent(self, user_agent: &'a str) -> Self::O { CreateCollectionBuilder { database_client: self.database_client, p_offer: PhantomData {}, p_collection_name: PhantomData {}, p_indexing_policy: PhantomData {}, p_partition_key: PhantomData {}, offer: self.offer, collection_name: self.collection_name, indexing_policy: self.indexing_policy, partition_key: self.partition_key, user_agent: Some(user_agent), activity_id: self.activity_id, consistency_level: self.consistency_level, } } } impl<'a, C, OfferSet, CollectionNameSet, IndexingPolicySet, PartitionKeySet> ActivityIdSupport<'a> for CreateCollectionBuilder< 'a, C, OfferSet, CollectionNameSet, IndexingPolicySet, PartitionKeySet, > where OfferSet: ToAssign, CollectionNameSet: ToAssign, IndexingPolicySet: ToAssign, PartitionKeySet: ToAssign, C: CosmosClient, { type O = CreateCollectionBuilder< 'a, C, OfferSet, CollectionNameSet, IndexingPolicySet, PartitionKeySet, >; #[inline] fn with_activity_id(self, activity_id: &'a str) -> Self::O { CreateCollectionBuilder { database_client: self.database_client, p_offer: PhantomData {}, p_collection_name: PhantomData {}, p_indexing_policy: PhantomData {}, p_partition_key: PhantomData {}, offer: self.offer, collection_name: self.collection_name, indexing_policy: self.indexing_policy, partition_key: self.partition_key, user_agent: self.user_agent, activity_id: Some(activity_id), consistency_level: self.consistency_level, } } } impl<'a, C, OfferSet, CollectionNameSet, IndexingPolicySet, PartitionKeySet> ConsistencyLevelSupport<'a> for CreateCollectionBuilder< 'a, C, OfferSet, CollectionNameSet, IndexingPolicySet, PartitionKeySet, > where OfferSet: ToAssign, CollectionNameSet: ToAssign, IndexingPolicySet: ToAssign, PartitionKeySet: 
ToAssign, C: CosmosClient, { type O = CreateCollectionBuilder< 'a, C, OfferSet, CollectionNameSet, IndexingPolicySet, PartitionKeySet, >; #[inline] fn with_consistency_level(self, consistency_level: ConsistencyLevel<'a>) -> Self::O { CreateCollectionBuilder { database_client: self.database_client, p_offer: PhantomData {}, p_collection_name: PhantomData {}, p_indexing_policy: PhantomData {}, p_partition_key: PhantomData {}, offer: self.offer, collection_name: self.collection_name, indexing_policy: self.indexing_policy, partition_key: self.partition_key, user_agent: self.user_agent, activity_id: self.activity_id, consistency_level: Some(consistency_level), } } } // methods callable only when every mandatory field has been filled impl<'a, C> CreateCollectionBuilder<'a, C, Yes, Yes, Yes, Yes> where C: CosmosClient, { pub async fn execute(&self) -> Result<CreateCollectionResponse, AzureError> { trace!("CreateCollectionBuilder::execute called"); let mut req = self.database_client.cosmos_client().prepare_request( &format!("dbs/{}/colls", self.database_client.database_name()), hyper::Method::POST, ResourceType::Collections, ); req = req.header(http::header::CONTENT_TYPE, "application/json"); // add trait headers let req = OfferRequired::add_header(self, req); let req = UserAgentOption::add_header(self, req); let req = ActivityIdOption::add_header(self, req); let req = ConsistencyLevelOption::add_header(self, req); let mut collection = Collection::new( self.collection_name().name(), self.indexing_policy().to_owned(), ); collection.parition_key = self.partition_key().to_owned(); let body = serde_json::to_string(&collection)?; debug!("body == {}", body); let req = req.body(hyper::Body::from(body))?; debug!("\nreq == {:?}", req); let (headers, body) = check_status_extract_headers_and_body( self.database_client.hyper_client().request(req), StatusCode::CREATED, ) .await?; Ok((&headers, &body as &[u8]).try_into()?) } }
30.308448
99
0.648538
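// A hedged, crate-internal usage sketch of the typestate builder above (its constructor is
// pub(crate)): the Yes/No marker parameters mean `execute` only becomes callable once the
// offer, collection name, indexing policy and partition key have all been supplied. The
// argument values, and the *Support traits being in scope via the crate prelude, are
// assumptions of this sketch.
async fn create_collection_example<C: CosmosClient>(
    database_client: &dyn DatabaseClient<C>,
    offer: Offer,
    name: &dyn CollectionName,
    indexing_policy: &IndexingPolicy,
    partition_key: &PartitionKey,
) -> Result<CreateCollectionResponse, AzureError> {
    CreateCollectionBuilder::new(database_client)
        .with_offer(offer)
        .with_collection_name(name)
        .with_indexing_policy(indexing_policy)
        .with_partition_key(partition_key)
        .execute()
        .await
}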
01b0d3416dff7509c7daa8f1b7444261c938e141
4,240
//! ListGroupsRequest //! //! See the schema for this message [here](https://github.com/apache/kafka/blob/trunk/clients/src/main/resources/common/message/ListGroupsRequest.json). // WARNING: the items of this module are generated and should not be edited directly #![allow(unused)] use std::borrow::Borrow; use std::collections::BTreeMap; use bytes::Bytes; use log::error; use uuid::Uuid; use crate::protocol::{ Encodable, Decodable, MapEncodable, MapDecodable, Encoder, Decoder, EncodeError, DecodeError, Message, HeaderVersion, VersionRange, types, write_unknown_tagged_fields, compute_unknown_tagged_fields_size, StrBytes, buf::{ByteBuf, ByteBufMut} }; /// Valid versions: 0-4 #[derive(Debug, Clone, PartialEq, derive_builder::Builder)] pub struct ListGroupsRequest { /// The states of the groups we want to list. If empty all groups are returned with their state. /// /// Supported API versions: 4 pub states_filter: Vec<StrBytes>, /// Other tagged fields pub unknown_tagged_fields: BTreeMap<i32, Vec<u8>>, } impl Encodable for ListGroupsRequest { fn encode<B: ByteBufMut>(&self, buf: &mut B, version: i16) -> Result<(), EncodeError> { if version >= 4 { types::CompactArray(types::CompactString).encode(buf, &self.states_filter)?; } else { if !self.states_filter.is_empty() { return Err(EncodeError) } } if version >= 3 { let num_tagged_fields = self.unknown_tagged_fields.len(); if num_tagged_fields > std::u32::MAX as usize { error!("Too many tagged fields to encode ({} fields)", num_tagged_fields); return Err(EncodeError); } types::UnsignedVarInt.encode(buf, num_tagged_fields as u32)?; write_unknown_tagged_fields(buf, 0.., &self.unknown_tagged_fields)?; } Ok(()) } fn compute_size(&self, version: i16) -> Result<usize, EncodeError> { let mut total_size = 0; if version >= 4 { total_size += types::CompactArray(types::CompactString).compute_size(&self.states_filter)?; } else { if !self.states_filter.is_empty() { return Err(EncodeError) } } if version >= 3 { let num_tagged_fields = self.unknown_tagged_fields.len(); if num_tagged_fields > std::u32::MAX as usize { error!("Too many tagged fields to encode ({} fields)", num_tagged_fields); return Err(EncodeError); } total_size += types::UnsignedVarInt.compute_size(num_tagged_fields as u32)?; total_size += compute_unknown_tagged_fields_size(&self.unknown_tagged_fields)?; } Ok(total_size) } } impl Decodable for ListGroupsRequest { fn decode<B: ByteBuf>(buf: &mut B, version: i16) -> Result<Self, DecodeError> { let states_filter = if version >= 4 { types::CompactArray(types::CompactString).decode(buf)? } else { Default::default() }; let mut unknown_tagged_fields = BTreeMap::new(); if version >= 3 { let num_tagged_fields = types::UnsignedVarInt.decode(buf)?; for _ in 0..num_tagged_fields { let tag: u32 = types::UnsignedVarInt.decode(buf)?; let size: u32 = types::UnsignedVarInt.decode(buf)?; let mut unknown_value = vec![0; size as usize]; buf.try_copy_to_slice(&mut unknown_value)?; unknown_tagged_fields.insert(tag as i32, unknown_value); } } Ok(Self { states_filter, unknown_tagged_fields, }) } } impl Default for ListGroupsRequest { fn default() -> Self { Self { states_filter: Default::default(), unknown_tagged_fields: BTreeMap::new(), } } } impl Message for ListGroupsRequest { const VERSIONS: VersionRange = VersionRange { min: 0, max: 4 }; } impl HeaderVersion for ListGroupsRequest { fn header_version(version: i16) -> i16 { if version >= 3 { 2 } else { 1 } } }
34.193548
152
0.607075
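// A hedged round-trip sketch (not generated code) for the message above: encode a
// version-4 ListGroupsRequest and decode it back. It assumes `bytes::BytesMut` satisfies
// this crate's ByteBuf/ByteBufMut traits, which is how the encoders are normally driven.
fn list_groups_round_trip(states: Vec<StrBytes>) -> Result<ListGroupsRequest, DecodeError> {
    let mut request = ListGroupsRequest::default();
    // states_filter is only written for version >= 4; older versions reject a non-empty value.
    request.states_filter = states;
    let mut buf = bytes::BytesMut::new();
    request
        .encode(&mut buf, 4)
        .expect("version 4 supports states_filter");
    ListGroupsRequest::decode(&mut buf, 4)
}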
09b7c0bb15d77366e5c05122ef977a41a2f87e4c
281
use tokio::sync::oneshot; #[derive(Debug)] pub enum Command { Get { key: Key, cb: oneshot::Sender<Option<Value>>, }, Set { key: Key, value: Value, }, } pub type Key = String; pub type KeyRef<'a> = &'a str; pub type Value = String;
15.611111
43
0.533808
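// A hedged sketch, not part of the original file: sending Command::Get over a tokio mpsc
// channel and awaiting the reply on the bundled oneshot sender. The task that owns the
// key/value store and answers each request is assumed to exist elsewhere.
use tokio::sync::mpsc;

pub async fn get_value(commands: &mpsc::Sender<Command>, key: Key) -> Option<Value> {
    let (cb, rx) = oneshot::channel();
    // If the command loop has already shut down, report the value as missing.
    commands.send(Command::Get { key, cb }).await.ok()?;
    rx.await.ok().flatten()
}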
ffc61a1d75f2eec5277045c42f85ece967ebe11d
17,954
#![cfg_attr(not(feature = "std"), no_std)] // `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. #![recursion_limit = "256"] // Make the WASM binary available. #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); use pallet_grandpa::{ fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList, }; use sp_api::impl_runtime_apis; use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, traits::{AccountIdLookup, BlakeTwo256, Block as BlockT, IdentifyAccount, NumberFor, Verify}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, MultiSignature, }; use sp_std::prelude::*; #[cfg(feature = "std")] use sp_version::NativeVersion; use sp_version::RuntimeVersion; // A few exports that help ease life for downstream crates. pub use frame_support::{ construct_runtime, parameter_types, traits::{KeyOwnerProofSystem, Randomness, StorageInfo}, weights::{ constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND}, IdentityFee, Weight, }, StorageValue, }; pub use pallet_balances::Call as BalancesCall; pub use pallet_timestamp::Call as TimestampCall; use pallet_transaction_payment::CurrencyAdapter; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; pub use sp_runtime::{Perbill, Permill}; // pub use pallet_recurringpayments; // use runtime_common:: /// Import the template pallet. pub use pallet_template; /// An index to a block. pub type BlockNumber = u32; /// Alias to 512-bit hash when used in the context of a transaction signature on the chain. pub type Signature = MultiSignature; /// Some way of identifying an account on the chain. We intentionally make it equivalent /// to the public key of our transaction signing scheme. pub type AccountId = <<Signature as Verify>::Signer as IdentifyAccount>::AccountId; /// Balance of an account. pub type Balance = u128; /// Index of a transaction in the chain. pub type Index = u32; /// A hash of some data used by the chain. pub type Hash = sp_core::H256; /// Opaque types. These are used by the CLI to instantiate machinery that don't need to know /// the specifics of the runtime. They can then be made to be agnostic over specific formats /// of data like extrinsics, allowing for them to continue syncing the network through upgrades /// to even the core data structures. pub mod opaque { use super::*; pub use sp_runtime::OpaqueExtrinsic as UncheckedExtrinsic; /// Opaque block header type. pub type Header = generic::Header<BlockNumber, BlakeTwo256>; /// Opaque block type. pub type Block = generic::Block<Header, UncheckedExtrinsic>; /// Opaque block identifier type. pub type BlockId = generic::BlockId<Block>; impl_opaque_keys! { pub struct SessionKeys { pub aura: Aura, pub grandpa: Grandpa, } } } // To learn more about runtime versioning and what each of the following value means: // https://docs.substrate.io/v3/runtime/upgrades#runtime-versioning #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("node-template"), impl_name: create_runtime_str!("node-template"), authoring_version: 1, // The version of the runtime specification. A full node will not attempt to use its native // runtime in substitute for the on-chain Wasm runtime unless all of `spec_name`, // `spec_version`, and `authoring_version` are the same between Wasm and native. 
// This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. spec_version: 100, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, }; /// This determines the average expected block time that we are targeting. /// Blocks will be produced at a minimum duration defined by `SLOT_DURATION`. /// `SLOT_DURATION` is picked up by `pallet_timestamp` which is in turn picked /// up by `pallet_aura` to implement `fn slot_duration()`. /// /// Change this to adjust the block time. pub const MILLISECS_PER_BLOCK: u64 = 3000; // NOTE: Currently it is not possible to change the slot duration after the chain has started. // Attempting to do so will brick block production. pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; // Time is measured by number of blocks. pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber); pub const HOURS: BlockNumber = MINUTES * 60; pub const DAYS: BlockNumber = HOURS * 24; /// The version information used to identify this runtime when compiled natively. #[cfg(feature = "std")] pub fn native_version() -> NativeVersion { NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } } const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); parameter_types! { pub const Version: RuntimeVersion = VERSION; pub const BlockHashCount: BlockNumber = 2400; /// We allow for 2 seconds of compute with a 6 second average block time. pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights ::with_sensible_defaults(2 * WEIGHT_PER_SECOND, NORMAL_DISPATCH_RATIO); pub BlockLength: frame_system::limits::BlockLength = frame_system::limits::BlockLength ::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); pub const SS58Prefix: u8 = 42; } // Configure FRAME pallets to include in runtime. impl frame_system::Config for Runtime { /// The basic call filter to use in dispatchable. type BaseCallFilter = frame_support::traits::Everything; /// Block & extrinsics weights: base values and limits. type BlockWeights = BlockWeights; /// The maximum length of a block (in bytes). type BlockLength = BlockLength; /// The identifier used to distinguish between accounts. type AccountId = AccountId; /// The aggregated dispatch type that is available for extrinsics. type Call = Call; /// The lookup mechanism to get account ID from whatever is passed in dispatchers. type Lookup = AccountIdLookup<AccountId, ()>; /// The index type for storing how many extrinsics an account has signed. type Index = Index; /// The index type for blocks. type BlockNumber = BlockNumber; /// The type for hashing blocks and tries. type Hash = Hash; /// The hashing algorithm used. type Hashing = BlakeTwo256; /// The header type. type Header = generic::Header<BlockNumber, BlakeTwo256>; /// The ubiquitous event type. type Event = Event; /// The ubiquitous origin type. type Origin = Origin; /// Maximum number of block number to block hash mappings to keep (oldest pruned first). type BlockHashCount = BlockHashCount; /// The weight of database operations that the runtime can invoke. type DbWeight = RocksDbWeight; /// Version of the runtime. type Version = Version; /// Converts a module to the index of the module in `construct_runtime!`. /// /// This type is being generated by `construct_runtime!`. type PalletInfo = PalletInfo; /// What to do if a new account is created. type OnNewAccount = (); /// What to do if an account is fully reaped from the system. 
type OnKilledAccount = (); /// The data to be stored in an account. type AccountData = pallet_balances::AccountData<Balance>; /// Weight information for the extrinsics of this pallet. type SystemWeightInfo = (); /// This is used as an identifier of the chain. 42 is the generic substrate prefix. type SS58Prefix = SS58Prefix; /// The set code logic, just the default since we're not a parachain. type OnSetCode = (); } impl pallet_randomness_collective_flip::Config for Runtime {} parameter_types! { pub const MaxAuthorities: u32 = 32; } impl pallet_aura::Config for Runtime { type AuthorityId = AuraId; type DisabledValidators = (); type MaxAuthorities = MaxAuthorities; } impl pallet_grandpa::Config for Runtime { type Event = Event; type Call = Call; type KeyOwnerProofSystem = (); type KeyOwnerProof = <Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(KeyTypeId, GrandpaId)>>::Proof; type KeyOwnerIdentification = <Self::KeyOwnerProofSystem as KeyOwnerProofSystem<( KeyTypeId, GrandpaId, )>>::IdentificationTuple; type HandleEquivocation = (); type WeightInfo = (); type MaxAuthorities = MaxAuthorities; } parameter_types! { pub const MinimumPeriod: u64 = SLOT_DURATION / 2; } impl pallet_timestamp::Config for Runtime { /// A timestamp: milliseconds since the unix epoch. type Moment = u64; type OnTimestampSet = Aura; type MinimumPeriod = MinimumPeriod; type WeightInfo = (); } parameter_types! { pub const ExistentialDeposit: u128 = 500; pub const MaxLocks: u32 = 50; } impl pallet_balances::Config for Runtime { type MaxLocks = MaxLocks; type MaxReserves = (); type ReserveIdentifier = [u8; 8]; /// The type for recording an account's balance. type Balance = Balance; /// The ubiquitous event type. type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = pallet_balances::weights::SubstrateWeight<Runtime>; } parameter_types! { pub const TransactionByteFee: Balance = 1; pub OperationalFeeMultiplier: u8 = 5; } impl pallet_transaction_payment::Config for Runtime { type OnChargeTransaction = CurrencyAdapter<Balances, ()>; type TransactionByteFee = TransactionByteFee; type OperationalFeeMultiplier = OperationalFeeMultiplier; type WeightToFee = IdentityFee<Balance>; type FeeMultiplierUpdate = (); } impl pallet_sudo::Config for Runtime { type Event = Event; type Call = Call; } /// Configure the pallet-template in pallets/template. impl pallet_template::Config for Runtime { type Event = Event; } //* --------- */ impl sumstorage::Config for Runtime { type Event = Event; } // Create the runtime by composing the FRAME pallets that were previously configured. construct_runtime!( pub enum Runtime where Block = Block, NodeBlock = opaque::Block, UncheckedExtrinsic = UncheckedExtrinsic { System: frame_system, RandomnessCollectiveFlip: pallet_randomness_collective_flip, Timestamp: pallet_timestamp, Aura: pallet_aura, Grandpa: pallet_grandpa, Balances: pallet_balances, TransactionPayment: pallet_transaction_payment, Sudo: pallet_sudo, // Include the custom logic from the pallet-template in the runtime. TemplateModule: pallet_template, SumStorage: pallet_sumstorage, } ); /// The address format for describing accounts. pub type Address = sp_runtime::MultiAddress<AccountId, ()>; /// Block header type as expected by this runtime. pub type Header = generic::Header<BlockNumber, BlakeTwo256>; /// Block type as expected by this runtime. pub type Block = generic::Block<Header, UncheckedExtrinsic>; /// The SignedExtension to the basic transaction logic. 
pub type SignedExtra = ( frame_system::CheckSpecVersion<Runtime>, frame_system::CheckTxVersion<Runtime>, frame_system::CheckGenesis<Runtime>, frame_system::CheckEra<Runtime>, frame_system::CheckNonce<Runtime>, frame_system::CheckWeight<Runtime>, pallet_transaction_payment::ChargeTransactionPayment<Runtime>, ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = generic::UncheckedExtrinsic<Address, Call, Signature, SignedExtra>; /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< Runtime, Block, frame_system::ChainContext<Runtime>, Runtime, AllPallets, >; impl_runtime_apis! { impl sp_api::Core<Block> for Runtime { fn version() -> RuntimeVersion { VERSION } fn execute_block(block: Block) { Executive::execute_block(block); } fn initialize_block(header: &<Block as BlockT>::Header) { Executive::initialize_block(header) } } impl sp_api::Metadata<Block> for Runtime { fn metadata() -> OpaqueMetadata { OpaqueMetadata::new(Runtime::metadata().into()) } } impl sp_block_builder::BlockBuilder<Block> for Runtime { fn apply_extrinsic(extrinsic: <Block as BlockT>::Extrinsic) -> ApplyExtrinsicResult { Executive::apply_extrinsic(extrinsic) } fn finalize_block() -> <Block as BlockT>::Header { Executive::finalize_block() } fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<<Block as BlockT>::Extrinsic> { data.create_extrinsics() } fn check_inherents( block: Block, data: sp_inherents::InherentData, ) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block> for Runtime { fn validate_transaction( source: TransactionSource, tx: <Block as BlockT>::Extrinsic, block_hash: <Block as BlockT>::Hash, ) -> TransactionValidity { Executive::validate_transaction(source, tx, block_hash) } } impl sp_offchain::OffchainWorkerApi<Block> for Runtime { fn offchain_worker(header: &<Block as BlockT>::Header) { Executive::offchain_worker(header) } } impl sp_consensus_aura::AuraApi<Block, AuraId> for Runtime { fn slot_duration() -> sp_consensus_aura::SlotDuration { sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) } fn authorities() -> Vec<AuraId> { Aura::authorities().into_inner() } } impl sp_session::SessionKeys<Block> for Runtime { fn generate_session_keys(seed: Option<Vec<u8>>) -> Vec<u8> { opaque::SessionKeys::generate(seed) } fn decode_session_keys( encoded: Vec<u8>, ) -> Option<Vec<(Vec<u8>, KeyTypeId)>> { opaque::SessionKeys::decode_into_raw_public_keys(&encoded) } } impl fg_primitives::GrandpaApi<Block> for Runtime { fn grandpa_authorities() -> GrandpaAuthorityList { Grandpa::grandpa_authorities() } fn current_set_id() -> fg_primitives::SetId { Grandpa::current_set_id() } fn submit_report_equivocation_unsigned_extrinsic( _equivocation_proof: fg_primitives::EquivocationProof< <Block as BlockT>::Hash, NumberFor<Block>, >, _key_owner_proof: fg_primitives::OpaqueKeyOwnershipProof, ) -> Option<()> { None } fn generate_key_ownership_proof( _set_id: fg_primitives::SetId, _authority_id: GrandpaId, ) -> Option<fg_primitives::OpaqueKeyOwnershipProof> { // NOTE: this is the only implementation possible since we've // defined our key owner proof type as a bottom type (i.e. a type // with no values). 
None } } impl frame_system_rpc_runtime_api::AccountNonceApi<Block, AccountId, Index> for Runtime { fn account_nonce(account: AccountId) -> Index { System::account_nonce(account) } } impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi<Block, Balance> for Runtime { fn query_info( uxt: <Block as BlockT>::Extrinsic, len: u32, ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo<Balance> { TransactionPayment::query_info(uxt, len) } fn query_fee_details( uxt: <Block as BlockT>::Extrinsic, len: u32, ) -> pallet_transaction_payment::FeeDetails<Balance> { TransactionPayment::query_fee_details(uxt, len) } } //* ------------------------------------------------------------------------------ */ // Implement a custom run time api impl sumstorage_rpc_runtime_api::SumStorageApi<Block> for Runtime { fn get_sum() -> u32 { SumStorage::get_sum() } } #[cfg(feature = "runtime-benchmarks")] impl frame_benchmarking::Benchmark<Block> for Runtime { fn benchmark_metadata(extra: bool) -> ( Vec<frame_benchmarking::BenchmarkList>, Vec<frame_support::traits::StorageInfo>, ) { use frame_benchmarking::{list_benchmark, baseline, Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use baseline::Pallet as BaselineBench; let mut list = Vec::<BenchmarkList>::new(); list_benchmark!(list, extra, frame_benchmarking, BaselineBench::<Runtime>); list_benchmark!(list, extra, frame_system, SystemBench::<Runtime>); list_benchmark!(list, extra, pallet_balances, Balances); list_benchmark!(list, extra, pallet_timestamp, Timestamp); list_benchmark!(list, extra, pallet_template, TemplateModule); let storage_info = AllPalletsWithSystem::storage_info(); return (list, storage_info) } fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig ) -> Result<Vec<frame_benchmarking::BenchmarkBatch>, sp_runtime::RuntimeString> { use frame_benchmarking::{baseline, Benchmarking, BenchmarkBatch, add_benchmark, TrackedStorageKey}; use frame_system_benchmarking::Pallet as SystemBench; use baseline::Pallet as BaselineBench; impl frame_system_benchmarking::Config for Runtime {} impl baseline::Config for Runtime {} let whitelist: Vec<TrackedStorageKey> = vec![ // Block Number hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), // Total Issuance hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), // Execution Phase hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), // Event Count hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), // System Events hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), ]; let mut batches = Vec::<BenchmarkBatch>::new(); let params = (&config, &whitelist); add_benchmark!(params, batches, frame_benchmarking, BaselineBench::<Runtime>); add_benchmark!(params, batches, frame_system, SystemBench::<Runtime>); add_benchmark!(params, batches, pallet_balances, Balances); add_benchmark!(params, batches, pallet_timestamp, Timestamp); add_benchmark!(params, batches, pallet_template, TemplateModule); Ok(batches) } } }
32.703097
106
0.743066
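// A hedged sketch of the declaration side of the custom runtime API implemented above. It
// would live in the separate `sumstorage_rpc_runtime_api` crate rather than in this runtime
// file; the exact crate layout is an assumption, but sp_api::decl_runtime_apis! is the
// standard way such a trait is declared.
sp_api::decl_runtime_apis! {
    pub trait SumStorageApi {
        fn get_sum() -> u32;
    }
}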
796c4d76096b5ac6bd96bbf26c3c5e32f6b7800c
103
#![feature(try_from)] extern crate combine; extern crate pretty; pub mod cst; mod lexer; mod parser;
11.444444
21
0.737864
50d3c4096a20538f74be13ecbb95d414f817c694
2,574
#[doc = r"Value read from the register"] pub struct R { bits: u32, } #[doc = r"Value to write to the register"] pub struct W { bits: u32, } impl super::DATA_10_IN { #[doc = r"Modifies the contents of the register"] #[inline(always)] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); self.register.set(f(&R { bits }, &mut W { bits }).bits); } #[doc = r"Reads the contents of the register"] #[inline(always)] pub fn read(&self) -> R { R { bits: self.register.get(), } } #[doc = r"Writes to the register"] #[inline(always)] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { self.register.set( f(&mut W { bits: Self::reset_value(), }) .bits, ); } #[doc = r"Reset value of the register"] #[inline(always)] pub const fn reset_value() -> u32 { 0 } #[doc = r"Writes the reset value to the register"] #[inline(always)] pub fn reset(&self) { self.register.set(Self::reset_value()) } } #[doc = r"Value of the field"] pub struct SHAMD5_DATA_10_IN_DATAR { bits: u32, } impl SHAMD5_DATA_10_IN_DATAR { #[doc = r"Value of the field as raw bits"] #[inline(always)] pub fn bits(&self) -> u32 { self.bits } } #[doc = r"Proxy"] pub struct _SHAMD5_DATA_10_IN_DATAW<'a> { w: &'a mut W, } impl<'a> _SHAMD5_DATA_10_IN_DATAW<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u32) -> &'a mut W { self.w.bits &= !(4294967295 << 0); self.w.bits |= ((value as u32) & 4294967295) << 0; self.w } } impl R { #[doc = r"Value of the register as raw bits"] #[inline(always)] pub fn bits(&self) -> u32 { self.bits } #[doc = "Bits 0:31 - Digest/Key Data"] #[inline(always)] pub fn shamd5_data_10_in_data(&self) -> SHAMD5_DATA_10_IN_DATAR { let bits = ((self.bits >> 0) & 4294967295) as u32; SHAMD5_DATA_10_IN_DATAR { bits } } } impl W { #[doc = r"Writes raw bits to the register"] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } #[doc = "Bits 0:31 - Digest/Key Data"] #[inline(always)] pub fn shamd5_data_10_in_data(&mut self) -> _SHAMD5_DATA_10_IN_DATAW { _SHAMD5_DATA_10_IN_DATAW { w: self } } }
25.74
74
0.544678
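// A hedged usage sketch (not generated code) for the register API above: write one 32-bit
// digest/key word and read it back. `reg` is assumed to be the DATA_10_IN register borrowed
// from the owning peripheral's register block.
fn load_digest_word(reg: &super::DATA_10_IN, word: u32) -> u32 {
    // The field spans bits 0:31, so the raw `bits` write cannot produce an invalid value here.
    reg.write(|w| unsafe { w.shamd5_data_10_in_data().bits(word) });
    reg.read().shamd5_data_10_in_data().bits()
}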
2325de001a53a374e027ffe3ff01557c84738cfd
1,059
use serde::Serialize; use crate::{ net, requests::{Request, ResponseResult}, types::{Chat, ChatId, User, ChatMember}, Bot, }; use std::sync::Arc; /// Use this method to search the members of a guild by a query string, returning the /// matching members. #[serde_with_macros::skip_serializing_none] #[derive(Debug, Clone, Serialize)] pub struct SearchGuildMember { #[serde(skip)] bot: Arc<Bot>, pub guild_id: i64, pub query: String, } #[async_trait::async_trait] impl Request for SearchGuildMember { type Output = Vec<ChatMember>; async fn send(&self) -> ResponseResult<Vec<ChatMember>> { net::request_json(self.bot.client(), self.bot.token(), "searchGuildMember", &self) .await } } impl SearchGuildMember { pub(crate) fn new(bot: Arc<Bot>, guild_id: i64, query: String) -> Self { Self { bot, guild_id, query } } }
26.475
90
0.66289
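// A hedged, crate-internal usage sketch (the constructor above is pub(crate)): build the
// request and await the matching members. The bot instance, guild id and query string are
// assumed to come from the caller.
pub(crate) async fn search_members(bot: Arc<Bot>, guild_id: i64, query: String) -> ResponseResult<Vec<ChatMember>> {
    SearchGuildMember::new(bot, guild_id, query).send().await
}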
752bbc65dd37137de70beb873808f1ece020a4dd
5,601
#[doc = "Register `MATRIX_WPMR` reader"] pub struct R(crate::R<MATRIX_WPMR_SPEC>); impl core::ops::Deref for R { type Target = crate::R<MATRIX_WPMR_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::convert::From<crate::R<MATRIX_WPMR_SPEC>> for R { fn from(reader: crate::R<MATRIX_WPMR_SPEC>) -> Self { R(reader) } } #[doc = "Register `MATRIX_WPMR` writer"] pub struct W(crate::W<MATRIX_WPMR_SPEC>); impl core::ops::Deref for W { type Target = crate::W<MATRIX_WPMR_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl core::convert::From<crate::W<MATRIX_WPMR_SPEC>> for W { fn from(writer: crate::W<MATRIX_WPMR_SPEC>) -> Self { W(writer) } } #[doc = "Field `WPEN` reader - Write Protection Enable"] pub struct WPEN_R(crate::FieldReader<bool, bool>); impl WPEN_R { pub(crate) fn new(bits: bool) -> Self { WPEN_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for WPEN_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `WPEN` writer - Write Protection Enable"] pub struct WPEN_W<'a> { w: &'a mut W, } impl<'a> WPEN_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | (value as u32 & 0x01); self.w } } #[doc = "Write Protection Key\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u32)] pub enum WPKEY_A { #[doc = "5062996: Writing any other value in this field aborts the write operation of the WPEN bit.Always reads as 0."] PASSWD = 5062996, } impl From<WPKEY_A> for u32 { #[inline(always)] fn from(variant: WPKEY_A) -> Self { variant as _ } } #[doc = "Field `WPKEY` reader - Write Protection Key"] pub struct WPKEY_R(crate::FieldReader<u32, WPKEY_A>); impl WPKEY_R { pub(crate) fn new(bits: u32) -> Self { WPKEY_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> Option<WPKEY_A> { match self.bits { 5062996 => Some(WPKEY_A::PASSWD), _ => None, } } #[doc = "Checks if the value of the field is `PASSWD`"] #[inline(always)] pub fn is_passwd(&self) -> bool { **self == WPKEY_A::PASSWD } } impl core::ops::Deref for WPKEY_R { type Target = crate::FieldReader<u32, WPKEY_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `WPKEY` writer - Write Protection Key"] pub struct WPKEY_W<'a> { w: &'a mut W, } impl<'a> WPKEY_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: WPKEY_A) -> &'a mut W { unsafe { self.bits(variant.into()) } } #[doc = "Writing any other value in this field aborts the write operation of the WPEN bit.Always reads as 0."] #[inline(always)] pub fn passwd(self) -> &'a mut W { self.variant(WPKEY_A::PASSWD) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u32) -> &'a mut W { self.w.bits = (self.w.bits & !(0x00ff_ffff << 8)) | ((value as u32 & 0x00ff_ffff) << 8); self.w } } impl R { #[doc = "Bit 0 - Write Protection Enable"] #[inline(always)] pub fn wpen(&self) -> WPEN_R { WPEN_R::new((self.bits & 0x01) != 0) } #[doc = "Bits 8:31 - Write Protection 
Key"] #[inline(always)] pub fn wpkey(&self) -> WPKEY_R { WPKEY_R::new(((self.bits >> 8) & 0x00ff_ffff) as u32) } } impl W { #[doc = "Bit 0 - Write Protection Enable"] #[inline(always)] pub fn wpen(&mut self) -> WPEN_W { WPEN_W { w: self } } #[doc = "Bits 8:31 - Write Protection Key"] #[inline(always)] pub fn wpkey(&mut self) -> WPKEY_W { WPKEY_W { w: self } } #[doc = "Writes raw bits to the register."] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "Write Protection Mode Register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [matrix_wpmr](index.html) module"] pub struct MATRIX_WPMR_SPEC; impl crate::RegisterSpec for MATRIX_WPMR_SPEC { type Ux = u32; } #[doc = "`read()` method returns [matrix_wpmr::R](R) reader structure"] impl crate::Readable for MATRIX_WPMR_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [matrix_wpmr::W](W) writer structure"] impl crate::Writable for MATRIX_WPMR_SPEC { type Writer = W; } #[doc = "`reset()` method sets MATRIX_WPMR to value 0"] impl crate::Resettable for MATRIX_WPMR_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
30.774725
422
0.595429
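// A hedged usage sketch (not generated code) for the register above: enabling write
// protection. The WPEN bit is only honoured when WPKEY carries the PASSWD value in the
// same write, so both fields are set in one closure. `wpmr` is assumed to be the
// MATRIX_WPMR register borrowed from the owning MATRIX register block.
fn enable_write_protection(wpmr: &crate::generic::Reg<MATRIX_WPMR_SPEC>) {
    wpmr.write(|w| w.wpkey().passwd().wpen().set_bit());
}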
640bb19a0d9c843d7e0e92a996454f6aa631b90b
9,367
use crate::cli::{ show_values, API_KEY_OPT, CONFIRM_FLAG, DELETE_SUBCMD, DESCRIPTION_OPT, EDIT_SUBCMD, FORMAT_OPT, LIST_SUBCMD, NAME_ARG, SECRETS_FLAG, SET_SUBCMD, }; use crate::config::{ Config, ConfigValue, PARAM_API_KEY, PARAM_ORG, PARAM_PROFILE, PARAM_ROLE, PARAM_USER, }; use crate::database::{OpenApiConfig, Users}; use crate::lib::{ error_message, user_confirm, warn_missing_subcommand, warning_message, DEL_CONFIRM, REDACTED, }; use crate::table::Table; use clap::ArgMatches; use color_eyre::eyre::Result; use std::process; fn proc_config_edit() -> Result<()> { let filename = Config::filename(); let orig_content = Config::read_or_create_config()?; let mut content = orig_content.clone(); loop { content = edit::edit(content.as_bytes())?; let action = if orig_content == content { "No changes made to" } else { "Edited" }; let validation = Config::validate_content(&content); if validation.is_ok() { Config::update_config(&content)?; println!("{} {}", action, filename); break; } warning_message(format!( "The provided content is not valid due to:\n{}", validation.unwrap_err().to_string() )); let continue_editing = "Do you want to continue editing".to_string(); if user_confirm(continue_editing, Some(true)) { continue; } let save_invalid = "Do you want to save the invalid edits".to_string(); if user_confirm(save_invalid, Some(false)) { Config::update_config(&content)?; println!("Saving invalid edits to {}", filename); break; } println!("Discarding the invalid edits to {}", filename); break; } Ok(()) } fn proc_config_prof_list(subcmd_args: &ArgMatches) -> Result<()> { let show_secrets = subcmd_args.is_present(SECRETS_FLAG); let show_values = show_values(subcmd_args); let fmt = subcmd_args.value_of(FORMAT_OPT).unwrap(); let details = Config::get_profile_details()?; if details.is_empty() { println!("No profiles exist in config."); } else if !show_values { let profile_names: Vec<String> = details.iter().map(|v| v.name.clone()).collect(); println!("{}", profile_names.join("\n")); } else { let mut table = Table::new("profile"); table.set_header(&["Name", "API", "Environment", "Project", "Description"]); for entry in details { let mut api_value = "".to_string(); if let Some(api_key) = entry.api_key { if show_secrets { api_value = api_key; } else if !api_key.is_empty() { api_value = REDACTED.to_string(); } } table.add_row(vec![ entry.name, api_value, entry.environment.unwrap_or_default(), entry.project.unwrap_or_default(), entry.description.unwrap_or_default(), ]); } table.render(fmt)?; } Ok(()) } fn find_property_value(list: &[ConfigValue], property_name: &str) -> Option<String> { for item in list { if item.name == property_name { if !item.value.is_empty() { return Some(item.value.clone()); } break; } } None } fn update_property_value(list: &mut [ConfigValue], property_name: &str, value: &str, source: &str) { for item in list { if item.name == property_name { item.value = value.to_string(); item.source = source.to_string(); break; } } } fn proc_config_current( subcmd_args: &ArgMatches, profile_name: Option<&str>, api_key: Option<&str>, proj_name: Option<&str>, env_name: Option<&str>, ) -> Result<()> { let show_secrets = subcmd_args.is_present(SECRETS_FLAG); let show_extended = subcmd_args.is_present("extended"); let fmt = subcmd_args.value_of(FORMAT_OPT).unwrap(); let mut values = Config::get_sources(profile_name, api_key, proj_name, env_name)?; if let Some(api_key) = find_property_value(&values, PARAM_API_KEY) { // pull API key and profile name from the list, since the values passed in here are just the 
CLI arguments, // and need to be informed by the environment variables. let prof_name = find_property_value(&values, PARAM_PROFILE); let config = Config::load_config(Some(&api_key), prof_name.as_deref(), env_name, proj_name).unwrap(); let rest_cfg = OpenApiConfig::from(&config); let users = Users::new(); // NOTE: these only get updated if we can fetch info from the server if let Ok(current_user) = users.get_current_user(&rest_cfg) { let source = "API key"; update_property_value(&mut values, PARAM_USER, &current_user.name, source); update_property_value(&mut values, PARAM_ROLE, &current_user.role, source); update_property_value(&mut values, PARAM_ORG, &current_user.organization, source); } } let mut table = Table::new("profile"); table.set_header(&["Parameter", "Value", "Source"]); for v in values { if show_extended || !v.extension { let val_str = if show_secrets || !v.secret || v.value.is_empty() { v.value } else { REDACTED.to_string() }; table.add_row(vec![v.name, val_str, v.source]); } } table.render(fmt)?; Ok(()) } fn proc_config_prof_delete(subcmd_args: &ArgMatches) -> Result<()> { let mut confirmed = subcmd_args.is_present(CONFIRM_FLAG); let prof_name = subcmd_args.value_of(NAME_ARG).unwrap(); let result = Config::get_profile_details_by_name(prof_name)?; if result.is_some() { if !confirmed { confirmed = user_confirm(format!("Delete profile '{}'", prof_name), DEL_CONFIRM); } if !confirmed { warning_message(format!("Profile '{}' not deleted!", prof_name)); } else { Config::delete_profile(prof_name)?; println!("Deleted profile '{}'", prof_name); } } else { warning_message(format!("Profile '{}' does not exist!", prof_name)); } Ok(()) } fn proc_config_prof_set(subcmd_args: &ArgMatches) -> Result<()> { let prof_name = subcmd_args.value_of(NAME_ARG).unwrap(); let api_key = subcmd_args.value_of(API_KEY_OPT); let description = subcmd_args.value_of(DESCRIPTION_OPT); let project = subcmd_args.value_of("PROJECT"); let environment = subcmd_args.value_of("ENVIRONMENT"); let source = subcmd_args.value_of("SOURCE"); // make sure there's a parent profile if let Some(source_profile) = source { if Config::get_profile_details_by_name(source_profile)?.is_none() { error_message(format!( "Source profile '{}' does not exist", source_profile )); process::exit(18); } } if api_key.is_none() && description.is_none() && project.is_none() && environment.is_none() && source.is_none() { warning_message(format!("Nothing to change for profile '{}'", prof_name)); } else { let pre_exists = Config::get_profile_details_by_name(prof_name)?.is_some(); Config::update_profile( prof_name, api_key, description, environment, project, source, )?; let post_exists = Config::get_profile_details_by_name(prof_name)?.is_some(); let action = if !post_exists { "Deleted" } else if !pre_exists { "Created" } else { "Updated" }; println!( "{} profile '{}' in '{}'", action, prof_name, Config::filename() ); } Ok(()) } pub fn proc_config_profile_command(subcmd_args: &ArgMatches) -> Result<()> { if let Some(subcmd_args) = subcmd_args.subcommand_matches(DELETE_SUBCMD) { proc_config_prof_delete(subcmd_args)?; } else if let Some(subcmd_args) = subcmd_args.subcommand_matches(LIST_SUBCMD) { proc_config_prof_list(subcmd_args)?; } else if let Some(subcmd_args) = subcmd_args.subcommand_matches(SET_SUBCMD) { proc_config_prof_set(subcmd_args)?; } else { warn_missing_subcommand("configuration profiles"); } Ok(()) } /// Process the 'config' sub-command pub fn process_config_command( subcmd_args: &ArgMatches, profile_name: Option<&str>, api_key: 
Option<&str>, proj_name: Option<&str>, env_name: Option<&str>, ) -> Result<()> { if subcmd_args.subcommand_matches(EDIT_SUBCMD).is_some() { proc_config_edit()?; } else if let Some(subcmd_args) = subcmd_args.subcommand_matches("current") { proc_config_current(subcmd_args, profile_name, api_key, proj_name, env_name)?; } else if let Some(subcmd_args) = subcmd_args.subcommand_matches("profiles") { proc_config_profile_command(subcmd_args)?; } else { warn_missing_subcommand("configuration"); } Ok(()) }
34.311355
115
0.601687
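The profile listing in the record above only prints an API key verbatim when secrets are requested; otherwise any non-empty key is masked. A minimal, self-contained sketch of that redaction rule follows (the REDACTED value and the display_api_key helper are illustrative names, not items from this crate):

const REDACTED: &str = "*****";

fn display_api_key(api_key: Option<&str>, show_secrets: bool) -> String {
    match api_key {
        // An empty or missing key is shown as an empty cell either way.
        Some(key) if key.is_empty() => String::new(),
        // Only reveal the real key when secrets were explicitly requested.
        Some(key) if show_secrets => key.to_string(),
        Some(_) => REDACTED.to_string(),
        None => String::new(),
    }
}

fn main() {
    assert_eq!(display_api_key(Some("abc123"), false), REDACTED);
    assert_eq!(display_api_key(Some("abc123"), true), "abc123");
    assert_eq!(display_api_key(Some(""), false), "");
    assert_eq!(display_api_key(None, true), "");
}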
72c5a040c6d03f5ea38ddf538c796f3304e2cbf8
3,722
use boring::ssl::{SslAcceptor, SslConnector, SslFiletype, SslMethod}; use futures::future; use std::future::Future; use std::net::{SocketAddr, ToSocketAddrs}; use std::pin::Pin; use tokio::io::{AsyncReadExt, AsyncWrite, AsyncWriteExt}; use tokio::net::{TcpListener, TcpStream}; use tokio_boring::{HandshakeError, SslStream}; #[tokio::test] async fn google() { let addr = "google.com:443".to_socket_addrs().unwrap().next().unwrap(); let stream = TcpStream::connect(&addr).await.unwrap(); let config = SslConnector::builder(SslMethod::tls()) .unwrap() .build() .configure() .unwrap(); let mut stream = tokio_boring::connect(config, "google.com", stream) .await .unwrap(); stream.write_all(b"GET / HTTP/1.0\r\n\r\n").await.unwrap(); let mut buf = vec![]; stream.read_to_end(&mut buf).await.unwrap(); let response = String::from_utf8_lossy(&buf); let response = response.trim_end(); // any response code is fine assert!(response.starts_with("HTTP/1.0 ")); assert!(response.ends_with("</html>") || response.ends_with("</HTML>")); } fn create_server() -> ( impl Future<Output = Result<SslStream<TcpStream>, HandshakeError<TcpStream>>>, SocketAddr, ) { let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap(); listener.set_nonblocking(true).unwrap(); let listener = TcpListener::from_std(listener).unwrap(); let addr = listener.local_addr().unwrap(); let server = async move { let mut acceptor = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap(); acceptor .set_private_key_file("tests/key.pem", SslFiletype::PEM) .unwrap(); acceptor .set_certificate_chain_file("tests/cert.pem") .unwrap(); let acceptor = acceptor.build(); let stream = listener.accept().await.unwrap().0; tokio_boring::accept(&acceptor, stream).await }; (server, addr) } #[tokio::test] async fn server() { let (stream, addr) = create_server(); let server = async { let mut stream = stream.await.unwrap(); let mut buf = [0; 4]; stream.read_exact(&mut buf).await.unwrap(); assert_eq!(&buf, b"asdf"); stream.write_all(b"jkl;").await.unwrap(); future::poll_fn(|ctx| Pin::new(&mut stream).poll_shutdown(ctx)) .await .unwrap(); }; let client = async { let mut connector = SslConnector::builder(SslMethod::tls()).unwrap(); connector.set_ca_file("tests/cert.pem").unwrap(); let config = connector.build().configure().unwrap(); let stream = TcpStream::connect(&addr).await.unwrap(); let mut stream = tokio_boring::connect(config, "localhost", stream) .await .unwrap(); stream.write_all(b"asdf").await.unwrap(); let mut buf = vec![]; stream.read_to_end(&mut buf).await.unwrap(); assert_eq!(buf, b"jkl;"); }; future::join(server, client).await; } #[tokio::test] async fn handshake_error() { let (stream, addr) = create_server(); let server = async { let err = stream.await.unwrap_err(); assert!(err.into_source_stream().is_some()); }; let client = async { let connector = SslConnector::builder(SslMethod::tls()).unwrap(); let config = connector.build().configure().unwrap(); let stream = TcpStream::connect(&addr).await.unwrap(); let err = tokio_boring::connect(config, "localhost", stream) .await .unwrap_err(); assert!(err.into_source_stream().is_some()); }; future::join(server, client).await; }
29.539683
88
0.611231
f7eb7b47df07c1c4def219ed6cee8325b036c8d0
1,076
//! Wasmtime's embedding API //! //! This crate contains a high-level API used to interact with WebAssembly //! modules. The API here is intended to mirror the proposed [WebAssembly C //! API](https://github.com/WebAssembly/wasm-c-api), with small extensions here //! and there to implement Rust idioms. This crate also defines the actual C API //! itself for consumption from other languages. #![deny(missing_docs, intra_doc_link_resolution_failure)] mod externals; mod frame_info; mod func; mod instance; mod module; mod r#ref; mod runtime; mod trampoline; mod trap; mod types; mod values; pub use crate::externals::*; pub use crate::frame_info::FrameInfo; pub use crate::func::*; pub use crate::instance::Instance; pub use crate::module::Module; pub use crate::r#ref::{AnyRef, HostInfo, HostRef}; pub use crate::runtime::*; pub use crate::trap::Trap; pub use crate::types::*; pub use crate::values::*; cfg_if::cfg_if! { if #[cfg(unix)] { pub mod unix; } else if #[cfg(windows)] { pub mod windows; } else { // ... unknown os! } }
25.023256
80
0.693309
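The crate root in the record above gates its platform-specific modules with cfg_if. A small stand-alone sketch of the same gating pattern, assuming the cfg-if crate as a dependency (platform_name is an illustrative function, not part of the wasmtime API):

// Compile exactly one of these definitions depending on the target platform.
cfg_if::cfg_if! {
    if #[cfg(unix)] {
        fn platform_name() -> &'static str { "unix" }
    } else if #[cfg(windows)] {
        fn platform_name() -> &'static str { "windows" }
    } else {
        fn platform_name() -> &'static str { "unknown" }
    }
}

fn main() {
    println!("compiled for: {}", platform_name());
}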
086ee4ffbcc6279709b175aa3e6e43008b247910
7,920
// Copyright 2020 WeDPR Lab Project Authors. Licensed under Apache-2.0. //! Signature function wrappers. #![cfg(all( feature = "wedpr_f_signature_secp256k1", feature = "wedpr_f_signature_sm2" ))] use wedpr_l_utils::traits::Signature; #[cfg(feature = "wedpr_f_signature_secp256k1")] use crate::config::SIGNATURE_SECP256K1; use crate::{config, get_result_jobject}; #[cfg(feature = "wedpr_f_signature_sm2")] use config::SIGNATURE_SM2; use jni::{ objects::{JClass, JObject, JString, JValue}, sys::jobject, JNIEnv, }; #[cfg(feature = "wedpr_f_base64")] use wedpr_ffi_common_base64::utils::{ bytes_to_string, java_jstring_to_bytes, java_set_error_field_and_extract_jobject, }; #[cfg(feature = "wedpr_f_hex")] use wedpr_ffi_common_hex::utils::{ bytes_to_string, java_jstring_to_bytes, java_set_error_field_and_extract_jobject, }; // Secp256k1 implementation. #[cfg(feature = "wedpr_f_signature_secp256k1")] #[no_mangle] /// Java interface for /// 'com.webank.wedpr.crypto.NativeInterface->secp256k1GenKeyPair'. pub extern "system" fn Java_com_webank_wedpr_crypto_NativeInterface_secp256k1GenKeyPair( _env: JNIEnv, _class: JClass, ) -> jobject { let result_jobject = get_result_jobject(&_env); let (pk, sk) = SIGNATURE_SECP256K1.generate_keypair(); java_safe_set_string_field!( _env, result_jobject, bytes_to_string(&pk), "publicKey" ); java_safe_set_string_field!( _env, result_jobject, bytes_to_string(&sk), "privateKey" ); result_jobject.into_inner() } #[cfg(feature = "wedpr_f_signature_secp256k1")] #[no_mangle] /// Java interface for /// 'com.webank.wedpr.crypto.NativeInterface->secp256k1Sign'. // TODO: Add secp256k1SignUtf8 to allow non-encoded UTF8 input. pub extern "system" fn Java_com_webank_wedpr_crypto_NativeInterface_secp256k1Sign( _env: JNIEnv, _class: JClass, private_key_jstring: JString, msg_hash_jstring: JString, ) -> jobject { let result_jobject = get_result_jobject(&_env); let private_key = java_safe_jstring_to_bytes!(_env, result_jobject, private_key_jstring); let msg_hash = java_safe_jstring_to_bytes!(_env, result_jobject, msg_hash_jstring); let signature = match SIGNATURE_SECP256K1.sign(&private_key, &msg_hash) { Ok(v) => v, Err(_) => { return java_set_error_field_and_extract_jobject( &_env, &result_jobject, &format!( "secp256k1 sign failed, msg_hash={}", bytes_to_string(&msg_hash) ), ) }, }; java_safe_set_string_field!( _env, result_jobject, bytes_to_string(&signature), "signature" ); result_jobject.into_inner() } #[cfg(feature = "wedpr_f_signature_secp256k1")] #[no_mangle] /// Java interface for /// 'com.webank.wedpr.crypto.NativeInterface->secp256k1Verify'. pub extern "system" fn Java_com_webank_wedpr_crypto_NativeInterface_secp256k1Verify( _env: JNIEnv, _class: JClass, public_key_jstring: JString, msg_hash_jstring: JString, signature_jstring: JString, ) -> jobject { let result_jobject = get_result_jobject(&_env); let public_key = java_safe_jstring_to_bytes!(_env, result_jobject, public_key_jstring); let msg_hash = java_safe_jstring_to_bytes!(_env, result_jobject, msg_hash_jstring); let signature = java_safe_jstring_to_bytes!(_env, result_jobject, signature_jstring); let result = SIGNATURE_SECP256K1.verify(&public_key, &msg_hash, &signature); java_safe_set_boolean_field!(_env, result_jobject, result, "booleanResult"); result_jobject.into_inner() } // SM2 implementation. #[cfg(feature = "wedpr_f_signature_sm2")] #[no_mangle] /// Java interface for /// 'com.webank.wedpr.crypto.NativeInterface->sm2GenKeyPair'. 
28.387097
88
0.671086
de65d9bbaba2b97863f157ea8ca1ad3af53d40ac
592
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. extern mod extra; use std::task; fn f() { let _a = @0; fail2!(); } pub fn main() { task::spawn_unlinked(f); }
25.73913
68
0.699324
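The test in the record above exercises long-removed pre-1.0 APIs (managed @ boxes, fail2!, task::spawn_unlinked). A rough modern-Rust analogue of the behaviour it checks, namely that a panicking child thread does not take down the thread that spawned it, might look like this (names are illustrative):

use std::thread;

fn f() {
    let _a = Box::new(0);
    panic!("intentional panic");
}

fn main() {
    // The spawned thread panics, but join() surfaces that as an Err value
    // instead of aborting the spawning thread.
    let handle = thread::spawn(f);
    assert!(handle.join().is_err());
    println!("main thread survived the child panic");
}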
3884efc13210e94703fd0671490d257f6926a3d5
1,363
#[doc = "Reader of register AFEC_COSR"] pub type R = crate::R<u32, super::AFEC_COSR>; #[doc = "Writer for register AFEC_COSR"] pub type W = crate::W<u32, super::AFEC_COSR>; #[doc = "Register AFEC_COSR `reset()`'s with value 0"] impl crate::ResetValue for super::AFEC_COSR { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0 } } #[doc = "Reader of field `CSEL`"] pub type CSEL_R = crate::R<bool, bool>; #[doc = "Write proxy for field `CSEL`"] pub struct CSEL_W<'a> { w: &'a mut W, } impl<'a> CSEL_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01); self.w } } impl R { #[doc = "Bit 0 - Sample & Hold unit Correction Select"] #[inline(always)] pub fn csel(&self) -> CSEL_R { CSEL_R::new((self.bits & 0x01) != 0) } } impl W { #[doc = "Bit 0 - Sample & Hold unit Correction Select"] #[inline(always)] pub fn csel(&mut self) -> CSEL_W { CSEL_W { w: self } } }
26.72549
70
0.558327
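The generated reader/writer types in the record above boil down to masked read-modify-write operations on bit 0 of the register value. A stand-alone sketch of that bit manipulation (the free functions here are illustrative; the generated code expresses the same logic through the R and CSEL_W proxies):

// Same read-modify-write expression the generated CSEL_W::bit proxy uses.
fn write_csel(bits: u32, value: bool) -> u32 {
    (bits & !0x01) | ((value as u32) & 0x01)
}

// Same mask-and-test the generated R::csel reader uses.
fn read_csel(bits: u32) -> bool {
    (bits & 0x01) != 0
}

fn main() {
    let reg = 0b1010u32;
    let reg = write_csel(reg, true);
    assert!(read_csel(reg));
    assert_eq!(reg, 0b1011);
    assert_eq!(write_csel(reg, false), 0b1010);
}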
338becabd381f9fb7f069cfa0c93bd9c1e22740e
225
#![deny(warnings, rust_2018_idioms)] #![allow(clippy::inconsistent_struct_constructor)] mod body; mod request; mod response; pub use self::{ body::{BoxBody, Data}, request::BoxRequest, response::BoxResponse, };
17.307692
50
0.706667
f7c3a6d26422644303a674c2719218f4d5faa38e
19,117
/* EVMC: Ethereum Client-VM Connector API. * Copyright 2019 The EVMC Authors. * Licensed under the Apache License, Version 2.0. */ mod container; pub use container::EvmcContainer; pub use evmc_sys as ffi; pub trait EvmcVm { fn init() -> Self; fn execute(&self, code: &[u8], context: &ExecutionContext) -> ExecutionResult; } /// EVMC result structure. pub struct ExecutionResult { status_code: ffi::evmc_status_code, gas_left: i64, output: Option<Vec<u8>>, create_address: Option<ffi::evmc_address>, } /// EVMC context structure. Exposes the EVMC host functions, message data, and transaction context /// to the executing VM. pub struct ExecutionContext<'a> { message: &'a ffi::evmc_message, context: &'a mut ffi::evmc_context, tx_context: ffi::evmc_tx_context, } impl ExecutionResult { pub fn new( _status_code: ffi::evmc_status_code, _gas_left: i64, _output: Option<Vec<u8>>, ) -> Self { ExecutionResult { status_code: _status_code, gas_left: _gas_left, output: _output, create_address: None, } } pub fn failure() -> Self { ExecutionResult::new(ffi::evmc_status_code::EVMC_FAILURE, 0, None) } pub fn success(_gas_left: i64, _output: Option<Vec<u8>>) -> Self { ExecutionResult::new(ffi::evmc_status_code::EVMC_SUCCESS, _gas_left, _output) } pub fn get_status_code(&self) -> ffi::evmc_status_code { self.status_code } pub fn get_gas_left(&self) -> i64 { self.gas_left } pub fn get_output(&self) -> Option<&Vec<u8>> { self.output.as_ref() } pub fn get_create_address(&self) -> Option<&ffi::evmc_address> { self.create_address.as_ref() } } impl<'a> ExecutionContext<'a> { pub fn new(_message: &'a ffi::evmc_message, _context: &'a mut ffi::evmc_context) -> Self { let _tx_context = unsafe { assert!((*(_context.host)).get_tx_context.is_some()); (*(_context.host)).get_tx_context.unwrap()(_context as *mut ffi::evmc_context) }; ExecutionContext { message: _message, context: _context, tx_context: _tx_context, } } pub fn get_message(&self) -> &ffi::evmc_message { &self.message } pub fn get_tx_context(&mut self) -> &ffi::evmc_tx_context { &self.tx_context } pub fn account_exists(&mut self, address: &ffi::evmc_address) -> bool { unsafe { assert!((*self.context.host).account_exists.is_some()); (*self.context.host).account_exists.unwrap()( self.context as *mut ffi::evmc_context, address as *const ffi::evmc_address, ) } } pub fn get_storage( &mut self, address: &ffi::evmc_address, key: &ffi::evmc_bytes32, ) -> ffi::evmc_bytes32 { unsafe { assert!((*self.context.host).get_storage.is_some()); (*self.context.host).get_storage.unwrap()( self.context as *mut ffi::evmc_context, address as *const ffi::evmc_address, key as *const ffi::evmc_bytes32, ) } } pub fn set_storage( &mut self, address: &ffi::evmc_address, key: &ffi::evmc_bytes32, value: &ffi::evmc_bytes32, ) -> ffi::evmc_storage_status { unsafe { assert!((*self.context.host).set_storage.is_some()); (*self.context.host).set_storage.unwrap()( self.context as *mut ffi::evmc_context, address as *const ffi::evmc_address, key as *const ffi::evmc_bytes32, value as *const ffi::evmc_bytes32, ) } } pub fn get_balance(&mut self, address: &ffi::evmc_address) -> ffi::evmc_bytes32 { unsafe { assert!((*self.context.host).get_balance.is_some()); (*self.context.host).get_balance.unwrap()( self.context as *mut ffi::evmc_context, address as *const ffi::evmc_address, ) } } pub fn get_code_size(&mut self, address: &ffi::evmc_address) -> usize { unsafe { assert!((*self.context.host).get_code_size.is_some()); (*self.context.host).get_code_size.unwrap()( self.context as *mut ffi::evmc_context, address as 
*const ffi::evmc_address, ) } } pub fn get_code_hash(&mut self, address: &ffi::evmc_address) -> ffi::evmc_bytes32 { unsafe { assert!((*self.context.host).get_code_size.is_some()); (*self.context.host).get_code_hash.unwrap()( self.context as *mut ffi::evmc_context, address as *const ffi::evmc_address, ) } } pub fn copy_code( &mut self, address: &ffi::evmc_address, code_offset: usize, buffer: &mut [u8], ) -> usize { unsafe { assert!((*self.context.host).copy_code.is_some()); (*self.context.host).copy_code.unwrap()( self.context as *mut ffi::evmc_context, address as *const ffi::evmc_address, code_offset, // FIXME: ensure that alignment of the array elements is OK buffer.as_mut_ptr(), buffer.len(), ) } } pub fn selfdestruct(&mut self, address: &ffi::evmc_address, beneficiary: &ffi::evmc_address) { unsafe { assert!((*self.context.host).selfdestruct.is_some()); (*self.context.host).selfdestruct.unwrap()( self.context as *mut ffi::evmc_context, address as *const ffi::evmc_address, beneficiary as *const ffi::evmc_address, ) } } pub fn call(&mut self, message: &ffi::evmc_message) -> ExecutionResult { unsafe { assert!((*self.context.host).call.is_some()); (*self.context.host).call.unwrap()( self.context as *mut ffi::evmc_context, message as *const ffi::evmc_message, ) .into() } } pub fn get_block_hash(&mut self, num: i64) -> ffi::evmc_bytes32 { unsafe { assert!((*self.context.host).get_block_hash.is_some()); (*self.context.host).get_block_hash.unwrap()( self.context as *mut ffi::evmc_context, num, ) } } pub fn emit_log( &mut self, address: &ffi::evmc_address, data: &[u8], topics: &[ffi::evmc_bytes32], ) { unsafe { assert!((*self.context.host).emit_log.is_some()); (*self.context.host).emit_log.unwrap()( self.context as *mut ffi::evmc_context, address as *const ffi::evmc_address, // FIXME: ensure that alignment of the array elements is OK data.as_ptr(), data.len(), topics.as_ptr(), topics.len(), ) } } } impl From<ffi::evmc_result> for ExecutionResult { fn from(result: ffi::evmc_result) -> Self { let ret = ExecutionResult { status_code: result.status_code, gas_left: result.gas_left, output: if !result.output_data.is_null() { // Pre-allocate a vector. let mut buf: Vec<u8> = Vec::with_capacity(result.output_size); unsafe { // Set the len of the vec manually. buf.set_len(result.output_size); // Copy from the C struct's buffer to the vec's buffer. std::ptr::copy(result.output_data, buf.as_mut_ptr(), result.output_size); } Some(buf) } else { None }, // Consider it is always valid. create_address: Some(result.create_address), }; // Release allocated ffi struct. if result.release.is_some() { unsafe { result.release.unwrap()(&result as *const ffi::evmc_result); } } ret } } fn allocate_output_data(output: Option<Vec<u8>>) -> (*const u8, usize) { if let Some(buf) = output { let buf_len = buf.len(); // Manually allocate heap memory for the new home of the output buffer. let memlayout = std::alloc::Layout::from_size_align(buf_len, 1).expect("Bad layout"); let new_buf = unsafe { std::alloc::alloc(memlayout) }; unsafe { // Copy the data into the allocated buffer. std::ptr::copy(buf.as_ptr(), new_buf, buf_len); } (new_buf as *const u8, buf_len) } else { (std::ptr::null(), 0) } } unsafe fn deallocate_output_data(ptr: *const u8, size: usize) { if !ptr.is_null() { let buf_layout = std::alloc::Layout::from_size_align(size, 1).expect("Bad layout"); std::alloc::dealloc(ptr as *mut u8, buf_layout); } } /// Returns a pointer to a heap-allocated evmc_result. 
impl Into<*const ffi::evmc_result> for ExecutionResult { fn into(self) -> *const ffi::evmc_result { let mut result: ffi::evmc_result = self.into(); result.release = Some(release_heap_result); Box::into_raw(Box::new(result)) } } /// Callback to pass across FFI, de-allocating the optional output_data. extern "C" fn release_heap_result(result: *const ffi::evmc_result) { unsafe { let tmp = Box::from_raw(result as *mut ffi::evmc_result); deallocate_output_data(tmp.output_data, tmp.output_size); } } /// Returns a pointer to a stack-allocated evmc_result. impl Into<ffi::evmc_result> for ExecutionResult { fn into(self) -> ffi::evmc_result { let (buffer, len) = allocate_output_data(self.output); ffi::evmc_result { status_code: self.status_code, gas_left: self.gas_left, output_data: buffer, output_size: len, release: Some(release_stack_result), create_address: if self.create_address.is_some() { self.create_address.unwrap() } else { ffi::evmc_address { bytes: [0u8; 20] } }, padding: [0u8; 4], } } } /// Callback to pass across FFI, de-allocating the optional output_data. extern "C" fn release_stack_result(result: *const ffi::evmc_result) { unsafe { let tmp = *result; deallocate_output_data(tmp.output_data, tmp.output_size); } } #[cfg(test)] mod tests { use super::*; #[test] fn new_result() { let r = ExecutionResult::new(ffi::evmc_status_code::EVMC_FAILURE, 420, None); assert!(r.get_status_code() == ffi::evmc_status_code::EVMC_FAILURE); assert!(r.get_gas_left() == 420); assert!(r.get_output().is_none()); assert!(r.get_create_address().is_none()); } // Test-specific helper to dispose of execution results in unit tests extern "C" fn test_result_dispose(result: *const ffi::evmc_result) { unsafe { if !result.is_null() { let owned = *result; Vec::from_raw_parts( owned.output_data as *mut u8, owned.output_size, owned.output_size, ); } } } #[test] fn from_ffi() { let f = ffi::evmc_result { status_code: ffi::evmc_status_code::EVMC_SUCCESS, gas_left: 1337, output_data: Box::into_raw(Box::new([0xde, 0xad, 0xbe, 0xef])) as *const u8, output_size: 4, release: Some(test_result_dispose), create_address: ffi::evmc_address { bytes: [0u8; 20] }, padding: [0u8; 4], }; let r: ExecutionResult = f.into(); assert!(r.get_status_code() == ffi::evmc_status_code::EVMC_SUCCESS); assert!(r.get_gas_left() == 1337); assert!(r.get_output().is_some()); assert!(r.get_output().unwrap().len() == 4); assert!(r.get_create_address().is_some()); } #[test] fn into_heap_ffi() { let r = ExecutionResult::new( ffi::evmc_status_code::EVMC_FAILURE, 420, Some(vec![0xc0, 0xff, 0xee, 0x71, 0x75]), ); let f: *const ffi::evmc_result = r.into(); assert!(!f.is_null()); unsafe { assert!((*f).status_code == ffi::evmc_status_code::EVMC_FAILURE); assert!((*f).gas_left == 420); assert!(!(*f).output_data.is_null()); assert!((*f).output_size == 5); assert!( std::slice::from_raw_parts((*f).output_data, 5) as &[u8] == &[0xc0, 0xff, 0xee, 0x71, 0x75] ); assert!((*f).create_address.bytes == [0u8; 20]); if (*f).release.is_some() { (*f).release.unwrap()(f); } } } #[test] fn into_heap_ffi_empty_data() { let r = ExecutionResult::new(ffi::evmc_status_code::EVMC_FAILURE, 420, None); let f: *const ffi::evmc_result = r.into(); assert!(!f.is_null()); unsafe { assert!((*f).status_code == ffi::evmc_status_code::EVMC_FAILURE); assert!((*f).gas_left == 420); assert!((*f).output_data.is_null()); assert!((*f).output_size == 0); assert!((*f).create_address.bytes == [0u8; 20]); if (*f).release.is_some() { (*f).release.unwrap()(f); } } } #[test] fn into_stack_ffi() { let r = 
ExecutionResult::new( ffi::evmc_status_code::EVMC_FAILURE, 420, Some(vec![0xc0, 0xff, 0xee, 0x71, 0x75]), ); let f: ffi::evmc_result = r.into(); unsafe { assert!(f.status_code == ffi::evmc_status_code::EVMC_FAILURE); assert!(f.gas_left == 420); assert!(!f.output_data.is_null()); assert!(f.output_size == 5); assert!( std::slice::from_raw_parts(f.output_data, 5) as &[u8] == &[0xc0, 0xff, 0xee, 0x71, 0x75] ); assert!(f.create_address.bytes == [0u8; 20]); if f.release.is_some() { f.release.unwrap()(&f); } } } #[test] fn into_stack_ffi_empty_data() { let r = ExecutionResult::new(ffi::evmc_status_code::EVMC_FAILURE, 420, None); let f: ffi::evmc_result = r.into(); unsafe { assert!(f.status_code == ffi::evmc_status_code::EVMC_FAILURE); assert!(f.gas_left == 420); assert!(f.output_data.is_null()); assert!(f.output_size == 0); assert!(f.create_address.bytes == [0u8; 20]); if f.release.is_some() { f.release.unwrap()(&f); } } } unsafe extern "C" fn get_dummy_tx_context( _context: *mut ffi::evmc_context, ) -> ffi::evmc_tx_context { ffi::evmc_tx_context { tx_gas_price: ffi::evmc_uint256be { bytes: [0u8; 32] }, tx_origin: ffi::evmc_address { bytes: [0u8; 20] }, block_coinbase: ffi::evmc_address { bytes: [0u8; 20] }, block_number: 42, block_timestamp: 235117, block_gas_limit: 105023, block_difficulty: ffi::evmc_uint256be { bytes: [0xaa; 32] }, } } unsafe extern "C" fn get_dummy_code_size( _context: *mut ffi::evmc_context, _addr: *const ffi::evmc_address, ) -> usize { 105023 as usize } // Update these when needed for tests fn get_dummy_context() -> ffi::evmc_context { ffi::evmc_context { host: Box::into_raw(Box::new(ffi::evmc_host_interface { account_exists: None, get_storage: None, set_storage: None, get_balance: None, get_code_size: Some(get_dummy_code_size), get_code_hash: None, copy_code: None, selfdestruct: None, call: None, get_tx_context: Some(get_dummy_tx_context), get_block_hash: None, emit_log: None, })), } } // Helper to safely dispose of the dummy context, and not bring up false positives in the // sanitizers. 
fn dummy_context_dispose(context: ffi::evmc_context) { unsafe { Box::from_raw(context.host as *mut ffi::evmc_host_interface); } } fn get_dummy_message() -> ffi::evmc_message { ffi::evmc_message { kind: ffi::evmc_call_kind::EVMC_CALL, flags: 0, depth: 123, gas: 105023, destination: ffi::evmc_address { bytes: [0u8; 20] }, sender: ffi::evmc_address { bytes: [0u8; 20] }, input_data: std::ptr::null() as *const u8, input_size: 0, value: ffi::evmc_uint256be { bytes: [0u8; 32] }, create2_salt: ffi::evmc_uint256be { bytes: [0u8; 32] }, } } #[test] fn execution_context() { let msg = get_dummy_message(); let mut context_raw = get_dummy_context(); // Make a copy here so we don't let get_dummy_context() go out of scope when called again // in get_dummy_tx_context() and cause LLVM // sanitizers to complain let mut context_raw_copy = context_raw.clone(); let mut exe_context = ExecutionContext::new(&msg, &mut context_raw); let a = exe_context.get_tx_context(); let b = unsafe { get_dummy_tx_context(&mut context_raw_copy as *mut ffi::evmc_context) }; assert_eq!(a.block_gas_limit, b.block_gas_limit); assert_eq!(a.block_timestamp, b.block_timestamp); assert_eq!(a.block_number, b.block_number); let c = exe_context.get_message(); let d = get_dummy_message(); assert_eq!(c.kind, d.kind); assert_eq!(c.flags, d.flags); assert_eq!(c.depth, d.depth); assert_eq!(c.gas, d.gas); assert_eq!(c.input_data, d.input_data); assert_eq!(c.input_size, d.input_size); dummy_context_dispose(context_raw); } #[test] fn get_code_size() { let msg = get_dummy_message(); // This address is useless. Just a dummy parameter for the interface function. let test_addr = ffi::evmc_address { bytes: [0u8; 20] }; let mut context_raw = get_dummy_context(); let mut exe_context = ExecutionContext::new(&msg, &mut context_raw); let a: usize = 105023; let b = exe_context.get_code_size(&test_addr); assert_eq!(a, b); dummy_context_dispose(context_raw); } }
32.511905
98
0.551603
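In the record above, allocate_output_data and deallocate_output_data hand the result's output buffer across the FFI boundary as a raw pointer plus length, allocated with a byte-aligned Layout. A simplified, self-contained sketch of that ownership hand-off (allocate_copy and free_copy are illustrative names, not part of the EVMC bindings):

use std::alloc::{alloc, dealloc, Layout};

// Copy a slice into a freshly allocated, byte-aligned buffer and return a
// raw pointer plus length, mirroring the shape of allocate_output_data.
fn allocate_copy(src: &[u8]) -> (*const u8, usize) {
    if src.is_empty() {
        return (std::ptr::null(), 0);
    }
    let layout = Layout::from_size_align(src.len(), 1).expect("bad layout");
    unsafe {
        let dst = alloc(layout);
        assert!(!dst.is_null(), "allocation failed");
        std::ptr::copy_nonoverlapping(src.as_ptr(), dst, src.len());
        (dst as *const u8, src.len())
    }
}

// Release a buffer produced by allocate_copy, mirroring deallocate_output_data.
unsafe fn free_copy(ptr: *const u8, size: usize) {
    if !ptr.is_null() {
        let layout = Layout::from_size_align(size, 1).expect("bad layout");
        dealloc(ptr as *mut u8, layout);
    }
}

fn main() {
    let (ptr, len) = allocate_copy(b"output");
    let roundtrip = unsafe { std::slice::from_raw_parts(ptr, len) }.to_vec();
    assert_eq!(roundtrip, b"output");
    unsafe { free_copy(ptr, len) };
}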
d732b1071d8a94830543dfde91f7fbbd82932dd2
451
// build-pass // compile-flags:-Zpolymorphize=on pub trait ParallelIterator: Sized { fn drive<C: Consumer<()>>(_: C) { C::into_folder(); } } pub trait Consumer<T>: Sized { type Result; fn into_folder() -> Self::Result; } impl ParallelIterator for () {} impl<F: Fn(), T> Consumer<T> for F { type Result = (); fn into_folder() -> Self::Result { unimplemented!() } } fn main() { <()>::drive(|| ()); }
16.703704
38
0.556541
4b7c88029d35f1d5f1f510427add353a6a189fac
14,329
// This file is generated automatically. Do not edit it directly. // See the Contributing section in README on how to make changes to it. use std::cmp; use rand::Rng; use tcod::colors::*; use tcod::console::*; use tcod::map::{FovAlgorithm, Map as FovMap}; // actual size of the window const SCREEN_WIDTH: i32 = 80; const SCREEN_HEIGHT: i32 = 50; // size of the map const MAP_WIDTH: i32 = 80; const MAP_HEIGHT: i32 = 45; //parameters for dungeon generator const ROOM_MAX_SIZE: i32 = 10; const ROOM_MIN_SIZE: i32 = 6; const MAX_ROOMS: i32 = 30; const MAX_ROOM_MONSTERS: i32 = 3; const FOV_ALGO: FovAlgorithm = FovAlgorithm::Basic; // default FOV algorithm const FOV_LIGHT_WALLS: bool = true; // light walls or not const TORCH_RADIUS: i32 = 10; const LIMIT_FPS: i32 = 20; // 20 frames-per-second maximum const COLOR_DARK_WALL: Color = Color { r: 0, g: 0, b: 100 }; const COLOR_LIGHT_WALL: Color = Color { r: 130, g: 110, b: 50, }; const COLOR_DARK_GROUND: Color = Color { r: 50, g: 50, b: 150, }; const COLOR_LIGHT_GROUND: Color = Color { r: 200, g: 180, b: 50, }; // player will always be the first object const PLAYER: usize = 0; struct Tcod { root: Root, con: Offscreen, fov: FovMap, } type Map = Vec<Vec<Tile>>; struct Game { map: Map, } /// A tile of the map and its properties #[derive(Clone, Copy, Debug)] struct Tile { blocked: bool, explored: bool, block_sight: bool, } impl Tile { pub fn empty() -> Self { Tile { blocked: false, explored: false, block_sight: false, } } pub fn wall() -> Self { Tile { blocked: true, explored: false, block_sight: true, } } } /// A rectangle on the map, used to characterise a room. #[derive(Clone, Copy, Debug)] struct Rect { x1: i32, y1: i32, x2: i32, y2: i32, } impl Rect { pub fn new(x: i32, y: i32, w: i32, h: i32) -> Self { Rect { x1: x, y1: y, x2: x + w, y2: y + h, } } pub fn center(&self) -> (i32, i32) { let center_x = (self.x1 + self.x2) / 2; let center_y = (self.y1 + self.y2) / 2; (center_x, center_y) } pub fn intersects_with(&self, other: &Rect) -> bool { // returns true if this rectangle intersects with another one (self.x1 <= other.x2) && (self.x2 >= other.x1) && (self.y1 <= other.y2) && (self.y2 >= other.y1) } } /// This is a generic object: the player, a monster, an item, the stairs... /// It's always represented by a character on screen. 
#[derive(Debug)] struct Object { x: i32, y: i32, char: char, color: Color, name: String, blocks: bool, alive: bool, } impl Object { pub fn new(x: i32, y: i32, char: char, name: &str, color: Color, blocks: bool) -> Self { Object { x: x, y: y, char: char, color: color, name: name.into(), blocks: blocks, alive: false, } } /// set the color and then draw the character that represents this object at its position pub fn draw(&self, con: &mut dyn Console) { con.set_default_foreground(self.color); con.put_char(self.x, self.y, self.char, BackgroundFlag::None); } pub fn pos(&self) -> (i32, i32) { (self.x, self.y) } pub fn set_pos(&mut self, x: i32, y: i32) { self.x = x; self.y = y; } } /// move by the given amount, if the destination is not blocked fn move_by(id: usize, dx: i32, dy: i32, map: &Map, objects: &mut [Object]) { let (x, y) = objects[id].pos(); if !is_blocked(x + dx, y + dy, map, objects) { objects[id].set_pos(x + dx, y + dy); } } fn is_blocked(x: i32, y: i32, map: &Map, objects: &[Object]) -> bool { // first test the map tile if map[x as usize][y as usize].blocked { return true; } // now check for any blocking objects objects .iter() .any(|object| object.blocks && object.pos() == (x, y)) } fn create_room(room: Rect, map: &mut Map) { // go through the tiles in the rectangle and make them passable for x in (room.x1 + 1)..room.x2 { for y in (room.y1 + 1)..room.y2 { map[x as usize][y as usize] = Tile::empty(); } } } fn create_h_tunnel(x1: i32, x2: i32, y: i32, map: &mut Map) { // horizontal tunnel. `min()` and `max()` are used in case `x1 > x2` for x in cmp::min(x1, x2)..(cmp::max(x1, x2) + 1) { map[x as usize][y as usize] = Tile::empty(); } } fn create_v_tunnel(y1: i32, y2: i32, x: i32, map: &mut Map) { // vertical tunnel for y in cmp::min(y1, y2)..(cmp::max(y1, y2) + 1) { map[x as usize][y as usize] = Tile::empty(); } } fn make_map(objects: &mut Vec<Object>) -> Map { // fill map with "blocked" tiles let mut map = vec![vec![Tile::wall(); MAP_HEIGHT as usize]; MAP_WIDTH as usize]; let mut rooms = vec![]; for _ in 0..MAX_ROOMS { // random width and height let w = rand::thread_rng().gen_range(ROOM_MIN_SIZE, ROOM_MAX_SIZE + 1); let h = rand::thread_rng().gen_range(ROOM_MIN_SIZE, ROOM_MAX_SIZE + 1); // random position without going out of the boundaries of the map let x = rand::thread_rng().gen_range(0, MAP_WIDTH - w); let y = rand::thread_rng().gen_range(0, MAP_HEIGHT - h); let new_room = Rect::new(x, y, w, h); // run through the other rooms and see if they intersect with this one let failed = rooms .iter() .any(|other_room| new_room.intersects_with(other_room)); if !failed { // this means there are no intersections, so this room is valid // "paint" it to the map's tiles create_room(new_room, &mut map); // add some content to this room, such as monsters place_objects(new_room, &map, objects); // center coordinates of the new room, will be useful later let (new_x, new_y) = new_room.center(); if rooms.is_empty() { // this is the first room, where the player starts at objects[PLAYER].set_pos(new_x, new_y); } else { // all rooms after the first: // connect it to the previous room with a tunnel // center coordinates of the previous room let (prev_x, prev_y) = rooms[rooms.len() - 1].center(); // toss a coin (random bool value -- either true or false) if rand::random() { // first move horizontally, then vertically create_h_tunnel(prev_x, new_x, prev_y, &mut map); create_v_tunnel(prev_y, new_y, new_x, &mut map); } else { // first move vertically, then horizontally create_v_tunnel(prev_y, new_y, prev_x, 
&mut map); create_h_tunnel(prev_x, new_x, new_y, &mut map); } } // finally, append the new room to the list rooms.push(new_room); } } map } fn place_objects(room: Rect, map: &Map, objects: &mut Vec<Object>) { // choose random number of monsters let num_monsters = rand::thread_rng().gen_range(0, MAX_ROOM_MONSTERS + 1); for _ in 0..num_monsters { // choose random spot for this monster let x = rand::thread_rng().gen_range(room.x1 + 1, room.x2); let y = rand::thread_rng().gen_range(room.y1 + 1, room.y2); // only place it if the tile is not blocked if !is_blocked(x, y, map, objects) { let mut monster = if rand::random::<f32>() < 0.8 { // 80% chance of getting an orc // create an orc Object::new(x, y, 'o', "orc", DESATURATED_GREEN, true) } else { // create a troll Object::new(x, y, 'T', "troll", DARKER_GREEN, true) }; monster.alive = true; objects.push(monster); } } } fn render_all(tcod: &mut Tcod, game: &mut Game, objects: &[Object], fov_recompute: bool) { if fov_recompute { // recompute FOV if needed (the player moved or something) let player = &objects[PLAYER]; tcod.fov .compute_fov(player.x, player.y, TORCH_RADIUS, FOV_LIGHT_WALLS, FOV_ALGO); } // go through all tiles, and set their background color for y in 0..MAP_HEIGHT { for x in 0..MAP_WIDTH { let visible = tcod.fov.is_in_fov(x, y); let wall = game.map[x as usize][y as usize].block_sight; let color = match (visible, wall) { // outside of field of view: (false, true) => COLOR_DARK_WALL, (false, false) => COLOR_DARK_GROUND, // inside fov: (true, true) => COLOR_LIGHT_WALL, (true, false) => COLOR_LIGHT_GROUND, }; let explored = &mut game.map[x as usize][y as usize].explored; if visible { // since it's visible, explore it *explored = true; } if *explored { // show explored tiles only (any visible tile is explored already) tcod.con .set_char_background(x, y, color, BackgroundFlag::Set); } } } // draw all objects in the list for object in objects { if tcod.fov.is_in_fov(object.x, object.y) { object.draw(&mut tcod.con); } } // blit the contents of "con" to the root console blit( &tcod.con, (0, 0), (MAP_WIDTH, MAP_HEIGHT), &mut tcod.root, (0, 0), 1.0, 1.0, ); } fn player_move_or_attack(dx: i32, dy: i32, game: &Game, objects: &mut [Object]) { // the coordinates the player is moving to/attacking let x = objects[PLAYER].x + dx; let y = objects[PLAYER].y + dy; // try to find an attackable object there let target_id = objects.iter().position(|object| object.pos() == (x, y)); // attack if target found, move otherwise match target_id { Some(target_id) => { println!( "The {} laughs at your puny efforts to attack him!", objects[target_id].name ); } None => { move_by(PLAYER, dx, dy, &game.map, objects); } } } fn handle_keys(tcod: &mut Tcod, game: &Game, objects: &mut Vec<Object>) -> PlayerAction { use tcod::input::Key; use tcod::input::KeyCode::*; use PlayerAction::*; let key = tcod.root.wait_for_keypress(true); let player_alive = objects[PLAYER].alive; match (key, key.text(), player_alive) { ( Key { code: Enter, alt: true, .. }, _, _, ) => { // Alt+Enter: toggle fullscreen let fullscreen = tcod.root.is_fullscreen(); tcod.root.set_fullscreen(!fullscreen); DidntTakeTurn } (Key { code: Escape, .. }, _, _) => Exit, // exit game // movement keys (Key { code: Up, .. }, _, true) => { player_move_or_attack(0, -1, game, objects); TookTurn } (Key { code: Down, .. }, _, true) => { player_move_or_attack(0, 1, game, objects); TookTurn } (Key { code: Left, .. }, _, true) => { player_move_or_attack(-1, 0, game, objects); TookTurn } (Key { code: Right, .. 
}, _, true) => { player_move_or_attack(1, 0, game, objects); TookTurn } _ => DidntTakeTurn, } } #[derive(Clone, Copy, Debug, PartialEq)] enum PlayerAction { TookTurn, DidntTakeTurn, Exit, } fn main() { tcod::system::set_fps(LIMIT_FPS); let root = Root::initializer() .font("arial10x10.png", FontLayout::Tcod) .font_type(FontType::Greyscale) .size(SCREEN_WIDTH, SCREEN_HEIGHT) .title("Rust/libtcod tutorial") .init(); let mut tcod = Tcod { root, con: Offscreen::new(MAP_WIDTH, MAP_HEIGHT), fov: FovMap::new(MAP_WIDTH, MAP_HEIGHT), }; // create object representing the player let mut player = Object::new(0, 0, '@', "player", WHITE, true); player.alive = true; // the list of objects with just the player let mut objects = vec![player]; let mut game = Game { // generate map (at this point it's not drawn to the screen) map: make_map(&mut objects), }; // populate the FOV map, according to the generated map for y in 0..MAP_HEIGHT { for x in 0..MAP_WIDTH { tcod.fov.set( x, y, !game.map[x as usize][y as usize].block_sight, !game.map[x as usize][y as usize].blocked, ); } } // force FOV "recompute" first time through the game loop let mut previous_player_position = (-1, -1); while !tcod.root.window_closed() { // clear the screen of the previous frame tcod.con.clear(); // render the screen let fov_recompute = previous_player_position != (objects[PLAYER].pos()); render_all(&mut tcod, &mut game, &objects, fov_recompute); tcod.root.flush(); // handle keys and exit game if needed previous_player_position = objects[PLAYER].pos(); let player_action = handle_keys(&mut tcod, &game, &mut objects); if player_action == PlayerAction::Exit { break; } // let monsters take their turn if objects[PLAYER].alive && player_action != PlayerAction::DidntTakeTurn { for object in &objects { // only if object is not player if (object as *const _) != (&objects[PLAYER] as *const _) { println!("The {} growls!", object.name); } } } } }
29.006073
93
0.541419
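Room placement in make_map in the record above rejects any candidate room that overlaps an existing one via Rect::intersects_with. A self-contained check of that axis-aligned overlap test, trimmed to just the fields the test needs:

#[derive(Clone, Copy, Debug)]
struct Rect { x1: i32, y1: i32, x2: i32, y2: i32 }

impl Rect {
    fn new(x: i32, y: i32, w: i32, h: i32) -> Self {
        Rect { x1: x, y1: y, x2: x + w, y2: y + h }
    }
    // Same axis-aligned overlap test make_map relies on to reject rooms.
    fn intersects_with(&self, other: &Rect) -> bool {
        (self.x1 <= other.x2) && (self.x2 >= other.x1)
            && (self.y1 <= other.y2) && (self.y2 >= other.y1)
    }
}

fn main() {
    let a = Rect::new(0, 0, 10, 10);
    let b = Rect::new(5, 5, 10, 10);  // overlaps a
    let c = Rect::new(20, 20, 5, 5);  // disjoint from a
    assert!(a.intersects_with(&b));
    assert!(!a.intersects_with(&c));
}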
e28d2db9076433aebeb547f7e5743bf634c2486a
1,286
// ====================================== // This file was automatically generated. // ====================================== use crate::ids::{BitcoinTransactionId}; use crate::params::{Object, Timestamp}; use crate::resources::{Currency}; use serde_derive::{Deserialize, Serialize}; /// The resource representing a Stripe "BitcoinTransaction". #[derive(Clone, Debug, Deserialize, Serialize)] pub struct BitcoinTransaction { /// Unique identifier for the object. pub id: BitcoinTransactionId, /// The amount of `currency` that the transaction was converted to in real-time. pub amount: i64, /// The amount of bitcoin contained in the transaction. pub bitcoin_amount: i64, /// Time at which the object was created. /// /// Measured in seconds since the Unix epoch. pub created: Timestamp, /// Three-letter [ISO code for the currency](https://stripe.com/docs/currencies) to which this transaction was converted. pub currency: Currency, /// The receiver to which this transaction was sent. pub receiver: String, } impl Object for BitcoinTransaction { type Id = BitcoinTransactionId; fn id(&self) -> Self::Id { self.id.clone() } fn object(&self) -> &'static str { "bitcoin_transaction" } }
29.906977
125
0.643857
6a7fd36c643df2cffb70de339fa534494e666660
32,839
//! A hash set implemented using `OrderMap` use std::cmp::Ordering; use std::collections::hash_map::RandomState; use std::fmt; use std::iter::{FromIterator, Chain}; use std::hash::{Hash, BuildHasher}; use std::mem::replace; use std::ops::RangeFull; use std::ops::{BitAnd, BitOr, BitXor, Sub}; use std::slice; use std::vec; use super::{OrderMap, Equivalent}; type Bucket<T> = super::Bucket<T, ()>; /// A hash set where the iteration order of the values is independent of their /// hash values. /// /// The interface is closely compatible with the standard `HashSet`, but also /// has additional features. /// /// # Order /// /// The values have a consistent order that is determined by the sequence of /// insertion and removal calls on the set. The order does not depend on the /// values or the hash function at all. Note that insertion order and value /// are not affected if a re-insertion is attempted once an element is /// already present. /// /// All iterators traverse the set *in order*. Set operation iterators like /// `union` produce a concatenated order, as do their matching "bitwise" /// operators. See their documentation for specifics. /// /// # Indices /// /// The values are indexed in a compact range without holes in the range /// `0..self.len()`. For example, the method `.get_full` looks up the index for /// a value, and the method `.get_index` looks up the value by index. /// /// # Examples /// /// ``` /// use ordermap::OrderSet; /// /// // Collects which letters appear in a sentence. /// let letters: OrderSet<_> = "a short treatise on fungi".chars().collect(); /// /// assert!(letters.contains(&'s')); /// assert!(letters.contains(&'t')); /// assert!(letters.contains(&'u')); /// assert!(!letters.contains(&'y')); /// ``` #[derive(Clone)] pub struct OrderSet<T, S = RandomState> { map: OrderMap<T, (), S>, } impl<T, S> fmt::Debug for OrderSet<T, S> where T: fmt::Debug + Hash + Eq, S: BuildHasher, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { if cfg!(not(feature = "test_debug")) { f.debug_set().entries(self.iter()).finish() } else { // Let the inner `OrderMap` print all of its details f.debug_struct("OrderSet").field("map", &self.map).finish() } } } impl<T> OrderSet<T> { /// Create a new set. (Does not allocate.) pub fn new() -> Self { OrderSet { map: OrderMap::new() } } /// Create a new set with capacity for `n` elements. /// (Does not allocate if `n` is zero.) /// /// Computes in **O(n)** time. pub fn with_capacity(n: usize) -> Self { OrderSet { map: OrderMap::with_capacity(n) } } } impl<T, S> OrderSet<T, S> { /// Create a new set with capacity for `n` elements. /// (Does not allocate if `n` is zero.) /// /// Computes in **O(n)** time. pub fn with_capacity_and_hasher(n: usize, hash_builder: S) -> Self where S: BuildHasher { OrderSet { map: OrderMap::with_capacity_and_hasher(n, hash_builder) } } /// Return the number of elements in the set. /// /// Computes in **O(1)** time. pub fn len(&self) -> usize { self.map.len() } /// Returns true if the set contains no elements. /// /// Computes in **O(1)** time. pub fn is_empty(&self) -> bool { self.map.is_empty() } /// Create a new set with `hash_builder` pub fn with_hasher(hash_builder: S) -> Self where S: BuildHasher { OrderSet { map: OrderMap::with_hasher(hash_builder) } } /// Return a reference to the set's `BuildHasher`. pub fn hasher(&self) -> &S where S: BuildHasher { self.map.hasher() } /// Computes in **O(1)** time. 
pub fn capacity(&self) -> usize { self.map.capacity() } } impl<T, S> OrderSet<T, S> where T: Hash + Eq, S: BuildHasher, { /// Remove all elements in the set, while preserving its capacity. /// /// Computes in **O(n)** time. pub fn clear(&mut self) { self.map.clear(); } /// FIXME Not implemented fully yet pub fn reserve(&mut self, additional: usize) { self.map.reserve(additional); } /// Insert the value into the set. /// /// If an equivalent item already exists in the set, it returns /// `false` leaving the original value in the set and without /// altering its insertion order. Otherwise, it inserts the new /// item and returns `true`. /// /// Computes in **O(1)** time (amortized average). pub fn insert(&mut self, value: T) -> bool { self.map.insert(value, ()).is_none() } /// Return an iterator over the values of the set, in their order pub fn iter(&self) -> Iter<T> { Iter { iter: self.map.keys().iter } } /// Return an iterator over the values that are in `self` but not `other`. /// /// Values are produced in the same order that they appear in `self`. pub fn difference<'a, S2>(&'a self, other: &'a OrderSet<T, S2>) -> Difference<'a, T, S2> where S2: BuildHasher { Difference { iter: self.iter(), other: other, } } /// Return an iterator over the values that are in `self` or `other`, /// but not in both. /// /// Values from `self` are produced in their original order, followed by /// values from `other` in their original order. pub fn symmetric_difference<'a, S2>(&'a self, other: &'a OrderSet<T, S2>) -> SymmetricDifference<'a, T, S, S2> where S2: BuildHasher { SymmetricDifference { iter: self.difference(other).chain(other.difference(self)), } } /// Return an iterator over the values that are in both `self` and `other`. /// /// Values are produced in the same order that they appear in `self`. pub fn intersection<'a, S2>(&'a self, other: &'a OrderSet<T, S2>) -> Intersection<'a, T, S2> where S2: BuildHasher { Intersection { iter: self.iter(), other: other, } } /// Return an iterator over all values that are in `self` or `other`. /// /// Values from `self` are produced in their original order, followed by /// values that are unique to `other` in their original order. pub fn union<'a, S2>(&'a self, other: &'a OrderSet<T, S2>) -> Union<'a, T, S> where S2: BuildHasher { Union { iter: self.iter().chain(other.difference(self)), } } /// Return `true` if an equivalent to `value` exists in the set. /// /// Computes in **O(1)** time (average). pub fn contains<Q: ?Sized>(&self, value: &Q) -> bool where Q: Hash + Equivalent<T>, { self.map.contains_key(value) } /// Return a reference to the value stored in the set, if it is present, /// else `None`. /// /// Computes in **O(1)** time (average). pub fn get<Q: ?Sized>(&self, value: &Q) -> Option<&T> where Q: Hash + Equivalent<T>, { self.map.get_full(value).map(|(_, x, &())| x) } /// Return item index and value pub fn get_full<Q: ?Sized>(&self, value: &Q) -> Option<(usize, &T)> where Q: Hash + Equivalent<T>, { self.map.get_full(value).map(|(i, x, &())| (i, x)) } /// Adds a value to the set, replacing the existing value, if any, that is /// equal to the given one. Returns the replaced value. /// /// Computes in **O(1)** time (average). pub fn replace(&mut self, value: T) -> Option<T> { use super::Entry::*; match self.map.entry(value) { Vacant(e) => { e.insert(()); None }, Occupied(e) => { // FIXME uses private fields! 
let old_key = &mut e.map.entries[e.index].key; Some(replace(old_key, e.key)) } } } /// FIXME Same as .swap_remove /// /// Computes in **O(1)** time (average). pub fn remove<Q: ?Sized>(&mut self, value: &Q) -> bool where Q: Hash + Equivalent<T>, { self.swap_remove(value) } /// Remove the value from the set, and return `true` if it was present. /// /// Like `Vec::swap_remove`, the value is removed by swapping it with the /// last element of the set and popping it off. **This perturbs /// the postion of what used to be the last element!** /// /// Return `false` if `value` was not in the set. /// /// Computes in **O(1)** time (average). pub fn swap_remove<Q: ?Sized>(&mut self, value: &Q) -> bool where Q: Hash + Equivalent<T>, { self.map.swap_remove(value).is_some() } /// FIXME Same as .swap_take /// /// Computes in **O(1)** time (average). pub fn take<Q: ?Sized>(&mut self, value: &Q) -> Option<T> where Q: Hash + Equivalent<T>, { self.swap_take(value) } /// Removes and returns the value in the set, if any, that is equal to the /// given one. /// /// Like `Vec::swap_remove`, the value is removed by swapping it with the /// last element of the set and popping it off. **This perturbs /// the postion of what used to be the last element!** /// /// Return `None` if `value` was not in the set. /// /// Computes in **O(1)** time (average). pub fn swap_take<Q: ?Sized>(&mut self, value: &Q) -> Option<T> where Q: Hash + Equivalent<T>, { self.map.swap_remove_full(value).map(|(_, x, ())| x) } /// Remove the value from the set return it and the index it had. /// /// Like `Vec::swap_remove`, the value is removed by swapping it with the /// last element of the set and popping it off. **This perturbs /// the postion of what used to be the last element!** /// /// Return `None` if `value` was not in the set. pub fn swap_remove_full<Q: ?Sized>(&mut self, value: &Q) -> Option<(usize, T)> where Q: Hash + Equivalent<T>, { self.map.swap_remove_full(value).map(|(i, x, ())| (i, x)) } /// Remove the last value /// /// Computes in **O(1)** time (average). pub fn pop(&mut self) -> Option<T> { self.map.pop().map(|(x, ())| x) } /// Scan through each value in the set and keep those where the /// closure `keep` returns `true`. /// /// The elements are visited in order, and remaining elements keep their /// order. /// /// Computes in **O(n)** time (average). pub fn retain<F>(&mut self, mut keep: F) where F: FnMut(&T) -> bool, { self.map.retain(move |x, &mut ()| keep(x)) } /// Sort the set’s values by their default ordering. /// /// See `sort_by` for details. pub fn sort(&mut self) where T: Ord, { self.map.sort_keys() } /// Sort the set’s values in place using the comparison function `compare`. /// /// Computes in **O(n log n)** time and **O(n)** space. The sort is stable. pub fn sort_by<F>(&mut self, mut compare: F) where F: FnMut(&T, &T) -> Ordering, { self.map.sort_by(move |a, _, b, _| compare(a, b)); } /// Sort the values of the set and return a by value iterator of /// the values with the result. /// /// The sort is stable. pub fn sorted_by<F>(self, mut cmp: F) -> IntoIter<T> where F: FnMut(&T, &T) -> Ordering { IntoIter { iter: self.map.sorted_by(move |a, &(), b, &()| cmp(a, b)).iter, } } /// Clears the `OrderSet`, returning all values as a drain iterator. /// Keeps the allocated memory for reuse. 
pub fn drain(&mut self, range: RangeFull) -> Drain<T> { Drain { iter: self.map.drain(range).iter, } } } impl<T, S> OrderSet<T, S> { /// Get a value by index /// /// Valid indices are *0 <= index < self.len()* /// /// Computes in **O(1)** time. pub fn get_index(&self, index: usize) -> Option<&T> { self.map.get_index(index).map(|(x, &())| x) } /// Remove the key-value pair by index /// /// Valid indices are *0 <= index < self.len()* /// /// Computes in **O(1)** time (average). pub fn swap_remove_index(&mut self, index: usize) -> Option<T> { self.map.swap_remove_index(index).map(|(x, ())| x) } } pub struct IntoIter<T> { iter: vec::IntoIter<Bucket<T>>, } impl<T> Iterator for IntoIter<T> { type Item = T; iterator_methods!(|entry| entry.key); } impl<T> DoubleEndedIterator for IntoIter<T> { fn next_back(&mut self) -> Option<Self::Item> { self.iter.next_back().map(|entry| entry.key) } } impl<T> ExactSizeIterator for IntoIter<T> { fn len(&self) -> usize { self.iter.len() } } pub struct Iter<'a, T: 'a> { iter: slice::Iter<'a, Bucket<T>>, } impl<'a, T> Iterator for Iter<'a, T> { type Item = &'a T; iterator_methods!(|entry| &entry.key); } impl<'a, T> DoubleEndedIterator for Iter<'a, T> { fn next_back(&mut self) -> Option<Self::Item> { self.iter.next_back().map(|entry| &entry.key) } } impl<'a, T> ExactSizeIterator for Iter<'a, T> { fn len(&self) -> usize { self.iter.len() } } pub struct Drain<'a, T: 'a> { iter: vec::Drain<'a, Bucket<T>>, } impl<'a, T> Iterator for Drain<'a, T> { type Item = T; iterator_methods!(|bucket| bucket.key); } impl<'a, T> DoubleEndedIterator for Drain<'a, T> { double_ended_iterator_methods!(|bucket| bucket.key); } impl<'a, T, S> IntoIterator for &'a OrderSet<T, S> where T: Hash + Eq, S: BuildHasher, { type Item = &'a T; type IntoIter = Iter<'a, T>; fn into_iter(self) -> Self::IntoIter { self.iter() } } impl<T, S> IntoIterator for OrderSet<T, S> where T: Hash + Eq, S: BuildHasher, { type Item = T; type IntoIter = IntoIter<T>; fn into_iter(self) -> Self::IntoIter { IntoIter { iter: self.map.into_iter().iter, } } } impl<T, S> FromIterator<T> for OrderSet<T, S> where T: Hash + Eq, S: BuildHasher + Default, { fn from_iter<I: IntoIterator<Item=T>>(iterable: I) -> Self { let iter = iterable.into_iter().map(|x| (x, ())); OrderSet { map: OrderMap::from_iter(iter) } } } impl<T, S> Extend<T> for OrderSet<T, S> where T: Hash + Eq, S: BuildHasher, { fn extend<I: IntoIterator<Item=T>>(&mut self, iterable: I) { let iter = iterable.into_iter().map(|x| (x, ())); self.map.extend(iter); } } impl<'a, T, S> Extend<&'a T> for OrderSet<T, S> where T: Hash + Eq + Copy, S: BuildHasher, { fn extend<I: IntoIterator<Item=&'a T>>(&mut self, iterable: I) { let iter = iterable.into_iter().map(|&x| x); self.extend(iter); } } impl<T, S> Default for OrderSet<T, S> where S: BuildHasher + Default, { /// Return an empty `OrderSet` fn default() -> Self { OrderSet { map: OrderMap::default() } } } impl<T, S1, S2> PartialEq<OrderSet<T, S2>> for OrderSet<T, S1> where T: Hash + Eq, S1: BuildHasher, S2: BuildHasher { fn eq(&self, other: &OrderSet<T, S2>) -> bool { self.len() == other.len() && self.is_subset(other) } } impl<T, S> Eq for OrderSet<T, S> where T: Eq + Hash, S: BuildHasher { } impl<T, S> OrderSet<T, S> where T: Eq + Hash, S: BuildHasher { /// Returns `true` if `self` has no elements in common with `other`. 
pub fn is_disjoint<S2>(&self, other: &OrderSet<T, S2>) -> bool where S2: BuildHasher { if self.len() <= other.len() { self.iter().all(move |value| !other.contains(value)) } else { other.iter().all(move |value| !self.contains(value)) } } /// Returns `true` if all elements of `self` are contained in `other`. pub fn is_subset<S2>(&self, other: &OrderSet<T, S2>) -> bool where S2: BuildHasher { self.len() <= other.len() && self.iter().all(move |value| other.contains(value)) } /// Returns `true` if all elements of `other` are contained in `self`. pub fn is_superset<S2>(&self, other: &OrderSet<T, S2>) -> bool where S2: BuildHasher { other.is_subset(self) } } pub struct Difference<'a, T: 'a, S: 'a> { iter: Iter<'a, T>, other: &'a OrderSet<T, S>, } impl<'a, T, S> Iterator for Difference<'a, T, S> where T: Eq + Hash, S: BuildHasher { type Item = &'a T; fn next(&mut self) -> Option<Self::Item> { while let Some(item) = self.iter.next() { if !self.other.contains(item) { return Some(item); } } None } fn size_hint(&self) -> (usize, Option<usize>) { (0, self.iter.size_hint().1) } } impl<'a, T, S> DoubleEndedIterator for Difference<'a, T, S> where T: Eq + Hash, S: BuildHasher { fn next_back(&mut self) -> Option<Self::Item> { while let Some(item) = self.iter.next_back() { if !self.other.contains(item) { return Some(item); } } None } } pub struct Intersection<'a, T: 'a, S: 'a> { iter: Iter<'a, T>, other: &'a OrderSet<T, S>, } impl<'a, T, S> Iterator for Intersection<'a, T, S> where T: Eq + Hash, S: BuildHasher { type Item = &'a T; fn next(&mut self) -> Option<Self::Item> { while let Some(item) = self.iter.next() { if self.other.contains(item) { return Some(item); } } None } fn size_hint(&self) -> (usize, Option<usize>) { (0, self.iter.size_hint().1) } } impl<'a, T, S> DoubleEndedIterator for Intersection<'a, T, S> where T: Eq + Hash, S: BuildHasher { fn next_back(&mut self) -> Option<Self::Item> { while let Some(item) = self.iter.next_back() { if self.other.contains(item) { return Some(item); } } None } } pub struct SymmetricDifference<'a, T: 'a, S1: 'a, S2: 'a> { iter: Chain<Difference<'a, T, S2>, Difference<'a, T, S1>>, } impl<'a, T, S1, S2> Iterator for SymmetricDifference<'a, T, S1, S2> where T: Eq + Hash, S1: BuildHasher, S2: BuildHasher, { type Item = &'a T; fn next(&mut self) -> Option<Self::Item> { self.iter.next() } fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() } fn fold<B, F>(self, init: B, f: F) -> B where F: FnMut(B, Self::Item) -> B { self.iter.fold(init, f) } } impl<'a, T, S1, S2> DoubleEndedIterator for SymmetricDifference<'a, T, S1, S2> where T: Eq + Hash, S1: BuildHasher, S2: BuildHasher, { fn next_back(&mut self) -> Option<Self::Item> { self.iter.next_back() } } pub struct Union<'a, T: 'a, S: 'a> { iter: Chain<Iter<'a, T>, Difference<'a, T, S>>, } impl<'a, T, S> Iterator for Union<'a, T, S> where T: Eq + Hash, S: BuildHasher, { type Item = &'a T; fn next(&mut self) -> Option<Self::Item> { self.iter.next() } fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() } fn fold<B, F>(self, init: B, f: F) -> B where F: FnMut(B, Self::Item) -> B { self.iter.fold(init, f) } } impl<'a, T, S> DoubleEndedIterator for Union<'a, T, S> where T: Eq + Hash, S: BuildHasher, { fn next_back(&mut self) -> Option<Self::Item> { self.iter.next_back() } } impl<'a, 'b, T, S1, S2> BitAnd<&'b OrderSet<T, S2>> for &'a OrderSet<T, S1> where T: Eq + Hash + Clone, S1: BuildHasher + Default, S2: BuildHasher, { type Output = OrderSet<T, S1>; /// Returns the set intersection, cloned into 
a new set. /// /// Values are collected in the same order that they appear in `self`. fn bitand(self, other: &'b OrderSet<T, S2>) -> Self::Output { self.intersection(other).cloned().collect() } } impl<'a, 'b, T, S1, S2> BitOr<&'b OrderSet<T, S2>> for &'a OrderSet<T, S1> where T: Eq + Hash + Clone, S1: BuildHasher + Default, S2: BuildHasher, { type Output = OrderSet<T, S1>; /// Returns the set union, cloned into a new set. /// /// Values from `self` are collected in their original order, followed by /// values that are unique to `other` in their original order. fn bitor(self, other: &'b OrderSet<T, S2>) -> Self::Output { self.union(other).cloned().collect() } } impl<'a, 'b, T, S1, S2> BitXor<&'b OrderSet<T, S2>> for &'a OrderSet<T, S1> where T: Eq + Hash + Clone, S1: BuildHasher + Default, S2: BuildHasher, { type Output = OrderSet<T, S1>; /// Returns the set symmetric-difference, cloned into a new set. /// /// Values from `self` are collected in their original order, followed by /// values from `other` in their original order. fn bitxor(self, other: &'b OrderSet<T, S2>) -> Self::Output { self.symmetric_difference(other).cloned().collect() } } impl<'a, 'b, T, S1, S2> Sub<&'b OrderSet<T, S2>> for &'a OrderSet<T, S1> where T: Eq + Hash + Clone, S1: BuildHasher + Default, S2: BuildHasher, { type Output = OrderSet<T, S1>; /// Returns the set difference, cloned into a new set. /// /// Values are collected in the same order that they appear in `self`. fn sub(self, other: &'b OrderSet<T, S2>) -> Self::Output { self.difference(other).cloned().collect() } } #[cfg(test)] mod tests { use super::*; use util::enumerate; #[test] fn it_works() { let mut set = OrderSet::new(); assert_eq!(set.is_empty(), true); set.insert(1); set.insert(1); assert_eq!(set.len(), 1); assert!(set.get(&1).is_some()); assert_eq!(set.is_empty(), false); } #[test] fn new() { let set = OrderSet::<String>::new(); println!("{:?}", set); assert_eq!(set.capacity(), 0); assert_eq!(set.len(), 0); assert_eq!(set.is_empty(), true); } #[test] fn insert() { let insert = [0, 4, 2, 12, 8, 7, 11, 5]; let not_present = [1, 3, 6, 9, 10]; let mut set = OrderSet::with_capacity(insert.len()); for (i, &elt) in enumerate(&insert) { assert_eq!(set.len(), i); set.insert(elt); assert_eq!(set.len(), i + 1); assert_eq!(set.get(&elt), Some(&elt)); } println!("{:?}", set); for &elt in &not_present { assert!(set.get(&elt).is_none()); } } #[test] fn insert_2() { let mut set = OrderSet::with_capacity(16); let mut values = vec![]; values.extend(0..16); values.extend(128..267); for &i in &values { let old_set = set.clone(); set.insert(i); for value in old_set.iter() { if !set.get(value).is_some() { println!("old_set: {:?}", old_set); println!("set: {:?}", set); panic!("did not find {} in set", value); } } } for &i in &values { assert!(set.get(&i).is_some(), "did not find {}", i); } } #[test] fn insert_dup() { let mut elements = vec![0, 2, 4, 6, 8]; let mut set: OrderSet<u8> = elements.drain(..).collect(); { let (i, v) = set.get_full(&0).unwrap(); assert_eq!(set.len(), 5); assert_eq!(i, 0); assert_eq!(*v, 0); } { let inserted = set.insert(0); let (i, v) = set.get_full(&0).unwrap(); assert_eq!(set.len(), 5); assert_eq!(inserted, false); assert_eq!(i, 0); assert_eq!(*v, 0); } } #[test] fn insert_order() { let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; let mut set = OrderSet::new(); for &elt in &insert { set.insert(elt); } assert_eq!(set.iter().count(), set.len()); assert_eq!(set.iter().count(), insert.len()); for (a, b) in insert.iter().zip(set.iter()) 
{ assert_eq!(a, b); } for (i, v) in (0..insert.len()).zip(set.iter()) { assert_eq!(set.get_index(i).unwrap(), v); } } #[test] fn grow() { let insert = [0, 4, 2, 12, 8, 7, 11]; let not_present = [1, 3, 6, 9, 10]; let mut set = OrderSet::with_capacity(insert.len()); for (i, &elt) in enumerate(&insert) { assert_eq!(set.len(), i); set.insert(elt); assert_eq!(set.len(), i + 1); assert_eq!(set.get(&elt), Some(&elt)); } println!("{:?}", set); for &elt in &insert { set.insert(elt * 10); } for &elt in &insert { set.insert(elt * 100); } for (i, &elt) in insert.iter().cycle().enumerate().take(100) { set.insert(elt * 100 + i as i32); } println!("{:?}", set); for &elt in &not_present { assert!(set.get(&elt).is_none()); } } #[test] fn remove() { let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; let mut set = OrderSet::new(); for &elt in &insert { set.insert(elt); } assert_eq!(set.iter().count(), set.len()); assert_eq!(set.iter().count(), insert.len()); for (a, b) in insert.iter().zip(set.iter()) { assert_eq!(a, b); } let remove_fail = [99, 77]; let remove = [4, 12, 8, 7]; for &value in &remove_fail { assert!(set.swap_remove_full(&value).is_none()); } println!("{:?}", set); for &value in &remove { //println!("{:?}", set); let index = set.get_full(&value).unwrap().0; assert_eq!(set.swap_remove_full(&value), Some((index, value))); } println!("{:?}", set); for value in &insert { assert_eq!(set.get(value).is_some(), !remove.contains(value)); } assert_eq!(set.len(), insert.len() - remove.len()); assert_eq!(set.iter().count(), insert.len() - remove.len()); } #[test] fn swap_remove_index() { let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; let mut set = OrderSet::new(); for &elt in &insert { set.insert(elt); } let mut vector = insert.to_vec(); let remove_sequence = &[3, 3, 10, 4, 5, 4, 3, 0, 1]; // check that the same swap remove sequence on vec and set // have the same result. 
for &rm in remove_sequence { let out_vec = vector.swap_remove(rm); let out_set = set.swap_remove_index(rm).unwrap(); assert_eq!(out_vec, out_set); } assert_eq!(vector.len(), set.len()); for (a, b) in vector.iter().zip(set.iter()) { assert_eq!(a, b); } } #[test] fn partial_eq_and_eq() { let mut set_a = OrderSet::new(); set_a.insert(1); set_a.insert(2); let mut set_b = set_a.clone(); assert_eq!(set_a, set_b); set_b.remove(&1); assert_ne!(set_a, set_b); let set_c: OrderSet<_> = set_b.into_iter().collect(); assert_ne!(set_a, set_c); assert_ne!(set_c, set_a); } #[test] fn extend() { let mut set = OrderSet::new(); set.extend(vec![&1, &2, &3, &4]); set.extend(vec![5, 6]); assert_eq!(set.into_iter().collect::<Vec<_>>(), vec![1, 2, 3, 4, 5, 6]); } #[test] fn comparisons() { let set_a: OrderSet<_> = (0..3).collect(); let set_b: OrderSet<_> = (3..6).collect(); let set_c: OrderSet<_> = (0..6).collect(); let set_d: OrderSet<_> = (3..9).collect(); assert!(!set_a.is_disjoint(&set_a)); assert!(set_a.is_subset(&set_a)); assert!(set_a.is_superset(&set_a)); assert!(set_a.is_disjoint(&set_b)); assert!(set_b.is_disjoint(&set_a)); assert!(!set_a.is_subset(&set_b)); assert!(!set_b.is_subset(&set_a)); assert!(!set_a.is_superset(&set_b)); assert!(!set_b.is_superset(&set_a)); assert!(!set_a.is_disjoint(&set_c)); assert!(!set_c.is_disjoint(&set_a)); assert!(set_a.is_subset(&set_c)); assert!(!set_c.is_subset(&set_a)); assert!(!set_a.is_superset(&set_c)); assert!(set_c.is_superset(&set_a)); assert!(!set_c.is_disjoint(&set_d)); assert!(!set_d.is_disjoint(&set_c)); assert!(!set_c.is_subset(&set_d)); assert!(!set_d.is_subset(&set_c)); assert!(!set_c.is_superset(&set_d)); assert!(!set_d.is_superset(&set_c)); } #[test] fn iter_comparisons() { use std::iter::empty; fn check<'a, I1, I2>(iter1: I1, iter2: I2) where I1: Iterator<Item = &'a i32>, I2: Iterator<Item = i32>, { assert!(iter1.cloned().eq(iter2)); } let set_a: OrderSet<_> = (0..3).collect(); let set_b: OrderSet<_> = (3..6).collect(); let set_c: OrderSet<_> = (0..6).collect(); let set_d: OrderSet<_> = (3..9).rev().collect(); check(set_a.difference(&set_a), empty()); check(set_a.symmetric_difference(&set_a), empty()); check(set_a.intersection(&set_a), 0..3); check(set_a.union(&set_a), 0..3); check(set_a.difference(&set_b), 0..3); check(set_b.difference(&set_a), 3..6); check(set_a.symmetric_difference(&set_b), 0..6); check(set_b.symmetric_difference(&set_a), (3..6).chain(0..3)); check(set_a.intersection(&set_b), empty()); check(set_b.intersection(&set_a), empty()); check(set_a.union(&set_b), 0..6); check(set_b.union(&set_a), (3..6).chain(0..3)); check(set_a.difference(&set_c), empty()); check(set_c.difference(&set_a), 3..6); check(set_a.symmetric_difference(&set_c), 3..6); check(set_c.symmetric_difference(&set_a), 3..6); check(set_a.intersection(&set_c), 0..3); check(set_c.intersection(&set_a), 0..3); check(set_a.union(&set_c), 0..6); check(set_c.union(&set_a), 0..6); check(set_c.difference(&set_d), 0..3); check(set_d.difference(&set_c), (6..9).rev()); check(set_c.symmetric_difference(&set_d), (0..3).chain((6..9).rev())); check(set_d.symmetric_difference(&set_c), (6..9).rev().chain(0..3)); check(set_c.intersection(&set_d), 3..6); check(set_d.intersection(&set_c), (3..6).rev()); check(set_c.union(&set_d), (0..6).chain((6..9).rev())); check(set_d.union(&set_c), (3..9).rev().chain(0..3)); } #[test] fn ops() { let empty = OrderSet::<i32>::new(); let set_a: OrderSet<_> = (0..3).collect(); let set_b: OrderSet<_> = (3..6).collect(); let set_c: OrderSet<_> = (0..6).collect(); 
let set_d: OrderSet<_> = (3..9).rev().collect(); assert_eq!(&set_a & &set_a, set_a); assert_eq!(&set_a | &set_a, set_a); assert_eq!(&set_a ^ &set_a, empty); assert_eq!(&set_a - &set_a, empty); assert_eq!(&set_a & &set_b, empty); assert_eq!(&set_b & &set_a, empty); assert_eq!(&set_a | &set_b, set_c); assert_eq!(&set_b | &set_a, set_c); assert_eq!(&set_a ^ &set_b, set_c); assert_eq!(&set_b ^ &set_a, set_c); assert_eq!(&set_a - &set_b, set_a); assert_eq!(&set_b - &set_a, set_b); assert_eq!(&set_a & &set_c, set_a); assert_eq!(&set_c & &set_a, set_a); assert_eq!(&set_a | &set_c, set_c); assert_eq!(&set_c | &set_a, set_c); assert_eq!(&set_a ^ &set_c, set_b); assert_eq!(&set_c ^ &set_a, set_b); assert_eq!(&set_a - &set_c, empty); assert_eq!(&set_c - &set_a, set_b); assert_eq!(&set_c & &set_d, set_b); assert_eq!(&set_d & &set_c, set_b); assert_eq!(&set_c | &set_d, &set_a | &set_d); assert_eq!(&set_d | &set_c, &set_a | &set_d); assert_eq!(&set_c ^ &set_d, &set_a | &(&set_d - &set_b)); assert_eq!(&set_d ^ &set_c, &set_a | &(&set_d - &set_b)); assert_eq!(&set_c - &set_d, set_a); assert_eq!(&set_d - &set_c, &set_d - &set_b); } }
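// A minimal usage sketch appended for illustration (not part of the original
// file). It demonstrates the `swap_remove` behaviour documented above: the
// removed value's slot is filled by the former last element, so that
// element's position is perturbed.
#[cfg(test)]
mod swap_remove_example {
    use super::*;

    #[test]
    fn swap_remove_moves_last_element_into_the_hole() {
        let mut set: OrderSet<_> = vec!["a", "b", "c", "d"].into_iter().collect();
        assert!(set.swap_remove("b"));
        let order: Vec<_> = set.iter().cloned().collect();
        // "d" now occupies the slot that "b" used to have.
        assert_eq!(order, vec!["a", "d", "c"]);
    }
}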
28.882146
96
0.53945
08d6bbc6fb7518c3d36055e48de7a96c4120d3bb
5,123
//! A scalar that represents a number or a string. use serde::{Deserialize, Serialize}; /// An int, float or a string value. #[allow(missing_docs)] #[derive(Serialize, Deserialize, Clone, PartialEq, Debug)] #[cfg_attr(feature = "graphql", derive(async_graphql::Description))] #[serde(untagged)] pub enum SortedValue { Null, Int(u64), Float(f64), String(String), } impl From<u8> for SortedValue { #[inline] fn from(val: u8) -> Self { SortedValue::Int(val as u64) } } impl From<u32> for SortedValue { #[inline] fn from(val: u32) -> Self { SortedValue::Int(val as u64) } } impl From<u64> for SortedValue { #[inline] fn from(val: u64) -> Self { SortedValue::Int(val) } } impl From<f64> for SortedValue { #[inline] fn from(val: f64) -> Self { SortedValue::Float(val) } } impl From<String> for SortedValue { #[inline] fn from(val: String) -> Self { SortedValue::String(val) } } #[cfg(feature = "graphql")] #[async_graphql::Scalar(use_type_description)] impl async_graphql::ScalarType for SortedValue { #[inline] fn parse(value: async_graphql::Value) -> async_graphql::InputValueResult<Self> { match value { async_graphql::Value::Null => Ok(SortedValue::Null), async_graphql::Value::Number(ref val) => { if let Some(v) = val.as_u64() { Ok(v.into()) } else if let Some(v) = val.as_f64() { if v < 0.0 { Err(async_graphql::InputValueError::expected_type(value)) } else { Ok(v.into()) } } else { Err(async_graphql::InputValueError::expected_type(value)) } } async_graphql::Value::String(val) => Ok(SortedValue::String(val)), async_graphql::Value::Object(_) | async_graphql::Value::Binary(_) | async_graphql::Value::Boolean(_) | async_graphql::Value::Enum(_) | async_graphql::Value::List(_) => { Err(async_graphql::InputValueError::expected_type(value)) } } } #[inline] fn to_value(&self) -> async_graphql::Value { match *self { SortedValue::Null => async_graphql::Value::Null, SortedValue::Int(val) => async_graphql::Value::Number(val.into()), SortedValue::Float(val) => { let val = async_graphql::Number::from_f64(val).unwrap_or_else(|| { // `NaN` and `infinite` values are not valid JSON panic!( "invalid JSON float value: `{}` encountered when \ converting a `ScoreValue` to a `graphql::Value`", val ) }); async_graphql::Value::Number(val) } SortedValue::String(ref val) => async_graphql::Value::String(val.clone()), } } } #[cfg(test)] #[cfg(feature = "graphql")] mod tests { use super::*; use async_graphql::{ScalarType, Value as GraphQLValue}; use serde_json::Number as JsonNumber; #[test] fn can_parse_null() { let val = SortedValue::parse(GraphQLValue::Null).unwrap(); assert_eq!(val, SortedValue::Null); } #[test] fn can_parse_string() { let x: String = "x".to_string(); let val = SortedValue::parse(GraphQLValue::String(x.clone())).unwrap(); assert_eq!(val, SortedValue::String(x)); } #[test] fn can_parse_u8() { let x: u8 = 101; let val = SortedValue::parse(GraphQLValue::Number(x.into())).unwrap(); assert_eq!(val, SortedValue::from(x)); } #[test] fn can_parse_u32() { let x: u32 = 101; let val = SortedValue::parse(GraphQLValue::Number(x.into())).unwrap(); assert_eq!(val, SortedValue::from(x)); } #[test] fn can_parse_u64() { let x: u64 = 101; let val = SortedValue::parse(GraphQLValue::Number(x.into())).unwrap(); assert_eq!(val, SortedValue::Int(x)); } // TODO: implement `Eq` to assert we received the correct error #[test] fn parse_negative_is_err() { let json_number = JsonNumber::from_f64(-0.00000000000001).unwrap(); let result = SortedValue::parse(GraphQLValue::Number(json_number)); // use assert instead of `#[should_panic]` so we keep the 
output pretty with `--no-capture` assert!(result.is_err()); let x: i64 = -101; let result = SortedValue::parse(GraphQLValue::Number(x.into())); // use assert instead of `#[should_panic]` so we keep the output pretty with `--no-capture` assert!(result.is_err()); } #[test] fn can_parse_f64() { let x: f64 = 101.0; let json_number = JsonNumber::from_f64(x).unwrap(); let val = SortedValue::parse(GraphQLValue::Number(json_number)).unwrap(); assert_eq!(val, SortedValue::Float(x)); } }
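// A small serde round-trip sketch appended for illustration (not part of the
// original file). With `#[serde(untagged)]`, each variant serializes as the
// bare JSON value it wraps; `serde_json` is assumed to be available as a
// dev-dependency (it is already used by the tests above).
#[cfg(test)]
mod serde_untagged_example {
    use super::*;

    #[test]
    fn untagged_json_representation() {
        assert_eq!(serde_json::to_string(&SortedValue::Int(3)).unwrap(), "3");
        assert_eq!(
            serde_json::to_string(&SortedValue::String("x".into())).unwrap(),
            r#""x""#
        );
        // Deserialization picks the first matching variant: `Int` for a
        // non-negative integer, `Float` for a fractional number.
        assert_eq!(
            serde_json::from_str::<SortedValue>("2.5").unwrap(),
            SortedValue::Float(2.5)
        );
    }
}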
30.494048
99
0.555924
11f27dd52984f5e85ef5c5d08c6426efe70998a4
70,295
// Copyright © 2019 Intel Corporation // // SPDX-License-Identifier: Apache-2.0 // extern crate hypervisor; #[cfg(target_arch = "x86_64")] use crate::config::SgxEpcConfig; use crate::config::{HotplugMethod, MemoryConfig, MemoryZoneConfig}; use crate::MEMORY_MANAGER_SNAPSHOT_ID; #[cfg(feature = "acpi")] use acpi_tables::{aml, aml::Aml}; use anyhow::anyhow; #[cfg(target_arch = "x86_64")] use arch::x86_64::{SgxEpcRegion, SgxEpcSection}; use arch::{get_host_cpu_phys_bits, layout, RegionType}; #[cfg(target_arch = "x86_64")] use devices::ioapic; #[cfg(target_arch = "x86_64")] use libc::{MAP_NORESERVE, MAP_POPULATE, MAP_SHARED, PROT_READ, PROT_WRITE}; use std::collections::HashMap; use std::convert::TryInto; use std::ffi; use std::fs::{File, OpenOptions}; use std::io; use std::ops::Deref; use std::os::unix::io::{AsRawFd, FromRawFd, RawFd}; use std::path::PathBuf; use std::result; use std::sync::{Arc, Mutex}; use url::Url; #[cfg(target_arch = "x86_64")] use vm_allocator::GsiApic; use vm_allocator::SystemAllocator; use vm_device::BusDevice; use vm_memory::guest_memory::FileOffset; use vm_memory::{ mmap::MmapRegionError, Address, Bytes, Error as MmapError, GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic, GuestMemoryLoadGuard, GuestMemoryMmap, GuestMemoryRegion, GuestRegionMmap, GuestUsize, MemoryRegionAddress, MmapRegion, }; use vm_migration::{ Migratable, MigratableError, Pausable, Snapshot, SnapshotDataSection, Snapshottable, Transportable, }; const DEFAULT_MEMORY_ZONE: &str = "mem0"; #[cfg(target_arch = "x86_64")] const X86_64_IRQ_BASE: u32 = 5; const HOTPLUG_COUNT: usize = 8; // Memory policy constants const MPOL_BIND: u32 = 2; const MPOL_MF_STRICT: u32 = 1 << 0; const MPOL_MF_MOVE: u32 = 1 << 1; #[derive(Default)] struct HotPlugState { base: u64, length: u64, active: bool, inserting: bool, removing: bool, } pub struct VirtioMemZone { region: Arc<GuestRegionMmap>, resize_handler: virtio_devices::Resize, hotplugged_size: u64, } impl VirtioMemZone { pub fn region(&self) -> &Arc<GuestRegionMmap> { &self.region } pub fn resize_handler(&self) -> &virtio_devices::Resize { &self.resize_handler } pub fn hotplugged_size(&self) -> u64 { self.hotplugged_size } } #[derive(Default)] pub struct MemoryZone { regions: Vec<Arc<GuestRegionMmap>>, virtio_mem_zone: Option<VirtioMemZone>, } impl MemoryZone { pub fn regions(&self) -> &Vec<Arc<GuestRegionMmap>> { &self.regions } pub fn virtio_mem_zone(&self) -> &Option<VirtioMemZone> { &self.virtio_mem_zone } } pub type MemoryZones = HashMap<String, MemoryZone>; pub struct MemoryManager { boot_guest_memory: GuestMemoryMmap, guest_memory: GuestMemoryAtomic<GuestMemoryMmap>, next_memory_slot: u32, start_of_device_area: GuestAddress, end_of_device_area: GuestAddress, pub vm: Arc<dyn hypervisor::Vm>, hotplug_slots: Vec<HotPlugState>, selected_slot: usize, mergeable: bool, allocator: Arc<Mutex<SystemAllocator>>, hotplug_method: HotplugMethod, boot_ram: u64, current_ram: u64, next_hotplug_slot: usize, snapshot: Mutex<Option<GuestMemoryLoadGuard<GuestMemoryMmap>>>, shared: bool, hugepages: bool, balloon: Option<Arc<Mutex<virtio_devices::Balloon>>>, #[cfg(target_arch = "x86_64")] sgx_epc_region: Option<SgxEpcRegion>, user_provided_zones: bool, snapshot_memory_regions: Vec<MemoryRegion>, memory_zones: MemoryZones, } #[derive(Debug)] pub enum Error { /// Failed to create shared file. SharedFileCreate(io::Error), /// Failed to set shared file length. 
SharedFileSetLen(io::Error), /// Mmap backed guest memory error GuestMemory(MmapError), /// Failed to allocate a memory range. MemoryRangeAllocation, /// Failed to create map region MmapRegion(), /// Error from region creation GuestMemoryRegion(MmapRegionError), /// No ACPI slot available NoSlotAvailable, /// Not enough space in the hotplug RAM region InsufficientHotplugRAM, /// The requested hotplug memory addition is not a valid size InvalidSize, /// Failed to set the user memory region. SetUserMemoryRegion(hypervisor::HypervisorVmError), /// Failed to EventFd. EventFdFail(io::Error), /// Eventfd write error EventfdError(io::Error), /// Failed to virtio-mem resize VirtioMemResizeFail(virtio_devices::mem::Error), /// Cannot restore VM Restore(MigratableError), /// Cannot create the system allocator CreateSystemAllocator, /// The number of external backing files doesn't match the number of /// memory regions. InvalidAmountExternalBackingFiles, /// Failed to virtio-balloon resize VirtioBalloonResizeFail(virtio_devices::balloon::Error), /// Invalid SGX EPC section size #[cfg(target_arch = "x86_64")] EpcSectionSizeInvalid, /// Failed allocating SGX EPC region #[cfg(target_arch = "x86_64")] SgxEpcRangeAllocation, /// Failed opening SGX virtual EPC device #[cfg(target_arch = "x86_64")] SgxVirtEpcOpen(io::Error), /// Failed setting the SGX virtual EPC section size #[cfg(target_arch = "x86_64")] SgxVirtEpcFileSetLen(io::Error), /// Failed creating a new MmapRegion instance. #[cfg(target_arch = "x86_64")] NewMmapRegion(vm_memory::mmap::MmapRegionError), /// No memory zones found. MissingMemoryZones, /// Memory configuration is not valid. InvalidMemoryParameters, /// Forbidden operation. Impossible to resize guest memory if it is /// backed by user defined memory regions. InvalidResizeWithMemoryZones, /// It's invalid to try applying a NUMA policy to a memory zone that is /// memory mapped with MAP_SHARED. InvalidSharedMemoryZoneWithHostNuma, /// Failed applying NUMA memory policy. ApplyNumaPolicy(io::Error), /// Memory zone identifier is not unique. DuplicateZoneId, /// No virtio-mem resizing handler found. MissingVirtioMemHandler, /// Unknown memory zone. UnknownMemoryZone, /// Invalid size for resizing. Can be anything except 0. InvalidHotplugSize, /// Invalid hotplug method associated with memory zones resizing capability. InvalidHotplugMethodWithMemoryZones, /// Could not find specified memory zone identifier from hash map. MissingZoneIdentifier, /// Resizing the memory zone failed. ResizeZone, } const ENABLE_FLAG: usize = 0; const INSERTING_FLAG: usize = 1; const REMOVING_FLAG: usize = 2; const EJECT_FLAG: usize = 3; const BASE_OFFSET_LOW: u64 = 0; const BASE_OFFSET_HIGH: u64 = 0x4; const LENGTH_OFFSET_LOW: u64 = 0x8; const LENGTH_OFFSET_HIGH: u64 = 0xC; const STATUS_OFFSET: u64 = 0x14; const SELECTION_OFFSET: u64 = 0; // The MMIO address space size is substracted with the size of a 4k page. This // is done on purpose to workaround a Linux bug when the VMM allocates devices // at the end of the addressable space. 
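// As an illustrative data point (not from the original comment): a host
// reporting 46 physical address bits would yield (1 << 46) - 0x1000, i.e. the
// 64 TiB boundary minus a single 4 KiB page.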
fn mmio_address_space_size() -> u64 { (1 << get_host_cpu_phys_bits()) - 0x1000 } impl BusDevice for MemoryManager { fn read(&mut self, _base: u64, offset: u64, data: &mut [u8]) { if self.selected_slot < self.hotplug_slots.len() { let state = &self.hotplug_slots[self.selected_slot]; match offset { BASE_OFFSET_LOW => { data.copy_from_slice(&state.base.to_le_bytes()[..4]); } BASE_OFFSET_HIGH => { data.copy_from_slice(&state.base.to_le_bytes()[4..]); } LENGTH_OFFSET_LOW => { data.copy_from_slice(&state.length.to_le_bytes()[..4]); } LENGTH_OFFSET_HIGH => { data.copy_from_slice(&state.length.to_le_bytes()[4..]); } STATUS_OFFSET => { if state.active { data[0] |= 1 << ENABLE_FLAG; } if state.inserting { data[0] |= 1 << INSERTING_FLAG; } if state.removing { data[0] |= 1 << REMOVING_FLAG; } } _ => { warn!( "Unexpected offset for accessing memory manager device: {:#}", offset ); } } } } fn write(&mut self, _base: u64, offset: u64, data: &[u8]) { match offset { SELECTION_OFFSET => { self.selected_slot = usize::from(data[0]); } STATUS_OFFSET => { let state = &mut self.hotplug_slots[self.selected_slot]; // The ACPI code writes back a 1 to acknowledge the insertion if (data[0] & (1 << INSERTING_FLAG) == 1 << INSERTING_FLAG) && state.inserting { state.inserting = false; } // Ditto for removal if (data[0] & (1 << REMOVING_FLAG) == 1 << REMOVING_FLAG) && state.removing { state.removing = false; } // Trigger removal of "DIMM" if data[0] & (1 << EJECT_FLAG) == 1 << EJECT_FLAG { warn!("Ejection of memory not currently supported"); } } _ => { warn!( "Unexpected offset for accessing memory manager device: {:#}", offset ); } } } } impl MemoryManager { /// Creates all memory regions based on the available RAM ranges defined /// by `ram_regions`, and based on the description of the memory zones. /// In practice, this function can perform multiple memory mappings of the /// same backing file if there's a hole in the address space between two /// RAM ranges. /// One example might be ram_regions containing 2 regions (0-3G and 4G-6G) /// and zones containing two zones (size 1G and size 4G). /// This function will create 3 resulting memory regions: /// - First one mapping entirely the first memory zone on 0-1G range /// - Second one mapping partially the second memory zone on 1G-3G range /// - Third one mapping partially the second memory zone on 4G-6G range fn create_memory_regions_from_zones( ram_regions: &[(GuestAddress, usize)], zones: &[MemoryZoneConfig], prefault: bool, ext_regions: Option<Vec<MemoryRegion>>, ) -> Result<(Vec<Arc<GuestRegionMmap>>, MemoryZones), Error> { let mut zones = zones.to_owned(); let mut mem_regions = Vec::new(); let mut zone = zones.remove(0); let mut zone_offset = 0; let mut memory_zones = HashMap::new(); // Add zone id to the list of memory zones. 
memory_zones.insert(zone.id.clone(), MemoryZone::default()); for ram_region in ram_regions.iter() { let mut ram_region_offset = 0; let mut exit = false; loop { let mut ram_region_consumed = false; let mut pull_next_zone = false; let ram_region_sub_size = ram_region.1 - ram_region_offset; let zone_sub_size = zone.size as usize - zone_offset; let file_offset = zone_offset as u64; let region_start = ram_region.0.unchecked_add(ram_region_offset as u64); let region_size = if zone_sub_size <= ram_region_sub_size { if zone_sub_size == ram_region_sub_size { ram_region_consumed = true; } ram_region_offset += zone_sub_size; pull_next_zone = true; zone_sub_size } else { zone_offset += ram_region_sub_size; ram_region_consumed = true; ram_region_sub_size }; let region = MemoryManager::create_ram_region( &zone.file, file_offset, region_start, region_size, prefault, zone.shared, zone.hugepages, zone.host_numa_node, &ext_regions, )?; // Add region to the list of regions associated with the // current memory zone. if let Some(memory_zone) = memory_zones.get_mut(&zone.id) { memory_zone.regions.push(region.clone()); } mem_regions.push(region); if pull_next_zone { // Get the next zone and reset the offset. zone_offset = 0; if zones.is_empty() { exit = true; break; } zone = zones.remove(0); // Check if zone id already exist. In case it does, throw // an error as we need unique identifiers. Otherwise, add // the new zone id to the list of memory zones. if memory_zones.contains_key(&zone.id) { error!( "Memory zone identifier '{}' found more than once. \ It must be unique", zone.id, ); return Err(Error::DuplicateZoneId); } memory_zones.insert(zone.id.clone(), MemoryZone::default()); } if ram_region_consumed { break; } } if exit { break; } } Ok((mem_regions, memory_zones)) } pub fn new( vm: Arc<dyn hypervisor::Vm>, config: &MemoryConfig, ext_regions: Option<Vec<MemoryRegion>>, prefault: bool, ) -> Result<Arc<Mutex<MemoryManager>>, Error> { let user_provided_zones = config.size == 0; let (ram_size, zones) = if !user_provided_zones { if config.zones.is_some() { error!( "User defined memory regions can't be provided if the \ memory size is not 0" ); return Err(Error::InvalidMemoryParameters); } if let Some(hotplugged_size) = config.hotplugged_size { if let Some(hotplug_size) = config.hotplug_size { if hotplugged_size > hotplug_size { error!( "'hotplugged_size' {} can't be bigger than \ 'hotplug_size' {}", hotplugged_size, hotplug_size, ); return Err(Error::InvalidMemoryParameters); } } else { error!( "Invalid to define 'hotplugged_size' when there is\ no 'hotplug_size'" ); return Err(Error::InvalidMemoryParameters); } if config.hotplug_method == HotplugMethod::Acpi { error!( "Invalid to define 'hotplugged_size' with hotplug \ method 'acpi'" ); return Err(Error::InvalidMemoryParameters); } } // Create a single zone from the global memory config. This lets // us reuse the codepath for user defined memory zones. let zones = vec![MemoryZoneConfig { id: String::from(DEFAULT_MEMORY_ZONE), size: config.size, file: None, shared: config.shared, hugepages: config.hugepages, host_numa_node: None, hotplug_size: config.hotplug_size, hotplugged_size: config.hotplugged_size, }]; (config.size, zones) } else { if config.zones.is_none() { error!( "User defined memory regions must be provided if the \ memory size is 0" ); return Err(Error::MissingMemoryZones); } // Safe to unwrap as we checked right above there were some // regions. 
let zones = config.zones.clone().unwrap(); if zones.is_empty() { return Err(Error::MissingMemoryZones); } let mut total_ram_size: u64 = 0; for zone in zones.iter() { total_ram_size += zone.size; if zone.shared && zone.file.is_some() && zone.host_numa_node.is_some() { error!( "Invalid to set host NUMA policy for a memory zone \ backed by a regular file and mapped as 'shared'" ); return Err(Error::InvalidSharedMemoryZoneWithHostNuma); } if zone.hotplug_size.is_some() && config.hotplug_method == HotplugMethod::Acpi { error!("Invalid to set ACPI hotplug method for memory zones"); return Err(Error::InvalidHotplugMethodWithMemoryZones); } if let Some(hotplugged_size) = zone.hotplugged_size { if let Some(hotplug_size) = zone.hotplug_size { if hotplugged_size > hotplug_size { error!( "'hotplugged_size' {} can't be bigger than \ 'hotplug_size' {}", hotplugged_size, hotplug_size, ); return Err(Error::InvalidMemoryParameters); } } else { error!( "Invalid to define 'hotplugged_size' when there is\ no 'hotplug_size' for a memory zone" ); return Err(Error::InvalidMemoryParameters); } if config.hotplug_method == HotplugMethod::Acpi { error!( "Invalid to define 'hotplugged_size' with hotplug \ method 'acpi'" ); return Err(Error::InvalidMemoryParameters); } } } (total_ram_size, zones) }; // Init guest memory let arch_mem_regions = arch::arch_memory_regions(ram_size); let ram_regions: Vec<(GuestAddress, usize)> = arch_mem_regions .iter() .filter(|r| r.2 == RegionType::Ram) .map(|r| (r.0, r.1)) .collect(); let (mem_regions, mut memory_zones) = Self::create_memory_regions_from_zones(&ram_regions, &zones, prefault, ext_regions)?; let guest_memory = GuestMemoryMmap::from_arc_regions(mem_regions).map_err(Error::GuestMemory)?; let boot_guest_memory = guest_memory.clone(); let end_of_device_area = GuestAddress(mmio_address_space_size() - 1); let mut start_of_device_area = MemoryManager::start_addr(guest_memory.last_addr(), false); let mut virtio_mem_regions: Vec<Arc<GuestRegionMmap>> = Vec::new(); // Update list of memory zones for resize. for zone in zones { if let Some(memory_zone) = memory_zones.get_mut(&zone.id) { if let Some(hotplug_size) = zone.hotplug_size { if hotplug_size == 0 { error!("'hotplug_size' can't be 0"); return Err(Error::InvalidHotplugSize); } if !user_provided_zones && config.hotplug_method == HotplugMethod::Acpi { start_of_device_area = start_of_device_area.unchecked_add(hotplug_size); } else { // Alignment must be "natural" i.e. same as size of block let start_addr = GuestAddress( (start_of_device_area.0 + virtio_devices::VIRTIO_MEM_ALIGN_SIZE - 1) / virtio_devices::VIRTIO_MEM_ALIGN_SIZE * virtio_devices::VIRTIO_MEM_ALIGN_SIZE, ); let region = MemoryManager::create_ram_region( &None, 0, start_addr, hotplug_size as usize, false, config.shared, config.hugepages, None, &None, )?; virtio_mem_regions.push(region.clone()); memory_zone.virtio_mem_zone = Some(VirtioMemZone { region, resize_handler: virtio_devices::Resize::new() .map_err(Error::EventFdFail)?, hotplugged_size: zone.hotplugged_size.unwrap_or(0), }); start_of_device_area = start_addr.unchecked_add(hotplug_size); } } } else { return Err(Error::MissingZoneIdentifier); } } let guest_memory = GuestMemoryAtomic::new(guest_memory); let mut hotplug_slots = Vec::with_capacity(HOTPLUG_COUNT); hotplug_slots.resize_with(HOTPLUG_COUNT, HotPlugState::default); // Both MMIO and PIO address spaces start at address 0. 
let allocator = Arc::new(Mutex::new( SystemAllocator::new( #[cfg(target_arch = "x86_64")] GuestAddress(0), #[cfg(target_arch = "x86_64")] (1 << 16 as GuestUsize), GuestAddress(0), mmio_address_space_size(), layout::MEM_32BIT_DEVICES_START, layout::MEM_32BIT_DEVICES_SIZE, #[cfg(target_arch = "x86_64")] vec![GsiApic::new( X86_64_IRQ_BASE, ioapic::NUM_IOAPIC_PINS as u32 - X86_64_IRQ_BASE, )], ) .ok_or(Error::CreateSystemAllocator)?, )); let memory_manager = Arc::new(Mutex::new(MemoryManager { boot_guest_memory, guest_memory: guest_memory.clone(), next_memory_slot: 0, start_of_device_area, end_of_device_area, vm, hotplug_slots, selected_slot: 0, mergeable: config.mergeable, allocator: allocator.clone(), hotplug_method: config.hotplug_method.clone(), boot_ram: ram_size, current_ram: ram_size, next_hotplug_slot: 0, snapshot: Mutex::new(None), shared: config.shared, hugepages: config.hugepages, balloon: None, #[cfg(target_arch = "x86_64")] sgx_epc_region: None, user_provided_zones, snapshot_memory_regions: Vec::new(), memory_zones, })); guest_memory.memory().with_regions(|_, region| { let _ = memory_manager.lock().unwrap().create_userspace_mapping( region.start_addr().raw_value(), region.len() as u64, region.as_ptr() as u64, config.mergeable, false, )?; Ok(()) })?; for region in virtio_mem_regions.drain(..) { let mut mm = memory_manager.lock().unwrap(); mm.create_userspace_mapping( region.start_addr().raw_value(), region.len() as u64, region.as_ptr() as u64, config.mergeable, false, )?; allocator .lock() .unwrap() .allocate_mmio_addresses(Some(region.start_addr()), region.len(), None) .ok_or(Error::MemoryRangeAllocation)?; mm.add_region(region)?; } // Allocate RAM and Reserved address ranges. for region in arch_mem_regions.iter() { allocator .lock() .unwrap() .allocate_mmio_addresses(Some(region.0), region.1 as GuestUsize, None) .ok_or(Error::MemoryRangeAllocation)?; } Ok(memory_manager) } pub fn new_from_snapshot( snapshot: &Snapshot, vm: Arc<dyn hypervisor::Vm>, config: &MemoryConfig, source_url: &str, prefault: bool, ) -> Result<Arc<Mutex<MemoryManager>>, Error> { let url = Url::parse(source_url).unwrap(); /* url must be valid dir which is verified in recv_vm_snapshot() */ let vm_snapshot_path = url.to_file_path().unwrap(); if let Some(mem_section) = snapshot .snapshot_data .get(&format!("{}-section", MEMORY_MANAGER_SNAPSHOT_ID)) { let mem_snapshot: MemoryManagerSnapshotData = match serde_json::from_slice(&mem_section.snapshot) { Ok(snapshot) => snapshot, Err(error) => { return Err(Error::Restore(MigratableError::Restore(anyhow!( "Could not deserialize MemoryManager {}", error )))) } }; // Here we turn the backing file name into a backing file path as // this will be needed when the memory region will be created with // mmap(). // We simply ignore the backing files that are None, as they // represent files that have been directly saved by the user, with // no need for saving into a dedicated external file. For these // files, the VmConfig already contains the information on where to // find them. 
let mut ext_regions = mem_snapshot.memory_regions; for region in ext_regions.iter_mut() { if let Some(backing_file) = &mut region.backing_file { let mut memory_region_path = vm_snapshot_path.clone(); memory_region_path.push(backing_file.clone()); *backing_file = memory_region_path; } } MemoryManager::new(vm, config, Some(ext_regions), prefault) } else { Err(Error::Restore(MigratableError::Restore(anyhow!( "Could not find {}-section from snapshot", MEMORY_MANAGER_SNAPSHOT_ID )))) } } fn memfd_create(name: &ffi::CStr, flags: u32) -> Result<RawFd, io::Error> { let res = unsafe { libc::syscall(libc::SYS_memfd_create, name.as_ptr(), flags) }; if res < 0 { Err(io::Error::last_os_error()) } else { Ok(res as RawFd) } } fn mbind( addr: *mut u8, len: u64, mode: u32, nodemask: Vec<u64>, maxnode: u64, flags: u32, ) -> Result<(), io::Error> { let res = unsafe { libc::syscall( libc::SYS_mbind, addr as *mut libc::c_void, len, mode, nodemask.as_ptr(), maxnode, flags, ) }; if res < 0 { Err(io::Error::last_os_error()) } else { Ok(()) } } #[allow(clippy::too_many_arguments)] fn create_ram_region( file: &Option<PathBuf>, mut file_offset: u64, start_addr: GuestAddress, size: usize, prefault: bool, shared: bool, hugepages: bool, host_numa_node: Option<u32>, ext_regions: &Option<Vec<MemoryRegion>>, ) -> Result<Arc<GuestRegionMmap>, Error> { let mut backing_file: Option<PathBuf> = file.clone(); let mut copy_ext_region_content: Option<PathBuf> = None; if let Some(ext_regions) = ext_regions { for ext_region in ext_regions.iter() { if ext_region.start_addr == start_addr && ext_region.size as usize == size { if ext_region.backing_file.is_some() { // If the region is memory mapped as "shared", then we // don't replace the backing file, but expect to copy // the content from the external backing file after the // region has been created. if shared { copy_ext_region_content = ext_region.backing_file.clone(); } else { backing_file = ext_region.backing_file.clone(); // We must override the file offset as in this case // we're restoring an existing region, which means // it will fit perfectly the calculated region. file_offset = 0; } } // No need to iterate further as we found the external // region matching the current region. break; } } } let (f, f_off) = match backing_file { Some(ref file) => { if file.is_dir() { // Override file offset as it does not apply in this case. info!( "Ignoring file offset since the backing file is a \ temporary file created from the specified directory." 
); let fs_str = format!("{}{}", file.display(), "/tmpfile_XXXXXX"); let fs = ffi::CString::new(fs_str).unwrap(); let mut path = fs.as_bytes_with_nul().to_owned(); let path_ptr = path.as_mut_ptr() as *mut _; let fd = unsafe { libc::mkstemp(path_ptr) }; unsafe { libc::unlink(path_ptr) }; let f = unsafe { File::from_raw_fd(fd) }; f.set_len(size as u64).map_err(Error::SharedFileSetLen)?; (f, 0) } else { let f = OpenOptions::new() .read(true) .write(true) .open(file) .map_err(Error::SharedFileCreate)?; (f, file_offset) } } None => { let fd = Self::memfd_create( &ffi::CString::new("ch_ram").unwrap(), if hugepages { libc::MFD_HUGETLB | libc::MAP_HUGE_2MB as u32 } else { 0 }, ) .map_err(Error::SharedFileCreate)?; let f = unsafe { File::from_raw_fd(fd) }; f.set_len(size as u64).map_err(Error::SharedFileSetLen)?; (f, 0) } }; let mut mmap_flags = libc::MAP_NORESERVE | if shared { libc::MAP_SHARED } else { libc::MAP_PRIVATE }; if prefault { mmap_flags |= libc::MAP_POPULATE; } let region = GuestRegionMmap::new( MmapRegion::build( Some(FileOffset::new(f, f_off)), size, libc::PROT_READ | libc::PROT_WRITE, mmap_flags, ) .map_err(Error::GuestMemoryRegion)?, start_addr, ) .map_err(Error::GuestMemory)?; // Copy data to the region if needed if let Some(ext_backing_file) = &copy_ext_region_content { // Open (read only) the snapshot file for the given region. let mut memory_region_file = OpenOptions::new() .read(true) .open(ext_backing_file) .unwrap(); // Fill the region with the file content. region .read_from(MemoryRegionAddress(0), &mut memory_region_file, size) .unwrap(); } // Apply NUMA policy if needed. if let Some(node) = host_numa_node { let addr = region.deref().as_ptr(); let len = region.deref().size() as u64; let mode = MPOL_BIND; let mut nodemask: Vec<u64> = Vec::new(); let flags = MPOL_MF_STRICT | MPOL_MF_MOVE; // Linux is kind of buggy in the way it interprets maxnode as it // will cut off the last node. That's why we have to add 1 to what // we would consider as the proper maxnode value. let maxnode = node as u64 + 1 + 1; // Allocate the right size for the vector. nodemask.resize((node as usize / 64) + 1, 0); // Fill the global bitmask through the nodemask vector. let idx = (node / 64) as usize; let shift = node % 64; nodemask[idx] |= 1u64 << shift; // Policies are enforced by using MPOL_MF_MOVE flag as it will // force the kernel to move all pages that might have been already // allocated to the proper set of NUMA nodes. MPOL_MF_STRICT is // used to throw an error if MPOL_MF_MOVE didn't succeed. // MPOL_BIND is the selected mode as it specifies a strict policy // that restricts memory allocation to the nodes specified in the // nodemask. Self::mbind(addr, len, mode, nodemask, maxnode, flags) .map_err(Error::ApplyNumaPolicy)?; } Ok(Arc::new(region)) } // Update the GuestMemoryMmap with the new range fn add_region(&mut self, region: Arc<GuestRegionMmap>) -> Result<(), Error> { let guest_memory = self .guest_memory .memory() .insert_region(region) .map_err(Error::GuestMemory)?; self.guest_memory.lock().unwrap().replace(guest_memory); Ok(()) } // // Calculate the start address of an area next to RAM. // // If the next area is device space, there is no gap. // If the next area is hotplugged RAM, the start address needs to be aligned // to 128MiB boundary, and a gap of 256MiB need to be set before it. // On x86_64, it must also start at the 64bit start. 
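// Worked example (illustrative figures, not from the original comment): with
// RAM ending at 5 GiB - 1 and `with_gap` set, the expression below computes
// (0x1_4000_0000 + 256 MiB) rounded down to a 128 MiB boundary, i.e.
// 0x1_5000_0000, leaving exactly the 256 MiB gap described above.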
#[allow(clippy::let_and_return)] fn start_addr(mem_end: GuestAddress, with_gap: bool) -> GuestAddress { let start_addr = if with_gap { GuestAddress((mem_end.0 + 1 + (256 << 20)) & !((128 << 20) - 1)) } else { mem_end.unchecked_add(1) }; #[cfg(target_arch = "x86_64")] if mem_end < arch::layout::MEM_32BIT_RESERVED_START { return arch::layout::RAM_64BIT_START; } start_addr } fn hotplug_ram_region(&mut self, size: usize) -> Result<Arc<GuestRegionMmap>, Error> { info!("Hotplugging new RAM: {}", size); // Check that there is a free slot if self.next_hotplug_slot >= HOTPLUG_COUNT { return Err(Error::NoSlotAvailable); } // "Inserted" DIMM must have a size that is a multiple of 128MiB if size % (128 << 20) != 0 { return Err(Error::InvalidSize); } let start_addr = MemoryManager::start_addr(self.guest_memory.memory().last_addr(), true); if start_addr.checked_add(size.try_into().unwrap()).unwrap() >= self.start_of_device_area() { return Err(Error::InsufficientHotplugRAM); } // Allocate memory for the region let region = MemoryManager::create_ram_region( &None, 0, start_addr, size, false, self.shared, self.hugepages, None, &None, )?; // Map it into the guest self.create_userspace_mapping( region.start_addr().0, region.len() as u64, region.as_ptr() as u64, self.mergeable, false, )?; // Tell the allocator self.allocator .lock() .unwrap() .allocate_mmio_addresses(Some(start_addr), size as GuestUsize, None) .ok_or(Error::MemoryRangeAllocation)?; // Update the slot so that it can be queried via the I/O port let mut slot = &mut self.hotplug_slots[self.next_hotplug_slot]; slot.active = true; slot.inserting = true; slot.base = region.start_addr().0; slot.length = region.len() as u64; self.next_hotplug_slot += 1; self.add_region(Arc::clone(&region))?; Ok(region) } pub fn set_balloon(&mut self, balloon: Arc<Mutex<virtio_devices::Balloon>>) { self.balloon = Some(balloon); } pub fn guest_memory(&self) -> GuestMemoryAtomic<GuestMemoryMmap> { self.guest_memory.clone() } pub fn boot_guest_memory(&self) -> GuestMemoryMmap { self.boot_guest_memory.clone() } pub fn allocator(&self) -> Arc<Mutex<SystemAllocator>> { self.allocator.clone() } pub fn start_of_device_area(&self) -> GuestAddress { self.start_of_device_area } pub fn end_of_device_area(&self) -> GuestAddress { self.end_of_device_area } pub fn allocate_memory_slot(&mut self) -> u32 { let slot_id = self.next_memory_slot; self.next_memory_slot += 1; slot_id } pub fn create_userspace_mapping( &mut self, guest_phys_addr: u64, memory_size: u64, userspace_addr: u64, mergeable: bool, readonly: bool, ) -> Result<u32, Error> { let slot = self.allocate_memory_slot(); let mem_region = self.vm.make_user_memory_region( slot, guest_phys_addr, memory_size, userspace_addr, readonly, ); self.vm .set_user_memory_region(mem_region) .map_err(Error::SetUserMemoryRegion)?; // Mark the pages as mergeable if explicitly asked for. if mergeable { // Safe because the address and size are valid since the // mmap succeeded. let ret = unsafe { libc::madvise( userspace_addr as *mut libc::c_void, memory_size as libc::size_t, libc::MADV_MERGEABLE, ) }; if ret != 0 { let err = io::Error::last_os_error(); // Safe to unwrap because the error is constructed with // last_os_error(), which ensures the output will be Some(). 
let errno = err.raw_os_error().unwrap(); if errno == libc::EINVAL { warn!("kernel not configured with CONFIG_KSM"); } else { warn!("madvise error: {}", err); } warn!("failed to mark pages as mergeable"); } } info!( "Created userspace mapping: {:x} -> {:x} {:x}", guest_phys_addr, userspace_addr, memory_size ); Ok(slot) } pub fn remove_userspace_mapping( &mut self, guest_phys_addr: u64, memory_size: u64, userspace_addr: u64, mergeable: bool, slot: u32, ) -> Result<(), Error> { let mem_region = self.vm.make_user_memory_region( slot, guest_phys_addr, 0, /* memory_size -- using 0 removes this slot */ userspace_addr, false, /* readonly -- don't care */ ); self.vm .set_user_memory_region(mem_region) .map_err(Error::SetUserMemoryRegion)?; // Mark the pages as unmergeable if there were previously marked as // mergeable. if mergeable { // Safe because the address and size are valid as the region was // previously advised. let ret = unsafe { libc::madvise( userspace_addr as *mut libc::c_void, memory_size as libc::size_t, libc::MADV_UNMERGEABLE, ) }; if ret != 0 { let err = io::Error::last_os_error(); // Safe to unwrap because the error is constructed with // last_os_error(), which ensures the output will be Some(). let errno = err.raw_os_error().unwrap(); if errno == libc::EINVAL { warn!("kernel not configured with CONFIG_KSM"); } else { warn!("madvise error: {}", err); } warn!("failed to mark pages as unmergeable"); } } info!( "Removed userspace mapping: {:x} -> {:x} {:x}", guest_phys_addr, userspace_addr, memory_size ); Ok(()) } pub fn virtio_mem_resize(&mut self, id: &str, size: u64) -> Result<(), Error> { if let Some(memory_zone) = self.memory_zones.get_mut(id) { if let Some(virtio_mem_zone) = memory_zone.virtio_mem_zone() { virtio_mem_zone .resize_handler() .work(size) .map_err(Error::VirtioMemResizeFail)?; } else { error!("Failed resizing virtio-mem region: No virtio-mem handler"); return Err(Error::MissingVirtioMemHandler); } return Ok(()); } error!("Failed resizing virtio-mem region: Unknown memory zone"); Err(Error::UnknownMemoryZone) } pub fn balloon_resize(&mut self, expected_ram: u64) -> Result<u64, Error> { let mut balloon_size = 0; if let Some(balloon) = &self.balloon { if expected_ram < self.current_ram { balloon_size = self.current_ram - expected_ram; } balloon .lock() .unwrap() .resize(balloon_size) .map_err(Error::VirtioBalloonResizeFail)?; } Ok(balloon_size) } /// In case this function resulted in adding a new memory region to the /// guest memory, the new region is returned to the caller. The virtio-mem /// use case never adds a new region as the whole hotpluggable memory has /// already been allocated at boot time. pub fn resize(&mut self, desired_ram: u64) -> Result<Option<Arc<GuestRegionMmap>>, Error> { if self.user_provided_zones { error!( "Not allowed to resize guest memory when backed with user \ defined memory zones." 
); return Err(Error::InvalidResizeWithMemoryZones); } let mut region: Option<Arc<GuestRegionMmap>> = None; match self.hotplug_method { HotplugMethod::VirtioMem => { if desired_ram >= self.boot_ram { self.virtio_mem_resize(DEFAULT_MEMORY_ZONE, desired_ram - self.boot_ram)?; self.current_ram = desired_ram; } } HotplugMethod::Acpi => { if desired_ram >= self.current_ram { region = Some(self.hotplug_ram_region((desired_ram - self.current_ram) as usize)?); self.current_ram = desired_ram; } } } Ok(region) } pub fn resize_zone(&mut self, id: &str, virtio_mem_size: u64) -> Result<(), Error> { if !self.user_provided_zones { error!( "Not allowed to resize guest memory zone when no zone is \ defined." ); return Err(Error::ResizeZone); } self.virtio_mem_resize(id, virtio_mem_size) } #[cfg(target_arch = "x86_64")] pub fn setup_sgx(&mut self, sgx_epc_config: Vec<SgxEpcConfig>) -> Result<(), Error> { // Go over each EPC section and verify its size is a 4k multiple. At // the same time, calculate the total size needed for the contiguous // EPC region. let mut epc_region_size = 0; for epc_section in sgx_epc_config.iter() { if epc_section.size == 0 { return Err(Error::EpcSectionSizeInvalid); } if epc_section.size & 0x0fff != 0 { return Err(Error::EpcSectionSizeInvalid); } epc_region_size += epc_section.size; } // Now that we know about the total size for the EPC region, we can // proceed with the allocation of the entire range. The EPC region // must be 4kiB aligned. let epc_region_start = self .allocator .lock() .unwrap() .allocate_mmio_addresses(None, epc_region_size as GuestUsize, Some(0x1000)) .ok_or(Error::SgxEpcRangeAllocation)?; let mut sgx_epc_region = SgxEpcRegion::new(epc_region_start, epc_region_size as GuestUsize); // Each section can be memory mapped into the allocated region. let mut epc_section_start = epc_region_start.raw_value(); for epc_section in sgx_epc_config.iter() { let file = OpenOptions::new() .read(true) .write(true) .open("/dev/sgx/virt_epc") .map_err(Error::SgxVirtEpcOpen)?; let prot = PROT_READ | PROT_WRITE; let mut flags = MAP_NORESERVE | MAP_SHARED; if epc_section.prefault { flags |= MAP_POPULATE; } // We can't use the vm-memory crate to perform the memory mapping // here as it would try to ensure the size of the backing file is // matching the size of the expected mapping. The /dev/sgx/virt_epc // device does not work that way, it provides a file descriptor // which is not matching the mapping size, as it's a just a way to // let KVM know that an EPC section is being created for the guest. 
let host_addr = unsafe { libc::mmap( std::ptr::null_mut(), epc_section.size as usize, prot, flags, file.as_raw_fd(), 0 as libc::off_t, ) } as u64; let _mem_slot = self.create_userspace_mapping( epc_section_start, epc_section.size, host_addr, false, false, )?; sgx_epc_region.push(SgxEpcSection::new( GuestAddress(epc_section_start), epc_section.size as GuestUsize, )); epc_section_start += epc_section.size; } self.sgx_epc_region = Some(sgx_epc_region); Ok(()) } #[cfg(target_arch = "x86_64")] pub fn sgx_epc_region(&self) -> &Option<SgxEpcRegion> { &self.sgx_epc_region } pub fn is_hardlink(f: &File) -> bool { let mut stat = std::mem::MaybeUninit::<libc::stat>::uninit(); let ret = unsafe { libc::fstat(f.as_raw_fd(), stat.as_mut_ptr()) }; if ret != 0 { error!("Couldn't fstat the backing file"); return false; } unsafe { (*stat.as_ptr()).st_nlink as usize > 0 } } pub fn memory_zones(&self) -> &MemoryZones { &self.memory_zones } } #[cfg(feature = "acpi")] struct MemoryNotify { slot_id: usize, } #[cfg(feature = "acpi")] impl Aml for MemoryNotify { fn to_aml_bytes(&self) -> Vec<u8> { let object = aml::Path::new(&format!("M{:03}", self.slot_id)); aml::If::new( &aml::Equal::new(&aml::Arg(0), &self.slot_id), vec![&aml::Notify::new(&object, &aml::Arg(1))], ) .to_aml_bytes() } } #[cfg(feature = "acpi")] struct MemorySlot { slot_id: usize, } #[cfg(feature = "acpi")] impl Aml for MemorySlot { fn to_aml_bytes(&self) -> Vec<u8> { aml::Device::new( format!("M{:03}", self.slot_id).as_str().into(), vec![ &aml::Name::new("_HID".into(), &aml::EISAName::new("PNP0C80")), &aml::Name::new("_UID".into(), &self.slot_id), /* _STA return value: Bit [0] – Set if the device is present. Bit [1] – Set if the device is enabled and decoding its resources. Bit [2] – Set if the device should be shown in the UI. Bit [3] – Set if the device is functioning properly (cleared if device failed its diagnostics). Bit [4] – Set if the battery is present. Bits [31:5] – Reserved (must be cleared). 
*/ &aml::Method::new( "_STA".into(), 0, false, // Call into MSTA method which will interrogate device vec![&aml::Return::new(&aml::MethodCall::new( "MSTA".into(), vec![&self.slot_id], ))], ), // Get details of memory &aml::Method::new( "_CRS".into(), 0, false, // Call into MCRS which provides actual memory details vec![&aml::Return::new(&aml::MethodCall::new( "MCRS".into(), vec![&self.slot_id], ))], ), // We don't expose any NUMA characteristics so all memory is in the same "proximity domain" &aml::Method::new( "_PXM".into(), 0, false, // We aren't NUMA so associate all RAM into the same proximity region (zero) vec![&aml::Return::new(&0u32)], ), ], ) .to_aml_bytes() } } #[cfg(feature = "acpi")] struct MemorySlots { slots: usize, } #[cfg(feature = "acpi")] impl Aml for MemorySlots { fn to_aml_bytes(&self) -> Vec<u8> { let mut bytes = Vec::new(); for slot_id in 0..self.slots { bytes.extend_from_slice(&MemorySlot { slot_id }.to_aml_bytes()); } bytes } } #[cfg(feature = "acpi")] struct MemoryMethods { slots: usize, } #[cfg(feature = "acpi")] impl Aml for MemoryMethods { fn to_aml_bytes(&self) -> Vec<u8> { let mut bytes = Vec::new(); // Add "MTFY" notification method let mut memory_notifies = Vec::new(); for slot_id in 0..self.slots { memory_notifies.push(MemoryNotify { slot_id }); } let mut memory_notifies_refs: Vec<&dyn aml::Aml> = Vec::new(); for memory_notifier in memory_notifies.iter() { memory_notifies_refs.push(memory_notifier); } bytes.extend_from_slice( &aml::Method::new("MTFY".into(), 2, true, memory_notifies_refs).to_aml_bytes(), ); // MSCN method bytes.extend_from_slice( &aml::Method::new( "MSCN".into(), 0, true, vec![ // Take lock defined above &aml::Acquire::new("MLCK".into(), 0xfff), &aml::Store::new(&aml::Local(0), &aml::ZERO), &aml::While::new( &aml::LessThan::new(&aml::Local(0), &self.slots), vec![ // Write slot number (in first argument) to I/O port via field &aml::Store::new(&aml::Path::new("\\_SB_.MHPC.MSEL"), &aml::Local(0)), // Check if MINS bit is set (inserting) &aml::If::new( &aml::Equal::new(&aml::Path::new("\\_SB_.MHPC.MINS"), &aml::ONE), // Notify device if it is vec![ &aml::MethodCall::new( "MTFY".into(), vec![&aml::Local(0), &aml::ONE], ), // Reset MINS bit &aml::Store::new( &aml::Path::new("\\_SB_.MHPC.MINS"), &aml::ONE, ), ], ), // Check if MRMV bit is set &aml::If::new( &aml::Equal::new(&aml::Path::new("\\_SB_.MHPC.MRMV"), &aml::ONE), // Notify device if it is (with the eject constant 0x3) vec![ &aml::MethodCall::new( "MTFY".into(), vec![&aml::Local(0), &3u8], ), // Reset MRMV bit &aml::Store::new( &aml::Path::new("\\_SB_.MHPC.MRMV"), &aml::ONE, ), ], ), &aml::Add::new(&aml::Local(0), &aml::Local(0), &aml::ONE), ], ), // Release lock &aml::Release::new("MLCK".into()), ], ) .to_aml_bytes(), ); bytes.extend_from_slice( // Memory status method &aml::Method::new( "MSTA".into(), 1, true, vec![ // Take lock defined above &aml::Acquire::new("MLCK".into(), 0xfff), // Write slot number (in first argument) to I/O port via field &aml::Store::new(&aml::Path::new("\\_SB_.MHPC.MSEL"), &aml::Arg(0)), &aml::Store::new(&aml::Local(0), &aml::ZERO), // Check if MEN_ bit is set, if so make the local variable 0xf (see _STA for details of meaning) &aml::If::new( &aml::Equal::new(&aml::Path::new("\\_SB_.MHPC.MEN_"), &aml::ONE), vec![&aml::Store::new(&aml::Local(0), &0xfu8)], ), // Release lock &aml::Release::new("MLCK".into()), // Return 0 or 0xf &aml::Return::new(&aml::Local(0)), ], ) .to_aml_bytes(), ); bytes.extend_from_slice( // Memory range method &aml::Method::new( 
"MCRS".into(), 1, true, vec![ // Take lock defined above &aml::Acquire::new("MLCK".into(), 0xfff), // Write slot number (in first argument) to I/O port via field &aml::Store::new(&aml::Path::new("\\_SB_.MHPC.MSEL"), &aml::Arg(0)), &aml::Name::new( "MR64".into(), &aml::ResourceTemplate::new(vec![&aml::AddressSpace::new_memory( aml::AddressSpaceCachable::Cacheable, true, 0x0000_0000_0000_0000u64, 0xFFFF_FFFF_FFFF_FFFEu64, )]), ), &aml::CreateField::<u32>::new(&aml::Path::new("MR64"), &14usize, "MINL".into()), &aml::CreateField::<u32>::new(&aml::Path::new("MR64"), &18usize, "MINH".into()), &aml::CreateField::<u32>::new(&aml::Path::new("MR64"), &22usize, "MAXL".into()), &aml::CreateField::<u32>::new(&aml::Path::new("MR64"), &26usize, "MAXH".into()), &aml::CreateField::<u32>::new(&aml::Path::new("MR64"), &38usize, "LENL".into()), &aml::CreateField::<u32>::new(&aml::Path::new("MR64"), &42usize, "LENH".into()), &aml::Store::new(&aml::Path::new("MINL"), &aml::Path::new("\\_SB_.MHPC.MHBL")), &aml::Store::new(&aml::Path::new("MINH"), &aml::Path::new("\\_SB_.MHPC.MHBH")), &aml::Store::new(&aml::Path::new("LENL"), &aml::Path::new("\\_SB_.MHPC.MHLL")), &aml::Store::new(&aml::Path::new("LENH"), &aml::Path::new("\\_SB_.MHPC.MHLH")), &aml::Add::new( &aml::Path::new("MAXL"), &aml::Path::new("MINL"), &aml::Path::new("LENL"), ), &aml::Add::new( &aml::Path::new("MAXH"), &aml::Path::new("MINH"), &aml::Path::new("LENH"), ), &aml::Subtract::new( &aml::Path::new("MAXH"), &aml::Path::new("MAXH"), &aml::ONE, ), // Release lock &aml::Release::new("MLCK".into()), &aml::Return::new(&aml::Path::new("MR64")), ], ) .to_aml_bytes(), ); bytes } } #[cfg(feature = "acpi")] impl Aml for MemoryManager { fn to_aml_bytes(&self) -> Vec<u8> { let mut bytes = Vec::new(); // Memory Hotplug Controller bytes.extend_from_slice( &aml::Device::new( "_SB_.MHPC".into(), vec![ &aml::Name::new("_HID".into(), &aml::EISAName::new("PNP0A06")), &aml::Name::new("_UID".into(), &"Memory Hotplug Controller"), // Mutex to protect concurrent access as we write to choose slot and then read back status &aml::Mutex::new("MLCK".into(), 0), // I/O port for memory controller &aml::Name::new( "_CRS".into(), &aml::ResourceTemplate::new(vec![&aml::IO::new( 0x0a00, 0x0a00, 0x01, 0x18, )]), ), // OpRegion and Fields map I/O port into individual field values &aml::OpRegion::new("MHPR".into(), aml::OpRegionSpace::SystemIO, 0xa00, 0x18), &aml::Field::new( "MHPR".into(), aml::FieldAccessType::DWord, aml::FieldUpdateRule::Preserve, vec![ aml::FieldEntry::Named(*b"MHBL", 32), // Base (low 4 bytes) aml::FieldEntry::Named(*b"MHBH", 32), // Base (high 4 bytes) aml::FieldEntry::Named(*b"MHLL", 32), // Length (low 4 bytes) aml::FieldEntry::Named(*b"MHLH", 32), // Length (high 4 bytes) ], ), &aml::Field::new( "MHPR".into(), aml::FieldAccessType::DWord, aml::FieldUpdateRule::Preserve, vec![ aml::FieldEntry::Reserved(128), aml::FieldEntry::Named(*b"MHPX", 32), // PXM ], ), &aml::Field::new( "MHPR".into(), aml::FieldAccessType::Byte, aml::FieldUpdateRule::WriteAsZeroes, vec![ aml::FieldEntry::Reserved(160), aml::FieldEntry::Named(*b"MEN_", 1), // Enabled aml::FieldEntry::Named(*b"MINS", 1), // Inserting aml::FieldEntry::Named(*b"MRMV", 1), // Removing aml::FieldEntry::Named(*b"MEJ0", 1), // Ejecting ], ), &aml::Field::new( "MHPR".into(), aml::FieldAccessType::DWord, aml::FieldUpdateRule::Preserve, vec![ aml::FieldEntry::Named(*b"MSEL", 32), // Selector aml::FieldEntry::Named(*b"MOEV", 32), // Event aml::FieldEntry::Named(*b"MOSC", 32), // OSC ], ), &MemoryMethods { slots: 
self.hotplug_slots.len(), }, &MemorySlots { slots: self.hotplug_slots.len(), }, ], ) .to_aml_bytes(), ); #[cfg(target_arch = "x86_64")] { if let Some(sgx_epc_region) = &self.sgx_epc_region { let min = sgx_epc_region.start().raw_value() as u64; let max = min + sgx_epc_region.size() as u64 - 1; // SGX EPC region bytes.extend_from_slice( &aml::Device::new( "_SB_.EPC_".into(), vec![ &aml::Name::new("_HID".into(), &aml::EISAName::new("INT0E0C")), // QWORD describing the EPC region start and size &aml::Name::new( "_CRS".into(), &aml::ResourceTemplate::new(vec![&aml::AddressSpace::new_memory( aml::AddressSpaceCachable::NotCacheable, true, min, max, )]), ), &aml::Method::new( "_STA".into(), 0, false, vec![&aml::Return::new(&0xfu8)], ), ], ) .to_aml_bytes(), ); } } bytes } } impl Pausable for MemoryManager {} #[derive(Serialize, Deserialize)] #[serde(remote = "GuestAddress")] pub struct GuestAddressDef(pub u64); #[derive(Clone, Serialize, Deserialize)] pub struct MemoryRegion { backing_file: Option<PathBuf>, #[serde(with = "GuestAddressDef")] start_addr: GuestAddress, size: GuestUsize, } #[derive(Serialize, Deserialize)] pub struct MemoryManagerSnapshotData { memory_regions: Vec<MemoryRegion>, } impl Snapshottable for MemoryManager { fn id(&self) -> String { MEMORY_MANAGER_SNAPSHOT_ID.to_string() } fn snapshot(&mut self) -> result::Result<Snapshot, MigratableError> { let mut memory_manager_snapshot = Snapshot::new(MEMORY_MANAGER_SNAPSHOT_ID); let guest_memory = self.guest_memory.memory(); let mut memory_regions: Vec<MemoryRegion> = Vec::new(); guest_memory.with_regions_mut(|index, region| { if region.len() == 0 { return Err(MigratableError::Snapshot(anyhow!("Zero length region"))); } let mut backing_file = Some(PathBuf::from(format!("memory-region-{}", index))); if let Some(file_offset) = region.file_offset() { if (region.flags() & libc::MAP_SHARED == libc::MAP_SHARED) && Self::is_hardlink(file_offset.file()) { // In this very specific case, we know the memory region // is backed by a file on the host filesystem that can be // accessed by the user, and additionally the mapping is // shared, which means that modifications to the content // are written to the actual file. // When meeting these conditions, we can skip the copy of // the memory content for this specific region, as we can // assume the user will have it saved through the backing // file already. backing_file = None; } } memory_regions.push(MemoryRegion { backing_file, start_addr: region.start_addr(), size: region.len(), }); Ok(()) })?; // Store locally this list of regions as it will be used through the // Transportable::send() implementation. The point is to avoid the // duplication of code regarding the creation of the path for each // region. The 'snapshot' step creates the list of memory regions, // including information about the need to copy a memory region or // not. This saves the 'send' step having to go through the same // process, and instead it can directly proceed with storing the // memory region content for the regions requiring it. 
self.snapshot_memory_regions = memory_regions.clone(); let snapshot_data_section = serde_json::to_vec(&MemoryManagerSnapshotData { memory_regions }) .map_err(|e| MigratableError::Snapshot(e.into()))?; memory_manager_snapshot.add_data_section(SnapshotDataSection { id: format!("{}-section", MEMORY_MANAGER_SNAPSHOT_ID), snapshot: snapshot_data_section, }); let mut memory_snapshot = self.snapshot.lock().unwrap(); *memory_snapshot = Some(guest_memory); Ok(memory_manager_snapshot) } } impl Transportable for MemoryManager { fn send( &self, _snapshot: &Snapshot, destination_url: &str, ) -> result::Result<(), MigratableError> { let url = Url::parse(destination_url).map_err(|e| { MigratableError::MigrateSend(anyhow!("Could not parse destination URL: {}", e)) })?; match url.scheme() { "file" => { let vm_memory_snapshot_path = url .to_file_path() .map_err(|_| { MigratableError::MigrateSend(anyhow!( "Could not convert file URL to a file path" )) }) .and_then(|path| { if !path.is_dir() { return Err(MigratableError::MigrateSend(anyhow!( "Destination is not a directory" ))); } Ok(path) })?; if let Some(guest_memory) = &*self.snapshot.lock().unwrap() { for region in self.snapshot_memory_regions.iter() { if let Some(backing_file) = &region.backing_file { let mut memory_region_path = vm_memory_snapshot_path.clone(); memory_region_path.push(backing_file); // Create the snapshot file for the region let mut memory_region_file = OpenOptions::new() .read(true) .write(true) .create_new(true) .open(memory_region_path) .map_err(|e| MigratableError::MigrateSend(e.into()))?; guest_memory .write_to( region.start_addr, &mut memory_region_file, region.size as usize, ) .map_err(|e| MigratableError::MigrateSend(e.into()))?; } } } } _ => { return Err(MigratableError::MigrateSend(anyhow!( "Unsupported VM transport URL scheme: {}", url.scheme() ))) } } Ok(()) } } impl Migratable for MemoryManager {}
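// --- Added sketch (not part of the file above) -----------------------------
// A minimal illustration of the decision made in `snapshot()`: a region that
// is mapped MAP_SHARED from a file still present on disk does not need its
// bytes copied into the snapshot, because the backing file already holds the
// data. Assumes the `libc` crate; `region_needs_copy` and `has_hard_link`
// are illustrative names, not part of the MemoryManager API.
use std::fs::File;
use std::os::unix::io::AsRawFd;

fn has_hard_link(f: &File) -> bool {
    let mut stat = std::mem::MaybeUninit::<libc::stat>::uninit();
    // SAFETY: fstat only writes into the buffer we hand it.
    let ret = unsafe { libc::fstat(f.as_raw_fd(), stat.as_mut_ptr()) };
    ret == 0 && unsafe { (*stat.as_ptr()).st_nlink as usize > 0 }
}

/// Returns true when the region's bytes must be written into the snapshot.
fn region_needs_copy(flags: i32, backing: Option<&File>) -> bool {
    match backing {
        // Shared mapping over a persistent file: the file itself is the data.
        Some(f) if flags & libc::MAP_SHARED == libc::MAP_SHARED => !has_hard_link(f),
        // Private or anonymous memory exists only in the guest address space.
        _ => true,
    }
}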
36.861563
116
0.496707
21d90c2bb9ffd16b79982425732fcd1683b73966
5,983
// Copyright 2020 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. pub mod task { use core::task::{Context, Poll}; use std::future::Future; use std::pin::Pin; /// A handle to a task. /// /// A task can be polled for the output of the future it is executing. A /// dropped task will be cancelled after dropping. To immediately cancel a /// task, call the cancel() method. To run a task to completion without /// retaining the Task handle, call the detach() method. #[derive(Debug)] pub struct Task<T>(async_executor::Task<T>); impl<T: 'static> Task<T> { /// Poll the given future on a thread dedicated to blocking tasks. /// /// Blocking tasks should ideally be constrained to only blocking regions /// of code, such as the system call invocation that is being made that /// needs to avoid blocking the reactor. For such a use case, using /// blocking::unblock() directly may be more efficient. pub fn blocking(fut: impl Future<Output = T> + Send + 'static) -> Self where T: Send, { Self::spawn(super::executor::blocking(fut)) } /// spawn a new `Send` task onto the executor. pub fn spawn(fut: impl Future<Output = T> + Send + 'static) -> Self where T: Send, { Self(super::executor::spawn(fut)) } /// spawn a new non-`Send` task onto the single threaded executor. pub fn local<'a>(fut: impl Future<Output = T> + 'static) -> Self { Self(super::executor::local(fut)) } /// detach the Task handle. The contained future will be polled until completion. pub fn detach(self) { self.0.detach() } /// cancel a task and wait for cancellation to complete. pub async fn cancel(self) -> Option<T> { self.0.cancel().await } } impl<T> Future for Task<T> { type Output = T; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { use futures_lite::FutureExt; self.0.poll(cx) } } } pub mod executor { use crate::runtime::WakeupTime; use easy_parallel::Parallel; use fuchsia_zircon_status as zx_status; use std::future::Future; /// A time relative to the executor's clock. pub use std::time::Instant as Time; impl WakeupTime for Time { fn into_time(self) -> Time { self } } pub(crate) fn blocking<T: Send + 'static>( fut: impl Future<Output = T> + Send + 'static, ) -> impl Future<Output = T> { blocking::unblock(|| LOCAL.with(|local| async_io::block_on(GLOBAL.run(local.run(fut))))) } pub(crate) fn spawn<T: 'static>( fut: impl Future<Output = T> + Send + 'static, ) -> async_executor::Task<T> where T: Send, { GLOBAL.spawn(fut) } pub(crate) fn local<T>(fut: impl Future<Output = T> + 'static) -> async_executor::Task<T> where T: 'static, { LOCAL.with(|local| local.spawn(fut)) } thread_local! { static LOCAL: async_executor::LocalExecutor<'static> = async_executor::LocalExecutor::new(); } static GLOBAL: async_executor::Executor<'_> = async_executor::Executor::new(); /// An executor. /// Mostly API-compatible with the Fuchsia variant (without the run_until_stalled or /// fake time pieces). /// The current implementation of Executor does not isolate work /// (as the underlying executor is not yet capable of this). pub struct Executor; impl Executor { /// Create a new executor running with actual time. pub fn new() -> Result<Self, zx_status::Status> { Ok(Self {}) } /// Run a single future to completion using multiple threads. // Takes `&mut self` to ensure that only one thread-manager is running at a time. 
pub fn run<T>(&mut self, main_future: impl Future<Output = T>, num_threads: usize) -> T { let (signal, shutdown) = async_channel::unbounded::<()>(); let (_, res) = Parallel::new() .each(0..num_threads, |_| { LOCAL.with(|local| { let _ = async_io::block_on(local.run(GLOBAL.run(shutdown.recv()))); }) }) .finish(|| { LOCAL.with(|local| { async_io::block_on(local.run(GLOBAL.run(async { let res = main_future.await; drop(signal); res }))) }) }); res } /// Run a single future to completion on a single thread. // Takes `&mut self` to ensure that only one thread-manager is running at a time. pub fn run_singlethreaded<T>(&mut self, main_future: impl Future<Output = T>) -> T { LOCAL.with(|local| async_io::block_on(GLOBAL.run(local.run(main_future)))) } } } pub mod timer { use crate::runtime::WakeupTime; use futures::prelude::*; use std::pin::Pin; use std::task::{Context, Poll}; /// An asynchronous timer. #[derive(Debug)] #[must_use = "futures do nothing unless polled"] pub struct Timer(async_io::Timer); impl Timer { /// Create a new timer scheduled to fire at `time`. pub fn new<WT>(time: WT) -> Self where WT: WakeupTime, { Timer(async_io::Timer::at(time.into_time())) } } impl Future for Timer { type Output = (); fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { self.0.poll_unpin(cx).map(drop) } } }
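// --- Added sketch (not part of the module above) ---------------------------
// A hypothetical usage example of the Executor/Task API defined above,
// assuming the module is importable as `fuchsia_async_shim` (the name is an
// assumption). It shows the intended pattern: spawn tasks onto the global
// executor, then drive everything from a single thread with
// `run_singlethreaded` (or across threads with `run`).
fn main() {
    let mut executor = fuchsia_async_shim::executor::Executor::new().unwrap();
    let answer = executor.run_singlethreaded(async {
        // A detached task keeps running to completion without a handle.
        fuchsia_async_shim::task::Task::spawn(async { /* background work */ }).detach();
        // Tasks are futures themselves, so awaiting one yields its output.
        fuchsia_async_shim::task::Task::spawn(async { 21 * 2 }).await
    });
    assert_eq!(answer, 42);
}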
32.873626
100
0.55875
188db9ac45a0a502e7c5b440cebec42008e5e1c0
1,612
// Copyright 2014-2015 The GeoRust Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use tokenizer::PeekableTokens; use types::linestring::LineString; use FromTokens; use Geometry; #[derive(Default)] pub struct Polygon(pub Vec<LineString>); impl Polygon { pub fn as_item(self) -> Geometry { Geometry::Polygon(self) } } impl FromTokens for Polygon { fn from_tokens(tokens: &mut PeekableTokens) -> Result<Self, &'static str> { let result = FromTokens::comma_many(<LineString as FromTokens>::from_tokens_with_parens, tokens); result.map(|vec| Polygon(vec)) } } #[cfg(test)] mod tests { use super::Polygon; use {Geometry, Wkt}; #[test] fn basic_polygon() { let mut wkt = Wkt::from_str("POLYGON ((8 4, 4 0, 0 4, 8 4), (7 3, 4 1, 1 4, 7 3))") .ok() .unwrap(); assert_eq!(1, wkt.items.len()); let lines = match wkt.items.pop().unwrap() { Geometry::Polygon(Polygon(lines)) => lines, _ => unreachable!(), }; assert_eq!(2, lines.len()); } }
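// --- Added sketch (not part of the file above) -----------------------------
// A freestanding, simplified illustration of the `comma_many` composition
// used by `Polygon::from_tokens`: apply a sub-parser to each comma-separated
// element and collect the results, failing on the first error. The real
// implementation walks a PeekableTokens stream (so nested parentheses are
// handled correctly); this toy version works on &str purely to show the shape.
fn comma_many<T, E>(
    parse_one: impl Fn(&str) -> Result<T, E>,
    input: &str,
) -> Result<Vec<T>, E> {
    input.split(',').map(|piece| parse_one(piece.trim())).collect()
}

fn main() {
    // Toy element parser: a single integer per element.
    let parsed = comma_many(|s| s.parse::<i32>(), "8, 4, 0");
    assert_eq!(parsed.unwrap(), vec![8, 4, 0]);
}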
29.309091
96
0.643921
ccc7d30b37992d96f5c1d3536d2003290567a7a2
9,197
use std::{thread, time}; use ftp::FtpStream; use pretty_assertions::assert_eq; macro_rules! start_server { ( $addr:expr, $path:expr ) => { thread::spawn(move || { let server = firetrap::Server::with_root($path); server.listen($addr.clone()); }); // Give the server some time to start thread::sleep(time::Duration::from_millis(100)); }; ( $addr:expr ) => { let root = std::env::temp_dir(); start_server!($addr, root) }; } #[test] fn connect() { let addr = "127.0.0.1:1234"; start_server!(addr); let mut _ftp_stream = FtpStream::connect(addr).unwrap(); } #[test] fn login() { let addr = "127.0.0.1:1235"; let username = "koen"; let password = "hoi"; start_server!(addr); let mut ftp_stream = FtpStream::connect(addr).unwrap(); let _ = ftp_stream.login(username, password).unwrap(); } #[test] fn noop() { let addr = "127.0.0.1:1236"; start_server!(addr); let mut ftp_stream = FtpStream::connect(addr).unwrap(); // Make sure we fail if we're not logged in let _ = ftp_stream.noop().unwrap_err(); let _ = ftp_stream.login("hoi", "jij").unwrap(); let _ = ftp_stream.noop().unwrap(); } #[test] fn get() { use std::io::Write; let addr = "127.0.0.1:1237"; let root = std::env::temp_dir(); let mut filename = root.clone(); start_server!(addr, root); // Create a temporary file in the FTP root that we'll retrieve filename.push("bla.txt"); let mut f = std::fs::File::create(filename.clone()).unwrap(); // Write some random data to our file let mut data = vec![0; 1024]; for x in data.iter_mut() { *x = rand::random(); } f.write_all(&data).unwrap(); // Retrieve the remote file let mut ftp_stream = FtpStream::connect(addr).unwrap(); // Make sure we fail if we're not logged in let _ = ftp_stream.simple_retr("bla.txt").unwrap_err(); let _ = ftp_stream.login("hoi", "jij").unwrap(); let remote_file = ftp_stream.simple_retr("bla.txt").unwrap(); let remote_data = remote_file.into_inner(); assert_eq!(remote_data, data); } #[test] fn put() { use std::io::Cursor; let addr = "127.0.0.1:1238"; start_server!(addr); let content = b"Hello from this test!\n"; let mut ftp_stream = FtpStream::connect(addr).unwrap(); let mut reader = Cursor::new(content); // Make sure we fail if we're not logged in ftp_stream.put("greeting.txt", &mut reader).unwrap_err(); let _ = ftp_stream.login("hoi", "jij").unwrap(); ftp_stream.put("greeting.txt", &mut reader).unwrap(); // retrieve file back again, and check if we got the same back. 
let remote_data = ftp_stream.simple_retr("greeting.txt").unwrap().into_inner(); assert_eq!(remote_data, content); } #[test] fn list() { let addr = "127.0.0.1:1239"; let root = std::env::temp_dir(); let path = root.clone(); start_server!(addr, path); // Create a filename in the ftp root that we will look for in the `LIST` output let path = root.join("test.txt"); { let _f = std::fs::File::create(path); } let mut ftp_stream = FtpStream::connect(addr).unwrap(); // Make sure we fail if we're not logged in let _list = ftp_stream.list(None).unwrap_err(); let _ = ftp_stream.login("hoi", "jij").unwrap(); let list = ftp_stream.list(None).unwrap(); let mut found = false; for entry in list { if entry.contains("test.txt") { found = true; break; } } assert!(found); } #[test] fn pwd() { let addr = "127.0.0.1:1240"; let root = std::env::temp_dir(); let path = root.clone(); start_server!(addr, path); let mut ftp_stream = FtpStream::connect(addr).unwrap(); // Make sure we fail if we're not logged in let _pwd = ftp_stream.pwd().unwrap_err(); let _ = ftp_stream.login("hoi", "jij").unwrap(); let pwd = ftp_stream.pwd().unwrap(); assert_eq!(&pwd, "/"); } #[test] fn cwd() { let addr = "127.0.0.1:1241"; let root = std::env::temp_dir(); let path = root.clone(); start_server!(addr, root); let mut ftp_stream = FtpStream::connect(addr).unwrap(); let dir_in_root = tempfile::TempDir::new_in(path).unwrap(); let basename = dir_in_root.path().file_name().unwrap(); // Make sure we fail if we're not logged in ftp_stream.cwd(basename.to_str().unwrap()).unwrap_err(); let _ = ftp_stream.login("hoi", "jij").unwrap(); ftp_stream.cwd(basename.to_str().unwrap()).unwrap(); let pwd = ftp_stream.pwd().unwrap(); assert_eq!(std::path::Path::new(&pwd), std::path::Path::new("/").join(&basename)); } #[test] fn cdup() { let addr = "127.0.0.1:1242"; let root = std::env::temp_dir(); let path = root.clone(); start_server!(addr, root); let mut ftp_stream = FtpStream::connect(addr).unwrap(); let dir_in_root = tempfile::TempDir::new_in(path).unwrap(); let basename = dir_in_root.path().file_name().unwrap(); // Make sure we fail if we're not logged in ftp_stream.cdup().unwrap_err(); let _ = ftp_stream.login("hoi", "jij").unwrap(); ftp_stream.cwd(basename.to_str().unwrap()).unwrap(); let pwd = ftp_stream.pwd().unwrap(); assert_eq!(std::path::Path::new(&pwd), std::path::Path::new("/").join(&basename)); ftp_stream.cdup().unwrap(); let pwd = ftp_stream.pwd().unwrap(); assert_eq!(std::path::Path::new(&pwd), std::path::Path::new("/")); } #[test] fn dele() { let addr = "127.0.0.1:1243"; let root = std::env::temp_dir(); start_server!(addr, root); let mut ftp_stream = FtpStream::connect(addr).unwrap(); let file_in_root = tempfile::NamedTempFile::new().unwrap(); let file_name = file_in_root.path().file_name().unwrap().to_str().unwrap(); // Make sure we fail if we're not logged in ftp_stream.rm(file_name).unwrap_err(); let _ = ftp_stream.login("hoi", "jij").unwrap(); ftp_stream.rm(file_name).unwrap(); assert_eq!(std::fs::metadata(file_name).unwrap_err().kind(), std::io::ErrorKind::NotFound); } #[test] fn quit() { let addr = "127.0.0.1:1244"; let root = std::env::temp_dir(); start_server!(addr, root); let mut ftp_stream = FtpStream::connect(addr).unwrap(); ftp_stream.quit().unwrap(); // Make sure the connection is actually closed // This may take some time, so we'll sleep for a bit. 
std::thread::sleep(std::time::Duration::from_millis(10)); ftp_stream.noop().unwrap_err(); } #[test] fn nlst() { let addr = "127.0.0.1:1245"; let root = tempfile::TempDir::new().unwrap().into_path(); let path = root.clone(); start_server!(addr, root); // Create a filename that we wanna see in the `NLST` output let path = path.join("test.txt"); { let _f = std::fs::File::create(path); } let mut ftp_stream = FtpStream::connect(addr).unwrap(); // Make sure we fail if we're not logged in let _list = ftp_stream.nlst(None).unwrap_err(); let _ = ftp_stream.login("hoi", "jij").unwrap(); let list = ftp_stream.nlst(None).unwrap(); assert_eq!(list, vec!["test.txt"]); } #[test] fn mkdir() { let addr = "127.0.0.1:1246"; let root = tempfile::TempDir::new().unwrap().into_path(); let server_root = root.clone(); start_server!(addr, server_root); let mut ftp_stream = FtpStream::connect(addr).unwrap(); let new_dir_name = "hallo"; // Make sure we fail if we're not logged in ftp_stream.mkdir(new_dir_name).unwrap_err(); let _ = ftp_stream.login("hoi", "jij").unwrap(); ftp_stream.mkdir(new_dir_name).unwrap(); let full_path = root.join(new_dir_name); let metadata = std::fs::metadata(full_path).unwrap(); assert!(metadata.is_dir()); } #[test] fn rename() { let addr = "127.0.0.1:1247"; let root = tempfile::TempDir::new().unwrap().into_path(); let server_root = root.clone(); start_server!(addr, server_root); // Create a file that we will rename let full_from = root.join("ikbenhier.txt"); let _f = std::fs::File::create(&full_from); let from_filename = full_from.file_name().unwrap().to_str().unwrap(); // What we'll rename our file to let full_to = root.join("nu ben ik hier.txt"); let to_filename = full_to.file_name().unwrap().to_str().unwrap(); let mut ftp_stream = FtpStream::connect(addr).expect("Failed to connect"); // Make sure we fail if we're not logged in ftp_stream.rename(&from_filename, &to_filename).expect_err("Rename accepted without logging in"); // Do the renaming let _ = ftp_stream.login("some", "user").unwrap(); ftp_stream.rename(&from_filename, &to_filename).expect("Failed to rename"); // Give the OS some time to actually rename the thingy. std::thread::sleep(std::time::Duration::from_millis(100)); // Make sure the old filename is gone std::fs::metadata(full_from).expect_err("Renamed file still exists with old name"); // Make sure the new filename exists let metadata = std::fs::metadata(full_to).expect("New filename not created"); assert!(metadata.is_file()); }
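// --- Added sketch (not part of the tests above) -----------------------------
// The `start_server!` macro above waits a fixed 100 ms for the server thread
// to start listening. A possible alternative, sketched here with std only, is
// to poll the address until a TCP connection succeeds or a deadline passes.
// The helper name is illustrative and not part of the test suite.
use std::net::TcpStream;
use std::time::{Duration, Instant};

fn wait_until_listening(addr: &str, timeout: Duration) -> bool {
    let deadline = Instant::now() + timeout;
    while Instant::now() < deadline {
        if TcpStream::connect(addr).is_ok() {
            return true;
        }
        std::thread::sleep(Duration::from_millis(10));
    }
    false
}

// Usage inside a test, instead of the unconditional sleep:
// start_server!(addr);
// assert!(wait_until_listening(addr, Duration::from_secs(2)));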
29.477564
101
0.624225
71d616338eed06fbf4b29bf1c29337e975901730
42,254
use crate::gen::block::Block; use crate::gen::nested::NamespaceEntries; use crate::gen::out::OutFile; use crate::gen::{builtin, include, Opt}; use crate::syntax::atom::Atom::{self, *}; use crate::syntax::symbol::Symbol; use crate::syntax::{ mangle, Api, Enum, ExternFn, ExternType, Pair, ResolvableName, Signature, Struct, Type, Types, Var, }; use proc_macro2::Ident; use std::collections::{HashMap, HashSet}; pub(super) fn gen(apis: &[Api], types: &Types, opt: &Opt, header: bool) -> Vec<u8> { let mut out_file = OutFile::new(header, opt, types); let out = &mut out_file; pick_includes_and_builtins(out, apis); out.include.extend(&opt.include); write_forward_declarations(out, apis); write_data_structures(out, apis); write_functions(out, apis); write_generic_instantiations(out); builtin::write(out); include::write(out); out_file.content() } fn write_forward_declarations(out: &mut OutFile, apis: &[Api]) { let needs_forward_declaration = |api: &&Api| match api { Api::Struct(_) | Api::CxxType(_) | Api::RustType(_) => true, Api::Enum(enm) => !out.types.cxx.contains(&enm.name.rust), _ => false, }; let apis_by_namespace = NamespaceEntries::new(apis.iter().filter(needs_forward_declaration).collect()); write(out, &apis_by_namespace, 0); fn write(out: &mut OutFile, ns_entries: &NamespaceEntries, indent: usize) { let apis = ns_entries.direct_content(); for api in apis { write!(out, "{:1$}", "", indent); match api { Api::Struct(strct) => write_struct_decl(out, &strct.name.cxx), Api::Enum(enm) => write_enum_decl(out, enm), Api::CxxType(ety) => write_struct_using(out, &ety.name), Api::RustType(ety) => write_struct_decl(out, &ety.name.cxx), _ => unreachable!(), } } for (namespace, nested_ns_entries) in ns_entries.nested_content() { writeln!(out, "{:2$}namespace {} {{", "", namespace, indent); write(out, nested_ns_entries, indent + 2); writeln!(out, "{:1$}}}", "", indent); } } } fn write_data_structures<'a>(out: &mut OutFile<'a>, apis: &'a [Api]) { let mut methods_for_type = HashMap::new(); for api in apis { if let Api::CxxFunction(efn) | Api::RustFunction(efn) = api { if let Some(receiver) = &efn.sig.receiver { methods_for_type .entry(&receiver.ty.rust) .or_insert_with(Vec::new) .push(efn); } } } let mut structs_written = HashSet::new(); let mut toposorted_structs = out.types.toposorted_structs.iter(); for api in apis { match api { Api::Struct(strct) if !structs_written.contains(&strct.name.rust) => { for next in &mut toposorted_structs { if !out.types.cxx.contains(&strct.name.rust) { out.next_section(); let methods = methods_for_type .get(&strct.name.rust) .map(Vec::as_slice) .unwrap_or_default(); write_struct(out, next, methods); } structs_written.insert(&next.name.rust); if next.name.rust == strct.name.rust { break; } } } Api::Enum(enm) => { out.next_section(); if out.types.cxx.contains(&enm.name.rust) { check_enum(out, enm); } else { write_enum(out, enm); } } Api::RustType(ety) => { if let Some(methods) = methods_for_type.get(&ety.name.rust) { out.next_section(); write_struct_with_methods(out, ety, methods); } } _ => {} } } out.next_section(); for api in apis { if let Api::TypeAlias(ety) = api { if out.types.required_trivial.contains_key(&ety.name.rust) { check_trivial_extern_type(out, &ety.name) } } } } fn write_functions<'a>(out: &mut OutFile<'a>, apis: &'a [Api]) { if !out.header { for api in apis { match api { Api::CxxFunction(efn) => write_cxx_function_shim(out, efn), Api::RustFunction(efn) => write_rust_function_decl(out, efn), _ => {} } } } for api in apis { if let Api::RustFunction(efn) = api { 
out.next_section(); write_rust_function_shim(out, efn); } } } fn pick_includes_and_builtins(out: &mut OutFile, apis: &[Api]) { for api in apis { if let Api::Include(include) = api { out.include.insert(include); } } for ty in out.types { match ty { Type::Ident(ident) => match Atom::from(&ident.rust) { Some(U8) | Some(U16) | Some(U32) | Some(U64) | Some(I8) | Some(I16) | Some(I32) | Some(I64) => out.include.cstdint = true, Some(Usize) => out.include.cstddef = true, Some(Isize) => out.builtin.rust_isize = true, Some(CxxString) => out.include.string = true, Some(RustString) => out.builtin.rust_string = true, Some(Bool) | Some(F32) | Some(F64) | None => {} }, Type::RustBox(_) => out.builtin.rust_box = true, Type::RustVec(_) => out.builtin.rust_vec = true, Type::UniquePtr(_) => out.include.memory = true, Type::Str(_) => out.builtin.rust_str = true, Type::CxxVector(_) => out.include.vector = true, Type::Fn(_) => out.builtin.rust_fn = true, Type::Slice(_) => out.builtin.rust_slice = true, Type::SliceRefU8(_) => { out.include.cstdint = true; out.builtin.rust_slice = true; } Type::Ref(_) | Type::Void(_) => {} } } } fn write_struct<'a>(out: &mut OutFile<'a>, strct: &'a Struct, methods: &[&ExternFn]) { out.set_namespace(&strct.name.namespace); let guard = format!("CXXBRIDGE1_STRUCT_{}", strct.name.to_symbol()); writeln!(out, "#ifndef {}", guard); writeln!(out, "#define {}", guard); for line in strct.doc.to_string().lines() { writeln!(out, "//{}", line); } writeln!(out, "struct {} final {{", strct.name.cxx); for field in &strct.fields { write!(out, " "); write_type_space(out, &field.ty); writeln!(out, "{};", field.ident); } if !methods.is_empty() { writeln!(out); } for method in methods { write!(out, " "); let sig = &method.sig; let local_name = method.name.cxx.to_string(); write_rust_function_shim_decl(out, &local_name, sig, false); writeln!(out, ";"); } writeln!(out, "}};"); writeln!(out, "#endif // {}", guard); } fn write_struct_decl(out: &mut OutFile, ident: &Ident) { writeln!(out, "struct {};", ident); } fn write_enum_decl(out: &mut OutFile, enm: &Enum) { write!(out, "enum class {} : ", enm.name.cxx); write_atom(out, enm.repr); writeln!(out, ";"); } fn write_struct_using(out: &mut OutFile, ident: &Pair) { writeln!(out, "using {} = {};", ident.cxx, ident.to_fully_qualified()); } fn write_struct_with_methods<'a>( out: &mut OutFile<'a>, ety: &'a ExternType, methods: &[&ExternFn], ) { out.set_namespace(&ety.name.namespace); let guard = format!("CXXBRIDGE1_STRUCT_{}", ety.name.to_symbol()); writeln!(out, "#ifndef {}", guard); writeln!(out, "#define {}", guard); for line in ety.doc.to_string().lines() { writeln!(out, "//{}", line); } writeln!(out, "struct {} final {{", ety.name.cxx); writeln!(out, " {}() = delete;", ety.name.cxx); writeln!( out, " {}(const {} &) = delete;", ety.name.cxx, ety.name.cxx, ); for method in methods { write!(out, " "); let sig = &method.sig; let local_name = method.name.cxx.to_string(); write_rust_function_shim_decl(out, &local_name, sig, false); writeln!(out, ";"); } writeln!(out, "}};"); writeln!(out, "#endif // {}", guard); } fn write_enum<'a>(out: &mut OutFile<'a>, enm: &'a Enum) { out.set_namespace(&enm.name.namespace); let guard = format!("CXXBRIDGE1_ENUM_{}", enm.name.to_symbol()); writeln!(out, "#ifndef {}", guard); writeln!(out, "#define {}", guard); for line in enm.doc.to_string().lines() { writeln!(out, "//{}", line); } write!(out, "enum class {} : ", enm.name.cxx); write_atom(out, enm.repr); writeln!(out, " {{"); for variant in &enm.variants { writeln!(out, " {} = 
{},", variant.ident, variant.discriminant); } writeln!(out, "}};"); writeln!(out, "#endif // {}", guard); } fn check_enum<'a>(out: &mut OutFile<'a>, enm: &'a Enum) { out.set_namespace(&enm.name.namespace); write!(out, "static_assert(sizeof({}) == sizeof(", enm.name.cxx); write_atom(out, enm.repr); writeln!(out, "), \"incorrect size\");"); for variant in &enm.variants { write!(out, "static_assert(static_cast<"); write_atom(out, enm.repr); writeln!( out, ">({}::{}) == {}, \"disagrees with the value in #[cxx::bridge]\");", enm.name.cxx, variant.ident, variant.discriminant, ); } } fn check_trivial_extern_type(out: &mut OutFile, id: &Pair) { // NOTE: The following static assertion is just nice-to-have and not // necessary for soundness. That's because triviality is always declared by // the user in the form of an unsafe impl of cxx::ExternType: // // unsafe impl ExternType for MyType { // type Id = cxx::type_id!("..."); // type Kind = cxx::kind::Trivial; // } // // Since the user went on the record with their unsafe impl to unsafely // claim they KNOW that the type is trivial, it's fine for that to be on // them if that were wrong. However, in practice correctly reasoning about // the relocatability of C++ types is challenging, particularly if the type // definition were to change over time, so for now we add this check. // // There may be legitimate reasons to opt out of this assertion for support // of types that the programmer knows are soundly Rust-movable despite not // being recognized as such by the C++ type system due to a move constructor // or destructor. To opt out of the relocatability check, they need to do // one of the following things in any header used by `include!` in their // bridge. // // --- if they define the type: // struct MyType { // ... 
// + using IsRelocatable = std::true_type; // }; // // --- otherwise: // + template <> // + struct rust::IsRelocatable<MyType> : std::true_type {}; // let id = id.to_fully_qualified(); out.builtin.relocatable = true; writeln!(out, "static_assert("); writeln!(out, " ::rust::IsRelocatable<{}>::value,", id); writeln!( out, " \"type {} marked as Trivial in Rust is not trivially move constructible and trivially destructible in C++\");", id, ); } fn write_cxx_function_shim<'a>(out: &mut OutFile<'a>, efn: &'a ExternFn) { out.next_section(); out.set_namespace(&efn.name.namespace); out.begin_block(Block::ExternC); if let Some(annotation) = &out.opt.cxx_impl_annotations { write!(out, "{} ", annotation); } if efn.throws { out.builtin.ptr_len = true; write!(out, "::rust::repr::PtrLen "); } else { write_extern_return_type_space(out, &efn.ret); } let mangled = mangle::extern_fn(efn, out.types); write!(out, "{}(", mangled); if let Some(receiver) = &efn.receiver { if !receiver.mutable { write!(out, "const "); } write!( out, "{} &self", out.types.resolve(&receiver.ty).to_fully_qualified(), ); } for (i, arg) in efn.args.iter().enumerate() { if i > 0 || efn.receiver.is_some() { write!(out, ", "); } if arg.ty == RustString { write!(out, "const "); } else if let Type::RustVec(_) = arg.ty { write!(out, "const "); } write_extern_arg(out, arg); } let indirect_return = indirect_return(efn, out.types); if indirect_return { if !efn.args.is_empty() || efn.receiver.is_some() { write!(out, ", "); } write_indirect_return_type_space(out, efn.ret.as_ref().unwrap()); write!(out, "*return$"); } writeln!(out, ") noexcept {{"); write!(out, " "); write_return_type(out, &efn.ret); match &efn.receiver { None => write!(out, "(*{}$)(", efn.name.rust), Some(receiver) => write!( out, "({}::*{}$)(", out.types.resolve(&receiver.ty).to_fully_qualified(), efn.name.rust, ), } for (i, arg) in efn.args.iter().enumerate() { if i > 0 { write!(out, ", "); } write_type(out, &arg.ty); } write!(out, ")"); if let Some(receiver) = &efn.receiver { if !receiver.mutable { write!(out, " const"); } } write!(out, " = "); match &efn.receiver { None => write!(out, "{}", efn.name.to_fully_qualified()), Some(receiver) => write!( out, "&{}::{}", out.types.resolve(&receiver.ty).to_fully_qualified(), efn.name.cxx, ), } writeln!(out, ";"); write!(out, " "); if efn.throws { out.builtin.ptr_len = true; out.builtin.trycatch = true; writeln!(out, "::rust::repr::PtrLen throw$;"); writeln!(out, " ::rust::behavior::trycatch("); writeln!(out, " [&] {{"); write!(out, " "); } if indirect_return { out.include.new = true; write!(out, "new (return$) "); write_indirect_return_type(out, efn.ret.as_ref().unwrap()); write!(out, "("); } else if efn.ret.is_some() { write!(out, "return "); } match &efn.ret { Some(Type::Ref(_)) => write!(out, "&"), Some(Type::Str(_)) if !indirect_return => { out.builtin.rust_str_repr = true; write!(out, "::rust::impl<::rust::Str>::repr("); } Some(Type::SliceRefU8(_)) if !indirect_return => { out.builtin.rust_slice_repr = true; write!(out, "::rust::impl<::rust::Slice<const uint8_t>>::repr(") } _ => {} } match &efn.receiver { None => write!(out, "{}$(", efn.name.rust), Some(_) => write!(out, "(self.*{}$)(", efn.name.rust), } for (i, arg) in efn.args.iter().enumerate() { if i > 0 { write!(out, ", "); } if let Type::RustBox(_) = &arg.ty { write_type(out, &arg.ty); write!(out, "::from_raw({})", arg.ident); } else if let Type::UniquePtr(_) = &arg.ty { write_type(out, &arg.ty); write!(out, "({})", arg.ident); } else if let Type::Str(_) = arg.ty { 
out.builtin.rust_str_new_unchecked = true; write!( out, "::rust::impl<::rust::Str>::new_unchecked({})", arg.ident, ); } else if arg.ty == RustString { out.builtin.unsafe_bitcopy = true; write!( out, "::rust::String(::rust::unsafe_bitcopy, *{})", arg.ident, ); } else if let Type::RustVec(_) = arg.ty { out.builtin.unsafe_bitcopy = true; write_type(out, &arg.ty); write!(out, "(::rust::unsafe_bitcopy, *{})", arg.ident); } else if let Type::SliceRefU8(_) = arg.ty { write!( out, "::rust::Slice<const uint8_t>(static_cast<const uint8_t *>({0}.ptr), {0}.len)", arg.ident, ); } else if out.types.needs_indirect_abi(&arg.ty) { out.include.utility = true; write!(out, "::std::move(*{})", arg.ident); } else { write!(out, "{}", arg.ident); } } write!(out, ")"); match &efn.ret { Some(Type::RustBox(_)) => write!(out, ".into_raw()"), Some(Type::UniquePtr(_)) => write!(out, ".release()"), Some(Type::Str(_)) | Some(Type::SliceRefU8(_)) if !indirect_return => write!(out, ")"), _ => {} } if indirect_return { write!(out, ")"); } writeln!(out, ";"); if efn.throws { out.include.cstring = true; out.builtin.exception = true; writeln!(out, " throw$.ptr = nullptr;"); writeln!(out, " }},"); writeln!(out, " [&](const char *catch$) noexcept {{"); writeln!(out, " throw$.len = ::std::strlen(catch$);"); writeln!( out, " throw$.ptr = ::cxxbridge1$exception(catch$, throw$.len);", ); writeln!(out, " }});"); writeln!(out, " return throw$;"); } writeln!(out, "}}"); for arg in &efn.args { if let Type::Fn(f) = &arg.ty { let var = &arg.ident; write_function_pointer_trampoline(out, efn, var, f); } } out.end_block(Block::ExternC); } fn write_function_pointer_trampoline( out: &mut OutFile, efn: &ExternFn, var: &Ident, f: &Signature, ) { let r_trampoline = mangle::r_trampoline(efn, var, out.types); let indirect_call = true; write_rust_function_decl_impl(out, &r_trampoline, f, indirect_call); out.next_section(); let c_trampoline = mangle::c_trampoline(efn, var, out.types).to_string(); write_rust_function_shim_impl(out, &c_trampoline, f, &r_trampoline, indirect_call); } fn write_rust_function_decl<'a>(out: &mut OutFile<'a>, efn: &'a ExternFn) { out.set_namespace(&efn.name.namespace); out.begin_block(Block::ExternC); let link_name = mangle::extern_fn(efn, out.types); let indirect_call = false; write_rust_function_decl_impl(out, &link_name, efn, indirect_call); out.end_block(Block::ExternC); } fn write_rust_function_decl_impl( out: &mut OutFile, link_name: &Symbol, sig: &Signature, indirect_call: bool, ) { out.next_section(); if sig.throws { out.builtin.ptr_len = true; write!(out, "::rust::repr::PtrLen "); } else { write_extern_return_type_space(out, &sig.ret); } write!(out, "{}(", link_name); let mut needs_comma = false; if let Some(receiver) = &sig.receiver { if !receiver.mutable { write!(out, "const "); } write!( out, "{} &self", out.types.resolve(&receiver.ty).to_fully_qualified(), ); needs_comma = true; } for arg in &sig.args { if needs_comma { write!(out, ", "); } write_extern_arg(out, arg); needs_comma = true; } if indirect_return(sig, out.types) { if needs_comma { write!(out, ", "); } write_return_type(out, &sig.ret); write!(out, "*return$"); needs_comma = true; } if indirect_call { if needs_comma { write!(out, ", "); } write!(out, "void *"); } writeln!(out, ") noexcept;"); } fn write_rust_function_shim<'a>(out: &mut OutFile<'a>, efn: &'a ExternFn) { out.set_namespace(&efn.name.namespace); for line in efn.doc.to_string().lines() { writeln!(out, "//{}", line); } let local_name = match &efn.sig.receiver { None => 
efn.name.cxx.to_string(), Some(receiver) => format!("{}::{}", out.types.resolve(&receiver.ty).cxx, efn.name.cxx), }; let invoke = mangle::extern_fn(efn, out.types); let indirect_call = false; write_rust_function_shim_impl(out, &local_name, efn, &invoke, indirect_call); } fn write_rust_function_shim_decl( out: &mut OutFile, local_name: &str, sig: &Signature, indirect_call: bool, ) { write_return_type(out, &sig.ret); write!(out, "{}(", local_name); for (i, arg) in sig.args.iter().enumerate() { if i > 0 { write!(out, ", "); } write_type_space(out, &arg.ty); write!(out, "{}", arg.ident); } if indirect_call { if !sig.args.is_empty() { write!(out, ", "); } write!(out, "void *extern$"); } write!(out, ")"); if let Some(receiver) = &sig.receiver { if !receiver.mutable { write!(out, " const"); } } if !sig.throws { write!(out, " noexcept"); } } fn write_rust_function_shim_impl( out: &mut OutFile, local_name: &str, sig: &Signature, invoke: &Symbol, indirect_call: bool, ) { if out.header && sig.receiver.is_some() { // We've already defined this inside the struct. return; } write_rust_function_shim_decl(out, local_name, sig, indirect_call); if out.header { writeln!(out, ";"); return; } writeln!(out, " {{"); for arg in &sig.args { if arg.ty != RustString && out.types.needs_indirect_abi(&arg.ty) { out.include.utility = true; out.builtin.manually_drop = true; write!(out, " ::rust::ManuallyDrop<"); write_type(out, &arg.ty); writeln!(out, "> {}$(::std::move({0}));", arg.ident); } } write!(out, " "); let indirect_return = indirect_return(sig, out.types); if indirect_return { out.builtin.maybe_uninit = true; write!(out, "::rust::MaybeUninit<"); write_type(out, sig.ret.as_ref().unwrap()); writeln!(out, "> return$;"); write!(out, " "); } else if let Some(ret) = &sig.ret { write!(out, "return "); match ret { Type::RustBox(_) => { write_type(out, ret); write!(out, "::from_raw("); } Type::UniquePtr(_) => { write_type(out, ret); write!(out, "("); } Type::Ref(_) => write!(out, "*"), Type::Str(_) => { out.builtin.rust_str_new_unchecked = true; write!(out, "::rust::impl<::rust::Str>::new_unchecked("); } Type::SliceRefU8(_) => { out.builtin.rust_slice_new = true; write!(out, "::rust::impl<::rust::Slice<const uint8_t>>::slice("); } _ => {} } } if sig.throws { out.builtin.ptr_len = true; write!(out, "::rust::repr::PtrLen error$ = "); } write!(out, "{}(", invoke); let mut needs_comma = false; if sig.receiver.is_some() { write!(out, "*this"); needs_comma = true; } for arg in &sig.args { if needs_comma { write!(out, ", "); } match &arg.ty { Type::Str(_) => { out.builtin.rust_str_repr = true; write!(out, "::rust::impl<::rust::Str>::repr("); } Type::SliceRefU8(_) => { out.builtin.rust_slice_repr = true; write!(out, "::rust::impl<::rust::Slice<const uint8_t>>::repr("); } ty if out.types.needs_indirect_abi(ty) => write!(out, "&"), _ => {} } write!(out, "{}", arg.ident); match &arg.ty { Type::RustBox(_) => write!(out, ".into_raw()"), Type::UniquePtr(_) => write!(out, ".release()"), Type::Str(_) | Type::SliceRefU8(_) => write!(out, ")"), ty if ty != RustString && out.types.needs_indirect_abi(ty) => write!(out, "$.value"), _ => {} } needs_comma = true; } if indirect_return { if needs_comma { write!(out, ", "); } write!(out, "&return$.value"); needs_comma = true; } if indirect_call { if needs_comma { write!(out, ", "); } write!(out, "extern$"); } write!(out, ")"); if !indirect_return { if let Some(ret) = &sig.ret { if let Type::RustBox(_) | Type::UniquePtr(_) | Type::Str(_) | Type::SliceRefU8(_) = ret { write!(out, ")"); } } } 
writeln!(out, ";"); if sig.throws { out.builtin.rust_error = true; writeln!(out, " if (error$.ptr) {{"); writeln!(out, " throw ::rust::impl<::rust::Error>::error(error$);"); writeln!(out, " }}"); } if indirect_return { out.include.utility = true; writeln!(out, " return ::std::move(return$.value);"); } writeln!(out, "}}"); } fn write_return_type(out: &mut OutFile, ty: &Option<Type>) { match ty { None => write!(out, "void "), Some(ty) => write_type_space(out, ty), } } fn indirect_return(sig: &Signature, types: &Types) -> bool { sig.ret .as_ref() .map_or(false, |ret| sig.throws || types.needs_indirect_abi(ret)) } fn write_indirect_return_type(out: &mut OutFile, ty: &Type) { match ty { Type::RustBox(ty) | Type::UniquePtr(ty) => { write_type_space(out, &ty.inner); write!(out, "*"); } Type::Ref(ty) => { if !ty.mutable { write!(out, "const "); } write_type(out, &ty.inner); write!(out, " *"); } _ => write_type(out, ty), } } fn write_indirect_return_type_space(out: &mut OutFile, ty: &Type) { write_indirect_return_type(out, ty); match ty { Type::RustBox(_) | Type::UniquePtr(_) | Type::Ref(_) => {} Type::Str(_) | Type::SliceRefU8(_) => write!(out, " "), _ => write_space_after_type(out, ty), } } fn write_extern_return_type_space(out: &mut OutFile, ty: &Option<Type>) { match ty { Some(Type::RustBox(ty)) | Some(Type::UniquePtr(ty)) => { write_type_space(out, &ty.inner); write!(out, "*"); } Some(Type::Ref(ty)) => { if !ty.mutable { write!(out, "const "); } write_type(out, &ty.inner); write!(out, " *"); } Some(Type::Str(_)) | Some(Type::SliceRefU8(_)) => { out.builtin.ptr_len = true; write!(out, "::rust::repr::PtrLen "); } Some(ty) if out.types.needs_indirect_abi(ty) => write!(out, "void "), _ => write_return_type(out, ty), } } fn write_extern_arg(out: &mut OutFile, arg: &Var) { match &arg.ty { Type::RustBox(ty) | Type::UniquePtr(ty) | Type::CxxVector(ty) => { write_type_space(out, &ty.inner); write!(out, "*"); } Type::Str(_) | Type::SliceRefU8(_) => { out.builtin.ptr_len = true; write!(out, "::rust::repr::PtrLen "); } _ => write_type_space(out, &arg.ty), } if out.types.needs_indirect_abi(&arg.ty) { write!(out, "*"); } write!(out, "{}", arg.ident); } fn write_type(out: &mut OutFile, ty: &Type) { match ty { Type::Ident(ident) => match Atom::from(&ident.rust) { Some(atom) => write_atom(out, atom), None => write!(out, "{}", out.types.resolve(ident).to_fully_qualified()), }, Type::RustBox(ty) => { write!(out, "::rust::Box<"); write_type(out, &ty.inner); write!(out, ">"); } Type::RustVec(ty) => { write!(out, "::rust::Vec<"); write_type(out, &ty.inner); write!(out, ">"); } Type::UniquePtr(ptr) => { write!(out, "::std::unique_ptr<"); write_type(out, &ptr.inner); write!(out, ">"); } Type::CxxVector(ty) => { write!(out, "::std::vector<"); write_type(out, &ty.inner); write!(out, ">"); } Type::Ref(r) => { if !r.mutable { write!(out, "const "); } write_type(out, &r.inner); write!(out, " &"); } Type::Slice(_) => { // For now, only U8 slices are supported, which are covered separately below unreachable!() } Type::Str(_) => { write!(out, "::rust::Str"); } Type::SliceRefU8(_) => { write!(out, "::rust::Slice<const uint8_t>"); } Type::Fn(f) => { write!(out, "::rust::{}<", if f.throws { "TryFn" } else { "Fn" }); match &f.ret { Some(ret) => write_type(out, ret), None => write!(out, "void"), } write!(out, "("); for (i, arg) in f.args.iter().enumerate() { if i > 0 { write!(out, ", "); } write_type(out, &arg.ty); } write!(out, ")>"); } Type::Void(_) => unreachable!(), } } fn write_atom(out: &mut OutFile, atom: Atom) { match 
atom { Bool => write!(out, "bool"), U8 => write!(out, "uint8_t"), U16 => write!(out, "uint16_t"), U32 => write!(out, "uint32_t"), U64 => write!(out, "uint64_t"), Usize => write!(out, "size_t"), I8 => write!(out, "int8_t"), I16 => write!(out, "int16_t"), I32 => write!(out, "int32_t"), I64 => write!(out, "int64_t"), Isize => write!(out, "::rust::isize"), F32 => write!(out, "float"), F64 => write!(out, "double"), CxxString => write!(out, "::std::string"), RustString => write!(out, "::rust::String"), } } fn write_type_space(out: &mut OutFile, ty: &Type) { write_type(out, ty); write_space_after_type(out, ty); } fn write_space_after_type(out: &mut OutFile, ty: &Type) { match ty { Type::Ident(_) | Type::RustBox(_) | Type::UniquePtr(_) | Type::Str(_) | Type::CxxVector(_) | Type::RustVec(_) | Type::SliceRefU8(_) | Type::Fn(_) => write!(out, " "), Type::Ref(_) => {} Type::Void(_) | Type::Slice(_) => unreachable!(), } } // Only called for legal referent types of unique_ptr and element types of // std::vector and Vec. fn to_typename(ty: &Type, types: &Types) -> String { match ty { Type::Ident(ident) => types.resolve(&ident).to_fully_qualified(), Type::CxxVector(ptr) => format!("::std::vector<{}>", to_typename(&ptr.inner, types)), _ => unreachable!(), } } // Only called for legal referent types of unique_ptr and element types of // std::vector and Vec. fn to_mangled(ty: &Type, types: &Types) -> Symbol { match ty { Type::Ident(ident) => ident.to_symbol(types), Type::CxxVector(ptr) => to_mangled(&ptr.inner, types).prefix_with("std$vector$"), _ => unreachable!(), } } fn write_generic_instantiations(out: &mut OutFile) { if out.header { return; } out.next_section(); out.set_namespace(Default::default()); out.begin_block(Block::ExternC); for ty in out.types { if let Type::RustBox(ty) = ty { if let Type::Ident(inner) = &ty.inner { out.next_section(); write_rust_box_extern(out, &out.types.resolve(&inner)); } } else if let Type::RustVec(ty) = ty { if let Type::Ident(inner) = &ty.inner { if Atom::from(&inner.rust).is_none() { out.next_section(); write_rust_vec_extern(out, inner); } } } else if let Type::UniquePtr(ptr) = ty { if let Type::Ident(inner) = &ptr.inner { if Atom::from(&inner.rust).is_none() && (!out.types.aliases.contains_key(&inner.rust) || out.types.explicit_impls.contains(ty)) { out.next_section(); write_unique_ptr(out, inner); } } } else if let Type::CxxVector(ptr) = ty { if let Type::Ident(inner) = &ptr.inner { if Atom::from(&inner.rust).is_none() && (!out.types.aliases.contains_key(&inner.rust) || out.types.explicit_impls.contains(ty)) { out.next_section(); write_cxx_vector(out, ty, inner); } } } } out.end_block(Block::ExternC); out.begin_block(Block::Namespace("rust")); out.begin_block(Block::InlineNamespace("cxxbridge1")); for ty in out.types { if let Type::RustBox(ty) = ty { if let Type::Ident(inner) = &ty.inner { write_rust_box_impl(out, &out.types.resolve(&inner)); } } else if let Type::RustVec(ty) = ty { if let Type::Ident(inner) = &ty.inner { if Atom::from(&inner.rust).is_none() { write_rust_vec_impl(out, inner); } } } } out.end_block(Block::InlineNamespace("cxxbridge1")); out.end_block(Block::Namespace("rust")); } fn write_rust_box_extern(out: &mut OutFile, ident: &Pair) { let inner = ident.to_fully_qualified(); let instance = ident.to_symbol(); writeln!(out, "#ifndef CXXBRIDGE1_RUST_BOX_{}", instance); writeln!(out, "#define CXXBRIDGE1_RUST_BOX_{}", instance); writeln!( out, "void cxxbridge1$box${}$uninit(::rust::Box<{}> *ptr) noexcept;", instance, inner, ); writeln!( out, "void 
cxxbridge1$box${}$drop(::rust::Box<{}> *ptr) noexcept;", instance, inner, ); writeln!(out, "#endif // CXXBRIDGE1_RUST_BOX_{}", instance); } fn write_rust_vec_extern(out: &mut OutFile, element: &ResolvableName) { let element = Type::Ident(element.clone()); let inner = to_typename(&element, out.types); let instance = to_mangled(&element, out.types); writeln!(out, "#ifndef CXXBRIDGE1_RUST_VEC_{}", instance); writeln!(out, "#define CXXBRIDGE1_RUST_VEC_{}", instance); writeln!( out, "void cxxbridge1$rust_vec${}$new(const ::rust::Vec<{}> *ptr) noexcept;", instance, inner, ); writeln!( out, "void cxxbridge1$rust_vec${}$drop(::rust::Vec<{}> *ptr) noexcept;", instance, inner, ); writeln!( out, "size_t cxxbridge1$rust_vec${}$len(const ::rust::Vec<{}> *ptr) noexcept;", instance, inner, ); writeln!( out, "const {} *cxxbridge1$rust_vec${}$data(const ::rust::Vec<{0}> *ptr) noexcept;", inner, instance, ); writeln!( out, "void cxxbridge1$rust_vec${}$reserve_total(::rust::Vec<{}> *ptr, size_t cap) noexcept;", instance, inner, ); writeln!( out, "void cxxbridge1$rust_vec${}$set_len(::rust::Vec<{}> *ptr, size_t len) noexcept;", instance, inner, ); writeln!( out, "size_t cxxbridge1$rust_vec${}$stride() noexcept;", instance, ); writeln!(out, "#endif // CXXBRIDGE1_RUST_VEC_{}", instance); } fn write_rust_box_impl(out: &mut OutFile, ident: &Pair) { let inner = ident.to_fully_qualified(); let instance = ident.to_symbol(); writeln!(out, "template <>"); writeln!(out, "void Box<{}>::uninit() noexcept {{", inner); writeln!(out, " cxxbridge1$box${}$uninit(this);", instance); writeln!(out, "}}"); writeln!(out, "template <>"); writeln!(out, "void Box<{}>::drop() noexcept {{", inner); writeln!(out, " cxxbridge1$box${}$drop(this);", instance); writeln!(out, "}}"); } fn write_rust_vec_impl(out: &mut OutFile, element: &ResolvableName) { let element = Type::Ident(element.clone()); let inner = to_typename(&element, out.types); let instance = to_mangled(&element, out.types); writeln!(out, "template <>"); writeln!(out, "Vec<{}>::Vec() noexcept {{", inner); writeln!(out, " cxxbridge1$rust_vec${}$new(this);", instance); writeln!(out, "}}"); writeln!(out, "template <>"); writeln!(out, "void Vec<{}>::drop() noexcept {{", inner); writeln!(out, " return cxxbridge1$rust_vec${}$drop(this);", instance); writeln!(out, "}}"); writeln!(out, "template <>"); writeln!(out, "size_t Vec<{}>::size() const noexcept {{", inner); writeln!(out, " return cxxbridge1$rust_vec${}$len(this);", instance); writeln!(out, "}}"); writeln!(out, "template <>"); writeln!(out, "const {} *Vec<{0}>::data() const noexcept {{", inner); writeln!(out, " return cxxbridge1$rust_vec${}$data(this);", instance); writeln!(out, "}}"); writeln!(out, "template <>"); writeln!( out, "void Vec<{}>::reserve_total(size_t cap) noexcept {{", inner, ); writeln!( out, " return cxxbridge1$rust_vec${}$reserve_total(this, cap);", instance, ); writeln!(out, "}}"); writeln!(out, "template <>"); writeln!(out, "void Vec<{}>::set_len(size_t len) noexcept {{", inner); writeln!( out, " return cxxbridge1$rust_vec${}$set_len(this, len);", instance, ); writeln!(out, "}}"); writeln!(out, "template <>"); writeln!(out, "size_t Vec<{}>::stride() noexcept {{", inner); writeln!(out, " return cxxbridge1$rust_vec${}$stride();", instance); writeln!(out, "}}"); } fn write_unique_ptr(out: &mut OutFile, ident: &ResolvableName) { let ty = Type::Ident(ident.clone()); let instance = to_mangled(&ty, out.types); writeln!(out, "#ifndef CXXBRIDGE1_UNIQUE_PTR_{}", instance); writeln!(out, "#define 
CXXBRIDGE1_UNIQUE_PTR_{}", instance); write_unique_ptr_common(out, &ty); writeln!(out, "#endif // CXXBRIDGE1_UNIQUE_PTR_{}", instance); } // Shared by UniquePtr<T> and UniquePtr<CxxVector<T>>. fn write_unique_ptr_common(out: &mut OutFile, ty: &Type) { out.include.new = true; out.include.utility = true; let inner = to_typename(ty, out.types); let instance = to_mangled(ty, out.types); let can_construct_from_value = match ty { // Some aliases are to opaque types; some are to trivial types. We can't // know at code generation time, so we generate both C++ and Rust side // bindings for a "new" method anyway. But the Rust code can't be called // for Opaque types because the 'new' method is not implemented. Type::Ident(ident) => { out.types.structs.contains_key(&ident.rust) || out.types.aliases.contains_key(&ident.rust) } _ => false, }; writeln!( out, "static_assert(sizeof(::std::unique_ptr<{}>) == sizeof(void *), \"\");", inner, ); writeln!( out, "static_assert(alignof(::std::unique_ptr<{}>) == alignof(void *), \"\");", inner, ); writeln!( out, "void cxxbridge1$unique_ptr${}$null(::std::unique_ptr<{}> *ptr) noexcept {{", instance, inner, ); writeln!(out, " new (ptr) ::std::unique_ptr<{}>();", inner); writeln!(out, "}}"); if can_construct_from_value { writeln!( out, "void cxxbridge1$unique_ptr${}$new(::std::unique_ptr<{}> *ptr, {} *value) noexcept {{", instance, inner, inner, ); writeln!( out, " new (ptr) ::std::unique_ptr<{}>(new {}(::std::move(*value)));", inner, inner, ); writeln!(out, "}}"); } writeln!( out, "void cxxbridge1$unique_ptr${}$raw(::std::unique_ptr<{}> *ptr, {} *raw) noexcept {{", instance, inner, inner, ); writeln!(out, " new (ptr) ::std::unique_ptr<{}>(raw);", inner); writeln!(out, "}}"); writeln!( out, "const {} *cxxbridge1$unique_ptr${}$get(const ::std::unique_ptr<{}>& ptr) noexcept {{", inner, instance, inner, ); writeln!(out, " return ptr.get();"); writeln!(out, "}}"); writeln!( out, "{} *cxxbridge1$unique_ptr${}$release(::std::unique_ptr<{}>& ptr) noexcept {{", inner, instance, inner, ); writeln!(out, " return ptr.release();"); writeln!(out, "}}"); writeln!( out, "void cxxbridge1$unique_ptr${}$drop(::std::unique_ptr<{}> *ptr) noexcept {{", instance, inner, ); writeln!(out, " ptr->~unique_ptr();"); writeln!(out, "}}"); } fn write_cxx_vector(out: &mut OutFile, vector_ty: &Type, element: &ResolvableName) { let element = Type::Ident(element.clone()); let inner = to_typename(&element, out.types); let instance = to_mangled(&element, out.types); writeln!(out, "#ifndef CXXBRIDGE1_VECTOR_{}", instance); writeln!(out, "#define CXXBRIDGE1_VECTOR_{}", instance); writeln!( out, "size_t cxxbridge1$std$vector${}$size(const ::std::vector<{}> &s) noexcept {{", instance, inner, ); writeln!(out, " return s.size();"); writeln!(out, "}}"); writeln!( out, "const {} *cxxbridge1$std$vector${}$get_unchecked(const ::std::vector<{}> &s, size_t pos) noexcept {{", inner, instance, inner, ); writeln!(out, " return &s[pos];"); writeln!(out, "}}"); write_unique_ptr_common(out, vector_ty); writeln!(out, "#endif // CXXBRIDGE1_VECTOR_{}", instance); }
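// --- Added sketch (not part of the generator above) -------------------------
// The generator guards every emitted helper (unique_ptr, rust_vec, vector,
// box) behind a per-instance #ifndef so repeated emission collapses at the
// C++ preprocessor level. The same dedup idea can also be kept on the Rust
// side with a HashSet keyed by the mangled instance name; this is an
// illustrative alternative, not how OutFile actually works.
use std::collections::HashSet;
use std::fmt::Write;

fn emit_once(seen: &mut HashSet<String>, out: &mut String, instance: &str) {
    // Only the first request for a given instance produces any output.
    if seen.insert(instance.to_owned()) {
        writeln!(out, "#ifndef CXXBRIDGE1_UNIQUE_PTR_{}", instance).unwrap();
        writeln!(out, "// ... helper definitions for {} ...", instance).unwrap();
        writeln!(out, "#endif // CXXBRIDGE1_UNIQUE_PTR_{}", instance).unwrap();
    }
}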
32.959438
124
0.520258
626003adaad86e16357c5e3a79a20da2f2e8808f
1,284
use std::fmt::Debug; use std::rc::Rc; use crate::Color; use crate::focus::Focus; pub use crate::state::value_cell::{ValueCell, ValueRef, ValueRefMut}; pub use self::animated_state::*; pub use self::animation_curve::*; pub use self::env_state::EnvState; pub use self::global_state::GlobalState; pub use self::local_state::LocalState; pub use self::map_owned_state::*; pub use self::map_state::*; pub use self::state::State; pub use self::state_key::StateKey; pub use self::state_sync::StateSync; pub use self::value_state::ValueState; pub use self::widget_state::WidgetState; mod animated_state; mod animation_curve; mod env_state; mod global_state; mod local_state; mod map_owned_state; mod map_state; mod state; mod state_key; mod state_sync; mod value_cell; mod value_state; mod vec_state; mod widget_state; pub(crate) type InnerState<T> = Rc<ValueCell<T>>; pub type ColorState = TState<Color>; pub type StringState = TState<String>; pub type U32State = TState<u32>; pub type I32State = TState<i32>; pub type UsizeState = TState<usize>; pub type BoolState = TState<bool>; pub type F64State = TState<f64>; pub type FocusState = TState<Focus>; pub type TState<T> = WidgetState<T>; pub trait StateContract: Clone + Debug {} impl<T> StateContract for T where T: Clone + Debug {}
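// --- Added sketch (not part of the module above) ----------------------------
// A freestanding illustration of the blanket-impl pattern behind
// `StateContract`: an empty marker trait whose supertrait bounds carry the
// real requirements, implemented once for every type that satisfies them.
// Any Clone + Debug type then works wherever the marker is required.
use std::fmt::Debug;

trait Contract: Clone + Debug {}
impl<T> Contract for T where T: Clone + Debug {}

fn debug_clone<S: Contract>(value: &S) -> S {
    println!("{:?}", value); // Debug comes via the supertrait bound.
    value.clone() // Clone likewise.
}

fn main() {
    let copied = debug_clone(&vec![1, 2, 3]); // Vec<i32> is Clone + Debug.
    assert_eq!(copied, vec![1, 2, 3]);
}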
25.176471
69
0.753894
87c428aa05aa6b409a647794a8795191796e92ed
3,487
use crate::{ interfaces::{LinearFeeDef, ValueDef}, time::SystemTime, }; use chain_addr::Discrimination; use chain_impl_mockchain::block::Epoch; use chain_impl_mockchain::fee::LinearFee; use chain_impl_mockchain::rewards::{CompoundingType, Limit, Parameters, Ratio, TaxType}; use chain_impl_mockchain::value::Value; use serde::{Deserialize, Serialize}; use std::num::{NonZeroU32, NonZeroU64}; #[derive(Debug, Clone, Eq, Serialize, Deserialize)] #[serde(deny_unknown_fields, rename_all = "camelCase")] pub struct SettingsDto { pub block0_hash: String, pub block0_time: SystemTime, pub curr_slot_start_time: Option<SystemTime>, pub consensus_version: String, #[serde(with = "LinearFeeDef")] pub fees: LinearFee, pub block_content_max_size: u32, pub epoch_stability_depth: u32, pub slot_duration: u64, pub slots_per_epoch: u32, #[serde(with = "TaxTypeDef")] pub treasury_tax: TaxType, #[serde(with = "ParametersDef")] pub reward_params: Parameters, #[serde(with = "DiscriminationDef")] pub discrimination: Discrimination, pub tx_max_expiry_epochs: u8, } #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(deny_unknown_fields, remote = "TaxType")] pub struct TaxTypeDef { #[serde(with = "ValueDef")] pub fixed: Value, #[serde(with = "RatioDef")] pub ratio: Ratio, #[serde(default, rename = "max", skip_serializing_if = "Option::is_none")] pub max_limit: Option<NonZeroU64>, } #[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Eq)] #[serde(transparent)] pub struct TaxTypeSerde(#[serde(with = "TaxTypeDef")] pub TaxType); #[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Eq)] #[serde(remote = "Limit")] pub enum LimitDef { None, ByStakeAbsolute(#[serde(with = "RatioDef")] Ratio), } #[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Eq)] #[serde(remote = "Ratio")] pub struct RatioDef { pub numerator: u64, pub denominator: NonZeroU64, } #[derive(Deserialize, Serialize)] #[serde(remote = "Parameters", rename_all = "camelCase")] pub struct ParametersDef { pub initial_value: u64, #[serde(with = "RatioDef")] pub compounding_ratio: Ratio, #[serde(with = "CompoundingTypeDef")] pub compounding_type: CompoundingType, pub epoch_rate: NonZeroU32, pub epoch_start: Epoch, #[serde(with = "LimitDef")] pub reward_drawing_limit_max: Limit, pub pool_participation_capping: Option<(NonZeroU32, NonZeroU32)>, } #[derive(Deserialize, Serialize)] #[serde(remote = "CompoundingType")] pub enum CompoundingTypeDef { Linear, Halvening, } #[derive(Serialize, Deserialize)] #[serde(rename_all = "camelCase", remote = "Discrimination")] enum DiscriminationDef { Test, Production, } impl PartialEq<SettingsDto> for SettingsDto { fn eq(&self, other: &SettingsDto) -> bool { self.block0_hash == other.block0_hash && self.block0_time == other.block0_time && self.consensus_version == other.consensus_version && self.fees == other.fees && self.block_content_max_size == other.block_content_max_size && self.epoch_stability_depth == other.epoch_stability_depth && self.slot_duration == other.slot_duration && self.slots_per_epoch == other.slots_per_epoch && self.treasury_tax == other.treasury_tax && self.reward_params == other.reward_params } }
31.990826
88
0.692859
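The SettingsDto module above leans heavily on serde's remote-derive pattern (#[serde(remote = "...")] combined with #[serde(with = "...")]) to serialize foreign chain_impl_mockchain types that have no serde impls of their own. A minimal, self-contained sketch of that pattern, using an invented Seconds type rather than the real chain types:

use serde::{Deserialize, Serialize};

// Stand-in for a type from another crate that has no serde impls.
pub struct Seconds {
    pub value: u64,
}

// Field-for-field mirror of `Seconds`; serde generates (de)serialization
// code for the *remote* type from this local definition.
#[derive(Serialize, Deserialize)]
#[serde(remote = "Seconds")]
struct SecondsDef {
    value: u64,
}

#[derive(Serialize, Deserialize)]
struct Config {
    // Route this field through the impls generated from `SecondsDef`.
    #[serde(with = "SecondsDef")]
    timeout: Seconds,
}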
6a8828e45d2027751d507c4a68420937ab8a5930
12,478
// Copyright 2015 The Servo Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use bincode; use std::sync::mpsc; use std::sync::{Arc, Mutex}; use std::collections::hash_map::HashMap; use std::cell::{RefCell}; use std::io::{Error, ErrorKind}; use std::slice; use std::fmt::{self, Debug, Formatter}; use std::cmp::{PartialEq}; use std::ops::{Deref, RangeFrom}; use std::mem; use std::usize; use uuid::Uuid; #[derive(Clone)] struct ServerRecord { sender: OsIpcSender, conn_sender: mpsc::Sender<bool>, conn_receiver: Arc<Mutex<mpsc::Receiver<bool>>>, } impl ServerRecord { fn new(sender: OsIpcSender) -> ServerRecord { let (tx, rx) = mpsc::channel::<bool>(); ServerRecord { sender: sender, conn_sender: tx, conn_receiver: Arc::new(Mutex::new(rx)), } } fn accept(&self) { self.conn_receiver.lock().unwrap().recv().unwrap(); } fn connect(&self) { self.conn_sender.send(true).unwrap(); } } lazy_static! { static ref ONE_SHOT_SERVERS: Mutex<HashMap<String,ServerRecord>> = Mutex::new(HashMap::new()); } struct MpscChannelMessage(Vec<u8>, Vec<OsIpcChannel>, Vec<OsIpcSharedMemory>); pub fn channel() -> Result<(OsIpcSender, OsIpcReceiver),MpscError> { let (base_sender, base_receiver) = mpsc::channel::<MpscChannelMessage>(); Ok((OsIpcSender::new(base_sender), OsIpcReceiver::new(base_receiver))) } #[derive(Debug)] pub struct OsIpcReceiver { receiver: RefCell<Option<mpsc::Receiver<MpscChannelMessage>>>, } impl PartialEq for OsIpcReceiver { fn eq(&self, other: &OsIpcReceiver) -> bool { self.receiver.borrow().as_ref().map(|rx| rx as *const _) == other.receiver.borrow().as_ref().map(|rx| rx as *const _) } } impl OsIpcReceiver { fn new(receiver: mpsc::Receiver<MpscChannelMessage>) -> OsIpcReceiver { OsIpcReceiver { receiver: RefCell::new(Some(receiver)), } } pub fn consume(&self) -> OsIpcReceiver { let receiver = self.receiver.borrow_mut().take(); OsIpcReceiver::new(receiver.unwrap()) } pub fn recv(&self) -> Result<(Vec<u8>, Vec<OsOpaqueIpcChannel>, Vec<OsIpcSharedMemory>),MpscError> { let r = self.receiver.borrow(); match r.as_ref().unwrap().recv() { Ok(MpscChannelMessage(d,c,s)) => Ok((d, c.into_iter().map(OsOpaqueIpcChannel::new).collect(), s)), Err(_) => Err(MpscError::ChannelClosedError), } } pub fn try_recv(&self) -> Result<(Vec<u8>, Vec<OsOpaqueIpcChannel>, Vec<OsIpcSharedMemory>),MpscError> { let r = self.receiver.borrow(); match r.as_ref().unwrap().try_recv() { Ok(MpscChannelMessage(d,c,s)) => Ok((d, c.into_iter().map(OsOpaqueIpcChannel::new).collect(), s)), Err(mpsc::TryRecvError::Disconnected) => Err(MpscError::ChannelClosedError), Err(_) => Err(MpscError::UnknownError), } } } #[derive(Clone, Debug)] pub struct OsIpcSender { sender: RefCell<mpsc::Sender<MpscChannelMessage>>, } impl PartialEq for OsIpcSender { fn eq(&self, other: &OsIpcSender) -> bool { &*self.sender.borrow() as *const _ == &*other.sender.borrow() as *const _ } } impl OsIpcSender { fn new(sender: mpsc::Sender<MpscChannelMessage>) -> OsIpcSender { OsIpcSender { sender: RefCell::new(sender), } } pub fn connect(name: String) -> Result<OsIpcSender,MpscError> { let record = ONE_SHOT_SERVERS.lock().unwrap().get(&name).unwrap().clone(); record.connect(); Ok(record.sender) } pub fn get_max_fragment_size() -> usize 
{ usize::MAX } pub fn send(&self, data: &[u8], ports: Vec<OsIpcChannel>, shared_memory_regions: Vec<OsIpcSharedMemory>) -> Result<(),MpscError> { match self.sender.borrow().send(MpscChannelMessage(data.to_vec(), ports, shared_memory_regions)) { Err(_) => Err(MpscError::BrokenPipeError), Ok(_) => Ok(()), } } } pub struct OsIpcReceiverSet { incrementor: RangeFrom<u64>, receiver_ids: Vec<u64>, receivers: Vec<OsIpcReceiver>, } impl OsIpcReceiverSet { pub fn new() -> Result<OsIpcReceiverSet,MpscError> { Ok(OsIpcReceiverSet { incrementor: 0.., receiver_ids: vec![], receivers: vec![], }) } pub fn add(&mut self, receiver: OsIpcReceiver) -> Result<u64,MpscError> { let last_index = self.incrementor.next().unwrap(); self.receiver_ids.push(last_index); self.receivers.push(receiver.consume()); Ok(last_index) } pub fn select(&mut self) -> Result<Vec<OsIpcSelectionResult>,MpscError> { let mut receivers: Vec<Option<mpsc::Receiver<MpscChannelMessage>>> = Vec::with_capacity(self.receivers.len()); let mut r_id: Option<u64> = None; let mut r_index: usize = 0; { let select = mpsc::Select::new(); // we *must* allocate exact capacity for this, because the Handles *can't move* let mut handles: Vec<mpsc::Handle<MpscChannelMessage>> = Vec::with_capacity(self.receivers.len()); for r in &self.receivers { let inner_r = mem::replace(&mut *r.receiver.borrow_mut(), None); receivers.push(inner_r); } for r in &receivers { unsafe { handles.push(select.handle(r.as_ref().unwrap())); handles.last_mut().unwrap().add(); } } let id = select.wait(); for (index,h) in handles.iter().enumerate() { if h.id() == id { r_index = index; r_id = Some(self.receiver_ids[index]); break; } } } // put the receivers back for (index,r) in self.receivers.iter().enumerate() { mem::replace(&mut *r.receiver.borrow_mut(), mem::replace(&mut receivers[index], None)); } match r_id { None => Err(MpscError::UnknownError), Some(r_id) => { let receivers = &mut self.receivers; match receivers[r_index].recv() { Ok((data, channels, shmems)) => Ok(vec![OsIpcSelectionResult::DataReceived(r_id, data, channels, shmems)]), Err(MpscError::ChannelClosedError) => { receivers.remove(r_index); self.receiver_ids.remove(r_index); Ok(vec![OsIpcSelectionResult::ChannelClosed(r_id)]) }, Err(err) => Err(err), } } } } } pub enum OsIpcSelectionResult { DataReceived(u64, Vec<u8>, Vec<OsOpaqueIpcChannel>, Vec<OsIpcSharedMemory>), ChannelClosed(u64), } impl OsIpcSelectionResult { pub fn unwrap(self) -> (u64, Vec<u8>, Vec<OsOpaqueIpcChannel>, Vec<OsIpcSharedMemory>) { match self { OsIpcSelectionResult::DataReceived(id, data, channels, shared_memory_regions) => { (id, data, channels, shared_memory_regions) } OsIpcSelectionResult::ChannelClosed(id) => { panic!("OsIpcSelectionResult::unwrap(): receiver ID {} was closed!", id) } } } } pub struct OsIpcOneShotServer { receiver: OsIpcReceiver, name: String, } impl OsIpcOneShotServer { pub fn new() -> Result<(OsIpcOneShotServer, String),MpscError> { let (sender, receiver) = try!(channel()); let name = Uuid::new_v4().to_string(); let record = ServerRecord::new(sender); ONE_SHOT_SERVERS.lock().unwrap().insert(name.clone(), record); Ok((OsIpcOneShotServer { receiver: receiver, name: name.clone(), },name.clone())) } pub fn accept(self) -> Result<(OsIpcReceiver, Vec<u8>, Vec<OsOpaqueIpcChannel>, Vec<OsIpcSharedMemory>),MpscError> { let record = ONE_SHOT_SERVERS.lock().unwrap().get(&self.name).unwrap().clone(); record.accept(); ONE_SHOT_SERVERS.lock().unwrap().remove(&self.name).unwrap(); let (data, channels, shmems) = try!(self.receiver.recv()); 
Ok((self.receiver, data, channels, shmems)) } } #[derive(PartialEq, Debug)] pub enum OsIpcChannel { Sender(OsIpcSender), Receiver(OsIpcReceiver), } #[derive(PartialEq, Debug)] pub struct OsOpaqueIpcChannel { channel: RefCell<Option<OsIpcChannel>>, } impl OsOpaqueIpcChannel { fn new(channel: OsIpcChannel) -> OsOpaqueIpcChannel { OsOpaqueIpcChannel { channel: RefCell::new(Some(channel)) } } pub fn to_receiver(&self) -> OsIpcReceiver { match self.channel.borrow_mut().take().unwrap() { OsIpcChannel::Sender(_) => panic!("Opaque channel is not a receiver!"), OsIpcChannel::Receiver(r) => r } } pub fn to_sender(&mut self) -> OsIpcSender { match self.channel.borrow_mut().take().unwrap() { OsIpcChannel::Sender(s) => s, OsIpcChannel::Receiver(_) => panic!("Opaque channel is not a sender!"), } } } pub struct OsIpcSharedMemory { ptr: *mut u8, length: usize, data: Arc<Vec<u8>>, } unsafe impl Send for OsIpcSharedMemory {} unsafe impl Sync for OsIpcSharedMemory {} impl Clone for OsIpcSharedMemory { fn clone(&self) -> OsIpcSharedMemory { OsIpcSharedMemory { ptr: self.ptr, length: self.length, data: self.data.clone(), } } } impl PartialEq for OsIpcSharedMemory { fn eq(&self, other: &OsIpcSharedMemory) -> bool { **self == **other } } impl Debug for OsIpcSharedMemory { fn fmt(&self, formatter: &mut Formatter) -> Result<(), fmt::Error> { (**self).fmt(formatter) } } impl Deref for OsIpcSharedMemory { type Target = [u8]; #[inline] fn deref(&self) -> &[u8] { if self.ptr.is_null() { panic!("attempted to access a consumed `OsIpcSharedMemory`") } unsafe { slice::from_raw_parts(self.ptr, self.length) } } } impl OsIpcSharedMemory { pub fn from_byte(byte: u8, length: usize) -> OsIpcSharedMemory { let mut v = Arc::new(vec![byte; length]); OsIpcSharedMemory { ptr: Arc::get_mut(&mut v).unwrap().as_mut_ptr(), length: length, data: v } } pub fn from_bytes(bytes: &[u8]) -> OsIpcSharedMemory { let mut v = Arc::new(bytes.to_vec()); OsIpcSharedMemory { ptr: Arc::get_mut(&mut v).unwrap().as_mut_ptr(), length: v.len(), data: v } } } #[derive(Debug, PartialEq)] pub enum MpscError { ChannelClosedError, BrokenPipeError, UnknownError, } impl MpscError { #[allow(dead_code)] pub fn channel_is_closed(&self) -> bool { *self == MpscError::ChannelClosedError } } impl From<MpscError> for bincode::Error { fn from(mpsc_error: MpscError) -> Self { Error::from(mpsc_error).into() } } impl From<MpscError> for Error { fn from(mpsc_error: MpscError) -> Error { match mpsc_error { MpscError::ChannelClosedError => { Error::new(ErrorKind::ConnectionReset, "MPSC channel sender closed") } MpscError::BrokenPipeError => { Error::new(ErrorKind::BrokenPipe, "MPSC channel receiver closed") } MpscError::UnknownError => Error::new(ErrorKind::Other, "Other MPSC channel error"), } } }
30.508557
118
0.571406
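The file above is an in-process, mpsc-backed stand-in for an IPC transport. A hypothetical round-trip test for it, placed inside the same module since the types are crate-internal, could look like this; the payload is arbitrary.

#[test]
fn roundtrip() {
    // Create a connected sender/receiver pair and push one message through.
    let (tx, rx) = channel().unwrap();
    tx.send(b"hello", vec![], vec![]).unwrap();

    let (data, channels, shmems) = rx.recv().unwrap();
    assert_eq!(data, b"hello".to_vec());
    assert!(channels.is_empty());
    assert!(shmems.is_empty());
}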
d6e169279054920d89573c8bd5edb2d6718d87e2
5,285
use crate::gimli_common::gimli; use std::io; pub struct GimliAeadEncryptIter{ state: [u32; 12], message_len: usize, message: Box<dyn Iterator<Item = Result<u8, io::Error>>>, output_buffer: Vec<u8>, complete: bool, last_blocksize: usize, } impl GimliAeadEncryptIter{ pub fn new(key: [u8; 32], nonce: [u8; 16], message_len: usize, message: Box<dyn Iterator<Item = Result<u8, io::Error>>>, mut associated_data: &[u8]) -> Self{ let mut state: [u32; 12] = [0; 12]; let state_8 = unsafe {std::slice::from_raw_parts_mut(state.as_mut_ptr() as *mut u8, 48)}; state_8[..16].clone_from_slice(&nonce); state_8[16..48].clone_from_slice(&key); gimli(&mut state); while associated_data.len() >= 16 { let state_8 = unsafe {std::slice::from_raw_parts_mut(state.as_mut_ptr() as *mut u8, 48)}; for i in 0..16 { state_8[i] ^= associated_data[i] } gimli(&mut state); associated_data = &associated_data[16 as usize..]; } let state_8 = unsafe {std::slice::from_raw_parts_mut(state.as_mut_ptr() as *mut u8, 48)}; for i in 0..associated_data.len() { state_8[i] ^= associated_data[i] } state_8[associated_data.len() as usize] ^= 1; state_8[47] ^= 1; gimli(&mut state); GimliAeadEncryptIter{ state: state, message_len: message_len, message: message, output_buffer: Vec::new(), complete: false, last_blocksize: 0 } } } impl Iterator for GimliAeadEncryptIter{ type Item = u8; fn next(&mut self) -> Option<Self::Item> { if self.output_buffer.len() > 0{ return Some(self.output_buffer.remove(0)) } let state_8 = unsafe {std::slice::from_raw_parts_mut(self.state.as_mut_ptr() as *mut u8, 48)}; if self.message_len >= 16 { for i in 0..16 { state_8[i] ^= self.message.next().unwrap().expect("Read error on input"); self.output_buffer.push(state_8[i]); self.message_len -=1; } gimli(&mut self.state); return Some(self.output_buffer.remove(0)) } if self.message_len < 16 && self.message_len > 0 { self.last_blocksize = self.message_len; for i in 0..self.message_len { let foo = self.message.next().unwrap().expect("Read error on input"); state_8[i] ^= foo; self.output_buffer.push(state_8[i]); self.message_len -=1; } return Some(self.output_buffer.remove(0)) } if self.message_len == 0 && self.complete == false{ state_8[self.last_blocksize as usize] ^= 1; state_8[47] ^= 1; gimli(&mut self.state); let state_8 = unsafe {std::slice::from_raw_parts_mut(self.state.as_mut_ptr() as *mut u8, 48)}; for i in 0..16 { self.output_buffer.push(state_8[i]); } self.complete = true; return Some(self.output_buffer.remove(0)) } return None } } pub fn gimli_aead_encrypt( mut message: impl Iterator<Item = Result<u8, io::Error>>, mut message_len: usize, mut associated_data: &[u8], nonce: &[u8; 16], key: &[u8; 32], ) -> Vec<u8> { let mut output: Vec<u8> = Vec::new(); let mut state: [u32; 12] = [0; 12]; let state_8 = unsafe {std::slice::from_raw_parts_mut(state.as_mut_ptr() as *mut u8, 48)}; // Init state with key and nonce plus first permute state_8[..16].clone_from_slice(nonce); state_8[16..48].clone_from_slice(key); gimli(&mut state); while associated_data.len() >= 16 { let state_8 = unsafe {std::slice::from_raw_parts_mut(state.as_mut_ptr() as *mut u8, 48)}; for i in 0..16 { state_8[i] ^= associated_data[i] } gimli(&mut state); associated_data = &associated_data[16 as usize..]; } let state_8 = unsafe {std::slice::from_raw_parts_mut(state.as_mut_ptr() as *mut u8, 48)}; for i in 0..associated_data.len() { state_8[i] ^= associated_data[i] } state_8[associated_data.len() as usize] ^= 1; state_8[47] ^= 1; gimli(&mut state); while message_len >= 16 { let state_8 = unsafe 
{std::slice::from_raw_parts_mut(state.as_mut_ptr() as *mut u8, 48)}; for i in 0..16 { state_8[i] ^= message.next().unwrap().expect("Read error on input"); output.push(state_8[i]); message_len -=1; } gimli(&mut state); } let state_8 = unsafe {std::slice::from_raw_parts_mut(state.as_mut_ptr() as *mut u8, 48)}; for i in 0..message_len { state_8[i] ^= message.next().unwrap().expect("Read error on input"); output.push(state_8[i]); } state_8[message_len as usize] ^= 1; state_8[47] ^= 1; gimli(&mut state); let state_8 = unsafe {std::slice::from_raw_parts_mut(state.as_mut_ptr() as *mut u8, 48)}; for i in 0..16 { output.push(state_8[i]); } return output; }
34.318182
106
0.5614
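For the Gimli AEAD code above, an illustrative call to gimli_aead_encrypt might look as follows; the all-zero key and nonce and the associated data are placeholders, and the function is assumed to be in scope. The returned Vec holds the ciphertext followed by the 16-byte tag.

use std::io;

fn encrypt_demo() -> Vec<u8> {
    let plaintext = b"attack at dawn";
    // The function consumes the message as an iterator of Result<u8, io::Error>.
    let message = plaintext.iter().map(|b| Ok::<u8, io::Error>(*b));
    gimli_aead_encrypt(message, plaintext.len(), b"header", &[0u8; 16], &[0u8; 32])
}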
146e0c712a33de3d3843f670365e0bc91212ada6
12,304
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. /*! The `Loan` module deals with borrows of *uniquely mutable* data. We say that data is uniquely mutable if the current activation (stack frame) controls the only mutable reference to the data. The most common way that this can occur is if the current activation owns the data being borrowed, but it can also occur with `&mut` pointers. The primary characteristic of uniquely mutable data is that, at any given time, there is at most one path that can be used to mutate it, and that path is only accessible from the top stack frame. Given that some data found at a path P is being borrowed to a borrowed pointer with mutability M and lifetime L, the job of the code in this module is to compute the set of *loans* that are necessary to ensure that (1) the data found at P outlives L and that (2) if M is mutable then the path P will not be modified directly or indirectly except through that pointer. A *loan* is the combination of a path P_L, a mutability M_L, and a lifetime L_L where: - The path P_L indicates what data has been lent. - The mutability M_L indicates the access rights on the data: - const: the data cannot be moved - immutable/mutable: the data cannot be moved or mutated - The lifetime L_L indicates the *scope* of the loan. FIXME #4730 --- much more needed, don't have time to write this all up now */ // ---------------------------------------------------------------------- // Loan(Ex, M, S) = Ls holds if ToAddr(Ex) will remain valid for the entirety // of the scope S, presuming that the returned set of loans `Ls` are honored. use core::prelude::*; use middle::borrowck::{Loan, bckerr, bckres, BorrowckCtxt, err_mutbl}; use middle::borrowck::{LoanKind, TotalFreeze, PartialFreeze, TotalTake, PartialTake, Immobile}; use middle::borrowck::{err_out_of_scope}; use middle::mem_categorization::{cat_arg, cat_binding, cat_discr, cat_comp}; use middle::mem_categorization::{cat_deref, cat_discr, cat_local, cat_self}; use middle::mem_categorization::{cat_special, cat_stack_upvar, cmt}; use middle::mem_categorization::{comp_field, comp_index, comp_variant}; use middle::mem_categorization::{gc_ptr, region_ptr}; use middle::ty; use util::common::indenter; use core::result::{Err, Ok}; use syntax::ast::m_imm; use syntax::ast; pub fn loan(bccx: @BorrowckCtxt, cmt: cmt, scope_region: ty::Region, loan_kind: LoanKind) -> bckres<~[Loan]> { let mut lc = LoanContext { bccx: bccx, scope_region: scope_region, loans: ~[] }; match lc.loan(cmt, loan_kind, true) { Err(ref e) => return Err((*e)), Ok(()) => {} } // FIXME #4945: Workaround for borrow check bug. Ok(copy lc.loans) } struct LoanContext { bccx: @BorrowckCtxt, // the region scope for which we must preserve the memory scope_region: ty::Region, // accumulated list of loans that will be required loans: ~[Loan] } pub impl LoanContext { fn tcx(&self) -> ty::ctxt { self.bccx.tcx } fn loan(&mut self, cmt: cmt, loan_kind: LoanKind, owns_lent_data: bool) -> bckres<()> { /*! * * The main routine. 
* * # Parameters * * - `cmt`: the categorization of the data being borrowed * - `req_mutbl`: the mutability of the borrowed pointer * that was created * - `owns_lent_data`: indicates whether `cmt` owns the * data that is being lent. See * discussion in `issue_loan()`. */ debug!("loan(%s, %?)", self.bccx.cmt_to_repr(cmt), loan_kind); let _i = indenter(); // see stable() above; should only be called when `cmt` is lendable if cmt.lp.is_none() { self.bccx.tcx.sess.span_bug( cmt.span, ~"loan() called with non-lendable value"); } match cmt.cat { cat_binding(_) | cat_rvalue | cat_special(_) => { // should never be loanable self.bccx.tcx.sess.span_bug( cmt.span, ~"rvalue with a non-none lp"); } cat_local(local_id) | cat_arg(local_id) | cat_self(local_id) => { // FIXME(#4903) let local_scope_id = *self.bccx.tcx.region_map.get(&local_id); self.issue_loan(cmt, ty::re_scope(local_scope_id), loan_kind, owns_lent_data) } cat_stack_upvar(cmt) => { self.loan(cmt, loan_kind, owns_lent_data) } cat_discr(base, _) => { self.loan(base, loan_kind, owns_lent_data) } cat_comp(cmt_base, comp_field(_, m)) | cat_comp(cmt_base, comp_index(_, m)) => { // For most components, the type of the embedded data is // stable. Therefore, the base structure need only be // const---unless the component must be immutable. In // that case, it must also be embedded in an immutable // location, or else the whole structure could be // overwritten and the component along with it. self.loan_stable_comp(cmt, cmt_base, loan_kind, m, owns_lent_data) } cat_comp(cmt_base, comp_tuple) | cat_comp(cmt_base, comp_anon_field) => { // As above. self.loan_stable_comp(cmt, cmt_base, loan_kind, m_imm, owns_lent_data) } cat_comp(cmt_base, comp_variant(enum_did)) => { // For enums, the memory is unstable if there are multiple // variants, because if the enum value is overwritten then // the memory changes type. if ty::enum_is_univariant(self.bccx.tcx, enum_did) { self.loan_stable_comp(cmt, cmt_base, loan_kind, m_imm, owns_lent_data) } else { self.loan_unstable_deref(cmt, cmt_base, loan_kind, owns_lent_data) } } cat_deref(cmt_base, _, uniq_ptr) => { // For unique pointers, the memory being pointed out is // unstable because if the unique pointer is overwritten // then the memory is freed. self.loan_unstable_deref(cmt, cmt_base, loan_kind, owns_lent_data) } cat_deref(cmt_base, _, region_ptr(ast::m_mutbl, region)) => { // Mutable data can be loaned out as immutable or const. We must // loan out the base as well as the main memory. For example, // if someone borrows `*b`, we want to borrow `b` as immutable // as well. do self.loan(cmt_base, TotalFreeze, false).chain |_| { self.issue_loan(cmt, region, loan_kind, owns_lent_data) } } cat_deref(_, _, unsafe_ptr) | cat_deref(_, _, gc_ptr(_)) | cat_deref(_, _, region_ptr(_, _)) => { // Aliased data is simply not lendable. self.bccx.tcx.sess.span_bug( cmt.span, ~"aliased ptr with a non-none lp"); } } } // A "stable component" is one where assigning the base of the // component cannot cause the component itself to change types. // Example: record fields. fn loan_stable_comp(&mut self, cmt: cmt, cmt_base: cmt, loan_kind: LoanKind, comp_mutbl: ast::mutability, owns_lent_data: bool) -> bckres<()> { let base_kind = match (comp_mutbl, loan_kind) { // Declared as "immutable" means: inherited mutability and // hence mutable iff parent is mutable. So propagate // mutability on up. 
(m_imm, TotalFreeze) | (m_imm, PartialFreeze) => PartialFreeze, (m_imm, TotalTake) | (m_imm, PartialTake) => PartialTake, // Declared as "mutable" means: always mutable no matter // what the mutability of the base is. So that means we // can weaken the condition on the base to PartialFreeze. // This implies that the user could freeze the base, but // that is ok since the even with an &T base, the mut // field will still be considered mutable. (_, TotalTake) | (_, PartialTake) | (_, TotalFreeze) | (_, PartialFreeze) => { PartialFreeze } // If we just need to guarantee the value won't be moved, // it doesn't matter what mutability the component was // declared with. (_, Immobile) => Immobile, }; do self.loan(cmt_base, base_kind, owns_lent_data).chain |_ok| { // can use static for the scope because the base // determines the lifetime, ultimately self.issue_loan(cmt, ty::re_static, loan_kind, owns_lent_data) } } // An "unstable deref" means a deref of a ptr/comp where, if the // base of the deref is assigned to, pointers into the result of the // deref would be invalidated. Examples: interior of variants, uniques. fn loan_unstable_deref(&mut self, cmt: cmt, cmt_base: cmt, loan_kind: LoanKind, owns_lent_data: bool) -> bckres<()> { // Variant components: the base must be immutable, because // if it is overwritten, the types of the embedded data // could change. do self.loan(cmt_base, PartialFreeze, owns_lent_data).chain |_| { // can use static, as in loan_stable_comp() self.issue_loan(cmt, ty::re_static, loan_kind, owns_lent_data) } } fn issue_loan(&mut self, +cmt: cmt, +scope_ub: ty::Region, +loan_kind: LoanKind, +owns_lent_data: bool) -> bckres<()> { // Subtle: the `scope_ub` is the maximal lifetime of `cmt`. // Therefore, if `cmt` owns the data being lent, then the // scope of the loan must be less than `scope_ub`, or else the // data would be freed while the loan is active. // // However, if `cmt` does *not* own the data being lent, then // it is ok if `cmt` goes out of scope during the loan. This // can occur when you have an `&mut` parameter that is being // reborrowed. if !owns_lent_data || self.bccx.is_subregion_of(self.scope_region, scope_ub) { if loan_kind.is_take() && !cmt.mutbl.is_mutable() { // We do not allow non-mutable data to be "taken" // under any circumstances. return Err(bckerr { cmt:cmt, code:err_mutbl(loan_kind) }); } self.loans.push(Loan { // Note: cmt.lp must be Some(_) because otherwise this // loan process does not apply at all. lp: cmt.lp.get(), cmt: cmt, kind: loan_kind }); return Ok(()); } else { // The loan being requested lives longer than the data // being loaned out! return Err(bckerr { cmt:cmt, code:err_out_of_scope(scope_ub, self.scope_region) }); } } }
40.20915
77
0.568839
1101861c09edd4cd95d7a8e6738c1cfefd2c3bc7
621
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// run-pass

#[repr(C)]
pub enum CPOption<T> {
    PSome(T),
}

fn main() {
    println!("sizeof CPOption<i32> {}", std::mem::size_of::<CPOption<i32>>());
}
31.05
76
0.702093
e9fd7e353ba61cf7116af0a85a9eebbe56aae7de
409
/// Global ID generator.
/// It is needed to uniquely identify a variable across scopes.
/// In the canonical jlox implementation it's not needed because all Java objects
/// have a globally unique ID provided by JVM.
use std::sync::atomic::{AtomicUsize, Ordering};

static ID_GENERATOR: AtomicUsize = AtomicUsize::new(0);

pub(crate) fn next_id() -> usize {
    ID_GENERATOR.fetch_add(1, Ordering::SeqCst)
}
34.083333
81
0.738386
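A tiny illustration of the generator above; it must live in the same crate, since next_id is pub(crate).

fn demo() {
    let a = next_id();
    let b = next_id();
    assert_ne!(a, b); // every call yields a fresh, process-wide unique ID
}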
d7391b3c85ad7d75ad74c44a1c16e1ffb3d1ebfb
1,215
#![feature(test)]

use edit_tree::{Apply, EditTree, ToLowerCharVec};

extern crate test;

use test::{black_box, Bencher};

static FORM: &'static [&'static str] = &[
    "ist",
    "Gelegenheiten",
    "aufgegangen",
    "letzte",
    "storniert",
    "gratulierte",
    "gelenkt",
    "abgesessen",
];

static LEMMA: &'static [&'static str] = &[
    "sein",
    "Gelegenheit",
    "gehen",
    "letzt",
    "stornieren",
    "gratulieren",
    "lenken",
    "sitzen",
];

#[bench]
pub fn bench_get_graph(b: &mut Bencher) {
    for (form, lemma) in FORM
        .iter()
        .zip(LEMMA)
        .map(|(form, lemma)| (form.to_lower_char_vec(), lemma.to_lower_char_vec()))
    {
        b.iter(|| {
            black_box(edit_tree::get_graph(&form, &lemma));
        });
    }
}

#[bench]
pub fn bench_apply_graph(b: &mut Bencher) {
    let trees = FORM
        .iter()
        .zip(LEMMA)
        .map(|(form, lemma)| {
            edit_tree::get_graph(&form.to_lower_char_vec(), &lemma.to_lower_char_vec())
        })
        .collect::<Vec<EditTree<char>>>();

    for (form, tree) in FORM.iter().zip(&trees) {
        b.iter(|| {
            black_box(tree.apply(&form.to_lower_char_vec()));
        });
    }
}
20.59322
87
0.542387
333fc5dc6e9629bae4b2ca85611bd37fc8944ab7
300
extern crate reqwest;

pub struct Endpoint {
    base_url: String
}

impl Endpoint {
    pub fn new(base_url: String) -> Endpoint {
        Endpoint { base_url }
    }

    pub fn all(&self) -> Result<String, reqwest::Error> {
        reqwest::get(&self.base_url)?.text()
    }
}
16.666667
57
0.563333
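Hypothetical use of the Endpoint wrapper above; the URL is a placeholder, and the call relies on the old blocking reqwest::get API that the struct itself uses.

fn demo() -> Result<(), reqwest::Error> {
    let api = Endpoint::new("https://example.com/items".to_string());
    // Fetches the base URL and returns the response body as a String.
    let body = api.all()?;
    println!("{}", body);
    Ok(())
}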
11e23403e43d3d91b91f9af02f74257f2aeb5ab5
8,039
#![crate_name = "wc"] #![feature(path_ext)] /* * This file is part of the uutils coreutils package. * * (c) Boden Garman <[email protected]> * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. */ extern crate getopts; extern crate libc; use getopts::{Matches, Options}; use std::ascii::AsciiExt; use std::fs::{File, PathExt}; use std::io::{stdin, BufRead, BufReader, Read, Write}; use std::path::Path; use std::result::Result as StdResult; use std::str::from_utf8; #[path = "../common/util.rs"] #[macro_use] mod util; struct Settings { show_bytes: bool, show_chars: bool, show_lines: bool, show_words: bool, show_max_line_length: bool, } impl Settings { fn new(matches: &Matches) -> Settings { let settings = Settings { show_bytes: matches.opt_present("bytes"), show_chars: matches.opt_present("chars"), show_lines: matches.opt_present("lines"), show_words: matches.opt_present("words"), show_max_line_length: matches.opt_present("L"), }; if settings.show_bytes || settings.show_chars || settings.show_lines || settings.show_words || settings.show_max_line_length { return settings; } Settings { show_bytes: true, show_chars: false, show_lines: true, show_words: true, show_max_line_length: false, } } } struct Result { title: String, bytes: usize, chars: usize, lines: usize, words: usize, max_line_length: usize, } static NAME: &'static str = "wc"; static VERSION: &'static str = "1.0.0"; pub fn uumain(args: Vec<String>) -> i32 { let mut opts = Options::new(); opts.optflag("c", "bytes", "print the byte counts"); opts.optflag("m", "chars", "print the character counts"); opts.optflag("l", "lines", "print the newline counts"); opts.optflag("L", "max-line-length", "print the length of the longest line"); opts.optflag("w", "words", "print the word counts"); opts.optflag("h", "help", "display this help and exit"); opts.optflag("V", "version", "output version information and exit"); let mut matches = match opts.parse(&args[1..]) { Ok(m) => m, Err(f) => crash!(1, "Invalid options\n{}", f) }; if matches.opt_present("help") { println!("{} {}", NAME, VERSION); println!(""); println!("Usage:"); println!(" {0} [OPTION]... 
[FILE]...", NAME); println!(""); println!("{}", opts.usage("Print newline, word and byte counts for each FILE")); println!("With no FILE, or when FILE is -, read standard input."); return 0; } if matches.opt_present("version") { println!("{} {}", NAME, VERSION); return 0; } if matches.free.is_empty() { matches.free.push("-".to_string()); } let settings = Settings::new(&matches); match wc(matches.free, &settings) { Ok(()) => ( /* pass */ ), Err(e) => return e } 0 } const CR: u8 = '\r' as u8; const LF: u8 = '\n' as u8; const SPACE: u8 = ' ' as u8; const TAB: u8 = '\t' as u8; const SYN: u8 = 0x16 as u8; const FF: u8 = 0x0C as u8; #[inline(always)] fn is_word_seperator(byte: u8) -> bool { byte == SPACE || byte == TAB || byte == CR || byte == SYN || byte == FF } fn wc(files: Vec<String>, settings: &Settings) -> StdResult<(), i32> { let mut total_line_count: usize = 0; let mut total_word_count: usize = 0; let mut total_char_count: usize = 0; let mut total_byte_count: usize = 0; let mut total_longest_line_length: usize = 0; let mut results = vec!(); let mut max_width: usize = 0; for path in files.iter() { let mut reader = try!(open(&path[..])); let mut line_count: usize = 0; let mut word_count: usize = 0; let mut byte_count: usize = 0; let mut char_count: usize = 0; let mut longest_line_length: usize = 0; let mut raw_line = Vec::new(); // reading from a TTY seems to raise a condition on, rather than return Some(0) like a file. // hence the option wrapped in a result here while match reader.read_until(LF, &mut raw_line) { Ok(n) if n > 0 => true, Err(ref e) if raw_line.len() > 0 => { show_warning!("Error while reading {}: {}", path, e); raw_line.len() > 0 }, _ => false, } { // GNU 'wc' only counts lines that end in LF as lines if *raw_line.last().unwrap() == LF { line_count += 1; } byte_count += raw_line.len(); // try and convert the bytes to UTF-8 first let current_char_count; match from_utf8(&raw_line[..]) { Ok(line) => { word_count += line.split_whitespace().count(); current_char_count = line.chars().count(); }, Err(..) 
=> { word_count += raw_line.split(|&x| is_word_seperator(x)).count(); current_char_count = raw_line.iter().filter(|c|c.is_ascii()).count() } } char_count += current_char_count; if current_char_count > longest_line_length { // we subtract one here because `line.len()` includes the LF // matches GNU 'wc' behaviour longest_line_length = current_char_count - 1; } raw_line.truncate(0); } results.push(Result { title: path.to_string(), bytes: byte_count, chars: char_count, lines: line_count, words: word_count, max_line_length: longest_line_length, }); total_line_count += line_count; total_word_count += word_count; total_char_count += char_count; total_byte_count += byte_count; if longest_line_length > total_longest_line_length { total_longest_line_length = longest_line_length; } // used for formatting max_width = total_byte_count.to_string().len() + 1; } for result in results.iter() { print_stats(settings, &result, max_width); } if files.len() > 1 { let result = Result { title: "total".to_string(), bytes: total_byte_count, chars: total_char_count, lines: total_line_count, words: total_word_count, max_line_length: total_longest_line_length, }; print_stats(settings, &result, max_width); } Ok(()) } fn print_stats(settings: &Settings, result: &Result, max_width: usize) { if settings.show_lines { print!("{:1$}", result.lines, max_width); } if settings.show_words { print!("{:1$}", result.words, max_width); } if settings.show_bytes { print!("{:1$}", result.bytes, max_width); } if settings.show_chars { print!("{:1$}", result.chars, max_width); } if settings.show_max_line_length { print!("{:1$}", result.max_line_length, max_width); } if result.title != "-" { println!(" {}", result.title); } else { println!(""); } } fn open(path: &str) -> StdResult<BufReader<Box<Read+'static>>, i32> { if "-" == path { let reader = Box::new(stdin()) as Box<Read>; return Ok(BufReader::new(reader)); } let fpath = Path::new(path); if fpath.is_dir() { show_info!("{}: is a directory", path); } match File::open(&fpath) { Ok(fd) => { let reader = Box::new(fd) as Box<Read>; Ok(BufReader::new(reader)) } Err(e) => { show_error!("wc: {}: {}", path, e); Err(1) } } }
28.608541
100
0.550939
e90835dd89d709984d207b6c97237157afab2dd8
1,254
struct Solution;

#[allow(clippy::wrong_self_convention)]
impl Solution {
    fn to_goat_latin(s: String) -> String {
        let words: Vec<&str> = s.split_whitespace().map(|s| s.chars().as_str()).collect();
        let mut res: String = "".to_string();
        let mut n = 1;
        for word in words {
            if n > 1 {
                res += " ";
            }
            match &word[0..1] {
                "a" | "e" | "i" | "o" | "u" | "A" | "E" | "I" | "O" | "U" => {
                    res += word;
                }
                _ => {
                    res += &word[1..];
                    res += &word[0..1];
                }
            }
            res += "ma";
            for _ in 0..n {
                res += "a";
            }
            n += 1;
        }
        res
    }
}

#[test]
fn test() {
    let s = "I speak Goat Latin".to_string();
    let res = "Imaa peaksmaaa oatGmaaaa atinLmaaaaa".to_string();
    assert_eq!(Solution::to_goat_latin(s), res);
    let s = "The quick brown fox jumped over the lazy dog".to_string();
    let res = "heTmaa uickqmaaa rownbmaaaa oxfmaaaaa umpedjmaaaaaa overmaaaaaaa hetmaaaaaaaa azylmaaaaaaaaa ogdmaaaaaaaaaa".to_string();
    assert_eq!(Solution::to_goat_latin(s), res);
}
30.585366
136
0.460925
16de45e4d64804bd15d83ead5f0716b92ce82b1b
6,528
use swc_common::{Spanned, DUMMY_SP}; use swc_ecma_ast::*; use swc_ecma_transforms_base::perf::Check; use swc_ecma_transforms_macros::fast_path; use swc_ecma_utils::UsageFinder; use swc_ecma_visit::noop_visit_type; use swc_ecma_visit::Node; use swc_ecma_visit::Visit; use swc_ecma_visit::VisitWith; use swc_ecma_visit::{noop_fold_type, Fold, FoldWith}; pub fn block_scoped_functions() -> impl Fold { BlockScopedFns } #[derive(Clone, Copy)] struct BlockScopedFns; #[fast_path(BlockScopedFnFinder)] impl Fold for BlockScopedFns { noop_fold_type!(); fn fold_stmts(&mut self, items: Vec<Stmt>) -> Vec<Stmt> { let mut stmts = Vec::with_capacity(items.len()); let mut extra_stmts = Vec::with_capacity(items.len()); for stmt in items { if let Stmt::Expr(ExprStmt { ref expr, .. }) = stmt { if let Expr::Lit(Lit::Str(..)) = &**expr { stmts.push(stmt); continue; } } // This is to preserve function Class() if stmt.span().is_dummy() { extra_stmts.push(stmt) } else { match stmt { Stmt::Decl(Decl::Fn(decl)) => { if UsageFinder::find(&decl.ident, &decl.function) { extra_stmts.push(Stmt::Decl(Decl::Fn(decl))); continue; } stmts.push(Stmt::Decl(Decl::Var(VarDecl { span: DUMMY_SP, kind: VarDeclKind::Let, decls: vec![VarDeclarator { span: DUMMY_SP, name: Pat::Ident(decl.ident.clone().into()), init: Some(Box::new(Expr::Fn(FnExpr { ident: Some(decl.ident), function: decl.function, }))), definite: false, }], declare: false, }))) } _ => extra_stmts.push(stmt.fold_children_with(self)), } } } stmts.append(&mut extra_stmts); stmts } } #[derive(Default)] struct BlockScopedFnFinder { found: bool, } impl Visit for BlockScopedFnFinder { noop_visit_type!(); fn visit_stmts(&mut self, stmts: &[Stmt], _: &dyn Node) { for n in stmts { n.visit_with(&Invalid { span: DUMMY_SP }, self); } self.found |= stmts.iter().any(|stmt| match stmt { Stmt::Decl(Decl::Fn(..)) => true, _ => false, }); } } impl Check for BlockScopedFnFinder { fn should_handle(&self) -> bool { self.found } } #[cfg(test)] mod tests { use super::*; use swc_ecma_transforms_testing::test; test!( ::swc_ecma_parser::Syntax::default(), |_| BlockScopedFns, hoisting, r#" { function fn1() { fn2(); } fn1(); function fn2() { } } "#, r#" { let fn1 = function fn1() { fn2(); }; let fn2 = function fn2() { }; fn1(); } "# ); test!( ::swc_ecma_parser::Syntax::default(), |_| BlockScopedFns, basic, r#"{ function name (n) { return n; } } name("Steve");"#, r#"{ let name = function name(n) { return n; }; } name("Steve"); "# ); test!( ::swc_ecma_parser::Syntax::default(), |_| BlockScopedFns, issue_271, " function foo(scope) { scope.startOperation = startOperation; function startOperation(operation) { scope.agentOperation = operation; } } ", " function foo(scope) { let startOperation = function startOperation(operation) { scope.agentOperation = operation; }; scope.startOperation = startOperation; } " ); test!( ::swc_ecma_parser::Syntax::default(), |_| BlockScopedFns, issue_288_1, "function components_Link_extends() { components_Link_extends = Object.assign || function \ (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for \ (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { \ target[key] = source[key]; } } } return target; }; return \ components_Link_extends.apply(this, arguments); } ", "function components_Link_extends() { components_Link_extends = Object.assign || function \ (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for \ (var key in source) { if (Object.prototype.hasOwnProperty.call(source, 
key)) { \ target[key] = source[key]; } } } return target; }; return \ components_Link_extends.apply(this, arguments); } " ); test!( ::swc_ecma_parser::Syntax::default(), |_| BlockScopedFns, issue_288_2, "function _extends() { module.exports = _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; return _extends.apply(this, arguments); } ", "function _extends() { module.exports = _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; return _extends.apply(this, arguments); } " ); test!( ::swc_ecma_parser::Syntax::default(), |_| BlockScopedFns, hoisting_directives, "function foo() { 'use strict'; function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } }", "function foo() { 'use strict'; let _interopRequireDefault = function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }; } " ); }
25.802372
99
0.514246
b971edf17944859f2e0cfd3e3d8f0d2b7a3fd822
24,901
// Copyright 2015-2016 Brian Smith. // // Permission to use, copy, modify, and/or distribute this software for any // purpose with or without fee is hereby granted, provided that the above // copyright notice and this permission notice appear in all copies. // // THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES // WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY // SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES // WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION // OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN // CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. //! Authenticated Encryption with Associated Data (AEAD). //! //! See [Authenticated encryption: relations among notions and analysis of the //! generic composition paradigm][AEAD] for an introduction to the concept of //! AEADs. //! //! [AEAD]: http://www-cse.ucsd.edu/~mihir/papers/oem.html //! [`crypto.cipher.AEAD`]: https://golang.org/pkg/crypto/cipher/#AEAD use self::block::{Block, BLOCK_LEN}; use crate::{constant_time, cpu, error, hkdf, polyfill}; use core::ops::RangeFrom; pub use self::{ aes_gcm::{AES_128_GCM, AES_256_GCM}, chacha20_poly1305::CHACHA20_POLY1305, nonce::{Nonce, NONCE_LEN}, }; /// A sequences of unique nonces. /// /// A given `NonceSequence` must never return the same `Nonce` twice from /// `advance()`. /// /// A simple counter is a reasonable (but probably not ideal) `NonceSequence`. /// /// Intentionally not `Clone` or `Copy` since cloning would allow duplication /// of the sequence. pub trait NonceSequence { /// Returns the next nonce in the sequence. /// /// This may fail if "too many" nonces have been requested, where how many /// is too many is up to the implementation of `NonceSequence`. An /// implementation may that enforce a maximum number of records are /// sent/received under a key this way. Once `advance()` fails, it must /// fail for all subsequent calls. fn advance(&mut self) -> Result<Nonce, error::Unspecified>; /// Returns the current nonce in the sequence, used for partial decrypting fn current(&self) -> Result<Nonce, error::Unspecified>; } /// An AEAD key bound to a nonce sequence. pub trait BoundKey<N: NonceSequence>: core::fmt::Debug { /// Constructs a new key from the given `UnboundKey` and `NonceSequence`. fn new(key: UnboundKey, nonce_sequence: N) -> Self; /// The key's AEAD algorithm. fn algorithm(&self) -> &'static Algorithm; } /// An AEAD key for authenticating and decrypting ("opening"), bound to a nonce /// sequence. /// /// Intentionally not `Clone` or `Copy` since cloning would allow duplication /// of the nonce sequence. pub struct OpeningKey<N: NonceSequence> { key: UnboundKey, nonce_sequence: N, } impl<N: NonceSequence> BoundKey<N> for OpeningKey<N> { fn new(key: UnboundKey, nonce_sequence: N) -> Self { Self { key, nonce_sequence, } } #[inline] fn algorithm(&self) -> &'static Algorithm { self.key.algorithm } } impl<N: NonceSequence> core::fmt::Debug for OpeningKey<N> { fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> { f.debug_struct("OpeningKey") .field("algorithm", &self.algorithm()) .finish() } } impl<N: NonceSequence> OpeningKey<N> { /// Authenticates and decrypts (“opens”) data in place. /// /// `aad` is the additional authenticated data (AAD), if any. /// /// On input, `in_out` must be the ciphertext followed by the tag. 
When /// `open_in_place()` returns `Ok(plaintext)`, the input ciphertext /// has been overwritten by the plaintext; `plaintext` will refer to the /// plaintext without the tag. /// /// When `open_in_place()` returns `Err(..)`, `in_out` may have been /// overwritten in an unspecified way. #[inline] pub fn open_in_place<'in_out, A>( &mut self, aad: Aad<A>, in_out: &'in_out mut [u8], ) -> Result<&'in_out mut [u8], error::Unspecified> where A: AsRef<[u8]>, { self.open_within(aad, in_out, 0..) } /// Authenticates and decrypts (“opens”) data in place, with a shift. /// /// `aad` is the additional authenticated data (AAD), if any. /// /// On input, `in_out[ciphertext_and_tag]` must be the ciphertext followed /// by the tag. When `open_within()` returns `Ok(plaintext)`, the plaintext /// will be at `in_out[0..plaintext.len()]`. In other words, the following /// two code fragments are equivalent for valid values of /// `ciphertext_and_tag`, except `open_within` will often be more efficient: /// /// /// ```skip /// let plaintext = key.open_within(aad, in_out, cipertext_and_tag)?; /// ``` /// /// ```skip /// let ciphertext_and_tag_len = in_out[ciphertext_and_tag].len(); /// in_out.copy_within(ciphertext_and_tag, 0); /// let plaintext = key.open_in_place(aad, &mut in_out[..ciphertext_and_tag_len])?; /// ``` /// /// Similarly, `key.open_within(aad, in_out, 0..)` is equivalent to /// `key.open_in_place(aad, in_out)`. /// /// When `open_in_place()` returns `Err(..)`, `in_out` may have been /// overwritten in an unspecified way. /// /// The shifting feature is useful in the case where multiple packets are /// being reassembled in place. Consider this example where the peer has /// sent the message “Split stream reassembled in place” split into /// three sealed packets: /// /// ```ascii-art /// Packet 1 Packet 2 Packet 3 /// Input: [Header][Ciphertext][Tag][Header][Ciphertext][Tag][Header][Ciphertext][Tag] /// | +--------------+ | /// +------+ +-----+ +----------------------------------+ /// v v v /// Output: [Plaintext][Plaintext][Plaintext] /// “Split stream reassembled in place” /// ``` /// /// This reassembly be accomplished with three calls to `open_within()`. #[inline] pub fn open_within<'in_out, A>( &mut self, aad: Aad<A>, in_out: &'in_out mut [u8], ciphertext_and_tag: RangeFrom<usize>, ) -> Result<&'in_out mut [u8], error::Unspecified> where A: AsRef<[u8]>, { open_within_( &self.key, self.nonce_sequence.advance()?, aad, in_out, ciphertext_and_tag, ) } /// Like [`OpeningKey::open_in_place()`] /// This only partially decrypts the ciphertext, ignores the tag, and does not validate /// Useful only for speculation on partial messages. /// /// This DOES NOT advance the nonce sequence #[inline] pub fn open_in_place_partial<'in_out, A>( &self, aad: Aad<A>, in_out: &'in_out mut [u8], total_text_size: usize, ) -> Result<&'in_out mut [u8], error::Unspecified> where A: AsRef<[u8]>, { self.open_within_partial(aad, in_out, total_text_size, 0..) 
} /// Like [`OpeningKey::open_within()`] /// This only partially decrypts the ciphertext, ignores the tag, and does not validate /// Useful only for speculation on partial messages /// /// This DOES NOT advance the nonce sequence #[inline] pub fn open_within_partial<'in_out, A>( &self, aad: Aad<A>, in_out: &'in_out mut [u8], total_text_size: usize, ciphertext_and_tag: RangeFrom<usize>, ) -> Result<&'in_out mut [u8], error::Unspecified> where A: AsRef<[u8]>, { let nonce = self.nonce_sequence.current()?; open_within_partial_(&self.key, nonce, aad, in_out, total_text_size, ciphertext_and_tag) } } /// Partially opens a message without validating it or decrypting the entire message #[inline] fn open_within_partial_<'in_out, A: AsRef<[u8]>>( key: &UnboundKey, nonce: Nonce, Aad(aad): Aad<A>, in_out: &'in_out mut [u8], total_text_size: usize, ciphertext_and_tag: RangeFrom<usize>, ) -> Result<&'in_out mut [u8], error::Unspecified> { fn open_within<'in_out>( key: &UnboundKey, nonce: Nonce, aad: Aad<&[u8]>, in_out: &'in_out mut [u8], total_text_size: usize, ciphertext_and_tag: RangeFrom<usize>, ) -> Result<&'in_out mut [u8], error::Unspecified> { let in_prefix_len = ciphertext_and_tag.start; let useful_text_len = in_out .len() .checked_sub(in_prefix_len) .ok_or(error::Unspecified)?; if useful_text_len > total_text_size { return Err(error::Unspecified); } let total_ciphertext_len = total_text_size .checked_sub(TAG_LEN) .ok_or(error::Unspecified)?; let ciphertext_len = if useful_text_len > total_ciphertext_len { total_ciphertext_len } else { useful_text_len }; check_per_nonce_max_bytes(key.algorithm, ciphertext_len)?; let (in_out, _) = in_out.split_at_mut(in_prefix_len + ciphertext_len); // INTENTIONALLY INGORE THIS RIGHT HERE let _ = (key.algorithm.open)( &key.inner, nonce, aad, in_prefix_len, in_out, key.cpu_features, ); // `ciphertext_len` is also the plaintext length. Ok(&mut in_out[..ciphertext_len]) } open_within( key, nonce, Aad::from(aad.as_ref()), in_out, total_text_size, ciphertext_and_tag, ) } #[inline] fn open_within_<'in_out, A: AsRef<[u8]>>( key: &UnboundKey, nonce: Nonce, Aad(aad): Aad<A>, in_out: &'in_out mut [u8], ciphertext_and_tag: RangeFrom<usize>, ) -> Result<&'in_out mut [u8], error::Unspecified> { fn open_within<'in_out>( key: &UnboundKey, nonce: Nonce, aad: Aad<&[u8]>, in_out: &'in_out mut [u8], ciphertext_and_tag: RangeFrom<usize>, ) -> Result<&'in_out mut [u8], error::Unspecified> { let in_prefix_len = ciphertext_and_tag.start; let ciphertext_and_tag_len = in_out .len() .checked_sub(in_prefix_len) .ok_or(error::Unspecified)?; let ciphertext_len = ciphertext_and_tag_len .checked_sub(TAG_LEN) .ok_or(error::Unspecified)?; check_per_nonce_max_bytes(key.algorithm, ciphertext_len)?; let (in_out, received_tag) = in_out.split_at_mut(in_prefix_len + ciphertext_len); let Tag(calculated_tag) = (key.algorithm.open)( &key.inner, nonce, aad, in_prefix_len, in_out, key.cpu_features, ); if constant_time::verify_slices_are_equal(calculated_tag.as_ref(), received_tag).is_err() { // Zero out the plaintext so that it isn't accidentally leaked or used // after verification fails. It would be safest if we could check the // tag before decrypting, but some `open` implementations interleave // authentication with decryption for performance. for b in &mut in_out[..ciphertext_len] { *b = 0; } return Err(error::Unspecified); } // `ciphertext_len` is also the plaintext length. 
Ok(&mut in_out[..ciphertext_len]) } open_within( key, nonce, Aad::from(aad.as_ref()), in_out, ciphertext_and_tag, ) } /// An AEAD key for encrypting and signing ("sealing"), bound to a nonce /// sequence. /// /// Intentionally not `Clone` or `Copy` since cloning would allow duplication /// of the nonce sequence. pub struct SealingKey<N: NonceSequence> { key: UnboundKey, nonce_sequence: N, } impl<N: NonceSequence> BoundKey<N> for SealingKey<N> { fn new(key: UnboundKey, nonce_sequence: N) -> Self { Self { key, nonce_sequence, } } #[inline] fn algorithm(&self) -> &'static Algorithm { self.key.algorithm } } impl<N: NonceSequence> core::fmt::Debug for SealingKey<N> { fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> { f.debug_struct("SealingKey") .field("algorithm", &self.algorithm()) .finish() } } impl<N: NonceSequence> SealingKey<N> { /// Deprecated. Renamed to `seal_in_place_append_tag()`. #[deprecated(note = "Renamed to `seal_in_place_append_tag`.")] #[inline] pub fn seal_in_place<A, InOut>( &mut self, aad: Aad<A>, in_out: &mut InOut, ) -> Result<(), error::Unspecified> where A: AsRef<[u8]>, InOut: AsMut<[u8]> + for<'in_out> Extend<&'in_out u8>, { self.seal_in_place_append_tag(aad, in_out) } /// Encrypts and signs (“seals”) data in place, appending the tag to the /// resulting ciphertext. /// /// `key.seal_in_place_append_tag(aad, in_out)` is equivalent to: /// /// ```skip /// key.seal_in_place_separate_tag(aad, in_out.as_mut()) /// .map(|tag| in_out.extend(tag.as_ref())) /// ``` #[inline] pub fn seal_in_place_append_tag<A, InOut>( &mut self, aad: Aad<A>, in_out: &mut InOut, ) -> Result<(), error::Unspecified> where A: AsRef<[u8]>, InOut: AsMut<[u8]> + for<'in_out> Extend<&'in_out u8>, { self.seal_in_place_separate_tag(aad, in_out.as_mut()) .map(|tag| in_out.extend(tag.as_ref())) } /// Encrypts and signs (“seals”) data in place. /// /// `aad` is the additional authenticated data (AAD), if any. This is /// authenticated but not encrypted. The type `A` could be a byte slice /// `&[u8]`, a byte array `[u8; N]` for some constant `N`, `Vec<u8>`, etc. /// If there is no AAD then use `Aad::empty()`. /// /// The plaintext is given as the input value of `in_out`. `seal_in_place()` /// will overwrite the plaintext with the ciphertext and return the tag. /// For most protocols, the caller must append the tag to the ciphertext. /// The tag will be `self.algorithm.tag_len()` bytes long. #[inline] pub fn seal_in_place_separate_tag<A>( &mut self, aad: Aad<A>, in_out: &mut [u8], ) -> Result<Tag, error::Unspecified> where A: AsRef<[u8]>, { seal_in_place_separate_tag_( &self.key, self.nonce_sequence.advance()?, Aad::from(aad.as_ref()), in_out, ) } } #[inline] fn seal_in_place_separate_tag_( key: &UnboundKey, nonce: Nonce, aad: Aad<&[u8]>, in_out: &mut [u8], ) -> Result<Tag, error::Unspecified> { check_per_nonce_max_bytes(key.algorithm, in_out.len())?; Ok((key.algorithm.seal)( &key.inner, nonce, aad, in_out, key.cpu_features, )) } /// The additionally authenticated data (AAD) for an opening or sealing /// operation. This data is authenticated but is **not** encrypted. /// /// The type `A` could be a byte slice `&[u8]`, a byte array `[u8; N]` /// for some constant `N`, `Vec<u8>`, etc. pub struct Aad<A: AsRef<[u8]>>(A); impl<A: AsRef<[u8]>> Aad<A> { /// Construct the `Aad` from the given bytes. 
#[inline] pub fn from(aad: A) -> Self { Aad(aad) } } impl<A> AsRef<[u8]> for Aad<A> where A: AsRef<[u8]>, { fn as_ref(&self) -> &[u8] { self.0.as_ref() } } impl Aad<[u8; 0]> { /// Construct an empty `Aad`. pub fn empty() -> Self { Self::from([]) } } /// An AEAD key without a designated role or nonce sequence. pub struct UnboundKey { inner: KeyInner, algorithm: &'static Algorithm, cpu_features: cpu::Features, } impl core::fmt::Debug for UnboundKey { fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> { f.debug_struct("UnboundKey") .field("algorithm", &self.algorithm) .finish() } } #[allow(variant_size_differences)] enum KeyInner { AesGcm(aes_gcm::Key), ChaCha20Poly1305(chacha20_poly1305::Key), } impl UnboundKey { /// Constructs an `UnboundKey`. /// /// Fails if `key_bytes.len() != ` algorithm.key_len()`. pub fn new( algorithm: &'static Algorithm, key_bytes: &[u8], ) -> Result<Self, error::Unspecified> { let cpu_features = cpu::features(); Ok(Self { inner: (algorithm.init)(key_bytes, cpu_features)?, algorithm, cpu_features, }) } /// The key's AEAD algorithm. #[inline] pub fn algorithm(&self) -> &'static Algorithm { self.algorithm } } impl From<hkdf::Okm<'_, &'static Algorithm>> for UnboundKey { fn from(okm: hkdf::Okm<&'static Algorithm>) -> Self { let mut key_bytes = [0; MAX_KEY_LEN]; let key_bytes = &mut key_bytes[..okm.len().key_len]; let algorithm = *okm.len(); okm.fill(key_bytes).unwrap(); Self::new(algorithm, key_bytes).unwrap() } } impl hkdf::KeyType for &'static Algorithm { #[inline] fn len(&self) -> usize { self.key_len() } } /// Immutable keys for use in situations where `OpeningKey`/`SealingKey` and /// `NonceSequence` cannot reasonably be used. /// /// Prefer to use `OpeningKey`/`SealingKey` and `NonceSequence` when practical. pub struct LessSafeKey { key: UnboundKey, } impl LessSafeKey { /// Constructs a `LessSafeKey` from an `UnboundKey`. pub fn new(key: UnboundKey) -> Self { Self { key } } /// Like [`OpeningKey::open_in_place()`], except it accepts an arbitrary nonce. /// /// `nonce` must be unique for every use of the key to open data. #[inline] pub fn open_in_place<'in_out, A>( &self, nonce: Nonce, aad: Aad<A>, in_out: &'in_out mut [u8], ) -> Result<&'in_out mut [u8], error::Unspecified> where A: AsRef<[u8]>, { self.open_within(nonce, aad, in_out, 0..) } /// Like [`OpeningKey::open_within()`], except it accepts an arbitrary nonce. /// /// `nonce` must be unique for every use of the key to open data. #[inline] pub fn open_within<'in_out, A>( &self, nonce: Nonce, aad: Aad<A>, in_out: &'in_out mut [u8], ciphertext_and_tag: RangeFrom<usize>, ) -> Result<&'in_out mut [u8], error::Unspecified> where A: AsRef<[u8]>, { open_within_(&self.key, nonce, aad, in_out, ciphertext_and_tag) } /// Like [`OpeningKey::open_in_place()`], except it accepts an arbitrary nonce. /// This only partially decrypts the ciphertext, ignores the tag, and does not validate /// Useful only for speculation on partial messages /// /// `nonce` must be unique for every use of the key to open data. #[inline] pub fn open_in_place_partial<'in_out, A>( &self, nonce: Nonce, aad: Aad<A>, in_out: &'in_out mut [u8], total_text_size: usize, ) -> Result<&'in_out mut [u8], error::Unspecified> where A: AsRef<[u8]>, { self.open_within_partial(nonce, aad, in_out, total_text_size, 0..) } /// Like [`OpeningKey::open_within()`], except it accepts an arbitrary nonce. 
/// This only partially decrypts the ciphertext, ignores the tag, and does not validate /// Useful only for speculation on partial messages /// /// `nonce` must be unique for every use of the key to open data. #[inline] pub fn open_within_partial<'in_out, A>( &self, nonce: Nonce, aad: Aad<A>, in_out: &'in_out mut [u8], total_text_size: usize, ciphertext_and_tag: RangeFrom<usize>, ) -> Result<&'in_out mut [u8], error::Unspecified> where A: AsRef<[u8]>, { open_within_partial_(&self.key, nonce, aad, in_out, total_text_size, ciphertext_and_tag) } /// Deprecated. Renamed to `seal_in_place_append_tag()`. #[deprecated(note = "Renamed to `seal_in_place_append_tag`.")] #[inline] pub fn seal_in_place<A, InOut>( &self, nonce: Nonce, aad: Aad<A>, in_out: &mut InOut, ) -> Result<(), error::Unspecified> where A: AsRef<[u8]>, InOut: AsMut<[u8]> + for<'in_out> Extend<&'in_out u8>, { self.seal_in_place_append_tag(nonce, aad, in_out) } /// Like [`SealingKey::seal_in_place_append_tag()`], except it accepts an /// arbitrary nonce. /// /// `nonce` must be unique for every use of the key to seal data. #[inline] pub fn seal_in_place_append_tag<A, InOut>( &self, nonce: Nonce, aad: Aad<A>, in_out: &mut InOut, ) -> Result<(), error::Unspecified> where A: AsRef<[u8]>, InOut: AsMut<[u8]> + for<'in_out> Extend<&'in_out u8>, { self.seal_in_place_separate_tag(nonce, aad, in_out.as_mut()) .map(|tag| in_out.extend(tag.as_ref())) } /// Like `SealingKey::seal_in_place_separate_tag()`, except it accepts an /// arbitrary nonce. /// /// `nonce` must be unique for every use of the key to seal data. #[inline] pub fn seal_in_place_separate_tag<A>( &self, nonce: Nonce, aad: Aad<A>, in_out: &mut [u8], ) -> Result<Tag, error::Unspecified> where A: AsRef<[u8]>, { seal_in_place_separate_tag_(&self.key, nonce, Aad::from(aad.as_ref()), in_out) } /// The key's AEAD algorithm. #[inline] pub fn algorithm(&self) -> &'static Algorithm { &self.key.algorithm } } impl core::fmt::Debug for LessSafeKey { fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> { f.debug_struct("LessSafeKey") .field("algorithm", self.algorithm()) .finish() } } /// An AEAD Algorithm. pub struct Algorithm { init: fn(key: &[u8], cpu_features: cpu::Features) -> Result<KeyInner, error::Unspecified>, seal: fn( key: &KeyInner, nonce: Nonce, aad: Aad<&[u8]>, in_out: &mut [u8], cpu_features: cpu::Features, ) -> Tag, open: fn( key: &KeyInner, nonce: Nonce, aad: Aad<&[u8]>, in_prefix_len: usize, in_out: &mut [u8], cpu_features: cpu::Features, ) -> Tag, key_len: usize, id: AlgorithmID, /// Use `max_input_len!()` to initialize this. // TODO: Make this `usize`. max_input_len: u64, } const fn max_input_len(block_len: usize, overhead_blocks_per_nonce: usize) -> u64 { // Each of our AEADs use a 32-bit block counter so the maximum is the // largest input that will not overflow the counter. ((1u64 << 32) - polyfill::u64_from_usize(overhead_blocks_per_nonce)) * polyfill::u64_from_usize(block_len) } impl Algorithm { /// The length of the key. #[inline(always)] pub fn key_len(&self) -> usize { self.key_len } /// The length of a tag. /// /// See also `MAX_TAG_LEN`. #[inline(always)] pub fn tag_len(&self) -> usize { TAG_LEN } /// The length of the nonces. 
#[inline(always)] pub fn nonce_len(&self) -> usize { NONCE_LEN } } derive_debug_via_id!(Algorithm); #[derive(Debug, Eq, PartialEq)] enum AlgorithmID { AES_128_GCM, AES_256_GCM, CHACHA20_POLY1305, } impl PartialEq for Algorithm { fn eq(&self, other: &Self) -> bool { self.id == other.id } } impl Eq for Algorithm {} /// An authentication tag. #[must_use] #[repr(C)] pub struct Tag(Block); impl AsRef<[u8]> for Tag { fn as_ref(&self) -> &[u8] { self.0.as_ref() } } const MAX_KEY_LEN: usize = 32; // All the AEADs we support use 128-bit tags. const TAG_LEN: usize = BLOCK_LEN; /// The maximum length of a tag for the algorithms in this module. pub const MAX_TAG_LEN: usize = TAG_LEN; fn check_per_nonce_max_bytes(alg: &Algorithm, in_out_len: usize) -> Result<(), error::Unspecified> { if polyfill::u64_from_usize(in_out_len) > alg.max_input_len { return Err(error::Unspecified); } Ok(()) } #[derive(Clone, Copy)] enum Direction { Opening { in_prefix_len: usize }, Sealing, } mod aes; mod aes_gcm; mod block; mod chacha; mod chacha20_poly1305; pub mod chacha20_poly1305_openssh; mod gcm; mod nonce; mod poly1305; pub mod quic; mod shift;
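// Editor's sketch (not part of the original source): a minimal round trip through the
// API defined above. It only uses items that appear in this module; the algorithm
// constant `CHACHA20_POLY1305` and the `Nonce::assume_unique_for_key` constructor are
// assumed to be provided by the `chacha20_poly1305` and `nonce` submodules, which are
// not shown in this excerpt.
#[cfg(test)]
mod usage_sketch {
    use super::*;

    #[test]
    fn seal_then_open_round_trip() {
        // Fixed key and nonce for illustration only; real callers must use secret key
        // material and a nonce that is unique per (key, message).
        let unbound = UnboundKey::new(&CHACHA20_POLY1305, &[0u8; 32]).unwrap();
        let key = LessSafeKey::new(unbound);

        let mut in_out = b"hello world".to_vec();
        key.seal_in_place_append_tag(
            Nonce::assume_unique_for_key([0u8; NONCE_LEN]),
            Aad::empty(),
            &mut in_out,
        )
        .unwrap();
        // The buffer now holds the ciphertext followed by the 16-byte tag.
        assert_eq!(in_out.len(), b"hello world".len() + MAX_TAG_LEN);

        // Opening verifies the tag and returns the plaintext prefix of the buffer.
        let plaintext = key
            .open_in_place(
                Nonce::assume_unique_for_key([0u8; NONCE_LEN]),
                Aad::empty(),
                &mut in_out,
            )
            .unwrap();
        assert_eq!(plaintext, b"hello world");
    }
}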
30.18303
100
0.597767
8f60bb0de0e6648566fd1e835bca74f3fa292720
9,635
use core::pin::Pin; use core::task::{Context, Poll}; use std::collections::HashSet; use std::sync::Arc; use crate::contribution::AggregatableContribution; use crate::evaluator::Evaluator; use crate::update::LevelUpdate; use futures::stream::{BoxStream, Stream, StreamExt}; /// A TodoItem represents a contribution which has not yet been aggregated into the store. #[derive(Clone, Debug)] pub(crate) struct TodoItem<C: AggregatableContribution> { /// The contribution of this TodoItem pub contribution: C, /// The level the contribution of this TodoItem belongs to. pub level: usize, } impl<C: AggregatableContribution> TodoItem<C> { /// Evaluates the contribution of the TodoItem. It returns a score representing how useful /// the contribution is, with `0` meaning not useful at all (it can be discarded) and `>0` /// meaning more useful the higher the number. /// /// * `evaluator` - The evaluator used for the score computation pub fn evaluate<E: Evaluator<C>>(&self, evaluator: Arc<E>) -> usize { evaluator.evaluate(&self.contribution, self.level) } } impl<C: AggregatableContribution> PartialEq for TodoItem<C> { fn eq(&self, other: &TodoItem<C>) -> bool { self.level == other.level && self.contribution.contributors() == other.contribution.contributors() } } impl<C: AggregatableContribution> Eq for TodoItem<C> {} impl<C: AggregatableContribution> std::hash::Hash for TodoItem<C> { // TODO fn hash<H: std::hash::Hasher>(&self, state: &mut H) { std::hash::Hash::hash(&self.contribution.contributors().to_string(), state); } } /// TodoItem list. Implements Stream to poll for the next best scoring TodoItem. /// Will drain the input stream every time a TodoItem is polled. pub(crate) struct TodoList<C: AggregatableContribution, E: Evaluator<C>> { /// List of TodoItems already polled from input stream list: HashSet<TodoItem<C>>, /// The evaluator used for scoring the individual todos evaluator: Arc<E>, /// The Stream where LevelUpdates can be polled from, which are subsequently converted into TodoItems input_stream: BoxStream<'static, LevelUpdate<C>>, } impl<C: AggregatableContribution, E: Evaluator<C>> TodoList<C, E> { /// Create a new TodoList /// * `evaluator` - The evaluator which will be used for TodoItem scoring /// * `input_stream` - The stream on which new LevelUpdates can be polled, which will then be converted into TodoItems pub fn new(evaluator: Arc<E>, input_stream: BoxStream<'static, LevelUpdate<C>>) -> Self { Self { list: HashSet::new(), evaluator, input_stream, } } } impl<C: AggregatableContribution, E: Evaluator<C>> Stream for TodoList<C, E> { type Item = TodoItem<C>; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { // current best score let mut best_score: usize = 0; // retained set of todos. Same as self.list, but does not retain 0-score todos or the best todo. let mut new_set: HashSet<TodoItem<C>> = HashSet::new(); // the current best TodoItem let mut best_todo: Option<Self::Item> = None; // scoring of items needs to be done every time an item is polled, as the scores might have // changed with the last aggregated contribution // score already available TodoItems first for item in self.list.iter() { // Have the evaluator score each TodoItem. let score = item.evaluate(Arc::clone(&self.evaluator)); // if an item has a score greater than 0 it is retained. Otherwise it is discarded if score > 0 { if score > best_score { // In case it's a new best remember it and push the old best into the retained set.
if let Some(todo) = best_todo { new_set.insert(todo); } best_todo = Some(item.clone()); // TODO get rid of clone() // remember the new best score best_score = score; } else { // in case the item is not the new best scoring item, push it into the retained set. new_set.insert(item.clone()); // TODO get rid of clone() } } } // update the retained list of Todos self.list = new_set; // Scan the input for better todos. The loop exits once the input has run out of LevelUpdates for now. // Note that computations are limited to the bare minimum. No Verification in particular. // As Verification is very computationally expensive it should only be done for TodoItems with the highest score. while let Poll::Ready(msg) = self.input_stream.poll_next_unpin(cx) { match msg { // The input has ended, i.e. there is no producer left. // In test cases that could mean the other instances have completed their aggregations and dropped their network instances. // In reality this should never happen as the network should not terminate those streams, but try to acquire new peers in this situation. // Panic here is viable, but makes testing a bit harder. // TODO more robust handling of this case, as the aggregation might not be able to finish here (depending on what todos are left). None => break, // A new LevelUpdate is available. Some(msg) => { if self.evaluator.level_contains_id(msg.level as usize, msg.origin as usize) { // Every LevelUpdate contains an aggregate which can be turned into a TodoItem let aggregate_todo = TodoItem { contribution: msg.aggregate, level: msg.level as usize, }; // score the newly created TodoItem for the aggregate of the LevelUpdate let score = aggregate_todo.evaluate(Arc::clone(&self.evaluator)); // TodoItems with a score of 0 are discarded (meaning not added to the retained set of TodoItems). if score > 0 { if score > best_score { // If the score is a new best remember the score and put the former best item into the list. best_score = score; if let Some(best_todo) = best_todo { self.list.insert(best_todo); // self.list.push(best_todo); // TODO: dedupe! } best_todo = Some(aggregate_todo); } else { // If the score is not a new best put the TodoItem in the list. self.list.insert(aggregate_todo); // self.list.push(aggregate_todo); // TODO: dedupe! } } // Some of the LevelUpdates also contain an individual signature, in which case it is also converted into a TodoItem if let Some(individual) = msg.individual { let individual_todo = TodoItem { contribution: individual, level: msg.level as usize, }; // Score the newly created TodoItem for the individual contribution of the LevelUpdate let score = individual_todo.evaluate(Arc::clone(&self.evaluator)); // TodoItems with a score of 0 are discarded (meaning not added to the retained set of TodoItems). if score > 0 { if score > best_score { // If the score is a new best remember the score and put the former best item into the list. best_score = score; if let Some(best_todo) = best_todo { self.list.insert(best_todo); } best_todo = Some(individual_todo); } else { // If the score is not a new best put the TodoItem in the list. self.list.insert(individual_todo); } } } } else { debug!("Sender of update {} is not on level {}", msg.origin, msg.level); } } } } // If the best item has a score higher than 0 return it; otherwise signal // that no TodoItem is currently available. // The function returns Poll<Option<TodoItem<C>>> but Ready(None) is never returned.
if best_score > 0 { // best_todo is now always Some(todo) never None if let Some(todo) = best_todo { Poll::Ready(Some(todo)) } else { unreachable!(" Score was higher than 0 but there was no best TodoItem."); } } else { Poll::Pending } } }
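// Editor's sketch (not part of the original source): the selection rule that
// `poll_next` implements, shown on plain integers instead of the crate's
// `TodoItem`/`Evaluator` types (the scoring closure and values below are
// hypothetical stand-ins). Items scoring 0 are dropped, the single best-scoring
// item is handed out, and everything else is retained for the next poll.
#[cfg(test)]
mod selection_sketch {
    #[test]
    fn keeps_best_and_retains_rest() {
        // Hypothetical scorer: odd numbers score 0 (discard), even numbers score themselves.
        let score = |x: &u32| if *x % 2 == 0 { *x as usize } else { 0 };
        let items = vec![3u32, 8, 4, 5, 6];

        let mut best: Option<u32> = None;
        let mut best_score = 0usize;
        let mut retained = Vec::new();
        for item in items {
            let s = score(&item);
            if s == 0 {
                continue; // discarded, like a 0-score TodoItem
            }
            if s > best_score {
                if let Some(prev) = best.take() {
                    retained.push(prev); // the former best goes back into the list
                }
                best = Some(item);
                best_score = s;
            } else {
                retained.push(item);
            }
        }
        assert_eq!(best, Some(8));
        assert_eq!(retained, vec![4, 6]);
    }
}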
49.92228
152
0.553088
f7a14a278030fda5a1c3ae6fbad0a3e98dfd7d0a
368
/// Reverse bits
/// Reverses the bits of the given 32-bit unsigned integer (flips its binary representation).
pub fn reverse_bits(x: u32) -> u32 {
    let mut res = 0;
    let mut n = x;
    let mut count = 31;
    while n > 0 {
        res += (n & 1) << count; // take the lowest bit and shift it into its mirrored position
        n >>= 1; // shift right by one, dropping the bit just handled
        count -= 1;
    }
    res
}

#[test]
fn test_reverse_bits() {
    assert_eq!(reverse_bits(43261596), 964176192)
}
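// Editor's addition (not part of the original source): since Rust 1.37 the standard
// library provides `u32::reverse_bits`, which can serve as a cross-check for the
// manual implementation above.
#[test]
fn test_reverse_bits_matches_std() {
    for &x in &[0u32, 1, 43261596, u32::MAX] {
        assert_eq!(reverse_bits(x), x.reverse_bits());
    }
}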
20.444444
52
0.540761
ff99a4b7ff63828a17b5fd2d44cd4144e63b7428
8,731
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // The outlines relation `T: 'a` or `'a: 'b`. This code frequently // refers to rules defined in RFC 1214 (`OutlivesFooBar`), so see that // RFC for reference. use ty::{self, Ty, TyCtxt, TypeFoldable}; #[derive(Debug)] pub enum Component<'tcx> { Region(ty::Region<'tcx>), Param(ty::ParamTy), UnresolvedInferenceVariable(ty::InferTy), // Projections like `T::Foo` are tricky because a constraint like // `T::Foo: 'a` can be satisfied in so many ways. There may be a // where-clause that says `T::Foo: 'a`, or the defining trait may // include a bound like `type Foo: 'static`, or -- in the most // conservative way -- we can prove that `T: 'a` (more generally, // that all components in the projection outlive `'a`). This code // is not in a position to judge which is the best technique, so // we just product the projection as a component and leave it to // the consumer to decide (but see `EscapingProjection` below). Projection(ty::ProjectionTy<'tcx>), // In the case where a projection has escaping regions -- meaning // regions bound within the type itself -- we always use // the most conservative rule, which requires that all components // outlive the bound. So for example if we had a type like this: // // for<'a> Trait1< <T as Trait2<'a,'b>>::Foo > // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // then the inner projection (underlined) has an escaping region // `'a`. We consider that outer trait `'c` to meet a bound if `'b` // outlives `'b: 'c`, and we don't consider whether the trait // declares that `Foo: 'static` etc. Therefore, we just return the // free components of such a projection (in this case, `'b`). // // However, in the future, we may want to get smarter, and // actually return a "higher-ranked projection" here. Therefore, // we mark that these components are part of an escaping // projection, so that implied bounds code can avoid relying on // them. This gives us room to improve the regionck reasoning in // the future without breaking backwards compat. EscapingProjection(Vec<Component<'tcx>>), } impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { /// Returns all the things that must outlive `'a` for the condition /// `ty0: 'a` to hold. Note that `ty0` must be a **fully resolved type**. pub fn outlives_components(&self, ty0: Ty<'tcx>) -> Vec<Component<'tcx>> { let mut components = vec![]; self.compute_components(ty0, &mut components); debug!("components({:?}) = {:?}", ty0, components); components } fn compute_components(&self, ty: Ty<'tcx>, out: &mut Vec<Component<'tcx>>) { // Descend through the types, looking for the various "base" // components and collecting them into `out`. This is not written // with `collect()` because of the need to sometimes skip subtrees // in the `subtys` iterator (e.g., when encountering a // projection). 
match ty.sty { ty::TyClosure(def_id, ref substs) => { for upvar_ty in substs.upvar_tys(def_id, *self) { self.compute_components(upvar_ty, out); } } ty::TyGenerator(def_id, ref substs, _) => { // Same as the closure case for upvar_ty in substs.upvar_tys(def_id, *self) { self.compute_components(upvar_ty, out); } // We ignore regions in the generator interior as we don't // want these to affect region inference } // All regions are bound inside a witness ty::TyGeneratorWitness(..) => (), // OutlivesTypeParameterEnv -- the actual checking that `X:'a` // is implied by the environment is done in regionck. ty::TyParam(p) => { out.push(Component::Param(p)); } // For projections, we prefer to generate an obligation like // `<P0 as Trait<P1...Pn>>::Foo: 'a`, because this gives the // regionck more ways to prove that it holds. However, // regionck is not (at least currently) prepared to deal with // higher-ranked regions that may appear in the // trait-ref. Therefore, if we see any higher-ranke regions, // we simply fallback to the most restrictive rule, which // requires that `Pi: 'a` for all `i`. ty::TyProjection(ref data) => { if !data.has_escaping_regions() { // best case: no escaping regions, so push the // projection and skip the subtree (thus generating no // constraints for Pi). This defers the choice between // the rules OutlivesProjectionEnv, // OutlivesProjectionTraitDef, and // OutlivesProjectionComponents to regionck. out.push(Component::Projection(*data)); } else { // fallback case: hard code // OutlivesProjectionComponents. Continue walking // through and constrain Pi. let subcomponents = self.capture_components(ty); out.push(Component::EscapingProjection(subcomponents)); } } // We assume that inference variables are fully resolved. // So, if we encounter an inference variable, just record // the unresolved variable as a component. ty::TyInfer(infer_ty) => { out.push(Component::UnresolvedInferenceVariable(infer_ty)); } // Most types do not introduce any region binders, nor // involve any other subtle cases, and so the WF relation // simply constraints any regions referenced directly by // the type and then visits the types that are lexically // contained within. (The comments refer to relevant rules // from RFC1214.) ty::TyBool | // OutlivesScalar ty::TyChar | // OutlivesScalar ty::TyInt(..) | // OutlivesScalar ty::TyUint(..) | // OutlivesScalar ty::TyFloat(..) | // OutlivesScalar ty::TyNever | // ... ty::TyAdt(..) | // OutlivesNominalType ty::TyAnon(..) | // OutlivesNominalType (ish) ty::TyForeign(..) | // OutlivesNominalType ty::TyStr | // OutlivesScalar (ish) ty::TyArray(..) | // ... ty::TySlice(..) | // ... ty::TyRawPtr(..) | // ... ty::TyRef(..) | // OutlivesReference ty::TyTuple(..) | // ... ty::TyFnDef(..) | // OutlivesFunction (*) ty::TyFnPtr(_) | // OutlivesFunction (*) ty::TyDynamic(..) | // OutlivesObject, OutlivesFragment (*) ty::TyError => { // (*) Bare functions and traits are both binders. In the // RFC, this means we would add the bound regions to the // "bound regions list". In our representation, no such // list is maintained explicitly, because bound regions // themselves can be readily identified. 
push_region_constraints(out, ty.regions()); for subty in ty.walk_shallow() { self.compute_components(subty, out); } } } } fn capture_components(&self, ty: Ty<'tcx>) -> Vec<Component<'tcx>> { let mut temp = vec![]; push_region_constraints(&mut temp, ty.regions()); for subty in ty.walk_shallow() { self.compute_components(subty, &mut temp); } temp } } fn push_region_constraints<'tcx>(out: &mut Vec<Component<'tcx>>, regions: Vec<ty::Region<'tcx>>) { for r in regions { if !r.is_late_bound() { out.push(Component::Region(r)); } } }
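// Editor's illustration (not part of the original source): the components collected
// above are what regionck turns into concrete `T: 'a` / `'r: 'a` obligations. In
// surface Rust, an obligation like `&'x Vec<T>: 'a` decomposes into the region
// component `'x: 'a` and the parameter component `T: 'a`; the hypothetical function
// below simply spells those bounds out explicitly.
#[allow(dead_code)]
fn borrow_for<'a, 'x: 'a, T: 'a>(v: &'x Vec<T>) -> &'a Vec<T> {
    v
}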
45.952632
98
0.565571
72778fda19ba5b05d0b76802390201fab51210ff
51,326
// Copyright (C) 2022 Scott Lamb <[email protected]> // SPDX-License-Identifier: MIT OR Apache-2.0 //! Message layer: statelessly converts [`crate::pkt`] packets into higher-level //! messages and vice versa. #[cfg(feature = "arbitrary")] use arbitrary::Arbitrary; #[cfg(feature = "serde")] use serde::{de::Error as _, Deserialize, Serialize}; use std::str::FromStr; use crate::pkt::{AsciiPacket, Packet}; #[derive(Clone, Debug)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(transparent))] pub struct Error(String); impl std::fmt::Display for Error { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { self.0.fmt(f) } } impl std::error::Error for Error {} fn parse_u8_dec(name: &str, val: &[u8]) -> Result<u8, String> { let as_str = std::str::from_utf8(val) .map_err(|_| format!("{} expected to be decimal in [0, 255); was bad utf-8", name))?; u8::from_str(as_str) .map_err(|_| format!("{} expected to be decimal in [0, 255); got {:?}", name, val)) } /// Defines all messages, taking care of some `enum Message` and `Into` /// boilerplate. macro_rules! messages { ( $( #[doc=$doc:literal] $(#[$other_attrs:meta])* struct $m:ident $body:tt )+ ) => { /// A parsed (ASCII) message of any supported type. #[cfg_attr(feature = "arbitrary", derive(Arbitrary))] #[cfg_attr( feature = "serde", derive(Serialize, Deserialize), serde(rename_all="camelCase")) ] #[derive(Clone, Debug, PartialEq, Eq)] #[non_exhaustive] pub enum Message { $( #[doc=$doc] $m($m), )* } impl Message { pub fn to_pkt(&self) -> Packet { match self { $( Message::$m(m) => m.into(), )* } } /// Returns true if this message may be a reply to `request`. pub fn is_response_to(&self, request: &Message) -> bool { match self { $( Message::$m(m) => m.is_response_to(request), )* } } } $( #[doc=$doc] $(#[$other_attrs])* #[cfg_attr(feature = "arbitrary", derive(Arbitrary))] pub struct $m $body impl Into<Message> for $m { #[inline] fn into(self) -> Message { Message::$m(self) } } impl Into<AsciiPacket> for &$m { #[inline] fn into(self) -> AsciiPacket { self.to_ascii() } } impl Into<Packet> for &$m { #[inline] fn into(self) -> Packet { Packet::Ascii(self.to_ascii()) } } )* } } messages! { /// `aL`: Arm/Disarm Request. #[derive(Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(rename_all="camelCase"))] struct ArmRequest { pub area: Area, pub level: ArmLevel, pub code: ArmCode, } /// `as`: Arming Status Request. #[derive(Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(rename_all="camelCase"))] struct ArmingStatusRequest {} /// `AS`: Arming Status Report. #[derive(Copy, Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(rename_all="camelCase"))] struct ArmingStatusReport { pub arming_status: [ArmingStatus; NUM_AREAS], pub up_state: [ArmUpState; NUM_AREAS], pub alarm_state: [AlarmState; NUM_AREAS], pub first_exit_time: u8, } /// `EE`: Send Entry/Exit Time Data. #[derive(Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(rename_all="camelCase"))] struct SendTimeData { pub area: Area, pub ty: TimeDataType, pub timer1: u8, pub timer2: u8, /// The armed state, which according to documentation is only present /// for M1 Ver. 4.1.18, 5.1.18 or later. pub armed_state: Option<ArmedState>, } /// `IC`: Send Valid User Number and Invalid User Code. 
#[derive(Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(rename_all="camelCase"))] struct SendCode { /// The code, or all 0s if it represents a valid user. /// /// If this was entered on an Elk keypad, each of the six bytes will /// be a digit `[0, 9]`. That is, a zero on the keypad becomes an ASCII /// NUL, not a `b'0'`. If the Elk is configured for four-digit codes, /// the leading two digits will always be `0`. code: [u8; 6], /// The user code number. /// /// There are several "special" values. user: u8, keypad: Keypad, } /// `rr`: request Real Time Clock Data. #[derive(Copy, Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(rename_all="camelCase"))] struct RtcRequest { } /// `RR`: Real Time Clock Data. #[derive(Copy, Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(rename_all="camelCase"))] struct RtcResponse { rtc_data: RtcData, } /// `sd`: Request ASCII String Text Descriptions. #[derive(Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(rename_all="camelCase"))] struct StringDescriptionRequest { pub ty: TextDescriptionType, pub num: u8, } /// `SD`: ASCII String Text Descriptions. #[derive(Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(rename_all="camelCase"))] struct StringDescriptionResponse { pub ty: TextDescriptionType, pub num: u8, pub text: TextDescription, } /// `tn`: Task Activation. #[derive(Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(rename_all="camelCase"))] struct ActivateTask { pub task: Task, } /// `TC`: Task Change Update. #[derive(Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(rename_all="camelCase"))] struct TaskChange { pub task: Task, } /// `XK`: Control RTC Broadcast / IP Communications Device Test (a heartbeat). #[derive(Copy, Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(rename_all="camelCase"))] struct Heartbeat { rtc_data: RtcData, } /// `ZC`: Zone Change Update. #[derive(Copy, Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(rename_all="camelCase"))] struct ZoneChange { pub zone: Zone, pub status: ZoneStatus, } /// `zs`: Zone Status Request. #[derive(Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(rename_all="camelCase"))] struct ZoneStatusRequest {} /// `ZS`: Zone Status Report. #[derive(Copy, Clone, PartialEq, Eq)] struct ZoneStatusReport { pub zones: [ZoneStatus; NUM_ZONES], } } impl Message { pub fn parse(pkt: &Packet) -> Result<Option<Self>, Error> { match pkt { Packet::Ascii(msg) => Self::parse_ascii(msg), Packet::Rp(_) => Ok(None), // todo Packet::Invalid { .. 
} => Ok(None), } } pub fn parse_ascii(pkt: &AsciiPacket) -> Result<Option<Self>, Error> { let payload = pkt.as_bytes(); if payload.len() < 4 { return Err(Error("malformed ASCII message: too short".into())); } let (cmd, data) = payload.split_at(2); match cmd { b"a0" | b"a1" | b"a2" | b"a3" | b"a4" | b"a5" | b"a6" | b"a7" | b"a8" | b"a9" | b"a:" => ArmRequest::from_ascii(cmd[1], data).map(Self::ArmRequest), b"as" => ArmingStatusRequest::from_ascii_data(data).map(Self::ArmingStatusRequest), b"AS" => ArmingStatusReport::from_ascii_data(data).map(Self::ArmingStatusReport), b"EE" => SendTimeData::from_ascii_data(data).map(Self::SendTimeData), b"IC" => SendCode::from_ascii_data(data).map(Self::SendCode), b"rr" => RtcRequest::from_ascii_data(data).map(Self::RtcRequest), b"RR" => RtcResponse::from_ascii_data(data).map(Self::RtcResponse), b"sd" => { StringDescriptionRequest::from_ascii_data(data).map(Self::StringDescriptionRequest) } b"SD" => StringDescriptionResponse::from_ascii_data(data) .map(Self::StringDescriptionResponse), b"tn" => ActivateTask::from_ascii_data(data).map(Self::ActivateTask), b"TC" => TaskChange::from_ascii_data(data).map(Self::TaskChange), b"XK" => Heartbeat::from_ascii_data(data).map(Self::Heartbeat), b"ZC" => ZoneChange::from_ascii_data(data).map(Self::ZoneChange), b"zs" => ZoneStatusRequest::from_ascii_data(data).map(Self::ZoneStatusRequest), b"ZS" => ZoneStatusReport::from_ascii_data(data).map(Self::ZoneStatusReport), _ => return Ok(None), } .map(Some) .map_err(Error) } } impl Into<Packet> for &Message { fn into(self) -> Packet { self.to_pkt() } } /// Defines an enum for a `u8` with an automatic `TryFrom` that uses the /// explicit discriminant as the byte value. macro_rules! byte_enum { ( #[doc=$enum_doc:literal] $vis:vis enum $enum:ident { $( $(#[doc=$var_doc:literal])? $var:ident = $val:literal, )* } ) => { #[doc=$enum_doc] #[cfg_attr(feature = "arbitrary", derive(Arbitrary))] #[cfg_attr( feature = "serde", derive(Serialize, Deserialize), serde(rename_all="camelCase")) ] #[derive(Copy, Clone, Debug, PartialEq, Eq)] #[repr(u8)] $vis enum $enum { $( $(#[doc=$var_doc])* $var = $val, )* } impl TryFrom<u8> for $enum { type Error = String; fn try_from(val: u8) -> Result<Self, Self::Error> { Ok(match val { $( $val => Self::$var, )* _ => return Err(format!("bad {} {:?}", stringify!($enum), char::from(val))), }) } } } } /// Almost the same as `byte_enum`, but uses the *numeric* value in the error /// message, without conversion to `char`. /// /// TODO: maybe find a slick way to combine these? macro_rules! num_enum { ( #[doc=$enum_doc:literal] $vis:vis enum $enum:ident { $( $(#[doc=$var_doc:literal])? $var:ident = $val:literal, )* } ) => { #[doc=$enum_doc] #[cfg_attr(feature = "arbitrary", derive(Arbitrary))] #[cfg_attr( feature = "serde", derive(Serialize, Deserialize), serde(rename_all="camelCase")) ] #[derive(Copy, Clone, Debug, PartialEq, Eq)] #[repr(u8)] $vis enum $enum { $( $(#[doc=$var_doc])* $var = $val, )* } impl TryFrom<u8> for $enum { type Error = String; fn try_from(val: u8) -> Result<Self, Self::Error> { Ok(match val { $( $val => Self::$var, )* _ => return Err(format!("bad {} {}", stringify!($enum), val)), }) } } } } byte_enum! { /// The arming level in an [`ArmRequest`]. pub enum ArmLevel { Disarm = b'0', ArmedAway = b'1', ArmedStay = b'2', ArmedStayInstant = b'3', ArmedNight = b'4', ArmedNightInstant = b'5', ArmedVacation = b'6', /// Arm to next away mode; requires M1 Ver. 4.28 or later. ArmToNextAwayMode = b'7', /// Arm to next stay mode; requires M1 Ver. 
4.28 or later. ArmToNextStayMode = b'8', /// Force arm to away; requires M1 Ver. 4.28 or later. ForceArmToAway = b'9', /// Force arm to stay; requires M1 Ver. 4.28 or later. ForceArmToStay = b':', } } /// A six-digit numeric arm code. #[derive(Copy, Clone, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(transparent))] pub struct ArmCode([u8; 6]); impl std::fmt::Debug for ArmCode { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let as_str = std::str::from_utf8(&self.0[..]).expect("ArmCode is valid UTF-8"); as_str.fmt(f) } } /// Converts from a number. impl TryFrom<u32> for ArmCode { type Error = String; fn try_from(n: u32) -> Result<Self, String> { if n >= 1_000000 { return Err("code out of range".into()); } let n = format!("{:06}", n); let mut copied = [0u8; 6]; copied.copy_from_slice(n.as_bytes()); Ok(ArmCode(copied)) } } /// Converts from ASCII digits. impl TryFrom<&[u8]> for ArmCode { type Error = String; fn try_from(value: &[u8]) -> Result<Self, Self::Error> { if value.len() != 6 { return Err("ArmCode must be of length 6".to_owned()); } let mut code = [0u8; 6]; code.copy_from_slice(value); if code.iter().any(|&b| b < b'0' || b > b'9') { return Err("ArmCode must be numeric".to_owned()); } Ok(ArmCode(code)) } } #[cfg(feature = "arbitrary")] impl Arbitrary<'_> for ArmCode { fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> { let buf = u.bytes(3)?; let n = (u32::from(buf[0]) << 24) | (u32::from(buf[1]) << 16) | (u32::from(buf[2])); ArmCode::try_from(n).map_err(|_| arbitrary::Error::IncorrectFormat) } fn size_hint(depth: usize) -> (usize, Option<usize>) { let _ = depth; (3, Some(3)) } } byte_enum! { /// Type of time, used in [`SendTimeData`]. pub enum TimeDataType { Exit = b'0', Entry = b'1', } } byte_enum! { /// Arm state for [`ArmingStatusReport`]. pub enum ArmedState { Disarmed = b'0', ArmedAway = b'1', ArmedStay = b'2', ArmedStayInstant = b'3', ArmedNight = b'4', ArmedNightInstant = b'5', ArmedVacation = b'6', } } byte_enum! { /// Day of the week, as in `RR` and `XK` messages. enum Weekday { Sun = b'1', Mon = b'2', Tue = b'3', Wed = b'4', Thu = b'5', Fri = b'6', Sat = b'7', } } byte_enum! { /// Clock display mode: 24-hour or 12-hour. enum ClockDisplayMode { TwentyFourHour = b'0', TwelveHour = b'1', } } byte_enum! { /// Date display mode: `mm/dd` or `dd/mm`. enum DateDisplayMode { MonthFirst = b'0', DayFirst = b'1', } } /// Creates a `u8` wrapper that enforces a range of `[1, $max]`. macro_rules! 
limited_u8 { ( #[doc=$doc:literal] $t:ident max=$max:literal ) => { #[repr(transparent)] #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] #[doc=$doc] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(transparent))] pub struct $t(u8); impl $t { pub fn to_index(self) -> usize { usize::from(self.0) - 1 } } #[cfg(feature = "arbitrary")] impl Arbitrary<'_> for $t { fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> { let b: u8 = Arbitrary::from(u); $t::try_from(b).map_err(|_| arbitrary::Error::IncorrectFormat) } fn size_hint(depth: usize) -> (usize, Option<usize>) { <u8 as Arbitrary>::size_hint(depth) } } impl std::fmt::Debug for $t { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { std::fmt::Debug::fmt(&self.0, f) } } impl std::fmt::Display for $t { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { std::fmt::Display::fmt(&self.0, f) } } impl TryFrom<u8> for $t { type Error = String; fn try_from(val: u8) -> Result<Self, Self::Error> { if val < 1 || val > $max { return Err(format!( "{} not in expected {} range of [1, {}]", val, stringify!(t), $max, )); } Ok(Self(val)) } } impl Into<u8> for $t { fn into(self) -> u8 { self.0 } } }; } /// A datetime, loosely defined. /// /// This currently enforces the ranges mentioned in Elk's spec, e.g. day can't be more than 31. /// It doesn't use a real date library and thus doesn't prevent silly dates like February 30th. #[derive(Copy, Clone, PartialEq, Eq)] struct DateTime { year: u8, month: u8, day: u8, hour: u8, minute: u8, second: u8, } impl DateTime { /// Parses an ISO 8601 datetime like `YYYY-mm-DDTHH:MM:SS`. pub fn from_iso_8601(val: &str) -> Result<Self, String> { if val.len() != 19 { return Err("wrong length".to_owned()); } let year = u16::from_str(&val[..4]).map_err(|_| "bad year".to_owned())?; if year < 2000 || year > 2100 { return Err(format!("year {} out of range", year)); } if &val[4..5] != "-" { return Err("bad year-month separator".to_owned()); } let month = u8::from_str(&val[5..7]).map_err(|_| "bad month".to_owned())?; if month < 1 || month > 12 { return Err(format!("month {} out of range", month)); } if &val[7..8] != "-" { return Err("bad month-day separator".to_owned()); } let day = u8::from_str(&val[8..10]).map_err(|_| "bad day".to_owned())?; if day < 1 || day > 31 { return Err(format!("day {} out of range", day)); } if &val[10..11] != "T" { return Err("bad date-time separator".to_owned()); } let hour = u8::from_str(&val[11..13]).map_err(|_| "bad hour".to_owned())?; if &val[13..14] != ":" { return Err("bad hour-minute separator".to_owned()); } if hour > 23 { return Err(format!("hour {} out of range", hour)); } let minute = u8::from_str(&val[14..16]).map_err(|_| "bad minute".to_owned())?; if minute > 59 { return Err(format!("minute {} out of range", minute)); } if &val[16..17] != ":" { return Err("bad minute-second separator".to_owned()); } let second = u8::from_str(&val[17..19]).map_err(|_| "bad second".to_owned())?; if second > 59 { return Err("second out of range".to_owned()); } Ok(Self { year: (year - 2000) as u8, month, day, hour, minute, second, }) } pub fn to_iso_8601(&self) -> String { format!( "20{:02}-{:02}-{:02}T{:02}:{:02}:{:02}", self.year, self.month, self.day, self.hour, self.minute, self.second ) } } #[cfg(feature = "serde")] impl Serialize for DateTime { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer, { serializer.serialize_str(&self.to_iso_8601()) } } #[cfg(feature = "serde")] impl<'de> 
Deserialize<'de> for DateTime { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de>, { let s: &str = Deserialize::deserialize(deserializer)?; Self::from_iso_8601(s).map_err(|_| { D::Error::invalid_value( serde::de::Unexpected::Str(s), &"a datetime of the format YYYY-mm-ddTHH:MM:SS", ) }) } } impl std::fmt::Debug for DateTime { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { self.to_iso_8601().fmt(f) } } impl std::fmt::Display for DateTime { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { self.to_iso_8601().fmt(f) } } /// Real-time clock date: datetime and flags. #[derive(Copy, Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] struct RtcData { datetime: DateTime, weekday: Weekday, dst: bool, clock_display: ClockDisplayMode, date_display: DateDisplayMode, } impl RtcData { fn from_ascii(data: &[u8]) -> Result<Self, String> { if data.len() < 16 { return Err("RTC data must be at least 16 bytes".to_owned()); } let year = parse_u8_dec("year", &data[11..13])?; debug_assert!(year < 100); let month = parse_u8_dec("month", &data[9..11])?; if month < 1 || month > 12 { return Err("month out of range".to_owned()); } let day = parse_u8_dec("day", &data[7..9])?; if day < 1 || day > 31 { return Err("day out of range".to_owned()); } let hour = parse_u8_dec("hour", &data[4..6])?; if hour >= 24 { return Err("hour out of range".to_owned()); } let minute = parse_u8_dec("minute", &data[2..4])?; if minute >= 60 { return Err("minute out of range".to_owned()); } let second = parse_u8_dec("second", &data[0..2])?; if second >= 60 { return Err("second out of range".to_owned()); } let weekday = Weekday::try_from(data[6])?; let dst = match data[13] { b'0' => false, b'1' => true, _ => return Err("bad dst flag".to_owned()), }; let clock_display = ClockDisplayMode::try_from(data[14])?; let date_display = DateDisplayMode::try_from(data[15])?; Ok(Self { datetime: DateTime { year, month, day, hour, minute, second, }, weekday, dst, clock_display, date_display, }) } fn to_ascii(&self) -> impl Iterator<Item = u8> { format!( "{:02}{:02}{:02}{:01}{:02}{:02}{:02}{:01}{:01}{:01}", self.datetime.second, self.datetime.minute, self.datetime.hour, self.weekday as u8 as char, self.datetime.day, self.datetime.month, self.datetime.year, if self.dst { 1 } else { 0 }, self.clock_display as u8 as char, self.date_display as u8 as char, ) .into_bytes() .into_iter() } } pub const NUM_AREAS: usize = 8; pub const NUM_KEYPADS: usize = 16; pub const NUM_TASKS: usize = 32; pub const NUM_ZONES: usize = 208; limited_u8! { /// A zone number in the range of `[1, 208]`. Zone max=208 } limited_u8! { /// An area number in the range of `[1, 8]`. Area max=8 } limited_u8! { /// A keypad number in the range of `[1, 16]`. Keypad max=16 } limited_u8! { /// An automation task number in the range of `[1, 32]`. Task max=32 } impl SendTimeData { fn from_ascii_data(data: &[u8]) -> Result<Self, String> { if data.len() < 10 { return Err(format!("expected at least 10 bytes, got {}", data.len())); } let area = match data[0] { b @ b'1'..=b'8' => Area(b - b'0'), b => return Err(format!("expected area in [1, 8], got {:?}", b)), }; let ty = TimeDataType::try_from(data[1])?; let timer1 = parse_u8_dec("timer1", &data[2..5])?; let timer2 = parse_u8_dec("timer2", &data[5..8])?; let armed_state = if data.len() < 11 { None } else { Some(ArmedState::try_from(data[8])?) 
}; Ok(SendTimeData { area, ty, timer1, timer2, armed_state, }) } fn to_ascii(&self) -> AsciiPacket { let mut msg = format!( "EE{}{}{:03}{:03}", &self.area, self.ty as u8 as char, self.timer1, self.timer2 ); if let Some(s) = self.armed_state { msg.push(s as u8 as char); } msg.push_str("00"); // reserved AsciiPacket::try_from(msg).expect("SendTimeData invalid") } pub fn is_response_to(&self, _request: &Message) -> bool { false } } #[derive(Copy, Clone, Default, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(transparent))] pub struct ZoneStatus( /// The decoded hex nibble as a value in \[0, 16\). u8, ); impl std::fmt::Debug for ZoneStatus { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_tuple("ZoneStatus") .field(&self.logical()) .field(&self.physical()) .finish() } } impl ZoneStatus { pub const UNCONFIGURED: ZoneStatus = ZoneStatus(0); pub const fn new(logical: ZoneLogicalStatus, physical: ZonePhysicalStatus) -> Self { ZoneStatus((logical as u8) << 2 | (physical as u8)) } fn from_ascii(hex_nibble: u8) -> Result<Self, String> { Ok(ZoneStatus(AsciiPacket::dehex_nibble(hex_nibble).map_err( |()| format!("bad zone status {:?}", char::from(hex_nibble)), )?)) } fn to_ascii(self) -> u8 { AsciiPacket::hex_nibble(self.0) } pub fn logical(self) -> ZoneLogicalStatus { match self.0 >> 2 { 0b00 => ZoneLogicalStatus::Normal, 0b01 => ZoneLogicalStatus::Trouble, 0b10 => ZoneLogicalStatus::Violated, 0b11 => ZoneLogicalStatus::Bypassed, _ => unreachable!(), } } pub fn physical(self) -> ZonePhysicalStatus { match self.0 & 0b11 { 0b00 => ZonePhysicalStatus::Unconfigured, 0b01 => ZonePhysicalStatus::Open, 0b10 => ZonePhysicalStatus::EOL, 0b11 => ZonePhysicalStatus::Short, _ => unreachable!(), } } } #[cfg(feature = "arbitrary")] impl Arbitrary<'_> for ZoneStatus { fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> { let b = u.bytes(1)?[0]; if (b & 0xF0) != 0 { return Err(arbitrary::Error::IncorrectFormat); } Ok(Self(b)) } fn size_hint(_depth: usize) -> (usize, Option<usize>) { (1, Some(1)) } } /// Zone physical status, the least significant 2 bits of a zone status nibble. #[derive(Copy, Clone, Debug, PartialEq, Eq)] #[repr(u8)] pub enum ZonePhysicalStatus { Unconfigured = 0b00, Open = 0b01, EOL = 0b10, Short = 0b11, } /// Zone logical status, the most significant 2 bits of a zone status nibble. 
#[derive(Copy, Clone, Debug, PartialEq, Eq)] #[repr(u8)] pub enum ZoneLogicalStatus { Normal = 0b00, Trouble = 0b01, Violated = 0b10, Bypassed = 0b11, } impl ZoneChange { fn from_ascii_data(data: &[u8]) -> Result<Self, String> { if data.len() < 4 { return Err(format!("expected at least 4 bytes, got {}", data.len())); } let zone = Zone::try_from(parse_u8_dec("zone", &data[0..3])?)?; Ok(ZoneChange { zone, status: ZoneStatus::from_ascii(data[3])?, }) } fn to_ascii(&self) -> AsciiPacket { let msg = format!("ZC{:03}{:1X}00", self.zone, self.status.0); AsciiPacket::try_from(msg).expect("ZoneChange invalid") } pub fn is_response_to(&self, _request: &Message) -> bool { false } } impl ZoneStatusRequest { fn from_ascii_data(_data: &[u8]) -> Result<Self, String> { Ok(ZoneStatusRequest {}) } fn to_ascii(&self) -> AsciiPacket { AsciiPacket::try_from("zs00".to_owned()).expect("ZoneStatusRequest invalid") } pub fn is_response_to(&self, _request: &Message) -> bool { false } } impl ZoneStatusReport { pub const ALL_UNCONFIGURED: ZoneStatusReport = ZoneStatusReport { zones: [ZoneStatus::UNCONFIGURED; 208], }; fn from_ascii_data(data: &[u8]) -> Result<Self, String> { if data.len() < NUM_ZONES { return Err(format!( "expected at least {} bytes, got {}", NUM_ZONES, data.len() )); } let mut zones = [ZoneStatus(0); NUM_ZONES]; for i in 0..NUM_ZONES { zones[i] = ZoneStatus::from_ascii(data[i])?; } Ok(ZoneStatusReport { zones }) } fn to_ascii(&self) -> AsciiPacket { let mut msg = Vec::with_capacity(4 + NUM_ZONES); msg.extend(b"ZS"); for s in &self.zones { msg.push(s.to_ascii()); } msg.extend(b"00"); AsciiPacket::try_from(msg).expect("ZoneStatusReport should be valid") } pub fn is_response_to(&self, request: &Message) -> bool { matches!(request, Message::ZoneStatusRequest(_)) } } #[cfg(feature = "serde")] impl Serialize for ZoneStatusReport { fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer, { todo!() } } #[cfg(feature = "serde")] impl<'de> Deserialize<'de> for ZoneStatusReport { fn deserialize<D>(_deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de>, { todo!() } } impl std::fmt::Debug for ZoneStatusReport { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_map() .entries(self.zones.iter().enumerate().filter_map(|(i, s)| { if s.0 == 0 { return None; } Some((i + 1, s)) })) .finish() } } impl ArmRequest { fn from_ascii(subtype: u8, data: &[u8]) -> Result<Self, String> { // Message::parse will only call ArmRequest with valid subtypes. let level = ArmLevel::try_from(subtype).expect("subtype must be valid"); if data.len() < 9 { return Err(format!( "Expected ArmRequest to have at least 9 bytes of data, got {}", data.len() )); } let area = Area::try_from(parse_u8_dec("area", &data[0..1])?)?; let code = ArmCode::try_from(&data[1..7])?; Ok(ArmRequest { area, level, code }) } pub fn to_ascii(&self) -> AsciiPacket { let msg: Vec<u8> = [b'a', self.level as u8, self.area.0 + b'0'] .iter() .chain(self.code.0.iter()) .chain(b"00".iter()) .copied() .collect(); AsciiPacket::try_from(msg).expect("ArmRequest invalid") } pub fn is_response_to(&self, _request: &Message) -> bool { false } } impl ArmingStatusRequest { fn from_ascii_data(_data: &[u8]) -> Result<Self, String> { Ok(ArmingStatusRequest {}) } pub fn to_ascii(&self) -> AsciiPacket { AsciiPacket::try_from("as00".to_owned()).expect("ArmingStatusRequest invalid") } pub fn is_response_to(&self, _request: &Message) -> bool { false } } byte_enum! 
{ /// The arming status of a single area, as in the `S` array of an [`ArmingStatusReport`]. pub enum ArmingStatus { Disarmed = b'0', ArmedAway = b'1', ArmedStay = b'2', ArmedStayInstant = b'3', ArmedNight = b'4', ArmedNightInstant = b'5', ArmedVacation = b'6', } } byte_enum! { /// The arm up state for use in [`ArmingStatusReport`]. pub enum ArmUpState { NotReadyToArm = b'0', ReadyToArm = b'1', ReadyToForceArm = b'2', ArmedWithExitTimer = b'3', ArmedFully = b'4', ForceArmedWithForceArmZoneViolated = b'5', ArmedWithBypass = b'6', } } byte_enum! { /// The alarm state for use in [`ArmingStatusReport`]. pub enum AlarmState { NoAlarmActive = b'0', EntranceDelayActive = b'1', AlarmAbortDelayActive = b'2', FireAlarm = b'3', MedicalAlarm = b'4', PoliceAlarm = b'5', BurglarAlarm = b'6', Aux1Alarm = b'7', Aux2Alarm = b'8', Aux3Alarm = b'9', Aux4Alarm = b':', CarbonMonoxideAlarm = b';', EmergencyAlarm = b'<', FreezeAlarm = b'=', GasAlarm = b'>', HeatAlarm = b'?', WaterAlarm = b'@', FireSupervisory = b'A', VerifyFire = b'B', } } impl AlarmState { #[inline] pub fn is_firing(self) -> bool { self as u8 > AlarmState::FireAlarm as u8 } } impl ArmingStatus { fn has_entry_delay(self) -> bool { use ArmingStatus::*; matches!(self, ArmedAway | ArmedStay | ArmedNight | ArmedVacation) } } impl ArmingStatusReport { fn from_ascii_data(data: &[u8]) -> Result<Self, String> { if data.len() < 26 { return Err(format!("expected at least {} data", 26)); } let mut arming_status = [ArmingStatus::Disarmed; NUM_AREAS]; let mut up_state = [ArmUpState::ReadyToArm; NUM_AREAS]; let mut alarm_state = [AlarmState::NoAlarmActive; NUM_AREAS]; for i in 0..8 { arming_status[i] = ArmingStatus::try_from(data[i])?; up_state[i] = ArmUpState::try_from(data[NUM_AREAS + i])?; alarm_state[i] = AlarmState::try_from(data[2 * NUM_AREAS + i])?; } let first_exit_time = AsciiPacket::dehex_byte(data[24], data[25]).map_err(|()| "bad first_exit_time")?; Ok(ArmingStatusReport { arming_status, up_state, alarm_state, first_exit_time, }) } pub fn to_ascii(&self) -> AsciiPacket { let msg: Vec<_> = b"AS" .iter() .copied() .chain(self.arming_status.iter().map(|&v| v as u8)) .chain(self.up_state.iter().map(|&v| v as u8)) .chain(self.alarm_state.iter().map(|&v| v as u8)) .chain(AsciiPacket::hex_byte(self.first_exit_time).iter().copied()) .collect(); AsciiPacket::try_from(msg).expect("ArmingStatusResponse valid ascii") } pub fn is_response_to(&self, request: &Message) -> bool { matches!(request, Message::ArmingStatusRequest(_)) } /// Checks if a `from`->`to` transition is likely to be spurious. /// /// See [this thread](https://www.elkproducts.com/forums/topic/spurious-armed-fully-message/). pub fn is_transition_suspicious(from: &ArmingStatusReport, to: &ArmingStatusReport) -> bool { for ((f_s, t_s), t_u) in from .arming_status .iter() .zip(to.arming_status.iter()) .zip(to.up_state.iter()) { if *f_s == ArmingStatus::Disarmed && t_s.has_entry_delay() && *t_u == ArmUpState::ArmedFully { return true; } } false } } num_enum! { /// Type of object to describe in a [`StringDescriptionRequest`]. 
pub enum TextDescriptionType { Zone = 0, Area = 1, User = 2, Keypad = 3, Output = 4, Task = 5, Telephone = 6, Light = 7, AlarmDuration = 8, CustomSettings = 9, Counter = 10, Thermostat = 11, FunctionKey1 = 12, FunctionKey2 = 13, FunctionKey3 = 14, FunctionKey4 = 15, FunctionKey5 = 16, FunctionKey6 = 17, AudioZone = 18, AudioSource = 19, } } impl SendCode { fn from_ascii_data(data: &[u8]) -> Result<Self, String> { if data.len() < 19 { return Err(format!("expected at least 19 bytes, got {}", data.len())); } let mut code = [0u8; 6]; for i in 0..6 { code[i] = AsciiPacket::dehex_byte(data[2 * i], data[2 * i + 1]) .map_err(|()| "invalid hex code")?; } let user = parse_u8_dec("user", &data[12..15])?; let keypad = Keypad::try_from(parse_u8_dec("keypad", &data[15..17])?)?; Ok(SendCode { code, user, keypad }) } fn is_response_to(&self, request: &Message) -> bool { // We could narrow it down further by eliminating invalid code responses with a different // code, but this is probably pointless. matches!(request, Message::ArmRequest(_)) } fn to_ascii(&self) -> AsciiPacket { let trailer = format!("{:03}{:02}00", self.user, self.keypad); let msg: Vec<u8> = [b'I', b'C'] .iter() .copied() .chain(self.code.iter().copied().flat_map(AsciiPacket::hex_byte)) .chain(trailer.as_bytes().iter().copied()) .collect(); AsciiPacket::try_from(msg).expect("SendCode valid") } } impl RtcRequest { fn from_ascii_data(_data: &[u8]) -> Result<Self, String> { Ok(RtcRequest {}) } fn to_ascii(&self) -> AsciiPacket { AsciiPacket::try_from("rr00").expect("RtcResponse valid") } fn is_response_to(&self, _request: &Message) -> bool { false } } impl RtcResponse { fn from_ascii_data(data: &[u8]) -> Result<Self, String> { Ok(RtcResponse { rtc_data: RtcData::from_ascii(data)?, }) } fn to_ascii(&self) -> AsciiPacket { let msg: Vec<u8> = [b'R', b'R'] .iter() .copied() .chain(self.rtc_data.to_ascii()) .collect(); AsciiPacket::try_from(msg).expect("RtcResponse valid") } fn is_response_to(&self, request: &Message) -> bool { matches!(request, Message::RtcRequest(_)) } } impl StringDescriptionRequest { fn from_ascii_data(data: &[u8]) -> Result<Self, String> { if data.len() < 5 { return Err(format!("expected at least 5 bytes, got {}", data.len())); } let ty = TextDescriptionType::try_from(parse_u8_dec("type", &data[0..2])?)?; let num = parse_u8_dec("num", &data[2..5])?; Ok(StringDescriptionRequest { ty, num }) } fn is_response_to(&self, _request: &Message) -> bool { false } fn to_ascii(&self) -> AsciiPacket { let msg = format!("sd{:02}{:03}00", self.ty as u8, self.num); AsciiPacket::try_from(msg).expect("StringDescriptionRequest valid") } } /// A 16-byte printable ASCII description, with spaces used as trailing padding. #[derive(Copy, Clone, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize), serde(transparent))] pub struct TextDescription([u8; 16]); impl TextDescription { pub const EMPTY: TextDescription = TextDescription(*b" "); /// Uses up to 16 bytes of `text`, which must be ASCII printable characters. 
pub fn new(text: &str) -> Result<Self, String> { AsciiPacket::check_printable(text.as_bytes())?; let mut this = Self::default(); for (b_in, b_out) in text.as_bytes().iter().zip(this.0.iter_mut()) { *b_out = *b_in; } Ok(this) } fn as_padded_str(&self) -> &str { std::str::from_utf8(&self.0[..]).unwrap() } pub fn as_str(&self) -> &str { self.as_padded_str().trim_end_matches(' ') } pub fn is_empty(&self) -> bool { self.0[0] == b' ' } } #[derive(Copy, Clone, Eq, PartialEq)] pub struct TextDescriptions<const N: usize>(pub [TextDescription; N]); impl<const N: usize> TextDescriptions<N> { pub const ALL_EMPTY: TextDescriptions<N> = TextDescriptions([TextDescription::EMPTY; N]); } impl<const N: usize> std::ops::Index<usize> for TextDescriptions<N> { type Output = TextDescription; fn index(&self, index: usize) -> &Self::Output { &self.0[index] } } impl<const N: usize> std::fmt::Debug for TextDescriptions<N> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_map() .entries(self.0.iter().enumerate().filter_map(|(i, d)| { if d.is_empty() { return None; } Some((i + 1, d)) })) .finish() } } #[cfg(feature = "arbitrary")] impl Arbitrary<'_> for TextDescription { fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> { let mut buf = [0u8; 16]; u.fill_buffer(&mut buf)?; AsciiPacket::check_printable(&buf).map_err(|_| arbitrary::Error::IncorrectFormat)?; Ok(TextDescription(buf)) } fn size_hint(_depth: usize) -> (usize, Option<usize>) { (16, Some(16)) } } impl Default for TextDescription { fn default() -> Self { TextDescription([b' '; 16]) } } impl std::fmt::Debug for TextDescription { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { std::fmt::Debug::fmt(&**self, f) } } impl std::fmt::Display for TextDescription { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { std::fmt::Display::fmt(&**self, f) } } impl std::ops::Deref for TextDescription { type Target = str; #[inline] fn deref(&self) -> &Self::Target { self.as_str() } } impl std::cmp::PartialEq<str> for TextDescription { fn eq(&self, other: &str) -> bool { self.as_str() == other } } impl StringDescriptionResponse { fn from_ascii_data(data: &[u8]) -> Result<Self, String> { if data.len() < 21 { return Err(format!("expected at least 5 bytes, got {}", data.len())); } let ty = TextDescriptionType::try_from(parse_u8_dec("type", &data[0..2])?)?; let num = parse_u8_dec("num", &data[2..5])?; let text = TextDescription(data[5..21].try_into().expect("slice->array")); Ok(StringDescriptionResponse { ty, num, text }) } fn to_ascii(&self) -> AsciiPacket { let msg = format!( "SD{:02}{:03}{}00", self.ty as u8, self.num, self.text.as_padded_str() ); AsciiPacket::try_from(msg).expect("StringDescriptionResponse valid") } fn is_response_to(&self, request: &Message) -> bool { matches!( request, Message::StringDescriptionRequest(StringDescriptionRequest { ty, num }) // > If the first character in a requested name is a “space” or // > less, then the next names are searched until a name is found // > whose first character is greater than “space” or the “Show On // > Keypad” bit is set. If no valid names are found, a “000” for // > the NNN address is returned. This speeds up the loading of // > names so that invalid names are not returned. M1 version 2.4.6 // or later. 
if self.ty == *ty && (self.num >= *num || self.num == 0) ) } } impl ActivateTask { fn from_ascii_data(data: &[u8]) -> Result<Self, String> { if data.len() < 3 { return Err(format!("expected at least 3 bytes, got {}", data.len())); } let task = Task::try_from(parse_u8_dec("task", &data[..3])?)?; Ok(ActivateTask { task }) } fn to_ascii(&self) -> AsciiPacket { let msg = format!("tn{:03}00", self.task.0); AsciiPacket::try_from(msg).expect("Task valid") } fn is_response_to(&self, _request: &Message) -> bool { false } } impl TaskChange { fn from_ascii_data(data: &[u8]) -> Result<Self, String> { if data.len() < 3 { return Err(format!("expected at least 3 bytes, got {}", data.len())); } let task = Task::try_from(parse_u8_dec("task", &data[..3])?)?; Ok(TaskChange { task }) } fn to_ascii(&self) -> AsciiPacket { let msg = format!("TC{:03}000", self.task.0); AsciiPacket::try_from(msg).expect("Task valid") } fn is_response_to(&self, request: &Message) -> bool { matches!(request, Message::ActivateTask(t) if t.task == self.task) } } impl Heartbeat { fn from_ascii_data(data: &[u8]) -> Result<Self, String> { Ok(Heartbeat { rtc_data: RtcData::from_ascii(data)?, }) } fn to_ascii(&self) -> AsciiPacket { let msg: Vec<u8> = [b'X', b'K'] .iter() .copied() .chain(self.rtc_data.to_ascii()) .collect(); AsciiPacket::try_from(msg).expect("Heartbeat valid") } fn is_response_to(&self, _request: &Message) -> bool { false } } #[cfg(test)] mod tests { use super::*; /*#[test] fn valid_as_report_without_timer() { }*/ #[test] fn valid_as_report_with_timer() { let mut expected = ArmingStatusReport { arming_status: [ArmingStatus::Disarmed; NUM_AREAS], up_state: [ArmUpState::ReadyToArm; NUM_AREAS], alarm_state: [AlarmState::NoAlarmActive; NUM_AREAS], first_exit_time: 59, }; expected.arming_status[0] = ArmingStatus::ArmedStay; expected.up_state[0] = ArmUpState::ArmedWithExitTimer; let pkt = Packet::Ascii(AsciiPacket::try_from("AS2000000031111111000000003B").unwrap()); let msg = Message::parse(&pkt).unwrap().unwrap(); assert_eq!(msg, Message::ArmingStatusReport(expected)); assert_eq!(msg.to_pkt(), pkt); } #[test] fn valid_old_ee_report() { let pkt = Packet::Ascii(AsciiPacket::try_from("EE1103000000").unwrap()); let msg = Message::parse(&pkt).unwrap().unwrap(); assert_eq!( msg, Message::SendTimeData(SendTimeData { area: Area::try_from(1).unwrap(), ty: TimeDataType::Entry, timer1: 30, timer2: 0, armed_state: None, }) ); assert_eq!(msg.to_pkt(), pkt); } #[test] fn valid_new_ee_report() { let pkt = Packet::Ascii(AsciiPacket::try_from("EE11030000100").unwrap()); let msg = Message::parse(&pkt).unwrap().unwrap(); assert_eq!( msg, Message::SendTimeData(SendTimeData { area: Area::try_from(1).unwrap(), ty: TimeDataType::Entry, timer1: 30, timer2: 0, armed_state: Some(ArmedState::ArmedAway), }) ); assert_eq!(msg.to_pkt(), pkt); } #[test] fn valid_new_ic_report() { let pkt = Packet::Ascii(AsciiPacket::try_from("IC0000010203040000100").unwrap()); let msg = Message::parse(&pkt).unwrap().unwrap(); assert_eq!( msg, Message::SendCode(SendCode { code: [0, 0, 1, 2, 3, 4], user: 0, keypad: Keypad::try_from(1).unwrap(), }) ); assert_eq!(msg.to_pkt(), pkt); } #[test] fn valid_sd_req() { let pkt = Packet::Ascii(AsciiPacket::try_from("sd0100100").unwrap()); let msg = Message::parse(&pkt).unwrap().unwrap(); assert_eq!( msg, Message::StringDescriptionRequest(StringDescriptionRequest { ty: TextDescriptionType::Area, num: 1, }) ); assert_eq!(msg.to_pkt(), pkt); } #[test] fn valid_sd_report() { let pkt = Packet::Ascii(AsciiPacket::try_from("SD05001Garage 
Door 00").unwrap()); let msg = Message::parse(&pkt).unwrap().unwrap(); assert_eq!( msg, Message::StringDescriptionResponse(StringDescriptionResponse { ty: TextDescriptionType::Task, num: 1, text: TextDescription::new("Garage Door").unwrap(), }) ); assert_eq!(msg.to_pkt(), pkt); } #[test] fn valid_zc_report() { let pkt = Packet::Ascii(AsciiPacket::try_from("ZC016900").unwrap()); let msg = Message::parse(&pkt).unwrap().unwrap(); assert_eq!( msg, Message::ZoneChange(ZoneChange { zone: Zone::try_from(16).unwrap(), status: ZoneStatus::new(ZoneLogicalStatus::Violated, ZonePhysicalStatus::Open), }) ); assert_eq!(msg.to_pkt(), pkt); } #[test] fn suspicious() { let disarmed = ArmingStatusReport { arming_status: [ArmingStatus::Disarmed; NUM_AREAS], up_state: [ArmUpState::ReadyToArm; NUM_AREAS], alarm_state: [AlarmState::NoAlarmActive; NUM_AREAS], first_exit_time: 0, }; let mut arming = disarmed; arming.arming_status[0] = ArmingStatus::ArmedStay; arming.up_state[0] = ArmUpState::ArmedWithExitTimer; let mut armed = disarmed; armed.arming_status[0] = ArmingStatus::ArmedStay; armed.up_state[0] = ArmUpState::ArmedFully; assert!(!ArmingStatusReport::is_transition_suspicious( &disarmed, &arming )); assert!(ArmingStatusReport::is_transition_suspicious( &disarmed, &armed )); assert!(!ArmingStatusReport::is_transition_suspicious( &arming, &armed )); assert!(!ArmingStatusReport::is_transition_suspicious( &armed, &disarmed )); } #[test] fn rtc() { const ENCODED: &[u8; 16] = b"0059107251205110"; let parsed = RtcData::from_ascii(ENCODED).unwrap(); assert_eq!( parsed, RtcData { datetime: DateTime::from_iso_8601("2005-12-25T10:59:00").unwrap(), weekday: Weekday::Sat, dst: true, clock_display: ClockDisplayMode::TwelveHour, date_display: DateDisplayMode::MonthFirst, }, ); let reencoded: Vec<u8> = parsed.to_ascii().collect(); assert_eq!(&ENCODED[..], &reencoded[..]); } }
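// Illustrative sketch, not from the original crate: the padding/trimming behaviour of
// `TextDescription` above. `new` right-pads the input into the fixed 16-byte ASCII field,
// `as_str` strips the trailing spaces again, and an all-space field reads as empty.
#[cfg(test)]
mod text_description_sketch {
    use super::*;

    #[test]
    fn pads_and_trims() {
        let d = TextDescription::new("Garage Door").unwrap();
        assert_eq!(d.as_padded_str().len(), 16); // always the full fixed-width field
        assert_eq!(d.as_str(), "Garage Door");   // trailing padding removed
        assert!(!d.is_empty());
        assert!(TextDescription::default().is_empty()); // all spaces counts as empty
    }
}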
31.201216
99
0.535128
9ce6ca7b37119fd0dc83537d38c13193540d2eaa
2,157
/* * Copyright 2019 The Starlark in Rust Authors. * Copyright (c) Facebook, Inc. and its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //! Lowest level stream communication as JSON. //! Because DAP debugging is hard, we write everything we see to stdout (for the protocol) //! AND stderr (for debugging). use serde_json::Value; use std::{ env, fs::{File, OpenOptions}, io::{self, Read, Write}, path::PathBuf, }; // Debugging anything through DAP is a nightmare, because VS Code doesn't surface any logs. // Therefore, do the hacky thing of putting logs next to the binary. fn log_file() -> PathBuf { let mut res = env::current_exe().unwrap(); res.set_extension("dap.log"); res } pub(crate) fn log_begin() { File::create(log_file()).unwrap(); } pub(crate) fn log(x: &str) { let mut file = OpenOptions::new().append(true).open(log_file()).unwrap(); file.write_all(format!("{}\n", x).as_bytes()).unwrap() } pub(crate) fn send(x: Value) { let s = x.to_string(); log(&format!("SEND: {}", s)); print!("Content-Length: {}\r\n\r\n{}", s.len(), s); io::stdout().flush().unwrap() } pub(crate) fn read() -> Value { let mut s = String::new(); io::stdin().read_line(&mut s).unwrap(); let len: usize = s .strip_prefix("Content-Length: ") .unwrap() .trim() .parse() .unwrap(); io::stdin().read_line(&mut s).unwrap(); let mut res = vec![0u8; len]; io::stdin().lock().read_exact(&mut res).unwrap(); let s = String::from_utf8_lossy(&res); log(&format!("RECV: {}", s)); serde_json::from_str(&s).unwrap() }
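// Hedged sketch of the wire framing that `send` above performs, shown in isolation:
// a DAP message is the JSON body preceded by a `Content-Length` header and a blank
// line. The request literal used below is hypothetical.
#[cfg(test)]
mod framing_sketch {
    use serde_json::{json, Value};

    // Frame a JSON body exactly the way `send` writes it to stdout.
    fn frame(body: &Value) -> String {
        let s = body.to_string();
        format!("Content-Length: {}\r\n\r\n{}", s.len(), s)
    }

    #[test]
    fn frames_like_send() {
        let msg = json!({"seq": 1, "type": "request", "command": "initialize"});
        let body = msg.to_string();
        let framed = frame(&msg);
        assert!(framed.starts_with(format!("Content-Length: {}\r\n\r\n", body.len()).as_str()));
        assert!(framed.ends_with(body.as_str()));
    }
}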
30.814286
91
0.642559
e4355c7e02f35ab6435a5e410aa7f8506b457c92
665
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// xfail-test
// error-pattern: ran out of stack

struct R {
    b: int,
}

impl R : Drop {
    fn finalize(&self) {
        let _y = R { b: self.b };
    }
}

fn main() {
    let _x = R { b: 0 };
}
25.576923
68
0.664662
0aaa44d539b835eadeadb8abfed59134f01e4a18
4,537
// Zinc, the bare metal stack for rust. // Copyright 2014 Vladimir "farcaller" Pouzanov <[email protected]> // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use syntax::ast; use syntax::codemap::DUMMY_SP; use syntax::ext::base::ExtCtxt; use syntax::ext::build::AstBuilder; use syntax::parse::token::{InternedString, intern_and_get_ident}; use syntax::ptr::P; use std::hash::{hash, Hash, SipHasher}; static TAG: &'static str = "__zinc_task_ty_params"; pub trait ToTyHash { fn to_tyhash(&self) -> String; } impl ToTyHash for String { fn to_tyhash(&self) -> String { let h: u64 = hash::<_, SipHasher>(&self); format!("Ty{:X}", h) } } /// Sets ty_params for named task. /// /// Arguments: /// task: task name. This function must be called exactly once per task /// args: a vector of type parameters pub fn set_ty_params_for_task(cx: &mut ExtCtxt, task: &str, args: Vec<String>) { let ty_params = args.iter().map(|arg| { cx.meta_word(DUMMY_SP, intern_and_get_ident(arg.as_slice())) }).collect(); let newmi = cx.meta_list(DUMMY_SP, intern_and_get_ident(task), ty_params); let mut tasks = get_tasks(cx); tasks.push(newmi); set_tasks(cx, tasks); } /// Returns a vector of type parameters for task. pub fn get_ty_params_for_task(cx: &ExtCtxt, task: &str) -> Vec<String> { get_task(&get_tasks(cx), task) } /// Inserts or replaces tasks vector fn set_tasks(cx: &mut ExtCtxt, tasks: Vec<P<ast::MetaItem>>) { let mut vec_clone = cx.cfg(); let maybe_pos = vec_clone.iter().position(|i| { match i.node { ast::MetaList(ref k, _) if k.get() == TAG => true, _ => false, } }); if maybe_pos.is_some() { vec_clone.remove(maybe_pos.unwrap()); } vec_clone.push(cx.meta_list(DUMMY_SP, InternedString::new(TAG), tasks)); cx.cfg = vec_clone; } /// Returns a vector of MetaLists where each MetaList corresponds to one task. fn get_tasks(cx: &ExtCtxt) -> Vec<P<ast::MetaItem>> { for i in cx.cfg.iter() { match i.node { ast::MetaList(ref k, ref v) if k.get() == TAG => return v.clone(), _ => (), } }; vec!() } /// Returns a vector of type parameters for named task. 
fn get_task(tasks: &Vec<P<ast::MetaItem>>, task: &str) -> Vec<String> { let mut ty_params = vec!(); for mi in tasks.iter() { match mi.node { ast::MetaList(ref k, ref v) if k.get() == task => { for submi in v.iter() { match submi.node { ast::MetaWord(ref w) => ty_params.push(w.get().to_string()), _ => panic!("unexpected node type"), } } break; }, _ => (), } } ty_params } // pub fn get_ty_params_for_task(cx: &ExtCtxt, task: &str) -> Option<Vec<String>> { // get_task(cx, task).and_then(|ma| Some(ma.extra_ty_params.clone())) // } // fn get_task(cx: &ExtCtxt, task: &str) -> Option<MetaArgs> { // get_args(cx).and_then(|args| { // for a in args.iter() { // if a.task_name.as_slice() == task { // return Some(a.clone()); // } // } // None // }) // } // fn get_args(cx: &ExtCtxt) -> Option<Vec<MetaArgs>> { // cx.cfg.iter().find(|i| { // match i.node { // ast::MetaList(ref k, _) => k.get() == TAG, // _ => false, // } // }).and_then(|i| match i.node { // ast::MetaList(_, ref v) => Some(meta_item_to_meta_args(v)), // _ => panic!(), // }) // } // fn meta_item_to_meta_args(mi: &Vec<P<ast::MetaItem>>) -> Vec<MetaArgs> { // let mut args = vec!(); // for i in mi.iter() { // match i.node { // ast::MetaWord(ref istr) => { // let s = istr.get(); // args.push(json::decode(s).unwrap()); // }, // _ => panic!(), // } // } // args // } // fn meta_args_to_meta_item(name: String, args: Vec<String>) -> P<ast::MetaItem> { // let ma = MetaArgs { // task_name: name, // extra_ty_params: args, // }; // let enc = json::encode(&ma); // let istr = intern_and_get_ident(enc.as_slice()); // box(GC) respan(DUMMY_SP, ast::MetaWord(istr)) // }
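// Sketch for illustration only: `ToTyHash` above relies on the long-removed
// `hash::<_, SipHasher>` free function. The same idea expressed against today's
// standard library is simply to hash the type-parameter string and render it as a
// short `Ty`-prefixed identifier.
#[cfg(test)]
mod tyhash_sketch {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    fn to_tyhash(s: &str) -> String {
        let mut hasher = DefaultHasher::new();
        s.hash(&mut hasher);
        format!("Ty{:X}", hasher.finish())
    }

    #[test]
    fn distinct_params_get_distinct_names() {
        assert_eq!(to_tyhash("u32"), to_tyhash("u32"));
        assert_ne!(to_tyhash("u32"), to_tyhash("u64"));
    }
}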
28.35625
83
0.610095
7a6f2e25c398373e8590b77908a57602b536fba7
10,833
// Copyright 2022 TiKV Project Authors. Licensed under Apache-2.0. use cc::Build; use cmake::Config; use std::path::{Path, PathBuf}; use std::{env, str}; // On these platforms jemalloc-sys will use a prefixed jemalloc which cannot be linked together // with RocksDB. // See https://github.com/gnzlbg/jemallocator/blob/bfc89192971e026e6423d9ee5aaa02bc56585c58/jemalloc-sys/build.rs#L45 const NO_JEMALLOC_TARGETS: &[&str] = &["android", "dragonfly", "musl", "darwin"]; // Generate the bindings to rocksdb C-API. // Try to disable the generation of platform-related bindings. #[cfg(feature = "update-bindings")] fn bindgen_rocksdb(file_path: &Path) { println!("cargo:rerun-if-env-changed=TEST_BIND"); let gen_tests = env::var("TEST_BIND").map_or(false, |s| s == "1"); let mut builder = bindgen::Builder::default(); if env::var("CARGO_CFG_TARGET_OS").map_or(false, |s| s == "windows") { builder = builder.clang_arg("-D _WIN32_WINNT=0x600"); } let prefix = "pub enum rocksdb_"; let pre_defined: Vec<_> = std::fs::read_to_string("src/pre_defined.rs") .unwrap() .lines() .map(|l| { let mut new_string = String::new(); let mut visited = false; for part in (l[prefix.len()..]) .split_whitespace() .next() .unwrap() .split('_') { if visited { new_string.push('_'); } else { if !part.chars().next().unwrap().is_lowercase() { visited = true; } if !new_string.is_empty() { new_string.push_str("::"); } } new_string.push_str(part); } new_string }) .collect(); let filter_exp = format!("\\brocksdb::({})", pre_defined.join("|")); println!("filtering {}", filter_exp); let builder = builder .header("crocksdb/crocksdb/c.h") .header("rocksdb/include/rocksdb/statistics.h") .header("titan/include/titan/statistics.h") .clang_arg("-xc++") .clang_arg("-Irocksdb/include") .clang_arg("-Ititan/include") .clang_arg("-std=c++11") .clang_arg("-DOPENSSL") .rustfmt_bindings(true) .allowlist_function(r"\bcrocksdb_.*") .allowlist_type(r"\bcrocksdb_.*") .allowlist_var(r"\bcrocksdb_.*") .allowlist_function(r"\bctitandb_.*") .allowlist_type(r"\bctitandb_.*") .allowlist_type(r"\brocksdb::Tickers") .allowlist_type(r"\brocksdb::Histograms") .allowlist_type(r"\brocksdb::titandb::TickerType") .allowlist_type(r"\brocksdb::titandb::HistogramType") .opaque_type(r"\brocksdb::Env") // Just blocking the type will still include its dependencies. .opaque_type(&filter_exp) // Block all system headers .blocklist_file(r"^/.*") .blocklist_item(r"\brocksdb::DB_Properties.*") .blocklist_type(r"\brocksdb::Env_FileAttributes") // `TableProperties` has different size on different platform. .blocklist_type(&filter_exp) .with_codegen_config( bindgen::CodegenConfig::FUNCTIONS | bindgen::CodegenConfig::VARS | bindgen::CodegenConfig::TYPES, ) .no_copy(r"\b(crocksdb|rocksdb|ctitandb)_.*$") .size_t_is_usize(true) .disable_header_comment() .ctypes_prefix("libc") .layout_tests(gen_tests) .default_enum_style(bindgen::EnumVariation::Rust { non_exhaustive: false, }); println!("running {}", builder.command_line_flags().join(" ")); let bindings = builder .generate() .expect("unable to generate rocksdb bindings"); bindings .write_to_file(file_path) .expect("unable to write rocksdb bindings"); } /// Determine if need to update bindings. When `update-bindings` feature /// is enabled, it will regenerate the bindings. fn config_binding_path() { // Cargo treats nonexistent files changed, so we only emit the rerun-if-changed // directive when we expect the target-specific pre-generated binding file to be // present. 
println!("cargo:rerun-if-changed=bindings/bindings.rs"); let file_path = PathBuf::from(env::var("CARGO_MANIFEST_DIR").unwrap()).join("bindings/bindings.rs"); #[cfg(feature = "update-bindings")] bindgen_rocksdb(&file_path); println!( "cargo:rustc-env=BINDING_PATH={}", file_path.to_str().unwrap() ); } fn link_cpp(build: &mut Build) { let tool = build.get_compiler(); let stdlib = if tool.is_like_gnu() { "libstdc++.a" } else if tool.is_like_clang() { "libc++.a" } else { // Don't link to c++ statically on windows. return; }; let output = tool .to_command() .arg("--print-file-name") .arg(stdlib) .output() .unwrap(); if !output.status.success() || output.stdout.is_empty() { // fallback to dynamically return; } let path = match str::from_utf8(&output.stdout) { Ok(path) => PathBuf::from(path), Err(_) => return, }; if !path.is_absolute() { return; } // remove lib prefix and .a postfix. let libname = &stdlib[3..stdlib.len() - 2]; // optional static linking if cfg!(feature = "static-libcpp") { println!("cargo:rustc-link-lib=static={}", &libname); } else { println!("cargo:rustc-link-lib=dylib={}", &libname); } println!( "cargo:rustc-link-search=native={}", path.parent().unwrap().display() ); build.cpp_link_stdlib(None); } fn patch_libz_env() { // cmake script expect libz.a being under ${DEP_Z_ROOT}/lib, but libz-sys crate put it // under ${DEP_Z_ROOT}/build. Append the path to CMAKE_PREFIX_PATH to get around it. let zlib_root = env::var("DEP_Z_ROOT").unwrap(); let prefix_path = if let Ok(prefix_path) = env::var("CMAKE_PREFIX_PATH") { format!("{};{}/build", prefix_path, zlib_root) } else { format!("{}/build", zlib_root) }; // To avoid linking system library, set lib path explicitly. println!("cargo:rustc-link-search=native={}/build", zlib_root); println!("cargo:rustc-link-search=native={}/lib", zlib_root); env::set_var("CMAKE_PREFIX_PATH", prefix_path); } fn configure_common_rocksdb_args(cfg: &mut Config, name: &str) { let out_dir = format!("{}/{}", env::var("OUT_DIR").unwrap(), name); std::fs::create_dir_all(&out_dir).unwrap(); cfg.out_dir(out_dir); if cfg!(feature = "portable") { cfg.define("PORTABLE", "ON"); } if cfg!(feature = "sse") { cfg.define("FORCE_SSE42", "ON"); } cfg.define("WITH_GFLAGS", "OFF") .register_dep("Z") .define("WITH_ZLIB", "ON") .register_dep("BZIP2") .define("WITH_BZ2", "ON") .register_dep("LZ4") .define("WITH_LZ4", "ON") .register_dep("ZSTD") .define("WITH_ZSTD", "ON") .register_dep("SNAPPY") .define("WITH_SNAPPY", "ON") .configure_arg("-Wno-dev"); } fn figure_link_lib(dst: &Path, name: &str) { println!("cargo:rerun-if-changed={}", name); if cfg!(target_os = "windows") { let profile = match &*env::var("PROFILE").unwrap_or_else(|_| "debug".to_owned()) { "bench" | "release" => "Release", _ => "Debug", }; println!( "cargo:rustc-link-search=native={}/build/{}", dst.display(), profile ); } else { println!("cargo:rustc-link-search=native={}/build", dst.display()); } println!("cargo:rustc-link-lib=static={}", name); } fn build_titan(build: &mut Build) { let cur_dir = std::env::current_dir().unwrap(); let mut cfg = cmake::Config::new("titan"); configure_common_rocksdb_args(&mut cfg, "titan"); let dst = cfg .define("ROCKSDB_DIR", cur_dir.join("rocksdb")) .define("WITH_TITAN_TESTS", "OFF") .define("WITH_TITAN_TOOLS", "OFF") .build_target("titan") .very_verbose(true) .build(); figure_link_lib(&dst, "titan"); build.include(cur_dir.join("titan").join("include")); build.include(cur_dir.join("titan")); } fn build_rocksdb(build: &mut Build) { let target = env::var("TARGET").expect("TARGET was 
not set"); let mut cfg = Config::new("rocksdb"); cfg.out_dir(format!("{}/rocksdb", env::var("OUT_DIR").unwrap())); if cfg!(feature = "encryption") { cfg.register_dep("OPENSSL").define("WITH_OPENSSL", "ON"); println!("cargo:rustc-link-lib=static=crypto"); } if cfg!(feature = "jemalloc") && NO_JEMALLOC_TARGETS.iter().all(|i| !target.contains(i)) { cfg.register_dep("JEMALLOC").define("WITH_JEMALLOC", "ON"); println!("cargo:rustc-link-lib=static=jemalloc"); } configure_common_rocksdb_args(&mut cfg, "rocksdb"); let dst = cfg .define("WITH_TESTS", "OFF") .define("WITH_TOOLS", "OFF") .build_target("rocksdb") .very_verbose(true) .build(); figure_link_lib(&dst, "rocksdb"); if cfg!(target_os = "windows") { build.define("OS_WIN", None); } else { build.define("ROCKSDB_PLATFORM_POSIX", None); } if cfg!(target_os = "macos") { build.define("OS_MACOSX", None); } else if cfg!(target_os = "freebsd") { build.define("OS_FREEBSD", None); } config_binding_path(); let cur_dir = env::current_dir().unwrap(); build.include(cur_dir.join("rocksdb").join("include")); build.include(cur_dir.join("rocksdb")); // Adding rocksdb specific compile macros. // TODO: should make sure crocksdb compile options is the same as rocksdb and titan. build.define("ROCKSDB_SUPPORT_THREAD_LOCAL", None); if cfg!(feature = "encryption") { build.define("OPENSSL", None); } println!("cargo:rustc-link-lib=static=z"); println!("cargo:rustc-link-lib=static=bz2"); println!("cargo:rustc-link-lib=static=lz4"); println!("cargo:rustc-link-lib=static=zstd"); println!("cargo:rustc-link-lib=static=snappy"); } fn main() { patch_libz_env(); let mut build = Build::new(); build_titan(&mut build); build_rocksdb(&mut build); println!("cargo:rerun-if-changed=crocksdb/crocksdb/c.h"); println!("cargo:rerun-if-changed=crocksdb/c.cc"); build.cpp(true).file("crocksdb/c.cc"); if !cfg!(target_os = "windows") { build.flag("-std=c++11"); build.flag("-fno-rtti"); } link_cpp(&mut build); build.warnings(false).compile("libcrocksdb.a"); }
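// Standalone sketch of the name-mangling loop in `bindgen_rocksdb` above, so its effect
// is easier to see: leading lowercase segments of a `pub enum rocksdb_...` line become
// `::`-separated path components, and everything from the first capitalised segment
// onward stays one `_`-joined type name. The two input lines in the test are
// hypothetical, not taken from src/pre_defined.rs.
#[cfg(test)]
mod pre_defined_mangling_sketch {
    fn mangle(line: &str) -> String {
        let prefix = "pub enum rocksdb_";
        let mut out = String::new();
        let mut visited = false;
        for part in line[prefix.len()..]
            .split_whitespace()
            .next()
            .unwrap()
            .split('_')
        {
            if visited {
                out.push('_');
            } else {
                if !part.chars().next().unwrap().is_lowercase() {
                    visited = true;
                }
                if !out.is_empty() {
                    out.push_str("::");
                }
            }
            out.push_str(part);
        }
        out
    }

    #[test]
    fn mangles_into_rust_paths() {
        assert_eq!(
            mangle("pub enum rocksdb_titandb_TitanCFOptions {"),
            "titandb::TitanCFOptions"
        );
        assert_eq!(
            mangle("pub enum rocksdb_Env_FileAttributes {"),
            "Env_FileAttributes"
        );
    }
}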
35.058252
117
0.592541
87597d640ea223959b1c39c796c9d8769c609f18
639
/*
 *
 *
 * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
 *
 * The version of the OpenAPI document: 1.0.0
 *
 * Generated by: https://openapi-generator.tech
 */

/// LogSeverityLevels : Allowable severity levels for log events.
/// Allowable severity levels for log events.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum LogSeverityLevels {
    #[serde(rename = "Okay")]
    Okay,
    #[serde(rename = "Warning")]
    Warning,
    #[serde(rename = "Error")]
    Error,
    #[serde(rename = "Always")]
    Always,
}
21.3
109
0.666667
acdea52c55e4882d9940870460d54b3e9bcf16e1
5,644
// Copyright 2021 Datafuse Labs. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // use std::sync::Arc; use azure_core_mirror::HttpClient; use azure_storage_mirror::clients::StorageAccountClient; use azure_storage_mirror::core::prelude::*; use common_exception::ErrorCode; use common_exception::Result; use futures::Stream; use futures::StreamExt; use crate::AzureBlobInputStream; use crate::Bytes; use crate::DataAccessor; use crate::InputStream; use crate::SeekableReader; pub struct AzureBlobAccessor { client: Arc<StorageClient>, container: String, } impl AzureBlobAccessor { /// Create a azure blob accessor instance without credentials. /// The code will use env variable "STORAGE_ACCOUNT" and "STORAGE_MASTER_KEY". #[allow(dead_code)] pub fn try_create(account: impl Into<String>, container: impl Into<String>) -> Result<Self> { let master_key_res = std::env::var("STORAGE_MASTER_KEY"); if let Err(e) = master_key_res { return Err(ErrorCode::SecretKeyNotSet(format!( "Secret key not found for azure blob client, {}", e.to_string() ))); } let master_key = master_key_res.unwrap(); let http_client: Arc<Box<dyn HttpClient>> = Arc::new(Box::new(reqwest::Client::new())); let client = StorageAccountClient::new_access_key(http_client, account, &master_key); Ok(Self { client: client.as_storage_client(), container: container.into(), }) } pub fn with_credentials( account: impl Into<String>, container: impl Into<String>, master_key: impl Into<String>, ) -> Self { let http_client: Arc<Box<dyn HttpClient>> = Arc::new(Box::new(reqwest::Client::new())); let client = StorageAccountClient::new_access_key(http_client, account, master_key); Self { client: client.as_storage_client(), container: container.into(), } } async fn put_blob(&self, blob_name: &str, body: Vec<u8>) -> common_exception::Result<()> { let blob = self .client .as_container_client(&self.container) .as_blob_client(blob_name); let response_opt = blob.put_block_blob(body).execute().await; match response_opt { Err(e) => { return Err(ErrorCode::DALTransportError(format!( "Failed on azure blob put operation, {}", e.to_string() ))) } Ok(_) => Ok(()), } } pub fn get_stream(&self, path: impl Into<String>) -> AzureBlobInputStream { let blob_client = self .client .clone() .as_container_client(&self.container) .as_blob_client(path); AzureBlobInputStream::create(blob_client) } } #[async_trait::async_trait] impl DataAccessor for AzureBlobAccessor { fn get_reader( &self, _path: &str, _stream_len: Option<u64>, ) -> common_exception::Result<Box<dyn SeekableReader>> { todo!() } fn get_input_stream( &self, path: &str, _stream_len: Option<u64>, ) -> common_exception::Result<InputStream> { let blob_client = self .client .clone() .as_container_client(&self.container) .as_blob_client(path); Ok(Box::new(AzureBlobInputStream::create(blob_client))) } /// Get blob as Bytes from Azure blob /// /// * `blob` - the blob name is corresponding to the BlobName in the example url 'https://myaccount.blob.core.windows.net/mycontainer/BlobName' async fn get(&self, blob: &str) -> 
common_exception::Result<Bytes> { let blob = self .client .as_container_client(&self.container) .as_blob_client(blob); let retrieved_blob_opt = blob.get().execute().await; match retrieved_blob_opt { Err(e) => { return Err(ErrorCode::DALTransportError(format!( "Failed on azure blob get operation, {}", e.to_string() ))); } Ok(blob_data) => Ok(blob_data.data), } } async fn put(&self, path: &str, content: Vec<u8>) -> common_exception::Result<()> { self.put_blob(path, content).await } async fn put_stream( &self, path: &str, input_stream: Box< dyn Stream<Item = std::result::Result<bytes::Bytes, std::io::Error>> + Send + Unpin + 'static, >, _stream_len: usize, ) -> common_exception::Result<()> { let mut data: Vec<u8> = vec![]; let mut s = Box::pin(input_stream); while let Some(bytes_res) = s.next().await { match bytes_res { Err(e) => return Err(ErrorCode::DALTransportError(e.to_string())), Ok(bytes) => data.append(&mut bytes.to_vec()), } } self.put_blob(path, data).await } }
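// A small sketch, in isolation, of the aggregation step in `put_stream` above: the
// chunked input stream is drained into one buffer before the single block-blob upload.
// It assumes only the `futures` and `bytes` crates this module already imports.
#[cfg(test)]
mod put_stream_aggregation_sketch {
    use futures::{stream, StreamExt};

    #[test]
    fn collects_chunks_in_order() {
        let chunks: Vec<std::result::Result<bytes::Bytes, std::io::Error>> = vec![
            Ok(bytes::Bytes::from_static(b"hello ")),
            Ok(bytes::Bytes::from_static(b"world")),
        ];
        let mut data: Vec<u8> = vec![];
        futures::executor::block_on(async {
            let mut s = Box::pin(stream::iter(chunks));
            while let Some(bytes_res) = s.next().await {
                data.append(&mut bytes_res.unwrap().to_vec());
            }
        });
        assert_eq!(data, b"hello world".to_vec());
    }
}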
32.436782
147
0.593373
09a234e0316d36891699669c45b3b58b65e27f21
3,036
use crate::changelog::fs_utils::{read_and_filter_dir, read_to_string_opt}; use crate::changelog::parsing_utils::trim_newlines; use crate::{ChangeSetSection, Config, Error, Result}; use log::debug; use std::fs; use std::path::{Path, PathBuf}; /// A set of changes, either associated with a release or not. #[derive(Debug, Clone)] pub struct ChangeSet { /// An optional high-level summary of the set of changes. pub maybe_summary: Option<String>, /// The sections making up the change set. pub sections: Vec<ChangeSetSection>, } impl ChangeSet { /// Returns true if this change set has no summary and no entries /// associated with it. pub fn is_empty(&self) -> bool { self.maybe_summary.as_ref().map_or(true, String::is_empty) && self.are_sections_empty() } /// Returns whether or not all the sections are empty. pub fn are_sections_empty(&self) -> bool { self.sections.iter().all(ChangeSetSection::is_empty) } /// Attempt to read a single change set from the given directory. pub fn read_from_dir<P>(config: &Config, path: P) -> Result<Self> where P: AsRef<Path>, { let path = path.as_ref(); debug!("Loading change set from {}", path.display()); let summary = read_to_string_opt(path.join(&config.change_sets.summary_filename))? .map(|s| trim_newlines(&s).to_owned()); let section_dirs = read_and_filter_dir(path, change_set_section_filter)?; let mut sections = section_dirs .into_iter() .map(|path| ChangeSetSection::read_from_dir(config, path)) .collect::<Result<Vec<ChangeSetSection>>>()?; // Sort sections alphabetically sections.sort_by(|a, b| a.title.cmp(&b.title)); Ok(Self { maybe_summary: summary, sections, }) } /// Attempt to read a single change set from the given directory, like /// [`ChangeSet::read_from_dir`], but return `Option::None` if the /// directory does not exist. pub fn read_from_dir_opt<P>(config: &Config, path: P) -> Result<Option<Self>> where P: AsRef<Path>, { let path = path.as_ref(); // The path doesn't exist if fs::metadata(path).is_err() { return Ok(None); } Self::read_from_dir(config, path).map(Some) } pub fn render(&self, config: &Config) -> String { let mut paragraphs = Vec::new(); if let Some(summary) = self.maybe_summary.as_ref() { paragraphs.push(summary.clone()); } self.sections .iter() .filter(|s| !s.is_empty()) .for_each(|s| paragraphs.push(s.render(config))); paragraphs.join("\n\n") } } fn change_set_section_filter(e: fs::DirEntry) -> Option<Result<PathBuf>> { let meta = match e.metadata() { Ok(m) => m, Err(e) => return Some(Err(Error::Io(e))), }; if meta.is_dir() { Some(Ok(e.path())) } else { None } }
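// Rough sketch of the paragraph assembly in `ChangeSet::render` above, mimicked with
// plain strings so the output shape is visible: summary first, empty entries skipped,
// and paragraphs separated by exactly one blank line. The section text is hypothetical.
#[cfg(test)]
mod render_shape_sketch {
    #[test]
    fn paragraphs_are_blank_line_separated() {
        let maybe_summary = Some("A short summary.".to_string());
        let rendered_sections = vec![String::new(), "FEATURES\n\n- added a thing".to_string()];

        let mut paragraphs = Vec::new();
        if let Some(summary) = maybe_summary.as_ref() {
            paragraphs.push(summary.clone());
        }
        rendered_sections
            .iter()
            .filter(|s| !s.is_empty())
            .for_each(|s| paragraphs.push(s.clone()));

        assert_eq!(
            paragraphs.join("\n\n"),
            "A short summary.\n\nFEATURES\n\n- added a thing"
        );
    }
}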
33.733333
95
0.608037
1ae4a366ce6e920d63c62a17626ca1c0fac5ba55
121,701
use crate::{ checks::*, cli::{ log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult, }, }; use bip39::{Language, Mnemonic, MnemonicType, Seed}; use clap::{App, AppSettings, Arg, ArgMatches, SubCommand}; use log::*; use solana_account_decoder::{UiAccountEncoding, UiDataSliceConfig}; use solana_bpf_loader_program::{syscalls::register_syscalls, BpfError, ThisInstructionMeter}; use solana_clap_utils::{self, input_parsers::*, input_validators::*, keypair::*}; use solana_cli_output::{ display::new_spinner_progress_bar, CliProgram, CliProgramAccountType, CliProgramAuthority, CliProgramBuffer, CliProgramId, CliUpgradeableBuffer, CliUpgradeableBuffers, CliUpgradeableProgram, CliUpgradeableProgramClosed, CliUpgradeablePrograms, }; use solana_client::{ client_error::ClientErrorKind, rpc_client::RpcClient, rpc_config::RpcSendTransactionConfig, rpc_config::{RpcAccountInfoConfig, RpcProgramAccountsConfig}, rpc_filter::{Memcmp, MemcmpEncodedBytes, RpcFilterType}, rpc_request::MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS, tpu_client::{TpuClient, TpuClientConfig}, }; use solana_rbpf::{ verifier, vm::{Config, Executable}, }; use solana_remote_wallet::remote_wallet::RemoteWalletManager; use solana_sdk::{ account::Account, account_utils::StateMut, bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable::{self, UpgradeableLoaderState}, commitment_config::CommitmentConfig, instruction::Instruction, instruction::InstructionError, loader_instruction, message::Message, native_token::Sol, process_instruction::MockInvokeContext, pubkey::Pubkey, signature::{keypair_from_seed, read_keypair_file, Keypair, Signer}, signers::Signers, system_instruction::{self, SystemError}, system_program, transaction::Transaction, transaction::TransactionError, }; use solana_transaction_status::TransactionConfirmationStatus; use std::{ collections::HashMap, error, fs::File, io::{Read, Write}, mem::size_of, path::PathBuf, str::FromStr, sync::Arc, thread::sleep, time::Duration, }; const DATA_CHUNK_SIZE: usize = 229; // Keep program chunks under PACKET_DATA_SIZE #[derive(Debug, PartialEq)] pub enum ProgramCliCommand { Deploy { program_location: Option<String>, program_signer_index: Option<SignerIndex>, program_pubkey: Option<Pubkey>, buffer_signer_index: Option<SignerIndex>, buffer_pubkey: Option<Pubkey>, upgrade_authority_signer_index: SignerIndex, is_final: bool, max_len: Option<usize>, allow_excessive_balance: bool, }, WriteBuffer { program_location: String, buffer_signer_index: Option<SignerIndex>, buffer_pubkey: Option<Pubkey>, buffer_authority_signer_index: Option<SignerIndex>, max_len: Option<usize>, }, SetBufferAuthority { buffer_pubkey: Pubkey, buffer_authority_index: Option<SignerIndex>, new_buffer_authority: Pubkey, }, SetUpgradeAuthority { program_pubkey: Pubkey, upgrade_authority_index: Option<SignerIndex>, new_upgrade_authority: Option<Pubkey>, }, Show { account_pubkey: Option<Pubkey>, authority_pubkey: Pubkey, get_programs: bool, get_buffers: bool, all: bool, use_lamports_unit: bool, }, Dump { account_pubkey: Option<Pubkey>, output_location: String, }, Close { account_pubkey: Option<Pubkey>, recipient_pubkey: Pubkey, authority_index: SignerIndex, use_lamports_unit: bool, }, } pub trait ProgramSubCommands { fn program_subcommands(self) -> Self; } impl ProgramSubCommands for App<'_, '_> { fn program_subcommands(self) -> Self { self.subcommand( SubCommand::with_name("program") .about("Program management") .setting(AppSettings::SubcommandRequiredElseHelp) .subcommand( 
SubCommand::with_name("deploy") .about("Deploy a program") .arg( Arg::with_name("program_location") .index(1) .value_name("PROGRAM_FILEPATH") .takes_value(true) .help("/path/to/program.so"), ) .arg( Arg::with_name("buffer") .long("buffer") .value_name("BUFFER_SIGNER") .takes_value(true) .validator(is_valid_signer) .help("Intermediate buffer account to write data to, which can be used to resume a failed deploy \ [default: random address]") ) .arg( Arg::with_name("upgrade_authority") .long("upgrade-authority") .value_name("UPGRADE_AUTHORITY_SIGNER") .takes_value(true) .validator(is_valid_signer) .help("Upgrade authority [default: the default configured keypair]") ) .arg( pubkey!(Arg::with_name("program_id") .long("program-id") .value_name("PROGRAM_ID"), "Executable program's address, must be a keypair for initial deploys, can be a pubkey for upgrades \ [default: address of keypair at /path/to/program-keypair.json if present, otherwise a random address]"), ) .arg( Arg::with_name("final") .long("final") .help("The program will not be upgradeable") ) .arg( Arg::with_name("max_len") .long("max-len") .value_name("max_len") .takes_value(true) .required(false) .help("Maximum length of the upgradeable program \ [default: twice the length of the original deployed program]") ) .arg( Arg::with_name("allow_excessive_balance") .long("allow-excessive-deploy-account-balance") .takes_value(false) .help("Use the designated program id even if the account already holds a large balance of SOL") ), ) .subcommand( SubCommand::with_name("write-buffer") .about("Writes a program into a buffer account") .arg( Arg::with_name("program_location") .index(1) .value_name("PROGRAM_FILEPATH") .takes_value(true) .required(true) .help("/path/to/program.so"), ) .arg( Arg::with_name("buffer") .long("buffer") .value_name("BUFFER_SIGNER") .takes_value(true) .validator(is_valid_signer) .help("Buffer account to write data into [default: random address]") ) .arg( Arg::with_name("buffer_authority") .long("buffer-authority") .value_name("BUFFER_AUTHORITY_SIGNER") .takes_value(true) .validator(is_valid_signer) .help("Buffer authority [default: the default configured keypair]") ) .arg( Arg::with_name("max_len") .long("max-len") .value_name("max_len") .takes_value(true) .required(false) .help("Maximum length of the upgradeable program \ [default: twice the length of the original deployed program]") ), ) .subcommand( SubCommand::with_name("set-buffer-authority") .about("Set a new buffer authority") .arg( Arg::with_name("buffer") .index(1) .value_name("BUFFER_PUBKEY") .takes_value(true) .required(true) .help("Public key of the buffer") ) .arg( Arg::with_name("buffer_authority") .long("buffer-authority") .value_name("BUFFER_AUTHORITY_SIGNER") .takes_value(true) .validator(is_valid_signer) .help("Buffer authority [default: the default configured keypair]") ) .arg( pubkey!(Arg::with_name("new_buffer_authority") .long("new-buffer-authority") .value_name("NEW_BUFFER_AUTHORITY") .required(true), "Address of the new buffer authority"), ) ) .subcommand( SubCommand::with_name("set-upgrade-authority") .about("Set a new program authority") .arg( Arg::with_name("program_id") .index(1) .value_name("PROGRAM_ADDRESS") .takes_value(true) .required(true) .help("Address of the program to upgrade") ) .arg( Arg::with_name("upgrade_authority") .long("upgrade-authority") .value_name("UPGRADE_AUTHORITY_SIGNER") .takes_value(true) .validator(is_valid_signer) .help("Upgrade authority [default: the default configured keypair]") ) .arg( 
pubkey!(Arg::with_name("new_upgrade_authority") .long("new-upgrade-authority") .required_unless("final") .value_name("NEW_UPGRADE_AUTHORITY"), "Address of the new upgrade authority"), ) .arg( Arg::with_name("final") .long("final") .conflicts_with("new_upgrade_authority") .help("The program will not be upgradeable") ) ) .subcommand( SubCommand::with_name("show") .about("Display information about a buffer or program") .arg( Arg::with_name("account") .index(1) .value_name("ACCOUNT_ADDRESS") .takes_value(true) .help("Address of the buffer or program to show") ) .arg( Arg::with_name("programs") .long("programs") .conflicts_with("account") .conflicts_with("buffers") .required_unless_one(&["account", "buffers"]) .help("Show every upgradeable program that matches the authority") ) .arg( Arg::with_name("buffers") .long("buffers") .conflicts_with("account") .conflicts_with("programs") .required_unless_one(&["account", "programs"]) .help("Show every upgradeable buffer that matches the authority") ) .arg( Arg::with_name("all") .long("all") .conflicts_with("account") .conflicts_with("buffer_authority") .help("Show accounts for all authorities") ) .arg( pubkey!(Arg::with_name("buffer_authority") .long("buffer-authority") .value_name("AUTHORITY") .conflicts_with("all"), "Authority [default: the default configured keypair]"), ) .arg( Arg::with_name("lamports") .long("lamports") .takes_value(false) .help("Display balance in lamports instead of SOL"), ), ) .subcommand( SubCommand::with_name("dump") .about("Write the program data to a file") .arg( Arg::with_name("account") .index(1) .value_name("ACCOUNT_ADDRESS") .takes_value(true) .required(true) .help("Address of the buffer or program") ) .arg( Arg::with_name("output_location") .index(2) .value_name("OUTPUT_FILEPATH") .takes_value(true) .required(true) .help("/path/to/program.so"), ), ) .subcommand( SubCommand::with_name("close") .about("Close a program or buffer account and withdraw all lamports") .arg( Arg::with_name("account") .index(1) .value_name("ACCOUNT_ADDRESS") .takes_value(true) .help("Address of the program or buffer account to close"), ) .arg( Arg::with_name("buffers") .long("buffers") .conflicts_with("account") .required_unless("account") .help("Close all buffer accounts that match the authority") ) .arg( Arg::with_name("authority") .long("authority") .alias("buffer-authority") .value_name("AUTHORITY_SIGNER") .takes_value(true) .validator(is_valid_signer) .help("Upgrade or buffer authority [default: the default configured keypair]") ) .arg( pubkey!(Arg::with_name("recipient_account") .long("recipient") .value_name("RECIPIENT_ADDRESS"), "Address of the account to deposit the closed account's lamports [default: the default configured keypair]"), ) .arg( Arg::with_name("lamports") .long("lamports") .takes_value(false) .help("Display balance in lamports instead of SOL"), ), ) ) .subcommand( SubCommand::with_name("deploy") .about("Deploy a program") .arg( Arg::with_name("program_location") .index(1) .value_name("PROGRAM_FILEPATH") .takes_value(true) .required(true) .help("/path/to/program.o"), ) .arg( Arg::with_name("address_signer") .index(2) .value_name("PROGRAM_ADDRESS_SIGNER") .takes_value(true) .validator(is_valid_signer) .help("The signer for the desired address of the program [default: new random address]") ) .arg( Arg::with_name("use_deprecated_loader") .long("use-deprecated-loader") .takes_value(false) .hidden(true) // Don't document this argument to discourage its use .help("Use the deprecated BPF loader") ) .arg( 
Arg::with_name("allow_excessive_balance") .long("allow-excessive-deploy-account-balance") .takes_value(false) .help("Use the designated program id, even if the account already holds a large balance of SOL") ), ) } } pub fn parse_program_subcommand( matches: &ArgMatches<'_>, default_signer: &DefaultSigner, wallet_manager: &mut Option<Arc<RemoteWalletManager>>, ) -> Result<CliCommandInfo, CliError> { let response = match matches.subcommand() { ("deploy", Some(matches)) => { let mut bulk_signers = vec![Some( default_signer.signer_from_path(matches, wallet_manager)?, )]; let program_location = matches .value_of("program_location") .map(|location| location.to_string()); let buffer_pubkey = if let Ok((buffer_signer, Some(buffer_pubkey))) = signer_of(matches, "buffer", wallet_manager) { bulk_signers.push(buffer_signer); Some(buffer_pubkey) } else { pubkey_of_signer(matches, "buffer", wallet_manager)? }; let program_pubkey = if let Ok((program_signer, Some(program_pubkey))) = signer_of(matches, "program_id", wallet_manager) { bulk_signers.push(program_signer); Some(program_pubkey) } else { pubkey_of_signer(matches, "program_id", wallet_manager)? }; let upgrade_authority_pubkey = if let Ok((upgrade_authority_signer, Some(upgrade_authority_pubkey))) = signer_of(matches, "upgrade_authority", wallet_manager) { bulk_signers.push(upgrade_authority_signer); Some(upgrade_authority_pubkey) } else { Some( default_signer .signer_from_path(matches, wallet_manager)? .pubkey(), ) }; let max_len = value_of(matches, "max_len"); let signer_info = default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Deploy { program_location, program_signer_index: signer_info.index_of_or_none(program_pubkey), program_pubkey, buffer_signer_index: signer_info.index_of_or_none(buffer_pubkey), buffer_pubkey, upgrade_authority_signer_index: signer_info .index_of(upgrade_authority_pubkey) .unwrap(), is_final: matches.is_present("final"), max_len, allow_excessive_balance: matches.is_present("allow_excessive_balance"), }), signers: signer_info.signers, } } ("write-buffer", Some(matches)) => { let mut bulk_signers = vec![Some( default_signer.signer_from_path(matches, wallet_manager)?, )]; let buffer_pubkey = if let Ok((buffer_signer, Some(buffer_pubkey))) = signer_of(matches, "buffer", wallet_manager) { bulk_signers.push(buffer_signer); Some(buffer_pubkey) } else { pubkey_of_signer(matches, "buffer", wallet_manager)? }; let buffer_authority_pubkey = if let Ok((buffer_authority_signer, Some(buffer_authority_pubkey))) = signer_of(matches, "buffer_authority", wallet_manager) { bulk_signers.push(buffer_authority_signer); Some(buffer_authority_pubkey) } else { Some( default_signer .signer_from_path(matches, wallet_manager)? 
.pubkey(), ) }; let max_len = value_of(matches, "max_len"); let signer_info = default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: matches.value_of("program_location").unwrap().to_string(), buffer_signer_index: signer_info.index_of_or_none(buffer_pubkey), buffer_pubkey, buffer_authority_signer_index: signer_info .index_of_or_none(buffer_authority_pubkey), max_len, }), signers: signer_info.signers, } } ("set-buffer-authority", Some(matches)) => { let buffer_pubkey = pubkey_of(matches, "buffer").unwrap(); let (buffer_authority_signer, buffer_authority_pubkey) = signer_of(matches, "buffer_authority", wallet_manager)?; let new_buffer_authority = pubkey_of_signer(matches, "new_buffer_authority", wallet_manager)?.unwrap(); let signer_info = default_signer.generate_unique_signers( vec![ Some(default_signer.signer_from_path(matches, wallet_manager)?), buffer_authority_signer, ], matches, wallet_manager, )?; CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::SetBufferAuthority { buffer_pubkey, buffer_authority_index: signer_info.index_of(buffer_authority_pubkey), new_buffer_authority, }), signers: signer_info.signers, } } ("set-upgrade-authority", Some(matches)) => { let (upgrade_authority_signer, upgrade_authority_pubkey) = signer_of(matches, "upgrade_authority", wallet_manager)?; let program_pubkey = pubkey_of(matches, "program_id").unwrap(); let new_upgrade_authority = if matches.is_present("final") { None } else { pubkey_of_signer(matches, "new_upgrade_authority", wallet_manager)? }; let signer_info = default_signer.generate_unique_signers( vec![ Some(default_signer.signer_from_path(matches, wallet_manager)?), upgrade_authority_signer, ], matches, wallet_manager, )?; CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::SetUpgradeAuthority { program_pubkey, upgrade_authority_index: signer_info.index_of(upgrade_authority_pubkey), new_upgrade_authority, }), signers: signer_info.signers, } } ("show", Some(matches)) => { let authority_pubkey = if let Some(authority_pubkey) = pubkey_of_signer(matches, "buffer_authority", wallet_manager)? { authority_pubkey } else { default_signer .signer_from_path(matches, wallet_manager)? .pubkey() }; CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Show { account_pubkey: pubkey_of(matches, "account"), authority_pubkey, get_programs: matches.is_present("programs"), get_buffers: matches.is_present("buffers"), all: matches.is_present("all"), use_lamports_unit: matches.is_present("lamports"), }), signers: vec![], } } ("dump", Some(matches)) => CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Dump { account_pubkey: pubkey_of(matches, "account"), output_location: matches.value_of("output_location").unwrap().to_string(), }), signers: vec![], }, ("close", Some(matches)) => { let account_pubkey = if matches.is_present("buffers") { None } else { pubkey_of(matches, "account") }; let recipient_pubkey = if let Some(recipient_pubkey) = pubkey_of_signer(matches, "recipient_account", wallet_manager)? { recipient_pubkey } else { default_signer .signer_from_path(matches, wallet_manager)? 
.pubkey() }; let (authority_signer, authority_pubkey) = signer_of(matches, "authority", wallet_manager)?; let signer_info = default_signer.generate_unique_signers( vec![ Some(default_signer.signer_from_path(matches, wallet_manager)?), authority_signer, ], matches, wallet_manager, )?; CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Close { account_pubkey, recipient_pubkey, authority_index: signer_info.index_of(authority_pubkey).unwrap(), use_lamports_unit: matches.is_present("lamports"), }), signers: signer_info.signers, } } _ => unreachable!(), }; Ok(response) } pub fn process_program_subcommand( rpc_client: Arc<RpcClient>, config: &CliConfig, program_subcommand: &ProgramCliCommand, ) -> ProcessResult { match program_subcommand { ProgramCliCommand::Deploy { program_location, program_signer_index, program_pubkey, buffer_signer_index, buffer_pubkey, upgrade_authority_signer_index, is_final, max_len, allow_excessive_balance, } => process_program_deploy( rpc_client, config, program_location, *program_signer_index, *program_pubkey, *buffer_signer_index, *buffer_pubkey, *upgrade_authority_signer_index, *is_final, *max_len, *allow_excessive_balance, ), ProgramCliCommand::WriteBuffer { program_location, buffer_signer_index, buffer_pubkey, buffer_authority_signer_index, max_len, } => process_write_buffer( rpc_client, config, program_location, *buffer_signer_index, *buffer_pubkey, *buffer_authority_signer_index, *max_len, ), ProgramCliCommand::SetBufferAuthority { buffer_pubkey, buffer_authority_index, new_buffer_authority, } => process_set_authority( &rpc_client, config, None, Some(*buffer_pubkey), *buffer_authority_index, Some(*new_buffer_authority), ), ProgramCliCommand::SetUpgradeAuthority { program_pubkey, upgrade_authority_index, new_upgrade_authority, } => process_set_authority( &rpc_client, config, Some(*program_pubkey), None, *upgrade_authority_index, *new_upgrade_authority, ), ProgramCliCommand::Show { account_pubkey, authority_pubkey, get_programs, get_buffers, all, use_lamports_unit, } => process_show( &rpc_client, config, *account_pubkey, *authority_pubkey, *get_programs, *get_buffers, *all, *use_lamports_unit, ), ProgramCliCommand::Dump { account_pubkey, output_location, } => process_dump(&rpc_client, config, *account_pubkey, output_location), ProgramCliCommand::Close { account_pubkey, recipient_pubkey, authority_index, use_lamports_unit, } => process_close( &rpc_client, config, *account_pubkey, *recipient_pubkey, *authority_index, *use_lamports_unit, ), } } fn get_default_program_keypair(program_location: &Option<String>) -> Keypair { let program_keypair = { if let Some(program_location) = program_location { let mut keypair_file = PathBuf::new(); keypair_file.push(program_location); let mut filename = keypair_file.file_stem().unwrap().to_os_string(); filename.push("-keypair"); keypair_file.set_file_name(filename); keypair_file.set_extension("json"); if let Ok(keypair) = read_keypair_file(&keypair_file.to_str().unwrap()) { keypair } else { Keypair::new() } } else { Keypair::new() } }; program_keypair } /// Deploy using upgradeable loader #[allow(clippy::too_many_arguments)] fn process_program_deploy( rpc_client: Arc<RpcClient>, config: &CliConfig, program_location: &Option<String>, program_signer_index: Option<SignerIndex>, program_pubkey: Option<Pubkey>, buffer_signer_index: Option<SignerIndex>, buffer_pubkey: Option<Pubkey>, upgrade_authority_signer_index: SignerIndex, is_final: bool, max_len: Option<usize>, allow_excessive_balance: bool, ) -> ProcessResult { let 
(words, mnemonic, buffer_keypair) = create_ephemeral_keypair()?; let (buffer_provided, buffer_signer, buffer_pubkey) = if let Some(i) = buffer_signer_index { (true, Some(config.signers[i]), config.signers[i].pubkey()) } else if let Some(pubkey) = buffer_pubkey { (true, None, pubkey) } else { ( false, Some(&buffer_keypair as &dyn Signer), buffer_keypair.pubkey(), ) }; let upgrade_authority_signer = config.signers[upgrade_authority_signer_index]; let default_program_keypair = get_default_program_keypair(program_location); let (program_signer, program_pubkey) = if let Some(i) = program_signer_index { (Some(config.signers[i]), config.signers[i].pubkey()) } else if let Some(program_pubkey) = program_pubkey { (None, program_pubkey) } else { ( Some(&default_program_keypair as &dyn Signer), default_program_keypair.pubkey(), ) }; let do_deploy = if let Some(account) = rpc_client .get_account_with_commitment(&program_pubkey, config.commitment)? .value { if account.owner != bpf_loader_upgradeable::id() { return Err(format!( "Account {} is not an upgradeable program or already in use", program_pubkey ) .into()); } if !account.executable { // Continue an initial deploy true } else if let Ok(UpgradeableLoaderState::Program { programdata_address, }) = account.state() { if let Some(account) = rpc_client .get_account_with_commitment(&programdata_address, config.commitment)? .value { if let Ok(UpgradeableLoaderState::ProgramData { slot: _, upgrade_authority_address: program_authority_pubkey, }) = account.state() { if program_authority_pubkey.is_none() { return Err( format!("Program {} is no longer upgradeable", program_pubkey).into(), ); } if program_authority_pubkey != Some(upgrade_authority_signer.pubkey()) { return Err(format!( "Program's authority {:?} does not match authority provided {:?}", program_authority_pubkey, upgrade_authority_signer.pubkey(), ) .into()); } // Do upgrade false } else { return Err(format!( "Program {} has been closed, use a new Program Id", program_pubkey ) .into()); } } else { return Err(format!( "Program {} has been closed, use a new Program Id", program_pubkey ) .into()); } } else { return Err(format!("{} is not an upgradeable program", program_pubkey).into()); } } else { // do new deploy true }; let (program_data, program_len) = if let Some(program_location) = program_location { let program_data = read_and_verify_elf(program_location)?; let program_len = program_data.len(); (program_data, program_len) } else if buffer_provided { // Check supplied buffer account if let Some(account) = rpc_client .get_account_with_commitment(&buffer_pubkey, config.commitment)? 
.value { if let Ok(UpgradeableLoaderState::Buffer { authority_address: _, }) = account.state() { } else { return Err(format!("Buffer account {} is not initialized", buffer_pubkey).into()); } (vec![], account.data.len()) } else { return Err(format!( "Buffer account {} not found, was it already consumed?", buffer_pubkey ) .into()); } } else { return Err("Program location required if buffer not supplied".into()); }; let buffer_data_len = program_len; let programdata_len = if let Some(len) = max_len { if program_len > len { return Err("Max length specified not large enough".into()); } len } else if is_final { program_len } else { program_len * 2 }; let minimum_balance = rpc_client.get_minimum_balance_for_rent_exemption( UpgradeableLoaderState::programdata_len(buffer_data_len)?, )?; let result = if do_deploy { if program_signer.is_none() { return Err( "Initial deployments require a keypair be provided for the program id".into(), ); } do_process_program_write_and_deploy( rpc_client.clone(), config, &program_data, buffer_data_len, programdata_len, minimum_balance, &bpf_loader_upgradeable::id(), Some(&[program_signer.unwrap(), upgrade_authority_signer]), buffer_signer, &buffer_pubkey, Some(upgrade_authority_signer), allow_excessive_balance, ) } else { do_process_program_upgrade( rpc_client.clone(), config, &program_data, &program_pubkey, config.signers[upgrade_authority_signer_index], &buffer_pubkey, buffer_signer, ) }; if result.is_ok() && is_final { process_set_authority( &rpc_client, config, Some(program_pubkey), None, Some(upgrade_authority_signer_index), None, )?; } if result.is_err() && buffer_signer_index.is_none() { report_ephemeral_mnemonic(words, mnemonic); } result } fn process_write_buffer( rpc_client: Arc<RpcClient>, config: &CliConfig, program_location: &str, buffer_signer_index: Option<SignerIndex>, buffer_pubkey: Option<Pubkey>, buffer_authority_signer_index: Option<SignerIndex>, max_len: Option<usize>, ) -> ProcessResult { // Create ephemeral keypair to use for Buffer account, if not provided let (words, mnemonic, buffer_keypair) = create_ephemeral_keypair()?; let (buffer_signer, buffer_pubkey) = if let Some(i) = buffer_signer_index { (Some(config.signers[i]), config.signers[i].pubkey()) } else if let Some(pubkey) = buffer_pubkey { (None, pubkey) } else { ( Some(&buffer_keypair as &dyn Signer), buffer_keypair.pubkey(), ) }; let buffer_authority = if let Some(i) = buffer_authority_signer_index { config.signers[i] } else { config.signers[0] }; if let Some(account) = rpc_client .get_account_with_commitment(&buffer_pubkey, config.commitment)? 
.value { if let Ok(UpgradeableLoaderState::Buffer { authority_address }) = account.state() { if authority_address.is_none() { return Err(format!("Buffer {} is immutable", buffer_pubkey).into()); } if authority_address != Some(buffer_authority.pubkey()) { return Err(format!( "Buffer's authority {:?} does not match authority provided {}", authority_address, buffer_authority.pubkey() ) .into()); } } else { return Err(format!( "{} is not an upgradeable loader buffer account", buffer_pubkey ) .into()); } } let program_data = read_and_verify_elf(program_location)?; let buffer_data_len = if let Some(len) = max_len { len } else { program_data.len() }; let minimum_balance = rpc_client.get_minimum_balance_for_rent_exemption( UpgradeableLoaderState::programdata_len(buffer_data_len)?, )?; let result = do_process_program_write_and_deploy( rpc_client, config, &program_data, program_data.len(), program_data.len(), minimum_balance, &bpf_loader_upgradeable::id(), None, buffer_signer, &buffer_pubkey, Some(buffer_authority), true, ); if result.is_err() && buffer_signer_index.is_none() && buffer_signer.is_some() { report_ephemeral_mnemonic(words, mnemonic); } result } fn process_set_authority( rpc_client: &RpcClient, config: &CliConfig, program_pubkey: Option<Pubkey>, buffer_pubkey: Option<Pubkey>, authority: Option<SignerIndex>, new_authority: Option<Pubkey>, ) -> ProcessResult { let authority_signer = if let Some(index) = authority { config.signers[index] } else { return Err("Set authority requires the current authority".into()); }; trace!("Set a new authority"); let blockhash = rpc_client.get_latest_blockhash()?; let mut tx = if let Some(ref pubkey) = program_pubkey { Transaction::new_unsigned(Message::new( &[bpf_loader_upgradeable::set_upgrade_authority( pubkey, &authority_signer.pubkey(), new_authority.as_ref(), )], Some(&config.signers[0].pubkey()), )) } else if let Some(pubkey) = buffer_pubkey { if let Some(ref new_authority) = new_authority { Transaction::new_unsigned(Message::new( &[bpf_loader_upgradeable::set_buffer_authority( &pubkey, &authority_signer.pubkey(), new_authority, )], Some(&config.signers[0].pubkey()), )) } else { return Err("Buffer authority cannot be None".into()); } } else { return Err("Program or Buffer not provided".into()); }; tx.try_sign(&[config.signers[0], authority_signer], blockhash)?; rpc_client .send_and_confirm_transaction_with_spinner_and_config( &tx, config.commitment, RpcSendTransactionConfig { skip_preflight: true, preflight_commitment: Some(config.commitment.commitment), ..RpcSendTransactionConfig::default() }, ) .map_err(|e| format!("Setting authority failed: {}", e))?; let authority = CliProgramAuthority { authority: new_authority .map(|pubkey| pubkey.to_string()) .unwrap_or_else(|| "none".to_string()), account_type: if program_pubkey.is_some() { CliProgramAccountType::Program } else { CliProgramAccountType::Buffer }, }; Ok(config.output_format.formatted_string(&authority)) } const ACCOUNT_TYPE_SIZE: usize = 4; const SLOT_SIZE: usize = size_of::<u64>(); const OPTION_SIZE: usize = 1; const PUBKEY_LEN: usize = 32; fn get_buffers( rpc_client: &RpcClient, authority_pubkey: Option<Pubkey>, use_lamports_unit: bool, ) -> Result<CliUpgradeableBuffers, Box<dyn std::error::Error>> { let mut filters = vec![RpcFilterType::Memcmp(Memcmp { offset: 0, bytes: MemcmpEncodedBytes::Binary(bs58::encode(vec![1, 0, 0, 0]).into_string()), encoding: None, })]; if let Some(authority_pubkey) = authority_pubkey { filters.push(RpcFilterType::Memcmp(Memcmp { offset: ACCOUNT_TYPE_SIZE, bytes: 
MemcmpEncodedBytes::Binary(bs58::encode(vec![1]).into_string()), encoding: None, })); filters.push(RpcFilterType::Memcmp(Memcmp { offset: ACCOUNT_TYPE_SIZE + OPTION_SIZE, bytes: MemcmpEncodedBytes::Binary( bs58::encode(authority_pubkey.as_ref()).into_string(), ), encoding: None, })); } let results = get_accounts_with_filter( rpc_client, filters, ACCOUNT_TYPE_SIZE + OPTION_SIZE + PUBKEY_LEN, )?; let mut buffers = vec![]; for (address, account) in results.iter() { if let Ok(UpgradeableLoaderState::Buffer { authority_address }) = account.state() { buffers.push(CliUpgradeableBuffer { address: address.to_string(), authority: authority_address .map(|pubkey| pubkey.to_string()) .unwrap_or_else(|| "none".to_string()), data_len: 0, lamports: account.lamports, use_lamports_unit, }); } else { return Err(format!("Error parsing Buffer account {}", address).into()); } } Ok(CliUpgradeableBuffers { buffers, use_lamports_unit, }) } fn get_programs( rpc_client: &RpcClient, authority_pubkey: Option<Pubkey>, use_lamports_unit: bool, ) -> Result<CliUpgradeablePrograms, Box<dyn std::error::Error>> { let mut filters = vec![RpcFilterType::Memcmp(Memcmp { offset: 0, bytes: MemcmpEncodedBytes::Binary(bs58::encode(vec![3, 0, 0, 0]).into_string()), encoding: None, })]; if let Some(authority_pubkey) = authority_pubkey { filters.push(RpcFilterType::Memcmp(Memcmp { offset: ACCOUNT_TYPE_SIZE + SLOT_SIZE, bytes: MemcmpEncodedBytes::Binary(bs58::encode(vec![1]).into_string()), encoding: None, })); filters.push(RpcFilterType::Memcmp(Memcmp { offset: ACCOUNT_TYPE_SIZE + SLOT_SIZE + OPTION_SIZE, bytes: MemcmpEncodedBytes::Binary( bs58::encode(authority_pubkey.as_ref()).into_string(), ), encoding: None, })); } let results = get_accounts_with_filter( rpc_client, filters, ACCOUNT_TYPE_SIZE + SLOT_SIZE + OPTION_SIZE + PUBKEY_LEN, )?; let mut programs = vec![]; for (programdata_address, programdata_account) in results.iter() { if let Ok(UpgradeableLoaderState::ProgramData { slot, upgrade_authority_address, }) = programdata_account.state() { let mut bytes = vec![2, 0, 0, 0]; bytes.extend_from_slice(programdata_address.as_ref()); let filters = vec![RpcFilterType::Memcmp(Memcmp { offset: 0, bytes: MemcmpEncodedBytes::Binary(bs58::encode(bytes).into_string()), encoding: None, })]; let results = get_accounts_with_filter(rpc_client, filters, 0)?; if results.len() != 1 { return Err(format!( "Error: More than one Program associated with ProgramData account {}", programdata_address ) .into()); } programs.push(CliUpgradeableProgram { program_id: results[0].0.to_string(), owner: programdata_account.owner.to_string(), programdata_address: programdata_address.to_string(), authority: upgrade_authority_address .map(|pubkey| pubkey.to_string()) .unwrap_or_else(|| "none".to_string()), last_deploy_slot: slot, data_len: programdata_account.data.len() - UpgradeableLoaderState::programdata_data_offset()?, lamports: programdata_account.lamports, use_lamports_unit, }); } else { return Err( format!("Error parsing ProgramData account {}", programdata_address).into(), ); } } Ok(CliUpgradeablePrograms { programs, use_lamports_unit, }) } fn get_accounts_with_filter( rpc_client: &RpcClient, filters: Vec<RpcFilterType>, length: usize, ) -> Result<Vec<(Pubkey, Account)>, Box<dyn std::error::Error>> { let results = rpc_client.get_program_accounts_with_config( &bpf_loader_upgradeable::id(), RpcProgramAccountsConfig { filters: Some(filters), account_config: RpcAccountInfoConfig { encoding: Some(UiAccountEncoding::Base64), data_slice: Some(UiDataSliceConfig { 
offset: 0, length }), ..RpcAccountInfoConfig::default() }, ..RpcProgramAccountsConfig::default() }, )?; Ok(results) } fn process_show( rpc_client: &RpcClient, config: &CliConfig, account_pubkey: Option<Pubkey>, authority_pubkey: Pubkey, programs: bool, buffers: bool, all: bool, use_lamports_unit: bool, ) -> ProcessResult { if let Some(account_pubkey) = account_pubkey { if let Some(account) = rpc_client .get_account_with_commitment(&account_pubkey, config.commitment)? .value { if account.owner == bpf_loader::id() || account.owner == bpf_loader_deprecated::id() { Ok(config.output_format.formatted_string(&CliProgram { program_id: account_pubkey.to_string(), owner: account.owner.to_string(), data_len: account.data.len(), })) } else if account.owner == bpf_loader_upgradeable::id() { if let Ok(UpgradeableLoaderState::Program { programdata_address, }) = account.state() { if let Some(programdata_account) = rpc_client .get_account_with_commitment(&programdata_address, config.commitment)? .value { if let Ok(UpgradeableLoaderState::ProgramData { upgrade_authority_address, slot, }) = programdata_account.state() { Ok(config .output_format .formatted_string(&CliUpgradeableProgram { program_id: account_pubkey.to_string(), owner: account.owner.to_string(), programdata_address: programdata_address.to_string(), authority: upgrade_authority_address .map(|pubkey| pubkey.to_string()) .unwrap_or_else(|| "none".to_string()), last_deploy_slot: slot, data_len: programdata_account.data.len() - UpgradeableLoaderState::programdata_data_offset()?, lamports: programdata_account.lamports, use_lamports_unit, })) } else { Err(format!("Program {} has been closed", account_pubkey).into()) } } else { Err(format!("Program {} has been closed", account_pubkey).into()) } } else if let Ok(UpgradeableLoaderState::Buffer { authority_address }) = account.state() { Ok(config .output_format .formatted_string(&CliUpgradeableBuffer { address: account_pubkey.to_string(), authority: authority_address .map(|pubkey| pubkey.to_string()) .unwrap_or_else(|| "none".to_string()), data_len: account.data.len() - UpgradeableLoaderState::buffer_data_offset()?, lamports: account.lamports, use_lamports_unit, })) } else { Err(format!( "{} is not an upgradeable loader Buffer or Program account", account_pubkey ) .into()) } } else { Err(format!("{} is not a BPF program", account_pubkey).into()) } } else { Err(format!("Unable to find the account {}", account_pubkey).into()) } } else if programs { let authority_pubkey = if all { None } else { Some(authority_pubkey) }; let programs = get_programs(rpc_client, authority_pubkey, use_lamports_unit)?; Ok(config.output_format.formatted_string(&programs)) } else if buffers { let authority_pubkey = if all { None } else { Some(authority_pubkey) }; let buffers = get_buffers(rpc_client, authority_pubkey, use_lamports_unit)?; Ok(config.output_format.formatted_string(&buffers)) } else { Err("Invalid parameters".to_string().into()) } } fn process_dump( rpc_client: &RpcClient, config: &CliConfig, account_pubkey: Option<Pubkey>, output_location: &str, ) -> ProcessResult { if let Some(account_pubkey) = account_pubkey { if let Some(account) = rpc_client .get_account_with_commitment(&account_pubkey, config.commitment)? 
.value { if account.owner == bpf_loader::id() || account.owner == bpf_loader_deprecated::id() { let mut f = File::create(output_location)?; f.write_all(&account.data)?; Ok(format!("Wrote program to {}", output_location)) } else if account.owner == bpf_loader_upgradeable::id() { if let Ok(UpgradeableLoaderState::Program { programdata_address, }) = account.state() { if let Some(programdata_account) = rpc_client .get_account_with_commitment(&programdata_address, config.commitment)? .value { if let Ok(UpgradeableLoaderState::ProgramData { .. }) = programdata_account.state() { let offset = UpgradeableLoaderState::programdata_data_offset().unwrap_or(0); let program_data = &programdata_account.data[offset..]; let mut f = File::create(output_location)?; f.write_all(program_data)?; Ok(format!("Wrote program to {}", output_location)) } else { Err(format!("Program {} has been closed", account_pubkey).into()) } } else { Err(format!("Program {} has been closed", account_pubkey).into()) } } else if let Ok(UpgradeableLoaderState::Buffer { .. }) = account.state() { let offset = UpgradeableLoaderState::buffer_data_offset().unwrap_or(0); let program_data = &account.data[offset..]; let mut f = File::create(output_location)?; f.write_all(program_data)?; Ok(format!("Wrote program to {}", output_location)) } else { Err(format!( "{} is not an upgradeable loader buffer or program account", account_pubkey ) .into()) } } else { Err(format!("{} is not a BPF program", account_pubkey).into()) } } else { Err(format!("Unable to find the account {}", account_pubkey).into()) } } else { Err("No account specified".into()) } } fn close( rpc_client: &RpcClient, config: &CliConfig, account_pubkey: &Pubkey, recipient_pubkey: &Pubkey, authority_signer: &dyn Signer, program_pubkey: Option<&Pubkey>, ) -> Result<(), Box<dyn std::error::Error>> { let blockhash = rpc_client.get_latest_blockhash()?; let mut tx = Transaction::new_unsigned(Message::new( &[bpf_loader_upgradeable::close_any( account_pubkey, recipient_pubkey, Some(&authority_signer.pubkey()), program_pubkey, )], Some(&config.signers[0].pubkey()), )); tx.try_sign(&[config.signers[0], authority_signer], blockhash)?; let result = rpc_client.send_and_confirm_transaction_with_spinner_and_config( &tx, config.commitment, RpcSendTransactionConfig { skip_preflight: true, preflight_commitment: Some(config.commitment.commitment), ..RpcSendTransactionConfig::default() }, ); if let Err(err) = result { if let ClientErrorKind::TransactionError(TransactionError::InstructionError( _, InstructionError::InvalidInstructionData, )) = err.kind() { return Err("Closing a buffer account is not supported by the cluster".into()); } else if let ClientErrorKind::TransactionError(TransactionError::InstructionError( _, InstructionError::InvalidArgument, )) = err.kind() { return Err("Closing a program account is not supported by the cluster".into()); } else { return Err(format!("Close failed: {}", err).into()); } } Ok(()) } fn process_close( rpc_client: &RpcClient, config: &CliConfig, account_pubkey: Option<Pubkey>, recipient_pubkey: Pubkey, authority_index: SignerIndex, use_lamports_unit: bool, ) -> ProcessResult { let authority_signer = config.signers[authority_index]; if let Some(account_pubkey) = account_pubkey { if let Some(account) = rpc_client .get_account_with_commitment(&account_pubkey, config.commitment)? 
.value { match account.state() { Ok(UpgradeableLoaderState::Buffer { authority_address }) => { if authority_address != Some(authority_signer.pubkey()) { return Err(format!( "Buffer account authority {:?} does not match {:?}", authority_address, Some(authority_signer.pubkey()) ) .into()); } else { close( rpc_client, config, &account_pubkey, &recipient_pubkey, authority_signer, None, )?; } Ok(config .output_format .formatted_string(&CliUpgradeableBuffers { buffers: vec![CliUpgradeableBuffer { address: account_pubkey.to_string(), authority: authority_address .map(|pubkey| pubkey.to_string()) .unwrap_or_else(|| "none".to_string()), data_len: 0, lamports: account.lamports, use_lamports_unit, }], use_lamports_unit, })) } Ok(UpgradeableLoaderState::Program { programdata_address: programdata_pubkey, }) => { if let Some(account) = rpc_client .get_account_with_commitment(&programdata_pubkey, config.commitment)? .value { if let Ok(UpgradeableLoaderState::ProgramData { slot: _, upgrade_authority_address: authority_pubkey, }) = account.state() { if authority_pubkey != Some(authority_signer.pubkey()) { return Err(format!( "Program authority {:?} does not match {:?}", authority_pubkey, Some(authority_signer.pubkey()) ) .into()); } else { close( rpc_client, config, &programdata_pubkey, &recipient_pubkey, authority_signer, Some(&account_pubkey), )?; Ok(config.output_format.formatted_string( &CliUpgradeableProgramClosed { program_id: account_pubkey.to_string(), lamports: account.lamports, use_lamports_unit, }, )) } } else { return Err( format!("Program {} has been closed", account_pubkey).into() ); } } else { return Err(format!("Program {} has been closed", account_pubkey).into()); } } _ => { return Err( format!("{} is not a Program or Buffer account", account_pubkey).into(), ); } } } else { return Err(format!("Unable to find the account {}", account_pubkey).into()); } } else { let buffers = get_buffers( rpc_client, Some(authority_signer.pubkey()), use_lamports_unit, )?; let mut closed = vec![]; for buffer in buffers.buffers.iter() { if close( rpc_client, config, &Pubkey::from_str(&buffer.address)?, &recipient_pubkey, authority_signer, None, ) .is_ok() { closed.push(buffer.clone()); } } Ok(config .output_format .formatted_string(&CliUpgradeableBuffers { buffers: closed, use_lamports_unit, })) } } /// Deploy using non-upgradeable loader pub fn process_deploy( rpc_client: Arc<RpcClient>, config: &CliConfig, program_location: &str, buffer_signer_index: Option<SignerIndex>, use_deprecated_loader: bool, allow_excessive_balance: bool, ) -> ProcessResult { // Create ephemeral keypair to use for Buffer account, if not provided let (words, mnemonic, buffer_keypair) = create_ephemeral_keypair()?; let buffer_signer = if let Some(i) = buffer_signer_index { config.signers[i] } else { &buffer_keypair }; let program_data = read_and_verify_elf(program_location)?; let minimum_balance = rpc_client.get_minimum_balance_for_rent_exemption(program_data.len())?; let loader_id = if use_deprecated_loader { bpf_loader_deprecated::id() } else { bpf_loader::id() }; let result = do_process_program_write_and_deploy( rpc_client, config, &program_data, program_data.len(), program_data.len(), minimum_balance, &loader_id, Some(&[buffer_signer]), Some(buffer_signer), &buffer_signer.pubkey(), Some(buffer_signer), allow_excessive_balance, ); if result.is_err() && buffer_signer_index.is_none() { report_ephemeral_mnemonic(words, mnemonic); } result } #[allow(clippy::too_many_arguments)] fn do_process_program_write_and_deploy( rpc_client: 
Arc<RpcClient>, config: &CliConfig, program_data: &[u8], buffer_data_len: usize, programdata_len: usize, minimum_balance: u64, loader_id: &Pubkey, program_signers: Option<&[&dyn Signer]>, buffer_signer: Option<&dyn Signer>, buffer_pubkey: &Pubkey, buffer_authority_signer: Option<&dyn Signer>, allow_excessive_balance: bool, ) -> ProcessResult { // Build messages to calculate fees let mut messages: Vec<&Message> = Vec::new(); // Initialize buffer account or complete if already partially initialized let (initial_message, write_messages, balance_needed) = if let Some(buffer_authority_signer) = buffer_authority_signer { let (initial_instructions, balance_needed) = if let Some(account) = rpc_client .get_account_with_commitment(buffer_pubkey, config.commitment)? .value { complete_partial_program_init( loader_id, &config.signers[0].pubkey(), buffer_pubkey, &account, if loader_id == &bpf_loader_upgradeable::id() { UpgradeableLoaderState::buffer_len(buffer_data_len)? } else { buffer_data_len }, minimum_balance, allow_excessive_balance, )? } else if loader_id == &bpf_loader_upgradeable::id() { ( bpf_loader_upgradeable::create_buffer( &config.signers[0].pubkey(), buffer_pubkey, &buffer_authority_signer.pubkey(), minimum_balance, buffer_data_len, )?, minimum_balance, ) } else { ( vec![system_instruction::create_account( &config.signers[0].pubkey(), buffer_pubkey, minimum_balance, buffer_data_len as u64, loader_id, )], minimum_balance, ) }; let initial_message = if !initial_instructions.is_empty() { Some(Message::new( &initial_instructions, Some(&config.signers[0].pubkey()), )) } else { None }; // Create and add write messages let mut write_messages = vec![]; for (chunk, i) in program_data.chunks(DATA_CHUNK_SIZE).zip(0..) { let instruction = if loader_id == &bpf_loader_upgradeable::id() { bpf_loader_upgradeable::write( buffer_pubkey, &buffer_authority_signer.pubkey(), (i * DATA_CHUNK_SIZE) as u32, chunk.to_vec(), ) } else { loader_instruction::write( buffer_pubkey, loader_id, (i * DATA_CHUNK_SIZE) as u32, chunk.to_vec(), ) }; let message = Message::new(&[instruction], Some(&config.signers[0].pubkey())); write_messages.push(message); } (initial_message, Some(write_messages), balance_needed) } else { (None, None, 0) }; if let Some(ref initial_message) = initial_message { messages.push(initial_message); } if let Some(ref write_messages) = write_messages { let mut write_message_refs = vec![]; for message in write_messages.iter() { write_message_refs.push(message); } messages.append(&mut write_message_refs); } // Create and add final message let final_message = if let Some(program_signers) = program_signers { let message = if loader_id == &bpf_loader_upgradeable::id() { Message::new( &bpf_loader_upgradeable::deploy_with_max_program_len( &config.signers[0].pubkey(), &program_signers[0].pubkey(), buffer_pubkey, &program_signers[1].pubkey(), rpc_client.get_minimum_balance_for_rent_exemption( UpgradeableLoaderState::program_len()?, )?, programdata_len, )?, Some(&config.signers[0].pubkey()), ) } else { Message::new( &[loader_instruction::finalize(buffer_pubkey, loader_id)], Some(&config.signers[0].pubkey()), ) }; Some(message) } else { None }; if let Some(ref message) = final_message { messages.push(message); } check_payer(&rpc_client, config, balance_needed, &messages)?; send_deploy_messages( rpc_client, config, &initial_message, &write_messages, &final_message, buffer_signer, buffer_authority_signer, program_signers, )?; if let Some(program_signers) = program_signers { let program_id = CliProgramId { 
program_id: program_signers[0].pubkey().to_string(), }; Ok(config.output_format.formatted_string(&program_id)) } else { let buffer = CliProgramBuffer { buffer: buffer_pubkey.to_string(), }; Ok(config.output_format.formatted_string(&buffer)) } } fn do_process_program_upgrade( rpc_client: Arc<RpcClient>, config: &CliConfig, program_data: &[u8], program_id: &Pubkey, upgrade_authority: &dyn Signer, buffer_pubkey: &Pubkey, buffer_signer: Option<&dyn Signer>, ) -> ProcessResult { let loader_id = bpf_loader_upgradeable::id(); let data_len = program_data.len(); let minimum_balance = rpc_client.get_minimum_balance_for_rent_exemption( UpgradeableLoaderState::programdata_len(data_len)?, )?; // Build messages to calculate fees let mut messages: Vec<&Message> = Vec::new(); let (initial_message, write_messages, balance_needed) = if let Some(buffer_signer) = buffer_signer { // Check Buffer account to see if partial initialization has occurred let (initial_instructions, balance_needed) = if let Some(account) = rpc_client .get_account_with_commitment(&buffer_signer.pubkey(), config.commitment)? .value { complete_partial_program_init( &loader_id, &config.signers[0].pubkey(), &buffer_signer.pubkey(), &account, UpgradeableLoaderState::buffer_len(data_len)?, minimum_balance, true, )? } else { ( bpf_loader_upgradeable::create_buffer( &config.signers[0].pubkey(), buffer_pubkey, &upgrade_authority.pubkey(), minimum_balance, data_len, )?, minimum_balance, ) }; let initial_message = if !initial_instructions.is_empty() { Some(Message::new( &initial_instructions, Some(&config.signers[0].pubkey()), )) } else { None }; // Create and add write messages let mut write_messages = vec![]; for (chunk, i) in program_data.chunks(DATA_CHUNK_SIZE).zip(0..) { let instruction = bpf_loader_upgradeable::write( &buffer_signer.pubkey(), &upgrade_authority.pubkey(), (i * DATA_CHUNK_SIZE) as u32, chunk.to_vec(), ); let message = Message::new(&[instruction], Some(&config.signers[0].pubkey())); write_messages.push(message); } (initial_message, Some(write_messages), balance_needed) } else { (None, None, 0) }; if let Some(ref message) = initial_message { messages.push(message); } if let Some(ref write_messages) = write_messages { let mut write_message_refs = vec![]; for message in write_messages.iter() { write_message_refs.push(message); } messages.append(&mut write_message_refs); } // Create and add final message let final_message = Message::new( &[bpf_loader_upgradeable::upgrade( program_id, buffer_pubkey, &upgrade_authority.pubkey(), &config.signers[0].pubkey(), )], Some(&config.signers[0].pubkey()), ); messages.push(&final_message); check_payer(&rpc_client, config, balance_needed, &messages)?; send_deploy_messages( rpc_client, config, &initial_message, &write_messages, &Some(final_message), buffer_signer, Some(upgrade_authority), Some(&[upgrade_authority]), )?; let program_id = CliProgramId { program_id: program_id.to_string(), }; Ok(config.output_format.formatted_string(&program_id)) } fn read_and_verify_elf(program_location: &str) -> Result<Vec<u8>, Box<dyn std::error::Error>> { let mut file = File::open(program_location) .map_err(|err| format!("Unable to open program file: {}", err))?; let mut program_data = Vec::new(); file.read_to_end(&mut program_data) .map_err(|err| format!("Unable to read program file: {}", err))?; let mut invoke_context = MockInvokeContext::new(vec![]); // Verify the program <dyn Executable<BpfError, ThisInstructionMeter>>::from_elf( &program_data, Some(verifier::check), Config { reject_unresolved_syscalls: 
true, verify_mul64_imm_nonzero: true, // TODO: Remove me after feature gate ..Config::default() }, register_syscalls(&mut invoke_context).unwrap(), ) .map_err(|err| format!("ELF error: {}", err))?; Ok(program_data) } fn complete_partial_program_init( loader_id: &Pubkey, payer_pubkey: &Pubkey, elf_pubkey: &Pubkey, account: &Account, account_data_len: usize, minimum_balance: u64, allow_excessive_balance: bool, ) -> Result<(Vec<Instruction>, u64), Box<dyn std::error::Error>> { let mut instructions: Vec<Instruction> = vec![]; let mut balance_needed = 0; if account.executable { return Err("Buffer account is already executable".into()); } if account.owner != *loader_id && !system_program::check_id(&account.owner) { return Err("Buffer account is already owned by another account".into()); } if account.data.is_empty() && system_program::check_id(&account.owner) { instructions.push(system_instruction::allocate( elf_pubkey, account_data_len as u64, )); if account.owner != *loader_id { instructions.push(system_instruction::assign(elf_pubkey, loader_id)); } } if account.lamports < minimum_balance { let balance = minimum_balance - account.lamports; instructions.push(system_instruction::transfer( payer_pubkey, elf_pubkey, balance, )); balance_needed = balance; } else if account.lamports > minimum_balance && system_program::check_id(&account.owner) && !allow_excessive_balance { return Err(format!( "Buffer account has a balance: {:?}; it may already be in use", Sol(account.lamports) ) .into()); } Ok((instructions, balance_needed)) } fn check_payer( rpc_client: &RpcClient, config: &CliConfig, balance_needed: u64, messages: &[&Message], ) -> Result<(), Box<dyn std::error::Error>> { let blockhash = rpc_client.get_latest_blockhash()?; // Does the payer have enough? check_account_for_spend_multiple_fees_with_commitment( rpc_client, &config.signers[0].pubkey(), balance_needed, &blockhash, messages, config.commitment, )?; Ok(()) } fn send_deploy_messages( rpc_client: Arc<RpcClient>, config: &CliConfig, initial_message: &Option<Message>, write_messages: &Option<Vec<Message>>, final_message: &Option<Message>, initial_signer: Option<&dyn Signer>, write_signer: Option<&dyn Signer>, final_signers: Option<&[&dyn Signer]>, ) -> Result<(), Box<dyn std::error::Error>> { let payer_signer = config.signers[0]; if let Some(message) = initial_message { if let Some(initial_signer) = initial_signer { trace!("Preparing the required accounts"); let blockhash = rpc_client.get_latest_blockhash()?; let mut initial_transaction = Transaction::new_unsigned(message.clone()); // Most of the initial_transaction combinations require both the fee-payer and new program // account to sign the transaction. One (transfer) only requires the fee-payer signature. // This check is to ensure signing does not fail on a KeypairPubkeyMismatch error from an // extraneous signature. 
if message.header.num_required_signatures == 2 { initial_transaction.try_sign(&[payer_signer, initial_signer], blockhash)?; } else { initial_transaction.try_sign(&[payer_signer], blockhash)?; } let result = rpc_client.send_and_confirm_transaction_with_spinner(&initial_transaction); log_instruction_custom_error::<SystemError>(result, config) .map_err(|err| format!("Account allocation failed: {}", err))?; } else { return Err("Buffer account not created yet, must provide a key pair".into()); } } if let Some(write_messages) = write_messages { if let Some(write_signer) = write_signer { trace!("Writing program data"); let (blockhash, last_valid_block_height) = rpc_client.get_latest_blockhash_with_commitment(config.commitment)?; let mut write_transactions = vec![]; for message in write_messages.iter() { let mut tx = Transaction::new_unsigned(message.clone()); tx.try_sign(&[payer_signer, write_signer], blockhash)?; write_transactions.push(tx); } send_and_confirm_transactions_with_spinner( rpc_client.clone(), &config.websocket_url, write_transactions, &[payer_signer, write_signer], config.commitment, last_valid_block_height, ) .map_err(|err| format!("Data writes to account failed: {}", err))?; } } if let Some(message) = final_message { if let Some(final_signers) = final_signers { trace!("Deploying program"); let blockhash = rpc_client.get_latest_blockhash()?; let mut final_tx = Transaction::new_unsigned(message.clone()); let mut signers = final_signers.to_vec(); signers.push(payer_signer); final_tx.try_sign(&signers, blockhash)?; rpc_client .send_and_confirm_transaction_with_spinner_and_config( &final_tx, config.commitment, RpcSendTransactionConfig { skip_preflight: true, preflight_commitment: Some(config.commitment.commitment), ..RpcSendTransactionConfig::default() }, ) .map_err(|e| format!("Deploying program failed: {}", e))?; } } Ok(()) } fn create_ephemeral_keypair( ) -> Result<(usize, bip39::Mnemonic, Keypair), Box<dyn std::error::Error>> { const WORDS: usize = 12; let mnemonic = Mnemonic::new(MnemonicType::for_word_count(WORDS)?, Language::English); let seed = Seed::new(&mnemonic, ""); let new_keypair = keypair_from_seed(seed.as_bytes())?; Ok((WORDS, mnemonic, new_keypair)) } fn report_ephemeral_mnemonic(words: usize, mnemonic: bip39::Mnemonic) { let phrase: &str = mnemonic.phrase(); let divider = String::from_utf8(vec![b'='; phrase.len()]).unwrap(); eprintln!( "{}\nRecover the intermediate account's ephemeral keypair file with", divider ); eprintln!( "`solana-keygen recover` and the following {}-word seed phrase:", words ); eprintln!("{}\n{}\n{}", divider, phrase, divider); eprintln!("To resume a deploy, pass the recovered keypair as"); eprintln!("the [PROGRAM_ADDRESS_SIGNER] argument to `solana deploy` or"); eprintln!("as the [BUFFER_SIGNER] to `solana program deploy` or `solana write-buffer'."); eprintln!("Or to recover the account's lamports, pass it as the"); eprintln!( "[BUFFER_ACCOUNT_ADDRESS] argument to `solana program close`.\n{}", divider ); } fn send_and_confirm_transactions_with_spinner<T: Signers>( rpc_client: Arc<RpcClient>, websocket_url: &str, mut transactions: Vec<Transaction>, signer_keys: &T, commitment: CommitmentConfig, mut last_valid_block_height: u64, ) -> Result<(), Box<dyn error::Error>> { let progress_bar = new_spinner_progress_bar(); let mut send_retries = 5; progress_bar.set_message("Finding leader nodes..."); let tpu_client = TpuClient::new( rpc_client.clone(), websocket_url, TpuClientConfig::default(), )?; loop { // Send all transactions let mut 
pending_transactions = HashMap::new(); let num_transactions = transactions.len(); for transaction in transactions { if !tpu_client.send_transaction(&transaction) { let _result = rpc_client .send_transaction_with_config( &transaction, RpcSendTransactionConfig { preflight_commitment: Some(commitment.commitment), ..RpcSendTransactionConfig::default() }, ) .ok(); } pending_transactions.insert(transaction.signatures[0], transaction); progress_bar.set_message(format!( "[{}/{}] Transactions sent", pending_transactions.len(), num_transactions )); // Throttle transactions to about 100 TPS sleep(Duration::from_millis(10)); } // Collect statuses for all the transactions, drop those that are confirmed loop { let mut block_height = 0; let pending_signatures = pending_transactions.keys().cloned().collect::<Vec<_>>(); for pending_signatures_chunk in pending_signatures.chunks(MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS) { if let Ok(result) = rpc_client.get_signature_statuses(pending_signatures_chunk) { let statuses = result.value; for (signature, status) in pending_signatures_chunk.iter().zip(statuses.into_iter()) { if let Some(status) = status { if let Some(confirmation_status) = &status.confirmation_status { if *confirmation_status != TransactionConfirmationStatus::Processed { let _ = pending_transactions.remove(signature); } } else if status.confirmations.is_none() || status.confirmations.unwrap() > 1 { let _ = pending_transactions.remove(signature); } } } } block_height = rpc_client.get_block_height()?; progress_bar.set_message(format!( "[{}/{}] Transactions confirmed. Retrying in {} blocks", num_transactions - pending_transactions.len(), num_transactions, last_valid_block_height.saturating_sub(block_height) )); } if pending_transactions.is_empty() { return Ok(()); } if block_height > last_valid_block_height { break; } for transaction in pending_transactions.values() { if !tpu_client.send_transaction(transaction) { let _result = rpc_client .send_transaction_with_config( transaction, RpcSendTransactionConfig { preflight_commitment: Some(commitment.commitment), ..RpcSendTransactionConfig::default() }, ) .ok(); } } if cfg!(not(test)) { // Retry twice a second sleep(Duration::from_millis(500)); } } if send_retries == 0 { return Err("Transactions failed".into()); } send_retries -= 1; // Re-sign any failed transactions with a new blockhash and retry let (blockhash, new_last_valid_block_height) = rpc_client.get_latest_blockhash_with_commitment(commitment)?; last_valid_block_height = new_last_valid_block_height; transactions = vec![]; for (_, mut transaction) in pending_transactions.into_iter() { transaction.try_sign(signer_keys, blockhash)?; transactions.push(transaction); } } } #[cfg(test)] mod tests { use super::*; use crate::{ clap_app::get_clap_app, cli::{parse_command, process_command}, }; use serde_json::Value; use solana_cli_output::OutputFormat; use solana_sdk::signature::write_keypair_file; fn make_tmp_path(name: &str) -> String { let out_dir = std::env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string()); let keypair = Keypair::new(); let path = format!("{}/tmp/{}-{}", out_dir, name, keypair.pubkey()); // whack any possible collision let _ignored = std::fs::remove_dir_all(&path); // whack any possible collision let _ignored = std::fs::remove_file(&path); path } #[test] #[allow(clippy::cognitive_complexity)] fn test_cli_parse_deploy() { let test_commands = get_clap_app("test", "desc", "version"); let default_keypair = Keypair::new(); let keypair_file = make_tmp_path("keypair_file"); 
write_keypair_file(&default_keypair, &keypair_file).unwrap(); let default_signer = DefaultSigner::new("", &keypair_file); let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "deploy", "/Users/test/program.so", ]); assert_eq!( parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some("/Users/test/program.so".to_string()), buffer_signer_index: None, buffer_pubkey: None, program_signer_index: None, program_pubkey: None, upgrade_authority_signer_index: 0, is_final: false, max_len: None, allow_excessive_balance: false, }), signers: vec![read_keypair_file(&keypair_file).unwrap().into()], } ); let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "deploy", "/Users/test/program.so", "--max-len", "42", ]); assert_eq!( parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some("/Users/test/program.so".to_string()), buffer_signer_index: None, buffer_pubkey: None, program_signer_index: None, program_pubkey: None, upgrade_authority_signer_index: 0, is_final: false, max_len: Some(42), allow_excessive_balance: false, }), signers: vec![read_keypair_file(&keypair_file).unwrap().into()], } ); let buffer_keypair = Keypair::new(); let buffer_keypair_file = make_tmp_path("buffer_keypair_file"); write_keypair_file(&buffer_keypair, &buffer_keypair_file).unwrap(); let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "deploy", "--buffer", &buffer_keypair_file, ]); assert_eq!( parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Deploy { program_location: None, buffer_signer_index: Some(1), buffer_pubkey: Some(buffer_keypair.pubkey()), program_signer_index: None, program_pubkey: None, upgrade_authority_signer_index: 0, is_final: false, max_len: None, allow_excessive_balance: false, }), signers: vec![ read_keypair_file(&keypair_file).unwrap().into(), read_keypair_file(&buffer_keypair_file).unwrap().into(), ], } ); let program_pubkey = Pubkey::new_unique(); let test = test_commands.clone().get_matches_from(vec![ "test", "program", "deploy", "/Users/test/program.so", "--program-id", &program_pubkey.to_string(), ]); assert_eq!( parse_command(&test, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some("/Users/test/program.so".to_string()), buffer_signer_index: None, buffer_pubkey: None, program_signer_index: None, program_pubkey: Some(program_pubkey), upgrade_authority_signer_index: 0, is_final: false, max_len: None, allow_excessive_balance: false, }), signers: vec![read_keypair_file(&keypair_file).unwrap().into()], } ); let program_keypair = Keypair::new(); let program_keypair_file = make_tmp_path("program_keypair_file"); write_keypair_file(&program_keypair, &program_keypair_file).unwrap(); let test = test_commands.clone().get_matches_from(vec![ "test", "program", "deploy", "/Users/test/program.so", "--program-id", &program_keypair_file, ]); assert_eq!( parse_command(&test, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some("/Users/test/program.so".to_string()), buffer_signer_index: None, buffer_pubkey: None, program_signer_index: Some(1), program_pubkey: 
Some(program_keypair.pubkey()), upgrade_authority_signer_index: 0, is_final: false, max_len: None, allow_excessive_balance: false, }), signers: vec![ read_keypair_file(&keypair_file).unwrap().into(), read_keypair_file(&program_keypair_file).unwrap().into(), ], } ); let authority_keypair = Keypair::new(); let authority_keypair_file = make_tmp_path("authority_keypair_file"); write_keypair_file(&authority_keypair, &authority_keypair_file).unwrap(); let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "deploy", "/Users/test/program.so", "--upgrade-authority", &authority_keypair_file, ]); assert_eq!( parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some("/Users/test/program.so".to_string()), buffer_signer_index: None, buffer_pubkey: None, program_signer_index: None, program_pubkey: None, upgrade_authority_signer_index: 1, is_final: false, max_len: None, allow_excessive_balance: false, }), signers: vec![ read_keypair_file(&keypair_file).unwrap().into(), read_keypair_file(&authority_keypair_file).unwrap().into(), ], } ); let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "deploy", "/Users/test/program.so", "--final", ]); assert_eq!( parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some("/Users/test/program.so".to_string()), buffer_signer_index: None, buffer_pubkey: None, program_signer_index: None, program_pubkey: None, upgrade_authority_signer_index: 0, is_final: true, max_len: None, allow_excessive_balance: false, }), signers: vec![read_keypair_file(&keypair_file).unwrap().into()], } ); } #[test] #[allow(clippy::cognitive_complexity)] fn test_cli_parse_write_buffer() { let test_commands = get_clap_app("test", "desc", "version"); let default_keypair = Keypair::new(); let keypair_file = make_tmp_path("keypair_file"); write_keypair_file(&default_keypair, &keypair_file).unwrap(); let default_signer = DefaultSigner::new("", &keypair_file); // defaults let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "write-buffer", "/Users/test/program.so", ]); assert_eq!( parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: "/Users/test/program.so".to_string(), buffer_signer_index: None, buffer_pubkey: None, buffer_authority_signer_index: Some(0), max_len: None, }), signers: vec![read_keypair_file(&keypair_file).unwrap().into()], } ); // specify max len let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "write-buffer", "/Users/test/program.so", "--max-len", "42", ]); assert_eq!( parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: "/Users/test/program.so".to_string(), buffer_signer_index: None, buffer_pubkey: None, buffer_authority_signer_index: Some(0), max_len: Some(42), }), signers: vec![read_keypair_file(&keypair_file).unwrap().into()], } ); // specify buffer let buffer_keypair = Keypair::new(); let buffer_keypair_file = make_tmp_path("buffer_keypair_file"); write_keypair_file(&buffer_keypair, &buffer_keypair_file).unwrap(); let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "write-buffer", "/Users/test/program.so", 
"--buffer", &buffer_keypair_file, ]); assert_eq!( parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: "/Users/test/program.so".to_string(), buffer_signer_index: Some(1), buffer_pubkey: Some(buffer_keypair.pubkey()), buffer_authority_signer_index: Some(0), max_len: None, }), signers: vec![ read_keypair_file(&keypair_file).unwrap().into(), read_keypair_file(&buffer_keypair_file).unwrap().into(), ], } ); // specify authority let authority_keypair = Keypair::new(); let authority_keypair_file = make_tmp_path("authority_keypair_file"); write_keypair_file(&authority_keypair, &authority_keypair_file).unwrap(); let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "write-buffer", "/Users/test/program.so", "--buffer-authority", &authority_keypair_file, ]); assert_eq!( parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: "/Users/test/program.so".to_string(), buffer_signer_index: None, buffer_pubkey: None, buffer_authority_signer_index: Some(1), max_len: None, }), signers: vec![ read_keypair_file(&keypair_file).unwrap().into(), read_keypair_file(&authority_keypair_file).unwrap().into(), ], } ); // specify both buffer and authority let buffer_keypair = Keypair::new(); let buffer_keypair_file = make_tmp_path("buffer_keypair_file"); write_keypair_file(&buffer_keypair, &buffer_keypair_file).unwrap(); let authority_keypair = Keypair::new(); let authority_keypair_file = make_tmp_path("authority_keypair_file"); write_keypair_file(&authority_keypair, &authority_keypair_file).unwrap(); let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "write-buffer", "/Users/test/program.so", "--buffer", &buffer_keypair_file, "--buffer-authority", &authority_keypair_file, ]); assert_eq!( parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::WriteBuffer { program_location: "/Users/test/program.so".to_string(), buffer_signer_index: Some(1), buffer_pubkey: Some(buffer_keypair.pubkey()), buffer_authority_signer_index: Some(2), max_len: None, }), signers: vec![ read_keypair_file(&keypair_file).unwrap().into(), read_keypair_file(&buffer_keypair_file).unwrap().into(), read_keypair_file(&authority_keypair_file).unwrap().into(), ], } ); } #[test] #[allow(clippy::cognitive_complexity)] fn test_cli_parse_set_upgrade_authority() { let test_commands = get_clap_app("test", "desc", "version"); let default_keypair = Keypair::new(); let keypair_file = make_tmp_path("keypair_file"); write_keypair_file(&default_keypair, &keypair_file).unwrap(); let default_signer = DefaultSigner::new("", &keypair_file); let program_pubkey = Pubkey::new_unique(); let new_authority_pubkey = Pubkey::new_unique(); let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "set-upgrade-authority", &program_pubkey.to_string(), "--new-upgrade-authority", &new_authority_pubkey.to_string(), ]); assert_eq!( parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::SetUpgradeAuthority { program_pubkey, upgrade_authority_index: Some(0), new_upgrade_authority: Some(new_authority_pubkey), }), signers: vec![read_keypair_file(&keypair_file).unwrap().into()], } ); let program_pubkey = Pubkey::new_unique(); let 
new_authority_pubkey = Keypair::new(); let new_authority_pubkey_file = make_tmp_path("authority_keypair_file"); write_keypair_file(&new_authority_pubkey, &new_authority_pubkey_file).unwrap(); let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "set-upgrade-authority", &program_pubkey.to_string(), "--new-upgrade-authority", &new_authority_pubkey_file, ]); assert_eq!( parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::SetUpgradeAuthority { program_pubkey, upgrade_authority_index: Some(0), new_upgrade_authority: Some(new_authority_pubkey.pubkey()), }), signers: vec![read_keypair_file(&keypair_file).unwrap().into()], } ); let program_pubkey = Pubkey::new_unique(); let new_authority_pubkey = Keypair::new(); let new_authority_pubkey_file = make_tmp_path("authority_keypair_file"); write_keypair_file(&new_authority_pubkey, &new_authority_pubkey_file).unwrap(); let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "set-upgrade-authority", &program_pubkey.to_string(), "--final", ]); assert_eq!( parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::SetUpgradeAuthority { program_pubkey, upgrade_authority_index: Some(0), new_upgrade_authority: None, }), signers: vec![read_keypair_file(&keypair_file).unwrap().into()], } ); let program_pubkey = Pubkey::new_unique(); let authority = Keypair::new(); let authority_keypair_file = make_tmp_path("authority_keypair_file"); write_keypair_file(&authority, &authority_keypair_file).unwrap(); let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "set-upgrade-authority", &program_pubkey.to_string(), "--upgrade-authority", &authority_keypair_file, "--final", ]); assert_eq!( parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::SetUpgradeAuthority { program_pubkey, upgrade_authority_index: Some(1), new_upgrade_authority: None, }), signers: vec![ read_keypair_file(&keypair_file).unwrap().into(), read_keypair_file(&authority_keypair_file).unwrap().into(), ], } ); } #[test] #[allow(clippy::cognitive_complexity)] fn test_cli_parse_set_buffer_authority() { let test_commands = get_clap_app("test", "desc", "version"); let default_keypair = Keypair::new(); let keypair_file = make_tmp_path("keypair_file"); write_keypair_file(&default_keypair, &keypair_file).unwrap(); let default_signer = DefaultSigner::new("", &keypair_file); let buffer_pubkey = Pubkey::new_unique(); let new_authority_pubkey = Pubkey::new_unique(); let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "set-buffer-authority", &buffer_pubkey.to_string(), "--new-buffer-authority", &new_authority_pubkey.to_string(), ]); assert_eq!( parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::SetBufferAuthority { buffer_pubkey, buffer_authority_index: Some(0), new_buffer_authority: new_authority_pubkey, }), signers: vec![read_keypair_file(&keypair_file).unwrap().into()], } ); let buffer_pubkey = Pubkey::new_unique(); let new_authority_keypair = Keypair::new(); let new_authority_keypair_file = make_tmp_path("authority_keypair_file"); write_keypair_file(&new_authority_keypair, &new_authority_keypair_file).unwrap(); let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", 
"set-buffer-authority", &buffer_pubkey.to_string(), "--new-buffer-authority", &new_authority_keypair_file, ]); assert_eq!( parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::SetBufferAuthority { buffer_pubkey, buffer_authority_index: Some(0), new_buffer_authority: new_authority_keypair.pubkey(), }), signers: vec![read_keypair_file(&keypair_file).unwrap().into()], } ); } #[test] #[allow(clippy::cognitive_complexity)] fn test_cli_parse_show() { let test_commands = get_clap_app("test", "desc", "version"); let default_keypair = Keypair::new(); let keypair_file = make_tmp_path("keypair_file"); write_keypair_file(&default_keypair, &keypair_file).unwrap(); let default_signer = DefaultSigner::new("", &keypair_file); // defaults let buffer_pubkey = Pubkey::new_unique(); let authority_keypair = Keypair::new(); let authority_keypair_file = make_tmp_path("authority_keypair_file"); write_keypair_file(&authority_keypair, &authority_keypair_file).unwrap(); let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "show", &buffer_pubkey.to_string(), ]); assert_eq!( parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Show { account_pubkey: Some(buffer_pubkey), authority_pubkey: default_keypair.pubkey(), get_programs: false, get_buffers: false, all: false, use_lamports_unit: false, }), signers: vec![], } ); let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "show", "--programs", "--all", "--lamports", ]); assert_eq!( parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Show { account_pubkey: None, authority_pubkey: default_keypair.pubkey(), get_programs: true, get_buffers: false, all: true, use_lamports_unit: true, }), signers: vec![], } ); let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "show", "--buffers", "--all", "--lamports", ]); assert_eq!( parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Show { account_pubkey: None, authority_pubkey: default_keypair.pubkey(), get_programs: false, get_buffers: true, all: true, use_lamports_unit: true, }), signers: vec![], } ); let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "show", "--buffers", "--buffer-authority", &authority_keypair.pubkey().to_string(), ]); assert_eq!( parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Show { account_pubkey: None, authority_pubkey: authority_keypair.pubkey(), get_programs: false, get_buffers: true, all: false, use_lamports_unit: false, }), signers: vec![], } ); let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "show", "--buffers", "--buffer-authority", &authority_keypair_file, ]); assert_eq!( parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Show { account_pubkey: None, authority_pubkey: authority_keypair.pubkey(), get_programs: false, get_buffers: true, all: false, use_lamports_unit: false, }), signers: vec![], } ); } #[test] #[allow(clippy::cognitive_complexity)] fn test_cli_parse_close() { let test_commands = get_clap_app("test", "desc", "version"); let default_keypair = Keypair::new(); let 
keypair_file = make_tmp_path("keypair_file"); write_keypair_file(&default_keypair, &keypair_file).unwrap(); let default_signer = DefaultSigner::new("", &keypair_file); // defaults let buffer_pubkey = Pubkey::new_unique(); let recipient_pubkey = Pubkey::new_unique(); let authority_keypair = Keypair::new(); let authority_keypair_file = make_tmp_path("authority_keypair_file"); let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "close", &buffer_pubkey.to_string(), ]); assert_eq!( parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Close { account_pubkey: Some(buffer_pubkey), recipient_pubkey: default_keypair.pubkey(), authority_index: 0, use_lamports_unit: false, }), signers: vec![read_keypair_file(&keypair_file).unwrap().into()], } ); // with authority write_keypair_file(&authority_keypair, &authority_keypair_file).unwrap(); let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "close", &buffer_pubkey.to_string(), "--buffer-authority", &authority_keypair_file, ]); assert_eq!( parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Close { account_pubkey: Some(buffer_pubkey), recipient_pubkey: default_keypair.pubkey(), authority_index: 1, use_lamports_unit: false, }), signers: vec![ read_keypair_file(&keypair_file).unwrap().into(), read_keypair_file(&authority_keypair_file).unwrap().into(), ], } ); // with recipient let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "close", &buffer_pubkey.to_string(), "--recipient", &recipient_pubkey.to_string(), ]); assert_eq!( parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Close { account_pubkey: Some(buffer_pubkey), recipient_pubkey, authority_index: 0, use_lamports_unit: false, }), signers: vec![read_keypair_file(&keypair_file).unwrap().into(),], } ); // --buffers and lamports let test_command = test_commands.clone().get_matches_from(vec![ "test", "program", "close", "--buffers", "--lamports", ]); assert_eq!( parse_command(&test_command, &default_signer, &mut None).unwrap(), CliCommandInfo { command: CliCommand::Program(ProgramCliCommand::Close { account_pubkey: None, recipient_pubkey: default_keypair.pubkey(), authority_index: 0, use_lamports_unit: true, }), signers: vec![read_keypair_file(&keypair_file).unwrap().into(),], } ); } #[test] fn test_cli_keypair_file() { solana_logger::setup(); let default_keypair = Keypair::new(); let program_pubkey = Keypair::new(); let deploy_path = make_tmp_path("deploy"); let mut program_location = PathBuf::from(deploy_path.clone()); program_location.push("noop"); program_location.set_extension("so"); let mut pathbuf = PathBuf::from(env!("CARGO_MANIFEST_DIR")); pathbuf.push("tests"); pathbuf.push("fixtures"); pathbuf.push("noop"); pathbuf.set_extension("so"); let program_keypair_location = program_location.with_file_name("noop-keypair.json"); std::fs::create_dir_all(deploy_path).unwrap(); std::fs::copy(pathbuf, program_location.as_os_str()).unwrap(); write_keypair_file(&program_pubkey, &program_keypair_location).unwrap(); let config = CliConfig { rpc_client: Some(Arc::new(RpcClient::new_mock("".to_string()))), command: CliCommand::Program(ProgramCliCommand::Deploy { program_location: Some(program_location.to_str().unwrap().to_string()), buffer_signer_index: None, buffer_pubkey: None, 
program_signer_index: None, program_pubkey: None, upgrade_authority_signer_index: 0, is_final: false, max_len: None, allow_excessive_balance: false, }), signers: vec![&default_keypair], output_format: OutputFormat::JsonCompact, ..CliConfig::default() }; let result = process_command(&config); let json: Value = serde_json::from_str(&result.unwrap()).unwrap(); let program_id = json .as_object() .unwrap() .get("programId") .unwrap() .as_str() .unwrap(); assert_eq!( program_id.parse::<Pubkey>().unwrap(), program_pubkey.pubkey() ); } }
38.795346
141
0.516027
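The memcmp filter offsets used by `get_buffers()` and `get_programs()` in the file above depend on the upgradeable loader's account layouts. The short standalone Rust sketch below is illustrative only (it is not part of the file); the constant values mirror the file's own `ACCOUNT_TYPE_SIZE`, `SLOT_SIZE`, `OPTION_SIZE`, and `PUBKEY_LEN` definitions and spell out the offset arithmetic those filters rely on.

// Standalone sketch: reproduce the account-scan offsets assumed by the CLI code above.
const ACCOUNT_TYPE_SIZE: usize = 4; // 4-byte account-type tag
const SLOT_SIZE: usize = 8; // u64 deployment slot stored in ProgramData accounts
const OPTION_SIZE: usize = 1; // 1-byte Option<Pubkey> tag
const PUBKEY_LEN: usize = 32;

fn main() {
    // Buffer layout: [type tag | Option tag | authority pubkey | program bytes...]
    let buffer_authority_offset = ACCOUNT_TYPE_SIZE + OPTION_SIZE;
    // ProgramData layout: [type tag | slot | Option tag | upgrade authority | program bytes...]
    let programdata_authority_offset = ACCOUNT_TYPE_SIZE + SLOT_SIZE + OPTION_SIZE;

    assert_eq!(buffer_authority_offset, 5);
    assert_eq!(programdata_authority_offset, 13);

    // Only the header plus the authority key is fetched for display, which is why the
    // RPC data-slice length is the header size plus PUBKEY_LEN.
    println!(
        "buffer slice = {} bytes, programdata slice = {} bytes",
        buffer_authority_offset + PUBKEY_LEN,
        programdata_authority_offset + PUBKEY_LEN
    );
}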
564c4d3ef3ffbfbd22a3d23261e364854753ac20
26,842
use std::{cell::RefCell, fmt, future::Future, marker::PhantomData, rc::Rc};

use actix_http::{body::BoxBody, Extensions};
use actix_router::{IntoPatterns, Patterns};
use actix_service::{
    apply, apply_fn_factory, boxed, fn_service, IntoServiceFactory, Service, ServiceFactory,
    ServiceFactoryExt, Transform,
};
use futures_core::future::LocalBoxFuture;
use futures_util::future::join_all;

use crate::{
    body::MessageBody,
    data::Data,
    dev::{ensure_leading_slash, AppService, ResourceDef},
    guard::Guard,
    handler::Handler,
    route::{Route, RouteService},
    service::{
        BoxedHttpService, BoxedHttpServiceFactory, HttpServiceFactory, ServiceRequest,
        ServiceResponse,
    },
    Error, FromRequest, HttpResponse, Responder,
};

/// A collection of [`Route`]s that respond to the same path pattern.
///
/// A resource has at least one route. A route consists of a handler object and a list of guards
/// (objects that implement the `Guard` trait). Resources and routes use a builder-like pattern
/// for configuration. During request handling, the resource iterates through all of its routes
/// and checks each route's guards; if the request matches all guards of a route, that route is
/// considered matched and its handler is called.
///
/// # Examples
/// ```
/// use actix_web::{web, App, HttpResponse};
///
/// let app = App::new().service(
///     web::resource("/")
///         .route(web::get().to(|| HttpResponse::Ok())));
/// ```
///
/// If no matching route is found, a *405* response code is returned. This default behavior can
/// be overridden with the `default_service()` method.
pub struct Resource<T = ResourceEndpoint, B = BoxBody> {
    endpoint: T,
    rdef: Patterns,
    name: Option<String>,
    routes: Vec<Route>,
    app_data: Option<Extensions>,
    guards: Vec<Box<dyn Guard>>,
    default: BoxedHttpServiceFactory,
    factory_ref: Rc<RefCell<Option<ResourceFactory>>>,
    _phantom: PhantomData<B>,
}

impl Resource {
    pub fn new<T: IntoPatterns>(path: T) -> Resource {
        let fref = Rc::new(RefCell::new(None));

        Resource {
            routes: Vec::new(),
            rdef: path.patterns(),
            name: None,
            endpoint: ResourceEndpoint::new(fref.clone()),
            factory_ref: fref,
            guards: Vec::new(),
            app_data: None,
            default: boxed::factory(fn_service(|req: ServiceRequest| async {
                Ok(req.into_response(HttpResponse::MethodNotAllowed()))
            })),
            _phantom: PhantomData,
        }
    }
}

impl<T, B> Resource<T, B>
where
    T: ServiceFactory<
        ServiceRequest,
        Config = (),
        Response = ServiceResponse<B>,
        Error = Error,
        InitError = (),
    >,
    B: MessageBody,
{
    /// Set the resource name.
    ///
    /// The name is used for URL generation.
    pub fn name(mut self, name: &str) -> Self {
        self.name = Some(name.to_string());
        self
    }

    /// Add a match guard to the resource.
    ///
    /// ```
    /// use actix_web::{web, guard, App, HttpResponse};
    ///
    /// async fn index(data: web::Path<(String, String)>) -> &'static str {
    ///     "Welcome!"
    /// }
    ///
    /// fn main() {
    ///     let app = App::new()
    ///         .service(
    ///             web::resource("/app")
    ///                 .guard(guard::Header("content-type", "text/plain"))
    ///                 .route(web::get().to(index))
    ///         )
    ///         .service(
    ///             web::resource("/app")
    ///                 .guard(guard::Header("content-type", "text/json"))
    ///                 .route(web::get().to(|| HttpResponse::MethodNotAllowed()))
    ///         );
    /// }
    /// ```
    pub fn guard<G: Guard + 'static>(mut self, guard: G) -> Self {
        self.guards.push(Box::new(guard));
        self
    }

    pub(crate) fn add_guards(mut self, guards: Vec<Box<dyn Guard>>) -> Self {
        self.guards.extend(guards);
        self
    }

    /// Register a new route.
    ///
    /// ```
    /// use actix_web::{web, guard, App, HttpResponse};
    ///
    /// let app = App::new().service(
    ///     web::resource("/").route(
    ///         web::route()
    ///             .guard(guard::Any(guard::Get()).or(guard::Put()))
    ///             .guard(guard::Header("Content-Type", "text/plain"))
    ///             .to(|| HttpResponse::Ok()))
    /// );
    /// ```
    ///
    /// Multiple routes can be added to a resource. The resource uses its routes' match guards
    /// for route selection.
    ///
    /// ```
    /// use actix_web::{web, guard, App};
    ///
    /// fn main() {
    ///     let app = App::new().service(
    ///         web::resource("/container/")
    ///             .route(web::get().to(get_handler))
    ///             .route(web::post().to(post_handler))
    ///             .route(web::delete().to(delete_handler))
    ///     );
    /// }
    /// # async fn get_handler() -> impl actix_web::Responder { actix_web::HttpResponse::Ok() }
    /// # async fn post_handler() -> impl actix_web::Responder { actix_web::HttpResponse::Ok() }
    /// # async fn delete_handler() -> impl actix_web::Responder { actix_web::HttpResponse::Ok() }
    /// ```
    pub fn route(mut self, route: Route) -> Self {
        self.routes.push(route);
        self
    }

    /// Add resource data.
    ///
    /// Data of different types from parent contexts will still be accessible. Any `Data<T>` types
    /// set here can be extracted in handlers using the `Data<T>` extractor.
    ///
    /// # Examples
    /// ```
    /// use std::cell::Cell;
    /// use actix_web::{web, App, HttpRequest, HttpResponse, Responder};
    ///
    /// struct MyData {
    ///     count: std::cell::Cell<usize>,
    /// }
    ///
    /// async fn handler(req: HttpRequest, counter: web::Data<MyData>) -> impl Responder {
    ///     // note this cannot use the Data<T> extractor because it was not added with it
    ///     let incr = *req.app_data::<usize>().unwrap();
    ///     assert_eq!(incr, 3);
    ///
    ///     // update counter using other value from app data
    ///     counter.count.set(counter.count.get() + incr);
    ///
    ///     HttpResponse::Ok().body(counter.count.get().to_string())
    /// }
    ///
    /// let app = App::new().service(
    ///     web::resource("/")
    ///         .app_data(3usize)
    ///         .app_data(web::Data::new(MyData { count: Default::default() }))
    ///         .route(web::get().to(handler))
    /// );
    /// ```
    pub fn app_data<U: 'static>(mut self, data: U) -> Self {
        self.app_data
            .get_or_insert_with(Extensions::new)
            .insert(data);
        self
    }

    /// Add resource data after wrapping it in `Data<T>`.
    ///
    /// Deprecated in favor of [`app_data`](Self::app_data).
    #[deprecated(since = "4.0.0", note = "Use `.app_data(Data::new(val))` instead.")]
    pub fn data<U: 'static>(self, data: U) -> Self {
        self.app_data(Data::new(data))
    }

    /// Register a new route and add a handler. This route matches all requests.
    ///
    /// ```
    /// use actix_web::*;
    ///
    /// fn index(req: HttpRequest) -> HttpResponse {
    ///     unimplemented!()
    /// }
    ///
    /// App::new().service(web::resource("/").to(index));
    /// ```
    ///
    /// This is a shortcut for:
    ///
    /// ```
    /// # use actix_web::*;
    /// # fn index(req: HttpRequest) -> HttpResponse { unimplemented!() }
    /// App::new().service(web::resource("/").route(web::route().to(index)));
    /// ```
    pub fn to<F, Args>(mut self, handler: F) -> Self
    where
        F: Handler<Args>,
        Args: FromRequest + 'static,
        F::Output: Responder + 'static,
    {
        self.routes.push(Route::new().to(handler));
        self
    }

    /// Register a resource middleware.
    ///
    /// This is similar to `App`'s middleware, but middleware registered here is invoked at the
    /// resource level. Resource-level middleware is not allowed to change the response type
    /// (i.e. modify the response's body).
    ///
    /// **Note**: middleware is called in the opposite order of its registration.
    pub fn wrap<M, B1>(
        self,
        mw: M,
    ) -> Resource<
        impl ServiceFactory<
            ServiceRequest,
            Config = (),
            Response = ServiceResponse<B1>,
            Error = Error,
            InitError = (),
        >,
        B1,
    >
    where
        M: Transform<
            T::Service,
            ServiceRequest,
            Response = ServiceResponse<B1>,
            Error = Error,
            InitError = (),
        >,
        B1: MessageBody,
    {
        Resource {
            endpoint: apply(mw, self.endpoint),
            rdef: self.rdef,
            name: self.name,
            guards: self.guards,
            routes: self.routes,
            default: self.default,
            app_data: self.app_data,
            factory_ref: self.factory_ref,
            _phantom: PhantomData,
        }
    }

    /// Register a resource middleware function.
    ///
    /// The function is given the incoming `ServiceRequest` and a reference to the wrapped
    /// service (the next step in the call chain).
    ///
    /// This is similar to `App`'s middleware, but middleware registered here is invoked at the
    /// resource level. Resource-level middleware is not allowed to change the response type
    /// (i.e. modify the response's body).
    ///
    /// ```
    /// use actix_service::Service;
    /// use actix_web::{web, App};
    /// use actix_web::http::header::{CONTENT_TYPE, HeaderValue};
    ///
    /// async fn index() -> &'static str {
    ///     "Welcome!"
    /// }
    ///
    /// fn main() {
    ///     let app = App::new().service(
    ///         web::resource("/index.html")
    ///             .wrap_fn(|req, srv| {
    ///                 let fut = srv.call(req);
    ///                 async {
    ///                     let mut res = fut.await?;
    ///                     res.headers_mut().insert(
    ///                         CONTENT_TYPE, HeaderValue::from_static("text/plain"),
    ///                     );
    ///                     Ok(res)
    ///                 }
    ///             })
    ///             .route(web::get().to(index)));
    /// }
    /// ```
    pub fn wrap_fn<F, R, B1>(
        self,
        mw: F,
    ) -> Resource<
        impl ServiceFactory<
            ServiceRequest,
            Config = (),
            Response = ServiceResponse<B1>,
            Error = Error,
            InitError = (),
        >,
        B1,
    >
    where
        F: Fn(ServiceRequest, &T::Service) -> R + Clone,
        R: Future<Output = Result<ServiceResponse<B1>, Error>>,
        B1: MessageBody,
    {
        Resource {
            endpoint: apply_fn_factory(self.endpoint, mw),
            rdef: self.rdef,
            name: self.name,
            guards: self.guards,
            routes: self.routes,
            default: self.default,
            app_data: self.app_data,
            factory_ref: self.factory_ref,
            _phantom: PhantomData,
        }
    }

    /// Set the default service to be used if no matching route is found.
    ///
    /// By default, a *405* response is returned. A resource does not use the default handler
    /// from `App` or `Scope`.
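    ///
    /// # Examples
    ///
    /// A minimal sketch, added here for illustration; the path, handler closures, and response
    /// bodies are placeholders rather than part of the original documentation.
    ///
    /// ```
    /// use actix_web::{web, App, HttpResponse};
    ///
    /// let app = App::new().service(
    ///     web::resource("/test")
    ///         .route(web::get().to(|| async { HttpResponse::Ok().body("ok") }))
    ///         // Requests matching the path but no route get a 400 instead of the default 405.
    ///         .default_service(web::to(|| async { HttpResponse::BadRequest().finish() }))
    /// );
    /// ```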
pub fn default_service<F, U>(mut self, f: F) -> Self where F: IntoServiceFactory<U, ServiceRequest>, U: ServiceFactory< ServiceRequest, Config = (), Response = ServiceResponse, Error = Error, > + 'static, U::InitError: fmt::Debug, { // create and configure default resource self.default = boxed::factory( f.into_factory() .map_init_err(|e| log::error!("Can not construct default service: {:?}", e)), ); self } } impl<T, B> HttpServiceFactory for Resource<T, B> where T: ServiceFactory< ServiceRequest, Config = (), Response = ServiceResponse<B>, Error = Error, InitError = (), > + 'static, B: MessageBody + 'static, { fn register(mut self, config: &mut AppService) { let guards = if self.guards.is_empty() { None } else { Some(std::mem::take(&mut self.guards)) }; let mut rdef = if config.is_root() || !self.rdef.is_empty() { ResourceDef::new(ensure_leading_slash(self.rdef.clone())) } else { ResourceDef::new(self.rdef.clone()) }; if let Some(ref name) = self.name { rdef.set_name(name); } *self.factory_ref.borrow_mut() = Some(ResourceFactory { routes: self.routes, default: self.default, }); let resource_data = self.app_data.map(Rc::new); // wraps endpoint service (including middleware) call and injects app data for this scope let endpoint = apply_fn_factory(self.endpoint, move |mut req: ServiceRequest, srv| { if let Some(ref data) = resource_data { req.add_data_container(Rc::clone(data)); } let fut = srv.call(req); async { Ok(fut.await?.map_into_boxed_body()) } }); config.register_service(rdef, guards, endpoint, None) } } pub struct ResourceFactory { routes: Vec<Route>, default: BoxedHttpServiceFactory, } impl ServiceFactory<ServiceRequest> for ResourceFactory { type Response = ServiceResponse; type Error = Error; type Config = (); type Service = ResourceService; type InitError = (); type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>; fn new_service(&self, _: ()) -> Self::Future { // construct default service factory future. 
let default_fut = self.default.new_service(()); // construct route service factory futures let factory_fut = join_all(self.routes.iter().map(|route| route.new_service(()))); Box::pin(async move { let default = default_fut.await?; let routes = factory_fut .await .into_iter() .collect::<Result<Vec<_>, _>>()?; Ok(ResourceService { routes, default }) }) } } pub struct ResourceService { routes: Vec<RouteService>, default: BoxedHttpService, } impl Service<ServiceRequest> for ResourceService { type Response = ServiceResponse; type Error = Error; type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>; actix_service::always_ready!(); fn call(&self, mut req: ServiceRequest) -> Self::Future { for route in &self.routes { if route.check(&mut req) { return route.call(req); } } self.default.call(req) } } #[doc(hidden)] pub struct ResourceEndpoint { factory: Rc<RefCell<Option<ResourceFactory>>>, } impl ResourceEndpoint { fn new(factory: Rc<RefCell<Option<ResourceFactory>>>) -> Self { ResourceEndpoint { factory } } } impl ServiceFactory<ServiceRequest> for ResourceEndpoint { type Response = ServiceResponse; type Error = Error; type Config = (); type Service = ResourceService; type InitError = (); type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>; fn new_service(&self, _: ()) -> Self::Future { self.factory.borrow().as_ref().unwrap().new_service(()) } } #[cfg(test)] mod tests { use std::time::Duration; use actix_rt::time::sleep; use actix_service::Service; use actix_utils::future::ok; use super::*; use crate::{ guard, http::{ header::{self, HeaderValue}, Method, StatusCode, }, middleware::{Compat, DefaultHeaders}, service::{ServiceRequest, ServiceResponse}, test::{call_service, init_service, TestRequest}, web, App, Error, HttpMessage, HttpResponse, }; #[test] fn can_be_returned_from_fn() { fn my_resource() -> Resource { web::resource("/test").route(web::get().to(|| async { "hello" })) } fn my_compat_resource() -> Resource< impl ServiceFactory< ServiceRequest, Config = (), Response = ServiceResponse, Error = Error, InitError = (), >, > { web::resource("/test-compat") .wrap_fn(|req, srv| { let fut = srv.call(req); async { Ok(fut.await?.map_into_right_body::<()>()) } }) .wrap(Compat::noop()) .route(web::get().to(|| async { "hello" })) } App::new() .service(my_resource()) .service(my_compat_resource()); } #[actix_rt::test] async fn test_middleware() { let srv = init_service( App::new().service( web::resource("/test") .name("test") .wrap( DefaultHeaders::new() .add((header::CONTENT_TYPE, HeaderValue::from_static("0001"))), ) .route(web::get().to(HttpResponse::Ok)), ), ) .await; let req = TestRequest::with_uri("/test").to_request(); let resp = call_service(&srv, req).await; assert_eq!(resp.status(), StatusCode::OK); assert_eq!( resp.headers().get(header::CONTENT_TYPE).unwrap(), HeaderValue::from_static("0001") ); } #[actix_rt::test] async fn test_middleware_fn() { let srv = init_service( App::new().service( web::resource("/test") .wrap_fn(|req, srv| { let fut = srv.call(req); async { fut.await.map(|mut res| { res.headers_mut().insert( header::CONTENT_TYPE, HeaderValue::from_static("0001"), ); res }) } }) .route(web::get().to(HttpResponse::Ok)), ), ) .await; let req = TestRequest::with_uri("/test").to_request(); let resp = call_service(&srv, req).await; assert_eq!(resp.status(), StatusCode::OK); assert_eq!( resp.headers().get(header::CONTENT_TYPE).unwrap(), HeaderValue::from_static("0001") ); } #[actix_rt::test] async fn test_to() { let srv = 
init_service(App::new().service(web::resource("/test").to(|| async { sleep(Duration::from_millis(100)).await; Ok::<_, Error>(HttpResponse::Ok()) }))) .await; let req = TestRequest::with_uri("/test").to_request(); let resp = call_service(&srv, req).await; assert_eq!(resp.status(), StatusCode::OK); } #[actix_rt::test] async fn test_pattern() { let srv = init_service( App::new().service( web::resource(["/test", "/test2"]) .to(|| async { Ok::<_, Error>(HttpResponse::Ok()) }), ), ) .await; let req = TestRequest::with_uri("/test").to_request(); let resp = call_service(&srv, req).await; assert_eq!(resp.status(), StatusCode::OK); let req = TestRequest::with_uri("/test2").to_request(); let resp = call_service(&srv, req).await; assert_eq!(resp.status(), StatusCode::OK); } #[actix_rt::test] async fn test_default_resource() { let srv = init_service( App::new() .service(web::resource("/test").route(web::get().to(HttpResponse::Ok))) .default_service(|r: ServiceRequest| { ok(r.into_response(HttpResponse::BadRequest())) }), ) .await; let req = TestRequest::with_uri("/test").to_request(); let resp = call_service(&srv, req).await; assert_eq!(resp.status(), StatusCode::OK); let req = TestRequest::with_uri("/test") .method(Method::POST) .to_request(); let resp = call_service(&srv, req).await; assert_eq!(resp.status(), StatusCode::METHOD_NOT_ALLOWED); let srv = init_service( App::new().service( web::resource("/test") .route(web::get().to(HttpResponse::Ok)) .default_service(|r: ServiceRequest| { ok(r.into_response(HttpResponse::BadRequest())) }), ), ) .await; let req = TestRequest::with_uri("/test").to_request(); let resp = call_service(&srv, req).await; assert_eq!(resp.status(), StatusCode::OK); let req = TestRequest::with_uri("/test") .method(Method::POST) .to_request(); let resp = call_service(&srv, req).await; assert_eq!(resp.status(), StatusCode::BAD_REQUEST); } #[actix_rt::test] async fn test_resource_guards() { let srv = init_service( App::new() .service( web::resource("/test/{p}") .guard(guard::Get()) .to(HttpResponse::Ok), ) .service( web::resource("/test/{p}") .guard(guard::Put()) .to(HttpResponse::Created), ) .service( web::resource("/test/{p}") .guard(guard::Delete()) .to(HttpResponse::NoContent), ), ) .await; let req = TestRequest::with_uri("/test/it") .method(Method::GET) .to_request(); let resp = call_service(&srv, req).await; assert_eq!(resp.status(), StatusCode::OK); let req = TestRequest::with_uri("/test/it") .method(Method::PUT) .to_request(); let resp = call_service(&srv, req).await; assert_eq!(resp.status(), StatusCode::CREATED); let req = TestRequest::with_uri("/test/it") .method(Method::DELETE) .to_request(); let resp = call_service(&srv, req).await; assert_eq!(resp.status(), StatusCode::NO_CONTENT); } // allow deprecated `{App, Resource}::data` #[allow(deprecated)] #[actix_rt::test] async fn test_data() { let srv = init_service( App::new() .data(1.0f64) .data(1usize) .app_data(web::Data::new('-')) .service( web::resource("/test") .data(10usize) .app_data(web::Data::new('*')) .guard(guard::Get()) .to( |data1: web::Data<usize>, data2: web::Data<char>, data3: web::Data<f64>| { assert_eq!(**data1, 10); assert_eq!(**data2, '*'); let error = std::f64::EPSILON; assert!((**data3 - 1.0).abs() < error); HttpResponse::Ok() }, ), ), ) .await; let req = TestRequest::get().uri("/test").to_request(); let resp = call_service(&srv, req).await; assert_eq!(resp.status(), StatusCode::OK); } // allow deprecated `{App, Resource}::data` #[allow(deprecated)] #[actix_rt::test] async fn 
test_data_default_service() { let srv = init_service( App::new().data(1usize).service( web::resource("/test") .data(10usize) .default_service(web::to(|data: web::Data<usize>| { assert_eq!(**data, 10); HttpResponse::Ok() })), ), ) .await; let req = TestRequest::get().uri("/test").to_request(); let resp = call_service(&srv, req).await; assert_eq!(resp.status(), StatusCode::OK); } #[actix_rt::test] async fn test_middleware_app_data() { let srv = init_service( App::new().service( web::resource("test") .app_data(1usize) .wrap_fn(|req, srv| { assert_eq!(req.app_data::<usize>(), Some(&1usize)); req.extensions_mut().insert(1usize); srv.call(req) }) .route(web::get().to(HttpResponse::Ok)) .default_service(|req: ServiceRequest| async move { let (req, _) = req.into_parts(); assert_eq!(req.extensions().get::<usize>(), Some(&1)); Ok(ServiceResponse::new( req, HttpResponse::BadRequest().finish(), )) }), ), ) .await; let req = TestRequest::get().uri("/test").to_request(); let resp = call_service(&srv, req).await; assert_eq!(resp.status(), StatusCode::OK); let req = TestRequest::post().uri("/test").to_request(); let resp = call_service(&srv, req).await; assert_eq!(resp.status(), StatusCode::BAD_REQUEST); } #[actix_rt::test] async fn test_middleware_body_type() { let srv = init_service( App::new().service( web::resource("/test") .wrap_fn(|req, srv| { let fut = srv.call(req); async { Ok(fut.await?.map_into_right_body::<()>()) } }) .route(web::get().to(|| async { "hello" })), ), ) .await; // test if `try_into_bytes()` is preserved across scope layer use actix_http::body::MessageBody as _; let req = TestRequest::with_uri("/test").to_request(); let resp = call_service(&srv, req).await; let body = resp.into_body(); assert_eq!(body.try_into_bytes().unwrap(), b"hello".as_ref()); } }
32.300842
100
0.504731
e228bafe7a39ea81673c73bcaa280a7da71a2a54
7,962
use kzg::{Fr, G1, G1Mul, G2, G2Mul}; use rand::{Rng, thread_rng}; use crate::consts::{BlstFp, BlstFp2, BlstP1, BlstP2, G1_NEGATIVE_GENERATOR, G2_NEGATIVE_GENERATOR}; extern "C" { // Fr fn fr_from_uint64(out: *mut BlstFr, n: u64); fn fr_from_uint64s(out: *mut BlstFr, vals: *const u64); fn fr_to_uint64s(out: *mut u64, fr: *const BlstFr); fn fr_is_zero(p: *const BlstFr) -> bool; fn fr_is_null(p: *const BlstFr) -> bool; fn fr_is_one(p: *const BlstFr) -> bool; fn fr_equal(aa: *const BlstFr, bb: *const BlstFr) -> bool; fn fr_negate(out: *mut BlstFr, in_: *const BlstFr); fn fr_pow(out: *mut BlstFr, a: *const BlstFr, n: u64); fn fr_div(out: *mut BlstFr, a: *const BlstFr, b: *const BlstFr); fn blst_fr_add(ret: *mut BlstFr, a: *const BlstFr, b: *const BlstFr); fn blst_fr_sqr(ret: *mut BlstFr, a: *const BlstFr); fn blst_fr_mul(ret: *mut BlstFr, a: *const BlstFr, b: *const BlstFr); // G1 fn blst_p1_generator() -> *const BlstP1; fn g1_add_or_dbl(out: *mut BlstP1, a: *const BlstP1, b: *const BlstP1); fn g1_equal(a: *const BlstP1, b: *const BlstP1) -> bool; fn g1_mul(out: *mut BlstP1, a: *const BlstP1, b: *const BlstFr); fn g1_dbl(out: *mut BlstP1, a: *const BlstP1); fn g1_sub(out: *mut BlstP1, a: *const BlstP1, b: *const BlstP1); fn g1_is_inf(a: *const BlstP1) -> bool; // G2 fn blst_p2_generator() -> *const BlstP2; fn g2_mul(out: *mut BlstP2, a: *const BlstP2, b: *const BlstFr); fn g2_dbl(out: *mut BlstP2, a: *const BlstP2); fn g2_add_or_dbl(out: *mut BlstP2, a: *const BlstP2, b: *const BlstP2); fn g2_equal(a: *const BlstP2, b: *const BlstP2) -> bool; fn g2_sub(out: *mut BlstP2, a: *const BlstP2, b: *const BlstP2); // Regular functions fn g1_linear_combination(out: *mut BlstP1, p: *const BlstP1, coeffs: *const BlstFr, len: u64); fn pairings_verify(a1: *const BlstP1, a2: *const BlstP2, b1: *const BlstP1, b2: *const BlstP2) -> bool; } #[repr(C)] #[derive(Debug, Default, Copy, Clone, PartialEq)] pub struct BlstFr { pub l: [u64; 4] } impl Fr for BlstFr { fn default() -> Self { Self { l: [0; 4] } } fn null() -> Self { Self { l: [u64::MAX; 4]} } fn zero() -> Self { Fr::from_u64(0) } fn one() -> Self { Fr::from_u64(1) } fn rand() -> Self { let mut ret = Fr::default(); let mut rng = thread_rng(); let a: [u64; 4] = [ rng.next_u64(), rng.next_u64(), rng.next_u64(), rng.next_u64() ]; unsafe { fr_from_uint64s(&mut ret, a.as_ptr()); } ret } fn from_u64_arr(u: &[u64; 4]) -> Self { let mut ret = Fr::default(); unsafe { fr_from_uint64s(&mut ret, u.as_ptr()); } ret } fn from_u64(u: u64) -> Self { let mut fr = Fr::default(); unsafe { fr_from_uint64(&mut fr, u); } fr } fn to_u64_arr(&self) -> [u64; 4] { let mut arr: [u64; 4] = [0; 4]; unsafe { fr_to_uint64s(arr.as_mut_ptr(), self); } arr } fn is_one(&self) -> bool { unsafe { fr_is_one(self) } } fn is_zero(&self) -> bool { unsafe { fr_is_zero(self) } } fn is_null(&self) -> bool { unsafe { fr_is_null(self) } } fn sqr(&self) -> Self { let mut ret = Fr::default(); unsafe { blst_fr_sqr(&mut ret, self); } ret } fn mul(&self, b: &Self) -> Self { let mut ret = Fr::default(); unsafe { blst_fr_mul(&mut ret, self, b); } ret } fn add(&self, b: &Self) -> Self { let mut sum = Fr::default(); unsafe { blst_fr_add(&mut sum, self, b); } sum } fn sub(&self, _b: &Self) -> Self { todo!() } fn eucl_inverse(&self) -> Self { todo!() } fn negate(&self) -> Self { let mut ret = Fr::default(); unsafe { fr_negate(&mut ret, self); } ret } fn inverse(&self) -> Self { todo!() } fn pow(&self, n: usize) -> Self { let mut ret = Fr::default(); unsafe { fr_pow(&mut ret, self, n as u64); } ret } fn div(&self, 
b: &Self) -> Result<Self, String> { let mut ret = Fr::default(); unsafe { fr_div(&mut ret, self, b); } Ok(ret) } fn equals(&self, b: &Self) -> bool { unsafe { fr_equal(self, b) } } } impl G1 for BlstP1 { fn default() -> Self { Self { x: BlstFp { l: [0; 6] }, y: BlstFp { l: [0; 6] }, z: BlstFp { l: [0; 6] }, } } fn identity() -> Self { Self { x: BlstFp { l: [0; 6] }, y: BlstFp { l: [0; 6] }, z: BlstFp { l: [0; 6] }, } } fn generator() -> Self { unsafe { *blst_p1_generator() } } fn negative_generator() -> Self { G1_NEGATIVE_GENERATOR } fn rand() -> Self { let mut ret = G1::default(); let random = Fr::rand(); unsafe { g1_mul(&mut ret, &G1::generator(), &random); } ret } fn add_or_dbl(&mut self, b: &Self) -> Self { let mut out = G1::default(); unsafe { g1_add_or_dbl(&mut out, b, self); } out } fn is_inf(&self) -> bool { unsafe { g1_is_inf(self) } } fn dbl(&self) -> Self { let mut ret = G1::default(); unsafe { g1_dbl(&mut ret, self); } ret } fn sub(&self, b: &Self) -> Self { let mut ret = G1::default(); unsafe { g1_sub(&mut ret, self, b); } ret } fn equals(&self, b: &Self) -> bool { unsafe { g1_equal(self, b) } } } impl G1Mul<BlstFr> for BlstP1 { fn mul(&self, b: &BlstFr) -> Self { let mut ret = G1::default(); unsafe { g1_mul(&mut ret, self, b); } ret } } impl G2 for BlstP2 { fn default() -> Self { Self { x: BlstFp2 { fp: [BlstFp { l: [0; 6] }, BlstFp { l: [0; 6] }] }, y: BlstFp2 { fp: [BlstFp { l: [0; 6] }, BlstFp { l: [0; 6] }] }, z: BlstFp2 { fp: [BlstFp { l: [0; 6] }, BlstFp { l: [0; 6] }] }, } } fn generator() -> Self { unsafe { *blst_p2_generator() } } fn negative_generator() -> Self { G2_NEGATIVE_GENERATOR } fn add_or_dbl(&mut self, b: &Self) -> Self { let mut ret = G2::default(); unsafe { g2_add_or_dbl(&mut ret, self, b); } ret } fn dbl(&self) -> Self { let mut ret = G2::default(); unsafe { g2_dbl(&mut ret, self); } ret } fn sub(&self, b: &Self) -> Self { let mut ret = G2::default(); unsafe { g2_sub(&mut ret, self, b); } ret } fn equals(&self, b: &Self) -> bool { unsafe { g2_equal(self, b) } } } impl G2Mul<BlstFr> for BlstP2 { fn mul(&self, b: &BlstFr) -> Self { let mut ret = G2::default(); unsafe { g2_mul(&mut ret, self, b); } ret } } pub fn linear_combination_g1(out: &mut BlstP1, p: &[BlstP1], coeffs: &[BlstFr], len: usize) { unsafe { g1_linear_combination(out, p.as_ptr(), coeffs.as_ptr(), len as u64); } } pub fn verify_pairings(a1: &BlstP1, a2: &BlstP2, b1: &BlstP1, b2: &BlstP2) -> bool { unsafe { pairings_verify(a1, a2, b1, b2) } }
23.348974
107
0.475132
f733697a5e970ed349c2eb5faf8bcd046291b44b
1,319
//! A good way of displaying an SVG image in egui.
//!
//! Requires the dependency `egui_extras` with the `svg` feature.

#![cfg_attr(not(debug_assertions), windows_subsystem = "windows")] // hide console window on Windows in release

use eframe::egui;

fn main() {
    let options = eframe::NativeOptions {
        initial_window_size: Some(egui::vec2(1000.0, 700.0)),
        ..Default::default()
    };
    eframe::run_native(
        "svg example",
        options,
        Box::new(|_cc| Box::new(MyApp::default())),
    );
}

struct MyApp {
    svg_image: egui_extras::RetainedImage,
}

impl Default for MyApp {
    fn default() -> Self {
        Self {
            svg_image: egui_extras::RetainedImage::from_svg_bytes(
                "rustacean-flat-happy.svg",
                include_bytes!("rustacean-flat-happy.svg"),
            )
            .unwrap(),
        }
    }
}

impl eframe::App for MyApp {
    fn update(&mut self, ctx: &egui::Context, _frame: &mut eframe::Frame) {
        egui::CentralPanel::default().show(ctx, |ui| {
            ui.heading("SVG example");
            ui.label("The SVG is rasterized and displayed as a texture.");
            ui.separator();

            let max_size = ui.available_size();
            self.svg_image.show_max_size(ui, max_size);
        });
    }
}
26.38
111
0.580743
f8625202b4114109040bd25b0c587bd9a47be739
1,448
use std::time::Duration;

use futures::future::join_all;
use futures::FutureExt;
use tokio::time::timeout;

use ractor::{Actor, Broker, Context, MessageHandler};

#[derive(Debug)]
struct Sleep(u64);

#[derive(Default)]
struct MyActor;

#[async_trait::async_trait]
impl Actor for MyActor {
    const MAIL_BOX_SIZE: u32 = 2;
    type Args = ();

    async fn create(_ctx: &mut Context<Self>) -> Self
    where
        Self: Sized,
    {
        MyActor
    }
}

#[async_trait::async_trait]
impl MessageHandler<Sleep> for MyActor {
    type Output = ();

    async fn handle(&mut self, msg: Sleep, _ctx: &mut Context<Self>) -> Self::Output {
        tokio::time::sleep(Duration::from_secs(msg.0)).await
    }
}

// We have 100 actors and process 200 messages, and each message takes 1 second.
// Since they run in parallel, they should complete in about 2 seconds.
#[tokio::main]
async fn main() {
    let my_actor = Broker::<MyActor>::spawn(100, true).await;

    // Taking other costs into account, everything should finish within 2.2 seconds.
    let res = timeout(Duration::from_secs_f64(2.2), async {
        // Send 200 sleep messages.
        let resp_handles = join_all(
            (0..200).map(|_| my_actor.send(Sleep(1)).map(|res| res.expect("send failed"))),
        )
        .await;

        join_all(resp_handles.into_iter().map(|handle| handle.recv())).await;
    })
    .await;

    assert!(res.is_ok());
}
23.737705
91
0.629144
ab48ecb22d31b2f71bbe20e015d40eeac032831e
18,469
//! Derive a builder for a struct //! //! This crate implements the [builder pattern] for you. //! Just apply `#[derive(Builder)]` to a struct `Foo`, and it will derive an additional //! struct `FooBuilder` with **setter**-methods for all fields and a **build**-method //! — the way you want it. //! //! # Quick Start //! //! Add `derive_builder` as a dependency to you `Cargo.toml`. //! //! ## What you write //! //! ```rust //! #[macro_use] //! extern crate derive_builder; //! //! #[derive(Builder)] //! struct Lorem { //! ipsum: u32, //! // .. //! } //! # fn main() {} //! ``` //! //! ## What you get //! //! ```rust //! # #[macro_use] //! # extern crate derive_builder; //! # //! # struct Lorem { //! # ipsum: u32, //! # } //! # fn main() {} //! # //! #[derive(Clone, Default)] //! struct LoremBuilder { //! ipsum: Option<u32>, //! } //! //! #[allow(dead_code)] //! impl LoremBuilder { //! pub fn ipsum(&mut self, value: u32) -> &mut Self { //! let mut new = self; //! new.ipsum = Some(value); //! new //! } //! //! fn build(&self) -> Result<Lorem, String> { //! Ok(Lorem { //! ipsum: Clone::clone(self.ipsum //! .as_ref() //! .ok_or("ipsum must be initialized")?), //! }) //! } //! } //! ``` //! //! By default all generated setter-methods take and return `&mut self` //! (aka _non-conusuming_ builder pattern). Accordingly, the build method also takes a //! reference by default. //! //! You can easily opt into different patterns and control many other aspects. //! //! The build method returns `Result<T, E>`, where `T` is the struct you started with //! and E is a generated builder error type. //! It returns `Err` if you didn't initialize all fields and no default values were //! provided. //! //! # Builder Patterns //! //! Let's look again at the example above. You can now build structs like this: //! //! ```rust //! # #[macro_use] extern crate derive_builder; //! # #[derive(Builder)] struct Lorem { ipsum: u32 } //! # fn try_main() -> Result<(), Box<dyn std::error::Error>> { //! let x: Lorem = LoremBuilder::default().ipsum(42).build()?; //! # Ok(()) //! # } fn main() { try_main().unwrap(); } //! ``` //! //! Ok, _chaining_ method calls is nice, but what if `ipsum(42)` should only happen if `geek = true`? //! //! So let's make this call conditional //! //! ```rust //! # #[macro_use] extern crate derive_builder; //! # #[derive(Builder)] struct Lorem { ipsum: u32 } //! # fn try_main() -> Result<(), Box<dyn std::error::Error>> { //! # let geek = true; //! let mut builder = LoremBuilder::default(); //! if geek { //! builder.ipsum(42); //! } //! let x: Lorem = builder.build()?; //! # Ok(()) //! # } fn main() { try_main().unwrap(); } //! ``` //! //! Now it comes in handy that our setter methods take and return mutable references. Otherwise //! we would need to write something more clumsy like `builder = builder.ipsum(42)` to reassign //! the return value each time we have to call a setter conditionally. //! //! Setters with mutable references are therefore a convenient default for the builder //! pattern in Rust. //! //! But this is a free world and the choice is still yours! //! //! ## Owned, aka Consuming //! //! Precede your struct (or field) with `#[builder(pattern = "owned")]` to opt into this pattern. //! Builders generated with this pattern do not automatically derive `Clone`, which allows builders //! to be generated for structs with fields that do not derive `Clone`. //! //! * Setters take and return `self`. //! * PRO: Setter calls and final build method can be chained. //! 
* CON: If you don't chain your calls, you have to create a reference to each return value, //! e.g. `builder = builder.ipsum(42)`. //! //! ## Mutable, aka Non-Consuming (recommended) //! //! This pattern is recommended and active by default if you don't specify anything else. //! You can precede your struct (or field) with `#[builder(pattern = "mutable")]` //! to make this choice explicit. //! //! * Setters take and return `&mut self`. //! * PRO: Setter calls and final build method can be chained. //! * CON: The build method must clone or copy data to create something owned out of a //! mutable reference. Otherwise it could not be used in a chain. **(*)** //! //! ## Immutable //! //! Precede your struct (or field) with `#[builder(pattern = "immutable")]` to opt into this pattern. //! //! * Setters take and return `&self`. //! * PRO: Setter calls and final build method can be chained. //! * CON: If you don't chain your calls, you have to create a reference to each return value, //! e.g. `builder = builder.ipsum(42)`. //! * CON: The build method _and each setter_ must clone or copy data to create something owned //! out of a reference. **(*)** //! //! ## (*) Performance Considerations //! //! Luckily Rust is clever enough to optimize these clone-calls away in release builds //! for your every-day use cases. Thats quite a safe bet - we checked this for you. ;-) //! Switching to consuming signatures (=`self`) is unlikely to give you any performance //! gain, but very likely to restrict your API for non-chained use cases. //! //! # More Features //! //! ## Hidden Fields //! //! You can hide fields by skipping their setters on the builder struct. //! //! - Opt-out — skip setters via `#[builder(setter(skip))]` on individual fields. //! - Opt-in — set `#[builder(setter(skip))]` on the whole struct //! and enable individual setters via `#[builder(setter)]`. //! //! The types of skipped fields must implement `Default`. //! //! ```rust //! # #[macro_use] //! # extern crate derive_builder; //! # //! #[derive(Builder)] //! struct SetterOptOut { //! setter_present: u32, //! #[builder(setter(skip))] //! setter_skipped: u32, //! } //! # fn main() {} //! ``` //! //! Alternatively, you can use the more verbose form: //! //! - `#[builder(setter(skip = "true"))]` //! - `#[builder(setter(skip = "false"))]` //! //! ## Setter Visibility //! //! Setters are public by default. You can precede your struct (or field) with `#[builder(public)]` //! to make this explicit. //! //! Otherwise precede your struct (or field) with `#[builder(private)]` to opt into private //! setters. //! //! ## Setter Name/Prefix //! //! Setter methods are named after their corresponding field by default. //! //! - You can customize the setter name via `#[builder(setter(name = "foo"))`. //! - Alternatively you can set a prefix via `#[builder(setter(prefix = "xyz"))`, which will change //! the method name to `xyz_foo` if the field is named `foo`. Note that an underscore is //! inserted, since Rust favors snake case here. //! //! Prefixes can also be defined on the struct level, but renames only work on fields. Renames //! take precedence over prefix definitions. //! //! ## Generic Setters //! //! You can make each setter generic over the `Into`-trait. It's as simple as adding //! `#[builder(setter(into))]` to either a field or the whole struct. //! //! ```rust //! # #[macro_use] //! # extern crate derive_builder; //! # //! #[derive(Builder, Debug, PartialEq)] //! struct Lorem { //! #[builder(setter(into))] //! pub ipsum: String, //! } //! 
//! fn main() { //! // `"foo"` will be converted into a `String` automatically. //! let x = LoremBuilder::default().ipsum("foo").build().unwrap(); //! //! assert_eq!(x, Lorem { //! ipsum: "foo".to_string(), //! }); //! } //! ``` //! //! ## Setters for Option //! //! You can avoid to user to wrap value into `Some(...)` for field of type `Option<T>`. It's as simple as adding //! `#[builder(setter(strip_option))]` to either a field or the whole struct. //! //! ```rust //! # #[macro_use] //! # extern crate derive_builder; //! # //! #[derive(Builder, Debug, PartialEq)] //! struct Lorem { //! #[builder(setter(into, strip_option))] //! pub ipsum: Option<String>, //! #[builder(setter(into, strip_option), default)] //! pub foo: Option<String>, //! } //! //! fn main() { //! // `"foo"` will be converted into a `String` automatically. //! let x = LoremBuilder::default().ipsum("foo").build().unwrap(); //! //! assert_eq!(x, Lorem { //! ipsum: Some("foo".to_string()), //! foo: None //! }); //! } //! ``` //! If you want to set the value to None when unset, then enable `default` on this field (or do not use `strip_option`). //! //! Limitation: only the `Option` type name is supported, not type alias nor `std::option::Option`. //! //! ## Fallible Setters //! //! Alongside the normal setter methods, you can expose fallible setters which are generic over //! the `TryInto` trait. TryInto is a not-yet-stable trait //! (see rust-lang issue [#33417](https://github.com/rust-lang/rust/issues/33417)) similar to //! `Into` with the key distinction that the conversion can fail, and therefore produces a //! `Result`. //! //! You can only declare the `try_setter` attribute today if you're targeting nightly, and you have //! to add `#![feature(try_from)]` to your crate to use it. //! //! ```rust //! # #[macro_use] //! # extern crate derive_builder; //! #[derive(Builder, Debug, PartialEq)] //! #[builder(try_setter, setter(into))] //! struct Lorem { //! pub name: String, //! pub ipsum: u8, //! } //! //! #[derive(Builder, Debug, PartialEq)] //! struct Ipsum { //! #[builder(try_setter, setter(into, name = "foo"))] //! pub dolor: u8, //! } //! //! fn main() { //! LoremBuilder::default() //! .try_ipsum(1u16).unwrap() //! .name("hello") //! .build() //! .expect("1 fits into a u8"); //! //! IpsumBuilder::default() //! .try_foo(1u16).unwrap() //! .build() //! .expect("1 fits into a u8"); //! } //! ``` //! //! ## Default Values //! //! You can define default values for each field via annotation by `#[builder(default = "...")]`, //! where `...` stands for any Rust expression and must be string-escaped, e.g. //! //! * `#[builder(default = "42")]` //! * `#[builder(default)]` delegates to the [`Default`] trait of the base type. //! //! The expression will be evaluated with each call to `build`. //! //! ```rust //! # #[macro_use] //! # extern crate derive_builder; //! # //! #[derive(Builder, Debug, PartialEq)] //! struct Lorem { //! #[builder(default = "42")] //! pub ipsum: u32, //! } //! //! fn main() { //! // If we don't set the field `ipsum`, //! let x = LoremBuilder::default().build().unwrap(); //! //! // .. the custom default will be used for `ipsum`: //! assert_eq!(x, Lorem { //! ipsum: 42, //! }); //! } //! ``` //! //! ### Tips on Defaults //! //! * The `#[builder(default)]` annotation can be used on the struct level, too. Overrides are //! still possible. //! * Delegate to a private helper method on `FooBuilder` for anything fancy. This way //! 
you will get _much better error diagnostics_ from the rust compiler and it will be _much //! more readable_ for other human beings. :-) //! * Defaults will not work while using `#[builder(build_fn(skip))]`. In this case, you'll //! need to handle default values yourself when converting from the builder, such as by //! using `.unwrap_or()` and `.unwrap_or_else()`. //! //! [`Default`]: https://doc.rust-lang.org/std/default/trait.Default.html //! //! ```rust //! # #[macro_use] //! # extern crate derive_builder; //! # //! # #[derive(Builder, PartialEq, Debug)] //! struct Lorem { //! ipsum: String, //! // Custom defaults can delegate to helper methods //! // and pass errors to the enclosing `build()` method via `?`. //! #[builder(default = "self.default_dolor()?")] //! dolor: String, //! } //! //! impl LoremBuilder { //! // Private helper method with access to the builder struct. //! fn default_dolor(&self) -> Result<String, String> { //! match self.ipsum { //! Some(ref x) if x.chars().count() > 3 => Ok(format!("dolor {}", x)), //! _ => Err("ipsum must at least 3 chars to build dolor".to_string()), //! } //! } //! } //! //! # fn main() { //! # let x = LoremBuilder::default() //! # .ipsum("ipsum".to_string()) //! # .build() //! # .unwrap(); //! # //! # assert_eq!(x, Lorem { //! # ipsum: "ipsum".to_string(), //! # dolor: "dolor ipsum".to_string(), //! # }); //! # } //! ``` //! //! You can even reference other fields, but you have to remember that the builder struct //! will wrap every type in an Option ([as illustrated earlier](#what-you-get)). //! //! ## Generic Structs //! //! ```rust //! # #[macro_use] //! # extern crate derive_builder; //! # //! #[derive(Builder, Debug, PartialEq, Default, Clone)] //! struct GenLorem<T: Clone> { //! ipsum: &'static str, //! dolor: T, //! } //! //! fn main() { //! let x = GenLoremBuilder::default().ipsum("sit").dolor(42).build().unwrap(); //! assert_eq!(x, GenLorem { ipsum: "sit".into(), dolor: 42 }); //! } //! ``` //! //! ## Build Method Customization //! //! You can rename or suppress the auto-generated build method, leaving you free to implement //! your own version. Suppression is done using `#[builder(build_fn(skip))]` at the struct level, //! and renaming is done with `#[builder(build_fn(name = "YOUR_NAME"))]`. //! //! ## Pre-Build Validation //! //! If you're using the provided `build` method, you can declare //! `#[builder(build_fn(validate = "path::to::fn"))]` to specify a validator function which gets //! access to the builder before construction. The path does not need to be fully-qualified, and //! will consider `use` statements made at module level. It must be accessible from the scope //! where the target struct is declared. //! //! The provided function must have the signature `(&FooBuilder) -> Result<_, String>`; //! the `Ok` variant is not used by the `build` method. //! //! ```rust //! # #[macro_use] //! # extern crate derive_builder; //! # //! #[derive(Builder, Debug, PartialEq)] //! #[builder(build_fn(validate = "Self::validate"))] //! struct Lorem { //! pub ipsum: u8, //! } //! //! impl LoremBuilder { //! /// Check that `Lorem` is putting in the right amount of effort. //! fn validate(&self) -> Result<(), String> { //! if let Some(ref ipsum) = self.ipsum { //! match *ipsum { //! i if i < 20 => Err("Try harder".to_string()), //! i if i > 100 => Err("You'll tire yourself out".to_string()), //! _ => Ok(()) //! } //! } else { //! Ok(()) //! } //! } //! } //! //! fn main() { //! // If we're trying too hard... //! 
let x = LoremBuilder::default().ipsum(120).build().unwrap_err(); //! //! // .. the build will fail: //! assert_eq!(&x.to_string(), "You'll tire yourself out"); //! } //! ``` //! //! Note: //! * Default values are applied _after_ validation, and will therefore not be validated! //! //! ## Additional Trait Derivations //! //! You can derive additional traits on the builder, including traits defined by other crates: //! //! ```rust //! # #[macro_use] //! # extern crate derive_builder; //! # //! #[derive(Builder, Clone)] //! #[builder(derive(Debug, PartialEq, Eq))] //! pub struct Lorem { //! foo: u8, //! bar: String, //! } //! //! fn main() { //! assert_eq!(LoremBuilder::default(), LoremBuilder::default()); //! } //! ``` //! //! Attributes declared for those traits are _not_ forwarded to the fields on the builder. //! //! ## Documentation Comments and Attributes //! //! `#[derive(Builder)]` copies doc comments and attributes (`#[...]`) from your fields //! to the according builder fields and setter-methods, if it is one of the following: //! //! * `/// ...` //! * `#[doc = ...]` //! * `#[cfg(...)]` //! * `#[allow(...)]` //! //! The whitelisting minimizes interference with other custom attributes like //! those used by Serde, Diesel, or others. //! //! ```rust //! # #[macro_use] //! # extern crate derive_builder; //! # //! #[derive(Builder)] //! struct Lorem { //! /// `ipsum` may be any `String` (be creative). //! ipsum: String, //! #[doc = r"`dolor` is the estimated amount of work."] //! dolor: i32, //! // `#[derive(Builder)]` understands conditional compilation via cfg-attributes, //! // i.e. => "no field = no setter". //! #[cfg(target_os = "macos")] //! #[allow(non_snake_case)] //! Im_a_Mac: bool, //! } //! # fn main() {} //! ``` //! //! # **`#![no_std]`** Support (on Nightly) //! //! You can activate support for `#![no_std]` by adding `#[builder(no_std)]` to your struct //! and `#![feature(alloc)] extern crate alloc` to your crate. //! //! The latter requires the _nightly_ toolchain. //! //! # Troubleshooting //! //! ## Gotchas //! //! - Tuple structs and unit structs are not supported as they have no field //! names. //! - Generic setters introduce a type parameter `VALUE: Into<_>`. Therefore you can't use //! `VALUE` as a type parameter on a generic struct in combination with generic setters. //! - The `try_setter` attribute and `owned` builder pattern are not compatible in practice; //! an error during building will consume the builder, making it impossible to continue //! construction. //! - When re-exporting the underlying struct under a different name, the //! auto-generated documentation will not match. //! - If derive_builder depends on your crate, and vice versa, then a cyclic //! dependency would occur. To break it you could try to depend on the //! [`derive_builder_core`] crate instead. //! //! ## Report Issues and Ideas //! //! [Open an issue on GitHub](https://github.com/colin-kiegel/rust-derive-builder/issues) //! //! If possible please try to provide the debugging info if you experience unexpected //! compilation errors (see above). //! //! [builder pattern]: https://web.archive.org/web/20170701044756/https://aturon.github.io/ownership/builders.html //! 
[`derive_builder_core`]: https://crates.io/crates/derive_builder_core #![deny(warnings)] #![cfg_attr(not(feature = "std"), no_std)] extern crate derive_builder_core; extern crate derive_builder_macro; pub use derive_builder_macro::Builder; #[doc(inline)] pub use derive_builder_core::UninitializedFieldError; #[doc(hidden)] pub mod export { pub mod core { #[cfg(not(feature = "std"))] pub use core::*; #[cfg(feature = "std")] pub use std::*; } }
32.921569
120
0.607883
9b3a617eecf2c6d75c9780f1295d64fe4bec956c
11,446
//! An easy to use library for pretty print tables of Rust `struct`s and `enum`s. //! //! The library is based on a [Tabled] trait which is used to actually build tables. //! It also provides an variate of dynamic settings for customization of a [Table]. //! //! [Table] can be build from vast majority of Rust's standart types. //! //! ## Examples //! //! If you wan't to build a table for your data. //! Most likely a starting point is to anotate your type with `#[derive(Tabled)]`. //! //! ```rust //! use tabled::{Tabled, Table}; //! //! #[derive(Tabled)] //! struct Language { //! name: &'static str, //! designed_by: &'static str, //! invented_year: usize, //! } //! //! let languages = vec![ //! Language{ //! name: "C", //! designed_by: "Dennis Ritchie", //! invented_year: 1972 //! }, //! Language{ //! name: "Rust", //! designed_by: "Graydon Hoare", //! invented_year: 2010 //! }, //! Language{ //! name: "Go", //! designed_by: "Rob Pike", //! invented_year: 2009 //! }, //! ]; //! //! let table = Table::new(languages).to_string(); //! //! let expected = "+------+----------------+---------------+\n\ //! | name | designed_by | invented_year |\n\ //! +------+----------------+---------------+\n\ //! | C | Dennis Ritchie | 1972 |\n\ //! +------+----------------+---------------+\n\ //! | Rust | Graydon Hoare | 2010 |\n\ //! +------+----------------+---------------+\n\ //! | Go | Rob Pike | 2009 |\n\ //! +------+----------------+---------------+\n"; //! //! assert_eq!(table, expected); //! ``` //! //! Not all types can derive [Tabled] trait though. //! The example below can't be compiled. //! //! ```rust,compile_fail //! # use tabled::Tabled; //! #[derive(Tabled)] //! struct SomeType { //! field1: SomeOtherType, //! } //! //! struct SomeOtherType; //! ``` //! //! We must know what we're up to print as a field. Because of this //! each field must implement [std::fmt::Display]. //! //! ### Default implementations //! //! As I've already mentioned most of the default types implements the trait out of the box. //! //! This allows you to run the following code. //! //! ```rust //! use tabled::{Tabled, Table}; //! let table = Table::new(&[1, 2, 3]); //! # let expected = "+-----+\n\ //! # | i32 |\n\ //! # +-----+\n\ //! # | 1 |\n\ //! # +-----+\n\ //! # | 2 |\n\ //! # +-----+\n\ //! # | 3 |\n\ //! # +-----+\n"; //! # assert_eq!(table.to_string(), expected); //! ``` //! //! ### Combination of types via tuples //! //! Personally I consider this a feature which drives the library to shine. //! You can combine any types that implements [Tabled] trait into one table. //! //! You can also see in this example a `#[header("name")]` usage which configures a header //! of a table which will be printed. //! You could change it dynamically as well. //! //! ```rust //! use tabled::{Tabled, Table, Style}; //! //! #[derive(Tabled)] //! enum Domain { //! Security, //! Embeded, //! Frontend, //! Unknown, //! } //! //! #[derive(Tabled)] //! struct Developer(#[header("name")] &'static str); //! //! let data = vec![ //! (Developer("Terri Kshlerin"), Domain::Embeded), //! (Developer("Catalina Dicki"), Domain::Security), //! (Developer("Jennie Schmeler"), Domain::Frontend), //! (Developer("Maxim Zhiburt"), Domain::Unknown), //! ]; //! //! let table = Table::new(data).with(Style::psql()).to_string(); //! //! assert_eq!( //! table, //! concat!( //! " name | Security | Embeded | Frontend | Unknown \n", //! "-----------------+----------+---------+----------+---------\n", //! " Terri Kshlerin | | + | | \n", //! 
" Catalina Dicki | + | | | \n", //! " Jennie Schmeler | | | + | \n", //! " Maxim Zhiburt | | | | + \n" //! ) //! ); //! ``` //! //! ## Settings //! //! You can find more examples of settings and attributes in //! [README.md](https://github.com/zhiburt/tabled/blob/master/README.md) //! use papergrid::{AlignmentHorizontal, Entity, Grid, Settings}; use std::fmt; mod alignment; mod disable; mod formating; mod indent; mod object; mod panel; mod rotate; pub mod style; mod width; pub use crate::{ alignment::*, disable::*, formating::*, indent::*, object::*, panel::*, rotate::*, style::Style, width::*, }; pub use papergrid; pub use tabled_derive::Tabled; /// Tabled a trait responsible for providing a header fields and a row fields. /// /// It's urgent that `header` len is equal to `fields` len. /// /// ```text /// Self::headers().len() == self.fields().len() /// ``` pub trait Tabled { /// Fields method must return a list of cells. /// /// The cells will be placed in the same row, preserving the order. fn fields(&self) -> Vec<String>; /// Headers must return a list of column names. fn headers() -> Vec<String>; } impl<T> Tabled for &T where T: Tabled, { fn fields(&self) -> Vec<String> { T::fields(self) } fn headers() -> Vec<String> { T::headers() } } /// A trait which is responsilbe for configuration of a [Grid]. pub trait TableOption { /// The function modifies a [Grid] object. fn change(&mut self, grid: &mut Grid); } impl<T> TableOption for &mut T where T: TableOption + ?Sized, { fn change(&mut self, grid: &mut Grid) { T::change(self, grid) } } /// A trait for configuring a [Cell] a single cell. /// Where cell represented by 'row' and 'column' indexes. pub trait CellOption { /// Modification function of a [Cell] fn change_cell(&mut self, grid: &mut Grid, row: usize, column: usize); } /// Table structure provides an interface for building a table for types that implements [Tabled]. /// /// To build a string representation of a table you must use a [std::fmt::Display]. /// Or simply call `.to_string()` method. /// /// ## Example /// /// ### Basic usage /// /// ```rust,no_run /// use tabled::Table; /// let table = Table::new(&["Year", "2021"]); /// ``` /// /// ### With settings /// /// ```rust,no_run /// use tabled::{Table, Style, Alignment, Full, Modify}; /// let data = vec!["Hello", "2021"]; /// let table = Table::new(&data) /// .with(Style::psql()) /// .with(Modify::new(Full).with(Alignment::left())); /// println!("{}", table); /// ``` pub struct Table { grid: Grid, } impl Table { /// New creates a Table instance. pub fn new<T: Tabled>(iter: impl IntoIterator<Item = T>) -> Self { let grid = build_grid(iter); Self { grid } } /// With is a generic function which applies options to the [Table]. /// /// It applies settings immediately. pub fn with<O>(mut self, mut option: O) -> Self where O: TableOption, { option.change(&mut self.grid); self } } impl fmt::Display for Table { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.grid) } } /// Modify structure provide an abstraction, to be able to apply /// a set of [CellOption]s to the same object. pub struct Modify<O> { obj: O, modifiers: Vec<Box<dyn CellOption>>, } impl<O> Modify<O> where O: Object, { /// Creates a new [Modify] without any options. pub fn new(obj: O) -> Self { Self { obj, modifiers: Vec::new(), } } /// With a generic function which stores a [CellOption]. /// /// The function *doesn't* changes a [Grid]. [Grid] will be changed /// only after passing [Modify] object to [Table::with]. 
pub fn with<F>(mut self, f: F) -> Self where F: CellOption + 'static, { let func = Box::new(f); self.modifiers.push(func); self } } impl<O> TableOption for Modify<O> where O: Object, { fn change(&mut self, grid: &mut Grid) { let cells = self.obj.cells(grid.count_rows(), grid.count_columns()); for func in &mut self.modifiers { for &(row, column) in &cells { func.change_cell(grid, row, column) } } } } /// Building [Grid] from a data. /// You must prefer [Table] over this function. fn build_grid<T: Tabled>(iter: impl IntoIterator<Item = T>) -> Grid { let headers = T::headers(); let obj: Vec<Vec<String>> = iter.into_iter().map(|t| t.fields()).collect(); let mut grid = Grid::new(obj.len() + 1, headers.len()); // it's crusial to set a global setting rather than a setting for an each cell // as it will be hard to override that since how Grid::style method works grid.set( Entity::Global, Settings::new() .indent(1, 1, 0, 0) .alignment(AlignmentHorizontal::Center), ); for (i, h) in headers.iter().enumerate() { grid.set(Entity::Cell(0, i), Settings::new().text(h)); } let mut row = 1; for fields in &obj { for (column, field) in fields.iter().enumerate() { grid.set(Entity::Cell(row, column), Settings::new().text(field)); } // don't show off a empty data array // currently it's possible when `#[header(hidden)]` attribute used for a enum if !fields.is_empty() { row += 1; } } grid } macro_rules! tuple_table { ( $($name:ident)+ ) => { impl<$($name: Tabled),+> Tabled for ($($name,)+){ fn fields(&self) -> Vec<String> { #![allow(non_snake_case)] let ($($name,)+) = self; let mut fields = Vec::new(); $(fields.append(&mut $name.fields());)+ fields } fn headers() -> Vec<String> { let mut fields = Vec::new(); $(fields.append(&mut $name::headers());)+ fields } } }; } tuple_table! { A } tuple_table! { A B } tuple_table! { A B C } tuple_table! { A B C D } tuple_table! { A B C D E } tuple_table! { A B C D E F } macro_rules! default_table { ( $t:ty ) => { impl Tabled for $t { fn fields(&self) -> Vec<String> { vec![format!("{}", self)] } fn headers() -> Vec<String> { vec![stringify!($t).to_string()] } } }; } default_table!(&str); default_table!(String); default_table!(char); default_table!(bool); default_table!(isize); default_table!(usize); default_table!(u8); default_table!(u16); default_table!(u32); default_table!(u64); default_table!(u128); default_table!(i8); default_table!(i16); default_table!(i32); default_table!(i64); default_table!(i128); default_table!(f32); default_table!(f64); impl<T: fmt::Display, const N: usize> Tabled for [T; N] { fn fields(&self) -> Vec<String> { self.iter().map(|e| e.to_string()).collect() } fn headers() -> Vec<String> { (0..N).map(|i| format!("{}", i)).collect() } }
26.868545
98
0.520531
d9eec480cd0c613a1df6e309619526d4c37f1afc
278
#![feature(core_intrinsics)]

fn main() {
    let x = 5;
    unsafe {
        std::intrinsics::assume(x < 10);
        std::intrinsics::assume(x > 1);
        std::intrinsics::assume(x > 42); //~ ERROR constant evaluation error
        //~^ NOTE `assume` argument was false
    }
}
23.166667
76
0.568345
1d66be29f63cb188d0a3831338d881a29dacf301
15,995
#[doc = "Register `EVCTRL` reader"] pub struct R(crate::R<EVCTRL_SPEC>); impl core::ops::Deref for R { type Target = crate::R<EVCTRL_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<EVCTRL_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<EVCTRL_SPEC>) -> Self { R(reader) } } #[doc = "Register `EVCTRL` writer"] pub struct W(crate::W<EVCTRL_SPEC>); impl core::ops::Deref for W { type Target = crate::W<EVCTRL_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<EVCTRL_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<EVCTRL_SPEC>) -> Self { W(writer) } } #[doc = "Field `PEREO0` reader - Periodic Interval 0 Event Output Enable"] pub struct PEREO0_R(crate::FieldReader<bool, bool>); impl PEREO0_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { PEREO0_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for PEREO0_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `PEREO0` writer - Periodic Interval 0 Event Output Enable"] pub struct PEREO0_W<'a> { w: &'a mut W, } impl<'a> PEREO0_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | (value as u16 & 0x01); self.w } } #[doc = "Field `PEREO1` reader - Periodic Interval 1 Event Output Enable"] pub struct PEREO1_R(crate::FieldReader<bool, bool>); impl PEREO1_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { PEREO1_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for PEREO1_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `PEREO1` writer - Periodic Interval 1 Event Output Enable"] pub struct PEREO1_W<'a> { w: &'a mut W, } impl<'a> PEREO1_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 1)) | ((value as u16 & 0x01) << 1); self.w } } #[doc = "Field `PEREO2` reader - Periodic Interval 2 Event Output Enable"] pub struct PEREO2_R(crate::FieldReader<bool, bool>); impl PEREO2_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { PEREO2_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for PEREO2_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `PEREO2` writer - Periodic Interval 2 Event Output Enable"] pub struct PEREO2_W<'a> { w: &'a mut W, } impl<'a> PEREO2_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 2)) | ((value as u16 & 0x01) << 2); self.w } 
} #[doc = "Field `PEREO3` reader - Periodic Interval 3 Event Output Enable"] pub struct PEREO3_R(crate::FieldReader<bool, bool>); impl PEREO3_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { PEREO3_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for PEREO3_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `PEREO3` writer - Periodic Interval 3 Event Output Enable"] pub struct PEREO3_W<'a> { w: &'a mut W, } impl<'a> PEREO3_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 3)) | ((value as u16 & 0x01) << 3); self.w } } #[doc = "Field `PEREO4` reader - Periodic Interval 4 Event Output Enable"] pub struct PEREO4_R(crate::FieldReader<bool, bool>); impl PEREO4_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { PEREO4_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for PEREO4_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `PEREO4` writer - Periodic Interval 4 Event Output Enable"] pub struct PEREO4_W<'a> { w: &'a mut W, } impl<'a> PEREO4_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 4)) | ((value as u16 & 0x01) << 4); self.w } } #[doc = "Field `PEREO5` reader - Periodic Interval 5 Event Output Enable"] pub struct PEREO5_R(crate::FieldReader<bool, bool>); impl PEREO5_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { PEREO5_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for PEREO5_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `PEREO5` writer - Periodic Interval 5 Event Output Enable"] pub struct PEREO5_W<'a> { w: &'a mut W, } impl<'a> PEREO5_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 5)) | ((value as u16 & 0x01) << 5); self.w } } #[doc = "Field `PEREO6` reader - Periodic Interval 6 Event Output Enable"] pub struct PEREO6_R(crate::FieldReader<bool, bool>); impl PEREO6_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { PEREO6_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for PEREO6_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `PEREO6` writer - Periodic Interval 6 Event Output Enable"] pub struct PEREO6_W<'a> { w: &'a mut W, } impl<'a> PEREO6_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc 
= r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 6)) | ((value as u16 & 0x01) << 6); self.w } } #[doc = "Field `PEREO7` reader - Periodic Interval 7 Event Output Enable"] pub struct PEREO7_R(crate::FieldReader<bool, bool>); impl PEREO7_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { PEREO7_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for PEREO7_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `PEREO7` writer - Periodic Interval 7 Event Output Enable"] pub struct PEREO7_W<'a> { w: &'a mut W, } impl<'a> PEREO7_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 7)) | ((value as u16 & 0x01) << 7); self.w } } #[doc = "Field `ALARMEO0` reader - Alarm 0 Event Output Enable"] pub struct ALARMEO0_R(crate::FieldReader<bool, bool>); impl ALARMEO0_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { ALARMEO0_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for ALARMEO0_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `ALARMEO0` writer - Alarm 0 Event Output Enable"] pub struct ALARMEO0_W<'a> { w: &'a mut W, } impl<'a> ALARMEO0_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 8)) | ((value as u16 & 0x01) << 8); self.w } } #[doc = "Field `OVFEO` reader - Overflow Event Output Enable"] pub struct OVFEO_R(crate::FieldReader<bool, bool>); impl OVFEO_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { OVFEO_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for OVFEO_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `OVFEO` writer - Overflow Event Output Enable"] pub struct OVFEO_W<'a> { w: &'a mut W, } impl<'a> OVFEO_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 15)) | ((value as u16 & 0x01) << 15); self.w } } impl R { #[doc = "Bit 0 - Periodic Interval 0 Event Output Enable"] #[inline(always)] pub fn pereo0(&self) -> PEREO0_R { PEREO0_R::new((self.bits & 0x01) != 0) } #[doc = "Bit 1 - Periodic Interval 1 Event Output Enable"] #[inline(always)] pub fn pereo1(&self) -> PEREO1_R { PEREO1_R::new(((self.bits >> 1) & 0x01) != 0) } #[doc = "Bit 2 - Periodic Interval 2 Event Output Enable"] #[inline(always)] pub fn pereo2(&self) -> PEREO2_R { PEREO2_R::new(((self.bits >> 2) & 0x01) != 0) } #[doc = "Bit 3 - Periodic Interval 3 Event Output Enable"] #[inline(always)] pub fn pereo3(&self) -> PEREO3_R { 
PEREO3_R::new(((self.bits >> 3) & 0x01) != 0) } #[doc = "Bit 4 - Periodic Interval 4 Event Output Enable"] #[inline(always)] pub fn pereo4(&self) -> PEREO4_R { PEREO4_R::new(((self.bits >> 4) & 0x01) != 0) } #[doc = "Bit 5 - Periodic Interval 5 Event Output Enable"] #[inline(always)] pub fn pereo5(&self) -> PEREO5_R { PEREO5_R::new(((self.bits >> 5) & 0x01) != 0) } #[doc = "Bit 6 - Periodic Interval 6 Event Output Enable"] #[inline(always)] pub fn pereo6(&self) -> PEREO6_R { PEREO6_R::new(((self.bits >> 6) & 0x01) != 0) } #[doc = "Bit 7 - Periodic Interval 7 Event Output Enable"] #[inline(always)] pub fn pereo7(&self) -> PEREO7_R { PEREO7_R::new(((self.bits >> 7) & 0x01) != 0) } #[doc = "Bit 8 - Alarm 0 Event Output Enable"] #[inline(always)] pub fn alarmeo0(&self) -> ALARMEO0_R { ALARMEO0_R::new(((self.bits >> 8) & 0x01) != 0) } #[doc = "Bit 15 - Overflow Event Output Enable"] #[inline(always)] pub fn ovfeo(&self) -> OVFEO_R { OVFEO_R::new(((self.bits >> 15) & 0x01) != 0) } } impl W { #[doc = "Bit 0 - Periodic Interval 0 Event Output Enable"] #[inline(always)] pub fn pereo0(&mut self) -> PEREO0_W { PEREO0_W { w: self } } #[doc = "Bit 1 - Periodic Interval 1 Event Output Enable"] #[inline(always)] pub fn pereo1(&mut self) -> PEREO1_W { PEREO1_W { w: self } } #[doc = "Bit 2 - Periodic Interval 2 Event Output Enable"] #[inline(always)] pub fn pereo2(&mut self) -> PEREO2_W { PEREO2_W { w: self } } #[doc = "Bit 3 - Periodic Interval 3 Event Output Enable"] #[inline(always)] pub fn pereo3(&mut self) -> PEREO3_W { PEREO3_W { w: self } } #[doc = "Bit 4 - Periodic Interval 4 Event Output Enable"] #[inline(always)] pub fn pereo4(&mut self) -> PEREO4_W { PEREO4_W { w: self } } #[doc = "Bit 5 - Periodic Interval 5 Event Output Enable"] #[inline(always)] pub fn pereo5(&mut self) -> PEREO5_W { PEREO5_W { w: self } } #[doc = "Bit 6 - Periodic Interval 6 Event Output Enable"] #[inline(always)] pub fn pereo6(&mut self) -> PEREO6_W { PEREO6_W { w: self } } #[doc = "Bit 7 - Periodic Interval 7 Event Output Enable"] #[inline(always)] pub fn pereo7(&mut self) -> PEREO7_W { PEREO7_W { w: self } } #[doc = "Bit 8 - Alarm 0 Event Output Enable"] #[inline(always)] pub fn alarmeo0(&mut self) -> ALARMEO0_W { ALARMEO0_W { w: self } } #[doc = "Bit 15 - Overflow Event Output Enable"] #[inline(always)] pub fn ovfeo(&mut self) -> OVFEO_W { OVFEO_W { w: self } } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u16) -> &mut Self { self.0.bits(bits); self } } #[doc = "MODE2 Event Control\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [evctrl](index.html) module"] pub struct EVCTRL_SPEC; impl crate::RegisterSpec for EVCTRL_SPEC { type Ux = u16; } #[doc = "`read()` method returns [evctrl::R](R) reader structure"] impl crate::Readable for EVCTRL_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [evctrl::W](W) writer structure"] impl crate::Writable for EVCTRL_SPEC { type Writer = W; } #[doc = "`reset()` method sets EVCTRL to value 0"] impl crate::Resettable for EVCTRL_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
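// Illustrative usage sketch (not part of the generated file): how the EVCTRL API above is
// normally driven through svd2rust's read/modify/write closures. It assumes the peripheral
// module defines the usual alias `type EVCTRL = crate::Reg<evctrl::EVCTRL_SPEC>` and that the
// caller already has a reference to that register; the concrete path to the register on a
// given PAC is an assumption, while the field methods are the ones defined above.
fn configure_event_outputs(evctrl: &EVCTRL) {
    // Enable periodic-interval-0 and overflow event outputs in a single
    // read-modify-write, leaving all other bits unchanged.
    evctrl.modify(|_, w| w.pereo0().set_bit().ovfeo().set_bit());
    // Individual fields can be read back through the reader structure.
    let _overflow_enabled = evctrl.read().ovfeo().bit_is_set();
}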
29.785847
406
0.571741
1426174b482b64af54495f376ab182502ea48096
2,727
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // compile-flags: -Z identify_regions -Z span_free_formats -Z emit-end-regions // ignore-tidy-linelength // Unwinding should EndRegion for in-scope borrows: Borrow of moved data. fn main() { let d = D(0); foo(move || -> i32 { let r = &d; r.0 }); } struct D(i32); impl Drop for D { fn drop(&mut self) { println!("dropping D({})", self.0); } } fn foo<F>(f: F) where F: FnOnce() -> i32 { if f() > 0 { panic!("im positive"); } } // END RUST SOURCE // START rustc.main.SimplifyCfg-qualify-consts.after.mir // fn main() -> () { // let mut _0: (); // ... // let _1: D; // ... // let mut _2: (); // let mut _3: [closure@NodeId(22) d:D]; // let mut _4: D; // bb0: { // StorageLive(_1); // _1 = D::{{constructor}}(const 0i32,); // StorageLive(_3); // StorageLive(_4); // _4 = move _1; // _3 = [closure@NodeId(22)] { d: move _4 }; // drop(_4) -> [return: bb4, unwind: bb3]; // } // bb1: { // resume; // } // bb2: { // drop(_1) -> bb1; // } // bb3: { // drop(_3) -> bb2; // } // bb4: { // StorageDead(_4); // _2 = const foo(move _3) -> [return: bb5, unwind: bb3]; // } // bb5: { // drop(_3) -> [return: bb6, unwind: bb2]; // } // bb6: { // StorageDead(_3); // _0 = (); // drop(_1) -> [return: bb7, unwind: bb1]; // } // bb7: { // StorageDead(_1); // return; // } // } // END rustc.main.SimplifyCfg-qualify-consts.after.mir // START rustc.main-{{closure}}.SimplifyCfg-qualify-consts.after.mir // fn main::{{closure}}(_1: [closure@NodeId(22) d:D]) -> i32 { // let mut _0: i32; // ... // let _2: &'15_0rs D; // ... // let mut _3: i32; // bb0: { // StorageLive(_2); // _2 = &'15_0rs (_1.0: D); // StorageLive(_3); // _3 = ((*_2).0: i32); // _0 = move _3; // StorageDead(_3); // EndRegion('15_0rs); // StorageDead(_2); // drop(_1) -> [return: bb2, unwind: bb1]; // } // bb1: { // resume; // } // bb2: { // return; // } // } // END rustc.main-{{closure}}.SimplifyCfg-qualify-consts.after.mir
27
78
0.508984
4b6f03c0331801498b9b92f3891201db7fe891fb
7,169
/// A set of possible key presses, equivalent to the one in `termion::event::Key` #[derive(Debug, Clone, Copy)] pub enum Key { Backspace, Left, Right, Up, Down, Home, End, PageUp, PageDown, BackTab, Delete, Insert, F(u8), Char(char), Alt(char), Ctrl(char), Null, Esc, } #[cfg(any(feature = "crossterm", feature = "termion"))] enum Action<T> { Continue, Result(Result<T, std::io::Error>), } #[cfg(any(feature = "crossterm", feature = "termion"))] fn continue_on_interrupt<T>(result: Result<T, std::io::Error>) -> Action<T> { match result { Ok(v) => Action::Result(Ok(v)), Err(err) if err.kind() == std::io::ErrorKind::Interrupted => Action::Continue, Err(err) => Action::Result(Err(err)), } } mod convert { #[cfg(any(feature = "crossterm", feature = "termion"))] use super::Key; #[cfg(feature = "crossterm")] /// Convert from `crossterm::event::KeyEvent` impl std::convert::TryFrom<crossterm::event::KeyEvent> for Key { type Error = crossterm::event::KeyEvent; fn try_from(value: crossterm::event::KeyEvent) -> Result<Self, Self::Error> { use crossterm::event::{KeyCode::*, KeyModifiers}; Ok(match value.code { Backspace => Key::Backspace, Enter => Key::Char('\n'), Left => Key::Left, Right => Key::Right, Up => Key::Up, Down => Key::Down, Home => Key::Home, End => Key::End, PageUp => Key::PageUp, PageDown => Key::PageDown, Tab => Key::Char('\t'), BackTab => Key::BackTab, Delete => Key::Delete, Insert => Key::Insert, F(k) => Key::F(k), Null => Key::Null, Esc => Key::Esc, Char(c) => match value.modifiers { KeyModifiers::NONE | KeyModifiers::SHIFT => Key::Char(c), KeyModifiers::CONTROL => Key::Ctrl(c), KeyModifiers::ALT => Key::Alt(c), _ => return Err(value), }, }) } } #[cfg(feature = "termion")] /// Convert from `termion::event::Key` impl std::convert::TryFrom<termion::event::Key> for Key { type Error = termion::event::Key; fn try_from(value: termion::event::Key) -> Result<Self, Self::Error> { use termion::event::Key::*; Ok(match value { Backspace => Key::Backspace, Left => Key::Left, Right => Key::Right, Up => Key::Up, Down => Key::Down, Home => Key::Home, End => Key::End, PageUp => Key::PageUp, PageDown => Key::PageDown, BackTab => Key::BackTab, Delete => Key::Delete, Insert => Key::Insert, F(c) => Key::F(c), Char(c) => Key::Char(c), Alt(c) => Key::Alt(c), Ctrl(c) => Key::Ctrl(c), Null => Key::Null, Esc => Key::Esc, _ => return Err(value), }) } } } #[cfg(feature = "crossterm")] mod _impl { use crate::input::{continue_on_interrupt, Action}; /// Return a receiver of user input events to avoid blocking the main thread. pub fn key_input_channel() -> std::sync::mpsc::Receiver<super::Key> { use std::convert::TryInto; let (key_send, key_receive) = std::sync::mpsc::sync_channel(0); std::thread::spawn(move || -> Result<(), std::io::Error> { loop { let event = match continue_on_interrupt( crossterm::event::read().map_err(crate::crossterm_utils::into_io_error), ) { Action::Continue => continue, Action::Result(res) => res?, }; match event { crossterm::event::Event::Key(key) => { let key: Result<super::Key, _> = key.try_into(); if let Ok(key) = key { if key_send.send(key).is_err() { break; } }; } _ => continue, }; } Ok(()) }); key_receive } /// Return a stream of key input events /// /// Requires the `input-async-crossterm` feature.
#[cfg(feature = "input-async-crossterm")] pub fn key_input_stream() -> impl futures_core::stream::Stream<Item = super::Key> { use futures_lite::StreamExt; use std::convert::TryFrom; crossterm::event::EventStream::new() .filter_map(|r| r.ok()) .filter_map(|e| match e { crossterm::event::Event::Key(key) => super::Key::try_from(key).ok(), _ => None, }) } } #[cfg(all(feature = "termion", not(feature = "crossterm")))] mod _impl { use crate::input::{continue_on_interrupt, Action}; /// Return a stream of user input events. /// /// Requires feature `futures-channel` #[cfg(feature = "input-async")] pub fn key_input_stream() -> impl futures_core::stream::Stream<Item = super::Key> { use std::{convert::TryInto, io}; use termion::input::TermRead; let (key_send, key_receive) = async_channel::bounded::<super::Key>(1); // This brings blocking key-handling into the async world std::thread::spawn(move || -> Result<(), io::Error> { for key in io::stdin().keys() { let key: Result<super::Key, _> = match continue_on_interrupt(key) { Action::Continue => continue, Action::Result(res) => res?.try_into(), }; if let Ok(key) = key { if futures_lite::future::block_on(key_send.send(key)).is_err() { break; } } } Ok(()) }); key_receive } pub fn key_input_channel() -> std::sync::mpsc::Receiver<super::Key> { use std::{convert::TryInto, io}; use termion::input::TermRead; let (key_send, key_receive) = std::sync::mpsc::sync_channel(0); std::thread::spawn(move || -> Result<(), io::Error> { for key in io::stdin().keys() { let key: Result<super::Key, _> = match continue_on_interrupt(key) { Action::Continue => continue, Action::Result(res) => res?.try_into(), }; if let Ok(key) = key { if key_send.send(key).is_err() { break; } } } Ok(()) }); key_receive } } #[cfg(any(feature = "termion", feature = "crossterm"))] pub use _impl::*;
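// Illustrative consumer sketch (not part of the original module): drain the blocking channel
// returned by `key_input_channel()` above until the user presses Esc, 'q', or Ctrl-c. The
// `crate::input` path is an assumption about where this module is mounted; the `Key` variants
// and the channel semantics come from the code above.
fn run_input_loop() {
    use crate::input::{key_input_channel, Key};
    let keys = key_input_channel();
    // `recv()` blocks until the background thread forwards the next key press
    // and returns `Err` once that thread shuts down.
    while let Ok(key) = keys.recv() {
        match key {
            Key::Esc | Key::Char('q') | Key::Ctrl('c') => break,
            other => {
                // Forward every other key press to the application here.
                let _ = other;
            }
        }
    }
}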
33.189815
92
0.469382
e69ca971b5234f3533f871dd8920d6fe1678335a
6,965
// Copyright 2018-2021 the Deno authors. All rights reserved. MIT license. use crate::colors; use crate::version; use deno_core::error::type_error; use deno_core::error::AnyError; use deno_core::error::Context; use deno_core::futures::FutureExt; use deno_core::resolve_url; use deno_core::serde::Deserialize; use deno_core::serde::Serialize; use deno_core::serde_json; use deno_core::url::Url; use deno_core::v8_set_flags; use deno_core::ModuleLoader; use deno_core::ModuleSpecifier; use deno_core::OpState; use deno_runtime::deno_broadcast_channel::InMemoryBroadcastChannel; use deno_runtime::deno_file::BlobUrlStore; use deno_runtime::permissions::Permissions; use deno_runtime::permissions::PermissionsOptions; use deno_runtime::worker::MainWorker; use deno_runtime::worker::WorkerOptions; use log::Level; use std::cell::RefCell; use std::convert::TryInto; use std::env::current_exe; use std::fs::File; use std::io::Read; use std::io::Seek; use std::io::SeekFrom; use std::iter::once; use std::pin::Pin; use std::rc::Rc; use std::sync::Arc; #[derive(Deserialize, Serialize)] pub struct Metadata { pub argv: Vec<String>, pub unstable: bool, pub seed: Option<u64>, pub permissions: PermissionsOptions, pub location: Option<Url>, pub v8_flags: Vec<String>, pub log_level: Option<Level>, pub ca_data: Option<Vec<u8>>, } pub const MAGIC_TRAILER: &[u8; 8] = b"d3n0l4nd"; /// This function will try to run this binary as a standalone binary /// produced by `deno compile`. It determines if this is a standalone /// binary by checking for the magic trailer string `d3n0l4nd` at EOF-24. /// The magic trailer is followed by: /// - a u64 pointer to the JS bundle embedded in the binary /// - a u64 pointer to JSON metadata (serialized flags) embedded in the binary /// These are dereferenced, and the bundle is executed under the configuration /// specified by the metadata. If no magic trailer is present, this function /// returns `Ok(None)`.
pub fn extract_standalone( args: Vec<String>, ) -> Result<Option<(Metadata, String)>, AnyError> { let current_exe_path = current_exe()?; let mut current_exe = File::open(current_exe_path)?; let trailer_pos = current_exe.seek(SeekFrom::End(-24))?; let mut trailer = [0; 24]; current_exe.read_exact(&mut trailer)?; let (magic_trailer, rest) = trailer.split_at(8); if magic_trailer != MAGIC_TRAILER { return Ok(None); } let (bundle_pos, rest) = rest.split_at(8); let metadata_pos = rest; let bundle_pos = u64_from_bytes(bundle_pos)?; let metadata_pos = u64_from_bytes(metadata_pos)?; let bundle_len = metadata_pos - bundle_pos; let metadata_len = trailer_pos - metadata_pos; current_exe.seek(SeekFrom::Start(bundle_pos))?; let bundle = read_string_slice(&mut current_exe, bundle_pos, bundle_len) .context("Failed to read source bundle from the current executable")?; let metadata = read_string_slice(&mut current_exe, metadata_pos, metadata_len) .context("Failed to read metadata from the current executable")?; let mut metadata: Metadata = serde_json::from_str(&metadata).unwrap(); metadata.argv.append(&mut args[1..].to_vec()); Ok(Some((metadata, bundle))) } fn u64_from_bytes(arr: &[u8]) -> Result<u64, AnyError> { let fixed_arr: &[u8; 8] = arr .try_into() .context("Failed to convert the buffer into a fixed-size array")?; Ok(u64::from_be_bytes(*fixed_arr)) } fn read_string_slice( file: &mut File, pos: u64, len: u64, ) -> Result<String, AnyError> { let mut string = String::new(); file.seek(SeekFrom::Start(pos))?; file.take(len).read_to_string(&mut string)?; // TODO: check amount of bytes read Ok(string) } const SPECIFIER: &str = "file://$deno$/bundle.js"; struct EmbeddedModuleLoader(String); impl ModuleLoader for EmbeddedModuleLoader { fn resolve( &self, _op_state: Rc<RefCell<OpState>>, specifier: &str, _referrer: &str, _is_main: bool, ) -> Result<ModuleSpecifier, AnyError> { if specifier != SPECIFIER { return Err(type_error( "Self-contained binaries don't support module loading", )); } Ok(resolve_url(specifier)?) } fn load( &self, _op_state: Rc<RefCell<OpState>>, module_specifier: &ModuleSpecifier, _maybe_referrer: Option<ModuleSpecifier>, _is_dynamic: bool, ) -> Pin<Box<deno_core::ModuleSourceFuture>> { let module_specifier = module_specifier.clone(); let code = self.0.to_string(); async move { if module_specifier.to_string() != SPECIFIER { return Err(type_error( "Self-contained binaries don't support module loading", )); } Ok(deno_core::ModuleSource { code, module_url_specified: module_specifier.to_string(), module_url_found: module_specifier.to_string(), }) } .boxed_local() } } pub async fn run( source_code: String, metadata: Metadata, ) -> Result<(), AnyError> { let main_module = resolve_url(SPECIFIER)?; let permissions = Permissions::from_options(&metadata.permissions); let blob_url_store = BlobUrlStore::default(); let broadcast_channel = InMemoryBroadcastChannel::default(); let module_loader = Rc::new(EmbeddedModuleLoader(source_code)); let create_web_worker_cb = Arc::new(|_| { todo!("Worker are currently not supported in standalone binaries"); }); // Keep in sync with `main.rs`. 
v8_set_flags( once("UNUSED_BUT_NECESSARY_ARG0".to_owned()) .chain(metadata.v8_flags.iter().cloned()) .collect::<Vec<_>>(), ); let options = WorkerOptions { apply_source_maps: false, args: metadata.argv, debug_flag: metadata.log_level.map_or(false, |l| l == log::Level::Debug), user_agent: version::get_user_agent(), unstable: metadata.unstable, ca_data: metadata.ca_data, seed: metadata.seed, js_error_create_fn: None, create_web_worker_cb, attach_inspector: false, maybe_inspector_server: None, should_break_on_first_statement: false, module_loader, runtime_version: version::deno(), ts_version: version::TYPESCRIPT.to_string(), no_color: !colors::use_color(), get_error_class_fn: Some(&get_error_class_name), location: metadata.location, origin_storage_dir: None, blob_url_store, broadcast_channel, }; let mut worker = MainWorker::from_options(main_module.clone(), permissions, &options); worker.bootstrap(&options); worker.execute_module(&main_module).await?; worker.execute("window.dispatchEvent(new Event('load'))")?; worker.run_event_loop(true).await?; worker.execute("window.dispatchEvent(new Event('unload'))")?; std::process::exit(0); } fn get_error_class_name(e: &AnyError) -> &'static str { deno_runtime::errors::get_error_class_name(e).unwrap_or_else(|| { panic!( "Error '{}' contains boxed error of unsupported type:{}", e, e.chain() .map(|e| format!("\n {:?}", e)) .collect::<String>() ); }) }
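// Illustrative sketch (not in the original source): `extract_standalone` above expects the
// executable to end with the 8-byte magic followed by two big-endian u64 file offsets
// (bundle, then metadata), i.e. a 24-byte trailer at EOF-24. This is a hedged sketch of the
// matching writer that a `deno compile`-style packer would append after writing the bundle
// and metadata; the function name and `Write` plumbing are illustrative assumptions, only the
// layout mirrors the reader above.
fn write_trailer<W: std::io::Write>(
    out: &mut W,
    bundle_pos: u64,
    metadata_pos: u64,
) -> std::io::Result<()> {
    out.write_all(MAGIC_TRAILER)?;
    out.write_all(&bundle_pos.to_be_bytes())?;
    out.write_all(&metadata_pos.to_be_bytes())?;
    Ok(())
}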
31.515837
78
0.702513
ffb6f47fd05a515a0b267eaae571e5e8672861fd
4,130
#![no_main] #![no_std] #![feature(default_alloc_error_handler)] extern crate alloc; use alloc::vec; use alloc_cortex_m::CortexMHeap; use cherry::widget::{ container::{Alignment, Axis, Border, Container, Insets, Justification}, Widget, }; use defmt_rtt as _; use embedded_graphics::{pixelcolor::BinaryColor, prelude::*, primitives::CornerRadii}; use panic_probe as _; use ssd1306::{mode::BufferedGraphicsMode, prelude::*, I2CDisplayInterface, Ssd1306}; use stm32f3_discovery::stm32f3xx_hal::{ self as _, gpio::{Alternate, Gpiob, OpenDrain, Pin, U}, i2c::I2c, pac::{self, I2C1}, prelude::*, }; type Screen = Ssd1306< I2CInterface< I2c< I2C1, ( Pin<Gpiob, U<6_u8>, Alternate<OpenDrain, 4_u8>>, Pin<Gpiob, U<7_u8>, Alternate<OpenDrain, 4_u8>>, ), >, >, ssd1306::prelude::DisplaySize128x64, BufferedGraphicsMode<ssd1306::prelude::DisplaySize128x64>, >; const HEAP_SIZE: usize = 1024; const MAX_BLOCK_SIZE: u32 = 30; const MIN_BLOCK_SIZE: u32 = 0; const BLOCK_SIZE_INCREMENT: u32 = 2; #[global_allocator] static ALLOCATOR: CortexMHeap = CortexMHeap::empty(); #[cortex_m_rt::entry] fn main() -> ! { init_allocator(); let mut display = configure_display(); animate(&mut display); } fn init_allocator() { static mut HEAP: [u8; HEAP_SIZE] = [0; HEAP_SIZE]; unsafe { ALLOCATOR.init( &mut HEAP as *const u8 as usize, core::mem::size_of_val(&HEAP), ) } } fn configure_display() -> Screen { let peripherals = pac::Peripherals::take().unwrap(); let mut flash = peripherals.FLASH.constrain(); let mut rcc = peripherals.RCC.constrain(); let clocks = rcc.cfgr.freeze(&mut flash.acr); let mut gpiob = peripherals.GPIOB.split(&mut rcc.ahb); let mut scl = gpiob .pb6 .into_af4_open_drain(&mut gpiob.moder, &mut gpiob.otyper, &mut gpiob.afrl); let mut sda = gpiob .pb7 .into_af4_open_drain(&mut gpiob.moder, &mut gpiob.otyper, &mut gpiob.afrl); scl.internal_pull_up(&mut gpiob.pupdr, true); sda.internal_pull_up(&mut gpiob.pupdr, true); let i2c = I2c::new( peripherals.I2C1, (scl, sda), 400.kHz().try_into().unwrap(), clocks, &mut rcc.apb1, ); let interface = I2CDisplayInterface::new(i2c); let mut display = Ssd1306::new(interface, DisplaySize128x64, DisplayRotation::Rotate0) .into_buffered_graphics_mode(); display.init().unwrap(); display } fn animate(display: &mut Screen) -> ! { let size = display.size(); let mut ascending = true; let mut block_size: u32 = MIN_BLOCK_SIZE; loop { display.clear(); let widget = widget(block_size); widget.draw(display, Point::zero(), size).unwrap(); display.flush().unwrap(); if ascending && block_size == MAX_BLOCK_SIZE { ascending = false; } else if !ascending && block_size == MIN_BLOCK_SIZE { ascending = true } if ascending { block_size += BLOCK_SIZE_INCREMENT; } else { block_size -= BLOCK_SIZE_INCREMENT; } } } fn widget<Display>(block_size: u32) -> Container<Display> where Display: 'static + DrawTarget<Color = BinaryColor>, { Container::new() .alignment(Alignment::Center) .axis(Axis::Horizontal) .border(Border { color: BinaryColor::On, width: 1, }) .children(vec![ block(block_size).boxed(), block(MAX_BLOCK_SIZE - block_size).boxed(), block(block_size).boxed(), ]) .corner_radii(CornerRadii::new(Size::new(10, 10))) .justification(Justification::SpaceBetween) .padding(Insets::all(10)) .width(200) } fn block<Display>(size: u32) -> Container<Display> where Display: DrawTarget<Color = BinaryColor>, { Container::new() .background_color(BinaryColor::On) .width(size) .height(size) }
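// Illustrative refactoring sketch (not in the original): the direction/step logic inside
// `animate` can be lifted into a pure helper so it could be unit tested on a host target,
// away from the no_std display loop. The helper name is an assumption; the constants and the
// bounce behaviour are exactly the ones used above.
fn next_block_size(size: u32, ascending: bool) -> (u32, bool) {
    // Reverse direction once a limit is reached, then step by the configured increment.
    let ascending = if ascending && size == MAX_BLOCK_SIZE {
        false
    } else if !ascending && size == MIN_BLOCK_SIZE {
        true
    } else {
        ascending
    };
    let size = if ascending {
        size + BLOCK_SIZE_INCREMENT
    } else {
        size - BLOCK_SIZE_INCREMENT
    };
    (size, ascending)
}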
25.493827
90
0.606295
fbc294081fc5205f0869a7b8bb2079a5bb63e97a
41,544
use super::grammars::{ExternalToken, LexicalGrammar, SyntaxGrammar, VariableType}; use super::nfa::CharacterSet; use super::rules::{Alias, AliasMap, Symbol, SymbolType}; use super::tables::{ AdvanceAction, FieldLocation, LexState, LexTable, ParseAction, ParseTable, ParseTableEntry, }; use core::ops::Range; use hashbrown::{HashMap, HashSet}; use std::fmt::Write; use std::mem::swap; use tree_sitter::LANGUAGE_VERSION; macro_rules! add { ($this: tt, $($arg: tt)*) => {{ $this.buffer.write_fmt(format_args!($($arg)*)).unwrap(); }} } macro_rules! add_whitespace { ($this: tt) => {{ for _ in 0..$this.indent_level { write!(&mut $this.buffer, " ").unwrap(); } }}; } macro_rules! add_line { ($this: tt, $($arg: tt)*) => { add_whitespace!($this); $this.buffer.write_fmt(format_args!($($arg)*)).unwrap(); $this.buffer += "\n"; } } macro_rules! indent { ($this: tt) => { $this.indent_level += 1; }; } macro_rules! dedent { ($this: tt) => { assert_ne!($this.indent_level, 0); $this.indent_level -= 1; }; } struct Generator { buffer: String, indent_level: usize, language_name: String, parse_table: ParseTable, main_lex_table: LexTable, keyword_lex_table: LexTable, keyword_capture_token: Option<Symbol>, syntax_grammar: SyntaxGrammar, lexical_grammar: LexicalGrammar, simple_aliases: AliasMap, symbol_ids: HashMap<Symbol, String>, alias_ids: HashMap<Alias, String>, external_scanner_states: Vec<HashSet<usize>>, alias_map: HashMap<Alias, Option<Symbol>>, field_names: Vec<String>, } impl Generator { fn generate(mut self) -> String { self.init(); self.add_includes(); self.add_pragmas(); self.add_stats(); self.add_symbol_enum(); self.add_symbol_names_list(); self.add_symbol_metadata_list(); if !self.field_names.is_empty() { self.add_field_name_enum(); self.add_field_name_names_list(); self.add_field_sequences(); } if !self.alias_ids.is_empty() { self.add_alias_sequences(); } let mut main_lex_table = LexTable::default(); swap(&mut main_lex_table, &mut self.main_lex_table); self.add_lex_function("ts_lex", main_lex_table); if self.keyword_capture_token.is_some() { let mut keyword_lex_table = LexTable::default(); swap(&mut keyword_lex_table, &mut self.keyword_lex_table); self.add_lex_function("ts_lex_keywords", keyword_lex_table); } self.add_lex_modes_list(); if !self.syntax_grammar.external_tokens.is_empty() { self.add_external_token_enum(); self.add_external_scanner_symbol_map(); self.add_external_scanner_states_list(); } self.add_parse_table(); self.add_parser_export(); self.buffer } fn init(&mut self) { let mut symbol_identifiers = HashSet::new(); for i in 0..self.parse_table.symbols.len() { self.assign_symbol_id(self.parse_table.symbols[i], &mut symbol_identifiers); } let mut field_names = Vec::new(); for production_info in &self.parse_table.production_infos { for field_name in production_info.field_map.keys() { field_names.push(field_name); } for alias in &production_info.alias_sequence { if let Some(alias) = &alias { let alias_kind = if alias.is_named { VariableType::Named } else { VariableType::Anonymous }; let matching_symbol = self.parse_table.symbols.iter().cloned().find(|symbol| { let (name, kind) = self.metadata_for_symbol(*symbol); name == alias.value && kind == alias_kind }); let alias_id = if let Some(symbol) = matching_symbol { self.symbol_ids[&symbol].clone() } else if alias.is_named { format!("alias_sym_{}", self.sanitize_identifier(&alias.value)) } else { format!("anon_alias_sym_{}", self.sanitize_identifier(&alias.value)) }; self.alias_ids.entry(alias.clone()).or_insert(alias_id); self.alias_map 
.entry(alias.clone()) .or_insert(matching_symbol); } } } field_names.sort_unstable(); field_names.dedup(); self.field_names = field_names.into_iter().cloned().collect(); } fn add_includes(&mut self) { add_line!(self, "#include <tree_sitter/parser.h>"); add_line!(self, ""); } fn add_pragmas(&mut self) { add_line!(self, "#if defined(__GNUC__) || defined(__clang__)"); add_line!(self, "#pragma GCC diagnostic push"); add_line!( self, "#pragma GCC diagnostic ignored \"-Wmissing-field-initializers\"" ); add_line!(self, "#endif"); add_line!(self, ""); // Compiling large lexer functions can be very slow. Disabling optimizations // is not ideal, but only a very small fraction of overall parse time is // spent lexing, so the performance impact of this is negligible. if self.main_lex_table.states.len() > 300 { add_line!(self, "#ifdef _MSC_VER"); add_line!(self, "#pragma optimize(\"\", off)"); add_line!(self, "#elif defined(__clang__)"); add_line!(self, "#pragma clang optimize off"); add_line!(self, "#elif defined(__GNUC__)"); add_line!(self, "#pragma GCC optimize (\"O0\")"); add_line!(self, "#endif"); add_line!(self, ""); } } fn add_stats(&mut self) { let token_count = self .parse_table .symbols .iter() .filter(|symbol| { if symbol.is_terminal() || symbol.is_eof() { true } else if symbol.is_external() { self.syntax_grammar.external_tokens[symbol.index] .corresponding_internal_token .is_none() } else { false } }) .count(); add_line!(self, "#define LANGUAGE_VERSION {}", LANGUAGE_VERSION); add_line!( self, "#define STATE_COUNT {}", self.parse_table.states.len() ); add_line!( self, "#define SYMBOL_COUNT {}", self.parse_table.symbols.len() ); add_line!( self, "#define ALIAS_COUNT {}", self.alias_map.iter().filter(|e| e.1.is_none()).count() ); add_line!(self, "#define TOKEN_COUNT {}", token_count); add_line!( self, "#define EXTERNAL_TOKEN_COUNT {}", self.syntax_grammar.external_tokens.len() ); add_line!(self, "#define FIELD_COUNT {}", self.field_names.len()); add_line!( self, "#define MAX_ALIAS_SEQUENCE_LENGTH {}", self.parse_table.max_aliased_production_length ); add_line!(self, ""); } fn add_symbol_enum(&mut self) { add_line!(self, "enum {{"); indent!(self); let mut i = 1; for symbol in self.parse_table.symbols.iter() { if *symbol != Symbol::end() { add_line!(self, "{} = {},", self.symbol_ids[&symbol], i); i += 1; } } for (alias, symbol) in &self.alias_map { if symbol.is_none() { add_line!(self, "{} = {},", self.alias_ids[&alias], i); i += 1; } } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn add_symbol_names_list(&mut self) { add_line!(self, "static const char *ts_symbol_names[] = {{"); indent!(self); for symbol in self.parse_table.symbols.iter() { let name = self.sanitize_string( self.simple_aliases .get(symbol) .map(|alias| alias.value.as_str()) .unwrap_or(self.metadata_for_symbol(*symbol).0), ); add_line!(self, "[{}] = \"{}\",", self.symbol_ids[&symbol], name); } for (alias, symbol) in &self.alias_map { if symbol.is_none() { add_line!( self, "[{}] = \"{}\",", self.alias_ids[&alias], self.sanitize_string(&alias.value) ); } } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn add_field_name_enum(&mut self) { add_line!(self, "enum {{"); indent!(self); for (i, field_name) in self.field_names.iter().enumerate() { add_line!(self, "{} = {},", self.field_id(field_name), i + 1); } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn add_field_name_names_list(&mut self) { add_line!(self, "static const char *ts_field_names[] = {{"); indent!(self); add_line!(self, "[0] 
= NULL,"); for field_name in &self.field_names { add_line!( self, "[{}] = \"{}\",", self.field_id(field_name), field_name ); } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn add_symbol_metadata_list(&mut self) { add_line!( self, "static const TSSymbolMetadata ts_symbol_metadata[] = {{" ); indent!(self); for symbol in &self.parse_table.symbols { add_line!(self, "[{}] = {{", self.symbol_ids[&symbol]); indent!(self); if let Some(Alias { is_named, .. }) = self.simple_aliases.get(symbol) { add_line!(self, ".visible = true,"); add_line!(self, ".named = {},", is_named); } else { match self.metadata_for_symbol(*symbol).1 { VariableType::Named => { add_line!(self, ".visible = true,"); add_line!(self, ".named = true,"); } VariableType::Anonymous => { add_line!(self, ".visible = true,"); add_line!(self, ".named = false,"); } VariableType::Hidden => { add_line!(self, ".visible = false,"); add_line!(self, ".named = true,"); } VariableType::Auxiliary => { add_line!(self, ".visible = false,"); add_line!(self, ".named = false,"); } } } dedent!(self); add_line!(self, "}},"); } for (alias, matching_symbol) in &self.alias_map { if matching_symbol.is_none() { add_line!(self, "[{}] = {{", self.alias_ids[&alias]); indent!(self); add_line!(self, ".visible = true,"); add_line!(self, ".named = {},", alias.is_named); dedent!(self); add_line!(self, "}},"); } } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn add_alias_sequences(&mut self) { add_line!( self, "static TSSymbol ts_alias_sequences[{}][MAX_ALIAS_SEQUENCE_LENGTH] = {{", self.parse_table.production_infos.len() ); indent!(self); for (i, production_info) in self.parse_table.production_infos.iter().enumerate() { if production_info.alias_sequence.is_empty() { continue; } add_line!(self, "[{}] = {{", i); indent!(self); for (j, alias) in production_info.alias_sequence.iter().enumerate() { if let Some(alias) = alias { add_line!(self, "[{}] = {},", j, self.alias_ids[&alias]); } } dedent!(self); add_line!(self, "}},"); } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn add_field_sequences(&mut self) { let mut flat_field_maps = vec![]; let mut next_flat_field_map_index = 0; self.get_field_map_id( &Vec::new(), &mut flat_field_maps, &mut next_flat_field_map_index, ); let mut field_map_ids = Vec::new(); for production_info in &self.parse_table.production_infos { if !production_info.field_map.is_empty() { let mut flat_field_map = Vec::new(); for (field_name, locations) in &production_info.field_map { for location in locations { flat_field_map.push((field_name.clone(), *location)); } } field_map_ids.push(( self.get_field_map_id( &flat_field_map, &mut flat_field_maps, &mut next_flat_field_map_index, ), flat_field_map.len(), )); } else { field_map_ids.push((0, 0)); } } add_line!( self, "static const TSFieldMapSlice ts_field_map_slices[] = {{", ); indent!(self); for (production_id, (row_id, length)) in field_map_ids.into_iter().enumerate() { if length > 0 { add_line!( self, "[{}] = {{.index = {}, .length = {}}},", production_id, row_id, length ); } } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); add_line!( self, "static const TSFieldMapEntry ts_field_map_entries[] = {{", ); indent!(self); for (row_index, field_pairs) in flat_field_maps.into_iter().skip(1) { add_line!(self, "[{}] =", row_index); indent!(self); for (field_name, location) in field_pairs { add_whitespace!(self); add!(self, "{{{}, {}", self.field_id(&field_name), location.index); if location.inherited { add!(self, ", .inherited = true"); } add!(self, 
"}},\n"); } dedent!(self); } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn add_lex_function(&mut self, name: &str, lex_table: LexTable) { add_line!( self, "static bool {}(TSLexer *lexer, TSStateId state) {{", name ); indent!(self); add_line!(self, "START_LEXER();"); add_line!(self, "switch (state) {{"); indent!(self); for (i, state) in lex_table.states.into_iter().enumerate() { add_line!(self, "case {}:", i); indent!(self); self.add_lex_state(state); dedent!(self); } add_line!(self, "default:"); indent!(self); add_line!(self, "return false;"); dedent!(self); dedent!(self); add_line!(self, "}}"); dedent!(self); add_line!(self, "}}"); add_line!(self, ""); } fn add_lex_state(&mut self, state: LexState) { if let Some(accept_action) = state.accept_action { add_line!(self, "ACCEPT_TOKEN({});", self.symbol_ids[&accept_action]); } let mut ruled_out_characters = HashSet::new(); for (characters, action) in state.advance_actions { let previous_length = self.buffer.len(); add_whitespace!(self); add!(self, "if ("); if self.add_character_set_condition(&characters, &ruled_out_characters) { add!(self, ") "); self.add_advance_action(&action); if let CharacterSet::Include(chars) = characters { ruled_out_characters.extend(chars.iter().map(|c| *c as u32)); } } else { self.buffer.truncate(previous_length); self.add_advance_action(&action); } add!(self, "\n"); } add_line!(self, "END_STATE();"); } fn add_character_set_condition( &mut self, characters: &CharacterSet, ruled_out_characters: &HashSet<u32>, ) -> bool { match characters { CharacterSet::Include(chars) => { let ranges = Self::get_ranges(chars, ruled_out_characters); self.add_character_range_conditions(ranges, false) } CharacterSet::Exclude(chars) => { let ranges = Some('\0'..'\0') .into_iter() .chain(Self::get_ranges(chars, ruled_out_characters)); self.add_character_range_conditions(ranges, true) } } } fn add_character_range_conditions( &mut self, ranges: impl Iterator<Item = Range<char>>, is_negated: bool, ) -> bool { let line_break = "\n "; let mut did_add = false; for range in ranges { if is_negated { if did_add { add!(self, " &&{}", line_break); } if range.end == range.start { add!(self, "lookahead != "); self.add_character(range.start); } else if range.end as u32 == range.start as u32 + 1 { add!(self, "lookahead != "); self.add_character(range.start); add!(self, " &&{}lookahead != ", line_break); self.add_character(range.end); } else { add!(self, "(lookahead < "); self.add_character(range.start); add!(self, " || "); self.add_character(range.end); add!(self, " < lookahead)"); } } else { if did_add { add!(self, " ||{}", line_break); } if range.end == range.start { add!(self, "lookahead == "); self.add_character(range.start); } else if range.end as u32 == range.start as u32 + 1 { add!(self, "lookahead == "); self.add_character(range.start); add!(self, " ||{}lookahead == ", line_break); self.add_character(range.end); } else { add!(self, "("); self.add_character(range.start); add!(self, " <= lookahead && lookahead <= "); self.add_character(range.end); add!(self, ")"); } } did_add = true; } did_add } fn get_ranges<'a>( chars: &'a Vec<char>, ruled_out_characters: &'a HashSet<u32>, ) -> impl Iterator<Item = Range<char>> + 'a { let mut prev_range: Option<Range<char>> = None; chars .iter() .map(|c| (*c, false)) .chain(Some(('\0', true))) .filter_map(move |(c, done)| { if done { return prev_range.clone(); } if ruled_out_characters.contains(&(c as u32)) { return None; } if let Some(range) = prev_range.clone() { let mut prev_range_successor = 
range.end as u32 + 1; while prev_range_successor < c as u32 { if !ruled_out_characters.contains(&prev_range_successor) { prev_range = Some(c..c); return Some(range); } prev_range_successor += 1; } prev_range = Some(range.start..c); None } else { prev_range = Some(c..c); None } }) } fn add_advance_action(&mut self, action: &AdvanceAction) { if action.in_main_token { add!(self, "ADVANCE({});", action.state); } else { add!(self, "SKIP({})", action.state); } } fn add_lex_modes_list(&mut self) { self.get_external_scanner_state_id(HashSet::new()); let mut external_tokens_by_corresponding_internal_token = HashMap::new(); for (i, external_token) in self.syntax_grammar.external_tokens.iter().enumerate() { if let Some(symbol) = external_token.corresponding_internal_token { external_tokens_by_corresponding_internal_token.insert(symbol.index, i); } } add_line!(self, "static TSLexMode ts_lex_modes[STATE_COUNT] = {{"); indent!(self); for i in 0..self.parse_table.states.len() { let mut external_tokens = HashSet::new(); for token in self.parse_table.states[i].terminal_entries.keys() { if token.is_external() { external_tokens.insert(token.index); } else if token.is_terminal() { if let Some(external_index) = external_tokens_by_corresponding_internal_token.get(&token.index) { external_tokens.insert(*external_index); } } } let external_state_id = self.get_external_scanner_state_id(external_tokens); let state = &self.parse_table.states[i]; if external_state_id > 0 { add_line!( self, "[{}] = {{.lex_state = {}, .external_lex_state = {}}},", i, state.lex_state_id, external_state_id ); } else { add_line!(self, "[{}] = {{.lex_state = {}}},", i, state.lex_state_id); } } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn add_external_token_enum(&mut self) { add_line!(self, "enum {{"); indent!(self); for i in 0..self.syntax_grammar.external_tokens.len() { add_line!( self, "{} = {},", self.external_token_id(&self.syntax_grammar.external_tokens[i]), i ); } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn add_external_scanner_symbol_map(&mut self) { add_line!( self, "static TSSymbol ts_external_scanner_symbol_map[EXTERNAL_TOKEN_COUNT] = {{" ); indent!(self); for i in 0..self.syntax_grammar.external_tokens.len() { let token = &self.syntax_grammar.external_tokens[i]; let id_token = token .corresponding_internal_token .unwrap_or(Symbol::external(i)); add_line!( self, "[{}] = {},", self.external_token_id(&token), self.symbol_ids[&id_token], ); } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn add_external_scanner_states_list(&mut self) { add_line!( self, "static bool ts_external_scanner_states[{}][EXTERNAL_TOKEN_COUNT] = {{", self.external_scanner_states.len(), ); indent!(self); for i in 0..self.external_scanner_states.len() { if !self.external_scanner_states[i].is_empty() { add_line!(self, "[{}] = {{", i); indent!(self); for token_index in &self.external_scanner_states[i] { add_line!( self, "[{}] = true,", self.external_token_id(&self.syntax_grammar.external_tokens[*token_index]) ); } dedent!(self); add_line!(self, "}},"); } } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn add_parse_table(&mut self) { let mut parse_table_entries = Vec::new(); let mut next_parse_action_list_index = 0; self.get_parse_action_list_id( &ParseTableEntry { actions: Vec::new(), reusable: false, }, &mut parse_table_entries, &mut next_parse_action_list_index, ); add_line!( self, "static uint16_t ts_parse_table[STATE_COUNT][SYMBOL_COUNT] = {{" ); indent!(self); for (i, state) in 
self.parse_table.states.iter().enumerate() { add_line!(self, "[{}] = {{", i); indent!(self); for (symbol, state_id) in &state.nonterminal_entries { add_line!(self, "[{}] = STATE({}),", self.symbol_ids[symbol], state_id); } for (symbol, entry) in &state.terminal_entries { let entry_id = self.get_parse_action_list_id( entry, &mut parse_table_entries, &mut next_parse_action_list_index, ); add_line!( self, "[{}] = ACTIONS({}),", self.symbol_ids[symbol], entry_id ); } dedent!(self); add_line!(self, "}},"); } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); self.add_parse_action_list(parse_table_entries); } fn add_parse_action_list(&mut self, parse_table_entries: Vec<(usize, ParseTableEntry)>) { add_line!(self, "static TSParseActionEntry ts_parse_actions[] = {{"); indent!(self); for (i, entry) in parse_table_entries { add!( self, " [{}] = {{.count = {}, .reusable = {}}},", i, entry.actions.len(), entry.reusable ); for action in entry.actions { add!(self, " "); match action { ParseAction::Accept => add!(self, " ACCEPT_INPUT()"), ParseAction::Recover => add!(self, "RECOVER()"), ParseAction::ShiftExtra => add!(self, "SHIFT_EXTRA()"), ParseAction::Shift { state, is_repetition, } => { if is_repetition { add!(self, "SHIFT_REPEAT({})", state); } else { add!(self, "SHIFT({})", state); } } ParseAction::Reduce { symbol, child_count, dynamic_precedence, production_id, .. } => { add!(self, "REDUCE({}, {}", self.symbol_ids[&symbol], child_count); if dynamic_precedence != 0 { add!(self, ", .dynamic_precedence = {}", dynamic_precedence); } if production_id != 0 { add!(self, ", .production_id = {}", production_id); } add!(self, ")"); } } add!(self, ",") } add!(self, "\n"); } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn add_parser_export(&mut self) { let language_function_name = format!("tree_sitter_{}", self.language_name); let external_scanner_name = format!("{}_external_scanner", language_function_name); if !self.syntax_grammar.external_tokens.is_empty() { add_line!(self, "void *{}_create(void);", external_scanner_name); add_line!(self, "void {}_destroy(void *);", external_scanner_name); add_line!( self, "bool {}_scan(void *, TSLexer *, const bool *);", external_scanner_name ); add_line!( self, "unsigned {}_serialize(void *, char *);", external_scanner_name ); add_line!( self, "void {}_deserialize(void *, const char *, unsigned);", external_scanner_name ); add_line!(self, ""); } add_line!(self, "#ifdef _WIN32"); add_line!(self, "#define extern __declspec(dllexport)"); add_line!(self, "#endif"); add_line!(self, ""); add_line!( self, "extern const TSLanguage *{}(void) {{", language_function_name ); indent!(self); add_line!(self, "static TSLanguage language = {{"); indent!(self); add_line!(self, ".version = LANGUAGE_VERSION,"); add_line!(self, ".symbol_count = SYMBOL_COUNT,"); add_line!(self, ".alias_count = ALIAS_COUNT,"); add_line!(self, ".token_count = TOKEN_COUNT,"); add_line!(self, ".symbol_metadata = ts_symbol_metadata,"); add_line!( self, ".parse_table = (const unsigned short *)ts_parse_table," ); add_line!(self, ".parse_actions = ts_parse_actions,"); add_line!(self, ".lex_modes = ts_lex_modes,"); add_line!(self, ".symbol_names = ts_symbol_names,"); if !self.alias_ids.is_empty() { add_line!( self, ".alias_sequences = (const TSSymbol *)ts_alias_sequences," ); } add_line!(self, ".field_count = FIELD_COUNT,"); if !self.field_names.is_empty() { add_line!(self, ".field_names = ts_field_names,"); add_line!( self, ".field_map_slices = (const TSFieldMapSlice *)ts_field_map_slices," 
); add_line!( self, ".field_map_entries = (const TSFieldMapEntry *)ts_field_map_entries," ); } add_line!( self, ".max_alias_sequence_length = MAX_ALIAS_SEQUENCE_LENGTH," ); add_line!(self, ".lex_fn = ts_lex,"); if let Some(keyword_capture_token) = self.keyword_capture_token { add_line!(self, ".keyword_lex_fn = ts_lex_keywords,"); add_line!( self, ".keyword_capture_token = {},", self.symbol_ids[&keyword_capture_token] ); } add_line!(self, ".external_token_count = EXTERNAL_TOKEN_COUNT,"); if !self.syntax_grammar.external_tokens.is_empty() { add_line!(self, ".external_scanner = {{"); indent!(self); add_line!(self, "(const bool *)ts_external_scanner_states,"); add_line!(self, "ts_external_scanner_symbol_map,"); add_line!(self, "{}_create,", external_scanner_name); add_line!(self, "{}_destroy,", external_scanner_name); add_line!(self, "{}_scan,", external_scanner_name); add_line!(self, "{}_serialize,", external_scanner_name); add_line!(self, "{}_deserialize,", external_scanner_name); dedent!(self); add_line!(self, "}},"); } dedent!(self); add_line!(self, "}};"); add_line!(self, "return &language;"); dedent!(self); add_line!(self, "}}"); } fn get_parse_action_list_id( &self, entry: &ParseTableEntry, parse_table_entries: &mut Vec<(usize, ParseTableEntry)>, next_parse_action_list_index: &mut usize, ) -> usize { if let Some((index, _)) = parse_table_entries.iter().find(|(_, e)| *e == *entry) { return *index; } let result = *next_parse_action_list_index; parse_table_entries.push((result, entry.clone())); *next_parse_action_list_index += 1 + entry.actions.len(); result } fn get_field_map_id( &self, flat_field_map: &Vec<(String, FieldLocation)>, flat_field_maps: &mut Vec<(usize, Vec<(String, FieldLocation)>)>, next_flat_field_map_index: &mut usize, ) -> usize { if let Some((index, _)) = flat_field_maps.iter().find(|(_, e)| *e == *flat_field_map) { return *index; } let result = *next_flat_field_map_index; flat_field_maps.push((result, flat_field_map.clone())); *next_flat_field_map_index += flat_field_map.len(); result } fn get_external_scanner_state_id(&mut self, external_tokens: HashSet<usize>) -> usize { self.external_scanner_states .iter() .position(|tokens| *tokens == external_tokens) .unwrap_or_else(|| { self.external_scanner_states.push(external_tokens); self.external_scanner_states.len() - 1 }) } fn external_token_id(&self, token: &ExternalToken) -> String { format!( "ts_external_token_{}", self.sanitize_identifier(&token.name) ) } fn assign_symbol_id(&mut self, symbol: Symbol, used_identifiers: &mut HashSet<String>) { let mut id; if symbol == Symbol::end() { id = "ts_builtin_sym_end".to_string(); } else { let (name, kind) = self.metadata_for_symbol(symbol); id = match kind { VariableType::Auxiliary => format!("aux_sym_{}", self.sanitize_identifier(name)), VariableType::Anonymous => format!("anon_sym_{}", self.sanitize_identifier(name)), VariableType::Hidden | VariableType::Named => { format!("sym_{}", self.sanitize_identifier(name)) } }; let mut suffix_number = 1; let mut suffix = String::new(); while used_identifiers.contains(&id) { id.drain(id.len() - suffix.len()..); suffix_number += 1; suffix = suffix_number.to_string(); id += &suffix; } } used_identifiers.insert(id.clone()); self.symbol_ids.insert(symbol, id); } fn field_id(&self, field_name: &String) -> String { format!("field_{}", field_name) } fn metadata_for_symbol(&self, symbol: Symbol) -> (&str, VariableType) { match symbol.kind { SymbolType::End => ("end", VariableType::Hidden), SymbolType::NonTerminal => { let variable = 
&self.syntax_grammar.variables[symbol.index]; (&variable.name, variable.kind) } SymbolType::Terminal => { let variable = &self.lexical_grammar.variables[symbol.index]; (&variable.name, variable.kind) } SymbolType::External => { let token = &self.syntax_grammar.external_tokens[symbol.index]; (&token.name, token.kind) } } } fn sanitize_identifier(&self, name: &str) -> String { let mut result = String::with_capacity(name.len()); for c in name.chars() { if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_' { result.push(c); } else { let replacement = match c { '~' => "TILDE", '`' => "BQUOTE", '!' => "BANG", '@' => "AT", '#' => "POUND", '$' => "DOLLAR", '%' => "PERCENT", '^' => "CARET", '&' => "AMP", '*' => "STAR", '(' => "LPAREN", ')' => "RPAREN", '-' => "DASH", '+' => "PLUS", '=' => "EQ", '{' => "LBRACE", '}' => "RBRACE", '[' => "LBRACK", ']' => "RBRACK", '\\' => "BSLASH", '|' => "PIPE", ':' => "COLON", ';' => "SEMI", '"' => "DQUOTE", '\'' => "SQUOTE", '<' => "LT", '>' => "GT", ',' => "COMMA", '.' => "DOT", '?' => "QMARK", '/' => "SLASH", '\n' => "LF", '\r' => "CR", '\t' => "TAB", _ => continue, }; if !result.is_empty() && !result.ends_with("_") { result.push('_'); } result += replacement; } } result } fn sanitize_string(&self, name: &str) -> String { let mut result = String::with_capacity(name.len()); for c in name.chars() { match c { '\"' => result += "\\\"", '\\' => result += "\\\\", '\t' => result += "\\t", '\n' => result += "\\n", '\r' => result += "\\r", _ => result.push(c), } } result } fn add_character(&mut self, c: char) { if c.is_ascii() { match c { '\0' => add!(self, "0"), '\'' => add!(self, "'\\''"), '\\' => add!(self, "'\\\\'"), '\t' => add!(self, "'\\t'"), '\n' => add!(self, "'\\n'"), '\r' => add!(self, "'\\r'"), _ => add!(self, "'{}'", c), } } else { add!(self, "{}", c as u32) } } } pub(crate) fn render_c_code( name: &str, parse_table: ParseTable, main_lex_table: LexTable, keyword_lex_table: LexTable, keyword_capture_token: Option<Symbol>, syntax_grammar: SyntaxGrammar, lexical_grammar: LexicalGrammar, simple_aliases: AliasMap, ) -> String { Generator { buffer: String::new(), indent_level: 0, language_name: name.to_string(), parse_table, main_lex_table, keyword_lex_table, keyword_capture_token, syntax_grammar, lexical_grammar, simple_aliases, symbol_ids: HashMap::new(), alias_ids: HashMap::new(), external_scanner_states: Vec::new(), alias_map: HashMap::new(), field_names: Vec::new(), } .generate() } #[cfg(test)] mod tests { use super::*; #[test] fn test_get_char_ranges() { struct Row { chars: Vec<char>, ruled_out_chars: Vec<char>, expected_ranges: Vec<Range<char>>, } let table = [ Row { chars: vec!['a'], ruled_out_chars: vec![], expected_ranges: vec!['a'..'a'], }, Row { chars: vec!['a', 'b', 'c', 'e', 'z'], ruled_out_chars: vec![], expected_ranges: vec!['a'..'c', 'e'..'e', 'z'..'z'], }, Row { chars: vec!['a', 'b', 'c', 'e', 'h', 'z'], ruled_out_chars: vec!['d', 'f', 'g'], expected_ranges: vec!['a'..'h', 'z'..'z'], }, ]; for Row { chars, ruled_out_chars, expected_ranges, } in table.iter() { let ruled_out_chars = ruled_out_chars .into_iter() .map(|c: &char| *c as u32) .collect(); let ranges = Generator::get_ranges(chars, &ruled_out_chars).collect::<Vec<_>>(); assert_eq!(ranges, *expected_ranges); } } }
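// Illustrative restatement (not in the original): the collision handling in
// `assign_symbol_id` above grows a numeric suffix until a sanitized C identifier is unique,
// e.g. "anon_sym_EQ", then "anon_sym_EQ2", "anon_sym_EQ3" for later collisions. The free
// function below is a self-contained sketch of that loop; the real logic lives on `Generator`.
fn unique_identifier(mut id: String, used_identifiers: &mut HashSet<String>) -> String {
    let mut suffix_number = 1;
    let mut suffix = String::new();
    while used_identifiers.contains(&id) {
        // Strip the previous suffix (if any) and try the next number.
        id.drain(id.len() - suffix.len()..);
        suffix_number += 1;
        suffix = suffix_number.to_string();
        id += &suffix;
    }
    used_identifiers.insert(id.clone());
    id
}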
34.248969
98
0.467601
cccae303889f2f774ce53117b51e20c1ff96e9e7
1,219
// CATCHUp query experiments with pregenerated queries with source and target drawn uniformly at random. // Takes as input one directory arg which should contain the all data. use std::{env, error::Error, path::Path}; use rust_road_router::{ algo::customizable_contraction_hierarchy::*, cli::CliErr, datastr::graph::floating_time_dependent::{shortcut_graph::CustomizedGraphReconstrctor, *}, io::*, }; fn main() -> Result<(), Box<dyn Error>> { let arg = &env::args().skip(1).next().ok_or(CliErr("No directory arg given"))?; let path = Path::new(arg); let graph = TDGraph::reconstruct_from(&path)?; let cch_folder = path.join("cch"); let cch = CCHReconstrctor(&graph).reconstruct_from(&cch_folder)?; let customized_folder = path.join("customized"); let td_cch_graph = CustomizedGraphReconstrctor { original_graph: &graph, first_out: cch.first_out(), head: cch.head(), } .reconstruct_from(&customized_folder)?; let (unique_down, unique_up) = td_cch_graph.unique_path_edges(); eprintln!("down: {}/{}", unique_down.count_ones(), unique_down.len()); eprintln!("up: {}/{}", unique_up.count_ones(), unique_up.len()); Ok(()) }
32.945946
104
0.674323
891c6656d66b62b11d0475b0d637d27b35d7d86e
15,913
/* * MIT License * * Copyright (c) 2020 Reto Achermann * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * SPDX-License-Identifier: MIT */ /*********************************************************************************************** * *** * * !!!! WARNING: THIS FILE IS AUTO GENERATED. ANY CHANGES MAY BE OVERWRITTEN !!!! * * Generated on: 2020-10-05T16:49:32.074971 * Version: Armv8.7-A-2020-09 * Source: https://developer.arm.com/-/media/developer/products/architecture/armv8-a-architecture/2020-09/SysReg_xml_v87A-2020-09.tar.gz * * !!!! WARNING: THIS FILE IS AUTO GENERATED. ANY CHANGES MAY BE OVERWRITTEN !!!! * ********************************************************************************************** * * */ /* * ================================================================================================ * Register Information * ================================================================================================ * * Register: Saved Program Status Register (EL2) (spsr_el2) * Group: Special-purpose registers * Type: 64-bit Register * Description: Holds the saved process state when an exception is taken to EL2. 
* File: AArch64-spsr_el2.xml */ /* * ================================================================================================ * Register Read/Write Functions * ================================================================================================ */ /// reading the Saved Program Status Register (EL2) (spsr_el2) register pub fn reg_rawrd() -> u64 { let mut regval: u64; unsafe { // MRS <Xt>, SPSR_EL2 llvm_asm!("mrs $0, spsr_el2" : "=r"(regval)); } return regval; } /// writing the Saved Program Status Register (EL2) (spsr_el2) register pub fn reg_rawwr(val: u64) { unsafe { // MSR SPSR_EL2, <Xt> llvm_asm!("msr spsr_el2, $0" : : "r"(val)); } } /* * ================================================================================================ * Register Fields Read/Write Functions * ================================================================================================ */ /// reads field val from register pub fn n_read() -> u64 { // bits 31..31 let val = reg_rawrd(); (val >> 31) & 0x1 } /// inserts field val into register pub fn n_write(newval: u64) { // bits 31..31 let val = reg_rawrd(); reg_rawwr(val & !(0x1 << 31) | ((newval & 0x1) << 31)); } /// reads field val from register pub fn z_read() -> u64 { // bits 30..30 let val = reg_rawrd(); (val >> 30) & 0x1 } /// inserts field val into register pub fn z_write(newval: u64) { // bits 30..30 let val = reg_rawrd(); reg_rawwr(val & !(0x1 << 30) | ((newval & 0x1) << 30)); } /// reads field val from register pub fn c_read() -> u64 { // bits 29..29 let val = reg_rawrd(); (val >> 29) & 0x1 } /// inserts field val into register pub fn c_write(newval: u64) { // bits 29..29 let val = reg_rawrd(); reg_rawwr(val & !(0x1 << 29) | ((newval & 0x1) << 29)); } /// reads field val from register pub fn v_read() -> u64 { // bits 28..28 let val = reg_rawrd(); (val >> 28) & 0x1 } /// inserts field val into register pub fn v_write(newval: u64) { // bits 28..28 let val = reg_rawrd(); reg_rawwr(val & !(0x1 << 28) | ((newval & 0x1) << 28)); } /// reads field val from register pub fn tco_1_read() -> u64 { // bits 25..25 let val = reg_rawrd(); (val >> 25) & 0x1 } /// inserts field val into register pub fn tco_1_write(newval: u64) { // bits 25..25 let val = reg_rawrd(); reg_rawwr(val & !(0x1 << 25) | ((newval & 0x1) << 25)); } /// reads field val from register pub fn dit_1_read() -> u64 { // bits 24..24 let val = reg_rawrd(); (val >> 24) & 0x1 } /// inserts field val into register pub fn dit_1_write(newval: u64) { // bits 24..24 let val = reg_rawrd(); reg_rawwr(val & !(0x1 << 24) | ((newval & 0x1) << 24)); } /// reads field val from register pub fn uao_1_read() -> u64 { // bits 23..23 let val = reg_rawrd(); (val >> 23) & 0x1 } /// inserts field val into register pub fn uao_1_write(newval: u64) { // bits 23..23 let val = reg_rawrd(); reg_rawwr(val & !(0x1 << 23) | ((newval & 0x1) << 23)); } /// reads field val from register pub fn pan_1_read() -> u64 { // bits 22..22 let val = reg_rawrd(); (val >> 22) & 0x1 } /// inserts field val into register pub fn pan_1_write(newval: u64) { // bits 22..22 let val = reg_rawrd(); reg_rawwr(val & !(0x1 << 22) | ((newval & 0x1) << 22)); } /// reads field val from register pub fn ss_read() -> u64 { // bits 21..21 let val = reg_rawrd(); (val >> 21) & 0x1 } /// inserts field val into register pub fn ss_write(newval: u64) { // bits 21..21 let val = reg_rawrd(); reg_rawwr(val & !(0x1 << 21) | ((newval & 0x1) << 21)); } /// reads field val from register pub fn il_read() -> u64 { // bits 20..20 let val = reg_rawrd(); 
(val >> 20) & 0x1 } /// inserts field val into register pub fn il_write(newval: u64) { // bits 20..20 let val = reg_rawrd(); reg_rawwr(val & !(0x1 << 20) | ((newval & 0x1) << 20)); } /// reads field val from register pub fn ssbs_1_read() -> u64 { // bits 12..12 let val = reg_rawrd(); (val >> 12) & 0x1 } /// inserts field val into register pub fn ssbs_1_write(newval: u64) { // bits 12..12 let val = reg_rawrd(); reg_rawwr(val & !(0x1 << 12) | ((newval & 0x1) << 12)); } /// reads field val from register pub fn btype_1_read() -> u64 { // bits 10..11 let val = reg_rawrd(); (val >> 10) & 0x3 } /// inserts field val into register pub fn btype_1_write(newval: u64) { // bits 10..11 let val = reg_rawrd(); reg_rawwr(val & !(0x3 << 10) | ((newval & 0x3) << 10)); } /// reads field val from register pub fn d_read() -> u64 { // bits 9..9 let val = reg_rawrd(); (val >> 9) & 0x1 } /// inserts field val into register pub fn d_write(newval: u64) { // bits 9..9 let val = reg_rawrd(); reg_rawwr(val & !(0x1 << 9) | ((newval & 0x1) << 9)); } /// reads field val from register pub fn a_read() -> u64 { // bits 8..8 let val = reg_rawrd(); (val >> 8) & 0x1 } /// inserts field val into register pub fn a_write(newval: u64) { // bits 8..8 let val = reg_rawrd(); reg_rawwr(val & !(0x1 << 8) | ((newval & 0x1) << 8)); } /// reads field val from register pub fn i_read() -> u64 { // bits 7..7 let val = reg_rawrd(); (val >> 7) & 0x1 } /// inserts field val into register pub fn i_write(newval: u64) { // bits 7..7 let val = reg_rawrd(); reg_rawwr(val & !(0x1 << 7) | ((newval & 0x1) << 7)); } /// reads field val from register pub fn f_read() -> u64 { // bits 6..6 let val = reg_rawrd(); (val >> 6) & 0x1 } /// inserts field val into register pub fn f_write(newval: u64) { // bits 6..6 let val = reg_rawrd(); reg_rawwr(val & !(0x1 << 6) | ((newval & 0x1) << 6)); } /// reads field val from register pub fn m4_read() -> u64 { // bits 4..4 let val = reg_rawrd(); (val >> 4) & 0x1 } /// inserts field val into register pub fn m4_write(newval: u64) { // bits 4..4 let val = reg_rawrd(); reg_rawwr(val & !(0x1 << 4) | ((newval & 0x1) << 4)); } /// reads field val from register pub fn m30_read() -> u64 { // bits 0..3 let val = reg_rawrd(); (val >> 0) & 0xf } /// inserts field val into register pub fn m30_write(newval: u64) { // bits 0..3 let val = reg_rawrd(); reg_rawwr(val & !(0xf << 0) | ((newval & 0xf) << 0)); } /* * ================================================================================================ * Data Structure Definitions * ================================================================================================ */ /// struct holding a copy of the Saved Program Status Register (EL2) value in memory pub struct RegVal { val: u64, } /// struct implementation for accessing the fields of register spsr_el2 impl RegVal { // creates a new default value pub fn default() -> RegVal { RegVal { val: 0 } } /// inserts field val into current value pub fn current(&mut self) -> RegVal { let curval = reg_rawrd() & 0xf3f01fdf; RegVal { val: curval } } /// extracts field val from current value pub fn read(&mut self) { self.val = reg_rawrd() & 0xf3f01fdf } /// inserts field val into current value pub fn write(&self) { reg_rawwr(self.val & 0xf3f01fdf) } // sets the value of the struct pub fn set(&mut self, newval: u64) { self.val = newval & 4092600287; } // gets the value of the struct pub fn get(&self) -> u64 { self.val } /// extracts field val from current value pub fn n_extract(&mut self) -> u64 { // bits 31..31 (self.val >> 31) & 0x1 } 
/// inserts field val into current value pub fn n_insert(&mut self, val: u64) { // bits 31..31 self.val = self.val & !(0x1 << 31) | ((val & 0x1) << 31); } /// extracts field val from current value pub fn z_extract(&mut self) -> u64 { // bits 30..30 (self.val >> 30) & 0x1 } /// inserts field val into current value pub fn z_insert(&mut self, val: u64) { // bits 30..30 self.val = self.val & !(0x1 << 30) | ((val & 0x1) << 30); } /// extracts field val from current value pub fn c_extract(&mut self) -> u64 { // bits 29..29 (self.val >> 29) & 0x1 } /// inserts field val into current value pub fn c_insert(&mut self, val: u64) { // bits 29..29 self.val = self.val & !(0x1 << 29) | ((val & 0x1) << 29); } /// extracts field val from current value pub fn v_extract(&mut self) -> u64 { // bits 28..28 (self.val >> 28) & 0x1 } /// inserts field val into current value pub fn v_insert(&mut self, val: u64) { // bits 28..28 self.val = self.val & !(0x1 << 28) | ((val & 0x1) << 28); } /// extracts field val from current value pub fn tco_1_extract(&mut self) -> u64 { // bits 25..25 (self.val >> 25) & 0x1 } /// inserts field val into current value pub fn tco_1_insert(&mut self, val: u64) { // bits 25..25 self.val = self.val & !(0x1 << 25) | ((val & 0x1) << 25); } /// extracts field val from current value pub fn dit_1_extract(&mut self) -> u64 { // bits 24..24 (self.val >> 24) & 0x1 } /// inserts field val into current value pub fn dit_1_insert(&mut self, val: u64) { // bits 24..24 self.val = self.val & !(0x1 << 24) | ((val & 0x1) << 24); } /// extracts field val from current value pub fn uao_1_extract(&mut self) -> u64 { // bits 23..23 (self.val >> 23) & 0x1 } /// inserts field val into current value pub fn uao_1_insert(&mut self, val: u64) { // bits 23..23 self.val = self.val & !(0x1 << 23) | ((val & 0x1) << 23); } /// extracts field val from current value pub fn pan_1_extract(&mut self) -> u64 { // bits 22..22 (self.val >> 22) & 0x1 } /// inserts field val into current value pub fn pan_1_insert(&mut self, val: u64) { // bits 22..22 self.val = self.val & !(0x1 << 22) | ((val & 0x1) << 22); } /// extracts field val from current value pub fn ss_extract(&mut self) -> u64 { // bits 21..21 (self.val >> 21) & 0x1 } /// inserts field val into current value pub fn ss_insert(&mut self, val: u64) { // bits 21..21 self.val = self.val & !(0x1 << 21) | ((val & 0x1) << 21); } /// extracts field val from current value pub fn il_extract(&mut self) -> u64 { // bits 20..20 (self.val >> 20) & 0x1 } /// inserts field val into current value pub fn il_insert(&mut self, val: u64) { // bits 20..20 self.val = self.val & !(0x1 << 20) | ((val & 0x1) << 20); } /// extracts field val from current value pub fn ssbs_1_extract(&mut self) -> u64 { // bits 12..12 (self.val >> 12) & 0x1 } /// inserts field val into current value pub fn ssbs_1_insert(&mut self, val: u64) { // bits 12..12 self.val = self.val & !(0x1 << 12) | ((val & 0x1) << 12); } /// extracts field val from current value pub fn btype_1_extract(&mut self) -> u64 { // bits 10..11 (self.val >> 10) & 0x3 } /// inserts field val into current value pub fn btype_1_insert(&mut self, val: u64) { // bits 10..11 self.val = self.val & !(0x3 << 10) | ((val & 0x3) << 10); } /// extracts field val from current value pub fn d_extract(&mut self) -> u64 { // bits 9..9 (self.val >> 9) & 0x1 } /// inserts field val into current value pub fn d_insert(&mut self, val: u64) { // bits 9..9 self.val = self.val & !(0x1 << 9) | ((val & 0x1) << 9); } /// extracts field val from current value pub fn 
a_extract(&mut self) -> u64 { // bits 8..8 (self.val >> 8) & 0x1 } /// inserts field val into current value pub fn a_insert(&mut self, val: u64) { // bits 8..8 self.val = self.val & !(0x1 << 8) | ((val & 0x1) << 8); } /// extracts field val from current value pub fn i_extract(&mut self) -> u64 { // bits 7..7 (self.val >> 7) & 0x1 } /// inserts field val into current value pub fn i_insert(&mut self, val: u64) { // bits 7..7 self.val = self.val & !(0x1 << 7) | ((val & 0x1) << 7); } /// extracts field val from current value pub fn f_extract(&mut self) -> u64 { // bits 6..6 (self.val >> 6) & 0x1 } /// inserts field val into current value pub fn f_insert(&mut self, val: u64) { // bits 6..6 self.val = self.val & !(0x1 << 6) | ((val & 0x1) << 6); } /// extracts field val from current value pub fn m4_extract(&mut self) -> u64 { // bits 4..4 (self.val >> 4) & 0x1 } /// inserts field val into current value pub fn m4_insert(&mut self, val: u64) { // bits 4..4 self.val = self.val & !(0x1 << 4) | ((val & 0x1) << 4); } /// extracts field val from current value pub fn m30_extract(&mut self) -> u64 { // bits 0..3 (self.val >> 0) & 0xf } /// inserts field val into current value pub fn m30_insert(&mut self, val: u64) { // bits 0..3 self.val = self.val & !(0xf << 0) | ((val & 0xf) << 0); } }
25.874797
136
0.526299
2f28c94c43667488ac2c6c7eda2ce12e87da5a06
1,450
// Copyright (C) 2019 Martin Mroz
//
// This software may be modified and distributed under the terms
// of the MIT license. See the LICENSE file for details.

use std::error;
use std::fmt;
use std::result;

#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
pub enum Error {
    /// The BCBP string does not contain exclusively ASCII characters.
    InvalidCharacters,
    /// The BCBP format is not supported.
    UnsupportedFormat,
    /// The end of otherwise-valid IATA BCBP data was reached prematurely.
    UnexpectedEndOfInput,
    /// Parsing the encoded data failed.
    ParseFailed(String),
    /// After successfully parsing a BCBP object, additional characters remain.
    TrailingCharacters,
}

impl error::Error for Error {}

impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            &Error::InvalidCharacters => write!(f, "non-ASCII characters"),
            &Error::UnsupportedFormat => write!(f, "not an IATA BCBP Type M boarding pass"),
            &Error::UnexpectedEndOfInput => write!(f, "unexpected end-of-input"),
            &Error::ParseFailed(ref reason) => write!(f, "parse failed: {}", reason),
            &Error::TrailingCharacters => write!(f, "input includes data after a valid boarding pass"),
        }
    }
}

pub type Result<T> = result::Result<T, Error>;
32.954545
79
0.626897
1db97d5d36149d28aedc8196fb70850cca251d68
4,266
// The From trait is used for value-to-value conversions. // If From is implemented correctly for a type, the Into trait should work conversely. // You can read more about it at https://doc.rust-lang.org/std/convert/trait.From.html #[derive(Debug)] struct Person { name: String, age: usize, } // We implement the Default trait to use it as a fallback // when the provided string is not convertible into a Person object impl Default for Person { fn default() -> Person { Person { name: String::from("John"), age: 30, } } } // Your task is to complete this implementation // in order for the line `let p = Person::from("Mark,20")` to compile // Please note that you'll need to parse the age component into a `usize` // with something like `"4".parse::<usize>()`. The outcome of this needs to // be handled appropriately. // // Steps: // 1. If the length of the provided string is 0, then return the default of Person // 2. Split the given string on the commas present in it // 3. Extract the first element from the split operation and use it as the name // 4. If the name is empty, then return the default of Person // 5. Extract the other element from the split operation and parse it into a `usize` as the age // If while parsing the age, something goes wrong, then return the default of Person // Otherwise, then return an instantiated Person object with the results impl From<&str> for Person { fn from(s: &str) -> Person { let result: Person; let p: Vec<&str> = s.split(",").collect(); if p.len() == 2 { let name = p[0]; let age = p[1].parse::<usize>(); if name.len() != 0 && age.is_ok() { result = Person { name: String::from(name), age: age.unwrap() } } else { result = Person::default(); } } else { result = Person::default(); } result } } fn main() { // Use the `from` function let p1 = Person::from("Mark,20"); // Since From is implemented for Person, we should be able to use Into let p2: Person = "Gerald,70".into(); println!("{:?}", p1); println!("{:?}", p2); } #[cfg(test)] mod tests { use super::*; #[test] fn test_default() { // Test that the default person is 30 year old John let dp = Person::default(); assert_eq!(dp.name, "John"); assert_eq!(dp.age, 30); } #[test] fn test_bad_convert() { // Test that John is returned when bad string is provided let p = Person::from(""); assert_eq!(p.name, "John"); assert_eq!(p.age, 30); } #[test] fn test_good_convert() { // Test that "Mark,20" works let p = Person::from("Mark,20"); assert_eq!(p.name, "Mark"); assert_eq!(p.age, 20); } #[test] fn test_bad_age() { // Test that "Mark,twenty" will return the default person due to an error in parsing age let p = Person::from("Mark,twenty"); assert_eq!(p.name, "John"); assert_eq!(p.age, 30); } #[test] fn test_missing_comma_and_age() { let p: Person = Person::from("Mark"); assert_eq!(p.name, "John"); assert_eq!(p.age, 30); } #[test] fn test_missing_age() { let p: Person = Person::from("Mark,"); assert_eq!(p.name, "John"); assert_eq!(p.age, 30); } #[test] fn test_missing_name() { let p: Person = Person::from(",1"); assert_eq!(p.name, "John"); assert_eq!(p.age, 30); } #[test] fn test_missing_name_and_age() { let p: Person = Person::from(","); assert_eq!(p.name, "John"); assert_eq!(p.age, 30); } #[test] fn test_missing_name_and_invalid_age() { let p: Person = Person::from(",one"); assert_eq!(p.name, "John"); assert_eq!(p.age, 30); } #[test] fn test_trailing_comma() { let p: Person = Person::from("Mike,32,"); assert_eq!(p.name, "John"); assert_eq!(p.age, 30); } #[test] fn test_trailing_comma_and_some_string() { let p: Person = 
Person::from("Mike,32,man"); assert_eq!(p.name, "John"); assert_eq!(p.age, 30); } }
29.832168
96
0.576653
6ae3bc285dfc37dcd18aeac06b760d62a6a54eeb
708
use algebra_core::Field;
use r1cs_core::{ConstraintSystem, SynthesisError};
use r1cs_std::prelude::*;

use crate::signature::SignatureScheme;

pub trait SigRandomizePkGadget<S: SignatureScheme, ConstraintF: Field> {
    type ParametersGadget: AllocGadget<S::Parameters, ConstraintF> + Clone;

    type PublicKeyGadget: ToBytesGadget<ConstraintF>
        + EqGadget<ConstraintF>
        + AllocGadget<S::PublicKey, ConstraintF>
        + Clone;

    fn check_randomization_gadget<CS: ConstraintSystem<ConstraintF>>(
        cs: CS,
        parameters: &Self::ParametersGadget,
        public_key: &Self::PublicKeyGadget,
        randomness: &[UInt8],
    ) -> Result<Self::PublicKeyGadget, SynthesisError>;
}
32.181818
75
0.710452
de4b8dea115b4bb514a8b87477347f962c62095d
10,790
/* This tool is part of the WhiteboxTools geospatial analysis library. Authors: Dr. John Lindsay Created: 02/07/2017 Last Modified: 13/10/2018 License: MIT */ use whitebox_raster::*; use crate::tools::*; use num_cpus; use std::env; use std::f64; use std::io::{Error, ErrorKind}; use std::path; use std::sync::mpsc; use std::sync::Arc; use std::thread; /// This tool is a Boolean **NOT** operator, i.e. it works on *True* or *False* (1 and 0) values. Grid cells for which /// the first input raster (`--input1`) has a *True* value and the second raster (`--input2`) has a *False* value are assigned /// 0 in the output raster, otherwise grid cells are assigned a value of 0. All non-zero values in the input /// rasters are considered to be *True*, while all zero-valued grid cells are considered to be *False*. Grid /// cells containing **NoData** values in either of the input rasters will be assigned a **NoData** value in /// the output raster (`--output`). Notice that the **Not** operator is asymmetrical, and the order of inputs matters. /// /// # See Also /// `And`, `Or`, `Xor` pub struct Not { name: String, description: String, toolbox: String, parameters: Vec<ToolParameter>, example_usage: String, } impl Not { pub fn new() -> Not { // public constructor let name = "Not".to_string(); let toolbox = "Math and Stats Tools".to_string(); let description = "Performs a logical NOT operator on two Boolean raster images.".to_string(); let mut parameters = vec![]; parameters.push(ToolParameter { name: "Input File".to_owned(), flags: vec!["--input1".to_owned()], description: "Input raster file.".to_owned(), parameter_type: ParameterType::ExistingFile(ParameterFileType::Raster), default_value: None, optional: false, }); parameters.push(ToolParameter { name: "Input File".to_owned(), flags: vec!["--input2".to_owned()], description: "Input raster file.".to_owned(), parameter_type: ParameterType::ExistingFile(ParameterFileType::Raster), default_value: None, optional: false, }); parameters.push(ToolParameter { name: "Output File".to_owned(), flags: vec!["-o".to_owned(), "--output".to_owned()], description: "Output raster file.".to_owned(), parameter_type: ParameterType::NewFile(ParameterFileType::Raster), default_value: None, optional: false, }); let sep: String = path::MAIN_SEPARATOR.to_string(); let p = format!("{}", env::current_dir().unwrap().display()); let e = format!("{}", env::current_exe().unwrap().display()); let mut short_exe = e .replace(&p, "") .replace(".exe", "") .replace(".", "") .replace(&sep, ""); if e.contains(".exe") { short_exe += ".exe"; } let usage = format!(">>.*{0} -r={1} -v --wd=\"*path*to*data*\" --input1='in1.tif' --input2='in2.tif' -o=output.tif", short_exe, name).replace("*", &sep); Not { name: name, description: description, toolbox: toolbox, parameters: parameters, example_usage: usage, } } } impl WhiteboxTool for Not { fn get_source_file(&self) -> String { String::from(file!()) } fn get_tool_name(&self) -> String { self.name.clone() } fn get_tool_description(&self) -> String { self.description.clone() } fn get_tool_parameters(&self) -> String { let mut s = String::from("{\"parameters\": ["); for i in 0..self.parameters.len() { if i < self.parameters.len() - 1 { s.push_str(&(self.parameters[i].to_string())); s.push_str(","); } else { s.push_str(&(self.parameters[i].to_string())); } } s.push_str("]}"); s } fn get_example_usage(&self) -> String { self.example_usage.clone() } fn get_toolbox(&self) -> String { self.toolbox.clone() } fn run<'a>( &self, args: Vec<String>, 
working_directory: &'a str, verbose: bool, ) -> Result<(), Error> { let mut input1 = String::new(); let mut input2 = String::new(); let mut output_file = String::new(); if args.len() == 0 { return Err(Error::new( ErrorKind::InvalidInput, "Tool run with no parameters.", )); } for i in 0..args.len() { let mut arg = args[i].replace("\"", ""); arg = arg.replace("\'", ""); let cmd = arg.split("="); // in case an equals sign was used let vec = cmd.collect::<Vec<&str>>(); let mut keyval = false; if vec.len() > 1 { keyval = true; } if vec[0].to_lowercase() == "-i1" || vec[0].to_lowercase() == "--input1" { if keyval { input1 = vec[1].to_string(); } else { input1 = args[i + 1].to_string(); } } else if vec[0].to_lowercase() == "-i2" || vec[0].to_lowercase() == "--input2" { if keyval { input2 = vec[1].to_string(); } else { input2 = args[i + 1].to_string(); } } else if vec[0].to_lowercase() == "-o" || vec[0].to_lowercase() == "--output" { if keyval { output_file = vec[1].to_string(); } else { output_file = args[i + 1].to_string(); } } } if verbose { let tool_name = self.get_tool_name(); let welcome_len = format!("* Welcome to {} *", tool_name).len().max(28); // 28 = length of the 'Powered by' by statement. println!("{}", "*".repeat(welcome_len)); println!("* Welcome to {} {}*", tool_name, " ".repeat(welcome_len - 15 - tool_name.len())); println!("* Powered by WhiteboxTools {}*", " ".repeat(welcome_len - 28)); println!("* www.whiteboxgeo.com {}*", " ".repeat(welcome_len - 23)); println!("{}", "*".repeat(welcome_len)); } let sep: String = path::MAIN_SEPARATOR.to_string(); let mut progress: usize; let mut old_progress: usize = 1; if !output_file.contains(&sep) && !output_file.contains("/") { output_file = format!("{}{}", working_directory, output_file); } if !input1.contains(&sep) && !input1.contains("/") { input1 = format!("{}{}", working_directory, input1); } if !input2.contains(&sep) && !input2.contains("/") { input2 = format!("{}{}", working_directory, input2); } if verbose { println!("Reading data...") }; let in1 = Arc::new(Raster::new(&input1, "r")?); let in2 = Arc::new(Raster::new(&input2, "r")?); let start = Instant::now(); let rows = in1.configs.rows as isize; let columns = in1.configs.columns as isize; let nodata1 = in1.configs.nodata; let nodata2 = in2.configs.nodata; // make sure the input files have the same size if in1.configs.rows != in2.configs.rows || in1.configs.columns != in2.configs.columns { return Err(Error::new( ErrorKind::InvalidInput, "The input files must have the same number of rows and columns and spatial extent.", )); } // calculate the number of downslope cells let mut num_procs = num_cpus::get() as isize; let configs = whitebox_common::configs::get_configs()?; let max_procs = configs.max_procs; if max_procs > 0 && max_procs < num_procs { num_procs = max_procs; } let (tx, rx) = mpsc::channel(); for tid in 0..num_procs { let in1 = in1.clone(); let in2 = in2.clone(); let tx = tx.clone(); thread::spawn(move || { let mut z1: f64; let mut z2: f64; for row in (0..rows).filter(|r| r % num_procs == tid) { let mut data: Vec<f64> = vec![nodata1; columns as usize]; for col in 0..columns { z1 = in1[(row, col)]; z2 = in2[(row, col)]; if z1 != nodata1 && z2 != nodata2 { if z1 != 0f64 && z2 == 0f64 { data[col as usize] = 1f64; } else { data[col as usize] = 0f64; } } } tx.send((row, data)).unwrap(); } }); } let mut output = Raster::initialize_using_file(&output_file, &in1); for r in 0..rows { let (row, data) = rx.recv().expect("Error receiving data from thread."); output.set_row_data(row, 
data); if verbose { progress = (100.0_f64 * r as f64 / (rows - 1) as f64) as usize; if progress != old_progress { println!("Progress: {}%", progress); old_progress = progress; } } } let elapsed_time = get_formatted_elapsed_time(start); output.configs.data_type = DataType::F32; output.configs.palette = "qual.plt".to_string(); output.configs.photometric_interp = PhotometricInterpretation::Categorical; output.add_metadata_entry(format!( "Created by whitebox_tools\' {} tool", self.get_tool_name() )); output.add_metadata_entry(format!("Input1: {}", input1)); output.add_metadata_entry(format!("Input2: {}", input2)); output.add_metadata_entry(format!("Elapsed Time (excluding I/O): {}", elapsed_time)); if verbose { println!("Saving data...") }; let _ = match output.write() { Ok(_) => { if verbose { println!("Output file written") } } Err(e) => return Err(e), }; if verbose { println!( "{}", &format!("Elapsed Time (excluding I/O): {}", elapsed_time) ); } Ok(()) } }
35.610561
161
0.503429
90d2c5fe00718579b63a712debe0ade09b769a13
8,574
use crate::ciphersuite::{signable::*, *}; use crate::codec::*; use crate::config::Config; use crate::config::ProtocolVersion; use crate::extensions::*; use crate::group::*; use crate::schedule::psk::PreSharedKeys; use crate::schedule::JoinerSecret; use crate::tree::{index::*, *}; use serde::{Deserialize, Serialize}; mod codec; pub(crate) mod errors; pub(crate) mod proposals; pub use codec::*; pub use errors::*; use proposals::*; #[cfg(test)] mod test_proposals; #[cfg(test)] mod test_welcome; /// Welcome Messages /// /// > 11.2.2. Welcoming New Members /// /// ```text /// struct { /// ProtocolVersion version = mls10; /// CipherSuite cipher_suite; /// EncryptedGroupSecrets secrets<0..2^32-1>; /// opaque encrypted_group_info<1..2^32-1>; /// } Welcome; /// ``` #[derive(Clone, Debug, PartialEq)] pub struct Welcome { version: ProtocolVersion, cipher_suite: &'static Ciphersuite, secrets: Vec<EncryptedGroupSecrets>, encrypted_group_info: Vec<u8>, } /// EncryptedGroupSecrets /// /// > 11.2.2. Welcoming New Members /// /// ```text /// struct { /// opaque key_package_hash<1..255>; /// HPKECiphertext encrypted_group_secrets; /// } EncryptedGroupSecrets; /// ``` #[derive(Clone, Debug, PartialEq)] pub struct EncryptedGroupSecrets { pub key_package_hash: Vec<u8>, pub encrypted_group_secrets: HpkeCiphertext, } impl Welcome { /// Create a new welcome message from the provided data. /// Note that secrets and the encrypted group info are consumed. pub(crate) fn new( version: ProtocolVersion, cipher_suite: &'static Ciphersuite, secrets: Vec<EncryptedGroupSecrets>, encrypted_group_info: Vec<u8>, ) -> Self { Self { version, cipher_suite, secrets, encrypted_group_info, } } /// Get a reference to the ciphersuite in this Welcome message. pub(crate) fn ciphersuite(&self) -> &'static Ciphersuite { self.cipher_suite } /// Get a reference to the encrypted group secrets in this Welcome message. pub fn secrets(&self) -> &[EncryptedGroupSecrets] { &self.secrets } /// Get a reference to the encrypted group info. pub(crate) fn encrypted_group_info(&self) -> &[u8] { &self.encrypted_group_info } } #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] pub struct Commit { pub(crate) proposals: Vec<ProposalOrRef>, pub(crate) path: Option<UpdatePath>, } impl Commit { /// Returns `true` if the commit contains an update path. `false` otherwise. pub fn has_path(&self) -> bool { self.path.is_some() } } #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] pub struct ConfirmationTag(pub(crate) Vec<u8>); impl ConfirmationTag { /// Create a new confirmation tag. /// /// > 11.2. Commit /// /// ```text /// MLSPlaintext.confirmation_tag = /// MAC(confirmation_key, GroupContext.confirmed_transcript_hash) /// ``` pub fn new( ciphersuite: &Ciphersuite, confirmation_key: &Secret, confirmed_transcript_hash: &[u8], ) -> Self { ConfirmationTag( ciphersuite .hkdf_extract( Some(confirmation_key), &Secret::from(confirmed_transcript_hash.to_vec()), ) .to_vec(), ) } /// Get a copy of the raw byte vector. pub(crate) fn to_vec(&self) -> Vec<u8> { self.0.to_vec() } } /// GroupInfo /// /// > 11.2.2. 
Welcoming New Members /// /// ```text /// struct { /// opaque group_id<0..255>; /// uint64 epoch; /// opaque tree_hash<0..255>; /// opaque confirmed_transcript_hash<0..255>; /// Extension extensions<0..2^32-1>; /// MAC confirmation_tag; /// uint32 signer_index; /// opaque signature<0..2^16-1>; /// } GroupInfo; /// ``` pub(crate) struct GroupInfo { group_id: GroupId, epoch: GroupEpoch, tree_hash: Vec<u8>, confirmed_transcript_hash: Vec<u8>, extensions: Vec<Box<dyn Extension>>, confirmation_tag: Vec<u8>, signer_index: LeafIndex, signature: Signature, } impl GroupInfo { pub(crate) fn new( group_id: GroupId, epoch: GroupEpoch, tree_hash: Vec<u8>, confirmed_transcript_hash: Vec<u8>, extensions: Vec<Box<dyn Extension>>, confirmation_tag: Vec<u8>, signer_index: LeafIndex, ) -> Self { Self { group_id, epoch, tree_hash, confirmed_transcript_hash, extensions, confirmation_tag, signer_index, signature: Signature::new_empty(), } } /// Get the tree hash as byte slice. pub(crate) fn tree_hash(&self) -> &[u8] { &self.tree_hash } /// Get the signer index. pub(crate) fn signer_index(&self) -> LeafIndex { self.signer_index } /// Get the signature. pub(crate) fn signature(&self) -> &Signature { &self.signature } /// Set the signature. pub(crate) fn set_signature(&mut self, signature: Signature) { self.signature = signature; } /// Get the group ID. pub(crate) fn group_id(&self) -> &GroupId { &self.group_id } /// Get the epoch. pub(crate) fn epoch(&self) -> GroupEpoch { self.epoch } /// Get the confirmed transcript hash. pub(crate) fn confirmed_transcript_hash(&self) -> &[u8] { &self.confirmed_transcript_hash } /// Get the confirmed tag. pub(crate) fn confirmation_tag(&self) -> &[u8] { &self.confirmation_tag } /// Get the extensions. pub(crate) fn extensions(&self) -> &[Box<dyn Extension>] { &self.extensions } /// Get the extensions as mutable reference. pub(crate) fn extensions_mut(&mut self) -> &mut Vec<Box<dyn Extension>> { &mut self.extensions } } impl GroupInfo { pub fn from_bytes(bytes: &[u8]) -> Result<Self, CodecError> { let mut cursor = Cursor::new(bytes); let group_id = GroupId::decode(&mut cursor)?; let epoch = GroupEpoch::decode(&mut cursor)?; let tree_hash = decode_vec(VecSize::VecU8, &mut cursor)?; let confirmed_transcript_hash = decode_vec(VecSize::VecU8, &mut cursor)?; let extensions = extensions_vec_from_cursor(&mut cursor)?; let confirmation_tag = decode_vec(VecSize::VecU8, &mut cursor)?; let signer_index = LeafIndex::from(u32::decode(&mut cursor)?); let signature = Signature::decode(&mut cursor)?; Ok(GroupInfo { group_id, epoch, tree_hash, confirmed_transcript_hash, extensions, confirmation_tag, signer_index, signature, }) } } impl Signable for GroupInfo { fn unsigned_payload(&self) -> Result<Vec<u8>, CodecError> { let buffer = &mut vec![]; self.group_id.encode(buffer)?; self.epoch.encode(buffer)?; encode_vec(VecSize::VecU8, buffer, &self.tree_hash)?; encode_vec(VecSize::VecU8, buffer, &self.confirmed_transcript_hash)?; // Get extensions encoded. We need to build a Vec::<ExtensionStruct> first. let encoded_extensions: Vec<ExtensionStruct> = self .extensions .iter() .map(|e| e.to_extension_struct()) .collect(); encode_vec(VecSize::VecU16, buffer, &encoded_extensions)?; encode_vec(VecSize::VecU8, buffer, &self.confirmation_tag)?; self.signer_index.encode(buffer)?; Ok(buffer.to_vec()) } } /// PathSecret /// /// > 11.2.2. 
Welcoming New Members /// /// ```text /// struct { /// opaque path_secret<1..255>; /// } PathSecret; /// ``` pub(crate) struct PathSecret { pub path_secret: Secret, } /// GroupSecrets /// /// > 11.2.2. Welcoming New Members /// /// ```text /// struct { /// opaque joiner_secret<1..255>; /// optional<PathSecret> path_secret; /// optional<PreSharedKeys> psks; /// } GroupSecrets; /// ``` #[allow(dead_code)] pub(crate) struct GroupSecrets { pub(crate) joiner_secret: JoinerSecret, pub(crate) path_secret: Option<PathSecret>, pub(crate) psks: Option<PreSharedKeys>, } impl GroupSecrets { /// Create a new group secret. pub(crate) fn new(joiner_secret: JoinerSecret, path_secret: Option<PathSecret>) -> Self { Self { joiner_secret, path_secret, psks: None, } } }
26.140244
93
0.605668
d9c91a4222ad3b3972c4ad0fbc5ce90445ab8ee7
178
use bracket_random::prelude::*;

fn main() {
    let mut rng = RandomNumberGenerator::new();
    DiceIterator::new(6, &mut rng).take(10).for_each(|n| println!("Rolled {}", n));
}
29.666667
83
0.640449
3376274b7b9eeed8b1e0aa27f2f4f302a526dfd3
918
use std::sync::atomic::{self, Ordering};

#[cfg_attr(nightly, repr(align(64)))]
#[derive(Debug)]
struct AtomicBool(atomic::AtomicBool);

impl AtomicBool {
    #[inline]
    fn new(v: bool) -> Self {
        AtomicBool(atomic::AtomicBool::new(v))
    }

    #[inline]
    fn load(&self, order: Ordering) -> bool {
        self.0.load(order)
    }

    #[inline]
    fn store(&self, val: bool, order: Ordering) {
        self.0.store(val, order);
    }
}

mod nio;

#[cfg(windows)]
mod windows;
#[cfg(windows)]
pub(crate) use self::windows::{net, Awakener, Recv};
#[cfg(windows)]
use self::windows::poll;
#[cfg(windows)]
use self::windows::nio::get_ready_tasks;

#[cfg(unix)]
mod unix;
#[cfg(unix)]
pub(crate) use self::unix::{net, Awakener, Recv};
#[cfg(unix)]
use self::unix::poll;
#[cfg(unix)]
use self::unix::nio::get_ready_tasks;

mod eloop;
pub(crate) use self::eloop::{EventLoop, ReadyTasks};
use self::eloop::*;
19.531915
52
0.62963
912200d25ca732408abe4b4272a0406aa24e76c7
2,697
// I was going to write a linked-list but then I read Learning Rust With Entirely Too Many Linked
// Lists (http://cglab.ca/~abeinges/blah/too-many-lists/book/README.html) which is worth reading
// but also convinced me to use a Vec instead.
//
// Note: this puzzle is very similar to https://en.wikipedia.org/wiki/Josephus_problem. The first
// part can be solved without a computer using some pure math. I'm not too sure about the second
// part. If you want to learn more about this puzzle (and lots of other interesting math stuff),
// check out Donald E Knuth's book: Concrete Mathematics
// https://notendur.hi.is/pgg/(ebook-pdf)%20-%20Mathematics%20-%20Concrete%20Mathematics.pdf

pub fn solve(input: usize) {
    assert_eq!(part1(5), 3);
    println!("part 1: {}", part1(input));

    assert_eq!(part2(5), 2);
    println!("part 2: {}", part2(input));
}

struct CircularBuffer {
    // nit: we could set the element to the length of the vector and trigger an overflow if we
    // accidentally access a deleted item. I however think it's cleaner to use an Option type.
    next: Vec<Option<usize>>,
    curr: usize,
    // the position for the opposite is next[opp]. We need one level of indirection so we can
    // remove the opposite element.
    opp: usize,
    size: usize,
}

impl CircularBuffer {
    fn new(size: usize) -> CircularBuffer {
        let mut next = Vec::with_capacity(size);
        for i in 0..size {
            next.push(Some((i + 1) % size));
        }
        CircularBuffer {
            next: next,
            curr: 0,
            opp: size / 2 - 1,
            size: size,
        }
    }

    fn remove_opp(&mut self) {
        assert!(self.size > 1);
        let remove = self.next[self.opp].unwrap();
        let next = self.next[remove].unwrap();
        self.next[self.opp] = Some(next);
        self.next[remove] = None;
        if self.size % 2 == 1 {
            // move forward
            self.opp = next;
        }
        self.curr = self.next[self.curr].unwrap();
        self.size -= 1;
    }

    fn remove_next(&mut self) {
        assert!(self.size > 1);
        let remove = self.next[self.curr].unwrap();
        let next = self.next[remove].unwrap();
        self.next[self.curr] = Some(next);
        self.next[remove] = None;
        self.curr = self.next[self.curr].unwrap();
        self.size -= 1;
    }
}

fn part1(input: usize) -> usize {
    let mut elves = CircularBuffer::new(input);
    for _ in 1..input {
        elves.remove_next();
    }
    elves.curr + 1
}

fn part2(input: usize) -> usize {
    let mut elves = CircularBuffer::new(input);
    for _ in 1..input {
        elves.remove_opp();
    }
    elves.curr + 1
}
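The comment above notes that part 1 has a pure-math solution. As a hedged illustration only (this helper is not part of the original file), the classic Josephus closed form for step size 2 gives the same answer as the simulation: writing n = 2^m + l with 0 <= l < 2^m, the surviving position is 2l + 1.

// Illustrative sketch, not from the original solution: closed-form answer for part 1.
// For n = 2^m + l with 0 <= l < 2^m, the survivor (1-indexed) is 2*l + 1, e.g. n = 5 -> 3.
fn part1_closed_form(n: usize) -> usize {
    assert!(n >= 1);
    let mut p = 1; // largest power of two <= n
    while p * 2 <= n {
        p *= 2;
    }
    2 * (n - p) + 1
}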
31.360465
97
0.602521
03c559aad268c45484dd08d843a71d72b7a5cbfa
3,795
use liturgy::{Psalm, PsalmSection, PsalmVerse, Reference, Source}; lazy_static! { pub static ref PSALM_7: Psalm = Psalm { number: 7, citation: None, sections: vec![PsalmSection { reference: Reference { source: Source::BCP1979, page: 590 }, local_name: String::from(""), latin_name: String::from("Domine, Deus meus"), verses: vec![ PsalmVerse { number: 1, a: String::from("O LORD my God, I take refuge in you; *"), b: String::from("save and deliver me from all who pursue me;") }, PsalmVerse { number: 2, a: String::from("Lest like a lion they tear me in pieces *"), b: String::from("and snatch me away with none to deliver me.") }, PsalmVerse { number: 3, a: String::from("O LORD my God, if I have done these things: *"), b: String::from("if there is any wickedness in my hands,") }, PsalmVerse { number: 4, a: String::from("If I have repaid my friend with evil, *"), b: String::from("or plundered him who without cause is my enemy;") }, PsalmVerse { number: 5, a: String::from("Then let my enemy pursue and overtake me, *"), b: String::from("trample my life into the ground,\nand lay my honor in the dust.") }, PsalmVerse { number: 6, a: String::from("Stand up, O LORD, in your wrath; *"), b: String::from("rise up against the fury of my enemies.") }, PsalmVerse { number: 7, a: String::from("Awake, O my God, decree justice; *"), b: String::from("let the assembly of the peoples gather round you.") }, PsalmVerse { number: 8, a: String::from("Be seated on your lofty throne, O Most High; *"), b: String::from("O LORD, judge the nations.") }, PsalmVerse { number: 9, a: String::from("Give judgment for me according to my righteousness, O LORD, *"), b: String::from("and according to my innocence, O Most High.") }, PsalmVerse { number: 10, a: String::from("Let the malice of the wicked come to an end,\nbut establish the righteous; *"), b: String::from("for you test the mind and heart, O righteous God.") }, PsalmVerse { number: 11, a: String::from("God is my shield and defense; *"), b: String::from("he is the savior of the true in heart.") }, PsalmVerse { number: 12, a: String::from("God is a righteous judge; *"), b: String::from("God sits in judgment every day.") }, PsalmVerse { number: 13, a: String::from("If they will not repent, God will whet his sword; *"), b: String::from("he will bend his bow and make it ready.") }, PsalmVerse { number: 14, a: String::from("He has prepared his weapons of death; *"), b: String::from("he makes his arrows shafts of fire.") }, PsalmVerse { number: 15, a: String::from("Look at those who are in labor with wickedness, *"), b: String::from("who conceive evil, and give birth to a lie.") }, PsalmVerse { number: 16, a: String::from("They dig a pit and make it deep *"), b: String::from("and fall into the hole that they have made.") }, PsalmVerse { number: 17, a: String::from("Their malice turns back upon their own head; *"), b: String::from("their violence falls on their own scalp.") }, PsalmVerse { number: 18, a: String::from("I will bear witness that the LORD is righteous; *"), b: String::from("I will praise the Name of the LORD Most High.") }, ] }] }; }
35.138889
104
0.562319
017e6bff543692b8c810f9cef7f4ebff9e79204f
927
/// Linearly maps `value` from the range [`input_min`, `input_max`] onto
/// [`target_min`, `target_max`].
///
/// Note: the `u32` subtractions assume `value >= input_min`, `input_max >= input_min`,
/// and `target_max >= target_min`; otherwise they overflow (panicking in debug builds).
pub fn map_values(
    value: u32,
    input_min: u32,
    input_max: u32,
    target_min: u32,
    target_max: u32,
) -> f32 {
    let slope = (target_max - target_min) as f32 / (input_max - input_min) as f32;
    (value - input_min) as f32 * slope + target_min as f32
}

#[cfg(test)]
mod tests {
    use crate::map_values::map_values;

    #[test]
    fn test_map_values_clean_up() {
        let test_value = map_values(3, 0, 10, 0, 100);
        assert_eq!(30 as f32, test_value);
    }

    #[test]
    fn test_map_values_clean_down() {
        let test_value = map_values(5, 0, 100, 0, 20);
        assert_eq!(1 as f32, test_value);
    }

    #[test]
    fn test_map_values_up() {
        let test_value = map_values(3, 0, 10, 0, 25);
        assert_eq!(7.5, test_value);
    }

    #[test]
    fn test_map_values_down() {
        let test_value = map_values(5, 0, 10, 0, 5);
        assert_eq!(2.5, test_value);
    }
}
23.175
82
0.582524
f95544cdcd9493c61d4155210ff168b6d26275e1
9,577
mod converters; mod winit_config; mod winit_windows; use bevy_input::{ keyboard::KeyboardInput, mouse::{MouseButtonInput, MouseMotion, MouseScrollUnit, MouseWheel}, }; pub use winit_config::*; pub use winit_windows::*; use bevy_app::{prelude::*, AppExit}; use bevy_ecs::Resources; use bevy_math::Vec2; use bevy_window::{ CreateWindow, CursorMoved, Window, WindowCloseRequested, WindowCreated, WindowResized, Windows, }; use winit::{ event::{self, DeviceEvent, Event, WindowEvent}, event_loop::{ControlFlow, EventLoop, EventLoopWindowTarget}, }; #[derive(Default)] pub struct WinitPlugin; impl Plugin for WinitPlugin { fn build(&self, app: &mut AppBuilder) { app // TODO: It would be great to provide a raw winit WindowEvent here, but the lifetime on it is // stopping us. there are plans to remove the lifetime: https://github.com/rust-windowing/winit/pull/1456 // .add_event::<winit::event::WindowEvent>() .init_resource::<WinitWindows>() .set_runner(winit_runner); } } fn run<F>(event_loop: EventLoop<()>, event_handler: F) -> ! where F: 'static + FnMut(Event<'_, ()>, &EventLoopWindowTarget<()>, &mut ControlFlow), { event_loop.run(event_handler) } // TODO: It may be worth moving this cfg into a procedural macro so that it can be referenced by // a single name instead of being copied around. // https://gist.github.com/jakerr/231dee4a138f7a5f25148ea8f39b382e seems to work. #[cfg(any( target_os = "windows", target_os = "macos", target_os = "linux", target_os = "dragonfly", target_os = "freebsd", target_os = "netbsd", target_os = "openbsd" ))] fn run_return<F>(event_loop: &mut EventLoop<()>, event_handler: F) where F: FnMut(Event<'_, ()>, &EventLoopWindowTarget<()>, &mut ControlFlow), { use winit::platform::desktop::EventLoopExtDesktop; event_loop.run_return(event_handler) } #[cfg(not(any( target_os = "windows", target_os = "macos", target_os = "linux", target_os = "dragonfly", target_os = "freebsd", target_os = "netbsd", target_os = "openbsd" )))] fn run_return<F>(_event_loop: &mut EventLoop<()>, _event_handler: F) where F: FnMut(Event<'_, ()>, &EventLoopWindowTarget<()>, &mut ControlFlow), { panic!("Run return is not supported on this platform!") } pub fn winit_runner(mut app: App) { let mut event_loop = EventLoop::new(); let mut create_window_event_reader = EventReader::<CreateWindow>::default(); let mut app_exit_event_reader = EventReader::<AppExit>::default(); handle_create_window_events( &mut app.resources, &event_loop, &mut create_window_event_reader, ); log::debug!("Entering winit event loop"); let should_return_from_run = app .resources .get::<WinitConfig>() .map_or(false, |config| config.return_from_run); let event_handler = move |event: Event<()>, event_loop: &EventLoopWindowTarget<()>, control_flow: &mut ControlFlow| { *control_flow = if cfg!(feature = "metal-auto-capture") { ControlFlow::Exit } else { ControlFlow::Poll }; if let Some(app_exit_events) = app.resources.get_mut::<Events<AppExit>>() { if app_exit_event_reader.latest(&app_exit_events).is_some() { *control_flow = ControlFlow::Exit; } } match event { event::Event::WindowEvent { event: WindowEvent::Resized(size), window_id: winit_window_id, .. 
} => { let winit_windows = app.resources.get_mut::<WinitWindows>().unwrap(); let mut windows = app.resources.get_mut::<Windows>().unwrap(); let window_id = winit_windows.get_window_id(winit_window_id).unwrap(); let mut window = windows.get_mut(window_id).unwrap(); window.width = size.width; window.height = size.height; let mut resize_events = app.resources.get_mut::<Events<WindowResized>>().unwrap(); resize_events.send(WindowResized { id: window_id, height: window.height as usize, width: window.width as usize, }); } event::Event::WindowEvent { event, window_id: winit_window_id, .. } => match event { WindowEvent::CloseRequested => { let mut window_close_requested_events = app .resources .get_mut::<Events<WindowCloseRequested>>() .unwrap(); let winit_windows = app.resources.get_mut::<WinitWindows>().unwrap(); let window_id = winit_windows.get_window_id(winit_window_id).unwrap(); window_close_requested_events.send(WindowCloseRequested { id: window_id }); } WindowEvent::KeyboardInput { ref input, .. } => { let mut keyboard_input_events = app.resources.get_mut::<Events<KeyboardInput>>().unwrap(); keyboard_input_events.send(converters::convert_keyboard_input(input)); } WindowEvent::CursorMoved { position, .. } => { let mut cursor_moved_events = app.resources.get_mut::<Events<CursorMoved>>().unwrap(); let winit_windows = app.resources.get_mut::<WinitWindows>().unwrap(); let window_id = winit_windows.get_window_id(winit_window_id).unwrap(); let window = winit_windows.get_window(window_id).unwrap(); let inner_size = window.inner_size(); // move origin to bottom left let y_position = inner_size.height as f32 - position.y as f32; cursor_moved_events.send(CursorMoved { id: window_id, position: Vec2::new(position.x as f32, y_position as f32), }); } WindowEvent::MouseInput { state, button, .. } => { let mut mouse_button_input_events = app.resources.get_mut::<Events<MouseButtonInput>>().unwrap(); mouse_button_input_events.send(MouseButtonInput { button: converters::convert_mouse_button(button), state: converters::convert_element_state(state), }); } WindowEvent::MouseWheel { delta, .. } => match delta { event::MouseScrollDelta::LineDelta(x, y) => { let mut mouse_wheel_input_events = app.resources.get_mut::<Events<MouseWheel>>().unwrap(); mouse_wheel_input_events.send(MouseWheel { unit: MouseScrollUnit::Line, x, y, }); } event::MouseScrollDelta::PixelDelta(p) => { let mut mouse_wheel_input_events = app.resources.get_mut::<Events<MouseWheel>>().unwrap(); mouse_wheel_input_events.send(MouseWheel { unit: MouseScrollUnit::Pixel, x: p.x as f32, y: p.y as f32, }); } }, _ => {} }, event::Event::DeviceEvent { ref event, .. 
} => { if let DeviceEvent::MouseMotion { delta } = event { let mut mouse_motion_events = app.resources.get_mut::<Events<MouseMotion>>().unwrap(); mouse_motion_events.send(MouseMotion { delta: Vec2::new(delta.0 as f32, delta.1 as f32), }); } } event::Event::MainEventsCleared => { handle_create_window_events( &mut app.resources, event_loop, &mut create_window_event_reader, ); app.update(); } _ => (), } }; if should_return_from_run { run_return(&mut event_loop, event_handler); } else { run(event_loop, event_handler); } } fn handle_create_window_events( resources: &mut Resources, event_loop: &EventLoopWindowTarget<()>, create_window_event_reader: &mut EventReader<CreateWindow>, ) { let mut winit_windows = resources.get_mut::<WinitWindows>().unwrap(); let mut windows = resources.get_mut::<Windows>().unwrap(); let create_window_events = resources.get::<Events<CreateWindow>>().unwrap(); let mut window_created_events = resources.get_mut::<Events<WindowCreated>>().unwrap(); for create_window_event in create_window_event_reader.iter(&create_window_events) { let window = Window::new(create_window_event.id, &create_window_event.descriptor); winit_windows.create_window(event_loop, &window); let window_id = window.id; windows.add(window); window_created_events.send(WindowCreated { id: window_id }); } }
40.07113
117
0.558004
e4ce049eea5f6032001ce642609d33aacf2b4ceb
79
//! Reexport odbc-sys as ffi
extern crate odbc_sys;

pub use self::odbc_sys::*;
19.75
28
0.721519