hexsha
stringlengths
40
40
size
int64
4
1.05M
content
stringlengths
4
1.05M
avg_line_length
float64
1.33
100
max_line_length
int64
1
1k
alphanum_fraction
float64
0.25
1
5b07b3916d870c667bce13dc19c6c69b235db0c3
303
// SPDX-License-Identifier: MIT // Copyright (C) 2018-present iced project and contributors use crate::formatter::nasm::tests::fmt_factory; use crate::formatter::tests::registers::register_tests; #[test] fn test_regs() { register_tests("Nasm", "RegisterTests", || fmt_factory::create_registers()); }
27.545455
77
0.745875
5b36539f707932347fb39bde63ca8faabfa678b1
8,272
use nu_engine::{eval_block, CallExt};
use nu_protocol::ast::Call;
use nu_protocol::engine::{CaptureBlock, Command, EngineState, Stack};
use nu_protocol::{
    Example, IntoPipelineData, PipelineData, ShellError, Signature, Span, Spanned, SyntaxShape,
    Value,
};

use indexmap::IndexMap;

/// The `group-by` command: groups pipeline rows into a record keyed either by
/// a column value, by the stringified row itself, or by the result of a block.
#[derive(Clone)]
pub struct GroupBy;

impl Command for GroupBy {
    fn name(&self) -> &str {
        "group-by"
    }

    fn signature(&self) -> Signature {
        Signature::build("group-by").optional(
            "grouper",
            SyntaxShape::Any,
            "the grouper value to use",
        )
    }

    fn usage(&self) -> &str {
        "Create a new table grouped."
    }

    fn run(
        &self,
        engine_state: &EngineState,
        stack: &mut Stack,
        call: &Call,
        input: PipelineData,
    ) -> Result<nu_protocol::PipelineData, nu_protocol::ShellError> {
        group_by(engine_state, stack, call, input)
    }

    #[allow(clippy::unwrap_used)]
    fn examples(&self) -> Vec<Example> {
        vec![
            Example {
                description: "group items by column named \"type\"",
                example: r#"ls | group-by type"#,
                result: None,
            },
            Example {
                description: "you can also group by raw values by leaving out the argument",
                example: "echo ['1' '3' '1' '3' '2' '1' '1'] | group-by",
                result: Some(Value::Record {
                    cols: vec!["1".to_string(), "3".to_string(), "2".to_string()],
                    vals: vec![
                        Value::List {
                            vals: vec![
                                Value::test_string("1"),
                                Value::test_string("1"),
                                Value::test_string("1"),
                                Value::test_string("1"),
                            ],
                            span: Span::test_data(),
                        },
                        Value::List {
                            vals: vec![Value::test_string("3"), Value::test_string("3")],
                            span: Span::test_data(),
                        },
                        Value::List {
                            vals: vec![Value::test_string("2")],
                            span: Span::test_data(),
                        },
                    ],
                    span: Span::test_data(),
                }),
            },
        ]
    }
}

/// How rows are mapped to group keys.
enum Grouper {
    /// Group by a named column, or by the row's string value when `None`.
    ByColumn(Option<Spanned<String>>),
    /// Group by the result of evaluating a block per row.
    ByBlock,
}

/// Implements `group-by`: collects the input into a list, determines the
/// grouping strategy from the optional argument, and returns a record whose
/// columns are the group keys and whose values are lists of matching rows.
///
/// # Errors
/// Returns `SpannedLabeledError` when the pipeline is empty or when a grouping
/// block produces more than one value; propagates evaluation errors otherwise.
pub fn group_by(
    engine_state: &EngineState,
    stack: &mut Stack,
    call: &Call,
    input: PipelineData,
) -> Result<PipelineData, ShellError> {
    let name = call.head;

    let grouper: Option<Value> = call.opt(engine_state, stack, 0)?;
    let values: Vec<Value> = input.into_iter().collect();
    let mut keys: Vec<Result<String, ShellError>> = vec![];
    let mut group_strategy = Grouper::ByColumn(None);

    // BUGFIX: the empty check must come before indexing into `values`;
    // previously `values[0]` panicked on an empty pipeline instead of
    // returning this error.
    if values.is_empty() {
        return Err(ShellError::SpannedLabeledError(
            "expected table from pipeline".into(),
            "requires a table input".into(),
            name,
        ));
    }
    let first = values[0].clone();

    let value_list = Value::List {
        vals: values.clone(),
        span: name,
    };

    match grouper {
        Some(Value::Block { .. }) => {
            // Re-read the argument as a captured block and evaluate it once
            // per row; each result becomes that row's group key.
            let block: Option<CaptureBlock> = call.opt(engine_state, stack, 0)?;
            let error_key = "error";

            for value in values {
                if let Some(capture_block) = &block {
                    let mut stack = stack.captures_to_stack(&capture_block.captures);
                    let block = engine_state.get_block(capture_block.block_id);
                    let pipeline =
                        eval_block(engine_state, &mut stack, block, value.into_pipeline_data());

                    match pipeline {
                        Ok(s) => {
                            let collection: Vec<Value> = s.into_iter().collect();

                            if collection.len() > 1 {
                                return Err(ShellError::SpannedLabeledError(
                                    "expected one value from the block".into(),
                                    "requires a table with one value for grouping".into(),
                                    name,
                                ));
                            }

                            // A missing or error result buckets the row under
                            // the literal "error" key rather than failing.
                            let value = match collection.get(0) {
                                Some(Value::Error { .. }) | None => Value::String {
                                    val: error_key.to_string(),
                                    span: name,
                                },
                                Some(return_value) => return_value.clone(),
                            };

                            keys.push(value.as_string());
                        }
                        Err(_) => {
                            keys.push(Ok(error_key.into()));
                        }
                    }
                }
            }

            group_strategy = Grouper::ByBlock;
        }
        Some(other) => {
            group_strategy = Grouper::ByColumn(Some(Spanned {
                item: other.as_string()?,
                span: name,
            }));
        }
        _ => {}
    }

    // Prefer the span of the first input value for error reporting.
    let name = if let Ok(span) = first.span() {
        span
    } else {
        name
    };

    let group_value = match group_strategy {
        Grouper::ByBlock => {
            let map = keys;

            // Look up the precomputed per-row key; fall back to the row's
            // string form if the index is somehow out of range.
            let block = Box::new(move |idx: usize, row: &Value| match map.get(idx) {
                Some(Ok(key)) => Ok(key.clone()),
                Some(Err(reason)) => Err(reason.clone()),
                None => row.as_string(),
            });

            data_group(&value_list, &Some(block), name)
        }
        Grouper::ByColumn(column_name) => group(&column_name, &value_list, name),
    };

    Ok(PipelineData::Value(group_value?, None))
}

/// Groups `values` into an ordered record using `grouper` to derive each
/// row's key (falling back to the row's string value when absent).
/// Insertion order of first key appearance is preserved via `IndexMap`.
#[allow(clippy::type_complexity)]
pub fn data_group(
    values: &Value,
    grouper: &Option<Box<dyn Fn(usize, &Value) -> Result<String, ShellError> + Send>>,
    span: Span,
) -> Result<Value, ShellError> {
    let mut groups: IndexMap<String, Vec<Value>> = IndexMap::new();

    for (idx, value) in values.clone().into_pipeline_data().into_iter().enumerate() {
        let group_key = if let Some(ref grouper) = grouper {
            grouper(idx, &value)
        } else {
            value.as_string()
        };

        // `or_default` avoids constructing a throwaway Vec on hits.
        let group = groups.entry(group_key?).or_default();
        group.push(value);
    }

    let mut cols = vec![];
    let mut vals = vec![];

    for (k, v) in groups {
        cols.push(k.to_string());
        vals.push(Value::List { vals: v, span });
    }

    Ok(Value::Record { cols, vals, span })
}

/// Groups `values` by the given column name, or by each row's string value
/// when `column_name` is `None`.
///
/// # Errors
/// Returns `CantFindColumn` when a row lacks the requested column.
pub fn group(
    column_name: &Option<Spanned<String>>,
    values: &Value,
    span: Span,
) -> Result<Value, ShellError> {
    let name = span;

    let grouper = if let Some(column_name) = column_name {
        Grouper::ByColumn(Some(column_name.clone()))
    } else {
        Grouper::ByColumn(None)
    };

    match grouper {
        Grouper::ByColumn(Some(column_name)) => {
            let block = Box::new(
                move |_, row: &Value| match row.get_data_by_key(&column_name.item) {
                    Some(group_key) => Ok(group_key.as_string()?),
                    None => Err(ShellError::CantFindColumn(
                        column_name.span,
                        row.span().unwrap_or(column_name.span),
                    )),
                },
            );

            data_group(values, &Some(block), name)
        }
        Grouper::ByColumn(None) => {
            let block = Box::new(move |_, row: &Value| row.as_string());
            data_group(values, &Some(block), name)
        }
        // Block grouping is handled in `group_by` directly; reaching this
        // arm would be an internal logic error.
        Grouper::ByBlock => Err(ShellError::NushellFailed(
            "Block not implemented: This should never happen.".into(),
        )),
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_examples() {
        use crate::test_examples;
        test_examples(GroupBy {})
    }
}
30.750929
96
0.464942
8a358fd33904fcd6206b89f8c2751b02d9744d86
1,816
use reqwest::Client;
use serde::Serialize;
use time::OffsetDateTime;

use crate::highscores::HighScore;

/// Thin HTTP client for the high-score backend.
pub struct HighScoreApi {
    // Server root, e.g. "https://example.com"; endpoint paths are appended
    // directly, so no trailing slash is expected — TODO confirm with callers.
    base_url: String,
    // Reused across requests so reqwest can pool connections.
    client: Client,
}

/// Query string payload for `top_ten`; `since` is serialized as an
/// RFC 3339 timestamp (see the unit test below for the exact encoding).
#[derive(Serialize)]
struct QueryParams {
    #[serde(with = "time::serde::rfc3339")]
    since: OffsetDateTime,
}

impl HighScoreApi {
    /// Creates a client targeting the server at `base_url`.
    pub fn new(base_url: &str) -> Self {
        Self {
            base_url: base_url.to_string(),
            client: Client::new(),
        }
    }

    /// Fetches the top-ten high scores from `GET {base}/api/topten`,
    /// optionally restricted via `?since=<rfc3339>` when `since` is given.
    ///
    /// # Errors
    /// Fails on query-string serialization, network errors, a non-2xx
    /// status (`error_for_status`), or a body that does not deserialize
    /// into `Vec<HighScore>`.
    pub async fn top_ten(
        &self,
        since: Option<OffsetDateTime>,
    ) -> Result<Vec<HighScore>, anyhow::Error> {
        let request_url = if let Some(since) = since {
            let query_params = serde_qs::to_string(&QueryParams { since })?;
            format!("{base}/api/topten?{query_params}", base = self.base_url)
        } else {
            format!("{base}/api/topten", base = self.base_url)
        };

        let res = self
            .client
            .get(&request_url)
            .send()
            .await?
            .error_for_status()?
            .json()
            .await?;

        Ok(res)
    }

    /// Submits a high score as JSON to `POST {base}/api/submit`.
    ///
    /// # Errors
    /// Fails on network errors or a non-2xx response status.
    pub async fn submit(&self, highscore: &HighScore) -> Result<(), anyhow::Error> {
        self.client
            .post(&format!("{base}/api/submit", base = self.base_url))
            .json(highscore)
            .send()
            .await?
            .error_for_status()?;

        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use time::format_description::well_known::Rfc3339;

    // Pins the URL-encoded RFC 3339 form (':' percent-encoded as %3A)
    // that the server is expected to parse.
    #[test]
    fn should_serialize_query_params_correctly() {
        let since = OffsetDateTime::parse("2021-01-01T00:00:00Z", &Rfc3339).unwrap();

        let query_params = serde_qs::to_string(&QueryParams { since }).unwrap();

        assert_eq!("since=2021-01-01T00%3A00%3A00Z", &query_params);
    }
}
25.222222
85
0.555066
b935ccb7398e23e33407343ffa7a991bffbaa1fb
2,757
//! Walks the crate looking for items/impl-items/trait-items that have
//! either a `rustc_symbol_name` or `rustc_def_path` attribute and
//! generates an error giving, respectively, the symbol name or
//! def-path. This is used for unit testing the code that generates
//! paths etc in all kinds of annoying scenarios.

use rustc::hir;
use rustc::ty::TyCtxt;
use rustc_mir::monomorphize::Instance;
use syntax::symbol::{Symbol, sym};

// Attribute names this pass reacts to.
const SYMBOL_NAME: Symbol = sym::rustc_symbol_name;
const DEF_PATH: Symbol = sym::rustc_def_path;

/// Entry point: visits every item-like in the crate and reports the
/// requested symbol names / def-paths as (expected) errors.
pub fn report_symbol_names<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
    // if the `rustc_attrs` feature is not enabled, then the
    // attributes we are interested in cannot be present anyway, so
    // skip the walk.
    if !tcx.features().rustc_attrs {
        return;
    }

    // Run outside dep-graph tracking: emitting diagnostics here must not
    // register dependencies for incremental compilation.
    tcx.dep_graph.with_ignore(|| {
        let mut visitor = SymbolNamesTest { tcx };
        tcx.hir().krate().visit_all_item_likes(&mut visitor);
    })
}

/// Visitor that inspects each item-like's attributes.
struct SymbolNamesTest<'a, 'tcx:'a> {
    tcx: TyCtxt<'a, 'tcx, 'tcx>,
}

impl<'a, 'tcx> SymbolNamesTest<'a, 'tcx> {
    /// Checks one item's attributes and emits a diagnostic per marker
    /// attribute found. The diagnostics are errors so test files can
    /// match them with `//~ ERROR` annotations.
    fn process_attrs(&mut self, hir_id: hir::HirId) {
        let tcx = self.tcx;
        let def_id = tcx.hir().local_def_id_from_hir_id(hir_id);
        for attr in tcx.get_attrs(def_id).iter() {
            if attr.check_name(SYMBOL_NAME) {
                // for now, can only use on monomorphic names
                let instance = Instance::mono(tcx, def_id);
                let mangled = self.tcx.symbol_name(instance);
                tcx.sess.span_err(attr.span, &format!("symbol-name({})", mangled));
                // Also report both demangled renderings when the mangled
                // name round-trips through the demangler.
                if let Ok(demangling) = rustc_demangle::try_demangle(&mangled.as_str()) {
                    tcx.sess.span_err(attr.span, &format!("demangling({})", demangling));
                    tcx.sess.span_err(attr.span, &format!("demangling-alt({:#})", demangling));
                }
            } else if attr.check_name(DEF_PATH) {
                let path = tcx.def_path_str(def_id);
                tcx.sess.span_err(attr.span, &format!("def-path({})", path));
            }

            // (*) The formatting of `tag({})` is chosen so that tests can elect
            // to test the entirety of the string, if they choose, or else just
            // some subset.
        }
    }
}

// Dispatch all three item-like kinds to the same attribute check.
impl<'a, 'tcx> hir::itemlikevisit::ItemLikeVisitor<'tcx> for SymbolNamesTest<'a, 'tcx> {
    fn visit_item(&mut self, item: &'tcx hir::Item) {
        self.process_attrs(item.hir_id);
    }

    fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem) {
        self.process_attrs(trait_item.hir_id);
    }

    fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem) {
        self.process_attrs(impl_item.hir_id);
    }
}
37.767123
95
0.612985
62e85c5f895b10af79b9994b35585ffaa239230a
2,583
/// Utilities for creating data dumps. use std::collections::{HashMap, HashSet}; use std::path::{Path, PathBuf}; use rpki::uri; //------------ DumpRegistry -------------------------------------------------- /// A registration for all the repositories encountered during a dump. #[derive(Clone, Debug)] pub struct DumpRegistry { /// The base directory under which to store repositories. base_dir: PathBuf, /// The RRDP repositories we’ve already seen and where they go. rrdp_uris: HashMap<uri::Https, String>, /// The directory names we have already used for RRDP repositories.. /// /// This is the last component of the path. rrdp_dirs: HashSet<String>, } impl DumpRegistry { /// Creates a new registry. pub fn new(base_dir: PathBuf) -> Self { DumpRegistry { base_dir, rrdp_uris: HashMap::new(), rrdp_dirs: HashSet::new(), } } /// Returns the base directory of the dump. pub fn base_dir(&self) -> &Path { &self.base_dir } /// Registers the repository for the manifest and returns the target path. pub fn get_repo_path( &mut self, rpki_notify: Option<&uri::Https> ) -> PathBuf { if let Some(rpki_notify) = rpki_notify { if let Some(path) = self.rrdp_uris.get(rpki_notify) { self.base_dir.join(path) } else { self.make_path(rpki_notify) } } else { self.base_dir.join("rsync") } } fn make_path(&mut self, uri: &uri::Https) -> PathBuf { let authority = uri.canonical_authority(); if !self.rrdp_dirs.contains(authority.as_ref()) { self.rrdp_dirs.insert(authority.as_ref().into()); self.rrdp_uris.insert(uri.clone(), authority.as_ref().into()); self.base_dir.join(authority.as_ref()) } else { let mut i = 1; loop { let name = format!("{}-{}", authority, i); if !self.rrdp_dirs.contains(&name) { self.rrdp_dirs.insert(name.clone()); self.rrdp_uris.insert(uri.clone(), name.clone()); return self.base_dir.join(name) } i += 1 } } } /// Returns an iterator over the URIs and their paths. 
pub fn rrdp_uris( &self ) -> impl Iterator<Item = (&'_ uri::Https, &'_ str)> + '_ { self.rrdp_uris.iter().map(|(key, value)| (key, value.as_str())) } }
30.034884
78
0.540844
114d94cd1b5ab8b4704e3c379154f681804d13b2
21,912
use std::iter; use proc_macro2::{Ident, Literal, Span, TokenStream}; use common_gen::*; use protocol::*; use util::*; use Side; pub(crate) fn generate_protocol_client(protocol: Protocol) -> TokenStream { let modules = protocol.interfaces.iter().map(|iface| { let doc_attr = iface.description.as_ref().map(description_to_doc_attr); let mod_name = Ident::new(&iface.name, Span::call_site()); let iface_name = Ident::new(&snake_to_camel(&iface.name), Span::call_site()); let enums = &iface.enums; let ident = Ident::new("Request", Span::call_site()); let requests = gen_messagegroup( &ident, Side::Client, false, &iface.requests, Some(messagegroup_c_addon( &ident, &iface_name, Side::Client, false, &iface.requests, )), ); let ident = Ident::new("Event", Span::call_site()); let events = gen_messagegroup( &ident, Side::Client, true, &iface.events, Some(messagegroup_c_addon( &ident, &iface_name, Side::Client, true, &iface.events, )), ); let interface = gen_interface( &iface_name, &iface.name, iface.version, Some(interface_c_addon(&iface.name)), Side::Client, ); let object_methods = gen_object_methods(&iface_name, &iface.requests, Side::Client); let event_handler_trait = gen_event_handler_trait(&iface_name, &iface.events, Side::Client); let sinces = gen_since_constants(&iface.requests, &iface.events); let c_interface = super::c_interface_gen::generate_interface(&iface); quote! { #doc_attr pub mod #mod_name { use std::os::raw::c_char; use super::{ Proxy, NewProxy, AnonymousObject, Interface, MessageGroup, MessageDesc, ArgumentType, Object, Message, Argument, ObjectMetadata, HandledBy, types_null, NULLPTR }; use super::sys::common::{wl_interface, wl_array, wl_argument, wl_message}; use super::sys::client::*; #(#enums)* #requests #events #interface #object_methods #event_handler_trait #sinces #c_interface } } }); let c_prefix = super::c_interface_gen::generate_interfaces_prefix(&protocol); quote! 
{ #c_prefix #(#modules)* } } pub(crate) fn generate_protocol_server(protocol: Protocol) -> TokenStream { let modules = protocol .interfaces .iter() // display and registry are handled specially .filter(|iface| iface.name != "wl_display" && iface.name != "wl_registry") .map(|iface| { let doc_attr = iface.description.as_ref().map(description_to_doc_attr); let mod_name = Ident::new(&iface.name, Span::call_site()); let iface_name = Ident::new(&snake_to_camel(&iface.name), Span::call_site()); let enums = &iface.enums; let ident = Ident::new("Request", Span::call_site()); let requests = gen_messagegroup( &ident, Side::Server, true, &iface.requests, Some(messagegroup_c_addon(&ident, &iface_name, Side::Server, true, &iface.requests)), ); let ident = Ident::new("Event", Span::call_site()); let events = gen_messagegroup( &ident, Side::Server, false, &iface.events, Some(messagegroup_c_addon(&ident, &iface_name, Side::Server, false, &iface.events)), ); let interface = gen_interface( &Ident::new(&snake_to_camel(&iface.name), Span::call_site()), &iface.name, iface.version, Some(interface_c_addon(&iface.name)), Side::Server, ); let object_methods = gen_object_methods(&iface_name, &iface.events, Side::Server); let event_handler_trait = gen_event_handler_trait(&iface_name, &iface.requests, Side::Server); let sinces = gen_since_constants(&iface.requests, &iface.events); let c_interface = super::c_interface_gen::generate_interface(&iface); quote! { #doc_attr pub mod #mod_name { use std::os::raw::c_char; use super::{ Resource, NewResource, AnonymousObject, Interface, MessageGroup, MessageDesc, ArgumentType, Object, Message, Argument, ObjectMetadata, HandledBy, types_null, NULLPTR }; use super::sys::common::{wl_argument, wl_interface, wl_array, wl_message}; use super::sys::server::*; #(#enums)* #requests #events #interface #object_methods #event_handler_trait #sinces #c_interface } } }); let c_prefix = super::c_interface_gen::generate_interfaces_prefix(&protocol); quote! 
{ #c_prefix #(#modules)* } } fn messagegroup_c_addon( name: &Ident, parent_iface: &Ident, side: Side, receiver: bool, messages: &[Message], ) -> TokenStream { let from_raw_c_body = if receiver { let match_arms = messages .iter() .enumerate() .map(|(i, msg)| { let pattern = Literal::u16_unsuffixed(i as u16); let msg_name = Ident::new(&snake_to_camel(&msg.name), Span::call_site()); let msg_name_qualified = quote!(#name::#msg_name); let (args_binding, result) = if msg.args.is_empty() { (None, msg_name_qualified) } else { let len = Literal::usize_unsuffixed(msg.args.len()); let fields = msg.args.iter().enumerate().map(|(j, arg)| { let field_name = Ident::new( &format!("{}{}", if is_keyword(&arg.name) { "_" } else { "" }, arg.name), Span::call_site(), ); let idx = Literal::usize_unsuffixed(j); let field_value = match arg.typ { Type::Uint => { if let Some(ref enu) = arg.enum_ { let enum_type = dotted_to_relname(enu); quote!(#enum_type::from_raw(_args[#idx].u).ok_or(())?) } else { quote!(_args[#idx].u) } } Type::Int => { if let Some(ref enu) = arg.enum_ { let enum_type = dotted_to_relname(enu); quote!(#enum_type::from_raw(_args[#idx].i as u32).ok_or(())?) } else { quote!(_args[#idx].i) } } Type::Fixed => quote!((_args[#idx].f as f64) / 256.), Type::String => { let string_conversion = quote! { ::std::ffi::CStr::from_ptr(_args[#idx].s).to_string_lossy().into_owned() }; if arg.allow_null { quote! { if _args[#idx].s.is_null() { None } else { Some(#string_conversion) } } } else { string_conversion } } Type::Array => { let array_conversion = quote! { { let array = &*_args[#idx].a; ::std::slice::from_raw_parts(array.data as *const u8, array.size) .to_owned() } }; if arg.allow_null { quote! 
{ if _args[#idx].a.is_null() { None } else { Some(#array_conversion) } } } else { array_conversion } } Type::Fd => quote!(_args[#idx].h), Type::Object => { let object_name = side.object_name(); let object_conversion = if let Some(ref iface) = arg.interface { let iface_mod = Ident::new(iface, Span::call_site()); let iface_type = Ident::new(&snake_to_camel(iface), Span::call_site()); quote! { #object_name::<super::#iface_mod::#iface_type>::from_c_ptr( _args[#idx].o as *mut _, ).into() } } else { quote! { #object_name::<AnonymousObject>::from_c_ptr(_args[#idx].o as *mut _).into() } }; if arg.allow_null { quote! { if _args[#idx].o.is_null() { None } else { Some(#object_conversion) } } } else { object_conversion } } Type::NewId => { let new_id_conversion = if let Some(ref iface) = arg.interface { let iface_mod = Ident::new(iface, Span::call_site()); let iface_type = Ident::new(&snake_to_camel(iface), Span::call_site()); match side { Side::Client => { quote! { NewProxy::<super::#iface_mod::#iface_type>::from_c_ptr( _args[#idx].o as *mut _ ) } } Side::Server => { quote! { { let me = Resource::<#parent_iface>::from_c_ptr(obj as *mut _); me.make_child_for::<super::#iface_mod::#iface_type>(_args[#idx].n).unwrap() } } } } } else { // bind-like function quote!(panic!("Cannot unserialize anonymous new id.")) }; if arg.allow_null { quote! { if _args[#idx].o.is_null() { None } else { Some(#new_id_conversion) } } } else { new_id_conversion } } Type::Destructor => panic!("An argument cannot have type \"destructor\"."), }; quote!(#field_name: #field_value) }); let result = quote! { #msg_name_qualified { #(#fields,)* } }; let args_binding = quote! { let _args = ::std::slice::from_raw_parts(args, #len); }; (Some(args_binding), result) }; quote! { #pattern => { #args_binding Ok(#result) } } }) .chain(iter::once(quote!(_ => return Err(())))); quote! 
{ match opcode { #(#match_arms,)* } } } else { let panic_message = format!("{}::from_raw_c can not be used {:?}-side.", name, side); quote!(panic!(#panic_message)) }; let as_raw_c_in_body = if receiver { let panic_message = format!("{}::as_raw_c_in can not be used {:?}-side.", name, side); quote!(panic!(#panic_message)) } else { let match_arms = messages.iter().enumerate().map(|(i, msg)| { let msg_name = Ident::new(&snake_to_camel(&msg.name), Span::call_site()); let pattern = if msg.args.is_empty() { quote!(#name::#msg_name) } else { let fields = msg.args.iter().map(|arg| { Ident::new( &format!("{}{}", if is_keyword(&arg.name) { "_" } else { "" }, arg.name), Span::call_site(), ) }); quote!(#name::#msg_name { #(#fields),* }) }; let buffer_len = Literal::usize_unsuffixed( msg.args.len() + 2 * msg .args .iter() .filter(|arg| arg.typ == Type::NewId && arg.interface.is_none()) .count(), ); let mut j = 0; let args_array_init_stmts = msg.args.iter().map(|arg| { let idx = Literal::usize_unsuffixed(j); let arg_name = Ident::new( &format!("{}{}", if is_keyword(&arg.name) { "_" } else { "" }, arg.name), Span::call_site(), ); let res = match arg.typ { Type::Uint => { if arg.enum_.is_some() { quote! { _args_array[#idx].u = #arg_name.to_raw(); } } else { quote! { _args_array[#idx].u = #arg_name; } } } Type::Int => { if arg.enum_.is_some() { quote! { _args_array[#idx].i = #arg_name.to_raw() as i32; } } else { quote! { _args_array[#idx].i = #arg_name; } } } Type::Fixed => quote! { _args_array[#idx].f = (#arg_name * 256.) as i32; }, Type::String => { let arg_variable = Ident::new(&format!("_arg_{}", j), Span::call_site()); if arg.allow_null { quote! { let #arg_variable = #arg_name.map(|s| ::std::ffi::CString::new(s).unwrap()); _args_array[#idx].s = #arg_variable.map(|s| s.as_ptr()).unwrap_or(::std::ptr::null()); } } else { quote! 
{ let #arg_variable = ::std::ffi::CString::new(#arg_name).unwrap(); _args_array[#idx].s = #arg_variable.as_ptr(); } } } Type::Array => { let arg_variable = Ident::new(&format!("_arg_{}", j), Span::call_site()); if arg.allow_null { quote! { let #arg_variable = #arg_name.as_ref().map(|vec| wl_array { size: vec.len(), alloc: vec.capacity(), data: vec.as_ptr() as *mut _, }); _args_array[#idx].a = #arg_variable .as_ref() .map(|a| a as *const wl_array) .unwrap_or(::std::ptr::null()); } } else { quote! { let #arg_variable = wl_array { size: #arg_name.len(), alloc: #arg_name.capacity(), data: #arg_name.as_ptr() as *mut _, }; _args_array[#idx].a = &#arg_variable; } } } Type::Fd => quote! { _args_array[#idx].h = #arg_name; }, Type::Object => { if arg.allow_null { quote! { _args_array[#idx].o = #arg_name .map(|o| o.as_ref().c_ptr() as *mut _) .unwrap_or(::std::ptr::null_mut()); } } else { quote! { _args_array[#idx].o = #arg_name.as_ref().c_ptr() as *mut _; } } } Type::NewId => { if arg.interface.is_some() { quote! { _args_array[#idx].o = #arg_name.as_ref().c_ptr() as *mut _; } } else { assert!( side != Side::Server, "Cannot serialize anonymous NewID from server." ); // The arg is actually (string, uint, NULL) let arg_variable = Ident::new(&format!("_arg_{}_s", j), Span::call_site()); let idx1 = Literal::usize_unsuffixed(j + 1); let idx2 = Literal::usize_unsuffixed(j + 2); let res = quote! { let #arg_variable = ::std::ffi::CString::new(#arg_name.0).unwrap(); _args_array[#idx].s = #arg_variable.as_ptr(); _args_array[#idx1].u = #arg_name.1; _args_array[#idx2].o = ::std::ptr::null_mut(); }; j += 2; res } } Type::Destructor => panic!("An argument cannot have type \"destructor\"."), }; j += 1; res }); let idx = Literal::u32_unsuffixed(i as u32); quote! { #pattern => { let mut _args_array: [wl_argument; #buffer_len] = unsafe { ::std::mem::zeroed() }; #(#args_array_init_stmts)* f(#idx, &mut _args_array) } } }); quote! 
{ match self { #name::__nonexhaustive => unreachable!(), #(#match_arms,)* } } }; quote! { unsafe fn from_raw_c( obj: *mut ::std::os::raw::c_void, opcode: u32, args: *const wl_argument, ) -> Result<#name, ()> { #from_raw_c_body } fn as_raw_c_in<F, T>(self, f: F) -> T where F: FnOnce(u32, &mut [wl_argument]) -> T { #as_raw_c_in_body } } } fn interface_c_addon(low_name: &str) -> TokenStream { let iface_name = Ident::new(&format!("{}_interface", low_name), Span::call_site()); quote! { fn c_interface() -> *const wl_interface { unsafe { &#iface_name } } } }
40.804469
127
0.369067
bfb358143165789f052da6fd04e4cb7941fb26be
2,102
use crate::{db::models::keys::OriginPrivateSigningKey,
            server::error::{Error,
                            Result}};
use builder_core::crypto;
use diesel::pg::PgConnection;
use biome_core::crypto::keys::{Key, KeyCache};
use std::time::Instant;

pub mod encrypt_secret_keys;

// This value was arbitrarily chosen and might need some tuning
const KEY_MIGRATION_CHUNK_SIZE: i64 = 100;

/// One-shot migration: encrypts every origin private signing key that is
/// still stored in plaintext, using the latest builder key from `key_cache`.
///
/// Keys are processed in chunks of `KEY_MIGRATION_CHUNK_SIZE`, paginated by
/// the last seen row id, so the migration does not load the whole table at
/// once. Rows that already carry an `encryption_key_rev` are skipped.
///
/// # Errors
/// Returns an error if the builder key cannot be obtained from the cache or
/// if any database query/update fails (wrapped as `Error::DieselError`).
pub fn migrate_to_encrypted(conn: &PgConnection, key_cache: &KeyCache) -> Result<()> {
    let start_time = Instant::now();
    let mut updated_keys = 0;
    let mut skipped_keys = 0;
    // Encrypt everything with the most recent builder key revision.
    let builder_secret_key = key_cache.latest_builder_key()?;
    // Cursor for keyset pagination; advanced to the last row id seen in each
    // chunk. NOTE(review): assumes `list_unencrypted` returns rows with ids
    // strictly greater than `next_id` in ascending order — confirm against
    // the query, otherwise the last row of a chunk could be re-fetched.
    let mut next_id: i64 = 0;
    loop {
        let skeys = OriginPrivateSigningKey::list_unencrypted(next_id, KEY_MIGRATION_CHUNK_SIZE, conn).map_err(Error::DieselError)?;
        warn!("migrate_to_encrypted found {}/{} keys requested", skeys.len(), KEY_MIGRATION_CHUNK_SIZE);
        // An empty chunk means we have walked past the last unencrypted row.
        if skeys.is_empty() {
            break;
        };
        for skey in skeys {
            next_id = skey.id;
            // Only rows without an encryption key revision still hold
            // plaintext bodies; everything else was already migrated.
            if skey.encryption_key_rev.is_none() {
                let unencrypted_key = skey.body;
                let (encrypted_key, _revision) = crypto::encrypt_with_key(&builder_secret_key, unencrypted_key);
                // Store the ciphertext together with the revision of the
                // builder key used, so it can be decrypted later.
                OriginPrivateSigningKey::update_key(skey.id, &encrypted_key, builder_secret_key.named_revision().revision(), conn).map_err(Error::DieselError)?;
                updated_keys += 1;
            } else {
                skipped_keys += 1;
            }
        }
    }
    warn!("migrate_to_encrypted complete in {} sec, updated {}, skipped {} as already updated", start_time.elapsed().as_secs_f64(), updated_keys, skipped_keys);
    Ok(())
}
36.877193
99
0.52236
fca3cc7b1300d02c8444332b33e4523d2350cd8b
11,613
extern crate core; mod test_util; use crate::test_util::TestServer; use beatrice::internal::{read_http_head, Head, HeadError, HttpError}; use beatrice::{AsciiString, Response}; use fixed_buffer::FixedBuf; use futures_lite::AsyncWriteExt; use safina_sync::Receiver; use safina_timer::sleep_for; use std::time::Duration; use test_util::{async_test, connected_streams}; #[test] fn request_line() { let server = TestServer::start(|_req| Response::new(200)).unwrap(); assert_eq!(server.exchange("").unwrap().as_str(), "",); assert_eq!( server.exchange("M / HTTP/1.1\r\n\r\n").unwrap().as_str(), "HTTP/1.1 200 OK\r\ncontent-length: 0\r\n\r\n", ); assert_eq!( server.exchange(" / HTTP/1.1\r\n\r\n").unwrap().as_str(), "HTTP/1.1 400 Bad Request\r\ncontent-type: text/plain; charset=UTF-8\r\ncontent-length: 31\r\n\r\nHttpError::MalformedRequestLine", ); } #[test] fn try_read_request_line() { Head::try_read(&mut FixedBuf::from(*b"M / HTTP/1.1\r\n\r\n")).unwrap(); assert_eq!( Err(HeadError::Truncated), Head::try_read(&mut <FixedBuf<10>>::new()) ); for (expected_err, req) in [ (HeadError::Truncated, ""), (HeadError::MalformedRequestLine, " / HTTP/1.1\r\n\r\n"), (HeadError::MalformedRequestLine, "M HTTP/1.1\r\n\r\n"), (HeadError::MalformedRequestLine, "M / \r\n\r\n"), (HeadError::Truncated, "M / HTTP/1.1\n\r\n"), (HeadError::Truncated, "M / HTTP/1.1\r\n\r"), ( HeadError::MalformedHeader, "M / HTTP/1.1\r\nM / HTTP/1.1\r\n\r\n", ), ] { let mut buf: FixedBuf<200> = FixedBuf::new(); buf.write_bytes(req).unwrap(); assert_eq!(Err(expected_err), Head::try_read(&mut buf), "{:?}", req,); } } #[test] fn try_read_method() { assert_eq!( "M", Head::try_read(&mut FixedBuf::from(*b"M / HTTP/1.1\r\n\r\n",)) .unwrap() .method ); // TODO: Check all valid method chars. 
} #[test] fn try_read_url() { assert_eq!( "/", Head::try_read(&mut FixedBuf::from(*b"M / HTTP/1.1\r\n\r\n",)) .unwrap() .url .path() ); assert_eq!( Err(HeadError::MalformedPath), Head::try_read(&mut FixedBuf::from(*b"M a HTTP/1.1\r\n\r\n",)) ); assert_eq!( Err(HeadError::MalformedRequestLine), Head::try_read(&mut FixedBuf::from(*b"M /\n HTTP/1.1\r\n\r\n",)) ); assert_eq!( Err(HeadError::MalformedRequestLine), Head::try_read(&mut FixedBuf::from(*b"M / HTTP/1.1\r\n\r\n",)) ); assert_eq!( Err(HeadError::MalformedRequestLine), Head::try_read(&mut FixedBuf::from(*b"M / / HTTP/1.1\r\n\r\n",)) ); } #[test] fn try_read_proto() { Head::try_read(&mut FixedBuf::from(*b"M / HTTP/1.1\r\n\r\n")).unwrap(); for req in [ "M / HTTP/1.0\r\n\r\n", "M / HTTP/1.2\r\n\r\n", "M / X\r\n\r\n", ] { let mut buf: FixedBuf<200> = FixedBuf::new(); buf.write_bytes(req).unwrap(); assert_eq!( Err(HeadError::UnsupportedProtocol), Head::try_read(&mut buf), "{:?}", req ); } } #[test] fn head_try_read_headers() { Head::try_read(&mut FixedBuf::from(*b"M / HTTP/1.1\r\na:b\r\n\r\n")).unwrap(); for (expected, req) in [ ( Err(HeadError::MalformedHeader), "M / HTTP/1.1\r\n:v\r\n\r\n", ), ( Err(HeadError::MalformedHeader), "M / HTTP/1.1\r\nav\r\n\r\n", ), (Ok(vec!["".to_string()]), "M / HTTP/1.1\r\na:\r\n\r\n"), (Ok(vec!["b".to_string()]), "M / HTTP/1.1\r\na:b\r\n\r\n"), ( Err(HeadError::MalformedHeader), "M / HTTP/1.1\r\n a:b\r\n\r\n", ), // Strips value whitespace. ( Ok(vec!["b".to_string()]), "M / HTTP/1.1\r\na: \t\rb\r\n\r\n", ), ( Ok(vec!["b".to_string()]), "M / HTTP/1.1\r\na:b \t\r\r\n\r\n", ), // Keeps last duplicate. 
( Ok(vec!["1".to_string(), "2".to_string()]), "M / HTTP/1.1\r\na:1\r\nA:2\r\n\r\n", ), // Extra newlines ( Err(HeadError::MalformedHeader), "M / HTTP/1.1\r\n\na:b\r\n\r\n", ), ( Err(HeadError::MalformedHeader), "M / HTTP/1.1\r\na:b\r\n\nc:d\r\n\r\n", ), ( Err(HeadError::MalformedHeader), "M / HTTP/1.1\r\na:b\r\n\n\r\n\r\n", ), // Extra carriage-returns ( Err(HeadError::MalformedHeader), "M / HTTP/1.1\r\n\ra:b\r\n\r\n", ), ( Ok(vec!["b".to_string()]), "M / HTTP/1.1\r\na:b\r\r\nc:d\r\n\r\n", ), ( Err(HeadError::MalformedHeader), "M / HTTP/1.1\r\na:b\r\n\rc:d\r\n\r\n", ), ( Err(HeadError::MalformedHeader), "M / HTTP/1.1\r\na:b\r\n\r\r\n\r\n", ), ( Ok(vec!["b".to_string()]), "M / HTTP/1.1\r\na:b\r\r\n\r\n\r\n", ), ] { let mut buf: FixedBuf<200> = FixedBuf::new(); buf.write_bytes(req).unwrap(); assert_eq!( expected, Head::try_read(&mut buf).map(|mut head| head .headers .remove_all("a") .into_iter() .map(AsciiString::into) .collect::<Vec<String>>()), "{:?}", req ); } // Lookups are case-insensitive. assert_eq!( Some("CCdd2"), Head::try_read(&mut FixedBuf::from(*b"M / HTTP/1.1\r\nAAbb1:CCdd2\r\n\r\n",)) .unwrap() .headers .get_only("aabb1") .map(AsciiString::as_str) ); // Accepts all valid header name symbols. // https://datatracker.ietf.org/doc/html/rfc7230#section-3.2 // header-field = field-name ":" OWS field-value OWS // field-name = token // field-value = *( field-content ) // field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] // field-vchar = VCHAR // OWS = *( SP / HTAB ) // token = 1*tchar // tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" // / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~" // / DIGIT / ALPHA // ; any VCHAR, except delimiters // VCHAR is any visible ASCII character. assert_eq!( Some("! \"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~"), Head::try_read(&mut FixedBuf::from( *(b"M / HTTP/1.1\r\n1#$%&'*+-.^_`|~0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ:! 
\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\r\n\r\n") )) .unwrap() .headers .get_only("1#$%&'*+-.^_`|~0123456789abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz") .map(AsciiString::as_str) ); // TODO: Add tests of non-ASCII chars in names and values. } #[test] fn head_try_read_reads() { let mut buf: FixedBuf<100> = FixedBuf::new(); buf.write_bytes("A /a HTTP/1.1\r\n\r\nB /b HTTP/1.1\r\n\r\n") .unwrap(); let head = Head::try_read(&mut buf).unwrap(); assert_eq!("A", head.method); assert_eq!("/a", head.url.path()); let head = Head::try_read(&mut buf).unwrap(); assert_eq!("B", head.method); assert_eq!("/b", head.url.path()); assert_eq!(Err(HeadError::Truncated), Head::try_read(&mut buf)); } async fn read_http_head_task() -> (async_net::TcpStream, Receiver<Result<Head, HttpError>>) { let (mut stream0, stream1) = connected_streams().await; let (sender, receiver) = safina_sync::sync_channel(5); safina_executor::spawn(async move { loop { let result = read_http_head(&mut <FixedBuf<1000>>::new(), &mut stream0).await; let result_is_err = result.is_err(); if sender.send(result).is_err() || result_is_err { break; } } }); (stream1, receiver) } #[test] fn read_http_head_ok() { async_test(async { let (mut stream, mut receiver) = read_http_head_task().await; stream.write_all(b"M / HTTP/1.1\r\n\r\n").await.unwrap(); let head = receiver.async_recv().await.unwrap().unwrap(); assert_eq!("M", head.method); assert_eq!("/", head.url.path()); assert!(head.headers.is_empty()); }); } #[test] fn read_http_head_error() { async_test(async { let (mut stream, mut receiver) = read_http_head_task().await; stream.write_all(b"M / BADPROTO\r\n\r\n").await.unwrap(); assert_eq!( Err(HttpError::UnsupportedProtocol), receiver.async_recv().await.unwrap() ); }); } #[test] fn read_http_head_too_long() { async_test(async { let (mut stream, mut receiver) = read_http_head_task().await; stream.write_all(&[b'a'; 10_000]).await.unwrap(); assert_eq!( 
Err(HttpError::HeadTooLong), receiver.async_recv().await.unwrap() ); }); } #[test] fn read_http_head_truncated() { async_test(async { let (mut stream, mut receiver) = read_http_head_task().await; stream.write_all(b"M / HTTP/1.1\r\n\r").await.unwrap(); drop(stream); assert_eq!( Err(HttpError::Truncated), receiver.async_recv().await.unwrap() ); }); } #[test] fn read_http_head_multiple_writes() { async_test(async { let (mut stream, mut receiver) = read_http_head_task().await; stream.write_all(b"M / HTTP/1.1\r\n").await.unwrap(); stream.flush().await.unwrap(); sleep_for(Duration::from_millis(100)).await; stream.write_all(b"\r\n").await.unwrap(); stream.flush().await.unwrap(); let head = receiver.async_recv().await.unwrap().unwrap(); assert_eq!("M", head.method); assert_eq!("/", head.url.path()); assert!(head.headers.is_empty()); }); } #[test] fn read_http_head_subsequent() { async_test(async { let (mut stream, mut receiver) = read_http_head_task().await; stream.write_all(b"M /1 HTTP/1.1\r\n\r\n").await.unwrap(); assert_eq!( "/1", receiver.async_recv().await.unwrap().unwrap().url.path() ); stream.write_all(b"M /2 HTTP/1.1\r\n\r\n").await.unwrap(); assert_eq!( "/2", receiver.async_recv().await.unwrap().unwrap().url.path() ); drop(stream); assert_eq!( Err(HttpError::Disconnected), receiver.async_recv().await.unwrap() ); receiver.async_recv().await.unwrap_err(); }); } #[test] fn head_derive() { let head1 = Head::try_read(&mut FixedBuf::from( *b"A /1 HTTP/1.1\r\nH1: V1\r\nh2:v2\r\n\r\n", )) .unwrap(); let head2 = Head::try_read(&mut FixedBuf::from(*b"B /2 HTTP/1.1\r\n\r\n")).unwrap(); // Clone let head1_clone = head1.clone(); // Eq, PartialEq assert_eq!(head1, head1_clone); assert_ne!(head1, head2); // Debug assert_eq!( "Head{method=\"A\", path=\"/1\", headers={H1: \"V1\", h2: \"v2\"}}", format!("{:?}", head1).as_str() ); }
31.991736
217
0.520882
1dc172f65bcd8d41dbb7079d049fcac874e5a12e
11,269
// Copyright (c) SimpleStaking and Tezedge Contributors
// SPDX-License-Identifier: MIT

//! Listens for events from the `protocol_runner`.

use std::convert::TryFrom;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::thread;
use std::thread::JoinHandle;
use std::time::Duration;

use failure::Error;
use riker::actors::*;
use slog::{crit, debug, info, warn, Logger};

use crypto::hash::{BlockHash, ContextHash, FromBytesError, HashType};
use storage::context::{ContextApi, TezedgeContext};
use storage::persistent::{ActionRecorder, PersistentStorage};
use storage::BlockStorage;
use tezos_context::channel::{ContextAction, ContextActionMessage};
use tezos_wrapper::service::IpcEvtServer;

use crate::shell_channel::{ShellChannelMsg, ShellChannelRef};
use crate::subscription::subscribe_to_shell_shutdown;

/// Join handle of the background listener thread, shared behind a mutex so
/// `post_stop` can take it out and join the thread on actor shutdown.
type SharedJoinHandle = Arc<Mutex<Option<JoinHandle<Result<(), Error>>>>>;

/// This actor listens for events generated by the `protocol_runner`.
#[actor(ShellChannelMsg)]
pub struct ContextListener {
    /// Just for subscribing to shell shutdown channel
    shell_channel: ShellChannelRef,
    /// Thread where blocks are applied will run until this is set to `false`
    listener_run: Arc<AtomicBool>,
    /// Context event listener thread
    listener_thread: SharedJoinHandle,
}

/// Reference to [context listener](ContextListener) actor.
pub type ContextListenerRef = ActorRef<ContextListenerMsg>;

impl ContextListener {
    // TODO: if needed, can go to cfg
    const IPC_ACCEPT_TIMEOUT: Duration = Duration::from_secs(3);

    /// Create new actor instance.
    ///
    /// This actor spawns a new thread in which it listens for incoming events from the `protocol_runner`.
    /// Events are received from IPC channel provided by [`event_server`](IpcEvtServer).
    pub fn actor(
        sys: &impl ActorRefFactory,
        shell_channel: ShellChannelRef,
        persistent_storage: &PersistentStorage,
        action_store_backend: Vec<Box<dyn ActionRecorder + Send>>,
        mut event_server: IpcEvtServer,
        log: Logger,
    ) -> Result<ContextListenerRef, CreateError> {
        let listener_run = Arc::new(AtomicBool::new(true));
        let block_applier_thread = {
            let listener_run = listener_run.clone();
            let persistent_storage = persistent_storage.clone();

            thread::spawn(move || -> Result<(), Error> {
                let mut context: Box<dyn ContextApi> = Box::new(TezedgeContext::new(
                    BlockStorage::new(&persistent_storage),
                    persistent_storage.merkle(),
                ));
                // Rebind as mutable so the recorders can be passed by `&mut` below.
                let mut action_store_backend = action_store_backend;
                // Keep re-accepting protocol-runner connections until shutdown is
                // requested (e.g. the protocol runner may be restarted).
                while listener_run.load(Ordering::Acquire) {
                    match listen_protocol_events(
                        &listener_run,
                        &mut event_server,
                        Self::IPC_ACCEPT_TIMEOUT,
                        &mut action_store_backend,
                        &mut context,
                        &log,
                    ) {
                        Ok(()) => info!(log, "Context listener finished"),
                        Err(err) => {
                            // Only log as critical if we were not asked to stop;
                            // errors during shutdown are expected.
                            if listener_run.load(Ordering::Acquire) {
                                crit!(log, "Error process context event"; "reason" => format!("{:?}", err))
                            }
                        }
                    }
                }

                info!(log, "Context listener thread finished");
                Ok(())
            })
        };

        let myself = sys.actor_of_props::<ContextListener>(
            ContextListener::name(),
            Props::new_args((
                shell_channel,
                listener_run,
                Arc::new(Mutex::new(Some(block_applier_thread))),
            )),
        )?;

        Ok(myself)
    }

    /// The `ContextListener` is intended to serve as a singleton actor so that's why
    /// we won't support multiple names per instance.
    fn name() -> &'static str {
        "context-listener"
    }
}

impl ActorFactoryArgs<(ShellChannelRef, Arc<AtomicBool>, SharedJoinHandle)> for ContextListener {
    fn create_args(
        (shell_channel, listener_run, listener_thread): (
            ShellChannelRef,
            Arc<AtomicBool>,
            SharedJoinHandle,
        ),
    ) -> Self {
        ContextListener {
            shell_channel,
            listener_run,
            listener_thread,
        }
    }
}

impl Actor for ContextListener {
    type Msg = ContextListenerMsg;

    fn pre_start(&mut self, ctx: &Context<Self::Msg>) {
        subscribe_to_shell_shutdown(&self.shell_channel, ctx.myself());
    }

    fn post_stop(&mut self) {
        // Signal the listener loop to stop, then wait for the thread to exit.
        self.listener_run.store(false, Ordering::Release);

        let _ = self
            .listener_thread
            .lock()
            .unwrap()
            .take()
            .expect("Thread join handle is missing")
            .join()
            .expect("Failed to join context listener thread");
    }

    fn recv(&mut self, ctx: &Context<Self::Msg>, msg: Self::Msg, sender: Sender) {
        self.receive(ctx, msg, sender);
    }
}

impl Receive<ShellChannelMsg> for ContextListener {
    type Msg = ContextListenerMsg;

    fn receive(&mut self, _: &Context<Self::Msg>, msg: ShellChannelMsg, _sender: Sender) {
        if let ShellChannelMsg::ShuttingDown(_) = msg {
            self.listener_run.store(false, Ordering::Release);
        }
    }
}

/// Accepts one IPC connection from the protocol runner and processes context
/// events from it until `Shutdown` is received, an IPC error occurs, or
/// `apply_block_run` is cleared. Returns `Ok(())` on a clean disconnect.
fn listen_protocol_events(
    apply_block_run: &AtomicBool,
    event_server: &mut IpcEvtServer,
    event_server_accept_timeout: Duration,
    action_store_backend: &mut Vec<Box<dyn ActionRecorder + Send>>,
    context: &mut Box<dyn ContextApi>,
    log: &Logger,
) -> Result<(), Error> {
    info!(
        log,
        "Context listener is waiting for connection from protocol runner"
    );
    let mut rx = event_server.try_accept(event_server_accept_timeout)?;
    info!(
        log,
        "Context listener received connection from protocol runner. Starting to process context events."
    );

    let mut event_count = 0;

    while apply_block_run.load(Ordering::Acquire) {
        match rx.receive() {
            Ok(ContextActionMessage {
                action: ContextAction::Shutdown,
                ..
            }) => {
                // when we receive shutting down, it means just that protocol runner disconnected
                // we dont want to stop context listener here, for example, because we are just restarting protocol runner
                // and we want to wait for a new one to try_accept
                // if we want to shutdown context listener, there is ShellChannelMsg for that
                break;
            }
            Ok(msg) => {
                // Log progress every 100 events to avoid log spam.
                if event_count % 100 == 0 {
                    debug!(
                        log,
                        "Received protocol event";
                        "count" => event_count,
                        "context_hash" => match &context.get_last_commit_hash() {
                            None => "-none-".to_string(),
                            Some(c) => HashType::ContextHash.hash_to_b58check(c)?
                        }
                    );
                }
                // Reset the counter when a Shutdown action slips through; otherwise count up.
                event_count = if let ContextAction::Shutdown = &msg.action {
                    0
                } else {
                    event_count + 1
                };

                if msg.record {
                    // record action in the order they are really coming
                    for recorder in action_store_backend.iter_mut() {
                        // Recording failures are logged but do not abort processing.
                        if let Err(error) = recorder.record(&msg) {
                            warn!(log, "Failed to store context action"; "action" => format!("{:?}", &msg.action), "reason" => format!("{}", error));
                        }
                    }
                }
                if msg.perform {
                    perform_context_action(&msg.action, context)?;
                }
            }
            Err(err) => {
                warn!(log, "Failed to receive event from protocol runner"; "reason" => format!("{:?}", err));
                break;
            }
        }
    }

    Ok(())
}

/// Converts an optional raw byte hash into an optional typed hash,
/// propagating the conversion error if the bytes are invalid.
fn try_from_untyped_option<H>(h: &Option<Vec<u8>>) -> Result<Option<H>, FromBytesError>
where
    H: TryFrom<Vec<u8>, Error = FromBytesError>,
{
    h.as_ref()
        .map(|h| H::try_from(h.clone()))
        .map_or(Ok(None), |r| r.map(Some))
}

/// Replays a single recorded `ContextAction` against the local context,
/// mirroring the operation the protocol runner performed on its side.
fn perform_context_action(
    action: &ContextAction,
    context: &mut Box<dyn ContextApi>,
) -> Result<(), Error> {
    match action {
        ContextAction::Get { key, .. } => {
            context.get_key(key)?;
        }
        ContextAction::Mem { key, .. } => {
            context.mem(key)?;
        }
        ContextAction::DirMem { key, .. } => {
            context.dirmem(key)?;
        }
        ContextAction::Set {
            key,
            value,
            context_hash,
            ..
        } => {
            let context_hash = try_from_untyped_option(context_hash)?;
            context.set(&context_hash, key, value)?;
        }
        ContextAction::Copy {
            to_key: key,
            from_key,
            context_hash,
            ..
        } => {
            let context_hash = try_from_untyped_option(context_hash)?;
            context.copy_to_diff(&context_hash, from_key, key)?;
        }
        ContextAction::Delete {
            key, context_hash, ..
        } => {
            let context_hash = try_from_untyped_option(context_hash)?;
            context.delete_to_diff(&context_hash, key)?;
        }
        ContextAction::RemoveRecursively {
            key, context_hash, ..
        } => {
            let context_hash = try_from_untyped_option(context_hash)?;
            context.remove_recursively_to_diff(&context_hash, key)?;
        }
        ContextAction::Commit {
            parent_context_hash,
            new_context_hash,
            block_hash: Some(block_hash),
            author,
            message,
            date,
            ..
        } => {
            let parent_context_hash = try_from_untyped_option(parent_context_hash)?;
            let block_hash = BlockHash::try_from(block_hash.clone())?;
            let new_context_hash = ContextHash::try_from(new_context_hash.clone())?;
            let hash = context.commit(
                &block_hash,
                &parent_context_hash,
                author.to_string(),
                message.to_string(),
                *date,
            )?;
            // The locally recomputed commit hash must match the one the
            // protocol runner produced, otherwise the contexts diverged.
            assert_eq!(
                &hash, &new_context_hash,
                "Invalid context_hash for block: {}, expected: {}, but was: {}",
                block_hash.to_base58_check(),
                new_context_hash.to_base58_check(),
                hash.to_base58_check(),
            );
        }
        ContextAction::Checkout { context_hash, .. } => {
            context.checkout(&ContextHash::try_from(context_hash.clone())?)?;
        }
        ContextAction::Commit { .. } => (), // Ignored (no block_hash)
        ContextAction::Fold { .. } => (),   // Ignored
        ContextAction::Shutdown => (),      // Ignored
    };

    Ok(())
}
33.638806
149
0.547342
e5e791a33c7b2359100b5ec19dd4e17808e218d9
5,748
use reference_parser::{BibleReference, BibleVerse, BibleVersePart, Book}; use serde::{Deserialize, Serialize}; use crate::Reference; /// Represents an entire psalm #[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize)] pub struct Psalm { /// The psalm number (e.g., 8 for Psalm 8) pub number: u8, /// Present when only a subset of verses should be displayed pub citation: Option<String>, /// The content of the psalm, by section pub sections: Vec<PsalmSection>, } impl Psalm { #[must_use] pub fn citation(mut self, citation: impl std::fmt::Display) -> Self { self.citation = Some(citation.to_string()); self } /// Returns only the verses and sections of a psalm that are included in its citation. /// ``` /// # use psalter::bcp1979::*; /// # use reference_parser::BibleReference; /// // simple filtering within a single-section psalm /// let mut psalm_1 = PSALM_1.clone(); /// psalm_1.citation = Some(String::from("Psalm 1:1-4")); /// assert_eq!(psalm_1.filtered_sections().len(), 1); /// assert_eq!(psalm_1.filtered_sections()[0].verses.len(), 4); /// /// // filtering across multiple sections of a single psalm with a single citation /// let mut psalm_119 = PSALM_119.clone(); /// psalm_119.citation = Some(String::from("Psalm 119:145-176")); /// assert_eq!(psalm_119.filtered_sections().len(), 4); /// assert_eq!(psalm_119.filtered_sections()[0].verses.len(), 8); /// assert_eq!(psalm_119.filtered_sections()[0].local_name, "Qoph"); /// assert_eq!(psalm_119.filtered_sections()[0].verses[0].a, "I call with my whole heart; *"); /// /// // filtering with comma /// let mut psalm_116 = PSALM_116.clone(); /// psalm_116.citation = Some(String::from("Psalm 116:1, 10-17")); /// assert_eq!(psalm_116.filtered_sections().len(), 1); /// assert_eq!(psalm_116.filtered_sections()[0].verses.len(), 9); /// ``` pub fn filtered_sections(&self) -> Vec<PsalmSection> { let citation = self.citation.as_ref().map(BibleReference::from); if let Some(citation) = citation { self.sections .iter() 
.map(|section| PsalmSection { reference: section.reference, local_name: section.local_name.clone(), latin_name: section.latin_name.clone(), verses: section .verses .iter() .filter_map(|verse| { let contains_a = citation.contains(BibleVerse { book: Book::Psalms, chapter: self.number as u16, verse: verse.number as u16, verse_part: BibleVersePart::A, }); let contains_b = citation.contains(BibleVerse { book: Book::Psalms, chapter: self.number as u16, verse: verse.number as u16, verse_part: BibleVersePart::B, }); match (contains_a, contains_b) { (true, true) => Some(PsalmVerse { number: verse.number, a: verse.a.clone(), b: verse.b.clone(), }), (true, false) => Some(PsalmVerse { number: verse.number, a: verse.a.clone(), b: "".into(), }), (false, true) => Some(PsalmVerse { number: verse.number, a: "".into(), b: verse.b.clone(), }), (false, false) => None, } }) .collect(), }) .filter(|section| !section.verses.is_empty()) .collect() } else { self.sections.clone() } } } #[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize)] pub struct PsalmSection { /// Reference to e.g., a BCP page pub reference: Reference, /// Name for the section in the psalm's own language (e.g., "Part I" or "Aleph") pub local_name: String, /// Latin name for the section (e.g., "Beatus vir qui non abiit") pub latin_name: String, /// The set of verses included in this section pub verses: Vec<PsalmVerse>, } impl PsalmSection { /// Verse number of the first verse in this section pub fn first_verse(&self) -> u8 { self.verses.first().map_or(0, |verse| verse.number) } /// Verse number of the last verse in this section pub fn last_verse(&self) -> u8 { self.verses.last().map_or(0, |verse| verse.number) } /// Tests whether this section includes the given verse number pub fn includes_verse(&self, verse_number: u8) -> bool { self.verses.iter().any(|verse| verse.number == verse_number) } } #[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize)] pub struct PsalmVerse { /// Verse number pub 
number: u8, /// Text of the first half of the verse, up to the asterisk pub a: String, /// Text of the second half of the verse, after the asterisk pub b: String, }
40.765957
98
0.510438
628642fc0466525a38242ffaa9fb51b3c76a7bcc
783
use proc_macro2::TokenStream;

use super::super::{DeclGenerics, FuncIdent};

/// Emits the CUDA-side (`target_os = "cuda"`) version of a generic kernel
/// function: the original attributes, identifier, kernel generics, inputs,
/// where-clause and body are stitched back together behind a `#[cfg]` gate.
pub(in super::super) fn quote_cuda_generic_function(
    generics: &DeclGenerics,
    func_inputs: &syn::punctuated::Punctuated<syn::FnArg, syn::token::Comma>,
    func: &FuncIdent,
    func_attrs: &[syn::Attribute],
    func_block: &syn::Block,
) -> TokenStream {
    // Bind the individual declaration pieces so they can be interpolated
    // into the `quote!` invocation below.
    let generic_start_token = &generics.generic_start_token;
    let generic_params = &generics.generic_kernel_params;
    let generic_close_token = &generics.generic_close_token;
    let generic_where_clause = &generics.generic_kernel_where_clause;
    let func_ident = &func.func_ident;

    quote! {
        #[cfg(target_os = "cuda")]
        #(#func_attrs)*
        fn #func_ident #generic_start_token #generic_params #generic_close_token (#func_inputs)
            #generic_where_clause
        #func_block
    }
}
30.115385
95
0.660281
bbd195eeb3345754aadeb9a1e2aea38a965a4014
4,223
use super::{compute_arr_presc, Error, Event, FTimer, Instance, Timer};
use core::ops::{Deref, DerefMut};
use fugit::HertzU32 as Hertz;

/// Hardware timers
pub struct CounterHz<TIM>(pub(super) Timer<TIM>);

impl<T> Deref for CounterHz<T> {
    type Target = Timer<T>;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl<T> DerefMut for CounterHz<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

impl<TIM: Instance> CounterHz<TIM> {
    /// Releases the TIM peripheral
    pub fn release(mut self) -> Timer<TIM> {
        // stop timer
        self.tim.cr1_reset();
        self.0
    }
}

impl<TIM: Instance> CounterHz<TIM> {
    /// Restarts the counter so it overflows at the given frequency.
    ///
    /// Computes prescaler/auto-reload from the timer clock, loads them via an
    /// update event, then re-enables the counter.
    pub fn start(&mut self, timeout: Hertz) -> Result<(), Error> {
        // pause
        self.tim.disable_counter();
        // reset counter
        self.tim.reset_counter();

        let (psc, arr) = compute_arr_presc(timeout.raw(), self.clk.raw());
        self.tim.set_prescaler(psc);
        self.tim.set_auto_reload(arr)?;

        // Trigger update event to load the registers
        self.tim.trigger_update();

        // start counter
        self.tim.enable_counter();

        Ok(())
    }

    /// Non-blocking wait: returns `Ok(())` once the update (overflow) flag is
    /// set (and clears it), otherwise `nb::Error::WouldBlock`.
    pub fn wait(&mut self) -> nb::Result<(), Error> {
        if self.tim.get_interrupt_flag().contains(Event::Update) {
            self.tim.clear_interrupt_flag(Event::Update);
            Ok(())
        } else {
            Err(nb::Error::WouldBlock)
        }
    }

    /// Stops the counter; errors with `Error::Disabled` if it was not running.
    pub fn cancel(&mut self) -> Result<(), Error> {
        if !self.tim.is_counter_enabled() {
            return Err(Error::Disabled);
        }

        // disable counter
        self.tim.disable_counter();
        Ok(())
    }
}

use fugit::{TimerDurationU32, TimerInstantU32};

/// Periodic non-blocking timer that implements [embedded_hal::timer::CountDown]
pub struct Counter<TIM, const FREQ: u32>(pub(super) FTimer<TIM, FREQ>);

impl<T, const FREQ: u32> Deref for Counter<T, FREQ> {
    type Target = FTimer<T, FREQ>;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl<T, const FREQ: u32> DerefMut for Counter<T, FREQ> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

/// `Counter` with precision of 1 μs (1 MHz sampling)
pub type CounterUs<TIM> = Counter<TIM, 1_000_000>;

/// `Counter` with precision of of 1 ms (1 kHz sampling)
///
/// NOTE: don't use this if your system frequency is more than 65 MHz
pub type CounterMs<TIM> = Counter<TIM, 1_000>;

impl<TIM: Instance, const FREQ: u32> Counter<TIM, FREQ> {
    /// Releases the TIM peripheral
    pub fn release(mut self) -> FTimer<TIM, FREQ> {
        // stop counter
        self.tim.cr1_reset();
        self.0
    }

    /// Current counter value as an instant at the timer's tick frequency.
    pub fn now(&self) -> TimerInstantU32<FREQ> {
        TimerInstantU32::from_ticks(self.tim.read_count().into())
    }

    /// Restarts the counter so it overflows after `timeout` ticks.
    pub fn start(&mut self, timeout: TimerDurationU32<FREQ>) -> Result<(), Error> {
        // pause
        self.tim.disable_counter();
        // reset counter
        self.tim.reset_counter();

        // auto-reload is ticks - 1 because the period includes the 0 count
        self.tim.set_auto_reload(timeout.ticks() - 1)?;

        // Trigger update event to load the registers
        self.tim.trigger_update();

        // start counter
        self.tim.enable_counter();

        Ok(())
    }

    /// Non-blocking wait: `Ok(())` once the update (overflow) flag is set
    /// (and clears it), otherwise `nb::Error::WouldBlock`.
    pub fn wait(&mut self) -> nb::Result<(), Error> {
        if self.tim.get_interrupt_flag().contains(Event::Update) {
            self.tim.clear_interrupt_flag(Event::Update);
            Ok(())
        } else {
            Err(nb::Error::WouldBlock)
        }
    }

    /// Stops the counter; errors with `Error::Disabled` if it was not running.
    pub fn cancel(&mut self) -> Result<(), Error> {
        if !self.tim.is_counter_enabled() {
            return Err(Error::Disabled);
        }

        // disable counter
        self.tim.disable_counter();
        Ok(())
    }
}

// Adapter so the counter can be used through the generic `fugit_timer::Timer`
// trait; each method simply forwards to the inherent implementation above.
impl<TIM: Instance, const FREQ: u32> fugit_timer::Timer<FREQ> for Counter<TIM, FREQ> {
    type Error = Error;

    fn now(&mut self) -> TimerInstantU32<FREQ> {
        Self::now(self)
    }

    fn start(&mut self, duration: TimerDurationU32<FREQ>) -> Result<(), Self::Error> {
        self.start(duration)
    }

    fn cancel(&mut self) -> Result<(), Self::Error> {
        self.cancel()
    }

    fn wait(&mut self) -> nb::Result<(), Self::Error> {
        self.wait()
    }
}
25.907975
86
0.578735
f85283edb2c704799a9e22283ff226d24ccf3f98
734
extern crate flif;

use std::fs::File;
use std::io::BufReader;

use flif::Error;
use flif::Flif;
use flif::Limits;

/// Decoding with a deliberately tiny MANIAC-tree budget must be rejected
/// with a `LimitViolation` that mentions the maniac tree.
#[test]
fn maniac_size_limit() {
    let reader = BufReader::new(File::open("../resources/sea_snail.flif").unwrap());

    // Default limits, tightened so the test image's maniac tree exceeds them.
    let mut decode_limits: Limits = Default::default();
    decode_limits.maniac_nodes = 16;

    let result = Flif::decode_with_limits(reader, decode_limits);
    match result {
        // Expected outcome: the limit is enforced and reported.
        Err(Error::LimitViolation(ref message)) if message.contains("maniac") => {}
        Err(other) => panic!(
            "Expected an Error::LimitViolation indicating the maniac tree was too large, got {:?}",
            other
        ),
        _ => panic!("Expected an Error::LimitViolation indicating the maniac tree was too large, got a valid image instead")
    }
}
30.583333
124
0.643052
4af26e19b370c08e8ec33c3a236cbfc3fe86b6ef
118,995
use crate::session::{self, DataTypeKind};
use crate::ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions};

use syntax::ast::{self, Ident, IntTy, UintTy};
use syntax::attr;
use syntax_pos::DUMMY_SP;

use std::cmp;
use std::fmt;
use std::i128;
use std::iter;
use std::mem;
use std::ops::Bound;

use crate::hir;
use crate::ich::StableHashingContext;
use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
use crate::ty::GeneratorSubsts;
use crate::ty::subst::Subst;
use rustc_data_structures::bit_set::BitSet;
use rustc_data_structures::indexed_vec::{IndexVec, Idx};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher, StableHasherResult};

pub use rustc_target::abi::*;
use rustc_target::spec::{HasTargetSpec, abi::Abi as SpecAbi};
use rustc_target::abi::call::{
    ArgAttribute, ArgAttributes, ArgType, Conv, FnType, IgnoreMode, PassMode, Reg, RegKind
};

/// Extension methods on `rustc_target`'s `Integer`, tying it to `Ty` and
/// discriminant representation decisions.
pub trait IntegerExt {
    /// Maps the integer width (plus signedness) to the corresponding Rust type.
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}

impl IntegerExt for Integer {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }

    /// Gets the Integer type from an attr::IntType.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
            attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
            attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
            attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
            attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
            // isize/usize resolve to the target's pointer-sized integer.
            attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }

    /// Finds the appropriate Integer type and signedness for the given
    /// signed discriminant range and #[repr] attribute.
    /// N.B.: u128 values above i128::MAX will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        let mut min_from_extern = None;
        let min_default = I8;

        // An explicit #[repr(intN)] fixes the discriminant type outright;
        // it is a bug for the hint to be smaller than the required range.
        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!("Integer::repr_discr: `#[repr]` hint too small for \
                      discriminant range of enum `{}", ty)
            }
            return (discr, ity.is_signed());
        }

        if repr.c() {
            match &tcx.sess.target.target.arch[..] {
                // WARNING: the ARM EABI has two variants; the one corresponding
                // to `at_least == I32` appears to be used on Linux and NetBSD,
                // but some systems may use the variant corresponding to no
                // lower bound. However, we don't run on those yet...?
                "arm" => min_from_extern = Some(I32),
                _ => min_from_extern = Some(I32),
            }
        }

        let at_least = min_from_extern.unwrap_or(min_default);

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}

/// Extension mapping a layout `Primitive` back to the Rust type it models.
pub trait PrimitiveExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Float(FloatTy::F32) => tcx.types.f32,
            Float(FloatTy::F64) => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }
}

/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;

#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>)
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => {
                write!(f, "the type `{:?}` has an unknown layout", ty)
            }
            LayoutError::SizeOverflow(ty) => {
                write!(f, "the type `{:?}` is too big for the current architecture", ty)
            }
        }
    }
}

/// Computes the layout of a type; registered as the `layout_raw` query
/// provider (see `provide` below). Guards against infinitely recursive
/// types by tracking `layout_depth` in the thread-local context.
fn layout_raw<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
    ty::tls::with_related_context(tcx, move |icx| {
        let rec_limit = *tcx.sess.recursion_limit.get();
        let (param_env, ty) = query.into_parts();

        // Abort (fatal, not Err) when recursion exceeds the session limit.
        if icx.layout_depth > rec_limit {
            tcx.sess.fatal(
                &format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt {
            layout_depth: icx.layout_depth + 1,
            ..icx.clone()
        };

        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            let layout = cx.layout_raw_uncached(ty);
            // Type-level uninhabitedness should always imply ABI uninhabitedness.
            if let Ok(layout) = layout {
                if ty.conservative_is_privately_uninhabited(tcx) {
                    assert!(layout.abi.is_uninhabited());
                }
            }
            layout
        })
    })
}

pub fn provide(providers: &mut ty::query::Providers<'_>) {
    *providers = ty::query::Providers {
        layout_raw,
        ..*providers
    };
}

/// Bundles the context (`tcx`) and parameter environment needed while
/// computing layouts.
pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}

#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}

// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order) // and `inverse_memory_index` (memory order to source field order). // See also `FieldPlacement::Arbitrary::memory_index` for more details. // FIXME(eddyb) build a better abstraction for permutations, if possible. fn invert_mapping(map: &[u32]) -> Vec<u32> { let mut inverse = vec![0; map.len()]; for i in 0..map.len() { inverse[map[i] as usize] = i as u32; } inverse } impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> { fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutDetails { let dl = self.data_layout(); let b_align = b.value.align(dl); let align = a.value.align(dl).max(b_align).max(dl.aggregate_align); let b_offset = a.value.size(dl).align_to(b_align.abi); let size = (b_offset + b.value.size(dl)).align_to(align.abi); LayoutDetails { variants: Variants::Single { index: VariantIdx::new(0) }, fields: FieldPlacement::Arbitrary { offsets: vec![Size::ZERO, b_offset], memory_index: vec![0, 1] }, abi: Abi::ScalarPair(a, b), align, size } } fn univariant_uninterned(&self, ty: Ty<'tcx>, fields: &[TyLayout<'_>], repr: &ReprOptions, kind: StructKind) -> Result<LayoutDetails, LayoutError<'tcx>> { let dl = self.data_layout(); let packed = repr.packed(); if packed && repr.align > 0 { bug!("struct cannot be packed and aligned"); } let pack = Align::from_bytes(repr.pack as u64).unwrap(); let mut align = if packed { dl.i8_align } else { dl.aggregate_align }; let mut sized = true; let mut offsets = vec![Size::ZERO; fields.len()]; let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect(); let mut optimize = !repr.inhibit_struct_field_reordering_opt(); if let StructKind::Prefixed(_, align) = kind { optimize &= align.bytes() == 1; } if optimize { let end = if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() }; let optimizing = &mut inverse_memory_index[..end]; let field_align = |f: &TyLayout<'_>| { if packed { f.align.abi.min(pack) } else { f.align.abi } }; match 
kind { StructKind::AlwaysSized | StructKind::MaybeUnsized => { optimizing.sort_by_key(|&x| { // Place ZSTs first to avoid "interesting offsets", // especially with only one or two non-ZST fields. let f = &fields[x as usize]; (!f.is_zst(), cmp::Reverse(field_align(f))) }); } StructKind::Prefixed(..) => { optimizing.sort_by_key(|&x| field_align(&fields[x as usize])); } } } // inverse_memory_index holds field indices by increasing memory offset. // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5. // We now write field offsets to the corresponding offset slot; // field 5 with offset 0 puts 0 in offsets[5]. // At the bottom of this function, we invert `inverse_memory_index` to // produce `memory_index` (see `invert_mapping`). let mut offset = Size::ZERO; if let StructKind::Prefixed(prefix_size, prefix_align) = kind { let prefix_align = if packed { prefix_align.min(pack) } else { prefix_align }; align = align.max(AbiAndPrefAlign::new(prefix_align)); offset = prefix_size.align_to(prefix_align); } for &i in &inverse_memory_index { let field = fields[i as usize]; if !sized { bug!("univariant: field #{} of `{}` comes after unsized field", offsets.len(), ty); } if field.is_unsized() { sized = false; } // Invariant: offset < dl.obj_size_bound() <= 1<<61 let field_align = if packed { field.align.min(AbiAndPrefAlign::new(pack)) } else { field.align }; offset = offset.align_to(field_align.abi); align = align.max(field_align); debug!("univariant offset: {:?} field: {:#?}", offset, field); offsets[i as usize] = offset; offset = offset.checked_add(field.size, dl) .ok_or(LayoutError::SizeOverflow(ty))?; } if repr.align > 0 { let repr_align = repr.align as u64; align = align.max(AbiAndPrefAlign::new(Align::from_bytes(repr_align).unwrap())); debug!("univariant repr_align: {:?}", repr_align); } debug!("univariant min_size: {:?}", offset); let min_size = offset; // As stated above, inverse_memory_index holds field indices by increasing offset. 
// This makes it an already-sorted view of the offsets vec. // To invert it, consider: // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0. // Field 5 would be the first element, so memory_index is i: // Note: if we didn't optimize, it's already right. let memory_index; if optimize { memory_index = invert_mapping(&inverse_memory_index); } else { memory_index = inverse_memory_index; } let size = min_size.align_to(align.abi); let mut abi = Abi::Aggregate { sized }; // Unpack newtype ABIs and find scalar pairs. if sized && size.bytes() > 0 { // All other fields must be ZSTs, and we need them to all start at 0. let mut zst_offsets = offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst()); if zst_offsets.all(|(_, o)| o.bytes() == 0) { let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst()); match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) { // We have exactly one non-ZST field. (Some((i, field)), None, None) => { // Field fills the struct and it has a scalar or scalar pair ABI. if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size { match field.abi { // For plain scalars, or vectors of them, we can't unpack // newtypes for `#[repr(C)]`, as that affects C ABIs. Abi::Scalar(_) | Abi::Vector { .. } if optimize => { abi = field.abi.clone(); } // But scalar pairs are Rust-specific and get // treated as aggregates by C ABIs anyway. Abi::ScalarPair(..) => { abi = field.abi.clone(); } _ => {} } } } // Two non-ZST fields, and they're both scalars. (Some((i, &TyLayout { details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, .. })), Some((j, &TyLayout { details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, .. })), None) => { // Order by the memory placement, not source order. 
let ((i, a), (j, b)) = if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) }; let pair = self.scalar_pair(a.clone(), b.clone()); let pair_offsets = match pair.fields { FieldPlacement::Arbitrary { ref offsets, ref memory_index } => { assert_eq!(memory_index, &[0, 1]); offsets } _ => bug!() }; if offsets[i] == pair_offsets[0] && offsets[j] == pair_offsets[1] && align == pair.align && size == pair.size { // We can use `ScalarPair` only when it matches our // already computed layout (including `#[repr(C)]`). abi = pair.abi; } } _ => {} } } } if sized && fields.iter().any(|f| f.abi.is_uninhabited()) { abi = Abi::Uninhabited; } Ok(LayoutDetails { variants: Variants::Single { index: VariantIdx::new(0) }, fields: FieldPlacement::Arbitrary { offsets, memory_index }, abi, align, size }) } fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> { let tcx = self.tcx; let param_env = self.param_env; let dl = self.data_layout(); let scalar_unit = |value: Primitive| { let bits = value.size(dl).bits(); assert!(bits <= 128); Scalar { value, valid_range: 0..=(!0 >> (128 - bits)) } }; let scalar = |value: Primitive| { tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value))) }; let univariant = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| { Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?)) }; debug_assert!(!ty.has_infer_types()); Ok(match ty.sty { // Basic scalars. 
            // `bool` is an `i8` whose valid range is restricted to 0..=1;
            // the restricted range is what makes the unused bit patterns
            // available as a niche elsewhere in this file.
            ty::Bool => {
                tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
                    value: Int(I8, false),
                    valid_range: 0..=1
                }))
            }
            // `char` is a `u32` restricted to the Unicode scalar-value range.
            ty::Char => {
                tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
                    value: Int(I32, false),
                    valid_range: 0..=0x10FFFF
                }))
            }
            // Integer types map directly onto target integers of the
            // attribute-derived width, signed or unsigned as appropriate.
            ty::Int(ity) => {
                scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true))
            }
            ty::Uint(ity) => {
                scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false))
            }
            ty::Float(fty) => scalar(Float(fty)),
            // Function pointers are non-null pointers: bump the start of the
            // valid range from 0 to 1 so `Option<fn()>` etc. can use 0 as a niche.
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range = 1..=*ptr.valid_range.end();
                tcx.intern_layout(LayoutDetails::scalar(self, ptr))
            }

            // The never type: zero-sized, byte-aligned, and uninhabited.
            ty::Never => {
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Union(0),
                    abi: Abi::Uninhabited,
                    align: dl.i8_align,
                    size: Size::ZERO
                })
            }

            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                // References are non-null; raw pointers keep the full range.
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
                }

                // Sized pointees yield a thin pointer: just the data scalar.
                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
                }

                // Unsized pointees carry metadata determined by the unsized tail:
                // extern types have none (thin), slices/str carry a usize length,
                // and trait objects carry a non-null vtable pointer.
                let unsized_part = tcx.struct_tail(pointee);
                let metadata = match unsized_part.sty {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => {
                        scalar_unit(Int(dl.ptr_sized_integer(), false))
                    }
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range = 1..=*vtable.valid_range.end();
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part))
                };

                // Effectively a (ptr, meta) tuple.
                tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
            }

            // Arrays and slices.
            ty::Array(element, mut count) => {
                // The array length may still be a projection (e.g. an associated
                // const); try to normalize it once before giving up.
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let count = count.assert_usize(tcx).ok_or(LayoutError::Unknown(ty))?;
                let element = self.layout_of(element)?;
                // Total size = stride * count, with overflow reported as a
                // layout error rather than wrapping.
                let size = element.size.checked_mul(count, dl)
                    .ok_or(LayoutError::SizeOverflow(ty))?;

                // A non-empty array of an uninhabited element is itself
                // uninhabited (conservatively determined).
                let abi = if count != 0 && ty.conservative_is_privately_uninhabited(tcx) {
                    Abi::Uninhabited
                } else {
                    Abi::Aggregate { sized: true }
                };

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count
                    },
                    abi,
                    align: element.align,
                    size
                })
            }
            // `[T]` is unsized: element stride/alignment are known, but the
            // count is not (recorded as 0 here).
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count: 0
                    },
                    abi: Abi::Aggregate { sized: false },
                    align: element.align,
                    size: Size::ZERO
                })
            }
            // `str` is laid out like `[u8]`: byte stride, byte alignment, unsized.
            ty::Str => {
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: Size::from_bytes(1),
                        count: 0
                    },
                    abi: Abi::Aggregate { sized: false },
                    align: dl.i8_align,
                    size: Size::ZERO
                })
            }

            // Odd unit types.
            // A `FnDef` is a zero-sized value uniquely identifying the function.
            ty::FnDef(..) => {
                univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?
            }
            // `dyn Trait` / extern types: an empty struct layout, patched to be
            // unsized (these only ever appear behind a pointer).
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = self.univariant_uninterned(ty, &[], &ReprOptions::default(),
                  StructKind::AlwaysSized)?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!()
                }
                tcx.intern_layout(unit)
            }

            // Generators get their own multi-variant layout (see generator_layout).
            ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, &substs)?,

            // A closure is laid out as a struct of its captured upvars.
            ty::Closure(def_id, ref substs) => {
                let tys = substs.upvar_tys(def_id, tcx);
                univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized)?
            }

            ty::Tuple(tys) => {
                // Only non-empty tuples can end in an unsized last field
                // (e.g. as the tail of an unsized struct).
                let kind = if tys.len() == 0 {
                    StructKind::AlwaysSized
                } else {
                    StructKind::MaybeUnsized
                };

                univariant(&tys.iter().map(|k| {
                    self.layout_of(k.expect_ty())
                }).collect::<Result<Vec<_>, _>>()?, &ReprOptions::default(), kind)?
            }

            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
                let element = self.layout_of(ty.simd_type(tcx))?;
                let count = ty.simd_size(tcx) as u64;
                assert!(count > 0);
                // SIMD elements must themselves be machine scalars; anything
                // else is a monomorphization-time hard error.
                let scalar = match element.abi {
                    Abi::Scalar(ref scalar) => scalar.clone(),
                    _ => {
                        tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
                                                 a non-machine element type `{}`",
                                                ty, element.ty));
                    }
                };
                let size = element.size.checked_mul(count, dl)
                    .ok_or(LayoutError::SizeOverflow(ty))?;
                // Vectors use the target's vector alignment, which may exceed
                // the element alignment; the size is rounded up to match.
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count
                    },
                    abi: Abi::Vector {
                        element: scalar,
                        count
                    },
                    size,
                    align,
                })
            }

            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                // Compute every field layout of every variant up front; all the
                // struct/union/enum paths below index into this table.
                let variants = def.variants.iter().map(|v| {
                    v.fields.iter().map(|field| {
                        self.layout_of(field.ty(tcx, substs))
                    }).collect::<Result<Vec<_>, _>>()
                }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                if def.is_union() {
                    let packed = def.repr.packed();
                    // `#[repr(packed)]` and `#[repr(align)]` are mutually
                    // exclusive; this should have been rejected before layout.
                    if packed && def.repr.align > 0 {
                        bug!("Union cannot be packed and aligned");
                    }

                    let pack = Align::from_bytes(def.repr.pack as u64).unwrap();

                    let mut align = if packed {
                        dl.i8_align
                    } else {
                        dl.aggregate_align
                    };

                    // `#[repr(align(N))]` can only raise the alignment.
                    if def.repr.align > 0 {
                        let repr_align = def.repr.align as u64;
                        align = align.max(
                            AbiAndPrefAlign::new(Align::from_bytes(repr_align).unwrap()));
                    }

                    let optimize = !def.repr.inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    // A union has a single "variant" holding all its fields.
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());

                        // Packing caps the effective alignment of each field.
                        let field_align = if packed {
                            field.align.min(AbiAndPrefAlign::new(pack))
                        } else {
                            field.align
                        };
                        align = align.max(field_align);

                        // If all non-ZST fields have the same ABI, forward this ABI
                        if optimize && !field.is_zst() {
                            // Normalize scalar_unit to the maximal valid range
                            // (only the ABI *kind* must agree across fields; any
                            // per-field range restriction cannot be assumed for
                            // a union, since writes through one field alias all).
                            let field_abi = match &field.abi {
                                Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(
                                        scalar_unit(x.value),
                                        scalar_unit(y.value),
                                    )
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector {
                                        element: scalar_unit(x.value),
                                        count: *count,
                                    }
                                }
                                Abi::Uninhabited |
                                Abi::Aggregate { .. } => Abi::Aggregate { sized: true },
                            };

                            if size == Size::ZERO {
                                // first non ZST: initialize 'abi'
                                abi = field_abi;
                            } else if abi != field_abi {
                                // different fields have different ABI: reset to Aggregate
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        // Union size is the max over its fields, not a sum.
                        size = cmp::max(size, field.size);
                    }

                    return Ok(tcx.intern_layout(LayoutDetails {
                        variants: Variants::Single { index },
                        fields: FieldPlacement::Union(variants[index].len()),
                        abi,
                        align,
                        size: size.align_to(align.abi)
                    }));
                }

                // A variant is absent if it's uninhabited and only has ZST fields.
// Present uninhabited variants only require space for their fields, // but *not* an encoding of the discriminant (e.g., a tag value). // See issue #49298 for more details on the need to leave space // for non-ZST uninhabited data (mostly partial initialization). let absent = |fields: &[TyLayout<'_>]| { let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited()); let is_zst = fields.iter().all(|f| f.is_zst()); uninhabited && is_zst }; let (present_first, present_second) = { let mut present_variants = variants.iter_enumerated().filter_map(|(i, v)| { if absent(v) { None } else { Some(i) } }); (present_variants.next(), present_variants.next()) }; if present_first.is_none() { // Uninhabited because it has no variants, or only absent ones. return tcx.layout_raw(param_env.and(tcx.types.never)); } let is_struct = !def.is_enum() || // Only one variant is present. (present_second.is_none() && // Representation optimizations are allowed. !def.repr.inhibit_enum_layout_opt()); if is_struct { // Struct, or univariant enum equivalent to a struct. // (Typechecking will reject discriminant-sizing attrs.) 
let v = present_first.unwrap(); let kind = if def.is_enum() || variants[v].len() == 0 { StructKind::AlwaysSized } else { let param_env = tcx.param_env(def.did); let last_field = def.variants[v].fields.last().unwrap(); let always_sized = tcx.type_of(last_field.did) .is_sized(tcx.at(DUMMY_SP), param_env); if !always_sized { StructKind::MaybeUnsized } else { StructKind::AlwaysSized } }; let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?; st.variants = Variants::Single { index: v }; let (start, end) = self.tcx.layout_scalar_valid_range(def.did); match st.abi { Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => { // the asserts ensure that we are not using the // `#[rustc_layout_scalar_valid_range(n)]` // attribute to widen the range of anything as that would probably // result in UB somewhere if let Bound::Included(start) = start { assert!(*scalar.valid_range.start() <= start); scalar.valid_range = start..=*scalar.valid_range.end(); } if let Bound::Included(end) = end { assert!(*scalar.valid_range.end() >= end); scalar.valid_range = *scalar.valid_range.start()..=end; } } _ => assert!( start == Bound::Unbounded && end == Bound::Unbounded, "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}", def, st, ), } return Ok(tcx.intern_layout(st)); } // The current code for niche-filling relies on variant indices // instead of actual discriminants, so dataful enums with // explicit discriminants (RFC #2363) would misbehave. let no_explicit_discriminants = def.variants.iter_enumerated() .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32())); // Niche-filling enum optimization. if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants { let mut dataful_variant = None; let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0); // Find one non-ZST variant. 
'variants: for (v, fields) in variants.iter_enumerated() { if absent(fields) { continue 'variants; } for f in fields { if !f.is_zst() { if dataful_variant.is_none() { dataful_variant = Some(v); continue 'variants; } else { dataful_variant = None; break 'variants; } } } niche_variants = *niche_variants.start().min(&v)..=v; } if niche_variants.start() > niche_variants.end() { dataful_variant = None; } if let Some(i) = dataful_variant { let count = ( niche_variants.end().as_u32() - niche_variants.start().as_u32() + 1 ) as u128; for (field_index, &field) in variants[i].iter().enumerate() { let niche = match self.find_niche(field)? { Some(niche) => niche, _ => continue, }; let (niche_start, niche_scalar) = match niche.reserve(self, count) { Some(pair) => pair, None => continue, }; let mut align = dl.aggregate_align; let st = variants.iter_enumerated().map(|(j, v)| { let mut st = self.univariant_uninterned(ty, v, &def.repr, StructKind::AlwaysSized)?; st.variants = Variants::Single { index: j }; align = align.max(st.align); Ok(st) }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?; let offset = st[i].fields.offset(field_index) + niche.offset; let size = st[i].size; let mut abi = match st[i].abi { Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()), Abi::ScalarPair(ref first, ref second) => { // We need to use scalar_unit to reset the // valid range to the maximal one for that // primitive, because only the niche is // guaranteed to be initialised, not the // other primitive. 
if offset.bytes() == 0 { Abi::ScalarPair( niche_scalar.clone(), scalar_unit(second.value), ) } else { Abi::ScalarPair( scalar_unit(first.value), niche_scalar.clone(), ) } } _ => Abi::Aggregate { sized: true }, }; if st.iter().all(|v| v.abi.is_uninhabited()) { abi = Abi::Uninhabited; } return Ok(tcx.intern_layout(LayoutDetails { variants: Variants::Multiple { discr: niche_scalar, discr_kind: DiscriminantKind::Niche { dataful_variant: i, niche_variants, niche_start, }, discr_index: 0, variants: st, }, fields: FieldPlacement::Arbitrary { offsets: vec![offset], memory_index: vec![0] }, abi, size, align, })); } } } let (mut min, mut max) = (i128::max_value(), i128::min_value()); let discr_type = def.repr.discr_type(); let bits = Integer::from_attr(self, discr_type).size().bits(); for (i, discr) in def.discriminants(tcx) { if variants[i].iter().any(|f| f.abi.is_uninhabited()) { continue; } let mut x = discr.val as i128; if discr_type.is_signed() { // sign extend the raw representation to be an i128 x = (x << (128 - bits)) >> (128 - bits); } if x < min { min = x; } if x > max { max = x; } } // We might have no inhabited variants, so pretend there's at least one. if (min, max) == (i128::max_value(), i128::min_value()) { min = 0; max = 0; } assert!(min <= max, "discriminant range is {}...{}", min, max); let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max); let mut align = dl.aggregate_align; let mut size = Size::ZERO; // We're interested in the smallest alignment, so start large. let mut start_align = Align::from_bytes(256).unwrap(); assert_eq!(Integer::for_align(dl, start_align), None); // repr(C) on an enum tells us to make a (tag, union) layout, // so we need to grow the prefix alignment to be at least // the alignment of the union. (This value is used both for // determining the alignment of the overall enum, and the // determining the alignment of the payload after the tag.) 
let mut prefix_align = min_ity.align(dl).abi; if def.repr.c() { for fields in &variants { for field in fields { prefix_align = prefix_align.max(field.align.abi); } } } // Create the set of structs that represent each variant. let mut layout_variants = variants.iter_enumerated().map(|(i, field_layouts)| { let mut st = self.univariant_uninterned(ty, &field_layouts, &def.repr, StructKind::Prefixed(min_ity.size(), prefix_align))?; st.variants = Variants::Single { index: i }; // Find the first field we can't move later // to make room for a larger discriminant. for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) { if !field.is_zst() || field.align.abi.bytes() != 1 { start_align = start_align.min(field.align.abi); break; } } size = cmp::max(size, st.size); align = align.max(st.align); Ok(st) }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?; // Align the maximum variant size to the largest alignment. size = size.align_to(align.abi); if size.bytes() >= dl.obj_size_bound() { return Err(LayoutError::SizeOverflow(ty)); } let typeck_ity = Integer::from_attr(dl, def.repr.discr_type()); if typeck_ity < min_ity { // It is a bug if Layout decided on a greater discriminant size than typeck for // some reason at this point (based on values discriminant can take on). Mostly // because this discriminant will be loaded, and then stored into variable of // type calculated by typeck. Consider such case (a bug): typeck decided on // byte-sized discriminant, but layout thinks we need a 16-bit to store all // discriminant values. 
That would be a bug, because then, in codegen, in order // to store this 16-bit discriminant into 8-bit sized temporary some of the // space necessary to represent would have to be discarded (or layout is wrong // on thinking it needs 16 bits) bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})", min_ity, typeck_ity); // However, it is fine to make discr type however large (as an optimisation) // after this point – we’ll just truncate the value we load in codegen. } // Check to see if we should use a different type for the // discriminant. We can safely use a type with the same size // as the alignment of the first field of each variant. // We increase the size of the discriminant to avoid LLVM copying // padding when it doesn't need to. This normally causes unaligned // load/stores and excessive memcpy/memset operations. By using a // bigger integer size, LLVM can be sure about its contents and // won't be so conservative. // Use the initial field alignment let mut ity = if def.repr.c() || def.repr.int.is_some() { min_ity } else { Integer::for_align(dl, start_align).unwrap_or(min_ity) }; // If the alignment is not larger than the chosen discriminant size, // don't use the alignment as the final size. if ity <= min_ity { ity = min_ity; } else { // Patch up the variants' first few fields. let old_ity_size = min_ity.size(); let new_ity_size = ity.size(); for variant in &mut layout_variants { match variant.fields { FieldPlacement::Arbitrary { ref mut offsets, .. } => { for i in offsets { if *i <= old_ity_size { assert_eq!(*i, old_ity_size); *i = new_ity_size; } } // We might be making the struct larger. 
if variant.size <= old_ity_size { variant.size = new_ity_size; } } _ => bug!() } } } let tag_mask = !0u128 >> (128 - ity.size().bits()); let tag = Scalar { value: Int(ity, signed), valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask), }; let mut abi = Abi::Aggregate { sized: true }; if tag.value.size(dl) == size { abi = Abi::Scalar(tag.clone()); } else { // Try to use a ScalarPair for all tagged enums. let mut common_prim = None; for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) { let offsets = match layout_variant.fields { FieldPlacement::Arbitrary { ref offsets, .. } => offsets, _ => bug!(), }; let mut fields = field_layouts .iter() .zip(offsets) .filter(|p| !p.0.is_zst()); let (field, offset) = match (fields.next(), fields.next()) { (None, None) => continue, (Some(pair), None) => pair, _ => { common_prim = None; break; } }; let prim = match field.details.abi { Abi::Scalar(ref scalar) => scalar.value, _ => { common_prim = None; break; } }; if let Some(pair) = common_prim { // This is pretty conservative. We could go fancier // by conflating things like i32 and u32, or even // realising that (u8, u8) could just cohabit with // u16 or even u32. if pair != (prim, offset) { common_prim = None; break; } } else { common_prim = Some((prim, offset)); } } if let Some((prim, offset)) = common_prim { let pair = self.scalar_pair(tag.clone(), scalar_unit(prim)); let pair_offsets = match pair.fields { FieldPlacement::Arbitrary { ref offsets, ref memory_index } => { assert_eq!(memory_index, &[0, 1]); offsets } _ => bug!() }; if pair_offsets[0] == Size::ZERO && pair_offsets[1] == *offset && align == pair.align && size == pair.size { // We can use `ScalarPair` only when it matches our // already computed layout (including `#[repr(C)]`). 
                            abi = pair.abi;
                        }
                    }
                }

                // If every variant is uninhabited, so is the whole enum.
                if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
                    abi = Abi::Uninhabited;
                }

                // Final tagged-enum layout: the discriminant is field 0 at
                // offset 0, and each variant carries its own sub-layout.
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Multiple {
                        discr: tag,
                        discr_kind: DiscriminantKind::Tag,
                        discr_index: 0,
                        variants: layout_variants,
                    },
                    fields: FieldPlacement::Arbitrary {
                        offsets: vec![Size::ZERO],
                        memory_index: vec![0]
                    },
                    abi,
                    align,
                    size
                })
            }

            // Types with no meaningful known layout.
            ty::Projection(_) | ty::Opaque(..) => {
                // If normalization makes progress, recurse on the normalized
                // type; if it doesn't, the layout genuinely can't be known here.
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    return Err(LayoutError::Unknown(ty));
                }
                tcx.layout_raw(param_env.and(normalized))?
            }

            // These type flavors should never reach layout computation.
            ty::Bound(..) |
            ty::Placeholder(..) |
            ty::UnnormalizedProjection(..) |
            ty::GeneratorWitness(..) |
            ty::Infer(_) => {
                bug!("LayoutDetails::compute: unexpected type `{}`", ty)
            }

            // Generic parameters / error types: recoverable "unknown layout".
            ty::Param(_) | ty::Error => {
                return Err(LayoutError::Unknown(ty));
            }
        })
    }
}

/// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
#[derive(Clone, Debug, PartialEq)]
enum SavedLocalEligibility {
    /// Not yet seen in any variant.
    Unassigned,
    /// Seen in exactly one variant so far; may overlap with other locals.
    Assigned(VariantIdx),
    // FIXME: Use newtype_index so we aren't wasting bytes
    /// Stored in the generator prefix; the index is its position among the
    /// promoted (prefix) fields once assigned.
    Ineligible(Option<u32>),
}

// When laying out generators, we divide our saved local fields into two
// categories: overlap-eligible and overlap-ineligible.
//
// Those fields which are ineligible for overlap go in a "prefix" at the
// beginning of the layout, and always have space reserved for them.
//
// Overlap-eligible fields are only assigned to one variant, so we lay
// those fields out for each variant and put them right after the
// prefix.
//
// Finally, in the layout details, we point to the fields from the
// variants they are assigned to. It is possible for some fields to be
// included in multiple variants. No field ever "moves around" in the
// layout; its offset is always the same.
//
// Also included in the layout are the upvars and the discriminant.
// These are included as fields on the "outer" layout; they are not part
// of any variant.
impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> { /// Compute the eligibility and assignment of each local. fn generator_saved_local_eligibility(&self, info: &GeneratorLayout<'tcx>) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) { use SavedLocalEligibility::*; let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> = IndexVec::from_elem_n(Unassigned, info.field_tys.len()); // The saved locals not eligible for overlap. These will get // "promoted" to the prefix of our generator. let mut ineligible_locals = BitSet::new_empty(info.field_tys.len()); // Figure out which of our saved locals are fields in only // one variant. The rest are deemed ineligible for overlap. for (variant_index, fields) in info.variant_fields.iter_enumerated() { for local in fields { match assignments[*local] { Unassigned => { assignments[*local] = Assigned(variant_index); } Assigned(idx) => { // We've already seen this local at another suspension // point, so it is no longer a candidate. trace!("removing local {:?} in >1 variant ({:?}, {:?})", local, variant_index, idx); ineligible_locals.insert(*local); assignments[*local] = Ineligible(None); } Ineligible(_) => {}, } } } // Next, check every pair of eligible locals to see if they // conflict. for local_a in info.storage_conflicts.rows() { let conflicts_a = info.storage_conflicts.count(local_a); if ineligible_locals.contains(local_a) { continue; } for local_b in info.storage_conflicts.iter(local_a) { // local_a and local_b are storage live at the same time, therefore they // cannot overlap in the generator layout. The only way to guarantee // this is if they are in the same variant, or one is ineligible // (which means it is stored in every variant). if ineligible_locals.contains(local_b) || assignments[local_a] == assignments[local_b] { continue; } // If they conflict, we will choose one to make ineligible. 
// This is not always optimal; it's just a greedy heuristic that // seems to produce good results most of the time. let conflicts_b = info.storage_conflicts.count(local_b); let (remove, other) = if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) }; ineligible_locals.insert(remove); assignments[remove] = Ineligible(None); trace!("removing local {:?} due to conflict with {:?}", remove, other); } } // Write down the order of our locals that will be promoted to the prefix. { let mut idx = 0u32; for local in ineligible_locals.iter() { assignments[local] = Ineligible(Some(idx)); idx += 1; } } debug!("generator saved local assignments: {:?}", assignments); (ineligible_locals, assignments) } /// Compute the full generator layout. fn generator_layout( &self, ty: Ty<'tcx>, def_id: hir::def_id::DefId, substs: &GeneratorSubsts<'tcx>, ) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> { use SavedLocalEligibility::*; let tcx = self.tcx; let subst_field = |ty: Ty<'tcx>| { ty.subst(tcx, substs.substs) }; let info = tcx.generator_layout(def_id); let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info); // Build a prefix layout, including "promoting" all ineligible // locals as part of the prefix. We compute the layout of all of // these fields at once to get optimal packing. let discr_index = substs.prefix_tys(def_id, tcx).count(); let promoted_tys = ineligible_locals.iter().map(|local| subst_field(info.field_tys[local])); let prefix_tys = substs.prefix_tys(def_id, tcx) .chain(iter::once(substs.discr_ty(tcx))) .chain(promoted_tys); let prefix = self.univariant_uninterned( ty, &prefix_tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?, &ReprOptions::default(), StructKind::AlwaysSized)?; let (prefix_size, prefix_align) = (prefix.size, prefix.align); // Split the prefix layout into the "outer" fields (upvars and // discriminant) and the "promoted" fields. 
Promoted fields will // get included in each variant that requested them in // GeneratorLayout. debug!("prefix = {:#?}", prefix); let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields { FieldPlacement::Arbitrary { mut offsets, memory_index } => { let mut inverse_memory_index = invert_mapping(&memory_index); // "a" (`0..b_start`) and "b" (`b_start..`) correspond to // "outer" and "promoted" fields respectively. let b_start = (discr_index + 1) as u32; let offsets_b = offsets.split_off(b_start as usize); let offsets_a = offsets; // Disentangle the "a" and "b" components of `inverse_memory_index` // by preserving the order but keeping only one disjoint "half" each. // FIXME(eddyb) build a better abstraction for permutations, if possible. let inverse_memory_index_b: Vec<_> = inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect(); inverse_memory_index.retain(|&i| i < b_start); let inverse_memory_index_a = inverse_memory_index; // Since `inverse_memory_index_{a,b}` each only refer to their // respective fields, they can be safely inverted let memory_index_a = invert_mapping(&inverse_memory_index_a); let memory_index_b = invert_mapping(&inverse_memory_index_b); let outer_fields = FieldPlacement::Arbitrary { offsets: offsets_a, memory_index: memory_index_a, }; (outer_fields, offsets_b, memory_index_b) } _ => bug!(), }; let mut size = prefix.size; let mut align = prefix.align; let variants = info.variant_fields.iter_enumerated().map(|(index, variant_fields)| { // Only include overlap-eligible fields when we compute our variant layout. 
let variant_only_tys = variant_fields .iter() .filter(|local| { match assignments[**local] { Unassigned => bug!(), Assigned(v) if v == index => true, Assigned(_) => bug!("assignment does not match variant"), Ineligible(_) => false, } }) .map(|local| subst_field(info.field_tys[*local])); let mut variant = self.univariant_uninterned( ty, &variant_only_tys .map(|ty| self.layout_of(ty)) .collect::<Result<Vec<_>, _>>()?, &ReprOptions::default(), StructKind::Prefixed(prefix_size, prefix_align.abi))?; variant.variants = Variants::Single { index }; let (offsets, memory_index) = match variant.fields { FieldPlacement::Arbitrary { offsets, memory_index } => { (offsets, memory_index) } _ => bug!(), }; // Now, stitch the promoted and variant-only fields back together in // the order they are mentioned by our GeneratorLayout. // Because we only use some subset (that can differ between variants) // of the promoted fields, we can't just pick those elements of the // `promoted_memory_index` (as we'd end up with gaps). // So instead, we build an "inverse memory_index", as if all of the // promoted fields were being used, but leave the elements not in the // subset as `INVALID_FIELD_IDX`, which we can filter out later to // obtain a valid (bijective) mapping. 
const INVALID_FIELD_IDX: u32 = !0; let mut combined_inverse_memory_index = vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()]; let mut offsets_and_memory_index = offsets.into_iter().zip(memory_index); let combined_offsets = variant_fields.iter().enumerate().map(|(i, local)| { let (offset, memory_index) = match assignments[*local] { Unassigned => bug!(), Assigned(_) => { let (offset, memory_index) = offsets_and_memory_index.next().unwrap(); (offset, promoted_memory_index.len() as u32 + memory_index) } Ineligible(field_idx) => { let field_idx = field_idx.unwrap() as usize; (promoted_offsets[field_idx], promoted_memory_index[field_idx]) } }; combined_inverse_memory_index[memory_index as usize] = i as u32; offset }).collect(); // Remove the unused slots and invert the mapping to obtain the // combined `memory_index` (also see previous comment). combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX); let combined_memory_index = invert_mapping(&combined_inverse_memory_index); variant.fields = FieldPlacement::Arbitrary { offsets: combined_offsets, memory_index: combined_memory_index, }; size = size.max(variant.size); align = align.max(variant.align); Ok(variant) }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?; let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited()) { Abi::Uninhabited } else { Abi::Aggregate { sized: true } }; let discr = match &self.layout_of(substs.discr_ty(tcx))?.abi { Abi::Scalar(s) => s.clone(), _ => bug!(), }; let layout = tcx.intern_layout(LayoutDetails { variants: Variants::Multiple { discr, discr_kind: DiscriminantKind::Tag, discr_index, variants, }, fields: outer_fields, abi, size, align, }); debug!("generator layout ({:?}): {:#?}", ty, layout); Ok(layout) } /// This is invoked by the `layout_raw` query to record the final /// layout of each type. 
/// Entry point for `-Zprint-type-sizes` recording; kept tiny (a single
/// flag check) and always inlined so the common disabled case is free.
#[inline(always)]
fn record_layout_for_printing(&self, layout: TyLayout<'tcx>) {
    // If we are running with `-Zprint-type-sizes`, maybe record layouts
    // for dumping later.
    if self.tcx.sess.opts.debugging_opts.print_type_sizes {
        self.record_layout_for_printing_outlined(layout)
    }
}

/// The slow path of `record_layout_for_printing`: computes per-variant and
/// per-field size/offset/alignment info and stores it in
/// `sess.code_stats` for the end-of-session dump.
fn record_layout_for_printing_outlined(&self, layout: TyLayout<'tcx>) {
    // Ignore layouts that are done with non-empty environments or
    // non-monomorphic layouts, as the user only wants to see the stuff
    // resulting from the final codegen session.
    if
        layout.ty.has_param_types() ||
        layout.ty.has_self_ty() ||
        !self.param_env.caller_bounds.is_empty()
    {
        return;
    }

    // (delay format until we actually need it)
    let record = |kind, packed, opt_discr_size, variants| {
        let type_desc = format!("{:?}", layout.ty);
        self.tcx.sess.code_stats.borrow_mut().record_type_size(kind,
                                                               type_desc,
                                                               layout.align.abi,
                                                               layout.size,
                                                               packed,
                                                               opt_discr_size,
                                                               variants);
    };

    // Only ADTs get full per-variant reporting; closures are recorded
    // with no variant detail, everything else is skipped.
    let adt_def = match layout.ty.sty {
        ty::Adt(ref adt_def, _) => {
            debug!("print-type-size t: `{:?}` process adt", layout.ty);
            adt_def
        }

        ty::Closure(..) => {
            debug!("print-type-size t: `{:?}` record closure", layout.ty);
            record(DataTypeKind::Closure, false, None, vec![]);
            return;
        }

        _ => {
            debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
            return;
        }
    };

    let adt_kind = adt_def.adt_kind();
    let adt_packed = adt_def.repr.packed();

    // Builds the `session::VariantInfo` for one variant: walks the fields,
    // collecting each field's offset/size/align.
    let build_variant_info = |n: Option<Ident>,
                              flds: &[ast::Name],
                              layout: TyLayout<'tcx>| {
        // `min_size` tracks the furthest field end seen; for unsized
        // layouts it is reported instead of `layout.size` (SizeKind::Min).
        let mut min_size = Size::ZERO;
        let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| {
            match layout.field(self, i) {
                Err(err) => {
                    bug!("no layout found for field {}: `{:?}`", name, err);
                }
                Ok(field_layout) => {
                    let offset = layout.fields.offset(i);
                    let field_end = offset + field_layout.size;
                    if min_size < field_end {
                        min_size = field_end;
                    }
                    session::FieldInfo {
                        name: name.to_string(),
                        offset: offset.bytes(),
                        size: field_layout.size.bytes(),
                        align: field_layout.align.abi.bytes(),
                    }
                }
            }
        }).collect();

        session::VariantInfo {
            name: n.map(|n| n.to_string()),
            kind: if layout.is_unsized() {
                session::SizeKind::Min
            } else {
                session::SizeKind::Exact
            },
            align: layout.align.abi.bytes(),
            size: if min_size.bytes() == 0 {
                layout.size.bytes()
            } else {
                min_size.bytes()
            },
            fields: field_info,
        }
    };

    match layout.variants {
        Variants::Single { index } => {
            debug!("print-type-size `{:#?}` variant {}",
                   layout, adt_def.variants[index].ident);
            if !adt_def.variants.is_empty() {
                let variant_def = &adt_def.variants[index];
                let fields: Vec<_> =
                    variant_def.fields.iter().map(|f| f.ident.name).collect();
                record(adt_kind.into(),
                       adt_packed,
                       None,
                       vec![build_variant_info(Some(variant_def.ident),
                                               &fields,
                                               layout)]);
            } else {
                // (This case arises for *empty* enums; so give it
                // zero variants.)
                record(adt_kind.into(), adt_packed, None, vec![]);
            }
        }

        Variants::Multiple { ref discr, ref discr_kind, .. } => {
            debug!("print-type-size `{:#?}` adt general variants def {}",
                   layout.ty, adt_def.variants.len());
            let variant_infos: Vec<_> =
                adt_def.variants.iter_enumerated().map(|(i, variant_def)| {
                    let fields: Vec<_> =
                        variant_def.fields.iter().map(|f| f.ident.name).collect();
                    build_variant_info(Some(variant_def.ident),
                                       &fields,
                                       layout.for_variant(self, i))
                })
                .collect();
            // Only a plain tag occupies dedicated space worth reporting;
            // niche discriminants reuse a field's storage.
            record(adt_kind.into(), adt_packed, match discr_kind {
                DiscriminantKind::Tag => Some(discr.value.size(self)),
                _ => None
            }, variant_infos);
        }
    }
}

/// Type size "skeleton", i.e., the only information determining a type's size.
/// While this is conservative, (aside from constant sizes, only pointers,
/// newtypes thereof and null pointer optimized enums are allowed), it is
/// enough to statically check common use cases of transmute.
#[derive(Copy, Clone, Debug)]
pub enum SizeSkeleton<'tcx> {
    /// Any statically computable Layout.
    Known(Size),

    /// A potentially-fat pointer.
    Pointer {
        /// If true, this pointer is never null.
        non_zero: bool,
        /// The type which determines the unsized metadata, if any,
        /// of this pointer. Either a type parameter or a projection
        /// depending on one, with regions erased.
        tail: Ty<'tcx>
    }
}

impl<'tcx> SizeSkeleton<'tcx> {
    /// Computes a size skeleton for `ty`. Tries the full `layout_of` query
    /// first; if that errors (e.g. because `ty` still contains type
    /// parameters), falls back to classifying the type's shape below,
    /// propagating the original layout error when no case applies.
    pub fn compute(
        ty: Ty<'tcx>,
        tcx: TyCtxt<'tcx>,
        param_env: ty::ParamEnv<'tcx>,
    ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
        debug_assert!(!ty.has_infer_types());

        // First try computing a static layout.
        let err = match tcx.layout_of(param_env.and(ty)) {
            Ok(layout) => {
                return Ok(SizeSkeleton::Known(layout.size));
            }
            Err(err) => err
        };

        match ty.sty {
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, ..
}) => {
                // References are non-null; raw pointers are not.
                let non_zero = !ty.is_unsafe_ptr();
                let tail = tcx.struct_tail(pointee);
                match tail.sty {
                    ty::Param(_) | ty::Projection(_) => {
                        debug_assert!(tail.has_param_types() || tail.has_self_ty());
                        Ok(SizeSkeleton::Pointer {
                            non_zero,
                            tail: tcx.erase_regions(&tail)
                        })
                    }
                    _ => {
                        bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
                              tail `{}` is not a type parameter or a projection",
                             ty, err, tail)
                    }
                }
            }

            ty::Adt(def, substs) => {
                // Only newtypes and enums w/ nullable pointer optimization.
                if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
                    return Err(err);
                }

                // Get a zero-sized variant or a pointer newtype.
                // Returns `Ok(Some(_))` when the variant consists of exactly
                // one pointer skeleton plus zero-sized fields,
                // `Ok(None)` when it is entirely zero-sized.
                let zero_or_ptr_variant = |i| {
                    let i = VariantIdx::new(i);
                    let fields = def.variants[i].fields.iter().map(|field| {
                        SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
                    });
                    let mut ptr = None;
                    for field in fields {
                        let field = field?;
                        match field {
                            SizeSkeleton::Known(size) => {
                                if size.bytes() > 0 {
                                    return Err(err);
                                }
                            }
                            SizeSkeleton::Pointer {..} => {
                                if ptr.is_some() {
                                    return Err(err);
                                }
                                ptr = Some(field);
                            }
                        }
                    }
                    Ok(ptr)
                };

                let v0 = zero_or_ptr_variant(0)?;
                // Newtype.
                if def.variants.len() == 1 {
                    if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
                        return Ok(SizeSkeleton::Pointer {
                            // A `rustc_layout_scalar_valid_range` attribute
                            // excluding zero also makes the pointer non-null
                            // (e.g. `NonNull`).
                            non_zero: non_zero || match tcx.layout_scalar_valid_range(def.did) {
                                (Bound::Included(start), Bound::Unbounded) => start > 0,
                                (Bound::Included(start), Bound::Included(end)) =>
                                    0 < start && start < end,
                                _ => false,
                            },
                            tail,
                        });
                    } else {
                        return Err(err);
                    }
                }

                let v1 = zero_or_ptr_variant(1)?;

                // Nullable pointer enum optimization: one variant holds a
                // non-null pointer, the other is entirely zero-sized, so the
                // whole enum is pointer-sized but may be null.
                match (v0, v1) {
                    (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
                    (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
                        Ok(SizeSkeleton::Pointer {
                            non_zero: false,
                            tail,
                        })
                    }
                    _ => Err(err)
                }
            }

            ty::Projection(_) | ty::Opaque(..) => {
                // Retry on the normalized form, but only if normalization
                // made progress (otherwise we would recurse forever).
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    Err(err)
                } else {
                    SizeSkeleton::compute(normalized, tcx, param_env)
                }
            }

            _ => Err(err)
        }
    }

    /// Whether two skeletons are provably the same size: equal known sizes,
    /// or pointers with the same unsized tail.
    pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
        match (self, other) {
            (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
            (SizeSkeleton::Pointer { tail: a, .. },
             SizeSkeleton::Pointer { tail: b, .. }) => a == b,
            _ => false
        }
    }
}

/// Access to a `TyCtxt`, for contexts that also know their data layout.
pub trait HasTyCtxt<'tcx>: HasDataLayout {
    fn tcx(&self) -> TyCtxt<'tcx>;
}

/// Access to the `ParamEnv` a layout is being computed in.
pub trait HasParamEnv<'tcx> {
    fn param_env(&self) -> ty::ParamEnv<'tcx>;
}

impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
    fn data_layout(&self) -> &TargetDataLayout {
        &self.data_layout
    }
}

impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.global_tcx()
    }
}

impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
    fn param_env(&self) -> ty::ParamEnv<'tcx> {
        self.param_env
    }
}

// `LayoutCx` delegates data-layout and tcx access to its wrapped context.
impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
    fn data_layout(&self) -> &TargetDataLayout {
        self.tcx.data_layout()
    }
}

impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx.tcx()
    }
}

/// Abstracts over an infallible `T` (with `Error = !`) and a fallible
/// `Result<T, E>`, so generic layout code can work with either.
pub trait MaybeResult<T> {
    type Error;

    fn from(x: Result<T, Self::Error>) -> Self;
    fn to_result(self) -> Result<T, Self::Error>;
}

impl<T> MaybeResult<T> for T {
    type Error = !;

    fn from(x: Result<T, Self::Error>) -> Self {
        // The error type is `!`, so this `let` is irrefutable.
        let Ok(x) = x;
        x
    }
    fn to_result(self) -> Result<T, Self::Error> {
        Ok(self)
    }
}

impl<T, E> MaybeResult<T> for Result<T, E> {
    type Error = E;

    fn from(x: Result<T, Self::Error>) -> Self {
        x
    }
    fn to_result(self) -> Result<T, Self::Error> {
        self
    }
}

pub type TyLayout<'tcx> = ::rustc_target::abi::TyLayout<'tcx, Ty<'tcx>>;

impl<'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'tcx>> {
    type Ty = Ty<'tcx>;
    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
        // Normalize under "reveal all" before asking the `layout_raw` query.
        let param_env = self.param_env.with_reveal_all();
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let details = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyLayout {
            ty,
            details
        };

        // N.B., this recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        self.record_layout_for_printing(layout);

        Ok(layout)
    }
}

// Near-duplicate of the impl above, differing only in that a fresh
// `LayoutCx<TyCtxt>` must be built (from the `TyCtxtAt`) to do the
// `-Zprint-type-sizes` recording.
impl LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
    type Ty = Ty<'tcx>;
    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
        let param_env = self.param_env.with_reveal_all();
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let details = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyLayout {
            ty,
            details
        };

        // N.B., this recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        let cx = LayoutCx {
            tcx: *self.tcx,
            param_env: self.param_env
        };
        cx.record_layout_for_printing(layout);

        Ok(layout)
    }
}

// Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
impl TyCtxt<'tcx> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                     -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
        let cx = LayoutCx {
            tcx: self.global_tcx(),
            param_env: param_env_and_ty.param_env
        };
        cx.layout_of(param_env_and_ty.value)
    }
}

impl ty::query::TyCtxtAt<'tcx> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                     -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
        let cx = LayoutCx {
            tcx: self.global_tcx().at(self.span),
            param_env: param_env_and_ty.param_env
        };
        cx.layout_of(param_env_and_ty.value)
    }
}

impl<'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx>
where
    C: LayoutOf<Ty = Ty<'tcx>> + HasTyCtxt<'tcx>,
    C::TyLayout: MaybeResult<TyLayout<'tcx>>,
    C: HasParamEnv<'tcx>,
{
    /// Restricts `this` to the layout of a single variant
    /// (`Variants::Single` is a no-op for the matching index; an
    /// uninhabited variant of a multi-variant layout gets a synthetic
    /// zero-sized uninhabited layout).
    fn for_variant(this: TyLayout<'tcx>, cx: &C, variant_index: VariantIdx) -> TyLayout<'tcx> {
        let details = match this.variants {
            Variants::Single { index } if index == variant_index => this.details,

            Variants::Single { index } => {
                // Deny calling for_variant more than once for non-Single enums.
                if let Ok(layout) = cx.layout_of(this.ty).to_result() {
                    assert_eq!(layout.variants, Variants::Single { index });
                }

                // The `Single { index }` layout is for an absent (uninhabited)
                // variant; synthesize an empty uninhabited layout for the
                // requested variant with the right field count.
                let fields = match this.ty.sty {
                    ty::Adt(def, _) => def.variants[variant_index].fields.len(),
                    _ => bug!()
                };
                let tcx = cx.tcx();
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: variant_index },
                    fields: FieldPlacement::Union(fields),
                    abi: Abi::Uninhabited,
                    align: tcx.data_layout.i8_align,
                    size: Size::ZERO
                })
            }

            Variants::Multiple { ref variants, ..
            } => {
                &variants[variant_index]
            }
        };

        assert_eq!(details.variants, Variants::Single { index: variant_index });

        TyLayout {
            ty: this.ty,
            details
        }
    }

    /// Computes the type of field `i` of `this` and lays it out via `cx`.
    /// The discriminant pseudo-field of multi-variant layouts is
    /// special-cased through `discr_layout`.
    fn field(this: TyLayout<'tcx>, cx: &C, i: usize) -> C::TyLayout {
        let tcx = cx.tcx();
        // Builds a layout directly from the discriminant scalar, since the
        // discriminant is not an ordinary field with its own Rust type.
        let discr_layout = |discr: &Scalar| -> C::TyLayout {
            let layout = LayoutDetails::scalar(cx, discr.clone());
            MaybeResult::from(Ok(TyLayout {
                details: tcx.intern_layout(layout),
                ty: discr.value.to_ty(tcx),
            }))
        };

        cx.layout_of(match this.ty.sty {
            // Primitive-like types have no fields at all.
            ty::Bool |
            ty::Char |
            ty::Int(_) |
            ty::Uint(_) |
            ty::Float(_) |
            ty::FnPtr(_) |
            ty::Never |
            ty::FnDef(..) |
            ty::GeneratorWitness(..) |
            ty::Foreign(..) |
            ty::Dynamic(..) => {
                bug!("TyLayout::field_type({:?}): not applicable", this)
            }

            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                assert!(i < this.fields.count());

                // Reuse the fat *T type as its own thin pointer data field.
                // This provides information about e.g., DST struct pointees
                // (which may have no non-DST form), and will work as long
                // as the `Abi` or `FieldPlacement` is checked by users.
                if i == 0 {
                    let nil = tcx.mk_unit();
                    let ptr_ty = if this.ty.is_unsafe_ptr() {
                        tcx.mk_mut_ptr(nil)
                    } else {
                        tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
                    };
                    return MaybeResult::from(cx.layout_of(ptr_ty).to_result().map(|mut ptr_layout| {
                        ptr_layout.ty = this.ty;
                        ptr_layout
                    }));
                }

                // Field 1 is the pointer metadata: a length for slices/str,
                // a vtable pointer (approximated below) for trait objects.
                match tcx.struct_tail(pointee).sty {
                    ty::Slice(_) |
                    ty::Str => tcx.types.usize,
                    ty::Dynamic(_, _) => {
                        tcx.mk_imm_ref(
                            tcx.lifetimes.re_static,
                            tcx.mk_array(tcx.types.usize, 3),
                        )
                        /* FIXME: use actual fn pointers
                        Warning: naively computing the number of entries in the
                        vtable by counting the methods on the trait + methods on
                        all parent traits does not work, because some methods can
                        be not object safe and thus excluded from the vtable.
                        Increase this counter if you tried to implement this but
                        failed to do it without duplicating a lot of code from
                        other places in the compiler: 2
                        tcx.mk_tup(&[
                            tcx.mk_array(tcx.types.usize, 3),
                            tcx.mk_array(Option<fn()>),
                        ])
                        */
                    }
                    _ => bug!("TyLayout::field_type({:?}): not applicable", this)
                }
            }

            // Arrays and slices.
            ty::Array(element, _) |
            ty::Slice(element) => element,
            ty::Str => tcx.types.u8,

            // Tuples, generators and closures.
            ty::Closure(def_id, ref substs) => {
                substs.upvar_tys(def_id, tcx).nth(i).unwrap()
            }

            ty::Generator(def_id, ref substs, _) => {
                match this.variants {
                    Variants::Single { index } => {
                        substs.state_tys(def_id, tcx)
                            .nth(index.as_usize()).unwrap()
                            .nth(i).unwrap()
                    }
                    Variants::Multiple { ref discr, discr_index, .. } => {
                        if i == discr_index {
                            return discr_layout(discr);
                        }
                        substs.prefix_tys(def_id, tcx).nth(i).unwrap()
                    }
                }
            }

            ty::Tuple(tys) => tys[i].expect_ty(),

            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
                this.ty.simd_type(tcx)
            }

            // ADTs.
            ty::Adt(def, substs) => {
                match this.variants {
                    Variants::Single { index } => {
                        def.variants[index].fields[i].ty(tcx, substs)
                    }

                    // Discriminant field for enums (where applicable).
                    Variants::Multiple { ref discr, .. } => {
                        assert_eq!(i, 0);
                        return discr_layout(discr);
                    }
                }
            }

            ty::Projection(_) | ty::UnnormalizedProjection(..) |
            ty::Bound(..) |
            ty::Placeholder(..) |
            ty::Opaque(..)
            | ty::Param(_) | ty::Infer(_) |
            ty::Error => {
                bug!("TyLayout::field_type: unexpected type `{}`", this.ty)
            }
        })
    }

    /// Describes the pointer found at `offset` within `this`, if any:
    /// the pointee's size/align and, for references, a `PointerKind`
    /// classification. Used for emitting pointer attributes.
    fn pointee_info_at(
        this: TyLayout<'tcx>,
        cx: &C,
        offset: Size,
    ) -> Option<PointeeInfo> {
        match this.ty.sty {
            // A raw pointer at offset 0: size/align known, no safety claims.
            ty::RawPtr(mt) if offset.bytes() == 0 => {
                cx.layout_of(mt.ty).to_result().ok()
                    .map(|layout| PointeeInfo {
                        size: layout.size,
                        align: layout.align.abi,
                        safe: None,
                    })
            }

            ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
                let tcx = cx.tcx();
                let is_freeze = ty.is_freeze(tcx, cx.param_env(), DUMMY_SP);
                let kind = match mt {
                    hir::MutImmutable => if is_freeze {
                        PointerKind::Frozen
                    } else {
                        PointerKind::Shared
                    },
                    hir::MutMutable => {
                        // Previously we would only emit noalias annotations for LLVM >= 6 or in
                        // panic=abort mode. That was deemed right, as prior versions had many bugs
                        // in conjunction with unwinding, but later versions didn’t seem to have
                        // said issues. See issue #31681.
                        //
                        // Alas, later on we encountered a case where noalias would generate wrong
                        // code altogether even with recent versions of LLVM in *safe* code with no
                        // unwinding involved. See #54462.
                        //
                        // For now, do not enable mutable_noalias by default at all, while the
                        // issue is being figured out.
                        let mutable_noalias = tcx.sess.opts.debugging_opts.mutable_noalias
                            .unwrap_or(false);
                        if mutable_noalias {
                            PointerKind::UniqueBorrowed
                        } else {
                            PointerKind::Shared
                        }
                    }
                };

                cx.layout_of(ty).to_result().ok()
                    .map(|layout| PointeeInfo {
                        size: layout.size,
                        align: layout.align.abi,
                        safe: Some(kind),
                    })
            }

            _ => {
                // Not directly a pointer type: recurse into the field that
                // covers `offset` (possibly through a niche-optimized enum).
                let mut data_variant = match this.variants {
                    // Within the discriminant field, only the niche itself is
                    // always initialized, so we only check for a pointer at its
                    // offset.
                    //
                    // If the niche is a pointer, it's either valid (according
                    // to its type), or null (which the niche field's scalar
                    // validity range encodes). This allows using
                    // `dereferenceable_or_null` for e.g., `Option<&T>`, and
                    // this will continue to work as long as we don't start
                    // using more niches than just null (e.g., the first page of
                    // the address space, or unaligned pointers).
                    Variants::Multiple {
                        discr_kind: DiscriminantKind::Niche {
                            dataful_variant,
                            ..
                        },
                        discr_index,
                        ..
                    } if this.fields.offset(discr_index) == offset =>
                        Some(this.for_variant(cx, dataful_variant)),
                    _ => Some(this),
                };

                if let Some(variant) = data_variant {
                    // We're not interested in any unions.
                    if let FieldPlacement::Union(_) = variant.fields {
                        data_variant = None;
                    }
                }

                let mut result = None;

                if let Some(variant) = data_variant {
                    // Scan for the field whose extent contains a whole
                    // pointer starting at `offset`, then recurse into it.
                    let ptr_end = offset + Pointer.size(cx);
                    for i in 0..variant.fields.count() {
                        let field_start = variant.fields.offset(i);
                        if field_start <= offset {
                            let field = variant.field(cx, i);
                            result = field.to_result().ok()
                                .and_then(|field| {
                                    if ptr_end <= field_start + field.size {
                                        // We found the right field, look inside it.
                                        field.pointee_info_at(cx, offset - field_start)
                                    } else {
                                        None
                                    }
                                });
                            if result.is_some() {
                                break;
                            }
                        }
                    }
                }

                // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
                if let Some(ref mut pointee) = result {
                    if let ty::Adt(def, _) = this.ty.sty {
                        if def.is_box() && offset.bytes() == 0 {
                            pointee.safe = Some(PointerKind::UniqueOwned);
                        }
                    }
                }

                result
            }
        }
    }
}

/// A "hole" in a scalar's valid-value range: `available` values starting
/// right after `scalar`'s valid range, at `offset` within the layout,
/// usable to encode enum discriminants without extra space.
struct Niche {
    offset: Size,
    scalar: Scalar,
    available: u128,
}

impl Niche {
    /// Reserves `count` values from the niche, returning the first reserved
    /// value and the widened valid range, or `None` if the niche is too small.
    fn reserve<'tcx>(
        &self,
        cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
        count: u128,
    ) -> Option<(u128, Scalar)> {
        if count > self.available {
            return None;
        }
        let Scalar { value, valid_range: ref v } = self.scalar;
        let bits = value.size(cx).bits();
        assert!(bits <= 128);
        let max_value = !0u128 >> (128 - bits);
        // Reserved values sit immediately after the current valid end,
        // wrapping within the scalar's bit width.
        let start = v.end().wrapping_add(1) & max_value;
        let end = v.end().wrapping_add(count) & max_value;
        Some((start, Scalar { value, valid_range: *v.start()..=end }))
    }
}

impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    /// Find the offset of a niche leaf field, starting from
    /// the given type and recursing through aggregates.
    // FIXME(eddyb) traverse already optimized enums.
    fn find_niche(&self, layout: TyLayout<'tcx>) -> Result<Option<Niche>, LayoutError<'tcx>> {
        // Extracts a niche from one scalar, if its valid range leaves any
        // values free.
        let scalar_niche = |scalar: &Scalar, offset| {
            let Scalar { value, valid_range: ref v } = *scalar;

            let bits = value.size(self).bits();
            assert!(bits <= 128);
            let max_value = !0u128 >> (128 - bits);

            // Find out how many values are outside the valid range.
            // (A wrap-around range like `end < start` is also handled.)
            let available = if v.start() <= v.end() {
                v.start() + (max_value - v.end())
            } else {
                v.start() - v.end() - 1
            };

            // Give up if there is no niche value available.
            if available == 0 {
                return None;
            }

            Some(Niche { offset, scalar: scalar.clone(), available })
        };

        // Locals variables which live across yields are stored
        // in the generator type as fields. These may be uninitialized
        // so we don't look for niches there.
        if let ty::Generator(..) = layout.ty.sty {
            return Ok(None);
        }

        match layout.abi {
            Abi::Scalar(ref scalar) => {
                return Ok(scalar_niche(scalar, Size::ZERO));
            }
            Abi::ScalarPair(ref a, ref b) => {
                // HACK(nox): We iter on `b` and then `a` because `max_by_key`
                // returns the last maximum.
                let niche = iter::once(
                    (b, a.value.size(self).align_to(b.value.align(self).abi))
                )
                    .chain(iter::once((a, Size::ZERO)))
                    .filter_map(|(scalar, offset)| scalar_niche(scalar, offset))
                    .max_by_key(|niche| niche.available);
                return Ok(niche);
            }
            Abi::Vector { ref element, .. } => {
                return Ok(scalar_niche(element, Size::ZERO));
            }
            _ => {}
        }

        // Perhaps one of the fields is non-zero, let's recurse and find out.
        if let FieldPlacement::Union(_) = layout.fields {
            // Only Rust enums have safe-to-inspect fields
            // (a discriminant), other unions are unsafe.
            if let Variants::Single { .. } = layout.variants {
                return Ok(None);
            }
        }
        if let FieldPlacement::Array { count: original_64_bit_count, .. } = layout.fields {
            // rust-lang/rust#57038: avoid ICE within FieldPlacement::count when count too big
            if original_64_bit_count > usize::max_value() as u64 {
                return Err(LayoutError::SizeOverflow(layout.ty));
            }
            // All array elements are identical, so inspecting the first
            // suffices.
            if layout.fields.count() > 0 {
                return self.find_niche(layout.field(self, 0)?);
            } else {
                return Ok(None);
            }
        }
        // Otherwise, pick the field whose niche offers the most free values.
        let mut niche = None;
        let mut available = 0;
        for i in 0..layout.fields.count() {
            if let Some(mut c) = self.find_niche(layout.field(self, i)?)? {
                if c.available > available {
                    available = c.available;
                    // Translate the niche's offset from field-relative to
                    // layout-relative.
                    c.offset += layout.fields.offset(i);
                    niche = Some(c);
                }
            }
        }
        Ok(niche)
    }
}

// The `HashStable` impls below feed layout data into incremental
// compilation's stable hasher: each hashes the enum discriminant first,
// then every payload field.

impl<'a> HashStable<StableHashingContext<'a>> for Variants {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use crate::ty::layout::Variants::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Single { index } => {
                index.hash_stable(hcx, hasher);
            }
            Multiple {
                ref discr,
                ref discr_kind,
                discr_index,
                ref variants,
            } => {
                discr.hash_stable(hcx, hasher);
                discr_kind.hash_stable(hcx, hasher);
                discr_index.hash_stable(hcx, hasher);
                variants.hash_stable(hcx, hasher);
            }
        }
    }
}

impl<'a> HashStable<StableHashingContext<'a>> for DiscriminantKind {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use crate::ty::layout::DiscriminantKind::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Tag => {}
            Niche {
                dataful_variant,
                ref niche_variants,
                niche_start,
            } => {
                dataful_variant.hash_stable(hcx, hasher);
                // `RangeInclusive` is hashed via its endpoints.
                niche_variants.start().hash_stable(hcx, hasher);
                niche_variants.end().hash_stable(hcx, hasher);
                niche_start.hash_stable(hcx, hasher);
            }
        }
    }
}

impl<'a> HashStable<StableHashingContext<'a>> for FieldPlacement {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use crate::ty::layout::FieldPlacement::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Union(count) => {
                count.hash_stable(hcx, hasher);
            }
            Array { count, stride } => {
                count.hash_stable(hcx, hasher);
                stride.hash_stable(hcx, hasher);
            }
            Arbitrary { ref offsets, ref memory_index } => {
                offsets.hash_stable(hcx, hasher);
                memory_index.hash_stable(hcx, hasher);
            }
        }
    }
}

impl<'a> HashStable<StableHashingContext<'a>> for VariantIdx {
    fn hash_stable<W: StableHasherResult>(
        &self,
        hcx: &mut StableHashingContext<'a>,
        hasher: &mut StableHasher<W>,
    ) {
        // A `VariantIdx` is hashed as its raw `u32` value.
        self.as_u32().hash_stable(hcx, hasher)
    }
}

impl<'a> HashStable<StableHashingContext<'a>> for Abi {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use crate::ty::layout::Abi::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Uninhabited => {}
            Scalar(ref value) => {
                value.hash_stable(hcx, hasher);
            }
            ScalarPair(ref a, ref b) => {
                a.hash_stable(hcx, hasher);
                b.hash_stable(hcx, hasher);
            }
            Vector { ref element, count } => {
                element.hash_stable(hcx, hasher);
                count.hash_stable(hcx, hasher);
            }
            Aggregate { sized } => {
                sized.hash_stable(hcx, hasher);
            }
        }
    }
}

impl<'a> HashStable<StableHashingContext<'a>> for Scalar {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        let Scalar { value, ref valid_range } = *self;
        value.hash_stable(hcx, hasher);
        valid_range.start().hash_stable(hcx, hasher);
        valid_range.end().hash_stable(hcx, hasher);
    }
}

impl_stable_hash_for!(struct crate::ty::layout::LayoutDetails {
    variants,
    fields,
    abi,
    size,
    align
});

impl_stable_hash_for!(enum crate::ty::layout::Integer {
    I8,
    I16,
    I32,
    I64,
    I128
});

impl_stable_hash_for!(enum crate::ty::layout::Primitive {
    Int(integer, signed),
    Float(fty),
    Pointer
});

impl_stable_hash_for!(struct crate::ty::layout::AbiAndPrefAlign {
    abi,
    pref
});

// `Align` and `Size` hash as their raw byte values.
impl<'tcx> HashStable<StableHashingContext<'tcx>> for Align {
    fn hash_stable<W: StableHasherResult>(
        &self,
        hcx: &mut StableHashingContext<'tcx>,
        hasher: &mut StableHasher<W>,
    ) {
        self.bytes().hash_stable(hcx, hasher);
    }
}

impl<'tcx> HashStable<StableHashingContext<'tcx>> for Size {
    fn hash_stable<W: StableHasherResult>(
        &self,
        hcx: &mut StableHashingContext<'tcx>,
        hasher: &mut StableHasher<W>,
    ) {
        self.bytes().hash_stable(hcx, hasher);
    }
}

impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for LayoutError<'tcx> {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use
crate::ty::layout::LayoutError::*; mem::discriminant(self).hash_stable(hcx, hasher); match *self { Unknown(t) | SizeOverflow(t) => t.hash_stable(hcx, hasher) } } } pub trait FnTypeExt<'tcx, C> where C: LayoutOf<Ty = Ty<'tcx>, TyLayout = TyLayout<'tcx>> + HasDataLayout + HasTargetSpec + HasTyCtxt<'tcx> + HasParamEnv<'tcx>, { fn of_instance(cx: &C, instance: &ty::Instance<'tcx>) -> Self; fn new(cx: &C, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self; fn new_vtable(cx: &C, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self; fn new_internal( cx: &C, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>], mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgType<'tcx, Ty<'tcx>>, ) -> Self; fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi); } impl<'tcx, C> FnTypeExt<'tcx, C> for call::FnType<'tcx, Ty<'tcx>> where C: LayoutOf<Ty = Ty<'tcx>, TyLayout = TyLayout<'tcx>> + HasDataLayout + HasTargetSpec + HasTyCtxt<'tcx> + HasParamEnv<'tcx>, { fn of_instance(cx: &C, instance: &ty::Instance<'tcx>) -> Self { let sig = instance.fn_sig(cx.tcx()); let sig = cx .tcx() .normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); call::FnType::new(cx, sig, &[]) } fn new(cx: &C, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self { call::FnType::new_internal(cx, sig, extra_args, |ty, _| ArgType::new(cx.layout_of(ty))) } fn new_vtable(cx: &C, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self { FnTypeExt::new_internal(cx, sig, extra_args, |ty, arg_idx| { let mut layout = cx.layout_of(ty); // Don't pass the vtable, it's not an argument of the virtual fn. 
// Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait` // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen if arg_idx == Some(0) { let fat_pointer_ty = if layout.is_unsized() { // unsized `self` is passed as a pointer to `self` // FIXME (mikeyhew) change this to use &own if it is ever added to the language cx.tcx().mk_mut_ptr(layout.ty) } else { match layout.abi { Abi::ScalarPair(..) => (), _ => bug!("receiver type has unsupported layout: {:?}", layout), } // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self> // with a Scalar (not ScalarPair) ABI. This is a hack that is understood // elsewhere in the compiler as a method on a `dyn Trait`. // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we // get a built-in pointer type let mut fat_pointer_layout = layout; 'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr() && !fat_pointer_layout.ty.is_region_ptr() { 'iter_fields: for i in 0..fat_pointer_layout.fields.count() { let field_layout = fat_pointer_layout.field(cx, i); if !field_layout.is_zst() { fat_pointer_layout = field_layout; continue 'descend_newtypes; } } bug!( "receiver has no non-zero-sized fields {:?}", fat_pointer_layout ); } fat_pointer_layout.ty }; // we now have a type like `*mut RcBox<dyn Trait>` // change its layout to that of `*mut ()`, a thin pointer, but keep the same type // this is understood as a special case elsewhere in the compiler let unit_pointer_ty = cx.tcx().mk_mut_ptr(cx.tcx().mk_unit()); layout = cx.layout_of(unit_pointer_ty); layout.ty = fat_pointer_ty; } ArgType::new(layout) }) } fn new_internal( cx: &C, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>], mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgType<'tcx, Ty<'tcx>>, ) -> Self { debug!("FnType::new_internal({:?}, {:?})", sig, extra_args); use rustc_target::spec::abi::Abi::*; let conv = match cx.tcx().sess.target.target.adjust_abi(sig.abi) { RustIntrinsic | 
PlatformIntrinsic | Rust | RustCall => Conv::C, // It's the ABI's job to select this, not ours. System => bug!("system abi should be selected elsewhere"), Stdcall => Conv::X86Stdcall, Fastcall => Conv::X86Fastcall, Vectorcall => Conv::X86VectorCall, Thiscall => Conv::X86ThisCall, C => Conv::C, Unadjusted => Conv::C, Win64 => Conv::X86_64Win64, SysV64 => Conv::X86_64SysV, Aapcs => Conv::ArmAapcs, PtxKernel => Conv::PtxKernel, Msp430Interrupt => Conv::Msp430Intr, X86Interrupt => Conv::X86Intr, AmdGpuKernel => Conv::AmdGpuKernel, // These API constants ought to be more specific... Cdecl => Conv::C, }; let mut inputs = sig.inputs(); let extra_args = if sig.abi == RustCall { assert!(!sig.c_variadic && extra_args.is_empty()); match sig.inputs().last().unwrap().sty { ty::Tuple(tupled_arguments) => { inputs = &sig.inputs()[0..sig.inputs().len() - 1]; tupled_arguments.iter().map(|k| k.expect_ty()).collect() } _ => { bug!( "argument to function with \"rust-call\" ABI \ is not a tuple" ); } } } else { assert!(sig.c_variadic || extra_args.is_empty()); extra_args.to_vec() }; let target = &cx.tcx().sess.target.target; let win_x64_gnu = target.target_os == "windows" && target.arch == "x86_64" && target.target_env == "gnu"; let linux_s390x = target.target_os == "linux" && target.arch == "s390x" && target.target_env == "gnu"; let linux_sparc64 = target.target_os == "linux" && target.arch == "sparc64" && target.target_env == "gnu"; let rust_abi = match sig.abi { RustIntrinsic | PlatformIntrinsic | Rust | RustCall => true, _ => false, }; // Handle safe Rust thin and fat pointers. let adjust_for_rust_scalar = |attrs: &mut ArgAttributes, scalar: &Scalar, layout: TyLayout<'tcx>, offset: Size, is_return: bool| { // Booleans are always an i1 that needs to be zero-extended. if scalar.is_bool() { attrs.set(ArgAttribute::ZExt); return; } // Only pointer types handled below. 
if scalar.value != Pointer { return; } if scalar.valid_range.start() < scalar.valid_range.end() { if *scalar.valid_range.start() > 0 { attrs.set(ArgAttribute::NonNull); } } if let Some(pointee) = layout.pointee_info_at(cx, offset) { if let Some(kind) = pointee.safe { attrs.pointee_size = pointee.size; attrs.pointee_align = Some(pointee.align); // `Box` pointer parameters never alias because ownership is transferred // `&mut` pointer parameters never alias other parameters, // or mutable global data // // `&T` where `T` contains no `UnsafeCell<U>` is immutable, // and can be marked as both `readonly` and `noalias`, as // LLVM's definition of `noalias` is based solely on memory // dependencies rather than pointer equality let no_alias = match kind { PointerKind::Shared => false, PointerKind::UniqueOwned => true, PointerKind::Frozen | PointerKind::UniqueBorrowed => !is_return, }; if no_alias { attrs.set(ArgAttribute::NoAlias); } if kind == PointerKind::Frozen && !is_return { attrs.set(ArgAttribute::ReadOnly); } } } }; // Store the index of the last argument. This is useful for working with // C-compatible variadic arguments. let last_arg_idx = if sig.inputs().is_empty() { None } else { Some(sig.inputs().len() - 1) }; let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| { let is_return = arg_idx.is_none(); let mut arg = mk_arg_type(ty, arg_idx); if arg.layout.is_zst() { // For some forsaken reason, x86_64-pc-windows-gnu // doesn't ignore zero-sized struct arguments. // The same is true for s390x-unknown-linux-gnu // and sparc64-unknown-linux-gnu. if is_return || rust_abi || (!win_x64_gnu && !linux_s390x && !linux_sparc64) { arg.mode = PassMode::Ignore(IgnoreMode::Zst); } } // If this is a C-variadic function, this is not the return value, // and there is one or more fixed arguments; ensure that the `VaListImpl` // is ignored as an argument. 
if sig.c_variadic { match (last_arg_idx, arg_idx) { (Some(last_idx), Some(cur_idx)) if last_idx == cur_idx => { let va_list_did = match cx.tcx().lang_items().va_list() { Some(did) => did, None => bug!("`va_list` lang item required for C-variadic functions"), }; match ty.sty { ty::Adt(def, _) if def.did == va_list_did => { // This is the "spoofed" `VaListImpl`. Set the arguments mode // so that it will be ignored. arg.mode = PassMode::Ignore(IgnoreMode::CVarArgs); } _ => (), } } _ => {} } } // FIXME(eddyb) other ABIs don't have logic for scalar pairs. if !is_return && rust_abi { if let Abi::ScalarPair(ref a, ref b) = arg.layout.abi { let mut a_attrs = ArgAttributes::new(); let mut b_attrs = ArgAttributes::new(); adjust_for_rust_scalar(&mut a_attrs, a, arg.layout, Size::ZERO, false); adjust_for_rust_scalar( &mut b_attrs, b, arg.layout, a.value.size(cx).align_to(b.value.align(cx).abi), false, ); arg.mode = PassMode::Pair(a_attrs, b_attrs); return arg; } } if let Abi::Scalar(ref scalar) = arg.layout.abi { if let PassMode::Direct(ref mut attrs) = arg.mode { adjust_for_rust_scalar(attrs, scalar, arg.layout, Size::ZERO, is_return); } } arg }; let mut fn_ty = FnType { ret: arg_of(sig.output(), None), args: inputs .iter() .cloned() .chain(extra_args) .enumerate() .map(|(i, ty)| arg_of(ty, Some(i))) .collect(), c_variadic: sig.c_variadic, conv, }; fn_ty.adjust_for_abi(cx, sig.abi); fn_ty } fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi) { if abi == SpecAbi::Unadjusted { return; } if abi == SpecAbi::Rust || abi == SpecAbi::RustCall || abi == SpecAbi::RustIntrinsic || abi == SpecAbi::PlatformIntrinsic { let fixup = |arg: &mut ArgType<'tcx, Ty<'tcx>>| { if arg.is_ignore() { return; } match arg.layout.abi { Abi::Aggregate { .. } => {} // This is a fun case! The gist of what this is doing is // that we want callers and callees to always agree on the // ABI of how they pass SIMD arguments. 
If we were to *not* // make these arguments indirect then they'd be immediates // in LLVM, which means that they'd used whatever the // appropriate ABI is for the callee and the caller. That // means, for example, if the caller doesn't have AVX // enabled but the callee does, then passing an AVX argument // across this boundary would cause corrupt data to show up. // // This problem is fixed by unconditionally passing SIMD // arguments through memory between callers and callees // which should get them all to agree on ABI regardless of // target feature sets. Some more information about this // issue can be found in #44367. // // Note that the platform intrinsic ABI is exempt here as // that's how we connect up to LLVM and it's unstable // anyway, we control all calls to it in libstd. Abi::Vector { .. } if abi != SpecAbi::PlatformIntrinsic && cx.tcx().sess.target.target.options.simd_types_indirect => { arg.make_indirect(); return; } _ => return, } let size = arg.layout.size; if arg.layout.is_unsized() || size > Pointer.size(cx) { arg.make_indirect(); } else { // We want to pass small aggregates as immediates, but using // a LLVM aggregate type for this leads to bad optimizations, // so we pick an appropriately sized integer type instead. arg.cast_to(Reg { kind: RegKind::Integer, size, }); } }; fixup(&mut self.ret); for arg in &mut self.args { fixup(arg); } if let PassMode::Indirect(ref mut attrs, _) = self.ret.mode { attrs.set(ArgAttribute::StructRet); } return; } if let Err(msg) = self.adjust_for_cabi(cx, abi) { cx.tcx().sess.fatal(&msg); } } }
41.089434
100
0.464078
892a203efa9469ec58aee2a6f408a8b344f4fca6
2,343
#![feature(async_await, await_macro)] use futures::executor::block_on; use riker::actors::*; #[test] fn system_create() { assert!(ActorSystem::new().is_ok()); assert!(ActorSystem::with_name("valid-name").is_ok()); assert!(ActorSystem::with_name("/").is_err()); assert!(ActorSystem::with_name("*").is_err()); assert!(ActorSystem::with_name("/a/b/c").is_err()); assert!(ActorSystem::with_name("@").is_err()); assert!(ActorSystem::with_name("#").is_err()); assert!(ActorSystem::with_name("abc*").is_err()); } struct ShutdownTest { level: u32, } impl ShutdownTest { fn actor(level: u32) -> Self { ShutdownTest { level: level } } } impl Actor for ShutdownTest { type Msg = (); fn pre_start(&mut self, ctx: &Context<Self::Msg>) { if self.level < 10 { let props = Props::new_args(ShutdownTest::actor, self.level + 1); ctx.actor_of(props, format!("test-actor-{}", self.level + 1).as_str()) .unwrap(); } } fn recv(&mut self, _: &Context<Self::Msg>, _: Self::Msg, _: Sender) {} } #[test] #[allow(dead_code)] fn system_shutdown() { let sys = ActorSystem::new().unwrap(); let props = Props::new_args(ShutdownTest::actor, 1); let _ = sys.actor_of(props, "test-actor-1").unwrap(); block_on(sys.shutdown()).unwrap(); } #[test] fn system_futures_exec() { let sys = ActorSystem::new().unwrap(); for i in 0..100 { let f = sys.run(async move { format!("some_val_{}", i) }).unwrap(); assert_eq!(block_on(f), format!("some_val_{}", i)); } } #[test] fn system_futures_panic() { let sys = ActorSystem::new().unwrap(); for _ in 0..100 { let _ = sys .run(async move { panic!("// TEST PANIC // TEST PANIC // TEST PANIC //"); }) .unwrap(); } for i in 0..100 { let f = sys.run(async move { format!("some_val_{}", i) }).unwrap(); assert_eq!(block_on(f), format!("some_val_{}", i)); } } #[test] fn system_load_app_config() { let sys = ActorSystem::new().unwrap(); assert_eq!(sys.config().get_int("app.some_setting").unwrap() as i64, 1); } #[test] fn system_builder() { let sys = 
SystemBuilder::new().name("my-sys").create().unwrap(); block_on(sys.shutdown()).unwrap(); }
24.154639
82
0.577038
8a469d4c0d299c4198d4a565b369ca19428ccdf8
8,062
//! Structures to keep the process alive until some event occurs use domain::event::DeathReason; use futures::{ channel::mpsc::{channel, Receiver, Sender}, lock::Mutex, pin_mut, prelude::*, select, }; use std::{ sync::Arc, time::{Duration, Instant}, }; use tokio::signal::{ ctrl_c, unix::{signal, SignalKind}, }; use tokio::time::sleep; use tracing::{debug, error, instrument, trace, warn}; /// Action to a hearth #[derive(Debug)] pub enum HeartInteraction { /// Kill it for the given reason Kill(String), /// Reset its lifetime to the original value Rejuvenate, } /// Lifecycle management struct that can be used to keep the application alive pub struct Heart { /// Receiver for interactions sent by heart stone rx: Receiver<HeartInteraction>, /// Point in time when the lifetime was last reset lifetime_start: Arc<Mutex<Instant>>, /// Maximum lifetime duration lifetime: Option<Duration>, } impl Heart { /// Creates a new heart and linked stone with no lifetime limit pub fn new() -> (Self, HeartStone) { Heart::internal_new(None) } /// Creates a new heart with no lifetime and discards the linked stone pub fn without_heart_stone() -> Self { Heart::internal_new(None).0 } /// Creates a new heart and linked stone with a lifetime pub fn with_lifetime(lifetime: Duration) -> (Self, HeartStone) { Heart::internal_new(Some(lifetime)) } /// Reduces the next lifetime timeout by artificially shifting the beginning of the current period. /// This allows e.g. shorter initial lifetimes. 
#[instrument(skip(self))] pub async fn reduce_next_lifetime(&mut self, next_lifetime: Duration) { if let Some(lifetime) = self.lifetime { debug!("Reducing next lifetime"); *self.lifetime_start.lock().await = Instant::now() - lifetime + next_lifetime; } else { warn!("Attempted to reduce non-existent lifetime"); } } /// Future that waits until the heart dies for the returned reason #[instrument(skip(self))] pub async fn death(&mut self) -> DeathReason { debug!("Awaiting death of heart"); let mut age_future = match self.lifetime { Some(lifetime) => Heart::lifetime_watch(lifetime, self.lifetime_start.clone()).boxed(), None => futures::future::pending().boxed(), } .fuse(); loop { select! { interaction = self.rx.next() => { if let Some(interaction) = interaction { trace!(?interaction, "Received interaction with heart"); match interaction { HeartInteraction::Kill(reason) => return DeathReason::Killed(reason), HeartInteraction::Rejuvenate => { *self.lifetime_start.lock().await = Instant::now(); } } } }, () = age_future => { debug!("Lifetime of heart has been exceeded"); return DeathReason::LifetimeExceeded }, () = Heart::termination_signal().fuse() => { debug!("Heart has been terminated externally"); return DeathReason::Terminated }, }; } } fn internal_new(lifetime: Option<Duration>) -> (Self, HeartStone) { let (tx, rx) = channel(2); let heart = Self { rx, lifetime_start: Arc::new(Mutex::new(Instant::now())), lifetime, }; let stone = HeartStone::new(tx); (heart, stone) } async fn termination_signal() { let mut sigterm_stream = signal(SignalKind::terminate()).unwrap(); let sigterm = sigterm_stream.recv().fuse(); let ctrl_c = ctrl_c().fuse(); pin_mut!(sigterm, ctrl_c); select! 
{ _ = sigterm => {}, _ = ctrl_c => {}, }; } async fn lifetime_watch(lifetime: Duration, lifetime_start: Arc<Mutex<Instant>>) { loop { let elapsed_time = lifetime_start.lock().await.elapsed(); if elapsed_time > lifetime { break; } sleep(lifetime - elapsed_time).await; } } } /// Remote controller for the heart #[derive(Clone)] pub struct HeartStone { remote: Sender<HeartInteraction>, } impl HeartStone { fn new(remote: Sender<HeartInteraction>) -> Self { Self { remote } } /// Kill the associated heart #[instrument(skip(self))] pub async fn kill(&mut self, reason: String) { debug!(?reason, "Killing heart"); self.send(HeartInteraction::Kill(reason)).await; } /// Reset the lifetime of the associated heart #[instrument(skip(self))] pub async fn reset_lifetime(&mut self) { debug!("Rejuvenating heart"); self.send(HeartInteraction::Rejuvenate).await; } #[instrument(skip(self))] async fn send(&mut self, interaction: HeartInteraction) { trace!("Sending interaction to heart"); if let Err(error) = self.remote.send(interaction).await { error!(?error, "Failed to send interaction to heart"); } } } #[cfg(test)] mod does { use super::*; use futures::poll; use tokio::task::{spawn, yield_now}; use tokio::time::sleep; #[tokio::test] async fn reduce_lifetime() { let lifetime = Duration::from_millis(500); let reduced_lifetime = Duration::from_millis(100); let (mut heart, _stone) = Heart::with_lifetime(lifetime); let (mut reduced_heart, _reduced_stone) = Heart::with_lifetime(lifetime); reduced_heart.reduce_next_lifetime(reduced_lifetime).await; let handle = spawn(async move { heart.death().await }); let reduced_handle = spawn(async move { reduced_heart.death().await }); sleep(reduced_lifetime).await; yield_now().await; assert!(!poll!(handle).is_ready()); assert!(poll!(reduced_handle).is_ready()); } #[tokio::test] async fn live_without_lifetime() { let (mut heart, _stone) = Heart::new(); let handle = spawn(async move { heart.death().await }); sleep(Duration::from_millis(100)).await; 
yield_now().await; assert!(!poll!(handle).is_ready()); } #[tokio::test] async fn die_when_killed() { let (mut heart, mut stone) = Heart::new(); let handle = spawn(async move { heart.death().await }); stone.kill("Testing".to_owned()).await; yield_now().await; assert!(poll!(handle).is_ready()); } #[tokio::test] async fn die_after_lifetime() { let lifetime = Duration::from_millis(10); let (mut heart, _stone) = Heart::with_lifetime(lifetime); let handle = spawn(async move { heart.death().await }); sleep(lifetime).await; yield_now().await; assert!(poll!(handle).is_ready()); } // TODO Re-implement this test, tokio JoinHandles can not be .shared() thus it is inactive for now :( // #[tokio::test] // async fn lives_longer_after_rejuvenation() { // let lifetime = Duration::from_millis(10); // let (mut heart, mut stone) = Heart::with_lifetime(lifetime); // let handle = spawn(async move { heart.death().await }); // // Wait half the lifetime and reset it // sleep(lifetime / 2).await; // stone.reset_lifetime().await; // // Check status after the original lifetime elapsed // sleep(lifetime / 2).await; // yield_now().await; // assert!(!poll!(handle).is_ready()); // // Wait for the reset lifetime to expire and check status // sleep(lifetime / 2).await; // yield_now().await; // assert!(poll!(handle).is_ready()); // } }
30.888889
105
0.579757
2628ce0b7b825eed6a39e3bc43f9e8c81b986e99
13,648
use crate::errors::{EventError, EventErrorType}; use crate::peer::PeerId; use bincode::serialize; use failure::Error; use ring::digest::{digest, SHA256}; use serde::Serialize; use std::collections::HashMap; pub mod event_hash; pub mod event_signature; pub mod parents; use self::event_hash::EventHash; use self::event_signature::EventSignature; use self::parents::Parents; #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] pub struct Event<P: Parents + Clone + Serialize> { #[serde(skip)] can_see: HashMap<PeerId, EventHash>, #[serde(skip)] famous: Option<bool>, payload: Vec<Vec<u8>>, parents: Option<P>, timestamp: Option<u64>, creator: PeerId, signature: Option<EventSignature>, #[serde(skip)] round: Option<usize>, #[serde(skip)] round_received: Option<usize>, } impl<P: Parents + Clone + Serialize> Event<P> { pub fn new(payload: Vec<Vec<u8>>, parents: Option<P>, creator: PeerId) -> Event<P> { Event { can_see: HashMap::new(), creator, famous: None, payload, parents, round: None, round_received: None, signature: None, timestamp: None, } } #[inline] pub fn set_timestamp(&mut self, timestamp: u64) { self.timestamp = Some(timestamp); } #[inline] pub fn timestamp(&self) -> Result<u64, Error> { self.timestamp .clone() .ok_or(Error::from(EventError::new(EventErrorType::NoTimestamp { hash: self.hash()?, }))) } #[inline] pub fn set_round_received(&mut self, round_received: usize) { self.round_received = Some(round_received); } #[inline] pub fn is_self_parent(&self, hash: &EventHash) -> Result<bool, Error> { let mut error: Option<Error> = None; let r = self .parents .clone() .map(|p| match p.self_parent() { Ok(self_parent) => self_parent == hash.clone(), Err(e) => { error = Some(e); false } }) .unwrap_or(false); if error.is_some() { return Err(error.unwrap()); } Ok(r) } #[inline] pub fn signature(&self) -> Result<EventSignature, Error> { self.signature .clone() .ok_or(Error::from(EventError::new(EventErrorType::NoSignature { hash: self.hash()?, }))) } #[inline] pub fn 
payload(&self) -> Vec<Vec<u8>> { self.payload.clone() } #[inline] pub fn famous(&mut self, famous: bool) { self.famous = Some(famous) } #[inline] pub fn is_famous(&self) -> bool { self.famous.unwrap_or(false) } #[inline] pub fn is_undefined(&self) -> bool { self.famous.is_none() } #[inline] pub fn can_see(&self) -> &HashMap<PeerId, EventHash> { &self.can_see } #[inline] pub fn set_can_see(&mut self, can_see: HashMap<PeerId, EventHash>) { self.can_see = can_see; } #[inline] pub fn round(&self) -> Result<usize, Error> { self.round .ok_or(Error::from(EventError::new(EventErrorType::RoundNotSet { hash: self.hash()?, }))) } #[inline] pub fn maybe_round(&self) -> Option<usize> { self.round.clone() } #[inline] pub fn add_can_see(&mut self, peer: PeerId, hash: EventHash) { self.can_see.insert(peer, hash); } #[inline] pub fn is_root(&self) -> bool { self.parents.is_none() } #[inline] pub fn self_parent(&self) -> Result<EventHash, Error> { let mut error: Option<Error> = None; let none_error = format_err!("self_parent() returned None"); match self .parents .clone() .map(|p| match p.self_parent() { Ok(sp) => Some(sp), Err(e) => { debug!(target: "event", "{}", e); let hash: EventHash = match self.hash() { Ok(hash) => hash, Err(e) => { debug!(target: "hash", "{}", e); EventHash([0; 32]) } }; error = Some(Error::from(EventError::new(EventErrorType::NoSelfParent { hash: hash, }))); None } }) .filter(|p| p.is_some()) .unwrap() { Some(p) => Ok(p), None => Err(if error.is_some() { error.unwrap() } else { none_error }), } } #[inline] pub fn parents(&self) -> &Option<P> { &self.parents } #[inline] pub fn creator(&self) -> &PeerId { &self.creator } pub fn sign(&mut self, signature: EventSignature) { self.signature = Some(signature); } #[inline] pub fn set_round(&mut self, round: usize) { self.round = Some(round); } pub fn hash(&self) -> Result<EventHash, Error> { let value = ( self.payload.clone(), self.parents.clone(), self.timestamp.clone(), self.creator.clone(), ); let bytes = 
serialize(&value)?; Ok(EventHash::new(digest(&SHA256, bytes.as_ref()).as_ref())) } pub fn is_valid(&self, hash: &EventHash) -> Result<bool, Error> { self.signature .clone() .map(|s| s.verify(&self, &self.creator)) .unwrap_or(Err(Error::from(EventError::new( EventErrorType::UnsignedEvent { hash: self.hash()? }, ))))?; Ok(hash.as_ref() == self.hash()?.as_ref()) } } proptest! { #[test] fn root_event_shouldnt_have_self_parents(hash in ".*") { use crate::event::{EventHash, parents::ParentsPair}; use ring::digest::{digest, SHA256}; let event: Event<ParentsPair> = Event::new(Vec::new(), None, Vec::new()); let hash = EventHash::new(digest(&SHA256, hash.as_bytes()).as_ref()); assert!(!event.is_self_parent(&hash).unwrap()) } #[test] fn it_should_report_correctly_self_parent(self_parent_hash in ".*", p_try in ".*") { use crate::event::{EventHash, parents::ParentsPair}; use ring::digest::{digest, SHA256}; let self_parent = EventHash::new(digest(&SHA256, self_parent_hash.as_bytes()).as_ref()); let other_parent = EventHash::new(digest(&SHA256, b"fish").as_ref()); let event = Event::new(Vec::new(), Some(ParentsPair(self_parent.clone(), other_parent)), Vec::new()); let hash = EventHash::new(digest(&SHA256, p_try.as_bytes()).as_ref()); assert!(event.is_self_parent(&self_parent).unwrap()); assert_eq!(self_parent_hash == p_try, event.is_self_parent(&hash).unwrap()) } #[test] fn it_should_have_different_hashes_on_different_transactions(tx1 in "[a-z]*", tx2 in "[a-z]*") { use crate::event::parents::ParentsPair; let event1: Event<ParentsPair> = Event::new(vec![tx1.as_bytes().to_vec()], None, Vec::new()); let event2: Event<ParentsPair> = Event::new(vec![tx2.as_bytes().to_vec()], None, Vec::new()); let event3: Event<ParentsPair> = Event::new(vec![tx2.as_bytes().to_vec()], None, Vec::new()); let hash1 = event1.hash().unwrap(); let hash2 = event2.hash().unwrap(); let hash3 = event3.hash().unwrap(); assert!(hash2 == hash3); assert_eq!(tx1 == tx2, hash1 == hash2); } #[test] fn 
it_should_have_different_hashes_on_different_self_parents(tx1 in ".*", tx2 in ".*") { use crate::event::{EventHash, parents::ParentsPair}; use ring::digest::{digest, SHA256}; let other_parent = EventHash::new(digest(&SHA256, b"42").as_ref()); let self_parent1 = EventHash::new(digest(&SHA256, tx1.as_bytes()).as_ref()); let self_parent2 = EventHash::new(digest(&SHA256, tx2.as_bytes()).as_ref()); let self_parent3 = EventHash::new(digest(&SHA256, tx2.as_bytes()).as_ref()); let event1 = Event::new(vec![], Some(ParentsPair(self_parent1, other_parent.clone())), Vec::new()); let event2 = Event::new(vec![], Some(ParentsPair(self_parent2, other_parent.clone())), Vec::new()); let event3 = Event::new(vec![], Some(ParentsPair(self_parent3, other_parent.clone())), Vec::new()); let hash1 = event1.hash().unwrap(); let hash2 = event2.hash().unwrap(); let hash3 = event3.hash().unwrap(); assert!(hash2 == hash3); assert_eq!(tx1 == tx2, hash1 == hash2); } #[test] fn it_should_have_different_hashes_on_different_other_parents(tx1 in ".*", tx2 in ".*") { use crate::event::{EventHash, parents::ParentsPair}; use ring::digest::{digest, SHA256}; let self_parent = EventHash::new(digest(&SHA256, b"42").as_ref()); let other_parent1 = EventHash::new(digest(&SHA256, tx1.as_bytes()).as_ref()); let other_parent2 = EventHash::new(digest(&SHA256, tx2.as_bytes()).as_ref()); let other_parent3 = EventHash::new(digest(&SHA256, tx2.as_bytes()).as_ref()); let event1 = Event::new(vec![], Some(ParentsPair(self_parent.clone(), other_parent1)), Vec::new()); let event2 = Event::new(vec![], Some(ParentsPair(self_parent.clone(), other_parent2)), Vec::new()); let event3 = Event::new(vec![], Some(ParentsPair(self_parent.clone(), other_parent3)), Vec::new()); let hash1 = event1.hash().unwrap(); let hash2 = event2.hash().unwrap(); let hash3 = event3.hash().unwrap(); assert!(hash2 == hash3); assert_eq!(tx1 == tx2, hash1 == hash2); } #[test] fn it_should_have_different_hash_on_different_creators(c1 in ".*", c2 in ".*") 
{ use crate::event::parents::ParentsPair; let event1: Event<ParentsPair> = Event::new(vec![], None, c1.as_bytes().to_vec()); let event2: Event<ParentsPair> = Event::new(vec![], None, c2.as_bytes().to_vec()); let event3: Event<ParentsPair> = Event::new(vec![], None, c2.as_bytes().to_vec()); let hash1 = event1.hash().unwrap(); let hash2 = event2.hash().unwrap(); let hash3 = event3.hash().unwrap(); assert!(hash2 == hash3); assert_eq!(c1 == c2, hash1 == hash2); } #[test] fn it_should_have_different_hash_on_different_timestamps(s1 in 0u64..10000, s2 in 0u64..10000) { use crate::event::parents::ParentsPair; let mut event1: Event<ParentsPair> = Event::new(vec![], None, Vec::new()); let mut event2: Event<ParentsPair> = Event::new(vec![], None, Vec::new()); let mut event3: Event<ParentsPair> = Event::new(vec![], None, Vec::new()); event1.set_timestamp(s1); event2.set_timestamp(s2); event3.set_timestamp(s2); let hash1 = event1.hash().unwrap(); let hash2 = event2.hash().unwrap(); let hash3 = event3.hash().unwrap(); assert!(hash2 == hash3); assert_eq!(s1 == s2, hash1 == hash2); } } #[cfg(test)] mod tests { use crate::event::{parents::ParentsPair, Event, EventHash, EventSignature}; use ring::digest::{digest, SHA256}; use ring::{rand, signature}; #[test] fn it_should_succeed_when_verifying_correct_event() { let rng = rand::SystemRandom::new(); let pkcs8_bytes = signature::Ed25519KeyPair::generate_pkcs8(&rng).unwrap(); let kp = signature::Ed25519KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8_bytes)).unwrap(); let mut event: Event<ParentsPair> = Event::new(vec![], None, kp.public_key_bytes().to_vec()); let hash = event.hash().unwrap(); let sign = kp.sign(hash.as_ref()); let event_signature = EventSignature::new(sign.as_ref()); event.sign(event_signature); assert!(event.is_valid(&hash).unwrap()); } #[test] fn it_shouldnt_succeed_when_verifying_correct_event_with_wrong_hash() { let rng = rand::SystemRandom::new(); let pkcs8_bytes = 
signature::Ed25519KeyPair::generate_pkcs8(&rng).unwrap(); let kp = signature::Ed25519KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8_bytes)).unwrap(); let mut event: Event<ParentsPair> = Event::new(vec![], None, kp.public_key_bytes().to_vec()); let hash = event.hash().unwrap(); let sign = kp.sign(hash.as_ref()); let event_signature = EventSignature::new(sign.as_ref()); let wrong_hash = EventHash::new(digest(&SHA256, b"42").as_ref()); event.sign(event_signature); assert!(!event.is_valid(&wrong_hash).unwrap()); } #[test] #[should_panic(expected = "Unspecified")] fn it_should_error_when_verifying_wrong_event() { let rng = rand::SystemRandom::new(); let pkcs8_bytes = signature::Ed25519KeyPair::generate_pkcs8(&rng).unwrap(); let kp = signature::Ed25519KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8_bytes)).unwrap(); let mut event: Event<ParentsPair> = Event::new(vec![], None, vec![]); let hash = event.hash().unwrap(); let sign = kp.sign(hash.as_ref()); let event_signature = EventSignature::new(sign.as_ref()); event.sign(event_signature); assert!(!event.is_valid(&hash).unwrap()); } }
35.727749
109
0.567922
eb873bf1d371efd02cceb3a993611b339c58c007
3,038
// Taken from: https://gitlab.com/Toru3/ring-algorithm/-/blob/c4eaf606e88cb62cf87df98c99f923b253ad976a/src/lib.rs // Original code is licensed under terms of: MIT OR Apache-2.0 use num_bigint::Sign; use num_traits::Signed; use crate::arithmetic::{One, Zero}; use super::BigInt; fn leading_unit(n: &BigInt) -> BigInt { match n.num.sign() { Sign::Minus => -BigInt::one(), _ => BigInt::one(), } } fn abs(n: &BigInt) -> BigInt { BigInt { num: n.num.abs() } } /// Extended euclidian algorithm with normalize pub fn normalized_extended_euclidian_algorithm(x: &BigInt, y: &BigInt) -> (BigInt, BigInt, BigInt) { let lc_x = leading_unit(&x); let lc_y = leading_unit(&y); let mut old = (abs(x), &BigInt::one() / &lc_x, BigInt::zero()); let mut now = (abs(y), BigInt::zero(), &BigInt::one() / &lc_y); while !now.0.is_zero() { let q = &old.0 / &now.0; let r = &old.0 % &now.0; let lc_r = leading_unit(&r); let new = ( abs(&r), &(&old.1 - &(&q * &now.1)) / &lc_r, &(&old.2 - &(&q * &now.2)) / &lc_r, ); old = now; now = new; } old } /// Calc inverse in modulo /// /// calc x ($`ax \equiv 1 \pmod{m}`$) pub fn modulo_inverse(a: &BigInt, m: &BigInt) -> Option<BigInt> { let (gcd, inv_a, _) = normalized_extended_euclidian_algorithm(a, m); if gcd.is_one() { Some(inv_a) } else { None } } #[cfg(test)] mod test { use super::*; use crate::arithmetic::Modulo; #[test] fn test_gcd() { let gcd = |a, b| normalized_extended_euclidian_algorithm(&a, &b).0; assert_eq!(gcd(BigInt::from(0), BigInt::from(0)), BigInt::from(0)); assert_eq!(gcd(BigInt::from(42), BigInt::from(0)), BigInt::from(42)); assert_eq!(gcd(BigInt::from(0), BigInt::from(42)), BigInt::from(42)); assert_eq!(gcd(BigInt::from(64), BigInt::from(58)), BigInt::from(2)); assert_eq!(gcd(BigInt::from(97), BigInt::from(89)), BigInt::from(1)); } #[test] fn test_mod_inv() { // not exists inverse assert_eq!(check_mod_inv(&BigInt::from(0), &BigInt::from(0)), false); assert_eq!(check_mod_inv(&BigInt::from(42), &BigInt::from(0)), false); 
assert_eq!(check_mod_inv(&BigInt::from(0), &BigInt::from(42)), false); assert_eq!(check_mod_inv(&BigInt::from(64), &BigInt::from(58)), false); // exists inverse assert_eq!(check_mod_inv(&BigInt::from(97), &BigInt::from(89)), true); assert_eq!(check_mod_inv(&BigInt::from(7), &BigInt::from(15)), true); assert_eq!(check_mod_inv(&BigInt::from(42), &BigInt::from(55)), true); assert_eq!(check_mod_inv(&BigInt::from(15), &BigInt::from(64)), true); } fn check_mod_inv(a: &BigInt, b: &BigInt) -> bool { match modulo_inverse(a, b) { Some(c) => { assert_eq!(BigInt::mod_mul(a, &c, b), BigInt::one()); true } None => false, } } }
32.319149
113
0.563858
ab391efb62e0230e06922589888f6342cab019af
22,823
#[doc = "Register `FRCE_OFF` reader"] pub struct R(crate::R<FRCE_OFF_SPEC>); impl core::ops::Deref for R { type Target = crate::R<FRCE_OFF_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<FRCE_OFF_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<FRCE_OFF_SPEC>) -> Self { R(reader) } } #[doc = "Register `FRCE_OFF` writer"] pub struct W(crate::W<FRCE_OFF_SPEC>); impl core::ops::Deref for W { type Target = crate::W<FRCE_OFF_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<FRCE_OFF_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<FRCE_OFF_SPEC>) -> Self { W(writer) } } #[doc = "Field `proc1` reader - "] pub struct PROC1_R(crate::FieldReader<bool, bool>); impl PROC1_R { pub(crate) fn new(bits: bool) -> Self { PROC1_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for PROC1_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `proc1` writer - "] pub struct PROC1_W<'a> { w: &'a mut W, } impl<'a> PROC1_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 16)) | ((value as u32 & 0x01) << 16); self.w } } #[doc = "Field `proc0` reader - "] pub struct PROC0_R(crate::FieldReader<bool, bool>); impl PROC0_R { pub(crate) fn new(bits: bool) -> Self { PROC0_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for PROC0_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `proc0` writer - "] pub struct 
PROC0_W<'a> { w: &'a mut W, } impl<'a> PROC0_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 15)) | ((value as u32 & 0x01) << 15); self.w } } #[doc = "Field `sio` reader - "] pub struct SIO_R(crate::FieldReader<bool, bool>); impl SIO_R { pub(crate) fn new(bits: bool) -> Self { SIO_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for SIO_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `sio` writer - "] pub struct SIO_W<'a> { w: &'a mut W, } impl<'a> SIO_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 14)) | ((value as u32 & 0x01) << 14); self.w } } #[doc = "Field `vreg_and_chip_reset` reader - "] pub struct VREG_AND_CHIP_RESET_R(crate::FieldReader<bool, bool>); impl VREG_AND_CHIP_RESET_R { pub(crate) fn new(bits: bool) -> Self { VREG_AND_CHIP_RESET_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for VREG_AND_CHIP_RESET_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `vreg_and_chip_reset` writer - "] pub struct VREG_AND_CHIP_RESET_W<'a> { w: &'a mut W, } impl<'a> VREG_AND_CHIP_RESET_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut 
W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 13)) | ((value as u32 & 0x01) << 13); self.w } } #[doc = "Field `xip` reader - "] pub struct XIP_R(crate::FieldReader<bool, bool>); impl XIP_R { pub(crate) fn new(bits: bool) -> Self { XIP_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for XIP_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `xip` writer - "] pub struct XIP_W<'a> { w: &'a mut W, } impl<'a> XIP_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 12)) | ((value as u32 & 0x01) << 12); self.w } } #[doc = "Field `sram5` reader - "] pub struct SRAM5_R(crate::FieldReader<bool, bool>); impl SRAM5_R { pub(crate) fn new(bits: bool) -> Self { SRAM5_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for SRAM5_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `sram5` writer - "] pub struct SRAM5_W<'a> { w: &'a mut W, } impl<'a> SRAM5_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 11)) | ((value as u32 & 0x01) << 11); self.w } } #[doc = "Field `sram4` reader - "] pub struct SRAM4_R(crate::FieldReader<bool, bool>); impl SRAM4_R { pub(crate) fn new(bits: bool) -> 
Self { SRAM4_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for SRAM4_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `sram4` writer - "] pub struct SRAM4_W<'a> { w: &'a mut W, } impl<'a> SRAM4_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 10)) | ((value as u32 & 0x01) << 10); self.w } } #[doc = "Field `sram3` reader - "] pub struct SRAM3_R(crate::FieldReader<bool, bool>); impl SRAM3_R { pub(crate) fn new(bits: bool) -> Self { SRAM3_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for SRAM3_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `sram3` writer - "] pub struct SRAM3_W<'a> { w: &'a mut W, } impl<'a> SRAM3_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 9)) | ((value as u32 & 0x01) << 9); self.w } } #[doc = "Field `sram2` reader - "] pub struct SRAM2_R(crate::FieldReader<bool, bool>); impl SRAM2_R { pub(crate) fn new(bits: bool) -> Self { SRAM2_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for SRAM2_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `sram2` writer - "] pub struct SRAM2_W<'a> { w: &'a mut W, } impl<'a> SRAM2_W<'a> { #[doc = r"Sets the field bit"] 
#[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 8)) | ((value as u32 & 0x01) << 8); self.w } } #[doc = "Field `sram1` reader - "] pub struct SRAM1_R(crate::FieldReader<bool, bool>); impl SRAM1_R { pub(crate) fn new(bits: bool) -> Self { SRAM1_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for SRAM1_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `sram1` writer - "] pub struct SRAM1_W<'a> { w: &'a mut W, } impl<'a> SRAM1_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 7)) | ((value as u32 & 0x01) << 7); self.w } } #[doc = "Field `sram0` reader - "] pub struct SRAM0_R(crate::FieldReader<bool, bool>); impl SRAM0_R { pub(crate) fn new(bits: bool) -> Self { SRAM0_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for SRAM0_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `sram0` writer - "] pub struct SRAM0_W<'a> { w: &'a mut W, } impl<'a> SRAM0_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 6)) | ((value as 
u32 & 0x01) << 6); self.w } } #[doc = "Field `rom` reader - "] pub struct ROM_R(crate::FieldReader<bool, bool>); impl ROM_R { pub(crate) fn new(bits: bool) -> Self { ROM_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for ROM_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `rom` writer - "] pub struct ROM_W<'a> { w: &'a mut W, } impl<'a> ROM_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 5)) | ((value as u32 & 0x01) << 5); self.w } } #[doc = "Field `busfabric` reader - "] pub struct BUSFABRIC_R(crate::FieldReader<bool, bool>); impl BUSFABRIC_R { pub(crate) fn new(bits: bool) -> Self { BUSFABRIC_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for BUSFABRIC_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `busfabric` writer - "] pub struct BUSFABRIC_W<'a> { w: &'a mut W, } impl<'a> BUSFABRIC_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 4)) | ((value as u32 & 0x01) << 4); self.w } } #[doc = "Field `resets` reader - "] pub struct RESETS_R(crate::FieldReader<bool, bool>); impl RESETS_R { pub(crate) fn new(bits: bool) -> Self { RESETS_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for RESETS_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn 
deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `resets` writer - "] pub struct RESETS_W<'a> { w: &'a mut W, } impl<'a> RESETS_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 3)) | ((value as u32 & 0x01) << 3); self.w } } #[doc = "Field `clocks` reader - "] pub struct CLOCKS_R(crate::FieldReader<bool, bool>); impl CLOCKS_R { pub(crate) fn new(bits: bool) -> Self { CLOCKS_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for CLOCKS_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `clocks` writer - "] pub struct CLOCKS_W<'a> { w: &'a mut W, } impl<'a> CLOCKS_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 2)) | ((value as u32 & 0x01) << 2); self.w } } #[doc = "Field `xosc` reader - "] pub struct XOSC_R(crate::FieldReader<bool, bool>); impl XOSC_R { pub(crate) fn new(bits: bool) -> Self { XOSC_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for XOSC_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `xosc` writer - "] pub struct XOSC_W<'a> { w: &'a mut W, } impl<'a> XOSC_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { 
self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 1)) | ((value as u32 & 0x01) << 1); self.w } } #[doc = "Field `rosc` reader - "] pub struct ROSC_R(crate::FieldReader<bool, bool>); impl ROSC_R { pub(crate) fn new(bits: bool) -> Self { ROSC_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for ROSC_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `rosc` writer - "] pub struct ROSC_W<'a> { w: &'a mut W, } impl<'a> ROSC_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | (value as u32 & 0x01); self.w } } impl R { #[doc = "Bit 16"] #[inline(always)] pub fn proc1(&self) -> PROC1_R { PROC1_R::new(((self.bits >> 16) & 0x01) != 0) } #[doc = "Bit 15"] #[inline(always)] pub fn proc0(&self) -> PROC0_R { PROC0_R::new(((self.bits >> 15) & 0x01) != 0) } #[doc = "Bit 14"] #[inline(always)] pub fn sio(&self) -> SIO_R { SIO_R::new(((self.bits >> 14) & 0x01) != 0) } #[doc = "Bit 13"] #[inline(always)] pub fn vreg_and_chip_reset(&self) -> VREG_AND_CHIP_RESET_R { VREG_AND_CHIP_RESET_R::new(((self.bits >> 13) & 0x01) != 0) } #[doc = "Bit 12"] #[inline(always)] pub fn xip(&self) -> XIP_R { XIP_R::new(((self.bits >> 12) & 0x01) != 0) } #[doc = "Bit 11"] #[inline(always)] pub fn sram5(&self) -> SRAM5_R { SRAM5_R::new(((self.bits >> 11) & 0x01) != 0) } #[doc = "Bit 10"] #[inline(always)] pub fn sram4(&self) -> SRAM4_R { SRAM4_R::new(((self.bits >> 10) & 0x01) != 0) } #[doc = "Bit 9"] #[inline(always)] pub fn sram3(&self) -> SRAM3_R { SRAM3_R::new(((self.bits >> 9) & 0x01) != 0) } #[doc = 
"Bit 8"] #[inline(always)] pub fn sram2(&self) -> SRAM2_R { SRAM2_R::new(((self.bits >> 8) & 0x01) != 0) } #[doc = "Bit 7"] #[inline(always)] pub fn sram1(&self) -> SRAM1_R { SRAM1_R::new(((self.bits >> 7) & 0x01) != 0) } #[doc = "Bit 6"] #[inline(always)] pub fn sram0(&self) -> SRAM0_R { SRAM0_R::new(((self.bits >> 6) & 0x01) != 0) } #[doc = "Bit 5"] #[inline(always)] pub fn rom(&self) -> ROM_R { ROM_R::new(((self.bits >> 5) & 0x01) != 0) } #[doc = "Bit 4"] #[inline(always)] pub fn busfabric(&self) -> BUSFABRIC_R { BUSFABRIC_R::new(((self.bits >> 4) & 0x01) != 0) } #[doc = "Bit 3"] #[inline(always)] pub fn resets(&self) -> RESETS_R { RESETS_R::new(((self.bits >> 3) & 0x01) != 0) } #[doc = "Bit 2"] #[inline(always)] pub fn clocks(&self) -> CLOCKS_R { CLOCKS_R::new(((self.bits >> 2) & 0x01) != 0) } #[doc = "Bit 1"] #[inline(always)] pub fn xosc(&self) -> XOSC_R { XOSC_R::new(((self.bits >> 1) & 0x01) != 0) } #[doc = "Bit 0"] #[inline(always)] pub fn rosc(&self) -> ROSC_R { ROSC_R::new((self.bits & 0x01) != 0) } } impl W { #[doc = "Bit 16"] #[inline(always)] pub fn proc1(&mut self) -> PROC1_W { PROC1_W { w: self } } #[doc = "Bit 15"] #[inline(always)] pub fn proc0(&mut self) -> PROC0_W { PROC0_W { w: self } } #[doc = "Bit 14"] #[inline(always)] pub fn sio(&mut self) -> SIO_W { SIO_W { w: self } } #[doc = "Bit 13"] #[inline(always)] pub fn vreg_and_chip_reset(&mut self) -> VREG_AND_CHIP_RESET_W { VREG_AND_CHIP_RESET_W { w: self } } #[doc = "Bit 12"] #[inline(always)] pub fn xip(&mut self) -> XIP_W { XIP_W { w: self } } #[doc = "Bit 11"] #[inline(always)] pub fn sram5(&mut self) -> SRAM5_W { SRAM5_W { w: self } } #[doc = "Bit 10"] #[inline(always)] pub fn sram4(&mut self) -> SRAM4_W { SRAM4_W { w: self } } #[doc = "Bit 9"] #[inline(always)] pub fn sram3(&mut self) -> SRAM3_W { SRAM3_W { w: self } } #[doc = "Bit 8"] #[inline(always)] pub fn sram2(&mut self) -> SRAM2_W { SRAM2_W { w: self } } #[doc = "Bit 7"] #[inline(always)] pub fn sram1(&mut self) -> SRAM1_W { SRAM1_W 
{ w: self } } #[doc = "Bit 6"] #[inline(always)] pub fn sram0(&mut self) -> SRAM0_W { SRAM0_W { w: self } } #[doc = "Bit 5"] #[inline(always)] pub fn rom(&mut self) -> ROM_W { ROM_W { w: self } } #[doc = "Bit 4"] #[inline(always)] pub fn busfabric(&mut self) -> BUSFABRIC_W { BUSFABRIC_W { w: self } } #[doc = "Bit 3"] #[inline(always)] pub fn resets(&mut self) -> RESETS_W { RESETS_W { w: self } } #[doc = "Bit 2"] #[inline(always)] pub fn clocks(&mut self) -> CLOCKS_W { CLOCKS_W { w: self } } #[doc = "Bit 1"] #[inline(always)] pub fn xosc(&mut self) -> XOSC_W { XOSC_W { w: self } } #[doc = "Bit 0"] #[inline(always)] pub fn rosc(&mut self) -> ROSC_W { ROSC_W { w: self } } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "Force into reset (i.e. power it off) This register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api). For information about available fields see [frce_off](index.html) module"] pub struct FRCE_OFF_SPEC; impl crate::RegisterSpec for FRCE_OFF_SPEC { type Ux = u32; } #[doc = "`read()` method returns [frce_off::R](R) reader structure"] impl crate::Readable for FRCE_OFF_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [frce_off::W](W) writer structure"] impl crate::Writable for FRCE_OFF_SPEC { type Writer = W; } #[doc = "`reset()` method sets FRCE_OFF to value 0"] impl crate::Resettable for FRCE_OFF_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
26.756155
300
0.538229
4862925a6721607b9b1c3c488bf82091a05d7afd
758
use libloragw_sx1302::hal; pub mod rak; pub mod semtech; #[derive(Default, Clone)] pub struct Configuration { pub radio_count: usize, pub clock_source: u8, pub full_duplex: bool, pub lora_multi_sf_bandwidth: u32, pub radio_config: Vec<RadioConfig>, pub gps_tty_path: Option<String>, pub spidev_path: String, pub reset_pin: Option<u32>, pub power_en_pin: Option<u32>, } #[derive(Clone)] pub struct RadioConfig { pub enable: bool, pub radio_type: hal::RadioType, pub single_input_mode: bool, pub rssi_offset: f32, pub rssi_temp_compensation: hal::RssiTempCompensationConfig, pub tx_enable: bool, pub tx_freq_min: u32, pub tx_freq_max: u32, pub tx_gain_table: Vec<hal::TxGainConfig>, }
24.451613
64
0.707124
f9b09326e637ed2ab03cec05f10f90fe109ba87d
84
pub mod dom; pub(super) mod sections; pub(super) mod state; pub(super) mod actions;
16.8
24
0.738095
11f923e88acbcf9244f8cbaf66f33bb8f6988ea7
6,380
// industrial-io/src/lib.rs // // Copyright (c) 2018-2020, Frank Pagliughi // // Licensed under the MIT license: // <LICENSE or http://opensource.org/licenses/MIT> // This file may not be copied, modified, or distributed except according // to those terms. // //! //! The Rust Industrial I/O crate for Linux. //! //! This is a Rust wrapper for _libiio_, a library for high-performance //! analog I/O from Linux user-space. It interacts with Linux Industrial I/O //! (IIO) devices such as A/D's, D/A's, accelerometers, pressure and //! temperature sensors, magnetometers, and so on. //! //! For more information, see: //! //! [IIO Wiki](https://wiki.analog.com/software/linux/docs/iio/iio) //! //! [libiio Wiki](https://wiki.analog.com/resources/tools-software/linux-software/libiio) //! // Lints // This may be overkill. #![deny( missing_docs, missing_debug_implementations, missing_copy_implementations, unstable_features, unused_import_braces, unused_qualifications )] use std::{ any::{Any, TypeId}, collections::HashMap, ffi::{CStr, CString}, fmt, os::raw::{c_char, c_int, c_uint, c_void}, slice, str, str::FromStr, }; use libiio_sys::{self as ffi}; use nix::errno; pub use crate::buffer::*; pub use crate::channel::*; pub use crate::context::*; pub use crate::device::*; pub use crate::errors::*; mod macros; pub mod buffer; pub mod channel; pub mod context; pub mod device; pub mod errors; /// According to the IIO samples, internal buffers need to be big enough /// for attributes coming back from the kernel. const ATTR_BUF_SIZE: usize = 16384; // -------------------------------------------------------------------------- /// Gets an optional string value from a C const char pointer. /// If the pointer is NULL, this returns `None` otherwise it converts the /// string and returns it. 
fn cstring_opt(pstr: *const c_char) -> Option<String> { if pstr.is_null() { None } else { let name = unsafe { CStr::from_ptr(pstr) }; Some(name.to_str().unwrap_or_default().to_string()) } } pub(crate) fn sys_result<T>(ret: i32, result: T) -> Result<T> { if ret < 0 { Err(errno::from_i32(-ret).into()) } else { Ok(result) } } /// Converts the attribute name and value to CString's that can be sent to /// the C library. /// /// `attr` The name of the attribute /// `val` The value to write. This should typically be an int, float, bool, /// or string type. pub(crate) fn attr_to_string<T>(val: T) -> Result<String> where T: fmt::Display + Any, { let mut sval = format!("{}", val); if TypeId::of::<T>() == TypeId::of::<bool>() { sval = (if sval == "true" { "1" } else { "0" }).into(); } Ok(sval) } /// Converts a String to an atribute value. /// The type is typically an int, float, bool, or string. /// /// `attr` The name of the attribute pub(crate) fn string_to_attr<T>(mut sval: String) -> Result<T> where T: FromStr + Any, { if TypeId::of::<T>() == TypeId::of::<bool>() { sval = (if sval.trim() == "0" { "false" } else { "true" }).into(); } let val = T::from_str(&sval).map_err(|_| Error::StringConversionError)?; Ok(val) } // Callback from the C lib to extract the collection of all // device-specific attributes. See attr_read_all(). 
pub(crate) unsafe extern "C" fn attr_read_all_cb( _chan: *mut ffi::iio_device, attr: *const c_char, val: *const c_char, _len: usize, pmap: *mut c_void, ) -> c_int { if attr.is_null() || val.is_null() || pmap.is_null() { return -1; } let attr = CStr::from_ptr(attr).to_string_lossy().to_string(); // TODO: We could/should check val[len-1] == '\x0' let val = CStr::from_ptr(val).to_string_lossy().to_string(); let map: &mut HashMap<String, String> = &mut *(pmap as *mut _); map.insert(attr, val); 0 } // -------------------------------------------------------------------------- /// A struct to hold version numbers #[derive(Debug, PartialEq)] pub struct Version { /// The Major version number pub major: u32, /// The Minor version number pub minor: u32, /// The git tag for the release pub git_tag: String, } impl fmt::Display for Version { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}.{} tag: {}", self.major, self.minor, self.git_tag) } } // -------------------------------------------------------------------------- /// Gets the library version as (Major, Minor, Git Tag) pub fn library_version() -> Version { let mut major: c_uint = 0; let mut minor: c_uint = 0; const BUF_SZ: usize = 8; let mut buf = vec![' ' as c_char; BUF_SZ]; let pbuf = buf.as_mut_ptr(); unsafe { ffi::iio_library_get_version(&mut major, &mut minor, pbuf) }; let sgit = unsafe { if buf.contains(&0) { CStr::from_ptr(pbuf).to_owned() } else { let slc = str::from_utf8(slice::from_raw_parts(pbuf as *mut u8, BUF_SZ)).unwrap(); CString::new(slc).unwrap() } }; Version { major: major as u32, minor: minor as u32, git_tag: sgit.to_string_lossy().into_owned(), } } // -------------------------------------------------------------------------- #[cfg(test)] mod tests { use super::*; // Just make sure version gives a consistent result. 
#[test] fn version() { let v1 = library_version(); let v2 = library_version(); assert!(v1 == v2); } #[test] fn string_to_attr_val() { let val: i32 = string_to_attr("123".to_string()).unwrap(); assert_eq!(val, 123); let val = string_to_attr::<bool>("1".to_string()).unwrap(); assert_eq!(val, true); let val: bool = string_to_attr(" 0 \n".to_string()).unwrap(); assert_eq!(val, false); let val: String = string_to_attr("hello".to_string()).unwrap(); assert_eq!(&val, "hello"); } #[test] fn attr_val_to_string() { let s = attr_to_string(123).unwrap(); assert_eq!(&s, "123"); let s = attr_to_string(true).unwrap(); assert_eq!(&s, "1"); let s = attr_to_string(false).unwrap(); assert_eq!(&s, "0"); let s = attr_to_string("hello").unwrap(); assert_eq!(&s, "hello"); } }
27.033898
94
0.57837
9c84128f58a5b58284f1ce7ed43c6565a5e8e973
98
pub mod abc; pub mod simple; pub mod generic; pub use self::generic::*; pub use self::simple::*;
14
25
0.683673
039dc8daed1836bc81417312d1cbc302078fabc2
28,528
//! Public Raft interface and data types. use std::collections::BTreeSet; use std::fmt::Debug; use std::sync::Arc; use std::time::Duration; use serde::Deserialize; use serde::Serialize; use tokio::sync::mpsc; use tokio::sync::oneshot; use tokio::sync::watch; use tokio::sync::Mutex; use tokio::task::JoinHandle; use tracing::Span; use crate::config::Config; use crate::core::RaftCore; use crate::error::AddLearnerError; use crate::error::AppendEntriesError; use crate::error::ClientReadError; use crate::error::ClientWriteError; use crate::error::Fatal; use crate::error::InitializeError; use crate::error::InstallSnapshotError; use crate::error::VoteError; use crate::metrics::RaftMetrics; use crate::metrics::Wait; use crate::AppData; use crate::AppDataResponse; use crate::LogId; use crate::Membership; use crate::MessageSummary; use crate::NodeId; use crate::RaftNetwork; use crate::RaftStorage; use crate::SnapshotMeta; struct RaftInner<D: AppData, R: AppDataResponse, N: RaftNetwork<D>, S: RaftStorage<D, R>> { tx_api: mpsc::UnboundedSender<(RaftMsg<D, R>, Span)>, rx_metrics: watch::Receiver<RaftMetrics>, raft_handle: Mutex<Option<JoinHandle<Result<(), Fatal>>>>, tx_shutdown: Mutex<Option<oneshot::Sender<()>>>, marker_n: std::marker::PhantomData<N>, marker_s: std::marker::PhantomData<S>, } /// The Raft API. /// /// This type implements the full Raft spec, and is the interface to a running Raft node. /// Applications building on top of Raft will use this to spawn a Raft task and interact with /// the spawned task. /// /// For more information on the Raft protocol, see /// [the specification here](https://raft.github.io/raft.pdf) (**pdf warning**). /// /// For details and discussion on this API, see the /// [Raft API](https://datafuselabs.github.io/openraft/raft.html) section of the guide. /// /// ### clone /// This type implements `Clone`, and should be cloned liberally. The clone itself is very cheap /// and helps to facilitate use with async workflows. 
/// /// ### shutting down /// If any of the interfaces returns a `RaftError::ShuttingDown`, this indicates that the Raft node /// is shutting down (potentially for data safety reasons due to a storage error), and the `shutdown` /// method should be called on this type to await the shutdown of the node. If the parent /// application needs to shutdown the Raft node for any reason, calling `shutdown` will do the trick. pub struct Raft<D: AppData, R: AppDataResponse, N: RaftNetwork<D>, S: RaftStorage<D, R>> { inner: Arc<RaftInner<D, R, N, S>>, } impl<D: AppData, R: AppDataResponse, N: RaftNetwork<D>, S: RaftStorage<D, R>> Raft<D, R, N, S> { /// Create and spawn a new Raft task. /// /// ### `id` /// The ID which the spawned Raft task will use to identify itself within the cluster. /// Applications must guarantee that the ID provided to this function is stable, and should be /// persisted in a well known location, probably alongside the Raft log and the application's /// state machine. This ensures that restarts of the node will yield the same ID every time. /// /// ### `config` /// Raft's runtime config. See the docs on the `Config` object for more details. /// /// ### `network` /// An implementation of the `RaftNetwork` trait which will be used by Raft for sending RPCs to /// peer nodes within the cluster. See the docs on the `RaftNetwork` trait for more details. /// /// ### `storage` /// An implementation of the `RaftStorage` trait which will be used by Raft for data storage. /// See the docs on the `RaftStorage` trait for more details. 
#[tracing::instrument(level="debug", skip(config, network, storage), fields(cluster=%config.cluster_name))] pub fn new(id: NodeId, config: Arc<Config>, network: Arc<N>, storage: Arc<S>) -> Self { let (tx_api, rx_api) = mpsc::unbounded_channel(); let (tx_metrics, rx_metrics) = watch::channel(RaftMetrics::new_initial(id)); let (tx_shutdown, rx_shutdown) = oneshot::channel(); let raft_handle = RaftCore::spawn(id, config, network, storage, rx_api, tx_metrics, rx_shutdown); let inner = RaftInner { tx_api, rx_metrics, raft_handle: Mutex::new(Some(raft_handle)), tx_shutdown: Mutex::new(Some(tx_shutdown)), marker_n: std::marker::PhantomData, marker_s: std::marker::PhantomData, }; Self { inner: Arc::new(inner) } } /// Submit an AppendEntries RPC to this Raft node. /// /// These RPCs are sent by the cluster leader to replicate log entries (§5.3), and are also /// used as heartbeats (§5.2). #[tracing::instrument(level = "trace", skip(self, rpc), fields(rpc=%rpc.summary()))] pub async fn append_entries( &self, rpc: AppendEntriesRequest<D>, ) -> Result<AppendEntriesResponse, AppendEntriesError> { let (tx, rx) = oneshot::channel(); self.call_core(RaftMsg::AppendEntries { rpc, tx }, rx).await } /// Submit a VoteRequest (RequestVote in the spec) RPC to this Raft node. /// /// These RPCs are sent by cluster peers which are in candidate state attempting to gather votes (§5.2). #[tracing::instrument(level = "debug", skip(self, rpc), fields(rpc=%rpc.summary()))] pub async fn vote(&self, rpc: VoteRequest) -> Result<VoteResponse, VoteError> { let (tx, rx) = oneshot::channel(); self.call_core(RaftMsg::RequestVote { rpc, tx }, rx).await } /// Submit an InstallSnapshot RPC to this Raft node. /// /// These RPCs are sent by the cluster leader in order to bring a new node or a slow node up-to-speed /// with the leader (§7). 
#[tracing::instrument(level = "debug", skip(self, rpc), fields(snapshot_id=%rpc.meta.last_log_id))] pub async fn install_snapshot( &self, rpc: InstallSnapshotRequest, ) -> Result<InstallSnapshotResponse, InstallSnapshotError> { let (tx, rx) = oneshot::channel(); self.call_core(RaftMsg::InstallSnapshot { rpc, tx }, rx).await } /// Get the ID of the current leader from this Raft node. /// /// This method is based on the Raft metrics system which does a good job at staying /// up-to-date; however, the `client_read` method must still be used to guard against stale /// reads. This method is perfect for making decisions on where to route client requests. #[tracing::instrument(level = "debug", skip(self))] pub async fn current_leader(&self) -> Option<NodeId> { self.metrics().borrow().current_leader } /// Check to ensure this node is still the cluster leader, in order to guard against stale reads (§8). /// /// The actual read operation itself is up to the application, this method just ensures that /// the read will not be stale. #[tracing::instrument(level = "debug", skip(self))] pub async fn client_read(&self) -> Result<(), ClientReadError> { let (tx, rx) = oneshot::channel(); self.call_core(RaftMsg::ClientReadRequest { tx }, rx).await } /// Submit a mutating client request to Raft to update the state of the system (§5.1). /// /// It will be appended to the log, committed to the cluster, and then applied to the /// application state machine. The result of applying the request to the state machine will /// be returned as the response from this method. /// /// Our goal for Raft is to implement linearizable semantics. If the leader crashes after committing /// a log entry but before responding to the client, the client may retry the command with a new /// leader, causing it to be executed a second time. As such, clients should assign unique serial /// numbers to every command. 
Then, the state machine should track the latest serial number /// processed for each client, along with the associated response. If it receives a command whose /// serial number has already been executed, it responds immediately without re-executing the /// request (§8). The `RaftStorage::apply_entry_to_state_machine` method is the perfect place /// to implement this. /// /// These are application specific requirements, and must be implemented by the application which is /// being built on top of Raft. #[tracing::instrument(level = "debug", skip(self, rpc))] pub async fn client_write(&self, rpc: ClientWriteRequest<D>) -> Result<ClientWriteResponse<R>, ClientWriteError> { let (tx, rx) = oneshot::channel(); self.call_core(RaftMsg::ClientWriteRequest { rpc, tx }, rx).await } /// Initialize a pristine Raft node with the given config. /// /// This command should be called on pristine nodes — where the log index is 0 and the node is /// in Learner state — as if either of those constraints are false, it indicates that the /// cluster is already formed and in motion. If `InitializeError::NotAllowed` is returned /// from this function, it is safe to ignore, as it simply indicates that the cluster is /// already up and running, which is ultimately the goal of this function. /// /// This command will work for single-node or multi-node cluster formation. This command /// should be called with all discovered nodes which need to be part of cluster, and as such /// it is recommended that applications be configured with an initial cluster formation delay /// which will allow time for the initial members of the cluster to be discovered (by the /// parent application) for this call. /// /// If successful, this routine will set the given config as the active config, only in memory, /// and will start an election. /// /// It is recommended that applications call this function based on an initial call to /// `RaftStorage.get_initial_state`. 
If the initial state indicates that the hard state's /// current term is `0` and the `last_log_index` is `0`, then this routine should be called /// in order to initialize the cluster. /// /// Once a node becomes leader and detects that its index is 0, it will commit a new config /// entry (instead of the normal blank entry created by new leaders). /// /// Every member of the cluster should perform these actions. This routine is race-condition /// free, and Raft guarantees that the first node to become the cluster leader will propagate /// only its own config. #[tracing::instrument(level = "debug", skip(self))] pub async fn initialize(&self, members: BTreeSet<NodeId>) -> Result<(), InitializeError> { let (tx, rx) = oneshot::channel(); self.call_core(RaftMsg::Initialize { members, tx }, rx).await } /// Synchronize a new Raft node, optionally, blocking until up-to-speed (§6). /// /// - Add a node as learner into the cluster. /// - Setup replication from leader to it. /// /// If blocking is true, this function blocks until the leader believes the logs on the new node is up to date, /// i.e., ready to join the cluster, as a voter, by calling `change_membership`. /// When finished, it returns the last log id on the new node, in a `RaftResponse::LogId`. /// /// If blocking is false, this function returns at once as successfully setting up the replication. /// /// If the node to add is already a voter or learner, it returns `RaftResponse::NoChange` at once. #[tracing::instrument(level = "debug", skip(self, id), fields(target=id))] pub async fn add_learner(&self, id: NodeId, blocking: bool) -> Result<AddLearnerResponse, AddLearnerError> { let (tx, rx) = oneshot::channel(); self.call_core(RaftMsg::AddLearner { id, blocking, tx }, rx).await } /// Propose a cluster configuration change. /// /// If a node in the proposed config but is not yet a voter or learner, it first calls `add_learner` to setup /// replication to the new node. 
/// /// Internal: /// - It proposes a **joint** config. /// - When the **joint** config is committed, it proposes a uniform config. /// /// If blocking is true, it blocks until every learner becomes up to date. /// Otherwise it returns error `ChangeMembershipError::LearnerIsLagging` if there is a lagging learner. /// /// If it lost leadership or crashed before committing the second **uniform** config log, the cluster is left in the /// **joint** config. #[tracing::instrument(level = "debug", skip(self))] pub async fn change_membership( &self, members: BTreeSet<NodeId>, blocking: bool, ) -> Result<ClientWriteResponse<R>, ClientWriteError> { tracing::info!(?members, "change_membership: add every member as learner"); for id in members.iter() { let res = self.add_learner(*id, blocking).await; let res_err = match res { Ok(_) => { continue; } Err(e) => e, }; tracing::info!(%res_err, "add learner: already exists"); match res_err { // TODO(xp): test add learner on non-leader AddLearnerError::ForwardToLeader(forward_err) => { return Err(ClientWriteError::ForwardToLeader(forward_err)) } AddLearnerError::Exists(node_id) => { tracing::info!(%node_id, "add learner: already exists"); continue; } AddLearnerError::Fatal(f) => return Err(ClientWriteError::Fatal(f)), } } tracing::info!("change_membership: start to commit joint config"); let (tx, rx) = oneshot::channel(); // res is error if membership can not be changed. // If it is not error, it will go into a joint state let res = self .call_core( RaftMsg::ChangeMembership { members: members.clone(), blocking, tx, }, rx, ) .await?; tracing::info!("res of first change_membership: {:?}", res.summary()); let (log_id, joint) = (res.log_id, res.membership.clone().unwrap()); // There is a previously in progress joint state and it becomes the membership config we want. 
if !joint.is_in_joint_consensus() { return Ok(res); } tracing::debug!("committed a joint config: {} {:?}", log_id, joint); tracing::debug!("the second step is to change to uniform config: {:?}", members); let (tx, rx) = oneshot::channel(); let res = self.call_core(RaftMsg::ChangeMembership { members, blocking, tx }, rx).await?; tracing::info!("res of second change_membership: {}", res.summary()); Ok(res) } /// Invoke RaftCore by sending a RaftMsg and blocks waiting for response. #[tracing::instrument(level = "debug", skip(self, mes, rx))] pub(crate) async fn call_core<T, E>(&self, mes: RaftMsg<D, R>, rx: RaftRespRx<T, E>) -> Result<T, E> where E: From<Fatal> { let span = tracing::Span::current(); let sum = mes.summary(); let send_res = self.inner.tx_api.send((mes, span)); if let Err(send_err) = send_res { let last_err = self.inner.rx_metrics.borrow().running_state.clone(); tracing::error!(%send_err, mes=%sum, last_error=?last_err, "error send tx to RaftCore"); let err = match last_err { Ok(_) => { // normal shutdown, not caused by any error. Fatal::Stopped } Err(e) => e, }; return Err(err.into()); } let recv_res = rx.await; let res = match recv_res { Ok(x) => x, Err(e) => { let last_err = self.inner.rx_metrics.borrow().running_state.clone(); tracing::error!(%e, mes=%sum, last_error=?last_err, "error recv rx from RaftCore"); let err = match last_err { Ok(_) => { // normal shutdown, not caused by any error. Fatal::Stopped } Err(e) => e, }; Err(err.into()) } }; res } /// Get a handle to the metrics channel. pub fn metrics(&self) -> watch::Receiver<RaftMetrics> { self.inner.rx_metrics.clone() } /// Get a handle to wait for the metrics to satisfy some condition. 
/// /// ```ignore /// # use std::time::Duration; /// # use openraft::{State, Raft}; /// /// let timeout = Duration::from_millis(200); /// /// // wait for raft log-3 to be received and applied: /// r.wait(Some(timeout)).log(Some(3), "log").await?; /// /// // wait for ever for raft node's current leader to become 3: /// r.wait(None).current_leader(2, "wait for leader").await?; /// /// // wait for raft state to become a follower /// r.wait(None).state(State::Follower, "state").await?; /// ``` pub fn wait(&self, timeout: Option<Duration>) -> Wait { let timeout = match timeout { Some(t) => t, None => Duration::from_millis(500), }; Wait { timeout, rx: self.inner.rx_metrics.clone(), } } /// Shutdown this Raft node. pub async fn shutdown(&self) -> anyhow::Result<()> { if let Some(tx) = self.inner.tx_shutdown.lock().await.take() { let _ = tx.send(()); } if let Some(handle) = self.inner.raft_handle.lock().await.take() { let _ = handle.await?; } Ok(()) } } impl<D: AppData, R: AppDataResponse, N: RaftNetwork<D>, S: RaftStorage<D, R>> Clone for Raft<D, R, N, S> { fn clone(&self) -> Self { Self { inner: self.inner.clone(), } } } pub(crate) type RaftRespTx<T, E> = oneshot::Sender<Result<T, E>>; pub(crate) type RaftRespRx<T, E> = oneshot::Receiver<Result<T, E>>; #[derive(Debug, Clone, PartialEq, Eq)] pub struct AddLearnerResponse { pub matched: Option<LogId>, } /// A message coming from the Raft API. 
pub(crate) enum RaftMsg<D: AppData, R: AppDataResponse> {
    /// An append-entries RPC from a leader, plus the oneshot for its reply.
    AppendEntries {
        rpc: AppendEntriesRequest<D>,
        tx: RaftRespTx<AppendEntriesResponse, AppendEntriesError>,
    },
    /// A request-vote RPC from a candidate, plus the oneshot for its reply.
    RequestVote {
        rpc: VoteRequest,
        tx: RaftRespTx<VoteResponse, VoteError>,
    },
    /// An install-snapshot RPC chunk from a leader, plus the oneshot for its reply.
    InstallSnapshot {
        rpc: InstallSnapshotRequest,
        tx: RaftRespTx<InstallSnapshotResponse, InstallSnapshotError>,
    },
    /// A client write to be appended to the log and applied to the state machine.
    ClientWriteRequest {
        rpc: ClientWriteRequest<D>,
        tx: RaftRespTx<ClientWriteResponse<R>, ClientWriteError>,
    },
    /// A linearizable-read check; carries only the response channel.
    ClientReadRequest {
        tx: RaftRespTx<(), ClientReadError>,
    },
    /// One-time cluster initialization with the given member set.
    Initialize {
        members: BTreeSet<NodeId>,
        tx: RaftRespTx<(), InitializeError>,
    },
    // TODO(xp): make tx a field of a struct
    /// Request raft core to setup a new replication to a learner.
    AddLearner {
        id: NodeId,

        /// If block until the newly added learner becomes line-rate.
        blocking: bool,

        /// Send the log id when the replication becomes line-rate.
        tx: RaftRespTx<AddLearnerResponse, AddLearnerError>,
    },
    /// Propose a membership change to the given member set.
    ChangeMembership {
        members: BTreeSet<NodeId>,
        /// with blocking==false, respond to client a ChangeMembershipError::LearnerIsLagging error at once if a
        /// non-member is lagging.
        ///
        /// Otherwise, wait for commit of the member change log.
        blocking: bool,
        tx: RaftRespTx<ClientWriteResponse<R>, ClientWriteError>,
    },
}

impl<D, R> MessageSummary for RaftMsg<D, R>
where
    D: AppData,
    R: AppDataResponse,
{
    // One-line, human-readable rendering of each message kind, for logging.
    fn summary(&self) -> String {
        match self {
            RaftMsg::AppendEntries { rpc, .. } => {
                format!("AppendEntries: {}", rpc.summary())
            }
            RaftMsg::RequestVote { rpc, .. } => {
                format!("RequestVote: {}", rpc.summary())
            }
            RaftMsg::InstallSnapshot { rpc, .. } => {
                format!("InstallSnapshot: {}", rpc.summary())
            }
            RaftMsg::ClientWriteRequest { rpc, .. } => {
                format!("ClientWriteRequest: {}", rpc.summary())
            }
            RaftMsg::ClientReadRequest { .. } => "ClientReadRequest".to_string(),
            RaftMsg::Initialize { members, .. } => {
                format!("Initialize: {:?}", members)
            }
            RaftMsg::AddLearner { id, blocking, .. } => {
                format!("AddLearner: id: {}, blocking: {}", id, blocking)
            }
            RaftMsg::ChangeMembership { members, blocking, .. } => {
                format!("ChangeMembership: members: {:?}, blocking: {}", members, blocking)
            }
        }
    }
}

//////////////////////////////////////////////////////////////////////////////////////////////////

/// An RPC sent by a cluster leader to replicate log entries (§5.3), and as a heartbeat (§5.2).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AppendEntriesRequest<D: AppData> {
    /// The leader's current term.
    pub term: u64,

    /// The leader's ID. Useful in redirecting clients.
    pub leader_id: u64,

    /// The log id immediately preceding `entries` (§5.3) — `None` presumably
    /// means the start of the log; confirm against the receiving side.
    pub prev_log_id: Option<LogId>,

    /// The new log entries to store.
    ///
    /// This may be empty when the leader is sending heartbeats. Entries
    /// are batched for efficiency.
    #[serde(bound = "D: AppData")]
    pub entries: Vec<Entry<D>>,

    /// The leader's committed log id.
    pub leader_commit: Option<LogId>,
}

impl<D: AppData> MessageSummary for AppendEntriesRequest<D> {
    fn summary(&self) -> String {
        format!(
            "leader={}-{}, prev_log_id={}, leader_commit={}, entries={}",
            self.term,
            self.leader_id,
            self.prev_log_id.summary(),
            self.leader_commit.summary(),
            self.entries.as_slice().summary()
        )
    }
}

/// The response to an `AppendEntriesRequest`.
#[derive(Debug, Serialize, Deserialize)]
pub struct AppendEntriesResponse {
    /// The responding node's current term, for leader to update itself.
    pub term: u64,
    /// Whether the entries were accepted.
    pub success: bool,
    /// Whether the follower found a conflicting entry at `prev_log_id`.
    pub conflict: bool,
}

impl MessageSummary for AppendEntriesResponse {
    fn summary(&self) -> String {
        format!(
            "term:{}, success:{:?}, conflict:{:?}",
            self.term, self.success, self.conflict
        )
    }
}

/// A Raft log entry.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Entry<D: AppData> {
    /// The id (term and index) of this entry in the log.
    pub log_id: LogId,

    /// This entry's payload.
    #[serde(bound = "D: AppData")]
    pub payload: EntryPayload<D>,
}

impl<D: AppData> MessageSummary for Entry<D> {
    fn summary(&self) -> String {
        format!("{}:{}", self.log_id, self.payload.summary())
    }
}

impl<D: AppData> MessageSummary for Option<Entry<D>> {
    fn summary(&self) -> String {
        match self {
            None => "None".to_string(),
            Some(x) => format!("Some({})", x.summary()),
        }
    }
}

impl<D: AppData> MessageSummary for &[Entry<D>] {
    // Delegates to the &[&Entry] impl below by collecting references.
    fn summary(&self) -> String {
        let entry_refs: Vec<_> = self.iter().collect();
        entry_refs.as_slice().summary()
    }
}

impl<D: AppData> MessageSummary for &[&Entry<D>] {
    // Lists up to 5 entries in full; longer slices are elided to "first ... last".
    fn summary(&self) -> String {
        let mut res = Vec::with_capacity(self.len());
        if self.len() <= 5 {
            for x in self.iter() {
                let e = format!("{}:{}", x.log_id, x.payload.summary());
                res.push(e);
            }
            res.join(",")
        } else {
            let first = *self.first().unwrap();
            let last = *self.last().unwrap();
            format!("{} ... {}", first.summary(), last.summary())
        }
    }
}

/// Log entry payload variants.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum EntryPayload<D: AppData> {
    /// An empty payload committed by a new cluster leader.
    Blank,

    /// A normal, application-defined entry.
    #[serde(bound = "D: AppData")]
    Normal(D),

    /// A change-membership log entry.
    Membership(Membership),
}

impl<D: AppData> MessageSummary for EntryPayload<D> {
    fn summary(&self) -> String {
        match self {
            EntryPayload::Blank => "blank".to_string(),
            EntryPayload::Normal(_n) => "normal".to_string(),
            EntryPayload::Membership(c) => {
                format!("membership: {}", c.summary())
            }
        }
    }
}

/// An RPC sent by candidates to gather votes (§5.2).
#[derive(Debug, Serialize, Deserialize)]
pub struct VoteRequest {
    /// The candidate's current term.
    pub term: u64,
    /// The id of the candidate requesting the vote.
    pub candidate_id: u64,
    /// The candidate's last log id, used by voters for the up-to-date check.
    pub last_log_id: Option<LogId>,
}

impl MessageSummary for VoteRequest {
    fn summary(&self) -> String {
        format!("{}-{}, last_log:{:?}", self.term, self.candidate_id, self.last_log_id)
    }
}

impl VoteRequest {
    pub fn new(term: u64, candidate_id: u64, last_log_id: Option<LogId>) -> Self {
        Self {
            term,
            candidate_id,
            last_log_id,
        }
    }
}

/// The response to a `VoteRequest`.
#[derive(Debug, Serialize, Deserialize)]
pub struct VoteResponse {
    /// The current term of the responding node, for the candidate to update itself.
    pub term: u64,

    /// Will be true if the candidate received a vote from the responder.
    pub vote_granted: bool,

    /// The last log id stored on the remote voter.
    pub last_log_id: Option<LogId>,
}

//////////////////////////////////////////////////////////////////////////////////////////////////

/// An RPC sent by the Raft leader to send chunks of a snapshot to a follower (§7).
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct InstallSnapshotRequest {
    /// The leader's current term.
    pub term: u64,
    /// The leader's ID. Useful in redirecting clients.
    pub leader_id: u64,

    /// Metadata of a snapshot: snapshot_id, last_log_ed membership etc.
    pub meta: SnapshotMeta,

    /// The byte offset where this chunk of data is positioned in the snapshot file.
    pub offset: u64,
    /// The raw bytes of the snapshot chunk, starting at `offset`.
    pub data: Vec<u8>,

    /// Will be `true` if this is the last chunk in the snapshot.
    pub done: bool,
}

impl MessageSummary for InstallSnapshotRequest {
    fn summary(&self) -> String {
        format!(
            "term={}, leader_id={}, meta={:?}, offset={}, len={}, done={}",
            self.term,
            self.leader_id,
            self.meta,
            self.offset,
            self.data.len(),
            self.done
        )
    }
}

/// The response to an `InstallSnapshotRequest`.
#[derive(Debug, Serialize, Deserialize)]
pub struct InstallSnapshotResponse {
    /// The receiving node's current term, for leader to update itself.
    pub term: u64,
}

//////////////////////////////////////////////////////////////////////////////////////////////////

/// An application specific client request to update the state of the system (§5.1).
///
/// The entry of this payload will be appended to the Raft log and then applied to the Raft state
/// machine according to the Raft protocol.
#[derive(Debug, Serialize, Deserialize)]
pub struct ClientWriteRequest<D: AppData> {
    /// The application specific contents of this client request.
    #[serde(bound = "D: AppData")]
    pub(crate) payload: EntryPayload<D>,
}

impl<D: AppData> MessageSummary for ClientWriteRequest<D> {
    fn summary(&self) -> String {
        self.payload.summary()
    }
}

impl<D: AppData> ClientWriteRequest<D> {
    pub fn new(entry: EntryPayload<D>) -> Self {
        Self { payload: entry }
    }
}

/// The response to a `ClientRequest`.
#[derive(Debug, Serialize, Deserialize)]
pub struct ClientWriteResponse<R: AppDataResponse> {
    /// The id of the log entry this request was written as.
    pub log_id: LogId,

    /// Application specific response data.
    #[serde(bound = "R: AppDataResponse")]
    pub data: R,

    /// If the log entry is a change-membership entry.
    pub membership: Option<Membership>,
}

impl<R: AppDataResponse> MessageSummary for ClientWriteResponse<R> {
    fn summary(&self) -> String {
        format!("log_id: {}, membership: {:?}", self.log_id, self.membership)
    }
}
37.145833
120
0.609857
fc5269055e1d34b329d387a1c50f8bb8ca942201
2,464
use std::iter::repeat;

/// Address of a single bit within a byte: 0 (least significant) through 7
/// (most significant).
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct ByteAddr(u8);

/// Splits an absolute bit index into a (byte index, bit-within-byte) pair.
fn addr(bit: usize) -> (usize, ByteAddr) {
    (bit / 8, ByteAddr((bit % 8) as u8))
}

impl ByteAddr {
    /// Mask with every bit set except the addressed one; AND with a byte to
    /// clear that bit.
    pub fn clear_mask(self) -> u8 {
        !self.set_mask()
    }

    /// Mask with only the addressed bit set; OR with a byte to set that bit.
    /// Values above 7 behave like 7, matching the `_` arm of the previous
    /// lookup table.
    pub fn set_mask(self) -> u8 {
        1u8 << self.0.min(7)
    }

    /// Number of right-shifts needed to bring the addressed bit down to
    /// position 0.
    pub fn get_shifter(self) -> u8 {
        self.0
    }

    /// The next bit address within the same byte, or `None` past bit 7.
    #[allow(dead_code)]
    pub fn get_next(self) -> Option<ByteAddr> {
        if self.0 == 7 {
            None
        } else {
            Some(ByteAddr(self.0 + 1))
        }
    }
}

/// Allocates a zeroed bit buffer of `size` bytes.
#[allow(dead_code)]
pub fn new(size: usize) -> Vec<u8> {
    repeat(0).take(size).collect()
}

/// Sets or clears bit `bit` (absolute index, LSB-first within each byte).
pub fn set_bit_to(data: &mut [u8], bit: usize, value: bool) {
    let (byte_addr, bit_addr) = addr(bit);
    if value {
        data[byte_addr] |= bit_addr.set_mask();
    } else {
        data[byte_addr] &= bit_addr.clear_mask();
    }
}

/// Reads bit `bit` (absolute index, LSB-first within each byte).
pub fn get_bit(data: &[u8], bit: usize) -> bool {
    let (byte_addr, bit_addr) = addr(bit);
    (data[byte_addr] >> bit_addr.get_shifter()) & 0x1 == 0x1
}

/// Reads `nb_bits` consecutive bits starting at `start_bit`.
///
/// The result is ordered so that `v[nb_bits - 1 - i]` holds bit
/// `start_bit + i`, i.e. the first bit read ends up last — this preserves the
/// ordering the original index arithmetic (`v[nb_bits - i - 1]`) intended.
///
/// Bug fixes vs. the previous implementation: it index-assigned into a
/// zero-length `Vec::with_capacity(nb_bits)`, which panics for any
/// `nb_bits > 0`, and its byte-advance logic could read one byte past the end
/// of `data` when the requested range ended exactly on a byte boundary.
#[allow(dead_code)]
pub fn get_bits(data: &[u8], start_bit: usize, nb_bits: usize) -> Vec<bool> {
    let mut v = vec![false; nb_bits];
    for i in 0..nb_bits {
        v[nb_bits - 1 - i] = get_bit(data, start_bit + i);
    }
    v
}
25.666667
80
0.519886
69cb13c991faa98f161e44d0088980ebb6f80b61
3,302
extern crate ruzstd;
use std::fs::File;
use std::io::Read;
use std::io::Write;

/// Per-file decode bookkeeping: total bytes emitted so far, and the last
/// progress percentage printed (so the progress line is only redrawn when the
/// value actually changes; starts at -1 so the first update always prints).
struct StateTracker {
    bytes_used: u64,
    old_percentage: i8,
}

/// Minimal `zstd -d -c`-style CLI built on the `ruzstd` decoder.
///
/// Every non-flag argument is treated as a file path to decompress; decoded
/// bytes go to stdout, while progress, byte counts and checksum results go to
/// stderr. Only the `-d` (decompress) and `-c` (to stdout) flags are accepted,
/// and both are required.
fn main() {
    // Split argv into positional paths and '-'-prefixed flags. argv[0] (the
    // program name) lands in file_paths and is stripped by the remove(0) below.
    let mut file_paths: Vec<_> = std::env::args().filter(|f| !f.starts_with('-')).collect();
    let flags: Vec<_> = std::env::args().filter(|f| f.starts_with('-')).collect();
    file_paths.remove(0);

    if !flags.contains(&"-d".to_owned()) {
        eprintln!("This zstd implementation only supports decompression. Please add a \"-d\" flag");
        return;
    }

    if !flags.contains(&"-c".to_owned()) {
        eprintln!("This zstd implementation only supports output on the stdout. Please add a \"-c\" flag and pipe the output into a file");
        return;
    }

    // Exactly -d and -c: any additional flag is rejected.
    if flags.len() != 2 {
        eprintln!(
            "No flags other than -d and -c are currently implemented. Flags used: {:?}",
            flags
        );
        return;
    }

    // One decoder instance is reused across files via reset() below.
    let mut frame_dec = ruzstd::FrameDecoder::new();

    for path in file_paths {
        let mut tracker = StateTracker {
            bytes_used: 0,
            old_percentage: -1,
        };
        eprintln!("File: {}", path);
        let mut f = File::open(path).unwrap();

        frame_dec.reset(&mut f).unwrap();

        // Decode in 10 MiB batches; `result` doubles as the read-out buffer.
        let batch_size = 1024 * 1024 * 10;
        let mut result = vec![0; batch_size];

        while !frame_dec.is_finished() {
            frame_dec
                .decode_blocks(&mut f, ruzstd::BlockDecodingStrategy::UptoBytes(batch_size))
                .unwrap();

            // Drain the decoder only once more than a full batch is buffered;
            // anything left at the end is handled by the loop below.
            if frame_dec.can_collect() > batch_size {
                let x = frame_dec.read(result.as_mut_slice()).unwrap();

                // Shrink to the bytes actually read, emit them, then grow the
                // buffer back to full size for the next read.
                result.resize(x, 0);
                do_something(&result, &mut tracker);
                result.resize(result.capacity(), 0);

                // NOTE(review): content_size() is unwrap()ed here — presumably
                // this panics for frames that do not declare a content size;
                // confirm against the ruzstd API before relying on it.
                let percentage = (tracker.bytes_used * 100) / frame_dec.content_size().unwrap();
                if percentage as i8 != tracker.old_percentage {
                    eprint!("\r");
                    eprint!("{} % done", percentage);
                    tracker.old_percentage = percentage as i8;
                }
            }
        }

        // handle the last chunk of data
        while frame_dec.can_collect() > 0 {
            let x = frame_dec.read(result.as_mut_slice()).unwrap();

            result.resize(x, 0);
            do_something(&result, &mut tracker);
            result.resize(result.capacity(), 0);
        }

        eprintln!("\nDecoded bytes: {}", tracker.bytes_used);

        // Verify the frame checksum when the data carries one.
        match frame_dec.get_checksum_from_data() {
            Some(chksum) => {
                if frame_dec.get_calculated_checksum().unwrap() != chksum {
                    eprintln!(
                        "Checksum did not match! From data: {}, calculated while decoding: {}",
                        chksum,
                        frame_dec.get_calculated_checksum().unwrap()
                    );
                } else {
                    eprintln!("Checksums are ok!");
                }
            }
            None => eprintln!("No checksums to test"),
        }
    }
}

/// Sink for decoded data: writes the chunk to stdout and accounts for it in
/// the tracker's running byte total.
fn do_something(data: &[u8], s: &mut StateTracker) {
    //Do something. Like writing it to a file or to stdout...
    std::io::stdout().write_all(data).unwrap();
    s.bytes_used += data.len() as u64;
}
31.75
139
0.527256
75cfcb19c7ae5cd93aba6b84a2c7556eb1d34265
368
// Top-level module declarations and re-exports for this crate.

// Renderer core; its main type is re-exported at the crate root.
pub mod renderer;
pub use renderer::Renderer;

pub mod material;
pub mod mesh;

// Render-graph internals stay private; only these types are public.
mod render_graph;
pub use render_graph::{CommandBufferQueue, CommandQueueItem, RenderGraph};

// Pipeline abstraction stays private; its building blocks are re-exported.
mod pipeline;
pub use pipeline::{BindGroupWithData, SimplePipeline, SimplePipelineDesc, VertexStateBuilder};

pub mod pipelines;
pub mod resources;
pub mod systems;
pub mod pipeline_manager;
18.4
94
0.80163
ff8ee9a8672fea5dc3b5b263915ce5430444b17e
674
#![no_std]
#![no_main]
#![feature(type_alias_impl_trait)]

use defmt::{info, unwrap};
use embassy::executor::Spawner;
use embassy::time::{Duration, Timer};
use embassy_nrf::Peripherals;

use defmt_rtt as _; // global logger
use panic_probe as _;

/// Periodic task: logs on a slow cadence (every 64000 timer ticks).
#[embassy::task]
async fn run1() {
    loop {
        info!("BIG INFREQUENT TICK");
        Timer::after(Duration::from_ticks(64000)).await;
    }
}

/// Periodic task: logs on a faster cadence (every 13000 timer ticks).
#[embassy::task]
async fn run2() {
    loop {
        info!("tick");
        Timer::after(Duration::from_ticks(13000)).await;
    }
}

/// Entry point: spawns both periodic tasks on the embassy executor.
/// `unwrap!` (defmt) aborts with a log message if spawning fails; the
/// peripherals handle is unused here.
#[embassy::main]
async fn main(spawner: Spawner, _p: Peripherals) {
    unwrap!(spawner.spawn(run1()));
    unwrap!(spawner.spawn(run2()));
}
19.823529
56
0.637982
75cd9e2f967727d4914e3cc234d118010b432e90
12,331
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. /*! typeck.rs, an introduction The type checker is responsible for: 1. Determining the type of each expression 2. Resolving methods and traits 3. Guaranteeing that most type rules are met ("most?", you say, "why most?" Well, dear reader, read on) The main entry point is `check_crate()`. Type checking operates in several major phases: 1. The collect phase first passes over all items and determines their type, without examining their "innards". 2. Variance inference then runs to compute the variance of each parameter 3. Coherence checks for overlapping or orphaned impls 4. Finally, the check phase then checks function bodies and so forth. Within the check phase, we check each function body one at a time (bodies of function expressions are checked as part of the containing function). Inference is used to supply types wherever they are unknown. The actual checking of a function itself has several phases (check, regionck, writeback), as discussed in the documentation for the `check` module. The type checker is defined into various submodules which are documented independently: - astconv: converts the AST representation of types into the `ty` representation - collect: computes the types of each top-level item and enters them into the `tcx.types` table for later use - coherence: enforces coherence rules, builds some tables - variance: variance inference - check: walks over function bodies and type checks them, inferring types for local variables, type parameters, etc as necessary. 
- infer: finds the types to use for each type variable such that all subtyping and assignment constraints are met. In essence, the check module specifies the constraints, and the infer module solves them. # Note This API is completely unstable and subject to change. */ #![crate_name = "rustc_typeck"] #![unstable(feature = "rustc_private", issue = "27812")] #![crate_type = "dylib"] #![crate_type = "rlib"] #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", html_favicon_url = "https://doc.rust-lang.org/favicon.ico", html_root_url = "https://doc.rust-lang.org/nightly/")] #![deny(warnings)] #![allow(non_camel_case_types)] #![feature(box_patterns)] #![feature(box_syntax)] #![feature(conservative_impl_trait)] #![feature(quote)] #![feature(rustc_diagnostic_macros)] #![feature(rustc_private)] #![feature(staged_api)] #[macro_use] extern crate log; #[macro_use] extern crate syntax; extern crate syntax_pos; extern crate arena; extern crate fmt_macros; #[macro_use] extern crate rustc; extern crate rustc_platform_intrinsics as intrinsics; extern crate rustc_back; extern crate rustc_const_math; extern crate rustc_const_eval; extern crate rustc_data_structures; extern crate rustc_errors as errors; pub use rustc::dep_graph; pub use rustc::hir; pub use rustc::lint; pub use rustc::middle; pub use rustc::session; pub use rustc::util; use dep_graph::DepNode; use hir::map as hir_map; use rustc::infer::InferOk; use rustc::ty::subst::Substs; use rustc::ty::{self, Ty, TyCtxt}; use rustc::traits::{self, ObligationCause, ObligationCauseCode, Reveal}; use session::config; use util::common::time; use syntax::ast; use syntax::abi::Abi; use syntax_pos::Span; use std::iter; use std::cell::RefCell; use util::nodemap::NodeMap; // NB: This module needs to be declared first so diagnostics are // registered before they are used. 
pub mod diagnostics;

pub mod check;
pub mod check_unused;
mod rscope;
mod astconv;
pub mod collect;
mod constrained_type_params;
mod impl_wf_check;
pub mod coherence;
pub mod variance;

/// A type paired with the substitutions that were applied to produce it.
pub struct TypeAndSubsts<'tcx> {
    pub substs: &'tcx Substs<'tcx>,
    pub ty: Ty<'tcx>,
}

/// Shared context threaded through the whole type-checking pass for a crate.
pub struct CrateCtxt<'a, 'tcx: 'a> {
    // Cache of AST-type -> ty conversions; returned from check_crate on success.
    ast_ty_to_ty_cache: RefCell<NodeMap<Ty<'tcx>>>,

    /// A vector of every trait accessible in the whole crate
    /// (i.e. including those from subcrates). This is used only for
    /// error reporting, and so is lazily initialised and generally
    /// shouldn't taint the common path (hence the RefCell).
    pub all_traits: RefCell<Option<check::method::AllTraitsVec>>,

    /// This stack is used to identify cycles in the user's source.
    /// Note that these cycles can cross multiple items.
    pub stack: RefCell<Vec<collect::AstConvRequest>>,

    pub tcx: TyCtxt<'a, 'tcx, 'tcx>,

    /// Obligations which will have to be checked at the end of
    /// type-checking, after all functions have been inferred.
    /// The key is the NodeId of the item the obligations were from.
    pub deferred_obligations: RefCell<NodeMap<Vec<traits::DeferredObligation<'tcx>>>>,
}

/// Emits E0045 when a variadic function declaration does not use the C ABI.
fn require_c_abi_if_variadic(tcx: TyCtxt,
                             decl: &hir::FnDecl,
                             abi: Abi,
                             span: Span) {
    if decl.variadic && abi != Abi::C {
        let mut err = struct_span_err!(tcx.sess, span, E0045,
                  "variadic function must have C calling convention");
        err.span_label(span, &("variadics require C calling conventions").to_string())
            .emit();
    }
}

/// Checks that `expected` and `actual` are the same type, reporting a
/// mismatched-types error and returning false otherwise.
fn require_same_types<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
                                cause: &ObligationCause<'tcx>,
                                expected: Ty<'tcx>,
                                actual: Ty<'tcx>)
                                -> bool {
    ccx.tcx.infer_ctxt(None, None, Reveal::NotSpecializable).enter(|infcx| {
        match infcx.eq_types(false, &cause, expected, actual) {
            Ok(InferOk { obligations, .. }) => {
                // FIXME(#32730) propagate obligations
                assert!(obligations.is_empty());
                true
            }
            Err(err) => {
                infcx.report_mismatched_types(cause, expected, actual, err).emit();
                false
            }
        }
    })
}

/// Checks that the `main` entry function has the expected type `fn()` and
/// carries no type parameters (E0131 otherwise).
fn check_main_fn_ty(ccx: &CrateCtxt,
                    main_id: ast::NodeId,
                    main_span: Span) {
    let tcx = ccx.tcx;
    let main_def_id = tcx.map.local_def_id(main_id);
    let main_t = tcx.item_type(main_def_id);
    match main_t.sty {
        ty::TyFnDef(..) => {
            // Reject a generic main before comparing signatures.
            match tcx.map.find(main_id) {
                Some(hir_map::NodeItem(it)) => {
                    match it.node {
                        hir::ItemFn(.., ref generics, _) => {
                            if generics.is_parameterized() {
                                struct_span_err!(ccx.tcx.sess, generics.span, E0131,
                                         "main function is not allowed to have type parameters")
                                    .span_label(generics.span,
                                                &format!("main cannot have type parameters"))
                                    .emit();
                                return;
                            }
                        }
                        _ => ()
                    }
                }
                _ => ()
            }
            // Expected signature: fn() -> () with the Rust ABI.
            let substs = tcx.intern_substs(&[]);
            let se_ty = tcx.mk_fn_def(main_def_id, substs,
                                      tcx.mk_bare_fn(ty::BareFnTy {
                unsafety: hir::Unsafety::Normal,
                abi: Abi::Rust,
                sig: ty::Binder(tcx.mk_fn_sig(iter::empty(), tcx.mk_nil(), false))
            }));

            require_same_types(
                ccx,
                &ObligationCause::new(main_span, main_id, ObligationCauseCode::MainFunctionType),
                se_ty,
                main_t);
        }
        _ => {
            span_bug!(main_span,
                      "main has a non-function type: found `{}`",
                      main_t);
        }
    }
}

/// Checks that a `#[start]` function has the expected type
/// `fn(isize, *const *const u8) -> isize` and no type parameters (E0132).
fn check_start_fn_ty(ccx: &CrateCtxt,
                     start_id: ast::NodeId,
                     start_span: Span) {
    let tcx = ccx.tcx;
    let start_def_id = ccx.tcx.map.local_def_id(start_id);
    let start_t = tcx.item_type(start_def_id);
    match start_t.sty {
        ty::TyFnDef(..) => {
            // Reject a generic start function before comparing signatures.
            match tcx.map.find(start_id) {
                Some(hir_map::NodeItem(it)) => {
                    match it.node {
                        hir::ItemFn(..,ref ps,_)
                        if ps.is_parameterized() => {
                            struct_span_err!(tcx.sess, ps.span, E0132,
                                "start function is not allowed to have type parameters")
                                .span_label(ps.span,
                                            &format!("start function cannot have type parameters"))
                                .emit();
                            return;
                        }
                        _ => ()
                    }
                }
                _ => ()
            }

            let substs = tcx.intern_substs(&[]);
            let se_ty = tcx.mk_fn_def(start_def_id, substs,
                                      tcx.mk_bare_fn(ty::BareFnTy {
                unsafety: hir::Unsafety::Normal,
                abi: Abi::Rust,
                sig: ty::Binder(tcx.mk_fn_sig(
                    [
                        tcx.types.isize,
                        tcx.mk_imm_ptr(tcx.mk_imm_ptr(tcx.types.u8))
                    ].iter().cloned(),
                    tcx.types.isize,
                    false,
                )),
            }));

            require_same_types(
                ccx,
                &ObligationCause::new(start_span, start_id, ObligationCauseCode::StartFunctionType),
                se_ty,
                start_t);
        }
        _ => {
            span_bug!(start_span,
                      "start has a non-function type: found `{}`",
                      start_t);
        }
    }
}

/// Dispatches the entry-point type check (main vs. #[start]) if the session
/// recorded an entry function for this crate.
fn check_for_entry_fn(ccx: &CrateCtxt) {
    let tcx = ccx.tcx;
    let _task = tcx.dep_graph.in_task(DepNode::CheckEntryFn);
    if let Some((id, sp)) = *tcx.sess.entry_fn.borrow() {
        match tcx.sess.entry_type.get() {
            Some(config::EntryMain) => check_main_fn_ty(ccx, id, sp),
            Some(config::EntryStart) => check_start_fn_ty(ccx, id, sp),
            Some(config::EntryNone) => {}
            None => bug!("entry function without a type")
        }
    }
}

/// Entry point for the whole pass: runs collection, variance inference,
/// impl well-formedness, coherence, and the per-item checks in order.
/// On success returns the AST-type -> ty cache; on failure, the error count.
pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>)
                             -> Result<NodeMap<Ty<'tcx>>, usize> {
    let time_passes = tcx.sess.time_passes();
    let ccx = CrateCtxt {
        ast_ty_to_ty_cache: RefCell::new(NodeMap()),
        all_traits: RefCell::new(None),
        stack: RefCell::new(Vec::new()),
        tcx: tcx,
        deferred_obligations: RefCell::new(NodeMap()),
    };

    // this ensures that later parts of type checking can assume that items
    // have valid types and not error
    tcx.sess.track_errors(|| {
        time(time_passes, "type collecting", ||
             collect::collect_item_types(&ccx));
    })?;

    time(time_passes, "variance inference", ||
         variance::infer_variance(tcx));

    tcx.sess.track_errors(|| {
        time(time_passes, "impl wf inference", ||
             impl_wf_check::impl_wf_check(&ccx));
    })?;

    tcx.sess.track_errors(|| {
        time(time_passes, "coherence checking", ||
             coherence::check_coherence(&ccx));
    })?;

    time(time_passes, "wf checking", || check::check_wf_new(&ccx))?;

    time(time_passes, "item-types checking", || check::check_item_types(&ccx))?;

    time(time_passes, "item-bodies checking", || check::check_item_bodies(&ccx))?;

    time(time_passes, "drop-impl checking", || check::check_drop_impls(&ccx))?;

    check_unused::check_crate(tcx);
    check_for_entry_fn(&ccx);

    let err_count = tcx.sess.err_count();
    if err_count == 0 {
        Ok(ccx.ast_ty_to_ty_cache.into_inner())
    } else {
        Err(err_count)
    }
}

__build_diagnostic_array! { librustc_typeck, DIAGNOSTICS }
33.969697
100
0.582272
d91853b6202348e0de5a8d28667d4c0be453434f
6,359
use std::{cmp, ops}; use anyhow::Result; use ordered_float::NotNan; use serde::{Deserialize, Serialize}; use crate::{deserialize_f64, serialize_f64, trim_f64, Duration}; /// In seconds since midnight. Can't be negative. #[derive(Clone, Copy, Debug, PartialEq, PartialOrd, Serialize, Deserialize)] pub struct Time( #[serde(serialize_with = "serialize_f64", deserialize_with = "deserialize_f64")] f64, ); // By construction, Time is a finite f64 with trimmed precision. impl Eq for Time {} #[allow(clippy::derive_ord_xor_partial_ord)] // false positive impl Ord for Time { fn cmp(&self, other: &Time) -> cmp::Ordering { self.partial_cmp(other).unwrap() } } #[allow(clippy::derive_hash_xor_eq)] // false positive impl std::hash::Hash for Time { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { NotNan::new(self.0).unwrap().hash(state); } } impl Time { pub const START_OF_DAY: Time = Time(0.0); // No direct public constructors. Explicitly do Time::START_OF_DAY + duration. fn seconds_since_midnight(value: f64) -> Time { if !value.is_finite() || value < 0.0 { panic!("Bad Time {}", value); } Time(trim_f64(value)) } /// (hours, minutes, seconds, centiseconds) fn get_parts(self) -> (usize, usize, usize, usize) { let mut remainder = self.0; let hours = (remainder / 3600.0).floor(); remainder -= hours * 3600.0; let minutes = (remainder / 60.0).floor(); remainder -= minutes * 60.0; let seconds = remainder.floor(); remainder -= seconds; let centis = (remainder / 0.1).floor(); ( hours as usize, minutes as usize, seconds as usize, centis as usize, ) } /// Rounded down. 6:59:00 is hour 6. 
pub fn get_hours(self) -> usize { self.get_parts().0 } pub fn ampm_tostring(self) -> String { let (mut hours, minutes, seconds, _) = self.get_parts(); let next_day = if hours >= 24 { let days = hours / 24; hours %= 24; format!(" (+{} days)", days) } else { "".to_string() }; let suffix = if hours < 12 { "AM" } else { "PM" }; if hours == 0 { hours = 12; } else if hours >= 24 { hours -= 24; } else if hours > 12 { hours -= 12; } format!( "{:02}:{:02}:{:02} {}{}", hours, minutes, seconds, suffix, next_day ) } pub fn as_filename(self) -> String { let (hours, minutes, seconds, remainder) = self.get_parts(); format!( "{0:02}h{1:02}m{2:02}.{3:01}s", hours, minutes, seconds, remainder ) } pub fn parse(string: &str) -> Result<Time> { let parts: Vec<&str> = string.split(':').collect(); if parts.is_empty() { bail!("Time {}: no :'s", string); } let mut seconds = parts.last().unwrap().parse::<f64>()?; match parts.len() { 1 => Ok(Time::seconds_since_midnight(seconds)), 2 => { // They're really minutes seconds *= 60.0; seconds += 3600.0 * parts[0].parse::<f64>()?; Ok(Time::seconds_since_midnight(seconds)) } 3 => { seconds += 60.0 * parts[1].parse::<f64>()?; seconds += 3600.0 * parts[0].parse::<f64>()?; Ok(Time::seconds_since_midnight(seconds)) } _ => bail!("Time {}: weird number of parts", string), } } // TODO These are a little weird, so don't operator overload yet pub fn percent_of(self, p: f64) -> Time { if !(0.0..=1.0).contains(&p) { panic!("Bad percent_of input: {}", p); } Time::seconds_since_midnight(self.0 * p) } pub fn to_percent(self, other: Time) -> f64 { self.0 / other.0 } /// For RNG range generation. Don't abuse. 
pub fn inner_seconds(self) -> f64 { self.0 } pub fn clamped_sub(self, dt: Duration) -> Time { Time::seconds_since_midnight((self.0 - dt.inner_seconds()).max(0.0)) } pub fn round_seconds(self, s: f64) -> Time { Time::seconds_since_midnight(s * (self.0 / s).round()) } } // 24-hour format by default impl std::fmt::Display for Time { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { let (hours, minutes, seconds, remainder) = self.get_parts(); write!( f, "{0:02}:{1:02}:{2:02}.{3:01}", hours, minutes, seconds, remainder ) } } impl ops::Add<Duration> for Time { type Output = Time; fn add(self, other: Duration) -> Time { Time::seconds_since_midnight(self.0 + other.inner_seconds()) } } impl ops::AddAssign<Duration> for Time { fn add_assign(&mut self, other: Duration) { *self = *self + other; } } impl ops::Sub<Duration> for Time { type Output = Time; fn sub(self, other: Duration) -> Time { Time::seconds_since_midnight(self.0 - other.inner_seconds()) } } impl ops::Sub for Time { type Output = Duration; fn sub(self, other: Time) -> Duration { Duration::seconds(self.0 - other.0) } } #[cfg(test)] mod tests { use super::*; #[test] fn parse() { assert_eq!( Time::START_OF_DAY + Duration::seconds(42.3), Time::parse("42.3").unwrap() ); assert_eq!( Time::START_OF_DAY + Duration::hours(7) + Duration::minutes(30), Time::parse("07:30").unwrap() ); assert_eq!( Time::START_OF_DAY + Duration::hours(7) + Duration::minutes(30) + Duration::seconds(5.0), Time::parse("07:30:05").unwrap() ); } #[test] fn get_hours() { assert_eq!((Time::START_OF_DAY + Duration::hours(6)).get_hours(), 6); assert_eq!( (Time::START_OF_DAY + Duration::hours(6) + Duration::seconds(1.0)).get_hours(), 6 ); assert_eq!( (Time::START_OF_DAY + Duration::hours(6) + Duration::minutes(59)).get_hours(), 6 ); } }
28.013216
91
0.527756
db15c5adc800a1d16d1cec47d4b96f600addcbd0
7,151
// Copyright 2021 Sonatype. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. extern crate packageurl; extern crate quick_xml; use crate::Package; use log::trace; use packageurl::PackageUrl; use quick_xml::events::BytesEnd; use quick_xml::events::BytesStart; use quick_xml::events::BytesText; use quick_xml::events::Event; use quick_xml::Writer; use std::io::Cursor; use std::str::FromStr; pub struct CycloneDXGenerator(); impl CycloneDXGenerator { pub fn generate_sbom_from_purls(&self, purls: Vec<Package>) -> String { return generate_1_3_sbom_from_purls(purls); } } fn generate_1_3_sbom_from_purls(purls: Vec<Package>) -> String { let mut writer = Writer::new(Cursor::new(Vec::new())); let mut bom = BytesStart::borrowed_name(b"bom"); bom.push_attribute(("xmlns", "http://cyclonedx.org/schema/bom/1.3")); bom.push_attribute(("version", "1")); assert!(writer.write_event(Event::Start(bom)).is_ok()); assert!(writer .write_event(Event::Start(BytesStart::borrowed_name(b"components"))) .is_ok()); for p in purls { let purl = PackageUrl::from_str(&p.as_purl()).unwrap(); let mut component = BytesStart::borrowed_name(b"component"); component.push_attribute(("type", "library")); component.push_attribute(("bom-ref", purl.clone().to_string().as_ref())); assert!(writer.write_event(Event::Start(component)).is_ok()); // Name tag assert!(writer .write_event(Event::Start(BytesStart::borrowed_name(b"name"))) .is_ok()); let name = &purl.clone().name; let name_value = BytesText::from_plain_str(name); 
assert!(writer.write_event(Event::Text(name_value)).is_ok()); assert!(writer .write_event(Event::End(BytesEnd::borrowed(b"name"))) .is_ok()); // Version tag assert!(writer .write_event(Event::Start(BytesStart::borrowed_name(b"version"))) .is_ok()); let vers = &purl.clone().version.unwrap(); let version_value = BytesText::from_plain_str(vers); assert!(writer.write_event(Event::Text(version_value)).is_ok()); assert!(writer .write_event(Event::End(BytesEnd::borrowed(b"version"))) .is_ok()); // License tag match p.license { Some(license) => { assert!(writer .write_event(Event::Start(BytesStart::borrowed_name(b"licenses"))) .is_ok()); assert!(writer .write_event(Event::Start(BytesStart::borrowed_name(b"license"))) .is_ok()); assert!(writer .write_event(Event::Start(BytesStart::borrowed_name(b"name"))) .is_ok()); let license_value = BytesText::from_plain_str(&license); assert!(writer.write_event(Event::Text(license_value)).is_ok()); assert!(writer .write_event(Event::End(BytesEnd::borrowed(b"name"))) .is_ok()); assert!(writer .write_event(Event::End(BytesEnd::borrowed(b"license"))) .is_ok()); assert!(writer .write_event(Event::End(BytesEnd::borrowed(b"licenses"))) .is_ok()); } None => { trace!("No license found for component"); } } // Purl tag assert!(writer .write_event(Event::Start(BytesStart::borrowed_name(b"purl"))) .is_ok()); let purl_string = &purl.clone().to_string(); let purl_value = BytesText::from_plain_str(purl_string); assert!(writer.write_event(Event::Text(purl_value)).is_ok()); assert!(writer .write_event(Event::End(BytesEnd::borrowed(b"purl"))) .is_ok()); assert!(writer .write_event(Event::End(BytesEnd::borrowed(b"component"))) .is_ok()); } assert!(writer .write_event(Event::End(BytesEnd::borrowed(b"components"))) .is_ok()); assert!(writer .write_event(Event::End(BytesEnd::borrowed(b"bom"))) .is_ok()); match String::from_utf8(writer.into_inner().into_inner()) { Ok(s) => return s, Err(e) => panic!("Something went horribly wrong: {}", e), }; } #[cfg(test)] 
mod tests { use super::*; use cargo_metadata::PackageId; #[test] fn can_generate_sbom_from_purls_test() { let cyclonedx = CycloneDXGenerator {}; let mut packages: Vec<Package> = Vec::new(); packages.push(Package { name: "test".to_string(), version: cargo_metadata::Version { major: 1, minor: 0, patch: 0, build: vec![], pre: vec![], }, license: None, package_id: PackageId { repr: "".to_string(), }, }); packages.push(Package { name: "test".to_string(), version: cargo_metadata::Version { major: 1, minor: 0, patch: 1, build: vec![], pre: vec![], }, license: None, package_id: PackageId { repr: "".to_string(), }, }); packages.push(Package { name: "test".to_string(), version: cargo_metadata::Version { major: 1, minor: 0, patch: 2, build: vec![], pre: vec![], }, license: Some("Apache-2.0".to_string()), package_id: PackageId { repr: "".to_string(), }, }); let sbom = cyclonedx.generate_sbom_from_purls(packages); let expected = "<bom xmlns=\"http://cyclonedx.org/schema/bom/1.3\" version=\"1\"><components><component type=\"library\" bom-ref=\"pkg:cargo/[email protected]\"><name>test</name><version>1.0.0</version><purl>pkg:cargo/[email protected]</purl></component><component type=\"library\" bom-ref=\"pkg:cargo/[email protected]\"><name>test</name><version>1.0.1</version><purl>pkg:cargo/[email protected]</purl></component><component type=\"library\" bom-ref=\"pkg:cargo/[email protected]\"><name>test</name><version>1.0.2</version><licenses><license><name>Apache-2.0</name></license></licenses><purl>pkg:cargo/[email protected]</purl></component></components></bom>"; assert_eq!(sbom, expected); } }
35.40099
626
0.561879
fb374e64cb988cc8981cdec1e85a31fe41063983
112
use beat_saber_example::real_main; use hotham::HothamResult; fn main() -> HothamResult<()> { real_main() }
16
34
0.696429
1d65a0a5e9e8c32276be1e78d43e8c26c8932226
768
// ignore-fast // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // ignore-fast #[allow(unused_imports)]; #[no_std]; extern crate std; extern crate zed = "std"; extern crate bar = "std#0.10-pre"; use std::str; use x = zed::str; mod baz { pub use bar::str; pub use x = std::str; } #[start] pub fn start(_: int, _: **u8) -> int { 0 }
24
69
0.686198
def71d1b18769cbcabe7e274ef4f524a0969ed27
49,834
//! Tools for compiling SPWN into GD object strings use crate::ast; use crate::builtin::*; use crate::levelstring::*; use crate::STD_PATH; use std::collections::{HashMap, HashSet}; use crate::parser::{ParseNotes, SyntaxError}; use std::fs; use std::path::PathBuf; use crate::compiler_types::*; use crate::print_with_color; pub const CONTEXT_MAX: usize = 2; use termcolor::Color as TColor; #[derive(Debug)] pub enum RuntimeError { UndefinedErr { undefined: String, desc: String, info: CompilerInfo, }, PackageSyntaxError { err: SyntaxError, info: CompilerInfo, }, TypeError { expected: String, found: String, info: CompilerInfo, }, RuntimeError { message: String, info: CompilerInfo, }, BuiltinError { message: String, info: CompilerInfo, }, } pub fn print_error_intro(pos: crate::parser::FileRange, file: &PathBuf) { use std::io::Write; use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor}; let mut stdout = StandardStream::stderr(ColorChoice::Always); let mut write_with_color = |text: &str, color: Color| { stdout .set_color(ColorSpec::new().set_fg(Some(color))) .unwrap(); write!(&mut stdout, "{}", text).unwrap(); }; let path_str = format!( "{}:{}:{}", file.to_string_lossy().to_string(), pos.0 .0, pos.0 .1 + 1 ); write_with_color("Error", TColor::Red); write_with_color(&format!(" at {}\n", path_str), TColor::White); if pos.0 .0 == pos.1 .0 { use std::io::BufRead; if let Ok(file) = fs::File::open(&file) { if let Some(line) = std::io::BufReader::new(file).lines().nth(pos.0 .0 - 1) { if let Ok(line) = line { let line_num = pos.1 .0.to_string(); let mut spacing = String::new(); for _ in 0..line_num.len() { spacing += " "; } let squiggly_line = "^"; write_with_color( (spacing.clone() + " |\n" + &line_num + " |").as_str(), TColor::Cyan, ); write_with_color((line.replace("\t", " ") + "\n").as_str(), TColor::White); write_with_color((spacing + " |").as_str(), TColor::Cyan); let mut out = String::new(); for _ in 0..(pos.0 .1) { out += " "; } for _ in 0..(pos.1 .1 - 
pos.0 .1) { out += squiggly_line; } out += "\n"; write_with_color(&out, TColor::Red); stdout .set_color(ColorSpec::new().set_fg(Some(TColor::White))) .unwrap(); } } } }; } impl std::fmt::Display for RuntimeError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let info = match self { RuntimeError::UndefinedErr { undefined: _, desc: _, info, } => info, RuntimeError::PackageSyntaxError { err: _, info } => info, RuntimeError::TypeError { expected: _, found: _, info, } => info, RuntimeError::RuntimeError { message: _, info } => info, RuntimeError::BuiltinError { message: _, info } => info, }; print_error_intro(info.pos, &info.current_file); match self { RuntimeError::UndefinedErr { undefined, desc, info: _, } => write!(f, "{} '{}' is not defined", desc, undefined,), RuntimeError::PackageSyntaxError { err, info: _ } => { write!(f, "Error when parsing library: {}", err) } RuntimeError::TypeError { expected, found, info: _, } => write!(f, "Type mismatch: expected {}, found {}", expected, found,), RuntimeError::RuntimeError { message, info: _ } => write!(f, "{}", message,), RuntimeError::BuiltinError { message, info: _ } => { write!(f, "Error when calling built-in-function: {}", message,) } } } } impl std::error::Error for RuntimeError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { None } } pub const NULL_STORAGE: usize = 1; pub const BUILTIN_STORAGE: usize = 0; pub fn compile_spwn( statements: Vec<ast::Statement>, path: PathBuf, included_paths: Vec<PathBuf>, notes: ParseNotes, ) -> Result<Globals, RuntimeError> { //variables that get changed throughout the compiling let mut globals = Globals::new(path.clone()); if statements.is_empty() { return Err(RuntimeError::RuntimeError { message: "this script is empty".to_string(), info: CompilerInfo { depth: 0, path: vec!["main scope".to_string()], pos: ((0, 0), (0, 0)), current_file: path, current_module: String::new(), includes: vec![], }, }); } let mut start_context = Context::new(); 
//store at pos 0 // store_value(Value::Builtins, 1, &mut globals, &start_context); // store_value(Value::Null, 1, &mut globals, &start_context); let start_info = CompilerInfo { depth: 0, path: vec!["main scope".to_string()], pos: statements[0].pos, current_file: path, current_module: String::new(), includes: included_paths, }; use std::time::Instant; //println!("Importing standard library..."); print_with_color("Building script ...", TColor::Cyan); print_with_color("———————————————————————————\n", TColor::White); let start_time = Instant::now(); if !notes.tag.tags.iter().any(|x| x.0 == "no_std") { let standard_lib = import_module( &ImportType::Lib(STD_PATH.to_string()), &start_context, &mut globals, start_info.clone(), false, )?; if standard_lib.len() != 1 { return Err(RuntimeError::RuntimeError { message: "The standard library can not split the context".to_string(), info: start_info, }); } start_context = standard_lib[0].1.clone(); if let Value::Dict(d) = &globals.stored_values[standard_lib[0].0] { start_context.variables.extend(d.clone()); } else { return Err(RuntimeError::RuntimeError { message: "The standard library must return a dictionary".to_string(), info: start_info, }); } } let (contexts, _) = compile_scope( &statements, smallvec![start_context], &mut globals, start_info, )?; for c in contexts { if let Some((i, _)) = c.broken { return Err(RuntimeError::RuntimeError { message: "break statement is never used".to_string(), info: i, }); } } print_with_color("———————————————————————————\n", TColor::White); print_with_color( &format!( "Built in {} milliseconds!", start_time.elapsed().as_millis() ), TColor::Green, ); Ok(globals) } use smallvec::{smallvec, SmallVec}; pub fn compile_scope( statements: &[ast::Statement], mut contexts: SmallVec<[Context; CONTEXT_MAX]>, globals: &mut Globals, mut info: CompilerInfo, ) -> Result<(SmallVec<[Context; CONTEXT_MAX]>, Returns), RuntimeError> { let mut returns: Returns = SmallVec::new(); //take out broken contexts let mut 
broken_contexts = SmallVec::new(); let mut to_be_removed = SmallVec::<[usize; CONTEXT_MAX]>::new(); for (i, c) in contexts.iter().enumerate() { if c.broken != None { broken_contexts.push(c.clone()); to_be_removed.push(i) } } for i in to_be_removed.iter().rev() { contexts.swap_remove(*i); } if contexts.is_empty() { return Ok((broken_contexts, returns)); } globals.stored_values.increment_lifetimes(); for statement in statements.iter() { //find out what kind of statement this is //let start_time = Instant::now(); //print_error_intro(info.pos, &info.current_file); // println!( // "{} -> Compiling a statement in {} contexts", // info.path.join(">"), // contexts.len() // ); if contexts.is_empty() { return Err(RuntimeError::RuntimeError { message: "No context! This is probably a bug, please contact sputnix".to_string(), info, }); } use ast::StatementBody::*; let stored_context = if statement.arrow { Some(contexts.clone()) } else { None }; info.pos = statement.pos; //println!("{}:{}:{}", info.current_file.to_string_lossy(), info.pos.0.0, info.pos.0.1); //use crate::fmt::SpwnFmt; match &statement.body { Expr(expr) => { let mut new_contexts: SmallVec<[Context; CONTEXT_MAX]> = SmallVec::new(); for context in &contexts { let is_assign = !expr.operators.is_empty() && expr.operators[0] == ast::Operator::Assign && !expr.values[0].is_defined(&context, globals); if is_assign { let mut new_expr = expr.clone(); let symbol = new_expr.values.remove(0); //use crate::fmt::SpwnFmt; new_expr.operators.remove(0); //assign operator let constant = symbol.operator != Some(ast::UnaryOperator::Let); //let mut new_context = context.clone(); match (new_expr.values.len() == 1, &new_expr.values[0].value.body) { (true, ast::ValueBody::CmpStmt(f)) => { //to account for recursion //create the function context let mut new_context = context.clone(); let storage = symbol.define(&mut new_context, globals, &info)?; //pick a start group let start_group = Group::next_free(&mut globals.closed_groups); //store 
value globals.stored_values[storage] = Value::TriggerFunc(TriggerFunction { start_group }); new_context.start_group = start_group; let new_info = info.clone(); let (_, inner_returns) = compile_scope( &f.statements, smallvec![new_context], globals, new_info, )?; returns.extend(inner_returns); let mut after_context = context.clone(); let var_storage = symbol.define(&mut after_context, globals, &info)?; globals.stored_values[var_storage] = Value::TriggerFunc(TriggerFunction { start_group }); new_contexts.push(after_context); } _ => { let (evaled, inner_returns) = new_expr.eval(context, globals, info.clone(), constant)?; if !constant { for (val, _) in &evaled { globals.stored_values.set_mutability(*val, !constant); } } returns.extend(inner_returns); for (e, c2) in evaled { let mut new_context = c2.clone(); let storage = symbol.define(&mut new_context, globals, &info)?; //clone the value so as to not share the reference let cloned = clone_value(e, 1, globals, &new_context, true); globals.stored_values[storage] = globals.stored_values[cloned].clone(); new_contexts.push(new_context); } } } } else { //we dont care about the return value in this case let (evaled, inner_returns) = expr.eval(context, globals, info.clone(), false)?; returns.extend(inner_returns); new_contexts.extend(evaled.iter().map(|x| { //globals.stored_values.map.remove(&x.0); x.1.clone() })); } } contexts = new_contexts; } Extract(val) => { let mut all_values: Returns = SmallVec::new(); for context in &contexts { let (evaled, inner_returns) = val.eval(context, globals, info.clone(), true)?; returns.extend(inner_returns); all_values.extend(evaled); } contexts = SmallVec::new(); for (val, mut context) in all_values { match globals.stored_values[val].clone() { Value::Dict(d) => { context.variables.extend( d.iter() .map(|(k, v)| { (k.clone(), clone_value(*v, 1, globals, &context, false)) }) .collect::<HashMap<String, StoredValue>>(), ); } Value::Builtins => { for name in BUILTIN_LIST.iter() { let p = 
store_value( Value::BuiltinFunction(String::from(*name)), 1, globals, &context, ); context.variables.insert(String::from(*name), p); } } a => { return Err(RuntimeError::RuntimeError { message: format!( "This type ({}) can not be extracted!", a.to_str(globals) ), info, }) } } contexts.push(context); } } TypeDef(name) => { //initialize type let already = globals.type_ids.get(name); if let Some(t) = already { if !(t.1 == info.current_file && t.2 == info.pos.0) { return Err(RuntimeError::RuntimeError { message: format!("the type '{}' is already defined", name), info, }); } } else { (*globals).type_id_count += 1; (*globals).type_ids.insert( name.clone(), (globals.type_id_count, info.current_file.clone(), info.pos.0), ); } //Value::TypeIndicator(globals.type_id_count) } If(if_stmt) => { let mut all_values: Returns = SmallVec::new(); for context in &contexts { let (evaled, inner_returns) = if_stmt .condition .eval(context, globals, info.clone(), true)?; returns.extend(inner_returns); all_values.extend(evaled); } contexts = SmallVec::new(); for (val, context) in all_values { match &globals.stored_values[val] { Value::Bool(b) => { //internal if statement if *b { let new_info = info.clone(); let compiled = compile_scope( &if_stmt.if_body, smallvec![context.clone()], globals, new_info, )?; returns.extend(compiled.1); contexts.extend(compiled.0.iter().map(|c| Context { variables: context.variables.clone(), ..c.clone() })); } else { match &if_stmt.else_body { Some(body) => { let new_info = info.clone(); let compiled = compile_scope( body, smallvec![context.clone()], globals, new_info, )?; returns.extend(compiled.1); contexts.extend(compiled.0.iter().map(|c| Context { variables: context.variables.clone(), ..c.clone() })); } None => contexts.push(context), }; } } a => { return Err(RuntimeError::RuntimeError { message: format!( "Expected boolean condition in if statement, found {}", a.to_str(globals) ), info, }) } } } } Impl(imp) => { let message = "cannot run impl statement in a 
trigger function context, consider moving it to the start of your script.".to_string(); if contexts.len() > 1 || contexts[0].start_group.id != ID::Specific(0) { return Err(RuntimeError::RuntimeError { message, info }); } let new_info = info.clone(); let (evaled, inner_returns) = imp.symbol .to_value(contexts[0].clone(), globals, new_info, true)?; if evaled.len() > 1 { return Err(RuntimeError::RuntimeError { message: "impl statements with context-splitting values are not allowed" .to_string(), info, }); } returns.extend(inner_returns); let (typ, c) = evaled[0].clone(); if c.start_group.id != ID::Specific(0) { return Err(RuntimeError::RuntimeError { message, info }); } match globals.stored_values[typ].clone() { Value::TypeIndicator(s) => { let new_info = info.clone(); let (evaled, inner_returns) = eval_dict(imp.members.clone(), &c, globals, new_info, true)?; if evaled.len() > 1 { return Err(RuntimeError::RuntimeError { message: "impl statements with context-splitting values are not allowed" .to_string(), info, }); } //Returns inside impl values dont really make sense do they if !inner_returns.is_empty() { return Err(RuntimeError::RuntimeError { message: "you can't use return from inside an impl statement" .to_string(), info, }); } let (val, _) = evaled[0]; // make this not ugly, future me globals.stored_values.increment_single_lifetime( val, 1000, &mut HashSet::new(), ); if let Value::Dict(d) = &globals.stored_values[val] { match globals.implementations.get_mut(&s) { Some(implementation) => { for (key, val) in d.iter() { (*implementation).insert(key.clone(), (*val, true)); } } None => { globals.implementations.insert( s, d.iter() .map(|(key, value)| (key.clone(), (*value, true))) .collect(), ); } } } else { unreachable!(); } } a => { return Err(RuntimeError::RuntimeError { message: format!( "Expected type-indicator, found {}", a.to_str(globals) ), info, }) } } //println!("{:?}", new_contexts[0].implementations); } Call(call) => { /*for context in &mut contexts { 
context.x += 1; }*/ let mut all_values: Returns = SmallVec::new(); for context in contexts { let (evaled, inner_returns) = call.function .to_value(context, globals, info.clone(), true)?; returns.extend(inner_returns); all_values.extend(evaled); } contexts = SmallVec::new(); //let mut obj_list = Vec::<GDObj>::new(); for (func, context) in all_values { contexts.push(context.clone()); let mut params = HashMap::new(); params.insert( 51, match &globals.stored_values[func] { Value::TriggerFunc(g) => ObjParam::Group(g.start_group), Value::Group(g) => ObjParam::Group(*g), a => { return Err(RuntimeError::RuntimeError { message: format!( "Expected trigger function or group, found: {}", a.to_str(globals) ), info, }) } }, ); params.insert(1, ObjParam::Number(1268.0)); (*globals).trigger_order += 1; (*globals).func_ids[context.func_id].obj_list.push(( GDObj { params, ..context_trigger(&context, &mut globals.uid_counter) } .context_parameters(&context), globals.trigger_order, )) } } For(f) => { let mut all_arrays: Returns = SmallVec::new(); for context in &contexts { let (evaled, inner_returns) = f.array.eval(context, globals, info.clone(), true)?; returns.extend(inner_returns); all_arrays.extend(evaled); } contexts = SmallVec::new(); /* Before going further you should probably understand what contexts mean. A "context", in SPWN, is like a parallel universe. Each context is nearly identical to a code block, except it expands a runtime value that is meant to be converted into a compile time item. Every time you want to do something like convert a counter to a number, SPWN will branch the current code block into a number of contexts, one for each possible value from the conversion. All of the branched contexts will be evaluated in isolation to each other. */ for (val, context) in all_arrays { match globals.stored_values[val].clone() { // what are we iterating Value::Array(arr) => { // its an array! 
let mut new_contexts: SmallVec<[Context; CONTEXT_MAX]> = smallvec![context.clone()]; // new contexts: any contexts that are created in the loop. currently only has 1 let mut out_contexts: SmallVec<[Context; CONTEXT_MAX]> = SmallVec::new(); // out contexts: anything declared outside the loop for element in arr { // going through the array items for c in &mut new_contexts { // reset all variables per context (*c).variables = context.variables.clone(); (*c).variables.insert(f.symbol.clone(), element); } let new_info = info.clone(); // file position info let (end_contexts, inner_returns) = compile_scope(&f.body, new_contexts, globals, new_info)?; // eval the stuff // end_contexts has any new contexts made in the loop new_contexts = SmallVec::new(); for mut c in end_contexts { // add contexts made in the loop to the new_contexts, if they dont have a break match c.broken { Some((_, BreakType::Loop)) => { c.broken = None; out_contexts.push(c) } Some((_, BreakType::ContinueLoop)) => { c.broken = None; new_contexts.push(c) } _ => new_contexts.push(c), } } returns.extend(inner_returns); // return stuff if new_contexts.is_empty() { break; } } out_contexts.extend(new_contexts); contexts.extend(out_contexts.iter().map(|c| Context { variables: context.variables.clone(), ..c.clone() })); // finally append all newly created ones to the global count } Value::Dict(d) => { // its a dict! let mut new_contexts: SmallVec<[Context; CONTEXT_MAX]> = smallvec![context.clone()]; // new contexts: any contexts that are created in the loop. 
currently only has 1 let mut out_contexts: SmallVec<[Context; CONTEXT_MAX]> = SmallVec::new(); // out contexts: anything declared outside the loop for (k, v) in d { // going through the array items for c in &mut new_contexts { // reset all variables per context (*c).variables = context.variables.clone(); let key_stored = store_const_value( // store the dict key Value::Str(k.clone()), 1, globals, c, ); let stored = store_const_value( // store the val key Value::Array(vec![key_stored, v]), 1, globals, c, ); (*c).variables.insert(f.symbol.clone(), stored); } let new_info = info.clone(); // file position info let (end_contexts, inner_returns) = compile_scope(&f.body, new_contexts, globals, new_info)?; // eval the stuff // end_contexts has any new contexts made in the loop new_contexts = SmallVec::new(); for mut c in end_contexts { // add contexts made in the loop to the new_contexts, if theyre not broken if let Some((_, BreakType::Loop)) = c.broken { c.broken = None; out_contexts.push(c) } else { new_contexts.push(c) } } returns.extend(inner_returns); // return stuff if new_contexts.is_empty() { break; } } out_contexts.extend(new_contexts); contexts.extend(out_contexts.iter().map(|c| Context { variables: context.variables.clone(), ..c.clone() })); // finally append all newly created ones to the global count } Value::Str(s) => { //let iterator_val = store_value(Value::Null, globals); //let scope_vars = context.variables.clone(); let mut new_contexts: SmallVec<[Context; CONTEXT_MAX]> = smallvec![context.clone()]; let mut out_contexts: SmallVec<[Context; CONTEXT_MAX]> = SmallVec::new(); for ch in s.chars() { //println!("{}", new_contexts.len()); for c in &mut new_contexts { (*c).variables = context.variables.clone(); let stored = store_const_value( Value::Str(ch.to_string()), 1, globals, c, ); (*c).variables.insert(f.symbol.clone(), stored); } let new_info = info.clone(); let (end_contexts, inner_returns) = compile_scope(&f.body, new_contexts, globals, new_info)?; 
new_contexts = SmallVec::new(); for mut c in end_contexts { if let Some((_, BreakType::Loop)) = c.broken { c.broken = None; out_contexts.push(c) } else { new_contexts.push(c) } } returns.extend(inner_returns); if new_contexts.is_empty() { break; } } out_contexts.extend(new_contexts); contexts.extend(out_contexts.iter().map(|c| Context { variables: context.variables.clone(), ..c.clone() })); } Value::Range(start, end, step) => { let mut normal = (start..end).step_by(step); let mut rev = (end..start).step_by(step).rev(); let range: &mut dyn Iterator<Item = i32> = if start < end { &mut normal } else { &mut rev }; let mut new_contexts: SmallVec<[Context; CONTEXT_MAX]> = smallvec![context.clone()]; let mut out_contexts: SmallVec<[Context; CONTEXT_MAX]> = SmallVec::new(); for num in range { //println!("{}", new_contexts.len()); let element = store_value(Value::Number(num as f64), 0, globals, &context); for c in &mut new_contexts { (*c).variables = context.variables.clone(); (*c).variables.insert(f.symbol.clone(), element); } let new_info = info.clone(); //println!("{}", new_contexts.len()); let (end_contexts, inner_returns) = compile_scope(&f.body, new_contexts, globals, new_info)?; new_contexts = SmallVec::new(); for mut c in end_contexts { if let Some((_, BreakType::Loop)) = c.broken { c.broken = None; out_contexts.push(c) } else { new_contexts.push(c) } } returns.extend(inner_returns); if new_contexts.is_empty() { break; } } out_contexts.extend(new_contexts); contexts.extend(out_contexts.iter().map(|c| Context { variables: context.variables.clone(), ..c.clone() })); } a => { return Err(RuntimeError::RuntimeError { message: format!("{} is not iteratable!", a.to_str(globals)), info, }) } } } } Break => { //set all contexts to broken for c in &mut contexts { (*c).broken = Some((info.clone(), BreakType::Loop)); } break; } Continue => { //set all contexts to broken for c in &mut contexts { (*c).broken = Some((info.clone(), BreakType::ContinueLoop)); } break; } 
Return(return_val) => { match return_val { Some(val) => { let mut all_values: Returns = SmallVec::new(); for context in &contexts { let (evaled, inner_returns) = val.eval(context, globals, info.clone(), true)?; returns.extend(inner_returns); all_values.extend(evaled); } returns.extend(all_values); } None => { let mut all_values: Returns = SmallVec::new(); for context in &contexts { all_values.push(( store_value(Value::Null, 1, globals, context), context.clone(), )); } returns.extend(all_values); } }; if !statement.arrow { //set all contexts to broken for c in &mut contexts { (*c).broken = Some((info.clone(), BreakType::Macro)); } break; } } Error(e) => { for context in &contexts { let (evaled, _) = e.message.eval(context, globals, info.clone(), true)?; for (msg, _) in evaled { eprintln!( "{}", match &globals.stored_values[msg] { Value::Str(s) => s, _ => "no message", }, ); } } return Err(RuntimeError::RuntimeError { message: "Error statement, see message(s) above.".to_string(), info, }); } } let mut to_be_removed = Vec::new(); for (i, c) in contexts.iter().enumerate() { if c.broken != None { broken_contexts.push(c.clone()); to_be_removed.push(i) } } for i in to_be_removed.iter().rev() { contexts.swap_remove(*i); } if let Some(c) = stored_context { //resetting the context if async for c in contexts { if let Some((i, _)) = c.broken { return Err(RuntimeError::RuntimeError { message: "break statement is never used because it's inside an arrow statement" .to_string(), info: i, }); } } contexts = c; } //try to merge contexts //if statement_index < statements.len() - 1 { loop { if !merge_contexts(&mut contexts, globals) { break; } } //} /*println!( "{} -> Compiled '{}' in {} milliseconds!", path, statement_type, start_time.elapsed().as_millis(), );*/ } //return values need longer lifetimes for (val, _) in &returns { globals .stored_values .increment_single_lifetime(*val, 1, &mut HashSet::new()); } globals.stored_values.decrement_lifetimes(); //collect garbage 
globals.stored_values.clean_up(); // put broken contexts back contexts.extend(broken_contexts); //(*globals).highest_x = context.x; Ok((contexts, returns)) } fn merge_impl(target: &mut Implementations, source: &Implementations) { for (key, imp) in source.iter() { match target.get_mut(key) { Some(target_imp) => (*target_imp).extend(imp.iter().map(|x| (x.0.clone(), *x.1))), None => { (*target).insert(*key, imp.clone()); } } } } pub fn import_module( path: &ImportType, context: &Context, globals: &mut Globals, info: CompilerInfo, forced: bool, ) -> Result<Returns, RuntimeError> { if !forced { if let Some(ret) = globals.prev_imports.get(path) { merge_impl(&mut globals.implementations, &ret.1); return Ok(smallvec![( store_value(ret.0.clone(), 1, globals, context), context.clone() )]); } } let mut module_path = match path { ImportType::Script(p) => globals .path .clone() .parent() .expect("Your file must be in a folder to import modules!") .join(&p), ImportType::Lib(name) => { let mut outpath = info.includes[0].clone(); let mut found = false; for path in &info.includes { if path.join("libraries").join(name).exists() { outpath = path.to_path_buf(); found = true; break; } } if found { outpath } else { return Err(RuntimeError::RuntimeError { message: "Unable to find library folder in given search paths".to_string(), info, }); } } //.parent() //ADD BACK BEFORE RELEASE //.unwrap() .join("libraries") .join(name), }; if module_path.is_dir() { module_path = module_path.join("lib.spwn"); } else if module_path.is_file() && module_path.extension().is_none() { module_path.set_extension("spwn"); } else if !module_path.is_file() { return Err(RuntimeError::RuntimeError { message: format!( "Couldn't find library file ({})", module_path.to_string_lossy() ), info, }); } let unparsed = match fs::read_to_string(&module_path) { Ok(content) => content, Err(e) => { return Err(RuntimeError::RuntimeError { message: format!( "Something went wrong when opening library file ({}): {}", 
module_path.to_string_lossy(), e ), info, }) } }; let (parsed, notes) = match crate::parse_spwn(unparsed, module_path.clone()) { Ok(p) => p, Err(err) => return Err(RuntimeError::PackageSyntaxError { err, info }), }; let mut start_context = Context::new(); let mut stored_impl = None; if let ImportType::Lib(_) = path { stored_impl = Some(globals.implementations.clone()); globals.implementations = HashMap::new(); } if !notes.tag.tags.iter().any(|x| x.0 == "no_std") { let standard_lib = import_module( &ImportType::Lib(STD_PATH.to_string()), &start_context, globals, info.clone(), false, )?; if standard_lib.len() != 1 { return Err(RuntimeError::RuntimeError { message: "The standard library can not split the context".to_string(), info, }); } start_context = standard_lib[0].1.clone(); if let Value::Dict(d) = &globals.stored_values[standard_lib[0].0] { start_context.variables.extend(d.clone()); } else { return Err(RuntimeError::RuntimeError { message: "The standard library must return a dictionary".to_string(), info, }); } } let stored_path = globals.path.clone(); (*globals).path = module_path.clone(); let mut new_info = info; new_info.current_file = module_path; new_info.pos = ((0, 0), (0, 0)); if let ImportType::Lib(l) = path { new_info.current_module = l.clone(); } let (contexts, mut returns) = compile_scope(&parsed, smallvec![start_context], globals, new_info)?; for c in &contexts { if let Some((i, BreakType::Loop)) = &c.broken { return Err(RuntimeError::RuntimeError { message: "break statement is never used".to_string(), info: i.clone(), }); } } (*globals).path = stored_path; if let Some(stored_impl) = stored_impl { //change and delete from impls let mut to_be_deleted = Vec::new(); for (k1, imp) in &mut globals.implementations { for (k2, (_, in_scope)) in imp { if *in_scope { (*in_scope) = false; } else { to_be_deleted.push((*k1, k2.clone())); } } } for (k1, k2) in to_be_deleted { (*globals).implementations.get_mut(&k1).unwrap().remove(&k2); } //merge impls 
merge_impl(&mut globals.implementations, &stored_impl); } let out = if returns.is_empty() { contexts .iter() .map(|x| { let mut new_context = x.clone(); new_context.variables = context.variables.clone(); (NULL_STORAGE, new_context) }) .collect() } else { for (_, c) in &mut returns { (*c).variables = context.variables.clone(); } returns }; if out.len() == 1 && &out[0].1 == context { let cloned = clone_and_get_value(out[0].0, 9999, globals, context, true); let s_impl = globals.implementations.clone(); globals.prev_imports.insert(path.clone(), (cloned, s_impl)); } Ok(out) } // const ID_MAX: u16 = 999; // pub fn next_free( // ids: &mut Vec<u16>, // id_class: ast::IDClass, // info: CompilerInfo, // ) -> Result<ID, RuntimeError> { // for i in 1..ID_MAX { // if !ids.contains(&i) { // (*ids).push(i); // return Ok(i); // } // } // Err(RuntimeError::IDError { id_class, info }) // //panic!("All ids of this type are used up!"); // }
39.208497
149
0.398503
de331469420118e7c00a168a11805a1bd8542fb8
1,051
#[doc = "Reader of register ADDR2"] pub type R = crate::R<u16, super::ADDR2>; #[doc = "Writer for register ADDR2"] pub type W = crate::W<u16, super::ADDR2>; #[doc = "Register ADDR2 `reset()`'s with value 0"] impl crate::ResetValue for super::ADDR2 { type Type = u16; #[inline(always)] fn reset_value() -> Self::Type { 0 } } #[doc = "Reader of field `VALUE`"] pub type VALUE_R = crate::R<u8, u8>; #[doc = "Write proxy for field `VALUE`"] pub struct VALUE_W<'a> { w: &'a mut W, } impl<'a> VALUE_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !0xff) | ((value as u16) & 0xff); self.w } } impl R { #[doc = "Bits 0:7 - Address Byte 2"] #[inline(always)] pub fn value(&self) -> VALUE_R { VALUE_R::new((self.bits & 0xff) as u8) } } impl W { #[doc = "Bits 0:7 - Address Byte 2"] #[inline(always)] pub fn value(&mut self) -> VALUE_W { VALUE_W { w: self } } }
25.634146
70
0.55471
6a715db7e31a7232dfffde03ac486fc143d37bb4
927
// Copyright 2020-2021 The Datafuse Authors. // // SPDX-License-Identifier: Apache-2.0. use std::sync::Arc; use common_datavalues::DataSchema; use common_datavalues::DataSchemaRef; use crate::Partitions; use crate::Statistics; #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] pub struct ReadDataSourcePlan { pub db: String, pub table: String, pub schema: DataSchemaRef, pub partitions: Partitions, pub statistics: Statistics, pub description: String } impl ReadDataSourcePlan { pub fn empty() -> ReadDataSourcePlan { ReadDataSourcePlan { db: "".to_string(), table: "".to_string(), schema: Arc::from(DataSchema::empty()), partitions: vec![], statistics: Statistics::default(), description: "".to_string() } } pub fn schema(&self) -> DataSchemaRef { self.schema.clone() } }
23.769231
61
0.632147
6280af3d6fa03e533e447182d549250646349c41
2,505
pub fn find(buffer: &[u8], kind: u8) -> Option<&[u8]> { let mut iter = buffer[8..].iter().enumerate(); let buffer_len = buffer.len(); loop { match iter.next() { Some((_, &len)) => match iter.next() { Some((i, potentialkind)) => { if potentialkind == &kind { if (8 + i) + len as usize > buffer_len { return None; } else { return Some(&buffer[9 + i..8 + i + len as usize]); } } else if len > 0 { for _ in 0..len - 1 { iter.next(); } } else { return None; } } _ => return None, }, None => return None, } } } pub fn extract_for_service(service: [u8; 2], data: &[u8]) -> Option<&[u8]> { if data.len() > 1 { if service[0] == data[0] && service[1] == data[1] { Some(&data[2..]) } else { None } } else { None } } #[cfg(test)] mod test { use crate::ble_parser::*; use crate::simple_ble::BUFFER_SIZE_SCAN; #[test] fn extracts_data_for_ids_correctly() { let mut buf = [0; BUFFER_SIZE_SCAN]; { let slice = &mut buf[8..23]; let data = &[ 0x02, 0x02, 0x01, 0x02, 0x01, 0x03, 0x03, 0x16, 0x01, 0x02, 0x04, 0xFF, 0x01, 0x02, 0x03, ]; slice.clone_from_slice(data); } assert_eq!(find(&buf, 0x02), Some(&[0x01][0..1])); assert_eq!(find(&buf, 0x01), Some(&[0x03][0..1])); assert_eq!(find(&buf, 0x16), Some(&[0x01, 0x02][0..2])); assert_eq!(find(&buf, 0xFF), Some(&[0x01, 0x02, 0x03][0..3])); } #[test] fn doesnt_panic_for_defect_packets() { let mut buf = [0; BUFFER_SIZE_SCAN]; { let slice = &mut buf[8..18]; let data = &[0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x0, 0x16, 0x01, 0x02]; slice.clone_from_slice(data); } } #[test] fn ignores_illegal_lengths_in_packets() { let mut buf = [0; 11]; { let slice = &mut buf[8..10]; let data = &[0x04, 0x02]; slice.clone_from_slice(data); } assert_eq!(find(&buf, 0xF2), None); } }
29.127907
99
0.422754
cc19383339b4bddd3b415f1252b38fa1a0c220e3
67,424
//! FIXME: write short doc here use std::{iter, sync::Arc}; use arrayvec::ArrayVec; use base_db::{CrateDisplayName, CrateId, Edition, FileId}; use either::Either; use hir_def::{ adt::{ReprKind, StructKind, VariantData}, builtin_type::BuiltinType, expr::{BindingAnnotation, LabelId, Pat, PatId}, import_map, item_tree::ItemTreeNode, lang_item::LangItemTarget, path::ModPath, per_ns::PerNs, resolver::{HasResolver, Resolver}, src::HasSource as _, type_ref::{Mutability, TypeRef}, AdtId, AssocContainerId, AssocItemId, AssocItemLoc, AttrDefId, ConstId, ConstParamId, DefWithBodyId, EnumId, FunctionId, GenericDefId, HasModule, ImplId, LifetimeParamId, LocalEnumVariantId, LocalFieldId, LocalModuleId, Lookup, ModuleId, StaticId, StructId, TraitId, TypeAliasId, TypeParamId, UnionId, }; use hir_def::{find_path::PrefixKind, item_scope::ItemInNs, visibility::Visibility}; use hir_expand::{ diagnostics::DiagnosticSink, name::{name, AsName}, MacroDefId, MacroDefKind, }; use hir_ty::{ autoderef, display::{write_bounds_like_dyn_trait, HirDisplayError, HirFormatter}, method_resolution, traits::{FnTrait, Solution, SolutionVariables}, ApplicationTy, BoundVar, CallableDefId, Canonical, DebruijnIndex, FnSig, GenericPredicate, InEnvironment, Obligation, ProjectionPredicate, ProjectionTy, Substs, TraitEnvironment, Ty, TyDefId, TyKind, TypeCtor, }; use rustc_hash::FxHashSet; use stdx::{format_to, impl_from}; use syntax::{ ast::{self, AttrsOwner, NameOwner}, AstNode, SmolStr, }; use tt::{Ident, Leaf, Literal, TokenTree}; use crate::{ db::{DefDatabase, HirDatabase}, has_source::HasSource, HirDisplay, InFile, Name, }; /// hir::Crate describes a single crate. It's the main interface with which /// a crate's dependencies interact. Mostly, it should be just a proxy for the /// root module. 
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub struct Crate { pub(crate) id: CrateId, } #[derive(Debug)] pub struct CrateDependency { pub krate: Crate, pub name: Name, } impl Crate { pub fn dependencies(self, db: &dyn HirDatabase) -> Vec<CrateDependency> { db.crate_graph()[self.id] .dependencies .iter() .map(|dep| { let krate = Crate { id: dep.crate_id }; let name = dep.as_name(); CrateDependency { krate, name } }) .collect() } // FIXME: add `transitive_reverse_dependencies`. pub fn reverse_dependencies(self, db: &dyn HirDatabase) -> Vec<Crate> { let crate_graph = db.crate_graph(); crate_graph .iter() .filter(|&krate| { crate_graph[krate].dependencies.iter().any(|it| it.crate_id == self.id) }) .map(|id| Crate { id }) .collect() } pub fn root_module(self, db: &dyn HirDatabase) -> Module { let module_id = db.crate_def_map(self.id).root; Module::new(self, module_id) } pub fn root_file(self, db: &dyn HirDatabase) -> FileId { db.crate_graph()[self.id].root_file_id } pub fn edition(self, db: &dyn HirDatabase) -> Edition { db.crate_graph()[self.id].edition } pub fn display_name(self, db: &dyn HirDatabase) -> Option<CrateDisplayName> { db.crate_graph()[self.id].display_name.clone() } pub fn query_external_importables( self, db: &dyn DefDatabase, query: import_map::Query, ) -> impl Iterator<Item = Either<ModuleDef, MacroDef>> { import_map::search_dependencies(db, self.into(), query).into_iter().map(|item| match item { ItemInNs::Types(mod_id) | ItemInNs::Values(mod_id) => Either::Left(mod_id.into()), ItemInNs::Macros(mac_id) => Either::Right(mac_id.into()), }) } pub fn all(db: &dyn HirDatabase) -> Vec<Crate> { db.crate_graph().iter().map(|id| Crate { id }).collect() } /// Try to get the root URL of the documentation of a crate. 
pub fn get_html_root_url(self: &Crate, db: &dyn HirDatabase) -> Option<String> { // Look for #![doc(html_root_url = "...")] let attrs = db.attrs(AttrDefId::ModuleId(self.root_module(db).into())); let doc_attr_q = attrs.by_key("doc"); if !doc_attr_q.exists() { return None; } let doc_url = doc_attr_q.tt_values().map(|tt| { let name = tt.token_trees.iter() .skip_while(|tt| !matches!(tt, TokenTree::Leaf(Leaf::Ident(Ident{text: ref ident, ..})) if ident == "html_root_url")) .skip(2) .next(); match name { Some(TokenTree::Leaf(Leaf::Literal(Literal{ref text, ..}))) => Some(text), _ => None } }).flat_map(|t| t).next(); doc_url.map(|s| s.trim_matches('"').trim_end_matches('/').to_owned() + "/") } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub struct Module { pub(crate) id: ModuleId, } /// The defs which can be visible in the module. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum ModuleDef { Module(Module), Function(Function), Adt(Adt), // Can't be directly declared, but can be imported. 
Variant(Variant), Const(Const), Static(Static), Trait(Trait), TypeAlias(TypeAlias), BuiltinType(BuiltinType), } impl_from!( Module, Function, Adt(Struct, Enum, Union), Variant, Const, Static, Trait, TypeAlias, BuiltinType for ModuleDef ); impl From<VariantDef> for ModuleDef { fn from(var: VariantDef) -> Self { match var { VariantDef::Struct(t) => Adt::from(t).into(), VariantDef::Union(t) => Adt::from(t).into(), VariantDef::Variant(t) => t.into(), } } } impl ModuleDef { pub fn module(self, db: &dyn HirDatabase) -> Option<Module> { match self { ModuleDef::Module(it) => it.parent(db), ModuleDef::Function(it) => Some(it.module(db)), ModuleDef::Adt(it) => Some(it.module(db)), ModuleDef::Variant(it) => Some(it.module(db)), ModuleDef::Const(it) => Some(it.module(db)), ModuleDef::Static(it) => Some(it.module(db)), ModuleDef::Trait(it) => Some(it.module(db)), ModuleDef::TypeAlias(it) => Some(it.module(db)), ModuleDef::BuiltinType(_) => None, } } pub fn canonical_path(&self, db: &dyn HirDatabase) -> Option<String> { let mut segments = Vec::new(); segments.push(self.name(db)?.to_string()); for m in self.module(db)?.path_to_root(db) { segments.extend(m.name(db).map(|it| it.to_string())) } segments.reverse(); Some(segments.join("::")) } pub fn definition_visibility(&self, db: &dyn HirDatabase) -> Option<Visibility> { let module = match self { ModuleDef::Module(it) => it.parent(db)?, ModuleDef::Function(it) => return Some(it.visibility(db)), ModuleDef::Adt(it) => it.module(db), ModuleDef::Variant(it) => { let parent = it.parent_enum(db); let module = it.module(db); return module.visibility_of(db, &ModuleDef::Adt(Adt::Enum(parent))); } ModuleDef::Const(it) => return Some(it.visibility(db)), ModuleDef::Static(it) => it.module(db), ModuleDef::Trait(it) => it.module(db), ModuleDef::TypeAlias(it) => return Some(it.visibility(db)), ModuleDef::BuiltinType(_) => return None, }; module.visibility_of(db, self) } pub fn name(self, db: &dyn HirDatabase) -> Option<Name> { match self { 
ModuleDef::Adt(it) => Some(it.name(db)), ModuleDef::Trait(it) => Some(it.name(db)), ModuleDef::Function(it) => Some(it.name(db)), ModuleDef::Variant(it) => Some(it.name(db)), ModuleDef::TypeAlias(it) => Some(it.name(db)), ModuleDef::Module(it) => it.name(db), ModuleDef::Const(it) => it.name(db), ModuleDef::Static(it) => it.name(db), ModuleDef::BuiltinType(it) => Some(it.as_name()), } } pub fn diagnostics(self, db: &dyn HirDatabase, sink: &mut DiagnosticSink) { let id = match self { ModuleDef::Adt(it) => match it { Adt::Struct(it) => it.id.into(), Adt::Enum(it) => it.id.into(), Adt::Union(it) => it.id.into(), }, ModuleDef::Trait(it) => it.id.into(), ModuleDef::Function(it) => it.id.into(), ModuleDef::TypeAlias(it) => it.id.into(), ModuleDef::Module(it) => it.id.into(), ModuleDef::Const(it) => it.id.into(), ModuleDef::Static(it) => it.id.into(), _ => return, }; let module = match self.module(db) { Some(it) => it, None => return, }; hir_ty::diagnostics::validate_module_item(db, module.id.krate, id, sink) } } impl Module { pub(crate) fn new(krate: Crate, crate_module_id: LocalModuleId) -> Module { Module { id: ModuleId { krate: krate.id, local_id: crate_module_id } } } /// Name of this module. pub fn name(self, db: &dyn HirDatabase) -> Option<Name> { let def_map = db.crate_def_map(self.id.krate); let parent = def_map[self.id.local_id].parent?; def_map[parent].children.iter().find_map(|(name, module_id)| { if *module_id == self.id.local_id { Some(name.clone()) } else { None } }) } /// Returns the crate this module is part of. pub fn krate(self) -> Crate { Crate { id: self.id.krate } } /// Topmost parent of this module. Every module has a `crate_root`, but some /// might be missing `krate`. This can happen if a module's file is not included /// in the module tree of any target in `Cargo.toml`. pub fn crate_root(self, db: &dyn HirDatabase) -> Module { let def_map = db.crate_def_map(self.id.krate); self.with_module_id(def_map.root) } /// Iterates over all child modules. 
pub fn children(self, db: &dyn HirDatabase) -> impl Iterator<Item = Module> { let def_map = db.crate_def_map(self.id.krate); let children = def_map[self.id.local_id] .children .iter() .map(|(_, module_id)| self.with_module_id(*module_id)) .collect::<Vec<_>>(); children.into_iter() } /// Finds a parent module. pub fn parent(self, db: &dyn HirDatabase) -> Option<Module> { let def_map = db.crate_def_map(self.id.krate); let parent_id = def_map[self.id.local_id].parent?; Some(self.with_module_id(parent_id)) } pub fn path_to_root(self, db: &dyn HirDatabase) -> Vec<Module> { let mut res = vec![self]; let mut curr = self; while let Some(next) = curr.parent(db) { res.push(next); curr = next } res } /// Returns a `ModuleScope`: a set of items, visible in this module. pub fn scope( self, db: &dyn HirDatabase, visible_from: Option<Module>, ) -> Vec<(Name, ScopeDef)> { db.crate_def_map(self.id.krate)[self.id.local_id] .scope .entries() .filter_map(|(name, def)| { if let Some(m) = visible_from { let filtered = def.filter_visibility(|vis| vis.is_visible_from(db.upcast(), m.id)); if filtered.is_none() && !def.is_none() { None } else { Some((name, filtered)) } } else { Some((name, def)) } }) .flat_map(|(name, def)| { ScopeDef::all_items(def).into_iter().map(move |item| (name.clone(), item)) }) .collect() } pub fn visibility_of(self, db: &dyn HirDatabase, def: &ModuleDef) -> Option<Visibility> { db.crate_def_map(self.id.krate)[self.id.local_id].scope.visibility_of(def.clone().into()) } pub fn diagnostics(self, db: &dyn HirDatabase, sink: &mut DiagnosticSink) { let _p = profile::span("Module::diagnostics").detail(|| { format!("{:?}", self.name(db).map_or("<unknown>".into(), |name| name.to_string())) }); let crate_def_map = db.crate_def_map(self.id.krate); crate_def_map.add_diagnostics(db.upcast(), self.id.local_id, sink); for decl in self.declarations(db) { match decl { crate::ModuleDef::Function(f) => f.diagnostics(db, sink), crate::ModuleDef::Module(m) => { // Only add diagnostics 
from inline modules if crate_def_map[m.id.local_id].origin.is_inline() { m.diagnostics(db, sink) } } _ => { decl.diagnostics(db, sink); } } } for impl_def in self.impl_defs(db) { for item in impl_def.items(db) { if let AssocItem::Function(f) = item { f.diagnostics(db, sink); } } } } pub fn declarations(self, db: &dyn HirDatabase) -> Vec<ModuleDef> { let def_map = db.crate_def_map(self.id.krate); def_map[self.id.local_id].scope.declarations().map(ModuleDef::from).collect() } pub fn impl_defs(self, db: &dyn HirDatabase) -> Vec<Impl> { let def_map = db.crate_def_map(self.id.krate); def_map[self.id.local_id].scope.impls().map(Impl::from).collect() } pub(crate) fn with_module_id(self, module_id: LocalModuleId) -> Module { Module::new(self.krate(), module_id) } /// Finds a path that can be used to refer to the given item from within /// this module, if possible. pub fn find_use_path(self, db: &dyn DefDatabase, item: impl Into<ItemInNs>) -> Option<ModPath> { hir_def::find_path::find_path(db, item.into(), self.into()) } /// Finds a path that can be used to refer to the given item from within /// this module, if possible. This is used for returning import paths for use-statements. pub fn find_use_path_prefixed( self, db: &dyn DefDatabase, item: impl Into<ItemInNs>, prefix_kind: PrefixKind, ) -> Option<ModPath> { hir_def::find_path::find_path_prefixed(db, item.into(), self.into(), prefix_kind) } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub struct Field { pub(crate) parent: VariantDef, pub(crate) id: LocalFieldId, } #[derive(Debug, PartialEq, Eq)] pub enum FieldSource { Named(ast::RecordField), Pos(ast::TupleField), } impl Field { pub fn name(&self, db: &dyn HirDatabase) -> Name { self.parent.variant_data(db).fields()[self.id].name.clone() } /// Returns the type as in the signature of the struct (i.e., with /// placeholder types for type parameters). 
This is good for showing /// signature help, but not so good to actually get the type of the field /// when you actually have a variable of the struct. pub fn signature_ty(&self, db: &dyn HirDatabase) -> Type { let var_id = self.parent.into(); let generic_def_id: GenericDefId = match self.parent { VariantDef::Struct(it) => it.id.into(), VariantDef::Union(it) => it.id.into(), VariantDef::Variant(it) => it.parent.id.into(), }; let substs = Substs::type_params(db, generic_def_id); let ty = db.field_types(var_id)[self.id].clone().subst(&substs); Type::new(db, self.parent.module(db).id.krate, var_id, ty) } pub fn parent_def(&self, _db: &dyn HirDatabase) -> VariantDef { self.parent } } impl HasVisibility for Field { fn visibility(&self, db: &dyn HirDatabase) -> Visibility { let variant_data = self.parent.variant_data(db); let visibility = &variant_data.fields()[self.id].visibility; let parent_id: hir_def::VariantId = self.parent.into(); visibility.resolve(db.upcast(), &parent_id.resolver(db.upcast())) } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub struct Struct { pub(crate) id: StructId, } impl Struct { pub fn module(self, db: &dyn HirDatabase) -> Module { Module { id: self.id.lookup(db.upcast()).container.module(db.upcast()) } } pub fn krate(self, db: &dyn HirDatabase) -> Option<Crate> { Some(self.module(db).krate()) } pub fn name(self, db: &dyn HirDatabase) -> Name { db.struct_data(self.id).name.clone() } pub fn fields(self, db: &dyn HirDatabase) -> Vec<Field> { db.struct_data(self.id) .variant_data .fields() .iter() .map(|(id, _)| Field { parent: self.into(), id }) .collect() } pub fn ty(self, db: &dyn HirDatabase) -> Type { Type::from_def(db, self.id.lookup(db.upcast()).container.module(db.upcast()).krate, self.id) } pub fn repr(self, db: &dyn HirDatabase) -> Option<ReprKind> { db.struct_data(self.id).repr.clone() } pub fn kind(self, db: &dyn HirDatabase) -> StructKind { self.variant_data(db).kind() } fn variant_data(self, db: &dyn HirDatabase) -> 
Arc<VariantData> { db.struct_data(self.id).variant_data.clone() } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub struct Union { pub(crate) id: UnionId, } impl Union { pub fn name(self, db: &dyn HirDatabase) -> Name { db.union_data(self.id).name.clone() } pub fn module(self, db: &dyn HirDatabase) -> Module { Module { id: self.id.lookup(db.upcast()).container.module(db.upcast()) } } pub fn ty(self, db: &dyn HirDatabase) -> Type { Type::from_def(db, self.id.lookup(db.upcast()).container.module(db.upcast()).krate, self.id) } pub fn fields(self, db: &dyn HirDatabase) -> Vec<Field> { db.union_data(self.id) .variant_data .fields() .iter() .map(|(id, _)| Field { parent: self.into(), id }) .collect() } fn variant_data(self, db: &dyn HirDatabase) -> Arc<VariantData> { db.union_data(self.id).variant_data.clone() } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub struct Enum { pub(crate) id: EnumId, } impl Enum { pub fn module(self, db: &dyn HirDatabase) -> Module { Module { id: self.id.lookup(db.upcast()).container.module(db.upcast()) } } pub fn krate(self, db: &dyn HirDatabase) -> Option<Crate> { Some(self.module(db).krate()) } pub fn name(self, db: &dyn HirDatabase) -> Name { db.enum_data(self.id).name.clone() } pub fn variants(self, db: &dyn HirDatabase) -> Vec<Variant> { db.enum_data(self.id).variants.iter().map(|(id, _)| Variant { parent: self, id }).collect() } pub fn ty(self, db: &dyn HirDatabase) -> Type { Type::from_def(db, self.id.lookup(db.upcast()).container.module(db.upcast()).krate, self.id) } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub struct Variant { pub(crate) parent: Enum, pub(crate) id: LocalEnumVariantId, } impl Variant { pub fn module(self, db: &dyn HirDatabase) -> Module { self.parent.module(db) } pub fn parent_enum(self, _db: &dyn HirDatabase) -> Enum { self.parent } pub fn name(self, db: &dyn HirDatabase) -> Name { db.enum_data(self.parent.id).variants[self.id].name.clone() } pub fn fields(self, db: &dyn HirDatabase) -> 
Vec<Field> { self.variant_data(db) .fields() .iter() .map(|(id, _)| Field { parent: self.into(), id }) .collect() } pub fn kind(self, db: &dyn HirDatabase) -> StructKind { self.variant_data(db).kind() } pub(crate) fn variant_data(self, db: &dyn HirDatabase) -> Arc<VariantData> { db.enum_data(self.parent.id).variants[self.id].variant_data.clone() } } /// A Data Type #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub enum Adt { Struct(Struct), Union(Union), Enum(Enum), } impl_from!(Struct, Union, Enum for Adt); impl Adt { pub fn has_non_default_type_params(self, db: &dyn HirDatabase) -> bool { let subst = db.generic_defaults(self.into()); subst.iter().any(|ty| &ty.value == &Ty::Unknown) } /// Turns this ADT into a type. Any type parameters of the ADT will be /// turned into unknown types, which is good for e.g. finding the most /// general set of completions, but will not look very nice when printed. pub fn ty(self, db: &dyn HirDatabase) -> Type { let id = AdtId::from(self); Type::from_def(db, id.module(db.upcast()).krate, id) } pub fn module(self, db: &dyn HirDatabase) -> Module { match self { Adt::Struct(s) => s.module(db), Adt::Union(s) => s.module(db), Adt::Enum(e) => e.module(db), } } pub fn krate(self, db: &dyn HirDatabase) -> Option<Crate> { Some(self.module(db).krate()) } pub fn name(self, db: &dyn HirDatabase) -> Name { match self { Adt::Struct(s) => s.name(db), Adt::Union(u) => u.name(db), Adt::Enum(e) => e.name(db), } } } #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub enum VariantDef { Struct(Struct), Union(Union), Variant(Variant), } impl_from!(Struct, Union, Variant for VariantDef); impl VariantDef { pub fn fields(self, db: &dyn HirDatabase) -> Vec<Field> { match self { VariantDef::Struct(it) => it.fields(db), VariantDef::Union(it) => it.fields(db), VariantDef::Variant(it) => it.fields(db), } } pub fn module(self, db: &dyn HirDatabase) -> Module { match self { VariantDef::Struct(it) => it.module(db), VariantDef::Union(it) => it.module(db), 
VariantDef::Variant(it) => it.module(db), } } pub fn name(&self, db: &dyn HirDatabase) -> Name { match self { VariantDef::Struct(s) => s.name(db), VariantDef::Union(u) => u.name(db), VariantDef::Variant(e) => e.name(db), } } pub(crate) fn variant_data(self, db: &dyn HirDatabase) -> Arc<VariantData> { match self { VariantDef::Struct(it) => it.variant_data(db), VariantDef::Union(it) => it.variant_data(db), VariantDef::Variant(it) => it.variant_data(db), } } } /// The defs which have a body. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum DefWithBody { Function(Function), Static(Static), Const(Const), } impl_from!(Function, Const, Static for DefWithBody); impl DefWithBody { pub fn module(self, db: &dyn HirDatabase) -> Module { match self { DefWithBody::Const(c) => c.module(db), DefWithBody::Function(f) => f.module(db), DefWithBody::Static(s) => s.module(db), } } pub fn name(self, db: &dyn HirDatabase) -> Option<Name> { match self { DefWithBody::Function(f) => Some(f.name(db)), DefWithBody::Static(s) => s.name(db), DefWithBody::Const(c) => c.name(db), } } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub struct Function { pub(crate) id: FunctionId, } impl Function { pub fn module(self, db: &dyn HirDatabase) -> Module { self.id.lookup(db.upcast()).module(db.upcast()).into() } pub fn krate(self, db: &dyn HirDatabase) -> Option<Crate> { Some(self.module(db).krate()) } pub fn name(self, db: &dyn HirDatabase) -> Name { db.function_data(self.id).name.clone() } /// Get this function's return type pub fn ret_type(self, db: &dyn HirDatabase) -> Type { let resolver = self.id.resolver(db.upcast()); let ret_type = &db.function_data(self.id).ret_type; let ctx = hir_ty::TyLoweringContext::new(db, &resolver); let environment = TraitEnvironment::lower(db, &resolver); Type { krate: self.id.lookup(db.upcast()).container.module(db.upcast()).krate, ty: InEnvironment { value: Ty::from_hir_ext(&ctx, ret_type).0, environment }, } } pub fn self_param(self, db: &dyn 
HirDatabase) -> Option<SelfParam> { if !db.function_data(self.id).has_self_param { return None; } Some(SelfParam { func: self.id }) } pub fn assoc_fn_params(self, db: &dyn HirDatabase) -> Vec<Param> { let resolver = self.id.resolver(db.upcast()); let ctx = hir_ty::TyLoweringContext::new(db, &resolver); let environment = TraitEnvironment::lower(db, &resolver); db.function_data(self.id) .params .iter() .map(|type_ref| { let ty = Type { krate: self.id.lookup(db.upcast()).container.module(db.upcast()).krate, ty: InEnvironment { value: Ty::from_hir_ext(&ctx, type_ref).0, environment: environment.clone(), }, }; Param { ty } }) .collect() } pub fn method_params(self, db: &dyn HirDatabase) -> Option<Vec<Param>> { if self.self_param(db).is_none() { return None; } let mut res = self.assoc_fn_params(db); res.remove(0); Some(res) } pub fn is_unsafe(self, db: &dyn HirDatabase) -> bool { db.function_data(self.id).is_unsafe } pub fn diagnostics(self, db: &dyn HirDatabase, sink: &mut DiagnosticSink) { let krate = self.module(db).id.krate; hir_def::diagnostics::validate_body(db.upcast(), self.id.into(), sink); hir_ty::diagnostics::validate_module_item(db, krate, self.id.into(), sink); hir_ty::diagnostics::validate_body(db, self.id.into(), sink); } /// Whether this function declaration has a definition. /// /// This is false in the case of required (not provided) trait methods. pub fn has_body(self, db: &dyn HirDatabase) -> bool { db.function_data(self.id).has_body } /// A textual representation of the HIR of this function for debugging purposes. pub fn debug_hir(self, db: &dyn HirDatabase) -> String { let body = db.body(self.id.into()); let mut result = String::new(); format_to!(result, "HIR expressions in the body of `{}`:\n", self.name(db)); for (id, expr) in body.exprs.iter() { format_to!(result, "{:?}: {:?}\n", id, expr); } result } } // Note: logically, this belongs to `hir_ty`, but we are not using it there yet. 
pub enum Access { Shared, Exclusive, Owned, } impl From<Mutability> for Access { fn from(mutability: Mutability) -> Access { match mutability { Mutability::Shared => Access::Shared, Mutability::Mut => Access::Exclusive, } } } #[derive(Debug)] pub struct Param { ty: Type, } impl Param { pub fn ty(&self) -> &Type { &self.ty } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub struct SelfParam { func: FunctionId, } impl SelfParam { pub fn access(self, db: &dyn HirDatabase) -> Access { let func_data = db.function_data(self.func); func_data .params .first() .map(|param| match *param { TypeRef::Reference(.., mutability) => mutability.into(), _ => Access::Owned, }) .unwrap_or(Access::Owned) } } impl HasVisibility for Function { fn visibility(&self, db: &dyn HirDatabase) -> Visibility { let function_data = db.function_data(self.id); let visibility = &function_data.visibility; visibility.resolve(db.upcast(), &self.id.resolver(db.upcast())) } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub struct Const { pub(crate) id: ConstId, } impl Const { pub fn module(self, db: &dyn HirDatabase) -> Module { Module { id: self.id.lookup(db.upcast()).module(db.upcast()) } } pub fn krate(self, db: &dyn HirDatabase) -> Option<Crate> { Some(self.module(db).krate()) } pub fn name(self, db: &dyn HirDatabase) -> Option<Name> { db.const_data(self.id).name.clone() } } impl HasVisibility for Const { fn visibility(&self, db: &dyn HirDatabase) -> Visibility { let function_data = db.const_data(self.id); let visibility = &function_data.visibility; visibility.resolve(db.upcast(), &self.id.resolver(db.upcast())) } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub struct Static { pub(crate) id: StaticId, } impl Static { pub fn module(self, db: &dyn HirDatabase) -> Module { Module { id: self.id.lookup(db.upcast()).module(db.upcast()) } } pub fn krate(self, db: &dyn HirDatabase) -> Option<Crate> { Some(self.module(db).krate()) } pub fn name(self, db: &dyn HirDatabase) -> Option<Name> { 
db.static_data(self.id).name.clone() } pub fn is_mut(self, db: &dyn HirDatabase) -> bool { db.static_data(self.id).mutable } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub struct Trait { pub(crate) id: TraitId, } impl Trait { pub fn module(self, db: &dyn HirDatabase) -> Module { Module { id: self.id.lookup(db.upcast()).container.module(db.upcast()) } } pub fn name(self, db: &dyn HirDatabase) -> Name { db.trait_data(self.id).name.clone() } pub fn items(self, db: &dyn HirDatabase) -> Vec<AssocItem> { db.trait_data(self.id).items.iter().map(|(_name, it)| (*it).into()).collect() } pub fn is_auto(self, db: &dyn HirDatabase) -> bool { db.trait_data(self.id).auto } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub struct TypeAlias { pub(crate) id: TypeAliasId, } impl TypeAlias { pub fn has_non_default_type_params(self, db: &dyn HirDatabase) -> bool { let subst = db.generic_defaults(self.id.into()); subst.iter().any(|ty| &ty.value == &Ty::Unknown) } pub fn module(self, db: &dyn HirDatabase) -> Module { Module { id: self.id.lookup(db.upcast()).module(db.upcast()) } } pub fn krate(self, db: &dyn HirDatabase) -> Option<Crate> { Some(self.module(db).krate()) } pub fn type_ref(self, db: &dyn HirDatabase) -> Option<TypeRef> { db.type_alias_data(self.id).type_ref.clone() } pub fn ty(self, db: &dyn HirDatabase) -> Type { Type::from_def(db, self.id.lookup(db.upcast()).module(db.upcast()).krate, self.id) } pub fn name(self, db: &dyn HirDatabase) -> Name { db.type_alias_data(self.id).name.clone() } } impl HasVisibility for TypeAlias { fn visibility(&self, db: &dyn HirDatabase) -> Visibility { let function_data = db.type_alias_data(self.id); let visibility = &function_data.visibility; visibility.resolve(db.upcast(), &self.id.resolver(db.upcast())) } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub struct MacroDef { pub(crate) id: MacroDefId, } impl MacroDef { /// FIXME: right now, this just returns the root module of the crate that /// defines this macro. 
The reasons for this is that macros are expanded /// early, in `hir_expand`, where modules simply do not exist yet. pub fn module(self, db: &dyn HirDatabase) -> Option<Module> { let krate = self.id.krate; let module_id = db.crate_def_map(krate).root; Some(Module::new(Crate { id: krate }, module_id)) } /// XXX: this parses the file pub fn name(self, db: &dyn HirDatabase) -> Option<Name> { self.source(db)?.value.name().map(|it| it.as_name()) } /// Indicate it is a proc-macro pub fn is_proc_macro(&self) -> bool { matches!(self.id.kind, MacroDefKind::ProcMacro(_)) } /// Indicate it is a derive macro pub fn is_derive_macro(&self) -> bool { matches!(self.id.kind, MacroDefKind::ProcMacro(_) | MacroDefKind::BuiltInDerive(_)) } } /// Invariant: `inner.as_assoc_item(db).is_some()` /// We do not actively enforce this invariant. #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub enum AssocItem { Function(Function), Const(Const), TypeAlias(TypeAlias), } pub enum AssocItemContainer { Trait(Trait), Impl(Impl), } pub trait AsAssocItem { fn as_assoc_item(self, db: &dyn HirDatabase) -> Option<AssocItem>; } impl AsAssocItem for Function { fn as_assoc_item(self, db: &dyn HirDatabase) -> Option<AssocItem> { as_assoc_item(db, AssocItem::Function, self.id) } } impl AsAssocItem for Const { fn as_assoc_item(self, db: &dyn HirDatabase) -> Option<AssocItem> { as_assoc_item(db, AssocItem::Const, self.id) } } impl AsAssocItem for TypeAlias { fn as_assoc_item(self, db: &dyn HirDatabase) -> Option<AssocItem> { as_assoc_item(db, AssocItem::TypeAlias, self.id) } } fn as_assoc_item<ID, DEF, CTOR, AST>(db: &dyn HirDatabase, ctor: CTOR, id: ID) -> Option<AssocItem> where ID: Lookup<Data = AssocItemLoc<AST>>, DEF: From<ID>, CTOR: FnOnce(DEF) -> AssocItem, AST: ItemTreeNode, { match id.lookup(db.upcast()).container { AssocContainerId::TraitId(_) | AssocContainerId::ImplId(_) => Some(ctor(DEF::from(id))), AssocContainerId::ContainerId(_) => None, } } impl AssocItem { pub fn name(self, db: &dyn 
HirDatabase) -> Option<Name> { match self { AssocItem::Function(it) => Some(it.name(db)), AssocItem::Const(it) => it.name(db), AssocItem::TypeAlias(it) => Some(it.name(db)), } } pub fn module(self, db: &dyn HirDatabase) -> Module { match self { AssocItem::Function(f) => f.module(db), AssocItem::Const(c) => c.module(db), AssocItem::TypeAlias(t) => t.module(db), } } pub fn container(self, db: &dyn HirDatabase) -> AssocItemContainer { let container = match self { AssocItem::Function(it) => it.id.lookup(db.upcast()).container, AssocItem::Const(it) => it.id.lookup(db.upcast()).container, AssocItem::TypeAlias(it) => it.id.lookup(db.upcast()).container, }; match container { AssocContainerId::TraitId(id) => AssocItemContainer::Trait(id.into()), AssocContainerId::ImplId(id) => AssocItemContainer::Impl(id.into()), AssocContainerId::ContainerId(_) => panic!("invalid AssocItem"), } } } impl HasVisibility for AssocItem { fn visibility(&self, db: &dyn HirDatabase) -> Visibility { match self { AssocItem::Function(f) => f.visibility(db), AssocItem::Const(c) => c.visibility(db), AssocItem::TypeAlias(t) => t.visibility(db), } } } #[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)] pub enum GenericDef { Function(Function), Adt(Adt), Trait(Trait), TypeAlias(TypeAlias), Impl(Impl), // enum variants cannot have generics themselves, but their parent enums // can, and this makes some code easier to write Variant(Variant), // consts can have type parameters from their parents (i.e. 
associated consts of traits) Const(Const), } impl_from!( Function, Adt(Struct, Enum, Union), Trait, TypeAlias, Impl, Variant, Const for GenericDef ); impl GenericDef { pub fn params(self, db: &dyn HirDatabase) -> Vec<GenericParam> { let generics = db.generic_params(self.into()); let ty_params = generics .types .iter() .map(|(local_id, _)| TypeParam { id: TypeParamId { parent: self.into(), local_id } }) .map(GenericParam::TypeParam); let lt_params = generics .lifetimes .iter() .map(|(local_id, _)| LifetimeParam { id: LifetimeParamId { parent: self.into(), local_id }, }) .map(GenericParam::LifetimeParam); let const_params = generics .consts .iter() .map(|(local_id, _)| ConstParam { id: ConstParamId { parent: self.into(), local_id } }) .map(GenericParam::ConstParam); ty_params.chain(lt_params).chain(const_params).collect() } pub fn type_params(self, db: &dyn HirDatabase) -> Vec<TypeParam> { let generics = db.generic_params(self.into()); generics .types .iter() .map(|(local_id, _)| TypeParam { id: TypeParamId { parent: self.into(), local_id } }) .collect() } } #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub struct Local { pub(crate) parent: DefWithBodyId, pub(crate) pat_id: PatId, } impl Local { pub fn is_param(self, db: &dyn HirDatabase) -> bool { let src = self.source(db); match src.value { Either::Left(bind_pat) => { bind_pat.syntax().ancestors().any(|it| ast::Param::can_cast(it.kind())) } Either::Right(_self_param) => true, } } // FIXME: why is this an option? It shouldn't be? pub fn name(self, db: &dyn HirDatabase) -> Option<Name> { let body = db.body(self.parent.into()); match &body[self.pat_id] { Pat::Bind { name, .. } => Some(name.clone()), _ => None, } } pub fn is_self(self, db: &dyn HirDatabase) -> bool { self.name(db) == Some(name![self]) } pub fn is_mut(self, db: &dyn HirDatabase) -> bool { let body = db.body(self.parent.into()); match &body[self.pat_id] { Pat::Bind { mode, .. 
} => match mode { BindingAnnotation::Mutable | BindingAnnotation::RefMut => true, _ => false, }, _ => false, } } pub fn parent(self, _db: &dyn HirDatabase) -> DefWithBody { self.parent.into() } pub fn module(self, db: &dyn HirDatabase) -> Module { self.parent(db).module(db) } pub fn ty(self, db: &dyn HirDatabase) -> Type { let def = DefWithBodyId::from(self.parent); let infer = db.infer(def); let ty = infer[self.pat_id].clone(); let krate = def.module(db.upcast()).krate; Type::new(db, krate, def, ty) } pub fn source(self, db: &dyn HirDatabase) -> InFile<Either<ast::IdentPat, ast::SelfParam>> { let (_body, source_map) = db.body_with_source_map(self.parent.into()); let src = source_map.pat_syntax(self.pat_id).unwrap(); // Hmm... let root = src.file_syntax(db.upcast()); src.map(|ast| { ast.map_left(|it| it.cast().unwrap().to_node(&root)).map_right(|it| it.to_node(&root)) }) } } #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub struct Label { pub(crate) parent: DefWithBodyId, pub(crate) label_id: LabelId, } impl Label { pub fn module(self, db: &dyn HirDatabase) -> Module { self.parent(db).module(db) } pub fn parent(self, _db: &dyn HirDatabase) -> DefWithBody { self.parent.into() } pub fn name(self, db: &dyn HirDatabase) -> Name { let body = db.body(self.parent.into()); body[self.label_id].name.clone() } pub fn source(self, db: &dyn HirDatabase) -> InFile<ast::Label> { let (_body, source_map) = db.body_with_source_map(self.parent.into()); let src = source_map.label_syntax(self.label_id); let root = src.file_syntax(db.upcast()); src.map(|ast| ast.to_node(&root)) } } #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub enum GenericParam { TypeParam(TypeParam), LifetimeParam(LifetimeParam), ConstParam(ConstParam), } impl_from!(TypeParam, LifetimeParam, ConstParam for GenericParam); #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub struct TypeParam { pub(crate) id: TypeParamId, } impl TypeParam { pub fn name(self, db: &dyn HirDatabase) -> Name { let params = 
db.generic_params(self.id.parent); params.types[self.id.local_id].name.clone().unwrap_or_else(Name::missing) } pub fn module(self, db: &dyn HirDatabase) -> Module { self.id.parent.module(db.upcast()).into() } pub fn ty(self, db: &dyn HirDatabase) -> Type { let resolver = self.id.parent.resolver(db.upcast()); let environment = TraitEnvironment::lower(db, &resolver); let ty = Ty::Placeholder(self.id); Type { krate: self.id.parent.module(db.upcast()).krate, ty: InEnvironment { value: ty, environment }, } } pub fn trait_bounds(self, db: &dyn HirDatabase) -> Vec<Trait> { db.generic_predicates_for_param(self.id) .into_iter() .filter_map(|pred| match &pred.value { hir_ty::GenericPredicate::Implemented(trait_ref) => { Some(Trait::from(trait_ref.trait_)) } _ => None, }) .collect() } pub fn default(self, db: &dyn HirDatabase) -> Option<Type> { let params = db.generic_defaults(self.id.parent); let local_idx = hir_ty::param_idx(db, self.id)?; let resolver = self.id.parent.resolver(db.upcast()); let environment = TraitEnvironment::lower(db, &resolver); let ty = params.get(local_idx)?.clone(); let subst = Substs::type_params(db, self.id.parent); let ty = ty.subst(&subst.prefix(local_idx)); Some(Type { krate: self.id.parent.module(db.upcast()).krate, ty: InEnvironment { value: ty, environment }, }) } } impl HirDisplay for TypeParam { fn hir_fmt(&self, f: &mut HirFormatter) -> Result<(), HirDisplayError> { write!(f, "{}", self.name(f.db))?; let bounds = f.db.generic_predicates_for_param(self.id); let substs = Substs::type_params(f.db, self.id.parent); let predicates = bounds.iter().cloned().map(|b| b.subst(&substs)).collect::<Vec<_>>(); if !(predicates.is_empty() || f.omit_verbose_types()) { write!(f, ": ")?; write_bounds_like_dyn_trait(&predicates, f)?; } Ok(()) } } #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub struct LifetimeParam { pub(crate) id: LifetimeParamId, } impl LifetimeParam { pub fn name(self, db: &dyn HirDatabase) -> Name { let params = 
db.generic_params(self.id.parent); params.lifetimes[self.id.local_id].name.clone() } pub fn module(self, db: &dyn HirDatabase) -> Module { self.id.parent.module(db.upcast()).into() } pub fn parent(self, _db: &dyn HirDatabase) -> GenericDef { self.id.parent.into() } } #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub struct ConstParam { pub(crate) id: ConstParamId, } impl ConstParam { pub fn name(self, db: &dyn HirDatabase) -> Name { let params = db.generic_params(self.id.parent); params.consts[self.id.local_id].name.clone() } pub fn module(self, db: &dyn HirDatabase) -> Module { self.id.parent.module(db.upcast()).into() } pub fn parent(self, _db: &dyn HirDatabase) -> GenericDef { self.id.parent.into() } pub fn ty(self, db: &dyn HirDatabase) -> Type { let def = self.id.parent; let krate = def.module(db.upcast()).krate; Type::new(db, krate, def, db.const_param_ty(self.id)) } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub struct Impl { pub(crate) id: ImplId, } impl Impl { pub fn all_in_crate(db: &dyn HirDatabase, krate: Crate) -> Vec<Impl> { let inherent = db.inherent_impls_in_crate(krate.id); let trait_ = db.trait_impls_in_crate(krate.id); inherent.all_impls().chain(trait_.all_impls()).map(Self::from).collect() } pub fn for_trait(db: &dyn HirDatabase, krate: Crate, trait_: Trait) -> Vec<Impl> { let impls = db.trait_impls_in_crate(krate.id); impls.for_trait(trait_.id).map(Self::from).collect() } // FIXME: the return type is wrong. This should be a hir version of // `TraitRef` (ie, resolved `TypeRef`). 
pub fn target_trait(self, db: &dyn HirDatabase) -> Option<TypeRef> { db.impl_data(self.id).target_trait.clone() } pub fn target_ty(self, db: &dyn HirDatabase) -> Type { let impl_data = db.impl_data(self.id); let resolver = self.id.resolver(db.upcast()); let ctx = hir_ty::TyLoweringContext::new(db, &resolver); let environment = TraitEnvironment::lower(db, &resolver); let ty = Ty::from_hir(&ctx, &impl_data.target_type); Type { krate: self.id.lookup(db.upcast()).container.module(db.upcast()).krate, ty: InEnvironment { value: ty, environment }, } } pub fn items(self, db: &dyn HirDatabase) -> Vec<AssocItem> { db.impl_data(self.id).items.iter().map(|it| (*it).into()).collect() } pub fn is_negative(self, db: &dyn HirDatabase) -> bool { db.impl_data(self.id).is_negative } pub fn module(self, db: &dyn HirDatabase) -> Module { self.id.lookup(db.upcast()).container.module(db.upcast()).into() } pub fn krate(self, db: &dyn HirDatabase) -> Crate { Crate { id: self.module(db).id.krate } } pub fn is_builtin_derive(self, db: &dyn HirDatabase) -> Option<InFile<ast::Attr>> { let src = self.source(db)?; let item = src.file_id.is_builtin_derive(db.upcast())?; let hygenic = hir_expand::hygiene::Hygiene::new(db.upcast(), item.file_id); // FIXME: handle `cfg_attr` let attr = item .value .attrs() .filter_map(|it| { let path = ModPath::from_src(it.path()?, &hygenic)?; if path.as_ident()?.to_string() == "derive" { Some(it) } else { None } }) .last()?; Some(item.with_value(attr)) } } #[derive(Clone, PartialEq, Eq, Debug)] pub struct Type { krate: CrateId, ty: InEnvironment<Ty>, } impl Type { pub(crate) fn new_with_resolver( db: &dyn HirDatabase, resolver: &Resolver, ty: Ty, ) -> Option<Type> { let krate = resolver.krate()?; Some(Type::new_with_resolver_inner(db, krate, resolver, ty)) } pub(crate) fn new_with_resolver_inner( db: &dyn HirDatabase, krate: CrateId, resolver: &Resolver, ty: Ty, ) -> Type { let environment = TraitEnvironment::lower(db, &resolver); Type { krate, ty: InEnvironment { 
value: ty, environment } } } fn new(db: &dyn HirDatabase, krate: CrateId, lexical_env: impl HasResolver, ty: Ty) -> Type { let resolver = lexical_env.resolver(db.upcast()); let environment = TraitEnvironment::lower(db, &resolver); Type { krate, ty: InEnvironment { value: ty, environment } } } fn from_def( db: &dyn HirDatabase, krate: CrateId, def: impl HasResolver + Into<TyDefId> + Into<GenericDefId>, ) -> Type { let substs = Substs::build_for_def(db, def).fill_with_unknown().build(); let ty = db.ty(def.into()).subst(&substs); Type::new(db, krate, def, ty) } pub fn is_unit(&self) -> bool { matches!( self.ty.value, Ty::Apply(ApplicationTy { ctor: TypeCtor::Tuple { cardinality: 0 }, .. }) ) } pub fn is_bool(&self) -> bool { matches!(self.ty.value, Ty::Apply(ApplicationTy { ctor: TypeCtor::Bool, .. })) } pub fn is_mutable_reference(&self) -> bool { matches!( self.ty.value, Ty::Apply(ApplicationTy { ctor: TypeCtor::Ref(Mutability::Mut), .. }) ) } pub fn remove_ref(&self) -> Option<Type> { if let Ty::Apply(ApplicationTy { ctor: TypeCtor::Ref(_), .. }) = self.ty.value { self.ty.value.substs().map(|substs| self.derived(substs[0].clone())) } else { None } } pub fn is_unknown(&self) -> bool { matches!(self.ty.value, Ty::Unknown) } /// Checks that particular type `ty` implements `std::future::Future`. /// This function is used in `.await` syntax completion. pub fn impls_future(&self, db: &dyn HirDatabase) -> bool { // No special case for the type of async block, since Chalk can figure it out. let krate = self.krate; let std_future_trait = db.lang_item(krate, "future_trait".into()).and_then(|it| it.as_trait()); let std_future_trait = match std_future_trait { Some(it) => it, None => return false, }; let canonical_ty = Canonical { value: self.ty.value.clone(), kinds: Arc::new([]) }; method_resolution::implements_trait( &canonical_ty, db, self.ty.environment.clone(), krate, std_future_trait, ) } /// Checks that particular type `ty` implements `std::ops::FnOnce`. 
/// /// This function can be used to check if a particular type is callable, since FnOnce is a /// supertrait of Fn and FnMut, so all callable types implements at least FnOnce. pub fn impls_fnonce(&self, db: &dyn HirDatabase) -> bool { let krate = self.krate; let fnonce_trait = match FnTrait::FnOnce.get_id(db, krate) { Some(it) => it, None => return false, }; let canonical_ty = Canonical { value: self.ty.value.clone(), kinds: Arc::new([]) }; method_resolution::implements_trait_unique( &canonical_ty, db, self.ty.environment.clone(), krate, fnonce_trait, ) } pub fn impls_trait(&self, db: &dyn HirDatabase, trait_: Trait, args: &[Type]) -> bool { let trait_ref = hir_ty::TraitRef { trait_: trait_.id, substs: Substs::build_for_def(db, trait_.id) .push(self.ty.value.clone()) .fill(args.iter().map(|t| t.ty.value.clone())) .build(), }; let goal = Canonical { value: hir_ty::InEnvironment::new( self.ty.environment.clone(), hir_ty::Obligation::Trait(trait_ref), ), kinds: Arc::new([]), }; db.trait_solve(self.krate, goal).is_some() } pub fn normalize_trait_assoc_type( &self, db: &dyn HirDatabase, trait_: Trait, args: &[Type], alias: TypeAlias, ) -> Option<Type> { let subst = Substs::build_for_def(db, trait_.id) .push(self.ty.value.clone()) .fill(args.iter().map(|t| t.ty.value.clone())) .build(); let predicate = ProjectionPredicate { projection_ty: ProjectionTy { associated_ty: alias.id, parameters: subst }, ty: Ty::Bound(BoundVar::new(DebruijnIndex::INNERMOST, 0)), }; let goal = Canonical { value: InEnvironment::new( self.ty.environment.clone(), Obligation::Projection(predicate), ), kinds: Arc::new([TyKind::General]), }; match db.trait_solve(self.krate, goal)? 
{ Solution::Unique(SolutionVariables(subst)) => subst.value.first().cloned(), Solution::Ambig(_) => None, } .map(|ty| Type { krate: self.krate, ty: InEnvironment { value: ty, environment: Arc::clone(&self.ty.environment) }, }) } pub fn is_copy(&self, db: &dyn HirDatabase) -> bool { let lang_item = db.lang_item(self.krate, SmolStr::new("copy")); let copy_trait = match lang_item { Some(LangItemTarget::TraitId(it)) => it, _ => return false, }; self.impls_trait(db, copy_trait.into(), &[]) } pub fn as_callable(&self, db: &dyn HirDatabase) -> Option<Callable> { let def = match self.ty.value { Ty::Apply(ApplicationTy { ctor: TypeCtor::FnDef(def), parameters: _ }) => Some(def), _ => None, }; let sig = self.ty.value.callable_sig(db)?; Some(Callable { ty: self.clone(), sig, def, is_bound_method: false }) } pub fn is_closure(&self) -> bool { matches!(&self.ty.value, Ty::Apply(ApplicationTy { ctor: TypeCtor::Closure { .. }, .. })) } pub fn is_fn(&self) -> bool { matches!( &self.ty.value, Ty::Apply(ApplicationTy { ctor: TypeCtor::FnDef(..), .. }) | Ty::Apply(ApplicationTy { ctor: TypeCtor::FnPtr { .. }, .. }) ) } pub fn is_packed(&self, db: &dyn HirDatabase) -> bool { let adt_id = match self.ty.value { Ty::Apply(ApplicationTy { ctor: TypeCtor::Adt(adt_id), .. }) => adt_id, _ => return false, }; let adt = adt_id.into(); match adt { Adt::Struct(s) => matches!(s.repr(db), Some(ReprKind::Packed)), _ => false, } } pub fn is_raw_ptr(&self) -> bool { matches!(&self.ty.value, Ty::Apply(ApplicationTy { ctor: TypeCtor::RawPtr(..), .. 
})) } pub fn contains_unknown(&self) -> bool { return go(&self.ty.value); fn go(ty: &Ty) -> bool { match ty { Ty::Unknown => true, Ty::Apply(a_ty) => a_ty.parameters.iter().any(go), _ => false, } } } pub fn fields(&self, db: &dyn HirDatabase) -> Vec<(Field, Type)> { if let Ty::Apply(a_ty) = &self.ty.value { let variant_id = match a_ty.ctor { TypeCtor::Adt(AdtId::StructId(s)) => s.into(), TypeCtor::Adt(AdtId::UnionId(u)) => u.into(), _ => return Vec::new(), }; return db .field_types(variant_id) .iter() .map(|(local_id, ty)| { let def = Field { parent: variant_id.into(), id: local_id }; let ty = ty.clone().subst(&a_ty.parameters); (def, self.derived(ty)) }) .collect(); }; Vec::new() } pub fn tuple_fields(&self, _db: &dyn HirDatabase) -> Vec<Type> { let mut res = Vec::new(); if let Ty::Apply(a_ty) = &self.ty.value { if let TypeCtor::Tuple { .. } = a_ty.ctor { for ty in a_ty.parameters.iter() { let ty = ty.clone(); res.push(self.derived(ty)); } } }; res } pub fn autoderef<'a>(&'a self, db: &'a dyn HirDatabase) -> impl Iterator<Item = Type> + 'a { // There should be no inference vars in types passed here // FIXME check that? let canonical = Canonical { value: self.ty.value.clone(), kinds: Arc::new([]) }; let environment = self.ty.environment.clone(); let ty = InEnvironment { value: canonical, environment }; autoderef(db, Some(self.krate), ty) .map(|canonical| canonical.value) .map(move |ty| self.derived(ty)) } // This would be nicer if it just returned an iterator, but that runs into // lifetime problems, because we need to borrow temp `CrateImplDefs`. pub fn iterate_assoc_items<T>( self, db: &dyn HirDatabase, krate: Crate, mut callback: impl FnMut(AssocItem) -> Option<T>, ) -> Option<T> { for krate in self.ty.value.def_crates(db, krate.id)? 
{ let impls = db.inherent_impls_in_crate(krate); for impl_def in impls.for_self_ty(&self.ty.value) { for &item in db.impl_data(*impl_def).items.iter() { if let Some(result) = callback(item.into()) { return Some(result); } } } } None } pub fn iterate_method_candidates<T>( &self, db: &dyn HirDatabase, krate: Crate, traits_in_scope: &FxHashSet<TraitId>, name: Option<&Name>, mut callback: impl FnMut(&Ty, Function) -> Option<T>, ) -> Option<T> { // There should be no inference vars in types passed here // FIXME check that? // FIXME replace Unknown by bound vars here let canonical = Canonical { value: self.ty.value.clone(), kinds: Arc::new([]) }; let env = self.ty.environment.clone(); let krate = krate.id; method_resolution::iterate_method_candidates( &canonical, db, env, krate, traits_in_scope, name, method_resolution::LookupMode::MethodCall, |ty, it| match it { AssocItemId::FunctionId(f) => callback(ty, f.into()), _ => None, }, ) } pub fn iterate_path_candidates<T>( &self, db: &dyn HirDatabase, krate: Crate, traits_in_scope: &FxHashSet<TraitId>, name: Option<&Name>, mut callback: impl FnMut(&Ty, AssocItem) -> Option<T>, ) -> Option<T> { // There should be no inference vars in types passed here // FIXME check that? 
// FIXME replace Unknown by bound vars here let canonical = Canonical { value: self.ty.value.clone(), kinds: Arc::new([]) }; let env = self.ty.environment.clone(); let krate = krate.id; method_resolution::iterate_method_candidates( &canonical, db, env, krate, traits_in_scope, name, method_resolution::LookupMode::Path, |ty, it| callback(ty, it.into()), ) } pub fn as_adt(&self) -> Option<Adt> { let (adt, _subst) = self.ty.value.as_adt()?; Some(adt.into()) } pub fn as_dyn_trait(&self) -> Option<Trait> { self.ty.value.dyn_trait().map(Into::into) } pub fn as_impl_traits(&self, db: &dyn HirDatabase) -> Option<Vec<Trait>> { self.ty.value.impl_trait_bounds(db).map(|it| { it.into_iter() .filter_map(|pred| match pred { hir_ty::GenericPredicate::Implemented(trait_ref) => { Some(Trait::from(trait_ref.trait_)) } _ => None, }) .collect() }) } pub fn as_associated_type_parent_trait(&self, db: &dyn HirDatabase) -> Option<Trait> { self.ty.value.associated_type_parent_trait(db).map(Into::into) } // FIXME: provide required accessors such that it becomes implementable from outside. pub fn is_equal_for_find_impls(&self, other: &Type) -> bool { match (&self.ty.value, &other.ty.value) { (Ty::Apply(a_original_ty), Ty::Apply(ApplicationTy { ctor, parameters })) => match ctor { TypeCtor::Ref(..) => match parameters.as_single() { Ty::Apply(a_ty) => a_original_ty.ctor == a_ty.ctor, _ => false, }, _ => a_original_ty.ctor == *ctor, }, _ => false, } } fn derived(&self, ty: Ty) -> Type { Type { krate: self.krate, ty: InEnvironment { value: ty, environment: self.ty.environment.clone() }, } } pub fn walk(&self, db: &dyn HirDatabase, mut cb: impl FnMut(Type)) { // TypeWalk::walk for a Ty at first visits parameters and only after that the Ty itself. // We need a different order here. 
fn walk_substs( db: &dyn HirDatabase, type_: &Type, substs: &Substs, cb: &mut impl FnMut(Type), ) { for ty in substs.iter() { walk_type(db, &type_.derived(ty.clone()), cb); } } fn walk_bounds( db: &dyn HirDatabase, type_: &Type, bounds: &[GenericPredicate], cb: &mut impl FnMut(Type), ) { for pred in bounds { match pred { GenericPredicate::Implemented(trait_ref) => { cb(type_.clone()); walk_substs(db, type_, &trait_ref.substs, cb); } _ => (), } } } fn walk_type(db: &dyn HirDatabase, type_: &Type, cb: &mut impl FnMut(Type)) { let ty = type_.ty.value.strip_references(); match ty { Ty::Apply(ApplicationTy { ctor, parameters }) => { match ctor { TypeCtor::Adt(_) => { cb(type_.derived(ty.clone())); } TypeCtor::AssociatedType(_) => { if let Some(_) = ty.associated_type_parent_trait(db) { cb(type_.derived(ty.clone())); } } TypeCtor::OpaqueType(..) => { if let Some(bounds) = ty.impl_trait_bounds(db) { walk_bounds(db, &type_.derived(ty.clone()), &bounds, cb); } } _ => (), } // adt params, tuples, etc... 
walk_substs(db, type_, parameters, cb); } Ty::Opaque(opaque_ty) => { if let Some(bounds) = ty.impl_trait_bounds(db) { walk_bounds(db, &type_.derived(ty.clone()), &bounds, cb); } walk_substs(db, type_, &opaque_ty.parameters, cb); } Ty::Placeholder(_) => { if let Some(bounds) = ty.impl_trait_bounds(db) { walk_bounds(db, &type_.derived(ty.clone()), &bounds, cb); } } Ty::Dyn(bounds) => { walk_bounds(db, &type_.derived(ty.clone()), bounds.as_ref(), cb); } _ => (), } } walk_type(db, self, &mut cb); } } impl HirDisplay for Type { fn hir_fmt(&self, f: &mut HirFormatter) -> Result<(), HirDisplayError> { self.ty.value.hir_fmt(f) } } // FIXME: closures #[derive(Debug)] pub struct Callable { ty: Type, sig: FnSig, def: Option<CallableDefId>, pub(crate) is_bound_method: bool, } pub enum CallableKind { Function(Function), TupleStruct(Struct), TupleEnumVariant(Variant), Closure, } impl Callable { pub fn kind(&self) -> CallableKind { match self.def { Some(CallableDefId::FunctionId(it)) => CallableKind::Function(it.into()), Some(CallableDefId::StructId(it)) => CallableKind::TupleStruct(it.into()), Some(CallableDefId::EnumVariantId(it)) => CallableKind::TupleEnumVariant(it.into()), None => CallableKind::Closure, } } pub fn receiver_param(&self, db: &dyn HirDatabase) -> Option<ast::SelfParam> { let func = match self.def { Some(CallableDefId::FunctionId(it)) if self.is_bound_method => it, _ => return None, }; let src = func.lookup(db.upcast()).source(db.upcast()); let param_list = src.value.param_list()?; param_list.self_param() } pub fn n_params(&self) -> usize { self.sig.params().len() - if self.is_bound_method { 1 } else { 0 } } pub fn params( &self, db: &dyn HirDatabase, ) -> Vec<(Option<Either<ast::SelfParam, ast::Pat>>, Type)> { let types = self .sig .params() .iter() .skip(if self.is_bound_method { 1 } else { 0 }) .map(|ty| self.ty.derived(ty.clone())); let patterns = match self.def { Some(CallableDefId::FunctionId(func)) => { let src = 
func.lookup(db.upcast()).source(db.upcast()); src.value.param_list().map(|param_list| { param_list .self_param() .map(|it| Some(Either::Left(it))) .filter(|_| !self.is_bound_method) .into_iter() .chain(param_list.params().map(|it| it.pat().map(Either::Right))) }) } _ => None, }; patterns.into_iter().flatten().chain(iter::repeat(None)).zip(types).collect() } pub fn return_type(&self) -> Type { self.ty.derived(self.sig.ret().clone()) } } /// For IDE only #[derive(Debug)] pub enum ScopeDef { ModuleDef(ModuleDef), MacroDef(MacroDef), GenericParam(TypeParam), ImplSelfType(Impl), AdtSelfType(Adt), Local(Local), Unknown, } impl ScopeDef { pub fn all_items(def: PerNs) -> ArrayVec<[Self; 3]> { let mut items = ArrayVec::new(); match (def.take_types(), def.take_values()) { (Some(m1), None) => items.push(ScopeDef::ModuleDef(m1.into())), (None, Some(m2)) => items.push(ScopeDef::ModuleDef(m2.into())), (Some(m1), Some(m2)) => { // Some items, like unit structs and enum variants, are // returned as both a type and a value. Here we want // to de-duplicate them. if m1 != m2 { items.push(ScopeDef::ModuleDef(m1.into())); items.push(ScopeDef::ModuleDef(m2.into())); } else { items.push(ScopeDef::ModuleDef(m1.into())); } } (None, None) => {} }; if let Some(macro_def_id) = def.take_macros() { items.push(ScopeDef::MacroDef(macro_def_id.into())); } if items.is_empty() { items.push(ScopeDef::Unknown); } items } } pub trait HasVisibility { fn visibility(&self, db: &dyn HirDatabase) -> Visibility; fn is_visible_from(&self, db: &dyn HirDatabase, module: Module) -> bool { let vis = self.visibility(db); vis.is_visible_from(db.upcast(), module.id) } }
32.714216
133
0.559608
ab82a5a6695596de4cf3197b8d67211180648b9b
6,827
use serde::{Deserialize, Serialize}; use std::fmt; #[cfg(target_arch = "aarch64")] pub(crate) use crate::simd_aarch64::*; #[cfg(target_arch = "x86_64")] pub(crate) use crate::simd_amd64::*; pub(crate) use crate::simd_common::*; impl PartialOrd for F32x8 { #[inline] fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> { unsafe { Vec8_F32::from(*self).partial_cmp(&Vec8_F32::from(*other)) } } } impl PartialOrd for F64x4 { #[inline] fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> { unsafe { Vec4_F64::from(*self).partial_cmp(&Vec4_F64::from(*other)) } } } impl PartialOrd for F64x2 { #[inline] fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> { unsafe { Vec2_F64::from(*self).partial_cmp(&Vec2_F64::from(*other)) } } } impl PartialEq for F32x8 { #[inline] fn eq(&self, other: &Self) -> bool { unsafe { Vec8_F32::from(*self).eq(&Vec8_F32::from(*other)) } } } impl PartialEq for F64x4 { #[inline] fn eq(&self, other: &Self) -> bool { unsafe { Vec4_F64::from(*self).eq(&Vec4_F64::from(*other)) } } } impl PartialEq for F64x2 { #[inline] fn eq(&self, other: &Self) -> bool { unsafe { Vec2_F64::from(*self).eq(&Vec2_F64::from(*other)) } } } impl Serialize for F32x8 { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer, { unsafe { Vec8_F32::from(*self).serialize(serializer) } } } impl Serialize for F64x4 { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer, { unsafe { Vec4_F64::from(*self).serialize(serializer) } } } impl Serialize for F64x2 { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer, { unsafe { Vec2_F64::from(*self).serialize(serializer) } } } impl<'de> Deserialize<'de> for F32x8 { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de>, { let vec8 = Vec8_F32::deserialize(deserializer)?; Ok(unsafe { F32x8::new( vec8.v1, vec8.v2, vec8.v3, vec8.v4, vec8.v5, vec8.v6, 
vec8.v7, vec8.v8, ) }) } } impl<'de> Deserialize<'de> for F64x4 { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de>, { let vec4 = Vec4_F64::deserialize(deserializer)?; Ok(unsafe { F64x4::new(vec4.v1, vec4.v2, vec4.v3, vec4.v4) }) } } impl<'de> Deserialize<'de> for F64x2 { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de>, { let vec2 = Vec2_F64::deserialize(deserializer)?; Ok(unsafe { F64x2::new(vec2.v1, vec2.v2) }) } } impl fmt::Debug for F64x4 { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { unsafe { write!( f, "F64x4 {{ v1: {}, v2: {}, v3: {}, v4: {} }}", self.v1(), self.v2(), self.v3(), self.v4() ) } } } impl fmt::Debug for F64x2 { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { unsafe { write!(f, "F64x4 {{ v1: {}, v2: {} }}", self.v1(), self.v2(),) } } } impl fmt::Debug for F32x8 { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { unsafe { write!( f, "F32x8 {{ v1: {}, v2: {}, v3: {}, v4: {}, v5: {}, v6: {}, v7: {}, v8: {} }}", self.v1(), self.v2(), self.v3(), self.v4(), self.v5(), self.v6(), self.v7(), self.v8() ) } } } pub(crate) fn vecf64_to_vecf32(v: &[F64x4]) -> Vec<F32x8> { let mut sz = v.len() / 2; if v.len() % 2 == 1 { sz += 1; } let mut result = Vec::with_capacity(sz); unsafe { let zero: F64x4 = F64x4::new(0.0, 0.0, 0.0, 0.0); for i in 0..sz { let p1 = v.get(i * 2).unwrap_or(&zero); let p2 = v.get(i * 2 + 1).unwrap_or(&zero); result.push(F32x8::new( p1.v1() as f32, p1.v2() as f32, p1.v3() as f32, p1.v4() as f32, p2.v1() as f32, p2.v2() as f32, p2.v3() as f32, p2.v4() as f32, )); } } result } pub(crate) fn vecf32_to_vecf64(v: &[F32x8]) -> Vec<F64x4> { let sz = v.len() * 2; let mut result = Vec::with_capacity(sz); for i in 0..v.len() { unsafe { result.push(F64x4::new( v[i].v1() as f64, v[i].v2() as f64, v[i].v3() as f64, v[i].v4() as f64, )); result.push(F64x4::new( v[i].v5() as f64, v[i].v6() as f64, v[i].v7() as f64, v[i].v8() as 
f64, )); } } result } impl From<Vec4_F32> for F32x4 { fn from(thing: Vec4_F32) -> Self { unsafe { F32x4::new(thing.v1, thing.v2, thing.v3, thing.v4) } } } impl From<F32x4> for Vec4_F32 { fn from(thing: F32x4) -> Self { Vec4_F32 { v1: thing.v1(), v2: thing.v2(), v3: thing.v3(), v4: thing.v4(), } } } impl From<Vec8_F32> for F32x8 { fn from(thing: Vec8_F32) -> Self { unsafe { F32x8::new( thing.v1, thing.v2, thing.v3, thing.v4, thing.v5, thing.v6, thing.v7, thing.v8, ) } } } impl From<F32x8> for Vec8_F32 { fn from(thing: F32x8) -> Self { Vec8_F32 { v1: thing.v1(), v2: thing.v2(), v3: thing.v3(), v4: thing.v4(), v5: thing.v5(), v6: thing.v6(), v7: thing.v7(), v8: thing.v8(), } } } impl From<Vec4_F64> for F64x4 { fn from(thing: Vec4_F64) -> Self { unsafe { F64x4::new(thing.v1, thing.v2, thing.v3, thing.v4) } } } impl From<F64x4> for Vec4_F64 { fn from(thing: F64x4) -> Self { Vec4_F64 { v1: thing.v1(), v2: thing.v2(), v3: thing.v3(), v4: thing.v4(), } } } impl From<Vec2_F64> for F64x2 { fn from(thing: Vec2_F64) -> Self { unsafe { F64x2::new(thing.v1, thing.v2) } } } impl From<F64x2> for Vec2_F64 { fn from(thing: F64x2) -> Self { Vec2_F64 { v1: thing.v1(), v2: thing.v2(), } } }
24.916058
95
0.478248
db188e2d17250196e039d81e41eb5f477374d64b
1,990
use std::collections::HashMap; use failure::Error; use utility; use utility::io::Parseable; /// Introduction to Random Strings /// /// Given: A DNA string s of length at most 100 bp and an array A /// containing at most 20 numbers between 0 and 1. /// /// Return: An array B having the same length as A in which B[k] represents the common logarithm /// of the probability that a random string constructed with the GC-content found in A[k] will /// match s exactly. pub fn rosalind_prob(filename: &str) -> Result<Vec<f64>, Error> { let input = utility::io::input_from_file(filename)?; let parts: Vec<_> = input.split('\n').collect(); let sequence = parts[0]; let gc_contents = f64::parse_line(parts[1])?; let mut probabilities = Vec::new(); for gc_content in gc_contents { let nucleotide_probs = nucleotide_probs_from_gc_content(gc_content); probabilities.push( sequence .chars() .map(|c| nucleotide_probs[&c].log10()) .sum::<f64>(), ); } println!("{}", utility::io::format_array(&probabilities)); Ok(probabilities) } /// Get expected probabilities of each nucleotide from the GC content pub fn nucleotide_probs_from_gc_content(gc_content: f64) -> HashMap<char, f64> { let gc = gc_content / 2.; let at = (1. - gc_content) / 2.; "ACGT".chars().zip(vec![at, gc, gc, at]).collect() } #[cfg(test)] mod tests { use assert_approx_eq::assert_approx_eq; use super::*; #[test] fn prob() -> Result<(), Error> { let (input_file, output_file) = utility::testing::get_input_output_file("rosalind_prob")?; let output = f64::parse_line(&utility::io::input_from_file(&output_file)?)?; let result = rosalind_prob(&input_file)?; result .into_iter() .zip(output.into_iter()) .for_each(|(x, y)| assert_approx_eq!(x, y, utility::testing::ROSALIND_FLOAT_ERROR_F64)); Ok(()) } }
33.166667
100
0.632663
ed0f3d7029436e95cbc09d28e8f44931fb9a93a8
1,571
use std::sync::Arc; use anyhow::Result; use crate::browser::tab::Tab; use crate::protocol::cdp::Target::CreateTarget; /// Equivalent to a new incognito window pub struct Context<'a> { id: String, browser: &'a super::Browser, } impl<'a> Context<'a> { pub fn new(browser: &'a super::Browser, context_id: String) -> Self { Self { id: context_id, browser, } } /// Opens a new tab in this context. It will not share cookies or a cache with the default /// browsing context or any other contexts created pub fn new_tab(&self) -> Result<Arc<Tab>> { let tab_in_context = CreateTarget { url: "about:blank".to_string(), width: None, height: None, browser_context_id: Some(self.id.clone()), enable_begin_frame_control: None, new_window: None, background: None, }; self.browser.new_tab_with_options(tab_in_context) } /// The BrowserContextId associated with this context pub fn get_id(&self) -> &str { &self.id } /// Any tabs created in this context pub fn get_tabs(&self) -> Result<Vec<Arc<Tab>>> { let browser_tabs = self.browser.get_tabs().lock().unwrap(); let mut tabs = vec![]; for tab in browser_tabs.iter() { if let Some(context_id) = tab.get_browser_context_id()? { if context_id == self.id { tabs.push(Arc::clone(tab)); } } } Ok(tabs) } }
28.053571
94
0.562062
bf6c856198e5a19e48c752934b55b57fe9e437ab
1,426
// This file was generated by gir (https://github.com/gtk-rs/gir) // from gir-files (https://github.com/vhdirk/gir-files.git) // from webkit2gtk-gir-files // DO NOT EDIT use glib::translate::*; glib::wrapper! { #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct NetworkProxySettings(Boxed<ffi::WebKitNetworkProxySettings>); match fn { copy => |ptr| ffi::webkit_network_proxy_settings_copy(mut_override(ptr)), free => |ptr| ffi::webkit_network_proxy_settings_free(ptr), type_ => || ffi::webkit_network_proxy_settings_get_type(), } } impl NetworkProxySettings { #[doc(alias = "webkit_network_proxy_settings_new")] pub fn new(default_proxy_uri: Option<&str>, ignore_hosts: &[&str]) -> NetworkProxySettings { assert_initialized_main_thread!(); unsafe { from_glib_full(ffi::webkit_network_proxy_settings_new( default_proxy_uri.to_glib_none().0, ignore_hosts.to_glib_none().0, )) } } #[doc(alias = "webkit_network_proxy_settings_add_proxy_for_scheme")] pub fn add_proxy_for_scheme(&mut self, scheme: &str, proxy_uri: &str) { unsafe { ffi::webkit_network_proxy_settings_add_proxy_for_scheme( self.to_glib_none_mut().0, scheme.to_glib_none().0, proxy_uri.to_glib_none().0, ); } } }
33.952381
96
0.642356
67b6f38343c558b77cd4534db1587d5c713d5153
268
#![feature(proc_macro_hygiene, decl_macro)] #[macro_use] extern crate rocket; #[macro_use] extern crate rocket_contrib; #[macro_use] extern crate rocket_okapi; mod db; mod macros; mod model; #[cfg(test)] mod test; mod web; fn main() { web::rocket().launch(); }
13.4
43
0.708955
67d7fc49d4ca2b19fd47f1130e6430d9aa896932
4,188
/* * Hurl (https://hurl.dev) * Copyright (C) 2022 Orange * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ use std::fmt; /// /// Type system used in hurl /// Values are used by queries, captures, asserts and predicates /// #[derive(Clone, Debug)] pub enum Value { Bool(bool), Bytes(Vec<u8>), Float(f64), Integer(i64), List(Vec<Value>), Nodeset(usize), Null, Object(Vec<(String, Value)>), String(String), Unit, Regex(regex::Regex), } // You must implement it yourself because of the Float impl PartialEq for Value { fn eq(&self, other: &Self) -> bool { match (self, other) { (Value::Bool(v1), Value::Bool(v2)) => v1 == v2, (Value::Bytes(v1), Value::Bytes(v2)) => v1 == v2, (Value::Float(v1), Value::Float(v2)) => (v1 - v2).abs() < f64::EPSILON, (Value::Integer(v1), Value::Integer(v2)) => v1 == v2, (Value::List(v1), Value::List(v2)) => v1 == v2, (Value::Nodeset(v1), Value::Nodeset(v2)) => v1 == v2, (Value::Null, Value::Null) => true, (Value::Object(v1), Value::Object(v2)) => v1 == v2, (Value::String(v1), Value::String(v2)) => v1 == v2, (Value::Unit, Value::Unit) => true, _ => false, } } } impl Eq for Value {} impl fmt::Display for Value { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let value = match self { Value::Integer(x) => x.to_string(), Value::Bool(x) => x.to_string(), Value::Float(f) => format_float(*f), Value::String(x) => x.clone(), Value::List(values) => { let values: Vec<String> = values.iter().map(|e| e.to_string()).collect(); format!("[{}]", values.join(",")) } 
Value::Object(_) => "Object()".to_string(), Value::Nodeset(x) => format!("Nodeset{:?}", x), Value::Bytes(v) => format!("hex, {};", hex::encode(v)), Value::Null => "null".to_string(), Value::Unit => "Unit".to_string(), Value::Regex(x) => { let s = str::replace(x.as_str(), "/", "\\/"); format!("/{}/", s) } }; write!(f, "{}", value) } } fn format_float(value: f64) -> String { if value.fract() < f64::EPSILON { format!("{}.0", value) } else { value.to_string() } } impl Value { pub fn _type(&self) -> String { match self { Value::Integer(_) => "integer".to_string(), Value::Bool(_) => "boolean".to_string(), Value::Float(_) => "float".to_string(), Value::String(_) => "string".to_string(), Value::List(_) => "list".to_string(), Value::Object(_) => "object".to_string(), Value::Nodeset(_) => "nodeset".to_string(), Value::Bytes(_) => "bytes".to_string(), Value::Null => "null".to_string(), Value::Unit => "unit".to_string(), Value::Regex(_) => "regex".to_string(), } } pub fn from_f64(value: f64) -> Value { Value::Float(value) } pub fn is_scalar(&self) -> bool { !matches!(self, Value::Nodeset(_) | Value::List(_)) } } #[cfg(test)] mod tests { use super::*; #[test] fn test_is_scalar() { assert!(Value::Integer(1).is_scalar()); assert!(!Value::List(vec![]).is_scalar()); } #[test] fn test_to_string() { assert_eq!(Value::Float(1.0).to_string(), "1.0".to_string()); assert_eq!(Value::Float(1.1).to_string(), "1.1".to_string()); } }
31.022222
89
0.523639
defee75fbc74bc3ea9daab391dbdf5450c015c5d
3,591
//! # 全局属性 //! //! - `#![no_std]` //! 禁用标准库 #![no_std] //! //! - `#![no_main]` //! 不使用 `main` 函数等全部 Rust-level 入口点来作为程序入口 #![no_main] //! //! - `#![deny(missing_docs)]` //! 任何没有注释的地方都会产生警告:这个属性用来压榨写实验指导的学长,同学可以删掉了 #![warn(missing_docs)] //! # 一些 unstable 的功能需要在 crate 层级声明后才可以使用 //! //! - `#![feature(alloc_error_handler)]` //! 我们使用了一个全局动态内存分配器,以实现原本标准库中的堆内存分配。 //! 而语言要求我们同时实现一个错误回调,这里我们直接 panic #![feature(alloc_error_handler)] //! //! - `#![feature(llvm_asm)]` //! 内嵌汇编 #![feature(llvm_asm)] //! //! - `#![feature(global_asm)]` //! 内嵌整个汇编文件 #![feature(global_asm)] //! //! - `#![feature(panic_info_message)]` //! panic! 时,获取其中的信息并打印 #![feature(panic_info_message)] //! //! - `#![feature(naked_functions)]` //! 允许使用 naked 函数,即编译器不在函数前后添加出入栈操作。 //! 这允许我们在函数中间内联汇编使用 `ret` 提前结束,而不会导致栈出现异常 #![feature(naked_functions)] //! //! - `#![feature(slice_fill)]` //! 允许将 slice 填充值 #![feature(slice_fill)] #[macro_use] mod console; mod drivers; mod fs; mod interrupt; mod kernel; mod memory; mod panic; mod process; mod sbi; extern crate alloc; use alloc::sync::Arc; use fs::{INodeExt, ROOT_INODE}; use memory::PhysicalAddress; use process::*; use xmas_elf::ElfFile; // 汇编编写的程序入口,具体见该文件 global_asm!(include_str!("entry.asm")); /// Rust 的入口函数 /// /// 在 `_start` 为我们进行了一系列准备之后,这是第一个被调用的 Rust 函数 #[no_mangle] pub extern "C" fn rust_main(_hart_id: usize, dtb_pa: PhysicalAddress) -> ! 
{ memory::init(); interrupt::init(); drivers::init(dtb_pa); fs::init(); /* { let mut processor = PROCESSOR.lock(); // 创建一个内核进程 let kernel_process = Process::new_kernel().unwrap(); // 为这个进程创建多个线程,并设置入口均为 sample_process,而参数不同 for i in 1..9usize { processor.add_thread(create_kernel_thread( kernel_process.clone(), sample_process as usize, Some(&[i]), i, )); } } */ { let mut processor = PROCESSOR.lock(); processor.add_thread(create_user_process("hello_world")); } extern "C" { fn __restore(context: usize); } // 获取第一个线程的 Context let context = PROCESSOR.lock().prepare_next_thread(); // 启动第一个线程 unsafe { __restore(context as usize) }; unreachable!() } fn sample_process(id: usize) { for i in 0..4000000 { if i % 1000000 == 0 { println!("Hello world from kernel mode {} program!{}", id, i); } } } /// 创建一个内核进程 pub fn create_kernel_thread( process: Arc<Process>, entry_point: usize, arguments: Option<&[usize]>, priority: usize, ) -> Arc<Thread> { // 创建线程 let thread = Thread::new(process, entry_point, arguments, priority).unwrap(); // 设置线程的返回地址为 kernel_thread_exit thread .as_ref() .inner() .context .as_mut() .unwrap() .set_ra(kernel_thread_exit as usize); thread } /// 创建一个用户进程,从指定的文件名读取 ELF pub fn create_user_process(name: &str) -> Arc<Thread> { // 从文件系统中找到程序 let app = ROOT_INODE.find(name).unwrap(); // 读取数据 let data = app.readall().unwrap(); // 解析 ELF 文件 let elf = ElfFile::new(data.as_slice()).unwrap(); // 利用 ELF 文件创建线程,映射空间并加载数据 let process = Process::from_elf(&elf, true).unwrap(); // 再从 ELF 中读出程序入口地址 Thread::new(process, elf.header.pt2.entry_point() as usize, None, 0).unwrap() } /// 内核线程需要调用这个函数来退出 fn kernel_thread_exit() { // 当前线程标记为结束 PROCESSOR.lock().current_thread().as_ref().inner().dead = true; // 制造一个中断来交给操作系统处理 unsafe { llvm_asm!("ebreak" :::: "volatile") }; }
23.318182
81
0.605681
33f814f2d795b356d0a6319800c833c4c07d02ea
2,839
use azure_sdk_cosmos::prelude::*; use std::error::Error; #[macro_use] extern crate serde_derive; #[derive(Serialize, Deserialize, Debug)] struct MySampleStructOwned { id: String, a_string: String, a_number: u64, a_timestamp: i64, } #[tokio::main] async fn main() -> Result<(), Box<dyn Error>> { let database_name = std::env::args() .nth(1) .expect("please specify database name as first command line parameter"); let collection_name = std::env::args() .nth(2) .expect("please specify collection name as second command line parameter"); let query = std::env::args() .nth(3) .expect("please specify requested query"); let account = std::env::var("COSMOS_ACCOUNT").expect("Set env variable COSMOS_ACCOUNT first!"); let master_key = std::env::var("COSMOS_MASTER_KEY").expect("Set env variable COSMOS_MASTER_KEY first!"); let authorization_token = AuthorizationToken::new(account, TokenType::Master, &master_key)?; let client = ClientBuilder::new(authorization_token)?; let ret = client .query_documents( &database_name, &collection_name, Query::from(query.as_ref()), ) .execute_json() .await?; println!("As JSON:\n{:?}", ret); for doc in ret.results { println!("{}", doc.result); } let ret = client .query_documents( &database_name, &collection_name, Query::from(query.as_ref()), ) .execute::<MySampleStructOwned>() .await?; println!("\nAs entities:\n{:?}", ret); for doc in ret.results { println!("{:?}", doc); } // test continuation token // only if we have more than 2 records let ret = client .query_documents( &database_name, &collection_name, Query::from(query.as_ref()), ) .max_item_count(2u64) .execute::<MySampleStructOwned>() .await?; println!( "Received {} entries. 
Continuation token is == {:?}", ret.results.len(), ret.additional_headers.continuation_token ); if let Some(ct) = ret.additional_headers.continuation_token { let ret = { // if we have more, let's get them client .query_documents( &database_name, &collection_name, Query::from(query.as_ref()), ) .continuation_token(ct) .execute::<MySampleStructOwned>() .await? }; println!( "Received {} entries. Continuation token is == {:?}", ret.results.len(), ret.additional_headers.continuation_token ); } Ok(()) }
27.298077
99
0.556886
76f9941fb913e55c57d5870fe63a161a440d717f
23,183
// Copyright 2018 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Mining Stratum Server use bufstream::BufStream; use chrono::prelude::Utc; use serde; use serde_json; use serde_json::Value; use std::error::Error; use std::io::{BufRead, ErrorKind, Write}; use std::net::{TcpListener, TcpStream}; use std::sync::{Arc, Mutex, RwLock}; use std::time::{Duration, SystemTime}; use std::{cmp, thread}; use chain; use common::stats::{StratumStats, WorkerStats}; use common::types::{StratumServerConfig, SyncState}; use core::core::verifier_cache::VerifierCache; use core::core::Block; use core::{global, pow}; use keychain; use mining::mine_block; use pool; use util::LOGGER; // ---------------------------------------- // http://www.jsonrpc.org/specification // RPC Methods #[derive(Serialize, Deserialize, Debug)] struct RpcRequest { id: String, jsonrpc: String, method: String, params: Option<Value>, } #[derive(Serialize, Deserialize, Debug)] struct RpcResponse { id: String, jsonrpc: String, method: String, result: Option<Value>, error: Option<Value>, } #[derive(Serialize, Deserialize, Debug)] struct RpcError { code: i32, message: String, } #[derive(Serialize, Deserialize, Debug)] struct LoginParams { login: String, pass: String, agent: String, } #[derive(Serialize, Deserialize, Debug)] struct SubmitParams { height: u64, job_id: u64, nonce: u64, pow: Vec<u64>, } #[derive(Serialize, Deserialize, Debug)] pub struct JobTemplate { height: u64, job_id: u64, 
difficulty: u64, pre_pow: String, } #[derive(Serialize, Deserialize, Debug)] pub struct WorkerStatus { id: String, height: u64, difficulty: u64, accepted: u64, rejected: u64, stale: u64, } // ---------------------------------------- // Worker Factory Thread Function // Run in a thread. Adds new connections to the workers list fn accept_workers( id: String, address: String, workers: &mut Arc<Mutex<Vec<Worker>>>, stratum_stats: &mut Arc<RwLock<StratumStats>>, ) { let listener = TcpListener::bind(address).expect("Failed to bind to listen address"); let mut worker_id: u32 = 0; for stream in listener.incoming() { match stream { Ok(stream) => { warn!( LOGGER, "(Server ID: {}) New connection: {}", id, stream.peer_addr().unwrap() ); stream .set_nonblocking(true) .expect("set_nonblocking call failed"); let mut worker = Worker::new(worker_id.to_string(), BufStream::new(stream)); workers.lock().unwrap().push(worker); // stats for this worker (worker stat objects are added and updated but never // removed) let mut worker_stats = WorkerStats::default(); worker_stats.is_connected = true; worker_stats.id = worker_id.to_string(); worker_stats.pow_difficulty = 1; // XXX TODO let mut stratum_stats = stratum_stats.write().unwrap(); stratum_stats.worker_stats.push(worker_stats); worker_id = worker_id + 1; } Err(e) => { warn!( LOGGER, "(Server ID: {}) Error accepting connection: {:?}", id, e ); } } } // close the socket server drop(listener); } // ---------------------------------------- // Worker Object - a connected stratum client - a miner, pool, proxy, etc... pub struct Worker { id: String, agent: String, login: Option<String>, stream: BufStream<TcpStream>, error: bool, authenticated: bool, } impl Worker { /// Creates a new Stratum Worker. 
pub fn new(id: String, stream: BufStream<TcpStream>) -> Worker { Worker { id: id, agent: String::from(""), login: None, stream: stream, error: false, authenticated: false, } } // Get Message from the worker fn read_message(&mut self) -> Option<String> { // Read and return a single message or None let mut line = String::new(); match self.stream.read_line(&mut line) { Ok(_) => { return Some(line); } Err(ref e) if e.kind() == ErrorKind::WouldBlock => { // Not an error, just no messages ready return None; } Err(e) => { warn!( LOGGER, "(Server ID: {}) Error in connection with stratum client: {}", self.id, e ); self.error = true; return None; } } } // Send Message to the worker fn write_message(&mut self, message_in: String) { // Write and Flush the message let mut message = message_in.clone(); if !message.ends_with("\n") { message += "\n"; } match self.stream.write(message.as_bytes()) { Ok(_) => match self.stream.flush() { Ok(_) => {} Err(e) => { warn!( LOGGER, "(Server ID: {}) Error in connection with stratum client: {}", self.id, e ); self.error = true; } }, Err(e) => { warn!( LOGGER, "(Server ID: {}) Error in connection with stratum client: {}", self.id, e ); self.error = true; return; } } } } // impl Worker // ---------------------------------------- // Grin Stratum Server pub struct StratumServer { id: String, config: StratumServerConfig, chain: Arc<chain::Chain>, tx_pool: Arc<RwLock<pool::TransactionPool>>, verifier_cache: Arc<RwLock<VerifierCache>>, current_block_versions: Vec<Block>, current_difficulty: u64, minimum_share_difficulty: u64, current_key_id: Option<keychain::Identifier>, workers: Arc<Mutex<Vec<Worker>>>, sync_state: Arc<SyncState>, } impl StratumServer { /// Creates a new Stratum Server. 
pub fn new( config: StratumServerConfig, chain: Arc<chain::Chain>, tx_pool: Arc<RwLock<pool::TransactionPool>>, verifier_cache: Arc<RwLock<VerifierCache>>, ) -> StratumServer { StratumServer { id: String::from("StratumServer"), minimum_share_difficulty: config.minimum_share_difficulty, config, chain, tx_pool, verifier_cache, current_block_versions: Vec::new(), current_difficulty: <u64>::max_value(), current_key_id: None, workers: Arc::new(Mutex::new(Vec::new())), sync_state: Arc::new(SyncState::new()), } } // Build and return a JobTemplate for mining the current block fn build_block_template(&self) -> JobTemplate { let bh = self.current_block_versions.last().unwrap().header.clone(); // Serialize the block header into pre and post nonce strings let mut pre_pow_writer = mine_block::HeaderPrePowWriter::default(); bh.write_pre_pow(&mut pre_pow_writer).unwrap(); bh.pow.write_pre_pow(bh.version, &mut pre_pow_writer).unwrap(); let pre = pre_pow_writer.as_hex_string(false); let job_template = JobTemplate { height: bh.height, job_id: (self.current_block_versions.len() - 1) as u64, difficulty: self.minimum_share_difficulty, pre_pow: pre, }; return job_template; } // Handle an RPC request message from the worker(s) fn handle_rpc_requests(&mut self, stratum_stats: &mut Arc<RwLock<StratumStats>>) { let mut workers_l = self.workers.lock().unwrap(); for num in 0..workers_l.len() { match workers_l[num].read_message() { Some(the_message) => { // Decompose the request from the JSONRpc wrapper let request: RpcRequest = match serde_json::from_str(&the_message) { Ok(request) => request, Err(e) => { // not a valid JSON RpcRequest - disconnect the worker warn!( LOGGER, "(Server ID: {}) Failed to parse JSONRpc: {} - {:?}", self.id, e.description(), the_message.as_bytes(), ); workers_l[num].error = true; continue; } }; let mut stratum_stats = stratum_stats.write().unwrap(); let worker_stats_id = stratum_stats .worker_stats .iter() .position(|r| r.id == workers_l[num].id) .unwrap(); 
stratum_stats.worker_stats[worker_stats_id].last_seen = SystemTime::now(); // Call the handler function for requested method let response = match request.method.as_str() { "login" => self.handle_login(request.params, &mut workers_l[num]), "submit" => { let res = self.handle_submit( request.params, &mut workers_l[num], &mut stratum_stats.worker_stats[worker_stats_id], ); // this key_id has been used now, reset if let Ok((_, true)) = res { self.current_key_id = None; } res.map(|(v, _)| v) } "keepalive" => self.handle_keepalive(), "getjobtemplate" => { if self.sync_state.is_syncing() { let e = RpcError { code: -32000, message: "Node is syncing - Please wait".to_string(), }; Err(serde_json::to_value(e).unwrap()) } else { self.handle_getjobtemplate() } } "status" => { self.handle_status(&stratum_stats.worker_stats[worker_stats_id]) } _ => { // Called undefined method let e = RpcError { code: -32601, message: "Method not found".to_string(), }; Err(serde_json::to_value(e).unwrap()) } }; // Package the reply as RpcResponse json let rpc_response: String; match response { Err(response) => { let resp = RpcResponse { id: request.id, jsonrpc: String::from("2.0"), method: request.method, result: None, error: Some(response), }; rpc_response = serde_json::to_string(&resp).unwrap(); } Ok(response) => { let resp = RpcResponse { id: request.id, jsonrpc: String::from("2.0"), method: request.method, result: Some(response), error: None, }; rpc_response = serde_json::to_string(&resp).unwrap(); } } // Send the reply workers_l[num].write_message(rpc_response); } None => {} // No message for us from this worker } } } // Handle STATUS message fn handle_status(&self, worker_stats: &WorkerStats) -> Result<Value, Value> { // Return worker status in json for use by a dashboard or healthcheck. 
let status = WorkerStatus { id: worker_stats.id.clone(), height: self.current_block_versions.last().unwrap().header.height, difficulty: worker_stats.pow_difficulty, accepted: worker_stats.num_accepted, rejected: worker_stats.num_rejected, stale: worker_stats.num_stale, }; let response = serde_json::to_value(&status).unwrap(); return Ok(response); } // Handle GETJOBTEMPLATE message fn handle_getjobtemplate(&self) -> Result<Value, Value> { // Build a JobTemplate from a BlockHeader and return JSON let job_template = self.build_block_template(); let response = serde_json::to_value(&job_template).unwrap(); debug!( LOGGER, "(Server ID: {}) sending block {} with id {} to single worker", self.id, job_template.height, job_template.job_id, ); return Ok(response); } // Handle KEEPALIVE message fn handle_keepalive(&self) -> Result<Value, Value> { return Ok(serde_json::to_value("ok".to_string()).unwrap()); } // Handle LOGIN message fn handle_login(&self, params: Option<Value>, worker: &mut Worker) -> Result<Value, Value> { let params: LoginParams = parse_params(params)?; worker.login = Some(params.login); // XXX TODO Future - Validate password? 
worker.agent = params.agent; worker.authenticated = true; return Ok(serde_json::to_value("ok".to_string()).unwrap()); } // Handle SUBMIT message // params contains a solved block header // We accept and log valid shares of all difficulty above configured minimum // Accepted shares that are full solutions will also be submitted to the // network fn handle_submit( &self, params: Option<Value>, worker: &mut Worker, worker_stats: &mut WorkerStats, ) -> Result<(Value, bool), Value> { // Validate parameters let params: SubmitParams = parse_params(params)?; let share_difficulty: u64; let mut share_is_block = false; if params.height != self.current_block_versions.last().unwrap().header.height { // Return error status error!( LOGGER, "(Server ID: {}) Share at height {} submitted too late", self.id, params.height, ); worker_stats.num_stale += 1; let e = RpcError { code: -32503, message: "Solution submitted too late".to_string(), }; return Err(serde_json::to_value(e).unwrap()); } // Find the correct version of the block to match this header let b: Option<&Block> = self.current_block_versions.get(params.job_id as usize); if b.is_none() { // Return error status error!( LOGGER, "(Server ID: {}) Failed to validate solution at height {}: invalid job_id {}", self.id, params.height, params.job_id, ); worker_stats.num_rejected += 1; let e = RpcError { code: -32502, message: "Failed to validate solution".to_string(), }; return Err(serde_json::to_value(e).unwrap()); } let mut b: Block = b.unwrap().clone(); // Reconstruct the block header with this nonce and pow added b.header.pow.nonce = params.nonce; b.header.pow.proof.nonces = params.pow; // Get share difficulty share_difficulty = b.header.pow.to_difficulty().to_num(); // If the difficulty is too low its an error if share_difficulty < self.minimum_share_difficulty { // Return error status error!( LOGGER, "(Server ID: {}) Share rejected due to low difficulty: {}/{}", self.id, share_difficulty, self.minimum_share_difficulty, ); 
worker_stats.num_rejected += 1; let e = RpcError { code: -32501, message: "Share rejected due to low difficulty".to_string(), }; return Err(serde_json::to_value(e).unwrap()); } // If the difficulty is high enough, submit it (which also validates it) if share_difficulty >= self.current_difficulty { // This is a full solution, submit it to the network let res = self.chain.process_block(b.clone(), chain::Options::MINE); if let Err(e) = res { // Return error status error!( LOGGER, "(Server ID: {}) Failed to validate solution at height {}: {:?}", self.id, params.height, e ); worker_stats.num_rejected += 1; let e = RpcError { code: -32502, message: "Failed to validate solution".to_string(), }; return Err(serde_json::to_value(e).unwrap()); } share_is_block = true; // Log message to make it obvious we found a block warn!( LOGGER, "(Server ID: {}) Solution Found for block {} - Yay!!!", self.id, params.height ); } else { // Do some validation but dont submit if !pow::verify_size(&b.header, global::min_sizeshift()) { // Return error status error!( LOGGER, "(Server ID: {}) Failed to validate share at height {} with nonce {} using job_id {}", self.id, params.height, b.header.pow.nonce, params.job_id, ); worker_stats.num_rejected += 1; let e = RpcError { code: -32502, message: "Failed to validate solution".to_string(), }; return Err(serde_json::to_value(e).unwrap()); } } // Log this as a valid share let submitted_by = match worker.login.clone() { None => worker.id.to_string(), Some(login) => login.clone(), }; info!( LOGGER, "(Server ID: {}) Got share for block: hash {}, height {}, nonce {}, difficulty {}/{}, submitted by {}", self.id, b.hash(), b.header.height, b.header.pow.nonce, share_difficulty, self.current_difficulty, submitted_by, ); worker_stats.num_accepted += 1; let submit_response; if share_is_block { submit_response = format!("blockfound - {}", b.hash().to_hex()); } else { submit_response = "ok".to_string(); } return Ok(( 
serde_json::to_value(submit_response).unwrap(), share_is_block, )); } // handle submit a solution // Purge dead/sick workers - remove all workers marked in error state fn clean_workers(&mut self, stratum_stats: &mut Arc<RwLock<StratumStats>>) -> usize { let mut start = 0; let mut workers_l = self.workers.lock().unwrap(); loop { for num in start..workers_l.len() { if workers_l[num].error == true { warn!( LOGGER, "(Server ID: {}) Dropping worker: {}", self.id, workers_l[num].id; ); // Update worker stats let mut stratum_stats = stratum_stats.write().unwrap(); let worker_stats_id = stratum_stats .worker_stats .iter() .position(|r| r.id == workers_l[num].id) .unwrap(); stratum_stats.worker_stats[worker_stats_id].is_connected = false; // Remove the dead worker workers_l.remove(num); break; } start = num + 1; } if start >= workers_l.len() { let mut stratum_stats = stratum_stats.write().unwrap(); stratum_stats.num_workers = workers_l.len(); return stratum_stats.num_workers; } } } // Broadcast a jobtemplate RpcRequest to all connected workers - no response // expected fn broadcast_job(&mut self) { // Package new block into RpcRequest let job_template = self.build_block_template(); let job_template_json = serde_json::to_string(&job_template).unwrap(); // Issue #1159 - use a serde_json Value type to avoid extra quoting let job_template_value: Value = serde_json::from_str(&job_template_json).unwrap(); let job_request = RpcRequest { id: String::from("Stratum"), jsonrpc: String::from("2.0"), method: String::from("job"), params: Some(job_template_value), }; let job_request_json = serde_json::to_string(&job_request).unwrap(); debug!( LOGGER, "(Server ID: {}) sending block {} with id {} to stratum clients", self.id, job_template.height, job_template.job_id, ); // Push the new block to all connected clients // NOTE: We do not give a unique nonce (should we?) 
so miners need // to choose one for themselves let mut workers_l = self.workers.lock().unwrap(); for num in 0..workers_l.len() { workers_l[num].write_message(job_request_json.clone()); } } /// "main()" - Starts the stratum-server. Creates a thread to Listens for /// a connection, then enters a loop, building a new block on top of the /// existing chain anytime required and sending that to the connected /// stratum miner, proxy, or pool, and accepts full solutions to /// be submitted. pub fn run_loop( &mut self, stratum_stats: Arc<RwLock<StratumStats>>, cuckoo_size: u32, proof_size: usize, sync_state: Arc<SyncState>, ) { info!( LOGGER, "(Server ID: {}) Starting stratum server with cuckoo_size = {}, proof_size = {}", self.id, cuckoo_size, proof_size ); self.sync_state = sync_state; // "globals" for this function let attempt_time_per_block = self.config.attempt_time_per_block; let mut deadline: i64 = 0; // to prevent the wallet from generating a new HD key derivation for each // iteration, we keep the returned derivation to provide it back when // nothing has changed. We only want to create a key_id for each new block, // and reuse it when we rebuild the current block to add new tx. 
let mut num_workers: usize; let mut head = self.chain.head().unwrap(); let mut current_hash = head.prev_block_h; let mut latest_hash; let listen_addr = self.config.stratum_server_addr.clone().unwrap(); self.current_block_versions.push(Block::default()); // Start a thread to accept new worker connections let mut workers_th = self.workers.clone(); let id_th = self.id.clone(); let mut stats_th = stratum_stats.clone(); let _listener_th = thread::spawn(move || { accept_workers(id_th, listen_addr, &mut workers_th, &mut stats_th); }); // We have started { let mut stratum_stats = stratum_stats.write().unwrap(); stratum_stats.is_running = true; stratum_stats.cuckoo_size = cuckoo_size as u16; } warn!( LOGGER, "Stratum server started on {}", self.config.stratum_server_addr.clone().unwrap() ); // Main Loop loop { // If we're fallen into sync mode, (or are just starting up, // tell connected clients to stop what they're doing let mining_stopped = self.sync_state.is_syncing(); // Remove workers with failed connections num_workers = self.clean_workers(&mut stratum_stats.clone()); // get the latest chain state head = self.chain.head().unwrap(); latest_hash = head.last_block_h; // Build a new block if: // There is a new block on the chain // or We are rebuilding the current one to include new transactions // and we're not synching // and there is at least one worker connected if (current_hash != latest_hash || Utc::now().timestamp() >= deadline) && !mining_stopped && num_workers > 0 { let mut wallet_listener_url: Option<String> = None; if !self.config.burn_reward { wallet_listener_url = Some(self.config.wallet_listener_url.clone()); } // If this is a new block, clear the current_block version history if current_hash != latest_hash { self.current_block_versions.clear(); } // Build the new block (version) let (new_block, block_fees) = mine_block::get_block( &self.chain, &self.tx_pool, self.verifier_cache.clone(), self.current_key_id.clone(), wallet_listener_url, ); 
self.current_difficulty = (new_block.header.total_difficulty() - head.total_difficulty).to_num(); self.current_key_id = block_fees.key_id(); current_hash = latest_hash; // set the minimum acceptable share difficulty for this block self.minimum_share_difficulty = cmp::min( self.config.minimum_share_difficulty, self.current_difficulty, ); // set a new deadline for rebuilding with fresh transactions deadline = Utc::now().timestamp() + attempt_time_per_block as i64; { let mut stratum_stats = stratum_stats.write().unwrap(); stratum_stats.block_height = new_block.header.height; stratum_stats.network_difficulty = self.current_difficulty; } // Add this new block version to our current block map self.current_block_versions.push(new_block); // Send this job to all connected workers self.broadcast_job(); } // Handle any messages from the workers self.handle_rpc_requests(&mut stratum_stats.clone()); // sleep before restarting loop thread::sleep(Duration::from_millis(50)); } // Main Loop } // fn run_loop() } // StratumServer // Utility function to parse a JSON RPC parameter object, returning a proper // error if things go wrong. fn parse_params<T>(params: Option<Value>) -> Result<T, Value> where for<'de> T: serde::Deserialize<'de>, { params .and_then(|v| serde_json::from_value(v).ok()) .ok_or_else(|| { let e = RpcError { code: -32600, message: "Invalid Request".to_string(), }; serde_json::to_value(e).unwrap() }) }
29.532484
106
0.653496
ccf117395febcd36a69b2175d248fefa371541fc
10,776
// Note: section 7.2.3 shows which pins support I2C Hs mode use crate::clock; use crate::time::Hertz; use crate::hal::blocking::i2c::{Read, Write, WriteRead}; use crate::target_device::sercom0::I2CM; use crate::target_device::{MCLK, SERCOM0, SERCOM1, SERCOM2, SERCOM3}; use crate::target_device::{SERCOM4, SERCOM5}; const BUS_STATE_IDLE: u8 = 1; const BUS_STATE_OWNED: u8 = 2; const MASTER_ACT_READ: u8 = 2; const MASTER_ACT_STOP: u8 = 3; /// Define an I2C master type for the given SERCOM and pad pair. macro_rules! i2c { ([ $($Type:ident: ($pad0:ident, $pad1:ident, $SERCOM:ident, $powermask:ident, $clock:ident, $apmask:ident),)+ ]) => { $( /// Represents the Sercom instance configured to act as an I2C Master. /// The embedded_hal blocking I2C traits are implemented by this instance. pub struct $Type<$pad0, $pad1> { sda: $pad0, scl: $pad1, sercom: $SERCOM, } impl<$pad0, $pad1> $Type<$pad0, $pad1> { /// Configures the sercom instance to work as an I2C Master. /// The clock is obtained via the `GenericClockGenerator` type. /// `freq` specifies the bus frequency to use for I2C communication. /// There are typically a handful of values that tend to be supported; /// standard mode is 100.khz(), full speed mode is 400.khz(). /// The hardware in the atsamd device supports fast mode at 1.mhz() /// and fast mode, but there may be additional hardware configuration /// missing from the current software implementation that prevents that /// from working as-written today. /// /// ```no_run /// let mut i2c = I2CMaster3::new( /// &clocks.sercom3_core(&gclk0).unwrap(), /// 400.khz(), /// p.device.SERCOM3, /// &mut p.device.MCLK, /// // Metro M0 express has I2C on pins PA22, PA23 /// pins.pa22.into_pad(&mut pins.port), /// pins.pa23.into_pad(&mut pins.port), /// ); /// ``` pub fn new<F: Into<Hertz>>( clock: &clock::$clock, freq: F, sercom: $SERCOM, mclk: &mut MCLK, sda: $pad0, scl: $pad1, ) -> Self { // Power up the peripheral bus clock. 
// safe because we're exclusively owning SERCOM mclk.$apmask.modify(|_, w| w.$powermask().set_bit()); unsafe { // reset the sercom instance sercom.i2cm().ctrla.modify(|_, w| w.swrst().set_bit()); // wait for reset to complete while sercom.i2cm().syncbusy.read().swrst().bit_is_set() || sercom.i2cm().ctrla.read().swrst().bit_is_set() {} // Put the hardware into i2c master mode sercom.i2cm().ctrla.modify(|_, w| w.mode().i2c_master()); // wait for configuration to take effect while sercom.i2cm().syncbusy.read().enable().bit_is_set() {} // set the baud rate let gclk = clock.freq(); let baud = (gclk.0 / (2 * freq.into().0) - 1) as u8; sercom.i2cm().baud.modify(|_, w| w.baud().bits(baud)); sercom.i2cm().ctrla.modify(|_, w| w.enable().set_bit()); // wait for configuration to take effect while sercom.i2cm().syncbusy.read().enable().bit_is_set() {} // set the bus idle sercom .i2cm() .status .modify(|_, w| w.busstate().bits(BUS_STATE_IDLE)); // wait for it to take effect while sercom.i2cm().syncbusy.read().sysop().bit_is_set() {} } Self { sda, scl, sercom } } /// Breaks the sercom device up into its constituent pins and the SERCOM /// instance. Does not make any changes to power management. pub fn free(self) -> ($pad0, $pad1, $SERCOM) { (self.sda, self.scl, self.sercom) } fn start_tx_write(&mut self, addr: u8) -> Result<(), I2CError> { loop { match self.i2cm().status.read().busstate().bits() { BUS_STATE_IDLE | BUS_STATE_OWNED => break, _ => continue, } } // Signal start and transmit encoded address. 
unsafe { self.i2cm() .addr .write(|w| w.addr().bits((addr as u16) << 1)); } // wait for transmission to complete while !self.i2cm().intflag.read().mb().bit_is_set() {} self.status_to_err() } fn status_to_err(&mut self) -> Result<(), I2CError> { let status = self.i2cm().status.read(); if status.arblost().bit_is_set() { return Err(I2CError::ArbitrationLost); } if status.buserr().bit_is_set() { return Err(I2CError::BusError); } if status.rxnack().bit_is_set() { return Err(I2CError::Nack); } if status.lowtout().bit_is_set() || status.sexttout().bit_is_set() || status.mexttout().bit_is_set() { return Err(I2CError::Timeout); } Ok(()) } fn start_tx_read(&mut self, addr: u8) -> Result<(), I2CError> { loop { match self.i2cm().status.read().busstate().bits() { BUS_STATE_IDLE | BUS_STATE_OWNED => break, _ => continue, } } self.i2cm().intflag.modify(|_, w| w.error().clear_bit()); // Signal start (or rep start if appropriate) // and transmit encoded address. unsafe { self.i2cm() .addr .write(|w| w.addr().bits(((addr as u16) << 1) | 1)); } // wait for transmission to complete loop { let intflag = self.i2cm().intflag.read(); // If arbitration was lost, it will be signalled via the mb bit if intflag.mb().bit_is_set() { return Err(I2CError::ArbitrationLost); } if intflag.sb().bit_is_set() || intflag.error().bit_is_set() { break; } } self.status_to_err() } fn wait_sync(&mut self) { while self.i2cm().syncbusy.read().sysop().bit_is_set() {} } fn cmd(&mut self, cmd: u8) { unsafe { self.i2cm().ctrlb.modify(|_, w| w.cmd().bits(cmd)); } self.wait_sync(); } fn cmd_stop(&mut self) { self.cmd(MASTER_ACT_STOP) } fn cmd_read(&mut self) { unsafe { self.i2cm().ctrlb.modify(|_, w| { // clear bit means send ack w.ackact().clear_bit(); w.cmd().bits(MASTER_ACT_READ) }); } self.wait_sync(); } fn i2cm(&mut self) -> &I2CM { self.sercom.i2cm() } fn send_bytes(&mut self, bytes: &[u8]) -> Result<(), I2CError> { for b in bytes { unsafe { self.i2cm().data.write(|w| w.bits(*b as u32)); } loop { let intflag = 
self.i2cm().intflag.read(); if intflag.mb().bit_is_set() || intflag.error().bit_is_set() { break; } } self.status_to_err()?; } Ok(()) } fn read_one(&mut self) -> u8 { while !self.i2cm().intflag.read().sb().bit_is_set() {} self.i2cm().data.read().bits() as u8 } fn fill_buffer(&mut self, buffer: &mut [u8]) -> Result<(), I2CError> { // Some manual iterator gumph because we need to ack bytes after the first. let mut iter = buffer.iter_mut(); *iter.next().expect("buffer len is at least 1") = self.read_one(); loop { match iter.next() { None => break, Some(dest) => { // Ack the last byte so that we can receive another one self.cmd_read(); *dest = self.read_one(); } } } // arrange to send nack on next command to // stop slave from transmitting more data self.i2cm().ctrlb.modify(|_, w| w.ackact().set_bit()); Ok(()) } fn do_write(&mut self, addr: u8, bytes: &[u8]) -> Result<(), I2CError> { self.start_tx_write(addr)?; self.send_bytes(bytes) } fn do_read(&mut self, addr: u8, buffer: &mut [u8]) -> Result<(), I2CError> { self.start_tx_read(addr)?; self.fill_buffer(buffer) } fn do_write_read(&mut self, addr: u8, bytes: &[u8], buffer: &mut [u8]) -> Result<(), I2CError> { self.start_tx_write(addr)?; self.send_bytes(bytes)?; self.start_tx_read(addr)?; self.fill_buffer(buffer) } } impl<$pad0, $pad1> Write for $Type<$pad0, $pad1> { type Error = I2CError; /// Sends bytes to slave with address `addr` fn write(&mut self, addr: u8, bytes: &[u8]) -> Result<(), Self::Error> { let res = self.do_write(addr, bytes); self.cmd_stop(); res } } impl<$pad0, $pad1> Read for $Type<$pad0, $pad1> { type Error = I2CError; fn read(&mut self, addr: u8, buffer: &mut [u8]) -> Result<(), Self::Error> { let res = self.do_read(addr, buffer); self.cmd_stop(); res } } impl<$pad0, $pad1> WriteRead for $Type<$pad0, $pad1> { type Error = I2CError; fn write_read(&mut self, addr: u8, bytes: &[u8], buffer: &mut [u8]) -> Result<(), Self::Error> { let res = self.do_write_read(addr, bytes, buffer); self.cmd_stop(); res } 
} )+ }; } i2c!([ I2CMaster0: ( Sercom0Pad0, Sercom0Pad1, SERCOM0, sercom0_, Sercom0CoreClock, apbamask ), I2CMaster1: ( Sercom1Pad0, Sercom1Pad1, SERCOM1, sercom1_, Sercom1CoreClock, apbamask ), I2CMaster2: ( Sercom2Pad0, Sercom2Pad1, SERCOM2, sercom2_, Sercom2CoreClock, apbbmask ), I2CMaster3: ( Sercom3Pad0, Sercom3Pad1, SERCOM3, sercom3_, Sercom3CoreClock, apbbmask ), ]); i2c!([ I2CMaster4: ( Sercom4Pad0, Sercom4Pad1, SERCOM4, sercom4_, Sercom4CoreClock, apbdmask ), I2CMaster5: ( Sercom5Pad0, Sercom5Pad1, SERCOM5, sercom5_, Sercom5CoreClock, apbdmask ), ]); #[derive(Debug)] pub enum I2CError { ArbitrationLost, AddressError, BusError, Timeout, Nack, }
28.967742
114
0.517446
ac73620825ecb1332e2eec5010181e74aa931469
4,327
// Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0. use std::fmt; use std::str; // A struct that divide a name into serveral parts that meets rust's guidelines. struct NameSpliter<'a> { name: &'a [u8], pos: usize, } impl<'a> NameSpliter<'a> { fn new(s: &str) -> NameSpliter { NameSpliter { name: s.as_bytes(), pos: 0, } } } impl<'a> Iterator for NameSpliter<'a> { type Item = &'a str; fn next(&mut self) -> Option<&'a str> { if self.pos == self.name.len() { return None; } // skip all prefix '_' while self.pos < self.name.len() && self.name[self.pos] == b'_' { self.pos += 1; } let mut pos = self.name.len(); let mut upper_len = 0; let mut meet_lower = false; for i in self.pos..self.name.len() { let c = self.name[i]; if b'A' <= c && c <= b'Z' { if meet_lower { // So it should be AaA or aaA pos = i; break; } upper_len += 1; } else if c == b'_' { pos = i; break; } else { meet_lower = true; if upper_len > 1 { // So it should be AAa pos = i - 1; break; } } } let s = str::from_utf8(&self.name[self.pos..pos]).unwrap(); self.pos = pos; Some(s) } } /// Adjust method name to follow rust-guidelines. 
pub fn to_snake_case(name: &str) -> String { let mut snake_method_name = String::with_capacity(name.len()); for s in NameSpliter::new(name) { snake_method_name.push_str(&s.to_lowercase()); snake_method_name.push('_'); } snake_method_name.pop(); snake_method_name } #[cfg(feature = "protobuf-codec")] pub fn to_camel_case(name: &str) -> String { let mut camel_case_name = String::with_capacity(name.len()); for s in NameSpliter::new(name) { let mut chs = s.chars(); camel_case_name.extend(chs.next().unwrap().to_uppercase()); camel_case_name.push_str(&s[1..].to_lowercase()); } camel_case_name } pub fn fq_grpc(item: &str) -> String { format!("::grpcio::{}", item) } pub enum MethodType { Unary, ClientStreaming, ServerStreaming, Duplex, } impl fmt::Display for MethodType { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, "{}", match self { MethodType::Unary => "MethodType::Unary", MethodType::ClientStreaming => "MethodType::ClientStreaming", MethodType::ServerStreaming => "MethodType::ServerStreaming", MethodType::Duplex => "MethodType::Duplex", } ) } } #[cfg(test)] mod test { #[test] fn test_snake_name() { let cases = vec![ ("AsyncRequest", "async_request"), ("asyncRequest", "async_request"), ("async_request", "async_request"), ("createID", "create_id"), ("AsyncRClient", "async_r_client"), ("CreateIDForReq", "create_id_for_req"), ("Create_ID_For_Req", "create_id_for_req"), ("Create_ID_For__Req", "create_id_for_req"), ("ID", "id"), ("id", "id"), ]; for (origin, exp) in cases { let res = super::to_snake_case(&origin); assert_eq!(res, exp); } } #[test] fn test_camel_name() { let cases = vec![ ("AsyncRequest", "AsyncRequest"), ("asyncRequest", "AsyncRequest"), ("async_request", "AsyncRequest"), ("createID", "CreateId"), ("AsyncRClient", "AsyncRClient"), ("async_r_client", "AsyncRClient"), ("CreateIDForReq", "CreateIdForReq"), ("Create_ID_For_Req", "CreateIdForReq"), ("Create_ID_For__Req", "CreateIdForReq"), ("ID", "Id"), ("id", "Id"), ]; for (origin, exp) in 
cases { let res = super::to_camel_case(&origin); assert_eq!(res, exp); } } }
27.916129
80
0.497342
edb28090cedbd507fc6d04e8acb0d3cace01a9cd
190
pub fn data_key(s3_prefix: &str, path: &str) -> String { format!("{}data/{}", s3_prefix, path) } pub fn manifest_key(s3_prefix: &str) -> String { format!("{}manifest", s3_prefix) }
23.75
56
0.626316
617fe345da280940c1bb5cb7f0e451cae47899ec
13,761
use std::convert::TryFrom; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::Arc; use std::time::{Duration, Instant}; use anyhow::Result; use ton_api::ton::{self, TLObject}; use super::neighbour::*; use super::neighbours_cache::*; use crate::dht_node::*; use crate::overlay_node::*; use crate::utils::*; pub struct Neighbours { dht: Arc<DhtNode>, overlay: Arc<OverlayNode>, overlay_id: OverlayIdShort, options: NeighboursOptions, cache: Arc<NeighboursCache>, overlay_peers: FxDashSet<AdnlNodeIdShort>, failed_attempts: AtomicU64, all_attempts: AtomicU64, start: Instant, } #[derive(Debug, Copy, Clone, serde::Serialize, serde::Deserialize)] #[serde(default)] pub struct NeighboursOptions { /// Default: 16 pub max_neighbours: usize, /// Default: 10 pub reloading_min_interval_sec: u32, /// Default: 30 pub reloading_max_interval_sec: u32, /// Default: 500 pub ping_interval_ms: u64, /// Default: 1000 pub search_interval_ms: u64, /// Default: 10 pub ping_min_timeout_ms: u64, /// Default: 1000 pub ping_max_timeout_ms: u64, /// Default: 6 pub max_ping_tasks: usize, } impl Default for NeighboursOptions { fn default() -> Self { Self { max_neighbours: 16, reloading_min_interval_sec: 10, reloading_max_interval_sec: 30, ping_interval_ms: 500, search_interval_ms: 1000, ping_min_timeout_ms: 10, ping_max_timeout_ms: 1000, max_ping_tasks: 6, } } } impl Neighbours { pub fn new( dht: &Arc<DhtNode>, overlay: &Arc<OverlayNode>, overlay_id: &OverlayIdShort, initial_peers: &[AdnlNodeIdShort], options: NeighboursOptions, ) -> Arc<Self> { let cache = Arc::new(NeighboursCache::new(initial_peers, options.max_neighbours)); Arc::new(Self { dht: dht.clone(), overlay: overlay.clone(), overlay_id: *overlay_id, options, cache, overlay_peers: Default::default(), failed_attempts: Default::default(), all_attempts: Default::default(), start: Instant::now(), }) } pub fn options(&self) -> &NeighboursOptions { &self.options } pub fn start_reloading_neighbours(self: &Arc<Self>) { use 
rand::distributions::Distribution; let neighbours = Arc::downgrade(self); let (min_ms, max_ms) = ordered_boundaries( self.options.reloading_min_interval_sec, self.options.reloading_max_interval_sec, ); let distribution = rand::distributions::Uniform::new(min_ms, max_ms); tokio::spawn(async move { loop { let sleep_duration = distribution.sample(&mut rand::thread_rng()) as u64; tokio::time::sleep(Duration::from_secs(sleep_duration)).await; let neighbours = match neighbours.upgrade() { Some(neighbours) => neighbours, None => return, }; if let Err(e) = neighbours.reload_neighbours(&neighbours.overlay_id) { log::warn!("Failed to reload neighbours: {}", e); } } }); } pub fn start_pinging_neighbours(self: &Arc<Self>) { let interval = Duration::from_millis(self.options.ping_interval_ms); let neighbours = Arc::downgrade(self); tokio::spawn(async move { loop { let neighbours = match neighbours.upgrade() { Some(neighbours) => neighbours, None => return, }; if let Err(e) = neighbours.ping_neighbours().await { log::warn!("Failed to ping neighbours: {}", e); tokio::time::sleep(interval).await; } } }); } pub fn start_searching_peers(self: &Arc<Self>) { let interval = Duration::from_millis(self.options.search_interval_ms); let neighbours = Arc::downgrade(self); tokio::spawn(async move { tokio::time::sleep(interval).await; let neighbours = match neighbours.upgrade() { Some(neighbours) => neighbours, None => return, }; let mut external_iter = ExternalNeighboursCacheIter::new(); while let Some(peer_id) = external_iter.get(&neighbours.cache) { external_iter.bump(); match neighbours .overlay .get_random_peers(&neighbours.overlay_id, &peer_id, None) .await { Ok(Some(peers)) => { let mut new_peers = Vec::new(); for peer in peers.into_iter() { match AdnlNodeIdFull::try_from(&peer.id) .and_then(|full_id| full_id.compute_short_id()) { Ok(peer_id) => { if !neighbours.contains_overlay_peer(&peer_id) { new_peers.push(peer_id); } } Err(e) => log::warn!("Failed to process peer: {}", e), } } if 
!new_peers.is_empty() { neighbours.add_new_peers(new_peers); } } Err(e) => { log::warn!("Failed to get random peers: {}", e); } _ => {} } } }); } pub fn len(&self) -> usize { self.cache.len() } pub fn is_empty(&self) -> bool { self.cache.is_empty() } pub fn contains(&self, peer_id: &AdnlNodeIdShort) -> bool { self.cache.contains(peer_id) } pub fn add(&self, peer_id: AdnlNodeIdShort) -> bool { self.cache.insert(peer_id) } pub fn contains_overlay_peer(&self, peer_id: &AdnlNodeIdShort) -> bool { self.overlay_peers.contains(peer_id) } pub fn add_overlay_peer(&self, peer_id: AdnlNodeIdShort) { self.overlay_peers.insert(peer_id); } pub fn remove_overlay_peer(&self, peer_id: &AdnlNodeIdShort) { self.overlay_peers.remove(peer_id); } pub fn choose_neighbour(&self) -> Option<Arc<Neighbour>> { let mut rng = rand::thread_rng(); let average_failures = self.failed_attempts.load(Ordering::Acquire) as f64 / std::cmp::max(self.all_attempts.load(Ordering::Acquire), 1) as f64; self.cache.choose_neighbour(&mut rng, average_failures) } pub fn reload_neighbours(&self, overlay_id: &OverlayIdShort) -> Result<()> { log::trace!("Start reload_neighbours (overlay: {})", overlay_id); let peers = PeersCache::with_capacity(self.options.max_neighbours * 2 + 1); self.overlay .write_cached_peers(overlay_id, self.options.max_neighbours * 2, &peers)?; self.process_neighbours(peers)?; log::trace!("Finish reload_neighbours (overlay: {})", overlay_id); Ok(()) } pub async fn ping_neighbours(self: &Arc<Self>) -> Result<()> { let neighbour_count = self.cache.len(); if neighbour_count == 0 { return Err(NeighboursError::NoPeersInOverlay(self.overlay_id).into()); } else { log::trace!( "Pinging neighbours in overlay {} (count: {})", self.overlay_id, neighbour_count, ) } let max_tasks = std::cmp::min(neighbour_count, self.options.max_ping_tasks); let mut response_collector = LimitedResponseCollector::new(max_tasks); loop { let neighbour = match self.cache.get_next_for_ping(&self.start) { Some(neighbour) => 
neighbour, None => { log::trace!("No neighbours to ping"); tokio::time::sleep(Duration::from_millis(self.options.ping_min_timeout_ms)) .await; continue; } }; let ms_since_last_ping = self.elapsed().saturating_sub(neighbour.last_ping()); let additional_sleep = if ms_since_last_ping < self.options.ping_max_timeout_ms { self.options .ping_max_timeout_ms .saturating_sub(ms_since_last_ping) } else { self.options.ping_min_timeout_ms }; tokio::time::sleep(Duration::from_millis(additional_sleep)).await; if let Some(response_tx) = response_collector.make_request() { let neighbours = self.clone(); tokio::spawn(async move { if let Err(e) = neighbours.update_capabilities(neighbour).await { log::debug!("Failed to ping peer: {}", e); } response_tx.send(Some(())); }); } else { while response_collector.count_pending() > 0 { response_collector.wait(false).await; } } } } pub fn add_new_peers(self: &Arc<Self>, peers: Vec<AdnlNodeIdShort>) { let neighbours = self.clone(); tokio::spawn(async move { for peer_id in peers.into_iter() { log::trace!( "add_new_peers: start searching address for peer {}", peer_id ); match neighbours.dht.find_address(&peer_id).await { Ok((ip, _)) => { log::info!("add_new_peers: found overlay peer address: {}", ip); neighbours.add_overlay_peer(peer_id); } Err(e) => { log::warn!("add_new_peers: failed to find overlay peer address: {}", e); } } } }); } pub fn update_neighbour_stats( &self, peer_id: &AdnlNodeIdShort, roundtrip: u64, success: bool, is_rldp: bool, update_attempts: bool, ) { let neighbour = match self.cache.get(peer_id) { Some(neighbour) => neighbour, None => return, }; neighbour.update_stats(roundtrip, success, is_rldp, update_attempts); if update_attempts { self.all_attempts.fetch_add(1, Ordering::Release); if !success { self.failed_attempts.fetch_add(1, Ordering::Release); } } } pub fn set_neighbour_capabilities( &self, peer_id: &AdnlNodeIdShort, capabilities: &ton::ton_node::Capabilities, ) { if let Some(neighbour) = self.cache.get(peer_id) { 
neighbour.update_proto_version(capabilities); } } async fn update_capabilities(self: &Arc<Self>, neighbour: Arc<Neighbour>) -> Result<()> { let now = Instant::now(); neighbour.set_last_ping(self.elapsed()); let query = TLObject::new(ton::rpc::ton_node::GetCapabilities); log::trace!( "Query capabilities from {} in {}", neighbour.peer_id(), self.overlay_id ); let timeout = Some( self.dht .adnl() .compute_query_timeout(neighbour.roundtrip_adnl()), ); match self .overlay .query(&self.overlay_id, neighbour.peer_id(), &query, timeout) .await { Ok(Some(answer)) => { let capabilities = parse_answer::<ton::ton_node::Capabilities>(answer)?; log::debug!( "Got capabilities from {} {}: {:?}", neighbour.peer_id(), self.overlay_id, capabilities ); let roundtrip = now.elapsed().as_millis() as u64; self.update_neighbour_stats(neighbour.peer_id(), roundtrip, true, false, false); self.set_neighbour_capabilities(neighbour.peer_id(), &capabilities); Ok(()) } _ => Err(NeighboursError::NoCapabilitiesReceived(*neighbour.peer_id()).into()), } } fn process_neighbours(&self, peers: PeersCache) -> Result<()> { let mut cache = self.cache.write(); let mut rng = rand::thread_rng(); for peer_id in peers { if cache.contains(&peer_id) { continue; } let (hint, unreliable_peer) = cache.insert_or_replace_unreliable(&mut rng, peer_id); if let Some(unreliable_peer) = unreliable_peer { self.overlay .delete_public_peer(&self.overlay_id, &unreliable_peer)?; self.overlay_peers.remove(&unreliable_peer); } if hint == NeighboursCacheHint::DefinitelyFull { break; } } Ok(()) } fn elapsed(&self) -> u64 { self.start.elapsed().as_millis() as u64 } } #[derive(thiserror::Error, Debug)] enum NeighboursError { #[error("No peers in overlay {}", .0)] NoPeersInOverlay(OverlayIdShort), #[error("No capabilities received for {}", .0)] NoCapabilitiesReceived(AdnlNodeIdShort), }
32.686461
96
0.528886
c1d00b6326a38cea6465f187a747de4a066c31dd
1,635
#[path = "protobuf/sf.near.codec.v1.rs"] mod pbcodec; use graph::{ blockchain::Block, blockchain::BlockPtr, prelude::{hex, web3::types::H256, BlockNumber}, }; use std::convert::TryFrom; use std::fmt::LowerHex; pub use pbcodec::*; impl From<&CryptoHash> for H256 { fn from(input: &CryptoHash) -> Self { H256::from_slice(&input.bytes) } } impl LowerHex for &CryptoHash { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str(&hex::encode(&self.bytes)) } } impl BlockWrapper { pub fn block(&self) -> &pbcodec::Block { self.block.as_ref().unwrap() } pub fn header(&self) -> &BlockHeader { self.block().header.as_ref().unwrap() } pub fn parent_ptr(&self) -> Option<BlockPtr> { let header = self.header(); match (header.prev_hash.as_ref(), header.prev_height) { (Some(hash), number) => Some(BlockPtr::from((hash.into(), number))), _ => None, } } } impl From<BlockWrapper> for BlockPtr { fn from(b: BlockWrapper) -> BlockPtr { (&b).into() } } impl<'a> From<&'a BlockWrapper> for BlockPtr { fn from(b: &'a BlockWrapper) -> BlockPtr { let header = b.header(); let hash: H256 = header.hash.as_ref().unwrap().into(); BlockPtr::from((hash, header.height)) } } impl Block for BlockWrapper { fn number(&self) -> i32 { BlockNumber::try_from(self.header().height).unwrap() } fn ptr(&self) -> BlockPtr { self.into() } fn parent_ptr(&self) -> Option<BlockPtr> { self.parent_ptr() } }
22.39726
80
0.57737
18e02b2ad4cdd1ab0c6fc21f304a3a162079bff0
15
pub mod errors;
15
15
0.8
ffcd18ee40d9f631fcdda0f12c5022853e939e92
6,284
#[macro_use] extern crate clap; #[macro_use] extern crate failure; extern crate gb_live32; extern crate itertools; #[macro_use] extern crate log; extern crate rand; extern crate serialport; extern crate simplelog; use clap::{Arg, ArgMatches}; use failure::Error; use gb_live32::Gbl32; use rand::rngs::SmallRng; use rand::{FromEntropy, RngCore}; use serialport::SerialPortType; use simplelog::{LevelFilter, TermLogger}; use std::ffi::{OsStr, OsString}; use std::fs::File; use std::io::{self, Read, Write}; use std::process; use std::thread; fn scan_ports() -> Result<Vec<OsString>, Error> { let ports = serialport::available_ports()? .into_iter() .filter_map(|port| { if let SerialPortType::UsbPort(info) = port.port_type { let name = port.port_name; let manufacturer = info.manufacturer.as_ref().map_or("", String::as_ref); let product = info.product.as_ref().map_or("", String::as_ref); let serial_number = info.serial_number.as_ref().map_or("", String::as_ref); if info.vid == 0x16c0 && info.pid == 0x05e1 && manufacturer == "gekkio.fi" && (product == "GB-LIVE32" || product == "GB_LIVE32") { info!("Detected device: {} ({:04x}:{:04x}, manufacturer=\"{}\", product=\"{}\", serial=\"{}\")", name, info.vid, info.pid, manufacturer, product, serial_number); Some(name.into()) } else { info!("Skipping device: {} ({:04x}:{:04x}, manufacturer=\"{}\", product=\"{}\", serial=\"{}\")", name, info.vid, info.pid, manufacturer, product, serial_number); None } } else { None } }) .collect::<Vec<_>>(); Ok(ports) } #[derive(Clone, Debug)] enum Operation { Upload(Vec<u8>), Status, } fn worker(port: &OsString, operation: Operation) -> Result<(), Error> { let name = port.to_string_lossy(); let port = serialport::open(&port)?; info!("{}: Connecting...", name); let mut gbl32 = Gbl32::from_port(port)?; let version = gbl32.get_version()?; match version { (2, 0) | (2, 1) => (), (major, minor) => bail!("{}: Unsupported version v{}.{}", name, major, minor), } info!("{}: Connected (v{}.{})", name, version.0, 
version.1); unlock_if_necessary(&name, &mut gbl32)?; match operation { Operation::Upload(data) => { assert!(data.len() == 32768); gbl32.set_reset(true)?; gbl32.set_passthrough(false)?; gbl32.write_all(&data)?; gbl32.set_passthrough(true)?; gbl32.set_reset(false)?; info!("{}: Wrote ROM and reset the system", name); } Operation::Status => { let status = gbl32.get_status()?; info!( "Status: unlocked={}, passthrough={}, reset={}", status.unlocked, status.passthrough, status.reset ); } } Ok(()) } fn unlock_if_necessary(name: &str, gbl32: &mut Gbl32) -> Result<(), Error> { if gbl32.get_status()?.unlocked { return Ok(()); } info!("{}: Unlocking...", name); gbl32.set_passthrough(false)?; gbl32.set_unlocked(true)?; let mut buffer = vec![0u8; 0x8000]; let mut rng = SmallRng::from_entropy(); rng.fill_bytes(&mut buffer); gbl32.write_all(&buffer)?; let data = gbl32.read_all()?; for (idx, (a, b)) in buffer.into_iter().zip(data.into_iter()).enumerate() { if a != b { bail!("Self-test failed at index {}", idx); } } if !gbl32.get_status()?.unlocked { bail!("Failed to unlock device"); } info!("{}: Unlocked device after self-test", name); Ok(()) } fn run(matches: &ArgMatches) -> Result<(), Error> { let _ = TermLogger::init(LevelFilter::Debug, simplelog::Config::default()); let ports; if matches.is_present("broadcast") { ports = scan_ports()?; } else if let Some(devices) = matches.values_of_os("port") { ports = devices.map(OsStr::to_os_string).collect(); } else { ports = scan_ports()?; if ports.len() > 1 { bail!("Too many detected devices for automatic selection"); } } if ports.is_empty() { bail!("No supported devices found"); } info!( "Using {}: {}", if ports.len() == 1 { "device" } else { "devices" }, itertools::join(ports.iter().map(|p| p.to_string_lossy()), ", ") ); let operation = if let Some(path) = matches.value_of_os("upload") { let mut file = File::open(path)?; let mut buf = vec![0; 0x8000]; match file.read_exact(&mut buf) { Err(ref e) if e.kind() == 
io::ErrorKind::UnexpectedEof => { bail!("ROM file is smaller than 32768 bytes"); } result => result?, } let mut byte = [0; 1]; match file.read_exact(&mut byte) { Err(ref e) if e.kind() == io::ErrorKind::UnexpectedEof => (), Ok(_) => bail!("ROM file is larger than 32768 bytes"), result => result?, } Operation::Upload(buf) } else { Operation::Status }; let threads = ports .into_iter() .map(|port| { let name = port.to_string_lossy().to_string(); let op = operation.clone(); (name, thread::spawn(move || worker(&port, op))) }) .collect::<Vec<_>>(); let mut failures = 0; for (name, thread) in threads { let result = thread .join() .map_err(|_| format_err!("{}: failed to wait for worker thread", name))?; if let Err(ref e) = result { let stderr = &mut io::stderr(); let _ = writeln!(stderr, "{}: {}\n{}", name, e, e.backtrace()); failures += 1; } } if failures > 0 { bail!("{} devices failed", failures); } Ok(()) } fn main() { let matches = app_from_crate!() .arg( Arg::with_name("broadcast") .short("b") .long("broadcast") .help("Broadcast mode: use all connected devices"), ) .arg( Arg::with_name("port") .short("p") .long("port") .value_name("port") .multiple(true) .help("Serial port to use"), ) .arg( Arg::with_name("upload") .short("u") .long("upload") .value_name("upload") .help("ROM file to upload"), ) .get_matches(); if let Err(ref e) = run(&matches) { let stderr = &mut io::stderr(); let _ = writeln!(stderr, "Error: {}\n{}", e, e.backtrace()); process::exit(1); } }
26.627119
106
0.574952
aca7e7d46c7fc6c05e717c7583939c56e227bd9c
17,718
// Copyright (c) 2017-present PyO3 Project and Contributors use crate::method::{FnType, SelfType}; use crate::pymethod::{ impl_py_getter_def, impl_py_setter_def, impl_wrap_getter, impl_wrap_setter, PropertyType, }; use crate::utils; use proc_macro2::{Span, TokenStream}; use quote::quote; use syn::ext::IdentExt; use syn::parse::{Parse, ParseStream}; use syn::punctuated::Punctuated; use syn::{parse_quote, Expr, Token}; /// The parsed arguments of the pyclass macro pub struct PyClassArgs { pub freelist: Option<syn::Expr>, pub name: Option<syn::Expr>, pub flags: Vec<syn::Expr>, pub base: syn::TypePath, pub has_extends: bool, pub has_unsendable: bool, pub module: Option<syn::LitStr>, } impl Parse for PyClassArgs { fn parse(input: ParseStream) -> syn::parse::Result<Self> { let mut slf = PyClassArgs::default(); let vars = Punctuated::<Expr, Token![,]>::parse_terminated(input)?; for expr in vars { slf.add_expr(&expr)?; } Ok(slf) } } impl Default for PyClassArgs { fn default() -> Self { PyClassArgs { freelist: None, name: None, module: None, // We need the 0 as value for the constant we're later building using quote for when there // are no other flags flags: vec![parse_quote! { 0 }], base: parse_quote! { pyo3::PyAny }, has_extends: false, has_unsendable: false, } } } impl PyClassArgs { /// Adda single expression from the comma separated list in the attribute, which is /// either a single word or an assignment expression fn add_expr(&mut self, expr: &Expr) -> syn::parse::Result<()> { match expr { syn::Expr::Path(ref exp) if exp.path.segments.len() == 1 => self.add_path(exp), syn::Expr::Assign(ref assign) => self.add_assign(assign), _ => Err(syn::Error::new_spanned(expr, "Failed to parse arguments")), } } /// Match a key/value flag fn add_assign(&mut self, assign: &syn::ExprAssign) -> syn::Result<()> { let syn::ExprAssign { left, right, .. 
} = assign; let key = match &**left { syn::Expr::Path(exp) if exp.path.segments.len() == 1 => { exp.path.segments.first().unwrap().ident.to_string() } _ => { return Err(syn::Error::new_spanned(assign, "Failed to parse arguments")); } }; macro_rules! expected { ($expected: literal) => { expected!($expected, right) }; ($expected: literal, $span: ident) => { return Err(syn::Error::new_spanned( $span, concat!("Expected ", $expected), )); }; } match key.as_str() { "freelist" => { // We allow arbitrary expressions here so you can e.g. use `8*64` self.freelist = Some(syn::Expr::clone(right)); } "name" => match &**right { syn::Expr::Path(exp) if exp.path.segments.len() == 1 => { self.name = Some(exp.clone().into()); } _ => expected!("type name (e.g., Name)"), }, "extends" => match &**right { syn::Expr::Path(exp) => { self.base = syn::TypePath { path: exp.path.clone(), qself: None, }; self.has_extends = true; } _ => expected!("type path (e.g., my_mod::BaseClass)"), }, "module" => match &**right { syn::Expr::Lit(syn::ExprLit { lit: syn::Lit::Str(lit), .. }) => { self.module = Some(lit.clone()); } _ => expected!(r#"string literal (e.g., "my_mod")"#), }, _ => expected!("one of freelist/name/extends/module", left), }; Ok(()) } /// Match a single flag fn add_path(&mut self, exp: &syn::ExprPath) -> syn::Result<()> { let flag = exp.path.segments.first().unwrap().ident.to_string(); let mut push_flag = |flag| { self.flags.push(syn::Expr::Path(flag)); }; match flag.as_str() { "gc" => push_flag(parse_quote! {pyo3::type_flags::GC}), "weakref" => push_flag(parse_quote! {pyo3::type_flags::WEAKREF}), "subclass" => push_flag(parse_quote! {pyo3::type_flags::BASETYPE}), "dict" => push_flag(parse_quote! 
{pyo3::type_flags::DICT}), "unsendable" => { self.has_unsendable = true; } _ => { return Err(syn::Error::new_spanned( &exp.path, "Expected one of gc/weakref/subclass/dict/unsendable", )) } }; Ok(()) } } pub fn build_py_class(class: &mut syn::ItemStruct, attr: &PyClassArgs) -> syn::Result<TokenStream> { let text_signature = utils::parse_text_signature_attrs( &mut class.attrs, &get_class_python_name(&class.ident, attr), )?; let doc = utils::get_doc(&class.attrs, text_signature, true)?; let mut descriptors = Vec::new(); check_generics(class)?; if let syn::Fields::Named(ref mut fields) = class.fields { for field in fields.named.iter_mut() { let field_descs = parse_descriptors(field)?; if !field_descs.is_empty() { descriptors.push((field.clone(), field_descs)); } } } else { return Err(syn::Error::new_spanned( &class.fields, "#[pyclass] can only be used with C-style structs", )); } impl_class(&class.ident, &attr, doc, descriptors) } /// Parses `#[pyo3(get, set)]` fn parse_descriptors(item: &mut syn::Field) -> syn::Result<Vec<FnType>> { let mut descs = Vec::new(); let mut new_attrs = Vec::new(); for attr in item.attrs.iter() { if let Ok(syn::Meta::List(ref list)) = attr.parse_meta() { if list.path.is_ident("pyo3") { for meta in list.nested.iter() { if let syn::NestedMeta::Meta(ref metaitem) = meta { if metaitem.path().is_ident("get") { descs.push(FnType::Getter(SelfType::Receiver { mutable: false })); } else if metaitem.path().is_ident("set") { descs.push(FnType::Setter(SelfType::Receiver { mutable: true })); } else { return Err(syn::Error::new_spanned( metaitem, "Only get and set are supported", )); } } } } else { new_attrs.push(attr.clone()) } } else { new_attrs.push(attr.clone()); } } item.attrs.clear(); item.attrs.extend(new_attrs); Ok(descs) } /// To allow multiple #[pymethods]/#[pyproto] block, we define inventory types. 
fn impl_methods_inventory(cls: &syn::Ident) -> TokenStream { // Try to build a unique type for better error messages let name = format!("Pyo3MethodsInventoryFor{}", cls); let inventory_cls = syn::Ident::new(&name, Span::call_site()); quote! { #[doc(hidden)] pub struct #inventory_cls { methods: Vec<pyo3::class::PyMethodDefType>, } impl pyo3::class::methods::PyMethodsInventory for #inventory_cls { fn new(methods: Vec<pyo3::class::PyMethodDefType>) -> Self { Self { methods } } fn get(&'static self) -> &'static [pyo3::class::PyMethodDefType] { &self.methods } } impl pyo3::class::methods::HasMethodsInventory for #cls { type Methods = #inventory_cls; } pyo3::inventory::collect!(#inventory_cls); } } /// Implement `HasProtoInventory` for the class for lazy protocol initialization. fn impl_proto_inventory(cls: &syn::Ident) -> TokenStream { // Try to build a unique type for better error messages let name = format!("Pyo3ProtoInventoryFor{}", cls); let inventory_cls = syn::Ident::new(&name, Span::call_site()); quote! { #[doc(hidden)] pub struct #inventory_cls { def: pyo3::class::proto_methods::PyProtoMethodDef, } impl pyo3::class::proto_methods::PyProtoInventory for #inventory_cls { fn new(def: pyo3::class::proto_methods::PyProtoMethodDef) -> Self { Self { def } } fn get(&'static self) -> &'static pyo3::class::proto_methods::PyProtoMethodDef { &self.def } } impl pyo3::class::proto_methods::HasProtoInventory for #cls { type ProtoMethods = #inventory_cls; } pyo3::inventory::collect!(#inventory_cls); } } fn get_class_python_name(cls: &syn::Ident, attr: &PyClassArgs) -> TokenStream { match &attr.name { Some(name) => quote! { #name }, None => quote! { #cls }, } } fn impl_class( cls: &syn::Ident, attr: &PyClassArgs, doc: syn::LitStr, descriptors: Vec<(syn::Field, Vec<FnType>)>, ) -> syn::Result<TokenStream> { let cls_name = get_class_python_name(cls, attr).to_string(); let extra = { if let Some(freelist) = &attr.freelist { quote! 
{ impl pyo3::freelist::PyClassWithFreeList for #cls { #[inline] fn get_free_list(_py: pyo3::Python) -> &mut pyo3::freelist::FreeList<*mut pyo3::ffi::PyObject> { static mut FREELIST: *mut pyo3::freelist::FreeList<*mut pyo3::ffi::PyObject> = 0 as *mut _; unsafe { if FREELIST.is_null() { FREELIST = Box::into_raw(Box::new( pyo3::freelist::FreeList::with_capacity(#freelist))); } &mut *FREELIST } } } } } else { quote! { impl pyo3::pyclass::PyClassAlloc for #cls {} } } }; let extra = if !descriptors.is_empty() { let path = syn::Path::from(syn::PathSegment::from(cls.clone())); let ty = syn::Type::from(syn::TypePath { path, qself: None }); let desc_impls = impl_descriptors(&ty, descriptors)?; quote! { #desc_impls #extra } } else { extra }; // insert space for weak ref let mut has_weakref = false; let mut has_dict = false; let mut has_gc = false; for f in attr.flags.iter() { if let syn::Expr::Path(ref epath) = f { if epath.path == parse_quote! { pyo3::type_flags::WEAKREF } { has_weakref = true; } else if epath.path == parse_quote! { pyo3::type_flags::DICT } { has_dict = true; } else if epath.path == parse_quote! { pyo3::type_flags::GC } { has_gc = true; } } } let weakref = if has_weakref { quote! { pyo3::pyclass_slots::PyClassWeakRefSlot } } else if attr.has_extends { quote! { <Self::BaseType as pyo3::derive_utils::PyBaseTypeUtils>::WeakRef } } else { quote! { pyo3::pyclass_slots::PyClassDummySlot } }; let dict = if has_dict { quote! { pyo3::pyclass_slots::PyClassDictSlot } } else if attr.has_extends { quote! { <Self::BaseType as pyo3::derive_utils::PyBaseTypeUtils>::Dict } } else { quote! { pyo3::pyclass_slots::PyClassDummySlot } }; let module = if let Some(m) = &attr.module { quote! { Some(#m) } } else { quote! { None } }; // Enforce at compile time that PyGCProtocol is implemented let gc_impl = if has_gc { let closure_name = format!("__assertion_closure_{}", cls); let closure_token = syn::Ident::new(&closure_name, Span::call_site()); quote! 
{ fn #closure_token() { use pyo3::class; fn _assert_implements_protocol<'p, T: pyo3::class::PyGCProtocol<'p>>() {} _assert_implements_protocol::<#cls>(); } } } else { quote! {} }; let impl_inventory = impl_methods_inventory(&cls); let impl_proto_inventory = impl_proto_inventory(&cls); let base = &attr.base; let flags = &attr.flags; let extended = if attr.has_extends { quote! { pyo3::type_flags::EXTENDED } } else { quote! { 0 } }; let base_layout = if attr.has_extends { quote! { <Self::BaseType as pyo3::derive_utils::PyBaseTypeUtils>::LayoutAsBase } } else { quote! { pyo3::pycell::PyCellBase<pyo3::PyAny> } }; let base_nativetype = if attr.has_extends { quote! { <Self::BaseType as pyo3::derive_utils::PyBaseTypeUtils>::BaseNativeType } } else { quote! { pyo3::PyAny } }; // If #cls is not extended type, we allow Self->PyObject conversion let into_pyobject = if !attr.has_extends { quote! { impl pyo3::IntoPy<pyo3::PyObject> for #cls { fn into_py(self, py: pyo3::Python) -> pyo3::PyObject { pyo3::IntoPy::into_py(pyo3::Py::new(py, self).unwrap(), py) } } } } else { quote! {} }; let thread_checker = if attr.has_unsendable { quote! { pyo3::pyclass::ThreadCheckerImpl<#cls> } } else if attr.has_extends { quote! { pyo3::pyclass::ThreadCheckerInherited<#cls, <#cls as pyo3::type_object::PyTypeInfo>::BaseType> } } else { quote! { pyo3::pyclass::ThreadCheckerStub<#cls> } }; Ok(quote! 
{ unsafe impl pyo3::type_object::PyTypeInfo for #cls { type Type = #cls; type BaseType = #base; type Layout = pyo3::PyCell<Self>; type BaseLayout = #base_layout; type Initializer = pyo3::pyclass_init::PyClassInitializer<Self>; type AsRefTarget = pyo3::PyCell<Self>; const NAME: &'static str = #cls_name; const MODULE: Option<&'static str> = #module; const DESCRIPTION: &'static str = #doc; const FLAGS: usize = #(#flags)|* | #extended; #[inline] fn type_object_raw(py: pyo3::Python) -> *mut pyo3::ffi::PyTypeObject { use pyo3::type_object::LazyStaticType; static TYPE_OBJECT: LazyStaticType = LazyStaticType::new(); TYPE_OBJECT.get_or_init::<Self>(py) } } impl pyo3::PyClass for #cls { type Dict = #dict; type WeakRef = #weakref; type BaseNativeType = #base_nativetype; } impl<'a> pyo3::derive_utils::ExtractExt<'a> for &'a #cls { type Target = pyo3::PyRef<'a, #cls>; } impl<'a> pyo3::derive_utils::ExtractExt<'a> for &'a mut #cls { type Target = pyo3::PyRefMut<'a, #cls>; } impl pyo3::pyclass::PyClassSend for #cls { type ThreadChecker = #thread_checker; } #into_pyobject #impl_inventory #impl_proto_inventory #extra #gc_impl }) } fn impl_descriptors( cls: &syn::Type, descriptors: Vec<(syn::Field, Vec<FnType>)>, ) -> syn::Result<TokenStream> { let py_methods: Vec<TokenStream> = descriptors .iter() .flat_map(|&(ref field, ref fns)| { fns.iter() .map(|desc| { let name = field.ident.as_ref().unwrap().unraw(); let doc = utils::get_doc(&field.attrs, None, true) .unwrap_or_else(|_| syn::LitStr::new(&name.to_string(), name.span())); match desc { FnType::Getter(self_ty) => Ok(impl_py_getter_def( &name, &doc, &impl_wrap_getter(&cls, PropertyType::Descriptor(&field), &self_ty)?, )), FnType::Setter(self_ty) => Ok(impl_py_setter_def( &name, &doc, &impl_wrap_setter(&cls, PropertyType::Descriptor(&field), &self_ty)?, )), _ => unreachable!(), } }) .collect::<Vec<syn::Result<TokenStream>>>() }) .collect::<syn::Result<_>>()?; Ok(quote! { pyo3::inventory::submit! 
{ #![crate = pyo3] { type Inventory = <#cls as pyo3::class::methods::HasMethodsInventory>::Methods; <Inventory as pyo3::class::methods::PyMethodsInventory>::new(vec![#(#py_methods),*]) } } }) } fn check_generics(class: &mut syn::ItemStruct) -> syn::Result<()> { if class.generics.params.is_empty() { Ok(()) } else { Err(syn::Error::new_spanned( &class.generics, "#[pyclass] cannot have generic parameters", )) } }
34.270793
116
0.515352
56ac03bf097cb26feab312cb166cc3d063c6c48d
16,037
//! Development-related functionality: helpers and types for writing tests //! against concrete implementations of the traits in this crate. use crate::{ consts::U32, error::{Error, Result}, ff::{Field, PrimeField}, group, rand_core::RngCore, scalar::ScalarBits, sec1::{FromEncodedPoint, ToEncodedPoint}, subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption}, util::sbb64, weierstrass, zeroize::Zeroize, AlgorithmParameters, Curve, ProjectiveArithmetic, }; use core::{ convert::{TryFrom, TryInto}, iter::Sum, ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign}, }; use hex_literal::hex; #[cfg(feature = "jwk")] use crate::JwkParameters; /// Pseudo-coordinate for fixed-based scalar mult output pub const PSEUDO_COORDINATE_FIXED_BASE_MUL: [u8; 32] = hex!("deadbeef00000000000000000000000000000000000000000000000000000001"); /// Mock elliptic curve type useful for writing tests which require a concrete /// curve type. /// /// Note: this type is roughly modeled off of NIST P-256, but does not provide /// an actual cure arithmetic implementation. #[derive(Clone, Debug, Default, Eq, PartialEq, PartialOrd, Ord)] pub struct MockCurve; impl Curve for MockCurve { type FieldSize = U32; } impl weierstrass::Curve for MockCurve {} impl ProjectiveArithmetic for MockCurve { type ProjectivePoint = ProjectivePoint; } impl AlgorithmParameters for MockCurve { /// OID for NIST P-256 const OID: pkcs8::ObjectIdentifier = pkcs8::ObjectIdentifier::new("1.2.840.10045.3.1.7"); } #[cfg(feature = "jwk")] #[cfg_attr(docsrs, doc(cfg(feature = "jwk")))] impl JwkParameters for MockCurve { const CRV: &'static str = "P-256"; } /// SEC1 encoded point. pub type EncodedPoint = crate::sec1::EncodedPoint<MockCurve>; /// Field element bytes. pub type FieldBytes = crate::FieldBytes<MockCurve>; /// Non-zero scalar value. pub type NonZeroScalar = crate::scalar::NonZeroScalar<MockCurve>; /// Public key. pub type PublicKey = crate::PublicKey<MockCurve>; /// Secret key. 
pub type SecretKey = crate::SecretKey<MockCurve>; const LIMBS: usize = 4; type U256 = [u64; LIMBS]; /// P-256 modulus pub const MODULUS: U256 = [ 0xf3b9_cac2_fc63_2551, 0xbce6_faad_a717_9e84, 0xffff_ffff_ffff_ffff, 0xffff_ffff_0000_0000, ]; /// Example scalar type #[derive(Clone, Copy, Debug, Default, Eq, PartialEq)] pub struct Scalar([u64; LIMBS]); impl Field for Scalar { fn random(_rng: impl RngCore) -> Self { unimplemented!(); } fn zero() -> Self { Self(Default::default()) } fn one() -> Self { Self([1, 0, 0, 0]) } fn is_zero(&self) -> bool { self.ct_eq(&Self::zero()).into() } #[must_use] fn square(&self) -> Self { unimplemented!(); } #[must_use] fn double(&self) -> Self { unimplemented!(); } fn invert(&self) -> CtOption<Self> { unimplemented!(); } fn sqrt(&self) -> CtOption<Self> { unimplemented!(); } } #[cfg(target_pointer_width = "64")] fn pack_bits(native: U256) -> ScalarBits<MockCurve> { native.into() } #[cfg(target_pointer_width = "32")] fn pack_bits(native: U256) -> ScalarBits<MockCurve> { [ (native[0] & 0xffff_ffff) as u32, (native[0] >> 32) as u32, (native[1] & 0xffff_ffff) as u32, (native[1] >> 32) as u32, (native[2] & 0xffff_ffff) as u32, (native[2] >> 32) as u32, (native[3] & 0xffff_ffff) as u32, (native[3] >> 32) as u32, ] .into() } impl PrimeField for Scalar { type Repr = FieldBytes; #[cfg(target_pointer_width = "32")] type ReprBits = [u32; 8]; #[cfg(target_pointer_width = "64")] type ReprBits = [u64; 4]; const NUM_BITS: u32 = 256; const CAPACITY: u32 = 255; const S: u32 = 4; fn from_repr(bytes: FieldBytes) -> Option<Self> { let mut w = [0u64; LIMBS]; // Interpret the bytes as a big-endian integer w. w[3] = u64::from_be_bytes(bytes[0..8].try_into().unwrap()); w[2] = u64::from_be_bytes(bytes[8..16].try_into().unwrap()); w[1] = u64::from_be_bytes(bytes[16..24].try_into().unwrap()); w[0] = u64::from_be_bytes(bytes[24..32].try_into().unwrap()); // If w is in the range [0, n) then w - n will overflow, resulting in a borrow // value of 2^64 - 1. 
let (_, borrow) = sbb64(w[0], MODULUS[0], 0); let (_, borrow) = sbb64(w[1], MODULUS[1], borrow); let (_, borrow) = sbb64(w[2], MODULUS[2], borrow); let (_, borrow) = sbb64(w[3], MODULUS[3], borrow); if (borrow as u8) & 1 == 1 { Some(Scalar(w)) } else { None } } fn to_repr(&self) -> FieldBytes { let mut ret = FieldBytes::default(); ret[0..8].copy_from_slice(&self.0[3].to_be_bytes()); ret[8..16].copy_from_slice(&self.0[2].to_be_bytes()); ret[16..24].copy_from_slice(&self.0[1].to_be_bytes()); ret[24..32].copy_from_slice(&self.0[0].to_be_bytes()); ret } fn to_le_bits(&self) -> ScalarBits<MockCurve> { pack_bits(self.0) } fn is_odd(&self) -> bool { unimplemented!(); } fn char_le_bits() -> ScalarBits<MockCurve> { pack_bits(MODULUS) } fn multiplicative_generator() -> Self { unimplemented!(); } fn root_of_unity() -> Self { unimplemented!(); } } impl TryFrom<[u64; 4]> for Scalar { type Error = Error; fn try_from(limbs: [u64; 4]) -> Result<Self> { // TODO(tarcieri): reject values that overflow the order Ok(Scalar(limbs)) } } impl ConditionallySelectable for Scalar { fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { Scalar([ u64::conditional_select(&a.0[0], &b.0[0], choice), u64::conditional_select(&a.0[1], &b.0[1], choice), u64::conditional_select(&a.0[2], &b.0[2], choice), u64::conditional_select(&a.0[3], &b.0[3], choice), ]) } } impl ConstantTimeEq for Scalar { fn ct_eq(&self, other: &Self) -> Choice { self.0[0].ct_eq(&other.0[0]) & self.0[1].ct_eq(&other.0[1]) & self.0[2].ct_eq(&other.0[2]) & self.0[3].ct_eq(&other.0[3]) } } impl Add<Scalar> for Scalar { type Output = Scalar; fn add(self, _other: Scalar) -> Scalar { unimplemented!(); } } impl Add<&Scalar> for Scalar { type Output = Scalar; fn add(self, _other: &Scalar) -> Scalar { unimplemented!(); } } impl AddAssign<Scalar> for Scalar { fn add_assign(&mut self, _rhs: Scalar) { unimplemented!(); } } impl AddAssign<&Scalar> for Scalar { fn add_assign(&mut self, _rhs: &Scalar) { unimplemented!(); } } impl 
Sub<Scalar> for Scalar { type Output = Scalar; fn sub(self, _other: Scalar) -> Scalar { unimplemented!(); } } impl Sub<&Scalar> for Scalar { type Output = Scalar; fn sub(self, _other: &Scalar) -> Scalar { unimplemented!(); } } impl SubAssign<Scalar> for Scalar { fn sub_assign(&mut self, _rhs: Scalar) { unimplemented!(); } } impl SubAssign<&Scalar> for Scalar { fn sub_assign(&mut self, _rhs: &Scalar) { unimplemented!(); } } impl Mul<Scalar> for Scalar { type Output = Scalar; fn mul(self, _other: Scalar) -> Scalar { unimplemented!(); } } impl Mul<&Scalar> for Scalar { type Output = Scalar; fn mul(self, _other: &Scalar) -> Scalar { unimplemented!(); } } impl MulAssign<Scalar> for Scalar { fn mul_assign(&mut self, _rhs: Scalar) { unimplemented!(); } } impl MulAssign<&Scalar> for Scalar { fn mul_assign(&mut self, _rhs: &Scalar) { unimplemented!(); } } impl Neg for Scalar { type Output = Scalar; fn neg(self) -> Scalar { unimplemented!(); } } impl From<u64> for Scalar { fn from(_: u64) -> Scalar { unimplemented!(); } } impl From<Scalar> for FieldBytes { fn from(scalar: Scalar) -> Self { Self::from(&scalar) } } impl From<&Scalar> for FieldBytes { fn from(scalar: &Scalar) -> Self { let mut ret = FieldBytes::default(); ret[0..8].copy_from_slice(&scalar.0[3].to_be_bytes()); ret[8..16].copy_from_slice(&scalar.0[2].to_be_bytes()); ret[16..24].copy_from_slice(&scalar.0[1].to_be_bytes()); ret[24..32].copy_from_slice(&scalar.0[0].to_be_bytes()); ret } } impl Zeroize for Scalar { fn zeroize(&mut self) { self.0.as_mut().zeroize() } } /// Example affine point type #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum AffinePoint { /// Result of fixed-based scalar multiplication FixedBaseOutput(Scalar), /// Is this point the identity point? Identity, /// Is this point the generator point? 
Generator, /// Is this point a different point corresponding to a given [`EncodedPoint`] Other(EncodedPoint), } impl ConditionallySelectable for AffinePoint { fn conditional_select(_a: &Self, _b: &Self, _choice: Choice) -> Self { unimplemented!(); } } impl Default for AffinePoint { fn default() -> Self { Self::Identity } } impl FromEncodedPoint<MockCurve> for AffinePoint { fn from_encoded_point(point: &EncodedPoint) -> Option<Self> { if point.is_identity() { Some(Self::Identity) } else { Some(Self::Other(*point)) } } } impl ToEncodedPoint<MockCurve> for AffinePoint { fn to_encoded_point(&self, compress: bool) -> EncodedPoint { match self { Self::FixedBaseOutput(scalar) => EncodedPoint::from_affine_coordinates( &scalar.to_repr(), &PSEUDO_COORDINATE_FIXED_BASE_MUL.into(), false, ), Self::Other(point) => { if compress == point.is_compressed() { *point } else { unimplemented!(); } } _ => unimplemented!(), } } } impl Mul<NonZeroScalar> for AffinePoint { type Output = AffinePoint; fn mul(self, _scalar: NonZeroScalar) -> Self { unimplemented!(); } } /// Example projective point type #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum ProjectivePoint { /// Result of fixed-based scalar multiplication FixedBaseOutput(Scalar), /// Is this point the identity point? Identity, /// Is this point the generator point? 
Generator, /// Is this point a different point corresponding to a given [`AffinePoint`] Other(AffinePoint), } impl Default for ProjectivePoint { fn default() -> Self { Self::Identity } } impl From<AffinePoint> for ProjectivePoint { fn from(point: AffinePoint) -> ProjectivePoint { match point { AffinePoint::FixedBaseOutput(scalar) => ProjectivePoint::FixedBaseOutput(scalar), AffinePoint::Identity => ProjectivePoint::Identity, AffinePoint::Generator => ProjectivePoint::Generator, other => ProjectivePoint::Other(other), } } } impl FromEncodedPoint<MockCurve> for ProjectivePoint { fn from_encoded_point(_point: &EncodedPoint) -> Option<Self> { unimplemented!(); } } impl ToEncodedPoint<MockCurve> for ProjectivePoint { fn to_encoded_point(&self, _compress: bool) -> EncodedPoint { unimplemented!(); } } impl group::Group for ProjectivePoint { type Scalar = Scalar; fn random(_rng: impl RngCore) -> Self { unimplemented!(); } fn identity() -> Self { Self::Identity } fn generator() -> Self { Self::Generator } fn is_identity(&self) -> Choice { Choice::from((self == &Self::Identity) as u8) } #[must_use] fn double(&self) -> Self { unimplemented!(); } } impl group::Curve for ProjectivePoint { type AffineRepr = AffinePoint; fn to_affine(&self) -> AffinePoint { match self { Self::FixedBaseOutput(scalar) => AffinePoint::FixedBaseOutput(*scalar), Self::Other(affine) => *affine, _ => unimplemented!(), } } } impl Add<ProjectivePoint> for ProjectivePoint { type Output = ProjectivePoint; fn add(self, _other: ProjectivePoint) -> ProjectivePoint { unimplemented!(); } } impl Add<&ProjectivePoint> for ProjectivePoint { type Output = ProjectivePoint; fn add(self, _other: &ProjectivePoint) -> ProjectivePoint { unimplemented!(); } } impl AddAssign<ProjectivePoint> for ProjectivePoint { fn add_assign(&mut self, _rhs: ProjectivePoint) { unimplemented!(); } } impl AddAssign<&ProjectivePoint> for ProjectivePoint { fn add_assign(&mut self, _rhs: &ProjectivePoint) { unimplemented!(); } } impl 
Sub<ProjectivePoint> for ProjectivePoint { type Output = ProjectivePoint; fn sub(self, _other: ProjectivePoint) -> ProjectivePoint { unimplemented!(); } } impl Sub<&ProjectivePoint> for ProjectivePoint { type Output = ProjectivePoint; fn sub(self, _other: &ProjectivePoint) -> ProjectivePoint { unimplemented!(); } } impl SubAssign<ProjectivePoint> for ProjectivePoint { fn sub_assign(&mut self, _rhs: ProjectivePoint) { unimplemented!(); } } impl SubAssign<&ProjectivePoint> for ProjectivePoint { fn sub_assign(&mut self, _rhs: &ProjectivePoint) { unimplemented!(); } } impl Add<AffinePoint> for ProjectivePoint { type Output = ProjectivePoint; fn add(self, _other: AffinePoint) -> ProjectivePoint { unimplemented!(); } } impl Add<&AffinePoint> for ProjectivePoint { type Output = ProjectivePoint; fn add(self, _other: &AffinePoint) -> ProjectivePoint { unimplemented!(); } } impl AddAssign<AffinePoint> for ProjectivePoint { fn add_assign(&mut self, _rhs: AffinePoint) { unimplemented!(); } } impl AddAssign<&AffinePoint> for ProjectivePoint { fn add_assign(&mut self, _rhs: &AffinePoint) { unimplemented!(); } } impl Sum for ProjectivePoint { fn sum<I: Iterator<Item = Self>>(_iter: I) -> Self { unimplemented!(); } } impl<'a> Sum<&'a ProjectivePoint> for ProjectivePoint { fn sum<I: Iterator<Item = &'a ProjectivePoint>>(_iter: I) -> Self { unimplemented!(); } } impl Sub<AffinePoint> for ProjectivePoint { type Output = ProjectivePoint; fn sub(self, _other: AffinePoint) -> ProjectivePoint { unimplemented!(); } } impl Sub<&AffinePoint> for ProjectivePoint { type Output = ProjectivePoint; fn sub(self, _other: &AffinePoint) -> ProjectivePoint { unimplemented!(); } } impl SubAssign<AffinePoint> for ProjectivePoint { fn sub_assign(&mut self, _rhs: AffinePoint) { unimplemented!(); } } impl SubAssign<&AffinePoint> for ProjectivePoint { fn sub_assign(&mut self, _rhs: &AffinePoint) { unimplemented!(); } } impl Mul<Scalar> for ProjectivePoint { type Output = ProjectivePoint; fn mul(self, 
scalar: Scalar) -> ProjectivePoint { match self { Self::Generator => Self::FixedBaseOutput(scalar), _ => unimplemented!(), } } } impl Mul<&Scalar> for ProjectivePoint { type Output = ProjectivePoint; fn mul(self, scalar: &Scalar) -> ProjectivePoint { self * *scalar } } impl MulAssign<Scalar> for ProjectivePoint { fn mul_assign(&mut self, _rhs: Scalar) { unimplemented!(); } } impl MulAssign<&Scalar> for ProjectivePoint { fn mul_assign(&mut self, _rhs: &Scalar) { unimplemented!(); } } impl Neg for ProjectivePoint { type Output = ProjectivePoint; fn neg(self) -> ProjectivePoint { unimplemented!(); } }
23.793769
93
0.608343
8f7eb4a7e08819ad7d1b6c19806d5e37386d058c
1,609
// Function copied from here https://github.com/tomaka/glium/blob/master/examples/support/mod.rs use obj; use genmesh; #[derive(Copy, Clone)] pub struct Vertex { pub position: [f32; 3], pub normal: [f32; 3], pub texture: [f32; 2], pub id: u32, } implement_vertex!(Vertex, position, normal, texture, id); // Returns a vertex buffer that should be rendered as `TrianglesList`. pub fn load_wavefront(data: &[u8], id: u32) -> Vec<Vertex> { let mut data = ::std::io::BufReader::new(data); let data = obj::Obj::load(&mut data); let mut vertex_data = Vec::new(); for object in data.object_iter() { for shape in object.group_iter().flat_map(|g| g.indices().iter()) { match shape { &genmesh::Polygon::PolyTri(genmesh::Triangle { x: v1, y: v2, z: v3 }) => { for v in [v1, v2, v3].iter() { let position = data.position()[v.0]; let texture = v.1.map(|index| data.texture()[index]); let normal = v.2.map(|index| data.normal()[index]); let texture = texture.unwrap_or([0.0, 0.0]); let normal = normal.unwrap_or([0.0, 0.0, 0.0]); vertex_data.push(Vertex { position: position, normal: normal, texture: texture, id: id, }) } } _ => unimplemented!(), } } } vertex_data }
32.836735
96
0.485395
acb4c932b9d700882807b55f33bb84f12efda0a2
2,575
use crate::vendor::tera::errors::Error; /// Escape HTML following [OWASP](https://www.owasp.org/index.php/XSS_(Cross_Site_Scripting)_Prevention_Cheat_Sheet) /// /// Escape the following characters with HTML entity encoding to prevent switching /// into any execution context, such as script, style, or event handlers. Using /// hex entities is recommended in the spec. In addition to the 5 characters /// significant in XML (&, <, >, ", '), the forward slash is included as it helps /// to end an HTML entity. /// /// ```text /// & --> &amp; /// < --> &lt; /// > --> &gt; /// " --> &quot; /// ' --> &#x27; &apos; is not recommended /// / --> &#x2F; forward slash is included as it helps end an HTML entity /// ``` #[inline] pub fn escape_html(input: &str) -> String { let mut output = String::with_capacity(input.len() * 2); for c in input.chars() { match c { '&' => output.push_str("&amp;"), '<' => output.push_str("&lt;"), '>' => output.push_str("&gt;"), '"' => output.push_str("&quot;"), '\'' => output.push_str("&#x27;"), '/' => output.push_str("&#x2F;"), _ => output.push(c), } } // Not using shrink_to_fit() on purpose output } pub(crate) fn render_to_string<C, F, E>(context: C, render: F) -> Result<String, Error> where C: FnOnce() -> String, F: FnOnce(&mut Vec<u8>) -> Result<(), E>, Error: From<E>, { let mut buffer = Vec::new(); render(&mut buffer).map_err(Error::from)?; buffer_to_string(context, buffer) } pub(crate) fn buffer_to_string<F>(context: F, buffer: Vec<u8>) -> Result<String, Error> where F: FnOnce() -> String, { String::from_utf8(buffer).map_err(|error| Error::utf8_conversion_error(error, context())) } #[cfg(test)] mod tests { use super::escape_html; use super::render_to_string; #[test] fn test_escape_html() { let tests = vec![ (r"", ""), (r"a&b", "a&amp;b"), (r"<a", "&lt;a"), (r">a", "&gt;a"), (r#"""#, "&quot;"), (r#"'"#, "&#x27;"), (r#"大阪"#, "大阪"), ]; for (input, expected) in tests { assert_eq!(escape_html(input), expected); } let empty = String::new(); 
assert_eq!(escape_html(&empty), empty); } #[test] fn test_render_to_string() { use std::io::Write; let string = render_to_string(|| panic!(), |w| write!(w, "test")).unwrap(); assert_eq!(string, "test".to_owned()); } }
29.94186
116
0.541359
11f67aedb9e8523bf96f173f861c32910911cf73
27,050
// Copyright (c) Microsoft. All rights reserved. use super::{compute_validity, refresh_cert}; use failure::ResultExt; use futures::{future, Future, IntoFuture, Stream}; use hyper::{Body, Request, Response}; use serde_json; use edgelet_core::{ Certificate, CertificateProperties, CertificateType, CreateCertificate, WorkloadConfig, }; use edgelet_http::route::{Handler, Parameters}; use edgelet_http::Error as HttpError; use edgelet_utils::{ensure_not_empty_with_context, prepare_dns_san_entries}; use workload::models::ServerCertificateRequest; use error::{CertOperation, Error, ErrorKind}; use IntoResponse; pub struct ServerCertHandler<T: CreateCertificate, W: WorkloadConfig> { hsm: T, config: W, } impl<T: CreateCertificate, W: WorkloadConfig> ServerCertHandler<T, W> { pub fn new(hsm: T, config: W) -> Self { ServerCertHandler { hsm, config } } } impl<T, W> Handler<Parameters> for ServerCertHandler<T, W> where T: CreateCertificate + Clone + Send + Sync + 'static, <T as CreateCertificate>::Certificate: Certificate, W: WorkloadConfig + Clone + Send + Sync + 'static, { fn handle( &self, req: Request<Body>, params: Parameters, ) -> Box<Future<Item = Response<Body>, Error = HttpError> + Send> { let hsm = self.hsm.clone(); let cfg = self.config.clone(); let max_duration = cfg.get_cert_max_duration(CertificateType::Server); let response = params .name("name") .ok_or_else(|| Error::from(ErrorKind::MissingRequiredParameter("name"))) .and_then(|name| { let genid = params .name("genid") .ok_or_else(|| Error::from(ErrorKind::MissingRequiredParameter("genid")))?; Ok((name, genid)) }) .map(|(module_id, genid)| { let module_id = module_id.to_string(); let alias = format!("{}{}server", module_id, genid.to_string()); req.into_body().concat2().then(move |body| { let body = body.context(ErrorKind::CertOperation(CertOperation::GetServerCert))?; Ok((alias, body, module_id)) }) }) .into_future() .flatten() .and_then(move |(alias, body, module_id)| { let cert_req: ServerCertificateRequest = 
serde_json::from_slice(&body).context(ErrorKind::MalformedRequestBody)?; let expiration = compute_validity( cert_req.expiration(), max_duration, ErrorKind::MalformedRequestBody, )?; #[cfg_attr(feature = "cargo-clippy", allow(cast_sign_loss))] let expiration = match expiration { expiration if expiration < 0 || expiration > max_duration => { return Err(Error::from(ErrorKind::MalformedRequestBody)); } expiration => expiration as u64, }; let common_name = cert_req.common_name(); ensure_not_empty_with_context(common_name, || ErrorKind::MalformedRequestBody)?; // add a DNS SAN entry in the server cert that uses the module identifier as // an alternative DNS name; we also need to add the common_name that we are using // as a DNS name since the presence of a DNS name SAN will take precedence over // the common name let sans = vec![prepare_dns_san_entries(&[&module_id, common_name])]; #[cfg_attr(feature = "cargo-clippy", allow(cast_sign_loss))] let props = CertificateProperties::new( expiration, common_name.to_string(), CertificateType::Server, alias.clone(), ) .with_san_entries(sans); let body = refresh_cert( &hsm, alias, &props, ErrorKind::CertOperation(CertOperation::GetServerCert), )?; Ok(body) }) .or_else(|e| future::ok(e.into_response())); Box::new(response) } } #[cfg(test)] mod tests { use std::result::Result as StdResult; use std::sync::Arc; use chrono::offset::Utc; use chrono::Duration; use super::*; use edgelet_core::{ CertificateProperties, CertificateType, CreateCertificate, Error as CoreError, ErrorKind as CoreErrorKind, KeyBytes, PrivateKey, WorkloadConfig, }; use edgelet_test_utils::cert::TestCert; use hyper::StatusCode; use workload::models::{CertificateResponse, ErrorResponse, ServerCertificateRequest}; const MAX_DURATION_SEC: u64 = 7200; #[derive(Clone, Default)] struct TestHsm { on_create: Option< Arc<Box<Fn(&CertificateProperties) -> StdResult<TestCert, CoreError> + Send + Sync>>, >, } impl TestHsm { fn with_on_create<F>(mut self, on_create: F) -> Self 
where F: Fn(&CertificateProperties) -> StdResult<TestCert, CoreError> + Send + Sync + 'static, { self.on_create = Some(Arc::new(Box::new(on_create))); self } } impl CreateCertificate for TestHsm { type Certificate = TestCert; fn create_certificate( &self, properties: &CertificateProperties, ) -> StdResult<Self::Certificate, CoreError> { let callback = self.on_create.as_ref().unwrap(); callback(properties) } fn destroy_certificate(&self, _alias: String) -> StdResult<(), CoreError> { Ok(()) } } struct TestWorkloadConfig { iot_hub_name: String, device_id: String, duration: i64, } impl Default for TestWorkloadConfig { #[cfg_attr(feature = "cargo-clippy", allow(cast_possible_wrap, cast_sign_loss))] fn default() -> Self { assert!(MAX_DURATION_SEC < (i64::max_value() as u64)); TestWorkloadConfig { iot_hub_name: String::from("zaphods_hub"), device_id: String::from("marvins_device"), duration: MAX_DURATION_SEC as i64, } } } #[derive(Clone)] struct TestWorkloadData { data: Arc<TestWorkloadConfig>, } impl Default for TestWorkloadData { fn default() -> Self { TestWorkloadData { data: Arc::new(TestWorkloadConfig::default()), } } } impl WorkloadConfig for TestWorkloadData { fn iot_hub_name(&self) -> &str { self.data.iot_hub_name.as_str() } fn device_id(&self) -> &str { self.data.device_id.as_str() } fn get_cert_max_duration(&self, _cert_type: CertificateType) -> i64 { self.data.duration } } fn parse_error_response(response: Response<Body>) -> ErrorResponse { response .into_body() .concat2() .and_then(|b| Ok(serde_json::from_slice::<ErrorResponse>(&b).unwrap())) .wait() .unwrap() } #[test] fn missing_name() { let handler = ServerCertHandler::new(TestHsm::default(), TestWorkloadData::default()); let request = Request::get("http://localhost/modules//genid/I/certificate/server") .body("".into()) .unwrap(); let response = handler.handle(request, Parameters::new()).wait().unwrap(); assert_eq!(StatusCode::BAD_REQUEST, response.status()); assert_eq!( "The request is missing required 
parameter `name`", parse_error_response(response).message() ); } #[test] fn missing_genid() { let handler = ServerCertHandler::new(TestHsm::default(), TestWorkloadData::default()); let request = Request::get("http://localhost/modules/beelebrox/genid//certificate/server") .body("".into()) .unwrap(); let response = handler.handle(request, Parameters::new()).wait().unwrap(); assert_eq!(StatusCode::BAD_REQUEST, response.status()); assert_eq!( "The request is missing required parameter `name`", parse_error_response(response).message() ); } #[test] fn empty_body() { let handler = ServerCertHandler::new(TestHsm::default(), TestWorkloadData::default()); let request = Request::get("http://localhost/modules/beeblebrox/genid/II/certificate/server") .body("".into()) .unwrap(); let params = Parameters::with_captures(vec![ (Some("name".to_string()), "beeblebrox".to_string()), (Some("genid".to_string()), "II".to_string()), ]); let response = handler.handle(request, params).wait().unwrap(); assert_eq!(StatusCode::BAD_REQUEST, response.status()); assert_eq!( "Request body is malformed\n\tcaused by: EOF while parsing a value at line 1 column 0", parse_error_response(response).message(), ); } #[test] fn bad_body() { let handler = ServerCertHandler::new(TestHsm::default(), TestWorkloadData::default()); let request = Request::get("http://localhost/modules/beeblebrox/genid/III/certificate/server") .body("The answer is 42.".into()) .unwrap(); let params = Parameters::with_captures(vec![ (Some("name".to_string()), "beeblebrox".to_string()), (Some("genid".to_string()), "III".to_string()), ]); let response = handler.handle(request, params).wait().unwrap(); assert_eq!(StatusCode::BAD_REQUEST, response.status()); assert_eq!( "Request body is malformed\n\tcaused by: expected value at line 1 column 1", parse_error_response(response).message(), ); } #[test] fn empty_expiration() { let handler = ServerCertHandler::new(TestHsm::default(), TestWorkloadData::default()); let cert_req = 
ServerCertificateRequest::new("".to_string(), "".to_string()); let request = Request::get("http://localhost/modules/beeblebrox/genid/IV/certificate/server") .body(serde_json::to_string(&cert_req).unwrap().into()) .unwrap(); let params = Parameters::with_captures(vec![ (Some("name".to_string()), "beeblebrox".to_string()), (Some("genid".to_string()), "IV".to_string()), ]); let response = handler.handle(request, params).wait().unwrap(); assert_eq!(StatusCode::BAD_REQUEST, response.status()); assert_eq!( "Request body is malformed\n\tcaused by: Argument is empty or only has whitespace - []", parse_error_response(response).message(), ); } #[test] fn whitespace_expiration() { let handler = ServerCertHandler::new(TestHsm::default(), TestWorkloadData::default()); let cert_req = ServerCertificateRequest::new("".to_string(), " ".to_string()); let request = Request::get("http://localhost/modules/beeblebrox/genid/I/certificate/server") .body(serde_json::to_string(&cert_req).unwrap().into()) .unwrap(); let params = Parameters::with_captures(vec![ (Some("name".to_string()), "beeblebrox".to_string()), (Some("genid".to_string()), "I".to_string()), ]); let response = handler.handle(request, params).wait().unwrap(); assert_eq!(StatusCode::BAD_REQUEST, response.status()); assert_eq!( "Request body is malformed\n\tcaused by: Argument is empty or only has whitespace - []", parse_error_response(response).message(), ); } #[test] fn invalid_expiration() { let handler = ServerCertHandler::new(TestHsm::default(), TestWorkloadData::default()); let cert_req = ServerCertificateRequest::new("".to_string(), "Umm.. No.. 
Just no..".to_string()); let request = Request::get("http://localhost/modules/beeblebrox/genid/I/certificate/server") .body(serde_json::to_string(&cert_req).unwrap().into()) .unwrap(); let params = Parameters::with_captures(vec![ (Some("name".to_string()), "beeblebrox".to_string()), (Some("genid".to_string()), "I".to_string()), ]); let response = handler.handle(request, params).wait().unwrap(); assert_eq!(StatusCode::BAD_REQUEST, response.status()); assert_eq!( "Request body is malformed\n\tcaused by: input contains invalid characters", parse_error_response(response).message(), ); } #[test] fn past_expiration() { let handler = ServerCertHandler::new(TestHsm::default(), TestWorkloadData::default()); let cert_req = ServerCertificateRequest::new("".to_string(), "1999-06-28T16:39:57-08:00".to_string()); let request = Request::get("http://localhost/modules/beeblebrox/genid/I/certificate/server") .body(serde_json::to_string(&cert_req).unwrap().into()) .unwrap(); let params = Parameters::with_captures(vec![ (Some("name".to_string()), "beeblebrox".to_string()), (Some("genid".to_string()), "I".to_string()), ]); let response = handler.handle(request, params).wait().unwrap(); assert_eq!(StatusCode::BAD_REQUEST, response.status()); assert_eq!( "Request body is malformed", parse_error_response(response).message(), ); } #[test] fn empty_common_name() { let handler = ServerCertHandler::new(TestHsm::default(), TestWorkloadData::default()); let cert_req = ServerCertificateRequest::new( "".to_string(), (Utc::now() + Duration::hours(1)).to_rfc3339(), ); let request = Request::get("http://localhost/modules/beeblebrox/genid/I/certificate/server") .body(serde_json::to_string(&cert_req).unwrap().into()) .unwrap(); let params = Parameters::with_captures(vec![ (Some("name".to_string()), "beeblebrox".to_string()), (Some("genid".to_string()), "I".to_string()), ]); let response = handler.handle(request, params).wait().unwrap(); assert_eq!(StatusCode::BAD_REQUEST, response.status()); 
assert_eq!( "Request body is malformed\n\tcaused by: Argument is empty or only has whitespace - []", parse_error_response(response).message(), ); } #[test] fn white_space_common_name() { let handler = ServerCertHandler::new(TestHsm::default(), TestWorkloadData::default()); let cert_req = ServerCertificateRequest::new( " ".to_string(), (Utc::now() + Duration::hours(1)).to_rfc3339(), ); let request = Request::get("http://localhost/modules/beeblebrox/genid/I/certificate/server") .body(serde_json::to_string(&cert_req).unwrap().into()) .unwrap(); let params = Parameters::with_captures(vec![ (Some("name".to_string()), "beeblebrox".to_string()), (Some("genid".to_string()), "I".to_string()), ]); let response = handler.handle(request, params).wait().unwrap(); assert_eq!(StatusCode::BAD_REQUEST, response.status()); assert_eq!( "Request body is malformed\n\tcaused by: Argument is empty or only has whitespace - []", parse_error_response(response).message(), ); } #[test] fn create_cert_fails() { let handler = ServerCertHandler::new( TestHsm::default().with_on_create(|props| { assert_eq!("marvin", props.common_name()); assert_eq!("beeblebroxIserver", props.alias()); assert_eq!(CertificateType::Server, *props.certificate_type()); assert!(MAX_DURATION_SEC >= *props.validity_in_secs()); Err(CoreError::from(CoreErrorKind::KeyStore)) }), TestWorkloadData::default(), ); let cert_req = ServerCertificateRequest::new( "marvin".to_string(), (Utc::now() + Duration::hours(1)).to_rfc3339(), ); let request = Request::get("http://localhost/modules/beeblebrox/genid/I/certificate/server") .body(serde_json::to_string(&cert_req).unwrap().into()) .unwrap(); let params = Parameters::with_captures(vec![ (Some("name".to_string()), "beeblebrox".to_string()), (Some("genid".to_string()), "I".to_string()), ]); let response = handler.handle(request, params).wait().unwrap(); assert_eq!(StatusCode::INTERNAL_SERVER_ERROR, response.status()); assert_eq!( "Could not get server cert\n\tcaused by: A error 
occurred in the key store.", parse_error_response(response).message(), ); } #[test] fn pem_fails() { let handler = ServerCertHandler::new( TestHsm::default().with_on_create(|props| { assert_eq!("marvin", props.common_name()); assert_eq!("beeblebroxIserver", props.alias()); assert_eq!(CertificateType::Server, *props.certificate_type()); assert!(MAX_DURATION_SEC >= *props.validity_in_secs()); Ok(TestCert::default().with_fail_pem(true)) }), TestWorkloadData::default(), ); let cert_req = ServerCertificateRequest::new( "marvin".to_string(), (Utc::now() + Duration::hours(1)).to_rfc3339(), ); let request = Request::get("http://localhost/modules/beeblebrox/genid/I/certificate/server") .body(serde_json::to_string(&cert_req).unwrap().into()) .unwrap(); let params = Parameters::with_captures(vec![ (Some("name".to_string()), "beeblebrox".to_string()), (Some("genid".to_string()), "I".to_string()), ]); let response = handler.handle(request, params).wait().unwrap(); assert_eq!(StatusCode::INTERNAL_SERVER_ERROR, response.status()); assert_eq!( "Could not get server cert\n\tcaused by: A error occurred in the key store.", parse_error_response(response).message(), ); } #[test] fn private_key_fails() { let handler = ServerCertHandler::new( TestHsm::default().with_on_create(|props| { assert_eq!("marvin", props.common_name()); assert_eq!("beeblebroxIserver", props.alias()); assert_eq!(CertificateType::Server, *props.certificate_type()); assert!(MAX_DURATION_SEC >= *props.validity_in_secs()); Ok(TestCert::default().with_fail_private_key(true)) }), TestWorkloadData::default(), ); let cert_req = ServerCertificateRequest::new( "marvin".to_string(), (Utc::now() + Duration::hours(1)).to_rfc3339(), ); let request = Request::get("http://localhost/modules/beeblebrox/genid/I/certificate/server") .body(serde_json::to_string(&cert_req).unwrap().into()) .unwrap(); let params = Parameters::with_captures(vec![ (Some("name".to_string()), "beeblebrox".to_string()), (Some("genid".to_string()), 
"I".to_string()), ]); let response = handler.handle(request, params).wait().unwrap(); assert_eq!(StatusCode::INTERNAL_SERVER_ERROR, response.status()); assert_eq!( "Could not get server cert\n\tcaused by: A error occurred in the key store.", parse_error_response(response).message(), ); } #[test] fn succeeds_key() { let handler = ServerCertHandler::new( TestHsm::default().with_on_create(|props| { assert_eq!("marvin", props.common_name()); assert_eq!("beeblebroxIserver", props.alias()); assert_eq!(CertificateType::Server, *props.certificate_type()); let san_entries = props.san_entries().unwrap(); assert_eq!(1, san_entries.len()); assert_eq!("DNS:beeblebrox, DNS:marvin", san_entries[0]); assert!(MAX_DURATION_SEC >= *props.validity_in_secs()); Ok(TestCert::default() .with_private_key(PrivateKey::Key(KeyBytes::Pem("Betelgeuse".to_string())))) }), TestWorkloadData::default(), ); let cert_req = ServerCertificateRequest::new( "marvin".to_string(), (Utc::now() + Duration::hours(1)).to_rfc3339(), ); let request = Request::get("http://localhost/modules/beeblebrox/genid/I/certificate/server") .body(serde_json::to_string(&cert_req).unwrap().into()) .unwrap(); let params = Parameters::with_captures(vec![ (Some("name".to_string()), "beeblebrox".to_string()), (Some("genid".to_string()), "I".to_string()), ]); let response = handler.handle(request, params).wait().unwrap(); assert_eq!(StatusCode::CREATED, response.status()); let cert_resp = response .into_body() .concat2() .and_then(|b| Ok(serde_json::from_slice::<CertificateResponse>(&b).unwrap())) .wait() .unwrap(); assert_eq!("key", cert_resp.private_key().type_()); assert_eq!(Some("Betelgeuse"), cert_resp.private_key().bytes()); } #[test] fn succeeds_ref() { let handler = ServerCertHandler::new( TestHsm::default().with_on_create(|props| { assert_eq!("marvin", props.common_name()); assert_eq!("beeblebroxIserver", props.alias()); assert_eq!(CertificateType::Server, *props.certificate_type()); assert!(MAX_DURATION_SEC >= 
*props.validity_in_secs()); Ok(TestCert::default().with_private_key(PrivateKey::Ref("Betelgeuse".to_string()))) }), TestWorkloadData::default(), ); let cert_req = ServerCertificateRequest::new( "marvin".to_string(), (Utc::now() + Duration::hours(1)).to_rfc3339(), ); let request = Request::get("http://localhost/modules/beeblebrox/genid/I/certificate/server") .body(serde_json::to_string(&cert_req).unwrap().into()) .unwrap(); let params = Parameters::with_captures(vec![ (Some("name".to_string()), "beeblebrox".to_string()), (Some("genid".to_string()), "I".to_string()), ]); let response = handler.handle(request, params).wait().unwrap(); assert_eq!(StatusCode::CREATED, response.status()); let cert_resp = response .into_body() .concat2() .and_then(|b| Ok(serde_json::from_slice::<CertificateResponse>(&b).unwrap())) .wait() .unwrap(); assert_eq!("ref", cert_resp.private_key().type_()); assert_eq!(Some("Betelgeuse"), cert_resp.private_key().ref_()); } #[test] fn long_expiration_capped_to_max_duration_ok() { let handler = ServerCertHandler::new( TestHsm::default().with_on_create(|props| { assert_eq!("marvin", props.common_name()); assert_eq!("beeblebroxIserver", props.alias()); assert_eq!(CertificateType::Server, *props.certificate_type()); assert_eq!(MAX_DURATION_SEC, *props.validity_in_secs()); Ok(TestCert::default() .with_private_key(PrivateKey::Key(KeyBytes::Pem("Betelgeuse".to_string())))) }), TestWorkloadData::default(), ); let cert_req = ServerCertificateRequest::new( "marvin".to_string(), (Utc::now() + Duration::hours(7000)).to_rfc3339(), ); let request = Request::get("http://localhost/modules/beeblebrox/genid/I/certificate/server") .body(serde_json::to_string(&cert_req).unwrap().into()) .unwrap(); let params = Parameters::with_captures(vec![ (Some("name".to_string()), "beeblebrox".to_string()), (Some("genid".to_string()), "I".to_string()), ]); let response = handler.handle(request, params).wait().unwrap(); assert_eq!(StatusCode::CREATED, response.status()); let 
cert_resp = response .into_body() .concat2() .and_then(|b| Ok(serde_json::from_slice::<CertificateResponse>(&b).unwrap())) .wait() .unwrap(); assert_eq!("key", cert_resp.private_key().type_()); assert_eq!(Some("Betelgeuse"), cert_resp.private_key().bytes()); } #[test] fn get_cert_time_fails() { let handler = ServerCertHandler::new( TestHsm::default().with_on_create(|props| { assert_eq!("marvin", props.common_name()); assert_eq!("beeblebroxIserver", props.alias()); assert_eq!(CertificateType::Server, *props.certificate_type()); assert!(MAX_DURATION_SEC >= *props.validity_in_secs()); Ok(TestCert::default().with_fail_valid_to(true)) }), TestWorkloadData::default(), ); let cert_req = ServerCertificateRequest::new( "marvin".to_string(), (Utc::now() + Duration::hours(1)).to_rfc3339(), ); let request = Request::get("http://localhost/modules/beeblebrox/genid/I/certificate/server") .body(serde_json::to_string(&cert_req).unwrap().into()) .unwrap(); let params = Parameters::with_captures(vec![ (Some("name".to_string()), "beeblebrox".to_string()), (Some("genid".to_string()), "I".to_string()), ]); let response = handler.handle(request, params).wait().unwrap(); assert_eq!(StatusCode::INTERNAL_SERVER_ERROR, response.status()); assert_eq!( "Could not get server cert\n\tcaused by: A error occurred in the key store.", parse_error_response(response).message(), ); } }
38.152327
100
0.569168
89c7d6af26aca457ee8d4c4c477d860879aeec02
56
net.sf.jasperreports.engine.fill.OffsetElementsIterator
28
55
0.892857
6a6730ad5de90b1f9bf22060148dd3724a3a914d
4,115
use std::sync::Arc; use metrics::{counter, histogram}; use tokio::{sync::Mutex, task, time::Instant}; use tracing::info; use crate::{ error::Result, format::{TableDesc, TableReader, Timestamp}, manifest::{Manifest, VersionDesc}, memtable::MemTable, storage::Storage, }; struct Table { desc: TableDesc, reader: Box<dyn TableReader>, } pub struct Version { sequence: u64, tables: Vec<Arc<Table>>, storage: Arc<dyn Storage>, } impl Version { pub fn sequence(&self) -> u64 { self.sequence } pub async fn get(&self, ts: Timestamp, key: &[u8]) -> Result<Option<Vec<u8>>> { // The last table contains the latest data. for table in self.tables.iter().rev() { if let Some(v) = table.reader.get(ts, key).await? { return Ok(Some(v)); } } Ok(None) } pub async fn count(&self) -> Result<usize> { let mut handles = Vec::new(); for table in &self.tables { let storage = self.storage.clone(); let table_number = table.desc.table_number; let handle = task::spawn(async move { storage.count_table(table_number).await }); handles.push(handle); } let mut sum = 0; for handle in handles { sum += handle.await??; } Ok(sum) } } pub struct VersionSet { id: u64, name: String, current: Mutex<Arc<Version>>, storage: Arc<dyn Storage>, manifest: Arc<dyn Manifest>, } impl VersionSet { pub fn new(id: u64, storage: Arc<dyn Storage>, manifest: Arc<dyn Manifest>) -> VersionSet { let version = Version { sequence: 0, tables: Vec::new(), storage: storage.clone(), }; VersionSet { id, name: format!("shard:{}", id), current: Mutex::new(Arc::new(version)), storage, manifest, } } pub async fn current(&self) -> Result<Arc<Version>> { let version = self.manifest.current(self.id).await?; self.install_version(version).await } pub async fn flush_memtable(&self, mem: Arc<dyn MemTable>) -> Result<Arc<Version>> { info!("[{}] start flush size {}", self.name, mem.size()); let start = Instant::now(); let number = self.manifest.next_number().await?; let mut builder = self.storage.new_builder(number).await?; let snapshot = 
mem.snapshot().await; for ent in snapshot.iter() { builder.add(ent.0, ent.1, ent.2).await; } let table = builder.finish().await?; let throughput = mem.size() as f64 / start.elapsed().as_secs_f64(); counter!("engula.flush.bytes", mem.size() as u64); histogram!("engula.flush.throughput", throughput); info!( "[{}] finish flush table {:?} throughput {} MB/s", self.name, table, throughput as u64 / 1024 / 1024 ); let version = self.manifest.add_table(self.id, table).await?; self.install_version(version).await } async fn install_version(&self, version: VersionDesc) -> Result<Arc<Version>> { let mut current = self.current.lock().await; if current.sequence() >= version.sequence { return Ok(current.clone()); } let mut tables = Vec::new(); for desc in version.tables { if let Some(table) = current .tables .iter() .find(|x| x.desc.table_number == desc.table_number) { // Reuses existing tables. tables.push(table.clone()); } else { let reader = self.storage.new_reader(desc.clone()).await?; let table = Arc::new(Table { desc, reader }); tables.push(table); } } *current = Arc::new(Version { sequence: version.sequence, tables, storage: self.storage.clone(), }); Ok(current.clone()) } }
30.036496
95
0.539733
566ac7429e64c206ba5004eefaeed4612e392b1e
1,921
//! [GET /_matrix/client/r0/rooms/{roomId}/context/{eventId}](https://matrix.org/docs/spec/client_server/r0.4.0.html#get-matrix-client-r0-rooms-roomid-context-eventid) use ruma_api::ruma_api; use ruma_events::{collections::only, EventResult}; use ruma_identifiers::{EventId, RoomId}; ruma_api! { metadata { description: "Get the events immediately preceding and following a given event.", method: GET, path: "/_matrix/client/r0/rooms/:room_id/context/:event_id", name: "get_context", rate_limited: false, requires_authentication: true, } request { /// The event to get context around. #[ruma_api(path)] pub event_id: EventId, /// The maximum number of events to return. /// /// Defaults to 10 if not supplied. #[ruma_api(query)] pub limit: u8, /// The room to get events from. #[ruma_api(path)] pub room_id: RoomId, } response { /// A token that can be used to paginate forwards with. pub end: String, /// Details of the requested event. #[wrap_incoming(with EventResult)] pub event: only::RoomEvent, /// A list of room events that happened just after the requested event, in chronological /// order. #[wrap_incoming(only::RoomEvent with EventResult)] pub events_after: Vec<only::RoomEvent>, /// A list of room events that happened just before the requested event, in /// reverse-chronological order. #[wrap_incoming(only::RoomEvent with EventResult)] pub events_before: Vec<only::RoomEvent>, /// A token that can be used to paginate backwards with. pub start: String, /// The state of the room at the last event returned. #[wrap_incoming(only::StateEvent with EventResult)] pub state: Vec<only::StateEvent>, } }
36.942308
167
0.62936
628b679236e1d4d879df4e906fcb79cbb5cb13d2
54,189
#[doc = include_str!("panic.md")] #[macro_export] #[rustc_builtin_macro(core_panic)] #[allow_internal_unstable(edition_panic)] #[stable(feature = "core", since = "1.6.0")] #[rustc_diagnostic_item = "core_panic_macro"] macro_rules! panic { // Expands to either `$crate::panic::panic_2015` or `$crate::panic::panic_2021` // depending on the edition of the caller. ($($arg:tt)*) => { /* compiler built-in */ }; } /// Asserts that two expressions are equal to each other (using [`PartialEq`]). /// /// On panic, this macro will print the values of the expressions with their /// debug representations. /// /// Like [`assert!`], this macro has a second form, where a custom /// panic message can be provided. /// /// # Examples /// /// ``` /// let a = 3; /// let b = 1 + 2; /// assert_eq!(a, b); /// /// assert_eq!(a, b, "we are testing addition with {} and {}", a, b); /// ``` #[macro_export] #[stable(feature = "rust1", since = "1.0.0")] #[cfg_attr(not(test), rustc_diagnostic_item = "assert_eq_macro")] #[allow_internal_unstable(core_panic)] macro_rules! assert_eq { ($left:expr, $right:expr $(,)?) => ({ match (&$left, &$right) { (left_val, right_val) => { if !(*left_val == *right_val) { let kind = $crate::panicking::AssertKind::Eq; // The reborrows below are intentional. Without them, the stack slot for the // borrow is initialized even before the values are compared, leading to a // noticeable slow down. $crate::panicking::assert_failed(kind, &*left_val, &*right_val, $crate::option::Option::None); } } } }); ($left:expr, $right:expr, $($arg:tt)+) => ({ match (&$left, &$right) { (left_val, right_val) => { if !(*left_val == *right_val) { let kind = $crate::panicking::AssertKind::Eq; // The reborrows below are intentional. Without them, the stack slot for the // borrow is initialized even before the values are compared, leading to a // noticeable slow down. 
$crate::panicking::assert_failed(kind, &*left_val, &*right_val, $crate::option::Option::Some($crate::format_args!($($arg)+))); } } } }); } /// Asserts that two expressions are not equal to each other (using [`PartialEq`]). /// /// On panic, this macro will print the values of the expressions with their /// debug representations. /// /// Like [`assert!`], this macro has a second form, where a custom /// panic message can be provided. /// /// # Examples /// /// ``` /// let a = 3; /// let b = 2; /// assert_ne!(a, b); /// /// assert_ne!(a, b, "we are testing that the values are not equal"); /// ``` #[macro_export] #[stable(feature = "assert_ne", since = "1.13.0")] #[cfg_attr(not(test), rustc_diagnostic_item = "assert_ne_macro")] #[allow_internal_unstable(core_panic)] macro_rules! assert_ne { ($left:expr, $right:expr $(,)?) => ({ match (&$left, &$right) { (left_val, right_val) => { if *left_val == *right_val { let kind = $crate::panicking::AssertKind::Ne; // The reborrows below are intentional. Without them, the stack slot for the // borrow is initialized even before the values are compared, leading to a // noticeable slow down. $crate::panicking::assert_failed(kind, &*left_val, &*right_val, $crate::option::Option::None); } } } }); ($left:expr, $right:expr, $($arg:tt)+) => ({ match (&($left), &($right)) { (left_val, right_val) => { if *left_val == *right_val { let kind = $crate::panicking::AssertKind::Ne; // The reborrows below are intentional. Without them, the stack slot for the // borrow is initialized even before the values are compared, leading to a // noticeable slow down. $crate::panicking::assert_failed(kind, &*left_val, &*right_val, $crate::option::Option::Some($crate::format_args!($($arg)+))); } } } }); } /// Asserts that an expression matches any of the given patterns. /// /// Like in a `match` expression, the pattern can be optionally followed by `if` /// and a guard expression that has access to names bound by the pattern. 
///
/// On panic, this macro will print the value of the expression with its
/// debug representation.
///
/// Like [`assert!`], this macro has a second form, where a custom
/// panic message can be provided.
///
/// # Examples
///
/// ```
/// #![feature(assert_matches)]
///
/// use std::assert_matches::assert_matches;
///
/// let a = 1u32.checked_add(2);
/// let b = 1u32.checked_sub(2);
/// assert_matches!(a, Some(_));
/// assert_matches!(b, None);
///
/// let c = Ok("abc".to_string());
/// assert_matches!(c, Ok(x) | Err(x) if x.len() < 100);
/// ```
#[unstable(feature = "assert_matches", issue = "82775")]
#[allow_internal_unstable(core_panic)]
#[rustc_macro_transparency = "semitransparent"]
// NOTE(review): declared with the macros-2.0 `pub macro` syntax rather than
// `macro_rules!`; on failure the scrutinee is passed by reference together
// with the stringified pattern to `assert_matches_failed`.
pub macro assert_matches {
    ($left:expr, $(|)? $( $pattern:pat_param )|+ $( if $guard: expr )? $(,)?) => ({
        match $left {
            $( $pattern )|+ $( if $guard )? => {}
            ref left_val => {
                $crate::panicking::assert_matches_failed(
                    left_val,
                    $crate::stringify!($($pattern)|+ $(if $guard)?),
                    $crate::option::Option::None
                );
            }
        }
    }),
    ($left:expr, $(|)? $( $pattern:pat_param )|+ $( if $guard: expr )?, $($arg:tt)+) => ({
        match $left {
            $( $pattern )|+ $( if $guard )? => {}
            ref left_val => {
                $crate::panicking::assert_matches_failed(
                    left_val,
                    $crate::stringify!($($pattern)|+ $(if $guard)?),
                    $crate::option::Option::Some($crate::format_args!($($arg)+))
                );
            }
        }
    }),
}

/// Asserts that a boolean expression is `true` at runtime.
///
/// This will invoke the [`panic!`] macro if the provided expression cannot be
/// evaluated to `true` at runtime.
///
/// Like [`assert!`], this macro also has a second version, where a custom panic
/// message can be provided.
///
/// # Uses
///
/// Unlike [`assert!`], `debug_assert!` statements are only enabled in non
/// optimized builds by default. An optimized build will not execute
/// `debug_assert!` statements unless `-C debug-assertions` is passed to the
/// compiler. This makes `debug_assert!` useful for checks that are too
/// expensive to be present in a release build but may be helpful during
/// development. The result of expanding `debug_assert!` is always type checked.
///
/// An unchecked assertion allows a program in an inconsistent state to keep
/// running, which might have unexpected consequences but does not introduce
/// unsafety as long as this only happens in safe code. The performance cost
/// of assertions, however, is not measurable in general. Replacing [`assert!`]
/// with `debug_assert!` is thus only encouraged after thorough profiling, and
/// more importantly, only in safe code!
///
/// # Examples
///
/// ```
/// // the panic message for these assertions is the stringified value of the
/// // expression given.
/// debug_assert!(true);
///
/// fn some_expensive_computation() -> bool { true } // a very simple function
/// debug_assert!(some_expensive_computation());
///
/// // assert with a custom message
/// let x = true;
/// debug_assert!(x, "x wasn't true!");
///
/// let a = 3; let b = 27;
/// debug_assert!(a + b == 30, "a = {}, b = {}", a, b);
/// ```
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_diagnostic_item = "debug_assert_macro"]
#[allow_internal_unstable(edition_panic)]
macro_rules! debug_assert {
    // The assertion is guarded by `cfg!(debug_assertions)`, so it is compiled
    // (and type checked) in every build but only executed when debug
    // assertions are enabled.
    ($($arg:tt)*) => (if $crate::cfg!(debug_assertions) { $crate::assert!($($arg)*); })
}

/// Asserts that two expressions are equal to each other.
///
/// On panic, this macro will print the values of the expressions with their
/// debug representations.
///
/// Unlike [`assert_eq!`], `debug_assert_eq!` statements are only enabled in non
/// optimized builds by default. An optimized build will not execute
/// `debug_assert_eq!` statements unless `-C debug-assertions` is passed to the
/// compiler. This makes `debug_assert_eq!` useful for checks that are too
/// expensive to be present in a release build but may be helpful during
/// development. The result of expanding `debug_assert_eq!` is always type checked.
///
/// # Examples
///
/// ```
/// let a = 3;
/// let b = 1 + 2;
/// debug_assert_eq!(a, b);
/// ```
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "debug_assert_eq_macro")]
macro_rules! debug_assert_eq {
    ($($arg:tt)*) => (if $crate::cfg!(debug_assertions) { $crate::assert_eq!($($arg)*); })
}

/// Asserts that two expressions are not equal to each other.
///
/// On panic, this macro will print the values of the expressions with their
/// debug representations.
///
/// Unlike [`assert_ne!`], `debug_assert_ne!` statements are only enabled in non
/// optimized builds by default. An optimized build will not execute
/// `debug_assert_ne!` statements unless `-C debug-assertions` is passed to the
/// compiler. This makes `debug_assert_ne!` useful for checks that are too
/// expensive to be present in a release build but may be helpful during
/// development. The result of expanding `debug_assert_ne!` is always type checked.
///
/// # Examples
///
/// ```
/// let a = 3;
/// let b = 2;
/// debug_assert_ne!(a, b);
/// ```
#[macro_export]
#[stable(feature = "assert_ne", since = "1.13.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "debug_assert_ne_macro")]
macro_rules! debug_assert_ne {
    ($($arg:tt)*) => (if $crate::cfg!(debug_assertions) { $crate::assert_ne!($($arg)*); })
}

/// Asserts that an expression matches any of the given patterns.
///
/// Like in a `match` expression, the pattern can be optionally followed by `if`
/// and a guard expression that has access to names bound by the pattern.
///
/// On panic, this macro will print the value of the expression with its
/// debug representation.
///
/// Unlike [`assert_matches!`], `debug_assert_matches!` statements are only
/// enabled in non optimized builds by default. An optimized build will not
/// execute `debug_assert_matches!` statements unless `-C debug-assertions` is
/// passed to the compiler. This makes `debug_assert_matches!` useful for
/// checks that are too expensive to be present in a release build but may be
/// helpful during development. The result of expanding `debug_assert_matches!`
/// is always type checked.
///
/// # Examples
///
/// ```
/// #![feature(assert_matches)]
///
/// use std::assert_matches::debug_assert_matches;
///
/// let a = 1u32.checked_add(2);
/// let b = 1u32.checked_sub(2);
/// debug_assert_matches!(a, Some(_));
/// debug_assert_matches!(b, None);
///
/// let c = Ok("abc".to_string());
/// debug_assert_matches!(c, Ok(x) | Err(x) if x.len() < 100);
/// ```
#[macro_export]
#[unstable(feature = "assert_matches", issue = "82775")]
#[allow_internal_unstable(assert_matches)]
#[rustc_macro_transparency = "semitransparent"]
pub macro debug_assert_matches($($arg:tt)*) {
    if $crate::cfg!(debug_assertions) {
        $crate::assert_matches::assert_matches!($($arg)*);
    }
}

/// Returns whether the given expression matches any of the given patterns.
///
/// Like in a `match` expression, the pattern can be optionally followed by `if`
/// and a guard expression that has access to names bound by the pattern.
///
/// # Examples
///
/// ```
/// let foo = 'f';
/// assert!(matches!(foo, 'A'..='Z' | 'a'..='z'));
///
/// let bar = Some(4);
/// assert!(matches!(bar, Some(x) if x > 2));
/// ```
#[macro_export]
#[stable(feature = "matches_macro", since = "1.42.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "matches_macro")]
macro_rules! matches {
    // The matcher mirrors `match`-arm pattern grammar: an optional leading
    // `|`, one or more `|`-separated patterns, and an optional guard.
    ($expression:expr, $(|)? $( $pattern:pat_param )|+ $( if $guard: expr )? $(,)?) => {
        match $expression {
            $( $pattern )|+ $( if $guard )? => true,
            _ => false
        }
    }
}

/// Unwraps a result or propagates its error.
///
/// The `?` operator was added to replace `try!` and should be used instead.
/// Furthermore, `try` is a reserved word in Rust 2018, so if you must use
/// it, you will need to use the [raw-identifier syntax][ris]: `r#try`.
///
/// [ris]: https://doc.rust-lang.org/nightly/rust-by-example/compatibility/raw_identifiers.html
///
/// `try!` matches the given [`Result`]. In case of the `Ok` variant, the
/// expression has the value of the wrapped value.
///
/// In case of the `Err` variant, it retrieves the inner error. `try!` then
/// performs conversion using `From`. This provides automatic conversion
/// between specialized errors and more general ones. The resulting
/// error is then immediately returned.
///
/// Because of the early return, `try!` can only be used in functions that
/// return [`Result`].
///
/// # Examples
///
/// ```
/// use std::io;
/// use std::fs::File;
/// use std::io::prelude::*;
///
/// enum MyError {
///     FileWriteError
/// }
///
/// impl From<io::Error> for MyError {
///     fn from(e: io::Error) -> MyError {
///         MyError::FileWriteError
///     }
/// }
///
/// // The preferred method of quick returning Errors
/// fn write_to_file_question() -> Result<(), MyError> {
///     let mut file = File::create("my_best_friends.txt")?;
///     file.write_all(b"This is a list of my best friends.")?;
///     Ok(())
/// }
///
/// // The previous method of quick returning Errors
/// fn write_to_file_using_try() -> Result<(), MyError> {
///     let mut file = r#try!(File::create("my_best_friends.txt"));
///     r#try!(file.write_all(b"This is a list of my best friends."));
///     Ok(())
/// }
///
/// // This is equivalent to:
/// fn write_to_file_using_match() -> Result<(), MyError> {
///     let mut file = r#try!(File::create("my_best_friends.txt"));
///     match file.write_all(b"This is a list of my best friends.") {
///         Ok(v) => v,
///         Err(e) => return Err(From::from(e)),
///     }
///     Ok(())
/// }
/// ```
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_deprecated(since = "1.39.0", reason = "use the `?` operator instead")]
#[doc(alias = "?")]
// Deprecated since 1.39.0 (see attribute above); kept for backwards
// compatibility with pre-`?` code.
macro_rules! r#try {
    // On `Err`, converts via `From` and returns early from the enclosing
    // function; on `Ok`, the whole expression evaluates to the wrapped value.
    ($expr:expr $(,)?) => {
        match $expr {
            $crate::result::Result::Ok(val) => val,
            $crate::result::Result::Err(err) => {
                return $crate::result::Result::Err($crate::convert::From::from(err));
            }
        }
    };
}

/// Writes formatted data into a buffer.
///
/// This macro accepts a 'writer', a format string, and a list of arguments. Arguments will be
/// formatted according to the specified format string and the result will be passed to the writer.
/// The writer may be any value with a `write_fmt` method; generally this comes from an
/// implementation of either the [`fmt::Write`] or the [`io::Write`] trait. The macro
/// returns whatever the `write_fmt` method returns; commonly a [`fmt::Result`], or an
/// [`io::Result`].
///
/// See [`std::fmt`] for more information on the format string syntax.
///
/// [`std::fmt`]: ../std/fmt/index.html
/// [`fmt::Write`]: crate::fmt::Write
/// [`io::Write`]: ../std/io/trait.Write.html
/// [`fmt::Result`]: crate::fmt::Result
/// [`io::Result`]: ../std/io/type.Result.html
///
/// # Examples
///
/// ```
/// use std::io::Write;
///
/// fn main() -> std::io::Result<()> {
///     let mut w = Vec::new();
///     write!(&mut w, "test")?;
///     write!(&mut w, "formatted {}", "arguments")?;
///
///     assert_eq!(w, b"testformatted arguments");
///     Ok(())
/// }
/// ```
///
/// A module can import both `std::fmt::Write` and `std::io::Write` and call `write!` on objects
/// implementing either, as objects do not typically implement both. However, the module must
/// import the traits qualified so their names do not conflict:
///
/// ```
/// use std::fmt::Write as FmtWrite;
/// use std::io::Write as IoWrite;
///
/// fn main() -> Result<(), Box<dyn std::error::Error>> {
///     let mut s = String::new();
///     let mut v = Vec::new();
///
///     write!(&mut s, "{} {}", "abc", 123)?; // uses fmt::Write::write_fmt
///     write!(&mut v, "s = {:?}", s)?; // uses io::Write::write_fmt
///     assert_eq!(v, b"s = \"abc 123\"");
///     Ok(())
/// }
/// ```
///
/// Note: This macro can be used in `no_std` setups as well.
/// In a `no_std` setup you are responsible for the implementation details of the components.
///
/// ```no_run
/// # extern crate core;
/// use core::fmt::Write;
///
/// struct Example;
///
/// impl Write for Example {
///     fn write_str(&mut self, _s: &str) -> core::fmt::Result {
///         unimplemented!();
///     }
/// }
///
/// let mut m = Example{};
/// write!(&mut m, "Hello World").expect("Not written");
/// ```
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "write_macro")]
macro_rules! write {
    // Duck-typed: resolves to whatever `write_fmt` method `$dst` provides
    // (fmt::Write, io::Write, or an inherent method).
    ($dst:expr, $($arg:tt)*) => ($dst.write_fmt($crate::format_args!($($arg)*)))
}

/// Write formatted data into a buffer, with a newline appended.
///
/// On all platforms, the newline is the LINE FEED character (`\n`/`U+000A`) alone
/// (no additional CARRIAGE RETURN (`\r`/`U+000D`)).
///
/// For more information, see [`write!`]. For information on the format string syntax, see
/// [`std::fmt`].
///
/// [`std::fmt`]: ../std/fmt/index.html
///
/// # Examples
///
/// ```
/// use std::io::{Write, Result};
///
/// fn main() -> Result<()> {
///     let mut w = Vec::new();
///     writeln!(&mut w)?;
///     writeln!(&mut w, "test")?;
///     writeln!(&mut w, "formatted {}", "arguments")?;
///
///     assert_eq!(&w[..], "\ntest\nformatted arguments\n".as_bytes());
///     Ok(())
/// }
/// ```
///
/// A module can import both `std::fmt::Write` and `std::io::Write` and call `write!` on objects
/// implementing either, as objects do not typically implement both. However, the module must
/// import the traits qualified so their names do not conflict:
///
/// ```
/// use std::fmt::Write as FmtWrite;
/// use std::io::Write as IoWrite;
///
/// fn main() -> Result<(), Box<dyn std::error::Error>> {
///     let mut s = String::new();
///     let mut v = Vec::new();
///
///     writeln!(&mut s, "{} {}", "abc", 123)?; // uses fmt::Write::write_fmt
///     writeln!(&mut v, "s = {:?}", s)?; // uses io::Write::write_fmt
///     assert_eq!(v, b"s = \"abc 123\\n\"\n");
///     Ok(())
/// }
/// ```
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "writeln_macro")]
#[allow_internal_unstable(format_args_nl)]
macro_rules! writeln {
    // With no format arguments, just write a bare newline.
    ($dst:expr $(,)?) => (
        $crate::write!($dst, "\n")
    );
    // `format_args_nl!` is the internal `format_args!` variant that appends a
    // trailing newline (see its definition in `mod builtin`).
    ($dst:expr, $($arg:tt)*) => (
        $dst.write_fmt($crate::format_args_nl!($($arg)*))
    );
}

/// Indicates unreachable code.
///
/// This is useful any time that the compiler can't determine that some code is unreachable. For
/// example:
///
/// * Match arms with guard conditions.
/// * Loops that dynamically terminate.
/// * Iterators that dynamically terminate.
///
/// If the determination that the code is unreachable proves incorrect, the
/// program immediately terminates with a [`panic!`].
///
/// The unsafe counterpart of this macro is the [`unreachable_unchecked`] function, which
/// will cause undefined behavior if the code is reached.
///
/// [`unreachable_unchecked`]: crate::hint::unreachable_unchecked
///
/// # Panics
///
/// This will always [`panic!`] because `unreachable!` is just a shorthand for `panic!` with a
/// fixed, specific message.
///
/// Like `panic!`, this macro has a second form for displaying custom values.
///
/// # Examples
///
/// Match arms:
///
/// ```
/// # #[allow(dead_code)]
/// fn foo(x: Option<i32>) {
///     match x {
///         Some(n) if n >= 0 => println!("Some(Non-negative)"),
///         Some(n) if n < 0 => println!("Some(Negative)"),
///         Some(_) => unreachable!(), // compile error if commented out
///         None => println!("None")
///     }
/// }
/// ```
///
/// Iterators:
///
/// ```
/// # #[allow(dead_code)]
/// fn divide_by_three(x: u32) -> u32 { // one of the poorest implementations of x/3
///     for i in 0.. {
///         if 3*i < i { panic!("u32 overflow"); }
///         if x < 3*i { return i-1; }
///     }
///     unreachable!("The loop should always return");
/// }
/// ```
// NOTE(review): two mutually exclusive definitions follow, selected by the
// `bootstrap` cfg; only one is ever compiled. The first is a compiler
// built-in that dispatches on the caller's edition.
#[cfg(not(bootstrap))]
#[macro_export]
#[rustc_builtin_macro(unreachable)]
#[allow_internal_unstable(edition_panic)]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "unreachable_macro")]
macro_rules! unreachable {
    // Expands to either `$crate::panic::unreachable_2015` or `$crate::panic::unreachable_2021`
    // depending on the edition of the caller.
    ($($arg:tt)*) => {
        /* compiler built-in */
    };
}

/// unreachable!() macro
#[cfg(bootstrap)]
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "unreachable_macro")]
#[allow_internal_unstable(core_panic)]
macro_rules! unreachable {
    () => ({
        $crate::panicking::panic("internal error: entered unreachable code")
    });
    ($msg:expr $(,)?) => ({
        $crate::unreachable!("{}", $msg)
    });
    ($fmt:expr, $($arg:tt)*) => ({
        $crate::panic!($crate::concat!("internal error: entered unreachable code: ", $fmt), $($arg)*)
    });
}

/// Indicates unimplemented code by panicking with a message of "not implemented".
///
/// This allows your code to type-check, which is useful if you are prototyping or
/// implementing a trait that requires multiple methods which you don't plan to use all of.
///
/// The difference between `unimplemented!` and [`todo!`] is that while `todo!`
/// conveys an intent of implementing the functionality later and the message is "not yet
/// implemented", `unimplemented!` makes no such claims. Its message is "not implemented".
/// Also some IDEs will mark `todo!`s.
///
/// # Panics
///
/// This will always [`panic!`] because `unimplemented!` is just a shorthand for `panic!` with a
/// fixed, specific message.
///
/// Like `panic!`, this macro has a second form for displaying custom values.
///
/// # Examples
///
/// Say we have a trait `Foo`:
///
/// ```
/// trait Foo {
///     fn bar(&self) -> u8;
///     fn baz(&self);
///     fn qux(&self) -> Result<u64, ()>;
/// }
/// ```
///
/// We want to implement `Foo` for 'MyStruct', but for some reason it only makes sense
/// to implement the `bar()` function. `baz()` and `qux()` will still need to be defined
/// in our implementation of `Foo`, but we can use `unimplemented!` in their definitions
/// to allow our code to compile.
///
/// We still want to have our program stop running if the unimplemented methods are
/// reached.
///
/// ```
/// # trait Foo {
/// #     fn bar(&self) -> u8;
/// #     fn baz(&self);
/// #     fn qux(&self) -> Result<u64, ()>;
/// # }
/// struct MyStruct;
///
/// impl Foo for MyStruct {
///     fn bar(&self) -> u8 {
///         1 + 1
///     }
///
///     fn baz(&self) {
///         // It makes no sense to `baz` a `MyStruct`, so we have no logic here
///         // at all.
///         // This will display "thread 'main' panicked at 'not implemented'".
///         unimplemented!();
///     }
///
///     fn qux(&self) -> Result<u64, ()> {
///         // We have some logic here,
///         // We can add a message to unimplemented! to display our omission.
///         // This will display:
///         // "thread 'main' panicked at 'not implemented: MyStruct isn't quxable'".
///         unimplemented!("MyStruct isn't quxable");
///     }
/// }
///
/// fn main() {
///     let s = MyStruct;
///     s.bar();
/// }
/// ```
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "unimplemented_macro")]
#[allow_internal_unstable(core_panic)]
macro_rules! unimplemented {
    // Bare form panics with the fixed message; the argument form appends the
    // caller's formatted message.
    () => ($crate::panicking::panic("not implemented"));
    ($($arg:tt)+) => ($crate::panic!("not implemented: {}", $crate::format_args!($($arg)+)));
}

/// Indicates unfinished code.
///
/// This can be useful if you are prototyping and are just looking to have your
/// code typecheck.
///
/// The difference between [`unimplemented!`] and `todo!` is that while `todo!` conveys
/// an intent of implementing the functionality later and the message is "not yet
/// implemented", `unimplemented!` makes no such claims. Its message is "not implemented".
/// Also some IDEs will mark `todo!`s.
///
/// # Panics
///
/// This will always [`panic!`].
///
/// # Examples
///
/// Here's an example of some in-progress code. We have a trait `Foo`:
///
/// ```
/// trait Foo {
///     fn bar(&self);
///     fn baz(&self);
/// }
/// ```
///
/// We want to implement `Foo` on one of our types, but we also want to work on
/// just `bar()` first. In order for our code to compile, we need to implement
/// `baz()`, so we can use `todo!`:
///
/// ```
/// # trait Foo {
/// #     fn bar(&self);
/// #     fn baz(&self);
/// # }
/// struct MyStruct;
///
/// impl Foo for MyStruct {
///     fn bar(&self) {
///         // implementation goes here
///     }
///
///     fn baz(&self) {
///         // let's not worry about implementing baz() for now
///         todo!();
///     }
/// }
///
/// fn main() {
///     let s = MyStruct;
///     s.bar();
///
///     // we aren't even using baz(), so this is fine.
/// }
/// ```
#[macro_export]
#[stable(feature = "todo_macro", since = "1.40.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "todo_macro")]
#[allow_internal_unstable(core_panic)]
macro_rules! todo {
    () => ($crate::panicking::panic("not yet implemented"));
    ($($arg:tt)+) => ($crate::panic!("not yet implemented: {}", $crate::format_args!($($arg)+)));
}

/// Definitions of built-in macros.
///
/// Most of the macro properties (stability, visibility, etc.) are taken from the source code here,
/// with exception of expansion functions transforming macro inputs into outputs,
/// those functions are provided by the compiler.
// NOTE(review): this module continues past the end of this chunk; only its
// opening and the first items are visible here. Every macro inside carries
// `#[rustc_builtin_macro]` — the arm bodies are placeholders.
pub(crate) mod builtin {

    /// Causes compilation to fail with the given error message when encountered.
    ///
    /// This macro should be used when a crate uses a conditional compilation strategy to provide
    /// better error messages for erroneous conditions. It's the compiler-level form of [`panic!`],
    /// but emits an error during *compilation* rather than at *runtime*.
    ///
    /// # Examples
    ///
    /// Two such examples are macros and `#[cfg]` environments.
    ///
    /// Emit better compiler error if a macro is passed invalid values. Without the final branch,
    /// the compiler would still emit an error, but the error's message would not mention the two
    /// valid values.
    ///
    /// ```compile_fail
    /// macro_rules! give_me_foo_or_bar {
    ///     (foo) => {};
    ///     (bar) => {};
    ///     ($x:ident) => {
    ///         compile_error!("This macro only accepts `foo` or `bar`");
    ///     }
    /// }
    ///
    /// give_me_foo_or_bar!(neither);
    /// // ^ will fail at compile time with message "This macro only accepts `foo` or `bar`"
    /// ```
    ///
    /// Emit compiler error if one of a number of features isn't available.
    ///
    /// ```compile_fail
    /// #[cfg(not(any(feature = "foo", feature = "bar")))]
    /// compile_error!("Either feature \"foo\" or \"bar\" must be enabled for this crate.");
    /// ```
    #[stable(feature = "compile_error_macro", since = "1.20.0")]
    #[rustc_builtin_macro]
    #[macro_export]
    #[cfg_attr(not(test), rustc_diagnostic_item = "compile_error_macro")]
    macro_rules! compile_error {
        ($msg:expr $(,)?) => {{ /* compiler built-in */ }};
    }

    /// Constructs parameters for the other string-formatting macros.
    ///
    /// This macro functions by taking a formatting string literal containing
    /// `{}` for each additional argument passed. `format_args!` prepares the
    /// additional parameters to ensure the output can be interpreted as a string
    /// and canonicalizes the arguments into a single type. Any value that implements
    /// the [`Display`] trait can be passed to `format_args!`, as can any
    /// [`Debug`] implementation be passed to a `{:?}` within the formatting string.
    ///
    /// This macro produces a value of type [`fmt::Arguments`]. This value can be
    /// passed to the macros within [`std::fmt`] for performing useful redirection.
    /// All other formatting macros ([`format!`], [`write!`], [`println!`], etc) are
    /// proxied through this one. `format_args!`, unlike its derived macros, avoids
    /// heap allocations.
    ///
    /// You can use the [`fmt::Arguments`] value that `format_args!` returns
    /// in `Debug` and `Display` contexts as seen below. The example also shows
    /// that `Debug` and `Display` format to the same thing: the interpolated
    /// format string in `format_args!`.
    ///
    /// ```rust
    /// let debug = format!("{:?}", format_args!("{} foo {:?}", 1, 2));
    /// let display = format!("{}", format_args!("{} foo {:?}", 1, 2));
    /// assert_eq!("1 foo 2", display);
    /// assert_eq!(display, debug);
    /// ```
    ///
    /// For more information, see the documentation in [`std::fmt`].
    ///
    /// [`Display`]: crate::fmt::Display
    /// [`Debug`]: crate::fmt::Debug
    /// [`fmt::Arguments`]: crate::fmt::Arguments
    /// [`std::fmt`]: ../std/fmt/index.html
    /// [`format!`]: ../std/macro.format.html
    /// [`println!`]: ../std/macro.println.html
    ///
    /// # Examples
    ///
    /// ```
    /// use std::fmt;
    ///
    /// let s = fmt::format(format_args!("hello {}", "world"));
    /// assert_eq!(s, format!("hello {}", "world"));
    /// ```
    // Built-in: the arms below are placeholders; expansion is done by rustc.
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg_attr(not(test), rustc_diagnostic_item = "format_args_macro")]
    #[allow_internal_unsafe]
    #[allow_internal_unstable(fmt_internals)]
    #[rustc_builtin_macro]
    #[macro_export]
    macro_rules! format_args {
        ($fmt:expr) => {{ /* compiler built-in */ }};
        ($fmt:expr, $($args:tt)*) => {{ /* compiler built-in */ }};
    }

    /// Same as [`format_args`], but can be used in some const contexts.
    ///
    /// This macro is used by the panic macros for the `const_panic` feature.
    ///
    /// This macro will be removed once `format_args` is allowed in const contexts.
    #[unstable(feature = "const_format_args", issue = "none")]
    #[allow_internal_unstable(fmt_internals, const_fmt_arguments_new)]
    #[rustc_builtin_macro]
    #[macro_export]
    macro_rules! const_format_args {
        ($fmt:expr) => {{ /* compiler built-in */ }};
        ($fmt:expr, $($args:tt)*) => {{ /* compiler built-in */ }};
    }

    /// Same as [`format_args`], but adds a newline in the end.
    #[unstable(
        feature = "format_args_nl",
        issue = "none",
        reason = "`format_args_nl` is only for internal \
                  language use and is subject to change"
    )]
    #[allow_internal_unstable(fmt_internals)]
    #[rustc_builtin_macro]
    #[macro_export]
    macro_rules! format_args_nl {
        ($fmt:expr) => {{ /* compiler built-in */ }};
        ($fmt:expr, $($args:tt)*) => {{ /* compiler built-in */ }};
    }

    /// Inspects an environment variable at compile time.
    ///
    /// This macro will expand to the value of the named environment variable at
    /// compile time, yielding an expression of type `&'static str`.
    ///
    /// If the environment variable is not defined, then a compilation error
    /// will be emitted. To not emit a compile error, use the [`option_env!`]
    /// macro instead.
    ///
    /// # Examples
    ///
    /// ```
    /// let path: &'static str = env!("PATH");
    /// println!("the $PATH variable at the time of compiling was: {}", path);
    /// ```
    ///
    /// You can customize the error message by passing a string as the second
    /// parameter:
    ///
    /// ```compile_fail
    /// let doc: &'static str = env!("documentation", "what's that?!");
    /// ```
    ///
    /// If the `documentation` environment variable is not defined, you'll get
    /// the following error:
    ///
    /// ```text
    /// error: what's that?!
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_builtin_macro]
    #[macro_export]
    #[cfg_attr(not(test), rustc_diagnostic_item = "env_macro")]
    macro_rules! env {
        // Second arm: the optional custom compile-error message.
        ($name:expr $(,)?) => {{ /* compiler built-in */ }};
        ($name:expr, $error_msg:expr $(,)?) => {{ /* compiler built-in */ }};
    }

    /// Optionally inspects an environment variable at compile time.
    ///
    /// If the named environment variable is present at compile time, this will
    /// expand into an expression of type `Option<&'static str>` whose value is
    /// `Some` of the value of the environment variable. If the environment
    /// variable is not present, then this will expand to `None`. See
    /// [`Option<T>`][Option] for more information on this type.
    ///
    /// A compile time error is never emitted when using this macro regardless
    /// of whether the environment variable is present or not.
    ///
    /// # Examples
    ///
    /// ```
    /// let key: Option<&'static str> = option_env!("SECRET_KEY");
    /// println!("the secret key might be: {:?}", key);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_builtin_macro]
    #[macro_export]
    #[cfg_attr(not(test), rustc_diagnostic_item = "option_env_macro")]
    macro_rules! option_env {
        ($name:expr $(,)?) => {{ /* compiler built-in */ }};
    }

    /// Concatenates identifiers into one identifier.
    ///
    /// This macro takes any number of comma-separated identifiers, and
    /// concatenates them all into one, yielding an expression which is a new
    /// identifier. Note that hygiene makes it such that this macro cannot
    /// capture local variables. Also, as a general rule, macros are only
    /// allowed in item, statement or expression position. That means while
    /// you may use this macro for referring to existing variables, functions or
    /// modules etc, you cannot define a new one with it.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(concat_idents)]
    ///
    /// # fn main() {
    /// fn foobar() -> u32 { 23 }
    ///
    /// let f = concat_idents!(foo, bar);
    /// println!("{}", f());
    ///
    /// // fn concat_idents!(new, fun, name) { } // not usable in this way!
    /// # }
    /// ```
    #[unstable(
        feature = "concat_idents",
        issue = "29599",
        reason = "`concat_idents` is not stable enough for use and is subject to change"
    )]
    #[rustc_builtin_macro]
    #[macro_export]
    macro_rules! concat_idents {
        ($($e:ident),+ $(,)?) => {{ /* compiler built-in */ }};
    }

    /// Concatenates literals into a byte slice.
    ///
    /// This macro takes any number of comma-separated literals, and concatenates them all into
    /// one, yielding an expression of type `&[u8; _]`, which represents all of the literals
    /// concatenated left-to-right. The literals passed can be any combination of:
    ///
    /// - byte literals (`b'r'`)
    /// - byte strings (`b"Rust"`)
    /// - arrays of bytes/numbers (`[b'A', 66, b'C']`)
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(concat_bytes)]
    ///
    /// # fn main() {
    /// let s: &[u8; 6] = concat_bytes!(b'A', b"BC", [68, b'E', 70]);
    /// assert_eq!(s, b"ABCDEF");
    /// # }
    /// ```
    #[unstable(feature = "concat_bytes", issue = "87555")]
    #[rustc_builtin_macro]
    #[macro_export]
    macro_rules! concat_bytes {
        ($($e:literal),+ $(,)?) => {{ /* compiler built-in */ }};
    }

    /// Concatenates literals into a static string slice.
    ///
    /// This macro takes any number of comma-separated literals, yielding an
    /// expression of type `&'static str` which represents all of the literals
    /// concatenated left-to-right.
    ///
    /// Integer and floating point literals are stringified in order to be
    /// concatenated.
    ///
    /// # Examples
    ///
    /// ```
    /// let s = concat!("test", 10, 'b', true);
    /// assert_eq!(s, "test10btrue");
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_builtin_macro]
    #[macro_export]
    #[cfg_attr(not(test), rustc_diagnostic_item = "concat_macro")]
    macro_rules! concat {
        ($($e:expr),* $(,)?) => {{ /* compiler built-in */ }};
    }

    // The source-location macros below (`line!`, `column!`, `file!`) are all
    // compiler built-ins; their arm bodies are placeholders.
    /// Expands to the line number on which it was invoked.
    ///
    /// With [`column!`] and [`file!`], these macros provide debugging information for
    /// developers about the location within the source.
    ///
    /// The expanded expression has type `u32` and is 1-based, so the first line
    /// in each file evaluates to 1, the second to 2, etc. This is consistent
    /// with error messages by common compilers or popular editors.
    /// The returned line is *not necessarily* the line of the `line!` invocation itself,
    /// but rather the first macro invocation leading up to the invocation
    /// of the `line!` macro.
    ///
    /// # Examples
    ///
    /// ```
    /// let current_line = line!();
    /// println!("defined on line: {}", current_line);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_builtin_macro]
    #[macro_export]
    #[cfg_attr(not(test), rustc_diagnostic_item = "line_macro")]
    macro_rules! line {
        () => {
            /* compiler built-in */
        };
    }

    /// Expands to the column number at which it was invoked.
    ///
    /// With [`line!`] and [`file!`], these macros provide debugging information for
    /// developers about the location within the source.
    ///
    /// The expanded expression has type `u32` and is 1-based, so the first column
    /// in each line evaluates to 1, the second to 2, etc. This is consistent
    /// with error messages by common compilers or popular editors.
    /// The returned column is *not necessarily* the column of the `column!` invocation itself,
    /// but rather the first macro invocation leading up to the invocation
    /// of the `column!` macro.
    ///
    /// # Examples
    ///
    /// ```
    /// let current_col = column!();
    /// println!("defined on column: {}", current_col);
    /// ```
    ///
    /// `column!` counts Unicode code points, not bytes or graphemes. As a result, the first two
    /// invocations return the same value, but the third does not.
    ///
    /// ```
    /// let a = ("foobar", column!()).1;
    /// let b = ("人之初性本善", column!()).1;
    /// let c = ("f̅o̅o̅b̅a̅r̅", column!()).1; // Uses combining overline (U+0305)
    ///
    /// assert_eq!(a, b);
    /// assert_ne!(b, c);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_builtin_macro]
    #[macro_export]
    #[cfg_attr(not(test), rustc_diagnostic_item = "column_macro")]
    macro_rules! column {
        () => {
            /* compiler built-in */
        };
    }

    /// Expands to the file name in which it was invoked.
    ///
    /// With [`line!`] and [`column!`], these macros provide debugging information for
    /// developers about the location within the source.
    ///
    /// The expanded expression has type `&'static str`, and the returned file
    /// is not the invocation of the `file!` macro itself, but rather the
    /// first macro invocation leading up to the invocation of the `file!`
    /// macro.
    ///
    /// # Examples
    ///
    /// ```
    /// let this_file = file!();
    /// println!("defined in file: {}", this_file);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_builtin_macro]
    #[macro_export]
    #[cfg_attr(not(test), rustc_diagnostic_item = "file_macro")]
    macro_rules! file {
        () => {
            /* compiler built-in */
        };
    }

    /// Stringifies its arguments.
    ///
    /// This macro will yield an expression of type `&'static str` which is the
    /// stringification of all the tokens passed to the macro. No restrictions
    /// are placed on the syntax of the macro invocation itself.
    ///
    /// Note that the expanded results of the input tokens may change in the
    /// future. You should be careful if you rely on the output.
    ///
    /// # Examples
    ///
    /// ```
    /// let one_plus_one = stringify!(1 + 1);
    /// assert_eq!(one_plus_one, "1 + 1");
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_builtin_macro]
    #[macro_export]
    #[cfg_attr(not(test), rustc_diagnostic_item = "stringify_macro")]
    macro_rules! stringify {
        ($($t:tt)*) => {
            /* compiler built-in */
        };
    }

    /// Includes a UTF-8 encoded file as a string.
    ///
    /// The file is located relative to the current file (similarly to how
    /// modules are found). The provided path is interpreted in a platform-specific
    /// way at compile time. So, for instance, an invocation with a Windows path
    /// containing backslashes `\` would not compile correctly on Unix.
    ///
    /// This macro will yield an expression of type `&'static str` which is the
    /// contents of the file.
    ///
    /// # Examples
    ///
    /// Assume there are two files in the same directory with the following
    /// contents:
    ///
    /// File 'spanish.in':
    ///
    /// ```text
    /// adiós
    /// ```
    ///
    /// File 'main.rs':
    ///
    /// ```ignore (cannot-doctest-external-file-dependency)
    /// fn main() {
    ///     let my_str = include_str!("spanish.in");
    ///     assert_eq!(my_str, "adiós\n");
    ///     print!("{}", my_str);
    /// }
    /// ```
    ///
    /// Compiling 'main.rs' and running the resulting binary will print "adiós".
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_builtin_macro]
    #[macro_export]
    #[cfg_attr(not(test), rustc_diagnostic_item = "include_str_macro")]
    macro_rules! include_str {
        ($file:expr $(,)?) => {{ /* compiler built-in */ }};
    }

    /// Includes a file as a reference to a byte array.
    ///
    /// The file is located relative to the current file (similarly to how
    /// modules are found). The provided path is interpreted in a platform-specific
    /// way at compile time. So, for instance, an invocation with a Windows path
    /// containing backslashes `\` would not compile correctly on Unix.
/// /// This macro will yield an expression of type `&'static [u8; N]` which is /// the contents of the file. /// /// # Examples /// /// Assume there are two files in the same directory with the following /// contents: /// /// File 'spanish.in': /// /// ```text /// adiós /// ``` /// /// File 'main.rs': /// /// ```ignore (cannot-doctest-external-file-dependency) /// fn main() { /// let bytes = include_bytes!("spanish.in"); /// assert_eq!(bytes, b"adi\xc3\xb3s\n"); /// print!("{}", String::from_utf8_lossy(bytes)); /// } /// ``` /// /// Compiling 'main.rs' and running the resulting binary will print "adiós". #[stable(feature = "rust1", since = "1.0.0")] #[rustc_builtin_macro] #[macro_export] #[cfg_attr(not(test), rustc_diagnostic_item = "include_bytes_macro")] macro_rules! include_bytes { ($file:expr $(,)?) => {{ /* compiler built-in */ }}; } /// Expands to a string that represents the current module path. /// /// The current module path can be thought of as the hierarchy of modules /// leading back up to the crate root. The first component of the path /// returned is the name of the crate currently being compiled. /// /// # Examples /// /// ``` /// mod test { /// pub fn foo() { /// assert!(module_path!().ends_with("test")); /// } /// } /// /// test::foo(); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[rustc_builtin_macro] #[macro_export] #[cfg_attr(not(test), rustc_diagnostic_item = "module_path_macro")] macro_rules! module_path { () => { /* compiler built-in */ }; } /// Evaluates boolean combinations of configuration flags at compile-time. /// /// In addition to the `#[cfg]` attribute, this macro is provided to allow /// boolean expression evaluation of configuration flags. This frequently /// leads to less duplicated code. /// /// The syntax given to this macro is the same syntax as the [`cfg`] /// attribute. /// /// `cfg!`, unlike `#[cfg]`, does not remove any code and only evaluates to true or false. 
For /// example, all blocks in an if/else expression need to be valid when `cfg!` is used for /// the condition, regardless of what `cfg!` is evaluating. /// /// [`cfg`]: ../reference/conditional-compilation.html#the-cfg-attribute /// /// # Examples /// /// ``` /// let my_directory = if cfg!(windows) { /// "windows-specific-directory" /// } else { /// "unix-directory" /// }; /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[rustc_builtin_macro] #[macro_export] #[cfg_attr(not(test), rustc_diagnostic_item = "cfg_macro")] macro_rules! cfg { ($($cfg:tt)*) => { /* compiler built-in */ }; } /// Parses a file as an expression or an item according to the context. /// /// The file is located relative to the current file (similarly to how /// modules are found). The provided path is interpreted in a platform-specific /// way at compile time. So, for instance, an invocation with a Windows path /// containing backslashes `\` would not compile correctly on Unix. /// /// Using this macro is often a bad idea, because if the file is /// parsed as an expression, it is going to be placed in the /// surrounding code unhygienically. This could result in variables /// or functions being different from what the file expected if /// there are variables or functions that have the same name in /// the current file. /// /// # Examples /// /// Assume there are two files in the same directory with the following /// contents: /// /// File 'monkeys.in': /// /// ```ignore (only-for-syntax-highlight) /// ['🙈', '🙊', '🙉'] /// .iter() /// .cycle() /// .take(6) /// .collect::<String>() /// ``` /// /// File 'main.rs': /// /// ```ignore (cannot-doctest-external-file-dependency) /// fn main() { /// let my_string = include!("monkeys.in"); /// assert_eq!("🙈🙊🙉🙈🙊🙉", my_string); /// println!("{}", my_string); /// } /// ``` /// /// Compiling 'main.rs' and running the resulting binary will print /// "🙈🙊🙉🙈🙊🙉". 
#[stable(feature = "rust1", since = "1.0.0")] #[rustc_builtin_macro] #[macro_export] #[cfg_attr(not(test), rustc_diagnostic_item = "include_macro")] macro_rules! include { ($file:expr $(,)?) => {{ /* compiler built-in */ }}; } /// Asserts that a boolean expression is `true` at runtime. /// /// This will invoke the [`panic!`] macro if the provided expression cannot be /// evaluated to `true` at runtime. /// /// # Uses /// /// Assertions are always checked in both debug and release builds, and cannot /// be disabled. See [`debug_assert!`] for assertions that are not enabled in /// release builds by default. /// /// Unsafe code may rely on `assert!` to enforce run-time invariants that, if /// violated could lead to unsafety. /// /// Other use-cases of `assert!` include testing and enforcing run-time /// invariants in safe code (whose violation cannot result in unsafety). /// /// # Custom Messages /// /// This macro has a second form, where a custom panic message can /// be provided with or without arguments for formatting. See [`std::fmt`] /// for syntax for this form. Expressions used as format arguments will only /// be evaluated if the assertion fails. /// /// [`std::fmt`]: ../std/fmt/index.html /// /// # Examples /// /// ``` /// // the panic message for these assertions is the stringified value of the /// // expression given. /// assert!(true); /// /// fn some_computation() -> bool { true } // a very simple function /// /// assert!(some_computation()); /// /// // assert with a custom message /// let x = true; /// assert!(x, "x wasn't true!"); /// /// let a = 3; let b = 27; /// assert!(a + b == 30, "a = {}, b = {}", a, b); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[rustc_builtin_macro] #[macro_export] #[rustc_diagnostic_item = "assert_macro"] #[allow_internal_unstable(core_panic, edition_panic)] macro_rules! assert { ($cond:expr $(,)?) 
=> {{ /* compiler built-in */ }}; ($cond:expr, $($arg:tt)+) => {{ /* compiler built-in */ }}; } /// Prints passed tokens into the standard output. #[unstable( feature = "log_syntax", issue = "29598", reason = "`log_syntax!` is not stable enough for use and is subject to change" )] #[rustc_builtin_macro] #[macro_export] macro_rules! log_syntax { ($($arg:tt)*) => { /* compiler built-in */ }; } /// Enables or disables tracing functionality used for debugging other macros. #[unstable( feature = "trace_macros", issue = "29598", reason = "`trace_macros` is not stable enough for use and is subject to change" )] #[rustc_builtin_macro] #[macro_export] macro_rules! trace_macros { (true) => {{ /* compiler built-in */ }}; (false) => {{ /* compiler built-in */ }}; } /// Attribute macro used to apply derive macros. /// /// See [the reference] for more info. /// /// [the reference]: ../../../reference/attributes/derive.html #[stable(feature = "rust1", since = "1.0.0")] #[rustc_builtin_macro] pub macro derive($item:item) { /* compiler built-in */ } /// Attribute macro applied to a function to turn it into a unit test. /// /// See [the reference] for more info. /// /// [the reference]: ../../../reference/attributes/testing.html#the-test-attribute #[stable(feature = "rust1", since = "1.0.0")] #[allow_internal_unstable(test, rustc_attrs)] #[rustc_builtin_macro] pub macro test($item:item) { /* compiler built-in */ } /// Attribute macro applied to a function to turn it into a benchmark test. #[unstable( feature = "test", issue = "50297", soft, reason = "`bench` is a part of custom test frameworks which are unstable" )] #[allow_internal_unstable(test, rustc_attrs)] #[rustc_builtin_macro] pub macro bench($item:item) { /* compiler built-in */ } /// An implementation detail of the `#[test]` and `#[bench]` macros. 
#[unstable( feature = "custom_test_frameworks", issue = "50297", reason = "custom test frameworks are an unstable feature" )] #[allow_internal_unstable(test, rustc_attrs)] #[rustc_builtin_macro] pub macro test_case($item:item) { /* compiler built-in */ } /// Attribute macro applied to a static to register it as a global allocator. /// /// See also [`std::alloc::GlobalAlloc`](../../../std/alloc/trait.GlobalAlloc.html). #[stable(feature = "global_allocator", since = "1.28.0")] #[allow_internal_unstable(rustc_attrs)] #[rustc_builtin_macro] pub macro global_allocator($item:item) { /* compiler built-in */ } /// Keeps the item it's applied to if the passed path is accessible, and removes it otherwise. #[unstable( feature = "cfg_accessible", issue = "64797", reason = "`cfg_accessible` is not fully implemented" )] #[rustc_builtin_macro] pub macro cfg_accessible($item:item) { /* compiler built-in */ } /// Expands all `#[cfg]` and `#[cfg_attr]` attributes in the code fragment it's applied to. #[unstable( feature = "cfg_eval", issue = "82679", reason = "`cfg_eval` is a recently implemented feature" )] #[rustc_builtin_macro] pub macro cfg_eval($($tt:tt)*) { /* compiler built-in */ } /// Unstable implementation detail of the `rustc` compiler, do not use. #[rustc_builtin_macro] #[stable(feature = "rust1", since = "1.0.0")] #[allow_internal_unstable(core_intrinsics, libstd_sys_internals)] #[rustc_deprecated( since = "1.52.0", reason = "rustc-serialize is deprecated and no longer supported" )] #[doc(hidden)] // While technically stable, using it is unstable, and deprecated. Hide it. pub macro RustcDecodable($item:item) { /* compiler built-in */ } /// Unstable implementation detail of the `rustc` compiler, do not use. 
#[rustc_builtin_macro] #[stable(feature = "rust1", since = "1.0.0")] #[allow_internal_unstable(core_intrinsics)] #[rustc_deprecated( since = "1.52.0", reason = "rustc-serialize is deprecated and no longer supported" )] #[doc(hidden)] // While technically stable, using it is unstable, and deprecated. Hide it. pub macro RustcEncodable($item:item) { /* compiler built-in */ } }
35.005814
146
0.587499
75fd2721f5b1031edb533b0cfab493f54eae4aae
2,809
use super::seed::*; use crate::theory::{hertz::*, key::Key, piano_key::PianoKey, pitch::Pitch, scale::Scale}; use rodio::source::Source; use std::{f32::consts::PI, fmt, str::FromStr, time::Duration}; pub struct MusicMaker { key: Key, seed: Box<dyn MusicSeed>, current_note: PianoKey, current_sample: usize, sample_rate: Hertz, volume: f32, } impl Default for MusicMaker { fn default() -> Self { Self { key: Key::default(), seed: Box::new(RandomSeed::default()), current_note: PianoKey::from_str("C4").unwrap(), current_sample: usize::default(), sample_rate: SAMPLE_RATE, volume: 2.0, } } } pub type Sample = f32; impl MusicMaker { pub fn new(base_note: PianoKey, scale: Scale, octaves: u8) -> Self { Self::default().set_key(base_note, scale, octaves) } fn get_frequency(&mut self) -> Sample { let pitch = Pitch::from(self.current_note); pitch.into() } fn new_note(&mut self) { let new_note = self.seed.get_note(self.key); //print!("{} ", new_note); TODO doenst work b/c sleep until end, i think? self.current_note = new_note; } pub fn set_key(mut self, base_note: PianoKey, scale: Scale, octaves: u8) -> Self { self.key = Key::new(scale, base_note, octaves); self } } impl Iterator for MusicMaker { type Item = Sample; // Sampled amplitude fn next(&mut self) -> Option<Self::Item> { self.current_sample = self.current_sample.wrapping_add(1); // will cycle let value = self.volume * PI * self.get_frequency() * self.current_sample as Sample / f64::from(self.sample_rate) as Sample; // when to switch notes? 
if self.current_sample as f64 >= f64::from(self.sample_rate) { self.current_sample = 0; self.new_note(); } Some(value.sin()) } } impl Source for MusicMaker { #[inline] fn current_frame_len(&self) -> Option<usize> { None } #[inline] fn channels(&self) -> u16 { 1 } #[inline] fn sample_rate(&self) -> u32 { f64::from(self.sample_rate) as u32 } #[inline] fn total_duration(&self) -> Option<Duration> { None } } impl fmt::Display for MusicMaker { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let key = self.key; write!( f, "Generating music from the {} {}\nOctaves: {} - {}\n{}", key.base_note.note, key.scale, key.base_note.octave, key.base_note.octave + key.octaves, key ) } } impl From<Pitch> for Sample { fn from(p: Pitch) -> Self { f64::from(p) as f32 } }
26.252336
91
0.56497
db0542a8979f9e050cbb5eb6621bf327b5b8320e
1,304
/* * Swaggy Jenkins * * Jenkins API clients generated from Swagger / Open API specification * * The version of the OpenAPI document: 1.1.2-pre.0 * Contact: [email protected] * Generated by: https://openapi-generator.tech */ #[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] pub struct SwapSpaceMonitorMemoryUsage2 { #[serde(rename = "_class", skip_serializing_if = "Option::is_none")] pub _class: Option<String>, #[serde(rename = "availablePhysicalMemory", skip_serializing_if = "Option::is_none")] pub available_physical_memory: Option<i32>, #[serde(rename = "availableSwapSpace", skip_serializing_if = "Option::is_none")] pub available_swap_space: Option<i32>, #[serde(rename = "totalPhysicalMemory", skip_serializing_if = "Option::is_none")] pub total_physical_memory: Option<i32>, #[serde(rename = "totalSwapSpace", skip_serializing_if = "Option::is_none")] pub total_swap_space: Option<i32>, } impl SwapSpaceMonitorMemoryUsage2 { pub fn new() -> SwapSpaceMonitorMemoryUsage2 { SwapSpaceMonitorMemoryUsage2 { _class: None, available_physical_memory: None, available_swap_space: None, total_physical_memory: None, total_swap_space: None, } } }
31.804878
89
0.693252
b9be21fc61b9949807e56c003897add958ebe2e4
3,936
// Copyright (c) 2018-2022 The MobileCoin Foundation #![deny(missing_docs)] //! Test Client use mc_common::logger::{create_app_logger, log, o}; use grpcio::{RpcStatus, RpcStatusCode}; use mc_fog_test_client::{ config::TestClientConfig, error::TestClientError, test_client::{TestClient, TestClientPolicy}, }; use mc_util_cli::ParserWithBuildInfo; use mc_util_grpc::AdminServer; use mc_util_parse::{load_css_file, CssSignature}; use serde::Serialize; use std::sync::Arc; #[derive(Serialize, Debug, Clone)] struct JsonData { pub policy: TestClientPolicy, pub config: TestClientConfig, } fn main() { mc_common::setup_panic_handler(); let (logger, _global_logger_guard) = create_app_logger(o!()); let config = TestClientConfig::parse(); let _tracer = mc_util_telemetry::setup_default_tracer(env!("CARGO_PKG_NAME")) .expect("Failed setting telemetry tracer"); // Set up test client policy taking into account the runtime config values let policy = TestClientPolicy { fail_fast_on_deadline: !config.measure_after_deadline, // Don't test RTH memos when passed --no_memos test_rth_memos: !config.no_memos, tx_submit_deadline: config.consensus_wait, tx_receive_deadline: config.consensus_wait, double_spend_wait: config.ledger_sync_wait, transfer_amount: config.transfer_amount, token_ids: config.token_ids.clone(), ..Default::default() }; let account_keys = config.load_accounts(&logger); // Start an admin server to publish prometheus metrics, if admin_listen_uri is // given let admin_server = config.admin_listen_uri.as_ref().map(|admin_listen_uri| { let json_data = JsonData { config: config.clone(), policy: policy.clone(), }; let get_config_json = Arc::new(move || { serde_json::to_string(&json_data).map_err(|err| { RpcStatus::with_message(RpcStatusCode::INTERNAL, format!("{:?}", err)) }) }); AdminServer::start( None, admin_listen_uri, "Fog Test Client".to_owned(), "".to_string(), Some(get_config_json), logger.clone(), ) .expect("Failed starting admin server") }); // Initialize test_client let 
test_client = TestClient::new( policy, account_keys, config.consensus_config.consensus_validators, config.fog_ledger, config.fog_view, config.grpc_retry_config, logger.clone(), ) .consensus_sigstruct(maybe_load_css(&config.consensus_enclave_css)) .fog_ingest_sigstruct(maybe_load_css(&config.ingest_enclave_css)) .fog_ledger_sigstruct(maybe_load_css(&config.ledger_enclave_css)) .fog_view_sigstruct(maybe_load_css(&config.view_enclave_css)); // Run continuously or run as a fixed length test, according to config if config.continuous { log::info!(logger, "One tx / {:?}", config.transfer_period); if admin_server.is_none() { log::warn!( logger, "Admin not configured, metrics will not be available" ); } test_client.run_continuously(config.transfer_period); } else { log::info!(logger, "Running {} test transfers", config.num_transactions); match test_client.run_test(config.num_transactions) { Ok(()) => log::info!(logger, "All tests passed"), Err(TestClientError::TxTimeout) => panic!( "Transactions could not clear in {:?} seconds", config.consensus_wait ), Err(e) => panic!("Unexpected error {:?}", e), } } } fn maybe_load_css(maybe_file: &Option<String>) -> Option<CssSignature> { maybe_file .as_ref() .map(|x| load_css_file(x).expect("Could not load css file")) }
32.8
86
0.64126
4ada6e2b564e42439c6499814f5e50ccbffadc19
10,506
use std::collections::HashMap; use std::sync::atomic::Ordering; use std::sync::Arc; use indexmap::IndexMap; use metrics::{Counter, Gauge, Histogram, Key, KeyName, Recorder, Unit}; use metrics_util::registry::{GenerationalAtomicStorage, Recency, Registry}; use parking_lot::RwLock; use crate::common::Snapshot; use crate::distribution::{Distribution, DistributionBuilder}; use crate::formatting::{ key_to_parts, sanitize_metric_name, write_help_line, write_metric_line, write_type_line, }; pub(crate) struct Inner { pub registry: Registry<Key, GenerationalAtomicStorage>, pub recency: Recency<Key>, pub distributions: RwLock<HashMap<String, IndexMap<Vec<String>, Distribution>>>, pub distribution_builder: DistributionBuilder, pub descriptions: RwLock<HashMap<String, &'static str>>, pub global_labels: IndexMap<String, String>, } impl Inner { fn get_recent_metrics(&self) -> Snapshot { let mut counters = HashMap::new(); let counter_handles = self.registry.get_counter_handles(); for (key, counter) in counter_handles { let gen = counter.get_generation(); if !self.recency.should_store_counter(&key, gen, &self.registry) { continue; } let (name, labels) = key_to_parts(&key, Some(&self.global_labels)); let value = counter.get_inner().load(Ordering::Acquire); let entry = counters.entry(name).or_insert_with(HashMap::new).entry(labels).or_insert(0); *entry = value; } let mut gauges = HashMap::new(); let gauge_handles = self.registry.get_gauge_handles(); for (key, gauge) in gauge_handles { let gen = gauge.get_generation(); if !self.recency.should_store_gauge(&key, gen, &self.registry) { continue; } let (name, labels) = key_to_parts(&key, Some(&self.global_labels)); let value = f64::from_bits(gauge.get_inner().load(Ordering::Acquire)); let entry = gauges.entry(name).or_insert_with(HashMap::new).entry(labels).or_insert(0.0); *entry = value; } let histogram_handles = self.registry.get_histogram_handles(); for (key, histogram) in histogram_handles { let gen = histogram.get_generation(); if 
!self.recency.should_store_histogram(&key, gen, &self.registry) { // Since we store aggregated distributions directly, when we're told that a metric // is not recent enough and should be/was deleted from the registry, we also need to // delete it on our side as well. let (name, labels) = key_to_parts(&key, Some(&self.global_labels)); let mut wg = self.distributions.write(); let delete_by_name = if let Some(by_name) = wg.get_mut(&name) { by_name.remove(&labels); by_name.is_empty() } else { false }; // If there's no more variants in the per-metric-name distribution map, then delete // it entirely, otherwise we end up with weird empty output during render. if delete_by_name { wg.remove(&name); } continue; } let (name, labels) = key_to_parts(&key, Some(&self.global_labels)); let mut wg = self.distributions.write(); let entry = wg .entry(name.clone()) .or_insert_with(IndexMap::new) .entry(labels) .or_insert_with(|| self.distribution_builder.get_distribution(name.as_str())); histogram.get_inner().clear_with(|samples| entry.record_samples(samples)); } let distributions = self.distributions.read().clone(); Snapshot { counters, gauges, distributions } } fn render(&self) -> String { let Snapshot { mut counters, mut distributions, mut gauges } = self.get_recent_metrics(); let mut output = String::new(); let descriptions = self.descriptions.read(); for (name, mut by_labels) in counters.drain() { if let Some(desc) = descriptions.get(name.as_str()) { write_help_line(&mut output, name.as_str(), desc); } write_type_line(&mut output, name.as_str(), "counter"); for (labels, value) in by_labels.drain() { write_metric_line::<&str, u64>(&mut output, &name, None, &labels, None, value); } output.push('\n'); } for (name, mut by_labels) in gauges.drain() { if let Some(desc) = descriptions.get(name.as_str()) { write_help_line(&mut output, name.as_str(), desc); } write_type_line(&mut output, name.as_str(), "gauge"); for (labels, value) in by_labels.drain() { write_metric_line::<&str, 
f64>(&mut output, &name, None, &labels, None, value); } output.push('\n'); } for (name, mut by_labels) in distributions.drain() { if let Some(desc) = descriptions.get(name.as_str()) { write_help_line(&mut output, name.as_str(), desc); } let distribution_type = self.distribution_builder.get_distribution_type(name.as_str()); write_type_line(&mut output, name.as_str(), distribution_type); for (labels, distribution) in by_labels.drain(..) { let (sum, count) = match distribution { Distribution::Summary(summary, quantiles, sum) => { for quantile in quantiles.iter() { let value = summary.quantile(quantile.value()).unwrap_or(0.0); write_metric_line( &mut output, &name, None, &labels, Some(("quantile", quantile.value())), value, ); } (sum, summary.count() as u64) } Distribution::Histogram(histogram) => { for (le, count) in histogram.buckets() { write_metric_line( &mut output, &name, Some("bucket"), &labels, Some(("le", le)), count, ); } write_metric_line( &mut output, &name, Some("bucket"), &labels, Some(("le", "+Inf")), histogram.count(), ); (histogram.sum(), histogram.count()) } }; write_metric_line::<&str, f64>(&mut output, &name, Some("sum"), &labels, None, sum); write_metric_line::<&str, u64>( &mut output, &name, Some("count"), &labels, None, count, ); } output.push('\n'); } output } } /// A Prometheus recorder. /// /// This recorder should be composed with other recorders or installed globally via /// [`metrics::set_boxed_recorder`]. /// /// Most users will not need to interact directly with the recorder, and can simply deal with the /// builder methods on [`PrometheusBuilder`](crate::PrometheusBuilder) for building and installing /// the recorder/exporter. pub struct PrometheusRecorder { inner: Arc<Inner>, } impl PrometheusRecorder { /// Gets a [`PrometheusHandle`] to this recorder. 
pub fn handle(&self) -> PrometheusHandle { PrometheusHandle { inner: self.inner.clone() } } fn add_description_if_missing(&self, key_name: &KeyName, description: &'static str) { let sanitized = sanitize_metric_name(key_name.as_str()); let mut descriptions = self.inner.descriptions.write(); descriptions.entry(sanitized).or_insert(description); } } impl From<Inner> for PrometheusRecorder { fn from(inner: Inner) -> Self { PrometheusRecorder { inner: Arc::new(inner) } } } impl Recorder for PrometheusRecorder { fn describe_counter(&self, key_name: KeyName, _unit: Option<Unit>, description: &'static str) { self.add_description_if_missing(&key_name, description); } fn describe_gauge(&self, key_name: KeyName, _unit: Option<Unit>, description: &'static str) { self.add_description_if_missing(&key_name, description); } fn describe_histogram( &self, key_name: KeyName, _unit: Option<Unit>, description: &'static str, ) { self.add_description_if_missing(&key_name, description); } fn register_counter(&self, key: &Key) -> Counter { self.inner.registry.get_or_create_counter(key, |c| c.clone().into()) } fn register_gauge(&self, key: &Key) -> Gauge { self.inner.registry.get_or_create_gauge(key, |c| c.clone().into()) } fn register_histogram(&self, key: &Key) -> Histogram { self.inner.registry.get_or_create_histogram(key, |c| c.clone().into()) } } /// Handle for accessing metrics stored via [`PrometheusRecorder`]. /// /// In certain scenarios, it may be necessary to directly handle requests that would otherwise be /// handled directly by the HTTP listener, or push gateway background task. [`PrometheusHandle`] /// allows rendering a snapshot of the current metrics stored by an installed [`PrometheusRecorder`] /// as a payload conforming to the Prometheus exposition format. 
#[derive(Clone)] pub struct PrometheusHandle { inner: Arc<Inner>, } impl PrometheusHandle { /// Takes a snapshot of the metrics held by the recorder and generates a payload conforming to /// the Prometheus exposition format. pub fn render(&self) -> String { self.inner.render() } }
38.203636
100
0.555207
db747cde9c93f06edef0050a96c32e1849dbc43f
7,267
//! A small [CBOR] codec suitable for `no_std` environments.
//!
//! The crate is organised around the following entities:
//!
//! - [`Encoder`] and [`Decoder`] for type-directed encoding and decoding
//! of values.
//!
//! - [`Encode`] and [`Decode`] traits which can be implemented for any
//! type that should be encoded to or decoded from CBOR. They are similar
//! to [serde]'s `Serialize` and `Deserialize` traits but do not abstract
//! over the encoder/decoder.
//!
//! Encoding and decoding proceeds in a type-directed way, i.e. by calling
//! methods for expected data item types, e.g. [`Decoder::u32`] or
//! [`Encoder::str`]. In addition there is support for data type inspection.
//! The `Decoder` can be queried for the current data type which returns a
//! [`data::Type`] that can represent every possible CBOR type and decoding
//! can thus proceed based on this information. It is also possible to just
//! tokenize the input bytes using a [`Tokenizer`](decode::Tokenizer), i.e.
//! an `Iterator` over CBOR [`Token`](decode::Token)s.
//!
//! Optionally, `Encode` and `Decode` can be derived for structs and enums
//! using the respective derive macros (*requires feature* `"derive"`).
//! See [`minicbor_derive`] for details.
//!
//! For I/O support see [`minicbor-io`][1].
//!
//! [1]: https://twittner.gitlab.io/minicbor/minicbor_io/
//!
//! # Feature flags
//!
//! The following feature flags are supported:
//!
//! - `"alloc"`: Enables most collection types in a `no_std` environment.
//!
//! - `"std"`: Implies `"alloc"` and enables more functionality that depends
//! on the `std` crate.
//!
//! - `"derive"`: Implies `"alloc"` and allows deriving [`Encode`] and
//! [`Decode`] traits.
//!
//! - `"partial-skip-support"`: Enables the method [`Decoder::skip`] to skip
//! over any CBOR item other than indefinite-length arrays or maps inside of
//! regular maps or arrays. Support for skipping over any CBOR item is
//! enabled by `"alloc"` but without `"alloc"` or `"partial-skip-support"`
//! `Decoder::skip` is not available at all.
//!
//! - `"partial-derive-support"`: Implies `"partial-skip-support"` and allows
//! deriving [`Encode`] and [`Decode`] traits, but does not support
//! indefinite-length CBOR maps and arrays inside of regular CBOR maps and
//! arrays.
//!
//! # Example: generic encoding and decoding
//!
//! ```
//! use minicbor::{Encode, Decode};
//!
//! let input = ["hello", "world"];
//! let mut buffer = [0u8; 128];
//!
//! minicbor::encode(&input, buffer.as_mut())?;
//! let output: [&str; 2] = minicbor::decode(buffer.as_ref())?;
//! assert_eq!(input, output);
//!
//! # Ok::<_, Box<dyn std::error::Error>>(())
//! ```
//!
//! # Example: ad-hoc encoding
//!
//! ```
//! use minicbor::Encoder;
//!
//! let mut buffer = [0u8; 128];
//! let mut encoder = Encoder::new(&mut buffer[..]);
//!
//! encoder.begin_map()? // using an indefinite map here
//!     .str("hello")?.str("world")?
//!     .str("submap")?.map(2)?
//!         .u8(1)?.bool(true)?
//!         .u8(2)?.bool(false)?
//!     .u16(34234)?.array(3)?.u8(1)?.u8(2)?.u8(3)?
//!     .bool(true)?.null()?
//! .end()?;
//!
//! # Ok::<_, Box<dyn std::error::Error>>(())
//! ```
//!
//! # Example: ad-hoc decoding
//!
//! ```
//! use minicbor::Decoder;
//! use minicbor::data::Tag;
//!
//! let input = [
//!     0xc0, 0x74, 0x32, 0x30, 0x31, 0x33, 0x2d, 0x30,
//!     0x33, 0x2d, 0x32, 0x31, 0x54, 0x32, 0x30, 0x3a,
//!     0x30, 0x34, 0x3a, 0x30, 0x30, 0x5a
//! ];
//!
//! let mut decoder = Decoder::new(&input);
//! assert_eq!(Tag::DateTime, decoder.tag()?);
//! assert_eq!("2013-03-21T20:04:00Z", decoder.str()?);
//! # Ok::<_, Box<dyn std::error::Error>>(())
//! ```
//!
//! # Example: tokenization
//!
//! ```
//! use minicbor::display;
//! use minicbor::decode::{Token, Tokenizer};
//!
//! let input = [0x83, 0x01, 0x9f, 0x02, 0x03, 0xff, 0x82, 0x04, 0x05];
//!
//! assert_eq!("[1, [_ 2, 3], [4, 5]]", format!("{}", display(&input)));
//!
//! let tokens = Tokenizer::new(&input).collect::<Result<Vec<Token>, _>>()?;
//!
//! assert_eq! { &tokens[..],
//!     &[Token::Array(3),
//!       Token::U8(1),
//!       Token::BeginArray,
//!       Token::U8(2),
//!       Token::U8(3),
//!       Token::Break,
//!       Token::Array(2),
//!       Token::U8(4),
//!       Token::U8(5)]
//! };
//!
//! # Ok::<_, Box<dyn std::error::Error>>(())
//! ```
//!
//! [CBOR]: https://tools.ietf.org/html/rfc7049
//! [serde]: https://serde.rs

#![forbid(unused_imports, unused_variables)]
#![allow(clippy::needless_lifetimes)]
#![cfg_attr(not(feature = "std"), no_std)]

#[cfg(feature = "alloc")]
extern crate alloc;

pub mod bytes;
pub mod data;
pub mod decode;
pub mod encode;

// Initial-byte templates for the CBOR major types: the major type lives in
// the high 3 bits of a data item's first byte (see the RFC linked above).
// `BREAK` is the stop code terminating indefinite-length items.
const UNSIGNED: u8 = 0x00;
const SIGNED: u8 = 0x20;
const BYTES: u8 = 0x40;
const TEXT: u8 = 0x60;
const ARRAY: u8 = 0x80;
const MAP: u8 = 0xa0;
const TAGGED: u8 = 0xc0;
const SIMPLE: u8 = 0xe0;
const BREAK: u8 = 0xff;

pub use decode::{Decode, Decoder};
pub use encode::{Encode, Encoder};

#[cfg(any(feature = "derive", feature = "partial-derive-support"))]
pub use minicbor_derive::*;

/// Decode a type implementing [`Decode`] from the given byte slice.
pub fn decode<'b, T>(b: &'b [u8]) -> Result<T, decode::Error>
where
    T: Decode<'b>
{
    Decoder::new(b).decode()
}

/// Encode a type implementing [`Encode`] to the given [`encode::Write`] impl.
pub fn encode<T, W>(x: T, w: W) -> Result<(), encode::Error<W::Error>>
where
    T: Encode,
    W: encode::Write
{
    Encoder::new(w).encode(x)?.ok()
}

/// Encode a type implementing [`Encode`] and return the encoded byte vector.
///
/// *Requires feature* `"std"`.
#[cfg(feature = "std")]
pub fn to_vec<T>(x: T) -> Result<Vec<u8>, encode::Error<std::io::Error>>
where
    T: Encode
{
    let mut e = Encoder::new(Vec::new());
    x.encode(&mut e)?;
    Ok(e.into_inner())
}

/// Display the given CBOR bytes in [diagnostic notation][1].
///
/// *Requires features* `"alloc"` and `"half"`.
///
/// Quick syntax summary:
///
/// - Maps are enclosed in curly braces: `{` and `}`.
/// - Arrays are enclosed in brackets: `[` and `]`.
/// - Indefinite maps start with `{_` instead of `{`.
/// - Indefinite arrays start with `[_` instead of `[`.
/// - Bytes are hex encoded and enclosed in `h'` and `'`.
/// - Strings are enclosed in double quotes.
/// - Numbers and booleans are displayed as in Rust but floats are always
/// shown in scientific notation (this differs slightly from the RFC
/// format).
/// - Indefinite bytes are enclosed in `(_` and `)` except for the empty
/// sequence which is shown as `''_`.
/// - Indefinite strings are enclosed in `(_` and `)` except for the empty
/// sequence which is shown as `""_`.
/// - Tagged values are enclosed in `t(` and `)` where `t` is the numeric
/// tag value.
/// - Simple values are shown as `simple(n)` where `n` is the numeric
/// simple value.
/// - Undefined and null are shown as `undefined` and `null`.
///
/// No error is produced should decoding fail, the error message
/// becomes part of the display.
///
/// [1]: https://www.rfc-editor.org/rfc/rfc8949.html#section-8
#[cfg(all(feature = "alloc", feature = "half"))]
pub fn display<'b>(cbor: &'b [u8]) -> impl core::fmt::Display + 'b {
    decode::Tokenizer::new(cbor)
}
31.872807
78
0.610568
90e86cf84ced2f0b8a06bac81dcb619aaccade23
1,612
//! A middleware that boxes HTTP response bodies. use crate::BoxBody; use futures::{future, TryFutureExt}; use linkerd_error::Error; use linkerd_stack::{layer, Proxy, Service}; use std::task::{Context, Poll}; #[derive(Clone, Debug)] pub struct BoxResponse<S>(S); impl<S> BoxResponse<S> { pub fn layer() -> impl layer::Layer<S, Service = Self> + Clone + Copy { layer::mk(Self) } } impl<S, Req, B> Service<Req> for BoxResponse<S> where S: Service<Req, Response = http::Response<B>>, B: http_body::Body + Send + 'static, B::Data: Send + 'static, B::Error: Into<Error> + 'static, { type Response = http::Response<BoxBody>; type Error = S::Error; type Future = future::MapOk<S::Future, fn(S::Response) -> Self::Response>; #[inline] fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { self.0.poll_ready(cx) } #[inline] fn call(&mut self, req: Req) -> Self::Future { self.0.call(req).map_ok(|rsp| rsp.map(BoxBody::new)) } } impl<Req, B, S, P> Proxy<Req, S> for BoxResponse<P> where B: http_body::Body + Send + 'static, B::Data: Send + 'static, B::Error: Into<Error>, S: Service<P::Request>, P: Proxy<Req, S, Response = http::Response<B>>, { type Request = P::Request; type Response = http::Response<BoxBody>; type Error = P::Error; type Future = future::MapOk<P::Future, fn(P::Response) -> Self::Response>; #[inline] fn proxy(&self, inner: &mut S, req: Req) -> Self::Future { self.0.proxy(inner, req).map_ok(|rsp| rsp.map(BoxBody::new)) } }
27.793103
85
0.609181
9bb54caed6d3b9dc6a80957ff0369f884ed3e2de
3,262
/* * Copyright (c) Meta Platforms, Inc. and affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ use fnv::FnvBuildHasher; use indexmap::IndexMap; use intern::string_key::StringKey; use serde::{Deserialize, Serialize}; use strum::IntoEnumIterator; use strum_macros::EnumIter; type FnvIndexMap<K, V> = IndexMap<K, V, FnvBuildHasher>; #[derive( EnumIter, strum_macros::ToString, Debug, Copy, Clone, Serialize, Deserialize, PartialEq )] #[serde(deny_unknown_fields, rename_all = "lowercase")] pub enum TypegenLanguage { JavaScript, TypeScript, Flow, } impl Default for TypegenLanguage { fn default() -> Self { Self::JavaScript } } impl TypegenLanguage { pub fn get_variants_as_string() -> Vec<String> { let mut res = vec![]; for lang in Self::iter() { res.push(lang.to_string().to_lowercase()); } res } } #[derive(Debug, Serialize, Deserialize, Default)] #[serde(deny_unknown_fields, rename_all = "camelCase")] pub struct TypegenConfig { /// The desired output language, "flow" or "typescript". pub language: TypegenLanguage, /// # For Flow type generation /// When set, enum values are imported from a module with this suffix. /// For example, an enum Foo and this property set to ".test" would be /// imported from "Foo.test". /// Note: an empty string is allowed and different from not setting the /// value, in the example above it would just import from "Foo". pub enum_module_suffix: Option<String>, /// # For Flow type generation /// When set, generated input types will have the listed fields optional /// even if the schema defines them as required. #[serde(default)] pub optional_input_fields: Vec<StringKey>, /// # For Typescript type generation /// Whether to use the `import type` syntax introduced in Typescript /// version 3.8. This will prevent warnings from `importsNotUsedAsValues`. 
#[serde(default)] pub use_import_type_syntax: bool, /// A map from GraphQL scalar types to a custom JS type, example: /// { "Url": "String" } #[serde(default)] pub custom_scalar_types: FnvIndexMap<StringKey, StringKey>, /// Require all GraphQL scalar types mapping to be defined, will throw /// if a GraphQL scalar type doesn't have a JS type #[serde(default)] pub require_custom_scalar_types: bool, /// Work in progress new Flow type definitions #[serde(default)] pub flow_typegen: FlowTypegenConfig, /// This option enables emitting es modules artifacts. #[serde(default)] pub eager_es_modules: bool, } #[derive(Default, Debug, Serialize, Deserialize, Clone, Copy)] #[serde(deny_unknown_fields, tag = "phase")] pub struct FlowTypegenConfig { /// This option controls whether or not a catch-all entry is added to enum type definitions /// for values that may be added in the future. Enabling this means you will have to update /// your application whenever the GraphQL server schema adds new enum values to prevent it /// from breaking. #[serde(default)] pub no_future_proof_enums: bool, }
31.365385
95
0.685162
29f7da3039d2010ff22b29293c6450e6a84b62f0
5,391
use crate::models::DisplayTicketPricing;
use chrono::{NaiveDateTime, Utc};
use db::prelude::*;
use diesel::PgConnection;
use uuid::Uuid;

/// User-facing view of a ticket type: availability, pricing, and any
/// promo/hold adjustments applied for a redemption code.
#[derive(Debug, Deserialize, PartialEq, Serialize)]
pub struct UserDisplayTicketType {
    pub id: Uuid,
    pub name: String,
    pub description: Option<String>,
    pub status: TicketTypeStatus,
    // Count of tickets still purchasable (possibly replaced by the hold's
    // remaining quantity when a hold redemption code applies).
    pub available: u32,
    pub start_date: Option<NaiveDateTime>,
    pub end_date: NaiveDateTime,
    pub increment: i32,
    // 0 means "no limit"; see the min-combining logic below.
    pub limit_per_person: u32,
    pub ticket_pricing: Option<DisplayTicketPricing>,
    // Echoed back only when the supplied code actually applied to this type.
    pub redemption_code: Option<String>,
    pub event_id: Uuid,
    pub rank: i32,
}

impl UserDisplayTicketType {
    /// Builds the display model for `ticket_type`.
    ///
    /// If `redemption_code` is given, a matching *hold* takes precedence over
    /// a matching *code* (note the `if let … else if let` ordering below);
    /// either one may tighten `limit_per_person` and set `description` /
    /// `redemption_code` on the result. Afterwards, OnSaleSoon/SaleEnded
    /// statuses override `ticket_pricing` with the earliest/latest pricing.
    pub fn from_ticket_type(
        ticket_type: &TicketType,
        fee_schedule: &FeeSchedule,
        box_office_pricing: bool,
        redemption_code: Option<String>,
        conn: &PgConnection,
    ) -> Result<UserDisplayTicketType, DatabaseError> {
        let available = ticket_type.valid_available_ticket_count(conn)?;

        // Current pricing may legitimately be absent (e.g. between pricing
        // windows); `.optional()?` turns "not found" into `None`.
        let ticket_pricing = match ticket_type
            .current_ticket_pricing(box_office_pricing, conn)
            .optional()?
        {
            Some(ticket_pricing) => Some(DisplayTicketPricing::from_ticket_pricing(
                &ticket_pricing,
                fee_schedule,
                redemption_code.clone(),
                Some(ticket_type.event_id),
                box_office_pricing,
                conn,
            )?),
            None => None,
        };

        let mut result = UserDisplayTicketType {
            id: ticket_type.id,
            event_id: ticket_type.event_id,
            name: ticket_type.name.clone(),
            description: ticket_type.description.clone(),
            status: ticket_type.status(box_office_pricing, conn)?,
            start_date: ticket_type.start_date,
            end_date: ticket_type.end_date(conn)?,
            ticket_pricing,
            available,
            redemption_code: None,
            increment: ticket_type.increment,
            limit_per_person: ticket_type.limit_per_person as u32,
            rank: ticket_type.rank,
        };

        if let Some(ref redemption_code) = redemption_code {
            // Hold lookup first; only falls through to promo codes when no
            // hold matches the redemption code.
            if let Some(hold) =
                Hold::find_by_redemption_code(redemption_code, Some(ticket_type.event_id), conn).optional()?
            {
                if hold.ticket_type_id == ticket_type.id {
                    result.description = Some(format!("Using promo code: {}", redemption_code));
                    let hold_limit_per_person = hold.max_per_user.unwrap_or(0) as u32;
                    // Limited by the minimum of hold max_per_user and ticket_type limit_per_person with 0 acting as no limit
                    if result.limit_per_person == 0 || result.limit_per_person > hold_limit_per_person {
                        result.limit_per_person = hold_limit_per_person;
                    }
                    // NOTE(review): presumably `.1` of quantity() is the
                    // *remaining* quantity for the hold — TODO confirm.
                    result.available = hold.quantity(conn)?.1;
                    result.redemption_code = Some(redemption_code.clone());
                }
            } else if let Some(code_availability) =
                Code::find_by_redemption_code_with_availability(redemption_code, Some(ticket_type.event_id), conn)
                    .optional()?
            {
                // Codes only apply inside their validity window and when
                // linked to this ticket type.
                let now = Utc::now().naive_utc();
                if now >= code_availability.code.start_date && now <= code_availability.code.end_date {
                    if TicketType::find_for_code(code_availability.code.id, conn)?.contains(&ticket_type) {
                        result.description = Some(format!("Using promo code: {}", redemption_code));
                        let code_limit_per_person =
                            code_availability.code.max_tickets_per_user.unwrap_or(0) as u32;
                        // Limited by the minimum of code max_per_user and ticket_type limit_per_person with 0 acting as no limit
                        if result.limit_per_person == 0 || result.limit_per_person > code_limit_per_person {
                            result.limit_per_person = code_limit_per_person;
                        }
                        result.redemption_code = Some(redemption_code.clone());
                    }
                }
            }
        }

        let ticket_pricings = ticket_type.valid_ticket_pricing(true, conn)?;
        match result.status {
            TicketTypeStatus::OnSaleSoon => {
                // Show the earliest upcoming pricing for not-yet-on-sale types.
                let min_pricing = ticket_pricings.iter().min_by_key(|p| p.start_date);
                // NOTE(review): `.unwrap()` panics if `valid_ticket_pricing`
                // returns an empty list for an OnSaleSoon type — confirm that
                // the status implies at least one pricing exists.
                result.ticket_pricing = Some(DisplayTicketPricing::from_ticket_pricing(
                    min_pricing.unwrap(),
                    fee_schedule,
                    redemption_code.clone(),
                    Some(ticket_type.event_id),
                    box_office_pricing,
                    conn,
                )?);
            }
            TicketTypeStatus::SaleEnded => {
                // Show the last (latest-ending) pricing once sales have ended.
                let max_pricing = ticket_pricings.iter().max_by_key(|p| p.end_date);
                // NOTE(review): same empty-list `.unwrap()` concern as above.
                result.ticket_pricing = Some(DisplayTicketPricing::from_ticket_pricing(
                    max_pricing.unwrap(),
                    fee_schedule,
                    redemption_code.clone(),
                    Some(ticket_type.event_id),
                    box_office_pricing,
                    conn,
                )?);
            }
            _ => (),
        }

        Ok(result)
    }
}
42.117188
129
0.575032
79d43e521021dc80a6974eefdfa735fb06c6d365
717
#![cfg_attr(not(debug_assertions), windows_subsystem = "windows")]

mod config;
mod gui;

use anyhow::{anyhow, Result};
use directories_next::ProjectDirs;
use log::LevelFilter;
use relm::Widget;

pub use config::CONFIG;

lazy_static::lazy_static! {
    pub static ref DIRS: ProjectDirs = ProjectDirs::from("com", "ecmelberk", "moonlander").expect("Cannot get project directories");
}

/// Entry point: configures logging (verbose in debug builds, `info`
/// otherwise), then hands control to the GTK main window.
fn main() -> Result<()> {
    // Debug builds log at `debug`; release builds stay at `info`.
    let level = if cfg!(debug_assertions) {
        LevelFilter::Debug
    } else {
        LevelFilter::Info
    };

    let mut log_builder = pretty_env_logger::formatted_builder();
    log_builder.filter_level(level);
    log_builder.try_init()?;

    log::info!("Hello, moon!");

    // `run` only reports that it failed; map the unit error into anyhow.
    gui::Win::run(()).map_err(|_| anyhow!("Cannot run GTK application"))
}
23.129032
132
0.665272
2201751d3ec63b2d6b7d484ed7e9adb4d1b6c263
2,847
/*
 * Ory APIs
 *
 * Documentation for all public and administrative Ory APIs. Administrative APIs can only be accessed with a valid Personal Access Token. Public APIs are mostly used in browsers.
 *
 * The version of the OpenAPI document: v0.0.1-alpha.46
 * Contact: [email protected]
 * Generated by: https://openapi-generator.tech
 */

// NOTE: auto-generated by openapi-generator (see header); regenerate from
// the OpenAPI document rather than editing field definitions by hand.

/// GenericError : Error responses are sent when an error (e.g. unauthorized, bad request, ...) occurred.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GenericError {
    /// The status code
    #[serde(rename = "code", skip_serializing_if = "Option::is_none")]
    pub code: Option<i64>,
    /// Debug contains debug information. This is usually not available and has to be enabled.
    #[serde(rename = "debug", skip_serializing_if = "Option::is_none")]
    pub debug: Option<String>,
    /// Further error details
    #[serde(rename = "details", skip_serializing_if = "Option::is_none")]
    pub details: Option<serde_json::Value>,
    /// Name is the error name.
    #[serde(rename = "error", skip_serializing_if = "Option::is_none")]
    pub error: Option<String>,
    /// Description contains further information on the nature of the error.
    #[serde(rename = "error_description", skip_serializing_if = "Option::is_none")]
    pub error_description: Option<String>,
    /// The error ID Useful when trying to identify various errors in application logic.
    #[serde(rename = "id", skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// Message contains the error message.
    #[serde(rename = "message")]
    pub message: String,
    /// A human-readable reason for the error
    #[serde(rename = "reason", skip_serializing_if = "Option::is_none")]
    pub reason: Option<String>,
    /// The request ID The request ID is often exposed internally in order to trace errors across service architectures. This is often a UUID.
    #[serde(rename = "request", skip_serializing_if = "Option::is_none")]
    pub request: Option<String>,
    /// The status description
    #[serde(rename = "status", skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
    /// Code represents the error status code (404, 403, 401, ...).
    #[serde(rename = "status_code", skip_serializing_if = "Option::is_none")]
    pub status_code: Option<i64>,
}

impl GenericError {
    /// Error responses are sent when an error (e.g. unauthorized, bad request, ...) occurred.
    // `message` is the only required field; every optional field starts as
    // `None` (and is then omitted from serialized output, per the
    // `skip_serializing_if` attributes above).
    pub fn new(message: String) -> GenericError {
        GenericError {
            code: None,
            debug: None,
            details: None,
            error: None,
            error_description: None,
            id: None,
            message,
            reason: None,
            request: None,
            status: None,
            status_code: None,
        }
    }
}
39.541667
179
0.656129
edcd9ad07296f98997262ddf60a18edd8ec0b6ed
3,342
// NOTE(review): this looks like svd2rust-generated register accessor code
// (reader/writer proxy pattern) — prefer regenerating from the SVD over
// hand-editing; TODO confirm provenance.

#[doc = "Register `PRIMECELL_ID_3` reader"]
pub struct R(crate::R<PRIMECELL_ID_3_SPEC>);
impl core::ops::Deref for R {
    type Target = crate::R<PRIMECELL_ID_3_SPEC>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl From<crate::R<PRIMECELL_ID_3_SPEC>> for R {
    #[inline(always)]
    fn from(reader: crate::R<PRIMECELL_ID_3_SPEC>) -> Self {
        R(reader)
    }
}
#[doc = "Register `PRIMECELL_ID_3` writer"]
pub struct W(crate::W<PRIMECELL_ID_3_SPEC>);
impl core::ops::Deref for W {
    type Target = crate::W<PRIMECELL_ID_3_SPEC>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl core::ops::DerefMut for W {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
impl From<crate::W<PRIMECELL_ID_3_SPEC>> for W {
    #[inline(always)]
    fn from(writer: crate::W<PRIMECELL_ID_3_SPEC>) -> Self {
        W(writer)
    }
}
#[doc = "Field `PRIMECELL_ID_3` reader - PrimeCell Identification"]
pub struct PRIMECELL_ID_3_R(crate::FieldReader<u8, u8>);
impl PRIMECELL_ID_3_R {
    #[inline(always)]
    pub(crate) fn new(bits: u8) -> Self {
        PRIMECELL_ID_3_R(crate::FieldReader::new(bits))
    }
}
impl core::ops::Deref for PRIMECELL_ID_3_R {
    type Target = crate::FieldReader<u8, u8>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `PRIMECELL_ID_3` writer - PrimeCell Identification"]
pub struct PRIMECELL_ID_3_W<'a> {
    w: &'a mut W,
}
impl<'a> PRIMECELL_ID_3_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // Field occupies bits 0..=7: clear the low byte, then OR in `value`.
        self.w.bits = (self.w.bits & !0xff) | (value as u32 & 0xff);
        self.w
    }
}
impl R {
    #[doc = "Bits 0:7 - PrimeCell Identification"]
    #[inline(always)]
    pub fn primecell_id_3(&self) -> PRIMECELL_ID_3_R {
        PRIMECELL_ID_3_R::new((self.bits & 0xff) as u8)
    }
}
impl W {
    #[doc = "Bits 0:7 - PrimeCell Identification"]
    #[inline(always)]
    pub fn primecell_id_3(&mut self) -> PRIMECELL_ID_3_W {
        PRIMECELL_ID_3_W { w: self }
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.0.bits(bits);
        self
    }
}
#[doc = "DMA PrimeCell ID 3\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [primecell_id_3](index.html) module"]
pub struct PRIMECELL_ID_3_SPEC;
impl crate::RegisterSpec for PRIMECELL_ID_3_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [primecell_id_3::R](R) reader structure"]
impl crate::Readable for PRIMECELL_ID_3_SPEC {
    type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [primecell_id_3::W](W) writer structure"]
impl crate::Writable for PRIMECELL_ID_3_SPEC {
    type Writer = W;
}
#[doc = "`reset()` method sets PRIMECELL_ID_3 to value 0xb1"]
impl crate::Resettable for PRIMECELL_ID_3_SPEC {
    #[inline(always)]
    fn reset_value() -> Self::Ux {
        0xb1
    }
}
32.134615
413
0.634051
fcfd77dab0aa3f88296d562f01147b1cc49c8dde
4,331
// xfail-fast
// xfail-test

// XFAIL'd because this is going to be revamped, and it's not compatible as
// written with the new mutability rules.

// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// Examples from Eric's internship final presentation.
//
// Code is easier to write in emacs, and it's good to be sure all the
// code samples compile (or not) as they should.

// NOTE(review): pre-1.0 Rust throughout (`proto!` pipes, `+arg` by-move
// parameters, `fail!`, `fn finalize`) — intentionally kept as-is since the
// test is XFAIL'd; do not modernize.

use double_buffer::client::*;
use double_buffer::give_buffer;
use core::comm::Selectable;

// Recursively expands to nested `if index == count` checks, one per listed
// port, dispatching the received message to the matching arm.
macro_rules! select_if (
    {
        $index:expr,
        $count:expr,
        $port:path => [
            $($message:path$(($($x: ident),+))dont_type_this*
              -> $next:ident $e:expr),+
        ],
        $( $ports:path => [
            $($messages:path$(($($xs: ident),+))dont_type_this*
              -> $nexts:ident $es:expr),+
        ], )*
    } => {
        if $index == $count {
            match core::pipes::try_recv($port) {
                $(Some($message($($($x,)+)* next)) => {
                    let $next = next;
                    $e
                })+
                _ => fail!()
            }
        } else {
            select_if!(
                $index,
                $count + 1,
                $( $ports => [
                    $($messages$(($($xs),+))dont_type_this*
                      -> $nexts $es),+
                ], )*
            )
        }
    };

    {
        $index:expr,
        $count:expr,
    } => {
        fail!()
    }
)

// Front-end: waits on all listed ports with `selecti`, then delegates the
// chosen index to `select_if!` above.
macro_rules! select (
    {
        $( $port:path => {
            $($message:path$(($($x: ident),+))dont_type_this*
              -> $next:ident $e:expr),+
        } )+
    } => ({
        let index = core::comm::selecti([$(($port).header()),+]);
        select_if!(index, 0, $( $port => [
            $($message$(($($x),+))dont_type_this* -> $next $e),+
        ], )+)
    })
)

// Types and protocols
pub struct Buffer {
    foo: (),

}

impl Drop for Buffer {
    fn finalize(&self) {}
}

proto! double_buffer (
    acquire:send {
        request -> wait_buffer
    }

    wait_buffer:recv {
        give_buffer(::Buffer) -> release
    }

    release:send {
        release(::Buffer) -> acquire
    }
)

// Code examples
fn render(_buffer: &Buffer) {
    // A dummy function.
}

fn draw_frame(+channel: double_buffer::client::acquire) {
    let channel = request(channel);
    select! (
        channel => {
            give_buffer(buffer) -> channel {
                render(&buffer);
                release(channel, buffer)
            }
        }
    );
}

fn draw_two_frames(+channel: double_buffer::client::acquire) {
    let channel = request(channel);
    let channel = select! (
        channel => {
            give_buffer(buffer) -> channel {
                render(&buffer);
                release(channel, buffer)
            }
        }
    );
    let channel = request(channel);
    select! (
        channel => {
            give_buffer(buffer) -> channel {
                render(&buffer);
                release(channel, buffer)
            }
        }
    );
}

// Deliberately broken variant (never releases the buffer); only compiled
// with `--cfg bad1` to demonstrate the protocol catching the mistake.
#[cfg(bad1)]
fn draw_two_frames_bad1(+channel: double_buffer::client::acquire) {
    let channel = request(channel);
    select! (
        channel => {
            give_buffer(buffer) -> channel {
                render(&buffer);
            }
        }
    );
    let channel = request(channel);
    select! (
        channel => {
            give_buffer(buffer) -> channel {
                render(&buffer);
                release(channel, buffer)
            }
        }
    );
}

// Deliberately broken variant (uses the buffer after releasing it); only
// compiled with `--cfg bad2`.
#[cfg(bad2)]
fn draw_two_frames_bad2(+channel: double_buffer::client::acquire) {
    let channel = request(channel);
    select! (
        channel => {
            give_buffer(buffer) -> channel {
                render(&buffer);
                release(channel, buffer);
                render(&buffer);
                release(channel, buffer);
            }
        }
    );
}

pub fn main() { }
24.061111
75
0.494805
d6843fee52dc592df9c3eed2fcbd21f950755ded
1,447
use cursive::view::{Boxable, Identifiable};
use cursive::views::{Dialog, EditView, LinearLayout, TextView};
use cursive::Cursive;

// This example shows a way to access multiple views at the same time.

fn main() {
    let mut siv = Cursive::default();

    // Two edit fields plus a text view; the text view reports whether the
    // two fields currently hold the same content.
    let fields = LinearLayout::vertical()
        .child(EditView::new().on_edit(on_edit).with_name("1"))
        .child(EditView::new().on_edit(on_edit).with_name("2"))
        .child(TextView::new("match").with_name("match"))
        .fixed_width(10);

    siv.add_layer(Dialog::around(fields).button("Quit", Cursive::quit));

    siv.run();
}

// Runs on every keystroke in either field: compares the two fields'
// contents and updates the "match" TextView accordingly.
//
// The `content`/`cursor` callback arguments are ignored; both fields are
// fetched fresh from the `Cursive` root by name instead.
fn on_edit(siv: &mut Cursive, _content: &str, _cursor: usize) {
    let first = siv.find_name::<EditView>("1").unwrap();
    let second = siv.find_name::<EditView>("2").unwrap();

    let is_match = first.get_content() == second.get_content();
    let label = if is_match { "match" } else { "no match" };

    siv.call_on_name("match", |view: &mut TextView| view.set_content(label));
}
33.651163
71
0.628887
186ad0d8834ba24a5b309951ef1c33ef9c8b520d
2,771
//! Provides components and systems to create an in game user interface.
#![doc(
    html_logo_url = "https://amethyst.rs/brand/logo-standard.svg",
    html_root_url = "https://docs.amethyst.rs/stable"
)]
#![warn(
    missing_debug_implementations,
    missing_docs,
    rust_2018_idioms,
    rust_2018_compatibility
)]
#![warn(clippy::all)]
#![allow(clippy::new_without_default)]

// Flat public API: every submodule below is private and its public items
// are re-exported here, grouped per module.
pub use self::{
    blink::BlinkSystem,
    bundle::UiBundle,
    button::{
        UiButton, UiButtonAction, UiButtonActionRetrigger, UiButtonActionRetriggerSystem,
        UiButtonActionRetriggerSystemDesc, UiButtonActionType, UiButtonBuilder,
        UiButtonBuilderResources, UiButtonSystem, UiButtonSystemDesc,
    },
    drag::{DragWidgetSystemDesc, Draggable},
    event::{
        targeted, targeted_below, Interactable, TargetedEvent, UiEvent, UiEventType, UiMouseSystem,
    },
    event_retrigger::{
        EventReceiver, EventRetrigger, EventRetriggerSystem, EventRetriggerSystemDesc,
    },
    font::{
        default::get_default_font,
        systemfont::{default_system_font, get_all_font_handles, list_system_font_families},
    },
    format::{FontAsset, FontHandle, TtfFormat},
    glyphs::{UiGlyphsSystem, UiGlyphsSystemDesc},
    image::UiImage,
    label::{UiLabel, UiLabelBuilder, UiLabelBuilderResources},
    layout::{Anchor, ScaleMode, Stretch, UiTransformSystem, UiTransformSystemDesc},
    pass::{DrawUi, DrawUiDesc, RenderUi},
    prefab::{
        NoCustomUi, TextEditingPrefab, ToNativeWidget, UiButtonData, UiCreator, UiFormat,
        UiImageLoadPrefab, UiImagePrefab, UiLoader, UiLoaderSystem, UiLoaderSystemDesc, UiPrefab,
        UiTextData, UiTransformData, UiWidget,
    },
    resize::{ResizeSystem, ResizeSystemDesc, UiResize},
    selection::{
        Selectable, Selected, SelectionKeyboardSystem, SelectionKeyboardSystemDesc,
        SelectionMouseSystem, SelectionMouseSystemDesc,
    },
    selection_order_cache::{CacheSelectionOrderSystem, CachedSelectionOrder},
    sound::{
        UiPlaySoundAction, UiSoundRetrigger, UiSoundRetriggerSystem, UiSoundRetriggerSystemDesc,
        UiSoundSystem, UiSoundSystemDesc,
    },
    text::{LineMode, TextEditing, TextEditingMouseSystem, TextEditingMouseSystemDesc, UiText},
    text_editing::{TextEditingInputSystem, TextEditingInputSystemDesc},
    transform::{get_parent_pixel_size, UiFinder, UiTransform},
    widgets::{Widget, WidgetId, Widgets},
};

pub(crate) use amethyst_core::ecs::prelude::Entity;

mod blink;
mod bundle;
mod button;
mod drag;
mod event;
mod event_retrigger;
mod font;
mod format;
mod glyphs;
mod image;
mod label;
mod layout;
mod pass;
mod prefab;
mod resize;
mod selection;
mod selection_order_cache;
mod sound;
mod text;
mod text_editing;
mod transform;
mod widgets;
32.22093
99
0.740888
4b58b32ae0944108b6a0bbb8bde9404057314fe5
4,118
// Shared helpers and assertion macros for liquid template tests.

pub use liquid_core::Value::Nil;

/// Builds a date scalar Value for the given year/month/day.
#[allow(dead_code)]
pub fn date(y: i32, m: u8, d: u8) -> liquid_core::Value {
    use liquid_core::model::Date;
    use liquid_core::model::Value;
    Value::scalar(Date::from_ymd(y, m, d))
}

/// Placeholder for time-carrying dates; currently always yields Nil.
#[allow(dead_code)]
pub fn with_time(_time: &str) -> liquid_core::Value {
    Nil
}

/// Shorthand for `liquid_core::value!`.
#[allow(unused_macros)]
#[macro_export]
macro_rules! v {
    ($($value:tt)+) => {
        ::liquid_core::value!($($value)+)
    };
}

/// Shorthand for `liquid_core::object!`.
#[allow(unused_macros)]
#[macro_export]
macro_rules! o {
    ($($value:tt)+) => {
        ::liquid_core::object!($($value)+)
    };
}

/// Shorthand for `liquid_core::array!`.
#[allow(unused_macros)]
#[macro_export]
macro_rules! a {
    ($($value:tt)+) => {
        ::liquid_core::array!($($value)+)
    };
}

/// Parses and renders `$template` (with optional assigns and parser) and
/// asserts the rendered output equals `$expected` exactly. The shorter arms
/// fill in an empty assigns object and a stdlib parser.
#[allow(unused_macros)]
#[macro_export]
macro_rules! assert_template_result {
    ($expected:expr, $template:expr, ) => {
        assert_template_result!($expected, $template);
    };
    ($expected:expr, $template:expr) => {
        let assigns = ::liquid_core::Object::default();
        assert_template_result!($expected, $template, assigns);
    };
    ($expected:expr, $template:expr, $assigns: expr, ) => {
        assert_template_result!($expected, $template, $assigns);
    };
    ($expected:expr, $template:expr, $assigns: expr) => {
        let liquid: ::liquid::ParserBuilder = ::liquid::ParserBuilder::with_stdlib();
        let liquid = liquid.build().unwrap();
        assert_template_result!($expected, $template, $assigns, liquid);
    };
    ($expected:expr, $template:expr, $assigns: expr, $liquid: expr, ) => {
        assert_template_result!($expected, $template, $assigns, $liquid);
    };
    ($expected:expr, $template:expr, $assigns: expr, $liquid: expr) => {
        let template = $liquid.parse($template.as_ref()).unwrap();
        let rendered = template.render(&$assigns).unwrap();
        assert_eq!($expected, rendered);
    };
}

/// Like `assert_template_result!` but treats `$expected` as a regex pattern
/// that the rendered output must match.
#[allow(unused_macros)]
#[macro_export]
macro_rules! assert_template_matches {
    ($expected:expr, $template:expr, ) => {
        assert_template_matches!($expected, $template);
    };
    ($expected:expr, $template:expr) => {
        let assigns = liquid::value::Value::default();
        assert_template_matches!($expected, $template, assigns);
    };
    ($expected:expr, $template:expr, $assigns: expr, ) => {
        assert_template_matches!($expected, $template, $assigns);
    };
    ($expected:expr, $template:expr, $assigns: expr) => {
        let template = ::liquid::ParserBuilder::with_stdlib()
            .build()
            .unwrap()
            .parse($template.as_ref())
            .unwrap();

        let rendered = template.render(&$assigns).unwrap();

        let expected = $expected;
        println!("pattern={}", expected);
        let expected = regex::Regex::new(expected).unwrap();
        println!("rendered={}", rendered);
        assert!(expected.is_match(&rendered));
    };
}

/// Asserts that parsing `$template` fails, and evaluates to the parse error.
#[allow(unused_macros)]
#[macro_export]
macro_rules! assert_parse_error {
    ($template:expr, ) => {
        assert_parse_error!($template)
    };
    ($template:expr) => {{
        let liquid = ::liquid::ParserBuilder::with_stdlib().build().unwrap();
        assert_parse_error!($template, liquid)
    }};
    ($template:expr, $liquid:expr, ) => {{
        assert_parse_error!($template, $liquid)
    }};
    ($template:expr, $liquid:expr) => {{
        let template = $liquid.parse($template);
        assert!(template.is_err());
        template.err().unwrap()
    }};
}

/// Asserts that `$template` parses but fails to render with `$assigns`.
#[allow(unused_macros)]
#[macro_export]
macro_rules! assert_render_error {
    ($template:expr, ) => {
        assert_render_error!($template);
    };
    ($template:expr) => {
        let assigns = ::liquid::Object::default();
        assert_render_error!($template, assigns);
    };
    ($template:expr, $assigns: expr, ) => {
        assert_render_error!($template, $assigns);
    };
    ($template:expr, $assigns: expr) => {
        let template = ::liquid::ParserBuilder::with_stdlib()
            .build()
            .unwrap()
            .parse($template.as_ref())
            .unwrap();

        template.render(&$assigns).unwrap_err();
    };
}
29.84058
85
0.589121
2285cd8bf6c6ec79316b336203cdee4d611faee3
1,693
// type aliases use gw_types::U256; pub use sparse_merkle_tree::H256; pub trait H256Ext { fn one() -> H256; fn from_u32(n: u32) -> H256; fn to_u32(&self) -> u32; fn from_u64(n: u64) -> H256; fn to_u64(&self) -> u64; fn from_u128(n: u128) -> H256; fn to_u128(&self) -> u128; fn from_u256(n: U256) -> H256; fn to_u256(&self) -> U256; } impl H256Ext for H256 { fn one() -> H256 { H256::from_u32(1) } fn from_u32(n: u32) -> H256 { let mut buf = [0u8; 32]; buf[..4].copy_from_slice(&n.to_le_bytes()); buf.into() } fn to_u32(&self) -> u32 { let mut n_bytes = [0u8; 4]; n_bytes.copy_from_slice(&self.as_slice()[..4]); u32::from_le_bytes(n_bytes) } fn from_u64(n: u64) -> H256 { let mut buf = [0u8; 32]; buf[..8].copy_from_slice(&n.to_le_bytes()); buf.into() } fn to_u64(&self) -> u64 { let mut n_bytes = [0u8; 8]; n_bytes.copy_from_slice(&self.as_slice()[..8]); u64::from_le_bytes(n_bytes) } fn from_u128(n: u128) -> H256 { let mut buf = [0u8; 32]; buf[..16].copy_from_slice(&n.to_le_bytes()); buf.into() } fn to_u128(&self) -> u128 { let mut n_bytes = [0u8; 16]; n_bytes.copy_from_slice(&self.as_slice()[..16]); u128::from_le_bytes(n_bytes) } fn from_u256(n: U256) -> H256 { let mut buf = [0u8; 32]; n.to_little_endian(&mut buf); buf.into() } fn to_u256(&self) -> U256 { let mut n_bytes = [0u8; 32]; n_bytes.copy_from_slice(&self.as_slice()[..32]); U256::from_little_endian(&n_bytes) } }
26.873016
56
0.537507
218c3d20db04318ae0c42d58b61d0d39989ab9ec
357
// Each submodule is crate-private; its public items are re-exported here
// so downstream code imports them from this module directly.

pub(crate) mod mesh;
pub use mesh::Mesh;

pub(crate) mod transform;
pub use transform::Transform;

pub(crate) mod camera_data;
pub use camera_data::CameraData;

pub(crate) mod material;
pub use material::Material;

pub(crate) mod skybox;
pub use skybox::SkyboxData;

pub(crate) mod light_data;
pub use light_data::*;

pub(crate) mod probe;
pub use probe::*;
17.85
32
0.745098
9ba68192a09dcd4342b0c3dea4cb5b1b8306bd49
1,222
extern crate itertools;
use itertools::Itertools;

/// Price of a single book before any discount.
const BOOK_PRICE: f64 = 8.0;
/// `DISCOUNT[k - 1]` is the price multiplier for a bundle of `k` distinct titles.
const DISCOUNT: &[f64] = &[1.0, 0.95, 0.9, 0.8, 0.75];

/// Returns the cheapest total price for `books`, where each entry is a book
/// number in `1..=5`; bundles of distinct titles earn the discounts above.
pub fn lowest_price(books: &[usize]) -> f64 {
    if books.is_empty() {
        return 0.0;
    }
    // stats[i] = number of copies of title i + 1.
    let mut stats = vec![0; 5];
    for &book in books {
        stats[book - 1] += 1;
    }
    low_price(&stats)
}

/// Returns the cheapest price for the multiset of books described by
/// `stats` (copies per title): tries every way of carving off one bundle of
/// distinct titles and recurses on the remainder.
pub fn low_price(stats: &[usize]) -> f64 {
    let types = stats.iter().filter(|&&c| c != 0).count();
    // Nothing left to buy. This also guards the `DISCOUNT[types - 1]`
    // lookup below against a usize underflow panic when every count is 0
    // (previously `low_price(&[0; 5])` panicked).
    if types == 0 {
        return 0.0;
    }
    // At most one copy of each remaining title: a single bundle of `types`.
    if stats.iter().all(|&c| c == 1 || c == 0) {
        return BOOK_PRICE * types as f64 * DISCOUNT[types - 1];
    }
    // Copies of a single title only: no discount can apply.
    if types == 1 {
        return BOOK_PRICE * stats.iter().sum::<usize>() as f64;
    }
    // Otherwise try every bundle of 2..=5 distinct available titles, price
    // the remainder recursively, and keep the overall cheapest.
    let mut lowest: Option<f64> = None;
    for count in 2..6 {
        for comb in (1..6).filter(|&c| stats[c - 1] != 0).combinations(count) {
            let mut price = BOOK_PRICE * count as f64 * DISCOUNT[count - 1];
            let mut remain = stats.to_vec();
            for book in comb {
                remain[book - 1] -= 1;
            }
            price += low_price(&remain);
            lowest = lowest.map_or(Some(price), |l| Some(l.min(price)));
        }
    }
    // `types >= 2` here, so at least one pair combination was priced.
    lowest.expect("at least one bundle combination exists")
}
26.565217
79
0.518822
9b8c9df051dd5573d3597a9cd00b24d3c4e1b937
4,819
use anyhow::Result; use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSql, ToSqlOutput, ValueRef}; use rusqlite::{params, Row, Transaction}; use std::str::FromStr; use crate::{Status, StatusError}; #[derive(Debug, Clone)] pub struct GuidanceRecord { pub(crate) id: String, pub(crate) checksum: String, pub(crate) description: Option<String>, pub(crate) maintainer_id: String, pub(crate) status: Status, pub(crate) creation_date: String, pub(crate) update_date: String, pub(crate) publication_date: Option<String>, pub(crate) canonical_url: Option<String>, pub(crate) content: String, } fn into_record(row: &Row) -> Result<GuidanceRecord> { let record = GuidanceRecord { id: row.get(0)?, checksum: row.get(1)?, description: row.get(2)?, maintainer_id: row.get(3)?, status: row.get(4)?, creation_date: row.get(5)?, update_date: row.get(6)?, publication_date: row.get(7)?, canonical_url: row.get(8)?, content: row.get(9)?, }; Ok(record) } impl GuidanceRecord { pub(crate) fn select_all(tx: &Transaction) -> Result<Vec<GuidanceRecord>> { let mut stmt = tx.prepare( r#" SELECT * FROM guidance "#, )?; let mut rows = stmt.query(params![])?; let mut result = Vec::new(); while let Some(row) = rows.next()? { let record = into_record(&row)?; result.push(record); } Ok(result) } pub(crate) fn select(tx: &Transaction, id: &str) -> Result<Option<GuidanceRecord>> { let mut stmt = tx.prepare( r#" SELECT * FROM guidance WHERE id = ?; "#, )?; let mut rows = stmt.query(params![id])?; if let Some(row) = rows.next()? 
{ let result = into_record(&row)?; return Ok(Some(result)); } Ok(None) } pub(crate) fn delete(tx: &Transaction, id: &str) -> Result<()> { let mut stmt = tx.prepare( r#" DELETE FROM guidance WHERE id = ?; "#, )?; stmt.execute(params![id])?; Ok(()) } pub(crate) fn insert(tx: &Transaction, record: &GuidanceRecord) -> Result<()> { let values = params![ &record.id, &record.checksum, &record.description, &record.maintainer_id, &record.status, &record.creation_date, &record.update_date, &record.publication_date, &record.canonical_url, &record.content, ]; let mut stmt = tx.prepare( r#" INSERT INTO guidance VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?); "#, )?; stmt.execute(values)?; Ok(()) } } #[derive(Debug, Clone)] pub struct GuidanceStandardRecord { pub(crate) guidance_id: String, pub(crate) standard_id: String, } impl GuidanceStandardRecord { pub(crate) fn select(tx: &Transaction, id: &str) -> Result<Vec<GuidanceStandardRecord>> { let mut stmt = tx.prepare( r#" SELECT * FROM guidance_standard WHERE guidance_id = ?; "#, )?; let mut rows = stmt.query(params![id])?; let mut list = Vec::new(); while let Some(row) = rows.next()? { let result = GuidanceStandardRecord { guidance_id: row.get(0)?, standard_id: row.get(1)?, }; list.push(result); } Ok(list) } pub(crate) fn insert(tx: &Transaction, record: &GuidanceStandardRecord) -> Result<()> { let values = params![&record.guidance_id, &record.standard_id]; let mut stmt = tx.prepare( r#" INSERT INTO guidance_standard VALUES (?, ?); "#, )?; stmt.execute(values)?; Ok(()) } } impl FromSql for Status { #[inline] fn column_result(value: ValueRef<'_>) -> FromSqlResult<Self> { value.as_str().and_then(|s| match Status::from_str(s) { Ok(s) => Ok(s), // TODO: make StatusError more expressive. Err(_err) => { let e = StatusError; Err(FromSqlError::Other(Box::new(e))) } }) } } impl ToSql for Status { #[inline] fn to_sql(&self) -> std::result::Result<ToSqlOutput<'_>, rusqlite::Error> { let s = self.to_string(); Ok(ToSqlOutput::from(s)) } }
25.230366
93
0.501764
56c587faa54cf86d3f16b1a8b4624e17ebe08a9f
2,020
/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ use aws_config::meta::region::RegionProviderChain; use aws_sdk_dynamodb::{Client, Error, Region, PKG_VERSION}; use tokio_stream::StreamExt; use structopt::StructOpt; #[derive(Debug, StructOpt)] struct Opt { /// The AWS Region. #[structopt(short, long)] region: Option<String>, /// The name of the table. #[structopt(short, long)] table: String, /// Whether to display additional information. #[structopt(short, long)] verbose: bool, } /// Lists the items in a DynamoDB table. /// # Arguments /// /// * `-t TABLE` - The name of the table. /// * `[-r REGION]` - The region in which the client is created. /// If not supplied, uses the value of the **AWS_REGION** environment variable. /// If the environment variable is not set, defaults to **us-west-2**. /// * `[-v]` - Whether to display additional information. #[tokio::main] async fn main() -> Result<(), Error> { tracing_subscriber::fmt::init(); let Opt { table, region, verbose, } = Opt::from_args(); let region_provider = RegionProviderChain::first_try(region.map(Region::new)) .or_default_provider() .or_else(Region::new("us-west-2")); let shared_config = aws_config::from_env().region(region_provider).load().await; let client = Client::new(&shared_config); if verbose { println!("DynamoDB client version: {}", PKG_VERSION); println!( "Region: {}", shared_config.region().unwrap() ); println!("Table: {}", &table); println!(); } let items: Result<Vec<_>, _> = client .scan() .table_name(table) .into_paginator() .items() .send() .collect() .await; println!("Items in table:"); for item in items? { println!(" {:?}", item); } Ok(()) }
25.897436
84
0.584653
89ce6f56c6620863afa9a6cca35dbfcccbeb0b1f
5,540
//! A library for rank-one constraint systems. #![cfg_attr(not(feature = "std"), no_std)] #![warn(unused, future_incompatible, nonstandard_style, rust_2018_idioms)] #![allow(clippy::op_ref, clippy::suspicious_op_assign_impl)] #![cfg_attr(not(use_asm), forbid(unsafe_code))] #![cfg_attr(use_asm, feature(llvm_asm))] #![cfg_attr(use_asm, deny(unsafe_code))] #[cfg(not(feature = "std"))] extern crate alloc; #[cfg(not(feature = "std"))] #[allow(unused_imports)] use alloc::{ borrow::Cow, collections::{BTreeMap, BTreeSet}, string::{String, ToString}, vec::Vec, }; #[cfg(feature = "std")] #[allow(unused_imports)] use std::{ borrow::Cow, collections::{BTreeMap, BTreeSet}, string::{String, ToString}, vec::Vec, }; use ark_ff::Field; use ark_serialize::*; use ark_std::io; use core::cmp::Ordering; use smallvec::SmallVec as StackVec; mod constraint_system; mod error; mod impl_constraint_var; mod impl_lc; pub use constraint_system::{ConstraintSynthesizer, ConstraintSystem, Namespace}; pub use error::SynthesisError; type SmallVec<F> = StackVec<[(Variable, F); 16]>; /// Represents a variable in a constraint system. #[derive(PartialOrd, Ord, PartialEq, Eq, Copy, Clone, Debug)] pub struct Variable(Index); impl Variable { /// This constructs a variable with an arbitrary index. /// Circuit implementations are not recommended to use this. pub fn new_unchecked(idx: Index) -> Variable { Variable(idx) } /// This returns the index underlying the variable. /// Circuit implementations are not recommended to use this. pub fn get_unchecked(&self) -> Index { self.0 } } /// Represents the index of either an input variable or auxiliary variable. #[derive(Copy, Clone, PartialEq, Debug, Eq)] pub enum Index { /// Index of an input variable. Input(usize), /// Index of an auxiliary (or private) variable. 
Aux(usize), } impl CanonicalSerialize for Index { #[inline] fn serialize<W: io::Write>(&self, mut writer: W) -> Result<(), SerializationError> { match self { Index::Input(u) => { 0u8.serialize(&mut writer)?; u.serialize(&mut writer)?; } Index::Aux(u) => { 1u8.serialize(&mut writer)?; u.serialize(&mut writer)?; } } Ok(()) } #[inline] fn serialized_size(&self) -> usize { 1 + 0usize.serialized_size() } #[inline] fn serialize_uncompressed<W: io::Write>( &self, mut writer: W, ) -> Result<(), SerializationError> { match self { Index::Input(u) => { 0u8.serialize(&mut writer)?; u.serialize(&mut writer)?; } Index::Aux(u) => { 1u8.serialize(&mut writer)?; u.serialize(&mut writer)?; } } Ok(()) } #[inline] fn serialize_unchecked<W: io::Write>(&self, mut writer: W) -> Result<(), SerializationError> { match self { Index::Input(u) => { 0u8.serialize(&mut writer)?; u.serialize(&mut writer)?; } Index::Aux(u) => { 1u8.serialize(&mut writer)?; u.serialize(&mut writer)?; } } Ok(()) } #[inline] fn uncompressed_size(&self) -> usize { 1 + 0usize.serialized_size() } } impl CanonicalDeserialize for Index { #[inline] fn deserialize<R: io::Read>(mut reader: R) -> Result<Self, SerializationError> { let t = u8::deserialize(&mut reader)?; let u = usize::deserialize(&mut reader)?; match t { 0u8 => Ok(Index::Input(u)), _ => Ok(Index::Aux(u)), } } #[inline] fn deserialize_uncompressed<R: io::Read>(mut reader: R) -> Result<Self, SerializationError> { let t = u8::deserialize(&mut reader)?; let u = usize::deserialize(&mut reader)?; match t { 0u8 => Ok(Index::Input(u)), _ => Ok(Index::Aux(u)), } } #[inline] fn deserialize_unchecked<R: io::Read>(mut reader: R) -> Result<Self, SerializationError> { let t = u8::deserialize(&mut reader)?; let u = usize::deserialize(&mut reader)?; match t { 0u8 => Ok(Index::Input(u)), _ => Ok(Index::Aux(u)), } } } impl PartialOrd for Index { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } impl Ord for Index { fn cmp(&self, other: &Self) 
-> Ordering { match (self, other) { (Index::Input(ref idx1), Index::Input(ref idx2)) | (Index::Aux(ref idx1), Index::Aux(ref idx2)) => idx1.cmp(idx2), (Index::Input(_), Index::Aux(_)) => Ordering::Less, (Index::Aux(_), Index::Input(_)) => Ordering::Greater, } } } /// This represents a linear combination of some variables, with coefficients /// in the field `F`. /// The `(coeff, var)` pairs in a `LinearCombination` are kept sorted according /// to the index of the variable in its constraint system. #[derive(Debug, Clone)] pub struct LinearCombination<F: Field>(pub SmallVec<F>); /// Either a `Variable` or a `LinearCombination`. #[derive(Clone, Debug)] pub enum ConstraintVar<F: Field> { /// A wrapper around a `LinearCombination`. LC(LinearCombination<F>), /// A wrapper around a `Variable`. Var(Variable), }
28.121827
98
0.577978
0adc741526b923a548d2f6d1ed465cd9f6ea2014
218,679
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. use std::fmt::Write; /// See [`CreateMemberInput`](crate::input::CreateMemberInput) pub mod create_member_input { /// A builder for [`CreateMemberInput`](crate::input::CreateMemberInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) client_request_token: std::option::Option<std::string::String>, pub(crate) invitation_id: std::option::Option<std::string::String>, pub(crate) network_id: std::option::Option<std::string::String>, pub(crate) member_configuration: std::option::Option<crate::model::MemberConfiguration>, } impl Builder { /// <p>A unique, case-sensitive identifier that you provide to ensure the idempotency of the operation. An idempotent operation completes no more than one time. This identifier is required only if you make a service request directly using an HTTP client. It is generated automatically if you use an AWS SDK or the AWS CLI.</p> pub fn client_request_token(mut self, input: impl Into<std::string::String>) -> Self { self.client_request_token = Some(input.into()); self } pub fn set_client_request_token( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.client_request_token = input; self } /// <p>The unique identifier of the invitation that is sent to the member to join the network.</p> pub fn invitation_id(mut self, input: impl Into<std::string::String>) -> Self { self.invitation_id = Some(input.into()); self } pub fn set_invitation_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.invitation_id = input; self } /// <p>The unique identifier of the network in which the member is created.</p> pub fn network_id(mut self, input: impl Into<std::string::String>) -> Self { self.network_id = Some(input.into()); self } pub fn set_network_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.network_id = 
input; self } /// <p>Member configuration parameters.</p> pub fn member_configuration(mut self, input: crate::model::MemberConfiguration) -> Self { self.member_configuration = Some(input); self } pub fn set_member_configuration( mut self, input: std::option::Option<crate::model::MemberConfiguration>, ) -> Self { self.member_configuration = input; self } /// Consumes the builder and constructs a [`CreateMemberInput`](crate::input::CreateMemberInput) pub fn build( self, ) -> std::result::Result<crate::input::CreateMemberInput, smithy_http::operation::BuildError> { Ok(crate::input::CreateMemberInput { client_request_token: self.client_request_token, invitation_id: self.invitation_id, network_id: self.network_id, member_configuration: self.member_configuration, }) } } } #[doc(hidden)] pub type CreateMemberInputOperationOutputAlias = crate::operation::CreateMember; #[doc(hidden)] pub type CreateMemberInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl CreateMemberInput { /// Consumes the builder and constructs an Operation<[`CreateMember`](crate::operation::CreateMember)> #[allow(clippy::let_and_return)] pub fn make_operation( mut self, _config: &crate::config::Config, ) -> std::result::Result< smithy_http::operation::Operation< crate::operation::CreateMember, aws_http::AwsErrorRetryPolicy, >, smithy_http::operation::BuildError, > { Ok({ if self.client_request_token.is_none() { self.client_request_token = Some(_config.make_token.make_idempotency_token()); } let properties = smithy_http::property_bag::SharedPropertyBag::new(); let request = self.request_builder_base()?; let body = crate::operation_ser::serialize_operation_crate_operation_create_member(&self) .map_err(|err| { smithy_http::operation::BuildError::SerializationError(err.into()) })?; let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = smithy_http::operation::Request::from_parts( request.map(smithy_http::body::SdkBody::from), properties, ); 
request.properties_mut().insert( aws_http::user_agent::AwsUserAgent::new_from_environment( crate::API_METADATA.clone(), ), ); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = smithy_http::operation::Operation::new( request, crate::operation::CreateMember::new(), ) .with_metadata(smithy_http::operation::Metadata::new( "CreateMember", "managedblockchain", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); op }) } fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> { let input_1 = &self.network_id; let input_1 = input_1 .as_ref() .ok_or(smithy_http::operation::BuildError::MissingField { field: "network_id", details: "cannot be empty or unset", })?; let network_id = smithy_http::label::fmt_string(input_1, false); if network_id.is_empty() { return Err(smithy_http::operation::BuildError::MissingField { field: "network_id", details: "cannot be empty or unset", }); } write!( output, "/networks/{NetworkId}/members", NetworkId = network_id ) .expect("formatting should succeed"); Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( &self, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> { let mut uri = String::new(); self.uri_base(&mut uri)?; Ok(builder.method("POST").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( &self, ) -> std::result::Result<http::request::Builder, 
smithy_http::operation::BuildError> { let mut builder = self.update_http_builder(http::request::Builder::new())?; builder = smithy_http::header::set_header_if_absent( builder, http::header::HeaderName::from_static("content-type"), "application/json", ); Ok(builder) } fn assemble( mut builder: http::request::Builder, body: smithy_http::body::SdkBody, ) -> http::request::Request<smithy_http::body::SdkBody> { if let Some(content_length) = body.content_length() { builder = smithy_http::header::set_header_if_absent( builder, http::header::CONTENT_LENGTH, content_length, ); } builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`CreateMemberInput`](crate::input::CreateMemberInput) pub fn builder() -> crate::input::create_member_input::Builder { crate::input::create_member_input::Builder::default() } } /// See [`CreateNetworkInput`](crate::input::CreateNetworkInput) pub mod create_network_input { /// A builder for [`CreateNetworkInput`](crate::input::CreateNetworkInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) client_request_token: std::option::Option<std::string::String>, pub(crate) name: std::option::Option<std::string::String>, pub(crate) description: std::option::Option<std::string::String>, pub(crate) framework: std::option::Option<crate::model::Framework>, pub(crate) framework_version: std::option::Option<std::string::String>, pub(crate) framework_configuration: std::option::Option<crate::model::NetworkFrameworkConfiguration>, pub(crate) voting_policy: std::option::Option<crate::model::VotingPolicy>, pub(crate) member_configuration: std::option::Option<crate::model::MemberConfiguration>, pub(crate) tags: std::option::Option< std::collections::HashMap<std::string::String, std::string::String>, >, } impl Builder { /// <p>A unique, case-sensitive identifier that you provide to ensure the idempotency of the 
operation. An idempotent operation completes no more than one time. This identifier is required only if you make a service request directly using an HTTP client. It is generated automatically if you use an AWS SDK or the AWS CLI.</p> pub fn client_request_token(mut self, input: impl Into<std::string::String>) -> Self { self.client_request_token = Some(input.into()); self } pub fn set_client_request_token( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.client_request_token = input; self } /// <p>The name of the network.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.name = Some(input.into()); self } pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.name = input; self } /// <p>An optional description for the network.</p> pub fn description(mut self, input: impl Into<std::string::String>) -> Self { self.description = Some(input.into()); self } pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self { self.description = input; self } /// <p>The blockchain framework that the network uses.</p> pub fn framework(mut self, input: crate::model::Framework) -> Self { self.framework = Some(input); self } pub fn set_framework( mut self, input: std::option::Option<crate::model::Framework>, ) -> Self { self.framework = input; self } /// <p>The version of the blockchain framework that the network uses.</p> pub fn framework_version(mut self, input: impl Into<std::string::String>) -> Self { self.framework_version = Some(input.into()); self } pub fn set_framework_version( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.framework_version = input; self } /// <p> /// Configuration properties of the blockchain framework relevant to the network configuration. 
/// </p> pub fn framework_configuration( mut self, input: crate::model::NetworkFrameworkConfiguration, ) -> Self { self.framework_configuration = Some(input); self } pub fn set_framework_configuration( mut self, input: std::option::Option<crate::model::NetworkFrameworkConfiguration>, ) -> Self { self.framework_configuration = input; self } /// <p> /// The voting rules used by the network to determine if a proposal is approved. /// </p> pub fn voting_policy(mut self, input: crate::model::VotingPolicy) -> Self { self.voting_policy = Some(input); self } pub fn set_voting_policy( mut self, input: std::option::Option<crate::model::VotingPolicy>, ) -> Self { self.voting_policy = input; self } /// <p>Configuration properties for the first member within the network.</p> pub fn member_configuration(mut self, input: crate::model::MemberConfiguration) -> Self { self.member_configuration = Some(input); self } pub fn set_member_configuration( mut self, input: std::option::Option<crate::model::MemberConfiguration>, ) -> Self { self.member_configuration = input; self } pub fn tags( mut self, k: impl Into<std::string::String>, v: impl Into<std::string::String>, ) -> Self { let mut hash_map = self.tags.unwrap_or_default(); hash_map.insert(k.into(), v.into()); self.tags = Some(hash_map); self } pub fn set_tags( mut self, input: std::option::Option< std::collections::HashMap<std::string::String, std::string::String>, >, ) -> Self { self.tags = input; self } /// Consumes the builder and constructs a [`CreateNetworkInput`](crate::input::CreateNetworkInput) pub fn build( self, ) -> std::result::Result<crate::input::CreateNetworkInput, smithy_http::operation::BuildError> { Ok(crate::input::CreateNetworkInput { client_request_token: self.client_request_token, name: self.name, description: self.description, framework: self.framework, framework_version: self.framework_version, framework_configuration: self.framework_configuration, voting_policy: self.voting_policy, member_configuration: 
self.member_configuration, tags: self.tags, }) } } } #[doc(hidden)] pub type CreateNetworkInputOperationOutputAlias = crate::operation::CreateNetwork; #[doc(hidden)] pub type CreateNetworkInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl CreateNetworkInput { /// Consumes the builder and constructs an Operation<[`CreateNetwork`](crate::operation::CreateNetwork)> #[allow(clippy::let_and_return)] pub fn make_operation( mut self, _config: &crate::config::Config, ) -> std::result::Result< smithy_http::operation::Operation< crate::operation::CreateNetwork, aws_http::AwsErrorRetryPolicy, >, smithy_http::operation::BuildError, > { Ok({ if self.client_request_token.is_none() { self.client_request_token = Some(_config.make_token.make_idempotency_token()); } let properties = smithy_http::property_bag::SharedPropertyBag::new(); let request = self.request_builder_base()?; let body = crate::operation_ser::serialize_operation_crate_operation_create_network(&self) .map_err(|err| { smithy_http::operation::BuildError::SerializationError(err.into()) })?; let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = smithy_http::operation::Request::from_parts( request.map(smithy_http::body::SdkBody::from), properties, ); request.properties_mut().insert( aws_http::user_agent::AwsUserAgent::new_from_environment( crate::API_METADATA.clone(), ), ); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = smithy_http::operation::Operation::new( request, 
crate::operation::CreateNetwork::new(), ) .with_metadata(smithy_http::operation::Metadata::new( "CreateNetwork", "managedblockchain", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); op }) } fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> { write!(output, "/networks").expect("formatting should succeed"); Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( &self, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> { let mut uri = String::new(); self.uri_base(&mut uri)?; Ok(builder.method("POST").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( &self, ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> { let mut builder = self.update_http_builder(http::request::Builder::new())?; builder = smithy_http::header::set_header_if_absent( builder, http::header::HeaderName::from_static("content-type"), "application/json", ); Ok(builder) } fn assemble( mut builder: http::request::Builder, body: smithy_http::body::SdkBody, ) -> http::request::Request<smithy_http::body::SdkBody> { if let Some(content_length) = body.content_length() { builder = smithy_http::header::set_header_if_absent( builder, http::header::CONTENT_LENGTH, content_length, ); } builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`CreateNetworkInput`](crate::input::CreateNetworkInput) pub fn builder() -> crate::input::create_network_input::Builder { crate::input::create_network_input::Builder::default() } } /// See [`CreateNodeInput`](crate::input::CreateNodeInput) pub mod create_node_input { /// A builder for [`CreateNodeInput`](crate::input::CreateNodeInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) client_request_token: 
std::option::Option<std::string::String>, pub(crate) network_id: std::option::Option<std::string::String>, pub(crate) member_id: std::option::Option<std::string::String>, pub(crate) node_configuration: std::option::Option<crate::model::NodeConfiguration>, pub(crate) tags: std::option::Option< std::collections::HashMap<std::string::String, std::string::String>, >, } impl Builder { /// <p>A unique, case-sensitive identifier that you provide to ensure the idempotency of the operation. An idempotent operation completes no more than one time. This identifier is required only if you make a service request directly using an HTTP client. It is generated automatically if you use an AWS SDK or the AWS CLI.</p> pub fn client_request_token(mut self, input: impl Into<std::string::String>) -> Self { self.client_request_token = Some(input.into()); self } pub fn set_client_request_token( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.client_request_token = input; self } /// <p>The unique identifier of the network for the node.</p> /// <p>Ethereum public networks have the following <code>NetworkId</code>s:</p> /// <ul> /// <li> /// <p> /// <code>n-ethereum-mainnet</code> /// </p> /// </li> /// <li> /// <p> /// <code>n-ethereum-rinkeby</code> /// </p> /// </li> /// <li> /// <p> /// <code>n-ethereum-ropsten</code> /// </p> /// </li> /// </ul> pub fn network_id(mut self, input: impl Into<std::string::String>) -> Self { self.network_id = Some(input.into()); self } pub fn set_network_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.network_id = input; self } /// <p>The unique identifier of the member that owns this node.</p> /// <p>Applies only to Hyperledger Fabric.</p> pub fn member_id(mut self, input: impl Into<std::string::String>) -> Self { self.member_id = Some(input.into()); self } pub fn set_member_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.member_id = input; self } /// <p>The properties 
of a node configuration.</p> pub fn node_configuration(mut self, input: crate::model::NodeConfiguration) -> Self { self.node_configuration = Some(input); self } pub fn set_node_configuration( mut self, input: std::option::Option<crate::model::NodeConfiguration>, ) -> Self { self.node_configuration = input; self } pub fn tags( mut self, k: impl Into<std::string::String>, v: impl Into<std::string::String>, ) -> Self { let mut hash_map = self.tags.unwrap_or_default(); hash_map.insert(k.into(), v.into()); self.tags = Some(hash_map); self } pub fn set_tags( mut self, input: std::option::Option< std::collections::HashMap<std::string::String, std::string::String>, >, ) -> Self { self.tags = input; self } /// Consumes the builder and constructs a [`CreateNodeInput`](crate::input::CreateNodeInput) pub fn build( self, ) -> std::result::Result<crate::input::CreateNodeInput, smithy_http::operation::BuildError> { Ok(crate::input::CreateNodeInput { client_request_token: self.client_request_token, network_id: self.network_id, member_id: self.member_id, node_configuration: self.node_configuration, tags: self.tags, }) } } } #[doc(hidden)] pub type CreateNodeInputOperationOutputAlias = crate::operation::CreateNode; #[doc(hidden)] pub type CreateNodeInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl CreateNodeInput { /// Consumes the builder and constructs an Operation<[`CreateNode`](crate::operation::CreateNode)> #[allow(clippy::let_and_return)] pub fn make_operation( mut self, _config: &crate::config::Config, ) -> std::result::Result< smithy_http::operation::Operation< crate::operation::CreateNode, aws_http::AwsErrorRetryPolicy, >, smithy_http::operation::BuildError, > { Ok({ if self.client_request_token.is_none() { self.client_request_token = Some(_config.make_token.make_idempotency_token()); } let properties = smithy_http::property_bag::SharedPropertyBag::new(); let request = self.request_builder_base()?; let body = 
crate::operation_ser::serialize_operation_crate_operation_create_node(&self) .map_err(|err| { smithy_http::operation::BuildError::SerializationError(err.into()) })?; let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = smithy_http::operation::Request::from_parts( request.map(smithy_http::body::SdkBody::from), properties, ); request.properties_mut().insert( aws_http::user_agent::AwsUserAgent::new_from_environment( crate::API_METADATA.clone(), ), ); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = smithy_http::operation::Operation::new( request, crate::operation::CreateNode::new(), ) .with_metadata(smithy_http::operation::Metadata::new( "CreateNode", "managedblockchain", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); op }) } fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> { let input_2 = &self.network_id; let input_2 = input_2 .as_ref() .ok_or(smithy_http::operation::BuildError::MissingField { field: "network_id", details: "cannot be empty or unset", })?; let network_id = smithy_http::label::fmt_string(input_2, false); if network_id.is_empty() { return Err(smithy_http::operation::BuildError::MissingField { field: "network_id", details: "cannot be empty or unset", }); } write!( output, "/networks/{NetworkId}/nodes", NetworkId = network_id ) .expect("formatting should succeed"); Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( &self, builder: 
http::request::Builder, ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> { let mut uri = String::new(); self.uri_base(&mut uri)?; Ok(builder.method("POST").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( &self, ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> { let mut builder = self.update_http_builder(http::request::Builder::new())?; builder = smithy_http::header::set_header_if_absent( builder, http::header::HeaderName::from_static("content-type"), "application/json", ); Ok(builder) } fn assemble( mut builder: http::request::Builder, body: smithy_http::body::SdkBody, ) -> http::request::Request<smithy_http::body::SdkBody> { if let Some(content_length) = body.content_length() { builder = smithy_http::header::set_header_if_absent( builder, http::header::CONTENT_LENGTH, content_length, ); } builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`CreateNodeInput`](crate::input::CreateNodeInput) pub fn builder() -> crate::input::create_node_input::Builder { crate::input::create_node_input::Builder::default() } } /// See [`CreateProposalInput`](crate::input::CreateProposalInput) pub mod create_proposal_input { /// A builder for [`CreateProposalInput`](crate::input::CreateProposalInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) client_request_token: std::option::Option<std::string::String>, pub(crate) network_id: std::option::Option<std::string::String>, pub(crate) member_id: std::option::Option<std::string::String>, pub(crate) actions: std::option::Option<crate::model::ProposalActions>, pub(crate) description: std::option::Option<std::string::String>, pub(crate) tags: std::option::Option< std::collections::HashMap<std::string::String, std::string::String>, >, } impl Builder { /// <p>A unique, case-sensitive identifier that 
you provide to ensure the idempotency of the operation. An idempotent operation completes no more than one time. This identifier is required only if you make a service request directly using an HTTP client. It is generated automatically if you use an AWS SDK or the AWS CLI.</p> pub fn client_request_token(mut self, input: impl Into<std::string::String>) -> Self { self.client_request_token = Some(input.into()); self } pub fn set_client_request_token( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.client_request_token = input; self } /// <p> /// The unique identifier of the network for which the proposal is made.</p> pub fn network_id(mut self, input: impl Into<std::string::String>) -> Self { self.network_id = Some(input.into()); self } pub fn set_network_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.network_id = input; self } /// <p>The unique identifier of the member that is creating the proposal. This identifier is especially useful for identifying the member making the proposal when multiple members exist in a single AWS account.</p> pub fn member_id(mut self, input: impl Into<std::string::String>) -> Self { self.member_id = Some(input.into()); self } pub fn set_member_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.member_id = input; self } /// <p>The type of actions proposed, such as inviting a member or removing a member. The types of <code>Actions</code> in a proposal are mutually exclusive. For example, a proposal with <code>Invitations</code> actions cannot also contain <code>Removals</code> actions.</p> pub fn actions(mut self, input: crate::model::ProposalActions) -> Self { self.actions = Some(input); self } pub fn set_actions( mut self, input: std::option::Option<crate::model::ProposalActions>, ) -> Self { self.actions = input; self } /// <p>A description for the proposal that is visible to voting members, for example, "Proposal to add Example Corp. 
as member."</p> pub fn description(mut self, input: impl Into<std::string::String>) -> Self { self.description = Some(input.into()); self } pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self { self.description = input; self } pub fn tags( mut self, k: impl Into<std::string::String>, v: impl Into<std::string::String>, ) -> Self { let mut hash_map = self.tags.unwrap_or_default(); hash_map.insert(k.into(), v.into()); self.tags = Some(hash_map); self } pub fn set_tags( mut self, input: std::option::Option< std::collections::HashMap<std::string::String, std::string::String>, >, ) -> Self { self.tags = input; self } /// Consumes the builder and constructs a [`CreateProposalInput`](crate::input::CreateProposalInput) pub fn build( self, ) -> std::result::Result< crate::input::CreateProposalInput, smithy_http::operation::BuildError, > { Ok(crate::input::CreateProposalInput { client_request_token: self.client_request_token, network_id: self.network_id, member_id: self.member_id, actions: self.actions, description: self.description, tags: self.tags, }) } } } #[doc(hidden)] pub type CreateProposalInputOperationOutputAlias = crate::operation::CreateProposal; #[doc(hidden)] pub type CreateProposalInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl CreateProposalInput { /// Consumes the builder and constructs an Operation<[`CreateProposal`](crate::operation::CreateProposal)> #[allow(clippy::let_and_return)] pub fn make_operation( mut self, _config: &crate::config::Config, ) -> std::result::Result< smithy_http::operation::Operation< crate::operation::CreateProposal, aws_http::AwsErrorRetryPolicy, >, smithy_http::operation::BuildError, > { Ok({ if self.client_request_token.is_none() { self.client_request_token = Some(_config.make_token.make_idempotency_token()); } let properties = smithy_http::property_bag::SharedPropertyBag::new(); let request = self.request_builder_base()?; let body = 
crate::operation_ser::serialize_operation_crate_operation_create_proposal(&self) .map_err(|err| { smithy_http::operation::BuildError::SerializationError(err.into()) })?; let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = smithy_http::operation::Request::from_parts( request.map(smithy_http::body::SdkBody::from), properties, ); request.properties_mut().insert( aws_http::user_agent::AwsUserAgent::new_from_environment( crate::API_METADATA.clone(), ), ); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = smithy_http::operation::Operation::new( request, crate::operation::CreateProposal::new(), ) .with_metadata(smithy_http::operation::Metadata::new( "CreateProposal", "managedblockchain", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); op }) } fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> { let input_3 = &self.network_id; let input_3 = input_3 .as_ref() .ok_or(smithy_http::operation::BuildError::MissingField { field: "network_id", details: "cannot be empty or unset", })?; let network_id = smithy_http::label::fmt_string(input_3, false); if network_id.is_empty() { return Err(smithy_http::operation::BuildError::MissingField { field: "network_id", details: "cannot be empty or unset", }); } write!( output, "/networks/{NetworkId}/proposals", NetworkId = network_id ) .expect("formatting should succeed"); Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( 
&self, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> { let mut uri = String::new(); self.uri_base(&mut uri)?; Ok(builder.method("POST").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( &self, ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> { let mut builder = self.update_http_builder(http::request::Builder::new())?; builder = smithy_http::header::set_header_if_absent( builder, http::header::HeaderName::from_static("content-type"), "application/json", ); Ok(builder) } fn assemble( mut builder: http::request::Builder, body: smithy_http::body::SdkBody, ) -> http::request::Request<smithy_http::body::SdkBody> { if let Some(content_length) = body.content_length() { builder = smithy_http::header::set_header_if_absent( builder, http::header::CONTENT_LENGTH, content_length, ); } builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`CreateProposalInput`](crate::input::CreateProposalInput) pub fn builder() -> crate::input::create_proposal_input::Builder { crate::input::create_proposal_input::Builder::default() } } /// See [`DeleteMemberInput`](crate::input::DeleteMemberInput) pub mod delete_member_input { /// A builder for [`DeleteMemberInput`](crate::input::DeleteMemberInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) network_id: std::option::Option<std::string::String>, pub(crate) member_id: std::option::Option<std::string::String>, } impl Builder { /// <p>The unique identifier of the network from which the member is removed.</p> pub fn network_id(mut self, input: impl Into<std::string::String>) -> Self { self.network_id = Some(input.into()); self } pub fn set_network_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.network_id = input; self } /// <p>The unique 
identifier of the member to remove.</p> pub fn member_id(mut self, input: impl Into<std::string::String>) -> Self { self.member_id = Some(input.into()); self } pub fn set_member_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.member_id = input; self } /// Consumes the builder and constructs a [`DeleteMemberInput`](crate::input::DeleteMemberInput) pub fn build( self, ) -> std::result::Result<crate::input::DeleteMemberInput, smithy_http::operation::BuildError> { Ok(crate::input::DeleteMemberInput { network_id: self.network_id, member_id: self.member_id, }) } } } #[doc(hidden)] pub type DeleteMemberInputOperationOutputAlias = crate::operation::DeleteMember; #[doc(hidden)] pub type DeleteMemberInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl DeleteMemberInput { /// Consumes the builder and constructs an Operation<[`DeleteMember`](crate::operation::DeleteMember)> #[allow(clippy::let_and_return)] pub fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< smithy_http::operation::Operation< crate::operation::DeleteMember, aws_http::AwsErrorRetryPolicy, >, smithy_http::operation::BuildError, > { Ok({ let properties = smithy_http::property_bag::SharedPropertyBag::new(); let request = self.request_builder_base()?; let body = smithy_http::body::SdkBody::from(""); let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = smithy_http::operation::Request::from_parts( request.map(smithy_http::body::SdkBody::from), properties, ); request.properties_mut().insert( aws_http::user_agent::AwsUserAgent::new_from_environment( crate::API_METADATA.clone(), ), ); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), 
_config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = smithy_http::operation::Operation::new( request, crate::operation::DeleteMember::new(), ) .with_metadata(smithy_http::operation::Metadata::new( "DeleteMember", "managedblockchain", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); op }) } fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> { let input_4 = &self.network_id; let input_4 = input_4 .as_ref() .ok_or(smithy_http::operation::BuildError::MissingField { field: "network_id", details: "cannot be empty or unset", })?; let network_id = smithy_http::label::fmt_string(input_4, false); if network_id.is_empty() { return Err(smithy_http::operation::BuildError::MissingField { field: "network_id", details: "cannot be empty or unset", }); } let input_5 = &self.member_id; let input_5 = input_5 .as_ref() .ok_or(smithy_http::operation::BuildError::MissingField { field: "member_id", details: "cannot be empty or unset", })?; let member_id = smithy_http::label::fmt_string(input_5, false); if member_id.is_empty() { return Err(smithy_http::operation::BuildError::MissingField { field: "member_id", details: "cannot be empty or unset", }); } write!( output, "/networks/{NetworkId}/members/{MemberId}", NetworkId = network_id, MemberId = member_id ) .expect("formatting should succeed"); Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( &self, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> { let mut uri = String::new(); self.uri_base(&mut uri)?; Ok(builder.method("DELETE").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( &self, ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> { let mut builder = 
self.update_http_builder(http::request::Builder::new())?; builder = smithy_http::header::set_header_if_absent( builder, http::header::HeaderName::from_static("content-type"), "application/json", ); Ok(builder) } fn assemble( mut builder: http::request::Builder, body: smithy_http::body::SdkBody, ) -> http::request::Request<smithy_http::body::SdkBody> { if let Some(content_length) = body.content_length() { builder = smithy_http::header::set_header_if_absent( builder, http::header::CONTENT_LENGTH, content_length, ); } builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`DeleteMemberInput`](crate::input::DeleteMemberInput) pub fn builder() -> crate::input::delete_member_input::Builder { crate::input::delete_member_input::Builder::default() } } /// See [`DeleteNodeInput`](crate::input::DeleteNodeInput) pub mod delete_node_input { /// A builder for [`DeleteNodeInput`](crate::input::DeleteNodeInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) network_id: std::option::Option<std::string::String>, pub(crate) member_id: std::option::Option<std::string::String>, pub(crate) node_id: std::option::Option<std::string::String>, } impl Builder { /// <p>The unique identifier of the network that the node is on.</p> /// <p>Ethereum public networks have the following <code>NetworkId</code>s:</p> /// <ul> /// <li> /// <p> /// <code>n-ethereum-mainnet</code> /// </p> /// </li> /// <li> /// <p> /// <code>n-ethereum-rinkeby</code> /// </p> /// </li> /// <li> /// <p> /// <code>n-ethereum-ropsten</code> /// </p> /// </li> /// </ul> pub fn network_id(mut self, input: impl Into<std::string::String>) -> Self { self.network_id = Some(input.into()); self } pub fn set_network_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.network_id = input; self } /// <p>The unique identifier of the member that owns this node.</p> 
/// <p>Applies only to Hyperledger Fabric and is required for Hyperledger Fabric.</p> pub fn member_id(mut self, input: impl Into<std::string::String>) -> Self { self.member_id = Some(input.into()); self } pub fn set_member_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.member_id = input; self } /// <p>The unique identifier of the node.</p> pub fn node_id(mut self, input: impl Into<std::string::String>) -> Self { self.node_id = Some(input.into()); self } pub fn set_node_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.node_id = input; self } /// Consumes the builder and constructs a [`DeleteNodeInput`](crate::input::DeleteNodeInput) pub fn build( self, ) -> std::result::Result<crate::input::DeleteNodeInput, smithy_http::operation::BuildError> { Ok(crate::input::DeleteNodeInput { network_id: self.network_id, member_id: self.member_id, node_id: self.node_id, }) } } } #[doc(hidden)] pub type DeleteNodeInputOperationOutputAlias = crate::operation::DeleteNode; #[doc(hidden)] pub type DeleteNodeInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl DeleteNodeInput { /// Consumes the builder and constructs an Operation<[`DeleteNode`](crate::operation::DeleteNode)> #[allow(clippy::let_and_return)] pub fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< smithy_http::operation::Operation< crate::operation::DeleteNode, aws_http::AwsErrorRetryPolicy, >, smithy_http::operation::BuildError, > { Ok({ let properties = smithy_http::property_bag::SharedPropertyBag::new(); let request = self.request_builder_base()?; let body = smithy_http::body::SdkBody::from(""); let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = smithy_http::operation::Request::from_parts( request.map(smithy_http::body::SdkBody::from), properties, ); request.properties_mut().insert( aws_http::user_agent::AwsUserAgent::new_from_environment( crate::API_METADATA.clone(), ), ); 
#[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = smithy_http::operation::Operation::new( request, crate::operation::DeleteNode::new(), ) .with_metadata(smithy_http::operation::Metadata::new( "DeleteNode", "managedblockchain", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); op }) } fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> { let input_6 = &self.network_id; let input_6 = input_6 .as_ref() .ok_or(smithy_http::operation::BuildError::MissingField { field: "network_id", details: "cannot be empty or unset", })?; let network_id = smithy_http::label::fmt_string(input_6, false); if network_id.is_empty() { return Err(smithy_http::operation::BuildError::MissingField { field: "network_id", details: "cannot be empty or unset", }); } let input_7 = &self.node_id; let input_7 = input_7 .as_ref() .ok_or(smithy_http::operation::BuildError::MissingField { field: "node_id", details: "cannot be empty or unset", })?; let node_id = smithy_http::label::fmt_string(input_7, false); if node_id.is_empty() { return Err(smithy_http::operation::BuildError::MissingField { field: "node_id", details: "cannot be empty or unset", }); } write!( output, "/networks/{NetworkId}/nodes/{NodeId}", NetworkId = network_id, NodeId = node_id ) .expect("formatting should succeed"); Ok(()) } fn uri_query(&self, mut output: &mut String) { let mut query = smithy_http::query::Writer::new(&mut output); if let Some(inner_8) = 
&self.member_id { query.push_kv("memberId", &smithy_http::query::fmt_string(&inner_8)); } } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( &self, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> { let mut uri = String::new(); self.uri_base(&mut uri)?; self.uri_query(&mut uri); Ok(builder.method("DELETE").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( &self, ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> { let mut builder = self.update_http_builder(http::request::Builder::new())?; builder = smithy_http::header::set_header_if_absent( builder, http::header::HeaderName::from_static("content-type"), "application/json", ); Ok(builder) } fn assemble( mut builder: http::request::Builder, body: smithy_http::body::SdkBody, ) -> http::request::Request<smithy_http::body::SdkBody> { if let Some(content_length) = body.content_length() { builder = smithy_http::header::set_header_if_absent( builder, http::header::CONTENT_LENGTH, content_length, ); } builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`DeleteNodeInput`](crate::input::DeleteNodeInput) pub fn builder() -> crate::input::delete_node_input::Builder { crate::input::delete_node_input::Builder::default() } } /// See [`GetMemberInput`](crate::input::GetMemberInput) pub mod get_member_input { /// A builder for [`GetMemberInput`](crate::input::GetMemberInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) network_id: std::option::Option<std::string::String>, pub(crate) member_id: std::option::Option<std::string::String>, } impl Builder { /// <p>The unique identifier of the network to which the member belongs.</p> pub fn network_id(mut self, input: impl Into<std::string::String>) -> Self { self.network_id = Some(input.into()); self } 
pub fn set_network_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.network_id = input; self } /// <p>The unique identifier of the member.</p> pub fn member_id(mut self, input: impl Into<std::string::String>) -> Self { self.member_id = Some(input.into()); self } pub fn set_member_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.member_id = input; self } /// Consumes the builder and constructs a [`GetMemberInput`](crate::input::GetMemberInput) pub fn build( self, ) -> std::result::Result<crate::input::GetMemberInput, smithy_http::operation::BuildError> { Ok(crate::input::GetMemberInput { network_id: self.network_id, member_id: self.member_id, }) } } } #[doc(hidden)] pub type GetMemberInputOperationOutputAlias = crate::operation::GetMember; #[doc(hidden)] pub type GetMemberInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl GetMemberInput { /// Consumes the builder and constructs an Operation<[`GetMember`](crate::operation::GetMember)> #[allow(clippy::let_and_return)] pub fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< smithy_http::operation::Operation< crate::operation::GetMember, aws_http::AwsErrorRetryPolicy, >, smithy_http::operation::BuildError, > { Ok({ let properties = smithy_http::property_bag::SharedPropertyBag::new(); let request = self.request_builder_base()?; let body = smithy_http::body::SdkBody::from(""); let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = smithy_http::operation::Request::from_parts( request.map(smithy_http::body::SdkBody::from), properties, ); request.properties_mut().insert( aws_http::user_agent::AwsUserAgent::new_from_environment( crate::API_METADATA.clone(), ), ); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( 
_config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = smithy_http::operation::Operation::new(request, crate::operation::GetMember::new()) .with_metadata(smithy_http::operation::Metadata::new( "GetMember", "managedblockchain", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); op }) } fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> { let input_9 = &self.network_id; let input_9 = input_9 .as_ref() .ok_or(smithy_http::operation::BuildError::MissingField { field: "network_id", details: "cannot be empty or unset", })?; let network_id = smithy_http::label::fmt_string(input_9, false); if network_id.is_empty() { return Err(smithy_http::operation::BuildError::MissingField { field: "network_id", details: "cannot be empty or unset", }); } let input_10 = &self.member_id; let input_10 = input_10 .as_ref() .ok_or(smithy_http::operation::BuildError::MissingField { field: "member_id", details: "cannot be empty or unset", })?; let member_id = smithy_http::label::fmt_string(input_10, false); if member_id.is_empty() { return Err(smithy_http::operation::BuildError::MissingField { field: "member_id", details: "cannot be empty or unset", }); } write!( output, "/networks/{NetworkId}/members/{MemberId}", NetworkId = network_id, MemberId = member_id ) .expect("formatting should succeed"); Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( &self, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> { let mut uri = String::new(); self.uri_base(&mut uri)?; Ok(builder.method("GET").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( &self, ) -> 
std::result::Result<http::request::Builder, smithy_http::operation::BuildError> { let mut builder = self.update_http_builder(http::request::Builder::new())?; builder = smithy_http::header::set_header_if_absent( builder, http::header::HeaderName::from_static("content-type"), "application/json", ); Ok(builder) } fn assemble( mut builder: http::request::Builder, body: smithy_http::body::SdkBody, ) -> http::request::Request<smithy_http::body::SdkBody> { if let Some(content_length) = body.content_length() { builder = smithy_http::header::set_header_if_absent( builder, http::header::CONTENT_LENGTH, content_length, ); } builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`GetMemberInput`](crate::input::GetMemberInput) pub fn builder() -> crate::input::get_member_input::Builder { crate::input::get_member_input::Builder::default() } } /// See [`GetNetworkInput`](crate::input::GetNetworkInput) pub mod get_network_input { /// A builder for [`GetNetworkInput`](crate::input::GetNetworkInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) network_id: std::option::Option<std::string::String>, } impl Builder { /// <p>The unique identifier of the network to get information about.</p> pub fn network_id(mut self, input: impl Into<std::string::String>) -> Self { self.network_id = Some(input.into()); self } pub fn set_network_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.network_id = input; self } /// Consumes the builder and constructs a [`GetNetworkInput`](crate::input::GetNetworkInput) pub fn build( self, ) -> std::result::Result<crate::input::GetNetworkInput, smithy_http::operation::BuildError> { Ok(crate::input::GetNetworkInput { network_id: self.network_id, }) } } } #[doc(hidden)] pub type GetNetworkInputOperationOutputAlias = crate::operation::GetNetwork; #[doc(hidden)] pub type 
GetNetworkInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl GetNetworkInput { /// Consumes the builder and constructs an Operation<[`GetNetwork`](crate::operation::GetNetwork)> #[allow(clippy::let_and_return)] pub fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< smithy_http::operation::Operation< crate::operation::GetNetwork, aws_http::AwsErrorRetryPolicy, >, smithy_http::operation::BuildError, > { Ok({ let properties = smithy_http::property_bag::SharedPropertyBag::new(); let request = self.request_builder_base()?; let body = smithy_http::body::SdkBody::from(""); let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = smithy_http::operation::Request::from_parts( request.map(smithy_http::body::SdkBody::from), properties, ); request.properties_mut().insert( aws_http::user_agent::AwsUserAgent::new_from_environment( crate::API_METADATA.clone(), ), ); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = smithy_http::operation::Operation::new( request, crate::operation::GetNetwork::new(), ) .with_metadata(smithy_http::operation::Metadata::new( "GetNetwork", "managedblockchain", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); op }) } fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> { let input_11 = &self.network_id; let input_11 = input_11 .as_ref() .ok_or(smithy_http::operation::BuildError::MissingField { field: "network_id", 
details: "cannot be empty or unset", })?; let network_id = smithy_http::label::fmt_string(input_11, false); if network_id.is_empty() { return Err(smithy_http::operation::BuildError::MissingField { field: "network_id", details: "cannot be empty or unset", }); } write!(output, "/networks/{NetworkId}", NetworkId = network_id) .expect("formatting should succeed"); Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( &self, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> { let mut uri = String::new(); self.uri_base(&mut uri)?; Ok(builder.method("GET").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( &self, ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> { let mut builder = self.update_http_builder(http::request::Builder::new())?; builder = smithy_http::header::set_header_if_absent( builder, http::header::HeaderName::from_static("content-type"), "application/json", ); Ok(builder) } fn assemble( mut builder: http::request::Builder, body: smithy_http::body::SdkBody, ) -> http::request::Request<smithy_http::body::SdkBody> { if let Some(content_length) = body.content_length() { builder = smithy_http::header::set_header_if_absent( builder, http::header::CONTENT_LENGTH, content_length, ); } builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`GetNetworkInput`](crate::input::GetNetworkInput) pub fn builder() -> crate::input::get_network_input::Builder { crate::input::get_network_input::Builder::default() } } /// See [`GetNodeInput`](crate::input::GetNodeInput) pub mod get_node_input { /// A builder for [`GetNodeInput`](crate::input::GetNodeInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) network_id: std::option::Option<std::string::String>, pub(crate) member_id: 
std::option::Option<std::string::String>, pub(crate) node_id: std::option::Option<std::string::String>, } impl Builder { /// <p>The unique identifier of the network that the node is on.</p> pub fn network_id(mut self, input: impl Into<std::string::String>) -> Self { self.network_id = Some(input.into()); self } pub fn set_network_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.network_id = input; self } /// <p>The unique identifier of the member that owns the node.</p> /// <p>Applies only to Hyperledger Fabric and is required for Hyperledger Fabric.</p> pub fn member_id(mut self, input: impl Into<std::string::String>) -> Self { self.member_id = Some(input.into()); self } pub fn set_member_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.member_id = input; self } /// <p>The unique identifier of the node.</p> pub fn node_id(mut self, input: impl Into<std::string::String>) -> Self { self.node_id = Some(input.into()); self } pub fn set_node_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.node_id = input; self } /// Consumes the builder and constructs a [`GetNodeInput`](crate::input::GetNodeInput) pub fn build( self, ) -> std::result::Result<crate::input::GetNodeInput, smithy_http::operation::BuildError> { Ok(crate::input::GetNodeInput { network_id: self.network_id, member_id: self.member_id, node_id: self.node_id, }) } } } #[doc(hidden)] pub type GetNodeInputOperationOutputAlias = crate::operation::GetNode; #[doc(hidden)] pub type GetNodeInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl GetNodeInput { /// Consumes the builder and constructs an Operation<[`GetNode`](crate::operation::GetNode)> #[allow(clippy::let_and_return)] pub fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< smithy_http::operation::Operation<crate::operation::GetNode, aws_http::AwsErrorRetryPolicy>, smithy_http::operation::BuildError, > { Ok({ let properties = 
smithy_http::property_bag::SharedPropertyBag::new(); let request = self.request_builder_base()?; let body = smithy_http::body::SdkBody::from(""); let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = smithy_http::operation::Request::from_parts( request.map(smithy_http::body::SdkBody::from), properties, ); request.properties_mut().insert( aws_http::user_agent::AwsUserAgent::new_from_environment( crate::API_METADATA.clone(), ), ); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = smithy_http::operation::Operation::new(request, crate::operation::GetNode::new()) .with_metadata(smithy_http::operation::Metadata::new( "GetNode", "managedblockchain", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); op }) } fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> { let input_12 = &self.network_id; let input_12 = input_12 .as_ref() .ok_or(smithy_http::operation::BuildError::MissingField { field: "network_id", details: "cannot be empty or unset", })?; let network_id = smithy_http::label::fmt_string(input_12, false); if network_id.is_empty() { return Err(smithy_http::operation::BuildError::MissingField { field: "network_id", details: "cannot be empty or unset", }); } let input_13 = &self.node_id; let input_13 = input_13 .as_ref() .ok_or(smithy_http::operation::BuildError::MissingField { field: "node_id", details: "cannot be empty or unset", })?; let node_id = 
smithy_http::label::fmt_string(input_13, false); if node_id.is_empty() { return Err(smithy_http::operation::BuildError::MissingField { field: "node_id", details: "cannot be empty or unset", }); } write!( output, "/networks/{NetworkId}/nodes/{NodeId}", NetworkId = network_id, NodeId = node_id ) .expect("formatting should succeed"); Ok(()) } fn uri_query(&self, mut output: &mut String) { let mut query = smithy_http::query::Writer::new(&mut output); if let Some(inner_14) = &self.member_id { query.push_kv("memberId", &smithy_http::query::fmt_string(&inner_14)); } } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( &self, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> { let mut uri = String::new(); self.uri_base(&mut uri)?; self.uri_query(&mut uri); Ok(builder.method("GET").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( &self, ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> { let mut builder = self.update_http_builder(http::request::Builder::new())?; builder = smithy_http::header::set_header_if_absent( builder, http::header::HeaderName::from_static("content-type"), "application/json", ); Ok(builder) } fn assemble( mut builder: http::request::Builder, body: smithy_http::body::SdkBody, ) -> http::request::Request<smithy_http::body::SdkBody> { if let Some(content_length) = body.content_length() { builder = smithy_http::header::set_header_if_absent( builder, http::header::CONTENT_LENGTH, content_length, ); } builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`GetNodeInput`](crate::input::GetNodeInput) pub fn builder() -> crate::input::get_node_input::Builder { crate::input::get_node_input::Builder::default() } } /// See [`GetProposalInput`](crate::input::GetProposalInput) pub mod get_proposal_input { /// A builder for [`GetProposalInput`](crate::input::GetProposalInput) 
#[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) network_id: std::option::Option<std::string::String>, pub(crate) proposal_id: std::option::Option<std::string::String>, } impl Builder { /// <p>The unique identifier of the network for which the proposal is made.</p> pub fn network_id(mut self, input: impl Into<std::string::String>) -> Self { self.network_id = Some(input.into()); self } pub fn set_network_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.network_id = input; self } /// <p>The unique identifier of the proposal.</p> pub fn proposal_id(mut self, input: impl Into<std::string::String>) -> Self { self.proposal_id = Some(input.into()); self } pub fn set_proposal_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.proposal_id = input; self } /// Consumes the builder and constructs a [`GetProposalInput`](crate::input::GetProposalInput) pub fn build( self, ) -> std::result::Result<crate::input::GetProposalInput, smithy_http::operation::BuildError> { Ok(crate::input::GetProposalInput { network_id: self.network_id, proposal_id: self.proposal_id, }) } } } #[doc(hidden)] pub type GetProposalInputOperationOutputAlias = crate::operation::GetProposal; #[doc(hidden)] pub type GetProposalInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl GetProposalInput { /// Consumes the builder and constructs an Operation<[`GetProposal`](crate::operation::GetProposal)> #[allow(clippy::let_and_return)] pub fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< smithy_http::operation::Operation< crate::operation::GetProposal, aws_http::AwsErrorRetryPolicy, >, smithy_http::operation::BuildError, > { Ok({ let properties = smithy_http::property_bag::SharedPropertyBag::new(); let request = self.request_builder_base()?; let body = smithy_http::body::SdkBody::from(""); let request = Self::assemble(request, 
body); #[allow(unused_mut)] let mut request = smithy_http::operation::Request::from_parts( request.map(smithy_http::body::SdkBody::from), properties, ); request.properties_mut().insert( aws_http::user_agent::AwsUserAgent::new_from_environment( crate::API_METADATA.clone(), ), ); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = smithy_http::operation::Operation::new( request, crate::operation::GetProposal::new(), ) .with_metadata(smithy_http::operation::Metadata::new( "GetProposal", "managedblockchain", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); op }) } fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> { let input_15 = &self.network_id; let input_15 = input_15 .as_ref() .ok_or(smithy_http::operation::BuildError::MissingField { field: "network_id", details: "cannot be empty or unset", })?; let network_id = smithy_http::label::fmt_string(input_15, false); if network_id.is_empty() { return Err(smithy_http::operation::BuildError::MissingField { field: "network_id", details: "cannot be empty or unset", }); } let input_16 = &self.proposal_id; let input_16 = input_16 .as_ref() .ok_or(smithy_http::operation::BuildError::MissingField { field: "proposal_id", details: "cannot be empty or unset", })?; let proposal_id = smithy_http::label::fmt_string(input_16, false); if proposal_id.is_empty() { return Err(smithy_http::operation::BuildError::MissingField { field: "proposal_id", details: "cannot be empty 
or unset", }); } write!( output, "/networks/{NetworkId}/proposals/{ProposalId}", NetworkId = network_id, ProposalId = proposal_id ) .expect("formatting should succeed"); Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( &self, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> { let mut uri = String::new(); self.uri_base(&mut uri)?; Ok(builder.method("GET").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( &self, ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> { let mut builder = self.update_http_builder(http::request::Builder::new())?; builder = smithy_http::header::set_header_if_absent( builder, http::header::HeaderName::from_static("content-type"), "application/json", ); Ok(builder) } fn assemble( mut builder: http::request::Builder, body: smithy_http::body::SdkBody, ) -> http::request::Request<smithy_http::body::SdkBody> { if let Some(content_length) = body.content_length() { builder = smithy_http::header::set_header_if_absent( builder, http::header::CONTENT_LENGTH, content_length, ); } builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`GetProposalInput`](crate::input::GetProposalInput) pub fn builder() -> crate::input::get_proposal_input::Builder { crate::input::get_proposal_input::Builder::default() } } /// See [`ListInvitationsInput`](crate::input::ListInvitationsInput) pub mod list_invitations_input { /// A builder for [`ListInvitationsInput`](crate::input::ListInvitationsInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) max_results: std::option::Option<i32>, pub(crate) next_token: std::option::Option<std::string::String>, } impl Builder { /// <p>The maximum number of invitations to return.</p> pub fn max_results(mut self, input: i32) -> Self { 
self.max_results = Some(input); self } pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self { self.max_results = input; self } /// <p>The pagination token that indicates the next set of results to retrieve.</p> pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self { self.next_token = Some(input.into()); self } pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.next_token = input; self } /// Consumes the builder and constructs a [`ListInvitationsInput`](crate::input::ListInvitationsInput) pub fn build( self, ) -> std::result::Result< crate::input::ListInvitationsInput, smithy_http::operation::BuildError, > { Ok(crate::input::ListInvitationsInput { max_results: self.max_results, next_token: self.next_token, }) } } } #[doc(hidden)] pub type ListInvitationsInputOperationOutputAlias = crate::operation::ListInvitations; #[doc(hidden)] pub type ListInvitationsInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl ListInvitationsInput { /// Consumes the builder and constructs an Operation<[`ListInvitations`](crate::operation::ListInvitations)> #[allow(clippy::let_and_return)] pub fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< smithy_http::operation::Operation< crate::operation::ListInvitations, aws_http::AwsErrorRetryPolicy, >, smithy_http::operation::BuildError, > { Ok({ let properties = smithy_http::property_bag::SharedPropertyBag::new(); let request = self.request_builder_base()?; let body = smithy_http::body::SdkBody::from(""); let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = smithy_http::operation::Request::from_parts( request.map(smithy_http::body::SdkBody::from), properties, ); request.properties_mut().insert( aws_http::user_agent::AwsUserAgent::new_from_environment( crate::API_METADATA.clone(), ), ); #[allow(unused_mut)] let mut signing_config = 
aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = smithy_http::operation::Operation::new( request, crate::operation::ListInvitations::new(), ) .with_metadata(smithy_http::operation::Metadata::new( "ListInvitations", "managedblockchain", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); op }) } fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> { write!(output, "/invitations").expect("formatting should succeed"); Ok(()) } fn uri_query(&self, mut output: &mut String) { let mut query = smithy_http::query::Writer::new(&mut output); if let Some(inner_17) = &self.max_results { query.push_kv( "maxResults", &smithy_types::primitive::Encoder::from(*inner_17).encode(), ); } if let Some(inner_18) = &self.next_token { query.push_kv("nextToken", &smithy_http::query::fmt_string(&inner_18)); } } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( &self, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> { let mut uri = String::new(); self.uri_base(&mut uri)?; self.uri_query(&mut uri); Ok(builder.method("GET").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( &self, ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> { let mut builder = self.update_http_builder(http::request::Builder::new())?; builder = smithy_http::header::set_header_if_absent( builder, http::header::HeaderName::from_static("content-type"), "application/json", ); 
Ok(builder)
    }
    // Builds the final HTTP request, adding Content-Length when the body size is known.
    fn assemble(
        mut builder: http::request::Builder,
        body: smithy_http::body::SdkBody,
    ) -> http::request::Request<smithy_http::body::SdkBody> {
        if let Some(content_length) = body.content_length() {
            builder = smithy_http::header::set_header_if_absent(
                builder,
                http::header::CONTENT_LENGTH,
                content_length,
            );
        }
        builder.body(body).expect("should be valid request")
    }
    /// Creates a new builder-style object to manufacture [`ListInvitationsInput`](crate::input::ListInvitationsInput)
    pub fn builder() -> crate::input::list_invitations_input::Builder {
        crate::input::list_invitations_input::Builder::default()
    }
}
/// See [`ListMembersInput`](crate::input::ListMembersInput)
pub mod list_members_input {
    /// A builder for [`ListMembersInput`](crate::input::ListMembersInput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) network_id: std::option::Option<std::string::String>,
        pub(crate) name: std::option::Option<std::string::String>,
        pub(crate) status: std::option::Option<crate::model::MemberStatus>,
        pub(crate) is_owned: std::option::Option<bool>,
        pub(crate) max_results: std::option::Option<i32>,
        pub(crate) next_token: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>The unique identifier of the network for which to list members.</p>
        pub fn network_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.network_id = Some(input.into());
            self
        }
        /// Sets `network_id`; accepts `None` to clear the field.
        pub fn set_network_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.network_id = input;
            self
        }
        /// <p>The optional name of the member to list.</p>
        pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
            self.name = Some(input.into());
            self
        }
        /// Sets `name`; accepts `None` to clear the field.
        pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.name = input;
            self
        }
        /// <p>An optional status specifier. If provided, only members currently in this status are listed.</p>
        pub fn status(mut self, input: crate::model::MemberStatus) -> Self {
            self.status = Some(input);
            self
        }
        /// Sets `status`; accepts `None` to clear the field.
        pub fn set_status(
            mut self,
            input: std::option::Option<crate::model::MemberStatus>,
        ) -> Self {
            self.status = input;
            self
        }
        /// <p>An optional Boolean value. If provided, the request is limited either to
        /// members that the current AWS account owns (<code>true</code>) or that other AWS accounts
        /// own (<code>false</code>). If omitted, all members are listed.</p>
        pub fn is_owned(mut self, input: bool) -> Self {
            self.is_owned = Some(input);
            self
        }
        /// Sets `is_owned`; accepts `None` to clear the field.
        pub fn set_is_owned(mut self, input: std::option::Option<bool>) -> Self {
            self.is_owned = input;
            self
        }
        /// <p>The maximum number of members to return in the request.</p>
        pub fn max_results(mut self, input: i32) -> Self {
            self.max_results = Some(input);
            self
        }
        /// Sets `max_results`; accepts `None` to clear the field.
        pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
            self.max_results = input;
            self
        }
        /// <p>The pagination token that indicates the next set of results to retrieve.</p>
        pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.next_token = Some(input.into());
            self
        }
        /// Sets `next_token`; accepts `None` to clear the field.
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.next_token = input;
            self
        }
        /// Consumes the builder and constructs a [`ListMembersInput`](crate::input::ListMembersInput)
        pub fn build(
            self,
        ) -> std::result::Result<crate::input::ListMembersInput, smithy_http::operation::BuildError>
        {
            Ok(crate::input::ListMembersInput {
                network_id: self.network_id,
                name: self.name,
                status: self.status,
                is_owned: self.is_owned,
                max_results: self.max_results,
                next_token: self.next_token,
            })
        }
    }
}
#[doc(hidden)]
pub type ListMembersInputOperationOutputAlias = crate::operation::ListMembers;
#[doc(hidden)]
pub type ListMembersInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl ListMembersInput {
    /// Consumes the builder and constructs an Operation<[`ListMembers`](crate::operation::ListMembers)>
    #[allow(clippy::let_and_return)]
    pub fn make_operation(
        &self,
        _config: &crate::config::Config,
    ) -> std::result::Result<
        smithy_http::operation::Operation<
            crate::operation::ListMembers,
            aws_http::AwsErrorRetryPolicy,
        >,
        smithy_http::operation::BuildError,
    > {
        Ok({
            let properties = smithy_http::property_bag::SharedPropertyBag::new();
            let request = self.request_builder_base()?;
            // ListMembers is a GET operation; the request body is always empty.
            let body = smithy_http::body::SdkBody::from("");
            let request = Self::assemble(request, body);
            #[allow(unused_mut)]
            let mut request = smithy_http::operation::Request::from_parts(
                request.map(smithy_http::body::SdkBody::from),
                properties,
            );
            request.properties_mut().insert(
                aws_http::user_agent::AwsUserAgent::new_from_environment(
                    crate::API_METADATA.clone(),
                ),
            );
            #[allow(unused_mut)]
            let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
            request.properties_mut().insert(signing_config);
            request
                .properties_mut()
                .insert(aws_types::SigningService::from_static(
                    _config.signing_service(),
                ));
            aws_endpoint::set_endpoint_resolver(
                &mut request.properties_mut(),
                _config.endpoint_resolver.clone(),
            );
            if let Some(region) = &_config.region {
                request.properties_mut().insert(region.clone());
            }
            aws_auth::set_provider(
                &mut request.properties_mut(),
                _config.credentials_provider.clone(),
            );
            let op = smithy_http::operation::Operation::new(
                request,
                crate::operation::ListMembers::new(),
            )
            .with_metadata(smithy_http::operation::Metadata::new(
                "ListMembers",
                "managedblockchain",
            ));
            let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
            op
        })
    }
    // Writes "/networks/{NetworkId}/members"; fails if network_id is unset or empty.
    fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
        let input_19 = &self.network_id;
        let input_19 = input_19
            .as_ref()
            .ok_or(smithy_http::operation::BuildError::MissingField {
                field: "network_id",
                details: "cannot be empty or unset",
            })?;
        let network_id = smithy_http::label::fmt_string(input_19, false);
        if
network_id.is_empty() {
            return Err(smithy_http::operation::BuildError::MissingField {
                field: "network_id",
                details: "cannot be empty or unset",
            });
        }
        write!(
            output,
            "/networks/{NetworkId}/members",
            NetworkId = network_id
        )
        .expect("formatting should succeed");
        Ok(())
    }
    // Appends only the query parameters that were actually set on the input.
    fn uri_query(&self, mut output: &mut String) {
        let mut query = smithy_http::query::Writer::new(&mut output);
        if let Some(inner_20) = &self.name {
            query.push_kv("name", &smithy_http::query::fmt_string(&inner_20));
        }
        if let Some(inner_21) = &self.status {
            query.push_kv("status", &smithy_http::query::fmt_string(&inner_21));
        }
        if let Some(inner_22) = &self.is_owned {
            query.push_kv(
                "isOwned",
                &smithy_types::primitive::Encoder::from(*inner_22).encode(),
            );
        }
        if let Some(inner_23) = &self.max_results {
            query.push_kv(
                "maxResults",
                &smithy_types::primitive::Encoder::from(*inner_23).encode(),
            );
        }
        if let Some(inner_24) = &self.next_token {
            query.push_kv("nextToken", &smithy_http::query::fmt_string(&inner_24));
        }
    }
    // Combines uri_base and uri_query into a GET request builder.
    #[allow(clippy::unnecessary_wraps)]
    fn update_http_builder(
        &self,
        builder: http::request::Builder,
    ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
        let mut uri = String::new();
        self.uri_base(&mut uri)?;
        self.uri_query(&mut uri);
        Ok(builder.method("GET").uri(uri))
    }
    #[allow(clippy::unnecessary_wraps)]
    fn request_builder_base(
        &self,
    ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
        let mut builder = self.update_http_builder(http::request::Builder::new())?;
        // content-type is only set if the caller has not already provided one.
        builder = smithy_http::header::set_header_if_absent(
            builder,
            http::header::HeaderName::from_static("content-type"),
            "application/json",
        );
        Ok(builder)
    }
    // Builds the final HTTP request, adding Content-Length when the body size is known.
    fn assemble(
        mut builder: http::request::Builder,
        body: smithy_http::body::SdkBody,
    ) -> http::request::Request<smithy_http::body::SdkBody> {
        if let Some(content_length) = body.content_length() {
            builder = smithy_http::header::set_header_if_absent(
                builder,
                http::header::CONTENT_LENGTH,
                content_length,
            );
        }
builder.body(body).expect("should be valid request")
    }
    /// Creates a new builder-style object to manufacture [`ListMembersInput`](crate::input::ListMembersInput)
    pub fn builder() -> crate::input::list_members_input::Builder {
        crate::input::list_members_input::Builder::default()
    }
}
/// See [`ListNetworksInput`](crate::input::ListNetworksInput)
pub mod list_networks_input {
    /// A builder for [`ListNetworksInput`](crate::input::ListNetworksInput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) name: std::option::Option<std::string::String>,
        pub(crate) framework: std::option::Option<crate::model::Framework>,
        pub(crate) status: std::option::Option<crate::model::NetworkStatus>,
        pub(crate) max_results: std::option::Option<i32>,
        pub(crate) next_token: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>The name of the network.</p>
        pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
            self.name = Some(input.into());
            self
        }
        /// Sets `name`; accepts `None` to clear the field.
        pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.name = input;
            self
        }
        /// <p>An optional framework specifier. If provided, only networks of this framework type are listed.</p>
        pub fn framework(mut self, input: crate::model::Framework) -> Self {
            self.framework = Some(input);
            self
        }
        /// Sets `framework`; accepts `None` to clear the field.
        pub fn set_framework(
            mut self,
            input: std::option::Option<crate::model::Framework>,
        ) -> Self {
            self.framework = input;
            self
        }
        /// <p>An optional status specifier. If provided, only networks currently in this status are listed.</p>
        /// <p>Applies only to Hyperledger Fabric.</p>
        pub fn status(mut self, input: crate::model::NetworkStatus) -> Self {
            self.status = Some(input);
            self
        }
        /// Sets `status`; accepts `None` to clear the field.
        pub fn set_status(
            mut self,
            input: std::option::Option<crate::model::NetworkStatus>,
        ) -> Self {
            self.status = input;
            self
        }
        /// <p>The maximum number of networks to list.</p>
        pub fn max_results(mut self, input: i32) -> Self {
            self.max_results = Some(input);
            self
        }
        /// Sets `max_results`; accepts `None` to clear the field.
        pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
            self.max_results = input;
            self
        }
        /// <p>The pagination token that indicates the next set of results to retrieve.</p>
        pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.next_token = Some(input.into());
            self
        }
        /// Sets `next_token`; accepts `None` to clear the field.
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.next_token = input;
            self
        }
        /// Consumes the builder and constructs a [`ListNetworksInput`](crate::input::ListNetworksInput)
        pub fn build(
            self,
        ) -> std::result::Result<crate::input::ListNetworksInput, smithy_http::operation::BuildError>
        {
            Ok(crate::input::ListNetworksInput {
                name: self.name,
                framework: self.framework,
                status: self.status,
                max_results: self.max_results,
                next_token: self.next_token,
            })
        }
    }
}
#[doc(hidden)]
pub type ListNetworksInputOperationOutputAlias = crate::operation::ListNetworks;
#[doc(hidden)]
pub type ListNetworksInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl ListNetworksInput {
    /// Consumes the builder and constructs an Operation<[`ListNetworks`](crate::operation::ListNetworks)>
    #[allow(clippy::let_and_return)]
    pub fn make_operation(
        &self,
        _config: &crate::config::Config,
    ) -> std::result::Result<
        smithy_http::operation::Operation<
            crate::operation::ListNetworks,
            aws_http::AwsErrorRetryPolicy,
        >,
        smithy_http::operation::BuildError,
    > {
        Ok({
            let properties = smithy_http::property_bag::SharedPropertyBag::new();
            let request =
self.request_builder_base()?;
            // ListNetworks is a GET operation; the request body is always empty.
            let body = smithy_http::body::SdkBody::from("");
            let request = Self::assemble(request, body);
            #[allow(unused_mut)]
            let mut request = smithy_http::operation::Request::from_parts(
                request.map(smithy_http::body::SdkBody::from),
                properties,
            );
            request.properties_mut().insert(
                aws_http::user_agent::AwsUserAgent::new_from_environment(
                    crate::API_METADATA.clone(),
                ),
            );
            #[allow(unused_mut)]
            let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
            request.properties_mut().insert(signing_config);
            request
                .properties_mut()
                .insert(aws_types::SigningService::from_static(
                    _config.signing_service(),
                ));
            aws_endpoint::set_endpoint_resolver(
                &mut request.properties_mut(),
                _config.endpoint_resolver.clone(),
            );
            // Region is optional on the config; only inserted when present.
            if let Some(region) = &_config.region {
                request.properties_mut().insert(region.clone());
            }
            aws_auth::set_provider(
                &mut request.properties_mut(),
                _config.credentials_provider.clone(),
            );
            let op = smithy_http::operation::Operation::new(
                request,
                crate::operation::ListNetworks::new(),
            )
            .with_metadata(smithy_http::operation::Metadata::new(
                "ListNetworks",
                "managedblockchain",
            ));
            let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
            op
        })
    }
    // Writes the request path; ListNetworks has no path parameters.
    fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
        write!(output, "/networks").expect("formatting should succeed");
        Ok(())
    }
    // Appends only the query parameters that were actually set on the input.
    fn uri_query(&self, mut output: &mut String) {
        let mut query = smithy_http::query::Writer::new(&mut output);
        if let Some(inner_25) = &self.name {
            query.push_kv("name", &smithy_http::query::fmt_string(&inner_25));
        }
        if let Some(inner_26) = &self.framework {
            query.push_kv("framework", &smithy_http::query::fmt_string(&inner_26));
        }
        if let Some(inner_27) = &self.status {
            query.push_kv("status", &smithy_http::query::fmt_string(&inner_27));
        }
        if let Some(inner_28) = &self.max_results {
            query.push_kv(
                "maxResults",
                &smithy_types::primitive::Encoder::from(*inner_28).encode(),
            );
        }
        if let Some(inner_29) =
&self.next_token {
            query.push_kv("nextToken", &smithy_http::query::fmt_string(&inner_29));
        }
    }
    // Combines uri_base and uri_query into a GET request builder.
    #[allow(clippy::unnecessary_wraps)]
    fn update_http_builder(
        &self,
        builder: http::request::Builder,
    ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
        let mut uri = String::new();
        self.uri_base(&mut uri)?;
        self.uri_query(&mut uri);
        Ok(builder.method("GET").uri(uri))
    }
    #[allow(clippy::unnecessary_wraps)]
    fn request_builder_base(
        &self,
    ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
        let mut builder = self.update_http_builder(http::request::Builder::new())?;
        // content-type is only set if the caller has not already provided one.
        builder = smithy_http::header::set_header_if_absent(
            builder,
            http::header::HeaderName::from_static("content-type"),
            "application/json",
        );
        Ok(builder)
    }
    // Builds the final HTTP request, adding Content-Length when the body size is known.
    fn assemble(
        mut builder: http::request::Builder,
        body: smithy_http::body::SdkBody,
    ) -> http::request::Request<smithy_http::body::SdkBody> {
        if let Some(content_length) = body.content_length() {
            builder = smithy_http::header::set_header_if_absent(
                builder,
                http::header::CONTENT_LENGTH,
                content_length,
            );
        }
        builder.body(body).expect("should be valid request")
    }
    /// Creates a new builder-style object to manufacture [`ListNetworksInput`](crate::input::ListNetworksInput)
    pub fn builder() -> crate::input::list_networks_input::Builder {
        crate::input::list_networks_input::Builder::default()
    }
}
/// See [`ListNodesInput`](crate::input::ListNodesInput)
pub mod list_nodes_input {
    /// A builder for [`ListNodesInput`](crate::input::ListNodesInput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) network_id: std::option::Option<std::string::String>,
        pub(crate) member_id: std::option::Option<std::string::String>,
        pub(crate) status: std::option::Option<crate::model::NodeStatus>,
        pub(crate) max_results: std::option::Option<i32>,
        pub(crate) next_token: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>The unique identifier of the network for which to list nodes.</p>
        pub fn network_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.network_id = Some(input.into());
            self
        }
        /// Sets `network_id`; accepts `None` to clear the field.
        pub fn set_network_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.network_id = input;
            self
        }
        /// <p>The unique identifier of the member who owns the nodes to list.</p>
        /// <p>Applies only to Hyperledger Fabric and is required for Hyperledger Fabric.</p>
        pub fn member_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.member_id = Some(input.into());
            self
        }
        /// Sets `member_id`; accepts `None` to clear the field.
        pub fn set_member_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.member_id = input;
            self
        }
        /// <p>An optional status specifier. If provided, only nodes currently in this status are listed.</p>
        pub fn status(mut self, input: crate::model::NodeStatus) -> Self {
            self.status = Some(input);
            self
        }
        /// Sets `status`; accepts `None` to clear the field.
        pub fn set_status(mut self, input: std::option::Option<crate::model::NodeStatus>) -> Self {
            self.status = input;
            self
        }
        /// <p>The maximum number of nodes to list.</p>
        pub fn max_results(mut self, input: i32) -> Self {
            self.max_results = Some(input);
            self
        }
        /// Sets `max_results`; accepts `None` to clear the field.
        pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
            self.max_results = input;
            self
        }
        /// <p>The pagination token that indicates the next set of results to retrieve.</p>
        pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.next_token = Some(input.into());
            self
        }
        /// Sets `next_token`; accepts `None` to clear the field.
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.next_token = input;
            self
        }
        /// Consumes the builder and constructs a [`ListNodesInput`](crate::input::ListNodesInput)
        pub fn build(
            self,
        ) -> std::result::Result<crate::input::ListNodesInput, smithy_http::operation::BuildError>
        {
            Ok(crate::input::ListNodesInput {
                network_id: self.network_id,
                member_id: self.member_id,
                status: self.status,
                max_results: self.max_results,
                next_token: self.next_token,
            })
        }
    }
}
#[doc(hidden)]
pub type ListNodesInputOperationOutputAlias = crate::operation::ListNodes;
#[doc(hidden)]
pub type ListNodesInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl ListNodesInput {
    /// Consumes the builder and constructs an Operation<[`ListNodes`](crate::operation::ListNodes)>
    #[allow(clippy::let_and_return)]
    pub fn make_operation(
        &self,
        _config: &crate::config::Config,
    ) -> std::result::Result<
        smithy_http::operation::Operation<
            crate::operation::ListNodes,
            aws_http::AwsErrorRetryPolicy,
        >,
        smithy_http::operation::BuildError,
    > {
        Ok({
            let properties = smithy_http::property_bag::SharedPropertyBag::new();
            let request = self.request_builder_base()?;
            // ListNodes is a GET operation; the request body is always empty.
            let body = smithy_http::body::SdkBody::from("");
            let request = Self::assemble(request, body);
            #[allow(unused_mut)]
            let mut request = smithy_http::operation::Request::from_parts(
                request.map(smithy_http::body::SdkBody::from),
                properties,
            );
            request.properties_mut().insert(
                aws_http::user_agent::AwsUserAgent::new_from_environment(
                    crate::API_METADATA.clone(),
                ),
            );
            #[allow(unused_mut)]
            let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
            request.properties_mut().insert(signing_config);
            request
                .properties_mut()
                .insert(aws_types::SigningService::from_static(
                    _config.signing_service(),
                ));
            aws_endpoint::set_endpoint_resolver(
                &mut request.properties_mut(),
                _config.endpoint_resolver.clone(),
            );
            if let Some(region) = &_config.region {
                request.properties_mut().insert(region.clone());
            }
            aws_auth::set_provider(
                &mut request.properties_mut(),
                _config.credentials_provider.clone(),
            );
            let op =
                smithy_http::operation::Operation::new(request, crate::operation::ListNodes::new())
                    .with_metadata(smithy_http::operation::Metadata::new(
                        "ListNodes",
                        "managedblockchain",
                    ));
            let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
            op
        })
    }
    // Writes "/networks/{NetworkId}/nodes"; fails if network_id is unset or empty.
    fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
        let input_30 = &self.network_id;
        let input_30 = input_30
            .as_ref()
            .ok_or(smithy_http::operation::BuildError::MissingField {
                field: "network_id",
                details: "cannot be empty or unset",
            })?;
        let network_id = smithy_http::label::fmt_string(input_30, false);
        if network_id.is_empty() {
            return Err(smithy_http::operation::BuildError::MissingField {
                field: "network_id",
                details: "cannot be empty or unset",
            });
        }
        write!(
            output,
            "/networks/{NetworkId}/nodes",
            NetworkId = network_id
        )
        .expect("formatting should succeed");
        Ok(())
    }
    // Appends only the query parameters that were actually set on the input.
    fn uri_query(&self, mut output: &mut String) {
        let mut query = smithy_http::query::Writer::new(&mut output);
        if let Some(inner_31) = &self.member_id {
            query.push_kv("memberId", &smithy_http::query::fmt_string(&inner_31));
        }
        if let Some(inner_32) = &self.status {
            query.push_kv("status", &smithy_http::query::fmt_string(&inner_32));
        }
        if let Some(inner_33) = &self.max_results {
            query.push_kv(
                "maxResults",
                &smithy_types::primitive::Encoder::from(*inner_33).encode(),
            );
        }
        if let Some(inner_34) = &self.next_token {
            query.push_kv("nextToken", &smithy_http::query::fmt_string(&inner_34));
        }
    }
    // Combines uri_base and uri_query into a GET request builder.
    #[allow(clippy::unnecessary_wraps)]
    fn update_http_builder(
        &self,
        builder: http::request::Builder,
    ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
        let mut uri = String::new();
        self.uri_base(&mut uri)?;
        self.uri_query(&mut uri);
        Ok(builder.method("GET").uri(uri))
    }
    #[allow(clippy::unnecessary_wraps)]
    fn request_builder_base(
        &self,
    ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
        let mut builder = self.update_http_builder(http::request::Builder::new())?;
        // content-type is only set if the caller has not already provided one.
        builder = smithy_http::header::set_header_if_absent(
            builder,
            http::header::HeaderName::from_static("content-type"),
            "application/json",
        );
        Ok(builder)
    }
    // Builds the final HTTP request, adding Content-Length when the body size is known.
    fn assemble(
        mut builder: http::request::Builder,
        body: smithy_http::body::SdkBody,
    ) -> http::request::Request<smithy_http::body::SdkBody> {
        if let Some(content_length) = body.content_length() {
            builder =
smithy_http::header::set_header_if_absent(
                builder,
                http::header::CONTENT_LENGTH,
                content_length,
            );
        }
        builder.body(body).expect("should be valid request")
    }
    /// Creates a new builder-style object to manufacture [`ListNodesInput`](crate::input::ListNodesInput)
    pub fn builder() -> crate::input::list_nodes_input::Builder {
        crate::input::list_nodes_input::Builder::default()
    }
}
/// See [`ListProposalsInput`](crate::input::ListProposalsInput)
pub mod list_proposals_input {
    /// A builder for [`ListProposalsInput`](crate::input::ListProposalsInput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) network_id: std::option::Option<std::string::String>,
        pub(crate) max_results: std::option::Option<i32>,
        pub(crate) next_token: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>
        /// The unique identifier of the network.
        /// </p>
        pub fn network_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.network_id = Some(input.into());
            self
        }
        /// Sets `network_id`; accepts `None` to clear the field.
        pub fn set_network_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.network_id = input;
            self
        }
        /// <p>
        /// The maximum number of proposals to return.
        /// </p>
        pub fn max_results(mut self, input: i32) -> Self {
            self.max_results = Some(input);
            self
        }
        /// Sets `max_results`; accepts `None` to clear the field.
        pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
            self.max_results = input;
            self
        }
        /// <p>
        /// The pagination token that indicates the next set of results to retrieve.
        /// </p>
        pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.next_token = Some(input.into());
            self
        }
        /// Sets `next_token`; accepts `None` to clear the field.
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.next_token = input;
            self
        }
        /// Consumes the builder and constructs a [`ListProposalsInput`](crate::input::ListProposalsInput)
        pub fn build(
            self,
        ) -> std::result::Result<crate::input::ListProposalsInput, smithy_http::operation::BuildError>
        {
            Ok(crate::input::ListProposalsInput {
                network_id: self.network_id,
                max_results: self.max_results,
                next_token: self.next_token,
            })
        }
    }
}
#[doc(hidden)]
pub type ListProposalsInputOperationOutputAlias = crate::operation::ListProposals;
#[doc(hidden)]
pub type ListProposalsInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl ListProposalsInput {
    /// Consumes the builder and constructs an Operation<[`ListProposals`](crate::operation::ListProposals)>
    #[allow(clippy::let_and_return)]
    pub fn make_operation(
        &self,
        _config: &crate::config::Config,
    ) -> std::result::Result<
        smithy_http::operation::Operation<
            crate::operation::ListProposals,
            aws_http::AwsErrorRetryPolicy,
        >,
        smithy_http::operation::BuildError,
    > {
        Ok({
            let properties = smithy_http::property_bag::SharedPropertyBag::new();
            let request = self.request_builder_base()?;
            // ListProposals is a GET operation; the request body is always empty.
            let body = smithy_http::body::SdkBody::from("");
            let request = Self::assemble(request, body);
            #[allow(unused_mut)]
            let mut request = smithy_http::operation::Request::from_parts(
                request.map(smithy_http::body::SdkBody::from),
                properties,
            );
            request.properties_mut().insert(
                aws_http::user_agent::AwsUserAgent::new_from_environment(
                    crate::API_METADATA.clone(),
                ),
            );
            #[allow(unused_mut)]
            let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
            request.properties_mut().insert(signing_config);
            request
                .properties_mut()
                .insert(aws_types::SigningService::from_static(
                    _config.signing_service(),
                ));
            aws_endpoint::set_endpoint_resolver(
                &mut
request.properties_mut(),
                _config.endpoint_resolver.clone(),
            );
            // Region is optional on the config; only inserted when present.
            if let Some(region) = &_config.region {
                request.properties_mut().insert(region.clone());
            }
            aws_auth::set_provider(
                &mut request.properties_mut(),
                _config.credentials_provider.clone(),
            );
            let op = smithy_http::operation::Operation::new(
                request,
                crate::operation::ListProposals::new(),
            )
            .with_metadata(smithy_http::operation::Metadata::new(
                "ListProposals",
                "managedblockchain",
            ));
            let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
            op
        })
    }
    // Writes "/networks/{NetworkId}/proposals"; fails if network_id is unset or empty.
    fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
        let input_35 = &self.network_id;
        let input_35 = input_35
            .as_ref()
            .ok_or(smithy_http::operation::BuildError::MissingField {
                field: "network_id",
                details: "cannot be empty or unset",
            })?;
        let network_id = smithy_http::label::fmt_string(input_35, false);
        if network_id.is_empty() {
            return Err(smithy_http::operation::BuildError::MissingField {
                field: "network_id",
                details: "cannot be empty or unset",
            });
        }
        write!(
            output,
            "/networks/{NetworkId}/proposals",
            NetworkId = network_id
        )
        .expect("formatting should succeed");
        Ok(())
    }
    // Appends only the query parameters that were actually set on the input.
    fn uri_query(&self, mut output: &mut String) {
        let mut query = smithy_http::query::Writer::new(&mut output);
        if let Some(inner_36) = &self.max_results {
            query.push_kv(
                "maxResults",
                &smithy_types::primitive::Encoder::from(*inner_36).encode(),
            );
        }
        if let Some(inner_37) = &self.next_token {
            query.push_kv("nextToken", &smithy_http::query::fmt_string(&inner_37));
        }
    }
    // Combines uri_base and uri_query into a GET request builder.
    #[allow(clippy::unnecessary_wraps)]
    fn update_http_builder(
        &self,
        builder: http::request::Builder,
    ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
        let mut uri = String::new();
        self.uri_base(&mut uri)?;
        self.uri_query(&mut uri);
        Ok(builder.method("GET").uri(uri))
    }
    #[allow(clippy::unnecessary_wraps)]
    fn request_builder_base(
        &self,
    ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
        let mut builder =
            self.update_http_builder(http::request::Builder::new())?;
        // content-type is only set if the caller has not already provided one.
        builder = smithy_http::header::set_header_if_absent(
            builder,
            http::header::HeaderName::from_static("content-type"),
            "application/json",
        );
        Ok(builder)
    }
    // Builds the final HTTP request, adding Content-Length when the body size is known.
    fn assemble(
        mut builder: http::request::Builder,
        body: smithy_http::body::SdkBody,
    ) -> http::request::Request<smithy_http::body::SdkBody> {
        if let Some(content_length) = body.content_length() {
            builder = smithy_http::header::set_header_if_absent(
                builder,
                http::header::CONTENT_LENGTH,
                content_length,
            );
        }
        builder.body(body).expect("should be valid request")
    }
    /// Creates a new builder-style object to manufacture [`ListProposalsInput`](crate::input::ListProposalsInput)
    pub fn builder() -> crate::input::list_proposals_input::Builder {
        crate::input::list_proposals_input::Builder::default()
    }
}
/// See [`ListProposalVotesInput`](crate::input::ListProposalVotesInput)
pub mod list_proposal_votes_input {
    /// A builder for [`ListProposalVotesInput`](crate::input::ListProposalVotesInput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) network_id: std::option::Option<std::string::String>,
        pub(crate) proposal_id: std::option::Option<std::string::String>,
        pub(crate) max_results: std::option::Option<i32>,
        pub(crate) next_token: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>
        /// The unique identifier of the network.
        /// </p>
        pub fn network_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.network_id = Some(input.into());
            self
        }
        /// Sets `network_id`; accepts `None` to clear the field.
        pub fn set_network_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.network_id = input;
            self
        }
        /// <p>
        /// The unique identifier of the proposal.
/// </p> pub fn proposal_id(mut self, input: impl Into<std::string::String>) -> Self { self.proposal_id = Some(input.into()); self } pub fn set_proposal_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.proposal_id = input; self } /// <p> /// The maximum number of votes to return. /// </p> pub fn max_results(mut self, input: i32) -> Self { self.max_results = Some(input); self } pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self { self.max_results = input; self } /// <p> /// The pagination token that indicates the next set of results to retrieve. /// </p> pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self { self.next_token = Some(input.into()); self } pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.next_token = input; self } /// Consumes the builder and constructs a [`ListProposalVotesInput`](crate::input::ListProposalVotesInput) pub fn build( self, ) -> std::result::Result< crate::input::ListProposalVotesInput, smithy_http::operation::BuildError, > { Ok(crate::input::ListProposalVotesInput { network_id: self.network_id, proposal_id: self.proposal_id, max_results: self.max_results, next_token: self.next_token, }) } } } #[doc(hidden)] pub type ListProposalVotesInputOperationOutputAlias = crate::operation::ListProposalVotes; #[doc(hidden)] pub type ListProposalVotesInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl ListProposalVotesInput { /// Consumes the builder and constructs an Operation<[`ListProposalVotes`](crate::operation::ListProposalVotes)> #[allow(clippy::let_and_return)] pub fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< smithy_http::operation::Operation< crate::operation::ListProposalVotes, aws_http::AwsErrorRetryPolicy, >, smithy_http::operation::BuildError, > { Ok({ let properties = smithy_http::property_bag::SharedPropertyBag::new(); let request = self.request_builder_base()?; let 
body = smithy_http::body::SdkBody::from(""); let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = smithy_http::operation::Request::from_parts( request.map(smithy_http::body::SdkBody::from), properties, ); request.properties_mut().insert( aws_http::user_agent::AwsUserAgent::new_from_environment( crate::API_METADATA.clone(), ), ); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = smithy_http::operation::Operation::new( request, crate::operation::ListProposalVotes::new(), ) .with_metadata(smithy_http::operation::Metadata::new( "ListProposalVotes", "managedblockchain", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); op }) } fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> { let input_38 = &self.network_id; let input_38 = input_38 .as_ref() .ok_or(smithy_http::operation::BuildError::MissingField { field: "network_id", details: "cannot be empty or unset", })?; let network_id = smithy_http::label::fmt_string(input_38, false); if network_id.is_empty() { return Err(smithy_http::operation::BuildError::MissingField { field: "network_id", details: "cannot be empty or unset", }); } let input_39 = &self.proposal_id; let input_39 = input_39 .as_ref() .ok_or(smithy_http::operation::BuildError::MissingField { field: "proposal_id", details: "cannot be empty or unset", })?; let proposal_id = smithy_http::label::fmt_string(input_39, false); if proposal_id.is_empty() { return 
Err(smithy_http::operation::BuildError::MissingField { field: "proposal_id", details: "cannot be empty or unset", }); } write!( output, "/networks/{NetworkId}/proposals/{ProposalId}/votes", NetworkId = network_id, ProposalId = proposal_id ) .expect("formatting should succeed"); Ok(()) } fn uri_query(&self, mut output: &mut String) { let mut query = smithy_http::query::Writer::new(&mut output); if let Some(inner_40) = &self.max_results { query.push_kv( "maxResults", &smithy_types::primitive::Encoder::from(*inner_40).encode(), ); } if let Some(inner_41) = &self.next_token { query.push_kv("nextToken", &smithy_http::query::fmt_string(&inner_41)); } } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( &self, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> { let mut uri = String::new(); self.uri_base(&mut uri)?; self.uri_query(&mut uri); Ok(builder.method("GET").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( &self, ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> { let mut builder = self.update_http_builder(http::request::Builder::new())?; builder = smithy_http::header::set_header_if_absent( builder, http::header::HeaderName::from_static("content-type"), "application/json", ); Ok(builder) } fn assemble( mut builder: http::request::Builder, body: smithy_http::body::SdkBody, ) -> http::request::Request<smithy_http::body::SdkBody> { if let Some(content_length) = body.content_length() { builder = smithy_http::header::set_header_if_absent( builder, http::header::CONTENT_LENGTH, content_length, ); } builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`ListProposalVotesInput`](crate::input::ListProposalVotesInput) pub fn builder() -> crate::input::list_proposal_votes_input::Builder { crate::input::list_proposal_votes_input::Builder::default() } } /// See 
[`ListTagsForResourceInput`](crate::input::ListTagsForResourceInput) pub mod list_tags_for_resource_input { /// A builder for [`ListTagsForResourceInput`](crate::input::ListTagsForResourceInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) resource_arn: std::option::Option<std::string::String>, } impl Builder { /// <p>The Amazon Resource Name (ARN) of the resource. For more information about ARNs and their format, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>AWS General Reference</i>.</p> pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self { self.resource_arn = Some(input.into()); self } pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.resource_arn = input; self } /// Consumes the builder and constructs a [`ListTagsForResourceInput`](crate::input::ListTagsForResourceInput) pub fn build( self, ) -> std::result::Result< crate::input::ListTagsForResourceInput, smithy_http::operation::BuildError, > { Ok(crate::input::ListTagsForResourceInput { resource_arn: self.resource_arn, }) } } } #[doc(hidden)] pub type ListTagsForResourceInputOperationOutputAlias = crate::operation::ListTagsForResource; #[doc(hidden)] pub type ListTagsForResourceInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl ListTagsForResourceInput { /// Consumes the builder and constructs an Operation<[`ListTagsForResource`](crate::operation::ListTagsForResource)> #[allow(clippy::let_and_return)] pub fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< smithy_http::operation::Operation< crate::operation::ListTagsForResource, aws_http::AwsErrorRetryPolicy, >, smithy_http::operation::BuildError, > { Ok({ let properties = smithy_http::property_bag::SharedPropertyBag::new(); let request = 
self.request_builder_base()?; let body = smithy_http::body::SdkBody::from(""); let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = smithy_http::operation::Request::from_parts( request.map(smithy_http::body::SdkBody::from), properties, ); request.properties_mut().insert( aws_http::user_agent::AwsUserAgent::new_from_environment( crate::API_METADATA.clone(), ), ); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = smithy_http::operation::Operation::new( request, crate::operation::ListTagsForResource::new(), ) .with_metadata(smithy_http::operation::Metadata::new( "ListTagsForResource", "managedblockchain", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); op }) } fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> { let input_42 = &self.resource_arn; let input_42 = input_42 .as_ref() .ok_or(smithy_http::operation::BuildError::MissingField { field: "resource_arn", details: "cannot be empty or unset", })?; let resource_arn = smithy_http::label::fmt_string(input_42, false); if resource_arn.is_empty() { return Err(smithy_http::operation::BuildError::MissingField { field: "resource_arn", details: "cannot be empty or unset", }); } write!(output, "/tags/{ResourceArn}", ResourceArn = resource_arn) .expect("formatting should succeed"); Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( &self, builder: http::request::Builder, ) -> 
std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
        let mut uri = String::new();
        self.uri_base(&mut uri)?;
        // ListTagsForResource is a GET with the ARN in the path; no query string.
        Ok(builder.method("GET").uri(uri))
    }
    // NOTE(review): smithy-rs generated code — comments here are lost on regeneration.
    // Builds the base HTTP request (method + URI) and defaults the content-type
    // header to application/json without overwriting a caller-supplied value.
    #[allow(clippy::unnecessary_wraps)]
    fn request_builder_base(
        &self,
    ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
        let mut builder = self.update_http_builder(http::request::Builder::new())?;
        builder = smithy_http::header::set_header_if_absent(
            builder,
            http::header::HeaderName::from_static("content-type"),
            "application/json",
        );
        Ok(builder)
    }
    // Attaches the body to the finished builder, setting Content-Length only when
    // the body's size is known up front (streaming bodies may not report one).
    fn assemble(
        mut builder: http::request::Builder,
        body: smithy_http::body::SdkBody,
    ) -> http::request::Request<smithy_http::body::SdkBody> {
        if let Some(content_length) = body.content_length() {
            builder = smithy_http::header::set_header_if_absent(
                builder,
                http::header::CONTENT_LENGTH,
                content_length,
            );
        }
        // expect: the method/URI/headers were built from validated inputs above,
        // so a build failure here would be a codegen bug, not a runtime condition.
        builder.body(body).expect("should be valid request")
    }
    /// Creates a new builder-style object to manufacture [`ListTagsForResourceInput`](crate::input::ListTagsForResourceInput)
    pub fn builder() -> crate::input::list_tags_for_resource_input::Builder {
        crate::input::list_tags_for_resource_input::Builder::default()
    }
}
/// See [`RejectInvitationInput`](crate::input::RejectInvitationInput)
pub mod reject_invitation_input {
    /// A builder for [`RejectInvitationInput`](crate::input::RejectInvitationInput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        // Required at request-build time (URI label); absence is reported by
        // uri_base as a BuildError, not here.
        pub(crate) invitation_id: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// <p>The unique identifier of the invitation to reject.</p>
        pub fn invitation_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.invitation_id = Some(input.into());
            self
        }
        // Raw setter variant: accepts Option so callers can also clear the field.
        pub fn set_invitation_id(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.invitation_id = input;
            self
        }
        /// Consumes the builder and constructs a
[`RejectInvitationInput`](crate::input::RejectInvitationInput) pub fn build( self, ) -> std::result::Result< crate::input::RejectInvitationInput, smithy_http::operation::BuildError, > { Ok(crate::input::RejectInvitationInput { invitation_id: self.invitation_id, }) } } } #[doc(hidden)] pub type RejectInvitationInputOperationOutputAlias = crate::operation::RejectInvitation; #[doc(hidden)] pub type RejectInvitationInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl RejectInvitationInput { /// Consumes the builder and constructs an Operation<[`RejectInvitation`](crate::operation::RejectInvitation)> #[allow(clippy::let_and_return)] pub fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< smithy_http::operation::Operation< crate::operation::RejectInvitation, aws_http::AwsErrorRetryPolicy, >, smithy_http::operation::BuildError, > { Ok({ let properties = smithy_http::property_bag::SharedPropertyBag::new(); let request = self.request_builder_base()?; let body = smithy_http::body::SdkBody::from(""); let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = smithy_http::operation::Request::from_parts( request.map(smithy_http::body::SdkBody::from), properties, ); request.properties_mut().insert( aws_http::user_agent::AwsUserAgent::new_from_environment( crate::API_METADATA.clone(), ), ); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = smithy_http::operation::Operation::new( request, 
crate::operation::RejectInvitation::new(), ) .with_metadata(smithy_http::operation::Metadata::new( "RejectInvitation", "managedblockchain", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); op }) } fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> { let input_43 = &self.invitation_id; let input_43 = input_43 .as_ref() .ok_or(smithy_http::operation::BuildError::MissingField { field: "invitation_id", details: "cannot be empty or unset", })?; let invitation_id = smithy_http::label::fmt_string(input_43, false); if invitation_id.is_empty() { return Err(smithy_http::operation::BuildError::MissingField { field: "invitation_id", details: "cannot be empty or unset", }); } write!( output, "/invitations/{InvitationId}", InvitationId = invitation_id ) .expect("formatting should succeed"); Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( &self, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> { let mut uri = String::new(); self.uri_base(&mut uri)?; Ok(builder.method("DELETE").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( &self, ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> { let mut builder = self.update_http_builder(http::request::Builder::new())?; builder = smithy_http::header::set_header_if_absent( builder, http::header::HeaderName::from_static("content-type"), "application/json", ); Ok(builder) } fn assemble( mut builder: http::request::Builder, body: smithy_http::body::SdkBody, ) -> http::request::Request<smithy_http::body::SdkBody> { if let Some(content_length) = body.content_length() { builder = smithy_http::header::set_header_if_absent( builder, http::header::CONTENT_LENGTH, content_length, ); } builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture 
[`RejectInvitationInput`](crate::input::RejectInvitationInput)
    pub fn builder() -> crate::input::reject_invitation_input::Builder {
        crate::input::reject_invitation_input::Builder::default()
    }
}
/// See [`TagResourceInput`](crate::input::TagResourceInput)
pub mod tag_resource_input {
    // NOTE(review): smithy-rs generated code — comments here are lost on regeneration.
    /// A builder for [`TagResourceInput`](crate::input::TagResourceInput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        // ARN goes into the URI path label; required at request-build time.
        pub(crate) resource_arn: std::option::Option<std::string::String>,
        // Tag key/value pairs serialized into the JSON request body.
        pub(crate) tags: std::option::Option<
            std::collections::HashMap<std::string::String, std::string::String>,
        >,
    }
    impl Builder {
        /// <p>The Amazon Resource Name (ARN) of the resource. For more information about ARNs and their format, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>AWS General Reference</i>.</p>
        pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self {
            self.resource_arn = Some(input.into());
            self
        }
        // Raw setter variant: accepts Option so callers can also clear the field.
        pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.resource_arn = input;
            self
        }
        // Adds a single key/value pair; repeated calls accumulate into the same
        // map (a later duplicate key overwrites the earlier value).
        pub fn tags(
            mut self,
            k: impl Into<std::string::String>,
            v: impl Into<std::string::String>,
        ) -> Self {
            let mut hash_map = self.tags.unwrap_or_default();
            hash_map.insert(k.into(), v.into());
            self.tags = Some(hash_map);
            self
        }
        // Replaces the whole map at once (or clears it with None).
        pub fn set_tags(
            mut self,
            input: std::option::Option<
                std::collections::HashMap<std::string::String, std::string::String>,
            >,
        ) -> Self {
            self.tags = input;
            self
        }
        /// Consumes the builder and constructs a [`TagResourceInput`](crate::input::TagResourceInput)
        // Infallible in practice: no validation happens here; a missing
        // resource_arn only surfaces later from uri_base as a BuildError.
        pub fn build(
            self,
        ) -> std::result::Result<crate::input::TagResourceInput, smithy_http::operation::BuildError>
        {
            Ok(crate::input::TagResourceInput {
                resource_arn: self.resource_arn,
                tags: self.tags,
            })
        }
    }
}
#[doc(hidden)]
pub type TagResourceInputOperationOutputAlias = crate::operation::TagResource;
#[doc(hidden)] pub type TagResourceInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl TagResourceInput { /// Consumes the builder and constructs an Operation<[`TagResource`](crate::operation::TagResource)> #[allow(clippy::let_and_return)] pub fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< smithy_http::operation::Operation< crate::operation::TagResource, aws_http::AwsErrorRetryPolicy, >, smithy_http::operation::BuildError, > { Ok({ let properties = smithy_http::property_bag::SharedPropertyBag::new(); let request = self.request_builder_base()?; let body = crate::operation_ser::serialize_operation_crate_operation_tag_resource(&self) .map_err(|err| { smithy_http::operation::BuildError::SerializationError(err.into()) })?; let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = smithy_http::operation::Request::from_parts( request.map(smithy_http::body::SdkBody::from), properties, ); request.properties_mut().insert( aws_http::user_agent::AwsUserAgent::new_from_environment( crate::API_METADATA.clone(), ), ); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = smithy_http::operation::Operation::new( request, crate::operation::TagResource::new(), ) .with_metadata(smithy_http::operation::Metadata::new( "TagResource", "managedblockchain", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); op }) } fn uri_base(&self, output: &mut String) -> Result<(), 
smithy_http::operation::BuildError> { let input_44 = &self.resource_arn; let input_44 = input_44 .as_ref() .ok_or(smithy_http::operation::BuildError::MissingField { field: "resource_arn", details: "cannot be empty or unset", })?; let resource_arn = smithy_http::label::fmt_string(input_44, false); if resource_arn.is_empty() { return Err(smithy_http::operation::BuildError::MissingField { field: "resource_arn", details: "cannot be empty or unset", }); } write!(output, "/tags/{ResourceArn}", ResourceArn = resource_arn) .expect("formatting should succeed"); Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( &self, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> { let mut uri = String::new(); self.uri_base(&mut uri)?; Ok(builder.method("POST").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( &self, ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> { let mut builder = self.update_http_builder(http::request::Builder::new())?; builder = smithy_http::header::set_header_if_absent( builder, http::header::HeaderName::from_static("content-type"), "application/json", ); Ok(builder) } fn assemble( mut builder: http::request::Builder, body: smithy_http::body::SdkBody, ) -> http::request::Request<smithy_http::body::SdkBody> { if let Some(content_length) = body.content_length() { builder = smithy_http::header::set_header_if_absent( builder, http::header::CONTENT_LENGTH, content_length, ); } builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`TagResourceInput`](crate::input::TagResourceInput) pub fn builder() -> crate::input::tag_resource_input::Builder { crate::input::tag_resource_input::Builder::default() } } /// See [`UntagResourceInput`](crate::input::UntagResourceInput) pub mod untag_resource_input { /// A builder for [`UntagResourceInput`](crate::input::UntagResourceInput) 
#[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        // ARN goes into the URI path label; required at request-build time.
        pub(crate) resource_arn: std::option::Option<std::string::String>,
        // Tag keys to remove; serialized as repeated "tagKeys" query parameters.
        pub(crate) tag_keys: std::option::Option<std::vec::Vec<std::string::String>>,
    }
    impl Builder {
        /// <p>The Amazon Resource Name (ARN) of the resource. For more information about ARNs and their format, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>AWS General Reference</i>.</p>
        pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self {
            self.resource_arn = Some(input.into());
            self
        }
        // Raw setter variant: accepts Option so callers can also clear the field.
        pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.resource_arn = input;
            self
        }
        // Appends one key; repeated calls accumulate into the same Vec.
        pub fn tag_keys(mut self, input: impl Into<std::string::String>) -> Self {
            let mut v = self.tag_keys.unwrap_or_default();
            v.push(input.into());
            self.tag_keys = Some(v);
            self
        }
        // Replaces the whole list at once (or clears it with None).
        pub fn set_tag_keys(
            mut self,
            input: std::option::Option<std::vec::Vec<std::string::String>>,
        ) -> Self {
            self.tag_keys = input;
            self
        }
        /// Consumes the builder and constructs a [`UntagResourceInput`](crate::input::UntagResourceInput)
        // Infallible in practice: no validation here; a missing resource_arn is
        // reported later by uri_base as a BuildError.
        pub fn build(
            self,
        ) -> std::result::Result<crate::input::UntagResourceInput, smithy_http::operation::BuildError>
        {
            Ok(crate::input::UntagResourceInput {
                resource_arn: self.resource_arn,
                tag_keys: self.tag_keys,
            })
        }
    }
}
#[doc(hidden)]
pub type UntagResourceInputOperationOutputAlias = crate::operation::UntagResource;
#[doc(hidden)]
pub type UntagResourceInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl UntagResourceInput {
    /// Consumes the builder and constructs an Operation<[`UntagResource`](crate::operation::UntagResource)>
    #[allow(clippy::let_and_return)]
    pub fn make_operation(
        &self,
        _config: &crate::config::Config,
    ) -> std::result::Result<
        smithy_http::operation::Operation<
            crate::operation::UntagResource,
            aws_http::AwsErrorRetryPolicy,
        >,
smithy_http::operation::BuildError, > { Ok({ let properties = smithy_http::property_bag::SharedPropertyBag::new(); let request = self.request_builder_base()?; let body = smithy_http::body::SdkBody::from(""); let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = smithy_http::operation::Request::from_parts( request.map(smithy_http::body::SdkBody::from), properties, ); request.properties_mut().insert( aws_http::user_agent::AwsUserAgent::new_from_environment( crate::API_METADATA.clone(), ), ); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = smithy_http::operation::Operation::new( request, crate::operation::UntagResource::new(), ) .with_metadata(smithy_http::operation::Metadata::new( "UntagResource", "managedblockchain", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); op }) } fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> { let input_45 = &self.resource_arn; let input_45 = input_45 .as_ref() .ok_or(smithy_http::operation::BuildError::MissingField { field: "resource_arn", details: "cannot be empty or unset", })?; let resource_arn = smithy_http::label::fmt_string(input_45, false); if resource_arn.is_empty() { return Err(smithy_http::operation::BuildError::MissingField { field: "resource_arn", details: "cannot be empty or unset", }); } write!(output, "/tags/{ResourceArn}", ResourceArn = resource_arn) .expect("formatting should succeed"); Ok(()) } fn uri_query(&self, 
mut output: &mut String) { let mut query = smithy_http::query::Writer::new(&mut output); if let Some(inner_46) = &self.tag_keys { for inner_47 in inner_46 { query.push_kv("tagKeys", &smithy_http::query::fmt_string(&inner_47)); } } } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( &self, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> { let mut uri = String::new(); self.uri_base(&mut uri)?; self.uri_query(&mut uri); Ok(builder.method("DELETE").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( &self, ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> { let mut builder = self.update_http_builder(http::request::Builder::new())?; builder = smithy_http::header::set_header_if_absent( builder, http::header::HeaderName::from_static("content-type"), "application/json", ); Ok(builder) } fn assemble( mut builder: http::request::Builder, body: smithy_http::body::SdkBody, ) -> http::request::Request<smithy_http::body::SdkBody> { if let Some(content_length) = body.content_length() { builder = smithy_http::header::set_header_if_absent( builder, http::header::CONTENT_LENGTH, content_length, ); } builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`UntagResourceInput`](crate::input::UntagResourceInput) pub fn builder() -> crate::input::untag_resource_input::Builder { crate::input::untag_resource_input::Builder::default() } } /// See [`UpdateMemberInput`](crate::input::UpdateMemberInput) pub mod update_member_input { /// A builder for [`UpdateMemberInput`](crate::input::UpdateMemberInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) network_id: std::option::Option<std::string::String>, pub(crate) member_id: std::option::Option<std::string::String>, pub(crate) log_publishing_configuration: 
std::option::Option<crate::model::MemberLogPublishingConfiguration>, } impl Builder { /// <p>The unique identifier of the Managed Blockchain network to which the member belongs.</p> pub fn network_id(mut self, input: impl Into<std::string::String>) -> Self { self.network_id = Some(input.into()); self } pub fn set_network_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.network_id = input; self } /// <p>The unique identifier of the member.</p> pub fn member_id(mut self, input: impl Into<std::string::String>) -> Self { self.member_id = Some(input.into()); self } pub fn set_member_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.member_id = input; self } /// <p>Configuration properties for publishing to Amazon CloudWatch Logs.</p> pub fn log_publishing_configuration( mut self, input: crate::model::MemberLogPublishingConfiguration, ) -> Self { self.log_publishing_configuration = Some(input); self } pub fn set_log_publishing_configuration( mut self, input: std::option::Option<crate::model::MemberLogPublishingConfiguration>, ) -> Self { self.log_publishing_configuration = input; self } /// Consumes the builder and constructs a [`UpdateMemberInput`](crate::input::UpdateMemberInput) pub fn build( self, ) -> std::result::Result<crate::input::UpdateMemberInput, smithy_http::operation::BuildError> { Ok(crate::input::UpdateMemberInput { network_id: self.network_id, member_id: self.member_id, log_publishing_configuration: self.log_publishing_configuration, }) } } } #[doc(hidden)] pub type UpdateMemberInputOperationOutputAlias = crate::operation::UpdateMember; #[doc(hidden)] pub type UpdateMemberInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl UpdateMemberInput { /// Consumes the builder and constructs an Operation<[`UpdateMember`](crate::operation::UpdateMember)> #[allow(clippy::let_and_return)] pub fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< 
smithy_http::operation::Operation< crate::operation::UpdateMember, aws_http::AwsErrorRetryPolicy, >, smithy_http::operation::BuildError, > { Ok({ let properties = smithy_http::property_bag::SharedPropertyBag::new(); let request = self.request_builder_base()?; let body = crate::operation_ser::serialize_operation_crate_operation_update_member(&self) .map_err(|err| { smithy_http::operation::BuildError::SerializationError(err.into()) })?; let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = smithy_http::operation::Request::from_parts( request.map(smithy_http::body::SdkBody::from), properties, ); request.properties_mut().insert( aws_http::user_agent::AwsUserAgent::new_from_environment( crate::API_METADATA.clone(), ), ); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = smithy_http::operation::Operation::new( request, crate::operation::UpdateMember::new(), ) .with_metadata(smithy_http::operation::Metadata::new( "UpdateMember", "managedblockchain", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); op }) } fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> { let input_48 = &self.network_id; let input_48 = input_48 .as_ref() .ok_or(smithy_http::operation::BuildError::MissingField { field: "network_id", details: "cannot be empty or unset", })?; let network_id = smithy_http::label::fmt_string(input_48, false); if network_id.is_empty() { return 
Err(smithy_http::operation::BuildError::MissingField { field: "network_id", details: "cannot be empty or unset", }); } let input_49 = &self.member_id; let input_49 = input_49 .as_ref() .ok_or(smithy_http::operation::BuildError::MissingField { field: "member_id", details: "cannot be empty or unset", })?; let member_id = smithy_http::label::fmt_string(input_49, false); if member_id.is_empty() { return Err(smithy_http::operation::BuildError::MissingField { field: "member_id", details: "cannot be empty or unset", }); } write!( output, "/networks/{NetworkId}/members/{MemberId}", NetworkId = network_id, MemberId = member_id ) .expect("formatting should succeed"); Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( &self, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> { let mut uri = String::new(); self.uri_base(&mut uri)?; Ok(builder.method("PATCH").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( &self, ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> { let mut builder = self.update_http_builder(http::request::Builder::new())?; builder = smithy_http::header::set_header_if_absent( builder, http::header::HeaderName::from_static("content-type"), "application/json", ); Ok(builder) } fn assemble( mut builder: http::request::Builder, body: smithy_http::body::SdkBody, ) -> http::request::Request<smithy_http::body::SdkBody> { if let Some(content_length) = body.content_length() { builder = smithy_http::header::set_header_if_absent( builder, http::header::CONTENT_LENGTH, content_length, ); } builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`UpdateMemberInput`](crate::input::UpdateMemberInput) pub fn builder() -> crate::input::update_member_input::Builder { crate::input::update_member_input::Builder::default() } } /// See [`UpdateNodeInput`](crate::input::UpdateNodeInput) pub 
mod update_node_input {
    /// A builder for [`UpdateNodeInput`](crate::input::UpdateNodeInput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) network_id: std::option::Option<std::string::String>,
        pub(crate) member_id: std::option::Option<std::string::String>,
        pub(crate) node_id: std::option::Option<std::string::String>,
        pub(crate) log_publishing_configuration:
            std::option::Option<crate::model::NodeLogPublishingConfiguration>,
    }
    impl Builder {
        /// <p>The unique identifier of the network that the node is on.</p>
        pub fn network_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.network_id = Some(input.into());
            self
        }
        /// Overwrites `network_id` with the given optional value.
        pub fn set_network_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.network_id = input;
            self
        }
        /// <p>The unique identifier of the member that owns the node.</p>
        /// <p>Applies only to Hyperledger Fabric.</p>
        pub fn member_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.member_id = Some(input.into());
            self
        }
        /// Overwrites `member_id` with the given optional value.
        pub fn set_member_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.member_id = input;
            self
        }
        /// <p>The unique identifier of the node.</p>
        pub fn node_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.node_id = Some(input.into());
            self
        }
        /// Overwrites `node_id` with the given optional value.
        pub fn set_node_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.node_id = input;
            self
        }
        /// <p>Configuration properties for publishing to Amazon CloudWatch Logs.</p>
        pub fn log_publishing_configuration(
            mut self,
            input: crate::model::NodeLogPublishingConfiguration,
        ) -> Self {
            self.log_publishing_configuration = Some(input);
            self
        }
        /// Overwrites `log_publishing_configuration` with the given optional value.
        pub fn set_log_publishing_configuration(
            mut self,
            input: std::option::Option<crate::model::NodeLogPublishingConfiguration>,
        ) -> Self {
            self.log_publishing_configuration = input;
            self
        }
        /// Consumes the builder and constructs a [`UpdateNodeInput`](crate::input::UpdateNodeInput)
        pub fn build(
            self,
        ) -> std::result::Result<crate::input::UpdateNodeInput, smithy_http::operation::BuildError>
        {
            Ok(crate::input::UpdateNodeInput {
                network_id: self.network_id,
                member_id: self.member_id,
                node_id: self.node_id,
                log_publishing_configuration: self.log_publishing_configuration,
            })
        }
    }
}
#[doc(hidden)]
pub type UpdateNodeInputOperationOutputAlias = crate::operation::UpdateNode;
#[doc(hidden)]
pub type UpdateNodeInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl UpdateNodeInput {
    /// Consumes the builder and constructs an Operation<[`UpdateNode`](crate::operation::UpdateNode)>
    #[allow(clippy::let_and_return)]
    pub fn make_operation(
        &self,
        _config: &crate::config::Config,
    ) -> std::result::Result<
        smithy_http::operation::Operation<
            crate::operation::UpdateNode,
            aws_http::AwsErrorRetryPolicy,
        >,
        smithy_http::operation::BuildError,
    > {
        Ok({
            let properties = smithy_http::property_bag::SharedPropertyBag::new();
            // Build the HTTP request parts, then serialize the JSON body.
            let request = self.request_builder_base()?;
            let body =
                crate::operation_ser::serialize_operation_crate_operation_update_node(&self)
                    .map_err(|err| {
                        smithy_http::operation::BuildError::SerializationError(err.into())
                    })?;
            let request = Self::assemble(request, body);
            #[allow(unused_mut)]
            let mut request = smithy_http::operation::Request::from_parts(
                request.map(smithy_http::body::SdkBody::from),
                properties,
            );
            // Attach user agent, signing configuration, signing service,
            // endpoint resolver, region and credentials to the property bag.
            request.properties_mut().insert(
                aws_http::user_agent::AwsUserAgent::new_from_environment(
                    crate::API_METADATA.clone(),
                ),
            );
            #[allow(unused_mut)]
            let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
            request.properties_mut().insert(signing_config);
            request
                .properties_mut()
                .insert(aws_types::SigningService::from_static(
                    _config.signing_service(),
                ));
            aws_endpoint::set_endpoint_resolver(
                &mut request.properties_mut(),
                _config.endpoint_resolver.clone(),
            );
            if let Some(region) = &_config.region {
                request.properties_mut().insert(region.clone());
            }
            aws_auth::set_provider(
                &mut request.properties_mut(),
                _config.credentials_provider.clone(),
            );
            let op = smithy_http::operation::Operation::new(
                request,
                crate::operation::UpdateNode::new(),
            )
            .with_metadata(smithy_http::operation::Metadata::new(
                "UpdateNode",
                "managedblockchain",
            ));
            let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
            op
        })
    }
    // Renders the request path `/networks/{NetworkId}/nodes/{NodeId}`;
    // both labels must be present and non-empty or a MissingField error is returned.
    fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
        let input_50 = &self.network_id;
        let input_50 = input_50
            .as_ref()
            .ok_or(smithy_http::operation::BuildError::MissingField {
                field: "network_id",
                details: "cannot be empty or unset",
            })?;
        let network_id = smithy_http::label::fmt_string(input_50, false);
        if network_id.is_empty() {
            return Err(smithy_http::operation::BuildError::MissingField {
                field: "network_id",
                details: "cannot be empty or unset",
            });
        }
        let input_51 = &self.node_id;
        let input_51 = input_51
            .as_ref()
            .ok_or(smithy_http::operation::BuildError::MissingField {
                field: "node_id",
                details: "cannot be empty or unset",
            })?;
        let node_id = smithy_http::label::fmt_string(input_51, false);
        if node_id.is_empty() {
            return Err(smithy_http::operation::BuildError::MissingField {
                field: "node_id",
                details: "cannot be empty or unset",
            });
        }
        write!(
            output,
            "/networks/{NetworkId}/nodes/{NodeId}",
            NetworkId = network_id,
            NodeId = node_id
        )
        .expect("formatting should succeed");
        Ok(())
    }
    // Sets the HTTP method (PATCH) and URI on the request builder.
    #[allow(clippy::unnecessary_wraps)]
    fn update_http_builder(
        &self,
        builder: http::request::Builder,
    ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
        let mut uri = String::new();
        self.uri_base(&mut uri)?;
        Ok(builder.method("PATCH").uri(uri))
    }
    // Seeds a request builder with the JSON content-type header.
    #[allow(clippy::unnecessary_wraps)]
    fn request_builder_base(
        &self,
    ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
        let mut builder = self.update_http_builder(http::request::Builder::new())?;
        builder = smithy_http::header::set_header_if_absent(
            builder,
            http::header::HeaderName::from_static("content-type"),
            "application/json",
        );
        Ok(builder)
    }
    // Attaches the serialized body, adding Content-Length when the size is known.
    fn assemble(
        mut builder: http::request::Builder,
        body: smithy_http::body::SdkBody,
    ) -> http::request::Request<smithy_http::body::SdkBody> {
        if let Some(content_length) = body.content_length() {
            builder = smithy_http::header::set_header_if_absent(
                builder,
                http::header::CONTENT_LENGTH,
                content_length,
            );
        }
        builder.body(body).expect("should be valid request")
    }
    /// Creates a new builder-style object to manufacture [`UpdateNodeInput`](crate::input::UpdateNodeInput)
    pub fn builder() -> crate::input::update_node_input::Builder {
        crate::input::update_node_input::Builder::default()
    }
}
/// See [`VoteOnProposalInput`](crate::input::VoteOnProposalInput)
pub mod vote_on_proposal_input {
    /// A builder for [`VoteOnProposalInput`](crate::input::VoteOnProposalInput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) network_id: std::option::Option<std::string::String>,
        pub(crate) proposal_id: std::option::Option<std::string::String>,
        pub(crate) voter_member_id: std::option::Option<std::string::String>,
        pub(crate) vote: std::option::Option<crate::model::VoteValue>,
    }
    impl Builder {
        /// <p>
        /// The unique identifier of the network.
        /// </p>
        pub fn network_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.network_id = Some(input.into());
            self
        }
        /// Overwrites `network_id` with the given optional value.
        pub fn set_network_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.network_id = input;
            self
        }
        /// <p>
        /// The unique identifier of the proposal.
        /// </p>
        pub fn proposal_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.proposal_id = Some(input.into());
            self
        }
        /// Overwrites `proposal_id` with the given optional value.
        pub fn set_proposal_id(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.proposal_id = input;
            self
        }
        /// <p>The unique identifier of the member casting the vote.
        /// </p>
        pub fn voter_member_id(mut self, input: impl Into<std::string::String>) -> Self {
            self.voter_member_id = Some(input.into());
            self
        }
        /// Overwrites `voter_member_id` with the given optional value.
        pub fn set_voter_member_id(
            mut self,
            input: std::option::Option<std::string::String>,
        ) -> Self {
            self.voter_member_id = input;
            self
        }
        /// <p>
        /// The value of the vote.
        /// </p>
        pub fn vote(mut self, input: crate::model::VoteValue) -> Self {
            self.vote = Some(input);
            self
        }
        /// Overwrites `vote` with the given optional value.
        pub fn set_vote(mut self, input: std::option::Option<crate::model::VoteValue>) -> Self {
            self.vote = input;
            self
        }
        /// Consumes the builder and constructs a [`VoteOnProposalInput`](crate::input::VoteOnProposalInput)
        pub fn build(
            self,
        ) -> std::result::Result<
            crate::input::VoteOnProposalInput,
            smithy_http::operation::BuildError,
        > {
            Ok(crate::input::VoteOnProposalInput {
                network_id: self.network_id,
                proposal_id: self.proposal_id,
                voter_member_id: self.voter_member_id,
                vote: self.vote,
            })
        }
    }
}
#[doc(hidden)]
pub type VoteOnProposalInputOperationOutputAlias = crate::operation::VoteOnProposal;
#[doc(hidden)]
pub type VoteOnProposalInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl VoteOnProposalInput {
    /// Consumes the builder and constructs an Operation<[`VoteOnProposal`](crate::operation::VoteOnProposal)>
    #[allow(clippy::let_and_return)]
    pub fn make_operation(
        &self,
        _config: &crate::config::Config,
    ) -> std::result::Result<
        smithy_http::operation::Operation<
            crate::operation::VoteOnProposal,
            aws_http::AwsErrorRetryPolicy,
        >,
        smithy_http::operation::BuildError,
    > {
        Ok({
            let properties = smithy_http::property_bag::SharedPropertyBag::new();
            // Build the HTTP request parts, then serialize the JSON body.
            let request = self.request_builder_base()?;
            let body =
                crate::operation_ser::serialize_operation_crate_operation_vote_on_proposal(&self)
                    .map_err(|err| {
                        smithy_http::operation::BuildError::SerializationError(err.into())
                    })?;
            let request = Self::assemble(request, body);
            #[allow(unused_mut)]
            let mut request = smithy_http::operation::Request::from_parts(
                request.map(smithy_http::body::SdkBody::from),
                properties,
            );
            // Attach user agent, signing configuration, signing service,
            // endpoint resolver, region and credentials to the property bag.
            request.properties_mut().insert(
                aws_http::user_agent::AwsUserAgent::new_from_environment(
                    crate::API_METADATA.clone(),
                ),
            );
            #[allow(unused_mut)]
            let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
            request.properties_mut().insert(signing_config);
            request
                .properties_mut()
                .insert(aws_types::SigningService::from_static(
                    _config.signing_service(),
                ));
            aws_endpoint::set_endpoint_resolver(
                &mut request.properties_mut(),
                _config.endpoint_resolver.clone(),
            );
            if let Some(region) = &_config.region {
                request.properties_mut().insert(region.clone());
            }
            aws_auth::set_provider(
                &mut request.properties_mut(),
                _config.credentials_provider.clone(),
            );
            let op = smithy_http::operation::Operation::new(
                request,
                crate::operation::VoteOnProposal::new(),
            )
            .with_metadata(smithy_http::operation::Metadata::new(
                "VoteOnProposal",
                "managedblockchain",
            ));
            let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
            op
        })
    }
    // Renders `/networks/{NetworkId}/proposals/{ProposalId}/votes`;
    // both labels must be present and non-empty or a MissingField error is returned.
    fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
        let input_52 = &self.network_id;
        let input_52 = input_52
            .as_ref()
            .ok_or(smithy_http::operation::BuildError::MissingField {
                field: "network_id",
                details: "cannot be empty or unset",
            })?;
        let network_id = smithy_http::label::fmt_string(input_52, false);
        if network_id.is_empty() {
            return Err(smithy_http::operation::BuildError::MissingField {
                field: "network_id",
                details: "cannot be empty or unset",
            });
        }
        let input_53 = &self.proposal_id;
        let input_53 = input_53
            .as_ref()
            .ok_or(smithy_http::operation::BuildError::MissingField {
                field: "proposal_id",
                details: "cannot be empty or unset",
            })?;
        let proposal_id = smithy_http::label::fmt_string(input_53, false);
        if proposal_id.is_empty() {
            return Err(smithy_http::operation::BuildError::MissingField {
                field: "proposal_id",
                details: "cannot be empty or unset",
            });
        }
        write!(
            output,
            "/networks/{NetworkId}/proposals/{ProposalId}/votes",
            NetworkId = network_id,
            ProposalId =
proposal_id
        )
        .expect("formatting should succeed");
        Ok(())
    }
    // Sets the HTTP method (POST) and URI on the request builder.
    #[allow(clippy::unnecessary_wraps)]
    fn update_http_builder(
        &self,
        builder: http::request::Builder,
    ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
        let mut uri = String::new();
        self.uri_base(&mut uri)?;
        Ok(builder.method("POST").uri(uri))
    }
    // Seeds a request builder with the JSON content-type header.
    #[allow(clippy::unnecessary_wraps)]
    fn request_builder_base(
        &self,
    ) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
        let mut builder = self.update_http_builder(http::request::Builder::new())?;
        builder = smithy_http::header::set_header_if_absent(
            builder,
            http::header::HeaderName::from_static("content-type"),
            "application/json",
        );
        Ok(builder)
    }
    // Attaches the serialized body, adding Content-Length when the size is known.
    fn assemble(
        mut builder: http::request::Builder,
        body: smithy_http::body::SdkBody,
    ) -> http::request::Request<smithy_http::body::SdkBody> {
        if let Some(content_length) = body.content_length() {
            builder = smithy_http::header::set_header_if_absent(
                builder,
                http::header::CONTENT_LENGTH,
                content_length,
            );
        }
        builder.body(body).expect("should be valid request")
    }
    /// Creates a new builder-style object to manufacture [`VoteOnProposalInput`](crate::input::VoteOnProposalInput)
    pub fn builder() -> crate::input::vote_on_proposal_input::Builder {
        crate::input::vote_on_proposal_input::Builder::default()
    }
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct VoteOnProposalInput {
    /// <p>
    /// The unique identifier of the network.
    /// </p>
    pub network_id: std::option::Option<std::string::String>,
    /// <p>
    /// The unique identifier of the proposal.
    /// </p>
    pub proposal_id: std::option::Option<std::string::String>,
    /// <p>The unique identifier of the member casting the vote.
    /// </p>
    pub voter_member_id: std::option::Option<std::string::String>,
    /// <p>
    /// The value of the vote.
    /// </p>
    pub vote: std::option::Option<crate::model::VoteValue>,
}
// Manual Debug implementation listing every field by name.
impl std::fmt::Debug for VoteOnProposalInput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("VoteOnProposalInput");
        formatter.field("network_id", &self.network_id);
        formatter.field("proposal_id", &self.proposal_id);
        formatter.field("voter_member_id", &self.voter_member_id);
        formatter.field("vote", &self.vote);
        formatter.finish()
    }
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct UpdateNodeInput {
    /// <p>The unique identifier of the network that the node is on.</p>
    pub network_id: std::option::Option<std::string::String>,
    /// <p>The unique identifier of the member that owns the node.</p>
    /// <p>Applies only to Hyperledger Fabric.</p>
    pub member_id: std::option::Option<std::string::String>,
    /// <p>The unique identifier of the node.</p>
    pub node_id: std::option::Option<std::string::String>,
    /// <p>Configuration properties for publishing to Amazon CloudWatch Logs.</p>
    pub log_publishing_configuration:
        std::option::Option<crate::model::NodeLogPublishingConfiguration>,
}
impl std::fmt::Debug for UpdateNodeInput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("UpdateNodeInput");
        formatter.field("network_id", &self.network_id);
        formatter.field("member_id", &self.member_id);
        formatter.field("node_id", &self.node_id);
        formatter.field(
            "log_publishing_configuration",
            &self.log_publishing_configuration,
        );
        formatter.finish()
    }
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct UpdateMemberInput {
    /// <p>The unique identifier of the Managed Blockchain network to which the member belongs.</p>
    pub network_id: std::option::Option<std::string::String>,
    /// <p>The unique identifier of the member.</p>
    pub member_id: std::option::Option<std::string::String>,
    /// <p>Configuration properties for publishing to Amazon CloudWatch Logs.</p>
    pub log_publishing_configuration:
        std::option::Option<crate::model::MemberLogPublishingConfiguration>,
}
impl std::fmt::Debug for UpdateMemberInput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("UpdateMemberInput");
        formatter.field("network_id", &self.network_id);
        formatter.field("member_id", &self.member_id);
        formatter.field(
            "log_publishing_configuration",
            &self.log_publishing_configuration,
        );
        formatter.finish()
    }
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct UntagResourceInput {
    /// <p>The Amazon Resource Name (ARN) of the resource. For more information about ARNs and their format, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>AWS General Reference</i>.</p>
    pub resource_arn: std::option::Option<std::string::String>,
    /// <p>The tag keys.</p>
    pub tag_keys: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl std::fmt::Debug for UntagResourceInput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("UntagResourceInput");
        formatter.field("resource_arn", &self.resource_arn);
        formatter.field("tag_keys", &self.tag_keys);
        formatter.finish()
    }
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct TagResourceInput {
    /// <p>The Amazon Resource Name (ARN) of the resource. For more information about ARNs and their format, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>AWS General Reference</i>.</p>
    pub resource_arn: std::option::Option<std::string::String>,
    /// <p>The tags to assign to the specified resource. Tag values can be empty, for example, <code>"MyTagKey" : ""</code>. You can specify multiple key-value pairs in a single request, with an overall maximum of 50 tags added to each resource.</p>
    pub tags:
        std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>,
}
impl std::fmt::Debug for TagResourceInput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("TagResourceInput");
        formatter.field("resource_arn", &self.resource_arn);
        formatter.field("tags", &self.tags);
        formatter.finish()
    }
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct RejectInvitationInput {
    /// <p>The unique identifier of the invitation to reject.</p>
    pub invitation_id: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for RejectInvitationInput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("RejectInvitationInput");
        formatter.field("invitation_id", &self.invitation_id);
        formatter.finish()
    }
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ListTagsForResourceInput {
    /// <p>The Amazon Resource Name (ARN) of the resource. For more information about ARNs and their format, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html">Amazon Resource Names (ARNs)</a> in the <i>AWS General Reference</i>.</p>
    pub resource_arn: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for ListTagsForResourceInput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("ListTagsForResourceInput");
        formatter.field("resource_arn", &self.resource_arn);
        formatter.finish()
    }
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ListProposalVotesInput {
    /// <p>
    /// The unique identifier of the network.
    /// </p>
    pub network_id: std::option::Option<std::string::String>,
    /// <p>
    /// The unique identifier of the proposal.
    /// </p>
    pub proposal_id: std::option::Option<std::string::String>,
    /// <p>
    /// The maximum number of votes to return.
    /// </p>
    pub max_results: std::option::Option<i32>,
    /// <p>
    /// The pagination token that indicates the next set of results to retrieve.
    /// </p>
    pub next_token: std::option::Option<std::string::String>,
}
// Manual Debug implementation listing every field by name.
impl std::fmt::Debug for ListProposalVotesInput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("ListProposalVotesInput");
        formatter.field("network_id", &self.network_id);
        formatter.field("proposal_id", &self.proposal_id);
        formatter.field("max_results", &self.max_results);
        formatter.field("next_token", &self.next_token);
        formatter.finish()
    }
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ListProposalsInput {
    /// <p>
    /// The unique identifier of the network.
    /// </p>
    pub network_id: std::option::Option<std::string::String>,
    /// <p>
    /// The maximum number of proposals to return.
    /// </p>
    pub max_results: std::option::Option<i32>,
    /// <p>
    /// The pagination token that indicates the next set of results to retrieve.
    /// </p>
    pub next_token: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for ListProposalsInput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("ListProposalsInput");
        formatter.field("network_id", &self.network_id);
        formatter.field("max_results", &self.max_results);
        formatter.field("next_token", &self.next_token);
        formatter.finish()
    }
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ListNodesInput {
    /// <p>The unique identifier of the network for which to list nodes.</p>
    pub network_id: std::option::Option<std::string::String>,
    /// <p>The unique identifier of the member who owns the nodes to list.</p>
    /// <p>Applies only to Hyperledger Fabric and is required for Hyperledger Fabric.</p>
    pub member_id: std::option::Option<std::string::String>,
    /// <p>An optional status specifier. If provided, only nodes currently in this status are listed.</p>
    pub status: std::option::Option<crate::model::NodeStatus>,
    /// <p>The maximum number of nodes to list.</p>
    pub max_results: std::option::Option<i32>,
    /// <p>The pagination token that indicates the next set of results to retrieve.</p>
    pub next_token: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for ListNodesInput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("ListNodesInput");
        formatter.field("network_id", &self.network_id);
        formatter.field("member_id", &self.member_id);
        formatter.field("status", &self.status);
        formatter.field("max_results", &self.max_results);
        formatter.field("next_token", &self.next_token);
        formatter.finish()
    }
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ListNetworksInput {
    /// <p>The name of the network.</p>
    pub name: std::option::Option<std::string::String>,
    /// <p>An optional framework specifier. If provided, only networks of this framework type are listed.</p>
    pub framework: std::option::Option<crate::model::Framework>,
    /// <p>An optional status specifier. If provided, only networks currently in this status are listed.</p>
    /// <p>Applies only to Hyperledger Fabric.</p>
    pub status: std::option::Option<crate::model::NetworkStatus>,
    /// <p>The maximum number of networks to list.</p>
    pub max_results: std::option::Option<i32>,
    /// <p>The pagination token that indicates the next set of results to retrieve.</p>
    pub next_token: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for ListNetworksInput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("ListNetworksInput");
        formatter.field("name", &self.name);
        formatter.field("framework", &self.framework);
        formatter.field("status", &self.status);
        formatter.field("max_results", &self.max_results);
        formatter.field("next_token", &self.next_token);
        formatter.finish()
    }
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ListMembersInput {
    /// <p>The unique identifier of the network for which to list members.</p>
    pub network_id: std::option::Option<std::string::String>,
    /// <p>The optional name of the member to list.</p>
    pub name: std::option::Option<std::string::String>,
    /// <p>An optional status specifier. If provided, only members currently in this status are listed.</p>
    pub status: std::option::Option<crate::model::MemberStatus>,
    /// <p>An optional Boolean value. If provided, the request is limited either to
    /// members that the current AWS account owns (<code>true</code>) or that other AWS accounts
    /// own (<code>false</code>). If omitted, all members are listed.</p>
    pub is_owned: std::option::Option<bool>,
    /// <p>The maximum number of members to return in the request.</p>
    pub max_results: std::option::Option<i32>,
    /// <p>The pagination token that indicates the next set of results to retrieve.</p>
    pub next_token: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for ListMembersInput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("ListMembersInput");
        formatter.field("network_id", &self.network_id);
        formatter.field("name", &self.name);
        formatter.field("status", &self.status);
        formatter.field("is_owned", &self.is_owned);
        formatter.field("max_results", &self.max_results);
        formatter.field("next_token", &self.next_token);
        formatter.finish()
    }
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ListInvitationsInput {
    /// <p>The maximum number of invitations to return.</p>
    pub max_results: std::option::Option<i32>,
    /// <p>The pagination token that indicates the next set of results to retrieve.</p>
    pub next_token: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for ListInvitationsInput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("ListInvitationsInput");
        formatter.field("max_results", &self.max_results);
        formatter.field("next_token", &self.next_token);
        formatter.finish()
    }
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GetProposalInput {
    /// <p>The unique identifier of the network for which the proposal is made.</p>
    pub network_id: std::option::Option<std::string::String>,
    /// <p>The unique identifier of the proposal.</p>
    pub proposal_id: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for GetProposalInput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("GetProposalInput");
        formatter.field("network_id",
&self.network_id);
        formatter.field("proposal_id", &self.proposal_id);
        formatter.finish()
    }
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GetNodeInput {
    /// <p>The unique identifier of the network that the node is on.</p>
    pub network_id: std::option::Option<std::string::String>,
    /// <p>The unique identifier of the member that owns the node.</p>
    /// <p>Applies only to Hyperledger Fabric and is required for Hyperledger Fabric.</p>
    pub member_id: std::option::Option<std::string::String>,
    /// <p>The unique identifier of the node.</p>
    pub node_id: std::option::Option<std::string::String>,
}
// Manual Debug implementation listing every field by name.
impl std::fmt::Debug for GetNodeInput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("GetNodeInput");
        formatter.field("network_id", &self.network_id);
        formatter.field("member_id", &self.member_id);
        formatter.field("node_id", &self.node_id);
        formatter.finish()
    }
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GetNetworkInput {
    /// <p>The unique identifier of the network to get information about.</p>
    pub network_id: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for GetNetworkInput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("GetNetworkInput");
        formatter.field("network_id", &self.network_id);
        formatter.finish()
    }
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GetMemberInput {
    /// <p>The unique identifier of the network to which the member belongs.</p>
    pub network_id: std::option::Option<std::string::String>,
    /// <p>The unique identifier of the member.</p>
    pub member_id: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for GetMemberInput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("GetMemberInput");
        formatter.field("network_id", &self.network_id);
        formatter.field("member_id", &self.member_id);
        formatter.finish()
    }
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DeleteNodeInput {
    /// <p>The unique identifier of the network that the node is on.</p>
    /// <p>Ethereum public networks have the following <code>NetworkId</code>s:</p>
    /// <ul>
    /// <li>
    /// <p>
    /// <code>n-ethereum-mainnet</code>
    /// </p>
    /// </li>
    /// <li>
    /// <p>
    /// <code>n-ethereum-rinkeby</code>
    /// </p>
    /// </li>
    /// <li>
    /// <p>
    /// <code>n-ethereum-ropsten</code>
    /// </p>
    /// </li>
    /// </ul>
    pub network_id: std::option::Option<std::string::String>,
    /// <p>The unique identifier of the member that owns this node.</p>
    /// <p>Applies only to Hyperledger Fabric and is required for Hyperledger Fabric.</p>
    pub member_id: std::option::Option<std::string::String>,
    /// <p>The unique identifier of the node.</p>
    pub node_id: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for DeleteNodeInput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("DeleteNodeInput");
        formatter.field("network_id", &self.network_id);
        formatter.field("member_id", &self.member_id);
        formatter.field("node_id", &self.node_id);
        formatter.finish()
    }
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DeleteMemberInput {
    /// <p>The unique identifier of the network from which the member is removed.</p>
    pub network_id: std::option::Option<std::string::String>,
    /// <p>The unique identifier of the member to remove.</p>
    pub member_id: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for DeleteMemberInput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("DeleteMemberInput");
        formatter.field("network_id", &self.network_id);
        formatter.field("member_id", &self.member_id);
        formatter.finish()
    }
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct CreateProposalInput {
/// <p>A unique, case-sensitive identifier that you provide to ensure the idempotency of the operation. An idempotent operation completes no more than one time. This identifier is required only if you make a service request directly using an HTTP client. It is generated automatically if you use an AWS SDK or the AWS CLI.</p> pub client_request_token: std::option::Option<std::string::String>, /// <p> /// The unique identifier of the network for which the proposal is made.</p> pub network_id: std::option::Option<std::string::String>, /// <p>The unique identifier of the member that is creating the proposal. This identifier is especially useful for identifying the member making the proposal when multiple members exist in a single AWS account.</p> pub member_id: std::option::Option<std::string::String>, /// <p>The type of actions proposed, such as inviting a member or removing a member. The types of <code>Actions</code> in a proposal are mutually exclusive. For example, a proposal with <code>Invitations</code> actions cannot also contain <code>Removals</code> actions.</p> pub actions: std::option::Option<crate::model::ProposalActions>, /// <p>A description for the proposal that is visible to voting members, for example, "Proposal to add Example Corp. as member."</p> pub description: std::option::Option<std::string::String>, /// <p>Tags to assign to the proposal. Each tag consists of a key and optional value.</p> /// <p>When specifying tags during creation, you can specify multiple key-value pairs in a single request, with an overall maximum of 50 tags added to each resource. 
If the proposal is for a network invitation, the invitation inherits the tags added to the proposal.</p> /// <p>For more information about tags, see <a href="https://docs.aws.amazon.com/managed-blockchain/latest/ethereum-dev/tagging-resources.html">Tagging Resources</a> in the <i>Amazon Managed Blockchain Ethereum Developer Guide</i>, or <a href="https://docs.aws.amazon.com/managed-blockchain/latest/hyperledger-fabric-dev/tagging-resources.html">Tagging Resources</a> in the <i>Amazon Managed Blockchain Hyperledger Fabric Developer Guide</i>.</p> pub tags: std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>, } impl std::fmt::Debug for CreateProposalInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("CreateProposalInput"); formatter.field("client_request_token", &self.client_request_token); formatter.field("network_id", &self.network_id); formatter.field("member_id", &self.member_id); formatter.field("actions", &self.actions); formatter.field("description", &self.description); formatter.field("tags", &self.tags); formatter.finish() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct CreateNodeInput { /// <p>A unique, case-sensitive identifier that you provide to ensure the idempotency of the operation. An idempotent operation completes no more than one time. This identifier is required only if you make a service request directly using an HTTP client. 
It is generated automatically if you use an AWS SDK or the AWS CLI.</p> pub client_request_token: std::option::Option<std::string::String>, /// <p>The unique identifier of the network for the node.</p> /// <p>Ethereum public networks have the following <code>NetworkId</code>s:</p> /// <ul> /// <li> /// <p> /// <code>n-ethereum-mainnet</code> /// </p> /// </li> /// <li> /// <p> /// <code>n-ethereum-rinkeby</code> /// </p> /// </li> /// <li> /// <p> /// <code>n-ethereum-ropsten</code> /// </p> /// </li> /// </ul> pub network_id: std::option::Option<std::string::String>, /// <p>The unique identifier of the member that owns this node.</p> /// <p>Applies only to Hyperledger Fabric.</p> pub member_id: std::option::Option<std::string::String>, /// <p>The properties of a node configuration.</p> pub node_configuration: std::option::Option<crate::model::NodeConfiguration>, /// <p>Tags to assign to the node. Each tag consists of a key and optional value.</p> /// <p>When specifying tags during creation, you can specify multiple key-value pairs in a single request, with an overall maximum of 50 tags added to each resource.</p> /// <p>For more information about tags, see <a href="https://docs.aws.amazon.com/managed-blockchain/latest/ethereum-dev/tagging-resources.html">Tagging Resources</a> in the <i>Amazon Managed Blockchain Ethereum Developer Guide</i>, or <a href="https://docs.aws.amazon.com/managed-blockchain/latest/hyperledger-fabric-dev/tagging-resources.html">Tagging Resources</a> in the <i>Amazon Managed Blockchain Hyperledger Fabric Developer Guide</i>.</p> pub tags: std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>, } impl std::fmt::Debug for CreateNodeInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("CreateNodeInput"); formatter.field("client_request_token", &self.client_request_token); formatter.field("network_id", &self.network_id); formatter.field("member_id", 
&self.member_id); formatter.field("node_configuration", &self.node_configuration); formatter.field("tags", &self.tags); formatter.finish() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct CreateNetworkInput { /// <p>A unique, case-sensitive identifier that you provide to ensure the idempotency of the operation. An idempotent operation completes no more than one time. This identifier is required only if you make a service request directly using an HTTP client. It is generated automatically if you use an AWS SDK or the AWS CLI.</p> pub client_request_token: std::option::Option<std::string::String>, /// <p>The name of the network.</p> pub name: std::option::Option<std::string::String>, /// <p>An optional description for the network.</p> pub description: std::option::Option<std::string::String>, /// <p>The blockchain framework that the network uses.</p> pub framework: std::option::Option<crate::model::Framework>, /// <p>The version of the blockchain framework that the network uses.</p> pub framework_version: std::option::Option<std::string::String>, /// <p> /// Configuration properties of the blockchain framework relevant to the network configuration. /// </p> pub framework_configuration: std::option::Option<crate::model::NetworkFrameworkConfiguration>, /// <p> /// The voting rules used by the network to determine if a proposal is approved. /// </p> pub voting_policy: std::option::Option<crate::model::VotingPolicy>, /// <p>Configuration properties for the first member within the network.</p> pub member_configuration: std::option::Option<crate::model::MemberConfiguration>, /// <p>Tags to assign to the network. 
Each tag consists of a key and optional value.</p> /// <p>When specifying tags during creation, you can specify multiple key-value pairs in a single request, with an overall maximum of 50 tags added to each resource.</p> /// <p>For more information about tags, see <a href="https://docs.aws.amazon.com/managed-blockchain/latest/ethereum-dev/tagging-resources.html">Tagging Resources</a> in the <i>Amazon Managed Blockchain Ethereum Developer Guide</i>, or <a href="https://docs.aws.amazon.com/managed-blockchain/latest/hyperledger-fabric-dev/tagging-resources.html">Tagging Resources</a> in the <i>Amazon Managed Blockchain Hyperledger Fabric Developer Guide</i>.</p> pub tags: std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>, } impl std::fmt::Debug for CreateNetworkInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("CreateNetworkInput"); formatter.field("client_request_token", &self.client_request_token); formatter.field("name", &self.name); formatter.field("description", &self.description); formatter.field("framework", &self.framework); formatter.field("framework_version", &self.framework_version); formatter.field("framework_configuration", &self.framework_configuration); formatter.field("voting_policy", &self.voting_policy); formatter.field("member_configuration", &self.member_configuration); formatter.field("tags", &self.tags); formatter.finish() } } #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct CreateMemberInput { /// <p>A unique, case-sensitive identifier that you provide to ensure the idempotency of the operation. An idempotent operation completes no more than one time. This identifier is required only if you make a service request directly using an HTTP client. 
It is generated automatically if you use an AWS SDK or the AWS CLI.</p> pub client_request_token: std::option::Option<std::string::String>, /// <p>The unique identifier of the invitation that is sent to the member to join the network.</p> pub invitation_id: std::option::Option<std::string::String>, /// <p>The unique identifier of the network in which the member is created.</p> pub network_id: std::option::Option<std::string::String>, /// <p>Member configuration parameters.</p> pub member_configuration: std::option::Option<crate::model::MemberConfiguration>, } impl std::fmt::Debug for CreateMemberInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("CreateMemberInput"); formatter.field("client_request_token", &self.client_request_token); formatter.field("invitation_id", &self.invitation_id); formatter.field("network_id", &self.network_id); formatter.field("member_configuration", &self.member_configuration); formatter.finish() } }
41.964882
450
0.58461
11cd034aa16e7059d158fe748a4bbff65ef19d45
5,846
// svd2rust-style generated accessor for the SUBSCRIBE_STOPTX register
// (subscribe configuration for task STOPTX). `R`/`W` wrap the crate's generic
// reader/writer over a 32-bit register; field proxies below expose CHIDX
// (bits 0:3) and EN (bit 31).
#[doc = "Register `SUBSCRIBE_STOPTX` reader"]
pub struct R(crate::R<SUBSCRIBE_STOPTX_SPEC>);
impl core::ops::Deref for R {
    type Target = crate::R<SUBSCRIBE_STOPTX_SPEC>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl From<crate::R<SUBSCRIBE_STOPTX_SPEC>> for R {
    #[inline(always)]
    fn from(reader: crate::R<SUBSCRIBE_STOPTX_SPEC>) -> Self {
        R(reader)
    }
}
#[doc = "Register `SUBSCRIBE_STOPTX` writer"]
pub struct W(crate::W<SUBSCRIBE_STOPTX_SPEC>);
impl core::ops::Deref for W {
    type Target = crate::W<SUBSCRIBE_STOPTX_SPEC>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl core::ops::DerefMut for W {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
impl From<crate::W<SUBSCRIBE_STOPTX_SPEC>> for W {
    #[inline(always)]
    fn from(writer: crate::W<SUBSCRIBE_STOPTX_SPEC>) -> Self {
        W(writer)
    }
}
#[doc = "Field `CHIDX` reader - Channel that task STOPTX will subscribe to"]
pub struct CHIDX_R(crate::FieldReader<u8, u8>);
impl CHIDX_R {
    #[inline(always)]
    pub(crate) fn new(bits: u8) -> Self {
        CHIDX_R(crate::FieldReader::new(bits))
    }
}
impl core::ops::Deref for CHIDX_R {
    type Target = crate::FieldReader<u8, u8>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `CHIDX` writer - Channel that task STOPTX will subscribe to"]
pub struct CHIDX_W<'a> {
    w: &'a mut W,
}
impl<'a> CHIDX_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // CHIDX occupies bits 0:3 of the register, hence the 0x0f mask on both
        // the cleared register bits and the incoming value.
        self.w.bits = (self.w.bits & !0x0f) | (value as u32 & 0x0f);
        self.w
    }
}
#[doc = "\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum EN_A {
    #[doc = "0: Disable subscription"]
    DISABLED = 0,
    #[doc = "1: Enable subscription"]
    ENABLED = 1,
}
impl From<EN_A> for bool {
    #[inline(always)]
    fn from(variant: EN_A) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Field `EN` reader - "]
pub struct EN_R(crate::FieldReader<bool, EN_A>);
impl EN_R {
    #[inline(always)]
    pub(crate) fn new(bits: bool) -> Self {
        EN_R(crate::FieldReader::new(bits))
    }
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> EN_A {
        match self.bits {
            false => EN_A::DISABLED,
            true => EN_A::ENABLED,
        }
    }
    #[doc = "Checks if the value of the field is `DISABLED`"]
    #[inline(always)]
    pub fn is_disabled(&self) -> bool {
        **self == EN_A::DISABLED
    }
    #[doc = "Checks if the value of the field is `ENABLED`"]
    #[inline(always)]
    pub fn is_enabled(&self) -> bool {
        **self == EN_A::ENABLED
    }
}
impl core::ops::Deref for EN_R {
    type Target = crate::FieldReader<bool, EN_A>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `EN` writer - "]
pub struct EN_W<'a> {
    w: &'a mut W,
}
impl<'a> EN_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: EN_A) -> &'a mut W {
        self.bit(variant.into())
    }
    #[doc = "Disable subscription"]
    #[inline(always)]
    pub fn disabled(self) -> &'a mut W {
        self.variant(EN_A::DISABLED)
    }
    #[doc = "Enable subscription"]
    #[inline(always)]
    pub fn enabled(self) -> &'a mut W {
        self.variant(EN_A::ENABLED)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // EN is the single bit 31; clear it, then shift the new value in.
        self.w.bits = (self.w.bits & !(0x01 << 31)) | ((value as u32 & 0x01) << 31);
        self.w
    }
}
impl R {
    #[doc = "Bits 0:3 - Channel that task STOPTX will subscribe to"]
    #[inline(always)]
    pub fn chidx(&self) -> CHIDX_R {
        CHIDX_R::new((self.bits & 0x0f) as u8)
    }
    #[doc = "Bit 31"]
    #[inline(always)]
    pub fn en(&self) -> EN_R {
        EN_R::new(((self.bits >> 31) & 0x01) != 0)
    }
}
impl W {
    #[doc = "Bits 0:3 - Channel that task STOPTX will subscribe to"]
    #[inline(always)]
    pub fn chidx(&mut self) -> CHIDX_W {
        CHIDX_W { w: self }
    }
    #[doc = "Bit 31"]
    #[inline(always)]
    pub fn en(&mut self) -> EN_W {
        EN_W { w: self }
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.0.bits(bits);
        self
    }
}
#[doc = "Subscribe configuration for task STOPTX\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [subscribe_stoptx](index.html) module"]
pub struct SUBSCRIBE_STOPTX_SPEC;
impl crate::RegisterSpec for SUBSCRIBE_STOPTX_SPEC {
    // The underlying register is 32 bits wide.
    type Ux = u32;
}
#[doc = "`read()` method returns [subscribe_stoptx::R](R) reader structure"]
impl crate::Readable for SUBSCRIBE_STOPTX_SPEC {
    type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [subscribe_stoptx::W](W) writer structure"]
impl crate::Writable for SUBSCRIBE_STOPTX_SPEC {
    type Writer = W;
}
#[doc = "`reset()` method sets SUBSCRIBE_STOPTX to value 0"]
impl crate::Resettable for SUBSCRIBE_STOPTX_SPEC {
    #[inline(always)]
    fn reset_value() -> Self::Ux {
        0
    }
}
29.525253
436
0.585358
6a49bb0d41df5e628a70f42f579533fee2350584
9,236
use std::any::TypeId;
use std::collections::HashMap;
use std::marker::PhantomData;
use std::os::raw::{c_int, c_void};
use std::sync::{Arc, Mutex};
use std::{ptr, str};

use libc;

use context::Context;
use ffi;
use markers::NoRefUnwindSafe;
use types::Callback;
use util::{
    assert_stack, init_error_metatables, protect_lua_closure, safe_pcall, safe_xpcall,
    userdata_destructor,
};

/// Top level Lua struct which holds the Lua state itself.
pub struct Lua {
    // Raw pointer to the main Lua state created in `create_lua`.
    main_state: *mut ffi::lua_State,
    // Marker field; the struct is constructed with `PhantomData` below, so
    // `NoRefUnwindSafe` is presumably a PhantomData alias — see `markers`.
    _no_ref_unwind_safe: NoRefUnwindSafe,
}

// NOTE(review): this asserts the raw `lua_State` pointer may be moved across
// threads; soundness depends on invariants outside this file.
unsafe impl Send for Lua {}

impl Drop for Lua {
    // On drop: debug-assert no registry references leaked, clear the shared
    // unref list, free the ExtraData allocation, then close the Lua state.
    fn drop(&mut self) {
        unsafe {
            let extra = extra_data(self.main_state);
            rlua_debug_assert!(
                ffi::lua_gettop((*extra).ref_thread) == (*extra).ref_stack_max
                    && (*extra).ref_stack_max as usize == (*extra).ref_free.len(),
                "reference leak detected"
            );
            *rlua_expect!((*extra).registry_unref_list.lock(), "unref list poisoned") = None;
            // Reclaim the Box leaked into the lua_State extra space by create_lua.
            Box::from_raw(extra);
            ffi::lua_close(self.main_state);
        }
    }
}

impl Lua {
    /// Creates a new Lua state and loads standard library without the `debug` library.
    pub fn new() -> Lua {
        unsafe { create_lua(false) }
    }

    /// Creates a new Lua state and loads the standard library including the `debug` library.
    ///
    /// The debug library is very unsound, loading it and using it breaks all the guarantees of
    /// rlua.
    pub unsafe fn new_with_debug() -> Lua {
        create_lua(true)
    }

    /// The main entry point of the rlua API.
    ///
    /// In order to create Lua values, load and execute Lua code, or otherwise interact with the Lua
    /// state in any way, you must first call `Lua::context` and then call methods on the provided
    /// [`Context`] parameter.
    ///
    /// rlua uses reference types like `String` and `Table` which reference shared data in the Lua
    /// state. These are special reference counted types that contain pointers to the main Lua
    /// state via the [`Context`] type, and there is a `'lua` lifetime associated with these.
    ///
    /// This `'lua` lifetime is somewhat special. It is what is sometimes called a "generative"
    /// lifetime or a "branding" lifetime, which is unique for each call to `Lua::context` and
    /// is invariant.
    ///
    /// The reason this entry point must be a callback is so that this unique lifetime can be
    /// generated as part of the callback's parameters. Even though this callback API is somewhat
    /// inconvenient, it has several advantages:
    ///
    /// - Inside calls to `Lua::context`, we know that all instances of the 'lua lifetime are the
    ///   same unique lifetime. Thus, it is impossible for the user to accidentally mix handle
    ///   types between different instances of `Lua`.
    /// - Because we know at compile time that handles cannot be mixed from different instances of
    ///   `Lua`, we do not need to do runtime checks to make sure that handles are from the same
    ///   state.
    /// - Handle types cannot escape the context call and the `'lua` context lifetime is in general
    ///   very limited, preventing it from being stored in unexpected places. This is a benefit as
    ///   it helps ensure the soundness of the API.
    ///
    /// It is not possible to return types with this `'lua` context lifetime from the given
    /// callback, or store them outside of the callback in any way. There is an escape hatch here,
    /// though: if you need to keep references to internal Lua values long-term, you can use the Lua
    /// registry via `Lua::set_named_registry_value` and `Lua::create_registry_value`.
    ///
    /// # Examples
    ///
    /// ```
    /// # extern crate rlua;
    /// # use rlua::{Lua};
    /// # fn main() {
    /// let lua = Lua::new();
    /// lua.context(|lua_context| {
    ///     lua_context.exec::<_, ()>(r#"
    ///         print("hello world!")
    ///     "#, None).unwrap();
    /// });
    /// # }
    /// ```
    ///
    /// [`Context`]: struct.Context.html
    pub fn context<F, R>(&self, f: F) -> R
    where
        F: FnOnce(Context) -> R,
    {
        f(unsafe { Context::new(self.main_state) })
    }
}

// Data associated with the main lua_State via lua_getextraspace.
pub(crate) struct ExtraData {
    // Metatable registry index per registered userdata type.
    pub registered_userdata: HashMap<TypeId, c_int>,
    // Registry indices pending unref; set to None on drop.
    pub registry_unref_list: Arc<Mutex<Option<Vec<c_int>>>>,

    // Auxiliary Lua thread used as a stack of registry references.
    pub ref_thread: *mut ffi::lua_State,
    pub ref_stack_size: c_int,
    pub ref_stack_max: c_int,
    pub ref_free: Vec<c_int>,
}

// Reads the ExtraData pointer previously stored in the lua_State's
// "extra space" slot by create_lua.
pub(crate) unsafe fn extra_data(state: *mut ffi::lua_State) -> *mut ExtraData {
    *(ffi::lua_getextraspace(state) as *mut *mut ExtraData)
}

// Builds a fully-initialized Lua state: custom allocator, stdlib loading,
// function metatable, panic-safe pcall/xpcall, ref-stack thread, ExtraData.
unsafe fn create_lua(load_debug: bool) -> Lua {
    unsafe extern "C" fn allocator(
        _: *mut c_void,
        ptr: *mut c_void,
        _: usize,
        nsize: usize,
    ) -> *mut c_void {
        // Lua's allocator contract: nsize == 0 means free.
        if nsize == 0 {
            libc::free(ptr as *mut libc::c_void);
            ptr::null_mut()
        } else {
            let p = libc::realloc(ptr as *mut libc::c_void, nsize);
            if p.is_null() {
                // We require that OOM results in an abort, and that the lua allocator function
                // never errors. Since this is what rust itself normally does on OOM, this is
                // not really a huge loss. Importantly, this allows us to turn off the gc, and
                // then know that calling Lua API functions marked as 'm' will not result in a
                // 'longjmp' error while the gc is off.
                abort!("out of memory in rlua::Lua allocation, aborting!");
            } else {
                p as *mut c_void
            }
        }
    }

    let state = ffi::lua_newstate(allocator, ptr::null_mut());

    let ref_thread = rlua_expect!(
        protect_lua_closure(state, 0, 0, |state| {
            // Do not open the debug library, it can be used to cause unsafety.
            ffi::luaL_requiref(state, cstr!("_G"), ffi::luaopen_base, 1);
            ffi::luaL_requiref(state, cstr!("coroutine"), ffi::luaopen_coroutine, 1);
            ffi::luaL_requiref(state, cstr!("table"), ffi::luaopen_table, 1);
            ffi::luaL_requiref(state, cstr!("io"), ffi::luaopen_io, 1);
            ffi::luaL_requiref(state, cstr!("os"), ffi::luaopen_os, 1);
            ffi::luaL_requiref(state, cstr!("string"), ffi::luaopen_string, 1);
            ffi::luaL_requiref(state, cstr!("utf8"), ffi::luaopen_utf8, 1);
            ffi::luaL_requiref(state, cstr!("math"), ffi::luaopen_math, 1);
            ffi::luaL_requiref(state, cstr!("package"), ffi::luaopen_package, 1);
            // luaL_requiref leaves each module on the stack; pop all nine.
            ffi::lua_pop(state, 9);

            init_error_metatables(state);

            if load_debug {
                ffi::luaL_requiref(state, cstr!("debug"), ffi::luaopen_debug, 1);
                ffi::lua_pop(state, 1);
            }

            // Create the function metatable
            ffi::lua_pushlightuserdata(
                state,
                &FUNCTION_METATABLE_REGISTRY_KEY as *const u8 as *mut c_void,
            );
            ffi::lua_newtable(state);

            ffi::lua_pushstring(state, cstr!("__gc"));
            ffi::lua_pushcfunction(state, userdata_destructor::<Callback>);
            ffi::lua_rawset(state, -3);

            // __metatable = false hides the metatable from Lua code.
            ffi::lua_pushstring(state, cstr!("__metatable"));
            ffi::lua_pushboolean(state, 0);
            ffi::lua_rawset(state, -3);

            ffi::lua_rawset(state, ffi::LUA_REGISTRYINDEX);

            // Override pcall and xpcall with versions that cannot be used to catch rust panics.
            ffi::lua_rawgeti(state, ffi::LUA_REGISTRYINDEX, ffi::LUA_RIDX_GLOBALS);

            ffi::lua_pushstring(state, cstr!("pcall"));
            ffi::lua_pushcfunction(state, safe_pcall);
            ffi::lua_rawset(state, -3);

            ffi::lua_pushstring(state, cstr!("xpcall"));
            ffi::lua_pushcfunction(state, safe_xpcall);
            ffi::lua_rawset(state, -3);

            ffi::lua_pop(state, 1);

            // Create ref stack thread and place it in the registry to prevent it from being garbage
            // collected.
            let ref_thread = ffi::lua_newthread(state);
            ffi::luaL_ref(state, ffi::LUA_REGISTRYINDEX);

            ref_thread
        }),
        "Error during Lua construction",
    );

    rlua_debug_assert!(ffi::lua_gettop(state) == 0, "stack leak during creation");
    assert_stack(state, ffi::LUA_MINSTACK);

    // Create ExtraData, and place it in the lua_State "extra space"
    let extra = Box::into_raw(Box::new(ExtraData {
        registered_userdata: HashMap::new(),
        registry_unref_list: Arc::new(Mutex::new(Some(Vec::new()))),
        ref_thread,
        // We need 1 extra stack space to move values in and out of the ref stack.
        ref_stack_size: ffi::LUA_MINSTACK - 1,
        ref_stack_max: 0,
        ref_free: Vec::new(),
    }));
    *(ffi::lua_getextraspace(state) as *mut *mut ExtraData) = extra;

    Lua {
        main_state: state,
        _no_ref_unwind_safe: PhantomData,
    }
}

// Address of this static is used as a unique light-userdata registry key
// for the function metatable; its value is irrelevant.
pub(crate) static FUNCTION_METATABLE_REGISTRY_KEY: u8 = 0;
37.697959
100
0.607514
1c0be28d07591017786f0e0fa771877ecec8e8e6
2,014
// Copyright 2020 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::cmp;

/// Shared scaling logic: picks the largest power-of-1000 tier that fits
/// `num`, rounds the scaled value to two decimals (re-parsed so trailing
/// zeros are dropped, e.g. "1.00" -> "1"), and appends the tier's unit
/// separated by `sep`. Values with magnitude below 1 are printed as-is with
/// the base unit (`units[0]`).
fn humanize(num: f64, units: &[&str], sep: &str) -> String {
    let sign = if num.is_sign_positive() { "" } else { "-" };
    let magnitude = num.abs();

    if magnitude < 1_f64 {
        return format!("{}{}{}{}", sign, magnitude, sep, units[0]);
    }

    let step = 1000_f64;
    // log base 1000, clamped to the last available unit.
    let tier = cmp::min(
        (magnitude.ln() / step.ln()).floor() as i32,
        (units.len() - 1) as i32,
    );
    let scaled = format!("{:.2}", magnitude / step.powi(tier))
        .parse::<f64>()
        .unwrap();

    format!("{}{}{}{}", sign, scaled, sep, units[tier as usize])
}

/// Renders a byte count as a human-readable decimal (SI, base-1000) size,
/// e.g. `1024.0` -> `"1.02 KB"`.
pub fn convert_byte_size(num: f64) -> String {
    humanize(
        num,
        &["B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"],
        " ",
    )
}

/// Renders a plain number with an English scale word,
/// e.g. `123456.0` -> `"123.46 thousand"`.
pub fn convert_number_size(num: f64) -> String {
    humanize(
        num,
        &[
            "",
            " thousand",
            " million",
            " billion",
            " trillion",
            " quadrillion",
        ],
        "",
    )
}
31.46875
75
0.573486
335c08003218b116b4aa7d8b20aadbd2b9bb6081
1,058
use anyhow::Result;
use bevy_asset::AssetLoader;
use std::{io::Cursor, path::Path, sync::Arc};

/// A source of audio data
///
/// The raw bytes are stored in an `Arc<[u8]>`, so cloning an `AudioSource`
/// only bumps a reference count rather than copying the sample data.
#[derive(Debug, Clone)]
pub struct AudioSource {
    // Shared, immutable encoded audio bytes, exactly as read from the file.
    pub bytes: Arc<[u8]>,
}

impl AsRef<[u8]> for AudioSource {
    // Borrows the underlying encoded bytes.
    fn as_ref(&self) -> &[u8] {
        &self.bytes
    }
}

/// Loads mp3 files as [AudioSource] [Assets](bevy_asset::Assets)
///
/// NOTE(review): despite the name, `extensions()` below also registers
/// flac, wav and ogg — confirm whether a more general name is intended.
#[derive(Default)]
pub struct Mp3Loader;

impl AssetLoader<AudioSource> for Mp3Loader {
    // Stores the raw file bytes unchanged; no decoding happens at load time.
    fn from_bytes(&self, _asset_path: &Path, bytes: Vec<u8>) -> Result<AudioSource> {
        Ok(AudioSource {
            bytes: bytes.into(),
        })
    }
    // File extensions this loader is registered for.
    fn extensions(&self) -> &[&str] {
        static EXTENSIONS: &[&str] = &["mp3", "flac", "wav", "ogg"];
        EXTENSIONS
    }
}

/// A type whose audio data can be turned into a decoder value.
pub trait Decodable: Send + Sync + 'static {
    // The concrete decoder type produced by `decoder()`.
    type Decoder;

    /// Builds a fresh decoder over this source's data.
    fn decoder(&self) -> Self::Decoder;
}

impl Decodable for AudioSource {
    // Decodes from an in-memory cursor over a (cheap, Arc-backed) clone.
    type Decoder = rodio::Decoder<Cursor<AudioSource>>;

    fn decoder(&self) -> Self::Decoder {
        // NOTE(review): `unwrap()` panics if rodio cannot recognize the byte
        // stream (corrupt or unsupported format); the decode error is discarded.
        rodio::Decoder::new(Cursor::new(self.clone())).unwrap()
    }
}
22.510638
85
0.606805
14ffe6a38af9a6f28da13ff85cbbc20a16cc95aa
8,561
//! A type-safe wrapper around the sys module, which in turn exposes //! the API exported by winpty.dll. //! https://github.com/rprichard/winpty/blob/master/src/include/winpty.h #![allow(dead_code)] use super::sys::*; use bitflags::bitflags; use failure::{bail, ensure, format_err, Error}; use filedescriptor::{FileDescriptor, OwnedHandle}; use std::ffi::{OsStr, OsString}; use std::os::windows::ffi::{OsStrExt, OsStringExt}; use std::os::windows::io::FromRawHandle; use std::ptr; use winapi::shared::minwindef::DWORD; use winapi::shared::ntdef::LPCWSTR; use winapi::um::fileapi::{CreateFileW, OPEN_EXISTING}; use winapi::um::handleapi::INVALID_HANDLE_VALUE; use winapi::um::winbase::INFINITE; use winapi::um::winnt::HANDLE; use winapi::um::winnt::{GENERIC_READ, GENERIC_WRITE}; bitflags! { pub struct AgentFlags : u64 { const CONERR = WINPTY_FLAG_CONERR; const PLAIN_OUTPUT = WINPTY_FLAG_PLAIN_OUTPUT; const COLOR_ESCAPES = WINPTY_FLAG_COLOR_ESCAPES; const ALLOW_DESKTOP_CREATE = WINPTY_FLAG_ALLOW_CURPROC_DESKTOP_CREATION; } } bitflags! 
{ pub struct SpawnFlags : u64 { const AUTO_SHUTDOWN = WINPTY_SPAWN_FLAG_AUTO_SHUTDOWN; const EXIT_AFTER_SHUTDOWN = WINPTY_SPAWN_FLAG_EXIT_AFTER_SHUTDOWN; } } #[repr(u32)] pub enum MouseMode { None = WINPTY_MOUSE_MODE_NONE, Auto = WINPTY_MOUSE_MODE_AUTO, Force = WINPTY_MOUSE_MODE_FORCE, } pub enum Timeout { Infinite, Milliseconds(DWORD), } pub struct WinPtyConfig { config: *mut winpty_config_t, } fn wstr_to_osstr(wstr: LPCWSTR) -> Result<OsString, Error> { ensure!(!wstr.is_null(), "LPCWSTR is null"); let slice = unsafe { std::slice::from_raw_parts(wstr, libc::wcslen(wstr)) }; Ok(OsString::from_wide(slice)) } fn wstr_to_string(wstr: LPCWSTR) -> Result<String, Error> { ensure!(!wstr.is_null(), "LPCWSTR is null"); let slice = unsafe { std::slice::from_raw_parts(wstr, libc::wcslen(wstr)) }; String::from_utf16(slice).map_err(|e| format_err!("String::from_utf16: {}", e)) } fn check_err<T>(err: winpty_error_ptr_t, value: T) -> Result<T, Error> { if err.is_null() { return Ok(value); } unsafe { let code = (WINPTY.winpty_error_code)(err); if code == WINPTY_ERROR_SUCCESS { return Ok(value); } let converted = wstr_to_string((WINPTY.winpty_error_msg)(err))?; (WINPTY.winpty_error_free)(err); bail!("winpty error code {}: {}", code, converted) } } impl WinPtyConfig { pub fn new(flags: AgentFlags) -> Result<Self, Error> { let mut err: winpty_error_ptr_t = ptr::null_mut(); let config = unsafe { (WINPTY.winpty_config_new)(flags.bits(), &mut err) }; let config = check_err(err, config)?; ensure!( !config.is_null(), "winpty_config_new returned nullptr but no error" ); Ok(Self { config }) } pub fn set_initial_size(&mut self, cols: c_int, rows: c_int) { unsafe { (WINPTY.winpty_config_set_initial_size)(self.config, cols, rows) } } pub fn set_mouse_mode(&mut self, mode: MouseMode) { unsafe { (WINPTY.winpty_config_set_mouse_mode)(self.config, mode as c_int) } } pub fn set_agent_timeout(&mut self, timeout: Timeout) { let duration = match timeout { Timeout::Infinite => INFINITE, 
Timeout::Milliseconds(n) => n, }; unsafe { (WINPTY.winpty_config_set_agent_timeout)(self.config, duration) } } pub fn open(&self) -> Result<WinPty, Error> { let mut err: winpty_error_ptr_t = ptr::null_mut(); let pty = unsafe { (WINPTY.winpty_open)(self.config, &mut err) }; let pty = check_err(err, pty)?; ensure!(!pty.is_null(), "winpty_open returned nullptr but no error"); Ok(WinPty { pty }) } } impl Drop for WinPtyConfig { fn drop(&mut self) { unsafe { (WINPTY.winpty_config_free)(self.config) } } } pub struct WinPty { pty: *mut winpty_t, } impl Drop for WinPty { fn drop(&mut self) { unsafe { (WINPTY.winpty_free)(self.pty) } } } fn pipe_client(name: LPCWSTR, for_read: bool) -> Result<FileDescriptor, Error> { let handle = unsafe { CreateFileW( name, if for_read { GENERIC_READ } else { GENERIC_WRITE }, 0, ptr::null_mut(), OPEN_EXISTING, 0, ptr::null_mut(), ) }; if handle == INVALID_HANDLE_VALUE { let err = std::io::Error::last_os_error(); bail!("failed to open {:?}: {}", wstr_to_string(name), err); } else { Ok(unsafe { FileDescriptor::from_raw_handle(handle) }) } } impl WinPty { pub fn agent_process(&self) -> HANDLE { unsafe { (WINPTY.winpty_agent_process)(self.pty) } } pub fn conin(&self) -> Result<FileDescriptor, Error> { pipe_client(unsafe { (WINPTY.winpty_conin_name)(self.pty) }, false) } pub fn conout(&self) -> Result<FileDescriptor, Error> { pipe_client(unsafe { (WINPTY.winpty_conout_name)(self.pty) }, true) } pub fn conerr(&self) -> Result<FileDescriptor, Error> { pipe_client(unsafe { (WINPTY.winpty_conerr_name)(self.pty) }, true) } pub fn set_size(&mut self, cols: c_int, rows: c_int) -> Result<bool, Error> { let mut err: winpty_error_ptr_t = ptr::null_mut(); let result = unsafe { (WINPTY.winpty_set_size)(self.pty, cols, rows, &mut err) }; Ok(result != 0) } pub fn spawn(&mut self, config: &SpawnConfig) -> Result<SpawnedProcess, Error> { let mut err: winpty_error_ptr_t = ptr::null_mut(); let mut create_process_error: DWORD = 0; let mut process_handle: HANDLE = 
ptr::null_mut(); let mut thread_handle: HANDLE = ptr::null_mut(); let result = unsafe { (WINPTY.winpty_spawn)( self.pty, config.spawn_config, &mut process_handle, &mut thread_handle, &mut create_process_error, &mut err, ) }; let thread_handle = unsafe { OwnedHandle::from_raw_handle(thread_handle) }; let process_handle = unsafe { OwnedHandle::from_raw_handle(process_handle) }; let result = check_err(err, result)?; if result == 0 { let err = std::io::Error::from_raw_os_error(create_process_error as _); bail!("winpty_spawn failed: {}", err); } Ok(SpawnedProcess { thread_handle, process_handle, }) } } pub struct SpawnedProcess { pub process_handle: OwnedHandle, pub thread_handle: OwnedHandle, } pub struct SpawnConfig { spawn_config: *mut winpty_spawn_config_t, } /// Construct a null terminated wide string from an OsStr fn str_to_wide(s: &OsStr) -> Vec<u16> { let mut wide: Vec<u16> = s.encode_wide().collect(); wide.push(0); wide } fn str_ptr(s: &Option<Vec<u16>>) -> LPCWSTR { match s { None => ptr::null(), Some(v) => v.as_ptr(), } } impl SpawnConfig { pub fn with_os_str_args( flags: SpawnFlags, appname: Option<&OsStr>, cmdline: Option<&OsStr>, cwd: Option<&OsStr>, env: Option<&OsStr>, ) -> Result<Self, Error> { let appname = appname.map(str_to_wide); let cmdline = cmdline.map(str_to_wide); let cwd = cwd.map(str_to_wide); let env = env.map(str_to_wide); Self::new(flags, appname, cmdline, cwd, env) } pub fn new( flags: SpawnFlags, appname: Option<Vec<u16>>, cmdline: Option<Vec<u16>>, cwd: Option<Vec<u16>>, env: Option<Vec<u16>>, ) -> Result<Self, Error> { let mut err: winpty_error_ptr_t = ptr::null_mut(); let spawn_config = unsafe { (WINPTY.winpty_spawn_config_new)( flags.bits(), str_ptr(&appname), str_ptr(&cmdline), str_ptr(&cwd), str_ptr(&env), &mut err, ) }; let spawn_config = check_err(err, spawn_config)?; ensure!( !spawn_config.is_null(), "winpty_spawn_config_new returned nullptr but no error" ); Ok(Self { spawn_config }) } } impl Drop for SpawnConfig { fn drop(&mut 
self) { unsafe { (WINPTY.winpty_spawn_config_free)(self.spawn_config) } } }
30.466192
89
0.604719
dd95ad3525925ef7ec380ba53ba0fb0e62676f26
1,069
/* * Copyright (C) 2012 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma version(1) #pragma rs java_package_name(rs2spirv) #pragma rs_fp_relaxed static float brightM = 0.f; static float brightC = 0.f; void setBright(float v) { brightM = pow(2.f, v / 100.f); brightC = 127.f - brightM * 127.f; } uchar4 __attribute__((kernel)) contrast(uchar4 in) { float3 v = convert_float3(in.rgb) * brightM + brightC; uchar4 o; o.rgb = convert_uchar3(clamp(v, 0.f, 255.f)); o.a = 0xff; return o; }
30.542857
75
0.703461
1873271f855b3038e4d0a287323772dd361026a5
42,861
// This is a part of Chrono. // See README.md and LICENSE.txt for details. //! # Chrono: Date and Time for Rust //! //! It aims to be a feature-complete superset of //! the [time](https://github.com/rust-lang-deprecated/time) library. //! In particular, //! //! * Chrono strictly adheres to ISO 8601. //! * Chrono is timezone-aware by default, with separate timezone-naive types. //! * Chrono is space-optimal and (while not being the primary goal) reasonably efficient. //! //! There were several previous attempts to bring a good date and time library to Rust, //! which Chrono builds upon and should acknowledge: //! //! * [Initial research on //! the wiki](https://github.com/rust-lang/rust-wiki-backup/blob/master/Lib-datetime.md) //! * Dietrich Epp's [datetime-rs](https://github.com/depp/datetime-rs) //! * Luis de Bethencourt's [rust-datetime](https://github.com/luisbg/rust-datetime) //! //! Any significant changes to Chrono are documented in //! the [`CHANGELOG.md`](https://github.com/chronotope/chrono/blob/master/CHANGELOG.md) file. //! //! ## Usage //! //! Put this in your `Cargo.toml`: //! //! ```toml //! [dependencies] //! chrono = "0.4" //! ``` //! //! Or, if you want [Serde](https://github.com/serde-rs/serde) include the //! feature like this: //! //! ```toml //! [dependencies] //! chrono = { version = "0.4", features = ["serde"] } //! ``` //! //! Then put this in your crate root: //! //! ```rust //! extern crate chrono; //! ``` //! //! Avoid using `use chrono::*;` as Chrono exports several modules other than types. //! If you prefer the glob imports, use the following instead: //! //! ```rust //! use chrono::prelude::*; //! ``` //! //! ## Overview //! //! ### Duration //! //! Chrono currently uses //! the [`time::Duration`](https://docs.rs/time/0.1.40/time/struct.Duration.html) type //! from the `time` crate to represent the magnitude of a time span. //! Since this has the same name to the newer, standard type for duration, //! 
the reference will refer this type as `OldDuration`. //! Note that this is an "accurate" duration represented as seconds and //! nanoseconds and does not represent "nominal" components such as days or //! months. //! //! Chrono does not yet natively support //! the standard [`Duration`](https://doc.rust-lang.org/std/time/struct.Duration.html) type, //! but it will be supported in the future. //! Meanwhile you can convert between two types with //! [`Duration::from_std`](https://docs.rs/time/0.1.40/time/struct.Duration.html#method.from_std) //! and //! [`Duration::to_std`](https://docs.rs/time/0.1.40/time/struct.Duration.html#method.to_std) //! methods. //! //! ### Date and Time //! //! Chrono provides a //! [**`DateTime`**](./struct.DateTime.html) //! type to represent a date and a time in a timezone. //! //! For more abstract moment-in-time tracking such as internal timekeeping //! that is unconcerned with timezones, consider //! [`time::SystemTime`](https://doc.rust-lang.org/std/time/struct.SystemTime.html), //! which tracks your system clock, or //! [`time::Instant`](https://doc.rust-lang.org/std/time/struct.Instant.html), which //! is an opaque but monotonically-increasing representation of a moment in time. //! //! `DateTime` is timezone-aware and must be constructed from //! the [**`TimeZone`**](./offset/trait.TimeZone.html) object, //! which defines how the local date is converted to and back from the UTC date. //! There are three well-known `TimeZone` implementations: //! //! * [**`Utc`**](./offset/struct.Utc.html) specifies the UTC time zone. It is most efficient. //! //! * [**`Local`**](./offset/struct.Local.html) specifies the system local time zone. //! //! * [**`FixedOffset`**](./offset/struct.FixedOffset.html) specifies //! an arbitrary, fixed time zone such as UTC+09:00 or UTC-10:30. //! This often results from the parsed textual date and time. //! Since it stores the most information and does not depend on the system environment, //! 
you would want to normalize other `TimeZone`s into this type. //! //! `DateTime`s with different `TimeZone` types are distinct and do not mix, //! but can be converted to each other using //! the [`DateTime::with_timezone`](./struct.DateTime.html#method.with_timezone) method. //! //! You can get the current date and time in the UTC time zone //! ([`Utc::now()`](./offset/struct.Utc.html#method.now)) //! or in the local time zone //! ([`Local::now()`](./offset/struct.Local.html#method.now)). //! //! ```rust //! use chrono::prelude::*; //! //! let utc: DateTime<Utc> = Utc::now(); // e.g. `2014-11-28T12:45:59.324310806Z` //! let local: DateTime<Local> = Local::now(); // e.g. `2014-11-28T21:45:59.324310806+09:00` //! # let _ = utc; let _ = local; //! ``` //! //! Alternatively, you can create your own date and time. //! This is a bit verbose due to Rust's lack of function and method overloading, //! but in turn we get a rich combination of initialization methods. //! //! ```rust //! use chrono::prelude::*; //! use chrono::offset::LocalResult; //! //! let dt = Utc.ymd(2014, 7, 8).and_hms(9, 10, 11); // `2014-07-08T09:10:11Z` //! // July 8 is 188th day of the year 2014 (`o` for "ordinal") //! assert_eq!(dt, Utc.yo(2014, 189).and_hms(9, 10, 11)); //! // July 8 is Tuesday in ISO week 28 of the year 2014. //! assert_eq!(dt, Utc.isoywd(2014, 28, Weekday::Tue).and_hms(9, 10, 11)); //! //! let dt = Utc.ymd(2014, 7, 8).and_hms_milli(9, 10, 11, 12); // `2014-07-08T09:10:11.012Z` //! assert_eq!(dt, Utc.ymd(2014, 7, 8).and_hms_micro(9, 10, 11, 12_000)); //! assert_eq!(dt, Utc.ymd(2014, 7, 8).and_hms_nano(9, 10, 11, 12_000_000)); //! //! // dynamic verification //! assert_eq!(Utc.ymd_opt(2014, 7, 8).and_hms_opt(21, 15, 33), //! LocalResult::Single(Utc.ymd(2014, 7, 8).and_hms(21, 15, 33))); //! assert_eq!(Utc.ymd_opt(2014, 7, 8).and_hms_opt(80, 15, 33), LocalResult::None); //! assert_eq!(Utc.ymd_opt(2014, 7, 38).and_hms_opt(21, 15, 33), LocalResult::None); //! //! 
// other time zone objects can be used to construct a local datetime. //! // obviously, `local_dt` is normally different from `dt`, but `fixed_dt` should be identical. //! let local_dt = Local.ymd(2014, 7, 8).and_hms_milli(9, 10, 11, 12); //! let fixed_dt = FixedOffset::east(9 * 3600).ymd(2014, 7, 8).and_hms_milli(18, 10, 11, 12); //! assert_eq!(dt, fixed_dt); //! # let _ = local_dt; //! ``` //! //! Various properties are available to the date and time, and can be altered individually. //! Most of them are defined in the traits [`Datelike`](./trait.Datelike.html) and //! [`Timelike`](./trait.Timelike.html) which you should `use` before. //! Addition and subtraction is also supported. //! The following illustrates most supported operations to the date and time: //! //! ```rust //! # extern crate chrono; fn main() { //! use chrono::{prelude::*, Duration}; //! //! // assume this returned `2014-11-28T21:45:59.324310806+09:00`: //! let dt = FixedOffset::east(9*3600).ymd(2014, 11, 28).and_hms_nano(21, 45, 59, 324310806); //! //! // property accessors //! assert_eq!((dt.year(), dt.month(), dt.day()), (2014, 11, 28)); //! assert_eq!((dt.month0(), dt.day0()), (10, 27)); // for unfortunate souls //! assert_eq!((dt.hour(), dt.minute(), dt.second()), (21, 45, 59)); //! assert_eq!(dt.weekday(), Weekday::Fri); //! assert_eq!(dt.weekday().number_from_monday(), 5); // Mon=1, ..., Sun=7 //! assert_eq!(dt.ordinal(), 332); // the day of year //! assert_eq!(dt.num_days_from_ce(), 735565); // the number of days from and including Jan 1, 1 //! //! // time zone accessor and manipulation //! assert_eq!(dt.offset().fix().local_minus_utc(), 9 * 3600); //! assert_eq!(dt.timezone(), FixedOffset::east(9 * 3600)); //! assert_eq!(dt.with_timezone(&Utc), Utc.ymd(2014, 11, 28).and_hms_nano(12, 45, 59, 324310806)); //! //! // a sample of property manipulations (validates dynamically) //! assert_eq!(dt.with_day(29).unwrap().weekday(), Weekday::Sat); // 2014-11-29 is Saturday //! 
assert_eq!(dt.with_day(32), None); //! assert_eq!(dt.with_year(-300).unwrap().num_days_from_ce(), -109606); // November 29, 301 BCE //! //! // arithmetic operations //! let dt1 = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10); //! let dt2 = Utc.ymd(2014, 11, 14).and_hms(10, 9, 8); //! assert_eq!(dt1.signed_duration_since(dt2), Duration::seconds(-2 * 3600 + 2)); //! assert_eq!(dt2.signed_duration_since(dt1), Duration::seconds(2 * 3600 - 2)); //! assert_eq!(Utc.ymd(1970, 1, 1).and_hms(0, 0, 0) + Duration::seconds(1_000_000_000), //! Utc.ymd(2001, 9, 9).and_hms(1, 46, 40)); //! assert_eq!(Utc.ymd(1970, 1, 1).and_hms(0, 0, 0) - Duration::seconds(1_000_000_000), //! Utc.ymd(1938, 4, 24).and_hms(22, 13, 20)); //! # } //! ``` //! //! ### Formatting and Parsing //! //! Formatting is done via the [`format`](./struct.DateTime.html#method.format) method, //! which format is equivalent to the familiar `strftime` format. //! //! See [`format::strftime`](./format/strftime/index.html#specifiers) //! documentation for full syntax and list of specifiers. //! //! The default `to_string` method and `{:?}` specifier also give a reasonable representation. //! Chrono also provides [`to_rfc2822`](./struct.DateTime.html#method.to_rfc2822) and //! [`to_rfc3339`](./struct.DateTime.html#method.to_rfc3339) methods //! for well-known formats. //! //! ```rust //! use chrono::prelude::*; //! //! let dt = Utc.ymd(2014, 11, 28).and_hms(12, 0, 9); //! assert_eq!(dt.format("%Y-%m-%d %H:%M:%S").to_string(), "2014-11-28 12:00:09"); //! assert_eq!(dt.format("%a %b %e %T %Y").to_string(), "Fri Nov 28 12:00:09 2014"); //! assert_eq!(dt.format("%a %b %e %T %Y").to_string(), dt.format("%c").to_string()); //! //! assert_eq!(dt.to_string(), "2014-11-28 12:00:09 UTC"); //! assert_eq!(dt.to_rfc2822(), "Fri, 28 Nov 2014 12:00:09 +0000"); //! assert_eq!(dt.to_rfc3339(), "2014-11-28T12:00:09+00:00"); //! assert_eq!(format!("{:?}", dt), "2014-11-28T12:00:09Z"); //! //! 
// Note that milli/nanoseconds are only printed if they are non-zero //! let dt_nano = Utc.ymd(2014, 11, 28).and_hms_nano(12, 0, 9, 1); //! assert_eq!(format!("{:?}", dt_nano), "2014-11-28T12:00:09.000000001Z"); //! ``` //! //! Parsing can be done with three methods: //! //! 1. The standard [`FromStr`](https://doc.rust-lang.org/std/str/trait.FromStr.html) trait //! (and [`parse`](https://doc.rust-lang.org/std/primitive.str.html#method.parse) method //! on a string) can be used for parsing `DateTime<FixedOffset>`, `DateTime<Utc>` and //! `DateTime<Local>` values. This parses what the `{:?}` //! ([`std::fmt::Debug`](https://doc.rust-lang.org/std/fmt/trait.Debug.html)) //! format specifier prints, and requires the offset to be present. //! //! 2. [`DateTime::parse_from_str`](./struct.DateTime.html#method.parse_from_str) parses //! a date and time with offsets and returns `DateTime<FixedOffset>`. //! This should be used when the offset is a part of input and the caller cannot guess that. //! It *cannot* be used when the offset can be missing. //! [`DateTime::parse_from_rfc2822`](./struct.DateTime.html#method.parse_from_rfc2822) //! and //! [`DateTime::parse_from_rfc3339`](./struct.DateTime.html#method.parse_from_rfc3339) //! are similar but for well-known formats. //! //! 3. [`Offset::datetime_from_str`](./offset/trait.TimeZone.html#method.datetime_from_str) is //! similar but returns `DateTime` of given offset. //! When the explicit offset is missing from the input, it simply uses given offset. //! It issues an error when the input contains an explicit offset different //! from the current offset. //! //! More detailed control over the parsing process is available via //! [`format`](./format/index.html) module. //! //! ```rust //! use chrono::prelude::*; //! //! let dt = Utc.ymd(2014, 11, 28).and_hms(12, 0, 9); //! let fixed_dt = dt.with_timezone(&FixedOffset::east(9*3600)); //! //! // method 1 //! 
assert_eq!("2014-11-28T12:00:09Z".parse::<DateTime<Utc>>(), Ok(dt.clone())); //! assert_eq!("2014-11-28T21:00:09+09:00".parse::<DateTime<Utc>>(), Ok(dt.clone())); //! assert_eq!("2014-11-28T21:00:09+09:00".parse::<DateTime<FixedOffset>>(), Ok(fixed_dt.clone())); //! //! // method 2 //! assert_eq!(DateTime::parse_from_str("2014-11-28 21:00:09 +09:00", "%Y-%m-%d %H:%M:%S %z"), //! Ok(fixed_dt.clone())); //! assert_eq!(DateTime::parse_from_rfc2822("Fri, 28 Nov 2014 21:00:09 +0900"), //! Ok(fixed_dt.clone())); //! assert_eq!(DateTime::parse_from_rfc3339("2014-11-28T21:00:09+09:00"), Ok(fixed_dt.clone())); //! //! // method 3 //! assert_eq!(Utc.datetime_from_str("2014-11-28 12:00:09", "%Y-%m-%d %H:%M:%S"), Ok(dt.clone())); //! assert_eq!(Utc.datetime_from_str("Fri Nov 28 12:00:09 2014", "%a %b %e %T %Y"), Ok(dt.clone())); //! //! // oops, the year is missing! //! assert!(Utc.datetime_from_str("Fri Nov 28 12:00:09", "%a %b %e %T %Y").is_err()); //! // oops, the format string does not include the year at all! //! assert!(Utc.datetime_from_str("Fri Nov 28 12:00:09", "%a %b %e %T").is_err()); //! // oops, the weekday is incorrect! //! assert!(Utc.datetime_from_str("Sat Nov 28 12:00:09 2014", "%a %b %e %T %Y").is_err()); //! ``` //! //! Again : See [`format::strftime`](./format/strftime/index.html#specifiers) //! documentation for full syntax and list of specifiers. //! //! ### Conversion from and to EPOCH timestamps //! //! Use [`Utc.timestamp(seconds, nanoseconds)`](./offset/trait.TimeZone.html#method.timestamp) //! to construct a [`DateTime<Utc>`](./struct.DateTime.html) from a UNIX timestamp //! (seconds, nanoseconds that passed since January 1st 1970). //! //! Use [`DateTime.timestamp`](./struct.DateTime.html#method.timestamp) to get the timestamp (in seconds) //! from a [`DateTime`](./struct.DateTime.html). Additionally, you can use //! [`DateTime.timestamp_subsec_nanos`](./struct.DateTime.html#method.timestamp_subsec_nanos) //! 
to get the number of additional number of nanoseconds. //! //! ```rust //! // We need the trait in scope to use Utc::timestamp(). //! use chrono::{DateTime, TimeZone, Utc}; //! //! // Construct a datetime from epoch: //! let dt = Utc.timestamp(1_500_000_000, 0); //! assert_eq!(dt.to_rfc2822(), "Fri, 14 Jul 2017 02:40:00 +0000"); //! //! // Get epoch value from a datetime: //! let dt = DateTime::parse_from_rfc2822("Fri, 14 Jul 2017 02:40:00 +0000").unwrap(); //! assert_eq!(dt.timestamp(), 1_500_000_000); //! ``` //! //! ### Individual date //! //! Chrono also provides an individual date type ([**`Date`**](./struct.Date.html)). //! It also has time zones attached, and have to be constructed via time zones. //! Most operations available to `DateTime` are also available to `Date` whenever appropriate. //! //! ```rust //! use chrono::prelude::*; //! use chrono::offset::LocalResult; //! //! # // these *may* fail, but only very rarely. just rerun the test if you were that unfortunate ;) //! assert_eq!(Utc::today(), Utc::now().date()); //! assert_eq!(Local::today(), Local::now().date()); //! //! assert_eq!(Utc.ymd(2014, 11, 28).weekday(), Weekday::Fri); //! assert_eq!(Utc.ymd_opt(2014, 11, 31), LocalResult::None); //! assert_eq!(Utc.ymd(2014, 11, 28).and_hms_milli(7, 8, 9, 10).format("%H%M%S").to_string(), //! "070809"); //! ``` //! //! There is no timezone-aware `Time` due to the lack of usefulness and also the complexity. //! //! `DateTime` has [`date`](./struct.DateTime.html#method.date) method //! which returns a `Date` which represents its date component. //! There is also a [`time`](./struct.DateTime.html#method.time) method, //! which simply returns a naive local time described below. //! //! ### Naive date and time //! //! Chrono provides naive counterparts to `Date`, (non-existent) `Time` and `DateTime` //! as [**`NaiveDate`**](./naive/struct.NaiveDate.html), //! [**`NaiveTime`**](./naive/struct.NaiveTime.html) and //! 
[**`NaiveDateTime`**](./naive/struct.NaiveDateTime.html) respectively. //! //! They have almost equivalent interfaces as their timezone-aware twins, //! but are not associated to time zones obviously and can be quite low-level. //! They are mostly useful for building blocks for higher-level types. //! //! Timezone-aware `DateTime` and `Date` types have two methods returning naive versions: //! [`naive_local`](./struct.DateTime.html#method.naive_local) returns //! a view to the naive local time, //! and [`naive_utc`](./struct.DateTime.html#method.naive_utc) returns //! a view to the naive UTC time. //! //! ## Limitations //! //! Only proleptic Gregorian calendar (i.e. extended to support older dates) is supported. //! Be very careful if you really have to deal with pre-20C dates, they can be in Julian or others. //! //! Date types are limited in about +/- 262,000 years from the common epoch. //! Time types are limited in the nanosecond accuracy. //! //! [Leap seconds are supported in the representation but //! Chrono doesn't try to make use of them](./naive/struct.NaiveTime.html#leap-second-handling). //! (The main reason is that leap seconds are not really predictable.) //! Almost *every* operation over the possible leap seconds will ignore them. //! Consider using `NaiveDateTime` with the implicit TAI (International Atomic Time) scale //! if you want. //! //! Chrono inherently does not support an inaccurate or partial date and time representation. //! Any operation that can be ambiguous will return `None` in such cases. //! For example, "a month later" of 2014-01-30 is not well-defined //! and consequently `Utc.ymd(2014, 1, 30).with_month(2)` returns `None`. //! //! Advanced time zone handling is not yet supported. //! For now you can try the [Chrono-tz](https://github.com/chronotope/chrono-tz/) crate instead. 
#![doc(html_root_url = "https://docs.rs/chrono/latest/")] #![cfg_attr(feature = "bench", feature(test))] // lib stability features as per RFC #507 #![deny(missing_docs)] #![deny(missing_debug_implementations)] #![deny(dead_code)] #![cfg_attr(not(any(feature = "std", test)), no_std)] // The explicit 'static lifetimes are still needed for rustc 1.13-16 // backward compatibility, and this appeases clippy. If minimum rustc // becomes 1.17, should be able to remove this, those 'static lifetimes, // and use `static` in a lot of places `const` is used now. // // Similarly, redundant_field_names lints on not using the // field-init-shorthand, which was stabilized in rust 1.17. // // Changing trivially_copy_pass_by_ref would require an incompatible version // bump. #![cfg_attr(feature = "cargo-clippy", allow( const_static_lifetime, redundant_field_names, trivially_copy_pass_by_ref, ))] #[cfg(feature = "alloc")] extern crate alloc; #[cfg(any(feature = "std", test))] extern crate std as core; #[cfg(all(feature = "std", not(feature="alloc")))] extern crate std as alloc; // These are required by the `time` module: #[cfg(all(feature="clock", target_os = "redox"))] extern crate syscall; #[cfg(unix)] extern crate libc; #[cfg(all(feature="clock", windows))] extern crate winapi; #[cfg(all(feature="clock", any(target_os = "macos", target_os = "ios")))] extern crate mach; extern crate num_integer; extern crate num_traits; #[cfg(feature = "rustc-serialize")] extern crate rustc_serialize; #[cfg(feature = "serde")] extern crate serde as serdelib; #[cfg(all(target_arch = "wasm32", not(target_os = "wasi"), feature = "wasmbind"))] extern crate wasm_bindgen; #[cfg(all(target_arch = "wasm32", not(target_os = "wasi"), feature = "wasmbind"))] extern crate js_sys; #[cfg(feature = "bench")] extern crate test; #[cfg(test)] #[macro_use] extern crate log; #[cfg(test)] #[macro_use] extern crate doc_comment; #[cfg(test)] doctest!("../README.md"); pub mod time; pub use time::{Duration, PreciseTime, 
SteadyTime}; pub use time as oldtime; // this reexport is to aid the transition and should not be in the prelude! #[cfg(feature="clock")] #[doc(no_inline)] pub use offset::Local; #[doc(no_inline)] pub use offset::{TimeZone, Offset, LocalResult, Utc, FixedOffset}; #[doc(no_inline)] pub use naive::{NaiveDate, IsoWeek, NaiveTime, NaiveDateTime}; pub use date::{Date, MIN_DATE, MAX_DATE}; pub use datetime::{DateTime, SecondsFormat}; #[cfg(feature = "rustc-serialize")] pub use datetime::rustc_serialize::TsSeconds; pub use format::{ParseError, ParseResult}; pub use round::SubsecRound; /// A convenience module appropriate for glob imports (`use chrono::prelude::*;`). pub mod prelude { #[doc(no_inline)] pub use {Datelike, Timelike, Weekday}; #[doc(no_inline)] pub use {TimeZone, Offset}; #[cfg(feature="clock")] #[doc(no_inline)] pub use Local; #[doc(no_inline)] pub use {Utc, FixedOffset}; #[doc(no_inline)] pub use {NaiveDate, NaiveTime, NaiveDateTime}; #[doc(no_inline)] pub use Date; #[doc(no_inline)] pub use {DateTime, SecondsFormat}; #[doc(no_inline)] pub use SubsecRound; } // useful throughout the codebase macro_rules! try_opt { ($e:expr) => (match $e { Some(v) => v, None => return None }) } mod div; pub mod offset; pub mod naive { //! Date and time types unconcerned with timezones. //! //! They are primarily building blocks for other types //! (e.g. [`TimeZone`](../offset/trait.TimeZone.html)), //! but can be also used for the simpler date and time handling. 
mod internals; mod date; mod isoweek; mod time; mod datetime; pub use self::date::{NaiveDate, MIN_DATE, MAX_DATE}; pub use self::isoweek::IsoWeek; pub use self::time::NaiveTime; pub use self::datetime::NaiveDateTime; #[cfg(feature = "rustc-serialize")] #[allow(deprecated)] pub use self::datetime::rustc_serialize::TsSeconds; #[cfg(feature = "__internal_bench")] #[doc(hidden)] pub use self::internals::YearFlags as __BenchYearFlags; /// Serialization/Deserialization of naive types in alternate formats /// /// The various modules in here are intended to be used with serde's [`with` /// annotation][1] to serialize as something other than the default [RFC /// 3339][2] format. /// /// [1]: https://serde.rs/attributes.html#field-attributes /// [2]: https://tools.ietf.org/html/rfc3339 #[cfg(feature = "serde")] pub mod serde { pub use super::datetime::serde::*; } } mod date; mod datetime; pub mod format; mod round; #[cfg(feature = "__internal_bench")] #[doc(hidden)] pub use naive::__BenchYearFlags; /// Serialization/Deserialization in alternate formats /// /// The various modules in here are intended to be used with serde's [`with` /// annotation][1] to serialize as something other than the default [RFC /// 3339][2] format. 
/// /// [1]: https://serde.rs/attributes.html#field-attributes /// [2]: https://tools.ietf.org/html/rfc3339 #[cfg(feature = "serde")] pub mod serde { pub use super::datetime::serde::*; } // Until rust 1.18 there is no "pub(crate)" so to share this we need it in the root #[cfg(feature = "serde")] enum SerdeError<V: fmt::Display, D: fmt::Display> { NonExistent { timestamp: V }, Ambiguous { timestamp: V, min: D, max: D }, } /// Construct a [`SerdeError::NonExistent`] #[cfg(feature = "serde")] fn ne_timestamp<T: fmt::Display>(ts: T) -> SerdeError<T, u8> { SerdeError::NonExistent::<T, u8> { timestamp: ts } } #[cfg(feature = "serde")] impl<V: fmt::Display, D: fmt::Display> fmt::Debug for SerdeError<V, D> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "ChronoSerdeError({})", self) } } // impl<V: fmt::Display, D: fmt::Debug> core::error::Error for SerdeError<V, D> {} #[cfg(feature = "serde")] impl<V: fmt::Display, D: fmt::Display> fmt::Display for SerdeError<V, D> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { &SerdeError::NonExistent { ref timestamp } => write!( f, "value is not a legal timestamp: {}", timestamp), &SerdeError::Ambiguous { ref timestamp, ref min, ref max } => write!( f, "value is an ambiguous timestamp: {}, could be either of {}, {}", timestamp, min, max), } } } /// The day of week. /// /// The order of the days of week depends on the context. /// (This is why this type does *not* implement `PartialOrd` or `Ord` traits.) /// One should prefer `*_from_monday` or `*_from_sunday` methods to get the correct result. #[derive(PartialEq, Eq, Copy, Clone, Debug, Hash)] #[cfg_attr(feature = "rustc-serialize", derive(RustcEncodable, RustcDecodable))] pub enum Weekday { /// Monday. Mon = 0, /// Tuesday. Tue = 1, /// Wednesday. Wed = 2, /// Thursday. Thu = 3, /// Friday. Fri = 4, /// Saturday. Sat = 5, /// Sunday. Sun = 6, } impl Weekday { /// The next day in the week. 
/// /// `w`: | `Mon` | `Tue` | `Wed` | `Thu` | `Fri` | `Sat` | `Sun` /// ----------- | ----- | ----- | ----- | ----- | ----- | ----- | ----- /// `w.succ()`: | `Tue` | `Wed` | `Thu` | `Fri` | `Sat` | `Sun` | `Mon` #[inline] pub fn succ(&self) -> Weekday { match *self { Weekday::Mon => Weekday::Tue, Weekday::Tue => Weekday::Wed, Weekday::Wed => Weekday::Thu, Weekday::Thu => Weekday::Fri, Weekday::Fri => Weekday::Sat, Weekday::Sat => Weekday::Sun, Weekday::Sun => Weekday::Mon, } } /// The previous day in the week. /// /// `w`: | `Mon` | `Tue` | `Wed` | `Thu` | `Fri` | `Sat` | `Sun` /// ----------- | ----- | ----- | ----- | ----- | ----- | ----- | ----- /// `w.pred()`: | `Sun` | `Mon` | `Tue` | `Wed` | `Thu` | `Fri` | `Sat` #[inline] pub fn pred(&self) -> Weekday { match *self { Weekday::Mon => Weekday::Sun, Weekday::Tue => Weekday::Mon, Weekday::Wed => Weekday::Tue, Weekday::Thu => Weekday::Wed, Weekday::Fri => Weekday::Thu, Weekday::Sat => Weekday::Fri, Weekday::Sun => Weekday::Sat, } } /// Returns a day-of-week number starting from Monday = 1. (ISO 8601 weekday number) /// /// `w`: | `Mon` | `Tue` | `Wed` | `Thu` | `Fri` | `Sat` | `Sun` /// ------------------------- | ----- | ----- | ----- | ----- | ----- | ----- | ----- /// `w.number_from_monday()`: | 1 | 2 | 3 | 4 | 5 | 6 | 7 #[inline] pub fn number_from_monday(&self) -> u32 { match *self { Weekday::Mon => 1, Weekday::Tue => 2, Weekday::Wed => 3, Weekday::Thu => 4, Weekday::Fri => 5, Weekday::Sat => 6, Weekday::Sun => 7, } } /// Returns a day-of-week number starting from Sunday = 1. 
/// /// `w`: | `Mon` | `Tue` | `Wed` | `Thu` | `Fri` | `Sat` | `Sun` /// ------------------------- | ----- | ----- | ----- | ----- | ----- | ----- | ----- /// `w.number_from_sunday()`: | 2 | 3 | 4 | 5 | 6 | 7 | 1 #[inline] pub fn number_from_sunday(&self) -> u32 { match *self { Weekday::Mon => 2, Weekday::Tue => 3, Weekday::Wed => 4, Weekday::Thu => 5, Weekday::Fri => 6, Weekday::Sat => 7, Weekday::Sun => 1, } } /// Returns a day-of-week number starting from Monday = 0. /// /// `w`: | `Mon` | `Tue` | `Wed` | `Thu` | `Fri` | `Sat` | `Sun` /// --------------------------- | ----- | ----- | ----- | ----- | ----- | ----- | ----- /// `w.num_days_from_monday()`: | 0 | 1 | 2 | 3 | 4 | 5 | 6 #[inline] pub fn num_days_from_monday(&self) -> u32 { match *self { Weekday::Mon => 0, Weekday::Tue => 1, Weekday::Wed => 2, Weekday::Thu => 3, Weekday::Fri => 4, Weekday::Sat => 5, Weekday::Sun => 6, } } /// Returns a day-of-week number starting from Sunday = 0. /// /// `w`: | `Mon` | `Tue` | `Wed` | `Thu` | `Fri` | `Sat` | `Sun` /// --------------------------- | ----- | ----- | ----- | ----- | ----- | ----- | ----- /// `w.num_days_from_sunday()`: | 1 | 2 | 3 | 4 | 5 | 6 | 0 #[inline] pub fn num_days_from_sunday(&self) -> u32 { match *self { Weekday::Mon => 1, Weekday::Tue => 2, Weekday::Wed => 3, Weekday::Thu => 4, Weekday::Fri => 5, Weekday::Sat => 6, Weekday::Sun => 0, } } } impl fmt::Display for Weekday { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str(match *self { Weekday::Mon => "Mon", Weekday::Tue => "Tue", Weekday::Wed => "Wed", Weekday::Thu => "Thu", Weekday::Fri => "Fri", Weekday::Sat => "Sat", Weekday::Sun => "Sun", }) } } /// Any weekday can be represented as an integer from 0 to 6, which equals to /// [`Weekday::num_days_from_monday`](#method.num_days_from_monday) in this implementation. /// Do not heavily depend on this though; use explicit methods whenever possible. 
impl num_traits::FromPrimitive for Weekday { #[inline] fn from_i64(n: i64) -> Option<Weekday> { match n { 0 => Some(Weekday::Mon), 1 => Some(Weekday::Tue), 2 => Some(Weekday::Wed), 3 => Some(Weekday::Thu), 4 => Some(Weekday::Fri), 5 => Some(Weekday::Sat), 6 => Some(Weekday::Sun), _ => None, } } #[inline] fn from_u64(n: u64) -> Option<Weekday> { match n { 0 => Some(Weekday::Mon), 1 => Some(Weekday::Tue), 2 => Some(Weekday::Wed), 3 => Some(Weekday::Thu), 4 => Some(Weekday::Fri), 5 => Some(Weekday::Sat), 6 => Some(Weekday::Sun), _ => None, } } } use core::fmt; /// An error resulting from reading `Weekday` value with `FromStr`. #[derive(Clone, PartialEq)] pub struct ParseWeekdayError { _dummy: (), } impl fmt::Debug for ParseWeekdayError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "ParseWeekdayError {{ .. }}") } } // the actual `FromStr` implementation is in the `format` module to leverage the existing code #[cfg(feature = "serde")] mod weekday_serde { use super::Weekday; use core::fmt; use serdelib::{ser, de}; impl ser::Serialize for Weekday { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: ser::Serializer { serializer.collect_str(&self) } } struct WeekdayVisitor; impl<'de> de::Visitor<'de> for WeekdayVisitor { type Value = Weekday; fn expecting(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Weekday") } fn visit_str<E>(self, value: &str) -> Result<Self::Value, E> where E: de::Error { value.parse().map_err(|_| E::custom("short or long weekday names expected")) } } impl<'de> de::Deserialize<'de> for Weekday { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: de::Deserializer<'de> { deserializer.deserialize_str(WeekdayVisitor) } } #[cfg(test)] extern crate serde_json; #[test] fn test_serde_serialize() { use self::serde_json::to_string; use Weekday::*; let cases: Vec<(Weekday, &str)> = vec![ (Mon, "\"Mon\""), (Tue, "\"Tue\""), (Wed, "\"Wed\""), (Thu, "\"Thu\""), (Fri, "\"Fri\""), (Sat, 
"\"Sat\""), (Sun, "\"Sun\""), ]; for (weekday, expected_str) in cases { let string = to_string(&weekday).unwrap(); assert_eq!(string, expected_str); } } #[test] fn test_serde_deserialize() { use self::serde_json::from_str; use Weekday::*; let cases: Vec<(&str, Weekday)> = vec![ ("\"mon\"", Mon), ("\"MONDAY\"", Mon), ("\"MonDay\"", Mon), ("\"mOn\"", Mon), ("\"tue\"", Tue), ("\"tuesday\"", Tue), ("\"wed\"", Wed), ("\"wednesday\"", Wed), ("\"thu\"", Thu), ("\"thursday\"", Thu), ("\"fri\"", Fri), ("\"friday\"", Fri), ("\"sat\"", Sat), ("\"saturday\"", Sat), ("\"sun\"", Sun), ("\"sunday\"", Sun), ]; for (str, expected_weekday) in cases { let weekday = from_str::<Weekday>(str).unwrap(); assert_eq!(weekday, expected_weekday); } let errors: Vec<&str> = vec![ "\"not a weekday\"", "\"monDAYs\"", "\"mond\"", "mon", "\"thur\"", "\"thurs\"", ]; for str in errors { from_str::<Weekday>(str).unwrap_err(); } } } /// The common set of methods for date component. pub trait Datelike: Sized { /// Returns the year number in the [calendar date](./naive/struct.NaiveDate.html#calendar-date). fn year(&self) -> i32; /// Returns the absolute year number starting from 1 with a boolean flag, /// which is false when the year predates the epoch (BCE/BC) and true otherwise (CE/AD). #[inline] fn year_ce(&self) -> (bool, u32) { let year = self.year(); if year < 1 { (false, (1 - year) as u32) } else { (true, year as u32) } } /// Returns the month number starting from 1. /// /// The return value ranges from 1 to 12. fn month(&self) -> u32; /// Returns the month number starting from 0. /// /// The return value ranges from 0 to 11. fn month0(&self) -> u32; /// Returns the day of month starting from 1. /// /// The return value ranges from 1 to 31. (The last day of month differs by months.) fn day(&self) -> u32; /// Returns the day of month starting from 0. /// /// The return value ranges from 0 to 30. (The last day of month differs by months.) 
fn day0(&self) -> u32; /// Returns the day of year starting from 1. /// /// The return value ranges from 1 to 366. (The last day of year differs by years.) fn ordinal(&self) -> u32; /// Returns the day of year starting from 0. /// /// The return value ranges from 0 to 365. (The last day of year differs by years.) fn ordinal0(&self) -> u32; /// Returns the day of week. fn weekday(&self) -> Weekday; /// Returns the ISO week. fn iso_week(&self) -> IsoWeek; /// Makes a new value with the year number changed. /// /// Returns `None` when the resulting value would be invalid. fn with_year(&self, year: i32) -> Option<Self>; /// Makes a new value with the month number (starting from 1) changed. /// /// Returns `None` when the resulting value would be invalid. fn with_month(&self, month: u32) -> Option<Self>; /// Makes a new value with the month number (starting from 0) changed. /// /// Returns `None` when the resulting value would be invalid. fn with_month0(&self, month0: u32) -> Option<Self>; /// Makes a new value with the day of month (starting from 1) changed. /// /// Returns `None` when the resulting value would be invalid. fn with_day(&self, day: u32) -> Option<Self>; /// Makes a new value with the day of month (starting from 0) changed. /// /// Returns `None` when the resulting value would be invalid. fn with_day0(&self, day0: u32) -> Option<Self>; /// Makes a new value with the day of year (starting from 1) changed. /// /// Returns `None` when the resulting value would be invalid. fn with_ordinal(&self, ordinal: u32) -> Option<Self>; /// Makes a new value with the day of year (starting from 0) changed. /// /// Returns `None` when the resulting value would be invalid. fn with_ordinal0(&self, ordinal0: u32) -> Option<Self>; /// Counts the days in the proleptic Gregorian calendar, with January 1, Year 1 (CE) as day 1. 
/// /// # Examples /// /// ``` /// use chrono::{NaiveDate, Datelike}; /// /// assert_eq!(NaiveDate::from_ymd(1970, 1, 1).num_days_from_ce(), 719_163); /// assert_eq!(NaiveDate::from_ymd(2, 1, 1).num_days_from_ce(), 366); /// assert_eq!(NaiveDate::from_ymd(1, 1, 1).num_days_from_ce(), 1); /// assert_eq!(NaiveDate::from_ymd(0, 1, 1).num_days_from_ce(), -365); /// ``` fn num_days_from_ce(&self) -> i32 { // See test_num_days_from_ce_against_alternative_impl below for a more straightforward // implementation. // we know this wouldn't overflow since year is limited to 1/2^13 of i32's full range. let mut year = self.year() - 1; let mut ndays = 0; if year < 0 { let excess = 1 + (-year) / 400; year += excess * 400; ndays -= excess * 146_097; } let div_100 = year / 100; ndays += ((year * 1461) >> 2) - div_100 + (div_100 >> 2); ndays + self.ordinal() as i32 } } /// The common set of methods for time component. pub trait Timelike: Sized { /// Returns the hour number from 0 to 23. fn hour(&self) -> u32; /// Returns the hour number from 1 to 12 with a boolean flag, /// which is false for AM and true for PM. #[inline] fn hour12(&self) -> (bool, u32) { let hour = self.hour(); let mut hour12 = hour % 12; if hour12 == 0 { hour12 = 12; } (hour >= 12, hour12) } /// Returns the minute number from 0 to 59. fn minute(&self) -> u32; /// Returns the second number from 0 to 59. fn second(&self) -> u32; /// Returns the number of nanoseconds since the whole non-leap second. /// The range from 1,000,000,000 to 1,999,999,999 represents /// the [leap second](./naive/struct.NaiveTime.html#leap-second-handling). fn nanosecond(&self) -> u32; /// Makes a new value with the hour number changed. /// /// Returns `None` when the resulting value would be invalid. fn with_hour(&self, hour: u32) -> Option<Self>; /// Makes a new value with the minute number changed. /// /// Returns `None` when the resulting value would be invalid. 
fn with_minute(&self, min: u32) -> Option<Self>; /// Makes a new value with the second number changed. /// /// Returns `None` when the resulting value would be invalid. /// As with the [`second`](#tymethod.second) method, /// the input range is restricted to 0 through 59. fn with_second(&self, sec: u32) -> Option<Self>; /// Makes a new value with nanoseconds since the whole non-leap second changed. /// /// Returns `None` when the resulting value would be invalid. /// As with the [`nanosecond`](#tymethod.nanosecond) method, /// the input range can exceed 1,000,000,000 for leap seconds. fn with_nanosecond(&self, nano: u32) -> Option<Self>; /// Returns the number of non-leap seconds past the last midnight. #[inline] fn num_seconds_from_midnight(&self) -> u32 { self.hour() * 3600 + self.minute() * 60 + self.second() } } #[cfg(test)] extern crate num_iter; #[test] fn test_readme_doomsday() { use num_iter::range_inclusive; for y in range_inclusive(naive::MIN_DATE.year(), naive::MAX_DATE.year()) { // even months let d4 = NaiveDate::from_ymd(y, 4, 4); let d6 = NaiveDate::from_ymd(y, 6, 6); let d8 = NaiveDate::from_ymd(y, 8, 8); let d10 = NaiveDate::from_ymd(y, 10, 10); let d12 = NaiveDate::from_ymd(y, 12, 12); // nine to five, seven-eleven let d59 = NaiveDate::from_ymd(y, 5, 9); let d95 = NaiveDate::from_ymd(y, 9, 5); let d711 = NaiveDate::from_ymd(y, 7, 11); let d117 = NaiveDate::from_ymd(y, 11, 7); // "March 0" let d30 = NaiveDate::from_ymd(y, 3, 1).pred(); let weekday = d30.weekday(); let other_dates = [d4, d6, d8, d10, d12, d59, d95, d711, d117]; assert!(other_dates.iter().all(|d| d.weekday() == weekday)); } } /// Tests `Datelike::num_days_from_ce` against an alternative implementation. /// /// The alternative implementation is not as short as the current one but it is simpler to /// understand, with less unexplained magic constants. #[test] fn test_num_days_from_ce_against_alternative_impl() { /// Returns the number of multiples of `div` in the range `start..end`. 
/// /// If the range `start..end` is back-to-front, i.e. `start` is greater than `end`, the /// behaviour is defined by the following equation: /// `in_between(start, end, div) == - in_between(end, start, div)`. /// /// When `div` is 1, this is equivalent to `end - start`, i.e. the length of `start..end`. /// /// # Panics /// /// Panics if `div` is not positive. fn in_between(start: i32, end: i32, div: i32) -> i32 { assert!(div > 0, "in_between: nonpositive div = {}", div); let start = (start.div_euclid(div), start.rem_euclid(div)); let end = ( end.div_euclid(div), end.rem_euclid(div)); // The lowest multiple of `div` greater than or equal to `start`, divided. let start = start.0 + (start.1 != 0) as i32; // The lowest multiple of `div` greater than or equal to `end`, divided. let end = end.0 + ( end.1 != 0) as i32; end - start } /// Alternative implementation to `Datelike::num_days_from_ce` fn num_days_from_ce<Date: Datelike>(date: &Date) -> i32 { let year = date.year(); let diff = move |div| in_between(1, year, div); // 365 days a year, one more in leap years. In the gregorian calendar, leap years are all // the multiples of 4 except multiples of 100 but including multiples of 400. date.ordinal() as i32 + 365 * diff(1) + diff(4) - diff(100) + diff(400) } use num_iter::range_inclusive; for year in range_inclusive(naive::MIN_DATE.year(), naive::MAX_DATE.year()) { let jan1_year = NaiveDate::from_ymd(year, 1, 1); assert_eq!(jan1_year.num_days_from_ce(), num_days_from_ce(&jan1_year), "on {:?}", jan1_year); let mid_year = jan1_year + Duration::days(133); assert_eq!(mid_year.num_days_from_ce(), num_days_from_ce(&mid_year), "on {:?}", mid_year); } }
37.762996
105
0.600173
d6c5b8a592a51791836ed4a62c88f653a4bc07cd
3,537
use crate::core::{AxisDirection, MaterialSide}; use crate::primitives::RaytracingObject; use nalgebra::{Affine3, Point3, Unit, Vector2, Vector3}; pub trait Intersectable { fn intersect(&self, ray: &Ray, max_distance: Option<f64>) -> Option<Intersection>; } #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum RayType { Primary, Secondary(u8), Shadow, } #[derive(Debug)] pub struct Ray { pub ray_type: RayType, pub origin: Point3<f64>, pub direction: Vector3<f64>, pub refractive_index: f64, } impl Ray { pub fn get_depth(&self) -> u8 { match self.ray_type { RayType::Primary => 0, RayType::Secondary(depth) => depth, RayType::Shadow => panic!("shadow rays have no depth"), } } pub fn transform(&self, transform: Affine3<f64>) -> Ray { let origin = transform * self.origin; let direction = transform * self.direction; Ray { ray_type: self.ray_type, origin, direction, refractive_index: self.refractive_index, } } } #[derive(Debug, Copy, Clone)] pub enum IntermediateData { Empty, CubeHitFace(AxisDirection), // Axis pointing to hit face in object space Barycentric(f64, f64, f64), // Barycentric coordinates of hit point } #[derive(Debug)] struct IntersectionData { hit_point: Point3<f64>, normal: Unit<Vector3<f64>>, uv: Vector2<f64>, } #[derive(Debug)] pub struct Intersection<'a> { pub object: &'a dyn RaytracingObject, pub distance: f64, intermediate: IntermediateData, data: Option<IntersectionData>, } impl<'a> Intersection<'a> { pub fn new_with_data( object: &'a dyn RaytracingObject, distance: f64, intermediate: IntermediateData, ) -> Self { Self { object, distance, intermediate, data: None, } } pub fn new(object: &'a dyn RaytracingObject, distance: f64) -> Self { Self::new_with_data(object, distance, IntermediateData::Empty) } pub fn compute_data(&mut self, ray: &Ray) { let transform = self.object.get_transform(); let hit_point = ray.origin + ray.direction * self.distance; let object_hit_point = transform.inverse() * hit_point; let object_normal = self .object 
.surface_normal(&object_hit_point, self.intermediate); let normal = Unit::new_normalize(transform.inverse_transpose() * object_normal.into_inner()); let normal = match self.object.get_material().side() { MaterialSide::Both => { if normal.dot(&ray.direction) > 0.0 { -normal } else { normal } } MaterialSide::Front => normal, MaterialSide::Back => -normal, }; let uv = self .object .uv(&object_hit_point, &object_normal, self.intermediate); self.data = Some(IntersectionData { hit_point, normal, uv, }); } fn get_data(&self) -> &IntersectionData { self.data.as_ref().expect("intersection data not computed") } pub fn get_hit_point(&self) -> Point3<f64> { self.get_data().hit_point } pub fn get_normal(&self) -> Unit<Vector3<f64>> { self.get_data().normal } pub fn get_uv(&self) -> Vector2<f64> { self.get_data().uv } }
26.2
92
0.579587
8ad3b2670bcc6f295844ee3a742b9726a9f83fa9
135
mod client; mod link; mod register_client; mod spec; pub use client::*; pub use link::*; pub use register_client::*; pub use spec::*;
13.5
27
0.696296
efa743e57ab72144d7c988939fdb24d811d6cf61
983
//! Testes da seção de informações sobre o transporte use std::convert::TryFrom; use std::fs::File; use crate::*; #[test] fn from_instance() -> Result<(), String> { let f = File::open("xmls/nfe_layout4.xml").map_err(|e| e.to_string())?; let transporte = Nfe::try_from(f).map_err(|e| e.to_string())?.transporte; assert_eq!(ModalidadeFrete::SemTransporte, transporte.modalidade); Ok(()) } #[test] fn manual() -> Result<(), Error> { let xml = "<transp><modFrete>9</modFrete></transp>"; let transporte = xml.parse::<Transporte>()?; assert_eq!(ModalidadeFrete::SemTransporte, transporte.modalidade); Ok(()) } #[test] fn to_string() -> Result<(), Error> { let mut xml_original = "<transp><modFrete>9</modFrete></transp>".to_string(); xml_original.retain(|c| c != '\n' && c != ' '); let transporte = xml_original.parse::<Transporte>()?; let xml_novo = transporte.to_string(); assert_eq!(xml_original, xml_novo); Ok(()) }
23.97561
81
0.636826
dd974ee21f183292857cafa73f9d5b824f132d28
9,164
use crate::types::{ file::InputFile, photo_size::PhotoSize, primitive::{Float, Integer}, }; use serde::{Deserialize, Serialize}; use serde_json::Error as JsonError; use std::{error::Error as StdError, fmt}; /// The part of the face relative to which the mask should be placed #[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)] #[serde(rename_all = "lowercase")] pub enum MaskPositionPoint { /// “forehead” Forehead, /// “eyes” Eyes, /// “mouth” Mouth, /// “chin” Chin, } /// Position on faces where a mask should be placed by default #[derive(Clone, Copy, Debug, Deserialize, Serialize)] pub struct MaskPosition { /// The part of the face relative /// to which the mask should be placed pub point: MaskPositionPoint, /// Shift by X-axis measured in widths /// of the mask scaled to the face size, /// from left to right. /// For example, choosing -1.0 /// will place mask just /// to the left of the default mask position pub x_shift: Float, /// Shift by Y-axis measured /// in heights of the mask scaled to the face size, /// from top to bottom. /// For example, 1.0 will place /// the mask just below the default mask position pub y_shift: Float, /// Mask scaling coefficient. 
/// For example, 2.0 means double size pub scale: Float, } impl MaskPosition { pub(crate) fn serialize(&self) -> Result<String, MaskPositionError> { serde_json::to_string(self).map_err(MaskPositionError::Serialize) } } /// An error occurred with mask position #[derive(Debug)] pub enum MaskPositionError { /// Failed to serialize mask position Serialize(JsonError), } impl StdError for MaskPositionError { fn source(&self) -> Option<&(dyn StdError + 'static)> { match self { MaskPositionError::Serialize(err) => Some(err), } } } impl fmt::Display for MaskPositionError { fn fmt(&self, out: &mut fmt::Formatter) -> fmt::Result { match self { MaskPositionError::Serialize(err) => write!(out, "can not serialize mask position: {}", err), } } } /// Sticker #[derive(Clone, Debug, Deserialize)] pub struct Sticker { /// Identifier for this file, which can be used to download or reuse the file pub file_id: String, /// Unique identifier for this file /// /// It is supposed to be the same over time and for different bots. /// Can't be used to download or reuse the file. 
pub file_unique_id: String, /// Sticker width pub width: Integer, /// Sticker height pub height: Integer, /// Sticker thumbnail in the .webp or .jpg format pub thumb: Option<PhotoSize>, /// Emoji associated with the sticker pub emoji: Option<String>, /// Name of the sticker set to which the sticker belongs pub set_name: Option<String>, /// For mask stickers, the position where the mask should be placed pub mask_position: Option<MaskPosition>, /// File size pub file_size: Option<Integer>, /// True, if the sticker is animated pub is_animated: bool, } /// Sticker set #[derive(Clone, Debug, Deserialize)] pub struct StickerSet { /// Sticker set name pub name: String, /// Sticker set title pub title: String, /// True, if the sticker set contains masks pub contains_masks: bool, /// List of all set stickers pub stickers: Vec<Sticker>, /// True, if the sticker set contains animated stickers pub is_animated: bool, /// Sticker set thumbnail in the .WEBP or .TGS format pub thumb: Option<PhotoSize>, } /// A new sticker to upload #[derive(Debug)] pub struct NewSticker { pub(crate) kind: NewStickerKind, } #[derive(Debug)] pub(crate) enum NewStickerKind { Png(InputFile), Tgs(InputFile), } impl NewSticker { /// PNG image /// /// Must be up to 512 kilobytes in size, dimensions must not exceed 512px, /// and either width or height must be exactly 512px pub fn png<I>(file: I) -> Self where I: Into<InputFile>, { Self { kind: NewStickerKind::Png(file.into()), } } /// TGS animation /// /// See <https://core.telegram.org/animated_stickers#technical-requirements> /// for technical requirements pub fn tgs<I>(file: I) -> Self where I: Into<InputFile>, { Self { kind: NewStickerKind::Tgs(file.into()), } } } #[cfg(test)] mod tests { #![allow(clippy::float_cmp)] use super::*; #[test] fn deserialize_sticker_full() { let data: Sticker = serde_json::from_value(serde_json::json!({ "file_id": "test file id", "file_unique_id": "unique-id", "width": 512, "height": 512, "thumb": { "file_id": 
"AdddddUuUUUUccccUUmm_PPP", "file_unique_id": "unique-thumb-id", "width": 24, "height": 24, "file_size": 12324 }, "emoji": ":D", "set_name": "sticker set name", "mask_position": { "point": "forehead", "x_shift": 3.0, "y_shift": 2.0, "scale": 3.0, }, "file_size": 1234, "is_animated": false })) .unwrap(); assert_eq!(data.file_id, "test file id"); assert_eq!(data.file_unique_id, "unique-id"); assert_eq!(data.width, 512); assert_eq!(data.height, 512); assert!(!data.is_animated); let thumb = data.thumb.unwrap(); assert_eq!(thumb.file_id, "AdddddUuUUUUccccUUmm_PPP"); assert_eq!(thumb.file_unique_id, "unique-thumb-id"); assert_eq!(thumb.width, 24); assert_eq!(thumb.height, 24); assert_eq!(thumb.file_size.unwrap(), 12324); assert_eq!(data.emoji.unwrap(), ":D"); assert_eq!(data.set_name.unwrap(), "sticker set name"); let mask_position = data.mask_position.unwrap(); assert_eq!(mask_position.point, MaskPositionPoint::Forehead); assert_eq!(mask_position.x_shift, 3.0); assert_eq!(mask_position.y_shift, 2.0); assert_eq!(mask_position.scale, 3.0); assert_eq!(data.file_size.unwrap(), 1234); } #[test] fn deserialize_sticker_partial() { let data: Sticker = serde_json::from_value(serde_json::json!({ "file_id": "test file id", "file_unique_id": "unique-id", "width": 512, "height": 512, "is_animated": true })) .unwrap(); assert_eq!(data.file_id, "test file id"); assert_eq!(data.file_unique_id, "unique-id"); assert_eq!(data.width, 512); assert_eq!(data.height, 512); assert!(data.is_animated); assert!(data.thumb.is_none()); assert!(data.emoji.is_none()); assert!(data.set_name.is_none()); assert!(data.file_size.is_none()); } #[test] fn mask_position_point() { assert_eq!( serde_json::to_string(&MaskPositionPoint::Forehead).unwrap(), r#""forehead""# ); assert_eq!(serde_json::to_string(&MaskPositionPoint::Eyes).unwrap(), r#""eyes""#); assert_eq!(serde_json::to_string(&MaskPositionPoint::Mouth).unwrap(), r#""mouth""#); assert_eq!(serde_json::to_string(&MaskPositionPoint::Chin).unwrap(), 
r#""chin""#); assert_eq!( serde_json::from_str::<MaskPositionPoint>(r#""forehead""#).unwrap(), MaskPositionPoint::Forehead ); assert_eq!( serde_json::from_str::<MaskPositionPoint>(r#""eyes""#).unwrap(), MaskPositionPoint::Eyes ); assert_eq!( serde_json::from_str::<MaskPositionPoint>(r#""mouth""#).unwrap(), MaskPositionPoint::Mouth ); assert_eq!( serde_json::from_str::<MaskPositionPoint>(r#""chin""#).unwrap(), MaskPositionPoint::Chin ); } #[test] fn deserialize_sticker_set() { let data: StickerSet = serde_json::from_value(serde_json::json!({ "name": "test", "title": "test", "contains_masks": false, "stickers": [], "is_animated": false, "thumb": { "file_id": "thumb-file-id", "file_unique_id": "thumb-file-unique-id", "width": 512, "height": 512, "file_size": 2048, } })) .unwrap(); assert_eq!(data.name, "test"); assert_eq!(data.title, "test"); assert!(!data.is_animated); assert!(!data.contains_masks); assert!(data.stickers.is_empty()); let thumb = data.thumb.unwrap(); assert_eq!(thumb.file_id, "thumb-file-id"); assert_eq!(thumb.file_unique_id, "thumb-file-unique-id"); assert_eq!(thumb.width, 512); assert_eq!(thumb.height, 512); assert_eq!(thumb.file_size.unwrap(), 2048); } }
30.546667
105
0.583806
d545e1efe6791312d29d03faebdb93f8c1635239
12,636
use crate::{ dpi::{PhysicalPosition, Size}, event::ModifiersState, icon::Icon, platform_impl::platform::{event_loop, util}, window::{CursorIcon, Fullscreen, Theme, WindowAttributes}, }; use parking_lot::MutexGuard; use std::io; use windows_sys::Win32::{ Foundation::{HWND, RECT}, Graphics::Gdi::InvalidateRgn, UI::WindowsAndMessaging::{ SendMessageW, SetWindowLongW, SetWindowPos, ShowWindow, GWL_EXSTYLE, GWL_STYLE, HWND_NOTOPMOST, HWND_TOPMOST, SWP_ASYNCWINDOWPOS, SWP_FRAMECHANGED, SWP_NOACTIVATE, SWP_NOMOVE, SWP_NOSIZE, SWP_NOZORDER, SW_HIDE, SW_MAXIMIZE, SW_MINIMIZE, SW_RESTORE, SW_SHOW, WINDOWPLACEMENT, WINDOW_EX_STYLE, WINDOW_STYLE, WS_BORDER, WS_CAPTION, WS_CHILD, WS_CLIPCHILDREN, WS_CLIPSIBLINGS, WS_EX_ACCEPTFILES, WS_EX_APPWINDOW, WS_EX_LEFT, WS_EX_NOREDIRECTIONBITMAP, WS_EX_TOPMOST, WS_EX_WINDOWEDGE, WS_MAXIMIZE, WS_MAXIMIZEBOX, WS_MINIMIZE, WS_MINIMIZEBOX, WS_OVERLAPPED, WS_OVERLAPPEDWINDOW, WS_POPUP, WS_SIZEBOX, WS_SYSMENU, WS_VISIBLE, }, }; /// Contains information about states and the window that the callback is going to use. pub struct WindowState { pub mouse: MouseProperties, /// Used by `WM_GETMINMAXINFO`. pub min_size: Option<Size>, pub max_size: Option<Size>, pub window_icon: Option<Icon>, pub taskbar_icon: Option<Icon>, pub saved_window: Option<SavedWindow>, pub scale_factor: f64, pub modifiers_state: ModifiersState, pub fullscreen: Option<Fullscreen>, pub current_theme: Theme, pub preferred_theme: Option<Theme>, pub high_surrogate: Option<u16>, pub window_flags: WindowFlags, } #[derive(Clone)] pub struct SavedWindow { pub placement: WINDOWPLACEMENT, } #[derive(Clone)] pub struct MouseProperties { pub cursor: CursorIcon, pub capture_count: u32, cursor_flags: CursorFlags, pub last_position: Option<PhysicalPosition<f64>>, } bitflags! { pub struct CursorFlags: u8 { const GRABBED = 1 << 0; const HIDDEN = 1 << 1; const IN_WINDOW = 1 << 2; } } bitflags! 
{ pub struct WindowFlags: u32 { const RESIZABLE = 1 << 0; const DECORATIONS = 1 << 1; const VISIBLE = 1 << 2; const ON_TASKBAR = 1 << 3; const ALWAYS_ON_TOP = 1 << 4; const NO_BACK_BUFFER = 1 << 5; const TRANSPARENT = 1 << 6; const CHILD = 1 << 7; const MAXIMIZED = 1 << 8; const POPUP = 1 << 14; /// Marker flag for fullscreen. Should always match `WindowState::fullscreen`, but is /// included here to make masking easier. const MARKER_EXCLUSIVE_FULLSCREEN = 1 << 9; const MARKER_BORDERLESS_FULLSCREEN = 1 << 13; /// The `WM_SIZE` event contains some parameters that can effect the state of `WindowFlags`. /// In most cases, it's okay to let those parameters change the state. However, when we're /// running the `WindowFlags::apply_diff` function, we *don't* want those parameters to /// effect our stored state, because the purpose of `apply_diff` is to update the actual /// window's state to match our stored state. This controls whether to accept those changes. const MARKER_RETAIN_STATE_ON_SIZE = 1 << 10; const MARKER_IN_SIZE_MOVE = 1 << 11; const MINIMIZED = 1 << 12; const EXCLUSIVE_FULLSCREEN_OR_MASK = WindowFlags::ALWAYS_ON_TOP.bits; const NO_DECORATIONS_AND_MASK = !WindowFlags::RESIZABLE.bits; const INVISIBLE_AND_MASK = !WindowFlags::MAXIMIZED.bits; } } impl WindowState { pub fn new( attributes: &WindowAttributes, taskbar_icon: Option<Icon>, scale_factor: f64, current_theme: Theme, preferred_theme: Option<Theme>, ) -> WindowState { WindowState { mouse: MouseProperties { cursor: CursorIcon::default(), capture_count: 0, cursor_flags: CursorFlags::empty(), last_position: None, }, min_size: attributes.min_inner_size, max_size: attributes.max_inner_size, window_icon: attributes.window_icon.clone(), taskbar_icon, saved_window: None, scale_factor, modifiers_state: ModifiersState::default(), fullscreen: None, current_theme, preferred_theme, high_surrogate: None, window_flags: WindowFlags::empty(), } } pub fn window_flags(&self) -> WindowFlags { self.window_flags } pub fn 
set_window_flags<F>(mut this: MutexGuard<'_, Self>, window: HWND, f: F) where F: FnOnce(&mut WindowFlags), { let old_flags = this.window_flags; f(&mut this.window_flags); let new_flags = this.window_flags; drop(this); old_flags.apply_diff(window, new_flags); } pub fn set_window_flags_in_place<F>(&mut self, f: F) where F: FnOnce(&mut WindowFlags), { f(&mut self.window_flags); } } impl MouseProperties { pub fn cursor_flags(&self) -> CursorFlags { self.cursor_flags } pub fn set_cursor_flags<F>(&mut self, window: HWND, f: F) -> Result<(), io::Error> where F: FnOnce(&mut CursorFlags), { let old_flags = self.cursor_flags; f(&mut self.cursor_flags); match self.cursor_flags.refresh_os_cursor(window) { Ok(()) => (), Err(e) => { self.cursor_flags = old_flags; return Err(e); } } Ok(()) } } impl WindowFlags { fn mask(mut self) -> WindowFlags { if self.contains(WindowFlags::MARKER_EXCLUSIVE_FULLSCREEN) { self |= WindowFlags::EXCLUSIVE_FULLSCREEN_OR_MASK; } if !self.contains(WindowFlags::VISIBLE) { self &= WindowFlags::INVISIBLE_AND_MASK; } if !self.contains(WindowFlags::DECORATIONS) { self &= WindowFlags::NO_DECORATIONS_AND_MASK; } self } pub fn to_window_styles(self) -> (WINDOW_STYLE, WINDOW_EX_STYLE) { let (mut style, mut style_ex) = (WS_OVERLAPPED, WS_EX_LEFT); if self.contains(WindowFlags::RESIZABLE) { style |= WS_SIZEBOX | WS_MAXIMIZEBOX; } if self.contains(WindowFlags::DECORATIONS) { style |= WS_CAPTION | WS_MINIMIZEBOX | WS_BORDER; style_ex = WS_EX_WINDOWEDGE; } if self.contains(WindowFlags::VISIBLE) { style |= WS_VISIBLE; } if self.contains(WindowFlags::ON_TASKBAR) { style_ex |= WS_EX_APPWINDOW; } if self.contains(WindowFlags::ALWAYS_ON_TOP) { style_ex |= WS_EX_TOPMOST; } if self.contains(WindowFlags::NO_BACK_BUFFER) { style_ex |= WS_EX_NOREDIRECTIONBITMAP; } if self.contains(WindowFlags::CHILD) { style |= WS_CHILD; // This is incompatible with WS_POPUP if that gets added eventually. 
} if self.contains(WindowFlags::POPUP) { style |= WS_POPUP; } if self.contains(WindowFlags::MINIMIZED) { style |= WS_MINIMIZE; } if self.contains(WindowFlags::MAXIMIZED) { style |= WS_MAXIMIZE; } style |= WS_CLIPSIBLINGS | WS_CLIPCHILDREN | WS_SYSMENU; style_ex |= WS_EX_ACCEPTFILES; if self.intersects( WindowFlags::MARKER_EXCLUSIVE_FULLSCREEN | WindowFlags::MARKER_BORDERLESS_FULLSCREEN, ) { style &= !WS_OVERLAPPEDWINDOW; } (style, style_ex) } /// Adjust the window client rectangle to the return value, if present. fn apply_diff(mut self, window: HWND, mut new: WindowFlags) { self = self.mask(); new = new.mask(); let diff = self ^ new; if diff == WindowFlags::empty() { return; } if diff.contains(WindowFlags::VISIBLE) { unsafe { ShowWindow( window, match new.contains(WindowFlags::VISIBLE) { true => SW_SHOW, false => SW_HIDE, }, ); } } if diff.contains(WindowFlags::ALWAYS_ON_TOP) { unsafe { SetWindowPos( window, match new.contains(WindowFlags::ALWAYS_ON_TOP) { true => HWND_TOPMOST, false => HWND_NOTOPMOST, }, 0, 0, 0, 0, SWP_ASYNCWINDOWPOS | SWP_NOMOVE | SWP_NOSIZE | SWP_NOACTIVATE, ); InvalidateRgn(window, 0, false.into()); } } if diff.contains(WindowFlags::MAXIMIZED) || new.contains(WindowFlags::MAXIMIZED) { unsafe { ShowWindow( window, match new.contains(WindowFlags::MAXIMIZED) { true => SW_MAXIMIZE, false => SW_RESTORE, }, ); } } // Minimize operations should execute after maximize for proper window animations if diff.contains(WindowFlags::MINIMIZED) { unsafe { ShowWindow( window, match new.contains(WindowFlags::MINIMIZED) { true => SW_MINIMIZE, false => SW_RESTORE, }, ); } } if diff != WindowFlags::empty() { let (style, style_ex) = new.to_window_styles(); unsafe { SendMessageW(window, *event_loop::SET_RETAIN_STATE_ON_SIZE_MSG_ID, 1, 0); // This condition is necessary to avoid having an unrestorable window if !new.contains(WindowFlags::MINIMIZED) { SetWindowLongW(window, GWL_STYLE, style as i32); SetWindowLongW(window, GWL_EXSTYLE, style_ex as i32); } let mut flags 
= SWP_NOZORDER | SWP_NOMOVE | SWP_NOSIZE | SWP_FRAMECHANGED; // We generally don't want style changes here to affect window // focus, but for fullscreen windows they must be activated // (i.e. focused) so that they appear on top of the taskbar if !new.contains(WindowFlags::MARKER_EXCLUSIVE_FULLSCREEN) && !new.contains(WindowFlags::MARKER_BORDERLESS_FULLSCREEN) { flags |= SWP_NOACTIVATE; } // Refresh the window frame SetWindowPos(window, 0, 0, 0, 0, 0, flags); SendMessageW(window, *event_loop::SET_RETAIN_STATE_ON_SIZE_MSG_ID, 0, 0); } } } } impl CursorFlags { fn refresh_os_cursor(self, window: HWND) -> Result<(), io::Error> { let client_rect = util::get_client_rect(window)?; if util::is_focused(window) { let cursor_clip = match self.contains(CursorFlags::GRABBED) { true => Some(client_rect), false => None, }; let rect_to_tuple = |rect: RECT| (rect.left, rect.top, rect.right, rect.bottom); let active_cursor_clip = rect_to_tuple(util::get_cursor_clip()?); let desktop_rect = rect_to_tuple(util::get_desktop_rect()); let active_cursor_clip = match desktop_rect == active_cursor_clip { true => None, false => Some(active_cursor_clip), }; // We do this check because calling `set_cursor_clip` incessantly will flood the event // loop with `WM_MOUSEMOVE` events, and `refresh_os_cursor` is called by `set_cursor_flags` // which at times gets called once every iteration of the eventloop. if active_cursor_clip != cursor_clip.map(rect_to_tuple) { util::set_cursor_clip(cursor_clip)?; } } let cursor_in_client = self.contains(CursorFlags::IN_WINDOW); if cursor_in_client { util::set_cursor_hidden(self.contains(CursorFlags::HIDDEN)); } else { util::set_cursor_hidden(false); } Ok(()) } }
33.606383
103
0.571937
911348ccad2a4b9c933ffac69f246030a53c83a3
16,832
#[doc = "Reader of register INTENSET"] pub type R = crate::R<u32, super::INTENSET>; #[doc = "Writer for register INTENSET"] pub type W = crate::W<u32, super::INTENSET>; #[doc = "Register INTENSET `reset()`'s with value 0"] impl crate::ResetValue for super::INTENSET { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0 } } #[doc = "Write '1' to Enable interrupt for STOPPED event\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum STOPPED_A { #[doc = "0: Read: Disabled"] DISABLED, #[doc = "1: Read: Enabled"] ENABLED, } impl From<STOPPED_A> for bool { #[inline(always)] fn from(variant: STOPPED_A) -> Self { match variant { STOPPED_A::DISABLED => false, STOPPED_A::ENABLED => true, } } } #[doc = "Reader of field `STOPPED`"] pub type STOPPED_R = crate::R<bool, STOPPED_A>; impl STOPPED_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> STOPPED_A { match self.bits { false => STOPPED_A::DISABLED, true => STOPPED_A::ENABLED, } } #[doc = "Checks if the value of the field is `DISABLED`"] #[inline(always)] pub fn is_disabled(&self) -> bool { *self == STOPPED_A::DISABLED } #[doc = "Checks if the value of the field is `ENABLED`"] #[inline(always)] pub fn is_enabled(&self) -> bool { *self == STOPPED_A::ENABLED } } #[doc = "Write '1' to Enable interrupt for STOPPED event\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum STOPPED_AW { #[doc = "1: Enable"] SET, } impl From<STOPPED_AW> for bool { #[inline(always)] fn from(variant: STOPPED_AW) -> Self { match variant { STOPPED_AW::SET => true, } } } #[doc = "Write proxy for field `STOPPED`"] pub struct STOPPED_W<'a> { w: &'a mut W, } impl<'a> STOPPED_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: STOPPED_AW) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "Enable"] #[inline(always)] pub fn set(self) -> &'a mut W { self.variant(STOPPED_AW::SET) } #[doc = r"Sets the field bit"] 
#[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u32) & 0x01) << 1); self.w } } #[doc = "Write '1' to Enable interrupt for RXDREADY event\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum RXDREADY_A { #[doc = "0: Read: Disabled"] DISABLED, #[doc = "1: Read: Enabled"] ENABLED, } impl From<RXDREADY_A> for bool { #[inline(always)] fn from(variant: RXDREADY_A) -> Self { match variant { RXDREADY_A::DISABLED => false, RXDREADY_A::ENABLED => true, } } } #[doc = "Reader of field `RXDREADY`"] pub type RXDREADY_R = crate::R<bool, RXDREADY_A>; impl RXDREADY_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> RXDREADY_A { match self.bits { false => RXDREADY_A::DISABLED, true => RXDREADY_A::ENABLED, } } #[doc = "Checks if the value of the field is `DISABLED`"] #[inline(always)] pub fn is_disabled(&self) -> bool { *self == RXDREADY_A::DISABLED } #[doc = "Checks if the value of the field is `ENABLED`"] #[inline(always)] pub fn is_enabled(&self) -> bool { *self == RXDREADY_A::ENABLED } } #[doc = "Write '1' to Enable interrupt for RXDREADY event\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum RXDREADY_AW { #[doc = "1: Enable"] SET, } impl From<RXDREADY_AW> for bool { #[inline(always)] fn from(variant: RXDREADY_AW) -> Self { match variant { RXDREADY_AW::SET => true, } } } #[doc = "Write proxy for field `RXDREADY`"] pub struct RXDREADY_W<'a> { w: &'a mut W, } impl<'a> RXDREADY_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: RXDREADY_AW) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "Enable"] #[inline(always)] pub fn set(self) -> &'a mut W { 
self.variant(RXDREADY_AW::SET) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u32) & 0x01) << 2); self.w } } #[doc = "Write '1' to Enable interrupt for TXDSENT event\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum TXDSENT_A { #[doc = "0: Read: Disabled"] DISABLED, #[doc = "1: Read: Enabled"] ENABLED, } impl From<TXDSENT_A> for bool { #[inline(always)] fn from(variant: TXDSENT_A) -> Self { match variant { TXDSENT_A::DISABLED => false, TXDSENT_A::ENABLED => true, } } } #[doc = "Reader of field `TXDSENT`"] pub type TXDSENT_R = crate::R<bool, TXDSENT_A>; impl TXDSENT_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> TXDSENT_A { match self.bits { false => TXDSENT_A::DISABLED, true => TXDSENT_A::ENABLED, } } #[doc = "Checks if the value of the field is `DISABLED`"] #[inline(always)] pub fn is_disabled(&self) -> bool { *self == TXDSENT_A::DISABLED } #[doc = "Checks if the value of the field is `ENABLED`"] #[inline(always)] pub fn is_enabled(&self) -> bool { *self == TXDSENT_A::ENABLED } } #[doc = "Write '1' to Enable interrupt for TXDSENT event\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum TXDSENT_AW { #[doc = "1: Enable"] SET, } impl From<TXDSENT_AW> for bool { #[inline(always)] fn from(variant: TXDSENT_AW) -> Self { match variant { TXDSENT_AW::SET => true, } } } #[doc = "Write proxy for field `TXDSENT`"] pub struct TXDSENT_W<'a> { w: &'a mut W, } impl<'a> TXDSENT_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: TXDSENT_AW) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "Enable"] #[inline(always)] pub fn 
set(self) -> &'a mut W { self.variant(TXDSENT_AW::SET) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 7)) | (((value as u32) & 0x01) << 7); self.w } } #[doc = "Write '1' to Enable interrupt for ERROR event\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum ERROR_A { #[doc = "0: Read: Disabled"] DISABLED, #[doc = "1: Read: Enabled"] ENABLED, } impl From<ERROR_A> for bool { #[inline(always)] fn from(variant: ERROR_A) -> Self { match variant { ERROR_A::DISABLED => false, ERROR_A::ENABLED => true, } } } #[doc = "Reader of field `ERROR`"] pub type ERROR_R = crate::R<bool, ERROR_A>; impl ERROR_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> ERROR_A { match self.bits { false => ERROR_A::DISABLED, true => ERROR_A::ENABLED, } } #[doc = "Checks if the value of the field is `DISABLED`"] #[inline(always)] pub fn is_disabled(&self) -> bool { *self == ERROR_A::DISABLED } #[doc = "Checks if the value of the field is `ENABLED`"] #[inline(always)] pub fn is_enabled(&self) -> bool { *self == ERROR_A::ENABLED } } #[doc = "Write '1' to Enable interrupt for ERROR event\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum ERROR_AW { #[doc = "1: Enable"] SET, } impl From<ERROR_AW> for bool { #[inline(always)] fn from(variant: ERROR_AW) -> Self { match variant { ERROR_AW::SET => true, } } } #[doc = "Write proxy for field `ERROR`"] pub struct ERROR_W<'a> { w: &'a mut W, } impl<'a> ERROR_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: ERROR_AW) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "Enable"] #[inline(always)] pub fn set(self) -> &'a mut W { 
self.variant(ERROR_AW::SET) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 9)) | (((value as u32) & 0x01) << 9); self.w } } #[doc = "Write '1' to Enable interrupt for BB event\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum BB_A { #[doc = "0: Read: Disabled"] DISABLED, #[doc = "1: Read: Enabled"] ENABLED, } impl From<BB_A> for bool { #[inline(always)] fn from(variant: BB_A) -> Self { match variant { BB_A::DISABLED => false, BB_A::ENABLED => true, } } } #[doc = "Reader of field `BB`"] pub type BB_R = crate::R<bool, BB_A>; impl BB_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> BB_A { match self.bits { false => BB_A::DISABLED, true => BB_A::ENABLED, } } #[doc = "Checks if the value of the field is `DISABLED`"] #[inline(always)] pub fn is_disabled(&self) -> bool { *self == BB_A::DISABLED } #[doc = "Checks if the value of the field is `ENABLED`"] #[inline(always)] pub fn is_enabled(&self) -> bool { *self == BB_A::ENABLED } } #[doc = "Write '1' to Enable interrupt for BB event\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum BB_AW { #[doc = "1: Enable"] SET, } impl From<BB_AW> for bool { #[inline(always)] fn from(variant: BB_AW) -> Self { match variant { BB_AW::SET => true, } } } #[doc = "Write proxy for field `BB`"] pub struct BB_W<'a> { w: &'a mut W, } impl<'a> BB_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: BB_AW) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "Enable"] #[inline(always)] pub fn set(self) -> &'a mut W { self.variant(BB_AW::SET) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> 
&'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 14)) | (((value as u32) & 0x01) << 14); self.w } } #[doc = "Write '1' to Enable interrupt for SUSPENDED event\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum SUSPENDED_A { #[doc = "0: Read: Disabled"] DISABLED, #[doc = "1: Read: Enabled"] ENABLED, } impl From<SUSPENDED_A> for bool { #[inline(always)] fn from(variant: SUSPENDED_A) -> Self { match variant { SUSPENDED_A::DISABLED => false, SUSPENDED_A::ENABLED => true, } } } #[doc = "Reader of field `SUSPENDED`"] pub type SUSPENDED_R = crate::R<bool, SUSPENDED_A>; impl SUSPENDED_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> SUSPENDED_A { match self.bits { false => SUSPENDED_A::DISABLED, true => SUSPENDED_A::ENABLED, } } #[doc = "Checks if the value of the field is `DISABLED`"] #[inline(always)] pub fn is_disabled(&self) -> bool { *self == SUSPENDED_A::DISABLED } #[doc = "Checks if the value of the field is `ENABLED`"] #[inline(always)] pub fn is_enabled(&self) -> bool { *self == SUSPENDED_A::ENABLED } } #[doc = "Write '1' to Enable interrupt for SUSPENDED event\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum SUSPENDED_AW { #[doc = "1: Enable"] SET, } impl From<SUSPENDED_AW> for bool { #[inline(always)] fn from(variant: SUSPENDED_AW) -> Self { match variant { SUSPENDED_AW::SET => true, } } } #[doc = "Write proxy for field `SUSPENDED`"] pub struct SUSPENDED_W<'a> { w: &'a mut W, } impl<'a> SUSPENDED_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: SUSPENDED_AW) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "Enable"] #[inline(always)] pub fn set(self) -> &'a mut W { self.variant(SUSPENDED_AW::SET) } 
#[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 18)) | (((value as u32) & 0x01) << 18); self.w } } impl R { #[doc = "Bit 1 - Write '1' to Enable interrupt for STOPPED event"] #[inline(always)] pub fn stopped(&self) -> STOPPED_R { STOPPED_R::new(((self.bits >> 1) & 0x01) != 0) } #[doc = "Bit 2 - Write '1' to Enable interrupt for RXDREADY event"] #[inline(always)] pub fn rxdready(&self) -> RXDREADY_R { RXDREADY_R::new(((self.bits >> 2) & 0x01) != 0) } #[doc = "Bit 7 - Write '1' to Enable interrupt for TXDSENT event"] #[inline(always)] pub fn txdsent(&self) -> TXDSENT_R { TXDSENT_R::new(((self.bits >> 7) & 0x01) != 0) } #[doc = "Bit 9 - Write '1' to Enable interrupt for ERROR event"] #[inline(always)] pub fn error(&self) -> ERROR_R { ERROR_R::new(((self.bits >> 9) & 0x01) != 0) } #[doc = "Bit 14 - Write '1' to Enable interrupt for BB event"] #[inline(always)] pub fn bb(&self) -> BB_R { BB_R::new(((self.bits >> 14) & 0x01) != 0) } #[doc = "Bit 18 - Write '1' to Enable interrupt for SUSPENDED event"] #[inline(always)] pub fn suspended(&self) -> SUSPENDED_R { SUSPENDED_R::new(((self.bits >> 18) & 0x01) != 0) } } impl W { #[doc = "Bit 1 - Write '1' to Enable interrupt for STOPPED event"] #[inline(always)] pub fn stopped(&mut self) -> STOPPED_W { STOPPED_W { w: self } } #[doc = "Bit 2 - Write '1' to Enable interrupt for RXDREADY event"] #[inline(always)] pub fn rxdready(&mut self) -> RXDREADY_W { RXDREADY_W { w: self } } #[doc = "Bit 7 - Write '1' to Enable interrupt for TXDSENT event"] #[inline(always)] pub fn txdsent(&mut self) -> TXDSENT_W { TXDSENT_W { w: self } } #[doc = "Bit 9 - Write '1' to Enable interrupt for ERROR event"] #[inline(always)] pub fn error(&mut self) 
-> ERROR_W { ERROR_W { w: self } } #[doc = "Bit 14 - Write '1' to Enable interrupt for BB event"] #[inline(always)] pub fn bb(&mut self) -> BB_W { BB_W { w: self } } #[doc = "Bit 18 - Write '1' to Enable interrupt for SUSPENDED event"] #[inline(always)] pub fn suspended(&mut self) -> SUSPENDED_W { SUSPENDED_W { w: self } } }
28.100167
86
0.550261
0af53ce21476266f7efcdaf72eaa3cc46dc1929c
4,007
use super::bit_depth::BitDepth; use super::image::BitMap; use super::rgba::Rgba; pub struct RgbQuad { data: Vec<Rgba>, } /// /// Used for constants /// impl RgbQuad { pub fn single_rgb_quad_size() -> usize { 4 } } /// /// Core implementation /// impl RgbQuad { /// /// From a from_slice of bytes, read in a list of colors used to render the /// bitmap image /// pub fn from_slice(bit_stream: &[u8]) -> Result<RgbQuad, &'static str> { if bit_stream.len() == 0 { return Ok(RgbQuad::empty()); } let mut data = Vec::new(); if bit_stream.len() % 4 != 0 { return Err("Not enough data to parse Rgb quad colors"); } let colors_used = bit_stream.len() / 4; for index in 0..colors_used { let i: usize = index * 4; data.push(Rgba::bgra( bit_stream[i], bit_stream[i + 1], bit_stream[i + 2], bit_stream[i + 3], )); } Ok(RgbQuad { data }) } /// /// From a bitmap, create a list of unique colors that are used to create /// the bitmap /// pub fn from(bitmap: &BitMap, bit_depth: BitDepth) -> RgbQuad { match bit_depth { BitDepth::Color2Bit | BitDepth::Color16Bit | BitDepth::Color256Bit => RgbQuad { data: bitmap.get_all_unique_colors(), }, _ => RgbQuad::empty(), } } /// /// Create a empty rgb quad /// fn empty() -> RgbQuad { RgbQuad { data: Vec::new() } } pub fn get_bytes_size(&self) -> u32 { 4 * self.data.len() as u32 } pub fn as_bytes(&self) -> Vec<u8> { let mut bytes = Vec::new(); for rgb in &self.data { bytes.push(rgb.get_blue()); bytes.push(rgb.get_green()); bytes.push(rgb.get_red()); bytes.push(rgb.get_alpha()); } bytes } /// /// Clone the colors /// pub fn clone_colors(&self) -> Vec<Rgba> { self.data.clone() } /// /// Get the number of colors stored in RgbQuad /// pub fn len(&self) -> usize { self.data.len() } } #[cfg(debug_assertions)] impl std::fmt::Display for RgbQuad { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { for c in &self.data { write!(f, "{}\n", c).unwrap(); } write!(f, "") } } #[cfg(test)] mod test { use super::BitDepth; use super::BitMap; use 
super::RgbQuad; use super::Rgba; #[test] fn rgb_quad_byte_size() { let q = RgbQuad::empty(); assert_eq!(q.get_bytes_size(), 0); } #[test] fn rgb_quad_clone_colors() { let q = RgbQuad::empty(); assert_eq!(q.clone_colors().len(), 0); } #[test] fn rgb_quad_colors_length() { let q = RgbQuad::empty(); assert_eq!(q.len(), 0); } #[test] fn crating_a_rgb_quad_from_bitmap() { let mut b = BitMap::new(2, 2); let quad = RgbQuad::from(&b, BitDepth::Color2Bit); assert_eq!(quad.as_bytes().len(), quad.get_bytes_size() as usize); b.set_pixel(0, 0, Rgba::rgb(255, 0, 0)).unwrap(); b.set_pixel(1, 0, Rgba::rgb(0, 0, 255)).unwrap(); b.set_pixel(0, 1, Rgba::black()).unwrap(); let quad = RgbQuad::from(&b, BitDepth::Color16Bit); assert_eq!(quad.as_bytes().len(), quad.get_bytes_size() as usize); b.resize_by(20.0).unwrap(); let quad = RgbQuad::from(&b, BitDepth::AllColors); assert_eq!(quad.as_bytes().len(), quad.get_bytes_size() as usize); let quad = RgbQuad::from(&b, BitDepth::AllColorsAndShades); assert_eq!(quad.as_bytes().len(), quad.get_bytes_size() as usize); b.color_to_gray(); let quad = RgbQuad::from(&b, BitDepth::Color256Bit); assert_eq!(quad.as_bytes().len(), quad.get_bytes_size() as usize); } }
25.360759
91
0.528076
50df5618e061e6f4197665645cd76d79fa8d8cd5
10,184
use std::{collections::HashMap, fmt::Write}; use krpc_proto::{ r#type::TypeCode, Class, Enumeration, EnumerationValue, Error, Procedure, Services, Type, }; use protobuf_but_worse::encoding::EncodingResult; mod class; mod connection; mod control; mod vessel; pub use connection::KrpcConnection; pub use control::Control; pub use vessel::Vessel; type CallResult<T = ()> = EncodingResult<Result<T, Error>>; fn clean_doc(doc: &Option<String>) -> String { let mut doc = doc.as_deref().unwrap_or_default().to_string(); doc = doc.replace("<doc>", "").replace("</doc>", ""); doc = doc.replace("<summary>", "").replace("</summary>", ""); doc = doc .replace("<returns>", "# Returns\n\n") .replace("</returns>", "\n"); doc = doc .replace("<remarks>", "# Remarks\n") .replace("</remarks>", "\n"); doc = doc .replace("<item><description>", "- ") .replace("</description></item>", "\n"); doc = doc .replace("<list type=\"bullet\">", "") .replace("</list>", ""); doc = doc.replace("<see cref=\"", "").replace("\" />", ""); doc = doc.replace("<c>", "`").replace("</c>", "`"); let doc = doc.trim(); let mut res = String::new(); for doc_line in doc.lines() { writeln!(res, "/// {}", doc_line).unwrap(); } res } fn print_class(class: &Class) -> String { let mut res = String::new(); res += &clean_doc(&class.documentation); writeln!(res, "struct {};", class.name.as_ref().unwrap()).unwrap(); res } fn print_enumeration_value(value: &EnumerationValue) -> String { let mut res = String::new(); res += &clean_doc(&value.documentation); let name = value.name.as_deref().unwrap(); let value = value.value.unwrap_or(0); writeln!(res, "{} = {},", name, value).unwrap(); res } fn print_enumeration(enumeration: &Enumeration) -> String { let mut res = String::new(); res += &clean_doc(&enumeration.documentation); writeln!(res, "enum {} {{", enumeration.name.as_ref().unwrap()).unwrap(); for value in &enumeration.values { for line in print_enumeration_value(value).lines() { writeln!(res, " {}", line).unwrap(); } } 
writeln!(res, "}}").unwrap(); res } fn print_type(r#type: &Type) -> String { let mut res = String::new(); let code = r#type.code.as_ref().unwrap(); match code { krpc_proto::r#type::TypeCode::Tuple => { assert!(r#type.types.len() >= 1); write!(res, "(").unwrap(); for t in &r#type.types { write!(res, "{}, ", print_type(&t)).unwrap(); } res = res.trim_end().to_string(); write!(res, ")").unwrap(); } krpc_proto::r#type::TypeCode::List => { assert_eq!(r#type.types.len(), 1); write!(res, "List<").unwrap(); let t = &r#type.types[0]; write!(res, "{}", print_type(t)).unwrap(); write!(res, ">").unwrap(); } krpc_proto::r#type::TypeCode::Set => { assert_eq!(r#type.types.len(), 1); write!(res, "Set<").unwrap(); let t = &r#type.types[0]; write!(res, "{}", print_type(t)).unwrap(); write!(res, ">").unwrap(); } krpc_proto::r#type::TypeCode::Dictionary => { assert_eq!(r#type.types.len(), 2); write!(res, "Dictionary<").unwrap(); let k = &r#type.types[0]; let v = &r#type.types[1]; write!(res, "{}, {}", print_type(k), print_type(v)).unwrap(); write!(res, ">").unwrap(); } c if !r#type.types.is_empty() => panic!("{:?} has sub-types", c), c => write!(res, "{:?}", c).unwrap(), } res } fn print_procedure(procedure: &Procedure) -> String { let mut res = String::new(); res += &clean_doc(&procedure.documentation); let mut name = procedure.name.as_deref().unwrap(); writeln!(res, "/// real name: {}", name).unwrap(); // We have to do method name stripping here, // because parameters are immutable if is_nonstatic_method(procedure) { name = name.split_at(name.find('_').unwrap() + 1).1; } else if is_static_method(procedure) { name = name.split_at(name.find('_').unwrap() + 1).1; name = name.strip_prefix("static_").unwrap(); } write!(res, "fn {}(", name).unwrap(); if let Some(param) = &procedure.parameters.first() { if is_nonstatic_method(procedure) { write!(res, "&self").unwrap(); } else { let name = match param.name.as_deref().unwrap().trim() { "type" => "r#type", n => n, }; write!( res, "{}: {}", 
name, print_type(param.r#type.as_ref().unwrap()) ) .unwrap(); } for param in &procedure.parameters[1..] { let name = match param.name.as_deref().unwrap().trim() { "type" => "r#type", n => n, }; write!( res, ", {}: {}", name, print_type(param.r#type.as_ref().unwrap()) ) .unwrap(); } } write!(res, ")").unwrap(); if let Some(ret) = procedure.return_type.as_ref() { let ret = print_type(&ret); if matches!(procedure.return_is_nullable, Some(true)) { write!(res, " -> Option<{}>", ret).unwrap(); } else { write!(res, " -> {}", ret).unwrap(); } } if let Some(first) = procedure.game_scenes.first() { writeln!(res).unwrap(); write!(res, " where GameScene: ").unwrap(); write!(res, "{:?}", first).unwrap(); for scene in &procedure.game_scenes { write!(res, " + {:?}", scene).unwrap(); } } writeln!(res, ";").unwrap(); res } // Checks if first parameter is `this: Class` fn is_nonstatic_method(p: &Procedure) -> bool { p.parameters .first() .filter(|f| { let name = f.name.as_deref(); let code = f.r#type.as_ref().and_then(|t| t.code); name == Some("this") && code == Some(TypeCode::Class) }) .is_some() } fn is_static_method(p: &Procedure) -> bool { p.name.as_deref().unwrap().contains("_static_") } // Turns list of procedures into list of class impl's + list of free procedures fn declasser( procedures: &[Procedure], ) -> (HashMap<&str, Vec<&Procedure>>, Vec<&Procedure>) { let mut map: HashMap<_, Vec<_>> = HashMap::new(); let mut free = vec![]; for p in procedures { if is_nonstatic_method(p) || is_static_method(p) { let name = p.name.as_ref().unwrap(); let (class_name, _) = name.split_at(name.find("_").unwrap()); map.entry(class_name).or_default().push(p); } else { free.push(p); } } (map, free) } fn wrap_comments(text: String, line_width: usize) -> String { let mut res = String::new(); for line in text.lines() { let is_comment = line.find("/// "); if let Some(pos) = is_comment { let (prefix, mut line) = line.split_at(pos + 4); // Correct line width for indent and slashes let line_width = 
line_width - prefix.len(); let mut wrapped = false; while line.chars().count() > line_width { wrapped = true; let last_space = line.char_indices().take(line_width).fold( None, |acc, x| match x.1 { ' ' => Some(x.0), _ => acc, }, ); if let Some(i) = last_space { let (comment, rest) = line.split_at(i); // Skip space line = &rest[1..]; writeln!(res, "{}{}", prefix, comment).unwrap(); } else { writeln!(res, "{}{}", prefix, line).unwrap(); line = ""; } } if !line.is_empty() || !wrapped { writeln!(res, "{}{}", prefix, line).unwrap(); } } else { writeln!(res, "{}", line).unwrap(); } } res } pub fn dump_services_info(services: &Services) -> String { let mut res = String::new(); for service in &services.services { res += &clean_doc(&service.documentation); let service_name = service.name.as_deref().unwrap(); writeln!(res, "mod {} {{", service_name).unwrap(); for proc in &service.classes { let text = print_class(proc); for line in text.lines() { writeln!(res, " {}", line).unwrap(); } writeln!(res).unwrap(); } for proc in &service.enumerations { let text = print_enumeration(proc); for line in text.lines() { writeln!(res, " {}", line).unwrap(); } writeln!(res).unwrap(); } let (map, free) = declasser(&service.procedures); for proc in &free { let text = print_procedure(proc); for line in text.lines() { writeln!(res, " {}", line).unwrap(); } writeln!(res).unwrap(); } let mut map: Vec<_> = map.into_iter().collect(); map.sort_by_key(|&(name, _)| name); for (class, procs) in &map { writeln!(res, " impl {} {{", class).unwrap(); for proc in procs { let text = print_procedure(proc); for line in text.lines() { writeln!(res, " {}", line).unwrap(); } writeln!(res).unwrap(); } writeln!(res, " }}").unwrap(); writeln!(res).unwrap(); } writeln!(res, "}}").unwrap(); writeln!(res).unwrap(); } wrap_comments(res, 80) }
33.172638
79
0.490966