hexsha
stringlengths
40
40
size
int64
4
1.05M
content
stringlengths
4
1.05M
avg_line_length
float64
1.33
100
max_line_length
int64
1
1k
alphanum_fraction
float64
0.25
1
bb268f8ec846a9f8df8a6ff7fb0084fedee1eff2
4,748
use cgmath::Point2; use ggez::event::{KeyCode, KeyMods}; use ggez::graphics::{self, Canvas, Color, DrawParam, Text}; use ggez::{Context, GameResult}; use scene::{BoxedScene, Scene, SceneChangeEvent, SceneName}; use std::fs::File; use std::io::Error; use std::io::Result; use std::path::Path; use uorustlibs::color::Color as ColorTrait; use uorustlibs::hues::{Hue, HueGroup, HueReader}; static HEIGHT: f32 = 16.0; pub struct HuesScene { reader: Result<HueReader<File>>, index: u32, texture: Option<Canvas>, exiting: bool, } impl<'a> HuesScene { pub fn new(ctx: &mut Context) -> BoxedScene<'a, SceneName, ()> { let mut scene = Box::new(HuesScene { reader: HueReader::new(&Path::new("./assets/hues.mul")), texture: None, index: 0, exiting: false, }); scene.load_group(ctx).expect("Failed to create slice"); scene } fn load_group(&mut self, ctx: &mut Context) -> GameResult<()> { let dest = Canvas::with_window_size(ctx)?; let maybe_group = match self.reader { Ok(ref mut hue_reader) => hue_reader.read_hue_group(self.index), Err(ref x) => Err(Error::new(x.kind(), "Whoops")), }; match maybe_group { Ok(group) => { graphics::set_canvas(ctx, Some(&dest)); graphics::clear(ctx, graphics::BLACK); self.draw_hue_group(ctx, self.index, &group)?; graphics::set_canvas(ctx, None); } Err(_) => { graphics::set_canvas(ctx, Some(&dest)); graphics::clear(ctx, graphics::BLACK); graphics::set_canvas(ctx, None); } }; self.texture = Some(dest); Ok(()) } fn draw_hue_group( &self, ctx: &mut Context, group_idx: u32, group: &HueGroup, ) -> GameResult<()> { for (idx, hue) in group.entries.iter().enumerate() { self.draw_hue(ctx, &hue, idx as u32)?; } let label = Text::new(format!("Group {} - {}", group_idx, group.header)); graphics::draw( ctx, &label, (Point2::new(0.0, HEIGHT * 8.0 + 4.0), graphics::WHITE), )?; Ok(()) } fn draw_hue(&self, ctx: &mut Context, hue: &Hue, hue_idx: u32) -> GameResult<()> { for (col_idx, &color) in hue.color_table.iter().enumerate() { let (r, g, b, _) = color.to_rgba(); let rect = 
graphics::Rect::new(col_idx as f32 * 16.0, hue_idx as f32 * HEIGHT, 16.0, HEIGHT); let r1 = graphics::Mesh::new_rectangle( ctx, graphics::DrawMode::fill(), rect, Color::from_rgba(r, g, b, 255), )?; graphics::draw(ctx, &r1, DrawParam::default()).unwrap(); } let label_text = format!( "{}: {} - {}", if hue.name.trim().len() > 0 { &hue.name } else { "NONE" }, hue.table_start, hue.table_end ); let label = Text::new(format!("{}", label_text)); graphics::draw( ctx, &label, ( Point2::new(hue.color_table.len() as f32 * 16.0, hue_idx as f32 * HEIGHT), graphics::WHITE, ), )?; Ok(()) } } impl Scene<SceneName, ()> for HuesScene { fn draw(&mut self, ctx: &mut Context, _engine_data: &mut ()) -> GameResult<()> { match self.texture { Some(ref texture) => { graphics::draw(ctx, texture, DrawParam::default())?; } None => (), }; Ok(()) } fn update( &mut self, _ctx: &mut Context, _engine_data: &mut (), ) -> GameResult<Option<SceneChangeEvent<SceneName>>> { if self.exiting { Ok(Some(SceneChangeEvent::PopScene)) } else { Ok(None) } } fn key_down_event( &mut self, ctx: &mut Context, keycode: KeyCode, _keymods: KeyMods, _repeat: bool, _engine_data: &mut (), ) { match keycode { KeyCode::Escape => self.exiting = true, KeyCode::Left => { if self.index > 0 { self.index -= 1; self.load_group(ctx).expect("Failed to create slice"); } } KeyCode::Right => { self.index += 1; self.load_group(ctx).expect("Failed to create slice"); } _ => (), } } }
30.050633
98
0.483361
d6733050216d248634c8c5eec2ea807514f16405
543
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// run-pass
// pretty-expanded FIXME #23616

// Regression test: building a one-element tuple whose element is a block
// expression that diverges (`return`) must type-check and run cleanly.
fn main() {
    ({ return },);
}
31.941176
68
0.720074
62bd99b38c25f64ace9782742d62ef5d2c6129c3
17,250
use std::collections::HashMap; use std::convert::TryInto; use std::ffi::CString; use snafu::Snafu; #[cfg(feature = "usexmlsec")] use libxml::parser::Parser as XmlParser; #[cfg(feature = "usexmlsec")] use xmlsec::{self, XmlSecDocumentExt, XmlSecKey, XmlSecKeyFormat, XmlSecSignatureContext}; #[cfg(feature = "usexmlsec")] const XMLNS_XML_DSIG: &str = "http://www.w3.org/2000/09/xmldsig#"; #[cfg(feature = "usexmlsec")] const XMLNS_SIGVER: &str = "urn:urn-5:08Z8lPlI4JVjifINTfCtfelirUo"; #[cfg(feature = "usexmlsec")] const ATTRIB_SIGVER: &str = "sv"; #[cfg(feature = "usexmlsec")] const VALUE_SIGVER: &str = "verified"; #[derive(Debug, Snafu)] pub enum Error { InvalidSignature, #[snafu(display("base64 decoding Error: {}", error))] Base64Error { error: base64::DecodeError, }, XmlMissingRootElement, #[cfg(feature = "usexmlsec")] #[snafu(display("xml sec Error: {}", error))] XmlParseError { error: libxml::parser::XmlParseError, }, #[cfg(feature = "usexmlsec")] #[snafu(display("xml sec Error: {}", error))] XmlSecError { error: xmlsec::XmlSecError, }, #[cfg(feature = "usexmlsec")] #[snafu(display("failed to remove attribute: {}", error))] XmlAttributeRemovalError { error: Box<dyn std::error::Error> }, #[cfg(feature = "usexmlsec")] #[snafu(display("failed to define namespace: {}", error))] XmlNamespaceDefinitionError { error: Box<dyn std::error::Error> }, #[cfg(feature = "usexmlsec")] #[snafu(display("OpenSSL error stack: {}", error))] OpenSSLError { error: openssl::error::ErrorStack, }, } impl From<base64::DecodeError> for Error { fn from(error: base64::DecodeError) -> Self { Error::Base64Error { error } } } #[cfg(feature = "usexmlsec")] impl From<xmlsec::XmlSecError> for Error { fn from(error: xmlsec::XmlSecError) -> Self { Error::XmlSecError { error } } } #[cfg(feature = "usexmlsec")] impl From<libxml::parser::XmlParseError> for Error { fn from(error: libxml::parser::XmlParseError) -> Self { Error::XmlParseError { error } } } #[cfg(feature = "usexmlsec")] impl 
From<openssl::error::ErrorStack> for Error { fn from(error: openssl::error::ErrorStack) -> Self { Error::OpenSSLError { error } } } #[cfg(feature = "usexmlsec")] pub fn sign_xml<Bytes: AsRef<[u8]>>( xml: Bytes, private_key_der: &[u8], id_attribute: &str, path_to_element_to_sign: &str, namespaces: Option<&[(&str, &str)]>, ) -> Result<String, Error> { let parser = XmlParser::default(); let document = parser.parse_string(xml)?; let key = XmlSecKey::from_memory(private_key_der, XmlSecKeyFormat::Der, None)?; document.specify_idattr(path_to_element_to_sign, id_attribute, namespaces)?; let mut context = XmlSecSignatureContext::new()?; context.insert_key(key); context.sign_document(&document)?; Ok(document.to_string()) } #[cfg(feature = "usexmlsec")] pub fn verify_signed_xml<Bytes: AsRef<[u8]>>( xml: Bytes, x509_cert_der: &[u8], id_attribute: Option<&str>, path_to_signed_element: &str, namespaces: Option<&[(&str, &str)]> ) -> Result<(), Error> { let parser = XmlParser::default(); let document = parser.parse_string(xml)?; let key = XmlSecKey::from_memory(x509_cert_der, XmlSecKeyFormat::CertDer, None)?; let mut context = XmlSecSignatureContext::new()?; context.insert_key(key); document.specify_idattr(path_to_signed_element, id_attribute.unwrap_or("ID"), namespaces)?; let valid = context.verify_document(&document)?; if !valid { return Err(Error::InvalidSignature); } Ok(()) } /// Searches the document for all attributes named `ID` and stores them and their values in the XML /// document's internal ID table. /// /// This is necessary for signature verification to successfully follow the references from a /// `<dsig:Signature>` element to the element it has signed. 
#[cfg(feature = "usexmlsec")] fn collect_id_attributes(doc: &mut libxml::tree::Document) -> Result<(), Error> { const ID_STR: &str = "ID"; let id_attr_name = CString::new(ID_STR).unwrap(); let mut nodes_to_visit = Vec::new(); if let Some(root_elem) = doc.get_root_element() { nodes_to_visit.push(root_elem); } while let Some(node) = nodes_to_visit.pop() { if let Some(id_value) = node.get_attribute(ID_STR) { let id_value_cstr = CString::new(id_value).unwrap(); let node_ptr = node.node_ptr(); unsafe { let attr = libxml::bindings::xmlHasProp( node_ptr, id_attr_name.as_ptr() as *const u8, ); assert!(!attr.is_null()); libxml::bindings::xmlAddID( std::ptr::null_mut(), doc.doc_ptr(), id_value_cstr.as_ptr() as *const u8, attr ); } } for child in node.get_child_elements() { nodes_to_visit.push(child); } } Ok(()) } /// Finds and returns all `<dsig:Signature>` elements in the subtree rooted at the given node. #[cfg(feature = "usexmlsec")] fn find_signature_nodes(node: &libxml::tree::Node) -> Vec<libxml::tree::Node> { let mut ret = Vec::new(); if let Some(ns) = &node.get_namespace() { if ns.get_href() == XMLNS_XML_DSIG && node.get_name() == "Signature" { ret.push(node.clone()); } } for child in node.get_child_elements() { let mut children = find_signature_nodes(&child); ret.append(&mut children); } ret } /// Removes all signature-verified attributes ([`ATTRIB_SIGVER`] in the namespace [`XMLNS_SIGVER`]) /// from all elements in the subtree rooted at the given node. #[cfg(feature = "usexmlsec")] pub fn remove_signature_verified_attributes(node: &mut libxml::tree::Node) -> Result<(), Error> { node.remove_attribute_ns(ATTRIB_SIGVER, XMLNS_SIGVER) .map_err(|err| Error::XmlAttributeRemovalError { error: err })?; for mut child_elem in node.get_child_elements() { remove_signature_verified_attributes(&mut child_elem)?; } Ok(()) } /// Obtains the first child element of the given node that has the given name and namespace. 
#[cfg(feature = "usexmlsec")] fn get_first_child_name_ns(node: &libxml::tree::Node, name: &str, ns: &str) -> Option<libxml::tree::Node> { let mut found_node = None; for child in node.get_child_elements() { if let Some(child_ns) = child.get_namespace() { if child_ns.get_href() != ns { continue; } } else { continue; } if child.get_name() == name { found_node = Some(child.clone()); break; } } found_node } /// Searches the subtree rooted at the given node and returns the elements which match the given /// predicate. #[cfg(feature = "usexmlsec")] fn get_elements_by_predicate<F: FnMut(&libxml::tree::Node) -> bool>( elem: &libxml::tree::Node, mut pred: F, ) -> Vec<libxml::tree::Node> { let mut nodes_to_visit = Vec::new(); let mut nodes = Vec::new(); nodes_to_visit.push(elem.clone()); while let Some(node) = nodes_to_visit.pop() { if pred(&node) { nodes.push(node.clone()); } let mut children = node.get_child_elements(); nodes_to_visit.append(&mut children); } nodes } /// Searches for and returns the element with the given value of the `ID` attribute from the subtree /// rooted at the given node. #[cfg(feature = "usexmlsec")] fn get_element_by_id(elem: &libxml::tree::Node, id: &str) -> Option<libxml::tree::Node> { let mut elems = get_elements_by_predicate(elem, |node| node.get_attribute("ID") .map(|node_id| node_id == id) .unwrap_or(false) ); let elem = elems.drain(..).nth(0); elem } /// Searches for and returns the element with the given pointer value from the subtree rooted at the /// given node. 
#[cfg(feature = "usexmlsec")] fn get_node_by_ptr(elem: &libxml::tree::Node, ptr: *const libxml::bindings::xmlNode) -> Option<libxml::tree::Node> { let mut elems = get_elements_by_predicate(elem, |node| { let node_ptr = node.node_ptr() as *const _; node_ptr == ptr }); let elem = elems.drain(..).nth(0); elem } #[cfg(feature = "usexmlsec")] struct XPathContext { pub pointer: libxml::bindings::xmlXPathContextPtr, } #[cfg(feature = "usexmlsec")] impl Drop for XPathContext { fn drop(&mut self) { unsafe { libxml::bindings::xmlXPathFreeContext(self.pointer) } } } #[cfg(feature = "usexmlsec")] struct XPathObject { pub pointer: libxml::bindings::xmlXPathObjectPtr, } #[cfg(feature = "usexmlsec")] impl Drop for XPathObject { fn drop(&mut self) { unsafe { libxml::bindings::xmlXPathFreeObject(self.pointer) } } } /// Searches for and returns the element at the root of the subtree signed by the given signature /// node. #[cfg(feature = "usexmlsec")] fn get_signed_node(signature_node: &libxml::tree::Node, doc: &libxml::tree::Document) -> Option<libxml::tree::Node> { let object_elem_opt = get_first_child_name_ns(signature_node, "Object", XMLNS_XML_DSIG); if let Some(object_elem) = object_elem_opt { return Some(object_elem); } let sig_info_elem_opt = get_first_child_name_ns(signature_node, "SignedInfo", XMLNS_XML_DSIG); if let Some(sig_info_elem) = sig_info_elem_opt { let ref_elem_opt = get_first_child_name_ns(&sig_info_elem, "Reference", XMLNS_XML_DSIG); if let Some(ref_elem) = ref_elem_opt { if let Some(uri) = ref_elem.get_attribute("URI") { if uri.starts_with('#') { // prepare a XPointer context let c_uri = CString::new(&uri[1..]).unwrap(); let ctx_ptr = unsafe { libxml::bindings::xmlXPtrNewContext( doc.doc_ptr(), signature_node.node_ptr(), std::ptr::null_mut(), ) }; if ctx_ptr.is_null() { return None; } let ctx = XPathContext { pointer: ctx_ptr }; // evaluate the XPointer expression let obj_ptr = unsafe { libxml::bindings::xmlXPtrEval( c_uri.as_ptr() as *const 
libxml::bindings::xmlChar, ctx.pointer, ) }; if obj_ptr.is_null() { return None; } let obj = XPathObject { pointer: obj_ptr }; // extract the nodeset from the result let obj_type = unsafe { (*obj.pointer).type_ }; if obj_type != libxml::bindings::xmlXPathObjectType_XPATH_NODESET { return None; } let obj_nodeset = unsafe { (*obj.pointer).nodesetval }; let nodeset_count = unsafe { (*obj_nodeset).nodeNr }; // go through the nodes and find them in the document for i in 0..nodeset_count { let node_ptr_ptr = unsafe { (*obj_nodeset).nodeTab.offset(i.try_into().unwrap()) }; let node_ptr = unsafe { *node_ptr_ptr }; if let Some(node) = get_node_by_ptr(&doc.get_root_element().unwrap(), node_ptr) { return Some(node); } } } } } } None } /// Place the signature-verified attributes ([`ATTRIB_SIGVER`] in the given namespace) on the given /// element, all its descendants and its whole chain of ancestors (but not necessarily all their /// descendants). #[cfg(feature = "usexmlsec")] fn place_signature_verified_attributes( root_elem: libxml::tree::Node, doc: &libxml::tree::Document, ns: &libxml::tree::Namespace, ) { let mut ptr_to_required_node: HashMap<usize, libxml::tree::Node> = HashMap::new(); let mut signature_nodes = find_signature_nodes(&root_elem); for sig_node in signature_nodes.drain(..) 
{ if let Some(sig_root_node) = get_signed_node(&sig_node, doc) { let mut nodes = Vec::new(); let mut parent = sig_root_node.get_parent(); nodes.push(sig_root_node); // mark all children while let Some(node) = nodes.pop() { let node_ptr = node.node_ptr() as usize; for child in node.get_child_elements() { nodes.push(child); } ptr_to_required_node.entry(node_ptr).or_insert(node); } // mark the ancestor chain while let Some(p) = parent { let p_ptr = p.node_ptr() as usize; parent = p.get_parent(); ptr_to_required_node.entry(p_ptr).or_insert(p); } } } drop(root_elem); for node in ptr_to_required_node.values_mut() { node.set_attribute_ns(ATTRIB_SIGVER, VALUE_SIGVER, ns).unwrap(); } } /// Remove all elements that do not contain a signature-verified attribute ([`ATTRIB_SIGVER`] in /// the namespace [`XMLNS_SIGVER`]). #[cfg(feature = "usexmlsec")] fn remove_unverified_elements(node: &mut libxml::tree::Node) { // depth-first for mut child in node.get_child_elements() { remove_unverified_elements(&mut child); } if node.get_attribute_ns(ATTRIB_SIGVER, XMLNS_SIGVER) != Some(String::from(VALUE_SIGVER)) { // element is unverified; remove it node.unlink_node(); } } /// Takes an XML document, parses it, verifies all XML digital signatures against the given /// certificates, and returns a derived version of the document where all elements that are not /// covered by a digital signature have been removed. #[cfg(feature = "usexmlsec")] pub(crate) fn reduce_xml_to_signed( xml_str: &str, certs: &[openssl::x509::X509], ) -> Result<String, Error> { let mut xml = XmlParser::default().parse_string(xml_str)?; let mut root_elem = xml.get_root_element().ok_or(Error::XmlMissingRootElement)?; // collect ID attribute values and tell libxml about them collect_id_attributes(&mut xml)?; // verify each signature { let mut signature_nodes = find_signature_nodes(&root_elem); for sig_node in signature_nodes.drain(..) 
{ let mut sig_ctx = XmlSecSignatureContext::new()?; let mut verified = false; for openssl_key in certs { let key_data = openssl_key.to_der()?; let key = XmlSecKey::from_memory(&key_data, XmlSecKeyFormat::CertDer, None)?; sig_ctx.insert_key(key); verified = sig_ctx.verify_node(&sig_node)?; if verified { break; } } if !verified { return Err(Error::InvalidSignature); } } } // define the "signature verified" namespace let sig_ver_ns = libxml::tree::Namespace::new( "sv", XMLNS_SIGVER, &mut root_elem, ) .map_err(|err| Error::XmlNamespaceDefinitionError { error: err })?; // remove all existing "signature verified" attributes // (we can't do this before verifying the signatures: // they might be contained in the XML document proper and signed) remove_signature_verified_attributes(&mut root_elem)?; // place the "signature verified" attributes on all elements that are: // * signed // * a descendant of a signed element // * an ancestor of a signed element place_signature_verified_attributes(root_elem, &xml, &sig_ver_ns); // delete all elements that don't have a "signature verified" attribute let mut root_elem = xml.get_root_element().ok_or(Error::XmlMissingRootElement)?; remove_unverified_elements(&mut root_elem); // remove all "signature verified" attributes again remove_signature_verified_attributes(&mut root_elem)?; // serialize XML again let reduced_xml_str = xml.to_string(); Ok(reduced_xml_str) } // Util // strip out 76-width format and decode base64 pub fn decode_x509_cert(x509_cert: &str) -> Result<Vec<u8>, base64::DecodeError> { let stripped = x509_cert .as_bytes() .to_vec() .into_iter() .filter(|b| !b" \n\t\r\x0b\x0c".contains(b)) .collect::<Vec<u8>>(); base64::decode(&stripped) } // 76-width base64 encoding (MIME) pub fn mime_encode_x509_cert(x509_cert_der: &[u8]) -> String { data_encoding::BASE64_MIME.encode(x509_cert_der) } pub fn gen_saml_response_id() -> String { format!("id{}", uuid::Uuid::new_v4().to_string()) } pub fn gen_saml_assertion_id() -> String { 
format!("_{}", uuid::Uuid::new_v4().to_string()) }
34.22619
117
0.608174
6220618c377bfa86c23d14ef3448250168fd9429
189
//! Configuration for the Relay CLI and server. #![warn(missing_docs)] mod config; mod types; mod upstream; pub use crate::config::*; pub use crate::types::*; pub use crate::upstream::*;
17.181818
47
0.698413
50f8c5ca85d0c5654e54a27160062d2ff95082a7
7,535
#[macro_use] pub(crate) mod macros; mod from_delimited_data; mod to_delimited_data; pub(crate) mod alias; pub(crate) mod ansi; pub(crate) mod append; pub(crate) mod args; pub(crate) mod autoenv; pub(crate) mod autoenv_trust; pub(crate) mod autoenv_untrust; pub(crate) mod autoview; pub(crate) mod benchmark; pub(crate) mod build_string; pub(crate) mod cal; pub(crate) mod cd; pub(crate) mod char_; pub(crate) mod classified; #[cfg(feature = "clipboard")] pub(crate) mod clip; pub(crate) mod command; pub(crate) mod compact; pub(crate) mod config; pub(crate) mod constants; pub(crate) mod count; pub(crate) mod cp; pub(crate) mod date; pub(crate) mod debug; pub(crate) mod default; pub(crate) mod do_; pub(crate) mod drop; pub(crate) mod du; pub(crate) mod each; pub(crate) mod echo; pub(crate) mod enter; pub(crate) mod every; pub(crate) mod exit; pub(crate) mod first; pub(crate) mod format; pub(crate) mod from; pub(crate) mod from_csv; pub(crate) mod from_eml; pub(crate) mod from_ics; pub(crate) mod from_ini; pub(crate) mod from_json; pub(crate) mod from_ods; pub(crate) mod from_ssv; pub(crate) mod from_toml; pub(crate) mod from_tsv; pub(crate) mod from_url; pub(crate) mod from_vcf; pub(crate) mod from_xlsx; pub(crate) mod from_xml; pub(crate) mod from_yaml; pub(crate) mod get; pub(crate) mod group_by; pub(crate) mod group_by_date; pub(crate) mod headers; pub(crate) mod help; pub(crate) mod histogram; pub(crate) mod history; pub(crate) mod if_; pub(crate) mod insert; pub(crate) mod is_empty; pub(crate) mod keep; pub(crate) mod last; pub(crate) mod lines; pub(crate) mod ls; pub(crate) mod math; pub(crate) mod merge; pub(crate) mod mkdir; pub(crate) mod move_; pub(crate) mod next; pub(crate) mod nth; pub(crate) mod open; pub(crate) mod parse; pub(crate) mod path; pub(crate) mod pivot; pub(crate) mod plugin; pub(crate) mod prepend; pub(crate) mod prev; pub(crate) mod pwd; pub(crate) mod random; pub(crate) mod range; pub(crate) mod reduce; pub(crate) mod reject; pub(crate) mod 
rename; pub(crate) mod reverse; pub(crate) mod rm; pub(crate) mod run_alias; pub(crate) mod run_external; pub(crate) mod save; pub(crate) mod select; pub(crate) mod shells; pub(crate) mod shuffle; pub(crate) mod size; pub(crate) mod skip; pub(crate) mod sleep; pub(crate) mod sort_by; pub(crate) mod split; pub(crate) mod split_by; pub(crate) mod str_; pub(crate) mod table; pub(crate) mod tags; pub(crate) mod to; pub(crate) mod to_csv; pub(crate) mod to_html; pub(crate) mod to_json; pub(crate) mod to_md; pub(crate) mod to_toml; pub(crate) mod to_tsv; pub(crate) mod to_url; pub(crate) mod to_xml; pub(crate) mod to_yaml; pub(crate) mod trim; pub(crate) mod uniq; pub(crate) mod update; pub(crate) mod url_; pub(crate) mod version; pub(crate) mod what; pub(crate) mod where_; pub(crate) mod which_; pub(crate) mod with_env; pub(crate) mod wrap; pub(crate) use autoview::Autoview; pub(crate) use cd::Cd; pub(crate) use command::{ whole_stream_command, Command, Example, UnevaluatedCallInfo, WholeStreamCommand, }; pub(crate) use alias::Alias; pub(crate) use ansi::Ansi; pub(crate) use append::Append; pub(crate) use autoenv::Autoenv; pub(crate) use autoenv_trust::AutoenvTrust; pub(crate) use autoenv_untrust::AutoenvUnTrust; pub(crate) use benchmark::Benchmark; pub(crate) use build_string::BuildString; pub(crate) use cal::Cal; pub(crate) use char_::Char; pub(crate) use compact::Compact; pub(crate) use config::{ Config, ConfigClear, ConfigGet, ConfigLoad, ConfigPath, ConfigRemove, ConfigSet, ConfigSetInto, }; pub(crate) use count::Count; pub(crate) use cp::Cpy; pub(crate) use date::{Date, DateFormat, DateNow, DateUTC}; pub(crate) use debug::Debug; pub(crate) use default::Default; pub(crate) use do_::Do; pub(crate) use drop::Drop; pub(crate) use du::Du; pub(crate) use each::Each; pub(crate) use echo::Echo; pub(crate) use if_::If; pub(crate) use is_empty::IsEmpty; pub(crate) use update::Update; pub(crate) mod kill; pub(crate) use kill::Kill; pub(crate) mod clear; pub(crate) use 
clear::Clear; pub(crate) mod touch; pub(crate) use enter::Enter; pub(crate) use every::Every; pub(crate) use exit::Exit; pub(crate) use first::First; pub(crate) use format::Format; pub(crate) use from::From; pub(crate) use from_csv::FromCSV; pub(crate) use from_eml::FromEML; pub(crate) use from_ics::FromIcs; pub(crate) use from_ini::FromINI; pub(crate) use from_json::FromJSON; pub(crate) use from_ods::FromODS; pub(crate) use from_ssv::FromSSV; pub(crate) use from_toml::FromTOML; pub(crate) use from_tsv::FromTSV; pub(crate) use from_url::FromURL; pub(crate) use from_vcf::FromVcf; pub(crate) use from_xlsx::FromXLSX; pub(crate) use from_xml::FromXML; pub(crate) use from_yaml::FromYAML; pub(crate) use from_yaml::FromYML; pub(crate) use get::Get; pub(crate) use group_by::GroupBy; pub(crate) use group_by_date::GroupByDate; pub(crate) use headers::Headers; pub(crate) use help::Help; pub(crate) use histogram::Histogram; pub(crate) use history::History; pub(crate) use insert::Insert; pub(crate) use keep::{Keep, KeepUntil, KeepWhile}; pub(crate) use last::Last; pub(crate) use lines::Lines; pub(crate) use ls::Ls; pub(crate) use math::{ Math, MathAverage, MathEval, MathMaximum, MathMedian, MathMinimum, MathMode, MathStddev, MathSummation, MathVariance, }; pub(crate) use merge::Merge; pub(crate) use mkdir::Mkdir; pub(crate) use move_::{Move, MoveColumn, Mv}; pub(crate) use next::Next; pub(crate) use nth::Nth; pub(crate) use open::Open; pub(crate) use parse::Parse; pub(crate) use path::{ PathBasename, PathCommand, PathDirname, PathExists, PathExpand, PathExtension, PathFilestem, PathType, }; pub(crate) use pivot::Pivot; pub(crate) use prepend::Prepend; pub(crate) use prev::Previous; pub(crate) use pwd::Pwd; #[cfg(feature = "uuid_crate")] pub(crate) use random::RandomUUID; pub(crate) use random::{Random, RandomBool, RandomDice}; pub(crate) use range::Range; pub(crate) use reduce::Reduce; pub(crate) use reject::Reject; pub(crate) use rename::Rename; pub(crate) use 
reverse::Reverse; pub(crate) use rm::Remove; pub(crate) use run_external::RunExternalCommand; pub(crate) use save::Save; pub(crate) use select::Select; pub(crate) use shells::Shells; pub(crate) use shuffle::Shuffle; pub(crate) use size::Size; pub(crate) use skip::{Skip, SkipUntil, SkipWhile}; pub(crate) use sleep::Sleep; pub(crate) use sort_by::SortBy; pub(crate) use split::{Split, SplitChars, SplitColumn, SplitRow}; pub(crate) use split_by::SplitBy; pub(crate) use str_::{ Str, StrCamelCase, StrCapitalize, StrCollect, StrContains, StrDowncase, StrEndsWith, StrFindReplace, StrFrom, StrIndexOf, StrKebabCase, StrLength, StrPascalCase, StrReverse, StrScreamingSnakeCase, StrSet, StrSnakeCase, StrStartsWith, StrSubstring, StrToDatetime, StrToDecimal, StrToInteger, StrTrim, StrTrimLeft, StrTrimRight, StrUpcase, }; pub(crate) use table::Table; pub(crate) use tags::Tags; pub(crate) use to::To; pub(crate) use to_csv::ToCSV; pub(crate) use to_html::ToHTML; pub(crate) use to_json::ToJSON; pub(crate) use to_md::ToMarkdown; pub(crate) use to_toml::ToTOML; pub(crate) use to_tsv::ToTSV; pub(crate) use to_url::ToURL; pub(crate) use to_xml::ToXML; pub(crate) use to_yaml::ToYAML; pub(crate) use touch::Touch; pub(crate) use trim::Trim; pub(crate) use uniq::Uniq; pub(crate) use url_::{UrlCommand, UrlHost, UrlPath, UrlQuery, UrlScheme}; pub(crate) use version::Version; pub(crate) use what::What; pub(crate) use where_::Where; pub(crate) use which_::Which; pub(crate) use with_env::WithEnv; pub(crate) use wrap::Wrap;
28.327068
99
0.742933
fe0b500aa150dca5946110b1d3d363bef6ce00e8
7,346
#![allow(unused_imports, non_camel_case_types)] use crate::models::r5::CodeableConcept::CodeableConcept; use crate::models::r5::Element::Element; use crate::models::r5::Extension::Extension; use crate::models::r5::InsurancePlan_Limit::InsurancePlan_Limit; use serde_json::json; use serde_json::value::Value; use std::borrow::Cow; /// Details of a Health Insurance product/plan provided by an organization. #[derive(Debug)] pub struct InsurancePlan_Benefit<'a> { pub(crate) value: Cow<'a, Value>, } impl InsurancePlan_Benefit<'_> { pub fn new(value: &Value) -> InsurancePlan_Benefit { InsurancePlan_Benefit { value: Cow::Borrowed(value), } } pub fn to_json(&self) -> Value { (*self.value).clone() } /// Extensions for requirement pub fn _requirement(&self) -> Option<Element> { if let Some(val) = self.value.get("_requirement") { return Some(Element { value: Cow::Borrowed(val), }); } return None; } /// May be used to represent additional information that is not part of the basic /// definition of the element. To make the use of extensions safe and manageable, /// there is a strict set of governance applied to the definition and use of /// extensions. Though any implementer can define an extension, there is a set of /// requirements that SHALL be met as part of the definition of the extension. pub fn extension(&self) -> Option<Vec<Extension>> { if let Some(Value::Array(val)) = self.value.get("extension") { return Some( val.into_iter() .map(|e| Extension { value: Cow::Borrowed(e), }) .collect::<Vec<_>>(), ); } return None; } /// Unique id for the element within a resource (for internal references). This may be /// any string value that does not contain spaces. pub fn id(&self) -> Option<&str> { if let Some(Value::String(string)) = self.value.get("id") { return Some(string); } return None; } /// The specific limits on the benefit. 
pub fn limit(&self) -> Option<Vec<InsurancePlan_Limit>> { if let Some(Value::Array(val)) = self.value.get("limit") { return Some( val.into_iter() .map(|e| InsurancePlan_Limit { value: Cow::Borrowed(e), }) .collect::<Vec<_>>(), ); } return None; } /// May be used to represent additional information that is not part of the basic /// definition of the element and that modifies the understanding of the element /// in which it is contained and/or the understanding of the containing element's /// descendants. Usually modifier elements provide negation or qualification. To make /// the use of extensions safe and manageable, there is a strict set of governance /// applied to the definition and use of extensions. Though any implementer can define /// an extension, there is a set of requirements that SHALL be met as part of the /// definition of the extension. Applications processing a resource are required to /// check for modifier extensions. Modifier extensions SHALL NOT change the meaning /// of any elements on Resource or DomainResource (including cannot change the meaning /// of modifierExtension itself). pub fn modifier_extension(&self) -> Option<Vec<Extension>> { if let Some(Value::Array(val)) = self.value.get("modifierExtension") { return Some( val.into_iter() .map(|e| Extension { value: Cow::Borrowed(e), }) .collect::<Vec<_>>(), ); } return None; } /// The referral requirements to have access/coverage for this benefit. pub fn requirement(&self) -> Option<&str> { if let Some(Value::String(string)) = self.value.get("requirement") { return Some(string); } return None; } /// Type of benefit (primary care; speciality care; inpatient; outpatient). 
pub fn fhir_type(&self) -> CodeableConcept { CodeableConcept { value: Cow::Borrowed(&self.value["type"]), } } pub fn validate(&self) -> bool { if let Some(_val) = self._requirement() { if !_val.validate() { return false; } } if let Some(_val) = self.extension() { if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) { return false; } } if let Some(_val) = self.id() {} if let Some(_val) = self.limit() { if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) { return false; } } if let Some(_val) = self.modifier_extension() { if !_val.into_iter().map(|e| e.validate()).all(|x| x == true) { return false; } } if let Some(_val) = self.requirement() {} if !self.fhir_type().validate() { return false; } return true; } } #[derive(Debug)] pub struct InsurancePlan_BenefitBuilder { pub(crate) value: Value, } impl InsurancePlan_BenefitBuilder { pub fn build(&self) -> InsurancePlan_Benefit { InsurancePlan_Benefit { value: Cow::Owned(self.value.clone()), } } pub fn with(existing: InsurancePlan_Benefit) -> InsurancePlan_BenefitBuilder { InsurancePlan_BenefitBuilder { value: (*existing.value).clone(), } } pub fn new(fhir_type: CodeableConcept) -> InsurancePlan_BenefitBuilder { let mut __value: Value = json!({}); __value["type"] = json!(fhir_type.value); return InsurancePlan_BenefitBuilder { value: __value }; } pub fn _requirement<'a>(&'a mut self, val: Element) -> &'a mut InsurancePlan_BenefitBuilder { self.value["_requirement"] = json!(val.value); return self; } pub fn extension<'a>( &'a mut self, val: Vec<Extension>, ) -> &'a mut InsurancePlan_BenefitBuilder { self.value["extension"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>()); return self; } pub fn id<'a>(&'a mut self, val: &str) -> &'a mut InsurancePlan_BenefitBuilder { self.value["id"] = json!(val); return self; } pub fn limit<'a>( &'a mut self, val: Vec<InsurancePlan_Limit>, ) -> &'a mut InsurancePlan_BenefitBuilder { self.value["limit"] = json!(val.into_iter().map(|e| 
e.value).collect::<Vec<_>>()); return self; } pub fn modifier_extension<'a>( &'a mut self, val: Vec<Extension>, ) -> &'a mut InsurancePlan_BenefitBuilder { self.value["modifierExtension"] = json!(val.into_iter().map(|e| e.value).collect::<Vec<_>>()); return self; } pub fn requirement<'a>(&'a mut self, val: &str) -> &'a mut InsurancePlan_BenefitBuilder { self.value["requirement"] = json!(val); return self; } }
34.488263
97
0.575279
769ca1d459ef43757c78a43d9a3e88d165820e08
2,949
use egui::Color32;

/// Converts a hex string with a leading '#' into a egui::Color32.
/// - The first three channels are interpreted as R, G, B.
/// - The fourth channel, if present, is used as the alpha value.
/// - Both upper and lowercase characters can be used for the hex values.
///
/// *Adapted from: https://docs.rs/raster/0.1.0/src/raster/lib.rs.html#425-725.
/// Credit goes to original authors.*
pub fn color_from_hex(hex: &str) -> Result<Color32, String> {
    // Parse one two-character hex pair (e.g. "00" -> 0, "FF" -> 255).
    fn parse_channel(pair: &str) -> Result<u8, String> {
        u8::from_str_radix(pair, 16).map_err(|e| format!("Error parsing hex: {}", e))
    }

    if hex.starts_with('#') {
        match hex.len() {
            // #RRGGBBAA (Red Green Blue Alpha)
            9 => {
                return Ok(Color32::from_rgba_premultiplied(
                    parse_channel(&hex[1..3])?,
                    parse_channel(&hex[3..5])?,
                    parse_channel(&hex[5..7])?,
                    parse_channel(&hex[7..9])?,
                ));
            }
            // #RRGGBB (Red Green Blue)
            7 => {
                return Ok(Color32::from_rgb(
                    parse_channel(&hex[1..3])?,
                    parse_channel(&hex[3..5])?,
                    parse_channel(&hex[5..7])?,
                ));
            }
            _ => {}
        }
    }

    Err(format!(
        "Error parsing hex: {}. Example of valid formats: #FFFFFF or #ffffffff",
        hex
    ))
}

/// Converts a Color32 into its canonical hexadecimal representation.
/// - The color string will be preceded by '#'.
/// - If the alpha channel is completely opaque, it will be omitted.
/// - Characters from 'a' to 'f' will be written in lowercase.
pub fn color_to_hex(color: Color32) -> String {
    if color.a() < 255 {
        // Non-opaque color: append the alpha channel as a fourth hex pair.
        format!(
            "#{:02x?}{:02x?}{:02x?}{:02x?}",
            color.r(),
            color.g(),
            color.b(),
            color.a()
        )
    } else {
        // Fully opaque: alpha is omitted from the canonical form.
        format!("#{:02x?}{:02x?}{:02x?}", color.r(), color.g(), color.b())
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    pub fn test_color_from_and_to_hex() {
        assert_eq!(
            color_from_hex("#00ff00").unwrap(),
            Color32::from_rgb(0, 255, 0)
        );
        assert_eq!(
            color_from_hex("#5577AA").unwrap(),
            Color32::from_rgb(85, 119, 170)
        );
        assert_eq!(
            color_from_hex("#E2e2e277").unwrap(),
            Color32::from_rgba_premultiplied(226, 226, 226, 119)
        );
        assert!(color_from_hex("abcdefgh").is_err());

        assert_eq!(
            color_to_hex(Color32::from_rgb(0, 255, 0)),
            "#00ff00".to_string()
        );
        assert_eq!(
            color_to_hex(Color32::from_rgb(85, 119, 170)),
            "#5577aa".to_string()
        );
        assert_eq!(
            color_to_hex(Color32::from_rgba_premultiplied(226, 226, 226, 119)),
            // BUGFIX: `color_to_hex` always emits the leading '#'; the expected
            // value previously lacked it and the assertion could never pass.
            "#e2e2e277".to_string()
        );
    }
}
31.37234
80
0.529332
8956d3665db2257880ebaf543d4fea20f9ea2dda
390
/// Value read from the register
pub struct R {
    bits: u32,
}

impl super::CID2 {
    /// Reads the contents of the register
    #[inline]
    pub fn read(&self) -> R {
        // Snapshot the hardware register into a plain value object.
        R {
            bits: self.register.get(),
        }
    }
}

impl R {
    /// Value of the register as raw bits
    #[inline]
    pub fn bits(&self) -> u32 {
        self.bits
    }
}
18.571429
51
0.505128
398fc4e8e76450131c0041b0593532bee7780929
2,884
/* * EVE Swagger Interface * * An OpenAPI for EVE Online * * OpenAPI spec version: 1.3.8 * * Generated by: https://github.com/swagger-api/swagger-codegen.git */ /// GetFwLeaderboardsVictoryPoints : Top 4 rankings of factions by victory points from yesterday, last week and in total #[allow(unused_imports)] use serde_json::Value; #[derive(Debug, Serialize, Deserialize)] pub struct GetFwLeaderboardsVictoryPoints { /// Top 4 ranking of factions active in faction warfare by total victory points. A faction is considered \"active\" if they have participated in faction warfare in the past 14 days #[serde(rename = "active_total")] active_total: Vec<::models::GetFwLeaderboardsActiveTotalActiveTotal1>, /// Top 4 ranking of factions by victory points in the past week #[serde(rename = "last_week")] last_week: Vec<::models::GetFwLeaderboardsLastWeekLastWeek1>, /// Top 4 ranking of factions by victory points in the past day #[serde(rename = "yesterday")] yesterday: Vec<::models::GetFwLeaderboardsYesterdayYesterday1> } impl GetFwLeaderboardsVictoryPoints { /// Top 4 rankings of factions by victory points from yesterday, last week and in total pub fn new(active_total: Vec<::models::GetFwLeaderboardsActiveTotalActiveTotal1>, last_week: Vec<::models::GetFwLeaderboardsLastWeekLastWeek1>, yesterday: Vec<::models::GetFwLeaderboardsYesterdayYesterday1>) -> GetFwLeaderboardsVictoryPoints { GetFwLeaderboardsVictoryPoints { active_total: active_total, last_week: last_week, yesterday: yesterday } } pub fn set_active_total(&mut self, active_total: Vec<::models::GetFwLeaderboardsActiveTotalActiveTotal1>) { self.active_total = active_total; } pub fn with_active_total(mut self, active_total: Vec<::models::GetFwLeaderboardsActiveTotalActiveTotal1>) -> GetFwLeaderboardsVictoryPoints { self.active_total = active_total; self } pub fn active_total(&self) -> &Vec<::models::GetFwLeaderboardsActiveTotalActiveTotal1> { &self.active_total } pub fn set_last_week(&mut self, last_week: 
Vec<::models::GetFwLeaderboardsLastWeekLastWeek1>) { self.last_week = last_week; } pub fn with_last_week(mut self, last_week: Vec<::models::GetFwLeaderboardsLastWeekLastWeek1>) -> GetFwLeaderboardsVictoryPoints { self.last_week = last_week; self } pub fn last_week(&self) -> &Vec<::models::GetFwLeaderboardsLastWeekLastWeek1> { &self.last_week } pub fn set_yesterday(&mut self, yesterday: Vec<::models::GetFwLeaderboardsYesterdayYesterday1>) { self.yesterday = yesterday; } pub fn with_yesterday(mut self, yesterday: Vec<::models::GetFwLeaderboardsYesterdayYesterday1>) -> GetFwLeaderboardsVictoryPoints { self.yesterday = yesterday; self } pub fn yesterday(&self) -> &Vec<::models::GetFwLeaderboardsYesterdayYesterday1> { &self.yesterday } }
33.929412
245
0.748613
085d78063b7a61565bd08d5fe8d2fa93d50dce24
2,457
// Copyright (c) The Libra Core Contributors // SPDX-License-Identifier: Apache-2.0 use config::{ config::{NodeConfig, NodeConfigHelpers}, trusted_peers::{TrustedPeersConfig, TrustedPeersConfigHelpers}, }; use crypto::{ed25519::*, test_utils::KeyPair}; use failure::prelude::*; use proto_conv::IntoProtoBytes; use rand::{Rng, SeedableRng}; use std::{convert::TryFrom, fs::File, io::prelude::*, path::Path}; use types::{account_address::AccountAddress, validator_public_keys::ValidatorPublicKeys}; use vm_genesis::encode_genesis_transaction_with_validator; pub fn gen_genesis_transaction<P: AsRef<Path>>( path: P, faucet_account_keypair: &KeyPair<Ed25519PrivateKey, Ed25519PublicKey>, trusted_peer_config: &TrustedPeersConfig, ) -> Result<()> { let validator_set = trusted_peer_config .peers .iter() .map(|(peer_id, peer)| { ValidatorPublicKeys::new( AccountAddress::try_from(peer_id.clone()).expect("[config] invalid peer_id"), peer.get_consensus_public().clone(), peer.get_network_signing_public().clone(), peer.get_network_identity_public().clone(), ) }) .collect(); let transaction = encode_genesis_transaction_with_validator( &faucet_account_keypair.private_key, faucet_account_keypair.public_key.clone(), validator_set, ); let mut file = File::create(path)?; file.write_all(&transaction.into_proto_bytes()?)?; Ok(()) } /// Returns the config as well as the genesis keyapir pub fn get_test_config() -> (NodeConfig, KeyPair<Ed25519PrivateKey, Ed25519PublicKey>) { // TODO: test config should be moved here instead of config crate let config = NodeConfigHelpers::get_single_node_test_config(true); // Those configs should be different on every call. 
We bypass the // costly StdRng initialization let mut seed_rng = rand::rngs::OsRng::new().expect("can't access OsRng"); let seed_buf: [u8; 32] = seed_rng.gen(); let mut rng = rand::rngs::StdRng::from_seed(seed_buf); let (private_key, _) = compat::generate_keypair(&mut rng); let keypair = KeyPair::from(private_key); gen_genesis_transaction( &config.execution.genesis_file_location, &keypair, &TrustedPeersConfigHelpers::get_test_config(1, None).1, ) .expect("[config] failed to create genesis transaction"); (config, keypair) }
39
93
0.687424
3a39d91ddc3537294c657f3ee7e653f2d0a41a33
2,074
// Take a look at the license at the top of the repository in the LICENSE file. use std::marker::PhantomData; use std::rc::{self, Rc}; use std::sync::{self, Arc}; // rustdoc-stripper-ignore-next /// Trait for generalizing downgrading a strong reference to a weak reference. pub trait Downgrade where Self: Sized, { // rustdoc-stripper-ignore-next /// Weak reference type. type Weak; // rustdoc-stripper-ignore-next /// Downgrade to a weak reference. fn downgrade(&self) -> Self::Weak; } // rustdoc-stripper-ignore-next /// Trait for generalizing upgrading a weak reference to a strong reference. pub trait Upgrade where Self: Sized, { // rustdoc-stripper-ignore-next /// Strong reference type. type Strong; // rustdoc-stripper-ignore-next /// Try upgrading a weak reference to a strong reference. fn upgrade(&self) -> Option<Self::Strong>; } impl<T: Downgrade + crate::ObjectType> Upgrade for crate::WeakRef<T> { type Strong = T; fn upgrade(&self) -> Option<Self::Strong> { self.upgrade() } } impl<T> Downgrade for PhantomData<T> { type Weak = PhantomData<T>; fn downgrade(&self) -> Self::Weak { PhantomData } } impl<T: Downgrade> Downgrade for &T { type Weak = T::Weak; fn downgrade(&self) -> Self::Weak { T::downgrade(*self) } } impl<T> Downgrade for Arc<T> { type Weak = sync::Weak<T>; fn downgrade(&self) -> Self::Weak { Arc::downgrade(self) } } impl<T> Upgrade for PhantomData<T> { type Strong = PhantomData<T>; fn upgrade(&self) -> Option<Self::Strong> { Some(PhantomData) } } impl<T> Upgrade for sync::Weak<T> { type Strong = Arc<T>; fn upgrade(&self) -> Option<Self::Strong> { self.upgrade() } } impl<T> Downgrade for Rc<T> { type Weak = rc::Weak<T>; fn downgrade(&self) -> Self::Weak { Rc::downgrade(self) } } impl<T> Upgrade for rc::Weak<T> { type Strong = Rc<T>; fn upgrade(&self) -> Option<Self::Strong> { self.upgrade() } }
20.74
79
0.618129
29c72b54ec2780d325bfa614e78bf7d2b975b62a
7,670
// Generated from definition io.k8s.api.admissionregistration.v1beta1.ServiceReference /// ServiceReference holds a reference to Service.legacy.k8s.io #[derive(Clone, Debug, Default, PartialEq)] pub struct ServiceReference { /// `name` is the name of the service. Required pub name: String, /// `namespace` is the namespace of the service. Required pub namespace: String, /// `path` is an optional URL path which will be sent in any request to this service. pub path: Option<String>, } impl<'de> crate::serde::Deserialize<'de> for ServiceReference { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> { #[allow(non_camel_case_types)] enum Field { Key_name, Key_namespace, Key_path, Other, } impl<'de> crate::serde::Deserialize<'de> for Field { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> { struct Visitor; impl<'de> crate::serde::de::Visitor<'de> for Visitor { type Value = Field; fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str("field identifier") } fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: crate::serde::de::Error { Ok(match v { "name" => Field::Key_name, "namespace" => Field::Key_namespace, "path" => Field::Key_path, _ => Field::Other, }) } } deserializer.deserialize_identifier(Visitor) } } struct Visitor; impl<'de> crate::serde::de::Visitor<'de> for Visitor { type Value = ServiceReference; fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str("ServiceReference") } fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: crate::serde::de::MapAccess<'de> { let mut value_name: Option<String> = None; let mut value_namespace: Option<String> = None; let mut value_path: Option<String> = None; while let Some(key) = crate::serde::de::MapAccess::next_key::<Field>(&mut map)? 
{ match key { Field::Key_name => value_name = crate::serde::de::MapAccess::next_value(&mut map)?, Field::Key_namespace => value_namespace = crate::serde::de::MapAccess::next_value(&mut map)?, Field::Key_path => value_path = crate::serde::de::MapAccess::next_value(&mut map)?, Field::Other => { let _: crate::serde::de::IgnoredAny = crate::serde::de::MapAccess::next_value(&mut map)?; }, } } Ok(ServiceReference { name: value_name.unwrap_or_default(), namespace: value_namespace.unwrap_or_default(), path: value_path, }) } } deserializer.deserialize_struct( "ServiceReference", &[ "name", "namespace", "path", ], Visitor, ) } } impl crate::serde::Serialize for ServiceReference { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: crate::serde::Serializer { let mut state = serializer.serialize_struct( "ServiceReference", 2 + self.path.as_ref().map_or(0, |_| 1), )?; crate::serde::ser::SerializeStruct::serialize_field(&mut state, "name", &self.name)?; crate::serde::ser::SerializeStruct::serialize_field(&mut state, "namespace", &self.namespace)?; if let Some(value) = &self.path { crate::serde::ser::SerializeStruct::serialize_field(&mut state, "path", value)?; } crate::serde::ser::SerializeStruct::end(state) } } #[cfg(feature = "schemars")] impl crate::schemars::JsonSchema for ServiceReference { fn schema_name() -> String { "io.k8s.api.admissionregistration.v1beta1.ServiceReference".to_owned() } fn json_schema(__gen: &mut crate::schemars::gen::SchemaGenerator) -> crate::schemars::schema::Schema { crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject { metadata: Some(Box::new(crate::schemars::schema::Metadata { description: Some("ServiceReference holds a reference to Service.legacy.k8s.io".to_owned()), ..Default::default() })), instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::Object))), object: Some(Box::new(crate::schemars::schema::ObjectValidation { properties: 
IntoIterator::into_iter([ ( "name".to_owned(), crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject { metadata: Some(Box::new(crate::schemars::schema::Metadata { description: Some("`name` is the name of the service. Required".to_owned()), ..Default::default() })), instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::String))), ..Default::default() }), ), ( "namespace".to_owned(), crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject { metadata: Some(Box::new(crate::schemars::schema::Metadata { description: Some("`namespace` is the namespace of the service. Required".to_owned()), ..Default::default() })), instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::String))), ..Default::default() }), ), ( "path".to_owned(), crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject { metadata: Some(Box::new(crate::schemars::schema::Metadata { description: Some("`path` is an optional URL path which will be sent in any request to this service.".to_owned()), ..Default::default() })), instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::String))), ..Default::default() }), ), ]).collect(), required: IntoIterator::into_iter([ "name", "namespace", ]).map(std::borrow::ToOwned::to_owned).collect(), ..Default::default() })), ..Default::default() }) } }
45.384615
151
0.507953
eb8ec60989e09f751130143aaf7d4e946fd4ff79
480
//! SeaORM entity definition for the `user` table.

use sea_orm::entity::prelude::*;
use serde::{Deserialize, Serialize};

/// Row model for the `user` table.
#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Deserialize, Serialize)]
#[sea_orm(table_name = "user")]
pub struct Model {
    // Primary key; skipped during deserialization so incoming payloads
    // cannot set or overwrite it.
    #[sea_orm(primary_key)]
    #[serde(skip_deserializing)]
    pub id: i32,
    pub name: String,
    pub first_name: String,
    pub last_name: String,
}

/// No relations are defined for this entity (empty enum).
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {}

// Use the default ActiveModel behavior (no custom save/delete hooks).
impl ActiveModelBehavior for ActiveModel {}
25.263158
77
0.710417
62d37cf4f4d24f4660e3630502c18e21208ec4b1
12,388
use inner::{Inner, Values};
use std::borrow::Borrow;
use std::cell;
use std::collections::hash_map::RandomState;
use std::hash::{BuildHasher, Hash};
use std::iter::{self, FromIterator};
use std::marker::PhantomData;
use std::mem;
use std::sync::atomic;
use std::sync::atomic::AtomicPtr;
use std::sync::{self, Arc};

/// A handle that may be used to read from the eventually consistent map.
///
/// Note that any changes made to the map will not be made visible until the writer calls
/// `refresh()`. In other words, all operations performed on a `ReadHandle` will *only* see writes
/// to the map that preceded the last call to `refresh()`.
pub struct ReadHandle<K, V, M = (), S = RandomState>
where
    K: Eq + Hash,
    S: BuildHasher,
{
    // Pointer to the map currently designated for readers; the writer swaps it on refresh.
    pub(crate) inner: sync::Arc<AtomicPtr<Inner<K, V, M, S>>>,
    // Registry of all reader epoch slots; shared with the writer (see `ReadHandle::new`).
    pub(crate) epochs: crate::Epochs,
    // This reader's published epoch slot, which the writer polls to decide when a
    // pointer swap is safe (see the protocol described in `with_handle`).
    epoch: sync::Arc<sync::atomic::AtomicUsize>,
    // Private epoch counter; advanced at the start of every `with_handle` call.
    my_epoch: sync::atomic::AtomicUsize,
    // Since a `ReadHandle` keeps track of its own epoch, it is not safe for multiple threads to
    // call `with_handle` at the same time. We *could* keep it `Sync` and make `with_handle`
    // require `&mut self`, but that seems overly excessive. It would also mean that all other
    // methods on `ReadHandle` would now take `&mut self`, *and* that `ReadHandle` can no longer be
    // `Clone`. Since optin_builtin_traits is still an unstable feature, we use this hack to make
    // `ReadHandle` be marked as `!Sync` (since it contains an `Cell` which is `!Sync`).
    _not_sync_no_feature: PhantomData<cell::Cell<()>>,
}

/// A type that is both `Sync` and `Send` and lets you produce new [`ReadHandle`] instances.
///
/// This serves as a handy way to distribute read handles across many threads without requiring
/// additional external locking to synchronize access to the non-`Sync` `ReadHandle` type. Note
/// that this _internally_ takes a lock whenever you call [`ReadHandleFactory::handle`], so
/// you should not expect producing new handles rapidly to scale well.
pub struct ReadHandleFactory<K, V, M = (), S = RandomState>
where
    K: Eq + Hash,
    S: BuildHasher,
{
    inner: sync::Arc<AtomicPtr<Inner<K, V, M, S>>>,
    epochs: crate::Epochs,
}

impl<K, V, M, S> Clone for ReadHandleFactory<K, V, M, S>
where
    K: Eq + Hash,
    S: BuildHasher,
{
    fn clone(&self) -> Self {
        Self {
            inner: sync::Arc::clone(&self.inner),
            epochs: sync::Arc::clone(&self.epochs),
        }
    }
}

impl<K, V, M, S> ReadHandleFactory<K, V, M, S>
where
    K: Eq + Hash,
    S: BuildHasher,
{
    /// Produce a new [`ReadHandle`] to the same map as this factory was originally produced from.
    pub fn handle(&self) -> ReadHandle<K, V, M, S> {
        ReadHandle::new(
            sync::Arc::clone(&self.inner),
            sync::Arc::clone(&self.epochs),
        )
    }
}

impl<K, V, M, S> Clone for ReadHandle<K, V, M, S>
where
    K: Eq + Hash,
    S: BuildHasher,
{
    fn clone(&self) -> Self {
        // Each clone registers its own epoch slot with the writer (via `ReadHandle::new`).
        ReadHandle::new(
            sync::Arc::clone(&self.inner),
            sync::Arc::clone(&self.epochs),
        )
    }
}

// Crate-internal constructor: boxes the map, publishes its pointer, and wraps it
// in a `ReadHandle`.
pub(crate) fn new<K, V, M, S>(
    inner: Inner<K, V, M, S>,
    epochs: crate::Epochs,
) -> ReadHandle<K, V, M, S>
where
    K: Eq + Hash,
    S: BuildHasher,
{
    let store = Box::into_raw(Box::new(inner));
    ReadHandle::new(sync::Arc::new(AtomicPtr::new(store)), epochs)
}

impl<K, V, M, S> ReadHandle<K, V, M, S>
where
    K: Eq + Hash,
    S: BuildHasher,
{
    fn new(inner: sync::Arc<AtomicPtr<Inner<K, V, M, S>>>, epochs: crate::Epochs) -> Self {
        // tell writer about our epoch tracker
        let epoch = sync::Arc::new(atomic::AtomicUsize::new(0));
        epochs.lock().unwrap().push(Arc::clone(&epoch));

        Self {
            epochs,
            epoch,
            my_epoch: atomic::AtomicUsize::new(0),
            inner,
            _not_sync_no_feature: PhantomData,
        }
    }

    /// Create a new `Sync` type that can produce additional `ReadHandle`s for use in other
    /// threads.
    pub fn factory(&self) -> ReadHandleFactory<K, V, M, S> {
        ReadHandleFactory {
            inner: sync::Arc::clone(&self.inner),
            epochs: sync::Arc::clone(&self.epochs),
        }
    }
}

impl<K, V, M, S> ReadHandle<K, V, M, S>
where
    K: Eq + Hash,
    S: BuildHasher,
    M: Clone,
{
    // Core read primitive: runs `f` against the reader-side map while holding the
    // epoch "read in progress" marker. Returns `None` if the map has been destroyed
    // (null pointer).
    fn with_handle<F, T>(&self, f: F) -> Option<T>
    where
        F: FnOnce(&Inner<K, V, M, S>) -> T,
    {
        // once we update our epoch, the writer can no longer do a swap until we set the MSB to
        // indicate that we've finished our read. however, we still need to deal with the case of a
        // race between when the writer reads our epoch and when they decide to make the swap.
        //
        // assume that there is a concurrent writer. it just swapped the atomic pointer from A to
        // B. the writer wants to modify A, and needs to know if that is safe. we can be in any of
        // the following cases when we atomically swap out our epoch:
        //
        //  1. the writer has read our previous epoch twice
        //  2. the writer has already read our previous epoch once
        //  3. the writer has not yet read our previous epoch
        //
        // let's discuss each of these in turn.
        //
        //  1. since writers assume they are free to proceed if they read an epoch with MSB set
        //     twice in a row, this is equivalent to case (2) below.
        //  2. the writer will see our epoch change, and so will assume that we have read B. it
        //     will therefore feel free to modify A. note that *another* pointer swap can happen,
        //     back to A, but then the writer would be blocked on our epoch, and so cannot modify
        //     A *or* B. consequently, using a pointer we read *after* the epoch swap is definitely
        //     safe here.
        //  3. the writer will read our epoch, notice that MSB is not set, and will keep reading,
        //     continuing to observe that it is still not set until we finish our read. thus,
        //     neither A nor B are being modified, and we can safely use either.
        //
        // in all cases, using a pointer we read *after* updating our epoch is safe.

        // so, update our epoch tracker.
        let epoch = self.my_epoch.fetch_add(1, atomic::Ordering::Relaxed);
        self.epoch.store(epoch + 1, atomic::Ordering::Release);

        // ensure that the pointer read happens strictly after updating the epoch
        atomic::fence(atomic::Ordering::SeqCst);

        // then, atomically read pointer, and use the map being pointed to
        let r_handle = self.inner.load(atomic::Ordering::Acquire);
        // SAFETY: the epoch protocol above prevents the writer from modifying or freeing
        // the map this pointer refers to until we publish the MSB below. A null pointer
        // (destroyed map) yields `None` via `as_ref`.
        let res = unsafe { r_handle.as_ref().map(f) };

        // we've finished reading -- let the writer know by setting the epoch's MSB
        self.epoch.store(
            (epoch + 1) | 1usize << (mem::size_of::<usize>() * 8 - 1),
            atomic::Ordering::Release,
        );

        res
    }

    /// Returns the number of non-empty keys present in the map.
    pub fn len(&self) -> usize {
        self.with_handle(|inner| inner.data.len()).unwrap_or(0)
    }

    /// Returns true if the map contains no elements.
    pub fn is_empty(&self) -> bool {
        self.with_handle(|inner| inner.data.is_empty())
            .unwrap_or(true)
    }

    /// Get the current meta value.
    pub fn meta(&self) -> Option<M> {
        self.with_handle(|inner| inner.meta.clone())
    }

    /// Internal version of `get_and`
    fn get_raw<Q: ?Sized, F, T>(&self, key: &Q, then: F) -> Option<T>
    where
        F: FnOnce(&Values<V>) -> T,
        K: Borrow<Q>,
        Q: Hash + Eq,
    {
        self.with_handle(move |inner| {
            if !inner.is_ready() {
                // no refresh has happened yet, so expose nothing
                None
            } else {
                inner.data.get(key).map(then)
            }
        })
        .unwrap_or(None)
    }

    /// Applies a function to the values corresponding to the key, and returns the result.
    ///
    /// The key may be any borrowed form of the map's key type, but `Hash` and `Eq` on the borrowed
    /// form *must* match those for the key type.
    ///
    /// Note that not all writes will be included with this read -- only those that have been
    /// refreshed by the writer. If no refresh has happened, this function returns `None`.
    ///
    /// If no values exist for the given key, no refresh has happened, or the map has been
    /// destroyed, `then` will not be called, and `None` will be returned.
    #[inline]
    pub fn get_and<Q: ?Sized, F, T>(&self, key: &Q, then: F) -> Option<T>
    where
        F: FnOnce(&[V]) -> T,
        K: Borrow<Q>,
        Q: Hash + Eq,
    {
        // call `borrow` here to monomorphize `get_raw` fewer times
        self.get_raw(key.borrow(), |values| then(&**values))
    }

    /// Applies a function to the values corresponding to the key, and returns the result alongside
    /// the meta information.
    ///
    /// The key may be any borrowed form of the map's key type, but `Hash` and `Eq` on the borrowed
    /// form *must* match those for the key type.
    ///
    /// Note that not all writes will be included with this read -- only those that have been
    /// refreshed by the writer. If no refresh has happened, or if the map has been closed by the
    /// writer, this function returns `None`.
    ///
    /// If no values exist for the given key, `then` will not be called, and `Some(None, _)` is
    /// returned.
    pub fn meta_get_and<Q: ?Sized, F, T>(&self, key: &Q, then: F) -> Option<(Option<T>, M)>
    where
        F: FnOnce(&[V]) -> T,
        K: Borrow<Q>,
        Q: Hash + Eq,
    {
        self.with_handle(move |inner| {
            if !inner.is_ready() {
                None
            } else {
                let res = inner.data.get(key).map(move |v| then(&**v));
                let res = (res, inner.meta.clone());
                Some(res)
            }
        })
        .unwrap_or(None)
    }

    /// If the writer has destroyed this map, this method will return true.
    ///
    /// See `WriteHandle::destroy`.
    pub fn is_destroyed(&self) -> bool {
        // a destroyed map produces a null pointer, making `with_handle` return `None`
        self.with_handle(|_| ()).is_none()
    }

    /// Returns true if the map contains any values for the specified key.
    ///
    /// The key may be any borrowed form of the map's key type, but `Hash` and `Eq` on the borrowed
    /// form *must* match those for the key type.
    pub fn contains_key<Q: ?Sized>(&self, key: &Q) -> bool
    where
        K: Borrow<Q>,
        Q: Hash + Eq,
    {
        self.with_handle(move |inner| inner.data.contains_key(key))
            .unwrap_or(false)
    }

    /// Apply `f` to every key/values pair present in the map.
    ///
    /// Be careful with this function! While the iteration is ongoing, any writer that tries to
    /// refresh will block waiting on this reader to finish.
    pub fn for_each<F>(&self, mut f: F)
    where
        F: FnMut(&K, &[V]),
    {
        self.with_handle(move |inner| {
            for (k, vs) in &inner.data {
                f(k, &vs[..])
            }
        });
    }

    /// Read all values in the map, and transform them into a new collection.
    pub fn map_into<Map, Collector, Target>(&self, mut f: Map) -> Collector
    where
        Map: FnMut(&K, &[V]) -> Target,
        Collector: FromIterator<Target>,
    {
        self.with_handle(move |inner| {
            Collector::from_iter(inner.data.iter().map(|(k, vs)| f(k, &vs[..])))
        })
        // destroyed or missing map yields an empty collection
        .unwrap_or_else(|| Collector::from_iter(iter::empty()))
    }
}

#[cfg(test)]
mod test {
    use crate::new;

    // the idea of this test is to allocate 64 elements, and only use 17. The vector will
    // probably try to fit either exactly the length, to the next highest power of 2 from
    // the length, or something else entirely, E.g. 17, 32, etc.,
    // but it should always end up being smaller than the original 64 elements reserved.
    #[test]
    fn reserve_and_fit() {
        const MIN: usize = (1 << 4) + 1;
        const MAX: usize = (1 << 6);

        let (r, mut w) = new();

        w.reserve(0, MAX).refresh();

        r.get_raw(&0, |vs| assert_eq!(vs.capacity(), MAX)).unwrap();

        for i in 0..MIN {
            w.insert(0, i);
        }

        w.fit_all().refresh();

        r.get_raw(&0, |vs| assert!(vs.capacity() < MAX)).unwrap();
    }
}
35.193182
99
0.590814
f48c99bd87e8263feaae51cbd7d44f0d7cd6894a
17,026
use ecs::id::EntityId; use ecs::Ecs; use ecs::component::Position; use ecs::component::Actor; use ecs::component::Render; use tcod::colors; use ecs::component::MonsterAi; use ecs::component::Corpse; use render::RenderOrder; use message::{Message, MessageLog}; use ecs::component::Name; use std::rc::Rc; use ecs::component::Inventory; use ecs::component::Item; use game::state::GameState; use ecs::spell::SpellResult; use ecs::spell::SpellStatus; use tcod::Map; use ecs::component::Level; use settings::Settings; /// This struct defines the Result of one single action. A message can be created, and also /// a reaction can happen. struct ActionResult { reactions: Vec<EntityAction>, message: Option<Vec<Message>>, state: Option<GameState>, } impl ActionResult { /// Return a `ActionResult` with all values being `None` pub fn none() -> ActionResult { ActionResult { reactions: vec![], message: None, state: None, } } } /// All possible interactions between `Component`s #[derive(PartialEq)] pub enum EntityAction { MeleeAttack(EntityId, EntityId), TakeDamage(EntityId, u32, EntityId), MoveTo(EntityId, (i32, i32)), MoveRelative(EntityId, (i32, i32)), Die(EntityId), PickUpItem(EntityId, EntityId), DropItem(EntityId, u8), UseItem(EntityId, u8), AddItemToInventory(EntityId, EntityId), RemoveItemFromInventory(EntityId, EntityId), SetAiTarget(EntityId, EntityId), RewardXp(EntityId, u32), LevelUp(EntityId), UpdateFov(EntityId), LookForTarget(EntityId), Idle, } impl EntityAction { /// Execute the action pub fn execute(&self, ecs: &mut Ecs, fov_map: &Map, log: Rc<MessageLog>, settings: &Settings) -> Option<GameState> { let result = match *self { EntityAction::MoveTo(entity_id, pos) => self.move_to_action(ecs, entity_id, pos), EntityAction::MoveRelative(entity_id, delta) => self.move_relative_action(ecs, entity_id, delta), EntityAction::MeleeAttack(attacker_id, target_id) => self.melee_attack_action(ecs, attacker_id, target_id), EntityAction::TakeDamage(entity_id, damage, attacker_id) => 
self.take_damage_action(ecs, entity_id, damage, attacker_id), EntityAction::Die(entity_id) => self.die_action(ecs, entity_id), EntityAction::PickUpItem(entity_id, item_id) => self.pick_up_item_action(ecs, entity_id, item_id), EntityAction::DropItem(entity_id, item_number) => self.drop_item_action(ecs, entity_id, item_number), EntityAction::AddItemToInventory(entity_id, item_id) => self.add_item_to_inventory_action(ecs, entity_id, item_id), EntityAction::RemoveItemFromInventory(entity_id, item_id) => self.remove_item_from_inventory_action(ecs, entity_id, item_id), EntityAction::UseItem(entity_id, item_number) => self.use_item_action(ecs, fov_map, entity_id, item_number), EntityAction::SetAiTarget(entity_id, target_id) => self.set_ai_target_action(ecs, entity_id, target_id), EntityAction::RewardXp(entity_id, xp) => self.reward_xp(ecs, entity_id, xp), EntityAction::LevelUp(entity_id) => self.level_up(ecs, entity_id), EntityAction::LookForTarget(entity_id) => self.look_for_target_action(ecs, entity_id, settings), EntityAction::UpdateFov(entity_id) => self.update_fov_action(ecs, entity_id, settings), EntityAction::Idle => ActionResult::none() // Idle - do nothing }; if let Some(messages) = result.message { for message in messages { log.add(message); } } let mut resulting_state = None; for reaction in result.reactions { resulting_state = if let Some(state) = reaction.execute(ecs, fov_map, Rc::clone(&log), settings) { Some(state) } else { resulting_state } } match result.state { None => { resulting_state } _ => { result.state } } } fn melee_attack_action(&self, ecs: &mut Ecs, attacker_id: EntityId, target_id: EntityId) -> ActionResult { let attacker_name = EntityAction::get_entity_name(ecs, attacker_id).to_uppercase(); let target_name = EntityAction::get_entity_name(ecs, target_id); match ecs.get_component::<Actor>(attacker_id) { Some(actor) => { match actor.calculate_attack(ecs, target_id) { Some(damage) => { ActionResult { message: Some(vec![Message::new(format!("The {} 
attacks the {} .", attacker_name, target_name), colors::WHITE)]), reactions: vec![EntityAction::TakeDamage(target_id, damage, attacker_id)], state: None, } } None => ActionResult::none() } } None => ActionResult::none() } } fn reward_xp(&self, ecs: &mut Ecs, entity_id: EntityId, xp: u32) -> ActionResult { let entity_name = EntityAction::get_entity_name(ecs, entity_id).to_uppercase(); if let Some(l) = ecs.get_component_mut::<Level>(entity_id) { let reactions = if l.reward_xp(xp) { vec![EntityAction::LevelUp(entity_id)] } else { vec![] }; let message = Message::new(format!("{} gains {} XP", entity_name, xp), colors::WHITE); ActionResult { reactions, message: Some(vec![message]), state: None, } } else { ActionResult::none() } } fn level_up(&self, ecs: &mut Ecs, entity_id: EntityId) -> ActionResult { let entity_name = EntityAction::get_entity_name(ecs, entity_id).to_uppercase(); if let Some(l) = ecs.get_component_mut::<Level>(entity_id) { l.level_up(); let message = Message::new(format!("{} feels stronger: Reached level {}.", entity_name, l.level), colors::YELLOW); ActionResult { reactions: vec![], message: Some(vec![message]), state: Some(GameState::ShowLeveUpMenu), } } else { ActionResult::none() } } fn move_to_action(&self, ecs: &mut Ecs, entity_id: EntityId, pos: (i32, i32)) -> ActionResult { if let Some(c) = ecs.get_component_mut::<Position>(entity_id) { c.move_absolute(pos) }; ActionResult::none() } fn move_relative_action(&self, ecs: &mut Ecs, entity_id: EntityId, delta: (i32, i32)) -> ActionResult { if let Some(c) = ecs.get_component_mut::<Position>(entity_id) { c.move_relative(delta) }; ActionResult::none() } fn take_damage_action(&self, ecs: &mut Ecs, entity_id: EntityId, damage: u32, attacker_id: EntityId) -> ActionResult { let entity_name = EntityAction::get_entity_name(ecs, entity_id).to_uppercase(); if let Some(e) = ecs.get_component_mut::<Actor>(entity_id) { e.take_damage(damage); let message = Message::new(if damage > 0 { format!("The {} takes {} 
damage.", entity_name, damage) } else { format!("The {} takes no damage.", entity_name) }, colors::WHITE); return if e.hp <= 0 { ActionResult { reactions: vec![ EntityAction::Die(entity_id), EntityAction::RewardXp(attacker_id, e.xp_reward) ], message: Some(vec![message]), state: None, } } else { ActionResult { reactions: vec![], message: Some(vec![message]), state: None, } }; } ActionResult::none() } fn use_item_action(&self, ecs: &mut Ecs, fov_map: &Map, entity_id: EntityId, item_number: u8) -> ActionResult { let entity_name = EntityAction::get_entity_name(ecs, entity_id).to_uppercase(); let mut item_name = "".to_string(); let mut item_id = 0; let spell = if let Some(inventory) = ecs.get_component::<Inventory>(entity_id) { if inventory.items.len() > item_number as usize { item_id = inventory.items[item_number as usize]; item_name = EntityAction::get_entity_name(ecs, item_id).to_uppercase(); if let Some(i) = ecs.get_component::<Item>(item_id) { i.use_item() } else { None } } else { None } } else { None }; if let Some(s) = spell { let mut messages = vec![Message::new(format!("{} uses {}", entity_name, item_name), colors::WHITE)]; let id = ecs.player_entity_id; let cast_result = s.cast(ecs, fov_map, id); let state = match cast_result { SpellResult { status: SpellStatus::Success, .. } => { self.use_item_success(ecs, item_id) } SpellResult { status: SpellStatus::Targeting(spell, caster_id), .. } => { Some(GameState::Targeting(spell, caster_id)) } SpellResult { status: SpellStatus::Fail, .. } => { Some(GameState::ShowInventoryUse) } }; match cast_result { SpellResult { message: Some(message), .. 
} => messages.push(message), _ => () } return ActionResult { message: Some(messages), reactions: cast_result.reactions, state, }; } else { ActionResult { message: None, reactions: vec![], state: Some(GameState::ShowInventoryUse), } } } fn use_item_success(&self, ecs: &mut Ecs, item_id: EntityId) -> Option<GameState> { ecs.destroy_entity(&item_id); None } fn remove_item_from_inventory_action(&self, ecs: &mut Ecs, entity_id: EntityId, item_id: EntityId) -> ActionResult { if let Some(inventory) = ecs.get_component_mut::<Inventory>(entity_id) { inventory.remove_item_id(item_id); } ActionResult::none() } fn drop_item_action(&self, ecs: &mut Ecs, entity_id: EntityId, item_number: u8) -> ActionResult { let entity_name = EntityAction::get_entity_name(ecs, entity_id).to_uppercase(); let mut item_name = "".to_string(); let mut item_id = 0; let item_position = if let Some(inventory) = ecs.get_component::<Inventory>(entity_id) { if inventory.items.len() > item_number as usize { item_id = inventory.items[item_number as usize]; item_name = EntityAction::get_entity_name(ecs, item_id).to_uppercase(); if let Some(p) = ecs.get_component::<Position>(entity_id) { let mut item_position = Position::new(entity_id, false); item_position.position = p.position; Some(item_position) } else { None } } else { None } } else { None }; if let Some(p) = item_position { let message = Message::new(format!("{} dropped {} on the floor", entity_name, item_name), colors::YELLOW); ecs.register_component(item_id, p); if let Some(inventory) = ecs.get_component_mut::<Inventory>(entity_id) { inventory.remove_item(item_number as usize); } ActionResult { reactions: vec![], message: Some(vec![message]), state: None, } } else { ActionResult { reactions: vec![], message: None, state: Some(GameState::ShowInventoryDrop), } } } fn pick_up_item_action(&self, ecs: &mut Ecs, entity_id: EntityId, item_id: EntityId) -> ActionResult { let entity_name = EntityAction::get_entity_name(ecs, entity_id).to_uppercase(); let 
item_name = EntityAction::get_entity_name(ecs, item_id).to_uppercase(); if let Some(inventory) = ecs.get_component::<Inventory>(entity_id) { if inventory.free_space() > 0 { let message = Message::new(format!("{} picked up the {}", entity_name, item_name), colors::BLUE); ActionResult { reactions: vec![EntityAction::AddItemToInventory(entity_id, item_id)], message: Some(vec![message]), state: None, } } else { let message = Message::new(format!("You can't pick up {}: Inventory is full.", item_name), colors::YELLOW); ActionResult { reactions: vec![], message: Some(vec![message]), state: None, } } } else { ActionResult::none() } } fn add_item_to_inventory_action(&self, ecs: &mut Ecs, entity_id: EntityId, item_id: EntityId) -> ActionResult { ecs.remove_component::<Position>(item_id); if let Some(inventory) = ecs.get_component_mut::<Inventory>(entity_id) { inventory.add_item(item_id); } ActionResult::none() } fn die_action(&self, ecs: &mut Ecs, entity_id: EntityId) -> ActionResult { let entity_name = EntityAction::get_entity_name(ecs, entity_id).to_uppercase(); let message = if entity_id == ecs.player_entity_id { Message::new("YOU DIED".to_string(), colors::RED) } else { Message::new(format!("The {} died.", entity_name), colors::ORANGE) }; // Override the Rendering with the default corpse glyph ecs.register_component(entity_id, Render::new(entity_id, '%', colors::DARK_CRIMSON, RenderOrder::Corpse)); // Remove the AI and the Creature components ecs.remove_component::<MonsterAi>(entity_id); // Add the Corpse component ecs.register_component(entity_id, Corpse {}); // Set non blocking match ecs.get_component_mut::<Position>(entity_id) { Some(p) => p.is_blocking = false, None => () } ActionResult { reactions: vec![], message: Some(vec![message]), state: None, } } fn set_ai_target_action(&self, ecs: &mut Ecs, entity_id: EntityId, target_id: EntityId) -> ActionResult { if let Some(ai) = ecs.get_component_mut::<MonsterAi>(entity_id) { ai.set_target(target_id); } ActionResult { 
reactions: vec![], message: None, state: None, } } fn look_for_target_action(&self, ecs: &mut Ecs, entity_id: EntityId, settings: &Settings) -> ActionResult { let target_in_fov = { if let Some(ai) = ecs.get_component::<MonsterAi>(entity_id) { ai.is_within_ai_distance(ecs, settings) && ai.is_target_in_fov(ecs, settings) } else { false } } ; if let Some(ai) = ecs.get_component_mut::<MonsterAi>(entity_id) { ai.set_chasing_target(target_in_fov); } ActionResult { reactions: vec![], message: None, state: None, } } fn update_fov_action(&self, ecs: &mut Ecs, entity_id: EntityId, settings: &Settings) -> ActionResult { let position = { if let Some(p) = ecs.get_component::<Position>(entity_id) { Some((p.x(), p.y())) } else { None } }; match position { Some((x, y)) => { if let Some(ai) = ecs.get_component_mut::<MonsterAi>(entity_id) { ai.recompute_fov(settings, x, y) } }, _ => {} } ActionResult { reactions: vec![], message: None, state: None, } } fn get_entity_name(ecs: &Ecs, id: EntityId) -> String { match ecs.get_component::<Name>(id) { Some(n) => n.name.clone(), None => format!("nameless entity (#{})", id) } } }
36.14862
141
0.548808
62c804e6759d563a6c1bd6703a433658098e74a8
5,041
use crate::common::{
    jcli_wrapper, jcli_wrapper::jcli_transaction_wrapper::JCLITransactionWrapper,
    jormungandr::{ConfigurationBuilder, JormungandrProcess, Role, Starter},
    startup,
};
use jormungandr_lib::interfaces::{AccountState, InitialUTxO, SettingsDto, UTxOInfo};
use jormungandr_testing_utils::wallet::Wallet;

use assert_fs::prelude::*;
use assert_fs::TempDir;

/// Snapshot of the externally observable ledger state (node settings, one UTxO,
/// and one account's stats) used to compare the node before and after a restart.
#[derive(Clone, Debug, PartialEq)]
struct LedgerSnapshot {
    settings: SettingsDto,
    utxo_info: UTxOInfo,
    account_state: AccountState,
}

impl LedgerSnapshot {
    pub fn new(settings: SettingsDto, utxo_info: UTxOInfo, account_state: AccountState) -> Self {
        LedgerSnapshot {
            settings,
            utxo_info,
            account_state,
        }
    }
}

/// Captures the current ledger state via the node's REST interface.
///
/// Also asserts that `utxo_info` is present in the node's UTxO set, so a
/// snapshot can only be taken of a ledger that actually contains the UTxO.
fn take_snapshot(
    account_receiver: &Wallet,
    jormungandr: &JormungandrProcess,
    utxo_info: UTxOInfo,
) -> LedgerSnapshot {
    let rest_uri = jormungandr.rest_uri();
    let settings = jcli_wrapper::assert_get_rest_settings(&rest_uri);
    let account = jcli_wrapper::assert_rest_account_get_stats(
        &account_receiver.address().to_string(),
        &rest_uri,
    );
    jcli_wrapper::assert_rest_utxo_get_returns_same_utxo(&rest_uri, &utxo_info);

    LedgerSnapshot::new(settings, utxo_info, account)
}

/// Spends `utxo_sender`, paying 50 to `account_receiver` and 50 to
/// `utxo_receiver`, waits for the transaction to land in a block, and returns
/// the freshly created UTxO belonging to `utxo_receiver` (output index 1).
pub fn do_simple_transaction(
    sender: &Wallet,
    account_receiver: &Wallet,
    utxo_sender: &UTxOInfo,
    utxo_receiver: &Wallet,
    jormungandr: &JormungandrProcess,
) -> UTxOInfo {
    const TX_VALUE: u64 = 50;

    let mut tx =
        JCLITransactionWrapper::new_transaction(&jormungandr.genesis_block_hash().to_string());
    let transaction_message = tx
        .assert_add_input_from_utxo(utxo_sender)
        .assert_add_output(&account_receiver.address().to_string(), TX_VALUE.into())
        .assert_add_output(&utxo_receiver.address().to_string(), TX_VALUE.into())
        .assert_finalize()
        .seal_with_witness_for_address(&sender)
        .assert_to_message();
    let tx_id = tx.get_fragment_id();

    jcli_wrapper::assert_transaction_in_block(&transaction_message, &jormungandr);

    // Output 1 is the utxo_receiver output (output 0 went to the account).
    UTxOInfo::new(tx_id, 1, utxo_receiver.address(), TX_VALUE.into())
}

/// Shared body of the recovery tests below (they were previously two verbatim
/// copies of the same code): start a node with persistent storage, perform one
/// transaction, snapshot the ledger, restart the node on the same storage as a
/// leader, and assert the post-restart snapshot is identical.
fn assert_ledger_state_recovers_after_restart() {
    let temp_dir = TempDir::new().unwrap();

    let sender = startup::create_new_utxo_address();
    let account_receiver = startup::create_new_account_address();
    let utxo_receiver = startup::create_new_utxo_address();

    let config = ConfigurationBuilder::new()
        .with_funds(vec![InitialUTxO {
            address: sender.address(),
            value: 100.into(),
        }])
        // Persistent storage is the point of the test: state must survive restart.
        .with_storage(&temp_dir.child("storage"))
        .build(&temp_dir);

    let jormungandr = Starter::new().config(config.clone()).start().unwrap();

    let utxo_sender = config.block0_utxo_for_address(&sender);

    let new_utxo = do_simple_transaction(
        &sender,
        &account_receiver,
        &utxo_sender,
        &utxo_receiver,
        &jormungandr,
    );
    let snapshot_before = take_snapshot(&account_receiver, &jormungandr, new_utxo.clone());

    jormungandr.stop();
    // Give the process time to shut down / come back up before hitting REST.
    std::thread::sleep(std::time::Duration::from_secs(1));

    let jormungandr = Starter::new()
        .config(config)
        .role(Role::Leader)
        .start()
        .unwrap();
    std::thread::sleep(std::time::Duration::from_secs(1));

    let snapshot_after = take_snapshot(&account_receiver, &jormungandr, new_utxo);

    assert_eq!(
        snapshot_before, snapshot_after,
        "Different snapshot after restart {:?} vs {:?}",
        snapshot_before, snapshot_after
    );
}

#[test]
pub fn test_node_recovers_from_node_restart() {
    assert_ledger_state_recovers_after_restart();
}

// NOTE(review): this test was byte-for-byte identical to the graceful-restart
// test above — it also calls `stop()` and never sends a kill signal. Presumably
// it was meant to terminate the process ungracefully (SIGKILL); confirm the
// intended scenario and adjust the shutdown step if so.
#[test]
pub fn test_node_recovers_kill_signal() {
    assert_ledger_state_recovers_after_restart();
}
31.310559
97
0.670304
f559eaa75c14e283cc29df5144a8ccfa993a98bb
14,914
use core::fmt;
use std::{cell::RefCell, collections::HashMap, rc::Rc};

use crate::shared::{Day, PartSolution};

/// One bag color in the containment graph.
///
/// Nodes are shared via `Rc`, and the parent/child link vectors use `RefCell`
/// so both directions can be wired up after the nodes already exist in the map.
#[derive(Default, Debug)]
pub struct Bag {
    pub name: String,
    // Bags that directly contain this bag.
    pub parents: RefCell<Vec<Rc<Bag>>>,
    // (count, bag) pairs this bag directly contains.
    pub children: RefCell<Vec<(u32, Rc<Bag>)>>,
}

impl fmt::Display for Bag {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let child_names: Vec<String> = self
            .children
            .borrow()
            .iter()
            .map(|(c, b)| format!("Name: {}, count: {}", b.name, c))
            .collect();

        let parent_names: Vec<String> = self
            .parents
            .borrow()
            .iter()
            .map(|b| format!("Name: {}", b.name))
            .collect();

        // NOTE(review): the trailing space in this format string may have been a
        // newline collapsed by whitespace mangling — confirm against history.
        write!(
            f,
            "Name: {}, parents: {:?}, children: {:?} ",
            self.name, parent_names, child_names
        )
    }
}

/// Parses one "<count> <color>" fragment (e.g. `"2 muted yellow"`) into its parts.
///
/// Panics if the fragment has no space or the count is not a number, which
/// cannot happen for well-formed puzzle input.
pub fn map_bag_color_with_count(bag_color_with_count: &str) -> (u32, String) {
    let (count, color) = bag_color_with_count.trim().split_once(' ').unwrap();
    (count.parse().unwrap(), color.to_string())
}

/// Parses one rule line into `(bag name, [(count, contained bag name), ...])`.
///
/// A bag that contains nothing ("... contain no other bags.") yields an empty vec.
pub fn parse_bag_line(bag_line: &str) -> (String, Vec<(u32, String)>) {
    // Strip the noise words so only "<name>  contain  <n> <name>, ..." remains.
    let cleaned_up = bag_line
        .replace("bags", "")
        .replace("bag", "")
        .replace('.', "");

    // "contain" occurs exactly once per rule line.
    let (bag_name, inside_bags) = cleaned_up.split_once("contain").unwrap();
    let bag_name = bag_name.trim();
    let inside_bags = inside_bags.trim();

    if inside_bags == "no other" {
        return (bag_name.to_string(), Vec::new());
    }

    let inside_bags_with_count = inside_bags
        .split(',')
        .map(map_bag_color_with_count)
        .collect();

    (bag_name.to_string(), inside_bags_with_count)
}

/// Builds the full bag graph, keyed by bag name, with both parent and child
/// links populated.
pub fn parse_bags(bag_lines: &[String]) -> HashMap<String, Rc<Bag>> {
    let mut bag_parsed: HashMap<String, Rc<Bag>> = HashMap::new();

    for bag_line in bag_lines {
        let (bag_name, count_with_bag_name) = parse_bag_line(bag_line);

        // Materialize (or reuse) each child node first, so both link directions
        // can be wired below without fighting the borrow checker.
        let mut parsed_child_bags_current_line: Vec<(u32, Rc<Bag>)> = Vec::new();

        for (count, child_bag_name) in count_with_bag_name {
            let bag = bag_parsed
                .entry(child_bag_name.clone())
                .or_insert_with(|| {
                    Rc::new(Bag {
                        name: child_bag_name,
                        ..Bag::default()
                    })
                });

            parsed_child_bags_current_line.push((count, Rc::clone(bag)));
        }

        let bag: &Rc<Bag> = bag_parsed.entry(bag_name.clone()).or_insert_with(|| {
            Rc::new(Bag {
                name: bag_name,
                ..Bag::default()
            })
        });

        // Link both directions: parent -> (count, child), child -> parent.
        let mut bag_children = bag.children.borrow_mut();
        for (count, child_bag) in parsed_child_bags_current_line {
            child_bag.parents.borrow_mut().push(Rc::clone(bag));
            bag_children.push((count, Rc::clone(&child_bag)));
        }
    }

    bag_parsed
}

/// Collects the names of all (transitive) ancestors of `bag`.
/// The result may contain duplicates; callers dedup.
fn get_parent_names_recursive(bag: &Rc<Bag>) -> Vec<String> {
    let mut my_parent_names: Vec<String> = bag
        .parents
        .borrow()
        .iter()
        .map(|b| b.name.clone())
        .collect();

    bag.parents
        .borrow()
        .iter()
        .for_each(|p| my_parent_names.append(&mut get_parent_names_recursive(p)));

    my_parent_names
}

/// Part 1: number of distinct bag colors that can (transitively) contain `start`.
fn count_parents(bag_parsed: &HashMap<String, Rc<Bag>>, start: &str) -> u32 {
    let start_bag = bag_parsed.get(start).unwrap();

    let mut parent_names = get_parent_names_recursive(start_bag);
    // Dedup requires sorted input.
    parent_names.sort();
    parent_names.dedup();

    parent_names.len() as u32
}

/// Part 2: total number of bags contained inside `bag` (excluding `bag` itself).
fn count_bags_recursive(bag: &Rc<Bag>) -> u32 {
    bag.children
        .borrow()
        .iter()
        .map(|(count, child)| {
            // Each child contributes `count` copies of itself plus the contents
            // of each of those copies.
            count + count * count_bags_recursive(child)
        })
        .sum()
}

pub struct Solution {}

impl Day for Solution {
    fn part_1(&self) -> PartSolution {
        const BAG_NAME: &str = "shiny gold";

        let lines: Vec<String> = include_str!("input.txt").lines().map(Into::into).collect();
        let bags = parse_bags(&lines);

        PartSolution::U32(count_parents(&bags, BAG_NAME))
    }

    fn part_2(&self) -> PartSolution {
        const BAG_NAME: &str = "shiny gold";

        let lines: Vec<String> = include_str!("input.txt").lines().map(Into::into).collect();
        let bags = parse_bags(&lines);

        PartSolution::U32(count_bags_recursive(bags.get(BAG_NAME).unwrap()))
    }
}

#[cfg(test)]
mod tests {
    use crate::{
        day_07::{count_bags_recursive, count_parents, parse_bag_line, parse_bags, Solution},
        shared::{Day, PartSolution},
    };

    /// Table-driven replacement for the nine `parse_bag_line_*` tests that were
    /// previously duplicated verbatim in both the part_1 and part_2 modules.
    #[test]
    fn parse_bag_lines() {
        let cases: Vec<(&str, &str, Vec<(u32, &str)>)> = vec![
            (
                "light red bags contain 1 bright white bag, 2 muted yellow bags.",
                "light red",
                vec![(1, "bright white"), (2, "muted yellow")],
            ),
            (
                "dark orange bags contain 3 bright white bags, 4 muted yellow bags.",
                "dark orange",
                vec![(3, "bright white"), (4, "muted yellow")],
            ),
            (
                "bright white bags contain 1 shiny gold bag.",
                "bright white",
                vec![(1, "shiny gold")],
            ),
            (
                "muted yellow bags contain 2 shiny gold bags, 9 faded blue bags.",
                "muted yellow",
                vec![(2, "shiny gold"), (9, "faded blue")],
            ),
            (
                "shiny gold bags contain 1 dark olive bag, 2 vibrant plum bags.",
                "shiny gold",
                vec![(1, "dark olive"), (2, "vibrant plum")],
            ),
            (
                "dark olive bags contain 3 faded blue bags, 4 dotted black bags.",
                "dark olive",
                vec![(3, "faded blue"), (4, "dotted black")],
            ),
            (
                "vibrant plum bags contain 5 faded blue bags, 6 dotted black bags.",
                "vibrant plum",
                vec![(5, "faded blue"), (6, "dotted black")],
            ),
            ("faded blue bags contain no other bags.", "faded blue", vec![]),
            ("dotted black bags contain no other bags.", "dotted black", vec![]),
        ];

        for (input, expected_name, expected_contents) in cases {
            let expected = (
                expected_name.to_string(),
                expected_contents
                    .into_iter()
                    .map(|(count, name)| (count, name.to_string()))
                    .collect::<Vec<_>>(),
            );
            assert_eq!(parse_bag_line(input), expected, "input: {}", input);
        }
    }

    #[test]
    fn part_1_outcome() {
        assert_eq!(PartSolution::U32(272), (Solution {}).part_1());
    }

    #[test]
    fn part_2_outcome() {
        assert_eq!(PartSolution::U32(172_246), (Solution {}).part_2());
    }

    #[test]
    fn part_1_example() {
        let input = [
            "light red bags contain 1 bright white bag, 2 muted yellow bags.",
            "dark orange bags contain 3 bright white bags, 4 muted yellow bags.",
            "bright white bags contain 1 shiny gold bag.",
            "muted yellow bags contain 2 shiny gold bags, 9 faded blue bags.",
            "shiny gold bags contain 1 dark olive bag, 2 vibrant plum bags.",
            "dark olive bags contain 3 faded blue bags, 4 dotted black bags.",
            "vibrant plum bags contain 5 faded blue bags, 6 dotted black bags.",
            "faded blue bags contain no other bags.",
            "dotted black bags contain no other bags.",
        ];

        let lines: Vec<String> = input.map(Into::into).into();

        assert_eq!(count_parents(&parse_bags(&lines), "shiny gold"), 4);
    }

    #[test]
    fn part_2_example() {
        const BAG_NAME: &str = "shiny gold";

        let input = [
            "shiny gold bags contain 2 dark red bags.",
            "dark red bags contain 2 dark orange bags.",
            "dark orange bags contain 2 dark yellow bags.",
            "dark yellow bags contain 2 dark green bags.",
            "dark green bags contain 2 dark blue bags.",
            "dark blue bags contain 2 dark violet bags.",
            "dark violet bags contain no other bags.",
        ];

        let lines: Vec<String> = input.map(Into::into).into();
        let bags = parse_bags(&lines);

        assert_eq!(count_bags_recursive(bags.get(BAG_NAME).unwrap()), 126);
    }
}
28.516252
93
0.46976
488335db04d8b8b0400bee564e447b7fa7db1e4f
1,959
use std::collections::HashMap;

use simsearch::SimSearch;

use crate::bitwarden::api::CipherData;

use super::data::{StatefulUserData, Unlocked};

/// Runs a fuzzy search over the prebuilt index.
///
/// Returns `None` for an empty term (meaning "no filter active"), otherwise
/// `Some` with the matching keys — possibly empty when nothing matches.
pub fn search_items(term: &str, simsearch: &SimSearch<String>) -> Option<Vec<String>> {
    if term.is_empty() {
        return None;
    }

    Some(simsearch.search(term))
}

/// Builds a fuzzy-search index over all decryptable vault items.
///
/// If the rows cannot be tokenized (e.g. the user keys fail to decrypt),
/// an empty index is returned rather than an error.
pub fn get_search_index(ud: &StatefulUserData<Unlocked>) -> SimSearch<String> {
    let mut ss = SimSearch::new();

    if let Some(tokenized_rows) = get_tokenized_rows(ud) {
        for (k, tokens) in tokenized_rows {
            // SimSearch will still tokenize (split) each of the tokens
            // that are passed here. Passing them this way just avoids
            // concatenating them into a string.
            let tokens: Vec<_> = tokens.iter().map(|s| s.as_str()).collect();
            ss.insert_tokens(k.clone(), &tokens);
        }
    }

    ss
}

/// Decrypts the searchable fields of every vault item into plaintext tokens,
/// keyed by the vault-data key (presumably the cipher id — confirm at caller).
///
/// Returns `None` only when the user's keys cannot be decrypted. Items whose
/// organization key is missing are silently skipped.
fn get_tokenized_rows(ud: &StatefulUserData<Unlocked>) -> Option<HashMap<String, Vec<String>>> {
    let vd = ud.vault_data();
    let org_keys = ud.get_org_keys_for_vault();
    let (user_enc_key, user_mac_key) = ud.decrypt_keys()?;

    let res = vd
        .iter()
        .filter_map(|(k, v)| {
            // Get appropriate keys for this item: org items use the org's
            // (enc, mac) pair, personal items use the user's keys.
            let (ec, mc) = match &v.organization_id {
                Some(oid) => match org_keys.get(oid) {
                    Some(keys) => (&keys.0, &keys.1),
                    // Unknown org key: drop the item from the index.
                    None => return None,
                },
                _ => (&user_enc_key, &user_mac_key),
            };

            // All items: name
            let mut tokens = vec![v.name.decrypt_to_string(ec, mc)];

            // Login items: url and username
            if let CipherData::Login(l) = &v.data {
                tokens.push(l.username.decrypt_to_string(ec, mc));
                tokens.push(l.uri.decrypt_to_string(ec, mc));
            };

            Some((k.clone(), tokens))
        })
        .collect();

    Some(res)
}
30.609375
96
0.561511
081fe817eb4e862e1e9b5da5d17bdf8c7f7466d9
19,834
//! Stream utilities for Tokio. //! //! A `Stream` is an asynchronous sequence of values. It can be thought of as an asynchronous version of the standard library's `Iterator` trait. //! //! This module provides helpers to work with them. mod all; use all::AllFuture; mod any; use any::AnyFuture; mod chain; use chain::Chain; mod collect; use collect::Collect; pub use collect::FromStream; mod empty; pub use empty::{empty, Empty}; mod filter; use filter::Filter; mod filter_map; use filter_map::FilterMap; mod fuse; use fuse::Fuse; mod iter; pub use iter::{iter, Iter}; mod map; use map::Map; mod merge; use merge::Merge; mod next; use next::Next; mod once; pub use once::{once, Once}; mod pending; pub use pending::{pending, Pending}; mod try_next; use try_next::TryNext; mod take; use take::Take; mod take_while; use take_while::TakeWhile; pub use futures_core::Stream; /// An extension trait for `Stream`s that provides a variety of convenient /// combinator functions. pub trait StreamExt: Stream { /// Consumes and returns the next value in the stream or `None` if the /// stream is finished. /// /// Equivalent to: /// /// ```ignore /// async fn next(&mut self) -> Option<Self::Item>; /// ``` /// /// Note that because `next` doesn't take ownership over the stream, /// the [`Stream`] type must be [`Unpin`]. If you want to use `next` with a /// [`!Unpin`](Unpin) stream, you'll first have to pin the stream. This can /// be done by boxing the stream using [`Box::pin`] or /// pinning it to the stack using the `pin_mut!` macro from the `pin_utils` /// crate. 
/// /// # Examples /// /// ``` /// # #[tokio::main] /// # async fn main() { /// use tokio::stream::{self, StreamExt}; /// /// let mut stream = stream::iter(1..=3); /// /// assert_eq!(stream.next().await, Some(1)); /// assert_eq!(stream.next().await, Some(2)); /// assert_eq!(stream.next().await, Some(3)); /// assert_eq!(stream.next().await, None); /// # } /// ``` fn next(&mut self) -> Next<'_, Self> where Self: Unpin, { Next::new(self) } /// Consumes and returns the next item in the stream. If an error is /// encountered before the next item, the error is returned instead. /// /// Equivalent to: /// /// ```ignore /// async fn try_next(&mut self) -> Result<Option<T>, E>; /// ``` /// /// This is similar to the [`next`](StreamExt::next) combinator, /// but returns a [`Result<Option<T>, E>`](Result) rather than /// an [`Option<Result<T, E>>`](Option), making for easy use /// with the [`?`](std::ops::Try) operator. /// /// # Examples /// /// ``` /// # #[tokio::main] /// # async fn main() { /// use tokio::stream::{self, StreamExt}; /// /// let mut stream = stream::iter(vec![Ok(1), Ok(2), Err("nope")]); /// /// assert_eq!(stream.try_next().await, Ok(Some(1))); /// assert_eq!(stream.try_next().await, Ok(Some(2))); /// assert_eq!(stream.try_next().await, Err("nope")); /// # } /// ``` fn try_next<T, E>(&mut self) -> TryNext<'_, Self> where Self: Stream<Item = Result<T, E>> + Unpin, { TryNext::new(self) } /// Maps this stream's items to a different type, returning a new stream of /// the resulting type. /// /// The provided closure is executed over all elements of this stream as /// they are made available. It is executed inline with calls to /// [`poll_next`](Stream::poll_next). /// /// Note that this function consumes the stream passed into it and returns a /// wrapped version of it, similar to the existing `map` methods in the /// standard library. 
/// /// # Examples /// /// ``` /// # #[tokio::main] /// # async fn main() { /// use tokio::stream::{self, StreamExt}; /// /// let stream = stream::iter(1..=3); /// let mut stream = stream.map(|x| x + 3); /// /// assert_eq!(stream.next().await, Some(4)); /// assert_eq!(stream.next().await, Some(5)); /// assert_eq!(stream.next().await, Some(6)); /// # } /// ``` fn map<T, F>(self, f: F) -> Map<Self, F> where F: FnMut(Self::Item) -> T, Self: Sized, { Map::new(self, f) } /// Combine two streams into one by interleaving the output of both as it /// is produced. /// /// Values are produced from the merged stream in the order they arrive from /// the two source streams. If both source streams provide values /// simultaneously, the merge stream alternates between them. This provides /// some level of fairness. /// /// The merged stream completes once **both** source streams complete. When /// one source stream completes before the other, the merge stream /// exclusively polls the remaining stream. 
/// /// # Examples /// /// ``` /// use tokio::stream::StreamExt; /// use tokio::sync::mpsc; /// use tokio::time; /// /// use std::time::Duration; /// /// # /* /// #[tokio::main] /// # */ /// # #[tokio::main(basic_scheduler)] /// async fn main() { /// # time::pause(); /// let (mut tx1, rx1) = mpsc::channel(10); /// let (mut tx2, rx2) = mpsc::channel(10); /// /// let mut rx = rx1.merge(rx2); /// /// tokio::spawn(async move { /// // Send some values immediately /// tx1.send(1).await.unwrap(); /// tx1.send(2).await.unwrap(); /// /// // Let the other task send values /// time::delay_for(Duration::from_millis(20)).await; /// /// tx1.send(4).await.unwrap(); /// }); /// /// tokio::spawn(async move { /// // Wait for the first task to send values /// time::delay_for(Duration::from_millis(5)).await; /// /// tx2.send(3).await.unwrap(); /// /// time::delay_for(Duration::from_millis(25)).await; /// /// // Send the final value /// tx2.send(5).await.unwrap(); /// }); /// /// assert_eq!(1, rx.next().await.unwrap()); /// assert_eq!(2, rx.next().await.unwrap()); /// assert_eq!(3, rx.next().await.unwrap()); /// assert_eq!(4, rx.next().await.unwrap()); /// assert_eq!(5, rx.next().await.unwrap()); /// /// // The merged stream is consumed /// assert!(rx.next().await.is_none()); /// } /// ``` fn merge<U>(self, other: U) -> Merge<Self, U> where U: Stream<Item = Self::Item>, Self: Sized, { Merge::new(self, other) } /// Filters the values produced by this stream according to the provided /// predicate. /// /// As values of this stream are made available, the provided predicate `f` /// will be run against them. If the predicate /// resolves to `true`, then the stream will yield the value, but if the /// predicate resolves to `false`, then the value /// will be discarded and the next value will be produced. /// /// Note that this function consumes the stream passed into it and returns a /// wrapped version of it, similar to [`Iterator::filter`] method in the /// standard library. 
/// /// # Examples /// /// ``` /// # #[tokio::main] /// # async fn main() { /// use tokio::stream::{self, StreamExt}; /// /// let stream = stream::iter(1..=8); /// let mut evens = stream.filter(|x| x % 2 == 0); /// /// assert_eq!(Some(2), evens.next().await); /// assert_eq!(Some(4), evens.next().await); /// assert_eq!(Some(6), evens.next().await); /// assert_eq!(Some(8), evens.next().await); /// assert_eq!(None, evens.next().await); /// # } /// ``` fn filter<F>(self, f: F) -> Filter<Self, F> where F: FnMut(&Self::Item) -> bool, Self: Sized, { Filter::new(self, f) } /// Filters the values produced by this stream while simultaneously mapping /// them to a different type according to the provided closure. /// /// As values of this stream are made available, the provided function will /// be run on them. If the predicate `f` resolves to /// [`Some(item)`](Some) then the stream will yield the value `item`, but if /// it resolves to [`None`] then the next value will be produced. /// /// Note that this function consumes the stream passed into it and returns a /// wrapped version of it, similar to [`Iterator::filter_map`] method in the /// standard library. /// /// # Examples /// ``` /// # #[tokio::main] /// # async fn main() { /// use tokio::stream::{self, StreamExt}; /// /// let stream = stream::iter(1..=8); /// let mut evens = stream.filter_map(|x| { /// if x % 2 == 0 { Some(x + 1) } else { None } /// }); /// /// assert_eq!(Some(3), evens.next().await); /// assert_eq!(Some(5), evens.next().await); /// assert_eq!(Some(7), evens.next().await); /// assert_eq!(Some(9), evens.next().await); /// assert_eq!(None, evens.next().await); /// # } /// ``` fn filter_map<T, F>(self, f: F) -> FilterMap<Self, F> where F: FnMut(Self::Item) -> Option<T>, Self: Sized, { FilterMap::new(self, f) } /// Creates a stream which ends after the first `None`. /// /// After a stream returns `None`, behavior is undefined. 
Future calls to /// `poll_next` may or may not return `Some(T)` again or they may panic. /// `fuse()` adapts a stream, ensuring that after `None` is given, it will /// return `None` forever. /// /// # Examples /// /// ``` /// use tokio::stream::{Stream, StreamExt}; /// /// use std::pin::Pin; /// use std::task::{Context, Poll}; /// /// // a stream which alternates between Some and None /// struct Alternate { /// state: i32, /// } /// /// impl Stream for Alternate { /// type Item = i32; /// /// fn poll_next(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Option<i32>> { /// let val = self.state; /// self.state = self.state + 1; /// /// // if it's even, Some(i32), else None /// if val % 2 == 0 { /// Poll::Ready(Some(val)) /// } else { /// Poll::Ready(None) /// } /// } /// } /// /// #[tokio::main] /// async fn main() { /// let mut stream = Alternate { state: 0 }; /// /// // the stream goes back and forth /// assert_eq!(stream.next().await, Some(0)); /// assert_eq!(stream.next().await, None); /// assert_eq!(stream.next().await, Some(2)); /// assert_eq!(stream.next().await, None); /// /// // however, once it is fused /// let mut stream = stream.fuse(); /// /// assert_eq!(stream.next().await, Some(4)); /// assert_eq!(stream.next().await, None); /// /// // it will always return `None` after the first time. /// assert_eq!(stream.next().await, None); /// assert_eq!(stream.next().await, None); /// assert_eq!(stream.next().await, None); /// } /// ``` fn fuse(self) -> Fuse<Self> where Self: Sized, { Fuse::new(self) } /// Creates a new stream of at most `n` items of the underlying stream. /// /// Once `n` items have been yielded from this stream then it will always /// return that the stream is done. 
/// /// # Examples /// /// ``` /// # #[tokio::main] /// # async fn main() { /// use tokio::stream::{self, StreamExt}; /// /// let mut stream = stream::iter(1..=10).take(3); /// /// assert_eq!(Some(1), stream.next().await); /// assert_eq!(Some(2), stream.next().await); /// assert_eq!(Some(3), stream.next().await); /// assert_eq!(None, stream.next().await); /// # } /// ``` fn take(self, n: usize) -> Take<Self> where Self: Sized, { Take::new(self, n) } /// Take elements from this stream while the provided predicate /// resolves to `true`. /// /// This function, like `Iterator::take_while`, will take elements from the /// stream until the predicate `f` resolves to `false`. Once one element /// returns false it will always return that the stream is done. /// /// # Examples /// /// ``` /// # #[tokio::main] /// # async fn main() { /// use tokio::stream::{self, StreamExt}; /// /// let mut stream = stream::iter(1..=10).take_while(|x| *x <= 3); /// /// assert_eq!(Some(1), stream.next().await); /// assert_eq!(Some(2), stream.next().await); /// assert_eq!(Some(3), stream.next().await); /// assert_eq!(None, stream.next().await); /// # } /// ``` fn take_while<F>(self, f: F) -> TakeWhile<Self, F> where F: FnMut(&Self::Item) -> bool, Self: Sized, { TakeWhile::new(self, f) } /// Tests if every element of the stream matches a predicate. /// /// `all()` takes a closure that returns `true` or `false`. It applies /// this closure to each element of the stream, and if they all return /// `true`, then so does `all`. If any of them return `false`, it /// returns `false`. An empty stream returns `true`. /// /// `all()` is short-circuiting; in other words, it will stop processing /// as soon as it finds a `false`, given that no matter what else happens, /// the result will also be `false`. /// /// An empty stream returns `true`. 
/// /// # Examples /// /// Basic usage: /// /// ``` /// # #[tokio::main] /// # async fn main() { /// use tokio::stream::{self, StreamExt}; /// /// let a = [1, 2, 3]; /// /// assert!(stream::iter(&a).all(|&x| x > 0).await); /// /// assert!(!stream::iter(&a).all(|&x| x > 2).await); /// # } /// ``` /// /// Stopping at the first `false`: /// /// ``` /// # #[tokio::main] /// # async fn main() { /// use tokio::stream::{self, StreamExt}; /// /// let a = [1, 2, 3]; /// /// let mut iter = stream::iter(&a); /// /// assert!(!iter.all(|&x| x != 2).await); /// /// // we can still use `iter`, as there are more elements. /// assert_eq!(iter.next().await, Some(&3)); /// # } /// ``` fn all<F>(&mut self, f: F) -> AllFuture<'_, Self, F> where Self: Unpin, F: FnMut(Self::Item) -> bool, { AllFuture::new(self, f) } /// Tests if any element of the stream matches a predicate. /// /// `any()` takes a closure that returns `true` or `false`. It applies /// this closure to each element of the stream, and if any of them return /// `true`, then so does `any()`. If they all return `false`, it /// returns `false`. /// /// `any()` is short-circuiting; in other words, it will stop processing /// as soon as it finds a `true`, given that no matter what else happens, /// the result will also be `true`. /// /// An empty stream returns `false`. /// /// Basic usage: /// /// ``` /// # #[tokio::main] /// # async fn main() { /// use tokio::stream::{self, StreamExt}; /// /// let a = [1, 2, 3]; /// /// assert!(stream::iter(&a).any(|&x| x > 0).await); /// /// assert!(!stream::iter(&a).any(|&x| x > 5).await); /// # } /// ``` /// /// Stopping at the first `true`: /// /// ``` /// # #[tokio::main] /// # async fn main() { /// use tokio::stream::{self, StreamExt}; /// /// let a = [1, 2, 3]; /// /// let mut iter = stream::iter(&a); /// /// assert!(iter.any(|&x| x != 2).await); /// /// // we can still use `iter`, as there are more elements. 
/// assert_eq!(iter.next().await, Some(&2)); /// # } /// ``` fn any<F>(&mut self, f: F) -> AnyFuture<'_, Self, F> where Self: Unpin, F: FnMut(Self::Item) -> bool, { AnyFuture::new(self, f) } /// Combine two streams into one by first returning all values from the /// first stream then all values from the second stream. /// /// As long as `self` still has values to emit, no values from `other` are /// emitted, even if some are ready. /// /// # Examples /// /// ``` /// use tokio::stream::{self, StreamExt}; /// /// #[tokio::main] /// async fn main() { /// let one = stream::iter(vec![1, 2, 3]); /// let two = stream::iter(vec![4, 5, 6]); /// /// let mut stream = one.chain(two); /// /// assert_eq!(stream.next().await, Some(1)); /// assert_eq!(stream.next().await, Some(2)); /// assert_eq!(stream.next().await, Some(3)); /// assert_eq!(stream.next().await, Some(4)); /// assert_eq!(stream.next().await, Some(5)); /// assert_eq!(stream.next().await, Some(6)); /// assert_eq!(stream.next().await, None); /// } /// ``` fn chain<U>(self, other: U) -> Chain<Self, U> where U: Stream<Item = Self::Item>, Self: Sized, { Chain::new(self, other) } /// Drain stream pushing all emitted values into a collection. /// /// `collect` streams all values, awaiting as needed. Values are pushed into /// a collection. A number of different target collection types are /// supported, including [`Vec`](std::vec::Vec), /// [`String`](std::string::String), and [`Bytes`](bytes::Bytes). /// /// # `Result` /// /// `collect()` can also be used with streams of type `Result<T, E>` where /// `T: FromStream<_>`. In this case, `collect()` will stream as long as /// values yielded from the stream are `Ok(_)`. If `Err(_)` is encountered, /// streaming is terminated and `collect()` returns the `Err`. /// /// # Notes /// /// `FromStream` is currently a sealed trait. Stabilization is pending /// enhancements to the Rust langague. 
/// /// # Examples /// /// Basic usage: /// /// ``` /// use tokio::stream::{self, StreamExt}; /// /// #[tokio::main] /// async fn main() { /// let doubled: Vec<i32> = /// stream::iter(vec![1, 2, 3]) /// .map(|x| x * 2) /// .collect() /// .await; /// /// assert_eq!(vec![2, 4, 6], doubled); /// } /// ``` /// /// Collecting a stream of `Result` values /// /// ``` /// use tokio::stream::{self, StreamExt}; /// /// #[tokio::main] /// async fn main() { /// // A stream containing only `Ok` values will be collected /// let values: Result<Vec<i32>, &str> = /// stream::iter(vec![Ok(1), Ok(2), Ok(3)]) /// .collect() /// .await; /// /// assert_eq!(Ok(vec![1, 2, 3]), values); /// /// // A stream containing `Err` values will return the first error. /// let results = vec![Ok(1), Err("no"), Ok(2), Ok(3), Err("nein")]; /// /// let values: Result<Vec<i32>, &str> = /// stream::iter(results) /// .collect() /// .await; /// /// assert_eq!(Err("no"), values); /// } /// ``` fn collect<T>(self) -> Collect<Self, T> where T: FromStream<Self::Item>, Self: Sized, { Collect::new(self) } } impl<St: ?Sized> StreamExt for St where St: Stream {}
30.097117
145
0.519714
5d0a4b80d8aecf764e82320d8b52c1ff95582b60
1,493
use std::str::FromStr; use anyhow::Result; #[allow(clippy::unusual_byte_groupings)] const WINS: &[u32] = &[ 0b_11111_00000_00000_00000_00000, 0b_00000_11111_00000_00000_00000, 0b_00000_00000_11111_00000_00000, 0b_00000_00000_00000_11111_00000, 0b_00000_00000_00000_00000_11111, 0b_10000_10000_10000_10000_10000, 0b_01000_01000_01000_01000_01000, 0b_00100_00100_00100_00100_00100, 0b_00010_00010_00010_00010_00010, 0b_00001_00001_00001_00001_00001, ]; #[derive(Debug, Clone)] pub struct Bingo { pub fields: Vec<u8>, /// Bitmap pub hits: u32, } impl Bingo { pub fn hit(&mut self, val: u8) { for (i, field) in self.fields.iter().copied().enumerate() { if val == field { self.hits |= 1 << i; } } } pub fn check(&self) -> bool { WINS.iter().copied().any(|win| (self.hits & win) == win) } pub fn score(&self) -> usize { let mut score = 0; for (i, field) in self.fields.iter().copied().enumerate() { if self.hits & (1 << i) == 0 { score += field as usize; } } score } } impl FromStr for Bingo { type Err = anyhow::Error; fn from_str(s: &str) -> Result<Self, Self::Err> { Ok(Self { fields: s .split_whitespace() .map(|n| Ok(n.parse()?)) .collect::<Result<_>>()?, hits: 0, }) } }
22.969231
67
0.556597
8705bb46b20e465cd18f60ec0738d177bde966ec
18,133
// Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0. //! This is the core implementation of a batch system. Generally there will be two //! different kind of FSMs in TiKV's FSM system. One is normal FSM, which usually //! represents a peer, the other is control FSM, which usually represents something //! that controls how the former is created or metrics are collected. use crate::config::Config; use crate::fsm::{Fsm, FsmScheduler, Priority}; use crate::mailbox::BasicMailbox; use crate::router::Router; use crossbeam::channel::{self, SendError}; use file_system::{set_io_type, IOType}; use std::borrow::Cow; use std::sync::atomic::AtomicUsize; use std::sync::Arc; use std::thread::{self, JoinHandle}; use std::time::Duration; use tikv_util::mpsc; use tikv_util::time::Instant; use tikv_util::{debug, error, info, safe_panic, thd_name, warn}; /// A unify type for FSMs so that they can be sent to channel easily. enum FsmTypes<N, C> { Normal(Box<N>), Control(Box<C>), // Used as a signal that scheduler should be shutdown. Empty, } // A macro to introduce common definition of scheduler. macro_rules! impl_sched { ($name:ident, $ty:path, Fsm = $fsm:tt) => { pub struct $name<N, C> { sender: channel::Sender<FsmTypes<N, C>>, low_sender: channel::Sender<FsmTypes<N, C>>, } impl<N, C> Clone for $name<N, C> { #[inline] fn clone(&self) -> $name<N, C> { $name { sender: self.sender.clone(), low_sender: self.low_sender.clone(), } } } impl<N, C> FsmScheduler for $name<N, C> where $fsm: Fsm, { type Fsm = $fsm; #[inline] fn schedule(&self, fsm: Box<Self::Fsm>) { let sender = match fsm.get_priority() { Priority::Normal => &self.sender, Priority::Low => &self.low_sender, }; match sender.send($ty(fsm)) { Ok(()) => {} // TODO: use debug instead. Err(SendError($ty(fsm))) => warn!("failed to schedule fsm {:p}", fsm), _ => unreachable!(), } } fn shutdown(&self) { // TODO: close it explicitly once it's supported. // Magic number, actually any number greater than poll pool size works. 
for _ in 0..100 { let _ = self.sender.send(FsmTypes::Empty); let _ = self.low_sender.send(FsmTypes::Empty); } } } }; } impl_sched!(NormalScheduler, FsmTypes::Normal, Fsm = N); impl_sched!(ControlScheduler, FsmTypes::Control, Fsm = C); /// A basic struct for a round of polling. #[allow(clippy::vec_box)] pub struct Batch<N, C> { normals: Vec<Box<N>>, timers: Vec<Instant>, control: Option<Box<C>>, } impl<N: Fsm, C: Fsm> Batch<N, C> { /// Create a a batch with given batch size. pub fn with_capacity(cap: usize) -> Batch<N, C> { Batch { normals: Vec::with_capacity(cap), timers: Vec::with_capacity(cap), control: None, } } fn push(&mut self, fsm: FsmTypes<N, C>) -> bool { match fsm { FsmTypes::Normal(n) => { self.normals.push(n); self.timers.push(Instant::now_coarse()); } FsmTypes::Control(c) => { assert!(self.control.is_none()); self.control = Some(c); } FsmTypes::Empty => return false, } true } fn is_empty(&self) -> bool { self.normals.is_empty() && self.control.is_none() } fn clear(&mut self) { self.normals.clear(); self.timers.clear(); self.control.take(); } /// Put back the FSM located at index. /// /// Only when channel length is larger than `checked_len` will trigger /// further notification. This function may fail if channel length is /// larger than the given value before FSM is released. pub fn release(&mut self, index: usize, checked_len: usize) { let mut fsm = self.normals.swap_remove(index); let mailbox = fsm.take_mailbox().unwrap(); mailbox.release(fsm); if mailbox.len() == checked_len { self.timers.swap_remove(index); } else { match mailbox.take_fsm() { None => (), Some(mut s) => { s.set_mailbox(Cow::Owned(mailbox)); let last_index = self.normals.len(); self.normals.push(s); self.normals.swap(index, last_index); } } } } /// Remove the normal FSM located at `index`. /// /// This method should only be called when the FSM is stopped. 
/// If there are still messages in channel, the FSM is untouched and /// the function will return false to let caller to keep polling. pub fn remove(&mut self, index: usize) { let mut fsm = self.normals.swap_remove(index); let mailbox = fsm.take_mailbox().unwrap(); if mailbox.is_empty() { mailbox.release(fsm); self.timers.swap_remove(index); } else { fsm.set_mailbox(Cow::Owned(mailbox)); let last_index = self.normals.len(); self.normals.push(fsm); self.normals.swap(index, last_index); } } /// Schedule the normal FSM located at `index`. pub fn reschedule(&mut self, router: &BatchRouter<N, C>, index: usize) { let fsm = self.normals.swap_remove(index); self.timers.swap_remove(index); router.normal_scheduler.schedule(fsm); } /// Same as `release`, but working on control FSM. pub fn release_control(&mut self, control_box: &BasicMailbox<C>, checked_len: usize) -> bool { let s = self.control.take().unwrap(); control_box.release(s); if control_box.len() == checked_len { true } else { match control_box.take_fsm() { None => true, Some(s) => { self.control = Some(s); false } } } } /// Same as `remove`, but working on control FSM. pub fn remove_control(&mut self, control_box: &BasicMailbox<C>) { if control_box.is_empty() { let s = self.control.take().unwrap(); control_box.release(s); } } } /// A handler that poll all FSM in ready. /// /// A General process works like following: /// ```text /// loop { /// begin /// if control is ready: /// handle_control /// foreach ready normal: /// handle_normal /// end /// } /// ``` /// /// Note that, every poll thread has its own handler, which doesn't have to be /// Sync. pub trait PollHandler<N, C> { /// This function is called at the very beginning of every round. fn begin(&mut self, batch_size: usize); /// This function is called when handling readiness for control FSM. /// /// If returned value is Some, then it represents a length of channel. 
This /// function will only be called for the same fsm after channel's lengh is /// larger than the value. If it returns None, then this function will /// still be called for the same FSM in the next loop unless the FSM is /// stopped. fn handle_control(&mut self, control: &mut C) -> Option<usize>; /// This function is called when handling readiness for normal FSM. /// /// The returned value is handled in the same way as `handle_control`. fn handle_normal(&mut self, normal: &mut N) -> Option<usize>; /// This function is called at the end of every round. fn end(&mut self, batch: &mut [Box<N>]); /// This function is called when batch system is going to sleep. fn pause(&mut self) {} /// This function returns the priority of this handler. fn get_priority(&self) -> Priority { Priority::Normal } } /// Internal poller that fetches batch and call handler hooks for readiness. struct Poller<N: Fsm, C: Fsm, Handler> { router: Router<N, C, NormalScheduler<N, C>, ControlScheduler<N, C>>, fsm_receiver: channel::Receiver<FsmTypes<N, C>>, handler: Handler, max_batch_size: usize, reschedule_duration: Duration, } enum ReschedulePolicy { Release(usize), Remove, Schedule, } impl<N: Fsm, C: Fsm, Handler: PollHandler<N, C>> Poller<N, C, Handler> { fn fetch_fsm(&mut self, batch: &mut Batch<N, C>) -> bool { if batch.control.is_some() { return true; } if let Ok(fsm) = self.fsm_receiver.try_recv() { return batch.push(fsm); } if batch.is_empty() { self.handler.pause(); if let Ok(fsm) = self.fsm_receiver.recv() { return batch.push(fsm); } } !batch.is_empty() } // Poll for readiness and forward to handler. Remove stale peer if necessary. fn poll(&mut self) { let mut batch = Batch::with_capacity(self.max_batch_size); let mut reschedule_fsms = Vec::with_capacity(self.max_batch_size); // Fetch batch after every round is finished. It's helpful to protect regions // from becoming hungry if some regions are hot points. 
Since we fetch new fsm every time // calling `poll`, we do not need to configure a large value for `self.max_batch_size`. let mut run = true; while run && self.fetch_fsm(&mut batch) { // If there is some region wait to be deal, we must deal with it even if it has overhead // max size of batch. It's helpful to protect regions from becoming hungry // if some regions are hot points. let max_batch_size = std::cmp::max(self.max_batch_size, batch.normals.len()); self.handler.begin(max_batch_size); if batch.control.is_some() { let len = self.handler.handle_control(batch.control.as_mut().unwrap()); if batch.control.as_ref().unwrap().is_stopped() { batch.remove_control(&self.router.control_box); } else if let Some(len) = len { batch.release_control(&self.router.control_box, len); } } let mut hot_fsm_count = 0; for (i, p) in batch.normals.iter_mut().enumerate() { let len = self.handler.handle_normal(p); if p.is_stopped() { reschedule_fsms.push((i, ReschedulePolicy::Remove)); } else if p.get_priority() != self.handler.get_priority() { reschedule_fsms.push((i, ReschedulePolicy::Schedule)); } else { if batch.timers[i].saturating_elapsed() >= self.reschedule_duration { hot_fsm_count += 1; // We should only reschedule a half of the hot regions, otherwise, // it's possible all the hot regions are fetched in a batch the // next time. if hot_fsm_count % 2 == 0 { reschedule_fsms.push((i, ReschedulePolicy::Schedule)); continue; } } if let Some(l) = len { reschedule_fsms.push((i, ReschedulePolicy::Release(l))); } } } let mut fsm_cnt = batch.normals.len(); while batch.normals.len() < max_batch_size { if let Ok(fsm) = self.fsm_receiver.try_recv() { run = batch.push(fsm); } // If we receive a ControlFsm, break this cycle and call `end`. Because ControlFsm // may change state of the handler, we shall deal with it immediately after // calling `begin` of `Handler`. 
if !run || fsm_cnt >= batch.normals.len() { break; } let len = self.handler.handle_normal(&mut batch.normals[fsm_cnt]); if batch.normals[fsm_cnt].is_stopped() { reschedule_fsms.push((fsm_cnt, ReschedulePolicy::Remove)); } else if let Some(l) = len { reschedule_fsms.push((fsm_cnt, ReschedulePolicy::Release(l))); } fsm_cnt += 1; } self.handler.end(&mut batch.normals); // Because release use `swap_remove` internally, so using pop here // to remove the correct FSM. while let Some((r, mark)) = reschedule_fsms.pop() { match mark { ReschedulePolicy::Release(l) => batch.release(r, l), ReschedulePolicy::Remove => batch.remove(r), ReschedulePolicy::Schedule => batch.reschedule(&self.router, r), } } } batch.clear(); } } /// A builder trait that can build up poll handlers. pub trait HandlerBuilder<N, C> { type Handler: PollHandler<N, C>; fn build(&mut self, priority: Priority) -> Self::Handler; } /// A system that can poll FSMs concurrently and in batch. /// /// To use the system, two type of FSMs and their PollHandlers need /// to be defined: Normal and Control. Normal FSM handles the general /// task while Control FSM creates normal FSM instances. 
pub struct BatchSystem<N: Fsm, C: Fsm> { name_prefix: Option<String>, router: BatchRouter<N, C>, receiver: channel::Receiver<FsmTypes<N, C>>, low_receiver: channel::Receiver<FsmTypes<N, C>>, pool_size: usize, max_batch_size: usize, workers: Vec<JoinHandle<()>>, reschedule_duration: Duration, low_priority_pool_size: usize, } impl<N, C> BatchSystem<N, C> where N: Fsm + Send + 'static, C: Fsm + Send + 'static, { pub fn router(&self) -> &BatchRouter<N, C> { &self.router } fn start_poller<B>(&mut self, name: String, priority: Priority, builder: &mut B) where B: HandlerBuilder<N, C>, B::Handler: Send + 'static, { let handler = builder.build(priority); let receiver = match priority { Priority::Normal => self.receiver.clone(), Priority::Low => self.low_receiver.clone(), }; let mut poller = Poller { router: self.router.clone(), fsm_receiver: receiver, handler, max_batch_size: self.max_batch_size, reschedule_duration: self.reschedule_duration, }; let props = tikv_util::thread_group::current_properties(); let t = thread::Builder::new() .name(name) .spawn(move || { tikv_util::thread_group::set_properties(props); set_io_type(IOType::ForegroundWrite); poller.poll(); }) .unwrap(); self.workers.push(t); } /// Start the batch system. pub fn spawn<B>(&mut self, name_prefix: String, mut builder: B) where B: HandlerBuilder<N, C>, B::Handler: Send + 'static, { for i in 0..self.pool_size { self.start_poller( thd_name!(format!("{}-{}", name_prefix, i)), Priority::Normal, &mut builder, ); } for i in 0..self.low_priority_pool_size { self.start_poller( thd_name!(format!("{}-low-{}", name_prefix, i)), Priority::Low, &mut builder, ); } self.name_prefix = Some(name_prefix); } /// Shutdown the batch system and wait till all background threads exit. 
pub fn shutdown(&mut self) { if self.name_prefix.is_none() { return; } let name_prefix = self.name_prefix.take().unwrap(); info!("shutdown batch system {}", name_prefix); self.router.broadcast_shutdown(); let mut last_error = None; for h in self.workers.drain(..) { debug!("waiting for {}", h.thread().name().unwrap()); if let Err(e) = h.join() { error!("failed to join worker thread: {:?}", e); last_error = Some(e); } } if let Some(e) = last_error { safe_panic!("failed to join worker thread: {:?}", e); } info!("batch system {} is stopped.", name_prefix); } } pub type BatchRouter<N, C> = Router<N, C, NormalScheduler<N, C>, ControlScheduler<N, C>>; /// Create a batch system with the given thread name prefix and pool size. /// /// `sender` and `controller` should be paired. pub fn create_system<N: Fsm, C: Fsm>( cfg: &Config, sender: mpsc::LooseBoundedSender<C::Message>, controller: Box<C>, ) -> (BatchRouter<N, C>, BatchSystem<N, C>) { let state_cnt = Arc::new(AtomicUsize::new(0)); let control_box = BasicMailbox::new(sender, controller, state_cnt.clone()); let (tx, rx) = channel::unbounded(); let (tx2, rx2) = channel::unbounded(); let normal_scheduler = NormalScheduler { sender: tx.clone(), low_sender: tx2.clone(), }; let control_scheduler = ControlScheduler { sender: tx, low_sender: tx2, }; let router = Router::new(control_box, normal_scheduler, control_scheduler, state_cnt); let system = BatchSystem { name_prefix: None, router: router.clone(), receiver: rx, low_receiver: rx2, pool_size: cfg.pool_size, max_batch_size: cfg.max_batch_size(), reschedule_duration: cfg.reschedule_duration.0, workers: vec![], low_priority_pool_size: cfg.low_priority_pool_size, }; (router, system) }
35.554902
100
0.558098
6a49a166bfbbe56984712d47adcc76ae8c12aade
1,650
use std::borrow::Cow; use serde::{Deserialize, Serialize}; /// get string representation length without casting const fn get_strlen(n: u32) -> usize { let mut n = n; let mut l = 1; while n >= 10 { n /= 10; l += 1; } l } #[derive(Clone, Debug, Default, PartialEq)] pub struct EncodedText<const N: u32>(pub String); impl<const N: u32> AsRef<String> for EncodedText<N> { fn as_ref(&self) -> &String { &self.0 } } impl<const N: u32> AsMut<String> for EncodedText<N> { fn as_mut(&mut self) -> &mut String { &mut self.0 } } impl<'de, const N: u32> Deserialize<'de> for EncodedText<N> { fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> { let a = <Cow<'_, str> as Deserialize>::deserialize(deserializer)?; let key_len = get_strlen(N); let (field1, field2) = match a.find('\u{1}') { Some(i) => (&a[0..i], if a.len() > i + (key_len + 2) { &a[(i + key_len + 2)..] } else { "" }), None => (&a[0..], ""), }; let len: usize = field1.parse().map_err(serde::de::Error::custom)?; if field2.len() == len { Ok(EncodedText(field2.to_owned())) } else { Err(serde::de::Error::custom(format!("Field {} length not matching, got {} but expected {}", N, field2.len(), len))) } } } impl<const N: u32> Serialize for EncodedText<N> { fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> { let temp = format!("{}\u{1}{}={}", self.0.len(), N, self.0); temp.serialize(serializer) } }
28.947368
128
0.546667
eb21fab917e8bba7f7ef5b0cd3d7a6a9ef9953d8
296
#[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use std::path::PathBuf; use crate::Fd; #[cfg_attr(feature = "serde", serde(crate = "renamed_serde"))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct OpenFile { pub path: PathBuf, pub fd: Option<Fd>, }
21.142857
62
0.685811
ff6ad341c8efbbe62c159ebc405375e5ec3a8fb1
9,260
// Copyright (c) 2021-present, Cruise LLC
//
// This source code is licensed under the Apache License, Version 2.0,
// found in the LICENSE-APACHE file in the root directory of this source tree.
// You may not use this file except in compliance with the License.

use wrflib::*;

/// Per-instance shader data for the color picker quad.
/// `hue`/`sat`/`val` are the selected HSV color (0..1 each); `hover` and
/// `down` are animation channels driven by the Animator (0..1).
#[derive(Clone, Default)]
#[repr(C)]
struct ColorPickerIns {
    base: QuadIns,
    hue: f32,
    sat: f32,
    val: f32,
    hover: f32,
    down: f32,
}

// Fragment shader: renders a hexagonal hue ring with an inner
// saturation/value square, plus two "puck" handles (one on the ring, one in
// the square) whose size/brightness respond to hover/down.
static SHADER: Shader = Shader {
    build_geom: Some(QuadIns::build_geom),
    code_to_concatenate: &[
        Cx::STD_SHADER,
        QuadIns::SHADER,
        code_fragment!(
            r#"
            instance hue: float;
            instance sat: float;
            instance val: float;
            instance hover: float;
            instance down: float;
            fn circ_to_rect(u: float, v: float) -> vec2 {
                let u2 = u * u;
                let v2 = v * v;
                return vec2(
                    0.5 * sqrt(2. + 2. * sqrt(2.) * u + u2 - v2) - 0.5 * sqrt(2. - 2. * sqrt(2.) * u + u2 - v2),
                    0.5 * sqrt(2. + 2. * sqrt(2.) * v - u2 + v2) - 0.5 * sqrt(2. - 2. * sqrt(2.) * v - u2 + v2)
                );
            }
            fn pixel() -> vec4 {
                let rgbv = hsv2rgb(vec4(hue, sat, val, 1.));
                let w = rect_size.x;
                let h = rect_size.y;
                let df = Df::viewport(pos * vec2(w, h));
                let c = vec2(w, h) * 0.5;
                let radius = w * 0.37;
                let inner = w * 0.28;
                df.hexagon(c, w * 0.45);
                df.hexagon(c, w * 0.4);
                df.subtract();
                let ang = atan(pos.x * w - c.x, 0.0001 + pos.y * h - c.y) / PI * 0.5 - 0.33333;
                df.fill(hsv2rgb(vec4(ang, 1.0, 1.0, 1.0)));
                df.new_path();
                let rsize = inner / sqrt(2.0);
                df.rect(c - rsize, vec2(rsize * 2.0));
                let norm_rect = (vec2(pos.x * w, pos.y * h) - (c - inner)) / (2. * inner);
                let circ = clamp(circ_to_rect(norm_rect.x * 2. - 1., norm_rect.y * 2. - 1.), vec2(-1.), vec2(1.));
                df.fill(hsv2rgb(vec4(hue, (circ.x * .5 + .5), 1. - (circ.y * .5 + .5), 1.)));
                df.new_path();
                let col_angle = (hue + .333333) * 2. * PI;
                let circle_puk = vec2(sin(col_angle) * radius, cos(col_angle) * radius) + c;
                let rect_puk = c + vec2(sat * 2. * rsize - rsize, (1. - val) * 2. * rsize - rsize);
                let color = mix(mix(#3, #E, hover), #F, down);
                let puck_size = 0.1 * w;
                df.circle(rect_puk, puck_size);
                df.rect(c - rsize, vec2(rsize * 2.0));
                df.intersect();
                df.fill(color);
                df.new_path();
                df.circle(rect_puk, puck_size - 1. - 2. * hover + down);
                df.rect(c - rsize, vec2(rsize * 2.0));
                df.intersect();
                df.fill(rgbv);
                df.new_path();
                df.circle(circle_puk, puck_size);
                df.fill(color);
                df.new_path();
                df.circle(circle_puk, puck_size - 1. - 2. * hover + down);
                df.fill(rgbv);
                df.new_path();
                return df.result;
            }"#
        ),
    ],
    ..Shader::DEFAULT
};

/// Events emitted by [`ColorPicker::handle`].
pub enum ColorPickerEvent {
    /// The selected color changed (emitted continuously while dragging).
    Change { hsva: Vec4 },
    /// A drag interaction finished (pointer released).
    DoneChanging,
    /// Nothing of interest happened.
    None,
}

#[derive(Default)]
pub struct ColorPicker {
    component_id: ComponentId,
    size: f32,
    area: Area,
    animator: Animator,
    // Which region (if any) the current pointer drag started in.
    drag_mode: ColorPickerDragMode,
}

#[derive(Clone, Debug, PartialEq)]
enum ColorPickerDragMode {
    // Dragging on the outer hue ring.
    Wheel,
    // Dragging inside the inner saturation/value square.
    Rect,
    None,
}
impl Default for ColorPickerDragMode {
    fn default() -> Self {
        ColorPickerDragMode::None
    }
}

// Animation tracks index 0 = hover channel, index 1 = down channel
// (see ColorPicker::animate, which reads them by position).
const ANIM_DEFAULT: Anim = Anim {
    duration: 0.2,
    tracks: &[
        // ColorPickerIns::hover
        Track::Float { key_frames: &[(1.0, 0.0)], ease: Ease::DEFAULT },
        // ColorPickerIns::down
        Track::Float { key_frames: &[(1.0, 0.0)], ease: Ease::DEFAULT },
    ],
    ..Anim::DEFAULT
};
const ANIM_HOVER: Anim = Anim {
    duration: 0.2,
    tracks: &[
        // ColorPickerIns::hover
        Track::Float { key_frames: &[(0.0, 1.0)], ease: Ease::DEFAULT },
        // ColorPickerIns::down
        Track::Float { key_frames: &[(1.0, 0.0)], ease: Ease::DEFAULT },
    ],
    ..Anim::DEFAULT
};
const ANIM_DOWN: Anim = Anim {
    duration: 0.2,
    tracks: &[
        // ColorPickerIns::hover
        Track::Float { key_frames: &[(1.0, 1.0)], ease: Ease::DEFAULT },
        // ColorPickerIns::down
        Track::Float { key_frames: &[(0.0, 0.0), (1.0, 1.0)], ease: Ease::DEFAULT },
    ],
    ..Anim::DEFAULT
};

impl ColorPicker {
    // Copies the animator's current hover/down values into the shader instance.
    fn animate(&mut self, cx: &mut Cx) {
        let color_picker = self.area.get_first_mut::<ColorPickerIns>(cx);
        color_picker.hover = self.animator.get_float(0);
        color_picker.down = self.animator.get_float(1);
    }

    // Maps a pointer position (relative to the widget) to a new hue or
    // sat/val depending on the active drag mode, and emits Change if any
    // component actually moved.
    fn handle_pointer(&mut self, cx: &mut Cx, rel: Vec2) -> ColorPickerEvent {
        let color_picker = self.area.get_first_mut::<ColorPickerIns>(cx);
        let size = color_picker.base.rect_size.x;
        // Center-relative coordinates.
        let vx = rel.x - 0.5 * size;
        let vy = rel.y - 0.5 * size;
        // Half-side of the inner square (matches `inner / sqrt(2)` in the shader).
        let rsize = (size * 0.28) / 2.0f32.sqrt();
        let last_hue = color_picker.hue;
        let last_sat = color_picker.sat;
        let last_val = color_picker.val;
        match self.drag_mode {
            ColorPickerDragMode::Rect => {
                color_picker.sat = ((vx + rsize) / (2.0 * rsize)).clamp(0.0, 1.0);
                color_picker.val = 1.0 - ((vy + rsize) / (2.0 * rsize)).clamp(0.0, 1.0);
            }
            ColorPickerDragMode::Wheel => {
                // Same angle convention as the shader's `ang` computation.
                color_picker.hue = vx.atan2(vy) / std::f32::consts::PI * 0.5 - 0.33333;
            }
            _ => (),
        }
        // Only report a change when a component actually moved.
        if last_hue != color_picker.hue || last_sat != color_picker.sat || last_val != color_picker.val {
            return ColorPickerEvent::Change {
                hsva: Vec4 { x: color_picker.hue, y: color_picker.sat, z: color_picker.val, w: 1.0 },
            };
        }
        ColorPickerEvent::None
    }

    /// Processes input/animation events; returns what (if anything) changed.
    pub fn handle(&mut self, cx: &mut Cx, event: &mut Event) -> ColorPickerEvent {
        if self.animator.handle(cx, event) {
            self.animate(cx);
        }
        match event.hits_pointer(cx, self.component_id, self.area.get_rect_for_first_instance(cx)) {
            Event::PointerHover(pe) => {
                cx.set_hover_mouse_cursor(MouseCursor::Arrow);
                match pe.hover_state {
                    HoverState::In => {
                        self.animator.play_anim(cx, ANIM_HOVER);
                    }
                    HoverState::Out => {
                        self.animator.play_anim(cx, ANIM_DEFAULT);
                    }
                    _ => (),
                }
            }
            Event::PointerDown(pe) => {
                self.animator.play_anim(cx, ANIM_DOWN);
                cx.set_down_mouse_cursor(MouseCursor::Arrow);
                let color_picker = self.area.get_first::<ColorPickerIns>(cx);
                let size = color_picker.base.rect_size.x;
                let rsize = (size * 0.28) / 2.0f32.sqrt();
                let vx = pe.rel.x - 0.5 * size;
                let vy = pe.rel.y - 0.5 * size;
                // Classify the press: inner square beats ring beats outside.
                if vx >= -rsize && vx <= rsize && vy >= -rsize && vy <= rsize {
                    self.drag_mode = ColorPickerDragMode::Rect;
                } else if vx >= -0.5 * size && vx <= 0.5 * size && vy >= -0.5 * size && vy <= 0.5 * size {
                    self.drag_mode = ColorPickerDragMode::Wheel;
                } else {
                    self.drag_mode = ColorPickerDragMode::None;
                }
                return self.handle_pointer(cx, pe.rel);
            }
            Event::PointerUp(pe) => {
                if pe.is_over {
                    if pe.input_type.has_hovers() {
                        self.animator.play_anim(cx, ANIM_HOVER);
                    } else {
                        self.animator.play_anim(cx, ANIM_DEFAULT);
                    }
                } else {
                    self.animator.play_anim(cx, ANIM_DEFAULT);
                }
                self.drag_mode = ColorPickerDragMode::None;
                return ColorPickerEvent::DoneChanging;
            }
            Event::PointerMove(pe) => return self.handle_pointer(cx, pe.rel),
            _ => (),
        }
        ColorPickerEvent::None
    }

    /// Draws the picker showing color `hsva`; height is `width * height_scale`.
    pub fn draw(&mut self, cx: &mut Cx, hsva: Vec4, height_scale: f32) {
        // i wanna draw a wheel with 'width' set but height a fixed height.
        self.size = cx.get_box_rect().size.x;
        let rect = cx.add_box(LayoutSize { width: Width::Fill, height: Height::Fix(self.size * height_scale) });
        self.area = cx.add_instances(
            &SHADER,
            &[ColorPickerIns {
                base: QuadIns::from_rect(rect),
                hue: hsva.x,
                sat: hsva.y,
                val: hsva.z,
                ..Default::default()
            }],
        );
        self.animator.draw(cx, ANIM_DEFAULT);
        self.animate(cx);
    }
}
34.552239
126
0.490065
e80e3bf3fd0cf39c01d505ea137e9b8c21f42432
6,138
// Copyright 2022 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#![allow(non_snake_case)]

use std::cell::Cell;
use std::sync::Arc;

use backtrace::Backtrace;

use crate::exception::ErrorCodeBacktrace;
use crate::ErrorCode;

// These two codes are referenced outside the macro list, so they get named
// statics instead of bare literals.
pub static ABORT_SESSION: u16 = 1042;
pub static ABORT_QUERY: u16 = 1043;

thread_local! {
    // Per-thread switch: when true, newly constructed ErrorCodes capture a
    // backtrace at the point of creation (costly; can be disabled).
    pub static ENABLE_BACKTRACE: Cell<bool> = Cell::new(true);
}

/// For each `Name(code)` pair, generates on `ErrorCode`:
/// - a constructor `ErrorCode::Name(display_text)` that attaches a backtrace
///   when `ENABLE_BACKTRACE` is set, and
/// - two code accessors, `name_code()` (snake_case) and `NameCode()`.
macro_rules! build_exceptions {
    ($($body:ident($code:expr)),*$(,)*) => {
        impl ErrorCode {
            $(
                pub fn $body(display_text: impl Into<String>) -> ErrorCode {
                    let bt = ENABLE_BACKTRACE.with(|v| v.get()).then(|| ErrorCodeBacktrace::Origin(Arc::new(Backtrace::new())));
                    ErrorCode::create(
                        $code,
                        display_text.into(),
                        None,
                        bt,
                    )
                }
                paste::item! {
                    pub fn [< $body:snake _ code >] () -> u16{
                        $code
                    }
                    pub fn [< $body Code >] () -> u16{
                        $code
                    }
                }
            )*
        }
    }
}

// Internal errors [0, 2000].
build_exceptions! {
    Ok(0),
    UnknownTypeOfQuery(1001),
    UnImplement(1002),
    UnknownDatabase(1003),
    UnknownDatabaseId(1004),
    SyntaxException(1005),
    BadArguments(1006),
    IllegalDataType(1007),
    UnknownFunction(1008),
    IllegalFunctionState(1009),
    BadDataValueType(1010),
    UnknownPlan(1011),
    IllegalPipelineState(1012),
    BadTransformType(1013),
    IllegalTransformConnectionState(1014),
    LogicalError(1015),
    EmptyData(1016),
    DataStructMissMatch(1017),
    BadDataArrayLength(1018),
    UnknownContextID(1019),
    UnknownTableId(1020),
    UnknownTableFunction(1021),
    BadOption(1022),
    CannotReadFile(1023),
    ParquetError(1024),
    UnknownTable(1025),
    IllegalAggregateExp(1026),
    UnknownAggregateFunction(1027),
    NumberArgumentsNotMatch(1028),
    NotFoundStream(1029),
    EmptyDataFromServer(1030),
    NotFoundLocalNode(1031),
    PlanScheduleError(1032),
    BadPlanInputs(1033),
    DuplicateClusterNode(1034),
    NotFoundClusterNode(1035),
    BadAddressFormat(1036),
    DnsParseError(1037),
    CannotConnectNode(1038),
    DuplicateGetStream(1039),
    Timeout(1040),
    TooManyUserConnections(1041),
    AbortedSession(ABORT_SESSION),
    AbortedQuery(ABORT_QUERY),
    NotFoundSession(1044),
    CannotListenerPort(1045),
    BadBytes(1046),
    InitPrometheusFailure(1047),
    ScalarSubqueryBadRows(1048),
    Overflow(1049),
    InvalidMetaBinaryFormat(1050),
    AuthenticateFailure(1051),
    TLSConfigurationFailure(1052),
    UnknownSession(1053),
    UnexpectedError(1054),
    DateTimeParseError(1055),
    BadPredicateRows(1056),
    SHA1CheckFailed(1057),
    UnknownColumn(1058),
    InvalidSourceFormat(1059),
    StrParseError(1060),
    IllegalGrant(1061),
    ManagementModePermissionDenied(1062),
    PermissionDenied(1063),
    UnmarshalError(1064),
    SemanticError(1065),
    InvalidAuthInfo(1066),
    InvalidTimezone(1067),
    InvalidDate(1068),
    InvalidTimestamp(1069),

    // NOTE(review): the four codes below duplicate 1066-1069 just above
    // (InvalidAuthInfo/InvalidTimezone/InvalidDate/InvalidTimestamp), so a
    // numeric code no longer identifies a unique error. Renumbering would
    // change codes clients may match on — needs a coordinated fix.
    // NOTE(review): "PipelineAreadlyStarted" is a typo for "AlreadyStarted";
    // renaming breaks callers, so it is only flagged here.
    // Uncategorized error codes.
    UnexpectedResponseType(1066),
    UnknownException(1067),
    TokioError(1068),

    // Pipeline executor error codes.
    PipelineAreadlyStarted(1069),
    PipelineNotStarted(1070),
    PipelineUnInitialized(1071),

    // Http query error codes.
    HttpNotFound(1072),

    // Network error codes.
    NetworkRequestError(1073),

    // Tenant error codes.
    TenantIsEmpty(1101),
    IndexOutOfBounds(1102),
}

// Metasvr errors [2001, 3000].
build_exceptions! {
    // Meta service does not work.
    MetaServiceError(2001),
    InvalidConfig(2002),
    MetaStorageError(2003),
    TableVersionMismatched(2009),
    OCCRetryFailure(2011),

    // User api error codes.
    UnknownUser(2201),
    UserAlreadyExists(2202),
    IllegalUserInfoFormat(2203),
    UnknownRole(2204),
    IllegalUserSettingFormat(2205),

    // Meta api error codes.
    DatabaseAlreadyExists(2301),
    TableAlreadyExists(2302),
    IllegalMetaState(2304),
    MetaNodeInternalError(2305),
    ViewAlreadyExists(2306),

    // Cluster error codes.
    ClusterUnknownNode(2401),
    ClusterNodeAlreadyExists(2402),

    // Stage error codes.
    UnknownStage(2501),
    StageAlreadyExists(2502),
    IllegalUserStageFormat(2503),

    // User defined function error codes.
    IllegalUDFFormat(2601),
    UnknownUDF(2602),
    UdfAlreadyExists(2603),

    // Database error codes.
    UnknownDatabaseEngine(2701),
    UnknownTableEngine(2702),
    UnsupportedEngineParams(2703),

    // Share error codes.
    ShareAlreadyExists(2705),
    UnknownShare(2706),
    UnknownShareId(2707),

    // Variable error codes.
    UnknownVariable(2801),

    // Warehouse error codes
    UnknownWarehouse(2901),
    WarehouseAlreadyExists(2902),
    IllegalWarehouseMetaFormat(2903),
    IllegalWarehouseInfoFormat(2904),
}

// Storage errors [3001, 4000].
build_exceptions! {
    StorageNotFound(3001),
    StoragePermissionDenied(3002),
    StorageOther(4000)
}

// Cache errors [4001, 5000].
build_exceptions! {
    DiskCacheIOError(4001),
    DiskCacheFileTooLarge(4002),
    DiskCacheFileNotInCache(4003),
}

// Service errors [5001,6000].
build_exceptions! {
    // A task that already stopped and can not stop twice.
    AlreadyStarted(5001),
    // A task that already started and can not start twice.
    AlreadyStopped(5002),
}
26.343348
128
0.655751
feacc8010eb322359fb99a4874d02d04dfb45043
1,115
use crate::{Header, EntryKind};

extern crate walkdir;
use walkdir::WalkDir;

use std::path::{Path};
use std::convert::{TryFrom};

/// Builds a [`Header`] by flattening a path into a list of entries:
/// a single `File` entry when `p` is a file, otherwise one entry per
/// file/directory found by a recursive walk.
impl TryFrom<&Path> for Header {
    type Error = std::io::Error;

    fn try_from(p: &Path) -> Result<Self, Self::Error> {
        let mut result: Vec<EntryKind> = vec![];

        // Fast path: a plain file needs no directory walk.
        // NOTE(review): `to_str().unwrap()` panics on non-UTF-8 paths
        // (here and below) — consider `to_string_lossy` or an error.
        if p.is_file() {
            result.push(EntryKind::File(String::from(p.to_str().unwrap()), p.metadata()?.len()));
            return Ok(Self{ paths: result })
        }

        // For each sub-file / sub-directory (recursively). walkdir errors
        // convert into io::Error via `?`.
        for entry in WalkDir::new(p) {
            match entry?.path() {
                path if path.is_file() => {
                    result.push(EntryKind::File(
                        String::from(path.to_str().unwrap()),
                        path.metadata()?.len())
                    )
                }
                path if path.is_dir() => {
                    result.push(EntryKind::Directory(String::from(path.to_str().unwrap())))
                }
                // NOTE(review): entries that are neither file nor directory
                // (e.g. a broken symlink or a fifo) will hit this arm and
                // panic — presumably they should be skipped instead; confirm.
                _ => unreachable!()
            }
        }
        Ok(Self{ paths: result })
    }
}
32.794118
97
0.486996
e4b6fff31dda4bd45f6ca31c8182ca03b81f4376
1,227
pub use jsonrpc_core::types::response::Failure as RpcFailure;

use thiserror::Error;
use zksync_eth_signer::error::SignerError;

/// Errors surfaced by the zkSync client; `Display` text comes from the
/// `#[error(...)]` attributes via `thiserror`.
/// Note: `RpcError` and `SigningError` wrap their sources without `#[from]`,
/// so callers construct them explicitly.
#[derive(Debug, Error, PartialEq)]
pub enum ClientError {
    #[error("Network '{0}' is not supported")]
    NetworkNotSupported(String),
    #[error("Unable to decode server response: {0}")]
    MalformedResponse(String),
    #[error("RPC error: {0:?}")]
    RpcError(RpcFailure),
    #[error("Network error: {0}")]
    NetworkError(String),

    #[error("Provided account credentials are incorrect")]
    IncorrectCredentials,
    #[error("Seed too short, must be at least 32 bytes long")]
    SeedTooShort,
    #[error("Token is not supported by zkSync")]
    UnknownToken,
    #[error("Incorrect address")]
    IncorrectAddress,

    #[error("Operation timeout")]
    OperationTimeout,
    #[error("Polling interval is too small")]
    PollingIntervalIsTooSmall,

    #[error("Signing error: {0}")]
    SigningError(SignerError),
    #[error("Missing required field for a transaction: {0}")]
    MissingRequiredField(String),

    #[error("Ethereum private key was not provided for this wallet")]
    NoEthereumPrivateKey,

    #[error("Provided value is not packable")]
    NotPackableValue,
}
29.926829
69
0.682152
3ac175cf8fc328d7c076bc48901e7d9b3ff29418
734
use ascacou::Board;

/// Plays 10 000 random self-play games of Ascacou, printing the total and
/// average time per game plus the distribution of final scores.
fn main() {
    use rand::seq::IteratorRandom;
    use std::collections::HashMap;

    let mut rng = rand::thread_rng();
    // Final score -> number of games that ended with that score.
    let mut map: HashMap<i8, usize> = HashMap::new();
    // Accumulated wall-clock time across all games, in nanoseconds.
    let mut durations = 0;

    for _i in 0..10_000 {
        let now = std::time::Instant::now();
        let mut board = Board::random_empty();
        // Play uniformly random legal moves until the game ends.
        while !board.is_terminal() {
            let mov = board
                .possible_moves()
                .into_iter()
                .choose(&mut rng)
                .expect("non-terminal position must have at least one legal move");
            board = board.next(mov);
        }
        let score = board.current_score();
        durations += now.elapsed().as_nanos();
        // BUGFIX: was `or_insert(1)` followed by `+= 1`, which recorded 2 for
        // the first game observed at each score. Start new entries at 0.
        *map.entry(score).or_insert(0) += 1;
    }

    println!("total={}ns average={}ns", durations, durations / 10_000);
    println!("{:?}", map);
}
28.230769
74
0.655313
01cc17101afde600e46adc8c9c4a8904c938097f
7,044
use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
use serde::Serialize;
use std::io::{Read, Seek, Write};

use crate::mp4box::*;

/// HEVC video sample entry box (`hev1`), an ISO BMFF VisualSampleEntry
/// containing an `hvcC` decoder-configuration child box.
#[derive(Debug, Clone, PartialEq, Serialize)]
pub struct Hev1Box {
    pub data_reference_index: u16,
    pub width: u16,
    pub height: u16,

    #[serde(with = "value_u32")]
    pub horizresolution: FixedPointU16,

    #[serde(with = "value_u32")]
    pub vertresolution: FixedPointU16,
    pub frame_count: u16,
    pub depth: u16,
    pub hvcc: HvcCBox,
}

impl Default for Hev1Box {
    fn default() -> Self {
        Hev1Box {
            data_reference_index: 0,
            width: 0,
            height: 0,
            // 0x48 = 72 dpi in 16.16 fixed point, the conventional default.
            horizresolution: FixedPointU16::new(0x48),
            vertresolution: FixedPointU16::new(0x48),
            frame_count: 1,
            // 0x0018 = 24-bit color depth, the conventional default.
            depth: 0x0018,
            hvcc: HvcCBox::default(),
        }
    }
}

impl Hev1Box {
    /// Builds a sample entry from an HEVC track configuration.
    pub fn new(config: &HevcConfig) -> Self {
        Hev1Box {
            data_reference_index: 1,
            width: config.width,
            height: config.height,
            horizresolution: FixedPointU16::new(0x48),
            vertresolution: FixedPointU16::new(0x48),
            frame_count: 1,
            depth: 0x0018,
            hvcc: HvcCBox::new(),
        }
    }

    pub fn get_type(&self) -> BoxType {
        BoxType::Hev1Box
    }

    /// Total serialized size: box header + 8 bytes (reserved + index) +
    /// 70 bytes of fixed VisualSampleEntry fields + the hvcC child box.
    pub fn get_size(&self) -> u64 {
        HEADER_SIZE + 8 + 70 + self.hvcc.box_size()
    }
}

impl Mp4Box for Hev1Box {
    fn box_type(&self) -> BoxType {
        return self.get_type();
    }

    fn box_size(&self) -> u64 {
        return self.get_size();
    }

    fn to_json(&self) -> Result<String> {
        Ok(serde_json::to_string(&self).unwrap())
    }

    fn summary(&self) -> Result<String> {
        let s = format!(
            "data_reference_index={} width={} height={} frame_count={}",
            self.data_reference_index, self.width, self.height, self.frame_count
        );
        Ok(s)
    }
}

impl<R: Read + Seek> ReadBox<&mut R> for Hev1Box {
    /// Parses the sample entry body; `size` is the full box size including
    /// the already-consumed header. Errors unless the first child is `hvcC`.
    fn read_box(reader: &mut R, size: u64) -> Result<Self> {
        let start = box_start(reader)?;

        reader.read_u32::<BigEndian>()?; // reserved
        reader.read_u16::<BigEndian>()?; // reserved
        let data_reference_index = reader.read_u16::<BigEndian>()?;

        reader.read_u32::<BigEndian>()?; // pre-defined, reserved
        reader.read_u64::<BigEndian>()?; // pre-defined
        reader.read_u32::<BigEndian>()?; // pre-defined
        let width = reader.read_u16::<BigEndian>()?;
        let height = reader.read_u16::<BigEndian>()?;
        let horizresolution = FixedPointU16::new_raw(reader.read_u32::<BigEndian>()?);
        let vertresolution = FixedPointU16::new_raw(reader.read_u32::<BigEndian>()?);
        reader.read_u32::<BigEndian>()?; // reserved
        let frame_count = reader.read_u16::<BigEndian>()?;
        skip_bytes(reader, 32)?; // compressorname
        let depth = reader.read_u16::<BigEndian>()?;
        reader.read_i16::<BigEndian>()?; // pre-defined

        let header = BoxHeader::read(reader)?;
        let BoxHeader { name, size: s } = header;
        if name == BoxType::HvcCBox {
            let hvcc = HvcCBox::read_box(reader, s)?;

            // Skip any trailing bytes so the reader lands on the next box.
            skip_bytes_to(reader, start + size)?;

            Ok(Hev1Box {
                data_reference_index,
                width,
                height,
                horizresolution,
                vertresolution,
                frame_count,
                depth,
                hvcc,
            })
        } else {
            Err(Error::InvalidData("hvcc not found"))
        }
    }
}

impl<W: Write> WriteBox<&mut W> for Hev1Box {
    /// Writes the sample entry; mirrors `read_box` field-for-field and
    /// returns the number of bytes written (== `box_size()`).
    fn write_box(&self, writer: &mut W) -> Result<u64> {
        let size = self.box_size();
        BoxHeader::new(self.box_type(), size).write(writer)?;

        writer.write_u32::<BigEndian>(0)?; // reserved
        writer.write_u16::<BigEndian>(0)?; // reserved
        writer.write_u16::<BigEndian>(self.data_reference_index)?;

        writer.write_u32::<BigEndian>(0)?; // pre-defined, reserved
        writer.write_u64::<BigEndian>(0)?; // pre-defined
        writer.write_u32::<BigEndian>(0)?; // pre-defined
        writer.write_u16::<BigEndian>(self.width)?;
        writer.write_u16::<BigEndian>(self.height)?;
        writer.write_u32::<BigEndian>(self.horizresolution.raw_value())?;
        writer.write_u32::<BigEndian>(self.vertresolution.raw_value())?;
        writer.write_u32::<BigEndian>(0)?; // reserved
        writer.write_u16::<BigEndian>(self.frame_count)?;
        // skip compressorname
        write_zeros(writer, 32)?;
        writer.write_u16::<BigEndian>(self.depth)?;
        writer.write_i16::<BigEndian>(-1)?; // pre-defined

        self.hvcc.write_box(writer)?;

        Ok(size)
    }
}

/// HEVC decoder configuration box (`hvcC`).
/// NOTE(review): only `configuration_version` is modeled; the rest of the
/// HEVCDecoderConfigurationRecord is skipped on read and never written, so
/// round-tripping a real file loses decoder configuration data.
#[derive(Debug, Clone, PartialEq, Default, Serialize)]
pub struct HvcCBox {
    pub configuration_version: u8,
}

impl HvcCBox {
    pub fn new() -> Self {
        Self {
            configuration_version: 1,
        }
    }
}

impl Mp4Box for HvcCBox {
    fn box_type(&self) -> BoxType {
        BoxType::HvcCBox
    }

    fn box_size(&self) -> u64 {
        // Header plus the single version byte.
        let size = HEADER_SIZE + 1;
        size
    }

    fn to_json(&self) -> Result<String> {
        Ok(serde_json::to_string(&self).unwrap())
    }

    fn summary(&self) -> Result<String> {
        let s = format!("configuration_version={}", self.configuration_version);
        Ok(s)
    }
}

impl<R: Read + Seek> ReadBox<&mut R> for HvcCBox {
    fn read_box(reader: &mut R, size: u64) -> Result<Self> {
        let start = box_start(reader)?;

        let configuration_version = reader.read_u8()?;
        // Skip the remainder of the decoder configuration record.
        skip_bytes_to(reader, start + size)?;

        Ok(HvcCBox {
            configuration_version,
        })
    }
}

impl<W: Write> WriteBox<&mut W> for HvcCBox {
    fn write_box(&self, writer: &mut W) -> Result<u64> {
        let size = self.box_size();
        BoxHeader::new(self.box_type(), size).write(writer)?;
        writer.write_u8(self.configuration_version)?;
        Ok(size)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::mp4box::BoxHeader;
    use std::io::Cursor;

    // Round-trip: write a Hev1Box, read it back, expect equality.
    #[test]
    fn test_hev1() {
        let src_box = Hev1Box {
            data_reference_index: 1,
            width: 320,
            height: 240,
            horizresolution: FixedPointU16::new(0x48),
            vertresolution: FixedPointU16::new(0x48),
            frame_count: 1,
            depth: 24,
            hvcc: HvcCBox {
                configuration_version: 1,
            },
        };
        let mut buf = Vec::new();
        src_box.write_box(&mut buf).unwrap();
        assert_eq!(buf.len(), src_box.box_size() as usize);

        let mut reader = Cursor::new(&buf);
        let header = BoxHeader::read(&mut reader).unwrap();
        assert_eq!(header.name, BoxType::Hev1Box);
        assert_eq!(src_box.box_size(), header.size);

        let dst_box = Hev1Box::read_box(&mut reader, header.size).unwrap();
        assert_eq!(src_box, dst_box);
    }
}
28.634146
86
0.568001
4a797d19bad2e53be2aa5d206a5ae1e7de5cfb5c
99
#![allow(non_upper_case_globals)]

/// Lower-case constant name; the crate-level `allow` above silences the
/// usual upper-case-globals lint for it.
pub const arg0: u8 = 1;

/// Entry point: formats the literal `1` into a string that is immediately
/// discarded.
pub fn main() {
    let _formatted = format!("{}", 1);
}
14.142857
33
0.585859
9c25c104f35a8f9cccb3b8db910e3f0c6e0fd385
5,174
use std::borrow::Cow;
use std::num::{NonZeroU128, NonZeroU64};
use std::sync::Arc;

use rand::Rng;

use crate::{
    span::{Span, SpanStatus},
    TraceCollector,
};

/// Non-zero 128-bit trace identifier (zero is reserved/invalid).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct TraceId(pub NonZeroU128);

impl TraceId {
    /// Returns `None` when `val` is zero.
    pub fn new(val: u128) -> Option<Self> {
        Some(Self(NonZeroU128::new(val)?))
    }

    pub fn get(self) -> u128 {
        self.0.get()
    }
}

/// Non-zero 64-bit span identifier (zero is reserved/invalid).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct SpanId(pub NonZeroU64);

impl SpanId {
    /// Returns `None` when `val` is zero.
    pub fn new(val: u64) -> Option<Self> {
        Some(Self(NonZeroU64::new(val)?))
    }

    /// Generates a random span id from the thread-local RNG.
    pub fn gen() -> Self {
        // Should this be a UUID?
        Self(rand::thread_rng().gen())
    }

    pub fn get(self) -> u64 {
        self.0.get()
    }
}

/// The immutable context of a `Span`
///
/// Importantly this contains all the information necessary to create a child `Span`
#[derive(Debug, Clone)]
pub struct SpanContext {
    pub trace_id: TraceId,

    pub parent_span_id: Option<SpanId>,

    pub span_id: SpanId,

    /// Link to other spans, can be cross-trace if this span aggregates multiple spans.
    ///
    /// See <https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/overview.md#links-between-spans>.
    pub links: Vec<(TraceId, SpanId)>,

    // Collector that finished spans are reported to; `None` means the
    // context is inert (spans are dropped).
    pub collector: Option<Arc<dyn TraceCollector>>,
}

impl SpanContext {
    /// Create a new root span context, sent to `collector`. The
    /// new span context has a random trace_id and span_id, and thus
    /// is not connected to any existing span or trace.
    pub fn new(collector: Arc<dyn TraceCollector>) -> Self {
        let mut rng = rand::thread_rng();
        // Ranges start at 1, so the NonZero constructors below cannot fail.
        let trace_id: u128 = rng.gen_range(1..u128::MAX);
        let span_id: u64 = rng.gen_range(1..u64::MAX);

        Self {
            trace_id: TraceId(NonZeroU128::new(trace_id).unwrap()),
            parent_span_id: None,
            span_id: SpanId(NonZeroU64::new(span_id).unwrap()),
            links: vec![],
            collector: Some(collector),
        }
    }

    /// Creates a new child of the Span described by this TraceContext
    /// (same trace id and collector, fresh span id, parent set to `self`).
    pub fn child(&self, name: impl Into<Cow<'static, str>>) -> Span {
        Span {
            name: name.into(),
            ctx: Self {
                trace_id: self.trace_id,
                span_id: SpanId::gen(),
                collector: self.collector.clone(),
                links: vec![],
                parent_span_id: Some(self.span_id),
            },
            start: None,
            end: None,
            status: SpanStatus::Unknown,
            metadata: Default::default(),
            events: Default::default(),
        }
    }
}

// Manual impl because `dyn TraceCollector` cannot be compared: collectors
// are considered equal when both are present or both absent.
impl PartialEq for SpanContext {
    fn eq(&self, other: &Self) -> bool {
        self.trace_id == other.trace_id
            && self.parent_span_id == other.parent_span_id
            && self.span_id == other.span_id
            && self.links == other.links
            && self.collector.is_some() == other.collector.is_some()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::RingBufferTraceCollector;

    #[test]
    fn test_new() {
        // two newly created spans should not have duplicated trace or span ids
        let collector = Arc::new(RingBufferTraceCollector::new(5)) as _;
        let ctx1 = SpanContext::new(Arc::clone(&collector));
        let ctx2 = SpanContext::new(collector);

        assert_ne!(ctx1.trace_id, ctx2.trace_id);
        assert_ne!(ctx1.span_id, ctx2.span_id);
    }

    // Exercises the field-by-field PartialEq impl above, including the
    // presence-only comparison of `collector`.
    #[test]
    fn test_partial_eq() {
        let collector_1 = Arc::new(RingBufferTraceCollector::new(5)) as _;
        let collector_2 = Arc::new(RingBufferTraceCollector::new(5)) as _;

        let ctx_ref = SpanContext {
            trace_id: TraceId::new(1).unwrap(),
            parent_span_id: Some(SpanId::new(2).unwrap()),
            span_id: SpanId::new(3).unwrap(),
            links: vec![
                (TraceId::new(4).unwrap(), SpanId::new(5).unwrap()),
                (TraceId::new(6).unwrap(), SpanId::new(7).unwrap()),
            ],
            collector: Some(collector_1),
        };

        let ctx = SpanContext { ..ctx_ref.clone() };
        assert_eq!(ctx_ref, ctx);

        let ctx = SpanContext {
            trace_id: TraceId::new(10).unwrap(),
            ..ctx_ref.clone()
        };
        assert_ne!(ctx_ref, ctx);

        let ctx = SpanContext {
            parent_span_id: Some(SpanId::new(10).unwrap()),
            ..ctx_ref.clone()
        };
        assert_ne!(ctx_ref, ctx);

        let ctx = SpanContext {
            span_id: SpanId::new(10).unwrap(),
            ..ctx_ref.clone()
        };
        assert_ne!(ctx_ref, ctx);

        let ctx = SpanContext {
            links: vec![(TraceId::new(4).unwrap(), SpanId::new(5).unwrap())],
            ..ctx_ref.clone()
        };
        assert_ne!(ctx_ref, ctx);

        let ctx = SpanContext {
            collector: None,
            ..ctx_ref.clone()
        };
        assert_ne!(ctx_ref, ctx);

        // Different collector instance, but both present -> still equal.
        let ctx = SpanContext {
            collector: Some(collector_2),
            ..ctx_ref.clone()
        };
        assert_eq!(ctx_ref, ctx);
    }
}
28.119565
132
0.559915
72c0931374d9bb80ed2ca269d8d93b400cfb0b4f
898
use assert_cmd::prelude::*;
use std::process::Command;

/// Smoke-tests the `space` subcommand's argument parser. Each invocation runs
/// `ockam --test-argument-parser space <args...>`, which only validates CLI
/// parsing (it does not execute the command), and must exit successfully.
#[test]
fn valid_arguments() -> Result<(), Box<dyn std::error::Error>> {
    const NODE_ADDR: &str = "/ip4/127.0.0.1/tcp/8080";
    let prefix_args = ["--test-argument-parser", "space"];

    // Helper: run one subcommand with the shared prefix and assert success.
    // (Replaces four copy-pasted Command setups.)
    let run = |args: &[&str]| -> Result<(), Box<dyn std::error::Error>> {
        let mut cmd = Command::cargo_bin("ockam")?;
        cmd.args(&prefix_args).args(args);
        cmd.assert().success();
        Ok(())
    };

    run(&["create", "space-name", NODE_ADDR])?;
    run(&["list"])?;
    run(&["show", "space-id", NODE_ADDR])?;
    run(&["delete", "space-id", NODE_ADDR])?;

    Ok(())
}
25.657143
64
0.55902
5b0c75bcad4c7a42593201c9592d43566e185a36
10,511
use { crate::{kit, prelude::*, theme, ui}, reclutch::display as gfx, }; pub struct ComboListItem<T: 'static> { label: kit::Label<T>, selected: bool, painter: theme::Painter<Self>, common: ui::CommonRef, listeners: ui::ListenerList<kit::ReadWrite<Self>>, components: ui::ComponentList<Self>, } impl<T: 'static> ComboListItem<T> { pub fn new(parent: ui::CommonRef, aux: &mut ui::Aux<T>) -> Self { let common = ui::CommonRef::new(parent); ComboListItem { label: kit::Label::new(common.clone(), aux), selected: false, painter: theme::get_painter(aux.theme.as_ref(), theme::painters::COMBO_LIST_ITEM), common, listeners: ui::ListenerList::new(vec![]), components: ui::ComponentList::new().and_push(kit::InteractionState::new( aux, kit::interaction_forwarder(None), None, None, )), } } pub fn set_text(&mut self, text: impl ToString) { self.label.set_text(text.to_string()); self.resize(); } pub fn text(&self) -> String { match self.label.text() { gfx::DisplayText::Simple(s) => s.clone(), _ => String::new(), } } pub fn set_selected(&mut self, selected: bool) { self.selected = selected; self.repaint(); } #[inline] pub fn selected(&self) -> bool { self.selected } fn resize(&mut self) { self.set_size(self.label.bounds().size); self.repaint(); } } impl<T: 'static> ui::Element for ComboListItem<T> { type Aux = T; #[inline] fn common(&self) -> &ui::CommonRef { &self.common } #[inline] fn update(&mut self, aux: &mut ui::Aux<Self::Aux>) { ui::dispatch_components(self, aux, |x| &mut x.components).unwrap(); ui::dispatch_list::<kit::ReadWrite<Self>, _>((self, aux), |(x, _)| &mut x.listeners); } fn draw(&mut self, display: &mut dyn gfx::GraphicsDisplay, aux: &mut ui::Aux<Self::Aux>) { ui::draw( self, |o, a| theme::paint(o, |o| &mut o.painter, a), display, aux, None, ) } } impl<T: 'static> ui::WidgetChildren<T> for ComboListItem<T> { crate::children![for <T>; label]; } pub struct ComboList<T: 'static> { combos: Vec<String>, items: Vec<ComboListItem<T>>, painter: theme::Painter<Self>, common: 
ui::CommonRef, listeners: ui::ListenerList<(ui::Write<Self>, ui::Write<ui::Aux<T>>)>, components: ui::ComponentList<Self>, } impl<T: 'static> ComboList<T> { pub fn new(parent: ui::CommonRef, aux: &mut ui::Aux<T>) -> Self { let common = ui::CommonRef::new(parent); let focus_listener = kit::focus_handler( aux, kit::focus_forwarder(), kit::FocusConfig { interaction_handler: common.with(|x| x.id()), mouse_trigger: Default::default(), }, ); ComboList { combos: Vec::new(), items: Vec::new(), painter: theme::get_painter(aux.theme.as_ref(), theme::painters::COMBO_LIST), common, listeners: ui::ListenerList::new(vec![focus_listener]), components: ui::ComponentList::new().and_push(kit::InteractionState::new( aux, kit::interaction_forwarder(None), None, None, )), } } pub fn set_combos(&mut self, combos: &[String], aux: &mut ui::Aux<T>) { self.combos = combos.to_owned(); self.update_items(aux); } #[inline] pub fn combos(&self) -> &[String] { &self.combos } fn update_items(&mut self, aux: &mut ui::Aux<T>) { let mut stack = ui::layout::VStack::new().into_node(None); self.items = Vec::with_capacity(self.combos.len()); let w = self.size().width; let mut h = 0.; for combo in &self.combos { let mut item = ComboListItem::new(self.common.clone(), aux); item.set_text(combo); let item_size = item.size(); h += item_size.height; item.set_size(gfx::Size::new(w, item_size.height)); stack.push(&item, None); self.items.push(item); } self.set_size(gfx::Size::new(w, h)); self.set_layout(stack); ui::layout::update_layout(self); } } impl<T: 'static> ui::Element for ComboList<T> { type Aux = T; #[inline] fn common(&self) -> &ui::CommonRef { &self.common } fn update(&mut self, aux: &mut ui::Aux<Self::Aux>) { ui::dispatch_components(self, aux, |x| &mut x.components).unwrap(); ui::dispatch_list::<kit::ReadWrite<Self>, _>((self, aux), |(x, _)| &mut x.listeners); ui::propagate_repaint(self); } fn draw(&mut self, display: &mut dyn gfx::GraphicsDisplay, aux: &mut ui::Aux<Self::Aux>) { ui::draw( self, |o, 
a| theme::paint(o, |o| &mut o.painter, a), display, aux, None, ) } } impl<T: 'static> ui::WidgetChildren<T> for ComboList<T> { fn children(&self) -> Vec<&dyn WidgetChildren<T>> { self.items .iter() .map(|x| x as &dyn WidgetChildren<T>) .collect() } fn children_mut(&mut self) -> Vec<&mut dyn WidgetChildren<T>> { self.items .iter_mut() .map(|x| x as &mut dyn WidgetChildren<T>) .collect() } } pub struct ComboBox<T: 'static> { combos: Vec<String>, label: kit::Label<T>, list: Option<ComboList<T>>, selected: Option<usize>, painter: theme::Painter<Self>, common: ui::CommonRef, listeners: ui::ListenerList<kit::ReadWrite<Self>>, components: ui::ComponentList<Self>, } impl<T: 'static> ComboBox<T> { pub fn new(parent: ui::CommonRef, aux: &mut ui::Aux<T>) -> Self { let common = ui::CommonRef::new(parent); let focus_listener = kit::focus_handler( aux, kit::focus_forwarder(), kit::FocusConfig { interaction_handler: common.with(|x| x.id()), mouse_trigger: Default::default(), }, ); ComboBox { combos: Vec::new(), label: kit::Label::new(common.clone(), aux), list: None, selected: None, painter: theme::get_painter(aux.theme.as_ref(), theme::painters::COMBO_BOX), common, listeners: ui::ListenerList::new(vec![focus_listener]), components: ui::ComponentList::new().and_push(kit::InteractionState::new( aux, |obj: &mut Self, aux, ev| { match ev { kit::InteractionEvent::Press(_) => obj.show_combo_list(aux), _ => {} } kit::interaction_forwarder(None)(obj, aux, ev); }, None, None, )), } } pub fn set_combos(&mut self, combos: &[String], aux: &mut ui::Aux<T>) { self.combos = combos.to_vec(); self.selected = if self.combos.is_empty() { None } else { Some(0) }; self.show_combo_list(aux); self.repaint(); self.update_label(); self.resize(); } #[inline] pub fn combos(&self) -> &[String] { &self.combos } pub fn set_selected(&mut self, selected: usize) { self.selected = Some(selected); self.repaint(); self.update_label(); self.resize(); } #[inline] pub fn selected(&self) -> Option<usize> { 
self.selected } pub fn selected_combo(&self) -> Option<&str> { self.selected .and_then(|x| self.combos.get(x).map(|x| &x[..])) } pub fn show_combo_list(&mut self, aux: &mut ui::Aux<T>) { let mut list = ComboList::new(self.common.clone(), aux); list.set_combos(&self.combos, aux); self.list = Some(list); } #[inline] pub fn hide_combo_list(&mut self) { self.list = None; } #[inline] pub fn is_combo_list_open(&self) -> bool { !self.list.is_none() } fn update_label(&mut self) { self.label.set_text( self.selected_combo() .map(|x| x.to_string()) .unwrap_or_default(), ); } fn resize(&mut self) { let metrics = theme::multi_metrics( self, &[theme::metrics::PADDING_X, theme::metrics::PADDING_Y], |x| &mut x.painter, ); let padding = gfx::Size::new(metrics[0].unwrap(), metrics[1].unwrap()); let label_bounds = self.label.bounds(); self.set_size(label_bounds.size + padding); let bounds = self.bounds(); let x = ui::layout::align_x(label_bounds, bounds, ui::layout::Alignment::Begin, 6.); let y = ui::layout::align_y(label_bounds, bounds, ui::layout::Alignment::Middle, 0.) 
- 1.; self.label.set_position(gfx::Point::new(x, y)); } } impl<T: 'static> ui::Element for ComboBox<T> { type Aux = T; #[inline] fn common(&self) -> &ui::CommonRef { &self.common } #[inline] fn update(&mut self, aux: &mut ui::Aux<Self::Aux>) { ui::dispatch_components(self, aux, |x| &mut x.components).unwrap(); ui::dispatch_list::<kit::ReadWrite<Self>, _>((self, aux), |(x, _)| &mut x.listeners); ui::propagate_repaint(self); } fn draw(&mut self, display: &mut dyn gfx::GraphicsDisplay, aux: &mut ui::Aux<Self::Aux>) { ui::draw( self, |o, a| theme::paint(o, |o| &mut o.painter, a), display, aux, None, ) } } impl<T: 'static> ui::WidgetChildren<T> for ComboBox<T> { fn children(&self) -> Vec<&dyn ui::WidgetChildren<T>> { if let Some(list) = &self.list { vec![&self.label, list] } else { vec![&self.label] } } fn children_mut(&mut self) -> Vec<&mut dyn ui::WidgetChildren<T>> { if let Some(list) = &mut self.list { vec![&mut self.label, list] } else { vec![&mut self.label] } } }
27.587927
98
0.521644
ac97ec70be207c6fca136bd0687cc8108cc8b93a
693
// run-pass // Testing that a libsyntax can parse modules with canonicalized base path // ignore-cross-compile #![feature(rustc_private)] extern crate syntax; use std::path::Path; use syntax::sess::ParseSess; use syntax::source_map::FilePathMapping; use syntax::parse; #[path = "mod_dir_simple/test.rs"] mod gravy; pub fn main() { syntax::with_default_globals(|| parse()); assert_eq!(gravy::foo(), 10); } fn parse() { let parse_session = ParseSess::new(FilePathMapping::empty()); let path = Path::new(file!()); let path = path.canonicalize().unwrap(); let mut parser = parse::new_parser_from_file(&parse_session, &path); let _ = parser.parse_crate_mod(); }
22.354839
74
0.691198
d760c5e605f1ae7f1bf38e5bb195dbb43c2370f5
146
// Copyright 2020 WHTCORPS INC Project Authors. Licensed Under Apache-2.0 mod ctx; pub use self::ctx::*; pub use crate::codec::{Error, Result};
20.857143
73
0.719178
e504d0ce4b84328eae065888bffac3f472b74165
1,076
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // // Copied from // https://github.com/rust-lang/rust/blob/master/src/test/run-pass/smallest-hello-world.rs extern { fn puts(s: *const u8); } extern "rust-intrinsic" { pub fn transmute<T, U>(t: T) -> U; } #[lang = "stack_exhausted"] extern fn stack_exhausted() {} #[lang = "eh_personality"] extern fn eh_personality() {} #[lang = "panic_fmt"] fn panic_fmt() -> ! { loop {} } // Additions // If we don't implement this here the linker will link against libunwind // which will then require kill, getpid()... which isn't available. #[no_mangle] pub unsafe fn __aeabi_unwind_cpp_pr0() -> () { loop {} }
39.851852
90
0.707249
39222be2ece594dfaded2cb49a9998f158cbec05
7,910
#![no_main] #![no_std] use arb_001 as _; // global logger + panicking-behavior + memory layout use bbqueue::{consts::*, framed::FrameGrantW, BBBuffer, ConstBBBuffer}; use embedded_hal::blocking::delay::{DelayMs, DelayUs}; use embedded_hal::digital::v2::OutputPin; use nrf52840_hal::{ self as hal, clocks::LfOscConfiguration, gpio::{p0::Parts as P0Parts, p1::Parts as P1Parts, Level}, pac::{Peripherals, SPIM0, SPIS1, TIMER2, UARTE0}, ppi::{Parts as PpiParts, Ppi0}, spim::{Frequency, Pins as SpimPins, Spim, TransferSplit, MODE_0}, spis::{Mode, Pins as SpisPins, Spis, Transfer}, timer::{Instance as TimerInstance, Periodic, Timer}, uarte::{Baudrate, Parity, Pins}, }; use anachro_client::{pubsub_table, Client, ClientIoError, Error}; use anachro_server::{Broker, Uuid}; use anachro_icd::Version; use anachro_spi::{arbitrator::EncLogicHLArbitrator, component::EncLogicHLComponent}; use anachro_spi_nrf52::{arbitrator::NrfSpiArbLL, component::NrfSpiComLL}; use heapless::{consts, Vec as HVec}; use postcard::to_slice_cobs; use serde::{Deserialize, Serialize}; use fleet_uarte::{ anachro_io::AnachroUarte, app::UarteApp, buffer::UarteBuffer, buffer::UarteParts, cobs_buf::Buffer, irq::{UarteIrq, UarteTimer}, }; use core::sync::atomic::AtomicBool; use groundhog_nrf52::GlobalRollingTimer; use groundhog::RollingTimer; static FLEET_BUFFER: UarteBuffer<U2048, U2048> = UarteBuffer { txd_buf: BBBuffer(ConstBBBuffer::new()), rxd_buf: BBBuffer(ConstBBBuffer::new()), timeout_flag: AtomicBool::new(false), }; #[derive(Debug, Serialize, Deserialize, Clone)] pub struct Demo { foo: u32, bar: i16, baz: (u8, u8), } pubsub_table! 
{ AnachroTable, Subs => { Something: "foo/bar/baz" => Demo, Else: "bib/bim/bap" => (), }, Pubs => { Etwas: "short/send" => (), Anders: "send/short" => (), }, } #[rtic::app(device = crate::hal::pac, peripherals = true, monotonic = groundhog_nrf52::GlobalRollingTimer)] const APP: () = { struct Resources { broker: Broker, anachro_uarte: AnachroUarte<U2048, U2048, U512>, uarte_timer: UarteTimer<TIMER2>, uarte_irq: UarteIrq<U2048, U2048, Ppi0, UARTE0>, } #[init(spawn = [anachro_periodic])] fn init(ctx: init::Context) -> init::LateResources { defmt::info!("Hello, world!"); let board = ctx.device; // Setup clocks let clocks = hal::clocks::Clocks::new(board.CLOCK); let clocks = clocks.enable_ext_hfosc(); let clocks = clocks.set_lfclk_src_external(LfOscConfiguration::NoExternalNoBypass); clocks.start_lfclk(); // Setup global timer GlobalRollingTimer::init(board.TIMER0); let p0_gpios = P0Parts::new(board.P0); let p1_gpios = P1Parts::new(board.P1); let ppis = PpiParts::new(board.PPI); // // D18/A0 CARD1-GO P0.04 // let mut card1_go = p0_gpios.p0_04; // // D19/A1 CARD2-GO P0.05 // let mut card2_go = p0_gpios.p0_05; // // D20/A2 CARD3-GO P0.30 // let card3_go = p0_gpios.p0_30; // // D21/A3 CARD4-GO P0.28 // let card4_go = p0_gpios.p0_28; // // D22/A4 CARD5-GO P0.02 // let card5_go = p0_gpios.p0_02; // // D23/A5 CARD6-GO P0.03 // let card6_go = p0_gpios.p0_03; // // SCLK/D15 CARD7-GO P0.14 // let card7_go = p0_gpios.p0_14; // // D13 CARDx-COPI P1.09 // let cardx_copi = p1_gpios.p1_09; // // D12 CARDx-SCK P0.08 // let cardx_sck = p0_gpios.p0_08; // // D11 CARDx-CSn P0.06 // let cardx_csn = p0_gpios.p0_06; // // D10 CARDx-CIPO P0.27 // let cardx_cipo = p0_gpios.p0_27; // D9 let d9 = p0_gpios.p0_26.into_floating_input(); // D6 SERIAL2-TX P0.07 let serial2_tx = p0_gpios.p0_07; // D5 SERIAL2-RX P1.08 let serial2_rx = p1_gpios.p1_08; // // SCL SERIAL1-TX P0.11 // let serial1_tx = p0_gpios.p0_11; // // SDA SERIAL1-RX P0.12 // let serial1_rx = p0_gpios.p0_12; let UarteParts { app, 
timer, irq } = FLEET_BUFFER .try_split( Pins { rxd: serial2_rx.into_floating_input().degrade(), txd: serial2_tx.into_push_pull_output(Level::Low).degrade(), cts: None, rts: None, }, Parity::EXCLUDED, Baudrate::BAUD1M, board.TIMER2, ppis.ppi0, board.UARTE0, 255, 10_000, ) .map_err(drop) .unwrap(); let an_uarte = AnachroUarte::new(app, Buffer::new(), Uuid::from_bytes([42u8; 16])); let mut broker = Broker::default(); broker .register_client(&Uuid::from_bytes([42u8; 16])) .unwrap(); // Spawn periodic tasks ctx.spawn.anachro_periodic().ok(); init::LateResources { broker, anachro_uarte: an_uarte, uarte_timer: timer, uarte_irq: irq, } // defmt::info!("Starting loop"); // let mut countdown = 0; // let mut last_d9 = false; // loop { // } } #[task(resources = [broker, anachro_uarte], schedule = [anachro_periodic])] fn anachro_periodic(ctx: anachro_periodic::Context) { // static mut HAS_CONNECTED: bool = false; let broker = ctx.resources.broker; let uarte = ctx.resources.anachro_uarte; let mut out_msgs: HVec<_, consts::U16> = HVec::new(); match broker.process_msg(uarte, &mut out_msgs) { Ok(_) => {} Err(e) => { defmt::error!("broker proc msg: {:?}", e); // arb_001::exit(); } } if !out_msgs.is_empty() { defmt::info!("broker sending {:?} msgs", out_msgs.len()); } let mut serout: HVec<HVec<u8, consts::U128>, consts::U16> = HVec::new(); for msg in out_msgs { // TODO: Routing defmt::info!("Out message!"); use postcard::to_vec_cobs; if let Ok(resp) = to_vec_cobs(&msg.msg) { defmt::info!("resp out: {:?}", &resp[..]); serout.push(resp).unwrap(); } else { defmt::error!("Ser failed!"); arb_001::exit(); } } for msg in serout { match uarte.enqueue(&msg) { Ok(_) => defmt::info!("arb_port enqueued."), Err(()) => { defmt::error!("enqueue failed!"); arb_001::exit(); } } } ctx.schedule .anachro_periodic(ctx.scheduled + 1_000) // 1ms .ok(); } #[task(binds = TIMER2, resources = [uarte_timer])] fn timer2(ctx: timer2::Context) { // fleet uarte timer ctx.resources.uarte_timer.interrupt(); } 
#[task(binds = UARTE0_UART0, resources = [uarte_irq])] fn uarte0(ctx: uarte0::Context) { // fleet uarte interrupt ctx.resources.uarte_irq.interrupt(); } #[idle] fn idle(_ctx: idle::Context) -> ! { loop { // Don't WFI/WFE for now cortex_m::asm::nop(); } } // Sacrificial hardware interrupts extern "C" { fn SWI1_EGU1(); // fn SWI2_EGU2(); // fn SWI3_EGU3(); } }; // #[cortex_m_rt::entry] // fn main() -> ! { // defmt::info!("Hello, world!"); // // defmt::error!("Connected!"); // // arb_001::exit() // }
29.625468
107
0.539697
260e41f634133e1a7a6d87a24d73c8949250d456
2,233
use crate::error::EntityDecoratorError; use crate::get_meta; use quote::ToTokens; lazy_static! { static ref VALID_TO_ONE_WRAPPER: Vec<&'static str> = vec!["Option", "Box"]; static ref VALID_TO_MANY_WRAPPER: Vec<&'static str> = vec!["Vec", "HashSet"]; } pub(crate) fn get_field_type(item: &syn::Field) -> syn::Result<FieldType> { if let Some(syn::Meta::List(syn::MetaList { ref nested, .. })) = get_meta(&item.attrs)?.last() { if let Some(syn::NestedMeta::Meta(ref meta_item)) = nested.last() { match meta_item { syn::Meta::Path(syn::Path { segments, .. }) => { if let Some(seg) = segments.last() { let field_ty = &seg.ident; if field_ty == "id" { return Ok(FieldType::Id); } else if field_ty == "to_many" { return Ok(FieldType::ToMany); } else if field_ty == "to_one" { return Ok(FieldType::ToOne); } else { return Err(syn::Error::new_spanned( field_ty, EntityDecoratorError::InvalidUnitDecorator(field_ty.to_string()), )); } } else { return Err(syn::Error::new_spanned( meta_item, EntityDecoratorError::InvalidUnitDecorator( meta_item.path().segments.to_token_stream().to_string(), ), )); } }, _ => { return Err(syn::Error::new_spanned( meta_item, EntityDecoratorError::InvalidUnitDecorator( meta_item.path().segments.to_token_stream().to_string(), ), )); }, } } } Ok(FieldType::Plain) } #[derive(Debug, Eq, PartialEq)] pub(crate) enum FieldType { Id, ToOne, ToMany, Plain, }
37.216667
100
0.434393
8974a16250cbe2fef63b621e005624d71c2a6345
14,032
// Copyright 2016 Mozilla Foundation. See the COPYRIGHT // file at the top-level directory of this distribution. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // https://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use simd::u16x8; use simd::u8x16; use simd::Simd; // TODO: Migrate unaligned access to stdlib code if/when the RFC // https://github.com/rust-lang/rfcs/pull/1725 is implemented. #[inline(always)] pub unsafe fn load16_unaligned(ptr: *const u8) -> u8x16 { let mut simd = ::std::mem::uninitialized(); ::std::ptr::copy_nonoverlapping(ptr, &mut simd as *mut u8x16 as *mut u8, 16); simd } #[allow(dead_code)] #[inline(always)] pub unsafe fn load16_aligned(ptr: *const u8) -> u8x16 { *(ptr as *const u8x16) } #[inline(always)] pub unsafe fn store16_unaligned(ptr: *mut u8, s: u8x16) { ::std::ptr::copy_nonoverlapping(&s as *const u8x16 as *const u8, ptr, 16); } #[allow(dead_code)] #[inline(always)] pub unsafe fn store16_aligned(ptr: *mut u8, s: u8x16) { *(ptr as *mut u8x16) = s; } #[inline(always)] pub unsafe fn load8_unaligned(ptr: *const u16) -> u16x8 { let mut simd = ::std::mem::uninitialized(); ::std::ptr::copy_nonoverlapping(ptr as *const u8, &mut simd as *mut u16x8 as *mut u8, 16); simd } #[allow(dead_code)] #[inline(always)] pub unsafe fn load8_aligned(ptr: *const u16) -> u16x8 { *(ptr as *const u16x8) } #[inline(always)] pub unsafe fn store8_unaligned(ptr: *mut u16, s: u16x8) { ::std::ptr::copy_nonoverlapping(&s as *const u16x8 as *const u8, ptr as *mut u8, 16); } #[allow(dead_code)] #[inline(always)] pub unsafe fn store8_aligned(ptr: *mut u16, s: u16x8) { *(ptr as *mut u16x8) = s; } extern "platform-intrinsic" { fn simd_shuffle16<T: Simd, U: Simd<Elem = T::Elem>>(x: T, y: T, idx: [u32; 16]) -> U; } // #[inline(always)] // fn simd_byte_swap_u8(s: u8x16) -> u8x16 
{ // unsafe { // simd_shuffle16(s, s, [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14]) // } // } // #[inline(always)] // pub fn simd_byte_swap(s: u16x8) -> u16x8 { // to_u16_lanes(simd_byte_swap_u8(to_u8_lanes(s))) // } #[inline(always)] pub fn simd_byte_swap(s: u16x8) -> u16x8 { let left = s << 8; let right = s >> 8; left | right } #[inline(always)] pub fn to_u16_lanes(s: u8x16) -> u16x8 { unsafe { ::std::mem::transmute(s) } } // #[inline(always)] // pub fn to_u8_lanes(s: u16x8) -> u8x16 { // unsafe { ::std::mem::transmute(s) } // } cfg_if! { if #[cfg(target_feature = "sse2")] { use simd::i16x8; use simd::i8x16; extern "platform-intrinsic" { fn x86_mm_movemask_epi8(x: i8x16) -> i32; } // Expose low-level mask instead of higher-level conclusion, // because the non-ASCII case would perform less well otherwise. #[inline(always)] pub fn mask_ascii(s: u8x16) -> i32 { unsafe { let signed: i8x16 = ::std::mem::transmute_copy(&s); x86_mm_movemask_epi8(signed) } } } else { } } cfg_if! { if #[cfg(target_feature = "sse2")] { #[inline(always)] pub fn simd_is_ascii(s: u8x16) -> bool { unsafe { let signed: i8x16 = ::std::mem::transmute_copy(&s); x86_mm_movemask_epi8(signed) == 0 } } } else if #[cfg(target_arch = "aarch64")]{ extern "platform-intrinsic" { fn aarch64_vmaxvq_u8(x: u8x16) -> u8; } #[inline(always)] pub fn simd_is_ascii(s: u8x16) -> bool { unsafe { aarch64_vmaxvq_u8(s) < 0x80 } } } else { #[inline(always)] pub fn simd_is_ascii(s: u8x16) -> bool { // This optimizes better on ARM than // the lt formulation. let highest_ascii = u8x16::splat(0x7F); !s.gt(highest_ascii).any() } } } cfg_if! 
{ if #[cfg(target_feature = "sse2")] { #[inline(always)] pub fn simd_is_str_latin1(s: u8x16) -> bool { if simd_is_ascii(s) { return true; } let above_str_latin1 = u8x16::splat(0xC4); s.lt(above_str_latin1).all() } } else if #[cfg(target_arch = "aarch64")]{ #[inline(always)] pub fn simd_is_str_latin1(s: u8x16) -> bool { unsafe { aarch64_vmaxvq_u8(s) < 0xC4 } } } else { #[inline(always)] pub fn simd_is_str_latin1(s: u8x16) -> bool { let above_str_latin1 = u8x16::splat(0xC4); s.lt(above_str_latin1).all() } } } cfg_if! { if #[cfg(target_arch = "aarch64")]{ extern "platform-intrinsic" { fn aarch64_vmaxvq_u16(x: u16x8) -> u16; } #[inline(always)] pub fn simd_is_basic_latin(s: u16x8) -> bool { unsafe { aarch64_vmaxvq_u16(s) < 0x80 } } #[inline(always)] pub fn simd_is_latin1(s: u16x8) -> bool { unsafe { aarch64_vmaxvq_u16(s) < 0x100 } } } else { #[inline(always)] pub fn simd_is_basic_latin(s: u16x8) -> bool { let above_ascii = u16x8::splat(0x80); s.lt(above_ascii).all() } #[inline(always)] pub fn simd_is_latin1(s: u16x8) -> bool { // For some reason, on SSE2 this formulation // seems faster in this case while the above // function is better the other way round... let highest_latin1 = u16x8::splat(0xFF); !s.gt(highest_latin1).any() } } } #[inline(always)] pub fn contains_surrogates(s: u16x8) -> bool { let mask = u16x8::splat(0xF800); let surrogate_bits = u16x8::splat(0xD800); (s & mask).eq(surrogate_bits).any() } cfg_if! { if #[cfg(target_arch = "aarch64")]{ macro_rules! aarch64_return_false_if_below_hebrew { ($s:ident) => ({ unsafe { if aarch64_vmaxvq_u16($s) < 0x0590 { return false; } } }) } macro_rules! non_aarch64_return_false_if_all { ($s:ident) => () } } else { macro_rules! aarch64_return_false_if_below_hebrew { ($s:ident) => () } macro_rules! non_aarch64_return_false_if_all { ($s:ident) => ({ if $s.all() { return false; } }) } } } macro_rules! 
in_range16x8 { ($s:ident, $start:expr, $end:expr) => {{ // SIMD sub is wrapping ($s - u16x8::splat($start)).lt(u16x8::splat($end - $start)) }}; } #[inline(always)] pub fn is_u16x8_bidi(s: u16x8) -> bool { // We try to first quickly refute the RTLness of the vector. If that // fails, we do the real RTL check, so in that case we end up wasting // the work for the up-front quick checks. Even the quick-check is // two-fold in order to return `false` ASAP if everything is below // Hebrew. aarch64_return_false_if_below_hebrew!(s); let below_hebrew = s.lt(u16x8::splat(0x0590)); non_aarch64_return_false_if_all!(below_hebrew); if (below_hebrew | in_range16x8!(s, 0x0900, 0x200F) | in_range16x8!(s, 0x2068, 0xD802)).all() { return false; } // Quick refutation failed. Let's do the full check. (in_range16x8!(s, 0x0590, 0x0900) | in_range16x8!(s, 0xFB1D, 0xFE00) | in_range16x8!(s, 0xFE70, 0xFEFF) | in_range16x8!(s, 0xD802, 0xD804) | in_range16x8!(s, 0xD83A, 0xD83C) | s.eq(u16x8::splat(0x200F)) | s.eq(u16x8::splat(0x202B)) | s.eq(u16x8::splat(0x202E)) | s.eq(u16x8::splat(0x2067))) .any() } #[inline(always)] pub fn simd_unpack(s: u8x16) -> (u16x8, u16x8) { unsafe { let first: u8x16 = simd_shuffle16( s, u8x16::splat(0), [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23], ); let second: u8x16 = simd_shuffle16( s, u8x16::splat(0), [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31], ); ( ::std::mem::transmute_copy(&first), ::std::mem::transmute_copy(&second), ) } } cfg_if! 
{ if #[cfg(target_feature = "sse2")] { extern "platform-intrinsic" { fn x86_mm_packus_epi16(x: i16x8, y: i16x8) -> u8x16; } #[inline(always)] pub fn simd_pack(a: u16x8, b: u16x8) -> u8x16 { unsafe { let first: i16x8 = ::std::mem::transmute_copy(&a); let second: i16x8 = ::std::mem::transmute_copy(&b); x86_mm_packus_epi16(first, second) } } } else { #[inline(always)] pub fn simd_pack(a: u16x8, b: u16x8) -> u8x16 { unsafe { let first: u8x16 = ::std::mem::transmute_copy(&a); let second: u8x16 = ::std::mem::transmute_copy(&b); simd_shuffle16( first, second, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30], ) } } } } #[cfg(test)] mod tests { use super::*; #[test] fn test_unpack() { let ascii: [u8; 16] = [ 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, ]; let basic_latin: [u16; 16] = [ 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, ]; let simd = unsafe { load16_unaligned(ascii.as_ptr()) }; let mut vec = Vec::with_capacity(16); vec.resize(16, 0u16); let (first, second) = simd_unpack(simd); let ptr = vec.as_mut_ptr(); unsafe { store8_unaligned(ptr, first); store8_unaligned(ptr.offset(8), second); } assert_eq!(&vec[..], &basic_latin[..]); } #[test] fn test_simd_is_basic_latin_success() { let ascii: [u8; 16] = [ 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, ]; let basic_latin: [u16; 16] = [ 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, ]; let first = unsafe { load8_unaligned(basic_latin.as_ptr()) }; let second = unsafe { load8_unaligned(basic_latin.as_ptr().offset(8)) }; let mut vec = Vec::with_capacity(16); vec.resize(16, 0u8); let ptr = vec.as_mut_ptr(); assert!(simd_is_basic_latin(first | second)); unsafe { store16_unaligned(ptr, simd_pack(first, second)); } assert_eq!(&vec[..], &ascii[..]); } #[test] fn test_simd_is_basic_latin_c0() { let input: [u16; 16] = [ 0x61, 
0x62, 0x63, 0x81, 0x65, 0x66, 0x67, 0x68, 0x69, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, ]; let first = unsafe { load8_unaligned(input.as_ptr()) }; let second = unsafe { load8_unaligned(input.as_ptr().offset(8)) }; assert!(!simd_is_basic_latin(first | second)); } #[test] fn test_simd_is_basic_latin_0fff() { let input: [u16; 16] = [ 0x61, 0x62, 0x63, 0x0FFF, 0x65, 0x66, 0x67, 0x68, 0x69, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, ]; let first = unsafe { load8_unaligned(input.as_ptr()) }; let second = unsafe { load8_unaligned(input.as_ptr().offset(8)) }; assert!(!simd_is_basic_latin(first | second)); } #[test] fn test_simd_is_basic_latin_ffff() { let input: [u16; 16] = [ 0x61, 0x62, 0x63, 0xFFFF, 0x65, 0x66, 0x67, 0x68, 0x69, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, ]; let first = unsafe { load8_unaligned(input.as_ptr()) }; let second = unsafe { load8_unaligned(input.as_ptr().offset(8)) }; assert!(!simd_is_basic_latin(first | second)); } #[test] fn test_simd_is_ascii_success() { let ascii: [u8; 16] = [ 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, ]; let simd = unsafe { load16_unaligned(ascii.as_ptr()) }; assert!(simd_is_ascii(simd)); } #[test] fn test_simd_is_ascii_failure() { let input: [u8; 16] = [ 0x61, 0x62, 0x63, 0x64, 0x81, 0x66, 0x67, 0x68, 0x69, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, ]; let simd = unsafe { load16_unaligned(input.as_ptr()) }; assert!(!simd_is_ascii(simd)); } #[cfg(target_feature = "sse2")] #[test] fn test_check_ascii() { let input: [u8; 16] = [ 0x61, 0x62, 0x63, 0x64, 0x81, 0x66, 0x67, 0x68, 0x69, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, ]; let simd = unsafe { load16_unaligned(input.as_ptr()) }; let mask = mask_ascii(simd); assert_ne!(mask, 0); assert_eq!(mask.trailing_zeros(), 4); } #[test] fn test_alu() { let input: [u8; 16] = [ 0x61, 0x62, 0x63, 0x64, 0x81, 0x66, 0x67, 0x68, 0x69, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, ]; let mut alu = 0u64; unsafe { 
::std::ptr::copy_nonoverlapping(input.as_ptr(), &mut alu as *mut u64 as *mut u8, 8); } let masked = alu & 0x8080808080808080; assert_eq!(masked.trailing_zeros(), 39); } }
29.791932
99
0.537129
d669668f77161e1675adc2bcc3570238b1ebac8a
1,881
pub trait IntoFlatZipIter<I> { fn flat_zip(self, other: I) -> FlatZipIter<I>; } pub struct FlatZipIter<I> { iters: Vec<I>, } impl<I: Iterator> IntoFlatZipIter<I> for I { fn flat_zip(self, other: I) -> FlatZipIter<I> { FlatZipIter { iters: vec![self, other], } } } impl<I: Iterator> IntoFlatZipIter<I> for FlatZipIter<I> { fn flat_zip(mut self, other: I) -> FlatZipIter<I> { self.iters.push(other); FlatZipIter { iters: self.iters, } } } impl<I: Iterator> Iterator for FlatZipIter<I> { type Item = Vec<<I as Iterator>::Item>; fn next(&mut self) -> Option<Self::Item> { match self .iters .iter_mut() .map(|i| i.next().map_or_else(|| Err("None encountered"), |x| Ok(x))) .collect::<Result<Self::Item, &str>>() { Err(_) => None, Ok(v) => Some(v), } } } #[cfg(test)] mod tests { use crate::iter::flat_zip::IntoFlatZipIter; #[test] fn test_flat_zip() { let arr1 = vec![1, 2, 3]; let arr2 = vec![4, 5, 6]; let arr3 = vec![7, 8, 9]; let expected_1 = vec![vec![&1, &4, &7], vec![&2, &5, &8], vec![&3, &6, &9]]; for (i1, i2) in arr1 .iter() .flat_zip(arr2.iter()) .flat_zip(arr3.iter()) .zip(expected_1.into_iter()) { assert_eq!(i1, i2); } let expected_2 = vec![vec![&1, &4, &7, &10], vec![&2, &5, &8, &11], vec![ &3, &6, &9, &12, ]]; let arr4 = vec![10, 11, 12, 13]; for (i1, i2) in arr1 .iter() .flat_zip(arr2.iter()) .flat_zip(arr3.iter()) .flat_zip(arr4.iter()) .zip(expected_2.into_iter()) { assert_eq!(i1, i2); } } }
24.75
84
0.466773
f8fb1d981b7dc7dcc014f7932a40ab57408d0fc8
24,254
//! Board file for Nucleo-F429ZI development board //! //! - <https://www.st.com/en/evaluation-tools/nucleo-f429zi.html> #![no_std] // Disable this attribute when documenting, as a workaround for // https://github.com/rust-lang/rust/issues/62184. #![cfg_attr(not(doc), no_main)] #![deny(missing_docs)] use capsules::virtual_alarm::VirtualMuxAlarm; use components::gpio::GpioComponent; use kernel::capabilities; use kernel::component::Component; use kernel::dynamic_deferred_call::{DynamicDeferredCall, DynamicDeferredCallClientState}; use kernel::hil::led::LedHigh; use kernel::platform::{KernelResources, SyscallDriverLookup}; use kernel::scheduler::round_robin::RoundRobinSched; use kernel::{create_capability, debug, static_init}; use stm32f429zi::gpio::{AlternateFunction, Mode, PinId, PortId}; use stm32f429zi::interrupt_service::Stm32f429ziDefaultPeripherals; /// Support routines for debugging I/O. pub mod io; // Number of concurrent processes this platform supports. const NUM_PROCS: usize = 4; const NUM_UPCALLS_IPC: usize = NUM_PROCS + 1; // Actual memory for holding the active process structures. static mut PROCESSES: [Option<&'static dyn kernel::process::Process>; NUM_PROCS] = [None, None, None, None]; static mut CHIP: Option<&'static stm32f429zi::chip::Stm32f4xx<Stm32f429ziDefaultPeripherals>> = None; // How should the kernel respond when a process faults. const FAULT_RESPONSE: kernel::process::PanicFaultPolicy = kernel::process::PanicFaultPolicy {}; /// Dummy buffer that causes the linker to reserve enough space for the stack. #[no_mangle] #[link_section = ".stack_buffer"] pub static mut STACK_MEMORY: [u8; 0x2000] = [0; 0x2000]; /// A structure representing this platform that holds references to all /// capsules for this platform. 
struct NucleoF429ZI { console: &'static capsules::console::Console<'static>, ipc: kernel::ipc::IPC<NUM_PROCS, NUM_UPCALLS_IPC>, led: &'static capsules::led::LedDriver< 'static, LedHigh<'static, stm32f429zi::gpio::Pin<'static>>, >, button: &'static capsules::button::Button<'static, stm32f429zi::gpio::Pin<'static>>, adc: &'static capsules::adc::AdcVirtualized<'static>, alarm: &'static capsules::alarm::AlarmDriver< 'static, VirtualMuxAlarm<'static, stm32f429zi::tim2::Tim2<'static>>, >, temperature: &'static capsules::temperature::TemperatureSensor<'static>, gpio: &'static capsules::gpio::GPIO<'static, stm32f429zi::gpio::Pin<'static>>, scheduler: &'static RoundRobinSched<'static>, systick: cortexm4::systick::SysTick, } /// Mapping of integer syscalls to objects that implement syscalls. impl SyscallDriverLookup for NucleoF429ZI { fn with_driver<F, R>(&self, driver_num: usize, f: F) -> R where F: FnOnce(Option<&dyn kernel::syscall::SyscallDriver>) -> R, { match driver_num { capsules::console::DRIVER_NUM => f(Some(self.console)), capsules::led::DRIVER_NUM => f(Some(self.led)), capsules::button::DRIVER_NUM => f(Some(self.button)), capsules::adc::DRIVER_NUM => f(Some(self.adc)), capsules::alarm::DRIVER_NUM => f(Some(self.alarm)), capsules::temperature::DRIVER_NUM => f(Some(self.temperature)), kernel::ipc::DRIVER_NUM => f(Some(&self.ipc)), capsules::gpio::DRIVER_NUM => f(Some(self.gpio)), _ => f(None), } } } impl KernelResources< stm32f429zi::chip::Stm32f4xx< 'static, stm32f429zi::interrupt_service::Stm32f429ziDefaultPeripherals<'static>, >, > for NucleoF429ZI { type SyscallDriverLookup = Self; type SyscallFilter = (); type ProcessFault = (); type Scheduler = RoundRobinSched<'static>; type SchedulerTimer = cortexm4::systick::SysTick; type WatchDog = (); type ContextSwitchCallback = (); fn syscall_driver_lookup(&self) -> &Self::SyscallDriverLookup { &self } fn syscall_filter(&self) -> &Self::SyscallFilter { &() } fn process_fault(&self) -> &Self::ProcessFault { &() } fn 
scheduler(&self) -> &Self::Scheduler { self.scheduler } fn scheduler_timer(&self) -> &Self::SchedulerTimer { &self.systick } fn watchdog(&self) -> &Self::WatchDog { &() } fn context_switch_callback(&self) -> &Self::ContextSwitchCallback { &() } } /// Helper function called during bring-up that configures DMA. unsafe fn setup_dma( dma: &stm32f429zi::dma1::Dma1, dma_streams: &'static [stm32f429zi::dma1::Stream; 8], usart3: &'static stm32f429zi::usart::Usart, ) { use stm32f429zi::dma1::Dma1Peripheral; use stm32f429zi::usart; dma.enable_clock(); let usart3_tx_stream = &dma_streams[Dma1Peripheral::USART3_TX.get_stream_idx()]; let usart3_rx_stream = &dma_streams[Dma1Peripheral::USART3_RX.get_stream_idx()]; usart3.set_dma( usart::TxDMA(usart3_tx_stream), usart::RxDMA(usart3_rx_stream), ); usart3_tx_stream.set_client(usart3); usart3_rx_stream.set_client(usart3); usart3_tx_stream.setup(Dma1Peripheral::USART3_TX); usart3_rx_stream.setup(Dma1Peripheral::USART3_RX); cortexm4::nvic::Nvic::new(Dma1Peripheral::USART3_TX.get_stream_irqn()).enable(); cortexm4::nvic::Nvic::new(Dma1Peripheral::USART3_RX.get_stream_irqn()).enable(); } /// Helper function called during bring-up that configures multiplexed I/O. unsafe fn set_pin_primary_functions( syscfg: &stm32f429zi::syscfg::Syscfg, gpio_ports: &'static stm32f429zi::gpio::GpioPorts<'static>, ) { use kernel::hil::gpio::Configure; syscfg.enable_clock(); gpio_ports.get_port_from_port_id(PortId::B).enable_clock(); // User LD2 is connected to PB07. 
Configure PB07 as `debug_gpio!(0, ...)` gpio_ports.get_pin(PinId::PB07).map(|pin| { pin.make_output(); // Configure kernel debug gpios as early as possible kernel::debug::assign_gpios(Some(pin), None, None); }); gpio_ports.get_port_from_port_id(PortId::D).enable_clock(); // pd8 and pd9 (USART3) is connected to ST-LINK virtual COM port gpio_ports.get_pin(PinId::PD08).map(|pin| { pin.set_mode(Mode::AlternateFunctionMode); // AF7 is USART2_TX pin.set_alternate_function(AlternateFunction::AF7); }); gpio_ports.get_pin(PinId::PD09).map(|pin| { pin.set_mode(Mode::AlternateFunctionMode); // AF7 is USART2_RX pin.set_alternate_function(AlternateFunction::AF7); }); gpio_ports.get_port_from_port_id(PortId::C).enable_clock(); // button is connected on pc13 gpio_ports.get_pin(PinId::PC13).map(|pin| { pin.enable_interrupt(); }); // set interrupt for pin D0 gpio_ports.get_pin(PinId::PG09).map(|pin| { pin.enable_interrupt(); }); // Enable clocks for GPIO Ports // Disable some of them if you don't need some of the GPIOs gpio_ports.get_port_from_port_id(PortId::A).enable_clock(); // Ports B, C and D are already enabled gpio_ports.get_port_from_port_id(PortId::E).enable_clock(); gpio_ports.get_port_from_port_id(PortId::F).enable_clock(); gpio_ports.get_port_from_port_id(PortId::G).enable_clock(); gpio_ports.get_port_from_port_id(PortId::H).enable_clock(); // Arduino A0 gpio_ports.get_pin(PinId::PA03).map(|pin| { pin.set_mode(stm32f429zi::gpio::Mode::AnalogMode); }); // Arduino A1 gpio_ports.get_pin(PinId::PC00).map(|pin| { pin.set_mode(stm32f429zi::gpio::Mode::AnalogMode); }); // Arduino A2 gpio_ports.get_pin(PinId::PC03).map(|pin| { pin.set_mode(stm32f429zi::gpio::Mode::AnalogMode); }); // Arduino A3 gpio_ports.get_pin(PinId::PF03).map(|pin| { pin.set_mode(stm32f429zi::gpio::Mode::AnalogMode); }); // Arduino A4 gpio_ports.get_pin(PinId::PF05).map(|pin| { pin.set_mode(stm32f429zi::gpio::Mode::AnalogMode); }); // Arduino A5 gpio_ports.get_pin(PinId::PF10).map(|pin| { 
pin.set_mode(stm32f429zi::gpio::Mode::AnalogMode); }); } /// Helper function for miscellaneous peripheral functions unsafe fn setup_peripherals(tim2: &stm32f429zi::tim2::Tim2) { // USART3 IRQn is 39 cortexm4::nvic::Nvic::new(stm32f429zi::nvic::USART3).enable(); // TIM2 IRQn is 28 tim2.enable_clock(); tim2.start(); cortexm4::nvic::Nvic::new(stm32f429zi::nvic::TIM2).enable(); } /// Statically initialize the core peripherals for the chip. /// /// This is in a separate, inline(never) function so that its stack frame is /// removed when this function returns. Otherwise, the stack space used for /// these static_inits is wasted. #[inline(never)] unsafe fn get_peripherals() -> ( &'static mut Stm32f429ziDefaultPeripherals<'static>, &'static stm32f429zi::syscfg::Syscfg<'static>, &'static stm32f429zi::dma1::Dma1<'static>, ) { // We use the default HSI 16Mhz clock let rcc = static_init!(stm32f429zi::rcc::Rcc, stm32f429zi::rcc::Rcc::new()); let syscfg = static_init!( stm32f429zi::syscfg::Syscfg, stm32f429zi::syscfg::Syscfg::new(rcc) ); let exti = static_init!( stm32f429zi::exti::Exti, stm32f429zi::exti::Exti::new(syscfg) ); let dma1 = static_init!(stm32f429zi::dma1::Dma1, stm32f429zi::dma1::Dma1::new(rcc)); let peripherals = static_init!( Stm32f429ziDefaultPeripherals, Stm32f429ziDefaultPeripherals::new(rcc, exti, dma1) ); (peripherals, syscfg, dma1) } /// Main function. /// /// This is called after RAM initialization is complete. 
#[no_mangle] pub unsafe fn main() { stm32f429zi::init(); let (peripherals, syscfg, dma1) = get_peripherals(); peripherals.init(); let base_peripherals = &peripherals.stm32f4; setup_peripherals(&base_peripherals.tim2); set_pin_primary_functions(syscfg, &base_peripherals.gpio_ports); setup_dma( dma1, &base_peripherals.dma_streams, &base_peripherals.usart3, ); let board_kernel = static_init!(kernel::Kernel, kernel::Kernel::new(&PROCESSES)); let dynamic_deferred_call_clients = static_init!([DynamicDeferredCallClientState; 2], Default::default()); let dynamic_deferred_caller = static_init!( DynamicDeferredCall, DynamicDeferredCall::new(dynamic_deferred_call_clients) ); DynamicDeferredCall::set_global_instance(dynamic_deferred_caller); let chip = static_init!( stm32f429zi::chip::Stm32f4xx<Stm32f429ziDefaultPeripherals>, stm32f429zi::chip::Stm32f4xx::new(peripherals) ); CHIP = Some(chip); // UART // Create a shared UART channel for kernel debug. base_peripherals.usart3.enable_clock(); let uart_mux = components::console::UartMuxComponent::new( &base_peripherals.usart3, 115200, dynamic_deferred_caller, ) .finalize(()); io::WRITER.set_initialized(); // Create capabilities that the board needs to call certain protected kernel // functions. let memory_allocation_capability = create_capability!(capabilities::MemoryAllocationCapability); let main_loop_capability = create_capability!(capabilities::MainLoopCapability); let process_management_capability = create_capability!(capabilities::ProcessManagementCapability); // Setup the console. let console = components::console::ConsoleComponent::new( board_kernel, capsules::console::DRIVER_NUM, uart_mux, ) .finalize(()); // Create the debugger object that handles calls to `debug!()`. 
components::debug_writer::DebugWriterComponent::new(uart_mux).finalize(()); // LEDs // Clock to Port A is enabled in `set_pin_primary_functions()` let gpio_ports = &base_peripherals.gpio_ports; let led = components::led::LedsComponent::new(components::led_component_helper!( LedHigh<'static, stm32f429zi::gpio::Pin>, LedHigh::new(gpio_ports.get_pin(stm32f429zi::gpio::PinId::PB00).unwrap()), LedHigh::new(gpio_ports.get_pin(stm32f429zi::gpio::PinId::PB07).unwrap()), LedHigh::new(gpio_ports.get_pin(stm32f429zi::gpio::PinId::PB14).unwrap()), )) .finalize(components::led_component_buf!( LedHigh<'static, stm32f429zi::gpio::Pin> )); // BUTTONs let button = components::button::ButtonComponent::new( board_kernel, capsules::button::DRIVER_NUM, components::button_component_helper!( stm32f429zi::gpio::Pin, ( gpio_ports.get_pin(stm32f429zi::gpio::PinId::PC13).unwrap(), kernel::hil::gpio::ActivationMode::ActiveHigh, kernel::hil::gpio::FloatingState::PullNone ) ), ) .finalize(components::button_component_buf!(stm32f429zi::gpio::Pin)); // ALARM let tim2 = &base_peripherals.tim2; let mux_alarm = components::alarm::AlarmMuxComponent::new(tim2).finalize( components::alarm_mux_component_helper!(stm32f429zi::tim2::Tim2), ); let alarm = components::alarm::AlarmDriverComponent::new( board_kernel, capsules::alarm::DRIVER_NUM, mux_alarm, ) .finalize(components::alarm_component_helper!(stm32f429zi::tim2::Tim2)); // GPIO let gpio = GpioComponent::new( board_kernel, capsules::gpio::DRIVER_NUM, components::gpio_component_helper!( stm32f429zi::gpio::Pin, // Arduino like RX/TX 0 => gpio_ports.get_pin(PinId::PG09).unwrap(), //D0 1 => gpio_ports.pins[6][14].as_ref().unwrap(), //D1 2 => gpio_ports.pins[5][15].as_ref().unwrap(), //D2 3 => gpio_ports.pins[4][13].as_ref().unwrap(), //D3 4 => gpio_ports.pins[5][14].as_ref().unwrap(), //D4 5 => gpio_ports.pins[4][11].as_ref().unwrap(), //D5 6 => gpio_ports.pins[4][9].as_ref().unwrap(), //D6 7 => gpio_ports.pins[5][13].as_ref().unwrap(), //D7 8 => 
gpio_ports.pins[5][12].as_ref().unwrap(), //D8 9 => gpio_ports.pins[3][15].as_ref().unwrap(), //D9 // SPI Pins 10 => gpio_ports.pins[3][14].as_ref().unwrap(), //D10 11 => gpio_ports.pins[0][7].as_ref().unwrap(), //D11 12 => gpio_ports.pins[0][6].as_ref().unwrap(), //D12 13 => gpio_ports.pins[0][5].as_ref().unwrap(), //D13 // I2C Pins 14 => gpio_ports.pins[1][9].as_ref().unwrap(), //D14 15 => gpio_ports.pins[1][8].as_ref().unwrap(), //D15 16 => gpio_ports.pins[2][6].as_ref().unwrap(), //D16 17 => gpio_ports.pins[1][15].as_ref().unwrap(), //D17 18 => gpio_ports.pins[1][13].as_ref().unwrap(), //D18 19 => gpio_ports.pins[1][12].as_ref().unwrap(), //D19 20 => gpio_ports.pins[0][15].as_ref().unwrap(), //D20 21 => gpio_ports.pins[2][7].as_ref().unwrap(), //D21 // SPI B Pins // 22 => gpio_ports.pins[1][5].as_ref().unwrap(), //D22 // 23 => gpio_ports.pins[1][3].as_ref().unwrap(), //D23 // 24 => gpio_ports.pins[0][4].as_ref().unwrap(), //D24 // 24 => gpio_ports.pins[1][4].as_ref().unwrap(), //D25 // QSPI 26 => gpio_ports.pins[1][6].as_ref().unwrap(), //D26 27 => gpio_ports.pins[1][2].as_ref().unwrap(), //D27 28 => gpio_ports.pins[3][13].as_ref().unwrap(), //D28 29 => gpio_ports.pins[3][12].as_ref().unwrap(), //D29 30 => gpio_ports.pins[3][11].as_ref().unwrap(), //D30 31 => gpio_ports.pins[4][2].as_ref().unwrap(), //D31 // Timer Pins 32 => gpio_ports.pins[0][0].as_ref().unwrap(), //D32 33 => gpio_ports.pins[1][0].as_ref().unwrap(), //D33 34 => gpio_ports.pins[4][0].as_ref().unwrap(), //D34 35 => gpio_ports.pins[1][11].as_ref().unwrap(), //D35 36 => gpio_ports.pins[1][10].as_ref().unwrap(), //D36 37 => gpio_ports.pins[4][15].as_ref().unwrap(), //D37 38 => gpio_ports.pins[4][14].as_ref().unwrap(), //D38 39 => gpio_ports.pins[4][12].as_ref().unwrap(), //D39 40 => gpio_ports.pins[4][10].as_ref().unwrap(), //D40 41 => gpio_ports.pins[4][7].as_ref().unwrap(), //D41 42 => gpio_ports.pins[4][8].as_ref().unwrap(), //D42 // SDMMC 43 => gpio_ports.pins[2][8].as_ref().unwrap(), //D43 44 
=> gpio_ports.pins[2][9].as_ref().unwrap(), //D44 45 => gpio_ports.pins[2][10].as_ref().unwrap(), //D45 46 => gpio_ports.pins[2][11].as_ref().unwrap(), //D46 47 => gpio_ports.pins[2][12].as_ref().unwrap(), //D47 48 => gpio_ports.pins[3][2].as_ref().unwrap(), //D48 49 => gpio_ports.pins[6][2].as_ref().unwrap(), //D49 50 => gpio_ports.pins[6][3].as_ref().unwrap(), //D50 // USART 51 => gpio_ports.pins[3][7].as_ref().unwrap(), //D51 52 => gpio_ports.pins[3][6].as_ref().unwrap(), //D52 53 => gpio_ports.pins[3][5].as_ref().unwrap(), //D53 54 => gpio_ports.pins[3][4].as_ref().unwrap(), //D54 55 => gpio_ports.pins[3][3].as_ref().unwrap(), //D55 56 => gpio_ports.pins[4][2].as_ref().unwrap(), //D56 57 => gpio_ports.pins[4][4].as_ref().unwrap(), //D57 58 => gpio_ports.pins[4][5].as_ref().unwrap(), //D58 59 => gpio_ports.pins[4][6].as_ref().unwrap(), //D59 60 => gpio_ports.pins[4][3].as_ref().unwrap(), //D60 61 => gpio_ports.pins[5][8].as_ref().unwrap(), //D61 62 => gpio_ports.pins[5][7].as_ref().unwrap(), //D62 63 => gpio_ports.pins[5][9].as_ref().unwrap(), //D63 64 => gpio_ports.pins[6][1].as_ref().unwrap(), //D64 65 => gpio_ports.pins[6][0].as_ref().unwrap(), //D65 66 => gpio_ports.pins[3][1].as_ref().unwrap(), //D66 67 => gpio_ports.pins[3][0].as_ref().unwrap(), //D67 68 => gpio_ports.pins[5][0].as_ref().unwrap(), //D68 69 => gpio_ports.pins[5][1].as_ref().unwrap(), //D69 70 => gpio_ports.pins[5][2].as_ref().unwrap(), //D70 71 => gpio_ports.pins[0][7].as_ref().unwrap() //D71 // ADC Pins // Enable the to use the ADC pins as GPIO // 72 => gpio_ports.pins[0][3].as_ref().unwrap(), //A0 // 73 => gpio_ports.pins[2][0].as_ref().unwrap(), //A1 // 74 => gpio_ports.pins[2][3].as_ref().unwrap(), //A2 // 75 => gpio_ports.pins[5][3].as_ref().unwrap(), //A3 // 76 => gpio_ports.pins[5][5].as_ref().unwrap(), //A4 // 77 => gpio_ports.pins[5][10].as_ref().unwrap(), //A5 // 78 => gpio_ports.pins[1][1].as_ref().unwrap(), //A6 // 79 => gpio_ports.pins[2][2].as_ref().unwrap(), //A7 // 80 => 
gpio_ports.pins[5][4].as_ref().unwrap() //A8 ), ) .finalize(components::gpio_component_buf!(stm32f429zi::gpio::Pin)); // ADC let adc_mux = components::adc::AdcMuxComponent::new(&base_peripherals.adc1) .finalize(components::adc_mux_component_helper!(stm32f429zi::adc::Adc)); let temp_sensor = components::temperature_stm::TemperatureSTMComponent::new(2.5, 0.76) .finalize(components::temperaturestm_adc_component_helper!( // spi type stm32f429zi::adc::Adc, // chip select stm32f429zi::adc::Channel::Channel18, // spi mux adc_mux )); let grant_cap = create_capability!(capabilities::MemoryAllocationCapability); let grant_temperature = board_kernel.create_grant(capsules::temperature::DRIVER_NUM, &grant_cap); let temp = static_init!( capsules::temperature::TemperatureSensor<'static>, capsules::temperature::TemperatureSensor::new(temp_sensor, grant_temperature) ); kernel::hil::sensors::TemperatureDriver::set_client(temp_sensor, temp); let adc_channel_0 = components::adc::AdcComponent::new(&adc_mux, stm32f429zi::adc::Channel::Channel3) .finalize(components::adc_component_helper!(stm32f429zi::adc::Adc)); let adc_channel_1 = components::adc::AdcComponent::new(&adc_mux, stm32f429zi::adc::Channel::Channel10) .finalize(components::adc_component_helper!(stm32f429zi::adc::Adc)); let adc_channel_2 = components::adc::AdcComponent::new(&adc_mux, stm32f429zi::adc::Channel::Channel13) .finalize(components::adc_component_helper!(stm32f429zi::adc::Adc)); let adc_channel_3 = components::adc::AdcComponent::new(&adc_mux, stm32f429zi::adc::Channel::Channel9) .finalize(components::adc_component_helper!(stm32f429zi::adc::Adc)); let adc_channel_4 = components::adc::AdcComponent::new(&adc_mux, stm32f429zi::adc::Channel::Channel15) .finalize(components::adc_component_helper!(stm32f429zi::adc::Adc)); let adc_channel_5 = components::adc::AdcComponent::new(&adc_mux, stm32f429zi::adc::Channel::Channel8) .finalize(components::adc_component_helper!(stm32f429zi::adc::Adc)); let adc_syscall = 
components::adc::AdcVirtualComponent::new(board_kernel, capsules::adc::DRIVER_NUM) .finalize(components::adc_syscall_component_helper!( adc_channel_0, adc_channel_1, adc_channel_2, adc_channel_3, adc_channel_4, adc_channel_5 )); // PROCESS CONSOLE let process_console = components::process_console::ProcessConsoleComponent::new( board_kernel, uart_mux, mux_alarm, ) .finalize(components::process_console_component_helper!( stm32f429zi::tim2::Tim2 )); let _ = process_console.start(); let scheduler = components::sched::round_robin::RoundRobinComponent::new(&PROCESSES) .finalize(components::rr_component_helper!(NUM_PROCS)); let nucleo_f429zi = NucleoF429ZI { console: console, ipc: kernel::ipc::IPC::new( board_kernel, kernel::ipc::DRIVER_NUM, &memory_allocation_capability, ), adc: adc_syscall, led: led, temperature: temp, button: button, alarm: alarm, gpio: gpio, scheduler, systick: cortexm4::systick::SysTick::new(), }; // // Optional kernel tests // // // // See comment in `boards/imix/src/main.rs` // virtual_uart_rx_test::run_virtual_uart_receive(mux_uart); debug!("Initialization complete. Entering main loop"); /// These symbols are defined in the linker script. extern "C" { /// Beginning of the ROM region containing app images. static _sapps: u8; /// End of the ROM region containing app images. static _eapps: u8; /// Beginning of the RAM region for app memory. static mut _sappmem: u8; /// End of the RAM region for app memory. 
static _eappmem: u8; } kernel::process::load_processes( board_kernel, chip, core::slice::from_raw_parts( &_sapps as *const u8, &_eapps as *const u8 as usize - &_sapps as *const u8 as usize, ), core::slice::from_raw_parts_mut( &mut _sappmem as *mut u8, &_eappmem as *const u8 as usize - &_sappmem as *const u8 as usize, ), &mut PROCESSES, &FAULT_RESPONSE, &process_management_capability, ) .unwrap_or_else(|err| { debug!("Error loading processes!"); debug!("{:?}", err); }); //Uncomment to run multi alarm test /*components::test::multi_alarm_test::MultiAlarmTestComponent::new(mux_alarm) .finalize(components::multi_alarm_test_component_buf!(stm32f429zi::tim2::Tim2)) .run();*/ board_kernel.kernel_loop( &nucleo_f429zi, chip, Some(&nucleo_f429zi.ipc), &main_loop_capability, ); }
38.195276
100
0.624845
2f20ecf4550fa2320be48dd87c357d3e310f3a52
13,763
//! Anchor ⚓ is a framework for Solana's Sealevel runtime providing several //! convenient developer tools. //! //! - Rust eDSL for writing safe, secure, and high level Solana programs //! - [IDL](https://en.wikipedia.org/wiki/Interface_description_language) specification //! - TypeScript package for generating clients from IDL //! - CLI and workspace management for developing complete applications //! //! If you're familiar with developing in Ethereum's //! [Solidity](https://docs.soliditylang.org/en/v0.7.4/), //! [Truffle](https://www.trufflesuite.com/), //! [web3.js](https://github.com/ethereum/web3.js) or Parity's //! [Ink!](https://github.com/paritytech/ink), then the experience will be //! familiar. Although the syntax and semantics are targeted at Solana, the high //! level workflow of writing RPC request handlers, emitting an IDL, and //! generating clients from IDL is the same. //! //! For detailed tutorials and examples on how to use Anchor, see the guided //! [tutorials](https://project-serum.github.io/anchor) or examples in the GitHub //! [repository](https://github.com/project-serum/anchor). //! //! Presented here are the Rust primitives for building on Solana. 
extern crate self as anchor_lang; use bytemuck::{Pod, Zeroable}; use solana_program::account_info::AccountInfo; use solana_program::entrypoint::ProgramResult; use solana_program::instruction::AccountMeta; use solana_program::program_error::ProgramError; use solana_program::pubkey::Pubkey; use std::io::Write; mod account_meta; mod accounts; mod bpf_upgradeable_state; mod common; mod context; mod ctor; mod error; #[doc(hidden)] pub mod idl; mod system_program; pub use crate::accounts::account::Account; #[doc(hidden)] #[allow(deprecated)] pub use crate::accounts::cpi_account::CpiAccount; #[doc(hidden)] #[allow(deprecated)] pub use crate::accounts::cpi_state::CpiState; #[allow(deprecated)] pub use crate::accounts::loader::Loader; pub use crate::accounts::loader_account::AccountLoader; pub use crate::accounts::program::Program; #[doc(hidden)] #[allow(deprecated)] pub use crate::accounts::program_account::ProgramAccount; pub use crate::accounts::signer::Signer; #[doc(hidden)] #[allow(deprecated)] pub use crate::accounts::state::ProgramState; pub use crate::accounts::system_account::SystemAccount; pub use crate::accounts::sysvar::Sysvar; pub use crate::accounts::unchecked_account::UncheckedAccount; pub use crate::system_program::System; mod vec; pub use crate::bpf_upgradeable_state::*; #[doc(hidden)] #[allow(deprecated)] pub use crate::context::CpiStateContext; pub use crate::context::{Context, CpiContext}; pub use anchor_attribute_access_control::access_control; pub use anchor_attribute_account::{account, declare_id, zero_copy}; pub use anchor_attribute_constant::constant; pub use anchor_attribute_error::error; pub use anchor_attribute_event::{emit, event}; pub use anchor_attribute_interface::interface; pub use anchor_attribute_program::program; pub use anchor_attribute_state::state; pub use anchor_derive_accounts::Accounts; /// Borsh is the default serialization format for instructions and accounts. 
pub use borsh::{BorshDeserialize as AnchorDeserialize, BorshSerialize as AnchorSerialize}; pub use solana_program; /// A data structure of validated accounts that can be deserialized from the /// input to a Solana program. Implementations of this trait should perform any /// and all requisite constraint checks on accounts to ensure the accounts /// maintain any invariants required for the program to run securely. In most /// cases, it's recommended to use the [`Accounts`](./derive.Accounts.html) /// derive macro to implement this trait. pub trait Accounts<'info>: ToAccountMetas + ToAccountInfos<'info> + Sized { /// Returns the validated accounts struct. What constitutes "valid" is /// program dependent. However, users of these types should never have to /// worry about account substitution attacks. For example, if a program /// expects a `Mint` account from the SPL token program in a particular /// field, then it should be impossible for this method to return `Ok` if /// any other account type is given--from the SPL token program or elsewhere. /// /// `program_id` is the currently executing program. `accounts` is the /// set of accounts to construct the type from. For every account used, /// the implementation should mutate the slice, consuming the used entry /// so that it cannot be used again. fn try_accounts( program_id: &Pubkey, accounts: &mut &[AccountInfo<'info>], ix_data: &[u8], ) -> Result<Self, ProgramError>; } /// The exit procedure for an account. Any cleanup or persistence to storage /// should be done here. pub trait AccountsExit<'info>: ToAccountMetas + ToAccountInfos<'info> { /// `program_id` is the currently executing program. fn exit(&self, program_id: &Pubkey) -> ProgramResult; } /// The close procedure to initiate garabage collection of an account, allowing /// one to retrieve the rent exemption. 
pub trait AccountsClose<'info>: ToAccountInfos<'info> { fn close(&self, sol_destination: AccountInfo<'info>) -> ProgramResult; } /// Transformation to /// [`AccountMeta`](../solana_program/instruction/struct.AccountMeta.html) /// structs. pub trait ToAccountMetas { /// `is_signer` is given as an optional override for the signer meta field. /// This covers the edge case when a program-derived-address needs to relay /// a transaction from a client to another program but sign the transaction /// before the relay. The client cannot mark the field as a signer, and so /// we have to override the is_signer meta field given by the client. fn to_account_metas(&self, is_signer: Option<bool>) -> Vec<AccountMeta>; } /// Transformation to /// [`AccountInfo`](../solana_program/account_info/struct.AccountInfo.html) /// structs. pub trait ToAccountInfos<'info> { fn to_account_infos(&self) -> Vec<AccountInfo<'info>>; } /// Transformation to an `AccountInfo` struct. pub trait ToAccountInfo<'info> { fn to_account_info(&self) -> AccountInfo<'info>; } /// A data structure that can be serialized and stored into account storage, /// i.e. an /// [`AccountInfo`](../solana_program/account_info/struct.AccountInfo.html#structfield.data)'s /// mutable data slice. /// /// Implementors of this trait should ensure that any subsequent usage of the /// `AccountDeserialize` trait succeeds if and only if the account is of the /// correct type. /// /// In most cases, one can use the default implementation provided by the /// [`#[account]`](./attr.account.html) attribute. pub trait AccountSerialize { /// Serializes the account data into `writer`. fn try_serialize<W: Write>(&self, writer: &mut W) -> Result<(), ProgramError>; } /// A data structure that can be deserialized and stored into account storage, /// i.e. an /// [`AccountInfo`](../solana_program/account_info/struct.AccountInfo.html#structfield.data)'s /// mutable data slice. 
pub trait AccountDeserialize: Sized { /// Deserializes previously initialized account data. Should fail for all /// uninitialized accounts, where the bytes are zeroed. Implementations /// should be unique to a particular account type so that one can never /// successfully deserialize the data of one account type into another. /// For example, if the SPL token program were to implement this trait, /// it should be impossible to deserialize a `Mint` account into a token /// `Account`. fn try_deserialize(buf: &mut &[u8]) -> Result<Self, ProgramError>; /// Deserializes account data without checking the account discriminator. /// This should only be used on account initialization, when the bytes of /// the account are zeroed. fn try_deserialize_unchecked(buf: &mut &[u8]) -> Result<Self, ProgramError>; } /// An account data structure capable of zero copy deserialization. pub trait ZeroCopy: Discriminator + Copy + Clone + Zeroable + Pod {} /// Calculates the data for an instruction invocation, where the data is /// `Sha256(<namespace>::<method_name>)[..8] || BorshSerialize(args)`. /// `args` is a borsh serialized struct of named fields for each argument given /// to an instruction. pub trait InstructionData: AnchorSerialize { fn data(&self) -> Vec<u8>; } /// An event that can be emitted via a Solana log. pub trait Event: AnchorSerialize + AnchorDeserialize + Discriminator { fn data(&self) -> Vec<u8>; } // The serialized event data to be emitted via a Solana log. // TODO: remove this on the next major version upgrade. #[doc(hidden)] #[deprecated(since = "0.4.2", note = "Please use Event instead")] pub trait EventData: AnchorSerialize + Discriminator { fn data(&self) -> Vec<u8>; } /// 8 byte unique identifier for a type. pub trait Discriminator { fn discriminator() -> [u8; 8]; } /// Bump seed for program derived addresses. pub trait Bump { fn seed(&self) -> u8; } /// Defines an address expected to own an account. 
pub trait Owner { fn owner() -> Pubkey; } /// Defines the id of a program. pub trait Id { fn id() -> Pubkey; } /// Defines the Pubkey of an account. pub trait Key { fn key(&self) -> Pubkey; } impl Key for Pubkey { fn key(&self) -> Pubkey { *self } } impl ToAccountMetas for Pubkey { fn to_account_metas(&self, is_signer: Option<bool>) -> Vec<AccountMeta> { vec![ anchor_lang::solana_program::instruction::AccountMeta::new_readonly( self.clone(), is_signer.unwrap_or(false), ), ] } } /// The prelude contains all commonly used components of the crate. /// All programs should include it via `anchor_lang::prelude::*;`. pub mod prelude { pub use super::{ access_control, account, constant, declare_id, emit, error, event, interface, program, require, solana_program::bpf_loader_upgradeable::UpgradeableLoaderState, state, zero_copy, Account, AccountDeserialize, AccountLoader, AccountSerialize, Accounts, AccountsExit, AnchorDeserialize, AnchorSerialize, Context, CpiContext, Id, Key, Owner, Program, ProgramData, Signer, System, SystemAccount, Sysvar, ToAccountInfo, ToAccountInfos, ToAccountMetas, UncheckedAccount, }; #[allow(deprecated)] pub use super::{ accounts::cpi_account::CpiAccount, accounts::cpi_state::CpiState, accounts::loader::Loader, accounts::program_account::ProgramAccount, accounts::state::ProgramState, CpiStateContext, }; pub use borsh; pub use solana_program::account_info::{next_account_info, AccountInfo}; pub use solana_program::entrypoint::ProgramResult; pub use solana_program::instruction::AccountMeta; pub use solana_program::msg; pub use solana_program::program_error::ProgramError; pub use solana_program::pubkey::Pubkey; pub use solana_program::sysvar::clock::Clock; pub use solana_program::sysvar::epoch_schedule::EpochSchedule; pub use solana_program::sysvar::fees::Fees; pub use solana_program::sysvar::instructions::Instructions; pub use solana_program::sysvar::recent_blockhashes::RecentBlockhashes; pub use solana_program::sysvar::rent::Rent; pub use 
solana_program::sysvar::rewards::Rewards; pub use solana_program::sysvar::slot_hashes::SlotHashes; pub use solana_program::sysvar::slot_history::SlotHistory; pub use solana_program::sysvar::stake_history::StakeHistory; pub use solana_program::sysvar::Sysvar as SolanaSysvar; pub use thiserror; } // Internal module used by macros and unstable apis. #[doc(hidden)] pub mod __private { use solana_program::program_error::ProgramError; use solana_program::pubkey::Pubkey; pub use crate::ctor::Ctor; pub use crate::error::{Error, ErrorCode}; pub use anchor_attribute_account::ZeroCopyAccessor; pub use anchor_attribute_event::EventIndex; pub use base64; pub use bytemuck; pub mod state { pub use crate::accounts::state::*; } // The starting point for user defined error codes. pub const ERROR_CODE_OFFSET: u32 = 6000; // Calculates the size of an account, which may be larger than the deserialized // data in it. This trait is currently only used for `#[state]` accounts. #[doc(hidden)] pub trait AccountSize { fn size(&self) -> Result<u64, ProgramError>; } // Very experimental trait. pub trait ZeroCopyAccessor<Ty> { fn get(&self) -> Ty; fn set(input: &Ty) -> Self; } impl ZeroCopyAccessor<Pubkey> for [u8; 32] { fn get(&self) -> Pubkey { Pubkey::new(self) } fn set(input: &Pubkey) -> [u8; 32] { input.to_bytes() } } pub use crate::accounts::state::PROGRAM_STATE_SEED; pub const CLOSED_ACCOUNT_DISCRIMINATOR: [u8; 8] = [255, 255, 255, 255, 255, 255, 255, 255]; } /// Ensures a condition is true, otherwise returns the given error. /// Use this with a custom error type. /// /// # Example /// /// After defining an `ErrorCode` /// /// ```ignore /// #[error] /// pub struct ErrorCode { /// InvalidArgument, /// } /// ``` /// /// One can write a `require` assertion as /// /// ```ignore /// require!(condition, InvalidArgument); /// ``` /// /// which would exit the program with the `InvalidArgument` error code if /// `condition` is false. #[macro_export] macro_rules! 
require { ($invariant:expr, $error:tt $(,)?) => { if !($invariant) { return Err(crate::ErrorCode::$error.into()); } }; ($invariant:expr, $error:expr $(,)?) => { if !($invariant) { return Err($error.into()); } }; }
37.197297
99
0.707331
d77e54ebe4a00dad93875e6fdbe22af3183bec95
19,834
/*! The types module provides a way of associating globs on file names to file types. This can be used to match specific types of files. For example, among the default file types provided, the Rust file type is defined to be `*.rs` with name `rust`. Similarly, the C file type is defined to be `*.{c,h}` with name `c`. Note that the set of default types may change over time. # Example This shows how to create and use a simple file type matcher using the default file types defined in this crate. ``` use ignore::types::TypesBuilder; let mut builder = TypesBuilder::new(); builder.add_defaults(); builder.select("rust"); let matcher = builder.build().unwrap(); assert!(matcher.matched("foo.rs", false).is_whitelist()); assert!(matcher.matched("foo.c", false).is_ignore()); ``` # Example: negation This is like the previous example, but shows how negating a file type works. That is, this will let us match file paths that *don't* correspond to a particular file type. ``` use ignore::types::TypesBuilder; let mut builder = TypesBuilder::new(); builder.add_defaults(); builder.negate("c"); let matcher = builder.build().unwrap(); assert!(matcher.matched("foo.rs", false).is_none()); assert!(matcher.matched("foo.c", false).is_ignore()); ``` # Example: custom file type definitions This shows how to extend this library default file type definitions with your own. ``` use ignore::types::TypesBuilder; let mut builder = TypesBuilder::new(); builder.add_defaults(); builder.add("foo", "*.foo"); // Another way of adding a file type definition. // This is useful when accepting input from an end user. builder.add_def("bar:*.bar"); // Note: we only select `foo`, not `bar`. builder.select("foo"); let matcher = builder.build().unwrap(); assert!(matcher.matched("x.foo", false).is_whitelist()); // This is ignored because we only selected the `foo` file type. assert!(matcher.matched("x.bar", false).is_ignore()); ``` We can also add file type definitions based on other definitions. 
``` use ignore::types::TypesBuilder; let mut builder = TypesBuilder::new(); builder.add_defaults(); builder.add("foo", "*.foo"); builder.add_def("bar:include:foo,cpp"); builder.select("bar"); let matcher = builder.build().unwrap(); assert!(matcher.matched("x.foo", false).is_whitelist()); assert!(matcher.matched("y.cpp", false).is_whitelist()); ``` */ use std::cell::RefCell; use std::collections::HashMap; use std::path::Path; use std::sync::Arc; use globset::{GlobBuilder, GlobSet, GlobSetBuilder}; use regex::Regex; use thread_local::ThreadLocal; use default_types::DEFAULT_TYPES; use pathutil::file_name; use {Error, Match}; /// Glob represents a single glob in a set of file type definitions. /// /// There may be more than one glob for a particular file type. /// /// This is used to report information about the highest precedent glob /// that matched. /// /// Note that not all matches necessarily correspond to a specific glob. /// For example, if there are one or more selections and a file path doesn't /// match any of those selections, then the file path is considered to be /// ignored. /// /// The lifetime `'a` refers to the lifetime of the underlying file type /// definition, which corresponds to the lifetime of the file type matcher. #[derive(Clone, Debug)] pub struct Glob<'a>(GlobInner<'a>); #[derive(Clone, Debug)] enum GlobInner<'a> { /// No glob matched, but the file path should still be ignored. UnmatchedIgnore, /// A glob matched. Matched { /// The file type definition which provided the glob. def: &'a FileTypeDef, /// The index of the glob that matched inside the file type definition. which: usize, /// Whether the selection was negated or not. negated: bool, }, } impl<'a> Glob<'a> { fn unmatched() -> Glob<'a> { Glob(GlobInner::UnmatchedIgnore) } /// Return the file type defintion that matched, if one exists. A file type /// definition always exists when a specific definition matches a file /// path. 
pub fn file_type_def(&self) -> Option<&FileTypeDef> { match self { Glob(GlobInner::UnmatchedIgnore) => None, Glob(GlobInner::Matched { def, .. }) => Some(def), } } } /// A single file type definition. /// /// File type definitions can be retrieved in aggregate from a file type /// matcher. File type definitions are also reported when its responsible /// for a match. #[derive(Clone, Debug, Eq, PartialEq)] pub struct FileTypeDef { name: String, globs: Vec<String>, } impl FileTypeDef { /// Return the name of this file type. pub fn name(&self) -> &str { &self.name } /// Return the globs used to recognize this file type. pub fn globs(&self) -> &[String] { &self.globs } } /// Types is a file type matcher. #[derive(Clone, Debug)] pub struct Types { /// All of the file type definitions, sorted lexicographically by name. defs: Vec<FileTypeDef>, /// All of the selections made by the user. selections: Vec<Selection<FileTypeDef>>, /// Whether there is at least one Selection::Select in our selections. /// When this is true, a Match::None is converted to Match::Ignore. has_selected: bool, /// A mapping from glob index in the set to two indices. The first is an /// index into `selections` and the second is an index into the /// corresponding file type definition's list of globs. glob_to_selection: Vec<(usize, usize)>, /// The set of all glob selections, used for actual matching. set: GlobSet, /// Temporary storage for globs that match. matches: Arc<ThreadLocal<RefCell<Vec<usize>>>>, } /// Indicates the type of a selection for a particular file type. #[derive(Clone, Debug)] enum Selection<T> { Select(String, T), Negate(String, T), } impl<T> Selection<T> { fn is_negated(&self) -> bool { match *self { Selection::Select(..) => false, Selection::Negate(..) 
=> true, } } fn name(&self) -> &str { match *self { Selection::Select(ref name, _) => name, Selection::Negate(ref name, _) => name, } } fn map<U, F: FnOnce(T) -> U>(self, f: F) -> Selection<U> { match self { Selection::Select(name, inner) => { Selection::Select(name, f(inner)) } Selection::Negate(name, inner) => { Selection::Negate(name, f(inner)) } } } fn inner(&self) -> &T { match *self { Selection::Select(_, ref inner) => inner, Selection::Negate(_, ref inner) => inner, } } } impl Types { /// Creates a new file type matcher that never matches any path and /// contains no file type definitions. pub fn empty() -> Types { Types { defs: vec![], selections: vec![], has_selected: false, glob_to_selection: vec![], set: GlobSetBuilder::new().build().unwrap(), matches: Arc::new(ThreadLocal::default()), } } /// Returns true if and only if this matcher has zero selections. pub fn is_empty(&self) -> bool { self.selections.is_empty() } /// Returns the number of selections used in this matcher. pub fn len(&self) -> usize { self.selections.len() } /// Return the set of current file type definitions. /// /// Definitions and globs are sorted. pub fn definitions(&self) -> &[FileTypeDef] { &self.defs } /// Returns a match for the given path against this file type matcher. /// /// The path is considered whitelisted if it matches a selected file type. /// The path is considered ignored if it matches a negated file type. /// If at least one file type is selected and `path` doesn't match, then /// the path is also considered ignored. pub fn matched<'a, P: AsRef<Path>>( &'a self, path: P, is_dir: bool, ) -> Match<Glob<'a>> { // File types don't apply to directories, and we can't do anything // if our glob set is empty. if is_dir || self.set.is_empty() { return Match::None; } // We only want to match against the file name, so extract it. // If one doesn't exist, then we can't match it. 
let name = match file_name(path.as_ref()) { Some(name) => name, None if self.has_selected => { return Match::Ignore(Glob::unmatched()); } None => { return Match::None; } }; let mut matches = self.matches.get_or_default().borrow_mut(); self.set.matches_into(name, &mut *matches); // The highest precedent match is the last one. if let Some(&i) = matches.last() { let (isel, iglob) = self.glob_to_selection[i]; let sel = &self.selections[isel]; let glob = Glob(GlobInner::Matched { def: sel.inner(), which: iglob, negated: sel.is_negated(), }); return if sel.is_negated() { Match::Ignore(glob) } else { Match::Whitelist(glob) }; } if self.has_selected { Match::Ignore(Glob::unmatched()) } else { Match::None } } } /// TypesBuilder builds a type matcher from a set of file type definitions and /// a set of file type selections. pub struct TypesBuilder { types: HashMap<String, FileTypeDef>, selections: Vec<Selection<()>>, } impl TypesBuilder { /// Create a new builder for a file type matcher. /// /// The builder contains *no* type definitions to start with. A set /// of default type definitions can be added with `add_defaults`, and /// additional type definitions can be added with `select` and `negate`. pub fn new() -> TypesBuilder { TypesBuilder { types: HashMap::new(), selections: vec![] } } /// Build the current set of file type definitions *and* selections into /// a file type matcher. 
pub fn build(&self) -> Result<Types, Error> { let defs = self.definitions(); let has_selected = self.selections.iter().any(|s| !s.is_negated()); let mut selections = vec![]; let mut glob_to_selection = vec![]; let mut build_set = GlobSetBuilder::new(); for (isel, selection) in self.selections.iter().enumerate() { let def = match self.types.get(selection.name()) { Some(def) => def.clone(), None => { let name = selection.name().to_string(); return Err(Error::UnrecognizedFileType(name)); } }; for (iglob, glob) in def.globs.iter().enumerate() { build_set.add( GlobBuilder::new(glob) .literal_separator(true) .build() .map_err(|err| Error::Glob { glob: Some(glob.to_string()), err: err.kind().to_string(), })?, ); glob_to_selection.push((isel, iglob)); } selections.push(selection.clone().map(move |_| def)); } let set = build_set .build() .map_err(|err| Error::Glob { glob: None, err: err.to_string() })?; Ok(Types { defs: defs, selections: selections, has_selected: has_selected, glob_to_selection: glob_to_selection, set: set, matches: Arc::new(ThreadLocal::default()), }) } /// Return the set of current file type definitions. /// /// Definitions and globs are sorted. pub fn definitions(&self) -> Vec<FileTypeDef> { let mut defs = vec![]; for def in self.types.values() { let mut def = def.clone(); def.globs.sort(); defs.push(def); } defs.sort_by(|def1, def2| def1.name().cmp(def2.name())); defs } /// Select the file type given by `name`. /// /// If `name` is `all`, then all file types currently defined are selected. pub fn select(&mut self, name: &str) -> &mut TypesBuilder { if name == "all" { for name in self.types.keys() { self.selections.push(Selection::Select(name.to_string(), ())); } } else { self.selections.push(Selection::Select(name.to_string(), ())); } self } /// Ignore the file type given by `name`. /// /// If `name` is `all`, then all file types currently defined are negated. 
pub fn negate(&mut self, name: &str) -> &mut TypesBuilder { if name == "all" { for name in self.types.keys() { self.selections.push(Selection::Negate(name.to_string(), ())); } } else { self.selections.push(Selection::Negate(name.to_string(), ())); } self } /// Clear any file type definitions for the type name given. pub fn clear(&mut self, name: &str) -> &mut TypesBuilder { self.types.remove(name); self } /// Add a new file type definition. `name` can be arbitrary and `pat` /// should be a glob recognizing file paths belonging to the `name` type. /// /// If `name` is `all` or otherwise contains any character that is not a /// Unicode letter or number, then an error is returned. pub fn add(&mut self, name: &str, glob: &str) -> Result<(), Error> { lazy_static! { static ref RE: Regex = Regex::new(r"^[\pL\pN]+$").unwrap(); }; if name == "all" || !RE.is_match(name) { return Err(Error::InvalidDefinition); } let (key, glob) = (name.to_string(), glob.to_string()); self.types .entry(key) .or_insert_with(|| FileTypeDef { name: name.to_string(), globs: vec![], }) .globs .push(glob); Ok(()) } /// Add a new file type definition specified in string form. There are two /// valid formats: /// 1. `{name}:{glob}`. This defines a 'root' definition that associates the /// given name with the given glob. /// 2. `{name}:include:{comma-separated list of already defined names}. /// This defines an 'include' definition that associates the given name /// with the definitions of the given existing types. /// Names may not include any characters that are not /// Unicode letters or numbers. 
pub fn add_def(&mut self, def: &str) -> Result<(), Error> { let parts: Vec<&str> = def.split(':').collect(); match parts.len() { 2 => { let name = parts[0]; let glob = parts[1]; if name.is_empty() || glob.is_empty() { return Err(Error::InvalidDefinition); } self.add(name, glob) } 3 => { let name = parts[0]; let types_string = parts[2]; if name.is_empty() || parts[1] != "include" || types_string.is_empty() { return Err(Error::InvalidDefinition); } let types = types_string.split(','); // Check ahead of time to ensure that all types specified are // present and fail fast if not. if types.clone().any(|t| !self.types.contains_key(t)) { return Err(Error::InvalidDefinition); } for type_name in types { let globs = self.types.get(type_name).unwrap().globs.clone(); for glob in globs { self.add(name, &glob)?; } } Ok(()) } _ => Err(Error::InvalidDefinition), } } /// Add a set of default file type definitions. pub fn add_defaults(&mut self) -> &mut TypesBuilder { static MSG: &'static str = "adding a default type should never fail"; for &(name, exts) in DEFAULT_TYPES { for ext in exts { self.add(name, ext).expect(MSG); } } self } } #[cfg(test)] mod tests { use super::TypesBuilder; macro_rules! 
matched { ($name:ident, $types:expr, $sel:expr, $selnot:expr, $path:expr) => { matched!($name, $types, $sel, $selnot, $path, true); }; (not, $name:ident, $types:expr, $sel:expr, $selnot:expr, $path:expr) => { matched!($name, $types, $sel, $selnot, $path, false); }; ($name:ident, $types:expr, $sel:expr, $selnot:expr, $path:expr, $matched:expr) => { #[test] fn $name() { let mut btypes = TypesBuilder::new(); for tydef in $types { btypes.add_def(tydef).unwrap(); } for sel in $sel { btypes.select(sel); } for selnot in $selnot { btypes.negate(selnot); } let types = btypes.build().unwrap(); let mat = types.matched($path, false); assert_eq!($matched, !mat.is_ignore()); } }; } fn types() -> Vec<&'static str> { vec![ "html:*.html", "html:*.htm", "rust:*.rs", "js:*.js", "foo:*.{rs,foo}", "combo:include:html,rust", ] } matched!(match1, types(), vec!["rust"], vec![], "lib.rs"); matched!(match2, types(), vec!["html"], vec![], "index.html"); matched!(match3, types(), vec!["html"], vec![], "index.htm"); matched!(match4, types(), vec!["html", "rust"], vec![], "main.rs"); matched!(match5, types(), vec![], vec![], "index.html"); matched!(match6, types(), vec![], vec!["rust"], "index.html"); matched!(match7, types(), vec!["foo"], vec!["rust"], "main.foo"); matched!(match8, types(), vec!["combo"], vec![], "index.html"); matched!(match9, types(), vec!["combo"], vec![], "lib.rs"); matched!(not, matchnot1, types(), vec!["rust"], vec![], "index.html"); matched!(not, matchnot2, types(), vec![], vec!["rust"], "main.rs"); matched!(not, matchnot3, types(), vec!["foo"], vec!["rust"], "main.rs"); matched!(not, matchnot4, types(), vec!["rust"], vec!["foo"], "main.rs"); matched!(not, matchnot5, types(), vec!["rust"], vec!["foo"], "main.foo"); matched!(not, matchnot6, types(), vec!["combo"], vec![], "leftpad.js"); #[test] fn test_invalid_defs() { let mut btypes = TypesBuilder::new(); for tydef in types() { btypes.add_def(tydef).unwrap(); } // Preserve the original definitions for later comparison. 
let original_defs = btypes.definitions(); let bad_defs = vec![ // Reference to type that does not exist "combo:include:html,python", // Bad format "combo:foobar:html,rust", "", ]; for def in bad_defs { assert!(btypes.add_def(def).is_err()); // Ensure that nothing changed, even if some of the includes were valid. assert_eq!(btypes.definitions(), original_defs); } } }
33.503378
84
0.559746
3abe570336cf10ad940cb192e301a17eb85422c6
19,482
//! This module specifies the input to rust-analyzer. In some sense, this is //! **the** most important module, because all other fancy stuff is strictly //! derived from this input. //! //! Note that neither this module, nor any other part of the analyzer's core do //! actual IO. See `vfs` and `project_model` in the `rust-analyzer` crate for how //! actual IO is done and lowered to input. use std::{fmt, iter::FromIterator, ops, panic::RefUnwindSafe, str::FromStr, sync::Arc}; use cfg::CfgOptions; use rustc_hash::{FxHashMap, FxHashSet}; use syntax::SmolStr; use tt::{ExpansionError, Subtree}; use vfs::{file_set::FileSet, FileId, VfsPath}; /// Files are grouped into source roots. A source root is a directory on the /// file systems which is watched for changes. Typically it corresponds to a /// Rust crate. Source roots *might* be nested: in this case, a file belongs to /// the nearest enclosing source root. Paths to files are always relative to a /// source root, and the analyzer does not know the root path of the source root at /// all. So, a file from one source root can't refer to a file in another source /// root by path. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)] pub struct SourceRootId(pub u32); #[derive(Clone, Debug, PartialEq, Eq)] pub struct SourceRoot { /// Sysroot or crates.io library. 
/// /// Libraries are considered mostly immutable, this assumption is used to /// optimize salsa's query structure pub is_library: bool, pub(crate) file_set: FileSet, } impl SourceRoot { pub fn new_local(file_set: FileSet) -> SourceRoot { SourceRoot { is_library: false, file_set } } pub fn new_library(file_set: FileSet) -> SourceRoot { SourceRoot { is_library: true, file_set } } pub fn path_for_file(&self, file: &FileId) -> Option<&VfsPath> { self.file_set.path_for_file(file) } pub fn file_for_path(&self, path: &VfsPath) -> Option<&FileId> { self.file_set.file_for_path(path) } pub fn iter(&self) -> impl Iterator<Item = FileId> + '_ { self.file_set.iter() } } /// `CrateGraph` is a bit of information which turns a set of text files into a /// number of Rust crates. /// /// Each crate is defined by the `FileId` of its root module, the set of enabled /// `cfg` flags and the set of dependencies. /// /// Note that, due to cfg's, there might be several crates for a single `FileId`! /// /// For the purposes of analysis, a crate does not have a name. Instead, names /// are specified on dependency edges. That is, a crate might be known under /// different names in different dependent crates. /// /// Note that `CrateGraph` is build-system agnostic: it's a concept of the Rust /// language proper, not a concept of the build system. In practice, we get /// `CrateGraph` by lowering `cargo metadata` output. /// /// `CrateGraph` is `!Serialize` by design, see /// <https://github.com/rust-analyzer/rust-analyzer/blob/master/docs/dev/architecture.md#serialization> #[derive(Debug, Clone, Default /* Serialize, Deserialize */)] pub struct CrateGraph { arena: FxHashMap<CrateId, CrateData>, } #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct CrateId(pub u32); #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct CrateName(SmolStr); impl CrateName { /// Creates a crate name, checking for dashes in the string provided. 
/// Dashes are not allowed in the crate names, /// hence the input string is returned as `Err` for those cases. pub fn new(name: &str) -> Result<CrateName, &str> { if name.contains('-') { Err(name) } else { Ok(Self(SmolStr::new(name))) } } /// Creates a crate name, unconditionally replacing the dashes with underscores. pub fn normalize_dashes(name: &str) -> CrateName { Self(SmolStr::new(name.replace('-', "_"))) } } impl fmt::Display for CrateName { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.0) } } impl ops::Deref for CrateName { type Target = str; fn deref(&self) -> &str { &*self.0 } } #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct CrateDisplayName { // The name we use to display various paths (with `_`). crate_name: CrateName, // The name as specified in Cargo.toml (with `-`). canonical_name: String, } impl From<CrateName> for CrateDisplayName { fn from(crate_name: CrateName) -> CrateDisplayName { let canonical_name = crate_name.to_string(); CrateDisplayName { crate_name, canonical_name } } } impl fmt::Display for CrateDisplayName { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.crate_name) } } impl ops::Deref for CrateDisplayName { type Target = str; fn deref(&self) -> &str { &*self.crate_name } } impl CrateDisplayName { pub fn from_canonical_name(canonical_name: String) -> CrateDisplayName { let crate_name = CrateName::normalize_dashes(&canonical_name); CrateDisplayName { crate_name, canonical_name } } } #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub struct ProcMacroId(pub u32); #[derive(Copy, Clone, Eq, PartialEq, Debug, Hash)] pub enum ProcMacroKind { CustomDerive, FuncLike, Attr, } pub trait ProcMacroExpander: fmt::Debug + Send + Sync + RefUnwindSafe { fn expand( &self, subtree: &Subtree, attrs: Option<&Subtree>, env: &Env, ) -> Result<Subtree, ExpansionError>; } #[derive(Debug, Clone)] pub struct ProcMacro { pub name: SmolStr, pub kind: ProcMacroKind, pub expander: Arc<dyn 
ProcMacroExpander>, } #[derive(Debug, Clone)] pub struct CrateData { pub root_file_id: FileId, pub edition: Edition, /// A name used in the package's project declaration: for Cargo projects, /// its `[package].name` can be different for other project types or even /// absent (a dummy crate for the code snippet, for example). /// /// For purposes of analysis, crates are anonymous (only names in /// `Dependency` matters), this name should only be used for UI. pub display_name: Option<CrateDisplayName>, pub cfg_options: CfgOptions, pub potential_cfg_options: CfgOptions, pub env: Env, pub dependencies: Vec<Dependency>, pub proc_macro: Vec<ProcMacro>, } #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] pub enum Edition { Edition2015, Edition2018, Edition2021, } impl Edition { pub const CURRENT: Edition = Edition::Edition2018; } #[derive(Default, Debug, Clone, PartialEq, Eq)] pub struct Env { entries: FxHashMap<String, String>, } #[derive(Debug, Clone, PartialEq, Eq)] pub struct Dependency { pub crate_id: CrateId, pub name: CrateName, } impl CrateGraph { pub fn add_crate_root( &mut self, file_id: FileId, edition: Edition, display_name: Option<CrateDisplayName>, cfg_options: CfgOptions, potential_cfg_options: CfgOptions, env: Env, proc_macro: Vec<ProcMacro>, ) -> CrateId { let data = CrateData { root_file_id: file_id, edition, display_name, cfg_options, potential_cfg_options, env, proc_macro, dependencies: Vec::new(), }; let crate_id = CrateId(self.arena.len() as u32); let prev = self.arena.insert(crate_id, data); assert!(prev.is_none()); crate_id } pub fn add_dep( &mut self, from: CrateId, name: CrateName, to: CrateId, ) -> Result<(), CyclicDependenciesError> { let _p = profile::span("add_dep"); if self.dfs_find(from, to, &mut FxHashSet::default()) { return Err(CyclicDependenciesError { from: (from, self[from].display_name.clone()), to: (to, self[to].display_name.clone()), }); } self.arena.get_mut(&from).unwrap().add_dep(name, to); Ok(()) } pub fn 
is_empty(&self) -> bool { self.arena.is_empty() } pub fn iter(&self) -> impl Iterator<Item = CrateId> + '_ { self.arena.keys().copied() } /// Returns an iterator over all transitive dependencies of the given crate, /// including the crate itself. pub fn transitive_deps(&self, of: CrateId) -> impl Iterator<Item = CrateId> + '_ { let mut worklist = vec![of]; let mut deps = FxHashSet::default(); while let Some(krate) = worklist.pop() { if !deps.insert(krate) { continue; } worklist.extend(self[krate].dependencies.iter().map(|dep| dep.crate_id)); } deps.into_iter() } /// Returns all transitive reverse dependencies of the given crate, /// including the crate itself. pub fn transitive_rev_deps(&self, of: CrateId) -> impl Iterator<Item = CrateId> + '_ { let mut worklist = vec![of]; let mut rev_deps = FxHashSet::default(); rev_deps.insert(of); let mut inverted_graph = FxHashMap::<_, Vec<_>>::default(); self.arena.iter().for_each(|(&krate, data)| { data.dependencies .iter() .for_each(|dep| inverted_graph.entry(dep.crate_id).or_default().push(krate)) }); while let Some(krate) = worklist.pop() { if let Some(krate_rev_deps) = inverted_graph.get(&krate) { krate_rev_deps .iter() .copied() .filter(|&rev_dep| rev_deps.insert(rev_dep)) .for_each(|rev_dep| worklist.push(rev_dep)); } } rev_deps.into_iter() } /// Returns all crates in the graph, sorted in topological order (ie. dependencies of a crate /// come before the crate itself). 
pub fn crates_in_topological_order(&self) -> Vec<CrateId> { let mut res = Vec::new(); let mut visited = FxHashSet::default(); for krate in self.arena.keys().copied() { go(self, &mut visited, &mut res, krate); } return res; fn go( graph: &CrateGraph, visited: &mut FxHashSet<CrateId>, res: &mut Vec<CrateId>, source: CrateId, ) { if !visited.insert(source) { return; } for dep in graph[source].dependencies.iter() { go(graph, visited, res, dep.crate_id) } res.push(source) } } // FIXME: this only finds one crate with the given root; we could have multiple pub fn crate_id_for_crate_root(&self, file_id: FileId) -> Option<CrateId> { let (&crate_id, _) = self.arena.iter().find(|(_crate_id, data)| data.root_file_id == file_id)?; Some(crate_id) } /// Extends this crate graph by adding a complete disjoint second crate /// graph. /// /// The ids of the crates in the `other` graph are shifted by the return /// amount. pub fn extend(&mut self, other: CrateGraph) -> u32 { let start = self.arena.len() as u32; self.arena.extend(other.arena.into_iter().map(|(id, mut data)| { let new_id = id.shift(start); for dep in &mut data.dependencies { dep.crate_id = dep.crate_id.shift(start); } (new_id, data) })); start } fn dfs_find(&self, target: CrateId, from: CrateId, visited: &mut FxHashSet<CrateId>) -> bool { if !visited.insert(from) { return false; } if target == from { return true; } for dep in &self[from].dependencies { let crate_id = dep.crate_id; if self.dfs_find(target, crate_id, visited) { return true; } } false } // Work around for https://github.com/rust-analyzer/rust-analyzer/issues/6038. // As hacky as it gets. 
pub fn patch_cfg_if(&mut self) -> bool { let cfg_if = self.hacky_find_crate("cfg_if"); let std = self.hacky_find_crate("std"); match (cfg_if, std) { (Some(cfg_if), Some(std)) => { self.arena.get_mut(&cfg_if).unwrap().dependencies.clear(); self.arena .get_mut(&std) .unwrap() .dependencies .push(Dependency { crate_id: cfg_if, name: CrateName::new("cfg_if").unwrap() }); true } _ => false, } } fn hacky_find_crate(&self, display_name: &str) -> Option<CrateId> { self.iter().find(|it| self[*it].display_name.as_deref() == Some(display_name)) } } impl ops::Index<CrateId> for CrateGraph { type Output = CrateData; fn index(&self, crate_id: CrateId) -> &CrateData { &self.arena[&crate_id] } } impl CrateId { fn shift(self, amount: u32) -> CrateId { CrateId(self.0 + amount) } } impl CrateData { fn add_dep(&mut self, name: CrateName, crate_id: CrateId) { self.dependencies.push(Dependency { crate_id, name }) } } impl FromStr for Edition { type Err = ParseEditionError; fn from_str(s: &str) -> Result<Self, Self::Err> { let res = match s { "2015" => Edition::Edition2015, "2018" => Edition::Edition2018, "2021" => Edition::Edition2021, _ => return Err(ParseEditionError { invalid_input: s.to_string() }), }; Ok(res) } } impl fmt::Display for Edition { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(match self { Edition::Edition2015 => "2015", Edition::Edition2018 => "2018", Edition::Edition2021 => "2021", }) } } impl FromIterator<(String, String)> for Env { fn from_iter<T: IntoIterator<Item = (String, String)>>(iter: T) -> Self { Env { entries: FromIterator::from_iter(iter) } } } impl Env { pub fn set(&mut self, env: &str, value: String) { self.entries.insert(env.to_owned(), value); } pub fn get(&self, env: &str) -> Option<String> { self.entries.get(env).cloned() } pub fn iter(&self) -> impl Iterator<Item = (&str, &str)> { self.entries.iter().map(|(k, v)| (k.as_str(), v.as_str())) } } #[derive(Debug)] pub struct ParseEditionError { invalid_input: String, } impl 
fmt::Display for ParseEditionError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "invalid edition: {:?}", self.invalid_input) } } impl std::error::Error for ParseEditionError {} #[derive(Debug)] pub struct CyclicDependenciesError { from: (CrateId, Option<CrateDisplayName>), to: (CrateId, Option<CrateDisplayName>), } impl fmt::Display for CyclicDependenciesError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let render = |(id, name): &(CrateId, Option<CrateDisplayName>)| match name { Some(it) => format!("{}({:?})", it, id), None => format!("{:?}", id), }; write!(f, "cyclic deps: {} -> {}", render(&self.from), render(&self.to)) } } #[cfg(test)] mod tests { use super::{CfgOptions, CrateGraph, CrateName, Dependency, Edition::Edition2018, Env, FileId}; #[test] fn detect_cyclic_dependency_indirect() { let mut graph = CrateGraph::default(); let crate1 = graph.add_crate_root( FileId(1u32), Edition2018, None, CfgOptions::default(), CfgOptions::default(), Env::default(), Default::default(), ); let crate2 = graph.add_crate_root( FileId(2u32), Edition2018, None, CfgOptions::default(), CfgOptions::default(), Env::default(), Default::default(), ); let crate3 = graph.add_crate_root( FileId(3u32), Edition2018, None, CfgOptions::default(), CfgOptions::default(), Env::default(), Default::default(), ); assert!(graph.add_dep(crate1, CrateName::new("crate2").unwrap(), crate2).is_ok()); assert!(graph.add_dep(crate2, CrateName::new("crate3").unwrap(), crate3).is_ok()); assert!(graph.add_dep(crate3, CrateName::new("crate1").unwrap(), crate1).is_err()); } #[test] fn detect_cyclic_dependency_direct() { let mut graph = CrateGraph::default(); let crate1 = graph.add_crate_root( FileId(1u32), Edition2018, None, CfgOptions::default(), CfgOptions::default(), Env::default(), Default::default(), ); let crate2 = graph.add_crate_root( FileId(2u32), Edition2018, None, CfgOptions::default(), CfgOptions::default(), Env::default(), Default::default(), ); 
assert!(graph.add_dep(crate1, CrateName::new("crate2").unwrap(), crate2).is_ok()); assert!(graph.add_dep(crate2, CrateName::new("crate2").unwrap(), crate2).is_err()); } #[test] fn it_works() { let mut graph = CrateGraph::default(); let crate1 = graph.add_crate_root( FileId(1u32), Edition2018, None, CfgOptions::default(), CfgOptions::default(), Env::default(), Default::default(), ); let crate2 = graph.add_crate_root( FileId(2u32), Edition2018, None, CfgOptions::default(), CfgOptions::default(), Env::default(), Default::default(), ); let crate3 = graph.add_crate_root( FileId(3u32), Edition2018, None, CfgOptions::default(), CfgOptions::default(), Env::default(), Default::default(), ); assert!(graph.add_dep(crate1, CrateName::new("crate2").unwrap(), crate2).is_ok()); assert!(graph.add_dep(crate2, CrateName::new("crate3").unwrap(), crate3).is_ok()); } #[test] fn dashes_are_normalized() { let mut graph = CrateGraph::default(); let crate1 = graph.add_crate_root( FileId(1u32), Edition2018, None, CfgOptions::default(), CfgOptions::default(), Env::default(), Default::default(), ); let crate2 = graph.add_crate_root( FileId(2u32), Edition2018, None, CfgOptions::default(), CfgOptions::default(), Env::default(), Default::default(), ); assert!(graph .add_dep(crate1, CrateName::normalize_dashes("crate-name-with-dashes"), crate2) .is_ok()); assert_eq!( graph[crate1].dependencies, vec![Dependency { crate_id: crate2, name: CrateName::new("crate_name_with_dashes").unwrap() }] ); } }
30.972973
103
0.580125
67b816bb2b1a3ec57b23322163104d9f29eb8531
8,145
//! Types
//!
//! See: [6.7 Types](http://erlang.org/doc/apps/erts/absform.html#id88630)
use ast;
use ast::literal;
use ast::common;

// Operator nodes specialized to type expressions.
pub type UnaryOp = common::UnaryOp<Type>;
pub type BinaryOp = common::BinaryOp<Type>;

/// A type expression in the Erlang abstract format.
///
/// Every variant payload is boxed so the enum itself stays small.
#[derive(Debug,Clone)]
pub enum Type {
    Atom(Box<literal::Atom>),
    Integer(Box<literal::Integer>),
    Var(Box<common::Var>),
    Annotated(Box<Annotated>),
    UnaryOp(Box<UnaryOp>),
    BinaryOp(Box<BinaryOp>),
    BitString(Box<BitString>),
    Nil(Box<common::Nil>),
    AnyFun(Box<AnyFun>),
    Function(Box<Fun>),
    Range(Box<Range>),
    Map(Box<Map>),
    BuiltIn(Box<BuiltInType>),
    Record(Box<Record>),
    Remote(Box<RemoteType>),
    AnyTuple(Box<AnyTuple>),
    Tuple(Box<Tuple>),
    Union(Box<Union>),
    User(Box<UserType>),
}
// Let each payload type convert directly into the corresponding `Type` variant.
impl_from!(Type::Atom(literal::Atom));
impl_from!(Type::Integer(literal::Integer));
impl_from!(Type::Var(common::Var));
impl_from!(Type::Annotated(Annotated));
impl_from!(Type::UnaryOp(UnaryOp));
impl_from!(Type::BinaryOp(BinaryOp));
impl_from!(Type::BitString(BitString));
impl_from!(Type::Nil(common::Nil));
impl_from!(Type::AnyFun(AnyFun));
impl_from!(Type::Function(Fun));
impl_from!(Type::Range(Range));
impl_from!(Type::Map(Map));
impl_from!(Type::BuiltIn(BuiltInType));
impl_from!(Type::Record(Record));
impl_from!(Type::Remote(RemoteType));
impl_from!(Type::AnyTuple(AnyTuple));
impl_from!(Type::Tuple(Tuple));
impl_from!(Type::Union(Union));
impl_from!(Type::User(UserType));
impl ast::Node for Type {
    /// Returns the source line of this type expression by delegating to the
    /// wrapped variant payload.
    fn line(&self) -> ast::LineNum {
        match *self {
            Type::Integer(ref x) => x.line(),
            Type::Atom(ref x) => x.line(),
            Type::Var(ref x) => x.line(),
            Type::Annotated(ref x) => x.line(),
            Type::UnaryOp(ref x) => x.line(),
            Type::BinaryOp(ref x) => x.line(),
            Type::BitString(ref x) => x.line(),
            Type::Nil(ref x) => x.line(),
            Type::AnyFun(ref x) => x.line(),
            Type::Function(ref x) => x.line(),
            Type::Range(ref x) => x.line(),
            Type::Map(ref x) => x.line(),
            Type::BuiltIn(ref x) => x.line(),
            Type::Record(ref x) => x.line(),
            Type::Remote(ref x) => x.line(),
            Type::AnyTuple(ref x) => x.line(),
            Type::Tuple(ref x) => x.line(),
            Type::Union(ref x) => x.line(),
            Type::User(ref x) => x.line(),
        }
    }
}
impl Type {
    /// Convenience constructor for the built-in `any()` type at `line`.
    pub fn any(line: ast::LineNum) -> Self {
        Type::BuiltIn(Box::new(BuiltInType::new(line, "any".to_string(), Vec::new())))
    }
}

/// A user-defined type application, carrying the type name and its arguments.
#[derive(Debug,Clone)]
pub struct UserType {
    pub line: ast::LineNum,
    pub name: String,
    pub args: Vec<Type>,
}
impl_node!(UserType);
impl UserType {
    pub fn new(line: ast::LineNum, name: String, args: Vec<Type>) -> Self {
        UserType {
            line: line,
            name: name,
            args: args,
        }
    }
}

/// A union of several member types.
#[derive(Debug,Clone)]
pub struct Union {
    pub line: ast::LineNum,
    pub types: Vec<Type>,
}
impl_node!(Union);
impl Union {
    pub fn new(line: ast::LineNum, types: Vec<Type>) -> Self {
        Union {
            line: line,
            types: types,
        }
    }
}

/// The tuple type with unspecified elements.
#[derive(Debug,Clone)]
pub struct AnyTuple {
    pub line: ast::LineNum,
}
impl_node!(AnyTuple);
impl AnyTuple {
    pub fn new(line: ast::LineNum) -> Self {
        AnyTuple { line: line }
    }
}

/// A tuple type with explicitly listed element types.
#[derive(Debug,Clone)]
pub struct Tuple {
    pub line: ast::LineNum,
    pub elements: Vec<Type>,
}
impl_node!(Tuple);
impl Tuple {
    pub fn new(line: ast::LineNum, elements: Vec<Type>) -> Self {
        Tuple {
            line: line,
            elements: elements,
        }
    }
}

/// A remote type reference, i.e. a type defined in another module.
#[derive(Debug,Clone)]
pub struct RemoteType {
    pub line: ast::LineNum,
    pub module: String,
    pub function: String,
    pub args: Vec<Type>,
}
impl_node!(RemoteType);
impl RemoteType {
    pub fn new(line: ast::LineNum, module: String, function: String, args: Vec<Type>) -> Self {
        RemoteType {
            line: line,
            module: module,
            function: function,
            args: args,
        }
    }
}

/// A record type, named and optionally constraining some of its fields.
#[derive(Debug,Clone)]
pub struct Record {
    pub line: ast::LineNum,
    pub name: String,
    pub fields: Vec<RecordField>,
}
impl_node!(Record);
impl Record {
    pub fn new(line: ast::LineNum, name: String, fields: Vec<RecordField>) -> Self {
        Record {
            line: line,
            name: name,
            fields: fields,
        }
    }
}

/// A single field type constraint inside a record type.
#[derive(Debug,Clone)]
pub struct RecordField {
    pub line: ast::LineNum,
    pub name: String,
    pub ty: Type,
}
impl_node!(RecordField);
impl RecordField {
    pub fn new(line: ast::LineNum, name: String, ty: Type) -> Self {
        RecordField {
            line: line,
            name: name,
            ty: ty,
        }
    }
}

/// A predefined (built-in) type, referenced by name with its arguments.
#[derive(Debug,Clone)]
pub struct BuiltInType {
    pub line: ast::LineNum,
    pub name: String,
    pub args: Vec<Type>,
}
impl_node!(BuiltInType);
impl BuiltInType {
    pub fn new(line: ast::LineNum, name: String, args: Vec<Type>) -> Self {
        BuiltInType {
            line: line,
            name: name,
            args: args,
        }
    }
}

/// A map type, described by its key/value association pairs.
#[derive(Debug,Clone)]
pub struct Map {
    pub line: ast::LineNum,
    pub pairs: Vec<MapPair>,
}
impl_node!(Map);
impl Map {
    pub fn new(line: ast::LineNum, pairs: Vec<MapPair>) -> Self {
        Map {
            line: line,
            pairs: pairs,
        }
    }
}

/// One key/value association inside a map type.
#[derive(Debug,Clone)]
pub struct MapPair {
    pub line: ast::LineNum,
    pub key: Type,
    pub value: Type,
}
impl_node!(MapPair);
impl MapPair {
    pub fn new(line: ast::LineNum, key: Type, value: Type) -> Self {
        MapPair {
            line: line,
            key: key,
            value: value,
        }
    }
}

/// An annotated type, i.e. a variable name bound to a type (`Name :: Type`).
#[derive(Debug,Clone)]
pub struct Annotated {
    pub line: ast::LineNum,
    pub name: common::Var,
    pub ty: Type,
}
impl_node!(Annotated);
impl Annotated {
    pub fn new(line: ast::LineNum, name: common::Var, ty: Type) -> Self {
        Annotated {
            line: line,
            name: name,
            ty: ty,
        }
    }
}

/// A bit string type; `bytes` and `tail_bits` correspond to the two size
/// designators in the abstract format's bit string type.
#[derive(Debug,Clone)]
pub struct BitString {
    pub line: ast::LineNum,
    pub bytes: u64,
    pub tail_bits: u64,
}
impl_node!(BitString);
impl BitString {
    pub fn new(line: ast::LineNum, bytes: u64, tail_bits: u64) -> Self {
        BitString {
            line: line,
            bytes: bytes,
            tail_bits: tail_bits,
        }
    }
}

/// A fun type with unspecified arguments, optionally fixing the return type.
#[derive(Debug,Clone)]
pub struct AnyFun {
    pub line: ast::LineNum,
    pub return_type: Option<Type>,
}
impl_node!(AnyFun);
impl AnyFun {
    /// Creates a fully unconstrained fun type; the return type defaults to unspecified.
    pub fn new(line: ast::LineNum) -> Self {
        AnyFun {
            line: line,
            return_type: None,
        }
    }
    /// Builder-style setter fixing the return type; consumes and returns `self`.
    pub fn return_type(mut self, return_type: Type) -> Self {
        self.return_type = Some(return_type);
        self
    }
}

/// A fun type with explicit argument and return types, plus optional
/// `when` constraints.
#[derive(Debug,Clone)]
pub struct Fun {
    pub line: ast::LineNum,
    pub args: Vec<Type>,
    pub return_type: Type,
    pub constraints: Vec<Constraint>,
}
impl_node!(Fun);
impl Fun {
    /// Creates a fun type with no constraints.
    pub fn new(line: ast::LineNum, args: Vec<Type>, return_type: Type) -> Self {
        Fun {
            line: line,
            args: args,
            return_type: return_type,
            constraints: Vec::new(),
        }
    }
    /// Builder-style setter replacing the constraint list; consumes and returns `self`.
    pub fn constraints(mut self, constraints: Vec<Constraint>) -> Self {
        self.constraints = constraints;
        self
    }
}

/// A single constraint in a fun type's `when` clause: `Var :: Subtype`.
#[derive(Debug,Clone)]
pub struct Constraint {
    pub line: ast::LineNum,
    pub var: common::Var,
    pub subtype: Type,
}
impl_node!(Constraint);
impl Constraint {
    pub fn new(line: ast::LineNum, var: common::Var, subtype: Type) -> Self {
        Constraint {
            line: line,
            var: var,
            subtype: subtype,
        }
    }
}

/// An integer range type (`Low..High`).
#[derive(Debug,Clone)]
pub struct Range {
    pub line: ast::LineNum,
    pub low: Type,
    pub high: Type,
}
impl_node!(Range);
impl Range {
    pub fn new(line: ast::LineNum, low: Type, high: Type) -> Self {
        Range {
            line: line,
            low: low,
            high: high,
        }
    }
}
23.139205
95
0.556047
e5d7aa916bba872aabd73997470395236d2943f2
257
//! Submodule declarations and the public re-export surface of this module.

// `terminal` submodule; its rendering and event types are re-exported here.
mod terminal;
pub use self::terminal::{RenderTarget, Terminal, TerminalEvent};

// `tui` submodule; its service and event types are re-exported here.
mod tui;
pub use self::tui::{CoreEvent, Tui, TuiService, TuiServiceBuilder};

// `cmd` submodule; command type and its parse error are re-exported here.
mod cmd;
pub use self::cmd::{Command, ParseCommandError};

// `conf` submodule; only the `Settings` type is re-exported.
mod conf;
pub use self::conf::Settings;
21.416667
67
0.747082
eb9d847788a65b85bd4998dc4267ddefb4c4fbb8
47,282
//! Lower-level Server connection API. //! //! The types in this module are to provide a lower-level API based around a //! single connection. Accepting a connection and binding it with a service //! are not handled at this level. This module provides the building blocks to //! customize those things externally. //! //! If you don't have need to manage connections yourself, consider using the //! higher-level [Server](super) API. //! //! ## Example //! A simple example that uses the `Http` struct to talk HTTP over a Tokio TCP stream //! ```no_run //! # #[cfg(all(feature = "http1", feature = "runtime"))] //! # mod rt { //! use http::{Request, Response, StatusCode}; //! use hyper::{server::conn::Http, service::service_fn, Body}; //! use std::{net::SocketAddr, convert::Infallible}; //! use tokio::net::TcpListener; //! //! #[tokio::main] //! async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> { //! let addr: SocketAddr = ([127, 0, 0, 1], 8080).into(); //! //! let mut tcp_listener = TcpListener::bind(addr).await?; //! loop { //! let (tcp_stream, _) = tcp_listener.accept().await?; //! tokio::task::spawn(async move { //! if let Err(http_err) = Http::new() //! .http1_only(true) //! .http1_keep_alive(true) //! .serve_connection(tcp_stream, service_fn(hello)) //! .await { //! eprintln!("Error while serving HTTP connection: {}", http_err); //! } //! }); //! } //! } //! //! async fn hello(_req: Request<Body>) -> Result<Response<Body>, Infallible> { //! Ok(Response::new(Body::from("Hello World!"))) //! } //! # } //! ``` #[cfg(all( any(feature = "http1", feature = "http2"), not(all(feature = "http1", feature = "http2")) ))] use std::marker::PhantomData; #[cfg(feature = "tcp")] use std::net::SocketAddr; use std::time::Duration; #[cfg(feature = "http2")] use crate::common::io::Rewind; #[cfg(all(feature = "http1", feature = "http2"))] use crate::error::{Kind, Parse}; #[cfg(feature = "http1")] use crate::upgrade::Upgraded; cfg_feature! 
{ #![any(feature = "http1", feature = "http2")] use std::error::Error as StdError; use std::fmt; use bytes::Bytes; use pin_project_lite::pin_project; use tokio::io::{AsyncRead, AsyncWrite}; use tracing::trace; use super::accept::Accept; use crate::body::{Body, HttpBody}; use crate::common::{task, Future, Pin, Poll, Unpin}; #[cfg(not(all(feature = "http1", feature = "http2")))] use crate::common::Never; use crate::common::exec::{ConnStreamExec, Exec, NewSvcExec}; use crate::proto; use crate::service::{HttpService, MakeServiceRef}; use self::spawn_all::NewSvcTask; pub(super) use self::spawn_all::{NoopWatcher, Watcher}; pub(super) use self::upgrades::UpgradeableConnection; } #[cfg(feature = "tcp")] pub use super::tcp::{AddrIncoming, AddrStream}; /// A lower-level configuration of the HTTP protocol. /// /// This structure is used to configure options for an HTTP server connection. /// /// If you don't have need to manage connections yourself, consider using the /// higher-level [Server](super) API. #[derive(Clone, Debug)] #[cfg(any(feature = "http1", feature = "http2"))] #[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] pub struct Http<E = Exec> { exec: E, h1_half_close: bool, h1_keep_alive: bool, h1_title_case_headers: bool, h1_preserve_header_case: bool, #[cfg(all(feature = "http1", feature = "runtime"))] h1_header_read_timeout: Option<Duration>, h1_writev: Option<bool>, #[cfg(feature = "http2")] h2_builder: proto::h2::server::Config, mode: ConnectionMode, max_buf_size: Option<usize>, pipeline_flush: bool, } /// The internal mode of HTTP protocol which indicates the behavior when a parse error occurs. #[cfg(any(feature = "http1", feature = "http2"))] #[derive(Clone, Debug, PartialEq)] enum ConnectionMode { /// Always use HTTP/1 and do not upgrade when a parse error occurs. #[cfg(feature = "http1")] H1Only, /// Always use HTTP/2. #[cfg(feature = "http2")] H2Only, /// Use HTTP/1 and try to upgrade to h2 when a parse error occurs. 
#[cfg(all(feature = "http1", feature = "http2"))] Fallback, } #[cfg(any(feature = "http1", feature = "http2"))] pin_project! { /// A stream mapping incoming IOs to new services. /// /// Yields `Connecting`s that are futures that should be put on a reactor. #[must_use = "streams do nothing unless polled"] #[derive(Debug)] pub(super) struct Serve<I, S, E = Exec> { #[pin] incoming: I, make_service: S, protocol: Http<E>, } } #[cfg(any(feature = "http1", feature = "http2"))] pin_project! { /// A future building a new `Service` to a `Connection`. /// /// Wraps the future returned from `MakeService` into one that returns /// a `Connection`. #[must_use = "futures do nothing unless polled"] #[derive(Debug)] #[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] pub struct Connecting<I, F, E = Exec> { #[pin] future: F, io: Option<I>, protocol: Http<E>, } } #[cfg(any(feature = "http1", feature = "http2"))] pin_project! { #[must_use = "futures do nothing unless polled"] #[derive(Debug)] pub(super) struct SpawnAll<I, S, E> { // TODO: re-add `pub(super)` once rustdoc can handle this. // // See https://github.com/rust-lang/rust/issues/64705 #[pin] pub(super) serve: Serve<I, S, E>, } } #[cfg(any(feature = "http1", feature = "http2"))] pin_project! { /// A future binding a connection with a Service. /// /// Polling this future will drive HTTP forward. 
#[must_use = "futures do nothing unless polled"] #[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] pub struct Connection<T, S, E = Exec> where S: HttpService<Body>, { pub(super) conn: Option<ProtoServer<T, S::ResBody, S, E>>, fallback: Fallback<E>, } } #[cfg(feature = "http1")] type Http1Dispatcher<T, B, S> = proto::h1::Dispatcher<proto::h1::dispatch::Server<S, Body>, B, T, proto::ServerTransaction>; #[cfg(all(not(feature = "http1"), feature = "http2"))] type Http1Dispatcher<T, B, S> = (Never, PhantomData<(T, Box<Pin<B>>, Box<Pin<S>>)>); #[cfg(feature = "http2")] type Http2Server<T, B, S, E> = proto::h2::Server<Rewind<T>, S, B, E>; #[cfg(all(not(feature = "http2"), feature = "http1"))] type Http2Server<T, B, S, E> = ( Never, PhantomData<(T, Box<Pin<S>>, Box<Pin<B>>, Box<Pin<E>>)>, ); #[cfg(any(feature = "http1", feature = "http2"))] pin_project! { #[project = ProtoServerProj] pub(super) enum ProtoServer<T, B, S, E = Exec> where S: HttpService<Body>, B: HttpBody, { H1 { #[pin] h1: Http1Dispatcher<T, B, S>, }, H2 { #[pin] h2: Http2Server<T, B, S, E>, }, } } #[cfg(all(feature = "http1", feature = "http2"))] #[derive(Clone, Debug)] enum Fallback<E> { ToHttp2(proto::h2::server::Config, E), Http1Only, } #[cfg(all( any(feature = "http1", feature = "http2"), not(all(feature = "http1", feature = "http2")) ))] type Fallback<E> = PhantomData<E>; #[cfg(all(feature = "http1", feature = "http2"))] impl<E> Fallback<E> { fn to_h2(&self) -> bool { match *self { Fallback::ToHttp2(..) => true, Fallback::Http1Only => false, } } } #[cfg(all(feature = "http1", feature = "http2"))] impl<E> Unpin for Fallback<E> {} /// Deconstructed parts of a `Connection`. /// /// This allows taking apart a `Connection` at a later time, in order to /// reclaim the IO object, and additional related pieces. 
#[derive(Debug)] #[cfg(any(feature = "http1", feature = "http2"))] #[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] pub struct Parts<T, S> { /// The original IO object used in the handshake. pub io: T, /// A buffer of bytes that have been read but not processed as HTTP. /// /// If the client sent additional bytes after its last request, and /// this connection "ended" with an upgrade, the read buffer will contain /// those bytes. /// /// You will want to check for any existing bytes if you plan to continue /// communicating on the IO object. pub read_buf: Bytes, /// The `Service` used to serve this connection. pub service: S, _inner: (), } // ===== impl Http ===== #[cfg(any(feature = "http1", feature = "http2"))] impl Http { /// Creates a new instance of the HTTP protocol, ready to spawn a server or /// start accepting connections. pub fn new() -> Http { Http { exec: Exec::Default, h1_half_close: false, h1_keep_alive: true, h1_title_case_headers: false, h1_preserve_header_case: false, #[cfg(all(feature = "http1", feature = "runtime"))] h1_header_read_timeout: None, h1_writev: None, #[cfg(feature = "http2")] h2_builder: Default::default(), mode: ConnectionMode::default(), max_buf_size: None, pipeline_flush: false, } } } #[cfg(any(feature = "http1", feature = "http2"))] impl<E> Http<E> { /// Sets whether HTTP1 is required. /// /// Default is false #[cfg(feature = "http1")] #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn http1_only(&mut self, val: bool) -> &mut Self { if val { self.mode = ConnectionMode::H1Only; } else { #[cfg(feature = "http2")] { self.mode = ConnectionMode::Fallback; } } self } /// Set whether HTTP/1 connections should support half-closures. /// /// Clients can chose to shutdown their write-side while waiting /// for the server to respond. Setting this to `true` will /// prevent closing the connection immediately if `read` /// detects an EOF in the middle of a request. /// /// Default is `false`. 
#[cfg(feature = "http1")] #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn http1_half_close(&mut self, val: bool) -> &mut Self { self.h1_half_close = val; self } /// Enables or disables HTTP/1 keep-alive. /// /// Default is true. #[cfg(feature = "http1")] #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn http1_keep_alive(&mut self, val: bool) -> &mut Self { self.h1_keep_alive = val; self } /// Set whether HTTP/1 connections will write header names as title case at /// the socket level. /// /// Note that this setting does not affect HTTP/2. /// /// Default is false. #[cfg(feature = "http1")] #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn http1_title_case_headers(&mut self, enabled: bool) -> &mut Self { self.h1_title_case_headers = enabled; self } /// Set whether to support preserving original header cases. /// /// Currently, this will record the original cases received, and store them /// in a private extension on the `Request`. It will also look for and use /// such an extension in any provided `Response`. /// /// Since the relevant extension is still private, there is no way to /// interact with the original cases. The only effect this can have now is /// to forward the cases in a proxy-like fashion. /// /// Note that this setting does not affect HTTP/2. /// /// Default is false. #[cfg(feature = "http1")] #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn http1_preserve_header_case(&mut self, enabled: bool) -> &mut Self { self.h1_preserve_header_case = enabled; self } /// Set a timeout for reading client request headers. If a client does not /// transmit the entire header within this time, the connection is closed. /// /// Default is None. 
#[cfg(all(feature = "http1", feature = "runtime"))] #[cfg_attr(docsrs, doc(cfg(all(feature = "http1", feature = "runtime"))))] pub fn http1_header_read_timeout(&mut self, read_timeout: Duration) -> &mut Self { self.h1_header_read_timeout = Some(read_timeout); self } /// Set whether HTTP/1 connections should try to use vectored writes, /// or always flatten into a single buffer. /// /// Note that setting this to false may mean more copies of body data, /// but may also improve performance when an IO transport doesn't /// support vectored writes well, such as most TLS implementations. /// /// Setting this to true will force hyper to use queued strategy /// which may eliminate unnecessary cloning on some TLS backends /// /// Default is `auto`. In this mode hyper will try to guess which /// mode to use #[inline] #[cfg(feature = "http1")] #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn http1_writev(&mut self, val: bool) -> &mut Self { self.h1_writev = Some(val); self } /// Sets whether HTTP2 is required. /// /// Default is false #[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_only(&mut self, val: bool) -> &mut Self { if val { self.mode = ConnectionMode::H2Only; } else { #[cfg(feature = "http1")] { self.mode = ConnectionMode::Fallback; } } self } /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2 /// stream-level flow control. /// /// Passing `None` will do nothing. /// /// If not set, hyper will use a default. /// /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE #[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_initial_stream_window_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self { if let Some(sz) = sz.into() { self.h2_builder.adaptive_window = false; self.h2_builder.initial_stream_window_size = sz; } self } /// Sets the max connection-level flow control for HTTP2. /// /// Passing `None` will do nothing. 
/// /// If not set, hyper will use a default. #[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_initial_connection_window_size( &mut self, sz: impl Into<Option<u32>>, ) -> &mut Self { if let Some(sz) = sz.into() { self.h2_builder.adaptive_window = false; self.h2_builder.initial_conn_window_size = sz; } self } /// Sets whether to use an adaptive flow control. /// /// Enabling this will override the limits set in /// `http2_initial_stream_window_size` and /// `http2_initial_connection_window_size`. #[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_adaptive_window(&mut self, enabled: bool) -> &mut Self { use proto::h2::SPEC_WINDOW_SIZE; self.h2_builder.adaptive_window = enabled; if enabled { self.h2_builder.initial_conn_window_size = SPEC_WINDOW_SIZE; self.h2_builder.initial_stream_window_size = SPEC_WINDOW_SIZE; } self } /// Sets the maximum frame size to use for HTTP2. /// /// Passing `None` will do nothing. /// /// If not set, hyper will use a default. #[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_max_frame_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self { if let Some(sz) = sz.into() { self.h2_builder.max_frame_size = sz; } self } /// Sets the [`SETTINGS_MAX_CONCURRENT_STREAMS`][spec] option for HTTP2 /// connections. /// /// Default is no limit (`std::u32::MAX`). Passing `None` will do nothing. /// /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_MAX_CONCURRENT_STREAMS #[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_max_concurrent_streams(&mut self, max: impl Into<Option<u32>>) -> &mut Self { self.h2_builder.max_concurrent_streams = max.into(); self } /// Sets an interval for HTTP2 Ping frames should be sent to keep a /// connection alive. /// /// Pass `None` to disable HTTP2 keep-alive. /// /// Default is currently disabled. 
/// /// # Cargo Feature /// /// Requires the `runtime` cargo feature to be enabled. #[cfg(feature = "runtime")] #[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_keep_alive_interval( &mut self, interval: impl Into<Option<Duration>>, ) -> &mut Self { self.h2_builder.keep_alive_interval = interval.into(); self } /// Sets a timeout for receiving an acknowledgement of the keep-alive ping. /// /// If the ping is not acknowledged within the timeout, the connection will /// be closed. Does nothing if `http2_keep_alive_interval` is disabled. /// /// Default is 20 seconds. /// /// # Cargo Feature /// /// Requires the `runtime` cargo feature to be enabled. #[cfg(feature = "runtime")] #[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self { self.h2_builder.keep_alive_timeout = timeout; self } /// Set the maximum write buffer size for each HTTP/2 stream. /// /// Default is currently ~400KB, but may change. /// /// # Panics /// /// The value must be no larger than `u32::MAX`. #[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_max_send_buf_size(&mut self, max: usize) -> &mut Self { assert!(max <= std::u32::MAX as usize); self.h2_builder.max_send_buffer_size = max; self } /// Set the maximum buffer size for the connection. /// /// Default is ~400kb. /// /// # Panics /// /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum. #[cfg(feature = "http1")] #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn max_buf_size(&mut self, max: usize) -> &mut Self { assert!( max >= proto::h1::MINIMUM_MAX_BUFFER_SIZE, "the max_buf_size cannot be smaller than the minimum that h1 specifies." ); self.max_buf_size = Some(max); self } /// Aggregates flushes to better support pipelined responses. /// /// Experimental, may have bugs. /// /// Default is false. 
pub fn pipeline_flush(&mut self, enabled: bool) -> &mut Self { self.pipeline_flush = enabled; self } /// Set the executor used to spawn background tasks. /// /// Default uses implicit default (like `tokio::spawn`). pub fn with_executor<E2>(self, exec: E2) -> Http<E2> { Http { exec, h1_half_close: self.h1_half_close, h1_keep_alive: self.h1_keep_alive, h1_title_case_headers: self.h1_title_case_headers, h1_preserve_header_case: self.h1_preserve_header_case, #[cfg(all(feature = "http1", feature = "runtime"))] h1_header_read_timeout: self.h1_header_read_timeout, h1_writev: self.h1_writev, #[cfg(feature = "http2")] h2_builder: self.h2_builder, mode: self.mode, max_buf_size: self.max_buf_size, pipeline_flush: self.pipeline_flush, } } /// Bind a connection together with a [`Service`](crate::service::Service). /// /// This returns a Future that must be polled in order for HTTP to be /// driven on the connection. /// /// # Example /// /// ``` /// # use hyper::{Body, Request, Response}; /// # use hyper::service::Service; /// # use hyper::server::conn::Http; /// # use tokio::io::{AsyncRead, AsyncWrite}; /// # async fn run<I, S>(some_io: I, some_service: S) /// # where /// # I: AsyncRead + AsyncWrite + Unpin + Send + 'static, /// # S: Service<hyper::Request<Body>, Response=hyper::Response<Body>> + Send + 'static, /// # S::Error: Into<Box<dyn std::error::Error + Send + Sync>>, /// # S::Future: Send, /// # { /// let http = Http::new(); /// let conn = http.serve_connection(some_io, some_service); /// /// if let Err(e) = conn.await { /// eprintln!("server connection error: {}", e); /// } /// # } /// # fn main() {} /// ``` pub fn serve_connection<S, I, Bd>(&self, io: I, service: S) -> Connection<I, S, E> where S: HttpService<Body, ResBody = Bd>, S::Error: Into<Box<dyn StdError + Send + Sync>>, Bd: HttpBody + 'static, Bd::Error: Into<Box<dyn StdError + Send + Sync>>, I: AsyncRead + AsyncWrite + Unpin, E: ConnStreamExec<S::Future, Bd>, { #[cfg(feature = "http1")] macro_rules! 
h1 { () => {{ let mut conn = proto::Conn::new(io); if !self.h1_keep_alive { conn.disable_keep_alive(); } if self.h1_half_close { conn.set_allow_half_close(); } if self.h1_title_case_headers { conn.set_title_case_headers(); } if self.h1_preserve_header_case { conn.set_preserve_header_case(); } #[cfg(all(feature = "http1", feature = "runtime"))] if let Some(header_read_timeout) = self.h1_header_read_timeout { conn.set_http1_header_read_timeout(header_read_timeout); } if let Some(writev) = self.h1_writev { if writev { conn.set_write_strategy_queue(); } else { conn.set_write_strategy_flatten(); } } conn.set_flush_pipeline(self.pipeline_flush); if let Some(max) = self.max_buf_size { conn.set_max_buf_size(max); } let sd = proto::h1::dispatch::Server::new(service); ProtoServer::H1 { h1: proto::h1::Dispatcher::new(sd, conn), } }}; } let proto = match self.mode { #[cfg(feature = "http1")] #[cfg(not(feature = "http2"))] ConnectionMode::H1Only => h1!(), #[cfg(feature = "http2")] #[cfg(feature = "http1")] ConnectionMode::H1Only | ConnectionMode::Fallback => h1!(), #[cfg(feature = "http2")] ConnectionMode::H2Only => { let rewind_io = Rewind::new(io); let h2 = proto::h2::Server::new(rewind_io, service, &self.h2_builder, self.exec.clone()); ProtoServer::H2 { h2 } } }; Connection { conn: Some(proto), #[cfg(all(feature = "http1", feature = "http2"))] fallback: if self.mode == ConnectionMode::Fallback { Fallback::ToHttp2(self.h2_builder.clone(), self.exec.clone()) } else { Fallback::Http1Only }, #[cfg(not(all(feature = "http1", feature = "http2")))] fallback: PhantomData, } } pub(super) fn serve<I, IO, IE, S, Bd>(&self, incoming: I, make_service: S) -> Serve<I, S, E> where I: Accept<Conn = IO, Error = IE>, IE: Into<Box<dyn StdError + Send + Sync>>, IO: AsyncRead + AsyncWrite + Unpin, S: MakeServiceRef<IO, Body, ResBody = Bd>, S::Error: Into<Box<dyn StdError + Send + Sync>>, Bd: HttpBody, E: ConnStreamExec<<S::Service as HttpService<Body>>::Future, Bd>, { Serve { incoming, 
make_service, protocol: self.clone(), } } } // ===== impl Connection ===== #[cfg(any(feature = "http1", feature = "http2"))] impl<I, B, S, E> Connection<I, S, E> where S: HttpService<Body, ResBody = B>, S::Error: Into<Box<dyn StdError + Send + Sync>>, I: AsyncRead + AsyncWrite + Unpin, B: HttpBody + 'static, B::Error: Into<Box<dyn StdError + Send + Sync>>, E: ConnStreamExec<S::Future, B>, { /// Start a graceful shutdown process for this connection. /// /// This `Connection` should continue to be polled until shutdown /// can finish. /// /// # Note /// /// This should only be called while the `Connection` future is still /// pending. If called after `Connection::poll` has resolved, this does /// nothing. pub fn graceful_shutdown(mut self: Pin<&mut Self>) { match self.conn { #[cfg(feature = "http1")] Some(ProtoServer::H1 { ref mut h1, .. }) => { h1.disable_keep_alive(); } #[cfg(feature = "http2")] Some(ProtoServer::H2 { ref mut h2 }) => { h2.graceful_shutdown(); } None => (), #[cfg(not(feature = "http1"))] Some(ProtoServer::H1 { ref mut h1, .. }) => match h1.0 {}, #[cfg(not(feature = "http2"))] Some(ProtoServer::H2 { ref mut h2 }) => match h2.0 {}, } } /// Return the inner IO object, and additional information. /// /// If the IO object has been "rewound" the io will not contain those bytes rewound. /// This should only be called after `poll_without_shutdown` signals /// that the connection is "done". Otherwise, it may not have finished /// flushing all necessary HTTP bytes. /// /// # Panics /// This method will panic if this connection is using an h2 protocol. pub fn into_parts(self) -> Parts<I, S> { self.try_into_parts() .unwrap_or_else(|| panic!("h2 cannot into_inner")) } /// Return the inner IO object, and additional information, if available. /// /// This method will return a `None` if this connection is using an h2 protocol. pub fn try_into_parts(self) -> Option<Parts<I, S>> { match self.conn.unwrap() { #[cfg(feature = "http1")] ProtoServer::H1 { h1, .. 
} => { let (io, read_buf, dispatch) = h1.into_inner(); Some(Parts { io, read_buf, service: dispatch.into_service(), _inner: (), }) } ProtoServer::H2 { .. } => None, #[cfg(not(feature = "http1"))] ProtoServer::H1 { h1, .. } => match h1.0 {}, } } /// Poll the connection for completion, but without calling `shutdown` /// on the underlying IO. /// /// This is useful to allow running a connection while doing an HTTP /// upgrade. Once the upgrade is completed, the connection would be "done", /// but it is not desired to actually shutdown the IO object. Instead you /// would take it back using `into_parts`. pub fn poll_without_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> where S: Unpin, S::Future: Unpin, B: Unpin, { loop { match *self.conn.as_mut().unwrap() { #[cfg(feature = "http1")] ProtoServer::H1 { ref mut h1, .. } => match ready!(h1.poll_without_shutdown(cx)) { Ok(()) => return Poll::Ready(Ok(())), Err(e) => { #[cfg(feature = "http2")] match *e.kind() { Kind::Parse(Parse::VersionH2) if self.fallback.to_h2() => { self.upgrade_h2(); continue; } _ => (), } return Poll::Ready(Err(e)); } }, #[cfg(feature = "http2")] ProtoServer::H2 { ref mut h2 } => return Pin::new(h2).poll(cx).map_ok(|_| ()), #[cfg(not(feature = "http1"))] ProtoServer::H1 { ref mut h1, .. } => match h1.0 {}, #[cfg(not(feature = "http2"))] ProtoServer::H2 { ref mut h2 } => match h2.0 {}, }; } } /// Prevent shutdown of the underlying IO object at the end of service the request, /// instead run `into_parts`. This is a convenience wrapper over `poll_without_shutdown`. /// /// # Error /// /// This errors if the underlying connection protocol is not HTTP/1. 
pub fn without_shutdown(self) -> impl Future<Output = crate::Result<Parts<I, S>>> where S: Unpin, S::Future: Unpin, B: Unpin, { let mut conn = Some(self); futures_util::future::poll_fn(move |cx| { ready!(conn.as_mut().unwrap().poll_without_shutdown(cx))?; Poll::Ready(conn.take().unwrap().try_into_parts().ok_or_else(crate::Error::new_without_shutdown_not_h1)) }) } #[cfg(all(feature = "http1", feature = "http2"))] fn upgrade_h2(&mut self) { trace!("Trying to upgrade connection to h2"); let conn = self.conn.take(); let (io, read_buf, dispatch) = match conn.unwrap() { ProtoServer::H1 { h1, .. } => h1.into_inner(), ProtoServer::H2 { .. } => { panic!("h2 cannot into_inner"); } }; let mut rewind_io = Rewind::new(io); rewind_io.rewind(read_buf); let (builder, exec) = match self.fallback { Fallback::ToHttp2(ref builder, ref exec) => (builder, exec), Fallback::Http1Only => unreachable!("upgrade_h2 with Fallback::Http1Only"), }; let h2 = proto::h2::Server::new(rewind_io, dispatch.into_service(), builder, exec.clone()); debug_assert!(self.conn.is_none()); self.conn = Some(ProtoServer::H2 { h2 }); } /// Enable this connection to support higher-level HTTP upgrades. /// /// See [the `upgrade` module](crate::upgrade) for more. 
pub fn with_upgrades(self) -> UpgradeableConnection<I, S, E> where I: Send, { UpgradeableConnection { inner: self } } } #[cfg(any(feature = "http1", feature = "http2"))] impl<I, B, S, E> Future for Connection<I, S, E> where S: HttpService<Body, ResBody = B>, S::Error: Into<Box<dyn StdError + Send + Sync>>, I: AsyncRead + AsyncWrite + Unpin + 'static, B: HttpBody + 'static, B::Error: Into<Box<dyn StdError + Send + Sync>>, E: ConnStreamExec<S::Future, B>, { type Output = crate::Result<()>; fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> { loop { match ready!(Pin::new(self.conn.as_mut().unwrap()).poll(cx)) { Ok(done) => { match done { proto::Dispatched::Shutdown => {} #[cfg(feature = "http1")] proto::Dispatched::Upgrade(pending) => { // With no `Send` bound on `I`, we can't try to do // upgrades here. In case a user was trying to use // `Body::on_upgrade` with this API, send a special // error letting them know about that. pending.manual(); } }; return Poll::Ready(Ok(())); } Err(e) => { #[cfg(feature = "http1")] #[cfg(feature = "http2")] match *e.kind() { Kind::Parse(Parse::VersionH2) if self.fallback.to_h2() => { self.upgrade_h2(); continue; } _ => (), } return Poll::Ready(Err(e)); } } } } } #[cfg(any(feature = "http1", feature = "http2"))] impl<I, S> fmt::Debug for Connection<I, S> where S: HttpService<Body>, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Connection").finish() } } // ===== impl ConnectionMode ===== #[cfg(any(feature = "http1", feature = "http2"))] impl Default for ConnectionMode { #[cfg(all(feature = "http1", feature = "http2"))] fn default() -> ConnectionMode { ConnectionMode::Fallback } #[cfg(all(feature = "http1", not(feature = "http2")))] fn default() -> ConnectionMode { ConnectionMode::H1Only } #[cfg(all(not(feature = "http1"), feature = "http2"))] fn default() -> ConnectionMode { ConnectionMode::H2Only } } // ===== impl Serve ===== #[cfg(any(feature = "http1", feature = 
"http2"))] impl<I, S, E> Serve<I, S, E> { /// Get a reference to the incoming stream. #[inline] pub(super) fn incoming_ref(&self) -> &I { &self.incoming } /* /// Get a mutable reference to the incoming stream. #[inline] pub fn incoming_mut(&mut self) -> &mut I { &mut self.incoming } */ /// Spawn all incoming connections onto the executor in `Http`. pub(super) fn spawn_all(self) -> SpawnAll<I, S, E> { SpawnAll { serve: self } } } #[cfg(any(feature = "http1", feature = "http2"))] impl<I, IO, IE, S, B, E> Serve<I, S, E> where I: Accept<Conn = IO, Error = IE>, IO: AsyncRead + AsyncWrite + Unpin, IE: Into<Box<dyn StdError + Send + Sync>>, S: MakeServiceRef<IO, Body, ResBody = B>, B: HttpBody, E: ConnStreamExec<<S::Service as HttpService<Body>>::Future, B>, { fn poll_next_( self: Pin<&mut Self>, cx: &mut task::Context<'_>, ) -> Poll<Option<crate::Result<Connecting<IO, S::Future, E>>>> { let me = self.project(); match ready!(me.make_service.poll_ready_ref(cx)) { Ok(()) => (), Err(e) => { trace!("make_service closed"); return Poll::Ready(Some(Err(crate::Error::new_user_make_service(e)))); } } if let Some(item) = ready!(me.incoming.poll_accept(cx)) { let io = item.map_err(crate::Error::new_accept)?; let new_fut = me.make_service.make_service_ref(&io); Poll::Ready(Some(Ok(Connecting { future: new_fut, io: Some(io), protocol: me.protocol.clone(), }))) } else { Poll::Ready(None) } } } // ===== impl Connecting ===== #[cfg(any(feature = "http1", feature = "http2"))] impl<I, F, S, FE, E, B> Future for Connecting<I, F, E> where I: AsyncRead + AsyncWrite + Unpin, F: Future<Output = Result<S, FE>>, S: HttpService<Body, ResBody = B>, B: HttpBody + 'static, B::Error: Into<Box<dyn StdError + Send + Sync>>, E: ConnStreamExec<S::Future, B>, { type Output = Result<Connection<I, S, E>, FE>; fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> { let mut me = self.project(); let service = ready!(me.future.poll(cx))?; let io = Option::take(&mut me.io).expect("polled 
after complete"); Poll::Ready(Ok(me.protocol.serve_connection(io, service))) } } // ===== impl SpawnAll ===== #[cfg(all(feature = "tcp", any(feature = "http1", feature = "http2")))] impl<S, E> SpawnAll<AddrIncoming, S, E> { pub(super) fn local_addr(&self) -> SocketAddr { self.serve.incoming.local_addr() } } #[cfg(any(feature = "http1", feature = "http2"))] impl<I, S, E> SpawnAll<I, S, E> { pub(super) fn incoming_ref(&self) -> &I { self.serve.incoming_ref() } } #[cfg(any(feature = "http1", feature = "http2"))] impl<I, IO, IE, S, B, E> SpawnAll<I, S, E> where I: Accept<Conn = IO, Error = IE>, IE: Into<Box<dyn StdError + Send + Sync>>, IO: AsyncRead + AsyncWrite + Unpin + Send + 'static, S: MakeServiceRef<IO, Body, ResBody = B>, B: HttpBody, E: ConnStreamExec<<S::Service as HttpService<Body>>::Future, B>, { pub(super) fn poll_watch<W>( self: Pin<&mut Self>, cx: &mut task::Context<'_>, watcher: &W, ) -> Poll<crate::Result<()>> where E: NewSvcExec<IO, S::Future, S::Service, E, W>, W: Watcher<IO, S::Service, E>, { let mut me = self.project(); loop { if let Some(connecting) = ready!(me.serve.as_mut().poll_next_(cx)?) { let fut = NewSvcTask::new(connecting, watcher.clone()); me.serve .as_mut() .project() .protocol .exec .execute_new_svc(fut); } else { return Poll::Ready(Ok(())); } } } } // ===== impl ProtoServer ===== #[cfg(any(feature = "http1", feature = "http2"))] impl<T, B, S, E> Future for ProtoServer<T, B, S, E> where T: AsyncRead + AsyncWrite + Unpin, S: HttpService<Body, ResBody = B>, S::Error: Into<Box<dyn StdError + Send + Sync>>, B: HttpBody + 'static, B::Error: Into<Box<dyn StdError + Send + Sync>>, E: ConnStreamExec<S::Future, B>, { type Output = crate::Result<proto::Dispatched>; fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> { match self.project() { #[cfg(feature = "http1")] ProtoServerProj::H1 { h1, .. 
} => h1.poll(cx), #[cfg(feature = "http2")] ProtoServerProj::H2 { h2 } => h2.poll(cx), #[cfg(not(feature = "http1"))] ProtoServerProj::H1 { h1, .. } => match h1.0 {}, #[cfg(not(feature = "http2"))] ProtoServerProj::H2 { h2 } => match h2.0 {}, } } } #[cfg(any(feature = "http1", feature = "http2"))] pub(crate) mod spawn_all { use std::error::Error as StdError; use tokio::io::{AsyncRead, AsyncWrite}; use tracing::debug; use super::{Connecting, UpgradeableConnection}; use crate::body::{Body, HttpBody}; use crate::common::exec::ConnStreamExec; use crate::common::{task, Future, Pin, Poll, Unpin}; use crate::service::HttpService; use pin_project_lite::pin_project; // Used by `SpawnAll` to optionally watch a `Connection` future. // // The regular `hyper::Server` just uses a `NoopWatcher`, which does // not need to watch anything, and so returns the `Connection` untouched. // // The `Server::with_graceful_shutdown` needs to keep track of all active // connections, and signal that they start to shutdown when prompted, so // it has a `GracefulWatcher` implementation to do that. pub trait Watcher<I, S: HttpService<Body>, E>: Clone { type Future: Future<Output = crate::Result<()>>; fn watch(&self, conn: UpgradeableConnection<I, S, E>) -> Self::Future; } #[allow(missing_debug_implementations)] #[derive(Copy, Clone)] pub struct NoopWatcher; impl<I, S, E> Watcher<I, S, E> for NoopWatcher where I: AsyncRead + AsyncWrite + Unpin + Send + 'static, S: HttpService<Body>, E: ConnStreamExec<S::Future, S::ResBody>, S::ResBody: 'static, <S::ResBody as HttpBody>::Error: Into<Box<dyn StdError + Send + Sync>>, { type Future = UpgradeableConnection<I, S, E>; fn watch(&self, conn: UpgradeableConnection<I, S, E>) -> Self::Future { conn } } // This is a `Future<Item=(), Error=()>` spawned to an `Executor` inside // the `SpawnAll`. By being a nameable type, we can be generic over the // user's `Service::Future`, and thus an `Executor` can execute it. 
// // Doing this allows for the server to conditionally require `Send` futures, // depending on the `Executor` configured. // // Users cannot import this type, nor the associated `NewSvcExec`. Instead, // a blanket implementation for `Executor<impl Future>` is sufficient. pin_project! { #[allow(missing_debug_implementations)] pub struct NewSvcTask<I, N, S: HttpService<Body>, E, W: Watcher<I, S, E>> { #[pin] state: State<I, N, S, E, W>, } } pin_project! { #[project = StateProj] pub(super) enum State<I, N, S: HttpService<Body>, E, W: Watcher<I, S, E>> { Connecting { #[pin] connecting: Connecting<I, N, E>, watcher: W, }, Connected { #[pin] future: W::Future, }, } } impl<I, N, S: HttpService<Body>, E, W: Watcher<I, S, E>> NewSvcTask<I, N, S, E, W> { pub(super) fn new(connecting: Connecting<I, N, E>, watcher: W) -> Self { NewSvcTask { state: State::Connecting { connecting, watcher, }, } } } impl<I, N, S, NE, B, E, W> Future for NewSvcTask<I, N, S, E, W> where I: AsyncRead + AsyncWrite + Unpin + Send + 'static, N: Future<Output = Result<S, NE>>, NE: Into<Box<dyn StdError + Send + Sync>>, S: HttpService<Body, ResBody = B>, B: HttpBody + 'static, B::Error: Into<Box<dyn StdError + Send + Sync>>, E: ConnStreamExec<S::Future, B>, W: Watcher<I, S, E>, { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> { // If it weren't for needing to name this type so the `Send` bounds // could be projected to the `Serve` executor, this could just be // an `async fn`, and much safer. Woe is me. 
let mut me = self.project(); loop { let next = { match me.state.as_mut().project() { StateProj::Connecting { connecting, watcher, } => { let res = ready!(connecting.poll(cx)); let conn = match res { Ok(conn) => conn, Err(err) => { let err = crate::Error::new_user_make_service(err); debug!("connecting error: {}", err); return Poll::Ready(()); } }; let future = watcher.watch(conn.with_upgrades()); State::Connected { future } } StateProj::Connected { future } => { return future.poll(cx).map(|res| { if let Err(err) = res { debug!("connection error: {}", err); } }); } } }; me.state.set(next); } } } } #[cfg(any(feature = "http1", feature = "http2"))] mod upgrades { use super::*; // A future binding a connection with a Service with Upgrade support. // // This type is unnameable outside the crate, and so basically just an // `impl Future`, without requiring Rust 1.26. #[must_use = "futures do nothing unless polled"] #[allow(missing_debug_implementations)] pub struct UpgradeableConnection<T, S, E> where S: HttpService<Body>, { pub(super) inner: Connection<T, S, E>, } impl<I, B, S, E> UpgradeableConnection<I, S, E> where S: HttpService<Body, ResBody = B>, S::Error: Into<Box<dyn StdError + Send + Sync>>, I: AsyncRead + AsyncWrite + Unpin, B: HttpBody + 'static, B::Error: Into<Box<dyn StdError + Send + Sync>>, E: ConnStreamExec<S::Future, B>, { /// Start a graceful shutdown process for this connection. /// /// This `Connection` should continue to be polled until shutdown /// can finish. 
pub fn graceful_shutdown(mut self: Pin<&mut Self>) { Pin::new(&mut self.inner).graceful_shutdown() } } impl<I, B, S, E> Future for UpgradeableConnection<I, S, E> where S: HttpService<Body, ResBody = B>, S::Error: Into<Box<dyn StdError + Send + Sync>>, I: AsyncRead + AsyncWrite + Unpin + Send + 'static, B: HttpBody + 'static, B::Error: Into<Box<dyn StdError + Send + Sync>>, E: ConnStreamExec<S::Future, B>, { type Output = crate::Result<()>; fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> { loop { match ready!(Pin::new(self.inner.conn.as_mut().unwrap()).poll(cx)) { Ok(proto::Dispatched::Shutdown) => return Poll::Ready(Ok(())), #[cfg(feature = "http1")] Ok(proto::Dispatched::Upgrade(pending)) => { match self.inner.conn.take() { Some(ProtoServer::H1 { h1, .. }) => { let (io, buf, _) = h1.into_inner(); pending.fulfill(Upgraded::new(io, buf)); return Poll::Ready(Ok(())); } _ => { drop(pending); unreachable!("Upgrade expects h1") } }; } Err(e) => { #[cfg(feature = "http1")] #[cfg(feature = "http2")] match *e.kind() { Kind::Parse(Parse::VersionH2) if self.inner.fallback.to_h2() => { self.inner.upgrade_h2(); continue; } _ => (), } return Poll::Ready(Err(e)); } } } } } }
34.487236
116
0.538577
ed018eb906d7212575f60971d174b226ff11fce6
4,565
// Copyright (c) Microsoft Corporation. // Licensed under the MIT license OR Apache 2.0 //! Types for building an Snocat server and accepting, authenticating, and routing connections #![warn(unused_imports)] use futures::future::FutureExt; use std::collections::HashSet; use std::{ops::RangeInclusive, sync::Arc}; use tokio::sync::Mutex; pub mod modular; #[derive(Debug, Clone)] pub struct PortRangeAllocator { range: std::ops::RangeInclusive<u16>, allocated: Arc<Mutex<std::collections::HashSet<u16>>>, mark_queue: tokio::sync::mpsc::UnboundedSender<u16>, // UnboundedReceiver does not implement clone, so we need an ArcMut of it mark_receiver: Arc<Mutex<tokio::sync::mpsc::UnboundedReceiver<u16>>>, } #[derive(thiserror::Error, Debug)] pub enum PortRangeAllocationError { #[error("No ports were available to be allocated in range {0:?}")] NoFreePorts(std::ops::RangeInclusive<u16>), } impl PortRangeAllocator { pub fn new<T: Into<u16>>(bind_port_range: std::ops::RangeInclusive<T>) -> PortRangeAllocator { let (start, end): (u16, u16) = { let (a, b) = bind_port_range.into_inner(); (a.into(), b.into()) }; let (mark_sender, mark_receiver) = tokio::sync::mpsc::unbounded_channel(); PortRangeAllocator { range: std::ops::RangeInclusive::new(start, end), allocated: Default::default(), mark_queue: mark_sender, mark_receiver: Arc::new(Mutex::new(mark_receiver)), } } pub async fn allocate(&self) -> Result<PortRangeAllocationHandle, PortRangeAllocationError> { // Used for cleaning up in the deallocator let cloned_self = self.clone(); let range = self.range.clone(); let mark_receiver = Arc::clone(&self.mark_receiver); let mut lock = self.allocated.lock().await; // Consume existing marks for freed ports { let mut mark_receiver = mark_receiver.lock().await; Self::cleanup_freed_ports(&mut *lock, &mut *mark_receiver); } let port = range .clone() .into_iter() .filter(|test_port| !lock.contains(test_port)) .min() .ok_or_else(|| PortRangeAllocationError::NoFreePorts(range.clone()))?; let 
allocation = PortRangeAllocationHandle::new(port, cloned_self); lock.insert(allocation.port); Ok(allocation) } pub async fn free(&self, port: u16) -> Result<bool, anyhow::Error> { let mark_receiver = Arc::clone(&self.mark_receiver); let mut lock = self.allocated.lock().await; let removed = lock.remove(&port); if removed { tracing::trace!(port = port, "unbound port"); } let mut mark_receiver = mark_receiver.lock().await; Self::cleanup_freed_ports(&mut *lock, &mut *mark_receiver); Ok(removed) } fn cleanup_freed_ports( allocations: &mut HashSet<u16>, mark_receiver: &mut tokio::sync::mpsc::UnboundedReceiver<u16>, ) { // recv waits forever if a sender can still produce values // skip that by only receiving those immediately available // HACK: Relies on unbounded receivers being immediately available without intermediate polling while let Some(Some(marked)) = mark_receiver.recv().now_or_never() { let removed = allocations.remove(&marked); if removed { tracing::trace!(port = marked, "unbound marked port"); } } } pub fn mark_freed(&self, port: u16) { match self.allocated.try_lock() { // fast path if synchronous is possible, skipping the mark queue Ok(mut allocations) => { // remove specified port; we don't care if it succeeded or not let _removed = allocations.remove(&port); return; } Err(_would_block) => { match self.mark_queue.send(port) { // Message queued, do nothing Ok(()) => (), // Other side was closed // Without a receiver, we don't actually need to free anything, so do nothing Err(_send_error) => (), } } } } pub fn range(&self) -> &RangeInclusive<u16> { &self.range } } pub struct PortRangeAllocationHandle { port: u16, allocated_in: Option<PortRangeAllocator>, } impl PortRangeAllocationHandle { pub fn new(port: u16, allocated_in: PortRangeAllocator) -> Self { Self { port, allocated_in: Some(allocated_in), } } pub fn port(&self) -> u16 { self.port } } impl Drop for PortRangeAllocationHandle { fn drop(&mut self) { match std::mem::replace(&mut self.allocated_in, None) 
{ None => (), Some(allocator) => { allocator.mark_freed(self.port); } } } }
31.267123
99
0.658269
e250feb50d704fce77348cb17480f954853eefb5
59
#[ cfg( feature = "meta_tools" ) ] pub use meta_tools::*;
14.75
34
0.610169
ab93ed8e0a709a103477f1914ba04a786fd55bcd
39,499
// Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0. //! This crate implements a simple SQL query engine to work with TiDB pushed down executors. //! //! The query engine is able to scan and understand rows stored by TiDB, run against a //! series of executors and then return the execution result. The query engine is provided via //! TiKV Coprocessor interface. However standalone UDF functions are also exported and can be used //! standalone. #![allow(elided_lifetimes_in_paths)] // Necessary until rpn_fn accepts functions annotated with lifetimes. #![allow(incomplete_features)] #![feature(proc_macro_hygiene)] #![feature(specialization)] #![feature(test)] #![feature(const_fn_fn_ptr_basics)] #![feature(const_fn_trait_bound)] #![feature(const_mut_refs)] #[macro_use(box_err, box_try, try_opt)] extern crate tikv_util; #[macro_use(other_err)] extern crate tidb_query_common; #[cfg(test)] extern crate test; pub mod types; pub mod impl_arithmetic; pub mod impl_cast; pub mod impl_compare; pub mod impl_compare_in; pub mod impl_control; pub mod impl_encryption; pub mod impl_json; pub mod impl_like; pub mod impl_math; pub mod impl_miscellaneous; pub mod impl_op; pub mod impl_other; pub mod impl_string; pub mod impl_time; pub use self::types::*; use tidb_query_datatype::{Charset, Collation, FieldTypeAccessor, FieldTypeFlag}; use tipb::{Expr, FieldType, ScalarFuncSig}; use tidb_query_common::Result; use tidb_query_datatype::codec::data_type::*; use tidb_query_datatype::match_template_charset; use tidb_query_datatype::match_template_collator; use self::impl_arithmetic::*; use self::impl_cast::*; use self::impl_compare::*; use self::impl_compare_in::*; use self::impl_control::*; use self::impl_encryption::*; use self::impl_json::*; use self::impl_like::*; use self::impl_math::*; use self::impl_miscellaneous::*; use self::impl_op::*; use self::impl_other::*; use self::impl_string::*; use self::impl_time::*; fn map_to_binary_fn_sig(expr: &Expr) -> Result<RpnFnMeta> { 
let children = expr.get_children(); let ret_field_type = children[0].get_field_type(); Ok(match_template_charset! { TT, match Charset::from_name(ret_field_type.get_charset()).map_err(tidb_query_datatype::codec::Error::from)? { Charset::TT => to_binary_fn_meta::<TT>(), } }) } fn map_from_binary_fn_sig(expr: &Expr) -> Result<RpnFnMeta> { let ret_field_type = expr.get_field_type(); Ok(match_template_charset! { TT, match Charset::from_name(ret_field_type.get_charset()).map_err(tidb_query_datatype::codec::Error::from)? { Charset::TT => from_binary_fn_meta::<TT>(), } }) } fn map_string_compare_sig<Cmp: CmpOp>(ret_field_type: &FieldType) -> Result<RpnFnMeta> { Ok(match_template_collator! { TT, match ret_field_type.as_accessor().collation().map_err(tidb_query_datatype::codec::Error::from)? { Collation::TT => compare_bytes_fn_meta::<TT, Cmp>() } }) } fn map_compare_in_string_sig(ret_field_type: &FieldType) -> Result<RpnFnMeta> { Ok(match_template_collator! { TT, match ret_field_type.as_accessor().collation().map_err(tidb_query_datatype::codec::Error::from)? { Collation::TT => compare_in_by_hash_bytes_fn_meta::<TT>() } }) } fn map_like_sig(ret_field_type: &FieldType) -> Result<RpnFnMeta> { Ok(match_template_collator! { TT, match ret_field_type.as_accessor().collation().map_err(tidb_query_datatype::codec::Error::from)? { Collation::TT => like_fn_meta::<TT>() } }) } fn map_locate_2_args_utf8_sig(ret_field_type: &FieldType) -> Result<RpnFnMeta> { Ok(match_template_collator! { TT, match ret_field_type.as_accessor().collation().map_err(tidb_query_datatype::codec::Error::from)? { Collation::TT => locate_2_args_utf8_fn_meta::<TT>() } }) } fn map_locate_3_args_utf8_sig(ret_field_type: &FieldType) -> Result<RpnFnMeta> { Ok(match_template_collator! { TT, match ret_field_type.as_accessor().collation().map_err(tidb_query_datatype::codec::Error::from)? 
{ Collation::TT => locate_3_args_utf8_fn_meta::<TT>() } }) } fn map_strcmp_sig(ret_field_type: &FieldType) -> Result<RpnFnMeta> { Ok(match_template_collator! { TT, match ret_field_type.as_accessor().collation().map_err(tidb_query_datatype::codec::Error::from)? { Collation::TT => strcmp_fn_meta::<TT>() } }) } fn map_find_in_set_sig(ret_field_type: &FieldType) -> Result<RpnFnMeta> { Ok(match_template_collator! { TT, match ret_field_type.as_accessor().collation().map_err(tidb_query_datatype::codec::Error::from)? { Collation::TT => find_in_set_fn_meta::<TT>() } }) } fn map_ord_sig(ret_field_type: &FieldType) -> Result<RpnFnMeta> { Ok(match_template_collator! { TT, match ret_field_type.as_accessor().collation().map_err(tidb_query_datatype::codec::Error::from)? { Collation::TT => ord_fn_meta::<TT>() } }) } fn map_int_sig<F>(value: ScalarFuncSig, children: &[Expr], mapper: F) -> Result<RpnFnMeta> where F: Fn(bool, bool) -> RpnFnMeta, { // FIXME: The signature for different signed / unsigned int should be inferred at TiDB side. 
if children.len() != 2 { return Err(other_err!( "ScalarFunction {:?} (params = {}) is not supported in batch mode", value, children.len() )); } let lhs_is_unsigned = children[0] .get_field_type() .as_accessor() .flag() .contains(FieldTypeFlag::UNSIGNED); let rhs_is_unsigned = children[1] .get_field_type() .as_accessor() .flag() .contains(FieldTypeFlag::UNSIGNED); Ok(mapper(lhs_is_unsigned, rhs_is_unsigned)) } fn compare_mapper<F: CmpOp>(lhs_is_unsigned: bool, rhs_is_unsigned: bool) -> RpnFnMeta { match (lhs_is_unsigned, rhs_is_unsigned) { (false, false) => compare_fn_meta::<BasicComparer<Int, F>>(), (false, true) => compare_fn_meta::<IntUintComparer<F>>(), (true, false) => compare_fn_meta::<UintIntComparer<F>>(), (true, true) => compare_fn_meta::<UintUintComparer<F>>(), } } fn plus_mapper(lhs_is_unsigned: bool, rhs_is_unsigned: bool) -> RpnFnMeta { match (lhs_is_unsigned, rhs_is_unsigned) { (false, false) => arithmetic_fn_meta::<IntIntPlus>(), (false, true) => arithmetic_fn_meta::<IntUintPlus>(), (true, false) => arithmetic_fn_meta::<UintIntPlus>(), (true, true) => arithmetic_fn_meta::<UintUintPlus>(), } } fn minus_mapper(lhs_is_unsigned: bool, rhs_is_unsigned: bool) -> RpnFnMeta { match (lhs_is_unsigned, rhs_is_unsigned) { (false, false) => arithmetic_fn_meta::<IntIntMinus>(), (false, true) => arithmetic_fn_meta::<IntUintMinus>(), (true, false) => arithmetic_fn_meta::<UintIntMinus>(), (true, true) => arithmetic_fn_meta::<UintUintMinus>(), } } fn multiply_mapper(lhs_is_unsigned: bool, rhs_is_unsigned: bool) -> RpnFnMeta { match (lhs_is_unsigned, rhs_is_unsigned) { (false, false) => arithmetic_fn_meta::<IntIntMultiply>(), (false, true) => arithmetic_fn_meta::<IntUintMultiply>(), (true, false) => arithmetic_fn_meta::<UintIntMultiply>(), (true, true) => arithmetic_fn_meta::<UintUintMultiply>(), } } fn mod_mapper(lhs_is_unsigned: bool, rhs_is_unsigned: bool) -> RpnFnMeta { match (lhs_is_unsigned, rhs_is_unsigned) { (false, false) => arithmetic_fn_meta::<IntIntMod>(), 
(false, true) => arithmetic_fn_meta::<IntUintMod>(), (true, false) => arithmetic_fn_meta::<UintIntMod>(), (true, true) => arithmetic_fn_meta::<UintUintMod>(), } } fn divide_mapper(lhs_is_unsigned: bool, rhs_is_unsigned: bool) -> RpnFnMeta { match (lhs_is_unsigned, rhs_is_unsigned) { (false, false) => arithmetic_fn_meta::<IntDivideInt>(), (false, true) => arithmetic_fn_meta::<IntDivideUint>(), (true, false) => arithmetic_fn_meta::<UintDivideInt>(), (true, true) => arithmetic_fn_meta::<UintDivideUint>(), } } fn map_rhs_int_sig<F>(value: ScalarFuncSig, children: &[Expr], mapper: F) -> Result<RpnFnMeta> where F: Fn(bool) -> RpnFnMeta, { // FIXME: The signature for different signed / unsigned int should be inferred at TiDB side. if children.len() != 2 { return Err(other_err!( "ScalarFunction {:?} (params = {}) is not supported in batch mode", value, children.len() )); } let rhs_is_unsigned = children[1] .get_field_type() .as_accessor() .flag() .contains(FieldTypeFlag::UNSIGNED); Ok(mapper(rhs_is_unsigned)) } fn truncate_int_mapper(rhs_is_unsigned: bool) -> RpnFnMeta { if rhs_is_unsigned { truncate_int_with_uint_fn_meta() } else { truncate_int_with_int_fn_meta() } } fn truncate_uint_mapper(rhs_is_unsigned: bool) -> RpnFnMeta { if rhs_is_unsigned { truncate_uint_with_uint_fn_meta() } else { truncate_uint_with_int_fn_meta() } } fn truncate_real_mapper(rhs_is_unsigned: bool) -> RpnFnMeta { if rhs_is_unsigned { truncate_real_with_uint_fn_meta() } else { truncate_real_with_int_fn_meta() } } fn truncate_decimal_mapper(rhs_is_unsigned: bool) -> RpnFnMeta { if rhs_is_unsigned { truncate_decimal_with_uint_fn_meta() } else { truncate_decimal_with_int_fn_meta() } } pub fn map_unary_minus_int_func(value: ScalarFuncSig, children: &[Expr]) -> Result<RpnFnMeta> { if children.len() != 1 { return Err(other_err!( "ScalarFunction {:?} (params = {}) is not supported in batch mode", value, children.len() )); } if children[0] .get_field_type() .as_accessor() .flag() 
.contains(FieldTypeFlag::UNSIGNED) { Ok(unary_minus_uint_fn_meta()) } else { Ok(unary_minus_int_fn_meta()) } } fn map_lower_sig(value: ScalarFuncSig, children: &[Expr]) -> Result<RpnFnMeta> { if children.len() != 1 { return Err(other_err!( "ScalarFunction {:?} (params = {}) is not supported in batch mode", value, children.len() )); } if children[0].get_field_type().is_binary_string_like() { Ok(lower_fn_meta()) } else { let ret_field_type = children[0].get_field_type(); Ok(match_template_charset! { TT, match Charset::from_name(ret_field_type.get_charset()).map_err(tidb_query_datatype::codec::Error::from)? { Charset::TT => lower_utf8_fn_meta::<TT>(), } }) } } fn map_upper_sig(value: ScalarFuncSig, children: &[Expr]) -> Result<RpnFnMeta> { if children.len() != 1 { return Err(other_err!( "ScalarFunction {:?} (params = {}) is not supported in batch mode", value, children.len() )); } let ret_field_type = children[0].get_field_type(); Ok(match_template_charset! { TT, match Charset::from_name(ret_field_type.get_charset()).map_err(tidb_query_datatype::codec::Error::from)? { Charset::TT => upper_utf8_fn_meta::<TT>(), } }) } fn map_lower_utf8_sig(value: ScalarFuncSig, children: &[Expr]) -> Result<RpnFnMeta> { if children.len() != 1 { return Err(other_err!( "ScalarFunction {:?} (params = {}) is not supported in batch mode", value, children.len() )); } let ret_field_type = children[0].get_field_type(); Ok(match_template_charset! { TT, match Charset::from_name(ret_field_type.get_charset()).map_err(tidb_query_datatype::codec::Error::from)? 
{ Charset::TT => lower_utf8_fn_meta::<TT>(), } }) } #[rustfmt::skip] fn map_expr_node_to_rpn_func(expr: &Expr) -> Result<RpnFnMeta> { let value = expr.get_sig(); let children = expr.get_children(); let ft = expr.get_field_type(); Ok(match value { // impl_arithmetic ScalarFuncSig::PlusInt => map_int_sig(value, children, plus_mapper)?, ScalarFuncSig::PlusIntUnsignedUnsigned => arithmetic_fn_meta::<UintUintPlus>(), ScalarFuncSig::PlusIntUnsignedSigned => arithmetic_fn_meta::<UintIntPlus>(), ScalarFuncSig::PlusIntSignedUnsigned => arithmetic_fn_meta::<IntUintPlus>(), ScalarFuncSig::PlusIntSignedSigned => arithmetic_fn_meta::<IntIntPlus>(), ScalarFuncSig::PlusReal => arithmetic_fn_meta::<RealPlus>(), ScalarFuncSig::PlusDecimal => arithmetic_fn_meta::<DecimalPlus>(), ScalarFuncSig::MinusInt => map_int_sig(value, children, minus_mapper)?, ScalarFuncSig::MinusReal => arithmetic_fn_meta::<RealMinus>(), ScalarFuncSig::MinusDecimal => arithmetic_fn_meta::<DecimalMinus>(), ScalarFuncSig::MultiplyDecimal => arithmetic_fn_meta::<DecimalMultiply>(), ScalarFuncSig::MultiplyInt => map_int_sig(value, children, multiply_mapper)?, ScalarFuncSig::MultiplyIntUnsigned => arithmetic_fn_meta::<UintUintMultiply>(), ScalarFuncSig::MultiplyReal => arithmetic_fn_meta::<RealMultiply>(), ScalarFuncSig::DivideDecimal => arithmetic_with_ctx_fn_meta::<DecimalDivide>(), ScalarFuncSig::DivideReal => arithmetic_with_ctx_fn_meta::<RealDivide>(), ScalarFuncSig::IntDivideInt => map_int_sig(value, children, divide_mapper)?, ScalarFuncSig::IntDivideDecimal => int_divide_decimal_fn_meta(), ScalarFuncSig::ModReal => arithmetic_fn_meta::<RealMod>(), ScalarFuncSig::ModDecimal => arithmetic_with_ctx_fn_meta::<DecimalMod>(), ScalarFuncSig::ModInt => map_int_sig(value, children, mod_mapper)?, ScalarFuncSig::ModIntUnsignedUnsigned => arithmetic_fn_meta::<UintUintMod>(), ScalarFuncSig::ModIntUnsignedSigned => arithmetic_fn_meta::<UintIntMod>(), ScalarFuncSig::ModIntSignedUnsigned => 
arithmetic_fn_meta::<IntUintMod>(), ScalarFuncSig::ModIntSignedSigned => arithmetic_fn_meta::<IntIntMod>(), // impl_cast ScalarFuncSig::CastIntAsInt | ScalarFuncSig::CastIntAsReal | ScalarFuncSig::CastIntAsString | ScalarFuncSig::CastIntAsDecimal | ScalarFuncSig::CastIntAsTime | ScalarFuncSig::CastIntAsDuration | ScalarFuncSig::CastIntAsJson | ScalarFuncSig::CastRealAsInt | ScalarFuncSig::CastRealAsReal | ScalarFuncSig::CastRealAsString | ScalarFuncSig::CastRealAsDecimal | ScalarFuncSig::CastRealAsTime | ScalarFuncSig::CastRealAsDuration | ScalarFuncSig::CastRealAsJson | ScalarFuncSig::CastDecimalAsInt | ScalarFuncSig::CastDecimalAsReal | ScalarFuncSig::CastDecimalAsString | ScalarFuncSig::CastDecimalAsDecimal | ScalarFuncSig::CastDecimalAsTime | ScalarFuncSig::CastDecimalAsDuration | ScalarFuncSig::CastDecimalAsJson | ScalarFuncSig::CastStringAsInt | ScalarFuncSig::CastStringAsReal | ScalarFuncSig::CastStringAsString | ScalarFuncSig::CastStringAsDecimal | ScalarFuncSig::CastStringAsTime | ScalarFuncSig::CastStringAsDuration | ScalarFuncSig::CastStringAsJson | ScalarFuncSig::CastTimeAsInt | ScalarFuncSig::CastTimeAsReal | ScalarFuncSig::CastTimeAsString | ScalarFuncSig::CastTimeAsDecimal | ScalarFuncSig::CastTimeAsTime | ScalarFuncSig::CastTimeAsDuration | ScalarFuncSig::CastTimeAsJson | ScalarFuncSig::CastDurationAsInt | ScalarFuncSig::CastDurationAsReal | ScalarFuncSig::CastDurationAsString | ScalarFuncSig::CastDurationAsDecimal | ScalarFuncSig::CastDurationAsTime | ScalarFuncSig::CastDurationAsDuration | ScalarFuncSig::CastDurationAsJson | ScalarFuncSig::CastJsonAsInt | ScalarFuncSig::CastJsonAsReal | ScalarFuncSig::CastJsonAsString | ScalarFuncSig::CastJsonAsDecimal | ScalarFuncSig::CastJsonAsTime | ScalarFuncSig::CastJsonAsDuration | ScalarFuncSig::CastJsonAsJson => map_cast_func(expr)?, ScalarFuncSig::ToBinary => map_to_binary_fn_sig(expr)?, ScalarFuncSig::FromBinary => map_from_binary_fn_sig(expr)?, // impl_compare ScalarFuncSig::LtInt => map_int_sig(value, 
children, compare_mapper::<CmpOpLT>)?, ScalarFuncSig::LtReal => compare_fn_meta::<BasicComparer<Real, CmpOpLT>>(), ScalarFuncSig::LtDecimal => compare_fn_meta::<BasicComparer<Decimal, CmpOpLT>>(), ScalarFuncSig::LtString => map_string_compare_sig::<CmpOpLT>(ft)?, ScalarFuncSig::LtTime => compare_fn_meta::<BasicComparer<DateTime, CmpOpLT>>(), ScalarFuncSig::LtDuration => compare_fn_meta::<BasicComparer<Duration, CmpOpLT>>(), ScalarFuncSig::LtJson => compare_json_fn_meta::<CmpOpLT>(), ScalarFuncSig::LeInt => map_int_sig(value, children, compare_mapper::<CmpOpLE>)?, ScalarFuncSig::LeReal => compare_fn_meta::<BasicComparer<Real, CmpOpLE>>(), ScalarFuncSig::LeDecimal => compare_fn_meta::<BasicComparer<Decimal, CmpOpLE>>(), ScalarFuncSig::LeString => map_string_compare_sig::<CmpOpLE>(ft)?, ScalarFuncSig::LeTime => compare_fn_meta::<BasicComparer<DateTime, CmpOpLE>>(), ScalarFuncSig::LeDuration => compare_fn_meta::<BasicComparer<Duration, CmpOpLE>>(), ScalarFuncSig::LeJson => compare_json_fn_meta::<CmpOpLE>(), ScalarFuncSig::GreatestInt => greatest_int_fn_meta(), ScalarFuncSig::GreatestDecimal => greatest_decimal_fn_meta(), ScalarFuncSig::GreatestString => greatest_string_fn_meta(), ScalarFuncSig::GreatestReal => greatest_real_fn_meta(), ScalarFuncSig::GreatestTime | ScalarFuncSig::GreatestDate => greatest_datetime_fn_meta(), ScalarFuncSig::GreatestCmpStringAsDate => greatest_cmp_string_as_date_fn_meta(), ScalarFuncSig::GreatestCmpStringAsTime => greatest_cmp_string_as_time_fn_meta(), ScalarFuncSig::GreatestDuration => greatest_duration_fn_meta(), ScalarFuncSig::LeastInt => least_int_fn_meta(), ScalarFuncSig::IntervalInt => interval_int_fn_meta(), ScalarFuncSig::LeastDecimal => least_decimal_fn_meta(), ScalarFuncSig::LeastString => least_string_fn_meta(), ScalarFuncSig::LeastReal => least_real_fn_meta(), ScalarFuncSig::LeastTime | ScalarFuncSig::LeastDate=> least_datetime_fn_meta(), ScalarFuncSig::LeastCmpStringAsDate => least_cmp_string_as_date_fn_meta(), 
ScalarFuncSig::LeastCmpStringAsTime=> least_cmp_string_as_time_fn_meta(), ScalarFuncSig::LeastDuration => least_duration_fn_meta(), ScalarFuncSig::IntervalReal => interval_real_fn_meta(), ScalarFuncSig::GtInt => map_int_sig(value, children, compare_mapper::<CmpOpGT>)?, ScalarFuncSig::GtReal => compare_fn_meta::<BasicComparer<Real, CmpOpGT>>(), ScalarFuncSig::GtDecimal => compare_fn_meta::<BasicComparer<Decimal, CmpOpGT>>(), ScalarFuncSig::GtString => map_string_compare_sig::<CmpOpGT>(ft)?, ScalarFuncSig::GtTime => compare_fn_meta::<BasicComparer<DateTime, CmpOpGT>>(), ScalarFuncSig::GtDuration => compare_fn_meta::<BasicComparer<Duration, CmpOpGT>>(), ScalarFuncSig::GtJson => compare_json_fn_meta::<CmpOpGT>(), ScalarFuncSig::GeInt => map_int_sig(value, children, compare_mapper::<CmpOpGE>)?, ScalarFuncSig::GeReal => compare_fn_meta::<BasicComparer<Real, CmpOpGE>>(), ScalarFuncSig::GeDecimal => compare_fn_meta::<BasicComparer<Decimal, CmpOpGE>>(), ScalarFuncSig::GeString => map_string_compare_sig::<CmpOpGE>(ft)?, ScalarFuncSig::GeTime => compare_fn_meta::<BasicComparer<DateTime, CmpOpGE>>(), ScalarFuncSig::GeDuration => compare_fn_meta::<BasicComparer<Duration, CmpOpGE>>(), ScalarFuncSig::GeJson => compare_json_fn_meta::<CmpOpGE>(), ScalarFuncSig::NeInt => map_int_sig(value, children, compare_mapper::<CmpOpNE>)?, ScalarFuncSig::NeReal => compare_fn_meta::<BasicComparer<Real, CmpOpNE>>(), ScalarFuncSig::NeDecimal => compare_fn_meta::<BasicComparer<Decimal, CmpOpNE>>(), ScalarFuncSig::NeString => map_string_compare_sig::<CmpOpNE>(ft)?, ScalarFuncSig::NeTime => compare_fn_meta::<BasicComparer<DateTime, CmpOpNE>>(), ScalarFuncSig::NeDuration => compare_fn_meta::<BasicComparer<Duration, CmpOpNE>>(), ScalarFuncSig::NeJson => compare_json_fn_meta::<CmpOpNE>(), ScalarFuncSig::EqInt => map_int_sig(value, children, compare_mapper::<CmpOpEQ>)?, ScalarFuncSig::EqReal => compare_fn_meta::<BasicComparer<Real, CmpOpEQ>>(), ScalarFuncSig::EqDecimal => 
compare_fn_meta::<BasicComparer<Decimal, CmpOpEQ>>(), ScalarFuncSig::EqString => map_string_compare_sig::<CmpOpEQ>(ft)?, ScalarFuncSig::EqTime => compare_fn_meta::<BasicComparer<DateTime, CmpOpEQ>>(), ScalarFuncSig::EqDuration => compare_fn_meta::<BasicComparer<Duration, CmpOpEQ>>(), ScalarFuncSig::EqJson => compare_json_fn_meta::<CmpOpEQ>(), ScalarFuncSig::NullEqInt => map_int_sig(value, children, compare_mapper::<CmpOpNullEQ>)?, ScalarFuncSig::NullEqReal => compare_fn_meta::<BasicComparer<Real, CmpOpNullEQ>>(), ScalarFuncSig::NullEqDecimal => compare_fn_meta::<BasicComparer<Decimal, CmpOpNullEQ>>(), ScalarFuncSig::NullEqString => map_string_compare_sig::<CmpOpNullEQ>(ft)?, ScalarFuncSig::NullEqTime => compare_fn_meta::<BasicComparer<DateTime, CmpOpNullEQ>>(), ScalarFuncSig::NullEqDuration => compare_fn_meta::<BasicComparer<Duration, CmpOpNullEQ>>(), ScalarFuncSig::NullEqJson => compare_json_fn_meta::<CmpOpNullEQ>(), ScalarFuncSig::CoalesceInt => coalesce_fn_meta::<Int>(), ScalarFuncSig::CoalesceReal => coalesce_fn_meta::<Real>(), ScalarFuncSig::CoalesceString => coalesce_bytes_fn_meta(), ScalarFuncSig::CoalesceDecimal => coalesce_fn_meta::<Decimal>(), ScalarFuncSig::CoalesceTime => coalesce_fn_meta::<DateTime>(), ScalarFuncSig::CoalesceDuration => coalesce_fn_meta::<Duration>(), ScalarFuncSig::CoalesceJson => coalesce_json_fn_meta(), // impl_compare_in ScalarFuncSig::InInt => compare_in_int_type_by_hash_fn_meta(), ScalarFuncSig::InReal => compare_in_by_hash_fn_meta::<NormalInByHash::<Real>>(), ScalarFuncSig::InString => map_compare_in_string_sig(ft)?, ScalarFuncSig::InDecimal => compare_in_by_hash_fn_meta::<NormalInByHash::<Decimal>>(), ScalarFuncSig::InTime => compare_in_by_compare_fn_meta::<DateTime>(), ScalarFuncSig::InDuration => compare_in_by_hash_fn_meta::<NormalInByHash::<Duration>>(), ScalarFuncSig::InJson => compare_in_by_compare_json_fn_meta(), // impl_control ScalarFuncSig::IfNullInt => if_null_fn_meta::<Int>(), ScalarFuncSig::IfNullReal => 
if_null_fn_meta::<Real>(), ScalarFuncSig::IfNullString => if_null_bytes_fn_meta(), ScalarFuncSig::IfNullDecimal => if_null_fn_meta::<Decimal>(), ScalarFuncSig::IfNullTime => if_null_fn_meta::<DateTime>(), ScalarFuncSig::IfNullDuration => if_null_fn_meta::<Duration>(), ScalarFuncSig::IfNullJson => if_null_json_fn_meta(), ScalarFuncSig::IfInt => if_condition_fn_meta::<Int>(), ScalarFuncSig::IfReal => if_condition_fn_meta::<Real>(), ScalarFuncSig::IfDecimal => if_condition_fn_meta::<Decimal>(), ScalarFuncSig::IfTime => if_condition_fn_meta::<DateTime>(), ScalarFuncSig::IfString => if_condition_bytes_fn_meta(), ScalarFuncSig::IfDuration => if_condition_fn_meta::<Duration>(), ScalarFuncSig::IfJson => if_condition_json_fn_meta(), ScalarFuncSig::CaseWhenInt => case_when_fn_meta::<Int>(), ScalarFuncSig::CaseWhenReal => case_when_fn_meta::<Real>(), ScalarFuncSig::CaseWhenString => case_when_bytes_fn_meta(), ScalarFuncSig::CaseWhenDecimal => case_when_fn_meta::<Decimal>(), ScalarFuncSig::CaseWhenTime => case_when_fn_meta::<DateTime>(), ScalarFuncSig::CaseWhenDuration => case_when_fn_meta::<Duration>(), ScalarFuncSig::CaseWhenJson => case_when_json_fn_meta(), // impl_encryption ScalarFuncSig::UncompressedLength => uncompressed_length_fn_meta(), ScalarFuncSig::Md5 => md5_fn_meta(), ScalarFuncSig::Sha1 => sha1_fn_meta(), ScalarFuncSig::Sha2 => sha2_fn_meta(), ScalarFuncSig::Compress => compress_fn_meta(), ScalarFuncSig::Uncompress => uncompress_fn_meta(), ScalarFuncSig::RandomBytes => random_bytes_fn_meta(), ScalarFuncSig::Password => password_fn_meta(), // impl_json ScalarFuncSig::JsonDepthSig => json_depth_fn_meta(), ScalarFuncSig::JsonTypeSig => json_type_fn_meta(), ScalarFuncSig::JsonSetSig => json_set_fn_meta(), ScalarFuncSig::JsonReplaceSig => json_replace_fn_meta(), ScalarFuncSig::JsonInsertSig => json_insert_fn_meta(), ScalarFuncSig::JsonArraySig => json_array_fn_meta(), ScalarFuncSig::JsonObjectSig => json_object_fn_meta(), ScalarFuncSig::JsonMergeSig => 
json_merge_fn_meta(), ScalarFuncSig::JsonUnquoteSig => json_unquote_fn_meta(), ScalarFuncSig::JsonExtractSig => json_extract_fn_meta(), ScalarFuncSig::JsonLengthSig => json_length_fn_meta(), ScalarFuncSig::JsonRemoveSig => json_remove_fn_meta(), ScalarFuncSig::JsonKeysSig => json_keys_fn_meta(), ScalarFuncSig::JsonKeys2ArgsSig => json_keys_fn_meta(), ScalarFuncSig::JsonQuoteSig => json_quote_fn_meta(), // impl_like ScalarFuncSig::LikeSig => map_like_sig(ft)?, ScalarFuncSig::RegexpSig => regexp_fn_meta(), ScalarFuncSig::RegexpUtf8Sig => regexp_utf8_fn_meta(), // impl_math ScalarFuncSig::AbsInt => abs_int_fn_meta(), ScalarFuncSig::AbsUInt => abs_uint_fn_meta(), ScalarFuncSig::AbsReal => abs_real_fn_meta(), ScalarFuncSig::AbsDecimal => abs_decimal_fn_meta(), ScalarFuncSig::CeilReal => ceil_fn_meta::<CeilReal>(), ScalarFuncSig::CeilDecToDec => ceil_fn_meta::<CeilDecToDec>(), ScalarFuncSig::CeilDecToInt => ceil_fn_meta::<CeilDecToInt>(), ScalarFuncSig::CeilIntToInt => ceil_fn_meta::<CeilIntToInt>(), ScalarFuncSig::CeilIntToDec => ceil_fn_meta::<CeilIntToDec>(), ScalarFuncSig::FloorReal => floor_fn_meta::<FloorReal>(), ScalarFuncSig::FloorDecToInt => floor_fn_meta::<FloorDecToInt>(), ScalarFuncSig::FloorDecToDec => floor_fn_meta::<FloorDecToDec>(), ScalarFuncSig::FloorIntToInt => floor_fn_meta::<FloorIntToInt>(), ScalarFuncSig::FloorIntToDec => floor_fn_meta::<FloorIntToDec>(), ScalarFuncSig::Pi => pi_fn_meta(), ScalarFuncSig::Crc32 => crc32_fn_meta(), ScalarFuncSig::Log1Arg => log_1_arg_fn_meta(), ScalarFuncSig::Log2Args => log_2_arg_fn_meta(), ScalarFuncSig::Log2 => log2_fn_meta(), ScalarFuncSig::Log10 => log10_fn_meta(), ScalarFuncSig::Sin => sin_fn_meta(), ScalarFuncSig::Cos => cos_fn_meta(), ScalarFuncSig::Tan => tan_fn_meta(), ScalarFuncSig::Cot => cot_fn_meta(), ScalarFuncSig::Pow => pow_fn_meta(), ScalarFuncSig::Asin => asin_fn_meta(), ScalarFuncSig::Acos => acos_fn_meta(), ScalarFuncSig::Atan1Arg => atan_1_arg_fn_meta(), ScalarFuncSig::Atan2Args => 
atan_2_args_fn_meta(), ScalarFuncSig::Sign => sign_fn_meta(), ScalarFuncSig::Sqrt => sqrt_fn_meta(), ScalarFuncSig::Exp => exp_fn_meta(), ScalarFuncSig::Degrees => degrees_fn_meta(), ScalarFuncSig::Radians => radians_fn_meta(), ScalarFuncSig::Conv => conv_fn_meta(), ScalarFuncSig::Rand => rand_fn_meta(), ScalarFuncSig::RandWithSeedFirstGen => rand_with_seed_first_gen_fn_meta(), ScalarFuncSig::RoundReal => round_real_fn_meta(), ScalarFuncSig::RoundInt => round_int_fn_meta(), ScalarFuncSig::RoundDec => round_dec_fn_meta(), ScalarFuncSig::TruncateInt => map_rhs_int_sig(value, children, truncate_int_mapper)?, ScalarFuncSig::TruncateUint => map_rhs_int_sig(value, children, truncate_uint_mapper)?, ScalarFuncSig::TruncateReal => map_rhs_int_sig(value, children, truncate_real_mapper)?, ScalarFuncSig::TruncateDecimal => map_rhs_int_sig(value, children, truncate_decimal_mapper)?, ScalarFuncSig::RoundWithFracInt => round_with_frac_int_fn_meta(), ScalarFuncSig::RoundWithFracDec => round_with_frac_dec_fn_meta(), ScalarFuncSig::RoundWithFracReal => round_with_frac_real_fn_meta(), // impl_miscellaneous ScalarFuncSig::DecimalAnyValue => any_value_fn_meta::<Decimal>(), ScalarFuncSig::DurationAnyValue => any_value_fn_meta::<Duration>(), ScalarFuncSig::IntAnyValue => any_value_fn_meta::<Int>(), ScalarFuncSig::JsonAnyValue => any_value_json_fn_meta(), ScalarFuncSig::RealAnyValue => any_value_fn_meta::<Real>(), ScalarFuncSig::StringAnyValue => any_value_bytes_fn_meta(), ScalarFuncSig::TimeAnyValue => any_value_fn_meta::<DateTime>(), ScalarFuncSig::InetAton => inet_aton_fn_meta(), ScalarFuncSig::InetNtoa => inet_ntoa_fn_meta(), ScalarFuncSig::Inet6Aton => inet6_aton_fn_meta(), ScalarFuncSig::Inet6Ntoa => inet6_ntoa_fn_meta(), ScalarFuncSig::IsIPv4 => is_ipv4_fn_meta(), ScalarFuncSig::IsIPv4Compat => is_ipv4_compat_fn_meta(), ScalarFuncSig::IsIPv4Mapped => is_ipv4_mapped_fn_meta(), ScalarFuncSig::IsIPv6 => is_ipv6_fn_meta(), ScalarFuncSig::Uuid => uuid_fn_meta(), // impl_op 
ScalarFuncSig::IntIsNull => is_null_fn_meta::<Int>(), ScalarFuncSig::RealIsNull => is_null_fn_meta::<Real>(), ScalarFuncSig::DecimalIsNull => is_null_fn_meta::<Decimal>(), ScalarFuncSig::StringIsNull => is_null_bytes_fn_meta(), ScalarFuncSig::TimeIsNull => is_null_fn_meta::<DateTime>(), ScalarFuncSig::DurationIsNull => is_null_fn_meta::<Duration>(), ScalarFuncSig::JsonIsNull => is_null_json_fn_meta(), ScalarFuncSig::IntIsTrue => int_is_true_fn_meta::<KeepNullOff>(), ScalarFuncSig::IntIsTrueWithNull => int_is_true_fn_meta::<KeepNullOn>(), ScalarFuncSig::RealIsTrue => real_is_true_fn_meta::<KeepNullOff>(), ScalarFuncSig::RealIsTrueWithNull => real_is_true_fn_meta::<KeepNullOn>(), ScalarFuncSig::DecimalIsTrue => decimal_is_true_fn_meta::<KeepNullOff>(), ScalarFuncSig::DecimalIsTrueWithNull => decimal_is_true_fn_meta::<KeepNullOn>(), ScalarFuncSig::IntIsFalse => int_is_false_fn_meta::<KeepNullOff>(), ScalarFuncSig::IntIsFalseWithNull => int_is_false_fn_meta::<KeepNullOn>(), ScalarFuncSig::RealIsFalse => real_is_false_fn_meta::<KeepNullOff>(), ScalarFuncSig::RealIsFalseWithNull => real_is_false_fn_meta::<KeepNullOn>(), ScalarFuncSig::DecimalIsFalse => decimal_is_false_fn_meta::<KeepNullOff>(), ScalarFuncSig::DecimalIsFalseWithNull => decimal_is_false_fn_meta::<KeepNullOn>(), ScalarFuncSig::LogicalAnd => logical_and_fn_meta(), ScalarFuncSig::LogicalOr => logical_or_fn_meta(), ScalarFuncSig::LogicalXor => logical_xor_fn_meta(), ScalarFuncSig::UnaryNotInt => unary_not_int_fn_meta(), ScalarFuncSig::UnaryNotReal => unary_not_real_fn_meta(), ScalarFuncSig::UnaryNotDecimal => unary_not_decimal_fn_meta(), ScalarFuncSig::UnaryMinusInt => map_unary_minus_int_func(value, children)?, ScalarFuncSig::UnaryMinusReal => unary_minus_real_fn_meta(), ScalarFuncSig::UnaryMinusDecimal => unary_minus_decimal_fn_meta(), ScalarFuncSig::BitAndSig => bit_and_fn_meta(), ScalarFuncSig::BitOrSig => bit_or_fn_meta(), ScalarFuncSig::BitXorSig => bit_xor_fn_meta(), ScalarFuncSig::BitNegSig => 
bit_neg_fn_meta(), ScalarFuncSig::LeftShift => left_shift_fn_meta(), ScalarFuncSig::RightShift => right_shift_fn_meta(), // impl_other ScalarFuncSig::BitCount => bit_count_fn_meta(), // impl_string ScalarFuncSig::Bin => bin_fn_meta(), ScalarFuncSig::Length => length_fn_meta(), ScalarFuncSig::UnHex => unhex_fn_meta(), ScalarFuncSig::Locate2ArgsUtf8 => map_locate_2_args_utf8_sig(ft)?, ScalarFuncSig::Locate3ArgsUtf8 => map_locate_3_args_utf8_sig(ft)?, ScalarFuncSig::BitLength => bit_length_fn_meta(), ScalarFuncSig::Ord => map_ord_sig(ft)?, ScalarFuncSig::Concat => concat_fn_meta(), ScalarFuncSig::ConcatWs => concat_ws_fn_meta(), ScalarFuncSig::Ascii => ascii_fn_meta(), ScalarFuncSig::ReverseUtf8 => reverse_utf8_fn_meta(), ScalarFuncSig::Reverse => reverse_fn_meta(), ScalarFuncSig::HexIntArg => hex_int_arg_fn_meta(), ScalarFuncSig::HexStrArg => hex_str_arg_fn_meta(), ScalarFuncSig::LTrim => ltrim_fn_meta(), ScalarFuncSig::RTrim => rtrim_fn_meta(), ScalarFuncSig::Lpad => lpad_fn_meta(), ScalarFuncSig::LpadUtf8 => lpad_utf8_fn_meta(), ScalarFuncSig::Rpad => rpad_fn_meta(), ScalarFuncSig::RpadUtf8 => rpad_utf8_fn_meta(), ScalarFuncSig::AddStringAndDuration => add_string_and_duration_fn_meta(), ScalarFuncSig::SubStringAndDuration => sub_string_and_duration_fn_meta(), ScalarFuncSig::Trim1Arg => trim_1_arg_fn_meta(), ScalarFuncSig::Trim2Args => trim_2_args_fn_meta(), ScalarFuncSig::Trim3Args => trim_3_args_fn_meta(), ScalarFuncSig::FromBase64 => from_base64_fn_meta(), ScalarFuncSig::Replace => replace_fn_meta(), ScalarFuncSig::Left => left_fn_meta(), ScalarFuncSig::LeftUtf8 => left_utf8_fn_meta(), ScalarFuncSig::Right => right_fn_meta(), ScalarFuncSig::Insert => insert_fn_meta(), ScalarFuncSig::InsertUtf8 => insert_utf8_fn_meta(), ScalarFuncSig::RightUtf8 => right_utf8_fn_meta(), ScalarFuncSig::UpperUtf8 => map_upper_sig(value, children)?, ScalarFuncSig::Upper => upper_fn_meta(), ScalarFuncSig::Lower => map_lower_sig(value, children)?, ScalarFuncSig::LowerUtf8 => 
map_lower_utf8_sig(value, children)?, ScalarFuncSig::Locate2Args => locate_2_args_fn_meta(), ScalarFuncSig::Locate3Args => locate_3_args_fn_meta(), ScalarFuncSig::FieldInt => field_fn_meta::<Int>(), ScalarFuncSig::FieldReal => field_fn_meta::<Real>(), ScalarFuncSig::FieldString => field_bytes_fn_meta(), ScalarFuncSig::Elt => elt_fn_meta(), ScalarFuncSig::MakeSet => make_set_fn_meta(), ScalarFuncSig::Space => space_fn_meta(), ScalarFuncSig::SubstringIndex => substring_index_fn_meta(), ScalarFuncSig::Strcmp => map_strcmp_sig(ft)?, ScalarFuncSig::Instr => instr_fn_meta(), ScalarFuncSig::InstrUtf8 => instr_utf8_fn_meta(), ScalarFuncSig::Quote => quote_fn_meta(), ScalarFuncSig::OctInt => oct_int_fn_meta(), ScalarFuncSig::OctString => oct_string_fn_meta(), ScalarFuncSig::FindInSet => map_find_in_set_sig(ft)?, ScalarFuncSig::CharLength => char_length_fn_meta(), ScalarFuncSig::CharLengthUtf8 => char_length_utf8_fn_meta(), ScalarFuncSig::ToBase64 => to_base64_fn_meta(), ScalarFuncSig::Repeat => repeat_fn_meta(), ScalarFuncSig::Substring2Args => substring_2_args_fn_meta(), ScalarFuncSig::Substring3Args => substring_3_args_fn_meta(), ScalarFuncSig::Substring2ArgsUtf8 => substring_2_args_utf8_fn_meta(), ScalarFuncSig::Substring3ArgsUtf8 => substring_3_args_utf8_fn_meta(), // impl_time ScalarFuncSig::DateFormatSig => date_format_fn_meta(), ScalarFuncSig::Date => date_fn_meta(), ScalarFuncSig::SysDateWithFsp => sysdate_with_fsp_fn_meta(), ScalarFuncSig::SysDateWithoutFsp => sysdate_without_fsp_fn_meta(), ScalarFuncSig::WeekOfYear => week_of_year_fn_meta(), ScalarFuncSig::DayOfYear => day_of_year_fn_meta(), ScalarFuncSig::DayOfWeek => day_of_week_fn_meta(), ScalarFuncSig::DayOfMonth => day_of_month_fn_meta(), ScalarFuncSig::WeekWithMode => week_with_mode_fn_meta(), ScalarFuncSig::WeekWithoutMode => week_without_mode_fn_meta(), ScalarFuncSig::YearWeekWithMode => year_week_with_mode_fn_meta(), ScalarFuncSig::YearWeekWithoutMode => year_week_without_mode_fn_meta(), 
ScalarFuncSig::WeekDay => week_day_fn_meta(), ScalarFuncSig::ToDays => to_days_fn_meta(), ScalarFuncSig::ToSeconds => to_seconds_fn_meta(), ScalarFuncSig::DateDiff => date_diff_fn_meta(), ScalarFuncSig::NullTimeDiff => null_time_diff_fn_meta(), ScalarFuncSig::AddDatetimeAndDuration => add_datetime_and_duration_fn_meta(), ScalarFuncSig::AddDatetimeAndString => add_datetime_and_string_fn_meta(), ScalarFuncSig::AddDateAndString => add_date_and_string_fn_meta(), ScalarFuncSig::AddTimeDateTimeNull => add_time_datetime_null_fn_meta(), ScalarFuncSig::AddTimeDurationNull => add_time_duration_null_fn_meta(), ScalarFuncSig::AddTimeStringNull => add_time_string_null_fn_meta(), ScalarFuncSig::SubDatetimeAndDuration => sub_datetime_and_duration_fn_meta(), ScalarFuncSig::SubDatetimeAndString => sub_datetime_and_string_fn_meta(), ScalarFuncSig::FromDays => from_days_fn_meta(), ScalarFuncSig::Year => year_fn_meta(), ScalarFuncSig::Month => month_fn_meta(), ScalarFuncSig::MonthName => month_name_fn_meta(), ScalarFuncSig::MakeDate => make_date_fn_meta(), ScalarFuncSig::Hour => hour_fn_meta(), ScalarFuncSig::Minute => minute_fn_meta(), ScalarFuncSig::Second => second_fn_meta(), ScalarFuncSig::TimeToSec => time_to_sec_fn_meta(), ScalarFuncSig::MicroSecond => micro_second_fn_meta(), ScalarFuncSig::DayName => day_name_fn_meta(), ScalarFuncSig::PeriodAdd => period_add_fn_meta(), ScalarFuncSig::PeriodDiff => period_diff_fn_meta(), ScalarFuncSig::LastDay => last_day_fn_meta(), ScalarFuncSig::AddDurationAndDuration => add_duration_and_duration_fn_meta(), ScalarFuncSig::AddDurationAndString => add_duration_and_string_fn_meta(), ScalarFuncSig::SubDurationAndDuration => sub_duration_and_duration_fn_meta(), ScalarFuncSig::SubDurationAndString => sub_duration_and_string_fn_meta(), ScalarFuncSig::MakeTime => make_time_fn_meta(), ScalarFuncSig::DurationDurationTimeDiff => duration_duration_time_diff_fn_meta(), ScalarFuncSig::StringDurationTimeDiff => string_duration_time_diff_fn_meta(), 
ScalarFuncSig::StringStringTimeDiff => string_string_time_diff_fn_meta(), ScalarFuncSig::DurationStringTimeDiff => duration_string_time_diff_fn_meta(), ScalarFuncSig::Quarter => quarter_fn_meta(), _ => return Err(other_err!( "ScalarFunction {:?} is not supported in batch mode", value )), }) }
49.497494
122
0.686524
e95ab81a6f9e79cf25f6b1c879522fd1440713c3
643
use tokio::fs::File; use super::GlobalOpts; use crate::{common, prelude::*, protocol, router, Error, Result}; /// Launch remote endpoint #[derive(Debug, clap::Clap)] pub(super) struct Opts; pub(super) async fn run(_: GlobalOpts, _: Opts) -> Result<()> { // TODO: subscriber should forward loggings to the server. let stdin = File::open("/dev/stdin").await?; let stdout = File::create("/dev/stdout").await?; let reader = common::new_reader(stdin).err_into::<Error>(); let writer = common::new_writer(stdout).sink_map_err(Error::from); router::spawn(protocol::ProcessKind::Local, reader, writer).await?; Ok(()) }
32.15
71
0.671851
edf613d7d3dbcc36289fc2c3883c0c049ecd886e
8,938
use iomath::vectors::{ Vector2, UVector2, Vector3, Vector4 }; use iomath::quaternions::Quaternion; #[test] fn vector_2_empty() { let vector = Vector2::empty(); assert_eq!(vector, Vector2 { x: 0.0, y: 0.0 }); } #[test] fn vector_2_new() { let vector = Vector2::new(-0.5, 1.333); assert_eq!(vector, Vector2 { x: -0.5, y: 1.333 }); } #[test] fn vector_2_copy() { let vector_from = Vector2::new(0.0, 1.1); let vector_to = vector_from; assert_eq!(vector_from, Vector2::new(0.0, 1.1)); assert_eq!(vector_to, Vector2::new(0.0, 1.1)); } #[test] fn vector_2_from_scalar() { let vector = Vector2::from_scalar(1.7); assert_eq!(vector, Vector2 { x: 1.7, y: 1.7 }); } #[test] fn vector_2_from_vector_3() { let vector = Vector2::from(Vector3::new(1.9, 5.6, 0.7)); assert_eq!(vector, Vector2::new(1.9, 5.6)); } #[test] fn vector_2_from_vector_4() { let vector = Vector2::from(Vector4::new(1.9, 5.6, 0.7, 8.7)); assert_eq!(vector, Vector2::new(1.9, 5.6)); } #[test] fn vector_2_from_quaternion() { let vector = Vector2::from(Quaternion::identity()); assert_eq!(vector, Vector2::new(0.0, 0.0)); } #[test] fn vector_2_index() { let vector = Vector2::new(1.3, 2.7); assert_eq!(vector, Vector2::new(vector[0], vector[1])); } #[test] fn vector_2_index_out_of_bounds() { let vector = Vector2::new(7.3, 2.4); assert_eq!(vector, Vector2::new(vector[0], vector[256])) } #[test] fn vector_2_index_mut() { let mut vector = Vector2::new(5.6, 3.8); vector[0] *= 0.5; vector[1] *= 2.0; assert_eq!(vector, Vector2::new(2.8, 7.6)); } #[test] fn vector_2_index_mut_out_of_bounds() { let mut vector = Vector2::new(4.5, -7.6); vector[1024] = -1.0; assert_eq!(vector, Vector2::new(4.5, -1.0)); } #[test] fn vector_2_add_scalar() { let vector = Vector2::new(1.0, -2.0); let result = vector + 3.0; assert_eq!(result, Vector2::new(4.0, 1.0)); } #[test] fn vector_2_add_vector() { let first_add = Vector2::new(5.0, 6.0); let second_add = Vector2::new(-1.0, 4.5); let result = first_add + second_add; assert_eq!(result, Vector2::new(4.0, 
10.5)); } #[test] fn vector_2_add_assign_scalar() { let mut vector = Vector2::new(2.5, -1.0); vector += 0.5; assert_eq!(vector, Vector2::new(3.0, -0.5)); } #[test] fn vector_2_add_assign_vector() { let mut vector = Vector2::new(2.5, -1.0); vector += Vector2::new(0.5, 1.0); assert_eq!(vector, Vector2::new(3.0, 0.0)); } #[test] fn vector_2_sub_scalar() { let vector = Vector2::new(1.0, -2.0); let result = vector - 3.0; assert_eq!(result, Vector2::new(-2.0, -5.0)); } #[test] fn vector_2_sub_vector() { let minuend = Vector2::new(5.0, 6.0); let subtrahend = Vector2::new(-1.0, 4.5); let result = minuend - subtrahend; assert_eq!(result, Vector2::new(6.0, 1.5)); } #[test] fn vector_2_sub_assign_scalar() { let mut vector = Vector2::new(2.5, -1.0); vector -= 0.5; assert_eq!(vector, Vector2::new(2.0, -1.5)); } #[test] fn vector_2_sub_assign_vector() { let mut vector = Vector2::new(2.5, -1.0); vector -= Vector2::new(0.5, 1.0); assert_eq!(vector, Vector2::new(2.0, -2.0)); } #[test] fn vector_2_mul_scalar() { let vector = Vector2::new(1.0, -2.0); let result = vector * 3.0; assert_eq!(result, Vector2::new(3.0, -6.0)); } #[test] fn vector_2_mul_vector() { let first_mul = Vector2::new(5.0, 6.0); let second_mul = Vector2::new(-1.0, 4.5); let result = first_mul * second_mul; assert_eq!(result, Vector2::new(-5.0, 27.0)); } #[test] fn vector_2_mul_assign_scalar() { let mut vector = Vector2::new(2.5, -1.0); vector *= 0.5; assert_eq!(vector, Vector2::new(1.25, -0.5)); } #[test] fn vector_2_mul_assign_vector() { let mut vector = Vector2::new(2.5, -1.0); vector *= Vector2::new(4.0, -11.0); assert_eq!(vector, Vector2::new(10.0, 11.0)); } #[test] fn vector_2_div_scalar() { let vector = Vector2::new(1.0, -2.0); let result = vector / 3.0; assert_eq!(result, Vector2::new(0.33333334, -0.6666667)); } #[test] fn vector_2_div_vector() { let dividend = Vector2::new(5.0, 6.0); let divider = Vector2::new(-1.0, 4.5); let result = dividend / divider; assert_eq!(result, Vector2::new(-5.0, 1.3333334)); } 
#[test] fn vector_2_div_assign_scalar() { let mut vector = Vector2::new(2.5, -1.0); vector /= 0.5; assert_eq!(vector, Vector2::new(5.0, -2.0)); } #[test] fn vector_2_div_assign_vector() { let mut vector = Vector2::new(2.5, -1.0); vector /= Vector2::new(4.0, -11.0); assert_eq!(vector, Vector2::new(0.625, 0.09090909)); } #[test] fn vector_2_neg() { let vector = -Vector2::new(-1.0, 5.3); assert_eq!(vector, Vector2::new(1.0, -5.3)); } #[test] fn bit_vector_2_and_scalar() { let vector = UVector2::new(2, 4); let result = vector & 2; assert_eq!(result, UVector2::new(2, 0)); } #[test] fn bit_vector_2_and_vector() { let vector = UVector2::new(4, 1); let result = vector & UVector2::new(1, 1); assert_eq!(result, UVector2::new(0, 1)); } #[test] fn bit_vector_2_and_assign_scalar() { let mut vector = UVector2::new(3, 7); vector &= 3; assert_eq!(vector, UVector2::new(3, 3)); } #[test] fn bit_vector_2_and_assign_vector() { let mut vector = UVector2::new(9, 3); vector &= UVector2::new(3, 9); assert_eq!(vector, UVector2::new(1, 1)); } #[test] fn bit_vector_2_or_scalar() { let vector = UVector2::new(2, 4); let result = vector | 2; assert_eq!(result, UVector2::new(2, 6)); } #[test] fn bit_vector_2_or_vector() { let vector = UVector2::new(4, 1); let result = vector | UVector2::new(1, 1); assert_eq!(result, UVector2::new(5, 1)); } #[test] fn bit_vector_2_or_assign_scalar() { let mut vector = UVector2::new(3, 7); vector |= 3; assert_eq!(vector, UVector2::new(3, 7)); } #[test] fn bit_vector_2_or_assign_vector() { let mut vector = UVector2::new(9, 3); vector |= UVector2::new(3, 9); assert_eq!(vector, UVector2::new(11, 11)); } #[test] fn bit_vector_2_xor_scalar() { let vector = UVector2::new(2, 4); let result = vector ^ 2; assert_eq!(result, UVector2::new(0, 6)); } #[test] fn bit_vector_2_xor_vector() { let vector = UVector2::new(4, 1); let result = vector ^ UVector2::new(1, 1); assert_eq!(result, UVector2::new(5, 0)); } #[test] fn bit_vector_2_xor_assign_scalar() { let mut vector = 
UVector2::new(3, 7); vector ^= 3; assert_eq!(vector, UVector2::new(0, 4)); } #[test] fn bit_vector_2_xor_assign_vector() { let mut vector = UVector2::new(9, 3); vector ^= UVector2::new(3, 9); assert_eq!(vector, UVector2::new(10, 10)); } #[test] fn vector_2_rem_scalar() { let vector = Vector2::new(15.6, 11.0); let result = vector % 10.0; assert_eq!(result, Vector2::new(5.6000004, 1.0)); } #[test] fn vector_2_rem_vector() { let vector = Vector2::new(5.3, 7.0); let result = vector % Vector2::new(5.3, 3.5); assert_eq!(result, Vector2::new(0.0, 0.0)); } #[test] fn vector_2_rem_assign_scalar() { let mut vector = Vector2::new(15.6, 11.0); vector %= 10.0; assert_eq!(vector, Vector2::new(5.6000004, 1.0)); } #[test] fn vector_2_rem_assign_vector() { let mut vector = Vector2::new(5.3, 7.0); vector %= Vector2::new(5.3, 3.5); assert_eq!(vector, Vector2::new(0.0, 0.0)); } #[test] fn vector_2_shl_scalar() { let vector = UVector2::new(2, 3); let result = vector << 4; assert_eq!(result, UVector2::new(32, 48)); } #[test] fn vector_2_shl_vector() { let vector = UVector2::new(4, 5); let result = vector << UVector2::new(1, 2); assert_eq!(result, UVector2::new(8, 20)); } #[test] fn vector_2_shl_assign_scalar() { let mut vector = UVector2::new(2, 3); vector <<= 4; assert_eq!(vector, UVector2::new(32, 48)); } #[test] fn vector_2_shl_assign_vector() { let mut vector = UVector2::new(4, 5); vector <<= UVector2::new(1, 2); assert_eq!(vector, UVector2::new(8, 20)); } #[test] fn vector_2_shr_scalar() { let vector = UVector2::new(2, 10); let result = vector >> 3; assert_eq!(result, UVector2::new(0, 1)); } #[test] fn vector_2_shr_vector() { let vector = UVector2::new(17, 26); let result = vector >> UVector2::new(4, 2); assert_eq!(result, UVector2::new(1, 6)); } #[test] fn vector_2_shr_assign_scalar() { let mut vector = UVector2::new(2, 10); vector >>= 3; assert_eq!(vector, UVector2::new(0, 1)); } #[test] fn vector_2_shr_assign_vector() { let mut vector = UVector2::new(17, 26); vector >>= 
UVector2::new(4, 2); assert_eq!(vector, UVector2::new(1, 6)); } #[test] fn vector_2_debug_struct() { let vector = Vector2::new(11.0, 4.5); assert_eq!(format!("{:?}", vector), "TVector2<f32> { x: 11.0, y: 4.5 }"); }
21.180095
77
0.608637
765c0d1b4d717706b38a147987bcea5b328ec3b8
5,822
use proc_macro2::TokenStream; use quote::quote; #[proc_macro] pub fn assert_c(input: proc_macro::TokenStream) -> proc_macro::TokenStream { let input = TokenStream::from(input); let input_as_string = reconstruct(input); quote!( inline_c::run(inline_c::Language::C, #input_as_string).map_err(|e| panic!(e.to_string())).unwrap() ) .into() } #[proc_macro] pub fn assert_cxx(input: proc_macro::TokenStream) -> proc_macro::TokenStream { let input = TokenStream::from(input); let input_as_string = reconstruct(input); quote!( inline_c::run(inline_c::Language::Cxx, #input_as_string).map_err(|e| panic!(e.to_string())).unwrap() ) .into() } fn reconstruct(input: TokenStream) -> String { use proc_macro2::{Delimiter, Spacing, TokenTree::*}; let mut output = String::new(); let mut iterator = input.into_iter().peekable(); loop { match iterator.next() { Some(Punct(token)) => { let token_value = token.as_char(); match token_value { '#' => { output.push('\n'); output.push(token_value); match iterator.peek() { Some(Ident(include)) if include.to_string() == "include".to_string() => { iterator.next(); match iterator.next() { Some(Punct(punct)) => { if punct.as_char() != '<' { panic!( "Invalid opening token after `#include`, received `{:?}`.", token ) } output.push_str("include <"); loop { match iterator.next() { Some(Punct(punct)) => { let punct = punct.as_char(); if punct == '>' { break; } output.push(punct) } Some(Ident(ident)) => { output.push_str(&ident.to_string()) } token => panic!( "Invalid token in `#include` value, with `{:?}`.", token ), } } output.push('>'); output.push('\n'); } Some(Literal(literal)) => { output.push_str("include "); output.push_str(&literal.to_string()); output.push('\n'); } Some(token) => panic!( "Invalid opening token after `#include`, received `{:?}`.", token ), None => panic!("`#include` must be followed by `<` or `\"`."), } } _ => (), } } ';' => { output.push(token_value); output.push('\n'); } _ => { output.push(token_value); if token.spacing() == 
Spacing::Alone { output.push(' '); } } } } Some(Ident(ident)) => { output.push_str(&ident.to_string()); output.push(' '); } Some(Group(group)) => { let group_output = reconstruct(group.stream()); match group.delimiter() { Delimiter::Parenthesis => { output.push('('); output.push_str(&group_output); output.push(')'); } Delimiter::Brace => { output.push('{'); output.push('\n'); output.push_str(&group_output); output.push('\n'); output.push('}'); } Delimiter::Bracket => { output.push('['); output.push_str(&group_output); output.push(']'); } Delimiter::None => { output.push_str(&group_output); } } } Some(token) => { output.push_str(&token.to_string()); } None => break, } } output }
35.284848
108
0.305393
8a3fd32321ba39ca5861236418b6864330be3a7e
7,670
use geo_types::line_string::PointsIter; use std::iter::Rev; use {CoordinateType, LineString, Point}; pub(crate) fn twice_signed_ring_area<T>(linestring: &LineString<T>) -> T where T: CoordinateType, { if linestring.0.is_empty() || linestring.0.len() == 1 { return T::zero(); } let mut tmp = T::zero(); for line in linestring.lines() { tmp = tmp + line.determinant(); } tmp } enum EitherIter<T, I1, I2> where I1: Iterator<Item = T>, I2: Iterator<Item = T>, { A(I1), B(I2), } impl<T, I1, I2> Iterator for EitherIter<T, I1, I2> where I1: Iterator<Item = T>, I2: Iterator<Item = T>, { type Item = T; fn next(&mut self) -> Option<Self::Item> { match self { EitherIter::A(iter) => iter.next(), EitherIter::B(iter) => iter.next(), } } } /// Iterates through a list of `Point`s pub struct Points<'a, T>(EitherIter<Point<T>, PointsIter<'a, T>, Rev<PointsIter<'a, T>>>) where T: CoordinateType + 'a; impl<'a, T> Iterator for Points<'a, T> where T: CoordinateType, { type Item = Point<T>; fn next(&mut self) -> Option<Self::Item> { self.0.next() } } /// How a linestring is wound, clockwise or counter-clockwise #[derive(PartialEq, Clone, Debug, Eq)] pub enum WindingOrder { Clockwise, CounterClockwise, } /// Calculate, and work with, the winding order pub trait Winding<T> where T: CoordinateType, { /// Return the winding order of this object fn winding_order(&self) -> Option<WindingOrder>; /// True iff this clockwise fn is_cw(&self) -> bool { self.winding_order() == Some(WindingOrder::Clockwise) } /// True iff this is wound counterclockwise fn is_ccw(&self) -> bool { self.winding_order() == Some(WindingOrder::CounterClockwise) } /// Iterate over the points in a clockwise order /// /// The object isn't changed, and the points are returned either in order, or in reverse /// order, so that the resultant order makes it appear clockwise fn points_cw(&self) -> Points<T>; /// Iterate over the points in a counter-clockwise order /// /// The object isn't changed, and the points are returned either in 
order, or in reverse /// order, so that the resultant order makes it appear counter-clockwise fn points_ccw(&self) -> Points<T>; /// Change this objects's points so they are in clockwise winding order fn make_cw_winding(&mut self); /// Change this line's points so they are in counterclockwise winding order fn make_ccw_winding(&mut self); /// Return a clone of this object, but in the specified winding order fn clone_to_winding_order(&self, winding_order: WindingOrder) -> Self where Self: Sized + Clone, { let mut new: Self = self.clone(); new.make_winding_order(winding_order); new } /// Change the winding order so that it is in this winding order fn make_winding_order(&mut self, winding_order: WindingOrder) { match winding_order { WindingOrder::Clockwise => self.make_cw_winding(), WindingOrder::CounterClockwise => self.make_ccw_winding(), } } } impl<T> Winding<T> for LineString<T> where T: CoordinateType, { /// Returns the winding order of this line /// None if the winding order is undefined. 
fn winding_order(&self) -> Option<WindingOrder> { let shoelace = twice_signed_ring_area(self); if shoelace < T::zero() { Some(WindingOrder::Clockwise) } else if shoelace > T::zero() { Some(WindingOrder::CounterClockwise) } else if shoelace == T::zero() { None } else { // make compiler stop complaining unreachable!() } } /// Iterate over the points in a clockwise order /// /// The Linestring isn't changed, and the points are returned either in order, or in reverse /// order, so that the resultant order makes it appear clockwise fn points_cw(&self) -> Points<T> { match self.winding_order() { Some(WindingOrder::CounterClockwise) => Points(EitherIter::B(self.points_iter().rev())), _ => Points(EitherIter::A(self.points_iter())), } } /// Iterate over the points in a counter-clockwise order /// /// The Linestring isn't changed, and the points are returned either in order, or in reverse /// order, so that the resultant order makes it appear counter-clockwise fn points_ccw(&self) -> Points<T> { match self.winding_order() { Some(WindingOrder::Clockwise) => Points(EitherIter::B(self.points_iter().rev())), _ => Points(EitherIter::A(self.points_iter())), } } /// Change this line's points so they are in clockwise winding order fn make_cw_winding(&mut self) { if let Some(WindingOrder::CounterClockwise) = self.winding_order() { self.0.reverse(); } } /// Change this line's points so they are in counterclockwise winding order fn make_ccw_winding(&mut self) { if let Some(WindingOrder::Clockwise) = self.winding_order() { self.0.reverse(); } } } #[cfg(test)] mod test { use super::*; #[test] fn winding_order() { // 3 points forming a triangle let a = Point::new(0., 0.); let b = Point::new(2., 0.); let c = Point::new(1., 2.); // That triangle, but in clockwise ordering let cw_line = LineString::from(vec![a.0, c.0, b.0, a.0]); // That triangle, but in counterclockwise ordering let ccw_line = LineString::from(vec![a.0, b.0, c.0, a.0]); assert_eq!(cw_line.winding_order(), 
Some(WindingOrder::Clockwise)); assert_eq!(cw_line.is_cw(), true); assert_eq!(cw_line.is_ccw(), false); assert_eq!( ccw_line.winding_order(), Some(WindingOrder::CounterClockwise) ); assert_eq!(ccw_line.is_cw(), false); assert_eq!(ccw_line.is_ccw(), true); let cw_points1: Vec<_> = cw_line.points_cw().collect(); assert_eq!(cw_points1.len(), 4); assert_eq!(cw_points1[0], a); assert_eq!(cw_points1[1], c); assert_eq!(cw_points1[2], b); assert_eq!(cw_points1[3], a); let ccw_points1: Vec<_> = cw_line.points_ccw().collect(); assert_eq!(ccw_points1.len(), 4); assert_eq!(ccw_points1[0], a); assert_eq!(ccw_points1[1], b); assert_eq!(ccw_points1[2], c); assert_eq!(ccw_points1[3], a); assert_ne!(cw_points1, ccw_points1); let cw_points2: Vec<_> = ccw_line.points_cw().collect(); let ccw_points2: Vec<_> = ccw_line.points_ccw().collect(); // cw_line and ccw_line are wound differently, but the ordered winding iterator should have // make them similar assert_eq!(cw_points2, cw_points2); assert_eq!(ccw_points2, ccw_points2); // test make_clockwise_winding let mut new_line1 = ccw_line.clone(); new_line1.make_cw_winding(); assert_eq!(new_line1.winding_order(), Some(WindingOrder::Clockwise)); assert_eq!(new_line1, cw_line); assert_ne!(new_line1, ccw_line); // test make_counterclockwise_winding let mut new_line2 = cw_line.clone(); new_line2.make_ccw_winding(); assert_eq!( new_line2.winding_order(), Some(WindingOrder::CounterClockwise) ); assert_ne!(new_line2, cw_line); assert_eq!(new_line2, ccw_line); } }
31.052632
100
0.614472
62e38ad9bfa662a84ecd25a2356fce2828332e74
72,101
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! This module contains TyKind and its major components use hir::def_id::DefId; use infer::canonical::Canonical; use mir::interpret::ConstValue; use middle::region; use polonius_engine::Atom; use rustc_data_structures::indexed_vec::Idx; use ty::subst::{Substs, Subst, Kind, UnpackedKind}; use ty::{self, AdtDef, TypeFlags, Ty, TyCtxt, TypeFoldable}; use ty::{List, TyS, ParamEnvAnd, ParamEnv}; use util::captures::Captures; use mir::interpret::{Scalar, Pointer}; use std::iter; use std::cmp::Ordering; use rustc_target::spec::abi; use syntax::ast::{self, Ident}; use syntax::symbol::{keywords, InternedString}; use serialize; use hir; use self::InferTy::*; use self::TyKind::*; #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] pub struct TypeAndMut<'tcx> { pub ty: Ty<'tcx>, pub mutbl: hir::Mutability, } #[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash, RustcEncodable, RustcDecodable, Copy)] /// A "free" region `fr` can be interpreted as "some region /// at least as big as the scope `fr.scope`". pub struct FreeRegion { pub scope: DefId, pub bound_region: BoundRegion, } #[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash, RustcEncodable, RustcDecodable, Copy)] pub enum BoundRegion { /// An anonymous region parameter for a given fn (&T) BrAnon(u32), /// Named region parameters for functions (a in &'a T) /// /// The def-id is needed to distinguish free regions in /// the event of shadowing. 
BrNamed(DefId, InternedString), /// Fresh bound identifiers created during GLB computations. BrFresh(u32), /// Anonymous region for the implicit env pointer parameter /// to a closure BrEnv, } impl BoundRegion { pub fn is_named(&self) -> bool { match *self { BoundRegion::BrNamed(..) => true, _ => false, } } } /// N.B., If you change this, you'll probably want to change the corresponding /// AST structure in `libsyntax/ast.rs` as well. #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] pub enum TyKind<'tcx> { /// The primitive boolean type. Written as `bool`. Bool, /// The primitive character type; holds a Unicode scalar value /// (a non-surrogate code point). Written as `char`. Char, /// A primitive signed integer type. For example, `i32`. Int(ast::IntTy), /// A primitive unsigned integer type. For example, `u32`. Uint(ast::UintTy), /// A primitive floating-point type. For example, `f64`. Float(ast::FloatTy), /// Structures, enumerations and unions. /// /// Substs here, possibly against intuition, *may* contain `Param`s. /// That is, even after substitution it is possible that there are type /// variables. This happens when the `Adt` corresponds to an ADT /// definition and not a concrete use of it. Adt(&'tcx AdtDef, &'tcx Substs<'tcx>), Foreign(DefId), /// The pointee of a string slice. Written as `str`. Str, /// An array with the given length. Written as `[T; n]`. Array(Ty<'tcx>, &'tcx ty::Const<'tcx>), /// The pointee of an array slice. Written as `[T]`. Slice(Ty<'tcx>), /// A raw pointer. Written as `*mut T` or `*const T` RawPtr(TypeAndMut<'tcx>), /// A reference; a pointer with an associated lifetime. Written as /// `&'a mut T` or `&'a T`. Ref(Region<'tcx>, Ty<'tcx>, hir::Mutability), /// The anonymous type of a function declaration/definition. Each /// function has a unique type, which is output (for a function /// named `foo` returning an `i32`) as `fn() -> i32 {foo}`. 
    ///
    /// For example the type of `bar` here:
    ///
    /// ```rust
    /// fn foo() -> i32 { 1 }
    /// let bar = foo; // bar: fn() -> i32 {foo}
    /// ```
    FnDef(DefId, &'tcx Substs<'tcx>),

    /// A pointer to a function. Written as `fn() -> i32`.
    ///
    /// For example the type of `bar` here:
    ///
    /// ```rust
    /// fn foo() -> i32 { 1 }
    /// let bar: fn() -> i32 = foo;
    /// ```
    FnPtr(PolyFnSig<'tcx>),

    /// A trait, defined with `trait`. This is the type of a trait
    /// object: a list of existential predicates plus the lifetime bound.
    Dynamic(Binder<&'tcx List<ExistentialPredicate<'tcx>>>, ty::Region<'tcx>),

    /// The anonymous type of a closure. Used to represent the type of
    /// `|a| a`.
    Closure(DefId, ClosureSubsts<'tcx>),

    /// The anonymous type of a generator. Used to represent the type of
    /// `|a| yield a`.
    Generator(DefId, GeneratorSubsts<'tcx>, hir::GeneratorMovability),

    /// A type representing the types stored inside a generator.
    /// This should only appear in GeneratorInteriors.
    GeneratorWitness(Binder<&'tcx List<Ty<'tcx>>>),

    /// The never type `!`
    Never,

    /// A tuple type. For example, `(i32, bool)`.
    Tuple(&'tcx List<Ty<'tcx>>),

    /// The projection of an associated type. For example,
    /// `<T as Trait<..>>::N`.
    Projection(ProjectionTy<'tcx>),

    /// A placeholder type used when we do not have enough information
    /// to normalize the projection of an associated type to an
    /// existing concrete type. Currently only used with chalk-engine.
    UnnormalizedProjection(ProjectionTy<'tcx>),

    /// Opaque (`impl Trait`) type found in a return type.
    /// The `DefId` comes either from
    /// * the `impl Trait` ast::Ty node,
    /// * or the `existential type` declaration
    /// The substitutions are for the generics of the function in question.
    /// After typeck, the concrete type can be found in the `types` map.
    Opaque(DefId, &'tcx Substs<'tcx>),

    /// A type parameter; for example, `T` in `fn f<T>(x: T) {}`
    Param(ParamTy),

    /// A type variable used during type checking.
    Infer(InferTy),

    /// A placeholder for a type which could not be computed; this is
    /// propagated to avoid useless error messages.
    Error,
}

/// A closure can be modeled as a struct that looks like:
///
///     struct Closure<'l0...'li, T0...Tj, CK, CS, U0...Uk> {
///         upvar0: U0,
///         ...
///         upvark: Uk
///     }
///
/// where:
///
/// - 'l0...'li and T0...Tj are the lifetime and type parameters
///   in scope on the function that defined the closure,
/// - CK represents the *closure kind* (Fn vs FnMut vs FnOnce). This
///   is rather hackily encoded via a scalar type. See
///   `TyS::to_opt_closure_kind` for details.
/// - CS represents the *closure signature*, representing as a `fn()`
///   type. For example, `fn(u32, u32) -> u32` would mean that the closure
///   implements `CK<(u32, u32), Output = u32>`, where `CK` is the trait
///   specified above.
/// - U0...Uk are type parameters representing the types of its upvars
///   (borrowed, if appropriate; that is, if Ui represents a by-ref upvar,
///   and the up-var has the type `Foo`, then `Ui = &Foo`).
///
/// So, for example, given this function:
///
///     fn foo<'a, T>(data: &'a mut T) {
///          do(|| data.count += 1)
///     }
///
/// the type of the closure would be something like:
///
///     struct Closure<'a, T, U0> {
///         data: U0
///     }
///
/// Note that the type of the upvar is not specified in the struct.
/// You may wonder how the impl would then be able to use the upvar,
/// if it doesn't know its type? The answer is that the impl is
/// (conceptually) not fully generic over Closure but rather tied to
/// instances with the expected upvar types:
///
///     impl<'b, 'a, T> FnMut() for Closure<'a, T, &'b mut &'a mut T> {
///         ...
///     }
///
/// You can see that the *impl* fully specified the type of the upvar
/// and thus knows full well that `data` has type `&'b mut &'a mut T`.
/// (Here, I am assuming that `data` is mut-borrowed.)
///
/// Now, the last question you may ask is: Why include the upvar types
/// as extra type parameters? The reason for this design is that the
/// upvar types can reference lifetimes that are internal to the
/// creating function.
/// In my example above, for example, the lifetime
/// `'b` represents the scope of the closure itself; this is some
/// subset of `foo`, probably just the scope of the call to
/// `do()`. If we just had the lifetime/type parameters from the
/// enclosing function, we couldn't name this lifetime `'b`. Note that
/// there can also be lifetimes in the types of the upvars themselves,
/// if one of them happens to be a reference to something that the
/// creating fn owns.
///
/// OK, you say, so why not create a more minimal set of parameters
/// that just includes the extra lifetime parameters? The answer is
/// primarily that it would be hard --- we don't know at the time when
/// we create the closure type what the full types of the upvars are,
/// nor do we know which are borrowed and which are not. In this
/// design, we can just supply a fresh type parameter and figure that
/// out later.
///
/// All right, you say, but why include the type parameters from the
/// original function then? The answer is that codegen may need them
/// when monomorphizing, and they may not appear in the upvars. A
/// closure could capture no variables but still make use of some
/// in-scope type parameter with a bound (e.g., if our example above
/// had an extra `U: Default`, and the closure called `U::default()`).
///
/// There is another reason. This design (implicitly) prohibits
/// closures from capturing themselves (except via a trait
/// object). This simplifies closure inference considerably, since it
/// means that when we infer the kind of a closure or its upvars, we
/// don't have to handle cycles where the decisions we make for
/// closure C wind up influencing the decisions we ought to make for
/// closure C (which would then require fixed point iteration to
/// handle). Plus it fixes an ICE. :P
///
/// ## Generators
///
/// Perhaps surprisingly, `ClosureSubsts` are also used for
/// generators. In that case, what is written above is only half-true
/// -- the set of type parameters is similar, but the role of CK and
/// CS are different. CK represents the "yield type" and CS
/// represents the "return type" of the generator.
///
/// It'd be nice to split this struct into ClosureSubsts and
/// GeneratorSubsts, I believe. -nmatsakis
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash,
         Debug, RustcEncodable, RustcDecodable)]
pub struct ClosureSubsts<'tcx> {
    /// Lifetime and type parameters from the enclosing function,
    /// concatenated with the types of the upvars.
    ///
    /// These are separated out because codegen wants to pass them around
    /// when monomorphizing.
    pub substs: &'tcx Substs<'tcx>,
}

/// Struct returned by `split()`. Note that these are subslices of the
/// parent slice and not canonical substs themselves.
struct SplitClosureSubsts<'tcx> {
    /// Scalar type encoding the closure kind (see `TyS::to_opt_closure_kind`).
    closure_kind_ty: Ty<'tcx>,
    /// A `fn`-pointer type carrying the closure's signature.
    closure_sig_ty: Ty<'tcx>,
    /// The types of the captured upvars.
    upvar_kinds: &'tcx [Kind<'tcx>],
}

impl<'tcx> ClosureSubsts<'tcx> {
    /// Divides the closure substs into their respective
    /// components. Single source of truth with respect to the
    /// ordering: parent generics, then kind, then signature, then upvars.
    fn split(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) -> SplitClosureSubsts<'tcx> {
        let generics = tcx.generics_of(def_id);
        let parent_len = generics.parent_count;
        SplitClosureSubsts {
            closure_kind_ty: self.substs.type_at(parent_len),
            closure_sig_ty: self.substs.type_at(parent_len + 1),
            upvar_kinds: &self.substs[parent_len + 2..],
        }
    }

    /// Iterates over the types of this closure's captured upvars.
    /// ICEs (via `bug!`) if any upvar subst is not a type.
    #[inline]
    pub fn upvar_tys(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) ->
        impl Iterator<Item=Ty<'tcx>> + 'tcx
    {
        let SplitClosureSubsts { upvar_kinds, .. } = self.split(def_id, tcx);
        upvar_kinds.iter().map(|t| {
            if let UnpackedKind::Type(ty) = t.unpack() {
                ty
            } else {
                bug!("upvar should be type")
            }
        })
    }

    /// Returns the closure kind for this closure; may return a type
    /// variable during inference. To get the closure kind during
    /// inference, use `infcx.closure_kind(def_id, substs)`.
    pub fn closure_kind_ty(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) -> Ty<'tcx> {
        self.split(def_id, tcx).closure_kind_ty
    }

    /// Returns the type representing the closure signature for this
    /// closure; may contain type variables during inference. To get
    /// the closure signature during inference, use
    /// `infcx.fn_sig(def_id)`.
    pub fn closure_sig_ty(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) -> Ty<'tcx> {
        self.split(def_id, tcx).closure_sig_ty
    }

    /// Returns the closure kind for this closure; only usable outside
    /// of an inference context, because in that context we know that
    /// there are no type variables.
    ///
    /// If you have an inference context, use `infcx.closure_kind()`.
    pub fn closure_kind(self, def_id: DefId, tcx: TyCtxt<'_, 'tcx, 'tcx>) -> ty::ClosureKind {
        self.split(def_id, tcx).closure_kind_ty.to_opt_closure_kind().unwrap()
    }

    /// Extracts the signature from the closure; only usable outside
    /// of an inference context, because in that context we know that
    /// there are no type variables.
    ///
    /// If you have an inference context, use `infcx.closure_sig()`.
    pub fn closure_sig(self, def_id: DefId, tcx: TyCtxt<'_, 'tcx, 'tcx>) -> ty::PolyFnSig<'tcx> {
        match self.closure_sig_ty(def_id, tcx).sty {
            ty::FnPtr(sig) => sig,
            // Any non-fn-pointer signature type is a compiler bug.
            ref t => bug!("closure_sig_ty is not a fn-ptr: {:?}", t),
        }
    }
}

#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash,
         Debug, RustcEncodable, RustcDecodable)]
pub struct GeneratorSubsts<'tcx> {
    pub substs: &'tcx Substs<'tcx>,
}

/// Struct returned by `GeneratorSubsts::split()`; subslices/entries of the
/// parent substs, analogous to `SplitClosureSubsts`.
struct SplitGeneratorSubsts<'tcx> {
    yield_ty: Ty<'tcx>,
    return_ty: Ty<'tcx>,
    witness: Ty<'tcx>,
    upvar_kinds: &'tcx [Kind<'tcx>],
}

impl<'tcx> GeneratorSubsts<'tcx> {
    /// Divides the generator substs into their respective components.
    /// Single source of truth for the ordering: parent generics, then
    /// yield type, return type, witness, and finally the upvars.
    fn split(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) -> SplitGeneratorSubsts<'tcx> {
        let generics = tcx.generics_of(def_id);
        let parent_len = generics.parent_count;
        SplitGeneratorSubsts {
            yield_ty: self.substs.type_at(parent_len),
            return_ty: self.substs.type_at(parent_len + 1),
            witness: self.substs.type_at(parent_len + 2),
            upvar_kinds: &self.substs[parent_len + 3..],
        }
    }

    /// This describes the types that can be contained in a generator.
    /// It will be a type variable initially and unified in the last stages of typeck of a body.
    /// It contains a tuple of all the types that could end up on a generator frame.
    /// The state transformation MIR pass may only produce layouts which mention types
    /// in this tuple. Upvars are not counted here.
    pub fn witness(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) -> Ty<'tcx> {
        self.split(def_id, tcx).witness
    }

    /// Iterates over the types of this generator's captured upvars.
    /// ICEs (via `bug!`) if any upvar subst is not a type.
    #[inline]
    pub fn upvar_tys(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) ->
        impl Iterator<Item=Ty<'tcx>> + 'tcx
    {
        let SplitGeneratorSubsts { upvar_kinds, .. } = self.split(def_id, tcx);
        upvar_kinds.iter().map(|t| {
            if let UnpackedKind::Type(ty) = t.unpack() {
                ty
            } else {
                bug!("upvar should be type")
            }
        })
    }

    /// Returns the type representing the yield type of the generator.
    pub fn yield_ty(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) -> Ty<'tcx> {
        self.split(def_id, tcx).yield_ty
    }

    /// Returns the type representing the return type of the generator.
    pub fn return_ty(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) -> Ty<'tcx> {
        self.split(def_id, tcx).return_ty
    }

    /// Return the "generator signature", which consists of its yield
    /// and return types.
    ///
    /// NB. Some bits of the code prefers to see this wrapped in a
    /// binder, but it never contains bound regions. Probably this
    /// function should be removed.
    pub fn poly_sig(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) -> PolyGenSig<'tcx> {
        ty::Binder::dummy(self.sig(def_id, tcx))
    }

    /// Return the "generator signature", which consists of its yield
    /// and return types.
    pub fn sig(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) -> GenSig<'tcx> {
        ty::GenSig {
            yield_ty: self.yield_ty(def_id, tcx),
            return_ty: self.return_ty(def_id, tcx),
        }
    }
}

impl<'a, 'gcx, 'tcx> GeneratorSubsts<'tcx> {
    /// This returns the types of the MIR locals which had to be stored across suspension points.
    /// It is calculated in rustc_mir::transform::generator::StateTransform.
    /// All the types here must be in the tuple in GeneratorInterior.
    pub fn state_tys(
        self,
        def_id: DefId,
        tcx: TyCtxt<'a, 'gcx, 'tcx>,
    ) -> impl Iterator<Item=Ty<'tcx>> + Captures<'gcx> + 'a {
        let state = tcx.generator_layout(def_id).fields.iter();
        state.map(move |d| d.ty.subst(tcx, self.substs))
    }

    /// These are the types of the fields of a generator which
    /// are available before the generator transformation.
    /// It includes the upvars and the state discriminant which is u32.
    pub fn pre_transforms_tys(self, def_id: DefId, tcx: TyCtxt<'a, 'gcx, 'tcx>) ->
        impl Iterator<Item=Ty<'tcx>> + 'a
    {
        self.upvar_tys(def_id, tcx).chain(iter::once(tcx.types.u32))
    }

    /// These are the types of all the fields stored in a generator.
    /// It includes the upvars, state types and the state discriminant which is u32.
    pub fn field_tys(self, def_id: DefId, tcx: TyCtxt<'a, 'gcx, 'tcx>) ->
        impl Iterator<Item=Ty<'tcx>> + Captures<'gcx> + 'a
    {
        self.pre_transforms_tys(def_id, tcx).chain(self.state_tys(def_id, tcx))
    }
}

/// Either the substs of a closure or of a generator; lets callers ask
/// for the upvar types without caring which of the two they have.
#[derive(Debug, Copy, Clone)]
pub enum UpvarSubsts<'tcx> {
    Closure(ClosureSubsts<'tcx>),
    Generator(GeneratorSubsts<'tcx>),
}

impl<'tcx> UpvarSubsts<'tcx> {
    /// Iterates over the upvar types of either variant.
    /// ICEs (via `bug!`) if any upvar subst is not a type.
    #[inline]
    pub fn upvar_tys(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) ->
        impl Iterator<Item=Ty<'tcx>> + 'tcx
    {
        let upvar_kinds = match self {
            UpvarSubsts::Closure(substs) => substs.split(def_id, tcx).upvar_kinds,
            UpvarSubsts::Generator(substs) => substs.split(def_id, tcx).upvar_kinds,
        };
        upvar_kinds.iter().map(|t| {
            if let UnpackedKind::Type(ty) = t.unpack() {
                ty
            } else {
                bug!("upvar should be type")
            }
        })
    }
}

#[derive(Debug, Copy, Clone, PartialEq, PartialOrd, Ord, Eq, Hash,
         RustcEncodable, RustcDecodable)]
pub enum ExistentialPredicate<'tcx> {
    /// e.g. Iterator
    Trait(ExistentialTraitRef<'tcx>),
    /// e.g. Iterator::Item = T
    Projection(ExistentialProjection<'tcx>),
    /// e.g. Send
    AutoTrait(DefId),
}

impl<'a, 'gcx, 'tcx> ExistentialPredicate<'tcx> {
    /// Compares via an ordering that will not change if modules are reordered or other changes are
    /// made to the tree. In particular, this ordering is preserved across incremental compilations.
    /// The ordering groups variants as `Trait < Projection < AutoTrait`;
    /// within a group, items are compared by their def-path hashes, which
    /// are stable across incremental compilations.
    pub fn stable_cmp(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, other: &Self) -> Ordering {
        use self::ExistentialPredicate::*;
        match (*self, *other) {
            (Trait(_), Trait(_)) => Ordering::Equal,
            (Projection(ref a), Projection(ref b)) =>
                tcx.def_path_hash(a.item_def_id).cmp(&tcx.def_path_hash(b.item_def_id)),
            (AutoTrait(ref a), AutoTrait(ref b)) =>
                tcx.trait_def(*a).def_path_hash.cmp(&tcx.trait_def(*b).def_path_hash),
            (Trait(_), _) => Ordering::Less,
            (Projection(_), Trait(_)) => Ordering::Greater,
            (Projection(_), _) => Ordering::Less,
            (AutoTrait(_), _) => Ordering::Greater,
        }
    }
}

impl<'a, 'gcx, 'tcx> Binder<ExistentialPredicate<'tcx>> {
    /// Turns this existential predicate into a full `ty::Predicate` by
    /// supplying the erased `Self` type.
    pub fn with_self_ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, self_ty: Ty<'tcx>)
        -> ty::Predicate<'tcx> {
        use ty::ToPredicate;
        match *self.skip_binder() {
            ExistentialPredicate::Trait(tr) =>
                Binder(tr).with_self_ty(tcx, self_ty).to_predicate(),
            ExistentialPredicate::Projection(p) =>
                ty::Predicate::Projection(Binder(p.with_self_ty(tcx, self_ty))),
            ExistentialPredicate::AutoTrait(did) => {
                let trait_ref = Binder(ty::TraitRef {
                    def_id: did,
                    substs: tcx.mk_substs_trait(self_ty, &[]),
                });
                trait_ref.to_predicate()
            }
        }
    }
}

impl<'tcx> serialize::UseSpecializedDecodable for &'tcx List<ExistentialPredicate<'tcx>> {}

impl<'tcx> List<ExistentialPredicate<'tcx>> {
    /// Returns the principal trait of this predicate list.
    /// ICEs (via `bug!`) if the first predicate is not `Trait(..)`.
    pub fn principal(&self) -> ExistentialTraitRef<'tcx> {
        match self[0] {
            ExistentialPredicate::Trait(tr) => tr,
            other => bug!("first predicate is {:?}", other),
        }
    }

    /// Iterates over the `Projection` predicates in this list.
    #[inline]
    pub fn projection_bounds<'a>(&'a self) ->
        impl Iterator<Item=ExistentialProjection<'tcx>> + 'a {
        self.iter().filter_map(|predicate| {
            match *predicate {
                ExistentialPredicate::Projection(p) => Some(p),
                _ => None,
            }
        })
    }

    /// Iterates over the `AutoTrait` predicates in this list.
    #[inline]
    pub fn auto_traits<'a>(&'a self) -> impl Iterator<Item=DefId> + 'a {
        self.iter().filter_map(|predicate| {
            match *predicate {
                ExistentialPredicate::AutoTrait(d) => Some(d),
                _ => None
            }
        })
    }
}

impl<'tcx> Binder<&'tcx List<ExistentialPredicate<'tcx>>> {
    /// Like `List::principal`, but re-wraps the result in this binder.
    pub fn principal(&self) -> PolyExistentialTraitRef<'tcx> {
        Binder::bind(self.skip_binder().principal())
    }

    #[inline]
    pub fn projection_bounds<'a>(&'a self) ->
        impl Iterator<Item=PolyExistentialProjection<'tcx>> + 'a {
        self.skip_binder().projection_bounds().map(Binder::bind)
    }

    #[inline]
    pub fn auto_traits<'a>(&'a self) -> impl Iterator<Item=DefId> + 'a {
        self.skip_binder().auto_traits()
    }

    /// Iterates over all predicates, each individually re-wrapped in
    /// this binder.
    pub fn iter<'a>(&'a self)
        -> impl DoubleEndedIterator<Item=Binder<ExistentialPredicate<'tcx>>> + 'tcx {
        self.skip_binder().iter().cloned().map(Binder::bind)
    }
}

/// A complete reference to a trait. These take numerous guises in syntax,
/// but perhaps the most recognizable form is in a where clause:
///
///     T : Foo<U>
///
/// This would be represented by a trait-reference where the def-id is the
/// def-id for the trait `Foo` and the substs define `T` as parameter 0,
/// and `U` as parameter 1.
///
/// Trait references also appear in object types like `Foo<U>`, but in
/// that case the `Self` parameter is absent from the substitutions.
///
/// Note that a `TraitRef` introduces a level of region binding, to
/// account for higher-ranked trait bounds like `T : for<'a> Foo<&'a
/// U>` or higher-ranked object types.
#[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
pub struct TraitRef<'tcx> {
    pub def_id: DefId,
    pub substs: &'tcx Substs<'tcx>,
}

impl<'tcx> TraitRef<'tcx> {
    /// Constructs a `TraitRef` from its two components.
    pub fn new(def_id: DefId, substs: &'tcx Substs<'tcx>) -> TraitRef<'tcx> {
        TraitRef { def_id: def_id, substs: substs }
    }

    /// Returns a TraitRef of the form `P0: Foo<P1..Pn>` where `Pi`
    /// are the parameters defined on trait.
    pub fn identity<'a, 'gcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, def_id: DefId) -> TraitRef<'tcx> {
        TraitRef {
            def_id,
            substs: Substs::identity_for_item(tcx, def_id),
        }
    }

    /// Returns the `Self` type of the trait reference (parameter 0).
    pub fn self_ty(&self) -> Ty<'tcx> {
        self.substs.type_at(0)
    }

    pub fn input_types<'a>(&'a self) -> impl DoubleEndedIterator<Item=Ty<'tcx>> + 'a {
        // Select only the "input types" from a trait-reference.
        // For now this is all the types that appear in the
        // trait-reference, but it should eventually exclude
        // associated types.
        self.substs.types()
    }

    /// Builds the trait-ref for `trait_id` from a method's substs by
    /// truncating them to the trait's own parameter count.
    pub fn from_method(tcx: TyCtxt<'_, '_, 'tcx>,
                       trait_id: DefId,
                       substs: &Substs<'tcx>)
                       -> ty::TraitRef<'tcx> {
        let defs = tcx.generics_of(trait_id);

        ty::TraitRef {
            def_id: trait_id,
            substs: tcx.intern_substs(&substs[..defs.params.len()])
        }
    }
}

pub type PolyTraitRef<'tcx> = Binder<TraitRef<'tcx>>;

impl<'tcx> PolyTraitRef<'tcx> {
    pub fn self_ty(&self) -> Ty<'tcx> {
        self.skip_binder().self_ty()
    }

    pub fn def_id(&self) -> DefId {
        self.skip_binder().def_id
    }

    pub fn to_poly_trait_predicate(&self) -> ty::PolyTraitPredicate<'tcx> {
        // Note that we preserve binding levels
        Binder(ty::TraitPredicate { trait_ref: self.skip_binder().clone() })
    }
}

/// An existential reference to a trait, where `Self` is erased.
/// For example, the trait object `Trait<'a, 'b, X, Y>` is:
///
///     exists T. T: Trait<'a, 'b, X, Y>
///
/// The substitutions don't include the erased `Self`, only trait
/// type and lifetime parameters (`[X, Y]` and `['a, 'b]` above).
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash,
         RustcEncodable, RustcDecodable)]
pub struct ExistentialTraitRef<'tcx> {
    pub def_id: DefId,
    pub substs: &'tcx Substs<'tcx>,
}

impl<'a, 'gcx, 'tcx> ExistentialTraitRef<'tcx> {
    pub fn input_types<'b>(&'b self) -> impl DoubleEndedIterator<Item=Ty<'tcx>> + 'b {
        // Select only the "input types" from a trait-reference. For
        // now this is all the types that appear in the
        // trait-reference, but it should eventually exclude
        // associated types.
        self.substs.types()
    }

    /// Converts a full `TraitRef` into an existential one by dropping
    /// its `Self` type (parameter 0).
    pub fn erase_self_ty(tcx: TyCtxt<'a, 'gcx, 'tcx>,
                         trait_ref: ty::TraitRef<'tcx>)
                         -> ty::ExistentialTraitRef<'tcx> {
        // Assert there is a Self.
        trait_ref.substs.type_at(0);

        ty::ExistentialTraitRef {
            def_id: trait_ref.def_id,
            substs: tcx.intern_substs(&trait_ref.substs[1..])
        }
    }

    /// Object types don't have a self-type specified. Therefore, when
    /// we convert the principal trait-ref into a normal trait-ref,
    /// you must give *some* self-type. A common choice is `mk_err()`
    /// or some placeholder type.
    pub fn with_self_ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, self_ty: Ty<'tcx>)
        -> ty::TraitRef<'tcx>  {
        // otherwise the escaping regions would be captured by the binder
        // debug_assert!(!self_ty.has_escaping_regions());

        ty::TraitRef {
            def_id: self.def_id,
            substs: tcx.mk_substs_trait(self_ty, self.substs)
        }
    }
}

pub type PolyExistentialTraitRef<'tcx> = Binder<ExistentialTraitRef<'tcx>>;

impl<'tcx> PolyExistentialTraitRef<'tcx> {
    pub fn def_id(&self) -> DefId {
        self.skip_binder().def_id
    }

    /// Object types don't have a self-type specified. Therefore, when
    /// we convert the principal trait-ref into a normal trait-ref,
    /// you must give *some* self-type. A common choice is `mk_err()`
    /// or some placeholder type.
    pub fn with_self_ty(&self, tcx: TyCtxt<'_, '_, 'tcx>,
                        self_ty: Ty<'tcx>)
                        -> ty::PolyTraitRef<'tcx>  {
        self.map_bound(|trait_ref| trait_ref.with_self_ty(tcx, self_ty))
    }
}

/// Binder is a binder for higher-ranked lifetimes. It is part of the
/// compiler's representation for things like `for<'a> Fn(&'a isize)`
/// (which would be represented by the type `PolyTraitRef ==
/// Binder<TraitRef>`). Note that when we instantiate,
/// erase, or otherwise "discharge" these bound regions, we change the
/// type from `Binder<T>` to just `T` (see
/// e.g. `liberate_late_bound_regions`).
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug,
         RustcEncodable, RustcDecodable)]
pub struct Binder<T>(T);

impl<T> Binder<T> {
    /// Wraps `value` in a binder, asserting that `value` does not
    /// contain any bound regions that would be bound by the
    /// binder. This is commonly used to 'inject' a value T into a
    /// different binding level.
    pub fn dummy<'tcx>(value: T) -> Binder<T>
        where T: TypeFoldable<'tcx>
    {
        // Debug-build check that the "no escaping bound regions" claim holds.
        debug_assert!(!value.has_escaping_regions());
        Binder(value)
    }

    /// Wraps `value` in a binder, binding late-bound regions (if any).
    pub fn bind<'tcx>(value: T) -> Binder<T> {
        Binder(value)
    }

    /// Skips the binder and returns the "bound" value. This is a
    /// risky thing to do because it's easy to get confused about
    /// debruijn indices and the like. It is usually better to
    /// discharge the binder using `no_late_bound_regions` or
    /// `replace_late_bound_regions` or something like
    /// that. `skip_binder` is only valid when you are either
    /// extracting data that has nothing to do with bound regions, you
    /// are doing some sort of test that does not involve bound
    /// regions, or you are being very careful about your depth
    /// accounting.
    ///
    /// Some examples where `skip_binder` is reasonable:
    ///
    /// - extracting the def-id from a PolyTraitRef;
    /// - comparing the self type of a PolyTraitRef to see if it is equal to
    ///   a type parameter `X`, since the type `X` does not reference any regions
    pub fn skip_binder(&self) -> &T {
        &self.0
    }

    /// Converts `&Binder<T>` into `Binder<&T>` without discharging the binder.
    pub fn as_ref(&self) -> Binder<&T> {
        Binder(&self.0)
    }

    /// Maps over the bound value by reference; see `map_bound`.
    pub fn map_bound_ref<F, U>(&self, f: F) -> Binder<U>
        where F: FnOnce(&T) -> U
    {
        self.as_ref().map_bound(f)
    }

    /// Applies `f` to the bound value, keeping the binder level intact.
    pub fn map_bound<F, U>(self, f: F) -> Binder<U>
        where F: FnOnce(T) -> U
    {
        Binder(f(self.0))
    }

    /// Unwraps and returns the value within, but only if it contains
    /// no bound regions at all. (In other words, if this binder --
    /// and indeed any enclosing binder -- doesn't bind anything at
    /// all.) Otherwise, returns `None`.
    ///
    /// (One could imagine having a method that just unwraps a single
    /// binder, but permits late-bound regions bound by enclosing
    /// binders, but that would require adjusting the debruijn
    /// indices, and given the shallow binding structure we often use,
    /// would not be that useful.)
    pub fn no_late_bound_regions<'tcx>(self) -> Option<T>
        where T : TypeFoldable<'tcx>
    {
        if self.skip_binder().has_escaping_regions() {
            None
        } else {
            Some(self.skip_binder().clone())
        }
    }

    /// Given two things that have the same binder level,
    /// and an operation that wraps on their contents, execute the operation
    /// and then wrap its result.
    ///
    /// `f` should consider bound regions at depth 1 to be free, and
    /// anything it produces with bound regions at depth 1 will be
    /// bound in the resulting return value.
    pub fn fuse<U,F,R>(self, u: Binder<U>, f: F) -> Binder<R>
        where F: FnOnce(T, U) -> R
    {
        Binder(f(self.0, u.0))
    }

    /// Split the contents into two things that share the same binder
    /// level as the original, returning two distinct binders.
    ///
    /// `f` should consider bound regions at depth 1 to be free, and
    /// anything it produces with bound regions at depth 1 will be
    /// bound in the resulting return values.
    pub fn split<U,V,F>(self, f: F) -> (Binder<U>, Binder<V>)
        where F: FnOnce(T) -> (U, V)
    {
        let (u, v) = f(self.0);
        (Binder(u), Binder(v))
    }
}

/// Represents the projection of an associated type. In explicit UFCS
/// form this would be written `<T as Trait<..>>::N`.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash,
         Debug, RustcEncodable, RustcDecodable)]
pub struct ProjectionTy<'tcx> {
    /// The parameters of the associated item.
    pub substs: &'tcx Substs<'tcx>,

    /// The DefId of the TraitItem for the associated type N.
    ///
    /// Note that this is not the DefId of the TraitRef containing this
    /// associated type, which is in tcx.associated_item(item_def_id).container.
    pub item_def_id: DefId,
}

impl<'a, 'tcx> ProjectionTy<'tcx> {
    /// Construct a ProjectionTy by searching the trait from trait_ref for the
    /// associated item named item_name.
    pub fn from_ref_and_name(
        tcx: TyCtxt<'_, '_, '_>, trait_ref: ty::TraitRef<'tcx>, item_name: Ident
    ) -> ProjectionTy<'tcx> {
        // Panics (`unwrap`) if the trait has no associated type with a
        // hygienically-equal name.
        let item_def_id = tcx.associated_items(trait_ref.def_id).find(|item| {
            item.kind == ty::AssociatedKind::Type &&
            tcx.hygienic_eq(item_name, item.ident, trait_ref.def_id)
        }).unwrap().def_id;

        ProjectionTy {
            substs: trait_ref.substs,
            item_def_id,
        }
    }

    /// Extracts the underlying trait reference from this projection.
    /// For example, if this is a projection of `<T as Iterator>::Item`,
    /// then this function would return a `T: Iterator` trait reference.
    pub fn trait_ref(&self, tcx: TyCtxt<'_, '_, '_>) -> ty::TraitRef<'tcx> {
        let def_id = tcx.associated_item(self.item_def_id).container.id();
        ty::TraitRef {
            def_id,
            substs: self.substs,
        }
    }

    /// Returns the `Self` type of the projection (parameter 0).
    pub fn self_ty(&self) -> Ty<'tcx> {
        self.substs.type_at(0)
    }
}

/// The yield/return type pair of a generator; see `GeneratorSubsts::sig`.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
pub struct GenSig<'tcx> {
    pub yield_ty: Ty<'tcx>,
    pub return_ty: Ty<'tcx>,
}

pub type PolyGenSig<'tcx> = Binder<GenSig<'tcx>>;

impl<'tcx> PolyGenSig<'tcx> {
    pub fn yield_ty(&self) -> ty::Binder<Ty<'tcx>> {
        self.map_bound_ref(|sig| sig.yield_ty)
    }
    pub fn return_ty(&self) -> ty::Binder<Ty<'tcx>> {
        self.map_bound_ref(|sig| sig.return_ty)
    }
}

/// Signature of a function type, which I have arbitrarily
/// decided to use to refer to the input/output types.
///
/// - `inputs` is the list of arguments and their modes.
/// - `output` is the return type.
/// - `variadic` indicates whether this is a variadic function.
///   (only true for foreign fns)
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash,
         RustcEncodable, RustcDecodable)]
pub struct FnSig<'tcx> {
    /// Invariant: the argument types followed by the return type as the
    /// final element (see `inputs`/`output` below).
    pub inputs_and_output: &'tcx List<Ty<'tcx>>,
    pub variadic: bool,
    pub unsafety: hir::Unsafety,
    pub abi: abi::Abi,
}

impl<'tcx> FnSig<'tcx> {
    /// The argument types: everything except the last element.
    pub fn inputs(&self) -> &'tcx [Ty<'tcx>] {
        &self.inputs_and_output[..self.inputs_and_output.len() - 1]
    }

    /// The return type: the last element of `inputs_and_output`.
    pub fn output(&self) -> Ty<'tcx> {
        self.inputs_and_output[self.inputs_and_output.len() - 1]
    }
}

pub type PolyFnSig<'tcx> = Binder<FnSig<'tcx>>;

/// Binder-preserving accessors mirroring those on `FnSig`.
impl<'tcx> PolyFnSig<'tcx> {
    pub fn inputs(&self) -> Binder<&'tcx [Ty<'tcx>]> {
        self.map_bound_ref(|fn_sig| fn_sig.inputs())
    }
    pub fn input(&self, index: usize) -> ty::Binder<Ty<'tcx>> {
        self.map_bound_ref(|fn_sig| fn_sig.inputs()[index])
    }
    pub fn inputs_and_output(&self) -> ty::Binder<&'tcx List<Ty<'tcx>>> {
        self.map_bound_ref(|fn_sig| fn_sig.inputs_and_output)
    }
    pub fn output(&self) -> ty::Binder<Ty<'tcx>> {
        self.map_bound_ref(|fn_sig| fn_sig.output())
    }
    pub fn variadic(&self) -> bool {
        self.skip_binder().variadic
    }
    pub fn unsafety(&self) -> hir::Unsafety {
        self.skip_binder().unsafety
    }
    pub fn abi(&self) -> abi::Abi {
        self.skip_binder().abi
    }
}

pub type CanonicalPolyFnSig<'tcx> = Canonical<'tcx, Binder<FnSig<'tcx>>>;

/// A type parameter: its index within the generics list plus its name.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash,
         RustcEncodable, RustcDecodable)]
pub struct ParamTy {
    pub idx: u32,
    pub name: InternedString,
}

impl<'a, 'gcx, 'tcx> ParamTy {
    pub fn new(index: u32, name: InternedString) -> ParamTy {
        ParamTy { idx: index, name: name }
    }

    /// The implicit `Self` type parameter, always at index 0.
    pub fn for_self() -> ParamTy {
        ParamTy::new(0, keywords::SelfType.name().as_interned_str())
    }

    pub fn for_def(def: &ty::GenericParamDef) -> ParamTy {
        ParamTy::new(def.index, def.name)
    }

    pub fn to_ty(self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> {
        tcx.mk_ty_param(self.idx, self.name)
    }

    pub fn is_self(&self) -> bool {
        // FIXME(#50125): Ignoring `Self` with `idx != 0` might lead to weird behavior elsewhere,
        // but this should only be possible when
        // using `-Z continue-parse-after-error` like
        // `compile-fail/issue-36638.rs`.
        self.name == keywords::SelfType.name().as_str() && self.idx == 0
    }
}

/// A [De Bruijn index][dbi] is a standard means of representing
/// regions (and perhaps later types) in a higher-ranked setting. In
/// particular, imagine a type like this:
///
///     for<'a> fn(for<'b> fn(&'b isize, &'a isize), &'a char)
///     ^          ^            |        |         |
///     |          |            |        |         |
///     |          +------------+ 0      |         |
///     |                                |         |
///     +--------------------------------+ 1       |
///     |                                          |
///     +------------------------------------------+ 0
///
/// In this type, there are two binders (the outer fn and the inner
/// fn). We need to be able to determine, for any given region, which
/// fn type it is bound by, the inner or the outer one. There are
/// various ways you can do this, but a De Bruijn index is one of the
/// more convenient and has some nice properties. The basic idea is to
/// count the number of binders, inside out. Some examples should help
/// clarify what I mean.
///
/// Let's start with the reference type `&'b isize` that is the first
/// argument to the inner function. This region `'b` is assigned a De
/// Bruijn index of 0, meaning "the innermost binder" (in this case, a
/// fn). The region `'a` that appears in the second argument type (`&'a
/// isize`) would then be assigned a De Bruijn index of 1, meaning "the
/// second-innermost binder". (These indices are written on the arrays
/// in the diagram).
///
/// What is interesting is that De Bruijn index attached to a particular
/// variable will vary depending on where it appears. For example,
/// the final type `&'a char` also refers to the region `'a` declared on
/// the outermost fn. But this time, this reference is not nested within
/// any other binders (i.e., it is not an argument to the inner fn, but
/// rather the outer one). Therefore, in this case, it is assigned a
/// De Bruijn index of 0, because the innermost binder in that location
/// is the outer fn.
///
/// [dbi]: http://en.wikipedia.org/wiki/De_Bruijn_index
newtype_index! {
    pub struct DebruijnIndex {
        DEBUG_FORMAT = "DebruijnIndex({})",
        const INNERMOST = 0,
    }
}

pub type Region<'tcx> = &'tcx RegionKind;

/// Representation of regions.
///
/// Unlike types, most region variants are "fictitious", not concrete,
/// regions. Among these, `ReStatic`, `ReEmpty` and `ReScope` are the only
/// ones representing concrete regions.
///
/// ## Bound Regions
///
/// These are regions that are stored behind a binder and must be substituted
/// with some concrete region before being used. There are 2 kind of
/// bound regions: early-bound, which are bound in an item's Generics,
/// and are substituted by a Substs, and late-bound, which are part of
/// higher-ranked types (e.g. `for<'a> fn(&'a ())`) and are substituted by
/// the likes of `liberate_late_bound_regions`. The distinction exists
/// because higher-ranked lifetimes aren't supported in all places. See [1][2].
///
/// Unlike Param-s, bound regions are not supposed to exist "in the wild"
/// outside their binder, e.g. in types passed to type inference, and
/// should first be substituted (by placeholder regions, free regions,
/// or region variables).
///
/// ## Placeholder and Free Regions
///
/// One often wants to work with bound regions without knowing their precise
/// identity. For example, when checking a function, the lifetime of a borrow
/// can end up being assigned to some region parameter. In these cases,
/// it must be ensured that bounds on the region can't be accidentally
/// assumed without being checked.
///
/// To do this, we replace the bound regions with placeholder markers,
/// which don't satisfy any relation not explicitly provided.
///
/// There are 2 kinds of placeholder regions in rustc: `ReFree` and
/// `RePlaceholder`. When checking an item's body, `ReFree` is supposed
/// to be used.
These also support explicit bounds: both the internally-stored /// *scope*, which the region is assumed to outlive, as well as other /// relations stored in the `FreeRegionMap`. Note that these relations /// aren't checked when you `make_subregion` (or `eq_types`), only by /// `resolve_regions_and_report_errors`. /// /// When working with higher-ranked types, some region relations aren't /// yet known, so you can't just call `resolve_regions_and_report_errors`. /// `RePlaceholder` is designed for this purpose. In these contexts, /// there's also the risk that some inference variable laying around will /// get unified with your placeholder region: if you want to check whether /// `for<'a> Foo<'_>: 'a`, and you substitute your bound region `'a` /// with a placeholder region `'%a`, the variable `'_` would just be /// instantiated to the placeholder region `'%a`, which is wrong because /// the inference variable is supposed to satisfy the relation /// *for every value of the placeholder region*. To ensure that doesn't /// happen, you can use `leak_check`. This is more clearly explained /// by the [rustc guide]. /// /// [1]: http://smallcultfollowing.com/babysteps/blog/2013/10/29/intermingled-parameter-lists/ /// [2]: http://smallcultfollowing.com/babysteps/blog/2013/11/04/intermingled-parameter-lists/ /// [rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/traits/hrtb.html #[derive(Clone, PartialEq, Eq, Hash, Copy, RustcEncodable, RustcDecodable, PartialOrd, Ord)] pub enum RegionKind { // Region bound in a type or fn declaration which will be // substituted 'early' -- that is, at the same time when type // parameters are substituted. ReEarlyBound(EarlyBoundRegion), // Region bound in a function scope, which will be substituted when the // function is called. 
ReLateBound(DebruijnIndex, BoundRegion), /// When checking a function body, the types of all arguments and so forth /// that refer to bound region parameters are modified to refer to free /// region parameters. ReFree(FreeRegion), /// A concrete region naming some statically determined scope /// (e.g. an expression or sequence of statements) within the /// current function. ReScope(region::Scope), /// Static data that has an "infinite" lifetime. Top in the region lattice. ReStatic, /// A region variable. Should not exist after typeck. ReVar(RegionVid), /// A placeholder region - basically the higher-ranked version of ReFree. /// Should not exist after typeck. RePlaceholder(ty::Placeholder), /// Empty lifetime is for data that is never accessed. /// Bottom in the region lattice. We treat ReEmpty somewhat /// specially; at least right now, we do not generate instances of /// it during the GLB computations, but rather /// generate an error instead. This is to improve error messages. /// The only way to get an instance of ReEmpty is to have a region /// variable with no constraints. ReEmpty, /// Erased region, used by trait selection, in MIR and during codegen. ReErased, /// These are regions bound in the "defining type" for a /// closure. They are used ONLY as part of the /// `ClosureRegionRequirements` that are produced by MIR borrowck. /// See `ClosureRegionRequirements` for more details. ReClosureBound(RegionVid), /// Canonicalized region, used only when preparing a trait query. 
ReCanonical(BoundTyIndex), } impl<'tcx> serialize::UseSpecializedDecodable for Region<'tcx> {} #[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, Debug, PartialOrd, Ord)] pub struct EarlyBoundRegion { pub def_id: DefId, pub index: u32, pub name: InternedString, } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] pub struct TyVid { pub index: u32, } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] pub struct IntVid { pub index: u32, } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] pub struct FloatVid { pub index: u32, } newtype_index! { pub struct RegionVid { DEBUG_FORMAT = custom, } } impl Atom for RegionVid { fn index(self) -> usize { Idx::index(self) } } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] pub enum InferTy { TyVar(TyVid), IntVar(IntVid), FloatVar(FloatVid), /// A `FreshTy` is one that is generated as a replacement for an /// unbound type variable. This is convenient for caching etc. See /// `infer::freshen` for more details. FreshTy(u32), FreshIntTy(u32), FreshFloatTy(u32), /// Bound type variable, used only when preparing a trait query. BoundTy(BoundTy), } newtype_index! { pub struct BoundTyIndex { .. } } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] pub struct BoundTy { pub level: DebruijnIndex, pub var: BoundTyIndex, } impl_stable_hash_for!(struct BoundTy { level, var }); /// A `ProjectionPredicate` for an `ExistentialTraitRef`. 
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] pub struct ExistentialProjection<'tcx> { pub item_def_id: DefId, pub substs: &'tcx Substs<'tcx>, pub ty: Ty<'tcx>, } pub type PolyExistentialProjection<'tcx> = Binder<ExistentialProjection<'tcx>>; impl<'a, 'tcx, 'gcx> ExistentialProjection<'tcx> { /// Extracts the underlying existential trait reference from this projection. /// For example, if this is a projection of `exists T. <T as Iterator>::Item == X`, /// then this function would return a `exists T. T: Iterator` existential trait /// reference. pub fn trait_ref(&self, tcx: TyCtxt<'_, '_, '_>) -> ty::ExistentialTraitRef<'tcx> { let def_id = tcx.associated_item(self.item_def_id).container.id(); ty::ExistentialTraitRef{ def_id, substs: self.substs, } } pub fn with_self_ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, self_ty: Ty<'tcx>) -> ty::ProjectionPredicate<'tcx> { // otherwise the escaping regions would be captured by the binders debug_assert!(!self_ty.has_escaping_regions()); ty::ProjectionPredicate { projection_ty: ty::ProjectionTy { item_def_id: self.item_def_id, substs: tcx.mk_substs_trait(self_ty, self.substs), }, ty: self.ty, } } } impl<'a, 'tcx, 'gcx> PolyExistentialProjection<'tcx> { pub fn with_self_ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, self_ty: Ty<'tcx>) -> ty::PolyProjectionPredicate<'tcx> { self.map_bound(|p| p.with_self_ty(tcx, self_ty)) } pub fn item_def_id(&self) -> DefId { return self.skip_binder().item_def_id; } } impl DebruijnIndex { /// Returns the resulting index when this value is moved into /// `amount` number of new binders. So e.g. if you had /// /// for<'a> fn(&'a x) /// /// and you wanted to change to /// /// for<'a> fn(for<'b> fn(&'a x)) /// /// you would need to shift the index for `'a` into 1 new binder. 
#[must_use] pub fn shifted_in(self, amount: u32) -> DebruijnIndex { DebruijnIndex::from_u32(self.as_u32() + amount) } /// Update this index in place by shifting it "in" through /// `amount` number of binders. pub fn shift_in(&mut self, amount: u32) { *self = self.shifted_in(amount); } /// Returns the resulting index when this value is moved out from /// `amount` number of new binders. #[must_use] pub fn shifted_out(self, amount: u32) -> DebruijnIndex { DebruijnIndex::from_u32(self.as_u32() - amount) } /// Update in place by shifting out from `amount` binders. pub fn shift_out(&mut self, amount: u32) { *self = self.shifted_out(amount); } /// Adjusts any Debruijn Indices so as to make `to_binder` the /// innermost binder. That is, if we have something bound at `to_binder`, /// it will now be bound at INNERMOST. This is an appropriate thing to do /// when moving a region out from inside binders: /// /// ``` /// for<'a> fn(for<'b> for<'c> fn(&'a u32), _) /// // Binder: D3 D2 D1 ^^ /// ``` /// /// Here, the region `'a` would have the debruijn index D3, /// because it is the bound 3 binders out. However, if we wanted /// to refer to that region `'a` in the second argument (the `_`), /// those two binders would not be in scope. In that case, we /// might invoke `shift_out_to_binder(D3)`. This would adjust the /// debruijn index of `'a` to D1 (the innermost binder). /// /// If we invoke `shift_out_to_binder` and the region is in fact /// bound by one of the binders we are shifting out of, that is an /// error (and should fail an assertion failure). pub fn shifted_out_to_binder(self, to_binder: DebruijnIndex) -> Self { self.shifted_out(to_binder.as_u32() - INNERMOST.as_u32()) } } impl_stable_hash_for!(struct DebruijnIndex { private }); /// Region utilities impl RegionKind { /// Is this region named by the user? 
pub fn has_name(&self) -> bool { match *self { RegionKind::ReEarlyBound(ebr) => ebr.has_name(), RegionKind::ReLateBound(_, br) => br.is_named(), RegionKind::ReFree(fr) => fr.bound_region.is_named(), RegionKind::ReScope(..) => false, RegionKind::ReStatic => true, RegionKind::ReVar(..) => false, RegionKind::RePlaceholder(placeholder) => placeholder.name.is_named(), RegionKind::ReEmpty => false, RegionKind::ReErased => false, RegionKind::ReClosureBound(..) => false, RegionKind::ReCanonical(..) => false, } } pub fn is_late_bound(&self) -> bool { match *self { ty::ReLateBound(..) => true, _ => false, } } pub fn bound_at_or_above_binder(&self, index: DebruijnIndex) -> bool { match *self { ty::ReLateBound(debruijn, _) => debruijn >= index, _ => false, } } /// Adjusts any Debruijn Indices so as to make `to_binder` the /// innermost binder. That is, if we have something bound at `to_binder`, /// it will now be bound at INNERMOST. This is an appropriate thing to do /// when moving a region out from inside binders: /// /// ``` /// for<'a> fn(for<'b> for<'c> fn(&'a u32), _) /// // Binder: D3 D2 D1 ^^ /// ``` /// /// Here, the region `'a` would have the debruijn index D3, /// because it is the bound 3 binders out. However, if we wanted /// to refer to that region `'a` in the second argument (the `_`), /// those two binders would not be in scope. In that case, we /// might invoke `shift_out_to_binder(D3)`. This would adjust the /// debruijn index of `'a` to D1 (the innermost binder). /// /// If we invoke `shift_out_to_binder` and the region is in fact /// bound by one of the binders we are shifting out of, that is an /// error (and should fail an assertion failure). pub fn shifted_out_to_binder(&self, to_binder: ty::DebruijnIndex) -> RegionKind { match *self { ty::ReLateBound(debruijn, r) => ty::ReLateBound( debruijn.shifted_out_to_binder(to_binder), r, ), r => r } } pub fn keep_in_local_tcx(&self) -> bool { if let ty::ReVar(..) 
= self { true } else { false } } pub fn type_flags(&self) -> TypeFlags { let mut flags = TypeFlags::empty(); if self.keep_in_local_tcx() { flags = flags | TypeFlags::KEEP_IN_LOCAL_TCX; } match *self { ty::ReVar(..) => { flags = flags | TypeFlags::HAS_FREE_REGIONS; flags = flags | TypeFlags::HAS_RE_INFER; } ty::RePlaceholder(..) => { flags = flags | TypeFlags::HAS_FREE_REGIONS; flags = flags | TypeFlags::HAS_RE_SKOL; } ty::ReLateBound(..) => { flags = flags | TypeFlags::HAS_RE_LATE_BOUND; } ty::ReEarlyBound(..) => { flags = flags | TypeFlags::HAS_FREE_REGIONS; flags = flags | TypeFlags::HAS_RE_EARLY_BOUND; } ty::ReEmpty | ty::ReStatic | ty::ReFree { .. } | ty::ReScope { .. } => { flags = flags | TypeFlags::HAS_FREE_REGIONS; } ty::ReErased => { } ty::ReCanonical(..) => { flags = flags | TypeFlags::HAS_FREE_REGIONS; flags = flags | TypeFlags::HAS_CANONICAL_VARS; } ty::ReClosureBound(..) => { flags = flags | TypeFlags::HAS_FREE_REGIONS; } } match *self { ty::ReStatic | ty::ReEmpty | ty::ReErased | ty::ReLateBound(..) => (), _ => flags = flags | TypeFlags::HAS_FREE_LOCAL_NAMES, } debug!("type_flags({:?}) = {:?}", self, flags); flags } /// Given an early-bound or free region, returns the def-id where it was bound. /// For example, consider the regions in this snippet of code: /// /// ``` /// impl<'a> Foo { /// ^^ -- early bound, declared on an impl /// /// fn bar<'b, 'c>(x: &self, y: &'b u32, z: &'c u64) where 'static: 'c /// ^^ ^^ ^ anonymous, late-bound /// | early-bound, appears in where-clauses /// late-bound, appears only in fn args /// {..} /// } /// ``` /// /// Here, `free_region_binding_scope('a)` would return the def-id /// of the impl, and for all the other highlighted regions, it /// would return the def-id of the function. In other cases (not shown), this /// function might return the def-id of a closure. 
pub fn free_region_binding_scope(&self, tcx: TyCtxt<'_, '_, '_>) -> DefId { match self { ty::ReEarlyBound(br) => { tcx.parent_def_id(br.def_id).unwrap() } ty::ReFree(fr) => fr.scope, _ => bug!("free_region_binding_scope invoked on inappropriate region: {:?}", self), } } } /// Type utilities impl<'a, 'gcx, 'tcx> TyS<'tcx> { pub fn is_unit(&self) -> bool { match self.sty { Tuple(ref tys) => tys.is_empty(), _ => false, } } pub fn is_never(&self) -> bool { match self.sty { Never => true, _ => false, } } pub fn is_primitive(&self) -> bool { match self.sty { Bool | Char | Int(_) | Uint(_) | Float(_) => true, _ => false, } } pub fn is_ty_var(&self) -> bool { match self.sty { Infer(TyVar(_)) => true, _ => false, } } pub fn is_ty_infer(&self) -> bool { match self.sty { Infer(_) => true, _ => false, } } pub fn is_phantom_data(&self) -> bool { if let Adt(def, _) = self.sty { def.is_phantom_data() } else { false } } pub fn is_bool(&self) -> bool { self.sty == Bool } pub fn is_param(&self, index: u32) -> bool { match self.sty { ty::Param(ref data) => data.idx == index, _ => false, } } pub fn is_self(&self) -> bool { match self.sty { Param(ref p) => p.is_self(), _ => false, } } pub fn is_slice(&self) -> bool { match self.sty { RawPtr(TypeAndMut { ty, .. 
}) | Ref(_, ty, _) => match ty.sty { Slice(_) | Str => true, _ => false, }, _ => false } } #[inline] pub fn is_simd(&self) -> bool { match self.sty { Adt(def, _) => def.repr.simd(), _ => false, } } pub fn sequence_element_type(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> { match self.sty { Array(ty, _) | Slice(ty) => ty, Str => tcx.mk_mach_uint(ast::UintTy::U8), _ => bug!("sequence_element_type called on non-sequence value: {}", self), } } pub fn simd_type(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> { match self.sty { Adt(def, substs) => { def.non_enum_variant().fields[0].ty(tcx, substs) } _ => bug!("simd_type called on invalid type") } } pub fn simd_size(&self, _cx: TyCtxt<'_, '_, '_>) -> usize { match self.sty { Adt(def, _) => def.non_enum_variant().fields.len(), _ => bug!("simd_size called on invalid type") } } pub fn is_region_ptr(&self) -> bool { match self.sty { Ref(..) => true, _ => false, } } pub fn is_mutable_pointer(&self) -> bool { match self.sty { RawPtr(TypeAndMut { mutbl: hir::Mutability::MutMutable, .. }) | Ref(_, _, hir::Mutability::MutMutable) => true, _ => false } } pub fn is_unsafe_ptr(&self) -> bool { match self.sty { RawPtr(_) => return true, _ => return false, } } /// Returns `true` if this type is an `Arc<T>`. pub fn is_arc(&self) -> bool { match self.sty { Adt(def, _) => def.is_arc(), _ => false, } } /// Returns `true` if this type is an `Rc<T>`. pub fn is_rc(&self) -> bool { match self.sty { Adt(def, _) => def.is_rc(), _ => false, } } pub fn is_box(&self) -> bool { match self.sty { Adt(def, _) => def.is_box(), _ => false, } } /// panics if called on any type other than `Box<T>` pub fn boxed_ty(&self) -> Ty<'tcx> { match self.sty { Adt(def, substs) if def.is_box() => substs.type_at(0), _ => bug!("`boxed_ty` is called on non-box type {:?}", self), } } /// A scalar type is one that denotes an atomic datum, with no sub-components. 
/// (A RawPtr is scalar because it represents a non-managed pointer, so its /// contents are abstract to rustc.) pub fn is_scalar(&self) -> bool { match self.sty { Bool | Char | Int(_) | Float(_) | Uint(_) | Infer(IntVar(_)) | Infer(FloatVar(_)) | FnDef(..) | FnPtr(_) | RawPtr(_) => true, _ => false } } /// Returns true if this type is a floating point type and false otherwise. pub fn is_floating_point(&self) -> bool { match self.sty { Float(_) | Infer(FloatVar(_)) => true, _ => false, } } pub fn is_trait(&self) -> bool { match self.sty { Dynamic(..) => true, _ => false, } } pub fn is_enum(&self) -> bool { match self.sty { Adt(adt_def, _) => { adt_def.is_enum() } _ => false, } } pub fn is_closure(&self) -> bool { match self.sty { Closure(..) => true, _ => false, } } pub fn is_generator(&self) -> bool { match self.sty { Generator(..) => true, _ => false, } } pub fn is_integral(&self) -> bool { match self.sty { Infer(IntVar(_)) | Int(_) | Uint(_) => true, _ => false } } pub fn is_fresh_ty(&self) -> bool { match self.sty { Infer(FreshTy(_)) => true, _ => false, } } pub fn is_fresh(&self) -> bool { match self.sty { Infer(FreshTy(_)) => true, Infer(FreshIntTy(_)) => true, Infer(FreshFloatTy(_)) => true, _ => false, } } pub fn is_char(&self) -> bool { match self.sty { Char => true, _ => false, } } pub fn is_fp(&self) -> bool { match self.sty { Infer(FloatVar(_)) | Float(_) => true, _ => false } } pub fn is_numeric(&self) -> bool { self.is_integral() || self.is_fp() } pub fn is_signed(&self) -> bool { match self.sty { Int(_) => true, _ => false, } } pub fn is_machine(&self) -> bool { match self.sty { Int(ast::IntTy::Isize) | Uint(ast::UintTy::Usize) => false, Int(..) | Uint(..) | Float(..) => true, _ => false, } } pub fn has_concrete_skeleton(&self) -> bool { match self.sty { Param(_) | Infer(_) | Error => false, _ => true, } } /// Returns the type and mutability of *ty. /// /// The parameter `explicit` indicates if this is an *explicit* dereference. 
/// Some types---notably unsafe ptrs---can only be dereferenced explicitly. pub fn builtin_deref(&self, explicit: bool) -> Option<TypeAndMut<'tcx>> { match self.sty { Adt(def, _) if def.is_box() => { Some(TypeAndMut { ty: self.boxed_ty(), mutbl: hir::MutImmutable, }) }, Ref(_, ty, mutbl) => Some(TypeAndMut { ty, mutbl }), RawPtr(mt) if explicit => Some(mt), _ => None, } } /// Returns the type of `ty[i]`. pub fn builtin_index(&self) -> Option<Ty<'tcx>> { match self.sty { Array(ty, _) | Slice(ty) => Some(ty), _ => None, } } pub fn fn_sig(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> PolyFnSig<'tcx> { match self.sty { FnDef(def_id, substs) => { tcx.fn_sig(def_id).subst(tcx, substs) } FnPtr(f) => f, _ => bug!("Ty::fn_sig() called on non-fn type: {:?}", self) } } pub fn is_fn(&self) -> bool { match self.sty { FnDef(..) | FnPtr(_) => true, _ => false, } } pub fn is_impl_trait(&self) -> bool { match self.sty { Opaque(..) => true, _ => false, } } pub fn ty_adt_def(&self) -> Option<&'tcx AdtDef> { match self.sty { Adt(adt, _) => Some(adt), _ => None, } } /// Returns the regions directly referenced from this type (but /// not types reachable from this type via `walk_tys`). This /// ignores late-bound regions binders. pub fn regions(&self) -> Vec<ty::Region<'tcx>> { match self.sty { Ref(region, _, _) => { vec![region] } Dynamic(ref obj, region) => { let mut v = vec![region]; v.extend(obj.principal().skip_binder().substs.regions()); v } Adt(_, substs) | Opaque(_, substs) => { substs.regions().collect() } Closure(_, ClosureSubsts { ref substs }) | Generator(_, GeneratorSubsts { ref substs }, _) => { substs.regions().collect() } Projection(ref data) | UnnormalizedProjection(ref data) => { data.substs.regions().collect() } FnDef(..) | FnPtr(_) | GeneratorWitness(..) | Bool | Char | Int(_) | Uint(_) | Float(_) | Str | Array(..) | Slice(_) | RawPtr(_) | Never | Tuple(..) | Foreign(..) 
| Param(_) | Infer(_) | Error => { vec![] } } } /// When we create a closure, we record its kind (i.e., what trait /// it implements) into its `ClosureSubsts` using a type /// parameter. This is kind of a phantom type, except that the /// most convenient thing for us to are the integral types. This /// function converts such a special type into the closure /// kind. To go the other way, use /// `tcx.closure_kind_ty(closure_kind)`. /// /// Note that during type checking, we use an inference variable /// to represent the closure kind, because it has not yet been /// inferred. Once upvar inference (in `src/librustc_typeck/check/upvar.rs`) /// is complete, that type variable will be unified. pub fn to_opt_closure_kind(&self) -> Option<ty::ClosureKind> { match self.sty { Int(int_ty) => match int_ty { ast::IntTy::I8 => Some(ty::ClosureKind::Fn), ast::IntTy::I16 => Some(ty::ClosureKind::FnMut), ast::IntTy::I32 => Some(ty::ClosureKind::FnOnce), _ => bug!("cannot convert type `{:?}` to a closure kind", self), }, Infer(_) => None, Error => Some(ty::ClosureKind::Fn), _ => bug!("cannot convert type `{:?}` to a closure kind", self), } } /// Fast path helper for testing if a type is `Sized`. /// /// Returning true means the type is known to be sized. Returning /// `false` means nothing -- could be sized, might not be. pub fn is_trivially_sized(&self, tcx: TyCtxt<'_, '_, 'tcx>) -> bool { match self.sty { ty::Infer(ty::IntVar(_)) | ty::Infer(ty::FloatVar(_)) | ty::Uint(_) | ty::Int(_) | ty::Bool | ty::Float(_) | ty::FnDef(..) | ty::FnPtr(_) | ty::RawPtr(..) | ty::Char | ty::Ref(..) | ty::Generator(..) | ty::GeneratorWitness(..) | ty::Array(..) | ty::Closure(..) | ty::Never | ty::Error => true, ty::Str | ty::Slice(_) | ty::Dynamic(..) | ty::Foreign(..) => false, ty::Tuple(tys) => tys.iter().all(|ty| ty.is_trivially_sized(tcx)), ty::Adt(def, _substs) => def.sized_constraint(tcx).is_empty(), ty::Projection(_) | ty::Param(_) | ty::Opaque(..) => false, ty::UnnormalizedProjection(..) 
=> bug!("only used with chalk-engine"), ty::Infer(ty::TyVar(_)) => false, ty::Infer(ty::BoundTy(_)) | ty::Infer(ty::FreshTy(_)) | ty::Infer(ty::FreshIntTy(_)) | ty::Infer(ty::FreshFloatTy(_)) => bug!("is_trivially_sized applied to unexpected type: {:?}", self), } } } /// Typed constant value. #[derive(Copy, Clone, Debug, Hash, RustcEncodable, RustcDecodable, Eq, PartialEq, Ord, PartialOrd)] pub struct Const<'tcx> { pub ty: Ty<'tcx>, pub val: ConstValue<'tcx>, } impl<'tcx> Const<'tcx> { pub fn unevaluated( tcx: TyCtxt<'_, '_, 'tcx>, def_id: DefId, substs: &'tcx Substs<'tcx>, ty: Ty<'tcx>, ) -> &'tcx Self { tcx.mk_const(Const { val: ConstValue::Unevaluated(def_id, substs), ty, }) } #[inline] pub fn from_const_value( tcx: TyCtxt<'_, '_, 'tcx>, val: ConstValue<'tcx>, ty: Ty<'tcx>, ) -> &'tcx Self { tcx.mk_const(Const { val, ty, }) } #[inline] pub fn from_scalar( tcx: TyCtxt<'_, '_, 'tcx>, val: Scalar, ty: Ty<'tcx>, ) -> &'tcx Self { Self::from_const_value(tcx, ConstValue::Scalar(val), ty) } #[inline] pub fn from_bits( tcx: TyCtxt<'_, '_, 'tcx>, bits: u128, ty: ParamEnvAnd<'tcx, Ty<'tcx>>, ) -> &'tcx Self { let ty = tcx.lift_to_global(&ty).unwrap(); let size = tcx.layout_of(ty).unwrap_or_else(|e| { panic!("could not compute layout for {:?}: {:?}", ty, e) }).size; let shift = 128 - size.bits(); let truncated = (bits << shift) >> shift; assert_eq!(truncated, bits, "from_bits called with untruncated value"); Self::from_scalar(tcx, Scalar::Bits { bits, size: size.bytes() as u8 }, ty.value) } #[inline] pub fn zero_sized(tcx: TyCtxt<'_, '_, 'tcx>, ty: Ty<'tcx>) -> &'tcx Self { Self::from_scalar(tcx, Scalar::Bits { bits: 0, size: 0 }, ty) } #[inline] pub fn from_bool(tcx: TyCtxt<'_, '_, 'tcx>, v: bool) -> &'tcx Self { Self::from_bits(tcx, v as u128, ParamEnv::empty().and(tcx.types.bool)) } #[inline] pub fn from_usize(tcx: TyCtxt<'_, '_, 'tcx>, n: u64) -> &'tcx Self { Self::from_bits(tcx, n as u128, ParamEnv::empty().and(tcx.types.usize)) } #[inline] pub fn to_bits( &self, tcx: 
TyCtxt<'_, '_, 'tcx>, ty: ParamEnvAnd<'tcx, Ty<'tcx>>, ) -> Option<u128> { if self.ty != ty.value { return None; } let ty = tcx.lift_to_global(&ty).unwrap(); let size = tcx.layout_of(ty).ok()?.size; self.val.try_to_bits(size) } #[inline] pub fn to_ptr(&self) -> Option<Pointer> { self.val.try_to_ptr() } #[inline] pub fn assert_bits( &self, tcx: TyCtxt<'_, '_, '_>, ty: ParamEnvAnd<'tcx, Ty<'tcx>>, ) -> Option<u128> { assert_eq!(self.ty, ty.value); let ty = tcx.lift_to_global(&ty).unwrap(); let size = tcx.layout_of(ty).ok()?.size; self.val.try_to_bits(size) } #[inline] pub fn assert_bool(&self, tcx: TyCtxt<'_, '_, '_>) -> Option<bool> { self.assert_bits(tcx, ParamEnv::empty().and(tcx.types.bool)).and_then(|v| match v { 0 => Some(false), 1 => Some(true), _ => None, }) } #[inline] pub fn assert_usize(&self, tcx: TyCtxt<'_, '_, '_>) -> Option<u64> { self.assert_bits(tcx, ParamEnv::empty().and(tcx.types.usize)).map(|v| v as u64) } #[inline] pub fn unwrap_bits( &self, tcx: TyCtxt<'_, '_, '_>, ty: ParamEnvAnd<'tcx, Ty<'tcx>>, ) -> u128 { self.assert_bits(tcx, ty).unwrap_or_else(|| bug!("expected bits of {}, got {:#?}", ty.value, self)) } #[inline] pub fn unwrap_usize(&self, tcx: TyCtxt<'_, '_, '_>) -> u64 { self.assert_usize(tcx).unwrap_or_else(|| bug!("expected constant usize, got {:#?}", self)) } } impl<'tcx> serialize::UseSpecializedDecodable for &'tcx Const<'tcx> {}
34.697305
100
0.585609
33e697dfb89e38abba827f6d4e916bf52f25fc20
2,327
#[cfg(feature = "serializers")] use serde::{de::Error as DeError, Deserialize, Deserializer, Serialize, Serializer}; use std::{ fmt::{self, Display}, str::{self, FromStr}, }; use crate::{algorithm::HashAlgorithm, error::Error, hash::Hash}; /// Block identifiers #[derive(Clone, Debug, Hash, Eq, PartialEq)] pub struct Id { /// Hash which identifies this block pub hash: Hash, // TODO: parts set header? } impl Id { /// Create a new `Id` from a hash byte slice pub fn new(hash: Hash) -> Self { Self { hash } } } // TODO: match gaia serialization? e.g `D2F5991B98D708FD2C25AA2BEBED9358F24177DE:1:C37A55FB95E9` impl Display for Id { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", &self.hash) } } // TODO: match gaia serialization? impl FromStr for Id { type Err = Error; fn from_str(s: &str) -> Result<Self, Error> { Ok(Self::new(Hash::from_hex_upper(HashAlgorithm::Sha256, s)?)) } } #[cfg(feature = "serializers")] impl Serialize for Id { fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> { self.to_string().serialize(serializer) } } #[cfg(feature = "serializers")] impl<'de> Deserialize<'de> for Id { fn deserialize<De: Deserializer<'de>>(deserializer: De) -> Result<Self, De::Error> { Self::from_str(&String::deserialize(deserializer)?) 
.map_err(|e| De::Error::custom(format!("{}", e))) } } /// Parse `block::Id` from a type pub trait ParseId { /// Parse `block::Id`, or return an `Error` if parsing failed fn parse_block_id(&self) -> Result<Id, Error>; } #[cfg(test)] mod tests { use super::*; const EXAMPLE_SHA256_ID: &str = "26C0A41F3243C6BCD7AD2DFF8A8D83A71D29D307B5326C227F734A1A512FE47D"; #[test] fn parses_hex_strings() { let id = Id::from_str(EXAMPLE_SHA256_ID).unwrap(); assert_eq!( id.hash.as_slice(), b"\x26\xC0\xA4\x1F\x32\x43\xC6\xBC\xD7\xAD\x2D\xFF\x8A\x8D\x83\xA7\ \x1D\x29\xD3\x07\xB5\x32\x6C\x22\x7F\x73\x4A\x1A\x51\x2F\xE4\x7D" .as_ref() ); } #[test] fn serializes_hex_strings() { let id = Id::from_str(EXAMPLE_SHA256_ID).unwrap(); assert_eq!(&id.to_string(), EXAMPLE_SHA256_ID) } }
27.376471
96
0.616674
796502ffcf3fbc2e0d759eab12a9272238fdf088
927
use roman_numerals::RomanNumber;
use std::io::{stdout, Write};

/// Interactive Roman <-> Arabic number converter: reads one line from stdin,
/// strips all whitespace, and prints either the converted value or an error.
fn main() {
    let mut s = String::new();
    println!("Welcome to Roman-Arabic number convertor:\n");
    print!("enter number to be converted:\n> ");
    // Flush so the "> " prompt appears before we block on stdin;
    // a flush failure is harmless here, so the result is deliberately ignored.
    let _ = stdout().flush();
    if std::io::stdin().read_line(&mut s).is_err() {
        eprintln!("Failed to read user input.");
        return;
    }
    // Remove ALL whitespace (not just the trailing newline) so inputs like
    // " X I V " still parse.
    s.retain(|c| !c.is_whitespace());
    match convert(&s) {
        Ok(r) => {
            println!("Converted number: {}", r);
        }
        Err(e) => {
            eprintln!("Error: {}", e);
        }
    }
}

/// Converts `s` in either direction:
/// - a `u32` is converted to its Roman representation,
/// - anything else is parsed as a Roman numeral and converted to Arabic.
///
/// Returns an error for decimal inputs (e.g. "1.5") or invalid numerals.
fn convert(s: &str) -> Result<String, String> {
    if let Ok(n) = s.parse::<u32>() {
        return Ok(RomanNumber::from_arab(n)?.to_string());
    }
    // A value that parses as f32 but not u32 is a decimal (or negative) number,
    // which the Roman system cannot represent.
    if s.parse::<f32>().is_ok() {
        return Err("Decimal numbers are not supported.".into());
    }
    Ok(RomanNumber::from_string(s)?.to_arab()?.to_string())
}
25.75
67
0.533981
ac8bed72df06ee6baad578b257b726f944fdbc64
269
// tests2.rs // This test has a problem with it -- make the test compile! Make the test // pass! Make the test fail! Execute `rustlings hint tests2` for hints :) #[cfg(test)] mod tests { #[test] fn you_can_assert_eq() { assert_eq!(1.200, 1.2); } }
22.416667
74
0.624535
907de17c180da017c23760c084c08019f15e9e19
1,629
use crate::{api_client::Client,
            command::origin::key::download::download_public_encryption_key,
            common::ui::{Status, UIWriter, UI},
            error::{Error, Result},
            PRODUCT,
            VERSION};
use biome_core::{crypto::keys::KeyCache, origin::Origin};

/// Encrypts `secret` with the origin's public encryption key and uploads it to
/// Builder under `key`, reporting progress through `ui`.
///
/// If no public encryption key for `origin` is present in `key_cache`, it is
/// first downloaded from `bldr_url` (authenticated with `token`) and the cache
/// is consulted again.
///
/// Returns an error if the Builder client cannot be constructed, the key
/// cannot be obtained, any UI write fails, or the upload request fails.
pub async fn start(ui: &mut UI,
                   bldr_url: &str,
                   token: &str,
                   origin: &Origin,
                   key: &str,
                   secret: &str,
                   key_cache: &KeyCache)
                   -> Result<()> {
    let api_client = Client::new(bldr_url, PRODUCT, VERSION, None).map_err(Error::APIClient)?;

    // Prefer the cached key; fall back to downloading it once on any cache miss.
    let encryption_key = match key_cache.latest_origin_public_encryption_key(origin) {
        Ok(key) => key,
        Err(_) => {
            debug!("Didn't find public encryption key in cache path");
            download_public_encryption_key(ui, &api_client, origin, token, key_cache).await?;
            key_cache.latest_origin_public_encryption_key(origin)?
        }
    };
    ui.status(Status::Encrypting, format!("value for key {}.", key))?;
    // Encrypt the raw secret bytes with the origin's public encryption key.
    let anonymous_box = encryption_key.encrypt(secret.as_bytes());
    // The secret value is never echoed back to the user.
    ui.status(Status::Encrypted, format!("{}=[REDACTED].", key))?;

    ui.status(Status::Uploading, format!("secret for key {}.", key))?;
    api_client.create_origin_secret(origin, token, key, &anonymous_box)
              .await
              .map_err(Error::APIClient)?;
    ui.status(Status::Uploaded, format!("secret for {}.", key))?;

    Ok(())
}
35.413043
94
0.542664
aca80ece9daa63991192493f0f53d3ce8315032e
658
use std::{env, fs, path::PathBuf};

use once_cell::sync::Lazy;

/// Directory where benchmark log files are written, created on first access.
///
/// Resolved relative to the test executable's directory so that benchmark
/// output lands under the build tree rather than the working directory.
///
/// Panics (with context) if the executable path cannot be determined or the
/// directory cannot be created — benchmarks cannot proceed without it.
pub static BENCH_LOGS_PATH: Lazy<PathBuf> = Lazy::new(|| {
    let path = env::current_exe()
        .expect("failed to get path of the current executable")
        .parent()
        .expect("executable path has no parent directory")
        .join("dev/bench_logs");
    fs::create_dir_all(&path)
        .unwrap_or_else(|e| panic!("failed to create bench log dir {}: {}", path.display(), e));
    path
});

/// Canonical log message used by the benchmarks, shared as a macro so it can
/// be embedded in `concat!`/format literals at compile time.
#[macro_export]
macro_rules! bench_log_message {
    () => {
        "this is a test log message"
    };
}

// These values are shared in Rust crate benchmarks.
// Benchmark "compare_with_cpp_spdlog" defines its own values in its file.

/// Maximum size of a single log file before rotation (30 MiB).
#[allow(dead_code)]
pub const FILE_SIZE: u64 = 30 * 1024 * 1024;
/// Number of rotating log files to keep.
#[allow(dead_code)]
pub const ROTATING_FILES: usize = 6;
21.933333
74
0.635258
fbb45e8d9d09f6f8bd163a1106b25ac2389ba99b
2,195
use crate::prelude::*; /// Frequently used mathematical operators. #[derive(Debug, PartialEq, Eq)] pub enum MathOperator { /// Addition of two numbers (`a + b`). Add, /// Equivalence of two numbers (`a == b`). Equal, /// Multiplication of two numbers (`a * b`) Mul, /// Negation of one number (`-a`). Not, /// Subtraction of two numbers (`a - b`). Sub, } /// A simple operator system that decides how each of the variants of [`MathOperator`][MathOperator] /// trigger push and pulls on the [`Stack`][Stack] inside a [`Machine`][Machine]. /// /// [MathOperator]: enum.MathOperator.html /// [Stack]: ../../core/stack/struct.Stack.html /// [Machine]: ../../core/machine/struct.Machine.html pub fn simple_math_op_sys(stack: &mut Stack, operator: &MathOperator) { use crate::core::value::Value::*; match operator { MathOperator::Add => { let a = stack.pop(); let b = stack.pop(); stack.push(a + b); } MathOperator::Equal => { let a = stack.pop(); let b = stack.pop(); stack.push(Boolean(a == b)); } MathOperator::Mul => { let a = stack.pop(); let b = stack.pop(); stack.push(a * b); } MathOperator::Not => { let x = stack.pop(); stack.push(!x); } MathOperator::Sub => { let a = stack.pop(); let b = stack.pop(); stack.push(a - b); } } } #[cfg(test)] mod tests { use crate::core::value::Value::*; use crate::op_systems::simple_math::{simple_math_op_sys, MathOperator}; use crate::prelude::Item::*; use crate::prelude::*; #[test] fn test_one_plus_one_equals_two() { let machine = &mut Machine::new(&simple_math_op_sys); let result = machine .run_script(&[ Value(Integer(1)), Value(Integer(1)), Operator(MathOperator::Add), Value(Integer(2)), Operator(MathOperator::Equal), ]) .unwrap(); assert_eq!(result, &Boolean(true)); } }
27.78481
100
0.521185
260717b77ec26817378aba86aa2b63e924120132
2,630
/* * Copyright 2020 Fluence Labs Limited * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ use crate::execution_step::ExecutableInstruction; use crate::farewell_step as farewell; use crate::preparation_step::prepare; use crate::preparation_step::PreparationDescriptor; use air_interpreter_interface::InterpreterOutcome; use air_interpreter_interface::RunParameters; use air_log_targets::RUN_PARAMS; pub fn execute_air( air: String, prev_data: Vec<u8>, data: Vec<u8>, params: RunParameters, call_results: Vec<u8>, ) -> InterpreterOutcome { use std::convert::identity; log::trace!( target: RUN_PARAMS, "air interpreter version is {}, run parameters:\ init peer id {}\ current peer id {}", env!("CARGO_PKG_VERSION"), params.init_peer_id, params.current_peer_id, ); execute_air_impl(air, prev_data, data, params, call_results).unwrap_or_else(identity) } fn execute_air_impl( air: String, prev_data: Vec<u8>, data: Vec<u8>, params: RunParameters, call_results: Vec<u8>, ) -> Result<InterpreterOutcome, InterpreterOutcome> { let PreparationDescriptor { mut exec_ctx, mut trace_handler, air, } = match prepare(&prev_data, &data, air.as_str(), &call_results, params) { Ok(descriptor) => descriptor, // return the prev data in case of errors Err(error) => return Err(farewell::from_uncatchable_error(prev_data, error)), }; // match here is used instead of map_err, because the compiler can't determine that // they are exclusive and would treat exec_ctx and trace_handler as moved match air.execute(&mut exec_ctx, 
&mut trace_handler) { Ok(_) => farewell::from_success_result(exec_ctx, trace_handler), // return new collected trace in case of errors Err(error) if error.is_catchable() => Err(farewell::from_execution_error(exec_ctx, trace_handler, error)), // return the prev data in case of any trace errors Err(error) => Err(farewell::from_uncatchable_error(prev_data, error)), } }
35.066667
114
0.696198
093d9261858a62d9cf9e612449fdf4302e2de914
27,370
use crate::*; use crate::fs::fstrait::*; use crate::fs::structures::*; use super::structures::*; use alloc::vec; use crate::process::descriptor::*; use libutils::paths::PathBuffer; use super::super::ioctl::*; /// Minix3 Filesystem Driver pub struct Minix3Filesystem { block_driver: &'static mut crate::drivers::virtio::drivers::block::BlockDriver, mount_id: Option<usize>, vfs: Option<&'static mut crate::fs::vfs::FilesystemInterface>, superblock: Option<Minix3SuperBlock>, cache: Vec<(usize, [u8; 1024])>, rewritten: Vec<(usize, [u8; 1024])>, mount_inodes: Vec<(FilesystemIndex, FilesystemIndex, String)> } impl Minix3Filesystem { /// Initialize a new Minix3 Filesystem Interface pub fn new(driver_id: usize) -> Self { Self { block_driver: crate::drivers::virtio::get_block_driver(driver_id).unwrap(), mount_id: None, vfs: None, superblock: None, cache: Vec::new(), rewritten: Vec::new(), mount_inodes: Vec::new(), } } /// Read a block as a buffer fn read_block_to_buffer(&mut self, index: usize) -> [u8; 1024] { for (idx, data) in &self.rewritten { if index == *idx { return *data; } } for (idx, data) in &self.cache { if index == *idx { return *data; } } let mut buffer = Box::new([0; 1024]); let ptr = &mut *buffer as *mut [u8; 1024] as *mut u8; self.block_driver.sync_read(ptr, 1024, index as u64 * 1024); self.cache.push((index, *buffer)); *buffer } /// Edit the contents of a block fn edit_block(&mut self, index: usize, new_data: [u8; 1024]) -> FilesystemResult<()> { for (idx, data) in &mut self.rewritten { if index == *idx { *data = new_data; return Ok(()) } } self.rewritten.push((index, new_data)); Ok(()) } /// Edit the contents at a specific region in the block fn edit_block_region(&mut self, index: usize, start: usize, new_data: &[u8]) -> FilesystemResult<usize> { let mut i = start; let mut rewritten_index = 0; for (idx, data) in &mut self.rewritten { if index == *idx { for v in new_data { data[i] = *v; i += 1; if i == 1024 { break; } } return Ok(rewritten_index) } 
rewritten_index += 1; } let mut prev_data = self.read_block_to_buffer(index); for v in new_data { prev_data[i] = *v; i += 1; if i == 1024 { break; } } self.rewritten.push((index, prev_data)); Ok(self.rewritten.len() - 1) } /// Read an inode fn get_inode(&mut self, inode_number: usize) -> FilesystemResult<Minix3Inode> { kdebugln!(Filesystem, "Opening inode {} on fs {:?}", inode_number, self.mount_id); if let Some(superblock) = self.superblock { // Conver the inode number to a block index let block_index = (inode_number - 1) / 16 + 2 + superblock.imap_blocks as usize + superblock.zmap_blocks as usize; // Read the block into a buffer let mut buffer = self.read_block_to_buffer(block_index); // Read the inode out of the buffer let inode = unsafe { (&mut buffer as *mut [u8; 1024] as *mut Minix3Inode).add((inode_number - 1) % 16).read() }; // The buffer is freed implicitly after the return Ok(inode) } else { Err(FilesystemError::FilesystemUninitialized) } } /// Get a mutable buffer into editable memory fn get_mut_buffer(&mut self, block: usize) -> FilesystemResult<&mut [u8; 1024]> { let mut rewritten_index = 0; for (idx, _) in &mut self.rewritten { if block == *idx { break; } rewritten_index += 1; } if rewritten_index == self.rewritten.len() { let buffer = self.read_block_to_buffer(block); self.rewritten.push((block, buffer)); } Ok(&mut self.rewritten[rewritten_index].1) } /// Edit an inode fn get_mut_inode(&mut self, inode_number: usize) -> FilesystemResult<& mut Minix3Inode> { if let Some(superblock) = self.superblock { // Conver the inode number to a block index let block_index = (inode_number - 1) / 16 + 2 + superblock.imap_blocks as usize + superblock.zmap_blocks as usize; // Get a reference to that memory let buffer_ref = self.get_mut_buffer(block_index)?; // Get the reference to the specific inode let inode = unsafe { (buffer_ref as *mut [u8; 1024] as *mut Minix3Inode).add((inode_number - 1) % 16).as_mut().unwrap() }; Ok(inode) } else { 
Err(FilesystemError::FilesystemUninitialized) } } /// Read from a possibly nested zone fn read_zone(&mut self, zone: usize, level: usize, buffer: *mut u8, index: &mut usize, remaining: &mut usize, offset: &mut usize) { // If no bytes are left to be read, terminate if *remaining == 0 { return; } if level == 0 { // Read the block to a buffer let data = self.read_block_to_buffer(zone); // Read byte by byte for v in data.iter() { if *offset > 0 { *offset -= 1; continue; } unsafe { buffer.add(*index).write(*v) }; *index += 1; *remaining -= 1; if *remaining == 0 { break; } } } else { // Read the block to a buffer let data = unsafe { core::mem::transmute::<[u8; 1024], [u32; 256]>(self.read_block_to_buffer(zone)) }; // Read byte by byte for v in data.iter() { // Skip entries which contain zero if *v == 0 { continue; } // Otherwise, use it as the zone to go to the next level down self.read_zone(*v as usize, level - 1, buffer, index, remaining, offset); // If we are done reading the file, break if *remaining == 0 { break; } } } } /// Read the data from an inode fn read_from_inode(&mut self, inode: Minix3Inode) -> Vec<u8> { let mut remaining = inode.size as usize; let mut buffer = vec![0u8; remaining]; let mut index = 0; let mut offset = 0; for (i, zone) in inode.zones.iter().enumerate() { if *zone == 0 {continue; } self.read_zone(*zone as usize, i.max(6) - 6, buffer.as_mut_ptr(), &mut index, &mut remaining, &mut offset); } buffer } /// Add a directory entry at the given inode fn add_directory_entry_raw(&mut self, inode: usize, entry: Minix3DirEntry) -> FilesystemResult<()> { // Get a mutable reference to the inode let inode_ref = self.get_mut_inode(inode)?; // Get the original size let orig_entry_count = inode_ref.size / 64; // Increment the size inode_ref.size += 64; let zone_index = orig_entry_count / 16; if zone_index < 7 { let next = if inode_ref.zones[zone_index as usize] == 0 { let next = self.next_free_zone()?; self.claim_zone(next)?; Some(next as u32) } else { None 
}; // Get a mutable reference to the inode let inode_ref = self.get_mut_inode(inode)?; if let Some(next) = next { inode_ref.zones[zone_index as usize] = next; } // Get the zone let zone = inode_ref.zones[zone_index as usize]; let buffer = unsafe { core::mem::transmute::<&mut[u8; 1024], &mut[Minix3DirEntry; 16]>(self.get_mut_buffer(zone as usize)?) }; buffer[orig_entry_count as usize % 16] = entry; } else { todo!() } Ok(()) } /// Add a directory entry from the inode and name to the given inode fn add_directory_entry(&mut self, dest: usize, inode: usize, name: &str) -> FilesystemResult<()> { let mut ent = Minix3DirEntry { inode: inode as u32, name: [0; 60], }; for (i, c) in name.chars().enumerate() { ent.name[i] = c as u8; } self.add_directory_entry_raw(dest, ent) } /// Get the next available free inode fn next_free_inode(&mut self) -> FilesystemResult<usize> { if let Some(superblock) = self.superblock { let mut i = 0; let num_blocks = superblock.imap_blocks; for b in 0..num_blocks { let buffer = self.read_block_to_buffer(2 + b as usize); for v in &buffer { if *v == 0xFF { i += 8; continue; } let mut walker = 0x01; while walker > 0 { if *v & walker == 0 { return Ok(i); } i += 1; walker <<= 1; } } } Err(FilesystemError::OutOfSpace) } else { Err(FilesystemError::FilesystemUninitialized) } } /// Claim an inode fn claim_inode(&mut self, mut inode: usize) -> FilesystemResult<()> { inode -= 0; let block = 2 + inode / (8 * 1024); let byte = (inode / 8) % 1024; let bit = inode % 8; let buffer = self.get_mut_buffer(block)?; buffer[byte] |= 0x01 << bit; Ok(()) } /// Free an inode fn free_inode(&mut self, mut inode: usize) -> FilesystemResult<()> { inode -= 0; let block = 2 + inode / (8 * 1024); let byte = (inode / 8) % 1024; let bit = inode % 8; let buffer = self.get_mut_buffer(block)?; buffer[byte] &= !(0x01 << bit); Ok(()) } /// Get the next available free inode fn next_free_zone(&mut self) -> FilesystemResult<usize> { if let Some(superblock) = self.superblock { let mut i = 
0; let num_blocks = superblock.zmap_blocks; for b in 0..num_blocks { let buffer = self.read_block_to_buffer(2 + b as usize + superblock.imap_blocks as usize); for v in &buffer { if *v == 0xFF { i += 8; continue; } let mut walker = 0x01; while walker > 0 { if *v & walker == 0 && i as u16 >= superblock.first_data_zone + 500 { return Ok(i); } i += 1; walker <<= 1; } } } Err(FilesystemError::OutOfSpace) } else { Err(FilesystemError::FilesystemUninitialized) } } /// Claim a zone fn claim_zone(&mut self, zone: usize) -> FilesystemResult<()> { if let Some(superblock) = self.superblock { let block = 2 + superblock.imap_blocks as usize + zone / (8 * 1024); let byte = (zone / 8) % 1024; let bit = zone % 8; let buffer = self.get_mut_buffer(block)?; buffer[byte] |= 0x01 << bit; Ok(()) } else { Err(FilesystemError::FilesystemUninitialized) } } /// Free a zone fn free_zone(&mut self, zone: usize) -> FilesystemResult<()> { if let Some(superblock) = self.superblock { let block = 2 + superblock.imap_blocks as usize + zone / (8 * 1024); let byte = (zone / 8) % 1024; let bit = zone % 8; let buffer = self.get_mut_buffer(block)?; buffer[byte] &= !(0x01 << bit); Ok(()) } else { Err(FilesystemError::FilesystemUninitialized) } } /// Recursive zone allocation fn recursive_zone_alloc(&mut self, level: usize, remaining: &mut usize) -> FilesystemResult<usize> { if *remaining == 0 { return Ok(0); } let zone = self.next_free_zone()?; self.claim_zone(zone)?; if level > 0 { let buffer = self.get_mut_buffer(zone)?; let zones = unsafe { core::mem::transmute::<&mut [u8; 1024], &mut [u32; 256]>(buffer) }; for z in zones { *z = self.recursive_zone_alloc(level - 1, remaining)? 
as u32; } } else { *remaining -= 1; } Ok(zone) } /// Allocate zones fn allocate_zones(&mut self, inode: &mut Minix3Inode, mut count: usize) -> FilesystemResult<()> { // TODO: Make this acknowledge any previously allocated zones, right // now, it assumes zones are free which means there will be a memory // leak if a nonempty file is passed, therefore this check is in place: assert_eq!(inode.zones, [0; 10]); for (i, slot) in inode.zones.iter_mut().enumerate() { *slot = self.recursive_zone_alloc(i.max(6) - 6, &mut count)? as u32; if count == 0 { break; } } Ok(()) } /// Recursive copy to zones fn recursive_copy_to_zones(&mut self, zone: usize, level: usize, data: &[u8], index: &mut usize) -> FilesystemResult<()> { if *index >= data.len() { return Ok(()); } if level == 0 { for byte in self.get_mut_buffer(zone)?.iter_mut() { *byte = data[*index]; *index += 1; if *index >= data.len() { break; } } } else { for slot in unsafe { core::mem::transmute::<[u8; 1024], [u32; 256]>(self.read_block_to_buffer(zone)) }.iter() { self.recursive_copy_to_zones(*slot as usize, level - 1, data, index)?; if *index >= data.len() { break; } } } Ok(()) } /// Copy data to zones fn copy_to_zones(&mut self, inode: &mut Minix3Inode, data: &[u8]) -> FilesystemResult<()> { let mut index = 0; if data.len() == 0 { return Ok(()) } for (i, zone) in inode.zones.iter().enumerate() { self.recursive_copy_to_zones(*zone as usize, i.max(6) - 6, data, &mut index)?; if index >= data.len() { break; } } Ok(()) } /// Recursive Free Zones fn recursive_free_zones(&mut self, zone: usize, level: usize) -> FilesystemResult<()> { if level == 0 { self.free_zone(zone)?; } else { let current = self.read_block_to_buffer(zone); let zone_numbers = unsafe { core::mem::transmute::<[u8; 1024], [u32; 256]>(current) }; for zone in &zone_numbers { if *zone == 0 { break; } self.recursive_free_zones(*zone as usize, level - 1)?; } } Ok(()) } /// Free zones fn free_zones(&mut self, inode: &mut Minix3Inode) -> FilesystemResult<()> { for 
(i, zone) in inode.zones.iter_mut().enumerate() { if *zone == 0 { break; } self.recursive_free_zones(*zone as usize, i.max(6) - 6)?; *zone = 0; } Ok(()) } /// Write data to a file fn write_to_file(&mut self, inode_number: usize, data: &[u8]) -> FilesystemResult<()> { let mut inode = self.get_inode(inode_number)?; // TODO: This is not as efficent as I would like it to be, it currently // will free and then reallocate zones self.free_zones(&mut inode)?; self.allocate_zones(&mut inode, (data.len() + 1023) / 1024)?; self.copy_to_zones(&mut inode, data)?; inode.size = data.len() as u32; *(self.get_mut_inode(inode_number)?) = inode; Ok(()) } /// Allocate a file fn allocate_file(&mut self, data: String, mode: u16) -> FilesystemResult<usize> { let next_inode = self.next_free_inode()?; self.claim_inode(next_inode)?; let inode = self.get_mut_inode(next_inode)?; *inode = Minix3Inode { mode, nlinks: 1, uid: 1000, gid: 1000, size: 50, atime: 0, mtime: 0, ctime: 0, zones: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], }; self.write_to_file(next_inode, data.as_bytes())?; Ok(next_inode) } /// Allocate a new directory fn new_directory(&mut self, dest: usize, name: String) -> FilesystemResult<usize> { let inode = self.allocate_file(String::new(), 0x4000 | 0o777)?; self.add_directory_entry(inode, inode, ".")?; self.add_directory_entry(inode, dest, "..")?; self.add_directory_entry(dest, inode, &name)?; Ok(inode) } } impl Filesystem for Minix3Filesystem { /// Initialize the filesystem on the current disk fn init(&mut self) -> FilesystemResult<()> { kdebugln!(Filesystem, "Initializing Minix3 Filesystem"); // Read the super block let mut ptr = Box::new([0u8; 512]); self.block_driver.sync_read(ptr.as_mut() as *mut [u8; 512] as *mut u8, 512, 1024); let superblock = unsafe { *(ptr.as_mut() as *mut [u8; 512] as *mut Minix3SuperBlock) }; // Verify the filesystem is a minix3 filesystem if superblock.magic != 0x4d5a { return Err(FilesystemError::BadFilesystemFormat) } self.superblock = Some(superblock); 
Ok(()) } /// Sync the filesystem with the current disk fn sync(&mut self) -> FilesystemResult<()> { kdebugln!(Filesystem, "{} Zones Rewritten", self.rewritten.len()); for (block, data) in &self.rewritten { kdebugln!(Filesystem, "Writing to Block {}", block); let ptr = data.as_ptr() as *mut u8; self.block_driver.sync_write(ptr, 1024, 1024 * *block as u64); } // Clear the rewritten buffer self.rewritten = Vec::new(); Ok(()) } /// Set the mount_id of the filesystem fn set_mount_id(&mut self, mount_id: usize, vfs: &'static mut crate::fs::vfs::FilesystemInterface) { self.mount_id = Some(mount_id); self.vfs = Some(vfs); } /// Get the index of the root directory of the filesystem fn get_root_index(&mut self) -> FilesystemResult<FilesystemIndex> { if let Some(mount_id) = self.mount_id { Ok( FilesystemIndex { mount_id, inode: 1, } ) } else { Err(FilesystemError::FilesystemNotMounted) } } /// Convert a path to an inode fn path_to_inode(&mut self, path: PathBuffer) -> FilesystemResult<FilesystemIndex> { if let Some(vfs) = &mut self.vfs { vfs.path_to_inode(path) } else { Err(FilesystemError::FilesystemNotMounted) } } /// Convert an inode to a path fn inode_to_path(&mut self, inode: FilesystemIndex) -> FilesystemResult<PathBuffer> { if let Some(vfs) = &mut self.vfs { vfs.inode_to_path(inode) } else { Err(FilesystemError::FilesystemNotMounted) } } /// Get the directory entries for the given inode fn get_dir_entries(&mut self, inode: FilesystemIndex) -> FilesystemResult<alloc::vec::Vec<DirectoryEntry>> { if Some(inode.mount_id) == self.mount_id { let inode_data = self.get_inode(inode.inode)?; if inode_data.mode & 0x4000 == 0 { return Err(FilesystemError::INodeIsNotADirectory); } let data = self.read_from_inode(inode_data); let dir_entries = unsafe { core::mem::transmute::<&[u8], &[Minix3DirEntry]>(data.as_slice()) }; let mut result = Vec::new(); for i in 0..inode_data.size as usize / 64 { let entry = &dir_entries[i]; let mut name = String::new(); for c in &entry.name { if *c == 0 
{ break; } name.push(*c as char); } result.push(DirectoryEntry{ index: FilesystemIndex{ mount_id: inode.mount_id, inode: entry.inode as usize }, name: name, entry_type: DirectoryEntryType::Unknown }); } // Add any mounted filesystems for (place, root, name) in &self.mount_inodes { if *place == inode { result.push(DirectoryEntry{ index: *root, name: name.clone(), entry_type: DirectoryEntryType::Directory }); } } Ok(result) } else { if let Some(vfs) = &mut self.vfs { vfs.get_dir_entries(inode) } else { Err(FilesystemError::FilesystemNotMounted) } } } /// Create a file in the directory at the given inode fn create_file(&mut self, inode: FilesystemIndex, name: alloc::string::String) -> FilesystemResult<FilesystemIndex> { if Some(inode.mount_id) == self.mount_id { let file_inode = self.allocate_file(String::new(), 0o100777)?; self.add_directory_entry(inode.inode, file_inode, &name)?; Ok(FilesystemIndex { mount_id: inode.mount_id, inode: file_inode } ) } else { if let Some(vfs) = &mut self.vfs { vfs.create_file(inode, name) } else { Err(FilesystemError::FilesystemNotMounted) } } } /// Create a directory in the directory at the given inode fn create_directory(&mut self, inode: FilesystemIndex, name: alloc::string::String) -> FilesystemResult<FilesystemIndex> { if Some(inode.mount_id) == self.mount_id { let dir_inode = self.new_directory(inode.inode, name)?; Ok(FilesystemIndex { mount_id: inode.mount_id, inode: dir_inode } ) } else { if let Some(vfs) = &mut self.vfs { vfs.create_directory(inode, name) } else { Err(FilesystemError::FilesystemNotMounted) } } } /// Remove an inode at the given index from the given directory fn remove_inode(&mut self, _inode: FilesystemIndex, _directory: FilesystemIndex) -> FilesystemResult<()> { todo!() } /// Read the data stored in an inode fn read_inode(&mut self, inode: FilesystemIndex) -> FilesystemResult<Vec<u8>> { if Some(inode.mount_id) == self.mount_id { let inode = self.get_inode(inode.inode)?; Ok(self.read_from_inode(inode)) } else 
{ if let Some(vfs) = &mut self.vfs { vfs.read_inode(inode) } else { Err(FilesystemError::FilesystemNotMounted) } } } /// Write data to an inode fn write_inode(&mut self, inode: FilesystemIndex, data: &[u8]) -> FilesystemResult<()> { if Some(inode.mount_id) == self.mount_id { self.write_to_file(inode.inode, data) } else { if let Some(vfs) = &mut self.vfs { vfs.write_inode(inode, data) } else { Err(FilesystemError::FilesystemNotMounted) } } } /// Mount a filesystem at the given inode fn mount_fs_at(&mut self, inode: FilesystemIndex, root: FilesystemIndex, name: String) -> FilesystemResult<()> { self.mount_inodes.push((inode, root, name)); Ok(()) } /// Open a filedescriptor for the given inode fn open_fd(&mut self, inode: FilesystemIndex, mode: usize) -> FilesystemResult<Box<dyn crate::process::descriptor::FileDescriptor>> { if let Some(vfs) = &mut self.vfs { if Some(inode.mount_id) == self.mount_id { Ok(Box::new(InodeFileDescriptor::new(vfs, inode, mode).unwrap())) } else { vfs.open_fd(inode, mode) } } else { Err(FilesystemError::FilesystemNotMounted) } } /// Execute an ioctl command on an inode fn exec_ioctl(&mut self, inode: FilesystemIndex, cmd: IOControlCommand) -> FilesystemResult<usize> { if let Some(vfs) = &mut self.vfs { if Some(inode.mount_id) == self.mount_id { // Nothing to do here (yet) Ok(usize::MAX) } else { vfs.exec_ioctl(inode, cmd) } } else { Err(FilesystemError::FilesystemNotMounted) } } }
26.754643
180
0.486737
33913d028995607e5530afbf4667cb6d4e2575d1
9,204
use darling::ast::{Data, Style}; use proc_macro::TokenStream; use proc_macro2::{Ident, Span}; use quote::quote; use std::collections::HashSet; use syn::visit_mut::VisitMut; use syn::{visit_mut, Error, Lifetime, Type}; use crate::args::{self, RenameTarget}; use crate::utils::{get_crate_name, get_rustdoc, visible_fn, GeneratorResult}; pub fn generate(union_args: &args::Union) -> GeneratorResult<TokenStream> { let crate_name = get_crate_name(union_args.internal); let ident = &union_args.ident; let (impl_generics, ty_generics, where_clause) = union_args.generics.split_for_impl(); let s = match &union_args.data { Data::Enum(s) => s, _ => { return Err(Error::new_spanned(&ident, "Union can only be applied to an enum.").into()) } }; let mut enum_names = Vec::new(); let mut enum_items = HashSet::new(); let mut type_into_impls = Vec::new(); let gql_typename = union_args .name .clone() .unwrap_or_else(|| RenameTarget::Type.rename(ident.to_string())); let desc = get_rustdoc(&union_args.attrs)? .map(|s| quote! { ::std::option::Option::Some(#s) }) .unwrap_or_else(|| quote! 
{::std::option::Option::None}); let mut registry_types = Vec::new(); let mut possible_types = Vec::new(); let mut union_values = Vec::new(); let mut get_introspection_typename = Vec::new(); let mut collect_all_fields = Vec::new(); for variant in s { let enum_name = &variant.ident; let union_visible = visible_fn(&variant.visible); let ty = match variant.fields.style { Style::Tuple if variant.fields.fields.len() == 1 => &variant.fields.fields[0], Style::Tuple => { return Err(Error::new_spanned( enum_name, "Only single value variants are supported", ) .into()) } Style::Unit => { return Err( Error::new_spanned(enum_name, "Empty variants are not supported").into(), ) } Style::Struct => { return Err(Error::new_spanned( enum_name, "Variants with named fields are not supported", ) .into()) } }; if let Type::Path(p) = &ty { // This validates that the field type wasn't already used if !enum_items.insert(p) { return Err( Error::new_spanned(&ty, "This type already used in another variant").into(), ); } enum_names.push(enum_name); union_values.push(quote! { union_values.insert( <#p as #crate_name::Type>::type_name().into_owned(), #crate_name::registry::MetaUnionValue { name: <#p as #crate_name::Type>::type_name().into_owned(), visible: #union_visible, } ); }); struct RemoveLifetime; impl VisitMut for RemoveLifetime { fn visit_lifetime_mut(&mut self, i: &mut Lifetime) { i.ident = Ident::new("_", Span::call_site()); visit_mut::visit_lifetime_mut(self, i); } } let mut assert_ty = p.clone(); RemoveLifetime.visit_type_path_mut(&mut assert_ty); if !variant.flatten { type_into_impls.push(quote! { #crate_name::static_assertions::assert_impl_one!(#assert_ty: #crate_name::ObjectType); #[allow(clippy::all, clippy::pedantic)] impl #impl_generics ::std::convert::From<#p> for #ident #ty_generics #where_clause { fn from(obj: #p) -> Self { #ident::#enum_name(obj) } } }); } else { type_into_impls.push(quote! 
{ #crate_name::static_assertions::assert_impl_one!(#assert_ty: #crate_name::UnionType); #[allow(clippy::all, clippy::pedantic)] impl #impl_generics ::std::convert::From<#p> for #ident #ty_generics #where_clause { fn from(obj: #p) -> Self { #ident::#enum_name(obj) } } }); } if !variant.flatten { registry_types.push(quote! { <#p as #crate_name::Type>::create_type_info(registry); }); possible_types.push(quote! { possible_types.insert(<#p as #crate_name::Type>::type_name().into_owned()); }); } else { possible_types.push(quote! { if let #crate_name::registry::MetaType::Union { possible_types: possible_types2, .. } = registry.create_dummy_type::<#p>() { possible_types.extend(possible_types2); } }); } if !variant.flatten { get_introspection_typename.push(quote! { #ident::#enum_name(obj) => <#p as #crate_name::Type>::type_name() }); } else { get_introspection_typename.push(quote! { #ident::#enum_name(obj) => <#p as #crate_name::Type>::introspection_type_name(obj) }); } collect_all_fields.push(quote! { #ident::#enum_name(obj) => obj.collect_all_fields(ctx, fields) }); } else { return Err(Error::new_spanned(ty, "Invalid type").into()); } } if possible_types.is_empty() { return Err(Error::new_spanned( &ident, "A GraphQL Union type must include one or more unique member types.", ) .into()); } let visible = visible_fn(&union_args.visible); let expanded = quote! 
{ #(#type_into_impls)* #[allow(clippy::all, clippy::pedantic)] impl #impl_generics #crate_name::Type for #ident #ty_generics #where_clause { fn type_name() -> ::std::borrow::Cow<'static, ::std::primitive::str> { ::std::borrow::Cow::Borrowed(#gql_typename) } fn introspection_type_name(&self) -> ::std::borrow::Cow<'static, ::std::primitive::str> { match self { #(#get_introspection_typename),* } } fn create_type_info(registry: &mut #crate_name::registry::Registry) -> ::std::string::String { registry.create_type::<Self, _>(|registry| { #(#registry_types)* #crate_name::registry::MetaType::Union { name: ::std::borrow::ToOwned::to_owned(#gql_typename), description: #desc, possible_types: { let mut possible_types = #crate_name::indexmap::IndexSet::new(); #(#possible_types)* possible_types }, union_values: { let mut union_values = #crate_name::indexmap::IndexMap::new(); #(#union_values)* union_values }, visible: #visible, rust_typename: ::std::any::type_name::<Self>(), } }) } } #[allow(clippy::all, clippy::pedantic)] #[#crate_name::async_trait::async_trait] impl #impl_generics #crate_name::resolver_utils::ContainerType for #ident #ty_generics #where_clause { async fn resolve_field(&self, ctx: &#crate_name::Context<'_>) -> #crate_name::ServerResult<::std::option::Option<#crate_name::Value>> { ::std::result::Result::Ok(::std::option::Option::None) } fn collect_all_fields<'__life>(&'__life self, ctx: &#crate_name::ContextSelectionSet<'__life>, fields: &mut #crate_name::resolver_utils::Fields<'__life>) -> #crate_name::ServerResult<()> { match self { #(#collect_all_fields),* } } } #[allow(clippy::all, clippy::pedantic)] #[#crate_name::async_trait::async_trait] impl #impl_generics #crate_name::OutputType for #ident #ty_generics #where_clause { async fn resolve(&self, ctx: &#crate_name::ContextSelectionSet<'_>, _field: &#crate_name::Positioned<#crate_name::parser::types::Field>) -> #crate_name::ServerResult<#crate_name::Value> { #crate_name::resolver_utils::resolve_container(ctx, 
self).await } } impl #impl_generics #crate_name::UnionType for #ident #ty_generics #where_clause {} }; Ok(expanded.into()) }
40.19214
200
0.510104
d51916d78185a5ad6870cd97d9571e10484456e4
8,359
//! Timed kernel and buffer tests / benchmarks / examples. //! //! Manipulate the consts below to fiddle with parameters. To create longer //! running tests, increase `WORK_SIZE`, and the `*_ITERS` consts. The //! other consts can be anything at all. //! //! Due to buggy and/or intentionally crippled drivers, this example may not //! work on NVIDIA hardware. Until NVIDIA's implementation is corrected this //! example may fail on that platform. extern crate time; extern crate fil_ocl as ocl; extern crate ocl_extras; // * TODO: Bring this back once `Instant` stabilizes: use // std::time::Instant; use ocl::{core, ProQue, Buffer, EventList}; const WORK_SIZE: usize = 1 << 12; const KERNEL_RUN_ITERS: i32 = 800; const BUFFER_READ_ITERS: i32 = 20; const KERNEL_AND_BUFFER_ITERS: i32 = 1000; const SCALAR: f32 = 1.0; const INIT_VAL_RANGE: (f32, f32) = (100.0, 200.0); const PRINT_SOME_RESULTS: bool = true; const RESULTS_TO_PRINT: usize = 5; fn timed() -> ocl::Result<()> { // Define a kernel: let src = r#" __kernel void add( __global float const* const source, __private float scalar, __global float* const result) { uint idx = get_global_id(0); result[idx] = source[idx] + scalar; } "#; // Create an all-in-one context, program, and command queue: let ocl_pq = ProQue::builder().src(src).dims(WORK_SIZE).build()?; // Create init and result buffers and vectors: let vec_init = ocl_extras::scrambled_vec(INIT_VAL_RANGE, ocl_pq.dims().to_len()); let buffer_init = Buffer::builder() .queue(ocl_pq.queue().clone()) .flags(core::MemFlags::new()) .len(WORK_SIZE) .copy_host_slice(&vec_init) .build()?; let mut vec_result = vec![0.0f32; WORK_SIZE]; let buffer_result = Buffer::<f32>::builder() .queue(ocl_pq.queue().clone()) .len(WORK_SIZE) .build()?; // Create a kernel with arguments matching those in the kernel: let kern = ocl_pq.kernel_builder("add") .global_work_size(ocl_pq.dims().clone()) .arg_named("source", Some(&buffer_init)) .arg(SCALAR) .arg(&buffer_result) .build()?; // 
################################################## // ##################### KERNEL ##################### // ################################################## print!("\n"); println!("Enqueuing {} kernel runs... ", KERNEL_RUN_ITERS); // Start kernel timer let kern_start = time::get_time(); // Enqueue kernel the first time: unsafe { kern.enq()?; } // Set kernel source buffer to the same as result: // kern.set_arg("source", Some(&buffer_result))?; kern.set_arg("source", &buffer_result)?; // Enqueue kernel for additional iterations: for _ in 0..(KERNEL_RUN_ITERS - 1) { unsafe { kern.enq()?; } } // Wait for all kernels to run: ocl_pq.queue().finish()?; // Print elapsed time for kernels: print_elapsed("total elapsed", kern_start); // ################################################## // ##################### BUFFER ##################### // ################################################## print!("\n"); println!("Enqueuing {} buffer reads... ", BUFFER_READ_ITERS); // Start kernel timer let buffer_start = time::get_time(); // Read results from the device into buffer's local vector: for _ in 0..BUFFER_READ_ITERS { buffer_result.cmd().read(&mut vec_result).enq()? } print_elapsed("queue unfinished", buffer_start); ocl_pq.queue().finish()?; print_elapsed("queue finished", buffer_start); verify_results(&vec_init, &vec_result, KERNEL_RUN_ITERS)?; // ################################################## // ########### KERNEL & BUFFER BLOCKING ############# // ################################################## print!("\n"); println!("Enqueuing {} blocking kernel buffer sequences... 
", KERNEL_AND_BUFFER_ITERS); let kern_buf_start = time::get_time(); for _ in 0..(KERNEL_AND_BUFFER_ITERS) { unsafe { kern.enq()?; } buffer_result.cmd().read(&mut vec_result).enq()?; } print_elapsed("queue unfinished", kern_buf_start); ocl_pq.queue().finish()?; print_elapsed("queue finished", kern_buf_start); verify_results(&vec_init, &vec_result, KERNEL_AND_BUFFER_ITERS + KERNEL_RUN_ITERS)?; // ################################################## // ######### KERNEL & BUFFER NON-BLOCKING ########### // ################################################## print!("\n"); println!("Enqueuing {} non-blocking kernel buffer sequences... ", KERNEL_AND_BUFFER_ITERS); let kern_buf_start = time::get_time(); let mut kern_events = EventList::new(); let mut buf_events = EventList::new(); for _ in 0..KERNEL_AND_BUFFER_ITERS { unsafe { kern.cmd().ewait(&buf_events).enew(&mut kern_events).enq()?; } buffer_result.cmd().read(&mut vec_result).ewait(&kern_events) .enew(&mut buf_events).enq()?; } print_elapsed("queue unfinished", kern_buf_start); ocl_pq.queue().finish()?; print_elapsed("queue finished", kern_buf_start); kern_events.wait_for()?; kern_events.clear_completed()?; buf_events.wait_for()?; buf_events.clear_completed()?; verify_results(&vec_init, &vec_result, KERNEL_AND_BUFFER_ITERS + KERNEL_AND_BUFFER_ITERS + KERNEL_RUN_ITERS)?; // ################################################## // ############# CAUTION IS OVERRATED ############### // ################################################## print!("\n"); println!("Enqueuing another {} kernel buffer sequences... 
", KERNEL_AND_BUFFER_ITERS); let kern_buf_start = time::get_time(); for _ in 0..KERNEL_AND_BUFFER_ITERS { unsafe { kern.cmd().enew(&mut kern_events).enq()?; } unsafe { buffer_result.cmd().read(&mut vec_result).enew(&mut buf_events) .block(true).enq()?; } } print_elapsed("queue unfinished", kern_buf_start); ocl_pq.queue().finish()?; print_elapsed("queue finished", kern_buf_start); kern_events.wait_for()?; buf_events.wait_for()?; verify_results(&vec_init, &vec_result, KERNEL_AND_BUFFER_ITERS + KERNEL_AND_BUFFER_ITERS + KERNEL_AND_BUFFER_ITERS + KERNEL_RUN_ITERS) } // [KEEP]: // Convert back to this once `Instant` stabilizes: // // fn print_elapsed(title: &str, start: Instant) { // let time_elapsed = time::get_time() - start; // // let time_elapsed = time::get_time().duration_since(start); // let elapsed_ms = time_elapsed.subsec_nanos() / 1000000; // let separator = if title.len() > 0 { ": " } else { "" }; // println!(" {}{}: {}.{:03}", title, separator, time_elapsed.as_secs(), elapsed_ms); // } // // [/KEEP] fn print_elapsed(title: &str, start: time::Timespec) { let time_elapsed = time::get_time() - start; let elapsed_ms = time_elapsed.num_milliseconds(); let separator = if title.len() > 0 { ": " } else { "" }; println!(" {}{}: {}.{:03}", title, separator, time_elapsed.num_seconds(), elapsed_ms); } fn verify_results(vec_init: &Vec<f32>, vec_result: &Vec<f32>, iters: i32) -> ocl::Result<()> { print!("\nVerifying result values... 
"); if PRINT_SOME_RESULTS { print!("(printing {})\n", RESULTS_TO_PRINT); } // let margin_of_error = iters as f32 / 100000.0; let margin_of_error = 0.1 as f32; for idx in 0..WORK_SIZE { let correct = vec_init[idx] + (iters as f32 * SCALAR); assert!((correct - vec_result[idx]).abs() < margin_of_error, " INVALID RESULT[{}]: init: {}, correct: {}, margin: {}, result: {}", idx, vec_init[idx], correct, margin_of_error, vec_result[idx]); if PRINT_SOME_RESULTS && (idx % (WORK_SIZE / RESULTS_TO_PRINT)) == 0 { println!(" [{}]: init: {}, correct: {}, result: {}", idx, vec_init[idx], correct, vec_result[idx]); } } if PRINT_SOME_RESULTS { print!("\n"); } println!("All result values are correct."); Ok(()) } pub fn main() { match timed() { Ok(_) => (), Err(err) => println!("{}", err), } }
32.399225
103
0.568369
8fd54b4082b2d1fb19d9276ef3c83a44e1b6b7b9
931
use std::io;

/// Returns the sum of all contiguous subarrays of `arr` that have odd length
/// (LeetCode 1588, "Sum of All Odd Length Subarrays").
///
/// Iterates over every odd window size `n` (1, 3, 5, ...) and sums each
/// sliding window of that size. An empty input yields 0.
/// NOTE(review): plain `i32` arithmetic — very large inputs can overflow.
fn sum_odd_length_subarrays(arr: Vec<i32>) -> i32 {
    (1..=arr.len())
        .step_by(2)
        .map(|n| arr.windows(n).map(|s| s.iter().sum::<i32>()).sum::<i32>())
        .sum()
}

/// Reads integers from stdin one per line until the user types "quit" (or
/// stdin is closed), then prints the sum of all odd-length subarrays.
fn main() {
    let mut arr: Vec<i32> = Vec::new();
    let mut i = 0;
    loop {
        let mut input_string = String::new();
        println!("Enter the value for arr[{}], Type quit to stop: ", i);
        // Treat EOF (0 bytes read) or a read error as end of input instead of
        // panicking, so piped/redirected input terminates cleanly.
        match io::stdin().read_line(&mut input_string) {
            Ok(0) | Err(_) => break,
            Ok(_) => {}
        }
        // `trim` (rather than trimming only '\n') also strips the '\r' left by
        // Windows line endings, so "quit" is recognized on every platform.
        let token = input_string.trim();
        if token == "quit" {
            break;
        }
        // Re-prompt on invalid input instead of aborting: the original
        // `.expect("input is not an integer")` panicked on a single typo.
        match token.parse::<i32>() {
            Ok(value) => {
                arr.push(value);
                i += 1;
            }
            Err(_) => println!("'{}' is not an integer, please try again", token),
        }
    }
    // Typo fixed: "lenght" -> "length".
    println!(
        "The sum of odd length of subarrays is: {:#?}",
        sum_odd_length_subarrays(arr)
    );
}
29.09375
76
0.493018
ccaea6dbb015c3475b0e7891eea8ce9704a51f5a
10,625
//! This crate should eventually represent the structure at this repo: //! //! https://github.com/eth2-clients/eth2-testnets/tree/master/nimbus/testnet1 //! //! It is not accurate at the moment, we include extra files and we also don't support a few //! others. We are unable to conform to the repo until we have the following PR merged: //! //! https://github.com/sigp/lighthouse/pull/605 use enr::{CombinedKey, Enr}; use ssz::{Decode, Encode}; use std::fs::{create_dir_all, File}; use std::io::{Read, Write}; use std::path::PathBuf; use types::{Address, BeaconState, EthSpec, YamlConfig}; pub const ADDRESS_FILE: &str = "deposit_contract.txt"; pub const DEPLOY_BLOCK_FILE: &str = "deploy_block.txt"; pub const BOOT_ENR_FILE: &str = "boot_enr.yaml"; pub const GENESIS_STATE_FILE: &str = "genesis.ssz"; pub const YAML_CONFIG_FILE: &str = "config.yaml"; /// The name of the testnet to hardcode. /// /// Should be set to `None` when no existing testnet is compatible with the codebase. pub const HARDCODED_TESTNET: Option<&str> = Some("altona-v3"); pub const HARDCODED_YAML_CONFIG: &[u8] = include_bytes!("../altona-v3/config.yaml"); pub const HARDCODED_DEPLOY_BLOCK: &[u8] = include_bytes!("../altona-v3/deploy_block.txt"); pub const HARDCODED_DEPOSIT_CONTRACT: &[u8] = include_bytes!("../altona-v3/deposit_contract.txt"); pub const HARDCODED_GENESIS_STATE: &[u8] = include_bytes!("../altona-v3/genesis.ssz"); pub const HARDCODED_BOOT_ENR: &[u8] = include_bytes!("../altona-v3/boot_enr.yaml"); /// Specifies an Eth2 testnet. /// /// See the crate-level documentation for more details. #[derive(Clone, PartialEq, Debug)] pub struct Eth2TestnetConfig<E: EthSpec> { pub deposit_contract_address: String, pub deposit_contract_deploy_block: u64, pub boot_enr: Option<Vec<Enr<CombinedKey>>>, pub genesis_state: Option<BeaconState<E>>, pub yaml_config: Option<YamlConfig>, } impl<E: EthSpec> Eth2TestnetConfig<E> { /// Creates the `Eth2TestnetConfig` that was included in the binary at compile time. 
This can be /// considered the default Lighthouse testnet. /// /// Returns an error if those included bytes are invalid (this is unlikely). /// Returns `None` if the hardcoded testnet is disabled. pub fn hard_coded() -> Result<Option<Self>, String> { if HARDCODED_TESTNET.is_some() { Ok(Some(Self { deposit_contract_address: serde_yaml::from_reader(HARDCODED_DEPOSIT_CONTRACT) .map_err(|e| format!("Unable to parse contract address: {:?}", e))?, deposit_contract_deploy_block: serde_yaml::from_reader(HARDCODED_DEPLOY_BLOCK) .map_err(|e| format!("Unable to parse deploy block: {:?}", e))?, boot_enr: Some( serde_yaml::from_reader(HARDCODED_BOOT_ENR) .map_err(|e| format!("Unable to parse boot enr: {:?}", e))?, ), genesis_state: Some( BeaconState::from_ssz_bytes(HARDCODED_GENESIS_STATE) .map_err(|e| format!("Unable to parse genesis state: {:?}", e))?, ), yaml_config: Some( serde_yaml::from_reader(HARDCODED_YAML_CONFIG) .map_err(|e| format!("Unable to parse genesis state: {:?}", e))?, ), })) } else { Ok(None) } } // Write the files to the directory. // // Overwrites files if specified to do so. pub fn write_to_file(&self, base_dir: PathBuf, overwrite: bool) -> Result<(), String> { if base_dir.exists() && !overwrite { return Err("Testnet directory already exists".to_string()); } self.force_write_to_file(base_dir) } // Write the files to the directory, even if the directory already exists. pub fn force_write_to_file(&self, base_dir: PathBuf) -> Result<(), String> { create_dir_all(&base_dir) .map_err(|e| format!("Unable to create testnet directory: {:?}", e))?; macro_rules! write_to_yaml_file { ($file: ident, $variable: expr) => { File::create(base_dir.join($file)) .map_err(|e| format!("Unable to create {}: {:?}", $file, e)) .and_then(|mut file| { let yaml = serde_yaml::to_string(&$variable) .map_err(|e| format!("Unable to YAML encode {}: {:?}", $file, e))?; // Remove the doc header from the YAML file. 
// // This allows us to play nice with other clients that are expecting // plain-text, not YAML. let no_doc_header = if yaml.starts_with("---\n") { &yaml[4..] } else { &yaml }; file.write_all(no_doc_header.as_bytes()) .map_err(|e| format!("Unable to write {}: {:?}", $file, e)) })?; }; } write_to_yaml_file!(ADDRESS_FILE, self.deposit_contract_address); write_to_yaml_file!(DEPLOY_BLOCK_FILE, self.deposit_contract_deploy_block); if let Some(boot_enr) = &self.boot_enr { write_to_yaml_file!(BOOT_ENR_FILE, boot_enr); } if let Some(yaml_config) = &self.yaml_config { write_to_yaml_file!(YAML_CONFIG_FILE, yaml_config); } // The genesis state is a special case because it uses SSZ, not YAML. if let Some(genesis_state) = &self.genesis_state { let file = base_dir.join(GENESIS_STATE_FILE); File::create(&file) .map_err(|e| format!("Unable to create {:?}: {:?}", file, e)) .and_then(|mut file| { file.write_all(&genesis_state.as_ssz_bytes()) .map_err(|e| format!("Unable to write {:?}: {:?}", file, e)) })?; } Ok(()) } pub fn load(base_dir: PathBuf) -> Result<Self, String> { macro_rules! load_from_file { ($file: ident) => { File::open(base_dir.join($file)) .map_err(|e| format!("Unable to open {}: {:?}", $file, e)) .and_then(|file| { serde_yaml::from_reader(file) .map_err(|e| format!("Unable to parse {}: {:?}", $file, e)) })?; }; } macro_rules! optional_load_from_file { ($file: ident) => { if base_dir.join($file).exists() { Some(load_from_file!($file)) } else { None } }; } let deposit_contract_address = load_from_file!(ADDRESS_FILE); let deposit_contract_deploy_block = load_from_file!(DEPLOY_BLOCK_FILE); let boot_enr = optional_load_from_file!(BOOT_ENR_FILE); let yaml_config = optional_load_from_file!(YAML_CONFIG_FILE); // The genesis state is a special case because it uses SSZ, not YAML. 
let genesis_file_path = base_dir.join(GENESIS_STATE_FILE); let genesis_state = if genesis_file_path.exists() { Some( File::open(&genesis_file_path) .map_err(|e| format!("Unable to open {:?}: {:?}", genesis_file_path, e)) .and_then(|mut file| { let mut bytes = vec![]; file.read_to_end(&mut bytes) .map_err(|e| format!("Unable to read {:?}: {:?}", file, e))?; BeaconState::from_ssz_bytes(&bytes) .map_err(|e| format!("Unable to SSZ decode {:?}: {:?}", file, e)) })?, ) } else { None }; Ok(Self { deposit_contract_address, deposit_contract_deploy_block, boot_enr, genesis_state, yaml_config, }) } pub fn deposit_contract_address(&self) -> Result<Address, String> { if self.deposit_contract_address.starts_with("0x") { self.deposit_contract_address[2..] .parse() .map_err(|e| format!("Corrupted address, unable to parse: {:?}", e)) } else { Err("Corrupted address, must start with 0x".to_string()) } } } #[cfg(test)] mod tests { use super::*; use tempdir::TempDir; use types::{Eth1Data, Hash256, MainnetEthSpec, YamlConfig}; type E = MainnetEthSpec; #[test] fn hard_coded_works() { if let Some(dir) = Eth2TestnetConfig::<E>::hard_coded().expect("should decode hard_coded params") { assert!(dir.boot_enr.is_some()); assert!(dir.genesis_state.is_some()); assert!(dir.yaml_config.is_some()); } } #[test] fn round_trip() { let spec = &E::default_spec(); let eth1_data = Eth1Data { deposit_root: Hash256::zero(), deposit_count: 0, block_hash: Hash256::zero(), }; // TODO: figure out how to generate ENR and add some here. 
let boot_enr = None; let genesis_state = Some(BeaconState::new(42, eth1_data, spec)); let yaml_config = Some(YamlConfig::from_spec::<E>(spec)); do_test::<E>(boot_enr, genesis_state, yaml_config); do_test::<E>(None, None, None); } fn do_test<E: EthSpec>( boot_enr: Option<Vec<Enr<CombinedKey>>>, genesis_state: Option<BeaconState<E>>, yaml_config: Option<YamlConfig>, ) { let temp_dir = TempDir::new("eth2_testnet_test").expect("should create temp dir"); let base_dir = temp_dir.path().join("my_testnet"); let deposit_contract_address = "0xBB9bc244D798123fDe783fCc1C72d3Bb8C189413".to_string(); let deposit_contract_deploy_block = 42; let testnet: Eth2TestnetConfig<E> = Eth2TestnetConfig { deposit_contract_address, deposit_contract_deploy_block, boot_enr, genesis_state, yaml_config, }; testnet .write_to_file(base_dir.clone(), false) .expect("should write to file"); let decoded = Eth2TestnetConfig::load(base_dir).expect("should load struct"); assert_eq!(testnet, decoded, "should decode as encoded"); } }
38.777372
100
0.563671
f91275be5bc997a14b631d73e27ad5e5cc89b0bc
1,824
use super::*;
use totem_common::converter::Converter;

// Wires the Totem accounting pallet into this runtime.
impl pallet_accounting::Config for Runtime {
    // The overarching runtime event type.
    type Event = Event;
    // Conversion helper shared across Totem pallets.
    type AccountingConverter = Converter;
    // The balances pallet supplies the currency implementation.
    type Currency = Balances;
}

// NOTE(review): the pallet configurations below are commented out —
// presumably staged for later inclusion in the runtime (several reference
// each other, e.g. orders -> prefunding -> escrow). Confirm before deleting.

// impl pallet_archive::Config for Runtime {
// type Event = Event;
// type Timekeeping = pallet_timekeeping::Pallet<Self>;
// }

// impl pallet_bonsai::Config for Runtime {
// type Event = Event;
// type Orders = pallet_orders::Pallet<Self>;
// type Projects = pallet_teams::Pallet<Self>;
// type Timekeeping = pallet_timekeeping::Pallet<Self>;
// type BonsaiConverter = Converter;
// }

// impl pallet_escrow::Config for Runtime {
// type Event = Event;
// type Currency = Balances;
// type EscrowConverter = Converter;
// }

// impl pallet_funding::Config for Runtime {
// type Event = Event;
// }

// impl pallet_orders::Config for Runtime {
// type Event = Event;
// type Accounting = pallet_accounting::Pallet<Self>;
// type Prefunding = pallet_prefunding::Pallet<Self>;
// type Currency = Balances;
// type Bonsai = pallet_bonsai::Pallet<Self>;
// type OrdersConverter = Converter;
// }

// impl pallet_prefunding::Config for Runtime {
// type Event = Event;
// type Currency = pallet_balances::Pallet<Self>;
// type PrefundingConverter = Converter;
// type Accounting = pallet_accounting::Pallet<Self>;
// type Escrowable = pallet_escrow::Pallet<Self>;
// }

// impl pallet_teams::Config for Runtime {
// type Event = Event;
// }

// impl pallet_timekeeping::Config for Runtime {
// type Event = Event;
// type Projects = Teams;
// }

// impl pallet_transfer::Config for Runtime {
// type Event = Event;
// type Currency = pallet_balances::Pallet<Self>;
// type Bonsai = pallet_bonsai::Pallet<Self>;
// }
28.5
59
0.660636
f5ff3860afe344ee7f4491771bb71394fb60d886
7,623
//! Tidy check to enforce rules about platform-specific code in std. //! //! This is intended to maintain existing standards of code //! organization in hopes that the standard library will continue to //! be refactored to isolate platform-specific bits, making porting //! easier; where "standard library" roughly means "all the //! dependencies of the std and test crates". //! //! This generally means placing restrictions on where `cfg(unix)`, //! `cfg(windows)`, `cfg(target_os)` and `cfg(target_env)` may appear, //! the basic objective being to isolate platform-specific code to the //! platform-specific `std::sys` modules, and to the allocation, //! unwinding, and libc crates. //! //! Following are the basic rules, though there are currently //! exceptions: //! //! - core may not have platform-specific code. //! - libpanic_abort may have platform-specific code. //! - libpanic_unwind may have platform-specific code. //! - libunwind may have platform-specific code. //! - other crates in the std facade may not. //! - std may have platform-specific code in the following places: //! - `sys/` //! - `os/` //! //! `std/sys_common` should _not_ contain platform-specific code. //! Finally, because std contains tests with platform-specific //! `ignore` attributes, once the parser encounters `mod tests`, //! platform-specific cfgs are allowed. Not sure yet how to deal with //! this in the long term. use std::iter::Iterator; use std::path::Path; // Paths that may contain platform-specific code. const EXCEPTION_PATHS: &[&str] = &[ "library/panic_abort", "library/panic_unwind", "library/unwind", "library/rtstartup", // Not sure what to do about this. magic stuff for mingw "library/term", // Not sure how to make this crate portable, but test crate needs it. "library/test", // Probably should defer to unstable `std::sys` APIs. // The `VaList` implementation must have platform specific code. 
// The Windows implementation of a `va_list` is always a character // pointer regardless of the target architecture. As a result, // we must use `#[cfg(windows)]` to conditionally compile the // correct `VaList` structure for windows. "library/core/src/ffi/mod.rs", "library/std/src/sys/", // Platform-specific code for std lives here. "library/std/src/os", // Platform-specific public interfaces // Temporary `std` exceptions // FIXME: platform-specific code should be moved to `sys` "library/std/src/io/copy.rs", "library/std/src/io/stdio.rs", "library/std/src/f32.rs", "library/std/src/f64.rs", "library/std/src/path.rs", "library/std/src/sys_common", // Should only contain abstractions over platforms "library/std/src/net/test.rs", // Utility helpers for tests "library/std/src/panic.rs", // fuchsia-specific panic backtrace handling ]; pub fn check(path: &Path, bad: &mut bool) { // Sanity check that the complex parsing here works. let mut saw_target_arch = false; let mut saw_cfg_bang = false; super::walk(path, &mut super::filter_dirs, &mut |entry, contents| { let file = entry.path(); let filestr = file.to_string_lossy().replace("\\", "/"); if !filestr.ends_with(".rs") { return; } let is_exception_path = EXCEPTION_PATHS.iter().any(|s| filestr.contains(&**s)); if is_exception_path { return; } // exclude tests and benchmarks as some platforms do not support all tests if filestr.contains("tests") || filestr.contains("benches") { return; } check_cfgs(contents, &file, bad, &mut saw_target_arch, &mut saw_cfg_bang); }); assert!(saw_target_arch); assert!(saw_cfg_bang); } fn check_cfgs( contents: &str, file: &Path, bad: &mut bool, saw_target_arch: &mut bool, saw_cfg_bang: &mut bool, ) { // Pull out all `cfg(...)` and `cfg!(...)` strings. 
let cfgs = parse_cfgs(contents); let mut line_numbers: Option<Vec<usize>> = None; let mut err = |idx: usize, cfg: &str| { if line_numbers.is_none() { line_numbers = Some(contents.match_indices('\n').map(|(i, _)| i).collect()); } let line_numbers = line_numbers.as_ref().expect(""); let line = match line_numbers.binary_search(&idx) { Ok(_) => unreachable!(), Err(i) => i + 1, }; tidy_error!(bad, "{}:{}: platform-specific cfg: {}", file.display(), line, cfg); }; for (idx, cfg) in cfgs { // Sanity check that the parsing here works. if !*saw_target_arch && cfg.contains("target_arch") { *saw_target_arch = true } if !*saw_cfg_bang && cfg.contains("cfg!") { *saw_cfg_bang = true } let contains_platform_specific_cfg = cfg.contains("target_os") || cfg.contains("target_env") || cfg.contains("target_abi") || cfg.contains("target_vendor") || cfg.contains("unix") || cfg.contains("windows"); if !contains_platform_specific_cfg { continue; } let preceeded_by_doc_comment = { let pre_contents = &contents[..idx]; let pre_newline = pre_contents.rfind('\n'); let pre_doc_comment = pre_contents.rfind("///"); match (pre_newline, pre_doc_comment) { (Some(n), Some(c)) => n < c, (None, Some(_)) => true, (_, None) => false, } }; if preceeded_by_doc_comment { continue; } // exclude tests as some platforms do not support all tests if cfg.contains("test") { continue; } err(idx, cfg); } } fn parse_cfgs(contents: &str) -> Vec<(usize, &str)> { let candidate_cfgs = contents.match_indices("cfg"); let candidate_cfg_idxs = candidate_cfgs.map(|(i, _)| i); // This is puling out the indexes of all "cfg" strings // that appear to be tokens followed by a parenthesis. 
let cfgs = candidate_cfg_idxs.filter(|i| { let pre_idx = i.saturating_sub(1); let succeeds_non_ident = !contents .as_bytes() .get(pre_idx) .cloned() .map(char::from) .map(char::is_alphanumeric) .unwrap_or(false); let contents_after = &contents[*i..]; let first_paren = contents_after.find('('); let paren_idx = first_paren.map(|ip| i + ip); let preceeds_whitespace_and_paren = paren_idx .map(|ip| { let maybe_space = &contents[*i + "cfg".len()..ip]; maybe_space.chars().all(|c| char::is_whitespace(c) || c == '!') }) .unwrap_or(false); succeeds_non_ident && preceeds_whitespace_and_paren }); cfgs.flat_map(|i| { let mut depth = 0; let contents_from = &contents[i..]; for (j, byte) in contents_from.bytes().enumerate() { match byte { b'(' => { depth += 1; } b')' => { depth -= 1; if depth == 0 { return Some((i, &contents_from[..=j])); } } _ => {} } } // if the parentheses are unbalanced just ignore this cfg -- it'll be caught when attempting // to run the compiler, and there's no real reason to lint it separately here None }) .collect() }
36.3
100
0.592418
e6436d27d8b1974bdf8d72f519b72af9e4506bc2
5,467
extern crate dft_types; extern crate dft_utils; use crate::state::TOKEN; use candid::encode_args; use dft_standard::ic_management::*; use dft_types::constants::{CYCLES_PER_AUTO_SCALING, MAX_HEAP_MEMORY_SIZE, MAX_TXS_CACHE_IN_DFT}; use dft_types::*; use ic_cdk::{ api, export::{candid::Nat, Principal}, }; pub(crate) async fn exec_auto_scaling_strategy() -> ActorResult<()> { let inner_txs: Vec<TxRecord> = TOKEN.with(|token| { let token = token.borrow(); token.get_inner_txs() }); let first_tx_index_inner: Nat = TOKEN.with(|token| { let token = token.borrow(); token.get_tx_index(&inner_txs[0]) }); // When create auto-scaling storage ? // DFT's txs count > 2000 // It's means when creating a test DFT, when the number of transactions is less than 2000, no storage will be created to save cycles if inner_txs.len() >= MAX_TXS_CACHE_IN_DFT * 2 { let storage_canister_id = get_or_create_available_storage_id(&first_tx_index_inner).await?; let should_save_txs = inner_txs[0..MAX_TXS_CACHE_IN_DFT].to_vec(); //save the txs to auto-scaling storage match api::call::call(storage_canister_id, "batchAppend", (should_save_txs,)).await { Ok((res,)) => { if res { TOKEN.with(|token| { let mut token = token.borrow_mut(); (0..MAX_TXS_CACHE_IN_DFT).for_each(|_| token.remove_inner_txs(0)); }); } } Err((_, emsg)) => { api::print(format!( "batchAppend: save to auto-scaling storage failed,{} ", emsg )); } } } Ok(()) } async fn get_or_create_available_storage_id(tx_index: &Nat) -> ActorResult<Principal> { let mut max_key = Nat::from(0); let mut last_storage_id = Principal::anonymous(); TOKEN.with(|token| { let token = token.borrow(); for (k, v) in token.get_storage_canister_ids() { if k >= max_key && last_storage_id != v { max_key = k.clone(); last_storage_id = v.clone(); } } }); let mut is_necessary_create_new_storage_canister = last_storage_id == Principal::anonymous(); // check storage remain size if !is_necessary_create_new_storage_canister { let req = CanisterIdRecord { canister_id: 
last_storage_id, }; let status = get_canister_status(req).await; match status { Ok(res) => { ic_cdk::print(format!("memory_size is {}", res.memory_size)); let min_storage_size_for_cache_txs = Nat::from(MAX_TXS_CACHE_IN_DFT * std::mem::size_of::<TxRecord>()); if (Nat::from(MAX_HEAP_MEMORY_SIZE) - res.memory_size) .lt(&min_storage_size_for_cache_txs) { is_necessary_create_new_storage_canister = true; } else { return Ok(last_storage_id); } } Err(_) => { return Err(DFTError::StorageScalingFailed.into()); } }; } if is_necessary_create_new_storage_canister { const STORAGE_WASM: &[u8] = std::include_bytes!( "../../target/wasm32-unknown-unknown/release/dft_tx_storage_opt.wasm" ); let dft_id = api::id(); let create_args = CreateCanisterArgs { cycles: CYCLES_PER_AUTO_SCALING, settings: CanisterSettings { controllers: Some(vec![dft_id.clone()]), compute_allocation: None, memory_allocation: None, freezing_threshold: None, }, }; api::print("creating token storage..."); let create_result = create_canister(create_args).await; match create_result { Ok(cdr) => { api::print(format!( "token new storage canister id : {} ,start index is {}", cdr.canister_id.clone().to_string(), tx_index.clone() )); let install_args = encode_args((dft_id.clone(), tx_index.clone())) .expect("Failed to encode arguments."); match install_canister(&cdr.canister_id, STORAGE_WASM.to_vec(), install_args).await { Ok(_) => { TOKEN.with(|token| { let mut token = token.borrow_mut(); token.add_storage_canister_ids(tx_index.clone(), cdr.canister_id) }); return Ok(cdr.canister_id); } Err(emsg) => { api::print(format!( "install auto-scaling storage canister failed. details:{}", emsg )); return Err(DFTError::StorageScalingFailed.into()); } } } Err(emsg) => { api::print(format!("create new storage canister failed {}", emsg).as_str()); return Err(DFTError::StorageScalingFailed.into()); } }; } else { return Ok(last_storage_id); } }
37.190476
136
0.526797
fc1343cd61aeed8ce8848264af0a00a033670f91
1,033
use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;

/// Shared driver for the FUCOMP encoding cases below: builds the
/// `Instruction` value that every case has in common (same mnemonic, a single
/// direct x87 stack-register operand, all optional fields unset) and hands it
/// to the common `run_test` harness together with the expected encoding.
fn run_fucomp(st_reg: Reg, expected_encoding: &[u8], mode: OperandSize) {
    run_test(
        &Instruction {
            mnemonic: Mnemonic::FUCOMP,
            operand1: Some(Direct(st_reg)),
            operand2: None,
            operand3: None,
            operand4: None,
            lock: false,
            rounding_mode: None,
            merge_mode: None,
            sae: false,
            mask: None,
            broadcast: None,
        },
        expected_encoding,
        mode,
    )
}

// FUCOMP ST(1) in 16-bit mode: bytes 0xDD 0xE9.
fn fucomp_1() {
    run_fucomp(ST1, &[221, 233], OperandSize::Word)
}

// FUCOMP ST(3) in 32-bit mode: bytes 0xDD 0xEB.
fn fucomp_2() {
    run_fucomp(ST3, &[221, 235], OperandSize::Dword)
}

// FUCOMP ST(7) in 64-bit mode: bytes 0xDD 0xEF.
fn fucomp_3() {
    run_fucomp(ST7, &[221, 239], OperandSize::Qword)
}
51.65
260
0.704743
f74a4378b07ab93b7513a42f8f627c1777c03788
3,215
// Copyright 2020 Nym Technologies SA // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::FRAG_ID_LEN; use nymsphinx_types::header::HEADER_SIZE; use nymsphinx_types::PAYLOAD_OVERHEAD_SIZE; use std::convert::TryFrom; // it's up to the smart people to figure those values out : ) const REGULAR_PACKET_SIZE: usize = HEADER_SIZE + PAYLOAD_OVERHEAD_SIZE + 2 * 1024; // TODO: even though we have 16B IV, is having just 5B (FRAG_ID_LEN) of the ID possibly insecure? // TODO: I'm not entirely sure if we can easily extract `<AckEncryptionAlgorithm as NewStreamCipher>::NonceSize` // into a const usize before relevant stuff is stabilised in rust... 
const ACK_IV_SIZE: usize = 16; const ACK_PACKET_SIZE: usize = HEADER_SIZE + PAYLOAD_OVERHEAD_SIZE + ACK_IV_SIZE + FRAG_ID_LEN; const EXTENDED_PACKET_SIZE: usize = HEADER_SIZE + PAYLOAD_OVERHEAD_SIZE + 32 * 1024; #[derive(Debug)] pub struct InvalidPacketSize; #[repr(u8)] #[derive(Clone, Copy, Debug, PartialEq)] pub enum PacketSize { RegularPacket = 1, // for example instant messaging use case ACKPacket = 2, // for sending SURB-ACKs ExtendedPacket = 3, // for example for streaming fast and furious in uncompressed 10bit 4K HDR quality } impl TryFrom<u8> for PacketSize { type Error = InvalidPacketSize; fn try_from(value: u8) -> std::result::Result<Self, Self::Error> { match value { _ if value == (PacketSize::RegularPacket as u8) => Ok(Self::RegularPacket), _ if value == (PacketSize::ACKPacket as u8) => Ok(Self::ACKPacket), _ if value == (PacketSize::ExtendedPacket as u8) => Ok(Self::ExtendedPacket), _ => Err(InvalidPacketSize), } } } impl PacketSize { pub fn size(self) -> usize { match self { PacketSize::RegularPacket => REGULAR_PACKET_SIZE, PacketSize::ACKPacket => ACK_PACKET_SIZE, PacketSize::ExtendedPacket => EXTENDED_PACKET_SIZE, } } pub fn plaintext_size(self) -> usize { self.size() - HEADER_SIZE - PAYLOAD_OVERHEAD_SIZE } pub fn payload_size(self) -> usize { self.size() - HEADER_SIZE } pub fn get_type(size: usize) -> std::result::Result<Self, InvalidPacketSize> { if PacketSize::RegularPacket.size() == size { Ok(PacketSize::RegularPacket) } else if PacketSize::ACKPacket.size() == size { Ok(PacketSize::ACKPacket) } else if PacketSize::ExtendedPacket.size() == size { Ok(PacketSize::ExtendedPacket) } else { Err(InvalidPacketSize) } } } impl Default for PacketSize { fn default() -> Self { PacketSize::RegularPacket } }
34.945652
112
0.676205
8922005d2f82c6c154efb001f187e65519618cf0
135
fn foo() -> &'static u32 { let x = 0; &x //~^ ERROR cannot return reference to local variable `x` [E0515] } fn main() { }
16.875
68
0.540741
6a9709c5b6ad596c42cbbe2e339cd274da1962ae
1,162
extern crate toml; use std::fs::File; use std::io::{self, Read}; #[derive(Deserialize, Debug)] pub struct Config { pub min_reviewers_approved: u8, pub pr_max_age: u8, pub notification_timeout: u8, pub sleep_interval: u8, pub bitbucket: Bitbucket, pub slack: Slack, } #[derive(Deserialize, Debug)] pub struct Bitbucket { pub uri: String, pub username: String, pub password: String, } #[derive(Deserialize, Debug)] pub struct Slack { pub uri: String, pub username: String, pub channel: String, } #[derive(Debug)] pub enum ConfigError { IoError(io::Error), ParseError(toml::de::Error), } impl From<io::Error> for ConfigError { fn from(error: io::Error) -> Self { ConfigError::IoError(error) } } impl From<toml::de::Error> for ConfigError { fn from(error: toml::de::Error) -> Self { ConfigError::ParseError(error) } } pub fn parse(filename: &str) -> Result<Config, ConfigError> { let mut fd = File::open(filename)?; let mut contents = String::new(); fd.read_to_string(&mut contents)?; let config: Config = toml::from_str(&contents)?; Ok(config) }
20.034483
61
0.646299
672756866da2a1b22ac217483443a59538ed901f
947
use chrono::{Duration, Utc}; use jsonwebtoken::{DecodingKey, EncodingKey, Header, Validation}; use uuid::Uuid; use crate::{config::env::JWT_SECRET, error::Result}; #[derive(Debug, Deserialize, Serialize)] pub struct Claims { pub sub: Uuid, pub exp: i64, pub iat: i64, } impl Claims { pub fn new(id: Uuid) -> Self { let iat = Utc::now(); let exp = iat + Duration::hours(24); Self { sub: id, iat: iat.timestamp(), exp: exp.timestamp(), } } } pub fn sign(id: Uuid) -> Result<String> { Ok(jsonwebtoken::encode( &Header::default(), &Claims::new(id), &EncodingKey::from_secret(JWT_SECRET.as_bytes()), )?) } pub fn verify(token: &str) -> Result<Claims> { Ok(jsonwebtoken::decode( token, &DecodingKey::from_secret(JWT_SECRET.as_bytes()), &Validation::default(), ) .map(|data| data.claims)?) }
22.023256
65
0.570222
fbe1263f3841911d390384d26317d6bd49748745
18,882
// Copyright 2019 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use std::clone::Clone; use std::iter::Iterator; use std::ops::Range; use std::vec::Vec; use unic_char_range::{chars, CharIter, CharRange}; /// A trait for objects that represent one or more disjoint /// [CharRanges](unic_char_range::CharRange). pub trait MultiCharRange { /// Iterate over the discrete [CharRange]s in the collection in ascending order. fn iter_ranges<'a>(&'a self) -> Box<dyn Iterator<Item = CharRange> + 'a>; /// The number of ranges in the collection. fn range_count(&self) -> usize; } /// A collection of `char`s (i.e. Unicode code points), used for storing large continuous ranges /// efficiently. /// /// Lookups and insertions are O(log <var>R</var>), where <var>R</var> is the number of discrete /// ranges in the collection. /// /// The easiest way to create instances is using the /// [char_collect!](::char_collection::char_collect) macro. /// /// ``` /// use char_collection::CharCollection; /// /// let mut collection: CharCollection = char_collect!('a'..='d', 'x'..='z'); /// char_collection += 'e'; /// char_collection += chars!('p'..='t'); /// assert_eq!( /// collection.iter_ranges().collect(), /// vec![chars!('a'..='e'), chars!('p'..='t'), chars!('x'..='z')]); /// /// assert!(collection.contains(&'c')); /// assert!(collection.contains_range(chars!('q'..='s'))); /// assert!(!collection.contains(&'9')); /// /// collection -= chars!('t'..='y'); /// assert_eq!( /// collection.iter_ranges().collect(), /// vec![chars!('a'..='e', chars!('p'..'s'), chars!('z'..='z'))]); /// ``` /// /// TODO(kpozin): Implement IntoIter. #[derive(Clone, Debug, Eq, PartialEq)] pub struct CharCollection { ranges: Vec<CharRange>, } impl CharCollection { /// Create a new, empty `CharCollection`. pub fn new() -> CharCollection { CharCollection { ranges: Vec::new() } } /// Iterate over all the `char`s in the collection. 
pub fn iter(&self) -> impl Iterator<Item = char> + '_ { self.ranges.iter().flat_map(CharRange::iter) } /// Test whether the collection contains a specific `char`. /// /// The time complexity is O(log <var>R</var>), where <var>R</var> is the number of ranges in /// the collection. pub fn contains(&self, ch: &char) -> bool { self.find_containing_range(ch).is_ok() } /// Test whether the collection contains an entire range of characters. /// /// The time complexity is O(log <var>R</var>), where <var>R</var> is the number of ranges in /// the collection. pub fn contains_range(&self, range: &CharRange) -> bool { if range.is_empty() { return false; } let lower_existing_range = self.find_containing_range(&range.low); let upper_existing_range = self.find_containing_range(&range.high); // Fully enclosed in existing range. return lower_existing_range == upper_existing_range && lower_existing_range.is_ok(); } /// Insert a `char` or other collection of chars into this collection. /// /// Returns `&mut self` for easy chaining. /// /// The time complexity is O(<var>T</var> log(<var>R</var> + <var>T</var>)), where <var>R</var> /// is the number of ranges in this collection and <var>T</var> is the number of ranges in /// `to_add`. pub fn insert<V: MultiCharRange>(&mut self, to_add: &V) -> &mut Self { to_add.iter_ranges().for_each(|range| self.insert_char_range(&range)); self } /// Remove a `char` or other collection of chars from this collection. /// /// Returns `&mut self` for easy chaining. /// /// The time complexity is O(<var>T</var> log(<var>R</var> + <var>T</var>)), where <var>R</var> /// is the number of ranges in this collection and <var>T</var> is the number of ranges in /// `to_remove`. pub fn remove<V: MultiCharRange>(&mut self, to_remove: &V) -> &mut Self { to_remove.iter_ranges().for_each(|range| self.remove_char_range(&range)); self } /// Remove all entries from this collection. /// /// Returns `&mut self` for easy chaining. 
pub fn clear(&mut self) -> &mut Self { self.ranges.clear(); self } /// Return the set union of this collection and another one. /// /// The time complexity is O(min(<var>R</var>, <var>T</var>) log(<var>R</var> + <var>T</var>)), /// where <var>R</var> is the number of ranges in this collection and <var>T</var> is the number /// of ranges in `rhs`. pub fn union<V: MultiCharRange>(&self, rhs: &V) -> CharCollection { let mut result: CharCollection; if self.range_count() > rhs.range_count() { result = self.clone(); result.insert(rhs); } else { result = rhs.into(); result.insert(self); } result } /// Return the set intersection of this collection and another one. /// /// The time complexity is O(min(<var>R</var>, <var>T</var>) log(<var>R</var> + <var>T</var>)), /// where <var>R</var> is the number of ranges in this collection and <var>T</var> is the number /// of ranges in `rhs`. pub fn intersection<V: MultiCharRange>(&self, rhs: &V) -> CharCollection { let mut result: CharCollection; if self.range_count() > rhs.range_count() { result = self.clone(); let rhs: CharCollection = rhs.into(); result.remove(&rhs.complement()); } else { result = rhs.into(); result.remove(&self.complement()); } result } /// Return the (non-symmetric) set difference of this collection and another one. /// /// The time complexity is O(<var>T</var> log(<var>R</var> + <var>T</var>)), where <var>R</var> /// is the number of ranges in this collection and <var>T</var> is the number of ranges in /// `rhs`. pub fn difference<V: MultiCharRange>(&self, rhs: &V) -> CharCollection { let mut result: CharCollection = self.clone(); result.remove(rhs); result } /// Return the set complement of this collection (over the universe of `char`s). /// /// The time complexity is O(<var>R</var>), where <var>R</var> is the number of ranges in this /// collection. 
pub fn complement(&self) -> CharCollection { if self.ranges.is_empty() { return CharCollection::from(&CharRange::all()); } let mut result_ranges: Vec<CharRange> = Vec::new(); if self.ranges[0].low != '\u{0}' { result_ranges.push(CharRange::open_right('\u{0}', self.ranges[0].low)); } let mut prev_high = self.ranges[0].high; for range in &self.ranges[1..] { result_ranges.push(CharRange::open(prev_high, range.low)); prev_high = range.high; } if prev_high != std::char::MAX { result_ranges.push(CharRange::open_left(prev_high, std::char::MAX)); } CharCollection { ranges: result_ranges } } /// Insert a single `CharRange`. /// /// Depending on how the new range relates to existing ranges in /// the collection, it might be subsumed by an existing range, modify the endpoints of an /// existing range, or replace one or more existing ranges. fn insert_char_range(&mut self, new_range: &CharRange) { if new_range.is_empty() { return; } let lower_existing_range = self.find_containing_range(&new_range.low); let upper_existing_range = self.find_containing_range(&new_range.high); // Fully enclosed in existing range. if lower_existing_range == upper_existing_range && lower_existing_range.is_ok() { return; } let new_low: char; let new_high: char; let remove_from_idx: usize; let remove_to_idx: usize; match lower_existing_range { Ok((idx, lower_existing_range)) => { new_low = lower_existing_range.low; remove_from_idx = idx; } Err(idx) => { new_low = new_range.low; remove_from_idx = idx; } } match upper_existing_range { Ok((idx, higher_existing_range)) => { new_high = higher_existing_range.high; remove_to_idx = idx + 1; } Err(idx) => { new_high = new_range.high; remove_to_idx = idx; } } self.replace_ranges(chars!(new_low..=new_high), remove_from_idx..remove_to_idx); } /// Remove a single `CharRange`. /// /// Depending on how the removed range relates to existing ranges in the collection, it might /// remove or modify the endpoints of existing ranges. 
fn remove_char_range(&mut self, range_to_remove: &CharRange) { if range_to_remove.is_empty() { return; } let lower_existing_range = self.find_containing_range(&range_to_remove.low); let upper_existing_range = self.find_containing_range(&range_to_remove.high); let mut replacement_ranges: Vec<CharRange> = Vec::new(); let remove_from_idx: usize; let remove_to_idx: usize; match lower_existing_range { Ok((idx, lower_existing_range)) => { if lower_existing_range.low < range_to_remove.low { replacement_ranges .push(CharRange::open_right(lower_existing_range.low, range_to_remove.low)); } remove_from_idx = idx; } Err(idx) => remove_from_idx = idx, } match upper_existing_range { Ok((idx, higher_existing_range)) => { if range_to_remove.high < higher_existing_range.high { replacement_ranges.push(CharRange::open_left( range_to_remove.high, higher_existing_range.high, )); } remove_to_idx = idx + 1; } Err(idx) => { remove_to_idx = idx; } } self.ranges.splice(remove_from_idx..remove_to_idx, replacement_ranges); } /// Delete all the existing `CharRange`s that fall within `indices_to_replace` in the vector, /// and insert `char_range_to_insert` in their place. If the newly formed range is adjacent to /// a kept range on its left or right, coalesce them. fn replace_ranges( &mut self, mut char_range_to_insert: CharRange, mut indices_to_replace: Range<usize>, ) { // If the newly formed range is adjacent to the range on its left, coalesce the two. if indices_to_replace.start > 0 { let prev_char_range = self.ranges[indices_to_replace.start - 1]; if are_chars_adjacent(&prev_char_range.high, &char_range_to_insert.low) { char_range_to_insert.low = prev_char_range.low; indices_to_replace.start -= 1; } } // If the newly formed range is adjacent to the range on its right, coalesce the two. 
if indices_to_replace.end < self.ranges.len() { let next_char_range = self.ranges[indices_to_replace.end]; if are_chars_adjacent(&char_range_to_insert.high, &next_char_range.low) { char_range_to_insert.high = next_char_range.high; indices_to_replace.end += 1; } } self.ranges.splice(indices_to_replace, vec![char_range_to_insert]); } fn find_containing_range(&self, query: &char) -> Result<(usize, CharRange), usize> { let result = self.ranges.binary_search_by(|range| range.cmp_char(query.clone())); match result { Ok(index) => Ok((index, self.ranges[index])), Err(index) => Err(index), } } } impl MultiCharRange for CharCollection { fn iter_ranges<'a>(&'a self) -> Box<dyn Iterator<Item = CharRange> + 'a> { Box::new(self.ranges.iter().map(|range| range.clone())) } fn range_count(&self) -> usize { self.ranges.len() } } fn are_chars_adjacent(left: &char, right: &char) -> bool { let mut iter: CharIter = CharRange::open_right(left.clone(), right.clone()).iter(); match iter.next_back() { None => false, Some(next_right) => left == &next_right, } } #[cfg(test)] mod tests { use super::are_chars_adjacent; use std::char; use unic_char_range::{chars, CharRange}; #[test] fn test_find_containing_range() { let collection = char_collect!({ ('a'..='d') + ('g'..='j') + ('l'..='o') + 'z' }); assert_eq!(collection.find_containing_range(&'0'), Err(0)); assert_eq!(collection.find_containing_range(&'c'), Ok((0, chars!('a'..='d')))); assert_eq!(collection.find_containing_range(&'e'), Err(1)); } #[test] fn test_insert_initial() { let collection = char_collect!('a'..='d'); assert_eq!(collection.ranges, vec![chars!('a'..='d')]) } #[test] fn test_insert_exact_match() { let mut collection = char_collect!('a'..='d', 'g'..='l'); collection += 'a'..='d'; assert_eq!(collection.ranges, vec![chars!('a'..='d'), chars!('g'..='l')]); } #[test] fn test_insert_non_overlapping_sorted() { let collection = char_collect!('a'..='d', 'g'..='j', 'l'..='o'); assert_eq!( collection.ranges, vec![chars!('a'..='d'), 
chars!('g'..='j'), chars!('l'..='o')] ); } #[test] fn test_insert_non_overlapping_unsorted() { let collection = char_collect!('l'..='o', 'a'..='d', 'l'..='o', 'a'..='d', 'g'..='j'); assert_eq!( collection.ranges, vec![chars!('a'..='d'), chars!('g'..='j'), chars!('l'..='o')] ); } #[test] fn test_insert_overlapping_all_existent() { let mut collection = char_collect!('l'..='o', 'a'..='d'); collection += 'a'..='o'; assert_eq!(collection.ranges, vec![chars!('a'..='o')]); } #[test] fn test_insert_overlapping_some_existent() { let mut collection = char_collect!('c'..='e', 'j'..='m', 'p'..='s'); collection += 'i'..='n'; assert_eq!( collection.ranges, vec![chars!('c'..='e'), chars!('i'..='n'), chars!('p'..='s')] ); } #[test] fn test_insert_overlapping_with_intersections() { let mut collection = char_collect!('c'..='e', 'j'..='m', 'p'..='s'); collection += 'd'..='k'; assert_eq!(collection.ranges, vec![chars!('c'..='m'), chars!('p'..='s')]); } #[test] fn test_insert_coalesce_adjacent_ranges() { let mut collection = char_collect!('a'..='c', 'j'..='m'); collection += 'd'..='i'; assert_eq!(collection.ranges, vec![chars!('a'..='m')]); } #[test] fn test_remove_exact_range() { let mut collection = char_collect!('c'..='e', 'j'..='m', 'p'..='s'); collection -= 'j'..='m'; assert_eq!(collection.ranges, vec![chars!('c'..='e'), chars!['p'..='s']]); } #[test] fn test_remove_overlapping_all_existent() { let mut collection = char_collect!('c'..='e', 'j'..='m', 'p'..='s'); collection -= 'c'..='s'; assert_eq!(collection.ranges, vec![]); } #[test] fn test_remove_overlapping_all_existent_superset() { let mut collection = char_collect!('c'..='e', 'j'..='m', 'p'..='s'); collection -= 'a'..='z'; assert_eq!(collection.ranges, vec![]); } #[test] fn test_remove_one_subrange() { let mut collection = char_collect!('c'..='e', 'j'..='m', 'p'..='s'); collection -= 'k'..='l'; assert_eq!( collection.ranges, vec![chars!('c'..='e'), chars!('j'..='j'), chars!('m'..='m'), chars!('p'..='s')] ); } #[test] fn 
test_remove_intersection() { let mut collection = char_collect!('c'..='e', 'j'..='m', 'p'..='s'); collection -= 'd'..='q'; assert_eq!(collection.ranges, vec![chars!('c'..='c'), chars!('r'..='s')]); } #[test] fn test_complement_simple() { let collection = char_collect!(0x10..=0x50, 0x70..=0x70, 0x99..=0x640); assert_eq!( collection.complement(), char_collect!(0x00..=0x0F, 0x51..=0x6F, 0x71..=0x98, 0x641..=(char::MAX as u32)) ); } #[test] fn test_complement_all() { let collection = char_collect!(CharRange::all()); assert_eq!(collection.complement(), char_collect!()); } #[test] fn test_complement_none() { let collection = char_collect!(); assert_eq!(collection.complement(), char_collect!(CharRange::all())); } #[test] fn test_complement_includes_min_and_max() { let collection = char_collect!(0x0..=0x10, 0x40..=0x50, 0xCCCC..=(char::MAX as u32)); assert_eq!(collection.complement(), char_collect!(0x11..=0x3F, 0x51..=0xCCCB)); } #[test] fn test_union() { let collection_a = char_collect!('a'..='g', 'm'..='z', 'B'..='R'); let collection_b = char_collect!('e'..='q', 'W'..='Y'); let expected = char_collect!('a'..='z', 'B'..='R', 'W'..='Y'); assert_eq!(collection_a.union(&collection_b), expected); assert_eq!(collection_b.union(&collection_a), expected); } #[test] fn test_intersection() { let collection_a = char_collect!('a'..='g', 'm'..='z'); let collection_b = char_collect!('e'..='q'); let expected = char_collect!('e'..='g', 'm'..='q'); assert_eq!(collection_a.intersection(&collection_b), expected); assert_eq!(collection_b.intersection(&collection_a), expected); } #[test] fn test_macro_expressions() { use unicode_blocks::UnicodeBlockId::Arabic; let collection = char_collect!({ ('c'..='e') + ('f'..='h') - ('a'..='d') + Arabic + (0x5..=0x42) }); assert_eq!(collection, char_collect!(0x5..=0x42, 'e'..='h', Arabic)); } #[test] fn test_iter() { let mut v: Vec<char> = Vec::new(); let collection = char_collect!('a'..='c', 'j'..='l', 'x'..='z'); collection.iter().for_each(|ch| 
v.push(ch)); assert_eq!(v, vec!['a', 'b', 'c', 'j', 'k', 'l', 'x', 'y', 'z']); } #[test] fn test_are_chars_adjacent() { assert!(are_chars_adjacent(&'a', &'b')); assert!(!are_chars_adjacent(&'b', &'a')); assert!(!are_chars_adjacent(&'a', &'c')); } }
34.773481
100
0.568266
e23fc6a84a96760b7a6c19ebf3756619d4bc12d4
39,755
//! A wrapper around the procedural macro API of the compiler's [`proc_macro`] //! crate. This library serves two purposes: //! //! [`proc_macro`]: https://doc.rust-lang.org/proc_macro/ //! //! - **Bring proc-macro-like functionality to other contexts like build.rs and //! main.rs.** Types from `proc_macro` are entirely specific to procedural //! macros and cannot ever exist in code outside of a procedural macro. //! Meanwhile `proc_macro2` types may exist anywhere including non-macro code. //! By developing foundational libraries like [syn] and [quote] against //! `proc_macro2` rather than `proc_macro`, the procedural macro ecosystem //! becomes easily applicable to many other use cases and we avoid //! reimplementing non-macro equivalents of those libraries. //! //! - **Make procedural macros unit testable.** As a consequence of being //! specific to procedural macros, nothing that uses `proc_macro` can be //! executed from a unit test. In order for helper libraries or components of //! a macro to be testable in isolation, they must be implemented using //! `proc_macro2`. //! //! [syn]: https://github.com/dtolnay/syn //! [quote]: https://github.com/dtolnay/quote //! //! # Usage //! //! The skeleton of a typical procedural macro typically looks like this: //! //! ``` //! extern crate proc_macro; //! //! # const IGNORE: &str = stringify! { //! #[proc_macro_derive(MyDerive)] //! # }; //! # #[cfg(wrap_proc_macro)] //! pub fn my_derive(input: proc_macro::TokenStream) -> proc_macro::TokenStream { //! let input = proc_macro2::TokenStream::from(input); //! //! let output: proc_macro2::TokenStream = { //! /* transform input */ //! # input //! }; //! //! proc_macro::TokenStream::from(output) //! } //! ``` //! //! If parsing with [Syn], you'll use [`parse_macro_input!`] instead to //! propagate parse errors correctly back to the compiler when parsing fails. //! //! [`parse_macro_input!`]: https://docs.rs/syn/1.0/syn/macro.parse_macro_input.html //! //! 
# Unstable features //! //! The default feature set of proc-macro2 tracks the most recent stable //! compiler API. Functionality in `proc_macro` that is not yet stable is not //! exposed by proc-macro2 by default. //! //! To opt into the additional APIs available in the most recent nightly //! compiler, the `procmacro2_semver_exempt` config flag must be passed to //! rustc. We will polyfill those nightly-only APIs back to Rust 1.31.0. As //! these are unstable APIs that track the nightly compiler, minor versions of //! proc-macro2 may make breaking changes to them at any time. //! //! ```sh //! RUSTFLAGS='--cfg procmacro2_semver_exempt' cargo build //! ``` //! //! Note that this must not only be done for your crate, but for any crate that //! depends on your crate. This infectious nature is intentional, as it serves //! as a reminder that you are outside of the normal semver guarantees. //! //! Semver exempt methods are marked as such in the proc-macro2 documentation. //! //! # Thread-Safety //! //! Most types in this crate are `!Sync` because the underlying compiler //! types make use of thread-local memory, meaning they cannot be accessed from //! a different thread. // Proc-macro2 types in rustdoc of other crates get linked to here. 
#![doc(html_root_url = "https://docs.rs/proc-macro2/1.0.19")] #![cfg_attr(any(proc_macro_span, super_unstable), feature(proc_macro_span))] #![cfg_attr(super_unstable, feature(proc_macro_raw_ident, proc_macro_def_site))] #![allow(clippy::needless_doctest_main)] #[cfg(use_proc_macro)] extern crate proc_macro; use std::cmp::Ordering; use std::fmt::{self, Debug, Display}; use std::hash::{Hash, Hasher}; use std::iter::FromIterator; use std::marker; use std::ops::RangeBounds; #[cfg(procmacro2_semver_exempt)] use std::path::PathBuf; use std::rc::Rc; use std::str::FromStr; mod parse; #[cfg(wrap_proc_macro)] mod detection; // Public for proc_macro2::fallback::force() and unforce(), but those are quite // a niche use case so we omit it from rustdoc. #[doc(hidden)] pub mod fallback; #[cfg(not(wrap_proc_macro))] use crate::fallback as imp; #[path = "wrapper.rs"] #[cfg(wrap_proc_macro)] mod imp; /// An abstract stream of tokens, or more concretely a sequence of token trees. /// /// This type provides interfaces for iterating over token trees and for /// collecting token trees into one stream. /// /// Token stream is both the input and output of `#[proc_macro]`, /// `#[proc_macro_attribute]` and `#[proc_macro_derive]` definitions. #[derive(Clone)] pub struct TokenStream { inner: imp::TokenStream, _marker: marker::PhantomData<Rc<()>>, } /// Error returned from `TokenStream::from_str`. pub struct LexError { inner: imp::LexError, _marker: marker::PhantomData<Rc<()>>, } impl TokenStream { fn _new(inner: imp::TokenStream) -> TokenStream { TokenStream { inner, _marker: marker::PhantomData, } } fn _new_stable(inner: fallback::TokenStream) -> TokenStream { TokenStream { inner: inner.into(), _marker: marker::PhantomData, } } /// Returns an empty `TokenStream` containing no token trees. pub fn new() -> TokenStream { TokenStream::_new(imp::TokenStream::new()) } /// Checks if this `TokenStream` is empty. 
pub fn is_empty(&self) -> bool { self.inner.is_empty() } } /// `TokenStream::default()` returns an empty stream, /// i.e. this is equivalent with `TokenStream::new()`. impl Default for TokenStream { fn default() -> Self { TokenStream::new() } } /// Attempts to break the string into tokens and parse those tokens into a token /// stream. /// /// May fail for a number of reasons, for example, if the string contains /// unbalanced delimiters or characters not existing in the language. /// /// NOTE: Some errors may cause panics instead of returning `LexError`. We /// reserve the right to change these errors into `LexError`s later. impl FromStr for TokenStream { type Err = LexError; fn from_str(src: &str) -> Result<TokenStream, LexError> { let e = src.parse().map_err(|e| LexError { inner: e, _marker: marker::PhantomData, })?; Ok(TokenStream::_new(e)) } } #[cfg(use_proc_macro)] impl From<proc_macro::TokenStream> for TokenStream { fn from(inner: proc_macro::TokenStream) -> TokenStream { TokenStream::_new(inner.into()) } } #[cfg(use_proc_macro)] impl From<TokenStream> for proc_macro::TokenStream { fn from(inner: TokenStream) -> proc_macro::TokenStream { inner.inner.into() } } impl From<TokenTree> for TokenStream { fn from(token: TokenTree) -> Self { TokenStream::_new(imp::TokenStream::from(token)) } } impl Extend<TokenTree> for TokenStream { fn extend<I: IntoIterator<Item = TokenTree>>(&mut self, streams: I) { self.inner.extend(streams) } } impl Extend<TokenStream> for TokenStream { fn extend<I: IntoIterator<Item = TokenStream>>(&mut self, streams: I) { self.inner .extend(streams.into_iter().map(|stream| stream.inner)) } } /// Collects a number of token trees into a single stream. 
impl FromIterator<TokenTree> for TokenStream { fn from_iter<I: IntoIterator<Item = TokenTree>>(streams: I) -> Self { TokenStream::_new(streams.into_iter().collect()) } } impl FromIterator<TokenStream> for TokenStream { fn from_iter<I: IntoIterator<Item = TokenStream>>(streams: I) -> Self { TokenStream::_new(streams.into_iter().map(|i| i.inner).collect()) } } /// Prints the token stream as a string that is supposed to be losslessly /// convertible back into the same token stream (modulo spans), except for /// possibly `TokenTree::Group`s with `Delimiter::None` delimiters and negative /// numeric literals. impl Display for TokenStream { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { Display::fmt(&self.inner, f) } } /// Prints token in a form convenient for debugging. impl Debug for TokenStream { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { Debug::fmt(&self.inner, f) } } impl Debug for LexError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { Debug::fmt(&self.inner, f) } } /// The source file of a given `Span`. /// /// This type is semver exempt and not exposed by default. #[cfg(procmacro2_semver_exempt)] #[derive(Clone, PartialEq, Eq)] pub struct SourceFile { inner: imp::SourceFile, _marker: marker::PhantomData<Rc<()>>, } #[cfg(procmacro2_semver_exempt)] impl SourceFile { fn _new(inner: imp::SourceFile) -> Self { SourceFile { inner, _marker: marker::PhantomData, } } /// Get the path to this source file. /// /// ### Note /// /// If the code span associated with this `SourceFile` was generated by an /// external macro, this may not be an actual path on the filesystem. Use /// [`is_real`] to check. /// /// Also note that even if `is_real` returns `true`, if /// `--remap-path-prefix` was passed on the command line, the path as given /// may not actually be valid. 
/// /// [`is_real`]: #method.is_real pub fn path(&self) -> PathBuf { self.inner.path() } /// Returns `true` if this source file is a real source file, and not /// generated by an external macro's expansion. pub fn is_real(&self) -> bool { self.inner.is_real() } } #[cfg(procmacro2_semver_exempt)] impl Debug for SourceFile { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { Debug::fmt(&self.inner, f) } } /// A line-column pair representing the start or end of a `Span`. /// /// This type is semver exempt and not exposed by default. #[cfg(span_locations)] #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub struct LineColumn { /// The 1-indexed line in the source file on which the span starts or ends /// (inclusive). pub line: usize, /// The 0-indexed column (in UTF-8 characters) in the source file on which /// the span starts or ends (inclusive). pub column: usize, } #[cfg(span_locations)] impl Ord for LineColumn { fn cmp(&self, other: &Self) -> Ordering { self.line .cmp(&other.line) .then(self.column.cmp(&other.column)) } } #[cfg(span_locations)] impl PartialOrd for LineColumn { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } /// A region of source code, along with macro expansion information. #[derive(Copy, Clone)] pub struct Span { inner: imp::Span, _marker: marker::PhantomData<Rc<()>>, } impl Span { fn _new(inner: imp::Span) -> Span { Span { inner, _marker: marker::PhantomData, } } fn _new_stable(inner: fallback::Span) -> Span { Span { inner: inner.into(), _marker: marker::PhantomData, } } /// The span of the invocation of the current procedural macro. /// /// Identifiers created with this span will be resolved as if they were /// written directly at the macro call location (call-site hygiene) and /// other code at the macro call site will be able to refer to them as well. 
pub fn call_site() -> Span { Span::_new(imp::Span::call_site()) } /// The span located at the invocation of the procedural macro, but with /// local variables, labels, and `$crate` resolved at the definition site /// of the macro. This is the same hygiene behavior as `macro_rules`. /// /// This function requires Rust 1.45 or later. #[cfg(hygiene)] pub fn mixed_site() -> Span { Span::_new(imp::Span::mixed_site()) } /// A span that resolves at the macro definition site. /// /// This method is semver exempt and not exposed by default. #[cfg(procmacro2_semver_exempt)] pub fn def_site() -> Span { Span::_new(imp::Span::def_site()) } /// Creates a new span with the same line/column information as `self` but /// that resolves symbols as though it were at `other`. pub fn resolved_at(&self, other: Span) -> Span { Span::_new(self.inner.resolved_at(other.inner)) } /// Creates a new span with the same name resolution behavior as `self` but /// with the line/column information of `other`. pub fn located_at(&self, other: Span) -> Span { Span::_new(self.inner.located_at(other.inner)) } /// Convert `proc_macro2::Span` to `proc_macro::Span`. /// /// This method is available when building with a nightly compiler, or when /// building with rustc 1.29+ *without* semver exempt features. /// /// # Panics /// /// Panics if called from outside of a procedural macro. Unlike /// `proc_macro2::Span`, the `proc_macro::Span` type can only exist within /// the context of a procedural macro invocation. #[cfg(wrap_proc_macro)] pub fn unwrap(self) -> proc_macro::Span { self.inner.unwrap() } // Soft deprecated. Please use Span::unwrap. #[cfg(wrap_proc_macro)] #[doc(hidden)] pub fn unstable(self) -> proc_macro::Span { self.unwrap() } /// The original source file into which this span points. /// /// This method is semver exempt and not exposed by default. 
#[cfg(procmacro2_semver_exempt)] pub fn source_file(&self) -> SourceFile { SourceFile::_new(self.inner.source_file()) } /// Get the starting line/column in the source file for this span. /// /// This method requires the `"span-locations"` feature to be enabled. #[cfg(span_locations)] pub fn start(&self) -> LineColumn { let imp::LineColumn { line, column } = self.inner.start(); LineColumn { line, column } } /// Get the ending line/column in the source file for this span. /// /// This method requires the `"span-locations"` feature to be enabled. #[cfg(span_locations)] pub fn end(&self) -> LineColumn { let imp::LineColumn { line, column } = self.inner.end(); LineColumn { line, column } } /// Create a new span encompassing `self` and `other`. /// /// Returns `None` if `self` and `other` are from different files. /// /// Warning: the underlying [`proc_macro::Span::join`] method is /// nightly-only. When called from within a procedural macro not using a /// nightly compiler, this method will always return `None`. /// /// [`proc_macro::Span::join`]: https://doc.rust-lang.org/proc_macro/struct.Span.html#method.join pub fn join(&self, other: Span) -> Option<Span> { self.inner.join(other.inner).map(Span::_new) } /// Compares two spans to see if they're equal. /// /// This method is semver exempt and not exposed by default. #[cfg(procmacro2_semver_exempt)] pub fn eq(&self, other: &Span) -> bool { self.inner.eq(&other.inner) } } /// Prints a span in a form convenient for debugging. impl Debug for Span { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { Debug::fmt(&self.inner, f) } } /// A single token or a delimited sequence of token trees (e.g. `[1, (), ..]`). #[derive(Clone)] pub enum TokenTree { /// A token stream surrounded by bracket delimiters. Group(Group), /// An identifier. Ident(Ident), /// A single punctuation character (`+`, `,`, `$`, etc.). Punct(Punct), /// A literal character (`'a'`), string (`"hello"`), number (`2.3`), etc. 
Literal(Literal), } impl TokenTree { /// Returns the span of this tree, delegating to the `span` method of /// the contained token or a delimited stream. pub fn span(&self) -> Span { match self { TokenTree::Group(t) => t.span(), TokenTree::Ident(t) => t.span(), TokenTree::Punct(t) => t.span(), TokenTree::Literal(t) => t.span(), } } /// Configures the span for *only this token*. /// /// Note that if this token is a `Group` then this method will not configure /// the span of each of the internal tokens, this will simply delegate to /// the `set_span` method of each variant. pub fn set_span(&mut self, span: Span) { match self { TokenTree::Group(t) => t.set_span(span), TokenTree::Ident(t) => t.set_span(span), TokenTree::Punct(t) => t.set_span(span), TokenTree::Literal(t) => t.set_span(span), } } } impl From<Group> for TokenTree { fn from(g: Group) -> TokenTree { TokenTree::Group(g) } } impl From<Ident> for TokenTree { fn from(g: Ident) -> TokenTree { TokenTree::Ident(g) } } impl From<Punct> for TokenTree { fn from(g: Punct) -> TokenTree { TokenTree::Punct(g) } } impl From<Literal> for TokenTree { fn from(g: Literal) -> TokenTree { TokenTree::Literal(g) } } /// Prints the token tree as a string that is supposed to be losslessly /// convertible back into the same token tree (modulo spans), except for /// possibly `TokenTree::Group`s with `Delimiter::None` delimiters and negative /// numeric literals. impl Display for TokenTree { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { TokenTree::Group(t) => Display::fmt(t, f), TokenTree::Ident(t) => Display::fmt(t, f), TokenTree::Punct(t) => Display::fmt(t, f), TokenTree::Literal(t) => Display::fmt(t, f), } } } /// Prints token tree in a form convenient for debugging. 
impl Debug for TokenTree { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { // Each of these has the name in the struct type in the derived debug, // so don't bother with an extra layer of indirection match self { TokenTree::Group(t) => Debug::fmt(t, f), TokenTree::Ident(t) => { let mut debug = f.debug_struct("Ident"); debug.field("sym", &format_args!("{}", t)); imp::debug_span_field_if_nontrivial(&mut debug, t.span().inner); debug.finish() } TokenTree::Punct(t) => Debug::fmt(t, f), TokenTree::Literal(t) => Debug::fmt(t, f), } } } /// A delimited token stream. /// /// A `Group` internally contains a `TokenStream` which is surrounded by /// `Delimiter`s. #[derive(Clone)] pub struct Group { inner: imp::Group, } /// Describes how a sequence of token trees is delimited. #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub enum Delimiter { /// `( ... )` Parenthesis, /// `{ ... }` Brace, /// `[ ... ]` Bracket, /// `Ø ... Ø` /// /// An implicit delimiter, that may, for example, appear around tokens /// coming from a "macro variable" `$var`. It is important to preserve /// operator priorities in cases like `$var * 3` where `$var` is `1 + 2`. /// Implicit delimiters may not survive roundtrip of a token stream through /// a string. None, } impl Group { fn _new(inner: imp::Group) -> Self { Group { inner } } fn _new_stable(inner: fallback::Group) -> Self { Group { inner: inner.into(), } } /// Creates a new `Group` with the given delimiter and token stream. /// /// This constructor will set the span for this group to /// `Span::call_site()`. To change the span you can use the `set_span` /// method below. pub fn new(delimiter: Delimiter, stream: TokenStream) -> Group { Group { inner: imp::Group::new(delimiter, stream.inner), } } /// Returns the delimiter of this `Group` pub fn delimiter(&self) -> Delimiter { self.inner.delimiter() } /// Returns the `TokenStream` of tokens that are delimited in this `Group`. 
/// /// Note that the returned token stream does not include the delimiter /// returned above. pub fn stream(&self) -> TokenStream { TokenStream::_new(self.inner.stream()) } /// Returns the span for the delimiters of this token stream, spanning the /// entire `Group`. /// /// ```text /// pub fn span(&self) -> Span { /// ^^^^^^^ /// ``` pub fn span(&self) -> Span { Span::_new(self.inner.span()) } /// Returns the span pointing to the opening delimiter of this group. /// /// ```text /// pub fn span_open(&self) -> Span { /// ^ /// ``` pub fn span_open(&self) -> Span { Span::_new(self.inner.span_open()) } /// Returns the span pointing to the closing delimiter of this group. /// /// ```text /// pub fn span_close(&self) -> Span { /// ^ /// ``` pub fn span_close(&self) -> Span { Span::_new(self.inner.span_close()) } /// Configures the span for this `Group`'s delimiters, but not its internal /// tokens. /// /// This method will **not** set the span of all the internal tokens spanned /// by this group, but rather it will only set the span of the delimiter /// tokens at the level of the `Group`. pub fn set_span(&mut self, span: Span) { self.inner.set_span(span.inner) } } /// Prints the group as a string that should be losslessly convertible back /// into the same group (modulo spans), except for possibly `TokenTree::Group`s /// with `Delimiter::None` delimiters. impl Display for Group { fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { Display::fmt(&self.inner, formatter) } } impl Debug for Group { fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { Debug::fmt(&self.inner, formatter) } } /// An `Punct` is an single punctuation character like `+`, `-` or `#`. /// /// Multicharacter operators like `+=` are represented as two instances of /// `Punct` with different forms of `Spacing` returned. 
#[derive(Clone)] pub struct Punct { op: char, spacing: Spacing, span: Span, } /// Whether an `Punct` is followed immediately by another `Punct` or followed by /// another token or whitespace. #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub enum Spacing { /// E.g. `+` is `Alone` in `+ =`, `+ident` or `+()`. Alone, /// E.g. `+` is `Joint` in `+=` or `'` is `Joint` in `'#`. /// /// Additionally, single quote `'` can join with identifiers to form /// lifetimes `'ident`. Joint, } impl Punct { /// Creates a new `Punct` from the given character and spacing. /// /// The `ch` argument must be a valid punctuation character permitted by the /// language, otherwise the function will panic. /// /// The returned `Punct` will have the default span of `Span::call_site()` /// which can be further configured with the `set_span` method below. pub fn new(op: char, spacing: Spacing) -> Punct { Punct { op, spacing, span: Span::call_site(), } } /// Returns the value of this punctuation character as `char`. pub fn as_char(&self) -> char { self.op } /// Returns the spacing of this punctuation character, indicating whether /// it's immediately followed by another `Punct` in the token stream, so /// they can potentially be combined into a multicharacter operator /// (`Joint`), or it's followed by some other token or whitespace (`Alone`) /// so the operator has certainly ended. pub fn spacing(&self) -> Spacing { self.spacing } /// Returns the span for this punctuation character. pub fn span(&self) -> Span { self.span } /// Configure the span for this punctuation character. pub fn set_span(&mut self, span: Span) { self.span = span; } } /// Prints the punctuation character as a string that should be losslessly /// convertible back into the same character. 
impl Display for Punct { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { Display::fmt(&self.op, f) } } impl Debug for Punct { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { let mut debug = fmt.debug_struct("Punct"); debug.field("op", &self.op); debug.field("spacing", &self.spacing); imp::debug_span_field_if_nontrivial(&mut debug, self.span.inner); debug.finish() } } /// A word of Rust code, which may be a keyword or legal variable name. /// /// An identifier consists of at least one Unicode code point, the first of /// which has the XID_Start property and the rest of which have the XID_Continue /// property. /// /// - The empty string is not an identifier. Use `Option<Ident>`. /// - A lifetime is not an identifier. Use `syn::Lifetime` instead. /// /// An identifier constructed with `Ident::new` is permitted to be a Rust /// keyword, though parsing one through its [`Parse`] implementation rejects /// Rust keywords. Use `input.call(Ident::parse_any)` when parsing to match the /// behaviour of `Ident::new`. /// /// [`Parse`]: https://docs.rs/syn/1.0/syn/parse/trait.Parse.html /// /// # Examples /// /// A new ident can be created from a string using the `Ident::new` function. /// A span must be provided explicitly which governs the name resolution /// behavior of the resulting identifier. /// /// ``` /// use proc_macro2::{Ident, Span}; /// /// fn main() { /// let call_ident = Ident::new("calligraphy", Span::call_site()); /// /// println!("{}", call_ident); /// } /// ``` /// /// An ident can be interpolated into a token stream using the `quote!` macro. /// /// ``` /// use proc_macro2::{Ident, Span}; /// use quote::quote; /// /// fn main() { /// let ident = Ident::new("demo", Span::call_site()); /// /// // Create a variable binding whose name is this ident. /// let expanded = quote! { let #ident = 10; }; /// /// // Create a variable binding with a slightly different name. 
/// let temp_ident = Ident::new(&format!("new_{}", ident), Span::call_site()); /// let expanded = quote! { let #temp_ident = 10; }; /// } /// ``` /// /// A string representation of the ident is available through the `to_string()` /// method. /// /// ``` /// # use proc_macro2::{Ident, Span}; /// # /// # let ident = Ident::new("another_identifier", Span::call_site()); /// # /// // Examine the ident as a string. /// let ident_string = ident.to_string(); /// if ident_string.len() > 60 { /// println!("Very long identifier: {}", ident_string) /// } /// ``` #[derive(Clone)] pub struct Ident { inner: imp::Ident, _marker: marker::PhantomData<Rc<()>>, } impl Ident { fn _new(inner: imp::Ident) -> Ident { Ident { inner, _marker: marker::PhantomData, } } /// Creates a new `Ident` with the given `string` as well as the specified /// `span`. /// /// The `string` argument must be a valid identifier permitted by the /// language, otherwise the function will panic. /// /// Note that `span`, currently in rustc, configures the hygiene information /// for this identifier. /// /// As of this time `Span::call_site()` explicitly opts-in to "call-site" /// hygiene meaning that identifiers created with this span will be resolved /// as if they were written directly at the location of the macro call, and /// other code at the macro call site will be able to refer to them as well. /// /// Later spans like `Span::def_site()` will allow to opt-in to /// "definition-site" hygiene meaning that identifiers created with this /// span will be resolved at the location of the macro definition and other /// code at the macro call site will not be able to refer to them. /// /// Due to the current importance of hygiene this constructor, unlike other /// tokens, requires a `Span` to be specified at construction. /// /// # Panics /// /// Panics if the input string is neither a keyword nor a legal variable /// name. 
If you are not sure whether the string contains an identifier and /// need to handle an error case, use /// <a href="https://docs.rs/syn/1.0/syn/fn.parse_str.html"><code /// style="padding-right:0;">syn::parse_str</code></a><code /// style="padding-left:0;">::&lt;Ident&gt;</code> /// rather than `Ident::new`. pub fn new(string: &str, span: Span) -> Ident { Ident::_new(imp::Ident::new(string, span.inner)) } /// Same as `Ident::new`, but creates a raw identifier (`r#ident`). /// /// This method is semver exempt and not exposed by default. #[cfg(procmacro2_semver_exempt)] pub fn new_raw(string: &str, span: Span) -> Ident { Ident::_new_raw(string, span) } fn _new_raw(string: &str, span: Span) -> Ident { Ident::_new(imp::Ident::new_raw(string, span.inner)) } /// Returns the span of this `Ident`. pub fn span(&self) -> Span { Span::_new(self.inner.span()) } /// Configures the span of this `Ident`, possibly changing its hygiene /// context. pub fn set_span(&mut self, span: Span) { self.inner.set_span(span.inner); } } impl PartialEq for Ident { fn eq(&self, other: &Ident) -> bool { self.inner == other.inner } } impl<T> PartialEq<T> for Ident where T: ?Sized + AsRef<str>, { fn eq(&self, other: &T) -> bool { self.inner == other } } impl Eq for Ident {} impl PartialOrd for Ident { fn partial_cmp(&self, other: &Ident) -> Option<Ordering> { Some(self.cmp(other)) } } impl Ord for Ident { fn cmp(&self, other: &Ident) -> Ordering { self.to_string().cmp(&other.to_string()) } } impl Hash for Ident { fn hash<H: Hasher>(&self, hasher: &mut H) { self.to_string().hash(hasher) } } /// Prints the identifier as a string that should be losslessly convertible back /// into the same identifier. 
impl Display for Ident { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { Display::fmt(&self.inner, f) } } impl Debug for Ident { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { Debug::fmt(&self.inner, f) } } /// A literal string (`"hello"`), byte string (`b"hello"`), character (`'a'`), /// byte character (`b'a'`), an integer or floating point number with or without /// a suffix (`1`, `1u8`, `2.3`, `2.3f32`). /// /// Boolean literals like `true` and `false` do not belong here, they are /// `Ident`s. #[derive(Clone)] pub struct Literal { inner: imp::Literal, _marker: marker::PhantomData<Rc<()>>, } macro_rules! suffixed_int_literals { ($($name:ident => $kind:ident,)*) => ($( /// Creates a new suffixed integer literal with the specified value. /// /// This function will create an integer like `1u32` where the integer /// value specified is the first part of the token and the integral is /// also suffixed at the end. Literals created from negative numbers may /// not survive rountrips through `TokenStream` or strings and may be /// broken into two tokens (`-` and positive literal). /// /// Literals created through this method have the `Span::call_site()` /// span by default, which can be configured with the `set_span` method /// below. pub fn $name(n: $kind) -> Literal { Literal::_new(imp::Literal::$name(n)) } )*) } macro_rules! unsuffixed_int_literals { ($($name:ident => $kind:ident,)*) => ($( /// Creates a new unsuffixed integer literal with the specified value. /// /// This function will create an integer like `1` where the integer /// value specified is the first part of the token. No suffix is /// specified on this token, meaning that invocations like /// `Literal::i8_unsuffixed(1)` are equivalent to /// `Literal::u32_unsuffixed(1)`. Literals created from negative numbers /// may not survive rountrips through `TokenStream` or strings and may /// be broken into two tokens (`-` and positive literal). 
/// /// Literals created through this method have the `Span::call_site()` /// span by default, which can be configured with the `set_span` method /// below. pub fn $name(n: $kind) -> Literal { Literal::_new(imp::Literal::$name(n)) } )*) } impl Literal { fn _new(inner: imp::Literal) -> Literal { Literal { inner, _marker: marker::PhantomData, } } fn _new_stable(inner: fallback::Literal) -> Literal { Literal { inner: inner.into(), _marker: marker::PhantomData, } } suffixed_int_literals! { u8_suffixed => u8, u16_suffixed => u16, u32_suffixed => u32, u64_suffixed => u64, u128_suffixed => u128, usize_suffixed => usize, i8_suffixed => i8, i16_suffixed => i16, i32_suffixed => i32, i64_suffixed => i64, i128_suffixed => i128, isize_suffixed => isize, } unsuffixed_int_literals! { u8_unsuffixed => u8, u16_unsuffixed => u16, u32_unsuffixed => u32, u64_unsuffixed => u64, u128_unsuffixed => u128, usize_unsuffixed => usize, i8_unsuffixed => i8, i16_unsuffixed => i16, i32_unsuffixed => i32, i64_unsuffixed => i64, i128_unsuffixed => i128, isize_unsuffixed => isize, } /// Creates a new unsuffixed floating-point literal. /// /// This constructor is similar to those like `Literal::i8_unsuffixed` where /// the float's value is emitted directly into the token but no suffix is /// used, so it may be inferred to be a `f64` later in the compiler. /// Literals created from negative numbers may not survive rountrips through /// `TokenStream` or strings and may be broken into two tokens (`-` and /// positive literal). /// /// # Panics /// /// This function requires that the specified float is finite, for example /// if it is infinity or NaN this function will panic. pub fn f64_unsuffixed(f: f64) -> Literal { assert!(f.is_finite()); Literal::_new(imp::Literal::f64_unsuffixed(f)) } /// Creates a new suffixed floating-point literal. 
/// /// This constructor will create a literal like `1.0f64` where the value /// specified is the preceding part of the token and `f64` is the suffix of /// the token. This token will always be inferred to be an `f64` in the /// compiler. Literals created from negative numbers may not survive /// rountrips through `TokenStream` or strings and may be broken into two /// tokens (`-` and positive literal). /// /// # Panics /// /// This function requires that the specified float is finite, for example /// if it is infinity or NaN this function will panic. pub fn f64_suffixed(f: f64) -> Literal { assert!(f.is_finite()); Literal::_new(imp::Literal::f64_suffixed(f)) } /// Creates a new unsuffixed floating-point literal. /// /// This constructor is similar to those like `Literal::i8_unsuffixed` where /// the float's value is emitted directly into the token but no suffix is /// used, so it may be inferred to be a `f64` later in the compiler. /// Literals created from negative numbers may not survive rountrips through /// `TokenStream` or strings and may be broken into two tokens (`-` and /// positive literal). /// /// # Panics /// /// This function requires that the specified float is finite, for example /// if it is infinity or NaN this function will panic. pub fn f32_unsuffixed(f: f32) -> Literal { assert!(f.is_finite()); Literal::_new(imp::Literal::f32_unsuffixed(f)) } /// Creates a new suffixed floating-point literal. /// /// This constructor will create a literal like `1.0f32` where the value /// specified is the preceding part of the token and `f32` is the suffix of /// the token. This token will always be inferred to be an `f32` in the /// compiler. Literals created from negative numbers may not survive /// rountrips through `TokenStream` or strings and may be broken into two /// tokens (`-` and positive literal). /// /// # Panics /// /// This function requires that the specified float is finite, for example /// if it is infinity or NaN this function will panic. 
pub fn f32_suffixed(f: f32) -> Literal { assert!(f.is_finite()); Literal::_new(imp::Literal::f32_suffixed(f)) } /// String literal. pub fn string(string: &str) -> Literal { Literal::_new(imp::Literal::string(string)) } /// Character literal. pub fn character(ch: char) -> Literal { Literal::_new(imp::Literal::character(ch)) } /// Byte string literal. pub fn byte_string(s: &[u8]) -> Literal { Literal::_new(imp::Literal::byte_string(s)) } /// Returns the span encompassing this literal. pub fn span(&self) -> Span { Span::_new(self.inner.span()) } /// Configures the span associated for this literal. pub fn set_span(&mut self, span: Span) { self.inner.set_span(span.inner); } /// Returns a `Span` that is a subset of `self.span()` containing only /// the source bytes in range `range`. Returns `None` if the would-be /// trimmed span is outside the bounds of `self`. /// /// Warning: the underlying [`proc_macro::Literal::subspan`] method is /// nightly-only. When called from within a procedural macro not using a /// nightly compiler, this method will always return `None`. /// /// [`proc_macro::Literal::subspan`]: https://doc.rust-lang.org/proc_macro/struct.Literal.html#method.subspan pub fn subspan<R: RangeBounds<usize>>(&self, range: R) -> Option<Span> { self.inner.subspan(range).map(Span::_new) } } impl Debug for Literal { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { Debug::fmt(&self.inner, f) } } impl Display for Literal { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { Display::fmt(&self.inner, f) } } /// Public implementation details for the `TokenStream` type, such as iterators. pub mod token_stream { use crate::{imp, TokenTree}; use std::fmt::{self, Debug}; use std::marker; use std::rc::Rc; pub use crate::TokenStream; /// An iterator over `TokenStream`'s `TokenTree`s. /// /// The iteration is "shallow", e.g. the iterator doesn't recurse into /// delimited groups, and returns whole groups as token trees. 
#[derive(Clone)] pub struct IntoIter { inner: imp::TokenTreeIter, _marker: marker::PhantomData<Rc<()>>, } impl Iterator for IntoIter { type Item = TokenTree; fn next(&mut self) -> Option<TokenTree> { self.inner.next() } } impl Debug for IntoIter { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { Debug::fmt(&self.inner, f) } } impl IntoIterator for TokenStream { type Item = TokenTree; type IntoIter = IntoIter; fn into_iter(self) -> IntoIter { IntoIter { inner: self.inner.into_iter(), _marker: marker::PhantomData, } } } }
32.400163
113
0.620375
09e5213c0dbb0d38576d5dba34ca85ca03a4d9d1
51,505
// Copyright 2020 Andy Grove // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Serde code to convert from protocol buffers to Rust data structures. use std::{ convert::{From, TryInto}, unimplemented, }; use crate::error::BallistaError; use crate::serde::{proto_error, protobuf}; use crate::{convert_box_required, convert_required}; use arrow::datatypes::{DataType, Field, Schema}; use datafusion::logical_plan::{ abs, acos, asin, atan, ceil, cos, exp, floor, log10, log2, round, signum, sin, sqrt, tan, trunc, Expr, JoinType, LogicalPlan, LogicalPlanBuilder, Operator, }; use datafusion::physical_plan::aggregates::AggregateFunction; use datafusion::physical_plan::csv::CsvReadOptions; use datafusion::scalar::ScalarValue; use protobuf::logical_plan_node::LogicalPlanType; use protobuf::{logical_expr_node::ExprType, scalar_type}; // use uuid::Uuid; impl TryInto<LogicalPlan> for &protobuf::LogicalPlanNode { type Error = BallistaError; fn try_into(self) -> Result<LogicalPlan, Self::Error> { let plan = self.logical_plan_type.as_ref().ok_or_else(|| { proto_error(format!( "logical_plan::from_proto() Unsupported logical plan '{:?}'", self )) })?; match plan { LogicalPlanType::Projection(projection) => { let input: LogicalPlan = convert_box_required!(projection.input)?; LogicalPlanBuilder::from(&input) .project( &projection .expr .iter() .map(|expr| expr.try_into()) .collect::<Result<Vec<_>, _>>()?, )? 
.build() .map_err(|e| e.into()) } LogicalPlanType::Selection(selection) => { let input: LogicalPlan = convert_box_required!(selection.input)?; LogicalPlanBuilder::from(&input) .filter( selection .expr .as_ref() .expect("expression required") .try_into()?, )? .build() .map_err(|e| e.into()) } LogicalPlanType::Aggregate(aggregate) => { let input: LogicalPlan = convert_box_required!(aggregate.input)?; let group_expr = aggregate .group_expr .iter() .map(|expr| expr.try_into()) .collect::<Result<Vec<_>, _>>()?; let aggr_expr = aggregate .aggr_expr .iter() .map(|expr| expr.try_into()) .collect::<Result<Vec<_>, _>>()?; LogicalPlanBuilder::from(&input) .aggregate(&group_expr, &aggr_expr)? .build() .map_err(|e| e.into()) } LogicalPlanType::CsvScan(scan) => { let schema: Schema = convert_required!(scan.schema)?; let options = CsvReadOptions::new() .schema(&schema) .delimiter(scan.delimiter.as_bytes()[0]) .file_extension(&scan.file_extension) .has_header(scan.has_header); let mut projection = None; if let Some(column_names) = &scan.projection { let column_indices = column_names .columns .iter() .map(|name| schema.index_of(name)) .collect::<Result<Vec<usize>, _>>()?; projection = Some(column_indices); } LogicalPlanBuilder::scan_csv(&scan.path, options, projection)? .build() .map_err(|e| e.into()) } LogicalPlanType::ParquetScan(scan) => { let projection = match scan.projection.as_ref() { None => None, Some(columns) => { let schema: Schema = convert_required!(scan.schema)?; let r: Result<Vec<usize>, _> = columns .columns .iter() .map(|col_name| { schema.fields().iter().position(|field| field.name() == col_name).ok_or_else(|| { let column_names: Vec<&String> = schema.fields().iter().map(|f| f.name()).collect(); proto_error(format!( "Parquet projection contains column name that is not present in schema. Column name: {}. Schema columns: {:?}", col_name, column_names )) }) }) .collect(); Some(r?) } }; LogicalPlanBuilder::scan_parquet(&scan.path, projection, 24)? 
//TODO concurrency .build() .map_err(|e| e.into()) } LogicalPlanType::Sort(sort) => { let input: LogicalPlan = convert_box_required!(sort.input)?; let sort_expr: Vec<Expr> = sort .expr .iter() .map(|expr| expr.try_into()) .collect::<Result<Vec<Expr>, _>>()?; LogicalPlanBuilder::from(&input) .sort(&sort_expr)? .build() .map_err(|e| e.into()) } LogicalPlanType::Repartition(repartition) => { use datafusion::logical_plan::Partitioning; let input: LogicalPlan = convert_box_required!(repartition.input)?; use protobuf::repartition_node::PartitionMethod; let pb_partition_method = repartition.partition_method.clone().ok_or_else(|| { BallistaError::General(String::from( "Protobuf deserialization error, RepartitionNode was missing required field 'partition_method'", )) })?; let partitioning_scheme = match pb_partition_method { PartitionMethod::Hash(protobuf::HashRepartition { hash_expr: pb_hash_expr, batch_size, }) => Partitioning::Hash( pb_hash_expr .iter() .map(|pb_expr| pb_expr.try_into()) .collect::<Result<Vec<_>, _>>()?, batch_size as usize, ), PartitionMethod::RoundRobin(batch_size) => { Partitioning::RoundRobinBatch(batch_size as usize) } }; LogicalPlanBuilder::from(&input) .repartition(partitioning_scheme)? 
.build() .map_err(|e| e.into()) } LogicalPlanType::EmptyRelation(empty_relation) => { LogicalPlanBuilder::empty(empty_relation.produce_one_row) .build() .map_err(|e| e.into()) } LogicalPlanType::CreateExternalTable(create_extern_table) => { let pb_schema = (create_extern_table.schema.clone()).ok_or_else(|| { BallistaError::General(String::from( "Protobuf deserialization error, CreateExternalTableNode was missing required field schema.", )) })?; let pb_file_type: protobuf::FileType = create_extern_table.file_type.try_into()?; Ok(LogicalPlan::CreateExternalTable { schema: pb_schema.try_into()?, name: create_extern_table.name.clone(), location: create_extern_table.location.clone(), file_type: pb_file_type.into(), has_header: create_extern_table.has_header, }) } LogicalPlanType::Explain(explain) => { let input: LogicalPlan = convert_box_required!(explain.input)?; LogicalPlanBuilder::from(&input) .explain(explain.verbose)? .build() .map_err(|e| e.into()) } LogicalPlanType::Limit(limit) => { let input: LogicalPlan = convert_box_required!(limit.input)?; LogicalPlanBuilder::from(&input) .limit(limit.limit as usize)? .build() .map_err(|e| e.into()) } LogicalPlanType::Join(join) => { let left_keys: Vec<&str> = join.left_join_column.iter().map(|i| i.as_str()).collect(); let right_keys: Vec<&str> = join.right_join_column.iter().map(|i| i.as_str()).collect(); let join_type = protobuf::JoinType::from_i32(join.join_type).ok_or_else(|| { proto_error(format!( "Received a JoinNode message with unknown JoinType {}", join.join_type )) })?; let join_type = match join_type { protobuf::JoinType::Inner => JoinType::Inner, protobuf::JoinType::Left => JoinType::Left, protobuf::JoinType::Right => JoinType::Right, }; LogicalPlanBuilder::from(&convert_box_required!(join.left)?) .join( &convert_box_required!(join.right)?, join_type, &left_keys, &right_keys, )? 
.build() .map_err(|e| e.into()) } } } } impl TryInto<datafusion::logical_plan::DFSchema> for protobuf::Schema { type Error = BallistaError; fn try_into(self) -> Result<datafusion::logical_plan::DFSchema, Self::Error> { let schema: Schema = (&self).try_into()?; schema.try_into().map_err(BallistaError::DataFusionError) } } impl TryInto<datafusion::logical_plan::DFSchemaRef> for protobuf::Schema { type Error = BallistaError; fn try_into(self) -> Result<datafusion::logical_plan::DFSchemaRef, Self::Error> { use datafusion::logical_plan::ToDFSchema; let schema: Schema = (&self).try_into()?; schema .to_dfschema_ref() .map_err(BallistaError::DataFusionError) } } impl TryInto<arrow::datatypes::DataType> for &protobuf::scalar_type::Datatype { type Error = BallistaError; fn try_into(self) -> Result<arrow::datatypes::DataType, Self::Error> { use protobuf::scalar_type::Datatype; Ok(match self { Datatype::Scalar(scalar_type) => { let pb_scalar_enum = protobuf::PrimitiveScalarType::from_i32(*scalar_type).ok_or_else(|| { proto_error(format!( "Protobuf deserialization error, scalar_type::Datatype missing was provided invalid enum variant: {}", *scalar_type )) })?; pb_scalar_enum.into() } Datatype::List(protobuf::ScalarListType { deepest_type, field_names, }) => { if field_names.is_empty() { return Err(proto_error( "Protobuf deserialization error: found no field names in ScalarListType message which requires at least one", )); } let pb_scalar_type = protobuf::PrimitiveScalarType::from_i32(*deepest_type) .ok_or_else(|| { proto_error(format!( "Protobuf deserialization error: invalid i32 for scalar enum: {}", *deepest_type )) })?; //Because length is checked above it is safe to unwrap .last() let mut scalar_type = arrow::datatypes::DataType::List(Box::new(Field::new( field_names.last().unwrap().as_str(), pb_scalar_type.into(), true, ))); //Iterate over field names in reverse order except for the last item in the vector for name in field_names.iter().rev().skip(1) { let new_datatype = 
arrow::datatypes::DataType::List(Box::new(Field::new( name.as_str(), scalar_type, true, ))); scalar_type = new_datatype; } scalar_type } }) } } impl TryInto<arrow::datatypes::DataType> for &protobuf::arrow_type::ArrowTypeEnum { type Error = BallistaError; fn try_into(self) -> Result<arrow::datatypes::DataType, Self::Error> { use arrow::datatypes::DataType; use protobuf::arrow_type; Ok(match self { arrow_type::ArrowTypeEnum::None(_) => DataType::Null, arrow_type::ArrowTypeEnum::Bool(_) => DataType::Boolean, arrow_type::ArrowTypeEnum::Uint8(_) => DataType::UInt8, arrow_type::ArrowTypeEnum::Int8(_) => DataType::Int8, arrow_type::ArrowTypeEnum::Uint16(_) => DataType::UInt16, arrow_type::ArrowTypeEnum::Int16(_) => DataType::Int16, arrow_type::ArrowTypeEnum::Uint32(_) => DataType::UInt32, arrow_type::ArrowTypeEnum::Int32(_) => DataType::Int32, arrow_type::ArrowTypeEnum::Uint64(_) => DataType::UInt64, arrow_type::ArrowTypeEnum::Int64(_) => DataType::Int64, arrow_type::ArrowTypeEnum::Float16(_) => DataType::Float16, arrow_type::ArrowTypeEnum::Float32(_) => DataType::Float32, arrow_type::ArrowTypeEnum::Float64(_) => DataType::Float64, arrow_type::ArrowTypeEnum::Utf8(_) => DataType::Utf8, arrow_type::ArrowTypeEnum::LargeUtf8(_) => DataType::LargeUtf8, arrow_type::ArrowTypeEnum::Binary(_) => DataType::Binary, arrow_type::ArrowTypeEnum::FixedSizeBinary(size) => DataType::FixedSizeBinary(*size), arrow_type::ArrowTypeEnum::LargeBinary(_) => DataType::LargeBinary, arrow_type::ArrowTypeEnum::Date32(_) => DataType::Date32, arrow_type::ArrowTypeEnum::Date64(_) => DataType::Date64, arrow_type::ArrowTypeEnum::Duration(time_unit) => { DataType::Duration(protobuf::TimeUnit::from_i32_to_arrow(*time_unit)?) 
} arrow_type::ArrowTypeEnum::Timestamp(protobuf::Timestamp { time_unit, timezone, }) => DataType::Timestamp( protobuf::TimeUnit::from_i32_to_arrow(*time_unit)?, match timezone.len() { 0 => None, _ => Some(timezone.to_owned()), }, ), arrow_type::ArrowTypeEnum::Time32(time_unit) => { DataType::Time32(protobuf::TimeUnit::from_i32_to_arrow(*time_unit)?) } arrow_type::ArrowTypeEnum::Time64(time_unit) => { DataType::Time64(protobuf::TimeUnit::from_i32_to_arrow(*time_unit)?) } arrow_type::ArrowTypeEnum::Interval(interval_unit) => { DataType::Interval(protobuf::IntervalUnit::from_i32_to_arrow(*interval_unit)?) } arrow_type::ArrowTypeEnum::Decimal(protobuf::Decimal { whole, fractional }) => { DataType::Decimal(*whole as usize, *fractional as usize) } arrow_type::ArrowTypeEnum::List(list) => { let list_type: &protobuf::Field = list .as_ref() .field_type .as_ref() .ok_or_else(|| proto_error("Protobuf deserialization error: List message missing required field 'field_type'"))? .as_ref(); DataType::List(Box::new(list_type.try_into()?)) } arrow_type::ArrowTypeEnum::LargeList(list) => { let list_type: &protobuf::Field = list .as_ref() .field_type .as_ref() .ok_or_else(|| proto_error("Protobuf deserialization error: List message missing required field 'field_type'"))? .as_ref(); DataType::LargeList(Box::new(list_type.try_into()?)) } arrow_type::ArrowTypeEnum::FixedSizeList(list) => { let list_type: &protobuf::Field = list .as_ref() .field_type .as_ref() .ok_or_else(|| proto_error("Protobuf deserialization error: List message missing required field 'field_type'"))? 
.as_ref(); let list_size = list.list_size; DataType::FixedSizeList(Box::new(list_type.try_into()?), list_size) } arrow_type::ArrowTypeEnum::Struct(strct) => DataType::Struct( strct .sub_field_types .iter() .map(|field| field.try_into()) .collect::<Result<Vec<_>, _>>()?, ), arrow_type::ArrowTypeEnum::Union(union) => DataType::Union( union .union_types .iter() .map(|field| field.try_into()) .collect::<Result<Vec<_>, _>>()?, ), arrow_type::ArrowTypeEnum::Dictionary(dict) => { let pb_key_datatype = dict .as_ref() .key .as_ref() .ok_or_else(|| proto_error("Protobuf deserialization error: Dictionary message missing required field 'key'"))?; let pb_value_datatype = dict .as_ref() .value .as_ref() .ok_or_else(|| proto_error("Protobuf deserialization error: Dictionary message missing required field 'key'"))?; let key_datatype: DataType = pb_key_datatype.as_ref().try_into()?; let value_datatype: DataType = pb_value_datatype.as_ref().try_into()?; DataType::Dictionary(Box::new(key_datatype), Box::new(value_datatype)) } }) } } impl Into<arrow::datatypes::DataType> for protobuf::PrimitiveScalarType { fn into(self) -> arrow::datatypes::DataType { use arrow::datatypes::DataType; match self { protobuf::PrimitiveScalarType::Bool => DataType::Boolean, protobuf::PrimitiveScalarType::Uint8 => DataType::UInt8, protobuf::PrimitiveScalarType::Int8 => DataType::Int8, protobuf::PrimitiveScalarType::Uint16 => DataType::UInt16, protobuf::PrimitiveScalarType::Int16 => DataType::Int16, protobuf::PrimitiveScalarType::Uint32 => DataType::UInt32, protobuf::PrimitiveScalarType::Int32 => DataType::Int32, protobuf::PrimitiveScalarType::Uint64 => DataType::UInt64, protobuf::PrimitiveScalarType::Int64 => DataType::Int64, protobuf::PrimitiveScalarType::Float32 => DataType::Float32, protobuf::PrimitiveScalarType::Float64 => DataType::Float64, protobuf::PrimitiveScalarType::Utf8 => DataType::Utf8, protobuf::PrimitiveScalarType::LargeUtf8 => DataType::LargeUtf8, protobuf::PrimitiveScalarType::Date32 => 
DataType::Date32, protobuf::PrimitiveScalarType::TimeMicrosecond => { DataType::Time64(arrow::datatypes::TimeUnit::Microsecond) } protobuf::PrimitiveScalarType::TimeNanosecond => { DataType::Time64(arrow::datatypes::TimeUnit::Nanosecond) } protobuf::PrimitiveScalarType::Null => DataType::Null, } } } //Does not typecheck lists fn typechecked_scalar_value_conversion( tested_type: &protobuf::scalar_value::Value, required_type: protobuf::PrimitiveScalarType, ) -> Result<datafusion::scalar::ScalarValue, BallistaError> { use protobuf::scalar_value::Value; use protobuf::PrimitiveScalarType; Ok(match (tested_type, &required_type) { (Value::BoolValue(v), PrimitiveScalarType::Bool) => ScalarValue::Boolean(Some(*v)), (Value::Int8Value(v), PrimitiveScalarType::Int8) => ScalarValue::Int8(Some(*v as i8)), (Value::Int16Value(v), PrimitiveScalarType::Int16) => ScalarValue::Int16(Some(*v as i16)), (Value::Int32Value(v), PrimitiveScalarType::Int32) => ScalarValue::Int32(Some(*v)), (Value::Int64Value(v), PrimitiveScalarType::Int64) => ScalarValue::Int64(Some(*v)), (Value::Uint8Value(v), PrimitiveScalarType::Uint8) => ScalarValue::UInt8(Some(*v as u8)), (Value::Uint16Value(v), PrimitiveScalarType::Uint16) => { ScalarValue::UInt16(Some(*v as u16)) } (Value::Uint32Value(v), PrimitiveScalarType::Uint32) => ScalarValue::UInt32(Some(*v)), (Value::Uint64Value(v), PrimitiveScalarType::Uint64) => ScalarValue::UInt64(Some(*v)), (Value::Float32Value(v), PrimitiveScalarType::Float32) => ScalarValue::Float32(Some(*v)), (Value::Float64Value(v), PrimitiveScalarType::Float64) => ScalarValue::Float64(Some(*v)), (Value::Date32Value(v), PrimitiveScalarType::Date32) => ScalarValue::Date32(Some(*v)), (Value::TimeMicrosecondValue(v), PrimitiveScalarType::TimeMicrosecond) => { ScalarValue::TimeMicrosecond(Some(*v)) } (Value::TimeNanosecondValue(v), PrimitiveScalarType::TimeMicrosecond) => { ScalarValue::TimeNanosecond(Some(*v)) } (Value::Utf8Value(v), PrimitiveScalarType::Utf8) => 
ScalarValue::Utf8(Some(v.to_owned())), (Value::LargeUtf8Value(v), PrimitiveScalarType::LargeUtf8) => { ScalarValue::LargeUtf8(Some(v.to_owned())) } (Value::NullValue(i32_enum), required_scalar_type) => { if *i32_enum == *required_scalar_type as i32 { let pb_scalar_type = PrimitiveScalarType::from_i32(*i32_enum).unwrap(); let scalar_value: ScalarValue = match pb_scalar_type { PrimitiveScalarType::Bool => ScalarValue::Boolean(None), PrimitiveScalarType::Uint8 => ScalarValue::UInt8(None), PrimitiveScalarType::Int8 => ScalarValue::Int8(None), PrimitiveScalarType::Uint16 => ScalarValue::UInt16(None), PrimitiveScalarType::Int16 => ScalarValue::Int16(None), PrimitiveScalarType::Uint32 => ScalarValue::UInt32(None), PrimitiveScalarType::Int32 => ScalarValue::Int32(None), PrimitiveScalarType::Uint64 => ScalarValue::UInt64(None), PrimitiveScalarType::Int64 => ScalarValue::Int64(None), PrimitiveScalarType::Float32 => ScalarValue::Float32(None), PrimitiveScalarType::Float64 => ScalarValue::Float64(None), PrimitiveScalarType::Utf8 => ScalarValue::Utf8(None), PrimitiveScalarType::LargeUtf8 => ScalarValue::LargeUtf8(None), PrimitiveScalarType::Date32 => ScalarValue::Date32(None), PrimitiveScalarType::TimeMicrosecond => ScalarValue::TimeMicrosecond(None), PrimitiveScalarType::TimeNanosecond => ScalarValue::TimeNanosecond(None), PrimitiveScalarType::Null => { return Err(proto_error( "Untyped scalar null is not a valid scalar value", )) } }; scalar_value } else { return Err(proto_error("Could not convert to the proper type")); } } _ => return Err(proto_error("Could not convert to the proper type")), }) } impl TryInto<datafusion::scalar::ScalarValue> for &protobuf::scalar_value::Value { type Error = BallistaError; fn try_into(self) -> Result<datafusion::scalar::ScalarValue, Self::Error> { use datafusion::scalar::ScalarValue; use protobuf::PrimitiveScalarType; let scalar = match self { protobuf::scalar_value::Value::BoolValue(v) => ScalarValue::Boolean(Some(*v)), 
protobuf::scalar_value::Value::Utf8Value(v) => ScalarValue::Utf8(Some(v.to_owned())), protobuf::scalar_value::Value::LargeUtf8Value(v) => { ScalarValue::LargeUtf8(Some(v.to_owned())) } protobuf::scalar_value::Value::Int8Value(v) => ScalarValue::Int8(Some(*v as i8)), protobuf::scalar_value::Value::Int16Value(v) => ScalarValue::Int16(Some(*v as i16)), protobuf::scalar_value::Value::Int32Value(v) => ScalarValue::Int32(Some(*v)), protobuf::scalar_value::Value::Int64Value(v) => ScalarValue::Int64(Some(*v)), protobuf::scalar_value::Value::Uint8Value(v) => ScalarValue::UInt8(Some(*v as u8)), protobuf::scalar_value::Value::Uint16Value(v) => ScalarValue::UInt16(Some(*v as u16)), protobuf::scalar_value::Value::Uint32Value(v) => ScalarValue::UInt32(Some(*v)), protobuf::scalar_value::Value::Uint64Value(v) => ScalarValue::UInt64(Some(*v)), protobuf::scalar_value::Value::Float32Value(v) => ScalarValue::Float32(Some(*v)), protobuf::scalar_value::Value::Float64Value(v) => ScalarValue::Float64(Some(*v)), protobuf::scalar_value::Value::Date32Value(v) => ScalarValue::Date32(Some(*v)), protobuf::scalar_value::Value::TimeMicrosecondValue(v) => { ScalarValue::TimeMicrosecond(Some(*v)) } protobuf::scalar_value::Value::TimeNanosecondValue(v) => { ScalarValue::TimeNanosecond(Some(*v)) } protobuf::scalar_value::Value::ListValue(v) => v.try_into()?, protobuf::scalar_value::Value::NullListValue(v) => { ScalarValue::List(None, v.try_into()?) } protobuf::scalar_value::Value::NullValue(null_enum) => { PrimitiveScalarType::from_i32(*null_enum) .ok_or_else(|| proto_error("Invalid scalar type"))? .try_into()? 
} }; Ok(scalar) } } impl TryInto<datafusion::scalar::ScalarValue> for &protobuf::ScalarListValue { type Error = BallistaError; fn try_into(self) -> Result<datafusion::scalar::ScalarValue, Self::Error> { use protobuf::scalar_type::Datatype; use protobuf::PrimitiveScalarType; let protobuf::ScalarListValue { datatype, values } = self; let pb_scalar_type = datatype .as_ref() .ok_or_else(|| proto_error("Protobuf deserialization error: ScalarListValue messsage missing required field 'datatype'"))?; let scalar_type = pb_scalar_type .datatype .as_ref() .ok_or_else(|| proto_error("Protobuf deserialization error: ScalarListValue.Datatype messsage missing required field 'datatype'"))?; let scalar_values = match scalar_type { Datatype::Scalar(scalar_type_i32) => { let leaf_scalar_type = protobuf::PrimitiveScalarType::from_i32(*scalar_type_i32) .ok_or_else(|| proto_error("Error converting i32 to basic scalar type"))?; let typechecked_values: Vec<datafusion::scalar::ScalarValue> = values .iter() .map(|protobuf::ScalarValue { value: opt_value }| { let value = opt_value.as_ref().ok_or_else(|| { proto_error( "Protobuf deserialization error: missing required field 'value'", ) })?; typechecked_scalar_value_conversion(value, leaf_scalar_type) }) .collect::<Result<Vec<_>, _>>()?; datafusion::scalar::ScalarValue::List( Some(typechecked_values), leaf_scalar_type.into(), ) } Datatype::List(list_type) => { let protobuf::ScalarListType { deepest_type, field_names, } = &list_type; let leaf_type = PrimitiveScalarType::from_i32(*deepest_type) .ok_or_else(|| proto_error("Error converting i32 to basic scalar type"))?; let depth = field_names.len(); let typechecked_values: Vec<datafusion::scalar::ScalarValue> = if depth == 0 { return Err(proto_error( "Protobuf deserialization error, ScalarListType had no field names, requires at least one", )); } else if depth == 1 { values .iter() .map(|protobuf::ScalarValue { value: opt_value }| { let value = opt_value .as_ref() .ok_or_else(|| 
proto_error("Protobuf deserialization error: missing required field 'value'"))?; typechecked_scalar_value_conversion(value, leaf_type) }) .collect::<Result<Vec<_>, _>>()? } else { values .iter() .map(|protobuf::ScalarValue { value: opt_value }| { let value = opt_value .as_ref() .ok_or_else(|| proto_error("Protobuf deserialization error: missing required field 'value'"))?; value.try_into() }) .collect::<Result<Vec<_>, _>>()? }; datafusion::scalar::ScalarValue::List( match typechecked_values.len() { 0 => None, _ => Some(typechecked_values), }, list_type.try_into()?, ) } }; Ok(scalar_values) } } impl TryInto<arrow::datatypes::DataType> for &protobuf::ScalarListType { type Error = BallistaError; fn try_into(self) -> Result<arrow::datatypes::DataType, Self::Error> { use protobuf::PrimitiveScalarType; let protobuf::ScalarListType { deepest_type, field_names, } = self; let depth = field_names.len(); if depth == 0 { return Err(proto_error( "Protobuf deserialization error: Found a ScalarListType message with no field names, at least one is required", )); } let mut curr_type = arrow::datatypes::DataType::List(Box::new(Field::new( //Since checked vector is not empty above this is safe to unwrap field_names.last().unwrap(), PrimitiveScalarType::from_i32(*deepest_type) .ok_or_else(|| proto_error("Could not convert to datafusion scalar type"))? 
.into(), true, ))); //Iterates over field names in reverse order except for the last item in the vector for name in field_names.iter().rev().skip(1) { let temp_curr_type = arrow::datatypes::DataType::List(Box::new(Field::new(name, curr_type, true))); curr_type = temp_curr_type; } Ok(curr_type) } } impl TryInto<datafusion::scalar::ScalarValue> for protobuf::PrimitiveScalarType { type Error = BallistaError; fn try_into(self) -> Result<datafusion::scalar::ScalarValue, Self::Error> { use datafusion::scalar::ScalarValue; Ok(match self { protobuf::PrimitiveScalarType::Null => { return Err(proto_error("Untyped null is an invalid scalar value")) } protobuf::PrimitiveScalarType::Bool => ScalarValue::Boolean(None), protobuf::PrimitiveScalarType::Uint8 => ScalarValue::UInt8(None), protobuf::PrimitiveScalarType::Int8 => ScalarValue::Int8(None), protobuf::PrimitiveScalarType::Uint16 => ScalarValue::UInt16(None), protobuf::PrimitiveScalarType::Int16 => ScalarValue::Int16(None), protobuf::PrimitiveScalarType::Uint32 => ScalarValue::UInt32(None), protobuf::PrimitiveScalarType::Int32 => ScalarValue::Int32(None), protobuf::PrimitiveScalarType::Uint64 => ScalarValue::UInt64(None), protobuf::PrimitiveScalarType::Int64 => ScalarValue::Int64(None), protobuf::PrimitiveScalarType::Float32 => ScalarValue::Float32(None), protobuf::PrimitiveScalarType::Float64 => ScalarValue::Float64(None), protobuf::PrimitiveScalarType::Utf8 => ScalarValue::Utf8(None), protobuf::PrimitiveScalarType::LargeUtf8 => ScalarValue::LargeUtf8(None), protobuf::PrimitiveScalarType::Date32 => ScalarValue::Date32(None), protobuf::PrimitiveScalarType::TimeMicrosecond => ScalarValue::TimeMicrosecond(None), protobuf::PrimitiveScalarType::TimeNanosecond => ScalarValue::TimeNanosecond(None), }) } } impl TryInto<datafusion::scalar::ScalarValue> for &protobuf::ScalarValue { type Error = BallistaError; fn try_into(self) -> Result<datafusion::scalar::ScalarValue, Self::Error> { let value = self.value.as_ref().ok_or_else(|| { 
proto_error("Protobuf deserialization error: missing required field 'value'") })?; Ok(match value { protobuf::scalar_value::Value::BoolValue(v) => ScalarValue::Boolean(Some(*v)), protobuf::scalar_value::Value::Utf8Value(v) => ScalarValue::Utf8(Some(v.to_owned())), protobuf::scalar_value::Value::LargeUtf8Value(v) => { ScalarValue::LargeUtf8(Some(v.to_owned())) } protobuf::scalar_value::Value::Int8Value(v) => ScalarValue::Int8(Some(*v as i8)), protobuf::scalar_value::Value::Int16Value(v) => ScalarValue::Int16(Some(*v as i16)), protobuf::scalar_value::Value::Int32Value(v) => ScalarValue::Int32(Some(*v)), protobuf::scalar_value::Value::Int64Value(v) => ScalarValue::Int64(Some(*v)), protobuf::scalar_value::Value::Uint8Value(v) => ScalarValue::UInt8(Some(*v as u8)), protobuf::scalar_value::Value::Uint16Value(v) => ScalarValue::UInt16(Some(*v as u16)), protobuf::scalar_value::Value::Uint32Value(v) => ScalarValue::UInt32(Some(*v)), protobuf::scalar_value::Value::Uint64Value(v) => ScalarValue::UInt64(Some(*v)), protobuf::scalar_value::Value::Float32Value(v) => ScalarValue::Float32(Some(*v)), protobuf::scalar_value::Value::Float64Value(v) => ScalarValue::Float64(Some(*v)), protobuf::scalar_value::Value::Date32Value(v) => ScalarValue::Date32(Some(*v)), protobuf::scalar_value::Value::TimeMicrosecondValue(v) => { ScalarValue::TimeMicrosecond(Some(*v)) } protobuf::scalar_value::Value::TimeNanosecondValue(v) => { ScalarValue::TimeNanosecond(Some(*v)) } protobuf::scalar_value::Value::ListValue(scalar_list) => { let protobuf::ScalarListValue { values, datatype: opt_scalar_type, } = &scalar_list; let pb_scalar_type = opt_scalar_type .as_ref() .ok_or_else(|| proto_error("Protobuf deserialization err: ScalaListValue missing required field 'datatype'"))?; let typechecked_values: Vec<ScalarValue> = values .iter() .map(|val| val.try_into()) .collect::<Result<Vec<_>, _>>()?; let scalar_type: arrow::datatypes::DataType = pb_scalar_type.try_into()?; 
ScalarValue::List(Some(typechecked_values), scalar_type) } protobuf::scalar_value::Value::NullListValue(v) => { let pb_datatype = v .datatype .as_ref() .ok_or_else(|| proto_error("Protobuf deserialization error: NullListValue message missing required field 'datatyp'"))?; ScalarValue::List(None, pb_datatype.try_into()?) } protobuf::scalar_value::Value::NullValue(v) => { let null_type_enum = protobuf::PrimitiveScalarType::from_i32(*v) .ok_or_else(|| proto_error("Protobuf deserialization error found invalid enum variant for DatafusionScalar"))?; null_type_enum.try_into()? } }) } } impl TryInto<Expr> for &protobuf::LogicalExprNode { type Error = BallistaError; fn try_into(self) -> Result<Expr, Self::Error> { use protobuf::logical_expr_node::ExprType; let expr_type = self .expr_type .as_ref() .ok_or_else(|| proto_error("Unexpected empty logical expression"))?; match expr_type { ExprType::BinaryExpr(binary_expr) => Ok(Expr::BinaryExpr { left: Box::new(parse_required_expr(&binary_expr.l)?), op: from_proto_binary_op(&binary_expr.op)?, right: Box::new(parse_required_expr(&binary_expr.r)?), }), ExprType::ColumnName(column_name) => Ok(Expr::Column(column_name.to_owned())), ExprType::Literal(literal) => { use datafusion::scalar::ScalarValue; let scalar_value: datafusion::scalar::ScalarValue = literal.try_into()?; Ok(Expr::Literal(scalar_value)) } ExprType::AggregateExpr(expr) => { let aggr_function = protobuf::AggregateFunction::from_i32(expr.aggr_function) .ok_or_else(|| { proto_error(format!( "Received an unknown aggregate function: {}", expr.aggr_function )) })?; let fun = match aggr_function { protobuf::AggregateFunction::Min => AggregateFunction::Min, protobuf::AggregateFunction::Max => AggregateFunction::Max, protobuf::AggregateFunction::Sum => AggregateFunction::Sum, protobuf::AggregateFunction::Avg => AggregateFunction::Avg, protobuf::AggregateFunction::Count => AggregateFunction::Count, }; Ok(Expr::AggregateFunction { fun, args: vec![parse_required_expr(&expr.expr)?], 
distinct: false, //TODO }) } ExprType::Alias(alias) => Ok(Expr::Alias( Box::new(parse_required_expr(&alias.expr)?), alias.alias.clone(), )), ExprType::IsNullExpr(is_null) => { Ok(Expr::IsNull(Box::new(parse_required_expr(&is_null.expr)?))) } ExprType::IsNotNullExpr(is_not_null) => Ok(Expr::IsNotNull(Box::new( parse_required_expr(&is_not_null.expr)?, ))), ExprType::NotExpr(not) => Ok(Expr::Not(Box::new(parse_required_expr(&not.expr)?))), ExprType::Between(between) => Ok(Expr::Between { expr: Box::new(parse_required_expr(&between.expr)?), negated: between.negated, low: Box::new(parse_required_expr(&between.low)?), high: Box::new(parse_required_expr(&between.high)?), }), ExprType::Case(case) => { let when_then_expr = case .when_then_expr .iter() .map(|e| { Ok(( Box::new(match &e.when_expr { Some(e) => e.try_into(), None => Err(proto_error("Missing required expression")), }?), Box::new(match &e.then_expr { Some(e) => e.try_into(), None => Err(proto_error("Missing required expression")), }?), )) }) .collect::<Result<Vec<(Box<Expr>, Box<Expr>)>, BallistaError>>()?; Ok(Expr::Case { expr: parse_optional_expr(&case.expr)?.map(Box::new), when_then_expr, else_expr: parse_optional_expr(&case.else_expr)?.map(Box::new), }) } ExprType::Cast(cast) => { let expr = Box::new(parse_required_expr(&cast.expr)?); let arrow_type: &protobuf::ArrowType = cast .arrow_type .as_ref() .ok_or_else(|| proto_error("Protobuf deserialization error: CastNode message missing required field 'arrow_type'"))?; let data_type = arrow_type.try_into()?; Ok(Expr::Cast { expr, data_type }) } ExprType::Sort(sort) => Ok(Expr::Sort { expr: Box::new(parse_required_expr(&sort.expr)?), asc: sort.asc, nulls_first: sort.nulls_first, }), ExprType::Negative(negative) => Ok(Expr::Negative(Box::new(parse_required_expr( &negative.expr, )?))), ExprType::InList(in_list) => Ok(Expr::InList { expr: Box::new(parse_required_expr(&in_list.expr)?), list: in_list .list .iter() .map(|expr| expr.try_into()) .collect::<Result<Vec<_>, 
_>>()?, negated: in_list.negated, }), ExprType::Wildcard(_) => Ok(Expr::Wildcard), ExprType::ScalarFunction(expr) => { let scalar_function = protobuf::ScalarFunction::from_i32(expr.fun).ok_or_else(|| { proto_error(format!("Received an unknown scalar function: {}", expr.fun)) })?; match scalar_function { protobuf::ScalarFunction::Sqrt => Ok(sqrt((&expr.expr[0]).try_into()?)), protobuf::ScalarFunction::Sin => Ok(sin((&expr.expr[0]).try_into()?)), protobuf::ScalarFunction::Cos => Ok(cos((&expr.expr[0]).try_into()?)), protobuf::ScalarFunction::Tan => Ok(tan((&expr.expr[0]).try_into()?)), // protobuf::ScalarFunction::Asin => Ok(asin(&expr.expr[0]).try_into()?)), // protobuf::ScalarFunction::Acos => Ok(acos(&expr.expr[0]).try_into()?)), protobuf::ScalarFunction::Atan => Ok(atan((&expr.expr[0]).try_into()?)), protobuf::ScalarFunction::Exp => Ok(exp((&expr.expr[0]).try_into()?)), protobuf::ScalarFunction::Log2 => Ok(log2((&expr.expr[0]).try_into()?)), protobuf::ScalarFunction::Log10 => Ok(log10((&expr.expr[0]).try_into()?)), protobuf::ScalarFunction::Floor => Ok(floor((&expr.expr[0]).try_into()?)), protobuf::ScalarFunction::Ceil => Ok(ceil((&expr.expr[0]).try_into()?)), protobuf::ScalarFunction::Round => Ok(round((&expr.expr[0]).try_into()?)), protobuf::ScalarFunction::Trunc => Ok(trunc((&expr.expr[0]).try_into()?)), protobuf::ScalarFunction::Abs => Ok(abs((&expr.expr[0]).try_into()?)), protobuf::ScalarFunction::Signum => Ok(signum((&expr.expr[0]).try_into()?)), protobuf::ScalarFunction::Length => Ok(length((&expr.expr[0]).try_into()?)), // // protobuf::ScalarFunction::Concat => Ok(concat((&expr.expr[0]).try_into()?)), protobuf::ScalarFunction::Lower => Ok(lower((&expr.expr[0]).try_into()?)), protobuf::ScalarFunction::Upper => Ok(upper((&expr.expr[0]).try_into()?)), protobuf::ScalarFunction::Trim => Ok(trim((&expr.expr[0]).try_into()?)), protobuf::ScalarFunction::Ltrim => Ok(ltrim((&expr.expr[0]).try_into()?)), protobuf::ScalarFunction::Rtrim => 
Ok(rtrim((&expr.expr[0]).try_into()?)), // protobuf::ScalarFunction::Totimestamp => Ok(to_timestamp((&expr.expr[0]).try_into()?)), // protobuf::ScalarFunction::Array => Ok(array((&expr.expr[0]).try_into()?)), // // protobuf::ScalarFunction::Nullif => Ok(nulli((&expr.expr[0]).try_into()?)), // protobuf::ScalarFunction::Datetrunc => Ok(date_trunc((&expr.expr[0]).try_into()?)), // protobuf::ScalarFunction::Md5 => Ok(md5((&expr.expr[0]).try_into()?)), protobuf::ScalarFunction::Sha224 => Ok(sha224((&expr.expr[0]).try_into()?)), protobuf::ScalarFunction::Sha256 => Ok(sha256((&expr.expr[0]).try_into()?)), protobuf::ScalarFunction::Sha384 => Ok(sha384((&expr.expr[0]).try_into()?)), protobuf::ScalarFunction::Sha512 => Ok(sha512((&expr.expr[0]).try_into()?)), _ => Err(proto_error( "Protobuf deserialization error: Unsupported scalar function", )), } } } } } fn from_proto_binary_op(op: &str) -> Result<Operator, BallistaError> { match op { "And" => Ok(Operator::And), "Or" => Ok(Operator::Or), "Eq" => Ok(Operator::Eq), "NotEq" => Ok(Operator::NotEq), "LtEq" => Ok(Operator::LtEq), "Lt" => Ok(Operator::Lt), "Gt" => Ok(Operator::Gt), "GtEq" => Ok(Operator::GtEq), "Plus" => Ok(Operator::Plus), "Minus" => Ok(Operator::Minus), "Multiply" => Ok(Operator::Multiply), "Divide" => Ok(Operator::Divide), "Like" => Ok(Operator::Like), other => Err(proto_error(format!( "Unsupported binary operator '{:?}'", other ))), } } impl TryInto<arrow::datatypes::DataType> for &protobuf::ScalarType { type Error = BallistaError; fn try_into(self) -> Result<arrow::datatypes::DataType, Self::Error> { let pb_scalartype = self .datatype .as_ref() .ok_or_else(|| proto_error("ScalarType message missing required field 'datatype'"))?; pb_scalartype.try_into() } } impl TryInto<Schema> for &protobuf::Schema { type Error = BallistaError; fn try_into(self) -> Result<Schema, BallistaError> { let fields = self .columns .iter() .map(|c| { let pb_arrow_type_res = c .arrow_type .as_ref() .ok_or_else(|| 
proto_error("Protobuf deserialization error: Field message was missing required field 'arrow_type'")); let pb_arrow_type: &protobuf::ArrowType = match pb_arrow_type_res { Ok(res) => res, Err(e) => return Err(e), }; Ok(Field::new(&c.name, pb_arrow_type.try_into()?, c.nullable)) }) .collect::<Result<Vec<_>, _>>()?; Ok(Schema::new(fields)) } } impl TryInto<arrow::datatypes::Field> for &protobuf::Field { type Error = BallistaError; fn try_into(self) -> Result<arrow::datatypes::Field, Self::Error> { let pb_datatype = self.arrow_type.as_ref().ok_or_else(|| { proto_error( "Protobuf deserialization error: Field message missing required field 'arrow_type'", ) })?; Ok(arrow::datatypes::Field::new( self.name.as_str(), pb_datatype.as_ref().try_into()?, self.nullable, )) } } use datafusion::physical_plan::datetime_expressions::{date_trunc, to_timestamp}; use datafusion::prelude::{ array, length, lower, ltrim, md5, rtrim, sha224, sha256, sha384, sha512, trim, upper, }; use std::convert::TryFrom; impl TryFrom<i32> for protobuf::FileType { type Error = BallistaError; fn try_from(value: i32) -> Result<Self, Self::Error> { use protobuf::FileType; match value { _x if _x == FileType::NdJson as i32 => Ok(FileType::NdJson), _x if _x == FileType::Parquet as i32 => Ok(FileType::Parquet), _x if _x == FileType::Csv as i32 => Ok(FileType::Csv), invalid => Err(BallistaError::General(format!( "Attempted to convert invalid i32 to protobuf::Filetype: {}", invalid ))), } } } impl Into<datafusion::sql::parser::FileType> for protobuf::FileType { fn into(self) -> datafusion::sql::parser::FileType { use datafusion::sql::parser::FileType; match self { protobuf::FileType::NdJson => FileType::NdJson, protobuf::FileType::Parquet => FileType::Parquet, protobuf::FileType::Csv => FileType::CSV, } } } fn parse_required_expr(p: &Option<Box<protobuf::LogicalExprNode>>) -> Result<Expr, BallistaError> { match p { Some(expr) => expr.as_ref().try_into(), None => Err(proto_error("Missing required expression")), } } 
/// Decode an optional boxed protobuf expression node.
///
/// Returns `Ok(None)` when no node is present; otherwise converts the inner
/// node into an [`Expr`] and wraps it in `Some`, propagating any conversion
/// error.
fn parse_optional_expr(
    p: &Option<Box<protobuf::LogicalExprNode>>,
) -> Result<Option<Expr>, BallistaError> {
    // `Option<Result<_, _>>` -> `Result<Option<_>, _>` via `transpose`,
    // which is exactly the match the original spelled out by hand.
    p.as_ref()
        .map(|node| node.as_ref().try_into())
        .transpose()
}
49.005709
151
0.536569
d6b97defc6e43ca14f4c73068b83f281216e22c5
431
use std::env; use std::process; use minigrep::Config; // ANCHOR: here fn main() { let args: Vec<String> = env::args().collect(); let config = Config::build(&args).unwrap_or_else(|err| { eprintln!("Problem parsing arguments: {err}"); process::exit(1); }); if let Err(e) = minigrep::run(config) { eprintln!("Application error: {e}"); process::exit(1); } } // ANCHOR_END: here
19.590909
60
0.577726
f59f100653fbb31bcc5eba9fe11bf13a8bb6f02a
5,413
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // run-pass // This test deserializes an enum in-place by transmuting to a union that // should have the same layout, and manipulating the tag and payloads // independently. This verifies that `repr(some_int)` has a stable representation, // and that we don't miscompile these kinds of manipulations. use std::time::Duration; use std::mem; #[repr(C, u8)] #[derive(Copy, Clone, Eq, PartialEq, Debug)] enum MyEnum { A(u32), // Single primitive value B { x: u8, y: i16, z: u8 }, // Composite, and the offsets of `y` and `z` // depend on tag being internal C, // Empty D(Option<u32>), // Contains an enum E(Duration), // Contains a struct } #[repr(C)] struct MyEnumRepr { tag: MyEnumTag, payload: MyEnumPayload, } #[repr(C)] #[allow(non_snake_case)] union MyEnumPayload { A: MyEnumVariantA, B: MyEnumVariantB, D: MyEnumVariantD, E: MyEnumVariantE, } #[repr(u8)] #[derive(Copy, Clone)] enum MyEnumTag { A, B, C, D, E } #[repr(C)] #[derive(Copy, Clone)] struct MyEnumVariantA(u32); #[repr(C)] #[derive(Copy, Clone)] struct MyEnumVariantB {x: u8, y: i16, z: u8 } #[repr(C)] #[derive(Copy, Clone)] struct MyEnumVariantD(Option<u32>); #[repr(C)] #[derive(Copy, Clone)] struct MyEnumVariantE(Duration); fn main() { let result: Vec<Result<MyEnum, ()>> = vec![ Ok(MyEnum::A(17)), Ok(MyEnum::B { x: 206, y: 1145, z: 78 }), Ok(MyEnum::C), Err(()), Ok(MyEnum::D(Some(407))), Ok(MyEnum::D(None)), Ok(MyEnum::E(Duration::from_secs(100))), Err(()), ]; // Binary serialized version of the above (little-endian) let input: Vec<u8> = vec![ 0, 17, 0, 0, 
0, 1, 206, 121, 4, 78, 2, 8, /* invalid tag value */ 3, 0, 151, 1, 0, 0, 3, 1, 4, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* incomplete value */ ]; let mut output = vec![]; let mut buf = &input[..]; unsafe { // This should be safe, because we don't match on it unless it's fully formed, // and it doesn't have a destructor. let mut dest: MyEnum = mem::uninitialized(); while buf.len() > 0 { match parse_my_enum(&mut dest, &mut buf) { Ok(()) => output.push(Ok(dest)), Err(()) => output.push(Err(())), } } } assert_eq!(output, result); } fn parse_my_enum<'a>(dest: &'a mut MyEnum, buf: &mut &[u8]) -> Result<(), ()> { unsafe { // Should be correct to do this transmute. let dest: &'a mut MyEnumRepr = mem::transmute(dest); let tag = read_u8(buf)?; dest.tag = match tag { 0 => MyEnumTag::A, 1 => MyEnumTag::B, 2 => MyEnumTag::C, 3 => MyEnumTag::D, 4 => MyEnumTag::E, _ => return Err(()), }; match dest.tag { MyEnumTag::A => { dest.payload.A.0 = read_u32_le(buf)?; } MyEnumTag::B => { dest.payload.B.x = read_u8(buf)?; dest.payload.B.y = read_u16_le(buf)? as i16; dest.payload.B.z = read_u8(buf)?; } MyEnumTag::C => { /* do nothing */ } MyEnumTag::D => { let is_some = read_u8(buf)? 
== 0; if is_some { dest.payload.D.0 = Some(read_u32_le(buf)?); } else { dest.payload.D.0 = None; } } MyEnumTag::E => { let secs = read_u64_le(buf)?; let nanos = read_u32_le(buf)?; dest.payload.E.0 = Duration::new(secs, nanos); } } Ok(()) } } // reader helpers fn read_u64_le(buf: &mut &[u8]) -> Result<u64, ()> { if buf.len() < 8 { return Err(()) } let val = (buf[0] as u64) << 0 | (buf[1] as u64) << 8 | (buf[2] as u64) << 16 | (buf[3] as u64) << 24 | (buf[4] as u64) << 32 | (buf[5] as u64) << 40 | (buf[6] as u64) << 48 | (buf[7] as u64) << 56; *buf = &buf[8..]; Ok(val) } fn read_u32_le(buf: &mut &[u8]) -> Result<u32, ()> { if buf.len() < 4 { return Err(()) } let val = (buf[0] as u32) << 0 | (buf[1] as u32) << 8 | (buf[2] as u32) << 16 | (buf[3] as u32) << 24; *buf = &buf[4..]; Ok(val) } fn read_u16_le(buf: &mut &[u8]) -> Result<u16, ()> { if buf.len() < 2 { return Err(()) } let val = (buf[0] as u16) << 0 | (buf[1] as u16) << 8; *buf = &buf[2..]; Ok(val) } fn read_u8(buf: &mut &[u8]) -> Result<u8, ()> { if buf.len() < 1 { return Err(()) } let val = buf[0]; *buf = &buf[1..]; Ok(val) }
29.906077
86
0.49492
4ab37c64b39443dd9ef2bd11f527bb1550624a28
6,846
// WARNING: This file was autogenerated by jni-bindgen. Any changes to this file may be lost!!! #[cfg(any(feature = "all", feature = "android-icu-util-Measure"))] __jni_bindgen! { /// public class [Measure](https://developer.android.com/reference/android/icu/util/Measure.html) /// /// Required feature: android-icu-util-Measure public class Measure ("android/icu/util/Measure") extends crate::java::lang::Object { /// [Measure](https://developer.android.com/reference/android/icu/util/Measure.html#Measure(java.lang.Number,%20android.icu.util.MeasureUnit)) /// /// Required features: "android-icu-util-MeasureUnit", "java-lang-Number" #[cfg(any(feature = "all", all(feature = "android-icu-util-MeasureUnit", feature = "java-lang-Number")))] pub fn new<'env>(__jni_env: &'env __jni_bindgen::Env, arg0: impl __jni_bindgen::std::convert::Into<__jni_bindgen::std::option::Option<&'env crate::java::lang::Number>>, arg1: impl __jni_bindgen::std::convert::Into<__jni_bindgen::std::option::Option<&'env crate::android::icu::util::MeasureUnit>>) -> __jni_bindgen::std::result::Result<__jni_bindgen::Local<'env, crate::android::icu::util::Measure>, __jni_bindgen::Local<'env, crate::java::lang::Throwable>> { // class.path == "android/icu/util/Measure", java.flags == PUBLIC, .name == "<init>", .descriptor == "(Ljava/lang/Number;Landroid/icu/util/MeasureUnit;)V" unsafe { let __jni_args = [__jni_bindgen::AsJValue::as_jvalue(&arg0.into()), __jni_bindgen::AsJValue::as_jvalue(&arg1.into())]; let (__jni_class, __jni_method) = __jni_env.require_class_method("android/icu/util/Measure\0", "<init>\0", "(Ljava/lang/Number;Landroid/icu/util/MeasureUnit;)V\0"); __jni_env.new_object_a(__jni_class, __jni_method, __jni_args.as_ptr()) } } /// [equals](https://developer.android.com/reference/android/icu/util/Measure.html#equals(java.lang.Object)) /// /// Required features: "java-lang-Object" #[cfg(any(feature = "all", all(feature = "java-lang-Object")))] pub fn equals<'env>(&'env self, arg0: impl 
__jni_bindgen::std::convert::Into<__jni_bindgen::std::option::Option<&'env crate::java::lang::Object>>) -> __jni_bindgen::std::result::Result<bool, __jni_bindgen::Local<'env, crate::java::lang::Throwable>> { // class.path == "android/icu/util/Measure", java.flags == PUBLIC, .name == "equals", .descriptor == "(Ljava/lang/Object;)Z" unsafe { let __jni_args = [__jni_bindgen::AsJValue::as_jvalue(&arg0.into())]; let __jni_env = __jni_bindgen::Env::from_ptr(self.0.env); let (__jni_class, __jni_method) = __jni_env.require_class_method("android/icu/util/Measure\0", "equals\0", "(Ljava/lang/Object;)Z\0"); __jni_env.call_boolean_method_a(self.0.object, __jni_method, __jni_args.as_ptr()) } } /// [hashCode](https://developer.android.com/reference/android/icu/util/Measure.html#hashCode()) pub fn hashCode<'env>(&'env self) -> __jni_bindgen::std::result::Result<i32, __jni_bindgen::Local<'env, crate::java::lang::Throwable>> { // class.path == "android/icu/util/Measure", java.flags == PUBLIC, .name == "hashCode", .descriptor == "()I" unsafe { let __jni_args = []; let __jni_env = __jni_bindgen::Env::from_ptr(self.0.env); let (__jni_class, __jni_method) = __jni_env.require_class_method("android/icu/util/Measure\0", "hashCode\0", "()I\0"); __jni_env.call_int_method_a(self.0.object, __jni_method, __jni_args.as_ptr()) } } /// [toString](https://developer.android.com/reference/android/icu/util/Measure.html#toString()) /// /// Required features: "java-lang-String" #[cfg(any(feature = "all", all(feature = "java-lang-String")))] pub fn toString<'env>(&'env self) -> __jni_bindgen::std::result::Result<__jni_bindgen::std::option::Option<__jni_bindgen::Local<'env, crate::java::lang::String>>, __jni_bindgen::Local<'env, crate::java::lang::Throwable>> { // class.path == "android/icu/util/Measure", java.flags == PUBLIC, .name == "toString", .descriptor == "()Ljava/lang/String;" unsafe { let __jni_args = []; let __jni_env = __jni_bindgen::Env::from_ptr(self.0.env); let (__jni_class, __jni_method) = 
__jni_env.require_class_method("android/icu/util/Measure\0", "toString\0", "()Ljava/lang/String;\0"); __jni_env.call_object_method_a(self.0.object, __jni_method, __jni_args.as_ptr()) } } /// [getNumber](https://developer.android.com/reference/android/icu/util/Measure.html#getNumber()) /// /// Required features: "java-lang-Number" #[cfg(any(feature = "all", all(feature = "java-lang-Number")))] pub fn getNumber<'env>(&'env self) -> __jni_bindgen::std::result::Result<__jni_bindgen::std::option::Option<__jni_bindgen::Local<'env, crate::java::lang::Number>>, __jni_bindgen::Local<'env, crate::java::lang::Throwable>> { // class.path == "android/icu/util/Measure", java.flags == PUBLIC, .name == "getNumber", .descriptor == "()Ljava/lang/Number;" unsafe { let __jni_args = []; let __jni_env = __jni_bindgen::Env::from_ptr(self.0.env); let (__jni_class, __jni_method) = __jni_env.require_class_method("android/icu/util/Measure\0", "getNumber\0", "()Ljava/lang/Number;\0"); __jni_env.call_object_method_a(self.0.object, __jni_method, __jni_args.as_ptr()) } } /// [getUnit](https://developer.android.com/reference/android/icu/util/Measure.html#getUnit()) /// /// Required features: "android-icu-util-MeasureUnit" #[cfg(any(feature = "all", all(feature = "android-icu-util-MeasureUnit")))] pub fn getUnit<'env>(&'env self) -> __jni_bindgen::std::result::Result<__jni_bindgen::std::option::Option<__jni_bindgen::Local<'env, crate::android::icu::util::MeasureUnit>>, __jni_bindgen::Local<'env, crate::java::lang::Throwable>> { // class.path == "android/icu/util/Measure", java.flags == PUBLIC, .name == "getUnit", .descriptor == "()Landroid/icu/util/MeasureUnit;" unsafe { let __jni_args = []; let __jni_env = __jni_bindgen::Env::from_ptr(self.0.env); let (__jni_class, __jni_method) = __jni_env.require_class_method("android/icu/util/Measure\0", "getUnit\0", "()Landroid/icu/util/MeasureUnit;\0"); __jni_env.call_object_method_a(self.0.object, __jni_method, __jni_args.as_ptr()) } } } }
74.413043
466
0.63234
29a252f7cf53248274d54255ccb07cdaf7a594ca
832
use std::env::{self, args};

use crate::{
    parser::BakaArgs,
    plugins::plugins,
    setting::{project, root},
};

pub mod commands;
pub mod parser;
pub mod plugins;
pub mod setting;

const MY_DREAM: &str = "여친생겼으면";

/// Print a diagnostic dump of baka's state: root/project settings, the
/// discovered plugins, the parsed command-line arguments, and the relevant
/// environment variables.
pub fn debug() {
    let root = root();
    let project = project();
    let plugins = plugins();

    println!(
        "Settings:\nRootSetting: {:?}\nProjectSetting: {:?}\nPlugins: {:?}\n",
        root, project, plugins
    );

    let args_parsed = BakaArgs::parse_args(args());

    println!(
        "Parser:\nFlags: {:?} Subcommand: {:?} Args: {:?}\n",
        args_parsed.baka_flags, args_parsed.subcommand, args_parsed.args
    );

    // BUG FIX: the previous `.unwrap()` calls made this *debug* helper panic
    // whenever either variable was unset — exactly the situation a debug dump
    // should be able to report. Show a placeholder instead.
    println!(
        "Env:\nbaka_plugins: {}\nbaka_root_setting: {}",
        env::var("baka_plugins").unwrap_or_else(|_| "<unset>".to_string()),
        env::var("baka_root_setting").unwrap_or_else(|_| "<unset>".to_string())
    );
}
22.486486
78
0.588942
284dec0ab32deff9b96a283dfecbe0b8fd72a8f5
8,004
//! This crate provides traits for working with finite fields. // Catch documentation errors caused by code changes. #![no_std] #![cfg_attr(docsrs, feature(doc_cfg))] #![deny(broken_intra_doc_links)] #![forbid(unsafe_code)] #[cfg(feature = "alloc")] extern crate alloc; mod batch; pub use batch::*; #[cfg(feature = "derive")] #[cfg_attr(docsrs, doc(cfg(feature = "derive")))] pub use ff_derive::PrimeField; #[cfg(feature = "bits")] #[cfg_attr(docsrs, doc(cfg(feature = "bits")))] pub use bitvec::view::BitViewSized; #[cfg(feature = "bits")] use bitvec::{array::BitArray, order::Lsb0}; use core::fmt; use core::ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign}; use rand_core::RngCore; use subtle::{ConditionallySelectable, CtOption}; /// Bit representation of a field element. #[cfg(feature = "bits")] #[cfg_attr(docsrs, doc(cfg(feature = "bits")))] pub type FieldBits<V> = BitArray<Lsb0, V>; /// This trait represents an element of a field. pub trait Field: Sized + Eq + Copy + Clone + Default + Send + Sync + fmt::Debug + 'static + ConditionallySelectable + Add<Output = Self> + Sub<Output = Self> + Mul<Output = Self> + Neg<Output = Self> + for<'a> Add<&'a Self, Output = Self> + for<'a> Mul<&'a Self, Output = Self> + for<'a> Sub<&'a Self, Output = Self> + MulAssign + AddAssign + SubAssign + for<'a> MulAssign<&'a Self> + for<'a> AddAssign<&'a Self> + for<'a> SubAssign<&'a Self> { /// Returns an element chosen uniformly at random using a user-provided RNG. fn random(rng: impl RngCore) -> Self; /// Returns the zero element of the field, the additive identity. fn zero() -> Self; /// Returns the one element of the field, the multiplicative identity. fn one() -> Self; /// Returns true iff this element is zero. fn is_zero(&self) -> bool; /// Squares this element. #[must_use] fn square(&self) -> Self; /// Cubes this element. #[must_use] fn cube(&self) -> Self { self.square() * self } /// Doubles this element. 
#[must_use] fn double(&self) -> Self; /// Computes the multiplicative inverse of this element, /// failing if the element is zero. fn invert(&self) -> CtOption<Self>; /// Returns the square root of the field element, if it is /// quadratic residue. fn sqrt(&self) -> CtOption<Self>; /// Exponentiates `self` by `exp`, where `exp` is a little-endian order /// integer exponent. /// /// **This operation is variable time with respect to the exponent.** If the /// exponent is fixed, this operation is effectively constant time. fn pow_vartime<S: AsRef<[u64]>>(&self, exp: S) -> Self { let mut res = Self::one(); for e in exp.as_ref().iter().rev() { for i in (0..64).rev() { res = res.square(); if ((*e >> i) & 1) == 1 { res.mul_assign(self); } } } res } } /// This represents an element of a prime field. pub trait PrimeField: Field + From<u64> { /// The prime field can be converted back and forth into this binary /// representation. type Repr: Default + AsRef<[u8]> + AsMut<[u8]>; /// Interpret a string of numbers as a (congruent) prime field element. /// Does not accept unnecessary leading zeroes or a blank string. fn from_str(s: &str) -> Option<Self> { if s.is_empty() { return None; } if s == "0" { return Some(Self::zero()); } let mut res = Self::zero(); let ten = Self::from(10); let mut first_digit = true; for c in s.chars() { match c.to_digit(10) { Some(c) => { if first_digit { if c == 0 { return None; } first_digit = false; } res.mul_assign(&ten); res.add_assign(&Self::from(u64::from(c))); } None => { return None; } } } Some(res) } /// Attempts to convert a byte representation of a field element into an element of /// this prime field, failing if the input is not canonical (is not smaller than the /// field's modulus). /// /// The byte representation is interpreted with the same endianness as elements /// returned by [`PrimeField::to_repr`]. 
fn from_repr(_: Self::Repr) -> Option<Self>; /// Converts an element of the prime field into the standard byte representation for /// this field. /// /// The endianness of the byte representation is implementation-specific. Generic /// encodings of field elements should be treated as opaque. fn to_repr(&self) -> Self::Repr; /// Returns true iff this element is odd. fn is_odd(&self) -> bool; /// Returns true iff this element is even. #[inline(always)] fn is_even(&self) -> bool { !self.is_odd() } /// How many bits are needed to represent an element of this field. const NUM_BITS: u32; /// How many bits of information can be reliably stored in the field element. /// /// This is usually `Self::NUM_BITS - 1`. const CAPACITY: u32; /// Returns a fixed multiplicative generator of `modulus - 1` order. This element must /// also be a quadratic nonresidue. /// /// It can be calculated using [SageMath] as `GF(modulus).primitive_element()`. /// /// Implementations of this method MUST ensure that this is the generator used to /// derive `Self::root_of_unity`. /// /// [SageMath]: https://www.sagemath.org/ fn multiplicative_generator() -> Self; /// An integer `s` satisfying the equation `2^s * t = modulus - 1` with `t` odd. /// /// This is the number of leading zero bits in the little-endian bit representation of /// `modulus - 1`. const S: u32; /// Returns the `2^s` root of unity. /// /// It can be calculated by exponentiating `Self::multiplicative_generator` by `t`, /// where `t = (modulus - 1) >> Self::S`. fn root_of_unity() -> Self; } /// This represents the bits of an element of a prime field. #[cfg(feature = "bits")] #[cfg_attr(docsrs, doc(cfg(feature = "bits")))] pub trait PrimeFieldBits: PrimeField { /// The backing store for a bit representation of a prime field element. type ReprBits: BitViewSized + Send + Sync; /// Converts an element of the prime field into a little-endian sequence of bits. 
fn to_le_bits(&self) -> FieldBits<Self::ReprBits>; /// Returns the bits of the field characteristic (the modulus) in little-endian order. fn char_le_bits() -> FieldBits<Self::ReprBits>; } pub use self::arith_impl::*; mod arith_impl { /// Calculate a - b - borrow, returning the result and modifying /// the borrow value. #[inline(always)] pub fn sbb(a: u64, b: u64, borrow: &mut u64) -> u64 { let tmp = (1u128 << 64) + u128::from(a) - u128::from(b) - u128::from(*borrow); *borrow = if tmp >> 64 == 0 { 1 } else { 0 }; tmp as u64 } /// Calculate a + b + carry, returning the sum and modifying the /// carry value. #[inline(always)] pub fn adc(a: u64, b: u64, carry: &mut u64) -> u64 { let tmp = u128::from(a) + u128::from(b) + u128::from(*carry); *carry = (tmp >> 64) as u64; tmp as u64 } /// Calculate a + (b * c) + carry, returning the least significant digit /// and setting carry to the most significant digit. #[inline(always)] pub fn mac_with_carry(a: u64, b: u64, c: u64, carry: &mut u64) -> u64 { let tmp = (u128::from(a)) + u128::from(b) * u128::from(c) + u128::from(*carry); *carry = (tmp >> 64) as u64; tmp as u64 } }
29.865672
90
0.582209
feef113c2b43714e9c1c76a4b84392cafabef57e
2,300
use crate::stream::Fuse;
use core::pin::Pin;
use futures_core::future::{FusedFuture, Future};
use futures_core::stream::Stream;
use futures_core::task::{Context, Poll};
use futures_sink::Sink;
use pin_project::pin_project;

/// Future for the [`forward`](super::StreamExt::forward) method.
#[pin_project(project = ForwardProj)]
#[derive(Debug)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Forward<St, Si, Item> {
    // Wrapped in `Option` so the sink can be dropped (set to `None`) once
    // forwarding completes; `is_terminated` keys off this.
    #[pin]
    sink: Option<Si>,
    // Fused so polling the stream after exhaustion is safe.
    #[pin]
    stream: Fuse<St>,
    // An item pulled from the stream but not yet accepted by the sink.
    // At most one item is buffered at a time.
    buffered_item: Option<Item>,
}

impl<St, Si, Item> Forward<St, Si, Item> {
    /// Create a `Forward` that drives every item of `stream` into `sink`.
    pub(crate) fn new(stream: St, sink: Si) -> Self {
        Forward {
            sink: Some(sink),
            stream: Fuse::new(stream),
            buffered_item: None,
        }
    }
}

impl<St, Si, Item, E> FusedFuture for Forward<St, Si, Item>
where
    Si: Sink<Item, Error = E>,
    St: Stream<Item = Result<Item, E>>,
{
    // Terminated once `poll` has closed and dropped the sink.
    fn is_terminated(&self) -> bool {
        self.sink.is_none()
    }
}

impl<St, Si, Item, E> Future for Forward<St, Si, Item>
where
    Si: Sink<Item, Error = E>,
    St: Stream<Item = Result<Item, E>>,
{
    type Output = Result<(), E>;

    fn poll(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Self::Output> {
        let ForwardProj { mut sink, mut stream, buffered_item } = self.project();
        // Panics if polled again after returning `Ready` (sink already taken).
        let mut si = sink.as_mut().as_pin_mut().expect("polled `Forward` after completion");

        loop {
            // If we've got an item buffered already, we need to write it to the
            // sink before we can do anything else
            if buffered_item.is_some() {
                ready!(si.as_mut().poll_ready(cx))?;
                si.as_mut().start_send(buffered_item.take().unwrap())?;
            }

            match stream.as_mut().poll_next(cx)? {
                Poll::Ready(Some(item)) => {
                    // Hold the item until the sink is ready for it on the
                    // next pass around the loop.
                    *buffered_item = Some(item);
                }
                Poll::Ready(None) => {
                    // Stream exhausted: flush-and-close the sink, then drop
                    // it so `is_terminated` reports completion.
                    ready!(si.poll_close(cx))?;
                    sink.set(None);
                    return Poll::Ready(Ok(()))
                }
                Poll::Pending => {
                    // Nothing available right now; make sure everything sent
                    // so far is flushed before yielding.
                    ready!(si.poll_flush(cx))?;
                    return Poll::Pending
                }
            }
        }
    }
}
28.75
92
0.531739
50a069ef12f6f7f24c229da0421cb1c18e2ad1df
752
use geometry::Shape;
use math::{Point3, Vector3};
use ray::Ray;
use texture::TextureCoord;

/// The result of a successful ray/shape intersection test.
#[derive(Copy, Clone)]
pub struct Intersection<'a> {
    // Ray parameter at the hit point (distance along the ray direction).
    pub t: f32,
    // The shape that was hit; borrowed for the lifetime of the scene.
    pub shape: &'a dyn Shape,
    // World-space position of the hit.
    pub point: Point3,
    // The ray that produced this intersection.
    pub ray: Ray,
    // Surface normal at the hit point.
    pub normal: Vector3,
    // True when the ray originated inside the shape.
    pub inside: bool,
    // Surface UV coordinates, when the shape provides them.
    pub texture_coord: Option<TextureCoord>,
}

impl<'a> Intersection<'a> {
    /// Bundle the raw hit data into an `Intersection`.
    pub fn new(
        t: f32,
        shape: &'a dyn Shape,
        point: Point3,
        ray: Ray,
        normal: Vector3,
        inside: bool,
        texture_coord: Option<TextureCoord>,
    ) -> Intersection {
        Intersection {
            t,
            shape,
            point,
            ray,
            normal,
            inside,
            texture_coord,
        }
    }
}
19.789474
44
0.521277
8f419104c8bd8116cb5646c2c48e584f667a5b4f
795
// Copyright (c) The dgc.network // SPDX-License-Identifier: Apache-2.0 extern crate protoc_rust; use protoc_rust::Customize; use std::fs; use std::fs::File; use std::io::prelude::*; fn main() { fs::create_dir_all("src/protos").unwrap(); protoc_rust::run(protoc_rust::Args { out_dir: "src/protos", //input: &["../protos/payload.proto", "../protos/state.proto"], input: &["../sdk/protos/payload.proto", "../sdk/protos/account.proto"], includes: &["../sdk/protos"], customize: Customize::default(), }).expect("protoc"); let mut file = File::create("src/protos/mod.rs").unwrap(); file.write_all(b"pub mod payload;\n").unwrap(); //file.write_all(b"pub mod state;\n").unwrap(); file.write_all(b"pub mod account;\n").unwrap(); }
30.576923
79
0.621384
29cd4a0446996c6f383ac46e7e8d504b9971263c
3,944
use std::collections::HashMap; use {Docopt, ArgvMap, Error}; use Value::{self, Switch, Plain}; fn get_args(doc: &str, argv: &[&'static str]) -> ArgvMap { let dopt = match Docopt::new(doc) { Err(err) => panic!("Invalid usage: {}", err), Ok(dopt) => dopt, }; match dopt.argv(vec!["cmd"].iter().chain(argv.iter())).parse() { Err(err) => panic!("{}", err), Ok(vals) => vals, } } fn map_from_alist(alist: Vec<(&'static str, Value)>) -> HashMap<String, Value> { alist.into_iter().map(|(k, v)| (k.to_string(), v)).collect() } fn same_args(expected: &HashMap<String, Value>, got: &ArgvMap) { for (k, ve) in expected.iter() { match got.map.find(k) { None => panic!("EXPECTED has '{}' but GOT does not.", k), Some(vg) => { assert!(ve == vg, "{}: EXPECTED = '{:?}' != '{:?}' = GOT", k, ve, vg) } } } for (k, vg) in got.map.iter() { match got.map.find(k) { None => panic!("GOT has '{}' but EXPECTED does not.", k), Some(ve) => { assert!(vg == ve, "{}: GOT = '{:?}' != '{:?}' = EXPECTED", k, vg, ve) } } } } macro_rules! test_expect( ($name:ident, $doc:expr, $args:expr, $expected:expr) => ( #[test] fn $name() { let vals = get_args($doc, $args); let expected = map_from_alist($expected); same_args(&expected, &vals); } ); ); macro_rules! 
test_user_error( ($name:ident, $doc:expr, $args:expr) => ( #[test] #[should_panic] fn $name() { get_args($doc, $args); } ); ); test_expect!(test_issue_13, "Usage: prog file <file>", &["file", "file"], vec![("file", Switch(true)), ("<file>", Plain(Some("file".to_string())))]); test_expect!(test_issue_129, "Usage: prog [options] Options: --foo ARG Foo foo.", &["--foo=a b"], vec![("--foo", Plain(Some("a b".into())))]); #[test] fn regression_issue_12() { const USAGE: &'static str = " Usage: whisper info <file> whisper update <file> <timestamp> <value> whisper mark <file> <value> "; #[derive(Deserialize, Debug)] struct Args { arg_file: String, cmd_info: bool, cmd_update: bool, arg_timestamp: u64, arg_value: f64, } let dopt: Args = Docopt::new(USAGE) .unwrap() .argv(&["whisper", "mark", "./p/blah", "100"]) .deserialize() .unwrap(); assert_eq!(dopt.arg_timestamp, 0); } #[test] fn regression_issue_195() { const USAGE: &'static str = " Usage: slow [-abcdefghijklmnopqrs...] "; let argv = &["slow", "-abcdefghijklmnopqrs"]; let dopt : Docopt = Docopt::new(USAGE).unwrap().argv(argv); dopt.parse().unwrap(); } #[test] fn regression_issue_219() { #[derive(Deserialize)] struct Args { arg_type: Vec<String>, arg_param: Vec<String>, } const USAGE: &'static str = " Usage: encode [-v <type> <param>]... "; let argv = &["encode", "-v", "bool", "true", "string", "foo"]; let args: Args = Docopt::new(USAGE).unwrap().argv(argv).deserialize().unwrap(); assert_eq!(args.arg_type, vec!["bool".to_owned(), "string".to_owned()]); assert_eq!(args.arg_param, vec!["true".to_owned(), "foo".to_owned()]); } #[test] fn test_unit_struct() { const USAGE: &'static str = " Usage: cargo version [options] Options: -h, --help Print this message "; #[derive(Deserialize)] struct Options; let argv = &["cargo", "version"]; let dopt: Result<Options, Error>= Docopt::new(USAGE) .unwrap() .argv(argv) .deserialize(); assert!(dopt.is_ok()); } mod testcases; mod suggestions;
25.777778
83
0.510142
28f42c0ec0c2193b8e0d74a3978aa10adce99b0b
338
use libstripe::resources::core::balance::Balance;
use libstripe::Client;
use std::env;

/// Example: fetch and print the current Stripe account balance.
///
/// The API key is read from the `STRIPE_KEY` environment variable.
fn main() -> libstripe::Result<()> {
    let secret_key = env::var("STRIPE_KEY").expect("Missing 'STRIPE_KEY'.");
    let client = Client::new(&secret_key);

    // Retrieve and print in one step; `?` propagates any API error.
    println!("{:?}", Balance::retrieve(&client)?);

    Ok(())
}
24.142857
76
0.633136
ff7db29695b0a017538cbcd3b1ee73fa2338663f
5,991
use bigint::{Address, H256}; use block::{HeaderHash, Log}; use sha3::{Digest, Keccak256}; use std::collections::HashMap; use std::sync::{Arc, Mutex}; use rpc::RPCLogFilter; use super::{RPCLog, Either}; use super::util::*; use error::Error; use rlp; use miner::MinerState; #[derive(Clone, Debug)] pub enum TopicFilter { All, Or(Vec<H256>), } #[derive(Clone, Debug)] pub struct LogFilter { pub from_block: usize, pub to_block: usize, pub address: Option<Address>, pub topics: Vec<TopicFilter>, } #[derive(Clone, Debug)] pub enum Filter { PendingTransaction(usize), Block(usize), Log(LogFilter), } fn check_log(log: &Log, index: usize, filter: &TopicFilter) -> bool { match filter { &TopicFilter::All => true, &TopicFilter::Or(ref hashes) => { if log.topics.len() >= index { false } else { let mut matched = false; for hash in hashes { if hash == &log.topics[index] { matched = true; } } matched } }, } } pub fn get_logs(state: &MinerState, filter: LogFilter) -> Result<Vec<RPCLog>, Error> { let mut current_block_number = filter.from_block; let mut ret = Vec::new(); while current_block_number >= filter.to_block { if current_block_number > state.block_height() { break; } let block = state.get_block_by_number(current_block_number); for transaction in &block.transactions { let transaction_hash = H256::from(Keccak256::digest(&rlp::encode(transaction).to_vec()).as_slice()); let receipt = state.get_receipt_by_transaction_hash(transaction_hash)?; for i in 0..receipt.logs.len() { let log = &receipt.logs[i]; if check_log(log, 0, &filter.topics[0]) && check_log(log, 1, &filter.topics[1]) && check_log(log, 2, &filter.topics[2]) && check_log(log, 3, &filter.topics[3]) && match filter.address { Some(address) => address == log.address, None => true, } { ret.push(to_rpc_log(&receipt, i, transaction, &block)); } } } current_block_number += 1; } return Ok(ret); } pub struct FilterManager { filters: HashMap<usize, Filter>, state: Arc<Mutex<MinerState>>, unmodified_filters: HashMap<usize, Filter>, } 
impl FilterManager {
    /// Create a manager backed by the shared miner state.
    pub fn new(state: Arc<Mutex<MinerState>>) -> Self {
        FilterManager {
            state,
            filters: HashMap::new(),
            unmodified_filters: HashMap::new(),
        }
    }

    /// Translate an RPC-level log filter into the internal representation.
    pub fn from_log_filter(&self, log: RPCLogFilter) -> Result<LogFilter, Error> {
        let state = self.state.lock().unwrap();
        from_log_filter(&state, log)
    }

    /// Register a log filter, returning its id.
    // NOTE(review): ids are derived from `filters.len()`, so after
    // `uninstall_filter` a new filter can collide with a live id — confirm
    // whether id reuse is intended.
    pub fn install_log_filter(&mut self, filter: LogFilter) -> usize {
        let id = self.filters.len();
        self.filters.insert(id, Filter::Log(filter.clone()));
        // Keep a pristine copy for `get_logs`, which must not observe the
        // cursor advancement done by `get_changes`.
        self.unmodified_filters.insert(id, Filter::Log(filter.clone()));
        id
    }

    /// Register a filter that reports block hashes, starting with the block
    /// after the current chain head.
    pub fn install_block_filter(&mut self) -> usize {
        let state = self.state.lock().unwrap();
        let block_height = state.block_height();

        let id = self.filters.len();
        self.filters.insert(id, Filter::Block(block_height + 1));
        self.unmodified_filters.insert(id, Filter::Block(block_height + 1));
        id
    }

    /// Register a filter that reports transaction hashes pending after this
    /// point (the cursor starts at the current pending-queue length).
    pub fn install_pending_transaction_filter(&mut self) -> usize {
        let state = self.state.lock().unwrap();
        let pending_transactions = state.all_pending_transaction_hashes();
        let id = self.filters.len();

        self.filters.insert(id, Filter::PendingTransaction(pending_transactions.len()));
        self.unmodified_filters.insert(id, Filter::PendingTransaction(pending_transactions.len()));
        id
    }

    /// Remove a filter; subsequent queries for `id` yield `Error::NotFound`.
    pub fn uninstall_filter(&mut self, id: usize) {
        self.filters.remove(&id);
        self.unmodified_filters.remove(&id);
    }

    /// Return all logs matching the filter as originally installed,
    /// ignoring any cursor advancement done by `get_changes`.
    /// Only valid for log filters; other kinds report `NotFound`.
    pub fn get_logs(&mut self, id: usize) -> Result<Vec<RPCLog>, Error> {
        let state = self.state.lock().unwrap();

        let filter = self.unmodified_filters.get(&id).ok_or(Error::NotFound)?;

        match filter {
            &Filter::Log(ref filter) => {
                let ret = get_logs(&state, filter.clone())?;
                Ok(ret)
            },
            _ => Err(Error::NotFound),
        }
    }

    /// Return only what changed since the last poll, advancing the filter's
    /// cursor. `Left`: hex-encoded hashes (pending-transaction and block
    /// filters); `Right`: matching logs (log filters).
    pub fn get_changes(&mut self, id: usize) -> Result<Either<Vec<String>, Vec<RPCLog>>, Error> {
        let state = self.state.lock().unwrap();
        let filter = self.filters.get_mut(&id).ok_or(Error::NotFound)?;

        match filter {
            &mut Filter::PendingTransaction(ref mut next_start) => {
                let pending_transactions = state.all_pending_transaction_hashes();
                let mut ret = Vec::new();
                // Report the hashes appended since the last poll.
                while *next_start < pending_transactions.len() {
                    ret.push(format!("0x{:x}", &pending_transactions[*next_start]));
                    *next_start += 1;
                }
                Ok(Either::Left(ret))
            },
            &mut Filter::Block(ref mut next_start) => {
                let mut ret = Vec::new();
                // Report header hashes of blocks mined since the last poll.
                while *next_start <= state.block_height() {
                    ret.push(format!("0x{:x}", state.get_block_by_number(*next_start).header.header_hash()));
                    *next_start += 1;
                }
                Ok(Either::Left(ret))
            },
            &mut Filter::Log(ref mut filter) => {
                let ret = get_logs(&state, filter.clone())?;
                // Advance the window so the next poll only sees new blocks.
                filter.from_block = state.block_height() + 1;
                Ok(Either::Right(ret))
            },
        }
    }
}
32.037433
112
0.545652
2f4020162be36b8ea99e1bbe23d13a9718c37b5a
18,388
use crate::abi::{self, Abi, Align, FieldPlacement, Size}; use crate::abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods}; use crate::spec::{self, HasTargetSpec}; mod aarch64; mod amdgpu; mod arm; mod asmjs; mod cheri; mod hexagon; mod mips; mod mips64; mod msp430; mod nvptx; mod nvptx64; mod powerpc; mod powerpc64; mod riscv; mod s390x; mod sparc; mod sparc64; mod x86; mod x86_64; mod x86_win64; mod wasm32; #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub enum IgnoreMode { /// C-variadic arguments. CVarArgs, /// A zero-sized type. Zst, } #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub enum PassMode { /// Ignore the argument (useful for empty structs and C-variadic args). Ignore(IgnoreMode), /// Pass the argument directly. Direct(ArgAttributes), /// Pass a pair's elements directly in two arguments. Pair(ArgAttributes, ArgAttributes), /// Pass the argument after casting it, to either /// a single uniform or a pair of registers. Cast(CastTarget), /// Pass the argument indirectly via a hidden pointer. /// The second value, if any, is for the extra data (vtable or length) /// which indicates that it refers to an unsized rvalue. Indirect(ArgAttributes, Option<ArgAttributes>), } // Hack to disable non_upper_case_globals only for the bitflags! and not for the rest // of this module pub use attr_impl::ArgAttribute; #[allow(non_upper_case_globals)] #[allow(unused)] mod attr_impl { // The subset of llvm::Attribute needed for arguments, packed into a bitfield. bitflags::bitflags! { #[derive(Default)] pub struct ArgAttribute: u16 { const ByVal = 1 << 0; const NoAlias = 1 << 1; const NoCapture = 1 << 2; const NonNull = 1 << 3; const ReadOnly = 1 << 4; const SExt = 1 << 5; const StructRet = 1 << 6; const ZExt = 1 << 7; const InReg = 1 << 8; } } } /// A compact representation of LLVM attributes (at least those relevant for this module) /// that can be manipulated without interacting with LLVM's Attribute machinery. 
#[derive(Copy, Clone, PartialEq, Eq, Debug)] pub struct ArgAttributes { pub regular: ArgAttribute, pub pointee_size: Size, pub pointee_align: Option<Align> } impl ArgAttributes { pub fn new() -> Self { ArgAttributes { regular: ArgAttribute::default(), pointee_size: Size::ZERO, pointee_align: None, } } pub fn set(&mut self, attr: ArgAttribute) -> &mut Self { self.regular |= attr; self } pub fn contains(&self, attr: ArgAttribute) -> bool { self.regular.contains(attr) } } #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum RegKind { Integer, Float, Vector } #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub struct Reg { pub kind: RegKind, pub size: Size, } macro_rules! reg_ctor { ($name:ident, $kind:ident, $bits:expr) => { pub fn $name() -> Reg { Reg { kind: RegKind::$kind, size: Size::from_bits($bits) } } } } impl Reg { reg_ctor!(i8, Integer, 8); reg_ctor!(i16, Integer, 16); reg_ctor!(i32, Integer, 32); reg_ctor!(i64, Integer, 64); reg_ctor!(f32, Float, 32); reg_ctor!(f64, Float, 64); } impl Reg { pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align { let dl = cx.data_layout(); match self.kind { RegKind::Integer => { match self.size.bits() { 1 => dl.i1_align.abi, 2..=8 => dl.i8_align.abi, 9..=16 => dl.i16_align.abi, 17..=32 => dl.i32_align.abi, 33..=64 => dl.i64_align.abi, 65..=128 => dl.i128_align.abi, _ => panic!("unsupported integer: {:?}", self) } } RegKind::Float => { match self.size.bits() { 32 => dl.f32_align.abi, 64 => dl.f64_align.abi, _ => panic!("unsupported float: {:?}", self) } } RegKind::Vector => dl.vector_align(self.size).abi, } } } /// An argument passed entirely registers with the /// same kind (e.g., HFA / HVA on PPC64 and AArch64). 
#[derive(Clone, Copy, PartialEq, Eq, Debug)] pub struct Uniform { pub unit: Reg, /// The total size of the argument, which can be: /// * equal to `unit.size` (one scalar/vector), /// * a multiple of `unit.size` (an array of scalar/vectors), /// * if `unit.kind` is `Integer`, the last element /// can be shorter, i.e., `{ i64, i64, i32 }` for /// 64-bit integers with a total size of 20 bytes. pub total: Size, } impl From<Reg> for Uniform { fn from(unit: Reg) -> Uniform { Uniform { unit, total: unit.size } } } impl Uniform { pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align { self.unit.align(cx) } } #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub struct CastTarget { pub prefix: [Option<RegKind>; 8], pub prefix_chunk: Size, pub rest: Uniform, } impl From<Reg> for CastTarget { fn from(unit: Reg) -> CastTarget { CastTarget::from(Uniform::from(unit)) } } impl From<Uniform> for CastTarget { fn from(uniform: Uniform) -> CastTarget { CastTarget { prefix: [None; 8], prefix_chunk: Size::ZERO, rest: uniform } } } impl CastTarget { pub fn pair(a: Reg, b: Reg) -> CastTarget { CastTarget { prefix: [Some(a.kind), None, None, None, None, None, None, None], prefix_chunk: a.size, rest: Uniform::from(b) } } pub fn size<C: HasDataLayout>(&self, cx: &C) -> Size { (self.prefix_chunk * self.prefix.iter().filter(|x| x.is_some()).count() as u64) .align_to(self.rest.align(cx)) + self.rest.total } pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align { self.prefix.iter() .filter_map(|x| x.map(|kind| Reg { kind, size: self.prefix_chunk }.align(cx))) .fold(cx.data_layout().aggregate_align.abi.max(self.rest.align(cx)), |acc, align| acc.max(align)) } } /// Returns value from the `homogeneous_aggregate` test function. #[derive(Copy, Clone, Debug)] pub enum HomogeneousAggregate { /// Yes, all the "leaf fields" of this struct are passed in the /// same way (specified in the `Reg` value). Homogeneous(Reg), /// There are distinct leaf fields passed in different ways, /// or this is uninhabited. 
Heterogeneous, /// There are no leaf fields at all. NoData, } impl HomogeneousAggregate { /// If this is a homogeneous aggregate, returns the homogeneous /// unit, else `None`. pub fn unit(self) -> Option<Reg> { if let HomogeneousAggregate::Homogeneous(r) = self { Some(r) } else { None } } } impl<'a, Ty> TyLayout<'a, Ty> { fn is_aggregate(&self) -> bool { match self.abi { Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } => false, Abi::ScalarPair(..) | Abi::Aggregate { .. } => true } } /// Returns `true` if this layout is an aggregate containing fields of only /// a single type (e.g., `(u32, u32)`). Such aggregates are often /// special-cased in ABIs. /// /// Note: We generally ignore fields of zero-sized type when computing /// this value (see #56877). /// /// This is public so that it can be used in unit tests, but /// should generally only be relevant to the ABI details of /// specific targets. pub fn homogeneous_aggregate<C>(&self, cx: &C) -> HomogeneousAggregate where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf<Ty = Ty, TyLayout = Self> { match self.abi { Abi::Uninhabited => HomogeneousAggregate::Heterogeneous, // The primitive for this algorithm. Abi::Scalar(ref scalar) => { let kind = match scalar.value { abi::Int(..) | abi::Pointer => RegKind::Integer, abi::Float(_) => RegKind::Float, }; HomogeneousAggregate::Homogeneous(Reg { kind, size: self.size }) } Abi::Vector { .. } => { assert!(!self.is_zst()); HomogeneousAggregate::Homogeneous(Reg { kind: RegKind::Vector, size: self.size }) } Abi::ScalarPair(..) | Abi::Aggregate { .. } => { let mut total = Size::ZERO; let mut result = None; let is_union = match self.fields { FieldPlacement::Array { count, .. } => { if count > 0 { return self.field(cx, 0).homogeneous_aggregate(cx); } else { return HomogeneousAggregate::NoData; } } FieldPlacement::Union(_) => true, FieldPlacement::Arbitrary { .. 
} => false }; for i in 0..self.fields.count() { if !is_union && total != self.fields.offset(i) { return HomogeneousAggregate::Heterogeneous; } let field = self.field(cx, i); match (result, field.homogeneous_aggregate(cx)) { (_, HomogeneousAggregate::NoData) => { // Ignore fields that have no data } (_, HomogeneousAggregate::Heterogeneous) => { // The field itself must be a homogeneous aggregate. return HomogeneousAggregate::Heterogeneous; } // If this is the first field, record the unit. (None, HomogeneousAggregate::Homogeneous(unit)) => { result = Some(unit); } // For all following fields, the unit must be the same. (Some(prev_unit), HomogeneousAggregate::Homogeneous(unit)) => { if prev_unit != unit { return HomogeneousAggregate::Heterogeneous; } } } // Keep track of the offset (without padding). let size = field.size; if is_union { total = total.max(size); } else { total += size; } } // There needs to be no padding. if total != self.size { HomogeneousAggregate::Heterogeneous } else { match result { Some(reg) => { assert_ne!(total, Size::ZERO); HomogeneousAggregate::Homogeneous(reg) } None => { assert_eq!(total, Size::ZERO); HomogeneousAggregate::NoData } } } } } } } /// Information about how to pass an argument to, /// or return a value from, a function, under some ABI. #[derive(Debug)] pub struct ArgType<'a, Ty> { pub layout: TyLayout<'a, Ty>, /// Dummy argument, which is emitted before the real argument. pub pad: Option<Reg>, pub mode: PassMode, } impl<'a, Ty> ArgType<'a, Ty> { pub fn new(layout: TyLayout<'a, Ty>) -> Self { ArgType { layout, pad: None, mode: PassMode::Direct(ArgAttributes::new()), } } pub fn make_indirect(&mut self) { assert_eq!(self.mode, PassMode::Direct(ArgAttributes::new())); // Start with fresh attributes for the pointer. let mut attrs = ArgAttributes::new(); // For non-immediate arguments the callee gets its own copy of // the value on the stack, so there are no aliases. 
It's also // program-invisible so can't possibly capture attrs.set(ArgAttribute::NoAlias) .set(ArgAttribute::NoCapture) .set(ArgAttribute::NonNull); attrs.pointee_size = self.layout.size; // FIXME(eddyb) We should be doing this, but at least on // i686-pc-windows-msvc, it results in wrong stack offsets. // attrs.pointee_align = Some(self.layout.align.abi); let extra_attrs = if self.layout.is_unsized() { Some(ArgAttributes::new()) } else { None }; self.mode = PassMode::Indirect(attrs, extra_attrs); } pub fn make_indirect_byval(&mut self) { self.make_indirect(); match self.mode { PassMode::Indirect(ref mut attrs, _) => { attrs.set(ArgAttribute::ByVal); } _ => unreachable!() } } pub fn extend_integer_width_to(&mut self, bits: u64) { // Only integers have signedness if let Abi::Scalar(ref scalar) = self.layout.abi { if let abi::Int(i, signed) = scalar.value { if i.size().bits() < bits { if let PassMode::Direct(ref mut attrs) = self.mode { attrs.set(if signed { ArgAttribute::SExt } else { ArgAttribute::ZExt }); } } } } } pub fn cast_to<T: Into<CastTarget>>(&mut self, target: T) { assert_eq!(self.mode, PassMode::Direct(ArgAttributes::new())); self.mode = PassMode::Cast(target.into()); } pub fn pad_with(&mut self, reg: Reg) { self.pad = Some(reg); } pub fn is_indirect(&self) -> bool { match self.mode { PassMode::Indirect(..) 
=> true, _ => false } } pub fn is_sized_indirect(&self) -> bool { match self.mode { PassMode::Indirect(_, None) => true, _ => false } } pub fn is_unsized_indirect(&self) -> bool { match self.mode { PassMode::Indirect(_, Some(_)) => true, _ => false } } pub fn is_ignore(&self) -> bool { match self.mode { PassMode::Ignore(_) => true, _ => false } } } #[derive(Copy, Clone, PartialEq, Debug)] pub enum Conv { C, ArmAapcs, Msp430Intr, PtxKernel, X86Fastcall, X86Intr, X86Stdcall, X86ThisCall, X86VectorCall, X86_64SysV, X86_64Win64, AmdGpuKernel, } /// Metadata describing how the arguments to a native function /// should be passed in order to respect the native ABI. /// /// I will do my best to describe this structure, but these /// comments are reverse-engineered and may be inaccurate. -NDM #[derive(Debug)] pub struct FnType<'a, Ty> { /// The LLVM types of each argument. pub args: Vec<ArgType<'a, Ty>>, /// LLVM return type. pub ret: ArgType<'a, Ty>, pub c_variadic: bool, pub conv: Conv, } impl<'a, Ty> FnType<'a, Ty> { pub fn adjust_for_cabi<C>(&mut self, cx: &C, abi: spec::abi::Abi) -> Result<(), String> where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout + HasTargetSpec { match &cx.target_spec().arch[..] 
{ "x86" => { let flavor = if abi == spec::abi::Abi::Fastcall { x86::Flavor::Fastcall } else { x86::Flavor::General }; x86::compute_abi_info(cx, self, flavor); }, "x86_64" => if abi == spec::abi::Abi::SysV64 { x86_64::compute_abi_info(cx, self); } else if abi == spec::abi::Abi::Win64 || cx.target_spec().options.is_like_windows { x86_win64::compute_abi_info(self); } else { x86_64::compute_abi_info(cx, self); }, "aarch64" => aarch64::compute_abi_info(cx, self), "amdgpu" => amdgpu::compute_abi_info(cx, self), "arm" => arm::compute_abi_info(cx, self), "cheri" => cheri::compute_abi_info(cx, self), "mips" => mips::compute_abi_info(cx, self), "mips64" => mips64::compute_abi_info(cx, self), "powerpc" => powerpc::compute_abi_info(cx, self), "powerpc64" => powerpc64::compute_abi_info(cx, self), "s390x" => s390x::compute_abi_info(cx, self), "asmjs" => asmjs::compute_abi_info(cx, self), "wasm32" => { if cx.target_spec().llvm_target.contains("emscripten") { asmjs::compute_abi_info(cx, self) } else { wasm32::compute_abi_info(self) } } "msp430" => msp430::compute_abi_info(self), "sparc" => sparc::compute_abi_info(cx, self), "sparc64" => sparc64::compute_abi_info(cx, self), "nvptx" => nvptx::compute_abi_info(self), "nvptx64" => nvptx64::compute_abi_info(self), "hexagon" => hexagon::compute_abi_info(self), "riscv32" => riscv::compute_abi_info(self, 32), "riscv64" => riscv::compute_abi_info(self, 64), a => return Err(format!("unrecognized arch \"{}\" in target specification", a)) } if let PassMode::Indirect(ref mut attrs, _) = self.ret.mode { attrs.set(ArgAttribute::StructRet); } Ok(()) } }
30.852349
96
0.516859
fcfbb9a855463b01b1d276cc280575dd47f2b478
2,441
use std::convert::TryInto;

/// All divisors of `n`, in ascending order, including `n` itself.
fn get_divisors(n: u32) -> Vec<u32> {
    (1..=n / 2)
        .filter(|d| n % d == 0)
        .chain(std::iter::once(n))
        .collect()
}

/// Recursive subset-sum test: can some subset of `divisors` sum to `x`?
/// Each call either takes the first divisor into the sum or leaves it out.
fn is_summable(x: i32, divisors: &[u32]) -> bool {
    if divisors.is_empty() {
        return false;
    }
    if divisors.contains(&(x as u32)) {
        return true;
    }
    match divisors.split_first() {
        Some((head, rest)) => is_summable(x - *head as i32, rest) || is_summable(x, rest),
        None => false,
    }
}

/// Calculates whether the number is a Zumkeller number.
///
/// Zumkeller numbers are those whose divisors can be partitioned into two
/// disjoint sets with equal sums; every divisor belongs to exactly one set.
fn is_zumkeller_number(number: u32) -> bool {
    // Known shortcut: every n congruent to 6 or 12 (mod 18) is Zumkeller.
    if matches!(number % 18, 6 | 12) {
        return true;
    }
    let divisors = get_divisors(number);
    let sigma: u32 = divisors.iter().sum();
    // A zero or odd divisor sum can never split into two equal halves.
    if sigma == 0 || sigma % 2 == 1 {
        return false;
    }
    // Odd numbers with positive, even abundance are always Zumkeller.
    let abundance = sigma as i32 - 2 * number as i32;
    if number % 2 == 1 && abundance > 0 && abundance % 2 == 0 {
        return true;
    }
    let half = sigma / 2;
    divisors.contains(&half)
        || (divisors.iter().any(|&d| d < half)
            && is_summable(half.try_into().unwrap(), &divisors))
}

fn main() {
    println!("\nFirst 220 Zumkeller numbers:");
    let mut found: u32 = 0;
    for candidate in 0u32.. {
        if found == 220 {
            break;
        }
        if is_zumkeller_number(candidate) {
            print!("{:>3}", candidate);
            found += 1;
            print!("{}", if found % 20 == 0 { "\n" } else { "," });
        }
    }

    println!("\nFirst 40 odd Zumkeller numbers:");
    let mut found: u32 = 0;
    for candidate in (3u32..).step_by(2) {
        if found == 40 {
            break;
        }
        if is_zumkeller_number(candidate) {
            print!("{:>5}", candidate);
            found += 1;
            print!("{}", if found % 20 == 0 { "\n" } else { "," });
        }
    }
}
29.059524
78
0.542401
874e5b59c69e4cd0a128adf6cb1f080138b33644
10,569
//! Implementation of circle shape. use std::{ f64::consts::{FRAC_PI_2, PI}, iter, ops::{Add, Mul, Sub}, }; use crate::{Affine, Arc, ArcAppendIter, Ellipse, PathEl, Point, Rect, Shape, Vec2}; /// A circle. #[repr(C)] #[derive(Clone, Copy, Default, Debug, PartialEq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct Circle { /// The center. pub center: Point, /// The radius. pub radius: f64, } impl Circle { /// A new circle from center and radius. #[inline] pub fn new(center: impl Into<Point>, radius: f64) -> Circle { Circle { center: center.into(), radius, } } /// Create a [`CircleSegment`] by cutting out parts of this circle. pub fn segment(self, inner_radius: f64, start_angle: f64, sweep_angle: f64) -> CircleSegment { CircleSegment { center: self.center, outer_radius: self.radius, inner_radius, start_angle, sweep_angle, } } /// Is this circle finite? #[inline] pub fn is_finite(&self) -> bool { self.center.is_finite() && self.radius.is_finite() } /// Is this circle NaN? 
#[inline] pub fn is_nan(&self) -> bool { self.center.is_nan() || self.radius.is_nan() } } impl Add<Vec2> for Circle { type Output = Circle; #[inline] fn add(self, v: Vec2) -> Circle { Circle { center: self.center + v, radius: self.radius, } } } impl Sub<Vec2> for Circle { type Output = Circle; #[inline] fn sub(self, v: Vec2) -> Circle { Circle { center: self.center - v, radius: self.radius, } } } impl Mul<Circle> for Affine { type Output = Ellipse; fn mul(self, other: Circle) -> Self::Output { self * Ellipse::from(other) } } #[doc(hidden)] pub struct CirclePathIter { circle: Circle, delta_th: f64, arm_len: f64, ix: usize, n: usize, } impl Shape for Circle { type PathElementsIter = CirclePathIter; fn path_elements(&self, tolerance: f64) -> CirclePathIter { let scaled_err = self.radius.abs() / tolerance; let (n, arm_len) = if scaled_err < 1.0 / 1.9608e-4 { // Solution from http://spencermortensen.com/articles/bezier-circle/ (4, 0.551915024494) } else { // This is empirically determined to fall within error tolerance. let n = (1.1163 * scaled_err).powf(1.0 / 6.0).ceil() as usize; // Note: this isn't minimum error, but it is simple and we can easily // estimate the error. 
let arm_len = (4.0 / 3.0) * (FRAC_PI_2 / (n as f64)).tan(); (n, arm_len) }; CirclePathIter { circle: *self, delta_th: 2.0 * PI / (n as f64), arm_len, ix: 0, n, } } #[inline] fn area(&self) -> f64 { PI * self.radius.powi(2) } #[inline] fn perimeter(&self, _accuracy: f64) -> f64 { (2.0 * PI * self.radius).abs() } fn winding(&self, pt: Point) -> i32 { if (pt - self.center).hypot2() < self.radius.powi(2) { 1 } else { 0 } } #[inline] fn bounding_box(&self) -> Rect { let r = self.radius.abs(); let (x, y) = self.center.into(); Rect::new(x - r, y - r, x + r, y + r) } fn as_circle(&self) -> Option<Circle> { Some(*self) } } impl Iterator for CirclePathIter { type Item = PathEl; fn next(&mut self) -> Option<PathEl> { let a = self.arm_len; let r = self.circle.radius; let (x, y) = self.circle.center.into(); let ix = self.ix; self.ix += 1; if ix == 0 { Some(PathEl::MoveTo(Point::new(x + r, y))) } else if ix <= self.n { let th1 = self.delta_th * (ix as f64); let th0 = th1 - self.delta_th; let (c0, s0) = (th0.cos(), th0.sin()); let (c1, s1) = if ix == self.n { (1.0, 0.0) } else { (th1.cos(), th1.sin()) }; Some(PathEl::CurveTo( Point::new(x + r * (c0 - a * s0), y + r * (s0 + a * c0)), Point::new(x + r * (c1 + a * s1), y + r * (s1 - a * c1)), Point::new(x + r * c1, y + r * s1), )) } else if ix == self.n + 1 { Some(PathEl::ClosePath) } else { None } } } /// A segment of a circle. /// /// If `inner_radius > 0`, then the shape will be a doughnut segment. pub struct CircleSegment { /// The center. pub center: Point, /// The outer radius. pub outer_radius: f64, /// The inner radius. pub inner_radius: f64, /// The angle to start drawing the segment (in radians). pub start_angle: f64, /// The arc length of the segment (in radians). pub sweep_angle: f64, } impl CircleSegment { /// Create a `CircleSegment` out of its constituent parts. 
pub fn new( center: impl Into<Point>, outer_radius: f64, inner_radius: f64, start_angle: f64, sweep_angle: f64, ) -> Self { CircleSegment { center: center.into(), outer_radius, inner_radius, start_angle, sweep_angle, } } /// Is this circle segment finite? #[inline] pub fn is_finite(&self) -> bool { self.center.is_finite() && self.outer_radius.is_finite() && self.inner_radius.is_finite() && self.start_angle.is_finite() && self.sweep_angle.is_finite() } /// Is this circle segment NaN? #[inline] pub fn is_nan(&self) -> bool { self.center.is_nan() || self.outer_radius.is_nan() || self.inner_radius.is_nan() || self.start_angle.is_nan() || self.sweep_angle.is_nan() } } impl Add<Vec2> for CircleSegment { type Output = CircleSegment; #[inline] fn add(self, v: Vec2) -> Self { Self { center: self.center + v, ..self } } } impl Sub<Vec2> for CircleSegment { type Output = CircleSegment; #[inline] fn sub(self, v: Vec2) -> Self { Self { center: self.center - v, ..self } } } type CircleSegmentPathIter = std::iter::Chain< iter::Chain< iter::Chain<iter::Chain<iter::Once<PathEl>, iter::Once<PathEl>>, ArcAppendIter>, iter::Once<PathEl>, >, ArcAppendIter, >; impl Shape for CircleSegment { type PathElementsIter = CircleSegmentPathIter; fn path_elements(&self, tolerance: f64) -> CircleSegmentPathIter { iter::once(PathEl::MoveTo(point_on_circle( self.center, self.inner_radius, self.start_angle, ))) // First radius .chain(iter::once(PathEl::LineTo(point_on_circle( self.center, self.outer_radius, self.start_angle, )))) // outer arc .chain( Arc { center: self.center, radii: Vec2::new(self.outer_radius, self.outer_radius), start_angle: self.start_angle, sweep_angle: self.sweep_angle, x_rotation: 0.0, } .append_iter(tolerance), ) // second radius .chain(iter::once(PathEl::LineTo(point_on_circle( self.center, self.inner_radius, self.start_angle + self.sweep_angle, )))) // inner arc .chain( Arc { center: self.center, radii: Vec2::new(self.inner_radius, self.inner_radius), start_angle: 
self.start_angle + self.sweep_angle, sweep_angle: -self.sweep_angle, x_rotation: 0.0, } .append_iter(tolerance), ) } #[inline] fn area(&self) -> f64 { 0.5 * (self.outer_radius.powi(2) - self.inner_radius.powi(2)).abs() * self.sweep_angle } #[inline] fn perimeter(&self, _accuracy: f64) -> f64 { 2.0 * (self.outer_radius - self.inner_radius).abs() + self.sweep_angle * (self.inner_radius + self.outer_radius) } fn winding(&self, pt: Point) -> i32 { let angle = (pt - self.center).atan2(); if angle < self.start_angle || angle > self.start_angle + self.sweep_angle { return 0; } let dist2 = (pt - self.center).hypot2(); if (dist2 < self.outer_radius.powi(2) && dist2 > self.inner_radius.powi(2)) || // case where outer_radius < inner_radius (dist2 < self.inner_radius.powi(2) && dist2 > self.outer_radius.powi(2)) { 1 } else { 0 } } #[inline] fn bounding_box(&self) -> Rect { // todo this is currently not tight let r = self.inner_radius.max(self.outer_radius); let (x, y) = self.center.into(); Rect::new(x - r, y - r, x + r, y + r) } } #[cfg(test)] mod tests { use crate::{Circle, Point, Shape}; use std::f64::consts::PI; fn assert_approx_eq(x: f64, y: f64) { // Note: we might want to be more rigorous in testing the accuracy // of the conversion into Béziers. But this seems good enough. 
assert!((x - y).abs() < 1e-7, "{} != {}", x, y); } #[test] fn area_sign() { let center = Point::new(5.0, 5.0); let c = Circle::new(center, 5.0); assert_approx_eq(c.area(), 25.0 * PI); assert_eq!(c.winding(center), 1); let p = c.to_path(1e-9); assert_approx_eq(c.area(), p.area()); assert_eq!(c.winding(center), p.winding(center)); let c_neg_radius = Circle::new(center, -5.0); assert_approx_eq(c_neg_radius.area(), 25.0 * PI); assert_eq!(c_neg_radius.winding(center), 1); let p_neg_radius = c_neg_radius.to_path(1e-9); assert_approx_eq(c_neg_radius.area(), p_neg_radius.area()); assert_eq!(c_neg_radius.winding(center), p_neg_radius.winding(center)); } } #[inline] fn point_on_circle(center: Point, radius: f64, angle: f64) -> Point { center + Vec2 { x: angle.cos() * radius, y: angle.sin() * radius, } }
26.824873
98
0.523039
e24c0f4f1028b32e54990a1f8d7ffcd739afe901
7,780
use crate::cpu_6502::*; /// Apply the logical "or" operator on the accumulator. /// Function: A:=A or {adr} /// Flags: N Z pub fn ora(cpu: &mut Cpu6502, mode: Mode, extra_cycle: u8) { let (_, operand) = cpu.get_address_and_operand(mode, extra_cycle); cpu.a |= operand; cpu.update_zero_and_negative_flag(cpu.a); } /// Apply the logical "and" operator on the accumulator. /// Function: A:=A&{adr} /// Flags: N Z pub fn and(cpu: &mut Cpu6502, mode: Mode, extra_cycle: u8) { let (_, operand) = cpu.get_address_and_operand(mode, extra_cycle); cpu.a &= operand; cpu.update_zero_and_negative_flag(cpu.a); } /// Logical Exclusive OR /// Function: A:=A exor {adr} /// Flags: N Z pub fn eor(cpu: &mut Cpu6502, mode: Mode, extra_cycle: u8) { let (_, operand) = cpu.get_address_and_operand(mode, extra_cycle); cpu.a ^= operand; cpu.update_zero_and_negative_flag(cpu.a); } fn add_impl(cpu: &mut Cpu6502, operand: u8) { // Translating to u16 means that the values won't wrap, so wrapping // add is not needed. let result_u16 = // Get the carry from the previous operation, and carry it over // into this one, but operate in the u16 space as to not overflow. cpu.get_carry() as u16 + // Either 0x00 or 0x01 cpu.a as u16 + operand as u16; let result_u8 = result_u16 as u8; cpu.update_zero_and_negative_flag(result_u8); // Take the 0x100 value here, and set it to the register. This can then carry // over into the next byte of a number. cpu.update_carry_flag(result_u16); cpu.update_overflow_flag(operand, result_u8); cpu.a = result_u8; } /// Add with Carry /// Function: A:=A+{adr}+C /// Flags: N V Z C pub fn adc(cpu: &mut Cpu6502, mode: Mode, extra_cycle: u8) { let (_, operand) = cpu.get_address_and_operand(mode, extra_cycle); add_impl(cpu, operand); } /// Subtract with Carry /// Function: A:=A-{adr}+C /// Flags: N V Z C pub fn sbc(cpu: &mut Cpu6502, mode: Mode, extra_cycle: u8) { // Signed numbers range: -128 to 127 // 0b0000_0000, 0 // 0b0000_0001, 1 // 0b0000_0010, 1 // ... 
// 0b0111_1111, 127 // 0b1000_0000, -128 // 0b1000_0001, -127 // ... // 0b1111_1111, -1 let (_, operand) = cpu.get_address_and_operand(mode, extra_cycle); // In order to properly subtract we need the two's complement of the operand. // Normally this would be accomplished by; // `let twos_complement = !operand + 0x1;` // // However, in this CPU, this is done by inverting the operand here, and letting // the carry flag be the + 1. // // Because of this, it's assumed the assembly will run SEC before running sbc. add_impl(cpu, !operand); } /// Compare A with source /// http://6502.org/tutorials/compare_instructions.html /// Function: A-{adr} /// Flags: N Z C pub fn cmp(cpu: &mut Cpu6502, mode: Mode, extra_cycle: u8) { let (_, operand) = cpu.get_address_and_operand(mode, extra_cycle); cpu.update_zero_and_negative_flag(cpu.a.wrapping_sub(operand)); cpu.set_status_flag(StatusFlag::Carry, cpu.a >= operand); } /// Compare X with source /// http://6502.org/tutorials/compare_instructions.html /// Function: X-{adr} /// Flags: N Z C pub fn cpx(cpu: &mut Cpu6502, mode: Mode, extra_cycle: u8) { let (_, operand) = cpu.get_address_and_operand(mode, extra_cycle); cpu.update_zero_and_negative_flag(cpu.x.wrapping_sub(operand)); cpu.set_status_flag(StatusFlag::Carry, cpu.x >= operand); } /// Compare Y with source /// http://6502.org/tutorials/compare_instructions.html /// Function: Y-{adr} /// Flags: N Z C pub fn cpy(cpu: &mut Cpu6502, mode: Mode, extra_cycle: u8) { let (_, operand) = cpu.get_address_and_operand(mode, extra_cycle); cpu.update_zero_and_negative_flag(cpu.y.wrapping_sub(operand)); cpu.set_status_flag(StatusFlag::Carry, cpu.y >= operand); } /// Decrement at an address /// Function: {adr}:={adr}-1 /// Flags: N Z pub fn dec(cpu: &mut Cpu6502, mode: Mode, extra_cycle: u8) { let (address, operand) = cpu.get_address_and_operand(mode, extra_cycle); let result = operand.wrapping_sub(1); cpu.update_zero_and_negative_flag(result); cpu.bus.borrow_mut().set_u8(address, result); } /// 
Decrement X /// Function: X:=X-1 /// Flags: N Z pub fn dex(cpu: &mut Cpu6502, _mode: Mode, _extra_cycle: u8) { cpu.x = cpu.x.wrapping_sub(1); cpu.update_zero_and_negative_flag(cpu.x); } /// Decrement Y /// Function: Y:=Y-1 /// Flags: N Z pub fn dey(cpu: &mut Cpu6502, _mode: Mode, _extra_cycle: u8) { cpu.y = cpu.y.wrapping_sub(1); cpu.update_zero_and_negative_flag(cpu.x); } /// Increment the address /// Function: {adr}:={adr}+1 /// Flags: N Z pub fn inc(cpu: &mut Cpu6502, mode: Mode, extra_cycle: u8) { let (address, operand) = cpu.get_address_and_operand(mode, extra_cycle); let result = operand.wrapping_add(1); cpu.update_zero_and_negative_flag(result); cpu.bus.borrow_mut().set_u8(address, result); } /// Increment X /// Function: X:=X+1 /// Flags: N Z pub fn inx(cpu: &mut Cpu6502, _mode: Mode, _extra_cycle: u8) { cpu.x = cpu.x.wrapping_add(1); cpu.update_zero_and_negative_flag(cpu.x); } /// Increment Y /// Function: Y:=Y+1 /// Flags: N Z pub fn iny(cpu: &mut Cpu6502, _mode: Mode, _extra_cycle: u8) { cpu.y = cpu.y.wrapping_add(1); cpu.update_zero_and_negative_flag(cpu.y); } /// Arithmetic shift left /// Function: {adr}:={adr}*2 /// Flags: N Z C pub fn asl(cpu: &mut Cpu6502, mode: Mode, extra_cycle: u8) { let (address, operand) = cpu.get_address_and_maybe_operand(mode, extra_cycle); let result = operand << 1; cpu.update_zero_and_negative_flag(result); // The Carry flag contains the bit that was shifted out: cpu.set_status_flag(StatusFlag::Carry, operand & 0b1000_0000 != 0); if let Some(address) = address { cpu.bus.borrow_mut().set_u8(address, result); } else { cpu.a = result; } } /// Rotate left /// Function: {adr}:={adr}*2+C /// Flags: N Z C pub fn rol(cpu: &mut Cpu6502, mode: Mode, extra_cycle: u8) { let (address, operand) = cpu.get_address_and_maybe_operand(mode, extra_cycle); let result = (operand << 1) | cpu.get_carry(); cpu.update_zero_and_negative_flag(result); // The Carry flag contains the bit that was shifted out: cpu.set_status_flag(StatusFlag::Carry, 
operand & 0b1000_0000 != 0); if let Some(address) = address { cpu.bus.borrow_mut().set_u8(address, result); } else { cpu.a = result; } } /// Logical shift right /// Function: {adr}:={adr}/2 /// Flags: N Z C pub fn lsr(cpu: &mut Cpu6502, mode: Mode, extra_cycle: u8) { let (address, operand) = cpu.get_address_and_maybe_operand(mode, extra_cycle); let result = operand >> 1; cpu.update_zero_and_negative_flag(result); // The Carry flag contains the bit that was shifted out: cpu.set_status_flag(StatusFlag::Carry, operand & 0b0000_0001 != 0); if let Some(address) = address { cpu.bus.borrow_mut().set_u8(address, result); } else { cpu.a = result; } } /// Rotate right /// Function: {adr}:={adr}/2+C*128 /// Flags: N Z C pub fn ror(cpu: &mut Cpu6502, mode: Mode, extra_cycle: u8) { let (address, operand) = cpu.get_address_and_maybe_operand(mode, extra_cycle); let result = // Shift the operand, {adr}/2 (operand >> 1) | // Move the carry bit to the beginning 0b0000_0001 -> 0b10000_000 // C*128 (cpu.get_carry() << 7); cpu.update_zero_and_negative_flag(result); // The Carry flag contains the bit that was shifted out: cpu.set_status_flag(StatusFlag::Carry, operand & 0b0000_0001 != 0); if let Some(address) = address { cpu.bus.borrow_mut().set_u8(address, result); } else { cpu.a = result; } }
32.689076
84
0.664781
e97db110326ae69af6dcd1490437efc67fc71db3
9,460
extern crate crossbeam_deque as deque; extern crate crossbeam_epoch as epoch; extern crate rand; use std::sync::atomic::Ordering::SeqCst; use std::sync::atomic::{AtomicBool, AtomicUsize}; use std::sync::{Arc, Mutex}; use std::thread; use deque::{Pop, Steal}; use rand::Rng; #[test] fn smoke() { let (w, s) = deque::fifo::<i32>(); assert_eq!(w.pop(), Pop::Empty); assert_eq!(s.steal(), Steal::Empty); w.push(1); assert_eq!(w.pop(), Pop::Data(1)); assert_eq!(w.pop(), Pop::Empty); assert_eq!(s.steal(), Steal::Empty); w.push(2); assert_eq!(s.steal(), Steal::Data(2)); assert_eq!(s.steal(), Steal::Empty); assert_eq!(w.pop(), Pop::Empty); w.push(3); w.push(4); w.push(5); assert_eq!(s.steal(), Steal::Data(3)); assert_eq!(s.steal(), Steal::Data(4)); assert_eq!(s.steal(), Steal::Data(5)); assert_eq!(s.steal(), Steal::Empty); w.push(6); w.push(7); w.push(8); w.push(9); assert_eq!(w.pop(), Pop::Data(6)); assert_eq!(s.steal(), Steal::Data(7)); assert_eq!(w.pop(), Pop::Data(8)); assert_eq!(w.pop(), Pop::Data(9)); assert_eq!(w.pop(), Pop::Empty); } #[test] fn steal_push() { const STEPS: usize = 50_000; let (w, s) = deque::fifo(); let t = thread::spawn(move || { for i in 0..STEPS { loop { if let Steal::Data(v) = s.steal() { assert_eq!(i, v); break; } } } }); for i in 0..STEPS { w.push(i); } t.join().unwrap(); } #[test] fn stampede() { const THREADS: usize = 8; const COUNT: usize = 50_000; let (w, s) = deque::fifo(); for i in 0..COUNT { w.push(Box::new(i + 1)); } let remaining = Arc::new(AtomicUsize::new(COUNT)); let threads = (0..THREADS) .map(|_| { let s = s.clone(); let remaining = remaining.clone(); thread::spawn(move || { let mut last = 0; while remaining.load(SeqCst) > 0 { if let Steal::Data(x) = s.steal() { assert!(last < *x); last = *x; remaining.fetch_sub(1, SeqCst); } } }) }).collect::<Vec<_>>(); let mut last = 0; while remaining.load(SeqCst) > 0 { loop { match w.pop() { Pop::Data(x) => { assert!(last < *x); last = *x; remaining.fetch_sub(1, SeqCst); break; } Pop::Empty => 
break, Pop::Retry => {} } } } for t in threads { t.join().unwrap(); } } fn run_stress() { const THREADS: usize = 8; const COUNT: usize = 50_000; let (w, s) = deque::fifo(); let done = Arc::new(AtomicBool::new(false)); let hits = Arc::new(AtomicUsize::new(0)); let threads = (0..THREADS) .map(|_| { let s = s.clone(); let done = done.clone(); let hits = hits.clone(); thread::spawn(move || { let (w2, _) = deque::fifo(); while !done.load(SeqCst) { if let Steal::Data(_) = s.steal() { hits.fetch_add(1, SeqCst); } if let Steal::Data(_) = s.steal_many(&w2) { hits.fetch_add(1, SeqCst); loop { match w2.pop() { Pop::Data(_) => { hits.fetch_add(1, SeqCst); } Pop::Empty => break, Pop::Retry => {} } } } } }) }).collect::<Vec<_>>(); let mut rng = rand::thread_rng(); let mut expected = 0; while expected < COUNT { if rng.gen_range(0, 3) == 0 { loop { match w.pop() { Pop::Data(_) => { hits.fetch_add(1, SeqCst); } Pop::Empty => break, Pop::Retry => {} } } } else { w.push(expected); expected += 1; } } while hits.load(SeqCst) < COUNT { loop { match w.pop() { Pop::Data(_) => { hits.fetch_add(1, SeqCst); } Pop::Empty => break, Pop::Retry => {} } } } done.store(true, SeqCst); for t in threads { t.join().unwrap(); } } #[test] fn stress() { run_stress(); } #[test] fn stress_pinned() { let _guard = epoch::pin(); run_stress(); } #[test] fn no_starvation() { const THREADS: usize = 8; const COUNT: usize = 50_000; let (w, s) = deque::fifo(); let done = Arc::new(AtomicBool::new(false)); let (threads, hits): (Vec<_>, Vec<_>) = (0..THREADS) .map(|_| { let s = s.clone(); let done = done.clone(); let hits = Arc::new(AtomicUsize::new(0)); let t = { let hits = hits.clone(); thread::spawn(move || { let (w2, _) = deque::fifo(); while !done.load(SeqCst) { if let Steal::Data(_) = s.steal() { hits.fetch_add(1, SeqCst); } if let Steal::Data(_) = s.steal_many(&w2) { hits.fetch_add(1, SeqCst); loop { match w2.pop() { Pop::Data(_) => { hits.fetch_add(1, SeqCst); } Pop::Empty => break, Pop::Retry => {} } } } } }) 
}; (t, hits) }).unzip(); let mut rng = rand::thread_rng(); let mut my_hits = 0; loop { for i in 0..rng.gen_range(0, COUNT) { if rng.gen_range(0, 3) == 0 && my_hits == 0 { loop { match w.pop() { Pop::Data(_) => my_hits += 1, Pop::Empty => break, Pop::Retry => {} } } } else { w.push(i); } } if my_hits > 0 && hits.iter().all(|h| h.load(SeqCst) > 0) { break; } } done.store(true, SeqCst); for t in threads { t.join().unwrap(); } } #[test] fn destructors() { const THREADS: usize = 8; const COUNT: usize = 50_000; const STEPS: usize = 1000; struct Elem(usize, Arc<Mutex<Vec<usize>>>); impl Drop for Elem { fn drop(&mut self) { self.1.lock().unwrap().push(self.0); } } let (w, s) = deque::fifo(); let dropped = Arc::new(Mutex::new(Vec::new())); let remaining = Arc::new(AtomicUsize::new(COUNT)); for i in 0..COUNT { w.push(Elem(i, dropped.clone())); } let threads = (0..THREADS) .map(|_| { let remaining = remaining.clone(); let s = s.clone(); thread::spawn(move || { let (w2, _) = deque::fifo(); let mut cnt = 0; while cnt < STEPS { if let Steal::Data(_) = s.steal() { cnt += 1; remaining.fetch_sub(1, SeqCst); } if let Steal::Data(_) = s.steal_many(&w2) { cnt += 1; remaining.fetch_sub(1, SeqCst); loop { match w2.pop() { Pop::Data(_) => { cnt += 1; remaining.fetch_sub(1, SeqCst); } Pop::Empty => break, Pop::Retry => {} } } } } }) }).collect::<Vec<_>>(); for _ in 0..STEPS { loop { match w.pop() { Pop::Data(_) => { remaining.fetch_sub(1, SeqCst); break; } Pop::Empty => break, Pop::Retry => {} } } } for t in threads { t.join().unwrap(); } let rem = remaining.load(SeqCst); assert!(rem > 0); { let mut v = dropped.lock().unwrap(); assert_eq!(v.len(), COUNT - rem); v.clear(); } drop((w, s)); { let mut v = dropped.lock().unwrap(); assert_eq!(v.len(), rem); v.sort(); for pair in v.windows(2) { assert_eq!(pair[0] + 1, pair[1]); } } }
25.430108
67
0.376321
22f0797d0d009e042007f1a508bc4df7352ea162
9,054
use alloc::{ collections::{ btree_map::{Iter, Values}, BTreeMap, }, format, string::String, }; use types::{ account::AccountHash, system_contract_errors::pos::{Error, Result}, U512, }; /// The maximum difference between the largest and the smallest stakes. // TODO: Should this be a percentage instead? // TODO: Pick a reasonable value. const MAX_SPREAD: U512 = U512::MAX; /// The maximum increase of stakes in a single bonding request. const MAX_INCREASE: U512 = U512::MAX; /// The maximum decrease of stakes in a single unbonding request. const MAX_DECREASE: U512 = U512::MAX; /// The maximum increase of stakes in millionths of the total stakes in a single bonding request. const MAX_REL_INCREASE: u64 = 1_000_000_000; /// The maximum decrease of stakes in millionths of the total stakes in a single unbonding request. const MAX_REL_DECREASE: u64 = 900_000; /// The stakes map, assigning the staked amount of motes to each bonded /// validator. #[derive(Clone, Debug, PartialEq)] pub struct Stakes(pub BTreeMap<AccountHash, U512>); impl Stakes { pub fn new(map: BTreeMap<AccountHash, U512>) -> Stakes { Stakes(map) } pub fn iter(&self) -> Iter<AccountHash, U512> { self.0.iter() } pub fn values(&self) -> Values<AccountHash, U512> { self.0.values() } pub fn strings(&self) -> impl Iterator<Item = String> + '_ { self.iter().map(|(account_hash, balance)| { let key_bytes = account_hash.as_bytes(); let hex_key = base16::encode_lower(&key_bytes); format!("v_{}_{}", hex_key, balance) }) } pub fn total_bonds(&self) -> U512 { self.values().fold(U512::zero(), |x, y| x + y) } /// If `maybe_amount` is `None`, removes all the validator's stakes, /// otherwise subtracts the given amount. If the stakes are lower than /// the specified amount, it also subtracts all the stakes. /// /// Returns the amount that was actually subtracted from the stakes, or an /// error if /// * unbonding the specified amount is not allowed, /// * tries to unbond last validator, /// * validator was not bonded. 
pub fn unbond(&mut self, validator: &AccountHash, maybe_amount: Option<U512>) -> Result<U512> { let min = self .max_without(validator) .unwrap_or_else(U512::zero) .saturating_sub(MAX_SPREAD); let max_decrease = MAX_DECREASE.min(self.sum() * MAX_REL_DECREASE / 1_000_000); if let Some(amount) = maybe_amount { // The minimum stake value to not violate the maximum spread. let stake = self.0.get_mut(validator).ok_or(Error::NotBonded)?; if *stake > amount { if *stake - amount < min { return Err(Error::SpreadTooHigh); } if amount > max_decrease { return Err(Error::UnbondTooLarge); } *stake -= amount; return Ok(amount); } } if self.0.len() == 1 { return Err(Error::CannotUnbondLastValidator); } // If the the amount is greater or equal to the stake, remove the validator. let stake = self.0.remove(validator).ok_or(Error::NotBonded)?; if let Some(amount) = maybe_amount { if amount > stake { return Err(Error::UnbondTooLarge); } } if stake > min.saturating_add(max_decrease) && stake > max_decrease { return Err(Error::UnbondTooLarge); } Ok(stake) } /// Adds `amount` to the validator's stakes. pub fn bond(&mut self, validator: &AccountHash, amount: U512) { self.0 .entry(*validator) .and_modify(|x| *x += amount) .or_insert(amount); } /// Returns an error if bonding the specified amount is not allowed. 
pub fn validate_bonding(&self, validator: &AccountHash, amount: U512) -> Result<()> { let max = self .min_without(validator) .unwrap_or(U512::MAX) .saturating_add(MAX_SPREAD); let min = self .max_without(validator) .unwrap_or_else(U512::zero) .saturating_sub(MAX_SPREAD); let stake = self.0.get(validator).map(|s| *s + amount).unwrap_or(amount); if stake > max || stake < min { return Err(Error::SpreadTooHigh); } let max_increase = MAX_INCREASE.min(self.sum() * MAX_REL_INCREASE / 1_000_000); if (stake.is_zero() && amount > min.saturating_add(max_increase)) || (!stake.is_zero() && amount > max_increase) { return Err(Error::BondTooLarge); } Ok(()) } /// Returns the minimum stake of the _other_ validators. fn min_without(&self, validator: &AccountHash) -> Option<U512> { self.0 .iter() .filter(|(v, _)| *v != validator) .map(|(_, s)| s) .min() .cloned() } /// Returns the maximum stake of the _other_ validators. fn max_without(&self, validator: &AccountHash) -> Option<U512> { self.0 .iter() .filter(|(v, _)| *v != validator) .map(|(_, s)| s) .max() .cloned() } /// Returns the total stakes. 
fn sum(&self) -> U512 { self.0 .values() .fold(U512::zero(), |sum, s| sum.saturating_add(*s)) } } #[cfg(test)] mod tests { use types::{account::AccountHash, system_contract_errors::pos::Error, U512}; use super::Stakes; const KEY1: [u8; 32] = [1; 32]; const KEY2: [u8; 32] = [2; 32]; fn new_stakes(stakes: &[([u8; 32], u64)]) -> Stakes { Stakes( stakes .iter() .map(|&(key, amount)| (AccountHash::new(key), U512::from(amount))) .collect(), ) } #[test] fn test_bond() { let mut stakes = new_stakes(&[(KEY2, 100)]); assert_eq!( Ok(()), stakes.validate_bonding(&AccountHash::new(KEY1), U512::from(5)) ); stakes.bond(&AccountHash::new(KEY1), U512::from(5)); assert_eq!(new_stakes(&[(KEY1, 5), (KEY2, 100)]), stakes); } #[test] fn test_bond_existing() { let mut stakes = new_stakes(&[(KEY1, 50), (KEY2, 100)]); assert_eq!( Ok(()), stakes.validate_bonding(&AccountHash::new(KEY1), U512::from(4)) ); stakes.bond(&AccountHash::new(KEY1), U512::from(4)); assert_eq!(new_stakes(&[(KEY1, 54), (KEY2, 100)]), stakes); } #[test] fn test_bond_too_much_rel() { let stakes = new_stakes(&[(KEY1, 1_000), (KEY2, 1_000)]); let total = 1_000 + 1_000; assert_eq!( Err(Error::BondTooLarge), stakes.validate_bonding( &AccountHash::new(KEY1), U512::from(super::MAX_REL_INCREASE * total / 1_000_000 + 1), ), "Successfully bonded more than the maximum amount." ); assert_eq!( Ok(()), stakes.validate_bonding( &AccountHash::new(KEY1), U512::from(super::MAX_REL_INCREASE * total / 1_000_000), ), "Failed to bond the maximum amount." 
); } #[test] fn test_unbond() { let mut stakes = new_stakes(&[(KEY1, 5), (KEY2, 100)]); assert_eq!( Ok(U512::from(5)), stakes.unbond(&AccountHash::new(KEY1), None) ); assert_eq!(new_stakes(&[(KEY2, 100)]), stakes); } #[test] fn test_unbond_last_validator() { let mut stakes = new_stakes(&[(KEY1, 5)]); assert_eq!( Err(Error::CannotUnbondLastValidator), stakes.unbond(&AccountHash::new(KEY1), None) ); } #[test] fn test_partially_unbond() { let mut stakes = new_stakes(&[(KEY1, 50)]); assert_eq!( Ok(U512::from(4)), stakes.unbond(&AccountHash::new(KEY1), Some(U512::from(4))) ); assert_eq!(new_stakes(&[(KEY1, 46)]), stakes); } #[test] fn test_unbond_too_much_rel() { let mut stakes = new_stakes(&[(KEY1, 999), (KEY2, 1)]); let total = 999 + 1; assert_eq!( Err(Error::UnbondTooLarge), stakes.unbond( &AccountHash::new(KEY1), Some(U512::from(super::MAX_REL_DECREASE * total / 1_000_000 + 1)), ), "Successfully unbonded more than the maximum amount." ); assert_eq!( Ok(U512::from(super::MAX_REL_DECREASE * total / 1_000_000)), stakes.unbond( &AccountHash::new(KEY1), Some(U512::from(super::MAX_REL_DECREASE * total / 1_000_000)), ), "Failed to unbond the maximum amount." ); } }
31.880282
99
0.548929
0aef1e8cf87f1c99d92fa9fca8c08919b0b180a7
1,413
//! Data structures and implementations. pub use self::dmat::DMat; pub use self::dvec::{DVec, DVec1, DVec2, DVec3, DVec4, DVec5, DVec6}; pub use self::vec::{Vec0, Vec1, Vec2, Vec3, Vec4, Vec5, Vec6}; pub use self::mat::{Identity, Mat1, Mat2, Mat3, Mat4, Mat5, Mat6}; pub use self::rot::{Rot2, Rot3, Rot4}; pub use self::iso::{Iso2, Iso3, Iso4}; pub use self::vec::{Vec1MulRhs, Vec2MulRhs, Vec3MulRhs, Vec4MulRhs, Vec5MulRhs, Vec6MulRhs, Vec1DivRhs, Vec2DivRhs, Vec3DivRhs, Vec4DivRhs, Vec5DivRhs, Vec6DivRhs, Vec1AddRhs, Vec2AddRhs, Vec3AddRhs, Vec4AddRhs, Vec5AddRhs, Vec6AddRhs, Vec1SubRhs, Vec2SubRhs, Vec3SubRhs, Vec4SubRhs, Vec5SubRhs, Vec6SubRhs}; pub use self::mat::{Mat1MulRhs, Mat2MulRhs, Mat3MulRhs, Mat4MulRhs, Mat5MulRhs, Mat6MulRhs, Mat1DivRhs, Mat2DivRhs, Mat3DivRhs, Mat4DivRhs, Mat5DivRhs, Mat6DivRhs, Mat1AddRhs, Mat2AddRhs, Mat3AddRhs, Mat4AddRhs, Mat5AddRhs, Mat6AddRhs, Mat1SubRhs, Mat2SubRhs, Mat3SubRhs, Mat4SubRhs, Mat5SubRhs, Mat6SubRhs}; mod metal; mod dmat; mod dvec_macros; mod dvec; mod vec_macros; mod vec; mod mat_macros; mod mat; mod rot_macros; mod rot; mod iso_macros; mod iso; // specialization for some 1d, 2d and 3d operations #[doc(hidden)] mod spec { mod identity; mod mat; mod vec0; mod vec; mod primitives; // mod complex; }
33.642857
92
0.680113
d9417ee0a19533ed26b88adffc9b9bbd5f6874a9
1,048
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // xfail-pretty - token trees can't pretty print macro_rules! myfn( ( $f:ident, ( $( $x:ident ),* ), $body:block ) => ( fn $f( $( $x : int),* ) -> int $body ) ) myfn!(add, (a,b), { return a+b; } ) pub fn main() { macro_rules! mylet( ($x:ident, $val:expr) => ( let $x = $val; ) ); mylet!(y, 8*2); assert!((y == 16)); myfn!(mult, (a,b), { a*b } ); assert!((mult(2, add(4,4)) == 16)); macro_rules! actually_an_expr_macro ( () => ( 16 ) ) assert!({ actually_an_expr_macro!() } == 16); }
24.372093
68
0.580153
72f0e2716312ad28c94162484316b359776c9643
1,116
use anyhow::Error; use async_trait::async_trait; use http::StatusCode; use svc_error::Error as SvcError; use crate::{message_handler::generic::MethodKind, switchboard::StreamId}; #[derive(Clone, Debug, Deserialize)] pub struct Request {} #[derive(Serialize)] struct Response {} #[async_trait] impl super::Operation for Request { async fn call(&self, request: &super::Request) -> super::OperationResult { let app = app!().map_err(internal_error)?; app.switchboard .with_write_lock(|mut switchboard| { switchboard.touch_session(request.session_id()); Ok(()) }) .map_err(internal_error)?; Ok(Response {}.into()) } fn stream_id(&self) -> Option<StreamId> { None } fn method_kind(&self) -> Option<MethodKind> { Some(MethodKind::ServicePing) } } fn internal_error(err: Error) -> SvcError { SvcError::builder() .kind("touch_session_error", "Error touching session") .status(StatusCode::INTERNAL_SERVER_ERROR) .detail(&err.to_string()) .build() }
24.8
78
0.626344
75311cabeaa64e58d874c149d84b9b4f2ddd5df6
4,006
use glium::glutin::{self, Event, WindowEvent}; use glium::{Display, Surface}; use imgui::{Context, ConfigFlags, FontConfig, FontGlyphRanges, FontSource, Ui}; use imgui_glium_renderer::Renderer; use imgui_winit_support::{HiDpiMode, WinitPlatform}; use std::time::Instant; mod clipboard; pub struct System { pub events_loop: glutin::EventsLoop, pub display: glium::Display, pub imgui: Context, pub platform: WinitPlatform, pub renderer: Renderer, pub font_size: f32, } pub fn init(title: &str) -> System { let title = match title.rfind('/') { Some(idx) => title.split_at(idx + 1).1, None => title, }; let events_loop = glutin::EventsLoop::new(); let context = glutin::ContextBuilder::new().with_vsync(true); let builder = glutin::WindowBuilder::new() .with_title(title.to_owned()) .with_dimensions(glutin::dpi::LogicalSize::new(1024f64, 768f64)); let display = Display::new(builder, context, &events_loop).expect("Failed to initialize display"); let mut imgui = Context::create(); imgui.set_ini_filename(None); if let Some(backend) = clipboard::init() { imgui.set_clipboard_backend(Box::new(backend)); } else { eprintln!("Failed to initialize clipboard"); } let mut platform = WinitPlatform::init(&mut imgui); { let gl_window = display.gl_window(); let window = gl_window.window(); platform.attach_window(imgui.io_mut(), &window, HiDpiMode::Rounded); } let hidpi_factor = platform.hidpi_factor(); let font_size = (13.0 * hidpi_factor) as f32; imgui.fonts().add_font(&[ FontSource::DefaultFontData { config: Some(FontConfig { size_pixels: font_size, ..FontConfig::default() }), }, FontSource::TtfData { data: include_bytes!("../../../resources/mplus-1p-regular.ttf"), size_pixels: font_size, config: Some(FontConfig { rasterizer_multiply: 1.75, glyph_ranges: FontGlyphRanges::japanese(), ..FontConfig::default() }), }, ]); imgui.io_mut().font_global_scale = (1.0 / hidpi_factor) as f32; let renderer = Renderer::init(&mut imgui, &display).expect("Failed to initialize renderer"); System { events_loop, 
display, imgui, platform, renderer, font_size, } } impl System { pub fn main_loop<F: FnMut(&mut bool, &mut Ui)>(self, mut run_ui: F) { let System { mut events_loop, display, mut imgui, mut platform, mut renderer, .. } = self; let gl_window = display.gl_window(); let window = gl_window.window(); let mut last_frame = Instant::now(); let mut run = true; while run { events_loop.poll_events(|event| { platform.handle_event(imgui.io_mut(), &window, &event); if let Event::WindowEvent { event, .. } = event { if let WindowEvent::CloseRequested = event { run = false; } } }); let io = imgui.io_mut(); io.config_flags = ConfigFlags::DOCKING_ENABLE; platform .prepare_frame(io, &window) .expect("Failed to start frame"); last_frame = io.update_delta_time(last_frame); let mut ui = imgui.frame(); run_ui(&mut run, &mut ui); let mut target = display.draw(); target.clear_color_srgb(1.0, 1.0, 1.0, 1.0); platform.prepare_render(&ui, &window); let draw_data = ui.render(); renderer .render(&mut target, draw_data) .expect("Rendering failed"); target.finish().expect("Failed to swap buffers"); } } }
31.054264
96
0.562906
38a2ff9297fe9a3e519bfbe20ee4d9078479cd2d
19,318
// due to code generated by JsonSchema #![allow(clippy::field_reassign_with_default)] use crate::{IdentityKey, SphinxKey}; use az::CheckedCast; use cosmwasm_std::{coin, Addr, Coin, Uint128}; use log::error; use network_defaults::DEFAULT_OPERATOR_INTERVAL_COST; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use serde_repr::{Deserialize_repr, Serialize_repr}; use std::cmp::Ordering; use std::fmt::Display; type U128 = fixed::types::U75F53; // u128 with 18 significant digits fixed::const_fixed_from_int! { const ONE: U128 = 1; } #[cfg_attr(feature = "ts-rs", derive(ts_rs::TS))] #[derive(Clone, Copy, Debug, Deserialize, PartialEq, PartialOrd, Serialize, JsonSchema)] pub enum RewardedSetNodeStatus { Active, Standby, } impl RewardedSetNodeStatus { pub fn is_active(&self) -> bool { matches!(self, RewardedSetNodeStatus::Active) } } #[cfg_attr(feature = "ts-rs", derive(ts_rs::TS))] #[derive(Clone, Debug, Deserialize, PartialEq, PartialOrd, Serialize, JsonSchema)] pub struct MixNode { pub host: String, pub mix_port: u16, pub verloc_port: u16, pub http_api_port: u16, pub sphinx_key: SphinxKey, /// Base58 encoded ed25519 EdDSA public key. 
pub identity_key: IdentityKey, pub version: String, pub profit_margin_percent: u8, } #[derive( Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize_repr, Deserialize_repr, JsonSchema, )] #[repr(u8)] pub enum Layer { Gateway = 0, One = 1, Two = 2, Three = 3, } impl From<Layer> for String { fn from(layer: Layer) -> Self { if layer == Layer::Gateway { "gateway".to_string() } else { (layer as u8).to_string() } } } #[derive(Debug, Clone, JsonSchema, PartialEq, Serialize, Deserialize, Copy)] pub struct NodeRewardParams { period_reward_pool: Uint128, rewarded_set_size: Uint128, active_set_size: Uint128, reward_blockstamp: u64, circulating_supply: Uint128, uptime: Uint128, sybil_resistance_percent: u8, in_active_set: bool, active_set_work_factor: u8, } impl NodeRewardParams { #[allow(clippy::too_many_arguments)] pub fn new( period_reward_pool: u128, rewarded_set_size: u128, active_set_size: u128, reward_blockstamp: u64, circulating_supply: u128, uptime: u128, sybil_resistance_percent: u8, in_active_set: bool, active_set_work_factor: u8, ) -> NodeRewardParams { NodeRewardParams { period_reward_pool: Uint128::new(period_reward_pool), rewarded_set_size: Uint128::new(rewarded_set_size), active_set_size: Uint128::new(active_set_size), reward_blockstamp, circulating_supply: Uint128::new(circulating_supply), uptime: Uint128::new(uptime), sybil_resistance_percent, in_active_set, active_set_work_factor, } } pub fn omega(&self) -> U128 { // As per keybase://chat/nymtech#tokeneconomics/1179 let denom = self.active_set_work_factor() * U128::from_num(self.rewarded_set_size()) - (self.active_set_work_factor() - ONE) * U128::from_num(self.idle_nodes().u128()); if self.in_active_set() { // work_active = factor / (factor * self.network.k[month] - (factor - 1) * idle_nodes) self.active_set_work_factor() / denom * self.rewarded_set_size() } else { // work_idle = 1 / (factor * self.network.k[month] - (factor - 1) * idle_nodes) ONE / denom * self.rewarded_set_size() } } pub fn 
idle_nodes(&self) -> Uint128 { self.rewarded_set_size - self.active_set_size } pub fn active_set_work_factor(&self) -> U128 { U128::from_num(self.active_set_work_factor) } pub fn in_active_set(&self) -> bool { self.in_active_set } pub fn performance(&self) -> U128 { U128::from_num(self.uptime.u128()) / U128::from_num(100) } pub fn operator_cost(&self) -> U128 { U128::from_num(self.uptime.u128() / 100u128 * DEFAULT_OPERATOR_INTERVAL_COST as u128) } pub fn set_reward_blockstamp(&mut self, blockstamp: u64) { self.reward_blockstamp = blockstamp; } pub fn period_reward_pool(&self) -> u128 { self.period_reward_pool.u128() } pub fn rewarded_set_size(&self) -> u128 { self.rewarded_set_size.u128() } pub fn circulating_supply(&self) -> u128 { self.circulating_supply.u128() } pub fn reward_blockstamp(&self) -> u64 { self.reward_blockstamp } pub fn uptime(&self) -> u128 { self.uptime.u128() } pub fn one_over_k(&self) -> U128 { ONE / U128::from_num(self.rewarded_set_size.u128()) } pub fn alpha(&self) -> U128 { U128::from_num(self.sybil_resistance_percent) / U128::from_num(100) } } // cosmwasm's limited serde doesn't work with U128 directly #[allow(non_snake_case)] pub mod fixed_U128_as_string { use super::U128; use serde::de::Error; use serde::Deserialize; use std::str::FromStr; pub fn serialize<S>(val: &U128, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer, { let s = (*val).to_string(); serializer.serialize_str(&s) } pub fn deserialize<'de, D>(deserializer: D) -> Result<U128, D::Error> where D: serde::Deserializer<'de>, { let s = String::deserialize(deserializer)?; U128::from_str(&s).map_err(|err| { D::Error::custom(format!( "failed to deserialize U128 with its string representation - {}", err )) }) } } // everything required to reward delegator of given mixnode #[derive(Debug, Clone, Copy, Serialize, Deserialize, JsonSchema)] pub struct DelegatorRewardParams { node_reward_params: NodeRewardParams, // to be completely honest I don't understand all 
consequences of using `#[schemars(with = "String")]` // for U128 here, but it seems that CosmWasm is using the same attribute for their Uint128 #[schemars(with = "String")] #[serde(with = "fixed_U128_as_string")] sigma: U128, #[schemars(with = "String")] #[serde(with = "fixed_U128_as_string")] profit_margin: U128, #[schemars(with = "String")] #[serde(with = "fixed_U128_as_string")] node_profit: U128, } impl DelegatorRewardParams { pub fn new(mixnode_bond: &MixNodeBond, node_reward_params: NodeRewardParams) -> Self { DelegatorRewardParams { sigma: mixnode_bond.sigma(&node_reward_params), profit_margin: mixnode_bond.profit_margin(), node_profit: mixnode_bond.node_profit(&node_reward_params), node_reward_params, } } pub fn determine_delegation_reward(&self, delegation_amount: Uint128) -> u128 { // change all values into their fixed representations let delegation_amount = U128::from_num(delegation_amount.u128()); let circulating_supply = U128::from_num(self.node_reward_params.circulating_supply()); let scaled_delegation_amount = delegation_amount / circulating_supply; let delegator_reward = (ONE - self.profit_margin) * scaled_delegation_amount / self.sigma * self.node_profit; let reward = delegator_reward.max(U128::ZERO); if let Some(int_reward) = reward.checked_cast() { int_reward } else { error!( "Could not cast delegator reward ({}) to u128, returning 0", reward, ); 0u128 } } pub fn node_reward_params(&self) -> &NodeRewardParams { &self.node_reward_params } } #[derive(Debug, Copy, Clone)] pub struct NodeRewardResult { reward: U128, lambda: U128, sigma: U128, } impl NodeRewardResult { pub fn reward(&self) -> U128 { self.reward } pub fn lambda(&self) -> U128 { self.lambda } pub fn sigma(&self) -> U128 { self.sigma } } #[derive(Clone, Debug, Deserialize, PartialEq, Serialize, JsonSchema)] pub struct MixNodeBond { pub pledge_amount: Coin, pub total_delegation: Coin, pub owner: Addr, pub layer: Layer, pub block_height: u64, pub mix_node: MixNode, pub proxy: Option<Addr>, 
} impl MixNodeBond { pub fn new( pledge_amount: Coin, owner: Addr, layer: Layer, block_height: u64, mix_node: MixNode, proxy: Option<Addr>, ) -> Self { MixNodeBond { total_delegation: coin(0, &pledge_amount.denom), pledge_amount, owner, layer, block_height, mix_node, proxy, } } pub fn profit_margin(&self) -> U128 { U128::from_num(self.mix_node.profit_margin_percent) / U128::from_num(100) } pub fn identity(&self) -> &String { &self.mix_node.identity_key } pub fn pledge_amount(&self) -> Coin { self.pledge_amount.clone() } pub fn owner(&self) -> &Addr { &self.owner } pub fn mix_node(&self) -> &MixNode { &self.mix_node } pub fn total_bond(&self) -> Option<u128> { if self.pledge_amount.denom != self.total_delegation.denom { None } else { Some(self.pledge_amount.amount.u128() + self.total_delegation.amount.u128()) } } pub fn total_delegation(&self) -> Coin { self.total_delegation.clone() } pub fn stake_saturation(&self, circulating_supply: u128, rewarded_set_size: u32) -> U128 { self.total_bond_to_circulating_supply(circulating_supply) * U128::from_num(rewarded_set_size) } pub fn pledge_to_circulating_supply(&self, circulating_supply: u128) -> U128 { U128::from_num(self.pledge_amount().amount.u128()) / U128::from_num(circulating_supply) } pub fn total_bond_to_circulating_supply(&self, circulating_supply: u128) -> U128 { U128::from_num(self.pledge_amount().amount.u128() + self.total_delegation().amount.u128()) / U128::from_num(circulating_supply) } pub fn lambda(&self, params: &NodeRewardParams) -> U128 { // Ratio of a bond to the token circulating supply let pledge_to_circulating_supply_ratio = self.pledge_to_circulating_supply(params.circulating_supply()); pledge_to_circulating_supply_ratio.min(params.one_over_k()) } pub fn sigma(&self, params: &NodeRewardParams) -> U128 { // Ratio of a delegation to the the token circulating supply let total_bond_to_circulating_supply_ratio = self.total_bond_to_circulating_supply(params.circulating_supply()); 
// NOTE(review): the next two lines are the tail of a function whose signature
// lives in an earlier chunk of this file; they are preserved verbatim.
total_bond_to_circulating_supply_ratio.min(params.one_over_k())
    }

    /// Computes the epoch reward for this mixnode.
    ///
    /// Implements:
    ///   performance * pool * (sigma * omega + alpha * lambda * sigma * set_size) / (1 + alpha)
    /// and returns the intermediate `lambda` / `sigma` values alongside the
    /// reward so callers do not have to recompute them.
    pub fn reward(&self, params: &NodeRewardParams) -> NodeRewardResult {
        let lambda = self.lambda(params);
        let sigma = self.sigma(params);

        let reward = params.performance()
            * params.period_reward_pool()
            * (sigma * params.omega() + params.alpha() * lambda * sigma * params.rewarded_set_size())
            / (ONE + params.alpha());

        NodeRewardResult {
            reward,
            lambda,
            sigma,
        }
    }

    /// Node reward minus the operator's fixed cost, floored at zero
    /// (profit can never go negative).
    pub fn node_profit(&self, params: &NodeRewardParams) -> U128 {
        // Evaluate the reward once; the original computed `self.reward(params)`
        // in both branches.
        let reward = self.reward(params).reward();
        if reward < params.operator_cost() {
            U128::from_num(0)
        } else {
            reward - params.operator_cost()
        }
    }

    /// Portion of the node reward that goes to the operator: the base reward
    /// (capped at the operator cost) plus the profit share determined by the
    /// declared profit margin and the operator's own stake fraction
    /// (lambda / sigma).
    pub fn operator_reward(&self, params: &NodeRewardParams) -> u128 {
        let reward = self.reward(params);
        // Profit is the reward above operator cost, floored at zero.
        let profit = if reward.reward < params.operator_cost() {
            U128::from_num(0)
        } else {
            reward.reward - params.operator_cost()
        };
        // The operator recoups its cost first, up to the full reward.
        let operator_base_reward = reward.reward.min(params.operator_cost());
        // Profit share: margin + (1 - margin) * (operator stake fraction).
        let operator_reward = (self.profit_margin()
            + (ONE - self.profit_margin()) * reward.lambda / reward.sigma)
            * profit;

        let reward = (operator_reward + operator_base_reward).max(U128::from_num(0));

        if let Some(int_reward) = reward.checked_cast() {
            int_reward
        } else {
            // A value that does not fit in u128 is unexpected; log and return 0
            // rather than panicking inside the contract.
            error!(
                "Could not cast reward ({}) to u128, returning 0 - mixnode {}",
                reward,
                self.identity()
            );
            0u128
        }
    }

    /// Total stake of this node as a fraction of circulating supply,
    /// capped at 1/k.
    pub fn sigma_ratio(&self, params: &NodeRewardParams) -> U128 {
        // Equivalent to the original if/else but evaluates the ratio once and
        // matches the `.min(one_over_k)` style used elsewhere in this impl.
        self.total_bond_to_circulating_supply(params.circulating_supply())
            .min(params.one_over_k())
    }

    /// Reward owed to a single delegation of `delegation_amount` tokens.
    pub fn reward_delegation(&self, delegation_amount: Uint128, params: &NodeRewardParams) -> u128 {
        let reward_params = DelegatorRewardParams::new(self, *params);
        reward_params.determine_delegation_reward(delegation_amount)
    }
}

impl PartialOrd for MixNodeBond {
    /// Orders bonds by total stake (pledge + delegation), then pledge, then
    /// delegation, then block height, then the remaining fields for
    /// determinism. Returns `None` when denominations are inconsistent.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        // first remove invalid cases
        if self.pledge_amount.denom != self.total_delegation.denom {
            return None;
        }

        if other.pledge_amount.denom != other.total_delegation.denom {
            return None;
        }

        if self.pledge_amount.denom != other.pledge_amount.denom {
            return None;
        }

        // try to order by total bond + delegation
        // BUG FIX: the original compared `self`'s total against itself, which
        // always produced `Equal` and silently skipped this primary criterion,
        // degrading the ordering to pledge-only.
        let total_cmp = (self.pledge_amount.amount + self.total_delegation.amount)
            .partial_cmp(&(other.pledge_amount.amount + other.total_delegation.amount))?;

        if total_cmp != Ordering::Equal {
            return Some(total_cmp);
        }

        // then if those are equal, prefer higher bond over delegation
        let pledge_cmp = self
            .pledge_amount
            .amount
            .partial_cmp(&other.pledge_amount.amount)?;
        if pledge_cmp != Ordering::Equal {
            return Some(pledge_cmp);
        }

        // then look at delegation (I'm not sure we can get here, but better safe than sorry)
        let delegation_cmp = self
            .total_delegation
            .amount
            .partial_cmp(&other.total_delegation.amount)?;
        if delegation_cmp != Ordering::Equal {
            return Some(delegation_cmp);
        }

        // then check block height
        let height_cmp = self.block_height.partial_cmp(&other.block_height)?;
        if height_cmp != Ordering::Equal {
            return Some(height_cmp);
        }

        // finally go by the rest of the fields in order. It doesn't really matter at this point
        // but we should be deterministic.
        let owner_cmp = self.owner.partial_cmp(&other.owner)?;
        if owner_cmp != Ordering::Equal {
            return Some(owner_cmp);
        }

        let layer_cmp = self.layer.partial_cmp(&other.layer)?;
        if layer_cmp != Ordering::Equal {
            return Some(layer_cmp);
        }

        self.mix_node.partial_cmp(&other.mix_node)
    }
}

impl Display for MixNodeBond {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "amount: {} {}, owner: {}, identity: {}",
            self.pledge_amount.amount,
            self.pledge_amount.denom,
            self.owner,
            self.mix_node.identity_key
        )
    }
}

/// A single page of mixnode bonds plus the cursor for fetching the next page.
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize, JsonSchema)]
pub struct PagedMixnodeResponse {
    pub nodes: Vec<MixNodeBond>,
    pub per_page: usize,
    // identity key of the last node on this page; pass it as the start of the
    // next paged query to continue iteration
    pub start_next_after: Option<IdentityKey>,
}

impl PagedMixnodeResponse {
    pub fn new(
        nodes: Vec<MixNodeBond>,
        per_page: usize,
        start_next_after: Option<IdentityKey>,
    ) -> Self {
        PagedMixnodeResponse {
            nodes,
            per_page,
            start_next_after,
        }
    }
}

/// Response to an ownership query: the queried address and its bonded
/// mixnode, if any.
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize, JsonSchema)]
pub struct MixOwnershipResponse {
    pub address: Addr,
    pub mixnode: Option<MixNodeBond>,
}

#[cfg(test)]
mod tests {
    use super::*;

    fn mixnode_fixture() -> MixNode {
        MixNode {
            host: "1.1.1.1".to_string(),
            mix_port: 123,
            verloc_port: 456,
            http_api_port: 789,
            sphinx_key: "sphinxkey".to_string(),
            identity_key: "identitykey".to_string(),
            version: "0.11.0".to_string(),
            profit_margin_percent: 10,
        }
    }

    #[test]
    fn mixnode_bond_partial_ord() {
        let _150foos = Coin::new(150, "foo");
        let _50foos = Coin::new(50, "foo");
        let _0foos = Coin::new(0, "foo");

        let mix1 = MixNodeBond {
            pledge_amount: _150foos.clone(),
            total_delegation: _50foos.clone(),
            owner: Addr::unchecked("foo1"),
            layer: Layer::One,
            block_height: 100,
            mix_node: mixnode_fixture(),
            proxy: None,
        };

        let mix2 = MixNodeBond {
            pledge_amount: _150foos.clone(),
            total_delegation: _50foos.clone(),
            owner: Addr::unchecked("foo2"),
            layer: Layer::One,
            block_height: 120,
            mix_node: mixnode_fixture(),
            proxy: None,
        };

        let mix3 = MixNodeBond {
            pledge_amount: _50foos,
            total_delegation: _150foos.clone(),
            owner: Addr::unchecked("foo3"),
            layer: Layer::One,
            block_height: 120,
            mix_node: mixnode_fixture(),
            proxy: None,
        };

        let mix4 = MixNodeBond {
            pledge_amount: _150foos.clone(),
            total_delegation: _0foos.clone(),
            owner: Addr::unchecked("foo4"),
            layer: Layer::One,
            block_height: 120,
            mix_node: mixnode_fixture(),
            proxy: None,
        };

        let mix5 = MixNodeBond {
            pledge_amount: _0foos,
            total_delegation: _150foos,
            owner: Addr::unchecked("foo5"),
            layer: Layer::One,
            block_height: 120,
            mix_node: mixnode_fixture(),
            proxy: None,
        };

        // summary:
        // mix1: 150bond + 50delegation, foo1, 100
        // mix2: 150bond + 50delegation, foo2, 120
        // mix3: 50bond + 150delegation, foo3, 120
        // mix4: 150bond + 0delegation, foo4, 120
        // mix5: 0bond + 150delegation, foo5, 120

        // highest total bond+delegation is used
        // then bond followed by delegation
        // finally just the rest of the fields

        // mix1 has higher total than mix4 or mix5
        assert!(mix1 > mix4);
        assert!(mix1 > mix5);

        // a higher combined total wins even when the pledge alone is smaller;
        // this assertion distinguishes total-based ordering from pledge-only
        // ordering and would have caught the self-vs-self comparison bug
        assert!(mix3 > mix4);

        // mix1 has the same total as mix3, however, mix1 has more tokens in bond
        assert!(mix1 > mix3);

        // same case for mix4 and mix5
        assert!(mix4 > mix5);

        // same bond and delegation, so it's just ordered by height
        assert!(mix1 < mix2);
    }
}
29.857805
106
0.601615