hexsha
stringlengths
40
40
size
int64
4
1.05M
content
stringlengths
4
1.05M
avg_line_length
float64
1.33
100
max_line_length
int64
1
1k
alphanum_fraction
float64
0.25
1
ac1bbe19b0f51c031e52e81dfecd83f3d5631875
120
//! FRP bindings to the animation engine. pub mod animation; pub mod easing; pub use animation::*; pub use easing::*;
15
41
0.708333
7698c2de24d5c8d9e3b21b6c2d02a07aec0b1252
80,426
use std::cell::RefCell; use std::default::Default; use std::fmt; use std::hash::Hash; use std::iter; use std::lazy::SyncOnceCell as OnceCell; use std::path::PathBuf; use std::rc::Rc; use std::sync::Arc; use std::vec; use arrayvec::ArrayVec; use rustc_ast::attr; use rustc_ast::util::comments::beautify_doc_string; use rustc_ast::{self as ast, AttrStyle}; use rustc_attr::{ConstStability, Deprecation, Stability, StabilityLevel}; use rustc_const_eval::const_eval::is_unstable_const_fn; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use rustc_data_structures::thin_vec::ThinVec; use rustc_hir as hir; use rustc_hir::def::{CtorKind, DefKind, Res}; use rustc_hir::def_id::{CrateNum, DefId, DefIndex, CRATE_DEF_INDEX, LOCAL_CRATE}; use rustc_hir::lang_items::LangItem; use rustc_hir::{BodyId, Mutability}; use rustc_index::vec::IndexVec; use rustc_middle::ty::fast_reject::SimplifiedType; use rustc_middle::ty::{self, TyCtxt}; use rustc_session::Session; use rustc_span::hygiene::MacroKind; use rustc_span::source_map::DUMMY_SP; use rustc_span::symbol::{kw, sym, Ident, Symbol}; use rustc_span::{self, FileName, Loc}; use rustc_target::abi::VariantIdx; use rustc_target::spec::abi::Abi; use rustc_typeck::check::intrinsic::intrinsic_operation_unsafety; use crate::clean::cfg::Cfg; use crate::clean::external_path; use crate::clean::inline::{self, print_inlined_const}; use crate::clean::utils::{is_literal_expr, print_const_expr, print_evaluated_const}; use crate::clean::Clean; use crate::core::DocContext; use crate::formats::cache::Cache; use crate::formats::item_type::ItemType; use crate::html::render::Context; use crate::passes::collect_intra_doc_links::UrlFragment; crate use self::FnRetTy::*; crate use self::ItemKind::*; crate use self::SelfTy::*; crate use self::Type::{ Array, BareFunction, BorrowedRef, DynTrait, Generic, ImplTrait, Infer, Primitive, QPath, RawPointer, Slice, Tuple, }; crate use self::Visibility::{Inherited, Public}; crate type ItemIdSet = FxHashSet<ItemId>; 
#[derive(Debug, Clone, PartialEq, Eq, Hash, Copy)] crate enum ItemId { /// A "normal" item that uses a [`DefId`] for identification. DefId(DefId), /// Identifier that is used for auto traits. Auto { trait_: DefId, for_: DefId }, /// Identifier that is used for blanket implementations. Blanket { impl_id: DefId, for_: DefId }, /// Identifier for primitive types. Primitive(PrimitiveType, CrateNum), } impl ItemId { #[inline] crate fn is_local(self) -> bool { match self { ItemId::Auto { for_: id, .. } | ItemId::Blanket { for_: id, .. } | ItemId::DefId(id) => id.is_local(), ItemId::Primitive(_, krate) => krate == LOCAL_CRATE, } } #[inline] #[track_caller] crate fn expect_def_id(self) -> DefId { self.as_def_id() .unwrap_or_else(|| panic!("ItemId::expect_def_id: `{:?}` isn't a DefId", self)) } #[inline] crate fn as_def_id(self) -> Option<DefId> { match self { ItemId::DefId(id) => Some(id), _ => None, } } #[inline] crate fn krate(self) -> CrateNum { match self { ItemId::Auto { for_: id, .. } | ItemId::Blanket { for_: id, .. } | ItemId::DefId(id) => id.krate, ItemId::Primitive(_, krate) => krate, } } #[inline] crate fn index(self) -> Option<DefIndex> { match self { ItemId::DefId(id) => Some(id.index), _ => None, } } } impl From<DefId> for ItemId { fn from(id: DefId) -> Self { Self::DefId(id) } } /// The crate currently being documented. #[derive(Clone, Debug)] crate struct Crate { crate module: Item, crate primitives: ThinVec<(DefId, PrimitiveType)>, /// Only here so that they can be filtered through the rustdoc passes. crate external_traits: Rc<RefCell<FxHashMap<DefId, TraitWithExtraInfo>>>, } // `Crate` is frequently moved by-value. Make sure it doesn't unintentionally get bigger. 
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] rustc_data_structures::static_assert_size!(Crate, 72); impl Crate { crate fn name(&self, tcx: TyCtxt<'_>) -> Symbol { ExternalCrate::LOCAL.name(tcx) } crate fn src(&self, tcx: TyCtxt<'_>) -> FileName { ExternalCrate::LOCAL.src(tcx) } } /// This struct is used to wrap additional information added by rustdoc on a `trait` item. #[derive(Clone, Debug)] crate struct TraitWithExtraInfo { crate trait_: Trait, crate is_notable: bool, } #[derive(Copy, Clone, Debug)] crate struct ExternalCrate { crate crate_num: CrateNum, } impl ExternalCrate { const LOCAL: Self = Self { crate_num: LOCAL_CRATE }; #[inline] crate fn def_id(&self) -> DefId { DefId { krate: self.crate_num, index: CRATE_DEF_INDEX } } crate fn src(&self, tcx: TyCtxt<'_>) -> FileName { let krate_span = tcx.def_span(self.def_id()); tcx.sess.source_map().span_to_filename(krate_span) } crate fn name(&self, tcx: TyCtxt<'_>) -> Symbol { tcx.crate_name(self.crate_num) } crate fn src_root(&self, tcx: TyCtxt<'_>) -> PathBuf { match self.src(tcx) { FileName::Real(ref p) => match p.local_path_if_available().parent() { Some(p) => p.to_path_buf(), None => PathBuf::new(), }, _ => PathBuf::new(), } } /// Attempts to find where an external crate is located, given that we're /// rendering in to the specified source destination. crate fn location( &self, extern_url: Option<&str>, extern_url_takes_precedence: bool, dst: &std::path::Path, tcx: TyCtxt<'_>, ) -> ExternalLocation { use ExternalLocation::*; fn to_remote(url: impl ToString) -> ExternalLocation { let mut url = url.to_string(); if !url.ends_with('/') { url.push('/'); } Remote(url) } // See if there's documentation generated into the local directory // WARNING: since rustdoc creates these directories as it generates documentation, this check is only accurate before rendering starts. // Make sure to call `location()` by that time. 
let local_location = dst.join(self.name(tcx).as_str()); if local_location.is_dir() { return Local; } if extern_url_takes_precedence { if let Some(url) = extern_url { return to_remote(url); } } // Failing that, see if there's an attribute specifying where to find this // external crate let did = DefId { krate: self.crate_num, index: CRATE_DEF_INDEX }; tcx.get_attrs(did) .lists(sym::doc) .filter(|a| a.has_name(sym::html_root_url)) .filter_map(|a| a.value_str()) .map(to_remote) .next() .or_else(|| extern_url.map(to_remote)) // NOTE: only matters if `extern_url_takes_precedence` is false .unwrap_or(Unknown) // Well, at least we tried. } crate fn keywords(&self, tcx: TyCtxt<'_>) -> ThinVec<(DefId, Symbol)> { let root = self.def_id(); let as_keyword = |res: Res<!>| { if let Res::Def(DefKind::Mod, def_id) = res { let attrs = tcx.get_attrs(def_id); let mut keyword = None; for attr in attrs.lists(sym::doc) { if attr.has_name(sym::keyword) { if let Some(v) = attr.value_str() { keyword = Some(v); break; } } } return keyword.map(|p| (def_id, p)); } None }; if root.is_local() { tcx.hir() .root_module() .item_ids .iter() .filter_map(|&id| { let item = tcx.hir().item(id); match item.kind { hir::ItemKind::Mod(_) => { as_keyword(Res::Def(DefKind::Mod, id.def_id.to_def_id())) } hir::ItemKind::Use(path, hir::UseKind::Single) if tcx.visibility(id.def_id).is_public() => { as_keyword(path.res.expect_non_local()) .map(|(_, prim)| (id.def_id.to_def_id(), prim)) } _ => None, } }) .collect() } else { tcx.module_children(root).iter().map(|item| item.res).filter_map(as_keyword).collect() } } crate fn primitives(&self, tcx: TyCtxt<'_>) -> ThinVec<(DefId, PrimitiveType)> { let root = self.def_id(); // Collect all inner modules which are tagged as implementations of // primitives. // // Note that this loop only searches the top-level items of the crate, // and this is intentional. 
If we were to search the entire crate for an // item tagged with `#[doc(primitive)]` then we would also have to // search the entirety of external modules for items tagged // `#[doc(primitive)]`, which is a pretty inefficient process (decoding // all that metadata unconditionally). // // In order to keep the metadata load under control, the // `#[doc(primitive)]` feature is explicitly designed to only allow the // primitive tags to show up as the top level items in a crate. // // Also note that this does not attempt to deal with modules tagged // duplicately for the same primitive. This is handled later on when // rendering by delegating everything to a hash map. let as_primitive = |res: Res<!>| { if let Res::Def(DefKind::Mod, def_id) = res { let attrs = tcx.get_attrs(def_id); let mut prim = None; for attr in attrs.lists(sym::doc) { if let Some(v) = attr.value_str() { if attr.has_name(sym::primitive) { prim = PrimitiveType::from_symbol(v); if prim.is_some() { break; } // FIXME: should warn on unknown primitives? } } } return prim.map(|p| (def_id, p)); } None }; if root.is_local() { tcx.hir() .root_module() .item_ids .iter() .filter_map(|&id| { let item = tcx.hir().item(id); match item.kind { hir::ItemKind::Mod(_) => { as_primitive(Res::Def(DefKind::Mod, id.def_id.to_def_id())) } hir::ItemKind::Use(path, hir::UseKind::Single) if tcx.visibility(id.def_id).is_public() => { as_primitive(path.res.expect_non_local()).map(|(_, prim)| { // Pretend the primitive is local. (id.def_id.to_def_id(), prim) }) } _ => None, } }) .collect() } else { tcx.module_children(root).iter().map(|item| item.res).filter_map(as_primitive).collect() } } } /// Indicates where an external crate can be found. #[derive(Debug)] crate enum ExternalLocation { /// Remote URL root of the external crate Remote(String), /// This external crate can be found in the local doc/ folder Local, /// The external crate could not be found. 
Unknown, } /// Anything with a source location and set of attributes and, optionally, a /// name. That is, anything that can be documented. This doesn't correspond /// directly to the AST's concept of an item; it's a strict superset. #[derive(Clone)] crate struct Item { /// The name of this item. /// Optional because not every item has a name, e.g. impls. crate name: Option<Symbol>, crate attrs: Box<Attributes>, crate visibility: Visibility, /// Information about this item that is specific to what kind of item it is. /// E.g., struct vs enum vs function. crate kind: Box<ItemKind>, crate def_id: ItemId, crate cfg: Option<Arc<Cfg>>, } /// NOTE: this does NOT unconditionally print every item, to avoid thousands of lines of logs. /// If you want to see the debug output for attributes and the `kind` as well, use `{:#?}` instead of `{:?}`. impl fmt::Debug for Item { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let alternate = f.alternate(); // hand-picked fields that don't bloat the logs too much let mut fmt = f.debug_struct("Item"); fmt.field("name", &self.name) .field("visibility", &self.visibility) .field("def_id", &self.def_id); // allow printing the full item if someone really wants to if alternate { fmt.field("attrs", &self.attrs).field("kind", &self.kind).field("cfg", &self.cfg); } else { fmt.field("kind", &self.type_()); fmt.field("docs", &self.doc_value()); } fmt.finish() } } // `Item` is used a lot. Make sure it doesn't unintentionally get bigger. 
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] rustc_data_structures::static_assert_size!(Item, 56); crate fn rustc_span(def_id: DefId, tcx: TyCtxt<'_>) -> Span { Span::new(def_id.as_local().map_or_else( || tcx.def_span(def_id), |local| { let hir = tcx.hir(); hir.span_with_body(hir.local_def_id_to_hir_id(local)) }, )) } impl Item { crate fn stability<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Option<Stability> { self.def_id.as_def_id().and_then(|did| tcx.lookup_stability(did)) } crate fn const_stability<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Option<ConstStability> { self.def_id.as_def_id().and_then(|did| tcx.lookup_const_stability(did)) } crate fn deprecation(&self, tcx: TyCtxt<'_>) -> Option<Deprecation> { self.def_id.as_def_id().and_then(|did| tcx.lookup_deprecation(did)) } crate fn inner_docs(&self, tcx: TyCtxt<'_>) -> bool { self.def_id.as_def_id().map(|did| tcx.get_attrs(did).inner_docs()).unwrap_or(false) } crate fn span(&self, tcx: TyCtxt<'_>) -> Span { let kind = match &*self.kind { ItemKind::StrippedItem(k) => k, _ => &*self.kind, }; match kind { ItemKind::ModuleItem(Module { span, .. }) => *span, ItemKind::ImplItem(Impl { kind: ImplKind::Auto, .. }) => Span::dummy(), ItemKind::ImplItem(Impl { kind: ImplKind::Blanket(_), .. }) => { if let ItemId::Blanket { impl_id, .. } = self.def_id { rustc_span(impl_id, tcx) } else { panic!("blanket impl item has non-blanket ID") } } _ => { self.def_id.as_def_id().map(|did| rustc_span(did, tcx)).unwrap_or_else(Span::dummy) } } } crate fn attr_span(&self, tcx: TyCtxt<'_>) -> rustc_span::Span { crate::passes::span_of_attrs(&self.attrs).unwrap_or_else(|| self.span(tcx).inner()) } /// Finds the `doc` attribute as a NameValue and returns the corresponding /// value found. 
crate fn doc_value(&self) -> Option<String> { self.attrs.doc_value() } /// Convenience wrapper around [`Self::from_def_id_and_parts`] which converts /// `hir_id` to a [`DefId`] crate fn from_hir_id_and_parts( hir_id: hir::HirId, name: Option<Symbol>, kind: ItemKind, cx: &mut DocContext<'_>, ) -> Item { Item::from_def_id_and_parts(cx.tcx.hir().local_def_id(hir_id).to_def_id(), name, kind, cx) } crate fn from_def_id_and_parts( def_id: DefId, name: Option<Symbol>, kind: ItemKind, cx: &mut DocContext<'_>, ) -> Item { let ast_attrs = cx.tcx.get_attrs(def_id); Self::from_def_id_and_attrs_and_parts( def_id, name, kind, box ast_attrs.clean(cx), cx, ast_attrs.cfg(cx.tcx, &cx.cache.hidden_cfg), ) } crate fn from_def_id_and_attrs_and_parts( def_id: DefId, name: Option<Symbol>, kind: ItemKind, attrs: Box<Attributes>, cx: &mut DocContext<'_>, cfg: Option<Arc<Cfg>>, ) -> Item { trace!("name={:?}, def_id={:?}", name, def_id); // Primitives and Keywords are written in the source code as private modules. // The modules need to be private so that nobody actually uses them, but the // keywords and primitives that they are documenting are public. let visibility = if matches!(&kind, ItemKind::KeywordItem(..) | ItemKind::PrimitiveItem(..)) { Visibility::Public } else { cx.tcx.visibility(def_id).clean(cx) }; Item { def_id: def_id.into(), kind: box kind, name, attrs, visibility, cfg } } /// Finds all `doc` attributes as NameValues and returns their corresponding values, joined /// with newlines. 
crate fn collapsed_doc_value(&self) -> Option<String> { self.attrs.collapsed_doc_value() } crate fn links(&self, cx: &Context<'_>) -> Vec<RenderedLink> { use crate::html::format::href; cx.cache() .intra_doc_links .get(&self.def_id) .map_or(&[][..], |v| v.as_slice()) .iter() .filter_map(|ItemLink { link: s, link_text, did, ref fragment }| { debug!(?did); if let Ok((mut href, ..)) = href(*did, cx) { debug!(?href); if let Some(ref fragment) = *fragment { fragment.render(&mut href, cx.tcx()).unwrap() } Some(RenderedLink { original_text: s.clone(), new_text: link_text.clone(), href, }) } else { None } }) .collect() } /// Find a list of all link names, without finding their href. /// /// This is used for generating summary text, which does not include /// the link text, but does need to know which `[]`-bracketed names /// are actually links. crate fn link_names(&self, cache: &Cache) -> Vec<RenderedLink> { cache .intra_doc_links .get(&self.def_id) .map_or(&[][..], |v| v.as_slice()) .iter() .map(|ItemLink { link: s, link_text, .. 
}| RenderedLink { original_text: s.clone(), new_text: link_text.clone(), href: String::new(), }) .collect() } crate fn is_crate(&self) -> bool { self.is_mod() && self.def_id.as_def_id().map_or(false, |did| did.index == CRATE_DEF_INDEX) } crate fn is_mod(&self) -> bool { self.type_() == ItemType::Module } crate fn is_trait(&self) -> bool { self.type_() == ItemType::Trait } crate fn is_struct(&self) -> bool { self.type_() == ItemType::Struct } crate fn is_enum(&self) -> bool { self.type_() == ItemType::Enum } crate fn is_variant(&self) -> bool { self.type_() == ItemType::Variant } crate fn is_associated_type(&self) -> bool { self.type_() == ItemType::AssocType } crate fn is_associated_const(&self) -> bool { self.type_() == ItemType::AssocConst } crate fn is_method(&self) -> bool { self.type_() == ItemType::Method } crate fn is_ty_method(&self) -> bool { self.type_() == ItemType::TyMethod } crate fn is_typedef(&self) -> bool { self.type_() == ItemType::Typedef } crate fn is_primitive(&self) -> bool { self.type_() == ItemType::Primitive } crate fn is_union(&self) -> bool { self.type_() == ItemType::Union } crate fn is_import(&self) -> bool { self.type_() == ItemType::Import } crate fn is_extern_crate(&self) -> bool { self.type_() == ItemType::ExternCrate } crate fn is_keyword(&self) -> bool { self.type_() == ItemType::Keyword } crate fn is_stripped(&self) -> bool { match *self.kind { StrippedItem(..) 
=> true, ImportItem(ref i) => !i.should_be_displayed, _ => false, } } crate fn has_stripped_fields(&self) -> Option<bool> { match *self.kind { StructItem(ref _struct) => Some(_struct.fields_stripped), UnionItem(ref union) => Some(union.fields_stripped), VariantItem(Variant::Struct(ref vstruct)) => Some(vstruct.fields_stripped), _ => None, } } crate fn stability_class(&self, tcx: TyCtxt<'_>) -> Option<String> { self.stability(tcx).as_ref().and_then(|s| { let mut classes = Vec::with_capacity(2); if s.level.is_unstable() { classes.push("unstable"); } // FIXME: what about non-staged API items that are deprecated? if self.deprecation(tcx).is_some() { classes.push("deprecated"); } if !classes.is_empty() { Some(classes.join(" ")) } else { None } }) } crate fn stable_since(&self, tcx: TyCtxt<'_>) -> Option<Symbol> { match self.stability(tcx)?.level { StabilityLevel::Stable { since, .. } => Some(since), StabilityLevel::Unstable { .. } => None, } } crate fn const_stable_since(&self, tcx: TyCtxt<'_>) -> Option<Symbol> { match self.const_stability(tcx)?.level { StabilityLevel::Stable { since, .. } => Some(since), StabilityLevel::Unstable { .. } => None, } } crate fn is_non_exhaustive(&self) -> bool { self.attrs.other_attrs.iter().any(|a| a.has_name(sym::non_exhaustive)) } /// Returns a documentation-level item type from the item. crate fn type_(&self) -> ItemType { ItemType::from(self) } crate fn is_default(&self) -> bool { match *self.kind { ItemKind::MethodItem(_, Some(defaultness)) => { defaultness.has_value() && !defaultness.is_final() } _ => false, } } /// Returns a `FnHeader` if `self` is a function item, otherwise returns `None`. 
crate fn fn_header(&self, tcx: TyCtxt<'_>) -> Option<hir::FnHeader> { fn build_fn_header( def_id: DefId, tcx: TyCtxt<'_>, asyncness: hir::IsAsync, ) -> hir::FnHeader { let sig = tcx.fn_sig(def_id); let constness = if tcx.is_const_fn(def_id) && is_unstable_const_fn(tcx, def_id).is_none() { hir::Constness::Const } else { hir::Constness::NotConst }; hir::FnHeader { unsafety: sig.unsafety(), abi: sig.abi(), constness, asyncness } } let header = match *self.kind { ItemKind::ForeignFunctionItem(_) => { let abi = tcx.fn_sig(self.def_id.as_def_id().unwrap()).abi(); hir::FnHeader { unsafety: if abi == Abi::RustIntrinsic { intrinsic_operation_unsafety(self.name.unwrap()) } else { hir::Unsafety::Unsafe }, abi, constness: hir::Constness::NotConst, asyncness: hir::IsAsync::NotAsync, } } ItemKind::FunctionItem(_) | ItemKind::MethodItem(_, _) => { let def_id = self.def_id.as_def_id().unwrap(); build_fn_header(def_id, tcx, tcx.asyncness(def_id)) } ItemKind::TyMethodItem(_) => { build_fn_header(self.def_id.as_def_id().unwrap(), tcx, hir::IsAsync::NotAsync) } _ => return None, }; Some(header) } } #[derive(Clone, Debug)] crate enum ItemKind { ExternCrateItem { /// The crate's name, *not* the name it's imported as. src: Option<Symbol>, }, ImportItem(Import), StructItem(Struct), UnionItem(Union), EnumItem(Enum), FunctionItem(Function), ModuleItem(Module), TypedefItem(Typedef, bool /* is associated type */), OpaqueTyItem(OpaqueTy), StaticItem(Static), ConstantItem(Constant), TraitItem(Trait), TraitAliasItem(TraitAlias), ImplItem(Impl), /// A method signature only. Used for required methods in traits (ie, /// non-default-methods). TyMethodItem(Function), /// A method with a body. 
MethodItem(Function, Option<hir::Defaultness>), StructFieldItem(Type), VariantItem(Variant), /// `fn`s from an extern block ForeignFunctionItem(Function), /// `static`s from an extern block ForeignStaticItem(Static), /// `type`s from an extern block ForeignTypeItem, MacroItem(Macro), ProcMacroItem(ProcMacro), PrimitiveItem(PrimitiveType), AssocConstItem(Type, Option<ConstantKind>), /// An associated item in a trait or trait impl. /// /// The bounds may be non-empty if there is a `where` clause. /// The `Option<Type>` is the default concrete type (e.g. `trait Trait { type Target = usize; }`) AssocTypeItem(Box<Generics>, Vec<GenericBound>, Option<Type>), /// An item that has been stripped by a rustdoc pass StrippedItem(Box<ItemKind>), KeywordItem(Symbol), } impl ItemKind { /// Some items contain others such as structs (for their fields) and Enums /// (for their variants). This method returns those contained items. crate fn inner_items(&self) -> impl Iterator<Item = &Item> { match self { StructItem(s) => s.fields.iter(), UnionItem(u) => u.fields.iter(), VariantItem(Variant::Struct(v)) => v.fields.iter(), VariantItem(Variant::Tuple(v)) => v.iter(), EnumItem(e) => e.variants.iter(), TraitItem(t) => t.items.iter(), ImplItem(i) => i.items.iter(), ModuleItem(m) => m.items.iter(), ExternCrateItem { .. } | ImportItem(_) | FunctionItem(_) | TypedefItem(_, _) | OpaqueTyItem(_) | StaticItem(_) | ConstantItem(_) | TraitAliasItem(_) | TyMethodItem(_) | MethodItem(_, _) | StructFieldItem(_) | VariantItem(_) | ForeignFunctionItem(_) | ForeignStaticItem(_) | ForeignTypeItem | MacroItem(_) | ProcMacroItem(_) | PrimitiveItem(_) | AssocConstItem(_, _) | AssocTypeItem(..) 
| StrippedItem(_) | KeywordItem(_) => [].iter(), } } } #[derive(Clone, Debug)] crate struct Module { crate items: Vec<Item>, crate span: Span, } crate trait AttributesExt { type AttributeIterator<'a>: Iterator<Item = ast::NestedMetaItem> where Self: 'a; fn lists<'a>(&'a self, name: Symbol) -> Self::AttributeIterator<'a>; fn span(&self) -> Option<rustc_span::Span>; fn inner_docs(&self) -> bool; fn other_attrs(&self) -> Vec<ast::Attribute>; fn cfg(&self, tcx: TyCtxt<'_>, hidden_cfg: &FxHashSet<Cfg>) -> Option<Arc<Cfg>>; } impl AttributesExt for [ast::Attribute] { type AttributeIterator<'a> = impl Iterator<Item = ast::NestedMetaItem> + 'a; fn lists<'a>(&'a self, name: Symbol) -> Self::AttributeIterator<'a> { self.iter() .filter(move |attr| attr.has_name(name)) .filter_map(ast::Attribute::meta_item_list) .flatten() } /// Return the span of the first doc-comment, if it exists. fn span(&self) -> Option<rustc_span::Span> { self.iter().find(|attr| attr.doc_str().is_some()).map(|attr| attr.span) } /// Returns whether the first doc-comment is an inner attribute. /// //// If there are no doc-comments, return true. /// FIXME(#78591): Support both inner and outer attributes on the same item. 
fn inner_docs(&self) -> bool { self.iter().find(|a| a.doc_str().is_some()).map_or(true, |a| a.style == AttrStyle::Inner) } fn other_attrs(&self) -> Vec<ast::Attribute> { self.iter().filter(|attr| attr.doc_str().is_none()).cloned().collect() } fn cfg(&self, tcx: TyCtxt<'_>, hidden_cfg: &FxHashSet<Cfg>) -> Option<Arc<Cfg>> { let sess = tcx.sess; let doc_cfg_active = tcx.features().doc_cfg; let doc_auto_cfg_active = tcx.features().doc_auto_cfg; fn single<T: IntoIterator>(it: T) -> Option<T::Item> { let mut iter = it.into_iter(); let item = iter.next()?; if iter.next().is_some() { return None; } Some(item) } let mut cfg = if doc_cfg_active || doc_auto_cfg_active { let mut doc_cfg = self .iter() .filter(|attr| attr.has_name(sym::doc)) .flat_map(|attr| attr.meta_item_list().unwrap_or_else(Vec::new)) .filter(|attr| attr.has_name(sym::cfg)) .peekable(); if doc_cfg.peek().is_some() && doc_cfg_active { doc_cfg .filter_map(|attr| Cfg::parse(attr.meta_item()?).ok()) .fold(Cfg::True, |cfg, new_cfg| cfg & new_cfg) } else if doc_auto_cfg_active { self.iter() .filter(|attr| attr.has_name(sym::cfg)) .filter_map(|attr| single(attr.meta_item_list()?)) .filter_map(|attr| { Cfg::parse_without(attr.meta_item()?, hidden_cfg).ok().flatten() }) .fold(Cfg::True, |cfg, new_cfg| cfg & new_cfg) } else { Cfg::True } } else { Cfg::True }; for attr in self.iter() { // #[doc] if attr.doc_str().is_none() && attr.has_name(sym::doc) { // #[doc(...)] if let Some(list) = attr.meta().as_ref().and_then(|mi| mi.meta_item_list()) { for item in list { // #[doc(hidden)] if !item.has_name(sym::cfg) { continue; } // #[doc(cfg(...))] if let Some(cfg_mi) = item .meta_item() .and_then(|item| rustc_expand::config::parse_cfg(item, sess)) { match Cfg::parse(cfg_mi) { Ok(new_cfg) => cfg &= new_cfg, Err(e) => { sess.span_err(e.span, e.msg); } } } } } } } // treat #[target_feature(enable = "feat")] attributes as if they were // #[doc(cfg(target_feature = "feat"))] attributes as well for attr in 
self.lists(sym::target_feature) { if attr.has_name(sym::enable) { if let Some(feat) = attr.value_str() { let meta = attr::mk_name_value_item_str( Ident::with_dummy_span(sym::target_feature), feat, DUMMY_SP, ); if let Ok(feat_cfg) = Cfg::parse(&meta) { cfg &= feat_cfg; } } } } if cfg == Cfg::True { None } else { Some(Arc::new(cfg)) } } } crate trait NestedAttributesExt { /// Returns `true` if the attribute list contains a specific `word` fn has_word(self, word: Symbol) -> bool where Self: std::marker::Sized, { <Self as NestedAttributesExt>::get_word_attr(self, word).is_some() } /// Returns `Some(attr)` if the attribute list contains 'attr' /// corresponding to a specific `word` fn get_word_attr(self, word: Symbol) -> Option<ast::NestedMetaItem>; } impl<I: Iterator<Item = ast::NestedMetaItem>> NestedAttributesExt for I { fn get_word_attr(mut self, word: Symbol) -> Option<ast::NestedMetaItem> { self.find(|attr| attr.is_word() && attr.has_name(word)) } } /// A portion of documentation, extracted from a `#[doc]` attribute. /// /// Each variant contains the line number within the complete doc-comment where the fragment /// starts, as well as the Span where the corresponding doc comment or attribute is located. /// /// Included files are kept separate from inline doc comments so that proper line-number /// information can be given when a doctest fails. Sugared doc comments and "raw" doc comments are /// kept separate because of issue #42760. #[derive(Clone, PartialEq, Eq, Debug)] crate struct DocFragment { crate span: rustc_span::Span, /// The module this doc-comment came from. /// /// This allows distinguishing between the original documentation and a pub re-export. /// If it is `None`, the item was not re-exported. crate parent_module: Option<DefId>, crate doc: Symbol, crate kind: DocFragmentKind, crate indent: usize, } // `DocFragment` is used a lot. Make sure it doesn't unintentionally get bigger. 
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] rustc_data_structures::static_assert_size!(DocFragment, 32); #[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)] crate enum DocFragmentKind { /// A doc fragment created from a `///` or `//!` doc comment. SugaredDoc, /// A doc fragment created from a "raw" `#[doc=""]` attribute. RawDoc, } /// The goal of this function is to apply the `DocFragment` transformation that is required when /// transforming into the final Markdown, which is applying the computed indent to each line in /// each doc fragment (a `DocFragment` can contain multiple lines in case of `#[doc = ""]`). /// /// Note: remove the trailing newline where appropriate fn add_doc_fragment(out: &mut String, frag: &DocFragment) { let s = frag.doc.as_str(); let mut iter = s.lines(); if s == "" { out.push('\n'); return; } while let Some(line) = iter.next() { if line.chars().any(|c| !c.is_whitespace()) { assert!(line.len() >= frag.indent); out.push_str(&line[frag.indent..]); } else { out.push_str(line); } out.push('\n'); } } /// Collapse a collection of [`DocFragment`]s into one string, /// handling indentation and newlines as needed. crate fn collapse_doc_fragments(doc_strings: &[DocFragment]) -> String { let mut acc = String::new(); for frag in doc_strings { add_doc_fragment(&mut acc, frag); } acc.pop(); acc } /// A link that has not yet been rendered. /// /// This link will be turned into a rendered link by [`Item::links`]. #[derive(Clone, Debug, PartialEq, Eq)] crate struct ItemLink { /// The original link written in the markdown crate link: String, /// The link text displayed in the HTML. /// /// This may not be the same as `link` if there was a disambiguator /// in an intra-doc link (e.g. \[`fn@f`\]) crate link_text: String, crate did: DefId, /// The url fragment to append to the link crate fragment: Option<UrlFragment>, } pub struct RenderedLink { /// The text the link was original written as. 
///
/// This could potentially include disambiguators and backticks.
crate original_text: String,
/// The text to display in the HTML
crate new_text: String,
/// The URL to put in the `href`
crate href: String,
}

/// The attributes on an [`Item`], including attributes like `#[derive(...)]` and `#[inline]`,
/// as well as doc comments.
#[derive(Clone, Debug, Default)]
crate struct Attributes {
    crate doc_strings: Vec<DocFragment>,
    crate other_attrs: Vec<ast::Attribute>,
}

impl Attributes {
    /// Returns the nested meta items of every non-doc attribute named `name`.
    crate fn lists(&self, name: Symbol) -> impl Iterator<Item = ast::NestedMetaItem> + '_ {
        self.other_attrs.lists(name)
    }

    /// Returns `true` if any `#[doc(...)]` attribute contains the word `flag`
    /// (e.g. `#[doc(hidden)]` for `flag == sym::hidden`).
    crate fn has_doc_flag(&self, flag: Symbol) -> bool {
        for attr in &self.other_attrs {
            if !attr.has_name(sym::doc) {
                continue;
            }

            if let Some(items) = attr.meta_item_list() {
                if items.iter().filter_map(|i| i.meta_item()).any(|it| it.has_name(flag)) {
                    return true;
                }
            }
        }

        false
    }

    /// Splits `attrs` (plus any attributes inherited from a re-export) into
    /// doc fragments and non-doc attributes.
    crate fn from_ast(
        attrs: &[ast::Attribute],
        additional_attrs: Option<(&[ast::Attribute], DefId)>,
    ) -> Attributes {
        let mut doc_strings: Vec<DocFragment> = vec![];
        // Doc attributes become `DocFragment`s (and yield `None`); everything
        // else is cloned into `other_attrs`.
        let clean_attr = |(attr, parent_module): (&ast::Attribute, Option<DefId>)| {
            if let Some((value, kind)) = attr.doc_str_and_comment_kind() {
                trace!("got doc_str={:?}", value);
                let value = beautify_doc_string(value, kind);
                let kind = if attr.is_doc_comment() {
                    DocFragmentKind::SugaredDoc
                } else {
                    DocFragmentKind::RawDoc
                };

                let frag =
                    DocFragment { span: attr.span, doc: value, kind, parent_module, indent: 0 };

                doc_strings.push(frag);

                None
            } else {
                Some(attr.clone())
            }
        };

        // Additional documentation should be shown before the original documentation
        let other_attrs = additional_attrs
            .into_iter()
            .flat_map(|(attrs, id)| attrs.iter().map(move |attr| (attr, Some(id))))
            .chain(attrs.iter().map(|attr| (attr, None)))
            .filter_map(clean_attr)
            .collect();

        Attributes { doc_strings, other_attrs }
    }

    /// Finds the `doc` attribute as a NameValue and returns the corresponding
    /// value found.
crate fn doc_value(&self) -> Option<String> { let mut iter = self.doc_strings.iter(); let ori = iter.next()?; let mut out = String::new(); add_doc_fragment(&mut out, ori); for new_frag in iter { add_doc_fragment(&mut out, new_frag); } out.pop(); if out.is_empty() { None } else { Some(out) } } /// Return the doc-comments on this item, grouped by the module they came from. /// /// The module can be different if this is a re-export with added documentation. crate fn collapsed_doc_value_by_module_level(&self) -> FxHashMap<Option<DefId>, String> { let mut ret = FxHashMap::default(); if self.doc_strings.len() == 0 { return ret; } let last_index = self.doc_strings.len() - 1; for (i, new_frag) in self.doc_strings.iter().enumerate() { let out = ret.entry(new_frag.parent_module).or_default(); add_doc_fragment(out, new_frag); if i == last_index { out.pop(); } } ret } /// Finds all `doc` attributes as NameValues and returns their corresponding values, joined /// with newlines. crate fn collapsed_doc_value(&self) -> Option<String> { if self.doc_strings.is_empty() { None } else { Some(collapse_doc_fragments(&self.doc_strings)) } } crate fn get_doc_aliases(&self) -> Box<[Symbol]> { let mut aliases = FxHashSet::default(); for attr in self.other_attrs.lists(sym::doc).filter(|a| a.has_name(sym::alias)) { if let Some(values) = attr.meta_item_list() { for l in values { match l.literal().unwrap().kind { ast::LitKind::Str(s, _) => { aliases.insert(s); } _ => unreachable!(), } } } else { aliases.insert(attr.value_str().unwrap()); } } aliases.into_iter().collect::<Vec<_>>().into() } } impl PartialEq for Attributes { fn eq(&self, rhs: &Self) -> bool { self.doc_strings == rhs.doc_strings && self .other_attrs .iter() .map(|attr| attr.id) .eq(rhs.other_attrs.iter().map(|attr| attr.id)) } } impl Eq for Attributes {} #[derive(Clone, PartialEq, Eq, Debug, Hash)] crate enum GenericBound { TraitBound(PolyTrait, hir::TraitBoundModifier), Outlives(Lifetime), } impl GenericBound { crate fn 
maybe_sized(cx: &mut DocContext<'_>) -> GenericBound {
        // Builds a `?Sized` bound referring to the `Sized` lang item.
        let did = cx.tcx.require_lang_item(LangItem::Sized, None);
        let empty = cx.tcx.intern_substs(&[]);
        let path = external_path(cx, did, false, vec![], empty);
        inline::record_extern_fqn(cx, did, ItemType::Trait);
        GenericBound::TraitBound(
            PolyTrait { trait_: path, generic_params: Vec::new() },
            hir::TraitBoundModifier::Maybe,
        )
    }

    /// Returns `true` if this is an unmodified (`TBM::None`) bound on the
    /// `Sized` lang item.
    crate fn is_sized_bound(&self, cx: &DocContext<'_>) -> bool {
        use rustc_hir::TraitBoundModifier as TBM;
        if let GenericBound::TraitBound(PolyTrait { ref trait_, .. }, TBM::None) = *self {
            if Some(trait_.def_id()) == cx.tcx.lang_items().sized_trait() {
                return true;
            }
        }
        false
    }

    /// Clones out the `PolyTrait` if this is a trait bound.
    crate fn get_poly_trait(&self) -> Option<PolyTrait> {
        if let GenericBound::TraitBound(ref p, _) = *self {
            return Some(p.clone());
        }
        None
    }

    /// Clones out the trait path if this is a trait bound.
    crate fn get_trait_path(&self) -> Option<Path> {
        if let GenericBound::TraitBound(PolyTrait { ref trait_, .. }, _) = *self {
            Some(trait_.clone())
        } else {
            None
        }
    }
}

#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate struct Lifetime(pub Symbol);

impl Lifetime {
    /// The `'static` lifetime.
    crate fn statik() -> Lifetime {
        Lifetime(kw::StaticLifetime)
    }

    /// The elided lifetime, rendered as `'_`.
    crate fn elided() -> Lifetime {
        Lifetime(kw::UnderscoreLifetime)
    }
}

#[derive(Clone, Debug)]
crate enum WherePredicate {
    BoundPredicate { ty: Type, bounds: Vec<GenericBound>, bound_params: Vec<Lifetime> },
    RegionPredicate { lifetime: Lifetime, bounds: Vec<GenericBound> },
    EqPredicate { lhs: Type, rhs: Term },
}

impl WherePredicate {
    /// Returns the bounds of a bound or region predicate; `None` for
    /// equality predicates.
    crate fn get_bounds(&self) -> Option<&[GenericBound]> {
        match *self {
            WherePredicate::BoundPredicate { ref bounds, .. } => Some(bounds),
            WherePredicate::RegionPredicate { ref bounds, .. } => Some(bounds),
            _ => None,
        }
    }
}

#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate enum GenericParamDefKind {
    Lifetime { outlives: Vec<Lifetime> },
    Type { did: DefId, bounds: Vec<GenericBound>, default: Option<Box<Type>>, synthetic: bool },
    Const { did: DefId, ty: Box<Type>, default: Option<Box<String>> },
}

impl GenericParamDefKind {
    crate fn is_type(&self) -> bool {
        matches!(self, GenericParamDefKind::Type { .. })
    }
}

#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate struct GenericParamDef {
    crate name: Symbol,
    crate kind: GenericParamDefKind,
}

// `GenericParamDef` is used in many places. Make sure it doesn't unintentionally get bigger.
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(GenericParamDef, 56);

impl GenericParamDef {
    /// Returns `true` only for type parameters synthesized from `impl Trait`
    /// in argument position.
    crate fn is_synthetic_type_param(&self) -> bool {
        match self.kind {
            GenericParamDefKind::Lifetime { .. } | GenericParamDefKind::Const { .. } => false,
            GenericParamDefKind::Type { synthetic, .. } => synthetic,
        }
    }

    crate fn is_type(&self) -> bool {
        self.kind.is_type()
    }

    /// Returns the bounds of a type parameter; `None` for lifetimes and consts.
    crate fn get_bounds(&self) -> Option<&[GenericBound]> {
        match self.kind {
            GenericParamDefKind::Type { ref bounds, .. } => Some(bounds),
            _ => None,
        }
    }
}

// maybe use a Generic enum and use Vec<Generic>?
#[derive(Clone, Debug, Default)]
crate struct Generics {
    crate params: Vec<GenericParamDef>,
    crate where_predicates: Vec<WherePredicate>,
}

#[derive(Clone, Debug)]
crate struct Function {
    crate decl: FnDecl,
    crate generics: Generics,
}

#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate struct FnDecl {
    crate inputs: Arguments,
    crate output: FnRetTy,
    crate c_variadic: bool,
}

impl FnDecl {
    /// Returns how the receiver (`self`) is taken, if the first argument is a
    /// receiver at all.
    crate fn self_type(&self) -> Option<SelfTy> {
        self.inputs.values.get(0).and_then(|v| v.to_self())
    }

    /// Returns the sugared return type for an async function.
    ///
    /// For example, if the return type is `impl std::future::Future<Output = i32>`, this function
    /// will return `i32`.
    ///
    /// # Panics
    ///
    /// This function will panic if the return type does not match the expected sugaring for async
    /// functions.
    crate fn sugared_async_return_type(&self) -> FnRetTy {
        match &self.output {
            // Async desugaring produces `impl Future<Output = T>`; dig the `T`
            // out of the first bound's first associated-type binding.
            FnRetTy::Return(Type::ImplTrait(bounds)) => match &bounds[0] {
                GenericBound::TraitBound(PolyTrait { trait_, .. }, ..) => {
                    let bindings = trait_.bindings().unwrap();
                    let ret_ty = bindings[0].term();
                    let ty = ret_ty.ty().expect("Unexpected constant return term");
                    FnRetTy::Return(ty.clone())
                }
                _ => panic!("unexpected desugaring of async function"),
            },
            _ => panic!("unexpected desugaring of async function"),
        }
    }
}

#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate struct Arguments {
    crate values: Vec<Argument>,
}

#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate struct Argument {
    crate type_: Type,
    crate name: Symbol,
    /// This field is used to represent "const" arguments from the `rustc_legacy_const_generics`
    /// feature. More information in <https://github.com/rust-lang/rust/issues/83167>.
    crate is_const: bool,
}

#[derive(Clone, PartialEq, Debug)]
crate enum SelfTy {
    SelfValue,
    SelfBorrowed(Option<Lifetime>, Mutability),
    SelfExplicit(Type),
}

impl Argument {
    /// Classifies this argument as a receiver (`self`, `&self`, `&mut self`,
    /// or `self: Type`); returns `None` if its name is not `self`.
    crate fn to_self(&self) -> Option<SelfTy> {
        if self.name != kw::SelfLower {
            return None;
        }
        if self.type_.is_self_type() {
            return Some(SelfValue);
        }
        match self.type_ {
            BorrowedRef { ref lifetime, mutability, ref type_ } if type_.is_self_type() => {
                Some(SelfBorrowed(lifetime.clone(), mutability))
            }
            _ => Some(SelfExplicit(self.type_.clone())),
        }
    }
}

#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate enum FnRetTy {
    Return(Type),
    DefaultReturn,
}

impl FnRetTy {
    /// Returns the explicit return type, or `None` for the default `()`.
    crate fn as_return(&self) -> Option<&Type> {
        match self {
            Return(ret) => Some(ret),
            DefaultReturn => None,
        }
    }
}

#[derive(Clone, Debug)]
crate struct Trait {
    crate unsafety: hir::Unsafety,
    crate items: Vec<Item>,
    crate generics: Generics,
    crate bounds: Vec<GenericBound>,
    crate is_auto: bool,
}

#[derive(Clone, Debug)]
crate struct TraitAlias {
    crate generics: Generics,
    crate bounds: Vec<GenericBound>,
}

/// A trait reference, which may have higher ranked lifetimes.
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate struct PolyTrait {
    crate trait_: Path,
    crate generic_params: Vec<GenericParamDef>,
}

/// Rustdoc's representation of types, mostly based on the [`hir::Ty`].
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate enum Type {
    /// A named type, which could be a trait.
    ///
    /// This is mostly Rustdoc's version of [`hir::Path`].
    /// It has to be different because Rustdoc's [`PathSegment`] can contain cleaned generics.
    Path { path: Path },
    /// A `dyn Trait` object: `dyn for<'a> Trait<'a> + Send + 'static`
    DynTrait(Vec<PolyTrait>, Option<Lifetime>),
    /// A type parameter.
    Generic(Symbol),
    /// A primitive (aka, builtin) type.
    Primitive(PrimitiveType),
    /// A function pointer: `extern "ABI" fn(...) -> ...`
    BareFunction(Box<BareFunctionDecl>),
    /// A tuple type: `(i32, &str)`.
    Tuple(Vec<Type>),
    /// A slice type (does *not* include the `&`): `[i32]`
    Slice(Box<Type>),
    /// An array type.
    ///
    /// The `String` field is a stringified version of the array's length parameter.
    Array(Box<Type>, String),
    /// A raw pointer type: `*const i32`, `*mut i32`
    RawPointer(Mutability, Box<Type>),
    /// A reference type: `&i32`, `&'a mut Foo`
    BorrowedRef { lifetime: Option<Lifetime>, mutability: Mutability, type_: Box<Type> },

    /// A qualified path to an associated item: `<Type as Trait>::Name`
    QPath {
        assoc: Box<PathSegment>,
        self_type: Box<Type>,
        /// FIXME: This is a hack that should be removed; see [this discussion][1].
        ///
        /// [1]: https://github.com/rust-lang/rust/pull/85479#discussion_r635729093
        self_def_id: Option<DefId>,
        trait_: Path,
    },

    /// A type that is inferred: `_`
    Infer,

    /// An `impl Trait`: `impl TraitA + TraitB + ...`
    ImplTrait(Vec<GenericBound>),
}

// `Type` is used a lot. Make sure it doesn't unintentionally get bigger.
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(Type, 80);

impl Type {
    /// When comparing types for equality, it can help to ignore `&` wrapping.
    crate fn without_borrowed_ref(&self) -> &Type {
        let mut result = self;
        while let Type::BorrowedRef { type_, .. } = result {
            result = &*type_;
        }
        result
    }

    /// Check if two types are "potentially the same".
    /// This is different from `Eq`, because it knows that things like
    /// `Placeholder` are possible matches for everything.
    crate fn is_same(&self, other: &Self, cache: &Cache) -> bool {
        match (self, other) {
            // Recursive cases.
            (Type::Tuple(a), Type::Tuple(b)) => {
                a.len() == b.len() && a.iter().zip(b).all(|(a, b)| a.is_same(&b, cache))
            }
            (Type::Slice(a), Type::Slice(b)) => a.is_same(&b, cache),
            (Type::Array(a, al), Type::Array(b, bl)) => al == bl && a.is_same(&b, cache),
            (Type::RawPointer(mutability, type_), Type::RawPointer(b_mutability, b_type_)) => {
                mutability == b_mutability && type_.is_same(&b_type_, cache)
            }
            (
                Type::BorrowedRef { mutability, type_, .. },
                Type::BorrowedRef { mutability: b_mutability, type_: b_type_, .. },
            ) => mutability == b_mutability && type_.is_same(&b_type_, cache),
            // Placeholders and generics are equal to all other types.
            (Type::Infer, _) | (_, Type::Infer) => true,
            (Type::Generic(_), _) | (_, Type::Generic(_)) => true,
            // Other cases, such as primitives, just use recursion.
            (a, b) => a
                .def_id(cache)
                .and_then(|a| Some((a, b.def_id(cache)?)))
                .map(|(a, b)| a == b)
                .unwrap_or(false),
        }
    }

    /// Maps this type to the primitive it renders as, if any; references to
    /// primitives/slices/arrays map like the pointee.
    crate fn primitive_type(&self) -> Option<PrimitiveType> {
        match *self {
            Primitive(p) | BorrowedRef { type_: box Primitive(p), .. } => Some(p),
            Slice(..) | BorrowedRef { type_: box Slice(..), .. } => Some(PrimitiveType::Slice),
            Array(..) | BorrowedRef { type_: box Array(..), .. } => Some(PrimitiveType::Array),
            Tuple(ref tys) => {
                if tys.is_empty() {
                    Some(PrimitiveType::Unit)
                } else {
                    Some(PrimitiveType::Tuple)
                }
            }
            RawPointer(..) => Some(PrimitiveType::RawPointer),
            BareFunction(..) => Some(PrimitiveType::Fn),
            _ => None,
        }
    }

    /// Checks if this is a `T::Name` path for an associated type.
    crate fn is_assoc_ty(&self) -> bool {
        match self {
            Type::Path { path, .. } => path.is_assoc_ty(),
            _ => false,
        }
    }

    /// Returns `true` if this is the literal `Self` type parameter.
    crate fn is_self_type(&self) -> bool {
        match *self {
            Generic(name) => name == kw::SelfUpper,
            _ => false,
        }
    }

    /// Returns the generic type arguments of the last path segment, if this is
    /// a path type.
    crate fn generics(&self) -> Option<Vec<&Type>> {
        match self {
            Type::Path { path, .. } => path.generics(),
            _ => None,
        }
    }

    crate fn is_full_generic(&self) -> bool {
        matches!(self, Type::Generic(_))
    }

    crate fn is_primitive(&self) -> bool {
        self.primitive_type().is_some()
    }

    /// Decomposes a `QPath` into `(self type, trait def id, associated segment)`.
    crate fn projection(&self) -> Option<(&Type, DefId, PathSegment)> {
        if let QPath { self_type, trait_, assoc, .. } = self {
            Some((&self_type, trait_.def_id(), *assoc.clone()))
        } else {
            None
        }
    }

    // Resolves to a `DefId`, going through the primitive-location cache for
    // primitive-like types. `cache == None` means primitives resolve to `None`.
    fn inner_def_id(&self, cache: Option<&Cache>) -> Option<DefId> {
        let t: PrimitiveType = match *self {
            Type::Path { ref path } => return Some(path.def_id()),
            DynTrait(ref bounds, _) => return Some(bounds[0].trait_.def_id()),
            Primitive(p) => return cache.and_then(|c| c.primitive_locations.get(&p).cloned()),
            BorrowedRef { type_: box Generic(..), .. } => PrimitiveType::Reference,
            BorrowedRef { ref type_, .. } => return type_.inner_def_id(cache),
            Tuple(ref tys) => {
                if tys.is_empty() {
                    PrimitiveType::Unit
                } else {
                    PrimitiveType::Tuple
                }
            }
            BareFunction(..) => PrimitiveType::Fn,
            Slice(..) => PrimitiveType::Slice,
            Array(..) => PrimitiveType::Array,
            RawPointer(..) => PrimitiveType::RawPointer,
            QPath { ref self_type, .. } => return self_type.inner_def_id(cache),
            Generic(_) | Infer | ImplTrait(_) => return None,
        };
        cache.and_then(|c| Primitive(t).def_id(c))
    }

    /// Use this method to get the [DefId] of a [clean] AST node, including [PrimitiveType]s.
    ///
    /// [clean]: crate::clean
    crate fn def_id(&self, cache: &Cache) -> Option<DefId> {
        self.inner_def_id(Some(cache))
    }
}

/// A primitive (aka, builtin) type.
///
/// This represents things like `i32`, `str`, etc.
///
/// N.B. This has to be different from [`hir::PrimTy`] because it also includes types that aren't
/// paths, like [`Self::Unit`].
#[derive(Clone, PartialEq, Eq, Hash, Copy, Debug)]
crate enum PrimitiveType {
    Isize,
    I8,
    I16,
    I32,
    I64,
    I128,
    Usize,
    U8,
    U16,
    U32,
    U64,
    U128,
    F32,
    F64,
    Char,
    Bool,
    Str,
    Slice,
    Array,
    Tuple,
    Unit,
    RawPointer,
    Reference,
    Fn,
    Never,
}

type SimplifiedTypes = FxHashMap<PrimitiveType, ArrayVec<SimplifiedType, 2>>;

impl PrimitiveType {
    /// Converts the HIR primitive-type representation to rustdoc's.
    crate fn from_hir(prim: hir::PrimTy) -> PrimitiveType {
        use ast::{FloatTy, IntTy, UintTy};
        match prim {
            hir::PrimTy::Int(IntTy::Isize) => PrimitiveType::Isize,
            hir::PrimTy::Int(IntTy::I8) => PrimitiveType::I8,
            hir::PrimTy::Int(IntTy::I16) => PrimitiveType::I16,
            hir::PrimTy::Int(IntTy::I32) => PrimitiveType::I32,
            hir::PrimTy::Int(IntTy::I64) => PrimitiveType::I64,
            hir::PrimTy::Int(IntTy::I128) => PrimitiveType::I128,
            hir::PrimTy::Uint(UintTy::Usize) => PrimitiveType::Usize,
            hir::PrimTy::Uint(UintTy::U8) => PrimitiveType::U8,
            hir::PrimTy::Uint(UintTy::U16) => PrimitiveType::U16,
            hir::PrimTy::Uint(UintTy::U32) => PrimitiveType::U32,
            hir::PrimTy::Uint(UintTy::U64) => PrimitiveType::U64,
            hir::PrimTy::Uint(UintTy::U128) => PrimitiveType::U128,
            hir::PrimTy::Float(FloatTy::F32) => PrimitiveType::F32,
            hir::PrimTy::Float(FloatTy::F64) => PrimitiveType::F64,
            hir::PrimTy::Str => PrimitiveType::Str,
            hir::PrimTy::Bool => PrimitiveType::Bool,
            hir::PrimTy::Char => PrimitiveType::Char,
        }
    }

    /// Parses a primitive's name (as used in `doc(primitive)` modules) into a
    /// `PrimitiveType`; returns `None` for unrecognized symbols.
    crate fn from_symbol(s: Symbol) -> Option<PrimitiveType> {
        match s {
            sym::isize => Some(PrimitiveType::Isize),
            sym::i8 => Some(PrimitiveType::I8),
            sym::i16 => Some(PrimitiveType::I16),
            sym::i32 => Some(PrimitiveType::I32),
            sym::i64 => Some(PrimitiveType::I64),
            sym::i128 => Some(PrimitiveType::I128),
            sym::usize => Some(PrimitiveType::Usize),
            sym::u8 => Some(PrimitiveType::U8),
            sym::u16 => Some(PrimitiveType::U16),
            sym::u32 => Some(PrimitiveType::U32),
            sym::u64 => Some(PrimitiveType::U64),
            sym::u128 => Some(PrimitiveType::U128),
            sym::bool => Some(PrimitiveType::Bool),
            sym::char => Some(PrimitiveType::Char),
            sym::str => Some(PrimitiveType::Str),
            sym::f32 => Some(PrimitiveType::F32),
            sym::f64 => Some(PrimitiveType::F64),
            sym::array => Some(PrimitiveType::Array),
            sym::slice => Some(PrimitiveType::Slice),
            sym::tuple => Some(PrimitiveType::Tuple),
            sym::unit => Some(PrimitiveType::Unit),
            sym::pointer => Some(PrimitiveType::RawPointer),
            sym::reference => Some(PrimitiveType::Reference),
            kw::Fn => Some(PrimitiveType::Fn),
            sym::never => Some(PrimitiveType::Never),
            _ => None,
        }
    }

    /// Lazily-built, process-wide map from each primitive to the simplified
    /// type(s) used for fast impl lookup.
    crate fn simplified_types() -> &'static SimplifiedTypes {
        use ty::fast_reject::SimplifiedTypeGen::*;
        use ty::{FloatTy, IntTy, UintTy};
        use PrimitiveType::*;
        static CELL: OnceCell<SimplifiedTypes> = OnceCell::new();

        let single = |x| iter::once(x).collect();
        CELL.get_or_init(move || {
            map! {
                Isize => single(IntSimplifiedType(IntTy::Isize)),
                I8 => single(IntSimplifiedType(IntTy::I8)),
                I16 => single(IntSimplifiedType(IntTy::I16)),
                I32 => single(IntSimplifiedType(IntTy::I32)),
                I64 => single(IntSimplifiedType(IntTy::I64)),
                I128 => single(IntSimplifiedType(IntTy::I128)),
                Usize => single(UintSimplifiedType(UintTy::Usize)),
                U8 => single(UintSimplifiedType(UintTy::U8)),
                U16 => single(UintSimplifiedType(UintTy::U16)),
                U32 => single(UintSimplifiedType(UintTy::U32)),
                U64 => single(UintSimplifiedType(UintTy::U64)),
                U128 => single(UintSimplifiedType(UintTy::U128)),
                F32 => single(FloatSimplifiedType(FloatTy::F32)),
                F64 => single(FloatSimplifiedType(FloatTy::F64)),
                Str => single(StrSimplifiedType),
                Bool => single(BoolSimplifiedType),
                Char => single(CharSimplifiedType),
                Array => single(ArraySimplifiedType),
                Slice => single(SliceSimplifiedType),
                // FIXME: If we ever add an inherent impl for tuples
                // with different lengths, they won't show in rustdoc.
                //
                // Either manually update this arrayvec at this point
                // or start with a more complex refactoring.
                Tuple => [TupleSimplifiedType(2), TupleSimplifiedType(3)].into(),
                Unit => single(TupleSimplifiedType(0)),
                RawPointer => [PtrSimplifiedType(Mutability::Not), PtrSimplifiedType(Mutability::Mut)].into(),
                Reference => [RefSimplifiedType(Mutability::Not), RefSimplifiedType(Mutability::Mut)].into(),
                // FIXME: This will be wrong if we ever add inherent impls
                // for function pointers.
                Fn => ArrayVec::new(),
                Never => single(NeverSimplifiedType),
            }
        })
    }

    /// All incoherent (built-in) impls for this primitive.
    crate fn impls<'tcx>(&self, tcx: TyCtxt<'tcx>) -> impl Iterator<Item = DefId> + 'tcx {
        Self::simplified_types()
            .get(self)
            .into_iter()
            .flatten()
            .flat_map(move |&simp| tcx.incoherent_impls(simp))
            .copied()
    }

    /// All incoherent impls for every primitive type.
    crate fn all_impls(tcx: TyCtxt<'_>) -> impl Iterator<Item = DefId> + '_ {
        Self::simplified_types()
            .values()
            .flatten()
            .flat_map(move |&simp| tcx.incoherent_impls(simp))
            .copied()
    }

    /// Inverse of [`Self::from_symbol`].
    crate fn as_sym(&self) -> Symbol {
        use PrimitiveType::*;
        match self {
            Isize => sym::isize,
            I8 => sym::i8,
            I16 => sym::i16,
            I32 => sym::i32,
            I64 => sym::i64,
            I128 => sym::i128,
            Usize => sym::usize,
            U8 => sym::u8,
            U16 => sym::u16,
            U32 => sym::u32,
            U64 => sym::u64,
            U128 => sym::u128,
            F32 => sym::f32,
            F64 => sym::f64,
            Str => sym::str,
            Bool => sym::bool,
            Char => sym::char,
            Array => sym::array,
            Slice => sym::slice,
            Tuple => sym::tuple,
            Unit => sym::unit,
            RawPointer => sym::pointer,
            Reference => sym::reference,
            Fn => kw::Fn,
            Never => sym::never,
        }
    }

    /// Returns the DefId of the module with `doc(primitive)` for this primitive type.
    /// Panics if there is no such module.
    ///
    /// This gives precedence to primitives defined in the current crate, and deprioritizes primitives defined in `core`,
    /// but otherwise, if multiple crates define the same primitive, there is no guarantee of which will be picked.
    /// In particular, if a crate depends on both `std` and another crate that also defines `doc(primitive)`, then
    /// it's entirely random whether `std` or the other crate is picked.
    /// (no_std crates are usually fine unless multiple dependencies define a primitive.)
    crate fn primitive_locations(tcx: TyCtxt<'_>) -> &FxHashMap<PrimitiveType, DefId> {
        static PRIMITIVE_LOCATIONS: OnceCell<FxHashMap<PrimitiveType, DefId>> = OnceCell::new();
        PRIMITIVE_LOCATIONS.get_or_init(|| {
            let mut primitive_locations = FxHashMap::default();
            // NOTE: technically this misses crates that are only passed with `--extern` and not loaded when checking the crate.
            // This is a degenerate case that I don't plan to support.
            for &crate_num in tcx.crates(()) {
                let e = ExternalCrate { crate_num };
                let crate_name = e.name(tcx);
                debug!(?crate_num, ?crate_name);
                for &(def_id, prim) in &e.primitives(tcx) {
                    // HACK: try to link to std instead where possible
                    if crate_name == sym::core && primitive_locations.contains_key(&prim) {
                        continue;
                    }
                    primitive_locations.insert(prim, def_id);
                }
            }
            // Local definitions win unconditionally (inserted last, overwriting).
            let local_primitives = ExternalCrate { crate_num: LOCAL_CRATE }.primitives(tcx);
            for (def_id, prim) in local_primitives {
                primitive_locations.insert(prim, def_id);
            }
            primitive_locations
        })
    }
}

impl From<ast::IntTy> for PrimitiveType {
    fn from(int_ty: ast::IntTy) -> PrimitiveType {
        match int_ty {
            ast::IntTy::Isize => PrimitiveType::Isize,
            ast::IntTy::I8 => PrimitiveType::I8,
            ast::IntTy::I16 => PrimitiveType::I16,
            ast::IntTy::I32 => PrimitiveType::I32,
            ast::IntTy::I64 => PrimitiveType::I64,
            ast::IntTy::I128 => PrimitiveType::I128,
        }
    }
}

impl From<ast::UintTy> for PrimitiveType {
    fn from(uint_ty: ast::UintTy) -> PrimitiveType {
        match uint_ty {
            ast::UintTy::Usize => PrimitiveType::Usize,
            ast::UintTy::U8 => PrimitiveType::U8,
            ast::UintTy::U16 => PrimitiveType::U16,
            ast::UintTy::U32 => PrimitiveType::U32,
            ast::UintTy::U64 => PrimitiveType::U64,
            ast::UintTy::U128 => PrimitiveType::U128,
        }
    }
}

impl From<ast::FloatTy> for PrimitiveType {
    fn from(float_ty: ast::FloatTy) -> PrimitiveType {
        match float_ty {
            ast::FloatTy::F32 => PrimitiveType::F32,
            ast::FloatTy::F64 => PrimitiveType::F64,
        }
    }
}

impl From<ty::IntTy>
for PrimitiveType {
    fn from(int_ty: ty::IntTy) -> PrimitiveType {
        match int_ty {
            ty::IntTy::Isize => PrimitiveType::Isize,
            ty::IntTy::I8 => PrimitiveType::I8,
            ty::IntTy::I16 => PrimitiveType::I16,
            ty::IntTy::I32 => PrimitiveType::I32,
            ty::IntTy::I64 => PrimitiveType::I64,
            ty::IntTy::I128 => PrimitiveType::I128,
        }
    }
}

impl From<ty::UintTy> for PrimitiveType {
    fn from(uint_ty: ty::UintTy) -> PrimitiveType {
        match uint_ty {
            ty::UintTy::Usize => PrimitiveType::Usize,
            ty::UintTy::U8 => PrimitiveType::U8,
            ty::UintTy::U16 => PrimitiveType::U16,
            ty::UintTy::U32 => PrimitiveType::U32,
            ty::UintTy::U64 => PrimitiveType::U64,
            ty::UintTy::U128 => PrimitiveType::U128,
        }
    }
}

impl From<ty::FloatTy> for PrimitiveType {
    fn from(float_ty: ty::FloatTy) -> PrimitiveType {
        match float_ty {
            ty::FloatTy::F32 => PrimitiveType::F32,
            ty::FloatTy::F64 => PrimitiveType::F64,
        }
    }
}

impl From<hir::PrimTy> for PrimitiveType {
    fn from(prim_ty: hir::PrimTy) -> PrimitiveType {
        match prim_ty {
            hir::PrimTy::Int(int_ty) => int_ty.into(),
            hir::PrimTy::Uint(uint_ty) => uint_ty.into(),
            hir::PrimTy::Float(float_ty) => float_ty.into(),
            hir::PrimTy::Str => PrimitiveType::Str,
            hir::PrimTy::Bool => PrimitiveType::Bool,
            hir::PrimTy::Char => PrimitiveType::Char,
        }
    }
}

#[derive(Copy, Clone, Debug)]
crate enum Visibility {
    /// `pub`
    Public,
    /// Visibility inherited from parent.
    ///
    /// For example, this is the visibility of private items and of enum variants.
    Inherited,
    /// `pub(crate)`, `pub(super)`, or `pub(in path::to::somewhere)`
    Restricted(DefId),
}

impl Visibility {
    crate fn is_public(&self) -> bool {
        matches!(self, Visibility::Public)
    }
}

#[derive(Clone, Debug)]
crate struct Struct {
    crate struct_type: CtorKind,
    crate generics: Generics,
    crate fields: Vec<Item>,
    // `true` when private/hidden fields were removed by a strip pass.
    crate fields_stripped: bool,
}

#[derive(Clone, Debug)]
crate struct Union {
    crate generics: Generics,
    crate fields: Vec<Item>,
    crate fields_stripped: bool,
}

/// This is a more limited form of the standard Struct, different in that
/// it lacks the things most items have (name, id, parameterization). Found
/// only as a variant in an enum.
#[derive(Clone, Debug)]
crate struct VariantStruct {
    crate struct_type: CtorKind,
    crate fields: Vec<Item>,
    crate fields_stripped: bool,
}

#[derive(Clone, Debug)]
crate struct Enum {
    crate variants: IndexVec<VariantIdx, Item>,
    crate generics: Generics,
    crate variants_stripped: bool,
}

#[derive(Clone, Debug)]
crate enum Variant {
    CLike,
    Tuple(Vec<Item>),
    Struct(VariantStruct),
}

/// Small wrapper around [`rustc_span::Span`] that adds helper methods
/// and enforces calling [`rustc_span::Span::source_callsite()`].
#[derive(Copy, Clone, Debug)]
crate struct Span(rustc_span::Span);

impl Span {
    /// Wraps a [`rustc_span::Span`]. In case this span is the result of a macro expansion, the
    /// span will be updated to point to the macro invocation instead of the macro definition.
/// /// (See rust-lang/rust#39726) crate fn new(sp: rustc_span::Span) -> Self { Self(sp.source_callsite()) } crate fn inner(&self) -> rustc_span::Span { self.0 } crate fn dummy() -> Self { Self(rustc_span::DUMMY_SP) } crate fn is_dummy(&self) -> bool { self.0.is_dummy() } crate fn filename(&self, sess: &Session) -> FileName { sess.source_map().span_to_filename(self.0) } crate fn lo(&self, sess: &Session) -> Loc { sess.source_map().lookup_char_pos(self.0.lo()) } crate fn hi(&self, sess: &Session) -> Loc { sess.source_map().lookup_char_pos(self.0.hi()) } crate fn cnum(&self, sess: &Session) -> CrateNum { // FIXME: is there a time when the lo and hi crate would be different? self.lo(sess).file.cnum } } #[derive(Clone, PartialEq, Eq, Debug, Hash)] crate struct Path { crate res: Res, crate segments: Vec<PathSegment>, } impl Path { crate fn def_id(&self) -> DefId { self.res.def_id() } crate fn last(&self) -> Symbol { self.segments.last().expect("segments were empty").name } crate fn whole_name(&self) -> String { self.segments .iter() .map(|s| if s.name == kw::PathRoot { String::new() } else { s.name.to_string() }) .intersperse("::".into()) .collect() } /// Checks if this is a `T::Name` path for an associated type. crate fn is_assoc_ty(&self) -> bool { match self.res { Res::SelfTy { .. } if self.segments.len() != 1 => true, Res::Def(DefKind::TyParam, _) if self.segments.len() != 1 => true, Res::Def(DefKind::AssocTy, _) => true, _ => false, } } crate fn generics(&self) -> Option<Vec<&Type>> { self.segments.last().and_then(|seg| { if let GenericArgs::AngleBracketed { ref args, .. } = seg.args { Some( args.iter() .filter_map(|arg| match arg { GenericArg::Type(ty) => Some(ty), _ => None, }) .collect(), ) } else { None } }) } crate fn bindings(&self) -> Option<&[TypeBinding]> { self.segments.last().and_then(|seg| { if let GenericArgs::AngleBracketed { ref bindings, .. 
} = seg.args { Some(&**bindings) } else { None } }) } } #[derive(Clone, PartialEq, Eq, Debug, Hash)] crate enum GenericArg { Lifetime(Lifetime), Type(Type), Const(Box<Constant>), Infer, } // `GenericArg` can occur many times in a single `Path`, so make sure it // doesn't increase in size unexpectedly. #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] rustc_data_structures::static_assert_size!(GenericArg, 88); #[derive(Clone, PartialEq, Eq, Debug, Hash)] crate enum GenericArgs { AngleBracketed { args: Vec<GenericArg>, bindings: ThinVec<TypeBinding> }, Parenthesized { inputs: Vec<Type>, output: Option<Box<Type>> }, } // `GenericArgs` is in every `PathSegment`, so its size can significantly // affect rustdoc's memory usage. #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] rustc_data_structures::static_assert_size!(GenericArgs, 40); #[derive(Clone, PartialEq, Eq, Debug, Hash)] crate struct PathSegment { crate name: Symbol, crate args: GenericArgs, } // `PathSegment` usually occurs multiple times in every `Path`, so its size can // significantly affect rustdoc's memory usage. #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] rustc_data_structures::static_assert_size!(PathSegment, 48); #[derive(Clone, Debug)] crate struct Typedef { crate type_: Type, crate generics: Generics, /// `type_` can come from either the HIR or from metadata. If it comes from HIR, it may be a type /// alias instead of the final type. This will always have the final type, regardless of whether /// `type_` came from HIR or from metadata. /// /// If `item_type.is_none()`, `type_` is guaranteed to come from metadata (and therefore hold the /// final type). 
crate item_type: Option<Type>, } #[derive(Clone, Debug)] crate struct OpaqueTy { crate bounds: Vec<GenericBound>, crate generics: Generics, } #[derive(Clone, PartialEq, Eq, Debug, Hash)] crate struct BareFunctionDecl { crate unsafety: hir::Unsafety, crate generic_params: Vec<GenericParamDef>, crate decl: FnDecl, crate abi: Abi, } #[derive(Clone, Debug)] crate struct Static { crate type_: Type, crate mutability: Mutability, crate expr: Option<BodyId>, } #[derive(Clone, PartialEq, Eq, Hash, Debug)] crate struct Constant { crate type_: Type, crate kind: ConstantKind, } #[derive(Clone, PartialEq, Eq, Hash, Debug)] crate enum Term { Type(Type), Constant(Constant), } impl Term { crate fn ty(&self) -> Option<&Type> { if let Term::Type(ty) = self { Some(ty) } else { None } } } impl From<Type> for Term { fn from(ty: Type) -> Self { Term::Type(ty) } } #[derive(Clone, PartialEq, Eq, Hash, Debug)] crate enum ConstantKind { /// This is the wrapper around `ty::Const` for a non-local constant. Because it doesn't have a /// `BodyId`, we need to handle it on its own. /// /// Note that `ty::Const` includes generic parameters, and may not always be uniquely identified /// by a DefId. So this field must be different from `Extern`. TyConst { expr: String }, /// A constant (expression) that's not an item or associated item. These are usually found /// nested inside types (e.g., array lengths) or expressions (e.g., repeat counts), and also /// used to define explicit discriminant values for enum variants. Anonymous { body: BodyId }, /// A constant from a different crate. 
Extern { def_id: DefId }, /// `const FOO: u32 = ...;` Local { def_id: DefId, body: BodyId }, } impl Constant { crate fn expr(&self, tcx: TyCtxt<'_>) -> String { self.kind.expr(tcx) } crate fn value(&self, tcx: TyCtxt<'_>) -> Option<String> { self.kind.value(tcx) } crate fn is_literal(&self, tcx: TyCtxt<'_>) -> bool { self.kind.is_literal(tcx) } } impl ConstantKind { crate fn expr(&self, tcx: TyCtxt<'_>) -> String { match *self { ConstantKind::TyConst { ref expr } => expr.clone(), ConstantKind::Extern { def_id } => print_inlined_const(tcx, def_id), ConstantKind::Local { body, .. } | ConstantKind::Anonymous { body } => { print_const_expr(tcx, body) } } } crate fn value(&self, tcx: TyCtxt<'_>) -> Option<String> { match *self { ConstantKind::TyConst { .. } | ConstantKind::Anonymous { .. } => None, ConstantKind::Extern { def_id } | ConstantKind::Local { def_id, .. } => { print_evaluated_const(tcx, def_id) } } } crate fn is_literal(&self, tcx: TyCtxt<'_>) -> bool { match *self { ConstantKind::TyConst { .. } => false, ConstantKind::Extern { def_id } => def_id.as_local().map_or(false, |def_id| { is_literal_expr(tcx, tcx.hir().local_def_id_to_hir_id(def_id)) }), ConstantKind::Local { body, .. 
} | ConstantKind::Anonymous { body } => { is_literal_expr(tcx, body.hir_id) } } } } #[derive(Clone, Debug)] crate struct Impl { crate unsafety: hir::Unsafety, crate generics: Generics, crate trait_: Option<Path>, crate for_: Type, crate items: Vec<Item>, crate polarity: ty::ImplPolarity, crate kind: ImplKind, } impl Impl { crate fn provided_trait_methods(&self, tcx: TyCtxt<'_>) -> FxHashSet<Symbol> { self.trait_ .as_ref() .map(|t| t.def_id()) .map(|did| tcx.provided_trait_methods(did).map(|meth| meth.name).collect()) .unwrap_or_default() } } #[derive(Clone, Debug)] crate enum ImplKind { Normal, Auto, Blanket(Box<Type>), } impl ImplKind { crate fn is_auto(&self) -> bool { matches!(self, ImplKind::Auto) } crate fn is_blanket(&self) -> bool { matches!(self, ImplKind::Blanket(_)) } crate fn as_blanket_ty(&self) -> Option<&Type> { match self { ImplKind::Blanket(ty) => Some(ty), _ => None, } } } #[derive(Clone, Debug)] crate struct Import { crate kind: ImportKind, crate source: ImportSource, crate should_be_displayed: bool, } impl Import { crate fn new_simple(name: Symbol, source: ImportSource, should_be_displayed: bool) -> Self { Self { kind: ImportKind::Simple(name), source, should_be_displayed } } crate fn new_glob(source: ImportSource, should_be_displayed: bool) -> Self { Self { kind: ImportKind::Glob, source, should_be_displayed } } } #[derive(Clone, Debug)] crate enum ImportKind { // use source as str; Simple(Symbol), // use source::*; Glob, } #[derive(Clone, Debug)] crate struct ImportSource { crate path: Path, crate did: Option<DefId>, } #[derive(Clone, Debug)] crate struct Macro { crate source: String, } #[derive(Clone, Debug)] crate struct ProcMacro { crate kind: MacroKind, crate helpers: Vec<Symbol>, } /// An type binding on an associated type (e.g., `A = Bar` in `Foo<A = Bar>` or /// `A: Send + Sync` in `Foo<A: Send + Sync>`). 
#[derive(Clone, PartialEq, Eq, Debug, Hash)] crate struct TypeBinding { crate assoc: PathSegment, crate kind: TypeBindingKind, } #[derive(Clone, PartialEq, Eq, Debug, Hash)] crate enum TypeBindingKind { Equality { term: Term }, Constraint { bounds: Vec<GenericBound> }, } impl TypeBinding { crate fn term(&self) -> &Term { match self.kind { TypeBindingKind::Equality { ref term } => term, _ => panic!("expected equality type binding for parenthesized generic args"), } } } /// The type, lifetime, or constant that a private type alias's parameter should be /// replaced with when expanding a use of that type alias. /// /// For example: /// /// ``` /// type PrivAlias<T> = Vec<T>; /// /// pub fn public_fn() -> PrivAlias<i32> { vec![] } /// ``` /// /// `public_fn`'s docs will show it as returning `Vec<i32>`, since `PrivAlias` is private. /// [`SubstParam`] is used to record that `T` should be mapped to `i32`. crate enum SubstParam { Type(Type), Lifetime(Lifetime), Constant(Constant), } impl SubstParam { crate fn as_ty(&self) -> Option<&Type> { if let Self::Type(ty) = self { Some(ty) } else { None } } crate fn as_lt(&self) -> Option<&Lifetime> { if let Self::Lifetime(lt) = self { Some(lt) } else { None } } }
33.806641
154
0.561435
7a527e287563fbda89cd1e5397dfac62e04c0bdc
403
use std::{
    fs::File,
    io::{BufRead, BufReader},
    path::Path,
};

/// Streams the characters of the file at `path`, line by line.
///
/// Each line is yielded followed by a single `'\n'`, so the stream is always
/// newline-terminated even when the file's last line is not, and `\r\n` line
/// endings are normalized to `'\n'` (both behaviors follow from
/// `BufRead::lines`, which strips the terminator).
///
/// # Errors
///
/// Returns `Err` if the file cannot be opened.
///
/// # Panics
///
/// Panics if an I/O error occurs while reading lines during iteration: the
/// returned iterator yields plain `char`s and has no channel to report an
/// `io::Error` to the caller.
pub fn file_char_stream(path: &Path) -> Result<impl Iterator<Item = char>, std::io::Error> {
    let reader = BufReader::new(File::open(path)?);
    Ok(reader.lines().flat_map(|line| {
        // A mid-iteration read failure cannot be surfaced through the `char`
        // items, so fail with a descriptive message instead of a bare unwrap.
        let mut line = line.expect("I/O error while reading line from file");
        line.push('\n');
        // `chars()` borrows `line`; buffer the chars so the inner iterator
        // owns its data and can outlive this closure frame.
        line.chars().collect::<Vec<_>>().into_iter()
    }))
}
23.705882
92
0.503722
0e19a00d84b2df3ede6a7a74701ba82c3ab25288
7,021
// Copyright (c) 2018-2021 The MobileCoin Foundation

use core::{
    convert::TryFrom,
    fmt::{Debug, Display},
    hash::Hash,
    result::Result as StdResult,
};
use displaydoc::Display;
use ed25519::signature::Error as SignatureError;
use mc_common::{NodeID, ResponderId, ResponderIdParseError};
use mc_crypto_keys::{DistinguishedEncoding, Ed25519Public, KeyError};
use std::{path::PathBuf, str::FromStr};
use url::Url;

// NOTE: the `/// ...` doc comments on the variants below are consumed by
// `displaydoc::Display` to generate the `Display` impl — they are part of the
// runtime error messages, not just documentation.
#[derive(Debug, Display, Ord, PartialOrd, Eq, PartialEq, Clone)]
pub enum UriConversionError {
    /// Error converting key: {0}
    KeyConversion(KeyError),
    /// Error with Ed25519 signature
    Signature,
    /// Error decoding base64
    Base64Decode,
    /// Error parsing ResponderId {0}, {1}
    ResponderId(String, ResponderIdParseError),
    /// No consensus-msg-key provided
    NoPubkey,
}

impl From<KeyError> for UriConversionError {
    fn from(src: KeyError) -> Self {
        UriConversionError::KeyConversion(src)
    }
}

impl From<SignatureError> for UriConversionError {
    fn from(_src: SignatureError) -> Self {
        // NOTE: ed25519::signature::Error does not implement Eq/Ord
        UriConversionError::Signature
    }
}

impl From<base64::DecodeError> for UriConversionError {
    fn from(_src: base64::DecodeError) -> Self {
        // NOTE: Base64::DecodeError does not implement Eq/Ord
        UriConversionError::Base64Decode
    }
}

impl From<ResponderIdParseError> for UriConversionError {
    fn from(src: ResponderIdParseError) -> Self {
        // Both arms keep the original error as context; the UTF-8 failure case
        // hex-encodes the offending bytes so they are printable.
        match src.clone() {
            ResponderIdParseError::FromUtf8Error(contents) => {
                UriConversionError::ResponderId(hex::encode(contents), src)
            }
            ResponderIdParseError::InvalidFormat(contents) => {
                UriConversionError::ResponderId(contents, src)
            }
        }
    }
}

/// A base URI trait.
pub trait ConnectionUri:
    Clone + Display + Eq + Hash + Ord + PartialEq + PartialOrd + Send + Sync
{
    /// Retrieve a reference to the underlying Url object.
    fn url(&self) -> &Url;

    /// Retrieve the host part of the URI.
    fn host(&self) -> String;

    /// Retrieve the port part of the URI.
    fn port(&self) -> u16;

    /// Retrieve the host:port string for this connection.
    fn addr(&self) -> String;

    /// Whether TLS should be used for this connection.
    fn use_tls(&self) -> bool;

    /// Retrieve the username part of the URI, or an empty string if one is not available.
    fn username(&self) -> String;

    /// Retrieve the password part of the URI, or an empty string if one is not available.
    fn password(&self) -> String;

    /// Retrieve the responder id for this connection.
    fn responder_id(&self) -> StdResult<ResponderId, UriConversionError> {
        // .addr() is always expected to return a host:port, so from_str should not fail.
        Ok(ResponderId::from_str(&self.addr())?)
    }

    /// Retrieve the `NodeID` (responder id plus message-signing public key)
    /// for this connection.
    fn node_id(&self) -> StdResult<NodeID, UriConversionError> {
        Ok(NodeID {
            responder_id: self.responder_id()?,
            public_key: self.consensus_msg_key()?,
        })
    }

    /// Retrieve the Public Key for Message Signing for this connection.
    ///
    /// Public keys via URIs are expected to be either hex or base64 encoded, with the
    /// key algorithm specified in the URI as well, for future compatibility
    /// with different key schemes.
    // FIXME: Add key ?algo=ED25519
    fn consensus_msg_key(&self) -> StdResult<Ed25519Public, UriConversionError> {
        if let Some(pubkey) = self.get_param("consensus-msg-key") {
            // Try hex first; on failure fall back to URL-safe base64 holding a
            // DER-encoded key.
            match hex::decode(&pubkey) {
                Ok(pubkey_bytes) => Ok(Ed25519Public::try_from(pubkey_bytes.as_slice())?),
                Err(_e) => {
                    let pubkey_bytes = base64::decode_config(&pubkey, base64::URL_SAFE)?;
                    Ok(Ed25519Public::try_from_der(&pubkey_bytes)?)
                }
            }
        } else {
            Err(UriConversionError::NoPubkey)
        }
    }

    /// Get the value of a query parameter, if parameter is available.
    ///
    /// Note that a parameter present with an empty value is treated as absent.
    fn get_param(&self, name: &str) -> Option<String> {
        self.url().query_pairs().find_map(|(k, v)| {
            if k == name && !v.is_empty() { Some(v.to_string()) } else { None }
        })
    }

    /// Get the value of a boolean query parameter.
    ///
    /// Only the literal values "1" and "true" count as true; anything else
    /// (including an absent parameter, which defaults to "0") is false.
    fn get_bool_param(&self, name: &str) -> bool {
        let p = self.get_param(name).unwrap_or_else(|| "0".into());
        p == "1" || p == "true"
    }

    /// Optional TLS hostname override.
    fn tls_hostname_override(&self) -> Option<String> {
        self.get_param("tls-hostname")
    }

    /// Retrieve the CA bundle to use for this connection. If the `ca-bundle` query parameter is
    /// present, we will error if we fail at loading a certificate. When it is not present we will
    /// make a best-effort attempt and return Ok(None) if no certificate could be loaded.
    fn ca_bundle(&self) -> StdResult<Option<Vec<u8>>, String> {
        let ca_bundle_path = self.get_param("ca-bundle").map(PathBuf::from);

        // If we haven't received a ca-bundle query parameter, we're okay with host_cert not
        // returning anything. If the ca-bundle query parameter was present we will propagate
        // errors from `read_ca_bundle`.
        ca_bundle_path.map_or_else(
            || Ok(mc_util_host_cert::read_ca_bundle(None).ok()),
            |bundle_path| mc_util_host_cert::read_ca_bundle(Some(bundle_path)).map(Some),
        )
    }

    /// Retrieve the TLS chain file path to use for this connection.
    fn tls_chain_path(&self) -> StdResult<String, String> {
        Ok(self
            .get_param("tls-chain")
            .ok_or_else(|| format!("Missing tls-chain query parameter for {}", self.url()))?)
    }

    /// Retrieve the TLS chain to use for this connection.
    fn tls_chain(&self) -> StdResult<Vec<u8>, String> {
        let path = self.tls_chain_path()?;
        std::fs::read(path.clone())
            .map_err(|e| format!("Failed reading TLS chain from {}: {:?}", path, e))
    }

    /// Retrieve the TLS key file path to use for this connection.
    fn tls_key_path(&self) -> StdResult<String, String> {
        Ok(self
            .get_param("tls-key")
            .ok_or_else(|| format!("Missing tls-key query parameter for {}", self.url()))?)
    }

    /// Retrieve the TLS key to use for this connection.
    fn tls_key(&self) -> StdResult<Vec<u8>, String> {
        let path = self.tls_key_path()?;
        std::fs::read(path.clone())
            .map_err(|e| format!("Failed reading TLS key from {}: {:?}", path, e))
    }
}

/// A trait with associated constants, representing a URI scheme and default ports
pub trait UriScheme:
    Debug + Hash + Ord + PartialOrd + Eq + PartialEq + Send + Sync + Clone
{
    // Scheme prefix used when TLS is enabled (e.g. the secure variant of the
    // protocol).
    const SCHEME_SECURE: &'static str;
    // Scheme prefix used when TLS is disabled.
    const SCHEME_INSECURE: &'static str;
    // Port assumed when the URI omits one and the scheme is secure.
    const DEFAULT_SECURE_PORT: u16;
    // Port assumed when the URI omits one and the scheme is insecure.
    const DEFAULT_INSECURE_PORT: u16;
}
35.821429
98
0.626549
116506d0d474b3c29104438e123837d62fbe68d4
1,444
use std::net::SocketAddr;

/// Client binary name.
pub const CLIENT_NAME: &str = "srwc";
/// Client version string.
pub const VERSION: &str = "0.1.0";
/// Server protocol used when none is specified.
pub const DEFAULT_TYPE: ServerType = ServerType::HTTP;
/// Socket address used when none is specified.
pub const DEFAULT_ADDR: &str = "0.0.0.0:1417";
/// Local storage directory used when none is specified.
pub const DEFAULT_STORAGE: &str = "/tmp/srwc";
/// Size (in bytes) of the transfer buffer.
pub const BUFFER_SIZE: usize = 8;

// Protocol message strings exchanged with the server.
pub const ACK_MESSAGE: &str = "ACK";
pub const PREPARE_TRANSFER_MESSAGE: &str = "prepare transfer file";
pub const CANNOT_FIND_FILE_MESSAGE: &str = "cannot find file";
pub const REMOVED_OK_MESSAGE: &str = "removed ok";
pub const REMOVED_NOK_MESSAGE: &str = "removed nok";

// gRPC transport details.
pub const GRPC_METADATA_FILENAME: &str = "filename";
pub const GRPC_URL_SCHEMA: &str = "http://";

/// Transport protocol the client uses to talk to the server.
#[derive(Debug)]
pub enum ServerType {
    HTTP,
    HTTPS,
    GRPC,
}

/// Runtime configuration for the client.
#[derive(Debug)]
pub struct ClientConfig {
    pub server_type: ServerType,
    pub address: SocketAddr,
    pub storage: String,
}

impl ClientConfig {
    /// Creates a configuration populated with the compile-time defaults.
    ///
    /// # Panics
    ///
    /// Panics if `DEFAULT_ADDR` fails to parse as a socket address; since it
    /// is a compile-time constant, that would indicate a programming error.
    pub fn new() -> Self {
        ClientConfig {
            server_type: DEFAULT_TYPE,
            address: DEFAULT_ADDR.parse().expect("Unable to parse socket address"),
            storage: DEFAULT_STORAGE.to_string(),
        }
    }
}

// Keep `default()` and `new()` in agreement (clippy::new_without_default).
impl Default for ClientConfig {
    fn default() -> Self {
        Self::new()
    }
}

/// Metadata describing a file held on the server.
#[derive(Debug)]
pub struct ServerFile {
    pub fullpath: String,
    pub name: String,
    pub size: u64,
}

impl ServerFile {
    /// Creates an empty `ServerFile`: no path, no name, zero size.
    pub fn new() -> Self {
        ServerFile {
            fullpath: String::new(),
            name: String::new(),
            size: 0,
        }
    }
}

// Keep `default()` and `new()` in agreement (clippy::new_without_default).
impl Default for ServerFile {
    fn default() -> Self {
        Self::new()
    }
}
24.066667
83
0.624654
eb174a7ad26eac8df22b525a4d31816ba6642e27
371
// Run the wasm-bindgen test harness inside a browser (rather than Node).
wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser);
// use wasm_bindgen_test::*;
use pouch::*;

// TODO make them work with 'wasm-pack test --chrome'
// #[wasm_bindgen_test]
// Disabled test (note the leading underscore and the commented-out test
// attribute above): creates a database and checks that `info()` reports
// back the same name the database was created with.
async fn _test_get_name() {
    let db_name = "tests_new_node";
    let db = Database::new(db_name);
    let info = db.info().await.unwrap();
    assert_eq!(info.db_name, db_name);
}
26.5
64
0.700809
14c8affffe74f9fbca6e09e66bd87d5568b40d74
14,188
//! Execute Nix commands using a builder-pattern abstraction. //! ```rust //! extern crate lorri; //! use lorri::nix; //! //! #[macro_use] extern crate serde_derive; //! #[derive(Debug, Deserialize, PartialEq, Eq)] //! struct Author { //! name: String, //! contributions: usize //! } //! //! fn main() { //! let output: Result<Vec<Author>, _> = nix::CallOpts::expression(r#" //! { name }: //! { //! contributors = [ //! { inherit name; contributions = 99; } //! ]; //! } //! "#) //! .argstr("name", "Jill") //! .attribute("contributors") //! .value(); //! //! assert_eq!( //! output.unwrap(), //! vec![ //! Author { name: "Jill".to_string(), contributions: 99 }, //! ] //! ); //! } //! ``` use osstrlines; use serde_json; use std::collections::HashMap; use std::ffi::OsStr; use std::path::{Path, PathBuf}; use std::process::{Command, Stdio}; use vec1::Vec1; /// Execute Nix commands using a builder-pattern abstraction. #[derive(Clone)] pub struct CallOpts { input: Input, attribute: Option<String>, argstrs: HashMap<String, String>, } /// Which input to give nix. #[derive(Clone)] enum Input { /// A nix expression string. Expression(String), /// A nix file. File(PathBuf), } /// A store path (generated by `nix-store --realize` from a .drv file). #[derive(Hash, PartialEq, Eq, Clone, Debug)] pub struct StorePath(PathBuf); impl StorePath { /// Underlying `Path`. pub fn as_path(&self) -> &Path { &self.0 } } impl From<&std::ffi::OsStr> for StorePath { fn from(s: &std::ffi::OsStr) -> StorePath { StorePath(PathBuf::from(s.to_owned())) } } impl From<std::ffi::OsString> for StorePath { fn from(s: std::ffi::OsString) -> StorePath { StorePath(PathBuf::from(s)) } } /// Opaque type to keep a temporary GC root directory alive. /// Once it is dropped, the GC root is removed. pub struct GcRootTempDir(tempfile::TempDir); impl CallOpts { /// Create a CallOpts with the Nix expression `expr`. 
/// /// ```rust /// extern crate lorri; /// use lorri::nix; /// let output: Result<u8, _> = nix::CallOpts::expression("let x = 5; in x") /// .value(); /// assert_eq!( /// output.unwrap(), 5 /// ); /// ``` pub fn expression(expr: &str) -> CallOpts { CallOpts { input: Input::Expression(expr.to_string()), attribute: None, argstrs: HashMap::new(), } } /// Create a CallOpts with the Nix file `nix_file`. pub fn file(nix_file: PathBuf) -> CallOpts { CallOpts { input: Input::File(nix_file), attribute: None, argstrs: HashMap::new(), } } /// Evaluate a sub attribute of the expression. Only supports one: /// calling attribute() multiple times is supported, but overwrites /// the previous attribute. /// /// /// ```rust /// extern crate lorri; /// use lorri::nix; /// let output: Result<u8, _> = nix::CallOpts::expression("let x = 5; in { a = x; }") /// .attribute("a") /// .value(); /// assert_eq!( /// output.unwrap(), 5 /// ); /// ``` /// /// /// This is due to the following difficult to handle edge case of /// /// nix-instantiate --eval --strict --json -E '{ a = 1; b = 2; }' -A a -A b /// /// producing "12". pub fn attribute(&mut self, attr: &str) -> &mut Self { self.attribute = Some(attr.to_string()); self } /// Specify an argument to the expression, where the argument's value /// is to be interpreted as a string. /// /// ```rust /// extern crate lorri; /// use lorri::nix; /// let output: Result<String, _> = nix::CallOpts::expression(r#"{ name }: "Hello, ${name}!""#) /// .argstr("name", "Jill") /// .value(); /// assert_eq!( /// output.unwrap(), "Hello, Jill!" 
/// ); /// ``` pub fn argstr(&mut self, name: &str, value: &str) -> &mut Self { self.argstrs.insert(name.to_string(), value.to_string()); self } /// Evaluate the expression and parameters, and interpret as type T: /// /// ```rust /// extern crate lorri; /// use lorri::nix; /// /// #[macro_use] extern crate serde_derive; /// #[derive(Debug, Deserialize, PartialEq, Eq)] /// struct Author { /// name: String, /// contributions: usize /// } /// /// fn main() { /// let output: Result<Vec<Author>, _> = nix::CallOpts::expression(r#" /// { name }: /// { /// contributors = [ /// { inherit name; contributions = 99; } /// ]; /// } /// "#) /// .argstr("name", "Jill") /// .attribute("contributors") /// .value(); /// /// assert_eq!( /// output.unwrap(), /// vec![ /// Author { name: "Jill".to_string(), contributions: 99 }, /// ] /// ); /// } /// ``` pub fn value<T>(&self) -> Result<T, EvaluationError> where T: serde::de::DeserializeOwned, { let mut cmd = Command::new("nix-instantiate"); cmd.args(&["--eval", "--json", "--strict"]); cmd.args(self.command_arguments()); let output = cmd.output()?; if output.status.success() { Ok(serde_json::from_slice(&output.stdout.clone())?) } else { Err(output.into()) } } /// Build the expression and return a path to the build result: /// /// ```rust /// extern crate lorri; /// use lorri::nix; /// use std::path::{Path, PathBuf}; /// # use std::env; /// # env::set_var("NIX_PATH", "nixpkgs=./nix/bogus-nixpkgs/"); /// /// let (location, gc_root) = nix::CallOpts::expression(r#" /// import <nixpkgs> {} /// "#) /// .attribute("hello") /// .path() /// .unwrap() /// ; /// /// let location = location.as_path().to_string_lossy().into_owned(); /// println!("{:?}", location); /// assert!(location.contains("/nix/store")); /// assert!(location.contains("hello-")); /// drop(gc_root); /// ``` /// /// `path` returns a lock to the GC roots created by the Nix call /// (`gc_root` in the example above). 
Until that is dropped, /// a Nix garbage collect will not remove the store paths created /// by `path()`. /// /// Note, `path()` returns an error if there are multiple store paths /// returned by Nix: /// /// ```rust /// extern crate lorri; /// use lorri::nix; /// use std::path::{Path, PathBuf}; /// # use std::env; /// # env::set_var("NIX_PATH", "nixpkgs=./nix/bogus-nixpkgs/"); /// /// let paths = nix::CallOpts::expression(r#" /// { inherit (import <nixpkgs> {}) hello git; } /// "#) /// .path(); /// /// match paths { /// Err(nix::OnePathError::TooManyResults) => {}, /// otherwise => panic!(otherwise) /// } /// ``` pub fn path(&self) -> Result<(StorePath, GcRootTempDir), OnePathError> { let (pathsv1, gc_root) = self.paths()?; let mut paths = pathsv1.into_vec(); match (paths.pop(), paths.pop()) { // Exactly zero (None, _) => Err(BuildError::NoResult.into()), // Exactly one (Some(path), None) => Ok((path, gc_root)), // More than one (Some(_), Some(_)) => Err(OnePathError::TooManyResults), } } /// Build the expression and return a list of paths to the build results. /// Like `.path()`, except it returns all store paths. /// /// ```rust /// extern crate lorri; /// use lorri::nix; /// use std::path::{Path, PathBuf}; /// # use std::env; /// # env::set_var("NIX_PATH", "nixpkgs=./nix/bogus-nixpkgs/"); /// /// let (paths, gc_root) = nix::CallOpts::expression(r#" /// { inherit (import <nixpkgs> {}) hello git; } /// "#) /// .paths() /// .unwrap(); /// let mut paths = paths /// .into_iter() /// .map(|path| { println!("{:?}", path); format!("{:?}", path) }); /// assert!(paths.next().unwrap().contains("git-")); /// assert!(paths.next().unwrap().contains("hello-")); /// drop(gc_root); /// ``` pub fn paths(&self) -> Result<(Vec1<StorePath>, GcRootTempDir), BuildError> { // TODO: temp_dir writes to /tmp by default, we should // create a wrapper using XDG_RUNTIME_DIR instead, // which is per-user and (on systemd systems) a tmpfs. 
let gc_root_dir = tempfile::TempDir::new()?; let mut cmd = Command::new("nix-build"); // Create a gc root to the build output cmd.args(&[ OsStr::new("--out-link"), gc_root_dir.path().join(Path::new("result")).as_os_str(), ]); cmd.args(self.command_arguments()); cmd.stderr(Stdio::inherit()); let output = cmd.output()?; if output.status.success() { let stdout: &[u8] = &output.stdout; let paths: Vec<StorePath> = osstrlines::Lines::from(stdout) .map(|line| line.map(StorePath::from)) .collect::<Result<Vec<StorePath>, _>>()?; if let Ok(vec1) = Vec1::from_vec(paths) { Ok((vec1, GcRootTempDir(gc_root_dir))) } else { Err(BuildError::NoResult) } } else { Err(output.into()) } } /// Fetch common arguments passed to Nix's CLI, specifically /// the --expr expression, -A attribute, and --argstr values. fn command_arguments(&self) -> Vec<&OsStr> { let mut ret: Vec<&OsStr> = vec![]; if let Some(ref attr) = self.attribute { ret.push(OsStr::new("-A")); ret.push(OsStr::new(attr)); } for (name, value) in self.argstrs.iter() { ret.push(OsStr::new("--argstr")); ret.push(OsStr::new(name)); ret.push(OsStr::new(value)); } match self.input { Input::Expression(ref exp) => { ret.push(OsStr::new("--expr")); ret.push(OsStr::new(exp.as_str())); } Input::File(ref fp) => { ret.push(OsStr::new("--")); ret.push(OsStr::new(fp)); } } ret } } /// Possible error conditions encountered when executing Nix evaluation commands. #[derive(Debug)] pub enum EvaluationError { /// A system-level IO error occured while executing Nix. Io(std::io::Error), /// Nix execution failed. ExecutionFailed(std::process::Output), /// The data returned from nix-instantiate did not match the /// data time you expect. 
Decoding(serde_json::Error), } impl From<std::io::Error> for EvaluationError { fn from(e: std::io::Error) -> EvaluationError { EvaluationError::Io(e) } } impl From<serde_json::Error> for EvaluationError { fn from(e: serde_json::Error) -> EvaluationError { EvaluationError::Decoding(e) } } impl From<std::process::Output> for EvaluationError { fn from(output: std::process::Output) -> EvaluationError { if output.status.success() { panic!( "Output is successful, but we're in error handling: {:#?}", output ); } EvaluationError::ExecutionFailed(output) } } /// Possible error conditions encountered when executing Nix build commands. #[derive(Debug)] pub enum BuildError { /// A system-level IO error occured while executing Nix. Io(std::io::Error), /// Nix execution failed. ExecutionFailed(std::process::Output), /// Build produced no paths NoResult, /// The directory passed for the GC root either does not exist or /// is not a directory. GcRootNotADirectory, } impl From<std::io::Error> for BuildError { fn from(e: std::io::Error) -> BuildError { BuildError::Io(e) } } impl From<std::process::Output> for BuildError { fn from(output: std::process::Output) -> BuildError { if output.status.success() { panic!( "Output is successful, but we're in error handling: {:#?}", output ); } BuildError::ExecutionFailed(output) } } /// Possible error conditions encountered when executing a Nix build /// and expecting a single result #[derive(Debug)] pub enum OnePathError { /// Too many paths were returned TooManyResults, /// Standard Build Error results Build(BuildError), } impl From<BuildError> for OnePathError { fn from(e: BuildError) -> OnePathError { OnePathError::Build(e) } } #[cfg(test)] mod tests { use super::CallOpts; use std::ffi::OsStr; use std::path::PathBuf; #[test] fn cmd_arguments_expression() { let mut nix = CallOpts::expression("my-cool-expression"); nix.attribute("hello"); nix.argstr("foo", "bar"); let exp: Vec<&OsStr> = [ "-A", "hello", "--argstr", "foo", "bar", "--expr", 
"my-cool-expression", ] .into_iter() .map(OsStr::new) .collect(); assert_eq!(exp, nix.command_arguments()); } #[test] fn cmd_arguments_test() { let mut nix2 = CallOpts::file(PathBuf::from("/my-cool-file.nix")); nix2.attribute("hello"); nix2.argstr("foo", "bar"); let exp2: Vec<&OsStr> = [ "-A", "hello", "--argstr", "foo", "bar", "--", "/my-cool-file.nix", ] .into_iter() .map(OsStr::new) .collect(); assert_eq!(exp2, nix2.command_arguments()); } }
28.09505
99
0.521074
2346ca24b27f86aae009d2fae60363a12d8a7296
8,905
use crate::eth_account::EthereumAccount; use crate::zksync_account::ZkSyncAccount; use num::BigUint; use web3::types::{TransactionReceipt, U64}; use zksync_crypto::rand::Rng; use zksync_types::tx::{ChangePubKeyType, TimeRange}; use zksync_types::{AccountId, Address, Nonce, PriorityOp, TokenId, ZkSyncTx}; use crate::types::*; /// Account set is used to create transactions using stored account /// in a convenient way #[derive(Clone)] pub struct AccountSet { pub eth_accounts: Vec<EthereumAccount>, pub zksync_accounts: Vec<ZkSyncAccount>, pub fee_account_id: ZKSyncAccountId, } impl AccountSet { /// Create deposit from eth account to zksync account pub async fn deposit( &self, from: ETHAccountId, to: ZKSyncAccountId, token: Option<Address>, // None for ETH amount: BigUint, ) -> (Vec<TransactionReceipt>, PriorityOp) { let from = &self.eth_accounts[from.0]; let to = &self.zksync_accounts[to.0]; if let Some(address) = token { from.deposit_erc20(address, amount, &to.address) .await .expect("erc20 deposit should not fail") } else { from.deposit_eth(amount, &to.address, None) .await .expect("eth deposit should not fail") } } pub async fn deposit_to_random( &self, from: ETHAccountId, token: Option<Address>, // None for ETH amount: BigUint, rng: &mut impl Rng, ) -> (Vec<TransactionReceipt>, PriorityOp) { let from = &self.eth_accounts[from.0]; let to_address = Address::from_slice(&rng.gen::<[u8; 20]>()); if let Some(address) = token { from.deposit_erc20(address, amount, &to_address) .await .expect("erc20 deposit should not fail") } else { from.deposit_eth(amount, &to_address, None) .await .expect("eth deposit should not fail") } } /// Create signed transfer between zksync accounts /// `nonce` optional nonce override /// `increment_nonce` - flag for `from` account nonce increment #[allow(clippy::too_many_arguments)] pub fn transfer( &self, from: ZKSyncAccountId, to: ZKSyncAccountId, token_id: Token, amount: BigUint, fee: BigUint, nonce: Option<Nonce>, time_range: TimeRange, 
increment_nonce: bool, ) -> ZkSyncTx { let from = &self.zksync_accounts[from.0]; let to = &self.zksync_accounts[to.0]; ZkSyncTx::Transfer(Box::new( from.sign_transfer( token_id.0, "", amount, fee, &to.address, nonce, increment_nonce, time_range, ) .0, )) } /// Create signed transfer between zksync accounts /// `nonce` optional nonce override /// `increment_nonce` - flag for `from` account nonce increment #[allow(clippy::too_many_arguments)] pub fn transfer_to_new_random( &self, from: ZKSyncAccountId, token_id: Token, amount: BigUint, fee: BigUint, nonce: Option<Nonce>, increment_nonce: bool, rng: &mut impl Rng, ) -> ZkSyncTx { let from = &self.zksync_accounts[from.0]; let to_address = Address::from_slice(&rng.gen::<[u8; 20]>()); ZkSyncTx::Transfer(Box::new( from.sign_transfer( token_id.0, "", amount, fee, &to_address, nonce, increment_nonce, Default::default(), ) .0, )) } /// Create withdraw from zksync account to eth account /// `nonce` optional nonce override /// `increment_nonce` - flag for `from` account nonce increment #[allow(clippy::too_many_arguments)] pub fn withdraw( &self, from: ZKSyncAccountId, to: ETHAccountId, token_id: Token, amount: BigUint, fee: BigUint, nonce: Option<Nonce>, increment_nonce: bool, time_range: TimeRange, ) -> ZkSyncTx { let from = &self.zksync_accounts[from.0]; let to = &self.eth_accounts[to.0]; ZkSyncTx::Withdraw(Box::new( from.sign_withdraw( token_id.0, "", amount, fee, &to.address, nonce, increment_nonce, time_range, ) .0, )) } /// Create forced exit for zksync account /// `nonce` optional nonce override /// `increment_nonce` - flag for `from` account nonce increment #[allow(clippy::too_many_arguments)] pub fn forced_exit( &self, initiator: ZKSyncAccountId, target: ZKSyncAccountId, token_id: Token, fee: BigUint, nonce: Option<Nonce>, increment_nonce: bool, time_range: TimeRange, ) -> ZkSyncTx { let from = &self.zksync_accounts[initiator.0]; let target = &self.zksync_accounts[target.0]; 
ZkSyncTx::ForcedExit(Box::new(from.sign_forced_exit( token_id.0, fee, &target.address, nonce, increment_nonce, time_range, ))) } /// Create withdraw from zksync account to random eth account /// `nonce` optional nonce override /// `increment_nonce` - flag for `from` account nonce increment #[allow(clippy::too_many_arguments)] pub fn withdraw_to_random( &self, from: ZKSyncAccountId, token_id: Token, amount: BigUint, fee: BigUint, nonce: Option<Nonce>, increment_nonce: bool, rng: &mut impl Rng, ) -> ZkSyncTx { let from = &self.zksync_accounts[from.0]; let to_address = Address::from_slice(&rng.gen::<[u8; 20]>()); ZkSyncTx::Withdraw(Box::new( from.sign_withdraw( token_id.0, "", amount, fee, &to_address, nonce, increment_nonce, Default::default(), ) .0, )) } /// Create full exit from zksync account to eth account /// `nonce` optional nonce override /// `increment_nonce` - flag for `from` account nonce increment #[allow(clippy::too_many_arguments)] pub async fn full_exit( &self, post_by: ETHAccountId, token_address: Address, account_id: AccountId, ) -> (TransactionReceipt, PriorityOp) { self.eth_accounts[post_by.0] .full_exit(account_id, token_address) .await .expect("FullExit eth call failed") } #[allow(clippy::too_many_arguments)] pub async fn change_pubkey_with_onchain_auth( &self, eth_account: ETHAccountId, zksync_signer: ZKSyncAccountId, fee_token: TokenId, fee: BigUint, nonce: Option<Nonce>, increment_nonce: bool, time_range: TimeRange, ) -> ZkSyncTx { let zksync_account = &self.zksync_accounts[zksync_signer.0]; let auth_nonce = nonce.unwrap_or_else(|| zksync_account.nonce()); let eth_account = &self.eth_accounts[eth_account.0]; let tx_receipt = eth_account .auth_fact(&zksync_account.pubkey_hash.data, auth_nonce) .await .expect("Auth pubkey fail"); assert_eq!(tx_receipt.status, Some(U64::from(1)), "Auth pubkey fail"); ZkSyncTx::ChangePubKey(Box::new(zksync_account.sign_change_pubkey_tx( nonce, increment_nonce, fee_token, fee, ChangePubKeyType::Onchain, time_range, 
))) } pub fn change_pubkey_with_tx( &self, zksync_signer: ZKSyncAccountId, fee_token: TokenId, fee: BigUint, nonce: Option<Nonce>, increment_nonce: bool, time_range: TimeRange, ) -> ZkSyncTx { let zksync_account = &self.zksync_accounts[zksync_signer.0]; ZkSyncTx::ChangePubKey(Box::new(zksync_account.sign_change_pubkey_tx( nonce, increment_nonce, fee_token, fee, if zksync_account.eth_account_data.is_eoa() { ChangePubKeyType::ECDSA } else if zksync_account.eth_account_data.is_create2() { ChangePubKeyType::CREATE2 } else { panic!("Not supported, use onchain change pubkey"); }, time_range, ))) } }
30.496575
78
0.55452
67547b4fa9e3091179bb8cd302febabba3ad9cb6
2,444
// Copyright 2018 Grove Enterprises LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Data sources use std::fs::File; use std::rc::Rc; use std::sync::Arc; use arrow::csv; use arrow::datatypes::Schema; use arrow::record_batch::RecordBatch; use super::error::Result; pub trait DataSource { fn schema(&self) -> &Arc<Schema>; fn next(&mut self) -> Result<Option<RecordBatch>>; } /// CSV data source pub struct CsvDataSource { schema: Arc<Schema>, reader: csv::Reader, } impl CsvDataSource { pub fn new(filename: &str, schema: Arc<Schema>, batch_size: usize) -> Self { let file = File::open(filename).unwrap(); let reader = csv::Reader::new(file, schema.clone(), true, batch_size, None); Self { schema, reader } } pub fn from_reader(schema: Arc<Schema>, reader: csv::Reader) -> Self { Self { schema, reader } } } impl DataSource for CsvDataSource { fn schema(&self) -> &Arc<Schema> { &self.schema } fn next(&mut self) -> Result<Option<RecordBatch>> { Ok(self.reader.next()?) 
} } //pub struct DataSourceIterator { // pub ds: Rc<RefCell<DataSource>>, //} // //impl DataSourceIterator { // pub fn new(ds: Rc<RefCell<DataSource>>) -> Self { // DataSourceIterator { ds } // } //} // //impl Iterator for DataSourceIterator { // type Item = Result<Rc<RecordBatch>>; // // fn next(&mut self) -> Option<Self::Item> { // self.ds.borrow_mut().next() // } //} #[derive(Serialize, Deserialize, Clone)] pub enum DataSourceMeta { /// Represents a CSV file with a provided schema CsvFile { filename: String, schema: Rc<Schema>, has_header: bool, projection: Option<Vec<usize>>, }, /// Represents a Parquet file that contains schema information ParquetFile { filename: String, schema: Rc<Schema>, projection: Option<Vec<usize>>, }, }
26
84
0.644435
22934e319f7a0f81090efe07878fc2618ca20fa0
20,301
// Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //! Provides functionality for saving/restoring the MMIO device manager and its devices. // Currently only supports x86_64. #![cfg(target_arch = "x86_64")] use std::io; use std::result::Result; use std::sync::{Arc, Mutex}; use super::mmio::*; use devices::virtio::balloon::persist::{BalloonConstructorArgs, BalloonState}; use devices::virtio::balloon::{Balloon, Error as BalloonError}; use devices::virtio::block::persist::{BlockConstructorArgs, BlockState}; use devices::virtio::block::Block; use devices::virtio::net::persist::{Error as NetError, NetConstructorArgs, NetState}; use devices::virtio::net::Net; use devices::virtio::persist::{MmioTransportConstructorArgs, MmioTransportState}; use devices::virtio::vsock::persist::{VsockConstructorArgs, VsockState, VsockUdsConstructorArgs}; use devices::virtio::vsock::{Vsock, VsockError, VsockUnixBackend, VsockUnixBackendError}; use devices::virtio::{ MmioTransport, VirtioDevice, TYPE_BALLOON, TYPE_BLOCK, TYPE_NET, TYPE_VSOCK, }; use kvm_ioctls::VmFd; use polly::event_manager::{Error as EventMgrError, EventManager, Subscriber}; use snapshot::Persist; use versionize::{VersionMap, Versionize, VersionizeError, VersionizeResult}; use versionize_derive::Versionize; use vm_memory::GuestMemoryMmap; /// Errors for (de)serialization of the MMIO device manager. #[derive(Debug)] pub enum Error { Balloon(BalloonError), Block(io::Error), EventManager(EventMgrError), DeviceManager(super::mmio::Error), MmioTransport, Net(NetError), Vsock(VsockError), VsockUnixBackend(VsockUnixBackendError), } #[derive(Clone, Versionize)] /// Holds the state of a balloon device connected to the MMIO space. pub struct ConnectedBalloonState { /// Device identifier. pub device_id: String, /// Device state. pub device_state: BalloonState, /// Mmio transport state. pub transport_state: MmioTransportState, /// VmmResources. 
pub mmio_slot: MMIODeviceInfo, } #[derive(Clone, Versionize)] /// Holds the state of a block device connected to the MMIO space. pub struct ConnectedBlockState { /// Device identifier. pub device_id: String, /// Device state. pub device_state: BlockState, /// Mmio transport state. pub transport_state: MmioTransportState, /// VmmResources. pub mmio_slot: MMIODeviceInfo, } #[derive(Clone, Versionize)] /// Holds the state of a net device connected to the MMIO space. pub struct ConnectedNetState { /// Device identifier. pub device_id: String, /// Device state. pub device_state: NetState, /// Mmio transport state. pub transport_state: MmioTransportState, /// VmmResources. pub mmio_slot: MMIODeviceInfo, } #[derive(Clone, Versionize)] /// Holds the state of a vsock device connected to the MMIO space. pub struct ConnectedVsockState { /// Device identifier. pub device_id: String, /// Device state. pub device_state: VsockState, /// Mmio transport state. pub transport_state: MmioTransportState, /// VmmResources. pub mmio_slot: MMIODeviceInfo, } #[derive(Clone, Versionize)] /// Holds the device states. pub struct DeviceStates { /// Block device states. pub block_devices: Vec<ConnectedBlockState>, /// Net device states. pub net_devices: Vec<ConnectedNetState>, /// Vsock device state. pub vsock_device: Option<ConnectedVsockState>, /// Balloon device state. 
#[version(start = 2, ser_fn = "balloon_serialize")] pub balloon_device: Option<ConnectedBalloonState>, } impl DeviceStates { fn balloon_serialize(&mut self, target_version: u16) -> VersionizeResult<()> { if target_version < 2 && self.balloon_device.is_some() { return Err(VersionizeError::Semantic( "Target version does not implement the virtio-balloon device.".to_owned(), )); } Ok(()) } } pub struct MMIODevManagerConstructorArgs<'a> { pub mem: GuestMemoryMmap, pub vm: &'a VmFd, pub event_manager: &'a mut EventManager, } impl<'a> Persist<'a> for MMIODeviceManager { type State = DeviceStates; type ConstructorArgs = MMIODevManagerConstructorArgs<'a>; type Error = Error; fn save(&self) -> Self::State { let mut states = DeviceStates { balloon_device: None, block_devices: Vec::new(), net_devices: Vec::new(), vsock_device: None, }; let _: Result<(), ()> = self.for_each_device(|devtype, devid, devinfo, bus_dev| { if *devtype == arch::DeviceType::BootTimer { // No need to save BootTimer state. return Ok(()); } let locked_bus_dev = bus_dev.lock().expect("Poisoned lock"); let mmio_transport = locked_bus_dev .as_any() // Only MmioTransport implements BusDevice on x86_64 at this point. 
.downcast_ref::<MmioTransport>() .expect("Unexpected BusDevice type"); let transport_state = mmio_transport.save(); let locked_device = mmio_transport.locked_device(); match locked_device.device_type() { TYPE_BALLOON => { let balloon_state = locked_device .as_any() .downcast_ref::<Balloon>() .unwrap() .save(); states.balloon_device = Some(ConnectedBalloonState { device_id: devid.clone(), device_state: balloon_state, transport_state, mmio_slot: devinfo.clone(), }); } TYPE_BLOCK => { let block_state = locked_device .as_any() .downcast_ref::<Block>() .unwrap() .save(); states.block_devices.push(ConnectedBlockState { device_id: devid.clone(), device_state: block_state, transport_state, mmio_slot: devinfo.clone(), }); } TYPE_NET => { let net_state = locked_device.as_any().downcast_ref::<Net>().unwrap().save(); states.net_devices.push(ConnectedNetState { device_id: devid.clone(), device_state: net_state, transport_state, mmio_slot: devinfo.clone(), }); } TYPE_VSOCK => { let vsock = locked_device .as_any() // Currently, VsockUnixBackend is the only implementation of VsockBackend. 
.downcast_ref::<Vsock<VsockUnixBackend>>() .unwrap(); let vsock_state = VsockState { backend: vsock.backend().save(), frontend: vsock.save(), }; states.vsock_device = Some(ConnectedVsockState { device_id: devid.clone(), device_state: vsock_state, transport_state, mmio_slot: devinfo.clone(), }); } _ => unreachable!(), }; Ok(()) }); states } fn restore( constructor_args: Self::ConstructorArgs, state: &Self::State, ) -> Result<Self, Self::Error> { let mut dev_manager = MMIODeviceManager::new(arch::MMIO_MEM_START, (arch::IRQ_BASE, arch::IRQ_MAX)); let mem = &constructor_args.mem; let vm = constructor_args.vm; let mut restore_helper = |device: Arc<Mutex<dyn VirtioDevice>>, as_subscriber: Arc<Mutex<dyn Subscriber>>, id: &String, state: &MmioTransportState, slot: &MMIODeviceInfo, event_manager: &mut EventManager| -> Result<(), Self::Error> { dev_manager .slot_sanity_check(slot) .map_err(Error::DeviceManager)?; let restore_args = MmioTransportConstructorArgs { mem: mem.clone(), device, }; let mmio_transport = MmioTransport::restore(restore_args, state).map_err(|()| Error::MmioTransport)?; dev_manager .register_virtio_mmio_device(vm, id.clone(), mmio_transport, slot) .map_err(Error::DeviceManager)?; event_manager .add_subscriber(as_subscriber) .map_err(Error::EventManager) }; if let Some(balloon_state) = &state.balloon_device { let device = Arc::new(Mutex::new( Balloon::restore( BalloonConstructorArgs { mem: mem.clone() }, &balloon_state.device_state, ) .map_err(Error::Balloon)?, )); restore_helper( device.clone(), device, &balloon_state.device_id, &balloon_state.transport_state, &balloon_state.mmio_slot, constructor_args.event_manager, )?; } for block_state in &state.block_devices { let device = Arc::new(Mutex::new( Block::restore( BlockConstructorArgs { mem: mem.clone() }, &block_state.device_state, ) .map_err(Error::Block)?, )); restore_helper( device.clone(), device, &block_state.device_id, &block_state.transport_state, &block_state.mmio_slot, 
constructor_args.event_manager, )?; } for net_state in &state.net_devices { let device = Arc::new(Mutex::new( Net::restore( NetConstructorArgs { mem: mem.clone() }, &net_state.device_state, ) .map_err(Error::Net)?, )); restore_helper( device.clone(), device, &net_state.device_id, &net_state.transport_state, &net_state.mmio_slot, constructor_args.event_manager, )?; } if let Some(vsock_state) = &state.vsock_device { let ctor_args = VsockUdsConstructorArgs { cid: vsock_state.device_state.frontend.cid, }; let backend = VsockUnixBackend::restore(ctor_args, &vsock_state.device_state.backend) .map_err(Error::VsockUnixBackend)?; let device = Arc::new(Mutex::new( Vsock::restore( VsockConstructorArgs { mem: mem.clone(), backend, }, &vsock_state.device_state.frontend, ) .map_err(Error::Vsock)?, )); restore_helper( device.clone(), device, &vsock_state.device_id, &vsock_state.transport_state, &vsock_state.mmio_slot, constructor_args.event_manager, )?; } Ok(dev_manager) } } #[cfg(test)] mod tests { use super::*; use crate::builder::tests::*; use crate::vmm_config::balloon::BalloonDeviceConfig; use crate::vmm_config::net::NetworkInterfaceConfig; use crate::vmm_config::vsock::VsockDeviceConfig; use polly::event_manager::EventManager; use utils::tempfile::TempFile; impl PartialEq for ConnectedBalloonState { fn eq(&self, other: &ConnectedBalloonState) -> bool { // Actual device state equality is checked by the device's tests. self.transport_state == other.transport_state && self.mmio_slot == other.mmio_slot } } impl std::fmt::Debug for ConnectedBalloonState { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!( f, "ConnectedBalloonDevice {{ transport_state: {:?}, mmio_slot: {:?} }}", self.transport_state, self.mmio_slot ) } } impl PartialEq for ConnectedBlockState { fn eq(&self, other: &ConnectedBlockState) -> bool { // Actual device state equality is checked by the device's tests. 
self.transport_state == other.transport_state && self.mmio_slot == other.mmio_slot } } impl std::fmt::Debug for ConnectedBlockState { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!( f, "ConnectedBlockDevice {{ transport_state: {:?}, mmio_slot: {:?} }}", self.transport_state, self.mmio_slot ) } } impl PartialEq for ConnectedNetState { fn eq(&self, other: &ConnectedNetState) -> bool { // Actual device state equality is checked by the device's tests. self.transport_state == other.transport_state && self.mmio_slot == other.mmio_slot } } impl std::fmt::Debug for ConnectedNetState { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!( f, "ConnectedNetDevice {{ transport_state: {:?}, mmio_slot: {:?} }}", self.transport_state, self.mmio_slot ) } } impl PartialEq for ConnectedVsockState { fn eq(&self, other: &ConnectedVsockState) -> bool { // Actual device state equality is checked by the device's tests. self.transport_state == other.transport_state && self.mmio_slot == other.mmio_slot } } impl std::fmt::Debug for ConnectedVsockState { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!( f, "ConnectedVsockDevice {{ transport_state: {:?}, mmio_slot: {:?} }}", self.transport_state, self.mmio_slot ) } } impl PartialEq for DeviceStates { fn eq(&self, other: &DeviceStates) -> bool { self.balloon_device == other.balloon_device && self.block_devices == other.block_devices && self.net_devices == other.net_devices && self.vsock_device == other.vsock_device } } impl std::fmt::Debug for DeviceStates { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!( f, "DevicesStates {{ block_devices: {:?}, net_devices: {:?}, vsock_device: {:?} }}", self.block_devices, self.net_devices, self.vsock_device ) } } impl MMIODeviceManager { fn soft_clone(&self) -> Self { let dummy_mmio_base = 0; let dummy_irq_range = (0, 0); let mut clone = MMIODeviceManager::new(dummy_mmio_base, dummy_irq_range); // We only care about 
the device hashmap. clone.id_to_dev_info = self.id_to_dev_info.clone(); clone } } impl PartialEq for MMIODeviceManager { fn eq(&self, other: &MMIODeviceManager) -> bool { // We only care about the device hashmap. if self.id_to_dev_info.len() != other.id_to_dev_info.len() { return false; } for (key, val) in &self.id_to_dev_info { match other.id_to_dev_info.get(key) { Some(other_val) if val == other_val => continue, _ => return false, }; } true } } impl std::fmt::Debug for MMIODeviceManager { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "{:?}", self.id_to_dev_info) } } #[test] fn test_device_manager_persistence() { let mut buf = vec![0; 16384]; let mut version_map = VersionMap::new(); // These need to survive so the restored blocks find them. let _block_files; let mut tmp_sock_file = TempFile::new().unwrap(); tmp_sock_file.remove().unwrap(); // Set up a vmm with one of each device, and get the serialized DeviceStates. let original_mmio_device_manager = { let mut event_manager = EventManager::new().expect("Unable to create EventManager"); let mut vmm = default_vmm(); let mut cmdline = default_kernel_cmdline(); // Add a balloon device. let balloon_cfg = BalloonDeviceConfig { amount_mb: 123, must_tell_host: true, deflate_on_oom: false, stats_polling_interval_s: 1, }; insert_balloon_device(&mut vmm, &mut cmdline, &mut event_manager, balloon_cfg); // Add a block device. let drive_id = String::from("root"); let block_configs = vec![CustomBlockConfig::new(drive_id, true, None, true)]; _block_files = insert_block_devices(&mut vmm, &mut cmdline, &mut event_manager, block_configs); // Add a net device. let network_interface = NetworkInterfaceConfig { iface_id: String::from("netif"), host_dev_name: String::from("hostname"), guest_mac: None, rx_rate_limiter: None, tx_rate_limiter: None, allow_mmds_requests: true, }; insert_net_device( &mut vmm, &mut cmdline, &mut event_manager, network_interface, ); // Add a vsock device. 
let vsock_dev_id = "vsock"; let vsock_config = VsockDeviceConfig { vsock_id: vsock_dev_id.to_string(), guest_cid: 3, uds_path: tmp_sock_file.as_path().to_str().unwrap().to_string(), }; insert_vsock_device(&mut vmm, &mut cmdline, &mut event_manager, vsock_config); assert_eq!( vmm.mmio_device_manager .save() .serialize(&mut buf.as_mut_slice(), &version_map, 1), Err(VersionizeError::Semantic( "Target version does not implement the virtio-balloon device.".to_string() )) ); version_map .new_version() .set_type_version(DeviceStates::type_id(), 2); vmm.mmio_device_manager .save() .serialize(&mut buf.as_mut_slice(), &version_map, 2) .unwrap(); // We only want to keep the device map from the original MmioDeviceManager. vmm.mmio_device_manager.soft_clone() }; tmp_sock_file.remove().unwrap(); let mut event_manager = EventManager::new().expect("Unable to create EventManager"); let vmm = default_vmm(); let device_states: DeviceStates = DeviceStates::deserialize(&mut buf.as_slice(), &version_map, 2).unwrap(); let restore_args = MMIODevManagerConstructorArgs { mem: vmm.guest_memory().clone(), vm: vmm.vm.fd(), event_manager: &mut event_manager, }; let restored_dev_manager = MMIODeviceManager::restore(restore_args, &device_states).unwrap(); assert_eq!(restored_dev_manager, original_mmio_device_manager); } }
36.316637
98
0.54953
036880d9f830ebe634362628cb77631a1d021e03
25,901
// Copyright (c) The Diem Core Contributors // SPDX-License-Identifier: Apache-2.0 //! Implementation of the unary RPC protocol as per [AptosNet wire protocol v1]. //! //! ## Design: //! //! The unary RPC protocol is implemented here as two independent async completion //! queues: [`InboundRpcs`] and [`OutboundRpcs`]. //! //! The `InboundRpcs` queue is responsible for handling inbound rpc requests //! off-the-wire, forwarding the request to the application layer, waiting for //! the application layer's response, and then enqueuing the rpc response to-be //! written over-the-wire. //! //! Likewise, the `OutboundRpcs` queue is responsible for handling outbound rpc //! requests from the application layer, enqueuing the request for writing onto //! the wire, waiting for a corresponding rpc response, and then notifying the //! requestor of the arrived response message. //! //! Both `InboundRpcs` and `OutboundRpcs` are owned and driven by the [`Peer`] //! actor. This has a few implications. First, it means that each connection has //! its own pair of local rpc completion queues; the queues are _not_ shared //! across connections. Second, the queues don't do any IO work. They're purely //! driven by the owning `Peer` actor, who calls `handle_` methods on new //! [`NetworkMessage`] arrivals and polls for completed rpc requests. The queues //! also do not write to the wire directly; instead, they're given a reference to //! the [`Peer`] actor's write queue, which they can enqueue a new outbound //! [`NetworkMessage`] onto. //! //! ## Timeouts: //! //! Both inbound and outbound requests have mandatory timeouts. The tasks in the //! async completion queues are each wrapped in a `timeout` future, which causes //! the task to complete with an error if the task isn't fulfilled before the //! deadline. //! //! ## Limits: //! //! We limit the number of pending inbound and outbound RPC tasks to ensure that //! resource usage is bounded. //! //! 
[AptosNet wire protocol v1]: https://github.com/aptos-labs/aptos-core/blob/main/specifications/network/messaging-v1.md //! [`Peer`]: crate::peer::Peer use crate::{ counters::{ self, CANCELED_LABEL, DECLINED_LABEL, FAILED_LABEL, RECEIVED_LABEL, REQUEST_LABEL, RESPONSE_LABEL, SENT_LABEL, }, logging::NetworkSchema, peer::PeerNotification, peer_manager::PeerManagerError, protocols::{ network::SerializedRequest, wire::messaging::v1::{NetworkMessage, Priority, RequestId, RpcRequest, RpcResponse}, }, ProtocolId, }; use anyhow::anyhow; use aptos_config::network_id::NetworkContext; use aptos_id_generator::{IdGenerator, U32IdGenerator}; use aptos_logger::prelude::*; use aptos_time_service::{timeout, TimeService, TimeServiceTrait}; use aptos_types::PeerId; use bytes::Bytes; use channel::aptos_channel; use error::RpcError; use futures::{ channel::oneshot, future::{BoxFuture, FusedFuture, Future, FutureExt}, sink::SinkExt, stream::{FuturesUnordered, StreamExt}, }; use serde::Serialize; use short_hex_str::AsShortHexStr; use std::{cmp::PartialEq, collections::HashMap, fmt::Debug, time::Duration}; pub mod error; /// A wrapper struct for an inbound rpc request and its associated context. #[derive(Debug)] pub struct InboundRpcRequest { /// The [`ProtocolId`] for which of our upstream application modules should /// handle (i.e., deserialize and then respond to) this inbound rpc request. /// /// For example, if `protocol_id == ProtocolId::ConsensusRpcBcs`, then this /// inbound rpc request will be dispatched to consensus for handling. pub protocol_id: ProtocolId, /// The serialized request data received from the sender. At this layer in /// the stack, the request data is just an opaque blob and will only be fully /// deserialized later in the handling application module. pub data: Bytes, /// Channel over which the rpc response is sent from the upper application /// layer to the network rpc layer. 
/// /// The rpc actor holds onto the receiving end of this channel, awaiting the /// response from the upper layer. If there is an error in, e.g., /// deserializing the request, the upper layer should send an [`RpcError`] /// down the channel to signify that there was an error while handling this /// rpc request. Currently, we just log these errors and drop the request. /// /// The upper client layer should be prepared for `res_tx` to be disconnected /// when trying to send their response, as the rpc call might have timed out /// while handling the request. pub res_tx: oneshot::Sender<Result<Bytes, RpcError>>, } impl SerializedRequest for InboundRpcRequest { fn protocol_id(&self) -> ProtocolId { self.protocol_id } fn data(&self) -> &Bytes { &self.data } } /// A wrapper struct for an outbound rpc request and its associated context. #[derive(Debug, Serialize)] pub struct OutboundRpcRequest { /// The remote peer's application module that should handle our outbound rpc /// request. /// /// For example, if `protocol_id == ProtocolId::ConsensusRpcBcs`, then this /// outbound rpc request should be handled by the remote peer's consensus /// application module. pub protocol_id: ProtocolId, /// The serialized request data to be sent to the receiver. At this layer in /// the stack, the request data is just an opaque blob. #[serde(skip)] pub data: Bytes, /// Channel over which the rpc response is sent from the rpc layer to the /// upper client layer. /// /// If there is an error while performing the rpc protocol, e.g., the remote /// peer drops the connection, we will send an [`RpcError`] over the channel. #[serde(skip)] pub res_tx: oneshot::Sender<Result<Bytes, RpcError>>, /// The timeout duration for the entire rpc call. If the timeout elapses, the /// rpc layer will send an [`RpcError::TimedOut`] error over the /// `res_tx` channel to the upper client layer. 
pub timeout: Duration, } impl SerializedRequest for OutboundRpcRequest { fn protocol_id(&self) -> ProtocolId { self.protocol_id } fn data(&self) -> &Bytes { &self.data } } impl PartialEq for InboundRpcRequest { fn eq(&self, other: &Self) -> bool { self.protocol_id == other.protocol_id && self.data == other.data } } /// `InboundRpcs` handles new inbound rpc requests off the wire, notifies the /// `PeerManager` of the new request, and stores the pending response on a queue. /// If the response eventually completes, `InboundRpc` records some metrics and /// enqueues the response message onto the outbound write queue. /// /// There is one `InboundRpcs` handler per [`Peer`](crate::peer::Peer). pub struct InboundRpcs { /// The network instance this Peer actor is running under. network_context: NetworkContext, /// A handle to a time service for easily mocking time-related operations. time_service: TimeService, /// The PeerId of this connection's remote peer. Used for logging. remote_peer_id: PeerId, /// The core async queue of pending inbound rpc tasks. The tasks are driven /// to completion by the `InboundRpcs::next_completed_response()` method. inbound_rpc_tasks: FuturesUnordered<BoxFuture<'static, Result<RpcResponse, RpcError>>>, /// A blanket timeout on all inbound rpc requests. If the application handler /// doesn't respond to the request before this timeout, the request will be /// dropped. inbound_rpc_timeout: Duration, /// Only allow this many concurrent inbound rpcs at one time from this remote /// peer. New inbound requests exceeding this limit will be dropped. 
max_concurrent_inbound_rpcs: u32, } impl InboundRpcs { pub fn new( network_context: NetworkContext, time_service: TimeService, remote_peer_id: PeerId, inbound_rpc_timeout: Duration, max_concurrent_inbound_rpcs: u32, ) -> Self { Self { network_context, time_service, remote_peer_id, inbound_rpc_tasks: FuturesUnordered::new(), inbound_rpc_timeout, max_concurrent_inbound_rpcs, } } /// Handle a new inbound `RpcRequest` message off the wire. pub fn handle_inbound_request( &mut self, peer_notifs_tx: &mut aptos_channel::Sender<ProtocolId, PeerNotification>, request: RpcRequest, ) -> Result<(), RpcError> { let network_context = &self.network_context; // Drop new inbound requests if our completion queue is at capacity. if self.inbound_rpc_tasks.len() as u32 == self.max_concurrent_inbound_rpcs { // Increase counter of declined responses and log warning. counters::rpc_messages(network_context, RESPONSE_LABEL, DECLINED_LABEL).inc(); return Err(RpcError::TooManyPending(self.max_concurrent_inbound_rpcs)); } let protocol_id = request.protocol_id; let request_id = request.request_id; let priority = request.priority; let req_len = request.raw_request.len() as u64; trace!( NetworkSchema::new(network_context).remote_peer(&self.remote_peer_id), "{} Received inbound rpc request from peer {} with request_id {} and protocol_id {}", network_context, self.remote_peer_id.short_str(), request_id, protocol_id, ); // Collect counters for received request. counters::rpc_messages(network_context, REQUEST_LABEL, RECEIVED_LABEL).inc(); counters::rpc_bytes(network_context, REQUEST_LABEL, RECEIVED_LABEL).inc_by(req_len); let timer = counters::inbound_rpc_handler_latency(network_context, protocol_id).start_timer(); // Foward request to PeerManager for handling. 
let (response_tx, response_rx) = oneshot::channel(); let notif = PeerNotification::RecvRpc(InboundRpcRequest { protocol_id, data: Bytes::from(request.raw_request), res_tx: response_tx, }); if let Err(err) = peer_notifs_tx.push(protocol_id, notif) { counters::rpc_messages(network_context, RESPONSE_LABEL, FAILED_LABEL).inc(); return Err(err.into()); } // Create a new task that waits for a response from the upper layer with a timeout. let inbound_rpc_task = self .time_service .timeout(self.inbound_rpc_timeout, response_rx) .map(move |result| { // Flatten the errors let maybe_response = match result { Ok(Ok(Ok(response_bytes))) => Ok(RpcResponse { request_id, priority, raw_response: Vec::from(response_bytes.as_ref()), }), Ok(Ok(Err(err))) => Err(err), Ok(Err(oneshot::Canceled)) => Err(RpcError::UnexpectedResponseChannelCancel), Err(timeout::Elapsed) => Err(RpcError::TimedOut), }; // Only record latency of successful requests match maybe_response { Ok(_) => timer.stop_and_record(), Err(_) => timer.stop_and_discard(), }; maybe_response }) .boxed(); // Add that task to the inbound completion queue. These tasks are driven // forward by `Peer` awaiting `self.next_completed_response()`. self.inbound_rpc_tasks.push(inbound_rpc_task); Ok(()) } /// Method for `Peer` actor to drive the pending inbound rpc tasks forward. /// The returned `Future` is a `FusedFuture` so it works correctly in a /// `futures::select!`. pub fn next_completed_response( &mut self, ) -> impl Future<Output = Result<RpcResponse, RpcError>> + FusedFuture + '_ { self.inbound_rpc_tasks.select_next_some() } /// Handle a completed response from the application handler. If successful, /// we update the appropriate counters and enqueue the response message onto /// the outbound write queue. 
pub async fn send_outbound_response( &mut self, write_reqs_tx: &mut channel::Sender<( NetworkMessage, oneshot::Sender<Result<(), PeerManagerError>>, )>, maybe_response: Result<RpcResponse, RpcError>, ) -> Result<(), RpcError> { let network_context = &self.network_context; let response = match maybe_response { Ok(response) => response, Err(err) => { counters::rpc_messages(network_context, RESPONSE_LABEL, FAILED_LABEL).inc(); return Err(err); } }; let res_len = response.raw_response.len() as u64; // Send outbound response to remote peer. trace!( NetworkSchema::new(network_context).remote_peer(&self.remote_peer_id), "{} Sending rpc response to peer {} for request_id {}", network_context, self.remote_peer_id.short_str(), response.request_id, ); let message = NetworkMessage::RpcResponse(response); let (ack_tx, _) = oneshot::channel(); write_reqs_tx.send((message, ack_tx)).await?; // Collect counters for sent response. counters::rpc_messages(network_context, RESPONSE_LABEL, SENT_LABEL).inc(); counters::rpc_bytes(network_context, RESPONSE_LABEL, SENT_LABEL).inc_by(res_len); Ok(()) } } /// `OutboundRpcs` handles new outbound rpc requests made from the application layer. /// /// There is one `OutboundRpcs` handler per [`Peer`](crate::peer::Peer). pub struct OutboundRpcs { /// The network instance this Peer actor is running under. network_context: NetworkContext, /// A handle to a time service for easily mocking time-related operations. time_service: TimeService, /// The PeerId of this connection's remote peer. Used for logging. remote_peer_id: PeerId, /// Generates the next RequestId to use for the next outbound RPC. Note that /// request ids are local to each connection. request_id_gen: U32IdGenerator, /// A completion queue of pending outbound rpc tasks. Each task waits for /// either a successful `RpcResponse` message, handed to it via the channel /// in `pending_outbound_rpcs`, or waits for a timeout or cancellation /// notification. 
After completion, the task will yield its `RequestId` and /// other metadata (success/failure, success latency, response length) via /// the future from `next_completed_request`. outbound_rpc_tasks: FuturesUnordered<BoxFuture<'static, (RequestId, Result<(f64, u64), RpcError>)>>, /// Maps a `RequestId` into a handle to a task in the `outbound_rpc_tasks` /// completion queue. When a new `RpcResponse` message comes in, we will use /// this map to notify the corresponding task that its response has arrived. pending_outbound_rpcs: HashMap<RequestId, oneshot::Sender<RpcResponse>>, /// Only allow this many concurrent outbound rpcs at one time from this remote /// peer. New outbound requests exceeding this limit will be dropped. max_concurrent_outbound_rpcs: u32, } impl OutboundRpcs { pub fn new( network_context: NetworkContext, time_service: TimeService, remote_peer_id: PeerId, max_concurrent_outbound_rpcs: u32, ) -> Self { Self { network_context, time_service, remote_peer_id, request_id_gen: U32IdGenerator::new(), outbound_rpc_tasks: FuturesUnordered::new(), pending_outbound_rpcs: HashMap::new(), max_concurrent_outbound_rpcs, } } /// Handle a new outbound rpc request from the application layer. pub async fn handle_outbound_request( &mut self, request: OutboundRpcRequest, write_reqs_tx: &mut channel::Sender<( NetworkMessage, oneshot::Sender<Result<(), PeerManagerError>>, )>, ) -> Result<(), RpcError> { let network_context = &self.network_context; let peer_id = &self.remote_peer_id; // Unpack request. let OutboundRpcRequest { protocol_id, data: request_data, timeout, res_tx: mut application_response_tx, } = request; let req_len = request_data.len() as u64; // Drop the outbound request if the application layer has already canceled. 
if application_response_tx.is_canceled() { counters::rpc_messages(network_context, REQUEST_LABEL, CANCELED_LABEL).inc(); return Err(RpcError::UnexpectedResponseChannelCancel); } // Drop new outbound requests if our completion queue is at capacity. if self.outbound_rpc_tasks.len() == self.max_concurrent_outbound_rpcs as usize { counters::rpc_messages(network_context, REQUEST_LABEL, DECLINED_LABEL).inc(); // Notify application that their request was dropped due to capacity. let err = Err(RpcError::TooManyPending(self.max_concurrent_outbound_rpcs)); let _ = application_response_tx.send(err); return Err(RpcError::TooManyPending(self.max_concurrent_outbound_rpcs)); } let request_id = self.request_id_gen.next(); trace!( NetworkSchema::new(network_context).remote_peer(peer_id), "{} Sending outbound rpc request with request_id {} and protocol_id {} to {}", network_context, request_id, protocol_id, peer_id.short_str(), ); // Start timer to collect outbound RPC latency. let timer = counters::outbound_rpc_request_latency(network_context, protocol_id).start_timer(); // Enqueue rpc request message onto outbound write queue. let message = NetworkMessage::RpcRequest(RpcRequest { protocol_id, request_id, priority: Priority::default(), raw_request: Vec::from(request_data.as_ref()), }); let (ack_tx, _) = oneshot::channel(); write_reqs_tx.send((message, ack_tx)).await?; // Collect counters for requests sent. counters::rpc_messages(network_context, REQUEST_LABEL, SENT_LABEL).inc(); counters::rpc_bytes(network_context, REQUEST_LABEL, SENT_LABEL).inc_by(req_len); // Create channel over which response is delivered to outbound_rpc_task. let (response_tx, response_rx) = oneshot::channel::<RpcResponse>(); // Store send-side in the pending map so we can notify outbound_rpc_task // when the rpc response has arrived. self.pending_outbound_rpcs.insert(request_id, response_tx); // A future that waits for the rpc response with a timeout. 
We create the // timeout out here to start the timer as soon as we push onto the queue // (as opposed to whenever it first gets polled on the queue). let wait_for_response = self .time_service .timeout(timeout, response_rx) .map(|result| { // Flatten errors. match result { Ok(Ok(response)) => Ok(Bytes::from(response.raw_response)), Ok(Err(oneshot::Canceled)) => Err(RpcError::UnexpectedResponseChannelCancel), Err(timeout::Elapsed) => Err(RpcError::TimedOut), } }); // A future that waits for the response and sends it to the application. let notify_application = async move { // This future will complete if the application layer cancels the request. let mut cancellation = application_response_tx.cancellation().fuse(); // Pin the response future to the stack so we don't have to box it. tokio::pin!(wait_for_response); futures::select! { maybe_response = wait_for_response => { // TODO(philiphayes): Clean up RpcError. Effectively need to // clone here to pass the result up to application layer, but // RpcError is not currently cloneable. let result_copy = match &maybe_response { Ok(response) => Ok(response.len() as u64), Err(err) => Err(RpcError::Error(anyhow!(err.to_string()))), }; // Notify the application of the results. application_response_tx.send(maybe_response).map_err(|_| RpcError::UnexpectedResponseChannelCancel)?; result_copy } _ = cancellation => Err(RpcError::UnexpectedResponseChannelCancel), } }; let outbound_rpc_task = async move { // Always return the request_id so we can garbage collect the // pending_outbound_rpcs map. match notify_application.await { Ok(response_len) => { let latency = timer.stop_and_record(); (request_id, Ok((latency, response_len))) } Err(err) => { // don't record timer.stop_and_discard(); (request_id, Err(err)) } } }; self.outbound_rpc_tasks.push(outbound_rpc_task.boxed()); Ok(()) } /// Method for `Peer` actor to drive the pending outbound rpc tasks forward. 
/// The returned `Future` is a `FusedFuture` so it works correctly in a /// `futures::select!`. pub fn next_completed_request( &mut self, ) -> impl Future<Output = (RequestId, Result<(f64, u64), RpcError>)> + FusedFuture + '_ { self.outbound_rpc_tasks.select_next_some() } /// Handle a newly completed task from the `self.outbound_rpc_tasks` queue. /// At this point, the application layer's request has already been fulfilled; /// we just need to clean up this request and update some counters. pub fn handle_completed_request( &mut self, request_id: RequestId, result: Result<(f64, u64), RpcError>, ) { // Remove request_id from pending_outbound_rpcs if not already removed. // // We don't care about the value from `remove` here. If the request // timed-out or was canceled, it will still be in the pending map. // Otherwise, if we received a response for our request, we will have // removed and triggered the oneshot from the pending map, notifying us. let _ = self.pending_outbound_rpcs.remove(&request_id); let network_context = &self.network_context; let peer_id = &self.remote_peer_id; match result { Ok((latency, request_len)) => { counters::rpc_messages(network_context, RESPONSE_LABEL, RECEIVED_LABEL).inc(); counters::rpc_bytes(network_context, RESPONSE_LABEL, RECEIVED_LABEL) .inc_by(request_len); trace!( NetworkSchema::new(network_context).remote_peer(peer_id), "{} Received response for request_id {} from peer {} \ with {:.6} seconds of latency", network_context, request_id, peer_id.short_str(), latency, ); } Err(err) => { if let RpcError::UnexpectedResponseChannelCancel = err { counters::rpc_messages(network_context, REQUEST_LABEL, CANCELED_LABEL).inc(); } else { counters::rpc_messages(network_context, REQUEST_LABEL, FAILED_LABEL).inc(); } warn!( NetworkSchema::new(network_context).remote_peer(peer_id), "{} Error making outbound rpc request with request_id {} to {}: {}", network_context, request_id, peer_id.short_str(), err ); } } } /// Handle a new inbound 
`RpcResponse` message. If we have a pending request /// with a matching request id in the `pending_outbound_rpcs` map, this will /// trigger that corresponding task to wake up and complete in /// `handle_completed_request`. pub fn handle_inbound_response(&mut self, response: RpcResponse) { let network_context = &self.network_context; let peer_id = &self.remote_peer_id; let request_id = response.request_id; let is_canceled = if let Some(response_tx) = self.pending_outbound_rpcs.remove(&request_id) { response_tx.send(response).is_err() } else { true }; if is_canceled { info!( NetworkSchema::new(network_context).remote_peer(peer_id), request_id = request_id, "{} Received response for expired request_id {} from {}. Discarding.", network_context, request_id, peer_id.short_str(), ); } else { trace!( NetworkSchema::new(network_context).remote_peer(peer_id), request_id = request_id, "{} Notified pending outbound rpc task of inbound response for request_id {} from {}", network_context, request_id, peer_id.short_str(), ); } } }
42.321895
122
0.633412
9148375c6ead141f11e877123e856c6e7af0ae13
3,531
use std::time::Duration; #[derive(Copy, Clone, Debug)] pub struct ConstantGrowth { delay: Duration, incr: Duration, } impl ConstantGrowth { pub const fn new(delay: Duration, incr: Duration) -> Self { Self { delay, incr } } pub const fn clamp(self, max_delay: Duration, max_retries: usize) -> Clamped<Self> { Clamped::new(self, max_delay, max_retries) } } impl From<Duration> for ConstantGrowth { fn from(delay: Duration) -> Self { Self::new(delay, Duration::from_secs(1)) } } impl Iterator for ConstantGrowth { type Item = Duration; fn next(&mut self) -> Option<Duration> { let delay = self.delay; if let Some(next) = self.delay.checked_add(self.incr) { self.delay = next; } Some(delay) } } #[derive(Copy, Clone, Debug)] pub struct Clamped<S> { pub strategy: S, pub max_delay: Duration, pub max_retries: usize, } impl<S> Clamped<S> { pub const fn new(strategy: S, max_delay: Duration, max_retries: usize) -> Self { Self { strategy, max_delay, max_retries, } } pub fn iter(self) -> impl Iterator<Item = Duration> where S: Iterator<Item = Duration>, { let Self { strategy, max_retries, max_delay, } = self; strategy .take(max_retries) .map(move |delay| delay.min(max_delay)) } } #[cfg(test)] mod tests { use super::*; const CONST_STRATEGY: ConstantGrowth = ConstantGrowth::new(Duration::from_secs(1), Duration::from_millis(500)); #[test] fn const_growth_no_clamp() { let delays = CONST_STRATEGY.take(10).collect::<Vec<_>>(); assert_eq!( delays, vec![ Duration::from_millis(1000), Duration::from_millis(1500), Duration::from_millis(2000), Duration::from_millis(2500), Duration::from_millis(3000), Duration::from_millis(3500), Duration::from_millis(4000), Duration::from_millis(4500), Duration::from_millis(5000), Duration::from_millis(5500) ] ); } #[test] fn clamped_const_growth_max_delay() { let strategy = CONST_STRATEGY.clamp(Duration::from_secs(10), 10); let delays = strategy.iter().collect::<Vec<_>>(); assert_eq!( delays, vec![ Duration::from_millis(1000), Duration::from_millis(1500), 
Duration::from_millis(2000), Duration::from_millis(2500), Duration::from_millis(3000), Duration::from_millis(3500), Duration::from_millis(4000), Duration::from_millis(4500), Duration::from_millis(5000), Duration::from_millis(5500) ] ); } #[test] fn clamped_const_growth_max_retries() { let strategy = CONST_STRATEGY.clamp(Duration::from_secs(10000), 5); let delays = strategy.iter().collect::<Vec<_>>(); assert_eq!( delays, vec![ Duration::from_millis(1000), Duration::from_millis(1500), Duration::from_millis(2000), Duration::from_millis(2500), Duration::from_millis(3000) ] ); } }
26.155556
88
0.535542
21f1de4166ed3e86584b56e18bb55fb40db783fe
1,264
// Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0. use std::fmt::{self, Debug, Formatter}; use std::io::Result; pub trait EncryptionKeyManager: Sync + Send { fn get_file(&self, fname: &str) -> Result<FileEncryptionInfo>; fn new_file(&self, fname: &str) -> Result<FileEncryptionInfo>; fn delete_file(&self, fname: &str) -> Result<()>; fn link_file(&self, src_fname: &str, dst_fname: &str) -> Result<()>; } #[derive(Clone, PartialEq, Eq)] pub struct FileEncryptionInfo { pub method: EncryptionMethod, pub key: Vec<u8>, pub iv: Vec<u8>, } impl Default for FileEncryptionInfo { fn default() -> Self { FileEncryptionInfo { method: EncryptionMethod::Unknown, key: vec![], iv: vec![], } } } impl Debug for FileEncryptionInfo { fn fmt(&self, f: &mut Formatter) -> fmt::Result { write!( f, "FileEncryptionInfo [method={:?}, key=...<{} bytes>, iv=...<{} bytes>]", self.method, self.key.len(), self.iv.len() ) } } #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub enum EncryptionMethod { Unknown = 0, Plaintext = 1, Aes128Ctr = 2, Aes192Ctr = 3, Aes256Ctr = 4, }
25.795918
84
0.579114
643d10d12d4b70885d0feb60feb4dc021458929a
3,670
//! API for the IWDG //! //! You can activate the watchdog by calling `start` or the setting appropriate //! device option bit when programming. //! //! After activating the watchdog, you'll have to regularly `feed` the watchdog. //! If more time than `timeout` has gone by since the last `feed`, your //! microcontroller will be reset. //! //! This is useful if you fear that your program may get stuck. In that case it //! won't feed the watchdog anymore, the watchdog will reset the microcontroller //! and thus your program will function again. //! //! **Attention**: //! //! The IWDG runs on a separate 40kHz low-accuracy clock (30kHz-60kHz). You may //! want to some buffer in your interval. //! //! Per default the iwdg continues to run even when you stopped execution of code via a debugger. //! You may want to disable the watchdog when the cpu is stopped //! //! ``` ignore //! let dbgmcu = p.DBGMCU; //! dbgmcu.apb1_fz.modify(|_, w| w.dbg_iwdg_stop().set_bit()); //! ``` //! //! # Example //! ``` no_run //! use stm32f0xx_hal as hal; //! //! use crate::hal::pac; //! use crate::hal::prelude::*; //! use crate::hal:watchdog::Watchdog; //! use crate::hal:time::Hertz; //! //! let mut p = pac::Peripherals::take().unwrap(); //! //! let mut iwdg = Watchdog::new(p.iwdg); //! iwdg.start(Hertz(100)); //! loop {} //! // Whoops, got stuck, the watchdog issues a reset after 10 ms //! iwdg.feed(); //! 
``` use embedded_hal::watchdog; use crate::pac::IWDG; use crate::time::Hertz; /// Watchdog instance pub struct Watchdog { iwdg: IWDG, } impl watchdog::Watchdog for Watchdog { /// Feed the watchdog, so that at least one `period` goes by before the next /// reset fn feed(&mut self) { self.iwdg.kr.write(|w| w.key().reset()); } } /// Timeout configuration for the IWDG #[derive(PartialEq, PartialOrd, Clone, Copy)] pub struct IwdgTimeout { psc: u8, reload: u16, } impl From<Hertz> for IwdgTimeout { /// This converts the value so it's usable by the IWDG /// Due to conversion losses, the specified frequency is a maximum /// /// It can also only represent values < 10000 Hertz fn from(hz: Hertz) -> Self { let mut time = 40_000 / 4 / hz.0; let mut psc = 0; let mut reload = 0; while psc < 7 { reload = time; if reload < 0x1000 { break; } psc += 1; time /= 2; } // As we get an integer value, reload is always below 0xFFF let reload = reload as u16; IwdgTimeout { psc, reload } } } impl Watchdog { pub fn new(iwdg: IWDG) -> Self { Self { iwdg } } } impl watchdog::WatchdogEnable for Watchdog { type Time = IwdgTimeout; fn start<T>(&mut self, period: T) where T: Into<IwdgTimeout>, { let time: IwdgTimeout = period.into(); // Feed the watchdog in case it's already running // (Waiting for the registers to update takes sometime) self.iwdg.kr.write(|w| w.key().reset()); // Enable the watchdog self.iwdg.kr.write(|w| w.key().start()); self.iwdg.kr.write(|w| w.key().enable()); // Wait until it's safe to write to the registers while self.iwdg.sr.read().pvu().bit() {} self.iwdg.pr.write(|w| w.pr().bits(time.psc)); while self.iwdg.sr.read().rvu().bit() {} self.iwdg.rlr.write(|w| w.rl().bits(time.reload)); // Wait until the registers are updated before issuing a reset with // (potentially false) values while self.iwdg.sr.read().bits() != 0 {} self.iwdg.kr.write(|w| w.key().reset()); } }
30.081967
97
0.60545
ff476e5d42bb75fcba5aba497bead3d2f2b4b261
8,923
#[allow(dead_code)] pub static NAME: &[&str] = &[ "{person.last} {company.suffix}", "{person.last}-{person.last}", "{person.last}, {person.last} and {person.last}", ]; #[allow(dead_code)] pub static SUFFIX: &[&str] = &["Inc", "and Sons", "LLC", "Group"]; #[allow(dead_code)] pub static BUZZWORDS: &[&str] = &[ "Adaptive", "Advanced", "Ameliorated", "Assimilated", "Automated", "Balanced", "Business-focused", "Centralized", "Cloned", "Compatible", "Configurable", "Cross-group", "Cross-platform", "Customer-focused", "Customizable", "De-engineered", "Decentralized", "Devolved", "Digitized", "Distributed", "Diverse", "Down-sized", "Enhanced", "Enterprise-wide", "Ergonomic", "Exclusive", "Expanded", "Extended", "Face to face", "Focused", "Front-line", "Fully-configurable", "Function-based", "Fundamental", "Future-proofed", "Grass-roots", "Horizontal", "Implemented", "Innovative", "Integrated", "Intuitive", "Inverse", "Managed", "Mandatory", "Monitored", "Multi-channelled", "Multi-lateral", "Multi-layered", "Multi-tiered", "Networked", "Object-based", "Open-architected", "Open-source", "Operative", "Optimized", "Optional", "Organic", "Organized", "Persevering", "Persistent", "Phased", "Polarised", "Pre-emptive", "Proactive", "Profit-focused", "Profound", "Programmable", "Progressive", "Public-key", "Quality-focused", "Re-contextualized", "Re-engineered", "Reactive", "Realigned", "Reduced", "Reverse-engineered", "Right-sized", "Robust", "Seamless", "Secured", "Self-enabling", "Sharable", "Stand-alone", "Streamlined", "Switchable", "Synchronised", "Synergistic", "Synergized", "Team-oriented", "Total", "Triple-buffered", "Universal", "Up-sized", "Upgradable", "User-centric", "User-friendly", "Versatile", "Virtual", "Vision-oriented", "Visionary", "24 hour", "24/7", "3rd generation", "4th generation", "5th generation", "6th generation", "actuating", "analyzing", "asymmetric", "asynchronous", "attitude-oriented", "background", "bandwidth-monitored", "bi-directional", 
"bifurcated", "bottom-line", "clear-thinking", "client-driven", "client-server", "coherent", "cohesive", "composite", "content-based", "context-sensitive", "contextually-based", "dedicated", "demand-driven", "didactic", "directional", "discrete", "disintermediate", "dynamic", "eco-centric", "empowering", "encompassing", "even-keeled", "executive", "explicit", "exuding", "fault-tolerant", "foreground", "fresh-thinking", "full-range", "global", "grid-enabled", "heuristic", "high-level", "holistic", "homogeneous", "human-resource", "hybrid", "impactful", "incremental", "intangible", "interactive", "intermediate", "leading edge", "local", "logistical", "maximized", "methodical", "mission-critical", "mobile", "modular", "motivating", "multi-state", "multi-tasking", "multimedia", "national", "needs-based", "neutral", "next generation", "non-volatile", "object-oriented", "optimal", "optimizing", "radical", "real-time", "reciprocal", "regional", "responsive", "scalable", "secondary", "solution-oriented", "stable", "static", "system-worthy", "systematic", "systemic", "tangible", "tertiary", "transitional", "uniform", "upward-trending", "user-facing", "value-added", "web-enabled", "well-modulated", "zero administration", "zero defect", "zero tolerance", "Graphic Interface", "Graphical User Interface", "ability", "access", "adapter", "algorithm", "alliance", "analyzer", "application", "approach", "architecture", "archive", "array", "artificial intelligence", "attitude", "benchmark", "budgetary management", "capability", "capacity", "challenge", "circuit", "collaboration", "complexity", "concept", "conglomeration", "contingency", "core", "customer loyalty", "data-warehouse", "database", "definition", "emulation", "encoding", "encryption", "extranet", "firmware", "flexibility", "focus group", "forecast", "frame", "framework", "function", "functionalities", "groupware", "hardware", "help-desk", "hierarchy", "hub", "implementation", "info-mediaries", "infrastructure", 
"initiative", "installation", "instruction set", "interface", "internet solution", "intranet", "knowledge base", "knowledge user", "leverage", "local area network", "matrices", "matrix", "methodology", "middleware", "migration", "model", "moderator", "monitoring", "moratorium", "neural-net", "open architecture", "open system", "orchestration", "paradigm", "parallelism", "policy", "portal", "pricing structure", "process improvement", "product", "productivity", "project", "projection", "protocol", "secured line", "service-desk", "software", "solution", "standardization", "strategy", "structure", "success", "superstructure", "support", "synergy", "system engine", "task-force", "throughput", "time-frame", "toolset", "utilisation", "website", "workforce", ]; #[allow(dead_code)] pub static BS: &[&str] = &[ "aggregate", "architect", "benchmark", "brand", "cultivate", "deliver", "deploy", "disintermediate", "drive", "e-enable", "embrace", "empower", "enable", "engage", "engineer", "enhance", "envisioneer", "evolve", "expedite", "exploit", "extend", "facilitate", "generate", "grow", "harness", "implement", "incentivize", "incubate", "innovate", "integrate", "iterate", "leverage", "matrix", "maximize", "mesh", "monetize", "morph", "optimize", "orchestrate", "productize", "recontextualize", "redefine", "reintermediate", "reinvent", "repurpose", "revolutionize", "scale", "seize", "strategize", "streamline", "syndicate", "synergize", "synthesize", "target", "transform", "transition", "unleash", "utilize", "visualize", "whiteboard", "24/365", "24/7", "B2B", "B2C", "back-end", "best-of-breed", "bleeding-edge", "bricks-and-clicks", "clicks-and-mortar", "collaborative", "compelling", "cross-media", "cross-platform", "customized", "cutting-edge", "distributed", "dot-com", "dynamic", "e-business", "efficient", "end-to-end", "enterprise", "extensible", "frictionless", "front-end", "global", "granular", "holistic", "impactful", "innovative", "integrated", "interactive", "intuitive", 
"killer", "leading-edge", "magnetic", "mission-critical", "next-generation", "one-to-one", "open-source", "out-of-the-box", "plug-and-play", "proactive", "real-time", "revolutionary", "rich", "robust", "scalable", "seamless", "sexy", "sticky", "strategic", "synergistic", "transparent", "turn-key", "ubiquitous", "user-centric", "value-added", "vertical", "viral", "virtual", "visionary", "web-enabled", "wireless", "world-class", "ROI", "action-items", "applications", "architectures", "bandwidth", "channels", "communities", "content", "convergence", "deliverables", "e-business", "e-commerce", "e-markets", "e-services", "e-tailers", "experiences", "eyeballs", "functionalities", "infomediaries", "infrastructures", "initiatives", "interfaces", "markets", "methodologies", "metrics", "mindshare", "models", "networks", "niches", "paradigms", "partnerships", "platforms", "portals", "relationships", "schemas", "solutions", "supply-chains", "synergies", "systems", "technologies", "users", "vortals", "web services", "web-readiness", ];
18.136179
66
0.540065
1adf43304736daa682f4a8558dd461a7b07d6161
442
/// Extra string stuff pub trait StrExtra { fn loose_eq<O: AsRef<str>>(&self, other: O) -> bool; } impl<T: AsRef<str>> StrExtra for T { fn loose_eq<O: AsRef<str>>(&self, other: O) -> bool { self.as_ref().trim().to_lowercase() == other.as_ref().trim().to_lowercase() } } #[cfg(test)] mod tests { use super::*; #[test] fn loose_eq() { let a = " foo "; let b = " FoO \n"; assert!(a.loose_eq(b)); } }
18.416667
79
0.552036
8f90dc817e06be688b9e5fc814358532711fb980
9,607
//! For statement parsing.
//!
//! More information:
//!  - [MDN documentation][mdn]
//!  - [ECMAScript specification][spec]
//!
//! [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/for
//! [spec]: https://tc39.es/ecma262/#sec-for-statement

use crate::{
    syntax::{
        ast::{
            node::{iteration::IterableLoopInitializer, ForInLoop, ForLoop, ForOfLoop, Node},
            Const, Keyword, Punctuator,
        },
        lexer::{Error as LexError, Position, TokenKind},
        parser::{
            expression::Expression,
            statement::declaration::Declaration,
            statement::{variable::VariableDeclarationList, Statement},
            AllowAwait, AllowReturn, AllowYield, Cursor, ParseError, TokenParser,
        },
    },
    BoaProfiler, Interner,
};
use std::io::Read;

/// For statement parsing
///
/// Parses all three `for` forms — classic `for (init; cond; step)`,
/// `for … in …` and `for … of …` — and dispatches to the matching AST node.
///
/// More information:
///  - [MDN documentation][mdn]
///  - [ECMAScript specification][spec]
///
/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/for
/// [spec]: https://tc39.es/ecma262/#sec-for-statement
#[derive(Debug, Clone, Copy)]
pub(in crate::syntax::parser::statement) struct ForStatement {
    // Context flags threaded down into every sub-parser.
    allow_yield: AllowYield,
    allow_await: AllowAwait,
    allow_return: AllowReturn,
}

impl ForStatement {
    /// Creates a new `ForStatement` parser.
    pub(in crate::syntax::parser::statement) fn new<Y, A, R>(
        allow_yield: Y,
        allow_await: A,
        allow_return: R,
    ) -> Self
    where
        Y: Into<AllowYield>,
        A: Into<AllowAwait>,
        R: Into<AllowReturn>,
    {
        Self {
            allow_yield: allow_yield.into(),
            allow_await: allow_await.into(),
            allow_return: allow_return.into(),
        }
    }
}

impl<R> TokenParser<R> for ForStatement
where
    R: Read,
{
    type Output = Node;

    fn parse(
        self,
        cursor: &mut Cursor<R>,
        interner: &mut Interner,
    ) -> Result<Self::Output, ParseError> {
        let _timer = BoaProfiler::global().start_event("ForStatement", "Parsing");
        cursor.expect(Keyword::For, "for statement", interner)?;
        // Remember where the head starts; used for error positions when the
        // initializer turns out to be invalid for a for-in/for-of loop.
        let init_position = cursor
            .expect(Punctuator::OpenParen, "for statement", interner)?
            .span()
            .end();

        // Phase 1: parse the (optional) loop-head initializer. It may be a
        // `var`/`let`/`const` declaration, an arbitrary expression, or absent
        // (next token is `;`).
        let init = match cursor
            .peek(0, interner)?
            .ok_or(ParseError::AbruptEnd)?
            .kind()
        {
            TokenKind::Keyword(Keyword::Var) => {
                let _next = cursor.next(interner)?;
                Some(
                    VariableDeclarationList::new(false, self.allow_yield, self.allow_await)
                        .parse(cursor, interner)
                        .map(Node::from)?,
                )
            }
            TokenKind::Keyword(Keyword::Let | Keyword::Const) => Some(
                Declaration::new(self.allow_yield, self.allow_await, false)
                    .parse(cursor, interner)?,
            ),
            TokenKind::Punctuator(Punctuator::Semicolon) => None,
            _ => Some(
                Expression::new(None, false, self.allow_yield, self.allow_await)
                    .parse(cursor, interner)?,
            ),
        };

        // Phase 2: if the token after the initializer is `in` or `of`, this
        // is a for-in / for-of loop; validate the initializer and build the
        // corresponding node. Otherwise fall through to the classic form.
        match (init.as_ref(), cursor.peek(0, interner)?) {
            (Some(init), Some(tok)) if tok.kind() == &TokenKind::Keyword(Keyword::In) => {
                let init = node_to_iterable_loop_initializer(init, init_position)?;

                let _next = cursor.next(interner)?;
                let expr = Expression::new(None, true, self.allow_yield, self.allow_await)
                    .parse(cursor, interner)?;

                let position = cursor
                    .expect(Punctuator::CloseParen, "for in statement", interner)?
                    .span()
                    .end();

                let body = Statement::new(self.allow_yield, self.allow_await, self.allow_return)
                    .parse(cursor, interner)?;

                // Early Error: It is a Syntax Error if IsLabelledFunction(the first Statement) is true.
                if let Node::FunctionDecl(_) = body {
                    return Err(ParseError::wrong_function_declaration_non_strict(position));
                }

                return Ok(ForInLoop::new(init, expr, body).into());
            }
            (Some(init), Some(tok)) if tok.kind() == &TokenKind::Keyword(Keyword::Of) => {
                let init = node_to_iterable_loop_initializer(init, init_position)?;

                let _next = cursor.next(interner)?;
                let iterable = Expression::new(None, true, self.allow_yield, self.allow_await)
                    .parse(cursor, interner)?;

                let position = cursor
                    .expect(Punctuator::CloseParen, "for of statement", interner)?
                    .span()
                    .end();

                let body = Statement::new(self.allow_yield, self.allow_await, self.allow_return)
                    .parse(cursor, interner)?;

                // Early Error: It is a Syntax Error if IsLabelledFunction(the first Statement) is true.
                if let Node::FunctionDecl(_) = body {
                    return Err(ParseError::wrong_function_declaration_non_strict(position));
                }

                return Ok(ForOfLoop::new(init, iterable, body).into());
            }
            _ => {}
        }

        // Phase 3: classic `for (init; cond; step)` form.
        cursor.expect(Punctuator::Semicolon, "for statement", interner)?;

        // An omitted condition (`for (init;; step)`) is treated as the
        // constant `true`, i.e. loop until a break/return.
        let cond = if cursor.next_if(Punctuator::Semicolon, interner)?.is_some() {
            Const::from(true).into()
        } else {
            let step = Expression::new(None, true, self.allow_yield, self.allow_await)
                .parse(cursor, interner)?;
            cursor.expect(Punctuator::Semicolon, "for statement", interner)?;
            step
        };

        // The step expression is optional: `)` directly after the second `;`
        // means there is none.
        let step = if cursor.next_if(Punctuator::CloseParen, interner)?.is_some() {
            None
        } else {
            let step = Expression::new(None, true, self.allow_yield, self.allow_await)
                .parse(cursor, interner)?;
            cursor.expect(
                TokenKind::Punctuator(Punctuator::CloseParen),
                "for statement",
                interner,
            )?;
            Some(step)
        };

        let position = cursor
            .peek(0, interner)?
            .ok_or(ParseError::AbruptEnd)?
            .span()
            .start();

        let body = Statement::new(self.allow_yield, self.allow_await, self.allow_return)
            .parse(cursor, interner)?;

        // Early Error: It is a Syntax Error if IsLabelledFunction(the first Statement) is true.
        if let Node::FunctionDecl(_) = body {
            return Err(ParseError::wrong_function_declaration_non_strict(position));
        }

        // TODO: do not encapsulate the `for` in a block just to have an inner scope.
        Ok(ForLoop::new(init, cond, step, body).into())
    }
}

/// Validates that a parsed loop-head node is a legal for-in/for-of
/// initializer and converts it into an `IterableLoopInitializer`.
///
/// Legal forms are a bare identifier or a single `var`/`let`/`const`
/// declaration *without* an initializer; everything else is a syntax error
/// reported at `position`.
#[inline]
fn node_to_iterable_loop_initializer(
    node: &Node,
    position: Position,
) -> Result<IterableLoopInitializer, ParseError> {
    match node {
        Node::Identifier(name) => Ok(IterableLoopInitializer::Identifier(*name)),
        Node::VarDeclList(ref list) => match list.as_ref() {
            // Exactly one declared variable, and it must not carry `= expr`.
            [var] => {
                if var.init().is_some() {
                    return Err(ParseError::lex(LexError::Syntax(
                        "a declaration in the head of a for-of loop can't have an initializer"
                            .into(),
                        position,
                    )));
                }
                Ok(IterableLoopInitializer::Var(var.clone()))
            }
            _ => Err(ParseError::lex(LexError::Syntax(
                "only one variable can be declared in the head of a for-of loop".into(),
                position,
            ))),
        },
        Node::LetDeclList(ref list) => match list.as_ref() {
            [var] => {
                if var.init().is_some() {
                    return Err(ParseError::lex(LexError::Syntax(
                        "a declaration in the head of a for-of loop can't have an initializer"
                            .into(),
                        position,
                    )));
                }
                Ok(IterableLoopInitializer::Let(var.clone()))
            }
            _ => Err(ParseError::lex(LexError::Syntax(
                "only one variable can be declared in the head of a for-of loop".into(),
                position,
            ))),
        },
        Node::ConstDeclList(ref list) => match list.as_ref() {
            [var] => {
                if var.init().is_some() {
                    return Err(ParseError::lex(LexError::Syntax(
                        "a declaration in the head of a for-of loop can't have an initializer"
                            .into(),
                        position,
                    )));
                }
                Ok(IterableLoopInitializer::Const(var.clone()))
            }
            _ => Err(ParseError::lex(LexError::Syntax(
                "only one variable can be declared in the head of a for-of loop".into(),
                position,
            ))),
        },
        Node::Assign(_) => Err(ParseError::lex(LexError::Syntax(
            "a declaration in the head of a for-of loop can't have an initializer".into(),
            position,
        ))),
        _ => Err(ParseError::lex(LexError::Syntax(
            "unknown left hand side in head of for-of loop".into(),
            position,
        ))),
    }
}
36.808429
104
0.530238
892a53c9016a6ad6a24edfd21aff702acce0c832
1,052
// https://leetcode.com/problems/permutations/

#[allow(dead_code)]
struct Solution;

impl Solution {
    /// Returns every permutation of `nums`.
    ///
    /// Recursive formulation: for each position, take that element out and
    /// prepend it to every permutation of the remaining elements. The base
    /// case (empty input) yields a single empty permutation.
    ///
    /// Improvements over the naive version: the element's index comes from
    /// `enumerate` directly instead of a redundant O(n) `position` scan
    /// (which also needed a useless `as usize` cast), and the recursive
    /// results are consumed with `into_iter` so each sub-permutation is
    /// moved rather than cloned before the prepend.
    pub fn permute(nums: Vec<i32>) -> Vec<Vec<i32>> {
        if nums.is_empty() {
            return vec![vec![]];
        }
        nums.iter()
            .enumerate()
            .flat_map(|(idx, &chosen)| {
                // Everything except the element taken at `idx`.
                let mut rest = nums.clone();
                rest.remove(idx);
                Self::permute(rest)
                    .into_iter()
                    .map(move |mut perm| {
                        // O(n) prepend keeps the original output ordering.
                        perm.insert(0, chosen);
                        perm
                    })
                    .collect::<Vec<Vec<i32>>>()
            })
            .collect()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_permute() {
        assert_eq!(
            vec![
                vec![1, 2, 3],
                vec![1, 3, 2],
                vec![2, 1, 3],
                vec![2, 3, 1],
                vec![3, 1, 2],
                vec![3, 2, 1],
            ],
            Solution::permute(vec![1, 2, 3]),
        );
    }
}
23.377778
77
0.385932
f9563a16ddb35df96095dfe12ad7bf846ba20e82
6,534
use convergence::server::{self, BindOptions};
use convergence_arrow::datafusion::DataFusionEngine;
use convergence_dynamodb::provider::{DynamoDbKey, DynamoDbTableDefinition, DynamoDbTableProvider};
use datafusion::arrow::datatypes::{DataType, Field, Schema};
use datafusion::prelude::*;
use rusoto_core::{credential::StaticProvider, Client, HttpClient, Region};
use rusoto_dynamodb::{
    AttributeDefinition, AttributeValue, CreateTableInput, DynamoDb, DynamoDbClient, KeySchemaElement, PutItemInput,
};
use std::collections::HashMap;
use std::sync::Arc;
use tokio_postgres::{connect, NoTls};
use uuid::Uuid;

/// Builds a `DataFusionEngine` backed by two freshly-created DynamoDB tables
/// (one hash-key-only, one hash+range composite key), each seeded with ten
/// rows, and registers them as queryable DataFusion tables.
///
/// NOTE(review): assumes a DynamoDB-compatible endpoint (e.g. dynamodb-local)
/// is listening on localhost:8000 — these are integration tests, not unit
/// tests. Table names are random UUIDs so concurrent test runs don't collide.
async fn new_engine() -> DataFusionEngine {
    let ddb_hash_table_name = Uuid::new_v4().to_simple().to_string();
    let ddb_composite_table_name = Uuid::new_v4().to_simple().to_string();

    // use the extended client init to avoid issues in rusoto's usage of hyper
    // https://github.com/hyperium/hyper/issues/2112
    let ddb_client = DynamoDbClient::new_with_client(
        Client::new_with(
            // Dummy static credentials — the local endpoint doesn't verify them.
            StaticProvider::new("blah".to_owned(), "blah".to_owned(), None, None),
            HttpClient::new().unwrap(),
        ),
        Region::Custom {
            name: "test".to_owned(),
            endpoint: "http://localhost:8000".to_owned(),
        },
    );

    // Table 1: single string hash key `some_id`.
    ddb_client
        .create_table(CreateTableInput {
            table_name: ddb_hash_table_name.clone(),
            attribute_definitions: vec![AttributeDefinition {
                attribute_name: "some_id".to_owned(),
                attribute_type: "S".to_owned(),
            }],
            key_schema: vec![KeySchemaElement {
                attribute_name: "some_id".to_owned(),
                key_type: "HASH".to_owned(),
            }],
            billing_mode: Some("PAY_PER_REQUEST".to_owned()),
            ..Default::default()
        })
        .await
        .expect("failed to create ddb table");

    // Table 2: composite key — string hash `partition_id` + numeric range
    // `additional_key`.
    ddb_client
        .create_table(CreateTableInput {
            table_name: ddb_composite_table_name.clone(),
            attribute_definitions: vec![
                AttributeDefinition {
                    attribute_name: "partition_id".to_owned(),
                    attribute_type: "S".to_owned(),
                },
                AttributeDefinition {
                    attribute_name: "additional_key".to_owned(),
                    attribute_type: "N".to_owned(),
                },
            ],
            key_schema: vec![
                KeySchemaElement {
                    attribute_name: "partition_id".to_owned(),
                    key_type: "HASH".to_owned(),
                },
                KeySchemaElement {
                    attribute_name: "additional_key".to_owned(),
                    key_type: "RANGE".to_owned(),
                },
            ],
            billing_mode: Some("PAY_PER_REQUEST".to_owned()),
            ..Default::default()
        })
        .await
        .expect("failed to create ddb table");

    // Seed ten rows into each table. Hash table: item_i -> i * 1.5.
    // Composite table: first five rows in partition "1", rest in "2".
    for i in 0..10 {
        let mut hash_item = HashMap::new();
        hash_item.insert(
            "some_id".to_owned(),
            AttributeValue {
                s: Some(format!("item_{}", i)),
                ..Default::default()
            },
        );
        hash_item.insert(
            "float_val".to_owned(),
            AttributeValue {
                // DynamoDB numbers travel as strings.
                n: Some(format!("{}", (i as f64) * 1.5)),
                ..Default::default()
            },
        );

        ddb_client
            .put_item(PutItemInput {
                table_name: ddb_hash_table_name.clone(),
                item: hash_item,
                ..Default::default()
            })
            .await
            .expect("failed to put item");

        let mut composite_item = HashMap::new();
        composite_item.insert(
            "partition_id".to_owned(),
            AttributeValue {
                s: Some(if i < 5 { "1" } else { "2" }.to_owned()),
                ..Default::default()
            },
        );
        composite_item.insert(
            "additional_key".to_owned(),
            AttributeValue {
                n: Some(i.to_string()),
                ..Default::default()
            },
        );

        ddb_client
            .put_item(PutItemInput {
                table_name: ddb_composite_table_name.clone(),
                item: composite_item,
                ..Default::default()
            })
            .await
            .expect("failed to put item");
    }

    // Expose both DynamoDB tables to DataFusion under fixed SQL names.
    let mut ctx = ExecutionContext::new();
    ctx.register_table(
        "ddb_hash_test",
        Arc::new(DynamoDbTableProvider::new(
            ddb_client.clone(),
            DynamoDbTableDefinition::new(
                ddb_hash_table_name,
                DynamoDbKey::Hash("some_id".to_owned()),
                Arc::new(Schema::new(vec![
                    Field::new("some_id", DataType::Utf8, true),
                    Field::new("float_val", DataType::Float64, true),
                ])),
            ),
        )),
    )
    .expect("failed to register table");

    ctx.register_table(
        "ddb_composite_test",
        Arc::new(DynamoDbTableProvider::new(
            ddb_client.clone(),
            DynamoDbTableDefinition::new(
                ddb_composite_table_name,
                DynamoDbKey::Composite("partition_id".to_owned(), "additional_key".to_owned()),
                Arc::new(Schema::new(vec![
                    Field::new("partition_id", DataType::Utf8, true),
                    Field::new("additional_key", DataType::Float64, true),
                ])),
            ),
        )),
    )
    .expect("failed to register table");

    DataFusionEngine::new(ctx)
}

/// Starts a convergence Postgres-wire server (random free port) backed by a
/// fresh engine, and returns a connected `tokio_postgres` client.
async fn setup() -> tokio_postgres::Client {
    // Port 0 lets the OS pick a free port; run_background reports it back.
    let port = server::run_background(BindOptions::new().with_port(0), Arc::new(|| Box::pin(new_engine())))
        .await
        .unwrap();

    let (client, conn) = connect(&format!("postgres://localhost:{}/test", port), NoTls)
        .await
        .expect("failed to init client");

    // Drive the connection in the background; panic if it errors out.
    tokio::spawn(async move { conn.await.unwrap() });

    client
}

#[tokio::test]
async fn hash_count_rows() {
    // All ten seeded rows should be visible through SQL.
    let client = setup().await;
    let row = client
        .query_one("select count(*) from ddb_hash_test", &[])
        .await
        .unwrap();

    let count: i64 = row.get(0);
    assert_eq!(count, 10);
}

#[tokio::test]
async fn hash_row_values() {
    // Full scan: every (some_id, float_val) pair should match the seed data.
    let client = setup().await;
    let rows = client
        .query("select some_id, float_val from ddb_hash_test order by some_id", &[])
        .await
        .unwrap();

    assert_eq!(rows.len(), 10);

    let get_row = |idx: usize| {
        let row = &rows[idx];
        let cols: (&str, f64) = (row.get(0), row.get(1));
        cols
    };

    for i in 0..10 {
        assert_eq!(get_row(i), (format!("item_{}", i).as_str(), (i as f64) * 1.5));
    }
}

#[tokio::test]
async fn hash_point_query() {
    // Equality predicate on the hash key should return exactly one row.
    let client = setup().await;
    let rows = client
        .query(
            "select some_id, float_val from ddb_hash_test where some_id = 'item_1'",
            &[],
        )
        .await
        .unwrap();

    assert_eq!(rows.len(), 1);

    let get_row = |idx: usize| {
        let row = &rows[idx];
        let cols: (&str, f64) = (row.get(0), row.get(1));
        cols
    };

    assert_eq!(get_row(0), ("item_1", 1.5));
}

#[tokio::test]
async fn composite_count_rows() {
    let client = setup().await;
    let row = client
        .query_one("select count(*) from ddb_composite_test", &[])
        .await
        .unwrap();

    let count: i64 = row.get(0);
    assert_eq!(count, 10);
}

#[tokio::test]
async fn composite_partition_query() {
    // Partition "1" holds the first five rows (additional_key 0..5).
    let client = setup().await;
    let rows = client
        .query(
            "select additional_key from ddb_composite_test where partition_id = '1' order by additional_key",
            &[],
        )
        .await
        .unwrap();

    assert_eq!(rows.len(), 5);
    for (i, row) in rows.iter().enumerate() {
        let value: f64 = row.get(0);
        assert_eq!(value as usize, i);
    }
}
24.110701
113
0.657178
0a79ec30e4b942445b96da9bc08f970a4b023f01
903
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Check that we correctly prevent users from making trait objects // form traits that make use of `Self` in an argument or return position. trait Bar<T> { fn bar(&self, x: &T); } trait Baz : Bar<Self> { } fn make_bar<T:Bar<u32>>(t: &T) -> &Bar<u32> { t } fn make_baz<T:Baz>(t: &T) -> &Baz { //~^ ERROR E0038 //~| NOTE the trait cannot use `Self` as a type parameter in the supertrait listing t } fn main() { }
27.363636
87
0.683278
72fce99a49074c3967d75811797e04b7ed1a1173
2,491
extern crate graphics;
extern crate freetype as ft;
extern crate sdl2_window;
extern crate opengl_graphics;
extern crate piston;
extern crate find_folder;

use sdl2_window::Sdl2Window;
use opengl_graphics::{ GlGraphics, Texture, TextureSettings, OpenGL };
use piston::window::WindowSettings;
use piston::input::*;
use piston::event_loop::{Events, EventSettings, EventLoop};
use graphics::{Context, Graphics, ImageSize};

/// Rasterizes `text` with the given FreeType face and returns one
/// `(texture, [x, y])` pair per character: the glyph's alpha bitmap uploaded
/// as a GL texture plus the screen position it should be drawn at.
fn glyphs(face: &mut ft::Face, text: &str) -> Vec<(Texture, [f64; 2])> {
    // Pen position; starts with a small left margin.
    let mut x = 10;
    let mut y = 0;
    let mut res = vec![];
    for ch in text.chars() {
        // Load and rasterize the glyph in one step (LoadFlag::RENDER).
        face.load_char(ch as usize, ft::face::LoadFlag::RENDER).unwrap();
        let g = face.glyph();

        let bitmap = g.bitmap();
        // The rendered bitmap is a single-channel alpha mask.
        let texture = Texture::from_memory_alpha(
            bitmap.buffer(),
            bitmap.width() as u32,
            bitmap.rows() as u32,
            &TextureSettings::new()
        ).unwrap();
        // Offset the pen by the glyph's bearing (bitmap_left / bitmap_top);
        // y is subtracted because screen coordinates grow downward.
        res.push((texture, [(x + g.bitmap_left()) as f64, (y - g.bitmap_top()) as f64]));

        // Advance the pen. FreeType advances are in 26.6 fixed point
        // (1/64th-pixel units), hence the >> 6.
        x += (g.advance().x >> 6) as i32;
        y += (g.advance().y >> 6) as i32;
    }
    res
}

/// Draws every pre-rasterized glyph at its stored position, in black.
fn render_text<G, T>(glyphs: &[(T, [f64; 2])], c: &Context, gl: &mut G)
    where G: Graphics<Texture = T>, T: ImageSize
{
    for &(ref texture, [x, y]) in glyphs {
        use graphics::*;

        Image::new_color(color::BLACK).draw(
            texture,
            &c.draw_state,
            c.transform.trans(x, y),
            gl
        );
    }
}

/// Example entry point: opens a 300x300 window, loads FiraSans from the
/// assets folder, rasterizes "Hello Piston!" once up front, then redraws it
/// on every render event (lazy event loop — only wakes on input/redraw).
fn main() {
    let opengl = OpenGL::V3_2;
    let mut window: Sdl2Window =
        WindowSettings::new("piston-example-freetype", [300, 300])
        .exit_on_esc(true)
        .opengl(opengl)
        .build()
        .unwrap();

    // Search up to three directory levels up/down for the assets folder.
    let assets = find_folder::Search::ParentsThenKids(3, 3)
        .for_folder("assets").unwrap();
    let freetype = ft::Library::init().unwrap();
    let font = assets.join("FiraSans-Regular.ttf");
    let mut face = freetype.new_face(&font, 0).unwrap();
    // Width 0 lets FreeType derive it from the 48px height.
    face.set_pixel_sizes(0, 48).unwrap();

    let ref mut gl = GlGraphics::new(opengl);
    // Rasterize once; the glyph textures are reused every frame.
    let glyphs = glyphs(&mut face, "Hello Piston!");
    let mut events = Events::new(EventSettings::new().lazy(true));
    while let Some(e) = events.next(&mut window) {
        if let Some(args) = e.render_args() {
            use graphics::*;

            gl.draw(args.viewport(), |c, gl| {
                clear(color::WHITE, gl);
                // Shift the baseline down 100px so the text is visible.
                render_text(&glyphs, &c.trans(0.0, 100.0), gl);
            });
        }
    }
}
29.654762
89
0.571257
3ab3f89d45f93ded0c84bb24f99130ca6a480220
659
use ethane::rpc;
use ethane::types::{Bytes, H256};
use std::convert::TryFrom;
use test_helper::*;

// Integration tests for the `web3_*` namespace of the Ethereum JSON-RPC API.
// NOTE(review): `ConnectionWrapper::new_from_env(None)` suggests the node
// endpoint comes from environment variables — these require a live Geth node
// (the pinned version below) to pass; confirm against test_helper.

#[test]
fn test_web3_client_version() {
    let mut client = ConnectionWrapper::new_from_env(None);
    rpc_call_test_expected(
        &mut client,
        rpc::web3_client_version(),
        // Exact version string of the Geth build the test environment runs;
        // this assertion breaks if the node image is upgraded.
        String::from("Geth/v1.9.25-stable-e7872729/linux-amd64/go1.15.6"),
    );
}

#[test]
fn test_web3_sha3() {
    let mut client = ConnectionWrapper::new_from_env(None);
    // web3_sha3 of empty input must equal the well-known Keccak-256 hash of
    // the empty string (constant provided by test_helper).
    let empty = Bytes::from_slice("".as_bytes());
    let expected = H256::try_from(KECCAK_HASH_OF_EMPTY_STRING).unwrap();
    rpc_call_test_expected(&mut client, rpc::web3_sha3(empty), expected);
}
27.458333
74
0.688923
f9e50273e815ed9d9510e3258c144a746fa45aff
5,756
// Copyright 2020 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT

// Data model for Filecoin conformance test vectors: serde types mirroring the
// JSON vector schema (message- and tipset-class vectors), plus the custom
// deserializers the schema needs (base64-encoded bytes, receipt tuples).

#![cfg(feature = "submodule_tests")]

mod message;
mod rand_replay;
mod stubs;
mod tipset;

pub use self::message::*;
pub use self::rand_replay::*;
pub use self::stubs::*;
pub use self::tipset::*;

use actor::actorv2::CHAOS_ACTOR_CODE_ID;
use address::{Address, Protocol};
use blockstore::BlockStore;
use cid::Cid;
use clock::ChainEpoch;
use crypto::{DomainSeparationTag, Signature};
use encoding::{tuple::*, Cbor};
use fil_types::{SealVerifyInfo, WindowPoStVerifyInfo};
use forest_message::{ChainMessage, Message, MessageReceipt, SignedMessage, UnsignedMessage};
use interpreter::{ApplyRet, BlockMessages, Rand, VM};
use runtime::{ConsensusFault, Syscalls};
use serde::{Deserialize, Deserializer};
use std::error::Error as StdError;
use vm::{ExitCode, Serialized};

/// serde helper: deserializes a base64 string field into raw bytes.
/// Used via `#[serde(with = "base64_bytes")]`.
mod base64_bytes {
    use super::*;
    use serde::de;
    use std::borrow::Cow;

    pub fn deserialize<'de, D>(deserializer: D) -> Result<Vec<u8>, D::Error>
    where
        D: Deserializer<'de>,
    {
        // Cow avoids an allocation when the input can be borrowed.
        let s: Cow<'de, str> = Deserialize::deserialize(deserializer)?;
        Ok(base64::decode(s.as_ref()).map_err(de::Error::custom)?)
    }

    /// Same, but for an array of base64 strings.
    pub mod vec {
        use super::*;

        pub fn deserialize<'de, D>(deserializer: D) -> Result<Vec<Vec<u8>>, D::Error>
        where
            D: Deserializer<'de>,
        {
            let v: Vec<Cow<'de, str>> = Deserialize::deserialize(deserializer)?;
            Ok(v.into_iter()
                .map(|s| base64::decode(s.as_ref()))
                .collect::<Result<Vec<_>, _>>()
                .map_err(de::Error::custom)?)
        }
    }
}

/// serde helper: deserializes the vector's receipt objects (with a base64
/// `return` field) into forest `MessageReceipt`s.
mod message_receipt_vec {
    use super::*;

    // Intermediate shape matching the JSON schema exactly.
    #[derive(Deserialize)]
    pub struct MessageReceiptVector {
        exit_code: ExitCode,
        #[serde(rename = "return", with = "base64_bytes")]
        return_value: Vec<u8>,
        gas_used: i64,
    }

    pub fn deserialize<'de, D>(deserializer: D) -> Result<Vec<MessageReceipt>, D::Error>
    where
        D: Deserializer<'de>,
    {
        let s: Vec<MessageReceiptVector> = Deserialize::deserialize(deserializer)?;
        Ok(s.into_iter()
            .map(|v| MessageReceipt {
                exit_code: v.exit_code,
                return_data: Serialized::new(v.return_value),
                gas_used: v.gas_used,
            })
            .collect())
    }
}

/// A state-tree snapshot referenced by its root CID.
#[derive(Debug, Deserialize)]
pub struct StateTreeVector {
    #[serde(with = "cid::json")]
    pub root_cid: Cid,
}

/// Provenance of a generated vector (which tool/version produced it).
#[derive(Debug, Deserialize, Clone)]
pub struct GenerationData {
    #[serde(default)]
    pub source: String,
    #[serde(default)]
    pub version: String,
}

/// Free-form metadata attached to a vector (id, description, generators).
#[derive(Debug, Deserialize, Clone)]
pub struct MetaData {
    pub id: String,
    #[serde(default)]
    pub version: String,
    #[serde(default)]
    pub description: String,
    #[serde(default)]
    pub comment: String,
    pub gen: Vec<GenerationData>,
}

/// State required before the vector is applied.
#[derive(Debug, Deserialize)]
pub struct PreConditions {
    pub state_tree: StateTreeVector,
    #[serde(default)]
    pub basefee: Option<f64>,
    #[serde(default)]
    pub circ_supply: Option<f64>,
    #[serde(default)]
    pub variants: Vec<Variant>,
}

/// Expected state and receipts after the vector is applied.
#[derive(Debug, Deserialize)]
pub struct PostConditions {
    pub state_tree: StateTreeVector,
    #[serde(with = "message_receipt_vec")]
    pub receipts: Vec<MessageReceipt>,
    #[serde(default, with = "cid::json::vec")]
    pub receipts_roots: Vec<Cid>,
}

/// Criteria used to decide whether a vector applies to this implementation.
#[derive(Debug, Deserialize)]
pub struct Selector {
    #[serde(default)]
    pub puppet_actor: Option<String>,
    #[serde(default)]
    pub chaos_actor: Option<String>,
    #[serde(default)]
    pub min_protocol_version: Option<String>,
}

/// One (epoch, network version) combination a vector should be run at.
#[derive(Debug, Deserialize)]
pub struct Variant {
    pub id: String,
    pub epoch: ChainEpoch,
    pub nv: u32,
}

/// Encoded VM randomness used to be replayed.
pub type Randomness = Vec<RandomnessMatch>;

/// One randomness entry: when `on` matches a VM randomness request, return
/// the pre-recorded bytes in `ret`.
#[derive(Debug, Deserialize)]
pub struct RandomnessMatch {
    pub on: RandomnessRule,
    #[serde(with = "base64_bytes")]
    pub ret: Vec<u8>,
}

/// Which randomness source the rule applies to.
#[derive(Debug, Deserialize, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum RandomnessKind {
    Beacon,
    Chain,
}

/// Rule for matching when randomness is returned.
/// Serialized as a JSON tuple (`Deserialize_tuple`), so field order matters.
#[derive(Debug, Deserialize_tuple, PartialEq)]
pub struct RandomnessRule {
    pub kind: RandomnessKind,
    pub dst: DomainSeparationTag,
    pub epoch: ChainEpoch,
    #[serde(with = "base64_bytes")]
    pub entropy: Vec<u8>,
}

/// A complete test vector; the `class` JSON tag selects the variant.
/// Both variants carry a CAR snapshot (base64), pre/post conditions and the
/// randomness to replay; they differ in applying single messages vs tipsets.
#[derive(Debug, Deserialize)]
#[serde(tag = "class")]
pub enum TestVector {
    #[serde(rename = "message")]
    Message {
        selector: Option<Selector>,
        #[serde(rename = "_meta")]
        meta: Option<MetaData>,
        #[serde(with = "base64_bytes")]
        car: Vec<u8>,
        preconditions: PreConditions,
        apply_messages: Vec<MessageVector>,
        postconditions: PostConditions,
        #[serde(default)]
        randomness: Randomness,
    },
    #[serde(rename = "tipset")]
    Tipset {
        selector: Option<Selector>,
        #[serde(rename = "_meta")]
        meta: Option<MetaData>,
        #[serde(with = "base64_bytes")]
        car: Vec<u8>,
        preconditions: PreConditions,
        apply_tipsets: Vec<TipsetVector>,
        postconditions: PostConditions,
        #[serde(default)]
        randomness: Randomness,
    },
}

// This might be changed to be encoded into vector, matching go runner for now
/// Wraps an unsigned message as a `ChainMessage`; secp256k1 senders get a
/// placeholder 65-byte signature (matching the go test runner's behaviour),
/// everything else stays unsigned.
pub fn to_chain_msg(msg: UnsignedMessage) -> ChainMessage {
    if msg.from().protocol() == Protocol::Secp256k1 {
        ChainMessage::Signed(SignedMessage {
            message: msg,
            signature: Signature::new_secp256k1(vec![0; 65]),
        })
    } else {
        ChainMessage::Unsigned(msg)
    }
}
92
0.627519
69b7b121525871ba5e325e9a9d624ac1dc064db0
18,661
use super::peer_info::{PeerConnectionStatus, PeerInfo};
use super::peer_sync_status::PeerSyncStatus;
use crate::rpc::methods::MetaData;
use crate::PeerId;
use slog::{crit, debug, warn};
use std::collections::{hash_map::Entry, HashMap};
use std::time::Instant;
use types::{EthSpec, SubnetId};

/// A peer's reputation (perceived potential usefulness)
pub type Rep = u8;

/// Reputation change (positive or negative)
pub struct RepChange {
    /// Direction of the change: `true` adds to the reputation, `false` subtracts.
    is_good: bool,
    /// Magnitude of the change; applied with saturating arithmetic.
    diff: Rep,
}

/// Max number of disconnected nodes to remember
const MAX_DC_PEERS: usize = 30;

/// The default starting reputation for an unknown peer.
pub const DEFAULT_REPUTATION: Rep = 50;

/// Storage of known peers, their reputation and information
pub struct PeerDB<TSpec: EthSpec> {
    /// The collection of known connected peers, their status and reputation
    peers: HashMap<PeerId, PeerInfo<TSpec>>,
    /// Tracking of number of disconnected nodes
    // Kept in sync manually by the setters below; bounded by MAX_DC_PEERS.
    n_dc: usize,
    /// PeerDB's logger
    log: slog::Logger,
}

impl RepChange {
    /// A positive reputation adjustment of `diff`.
    pub fn good(diff: Rep) -> Self {
        RepChange {
            is_good: true,
            diff,
        }
    }
    /// A negative reputation adjustment of `diff`.
    pub fn bad(diff: Rep) -> Self {
        RepChange {
            is_good: false,
            diff,
        }
    }
    /// The strongest possible negative adjustment (drives reputation to 0).
    pub const fn worst() -> Self {
        RepChange {
            is_good: false,
            diff: Rep::max_value(),
        }
    }
}

impl<TSpec: EthSpec> PeerDB<TSpec> {
    /// Creates an empty database using a clone of the given logger.
    pub fn new(log: &slog::Logger) -> Self {
        Self {
            log: log.clone(),
            n_dc: 0,
            peers: HashMap::new(),
        }
    }

    /* Getters */

    /// Gives the reputation of a peer, or DEFAULT_REPUTATION if it is unknown.
    pub fn reputation(&self, peer_id: &PeerId) -> Rep {
        self.peers
            .get(peer_id)
            .map_or(DEFAULT_REPUTATION, |info| info.reputation)
    }

    /// Returns an iterator over all peers in the db.
    pub fn peers(&self) -> impl Iterator<Item = (&PeerId, &PeerInfo<TSpec>)> {
        self.peers.iter()
    }

    /// Returns an iterator over all peers in the db.
    pub(super) fn _peers_mut(&mut self) -> impl Iterator<Item = (&PeerId, &mut PeerInfo<TSpec>)> {
        self.peers.iter_mut()
    }

    /// Gives the ids of all known peers.
pub fn peer_ids(&self) -> impl Iterator<Item = &PeerId> { self.peers.keys() } /// Returns a peer's info, if known. pub fn peer_info(&self, peer_id: &PeerId) -> Option<&PeerInfo<TSpec>> { self.peers.get(peer_id) } /// Returns a mutable reference to a peer's info if known. /// TODO: make pub(super) to ensure that peer management is unified pub fn peer_info_mut(&mut self, peer_id: &PeerId) -> Option<&mut PeerInfo<TSpec>> { self.peers.get_mut(peer_id) } /// Returns true if the peer is synced at least to our current head. pub fn peer_synced(&self, peer_id: &PeerId) -> bool { match self.peers.get(peer_id).map(|info| &info.sync_status) { Some(PeerSyncStatus::Synced { .. }) => true, Some(_) => false, None => false, } } /// Gives the ids of all known connected peers. pub fn connected_peers(&self) -> impl Iterator<Item = (&PeerId, &PeerInfo<TSpec>)> { self.peers .iter() .filter(|(_, info)| info.connection_status.is_connected()) } /// Gives the ids of all known connected peers. pub fn connected_peer_ids(&self) -> impl Iterator<Item = &PeerId> { self.peers .iter() .filter(|(_, info)| info.connection_status.is_connected()) .map(|(peer_id, _)| peer_id) } /// Connected or dialing peers pub fn connected_or_dialing_peers(&self) -> impl Iterator<Item = &PeerId> { self.peers .iter() .filter(|(_, info)| { info.connection_status.is_connected() || info.connection_status.is_dialing() }) .map(|(peer_id, _)| peer_id) } /// Gives the `peer_id` of all known connected and synced peers. pub fn synced_peers(&self) -> impl Iterator<Item = &PeerId> { self.peers .iter() .filter(|(_, info)| { if info.sync_status.is_synced() || info.sync_status.is_advanced() { return info.connection_status.is_connected(); } false }) .map(|(peer_id, _)| peer_id) } /// Gives an iterator of all peers on a given subnet. 
pub fn peers_on_subnet(&self, subnet_id: SubnetId) -> impl Iterator<Item = &PeerId> { self.peers .iter() .filter(move |(_, info)| { info.connection_status.is_connected() && info.on_subnet(subnet_id) }) .map(|(peer_id, _)| peer_id) } /// Gives the ids of all known disconnected peers. pub fn disconnected_peers(&self) -> impl Iterator<Item = &PeerId> { self.peers .iter() .filter(|(_, info)| info.connection_status.is_disconnected()) .map(|(peer_id, _)| peer_id) } /// Gives the ids of all known banned peers. pub fn banned_peers(&self) -> impl Iterator<Item = &PeerId> { self.peers .iter() .filter(|(_, info)| info.connection_status.is_banned()) .map(|(peer_id, _)| peer_id) } /// Returns a vector containing peers (their ids and info), sorted by /// reputation from highest to lowest, and filtered using `is_status` pub fn best_peers_by_status<F>(&self, is_status: F) -> Vec<(&PeerId, &PeerInfo<TSpec>)> where F: Fn(&PeerConnectionStatus) -> bool, { let mut by_status = self .peers .iter() .filter(|(_, info)| is_status(&info.connection_status)) .collect::<Vec<_>>(); by_status.sort_by_key(|(_, info)| Rep::max_value() - info.reputation); by_status } /// Returns the peer with highest reputation that satisfies `is_status` pub fn best_by_status<F>(&self, is_status: F) -> Option<&PeerId> where F: Fn(&PeerConnectionStatus) -> bool, { self.peers .iter() .filter(|(_, info)| is_status(&info.connection_status)) .max_by_key(|(_, info)| info.reputation) .map(|(id, _)| id) } /// Returns the peer's connection status. Returns unknown if the peer is not in the DB. pub fn connection_status(&self, peer_id: &PeerId) -> Option<PeerConnectionStatus> { self.peer_info(peer_id) .map(|info| info.connection_status.clone()) } /// Returns if the peer is already connected. pub fn is_connected(&self, peer_id: &PeerId) -> bool { if let Some(PeerConnectionStatus::Connected { .. }) = self.connection_status(peer_id) { true } else { false } } /// If we are connected or currently dialing the peer returns true. 
    pub fn is_connected_or_dialing(&self, peer_id: &PeerId) -> bool {
        match self.connection_status(peer_id) {
            Some(PeerConnectionStatus::Connected { .. })
            | Some(PeerConnectionStatus::Dialing { .. }) => true,
            _ => false,
        }
    }

    /* Setters */

    /// A peer is being dialed.
    pub fn dialing_peer(&mut self, peer_id: &PeerId) {
        let info = self.peers.entry(peer_id.clone()).or_default();
        // Leaving the disconnected state: keep the disconnected counter in sync.
        if info.connection_status.is_disconnected() {
            self.n_dc = self.n_dc.saturating_sub(1);
        }
        info.connection_status = PeerConnectionStatus::Dialing {
            since: Instant::now(),
        };
        debug!(self.log, "Peer dialing in db"; "peer_id" => peer_id.to_string(), "n_dc" => self.n_dc);
    }

    /// Sets a peer as connected with an ingoing connection.
    pub fn connect_ingoing(&mut self, peer_id: &PeerId) {
        let info = self.peers.entry(peer_id.clone()).or_default();
        // Leaving the disconnected state: keep the disconnected counter in sync.
        if info.connection_status.is_disconnected() {
            self.n_dc = self.n_dc.saturating_sub(1);
        }
        info.connection_status.connect_ingoing();
        debug!(self.log, "Peer connected to db"; "peer_id" => peer_id.to_string(), "n_dc" => self.n_dc);
    }

    /// Sets a peer as connected with an outgoing connection.
    pub fn connect_outgoing(&mut self, peer_id: &PeerId) {
        let info = self.peers.entry(peer_id.clone()).or_default();
        // Leaving the disconnected state: keep the disconnected counter in sync.
        if info.connection_status.is_disconnected() {
            self.n_dc = self.n_dc.saturating_sub(1);
        }
        info.connection_status.connect_outgoing();
        debug!(self.log, "Peer connected to db"; "peer_id" => peer_id.to_string(), "n_dc" => self.n_dc);
    }

    /// Sets the peer as disconnected. A banned peer remains banned
    pub fn disconnect(&mut self, peer_id: &PeerId) {
        let log_ref = &self.log;
        let info = self.peers.entry(peer_id.clone()).or_insert_with(|| {
            warn!(log_ref, "Disconnecting unknown peer"; "peer_id" => peer_id.to_string());
            PeerInfo::default()
        });
        // Only count the transition once; banned peers stay banned.
        if !info.connection_status.is_disconnected() && !info.connection_status.is_banned() {
            info.connection_status.disconnect();
            self.n_dc += 1;
        }
        debug!(self.log, "Peer disconnected from db"; "peer_id" => peer_id.to_string(), "n_dc" => self.n_dc);
        self.shrink_to_fit();
    }

    /// Drops the peers with the lowest reputation so that the number of
    /// disconnected peers is less than MAX_DC_PEERS
    pub fn shrink_to_fit(&mut self) {
        // for caution, but the difference should never be > 1
        while self.n_dc > MAX_DC_PEERS {
            let to_drop = self
                .peers
                .iter()
                .filter(|(_, info)| info.connection_status.is_disconnected())
                .min_by_key(|(_, info)| info.reputation)
                .map(|(id, _)| id.clone())
                .unwrap(); // should be safe since n_dc > MAX_DC_PEERS > 0
            self.peers.remove(&to_drop);
            self.n_dc = self.n_dc.saturating_sub(1);
        }
    }

    /// Sets a peer as banned
    pub fn ban(&mut self, peer_id: &PeerId) {
        let log_ref = &self.log;
        let info = self.peers.entry(peer_id.clone()).or_insert_with(|| {
            warn!(log_ref, "Banning unknown peer"; "peer_id" => peer_id.to_string());
            PeerInfo::default()
        });
        // A banned peer is no longer merely disconnected.
        if info.connection_status.is_disconnected() {
            self.n_dc = self.n_dc.saturating_sub(1);
        }
        debug!(self.log, "Peer banned"; "peer_id" => peer_id.to_string(), "n_dc" => self.n_dc);
        info.connection_status.ban();
    }

    /// Add the meta data of a peer.
    pub fn add_metadata(&mut self, peer_id: &PeerId, meta_data: MetaData<TSpec>) {
        if let Some(peer_info) = self.peers.get_mut(peer_id) {
            peer_info.meta_data = Some(meta_data);
        } else {
            warn!(self.log, "Tried to add meta data for a non-existant peer"; "peer_id" => peer_id.to_string());
        }
    }

    /// Sets the reputation of peer.
    #[allow(dead_code)]
    pub(super) fn set_reputation(&mut self, peer_id: &PeerId, rep: Rep) {
        if let Some(peer_info) = self.peers.get_mut(peer_id) {
            peer_info.reputation = rep;
        } else {
            crit!(self.log, "Tried to modify reputation for an unknown peer"; "peer_id" => peer_id.to_string());
        }
    }

    /// Sets the syncing status of a peer.
    pub fn set_sync_status(&mut self, peer_id: &PeerId, sync_status: PeerSyncStatus) {
        if let Some(peer_info) = self.peers.get_mut(peer_id) {
            peer_info.sync_status = sync_status;
        } else {
            // NOTE(review): log message appears to be missing a word ("set");
            // left unchanged because it is a runtime string.
            crit!(self.log, "Tried to the sync status for an unknown peer"; "peer_id" => peer_id.to_string());
        }
    }

    /// Adds to a peer's reputation by `change`. If the reputation exceeds Rep's
    /// upper (lower) bounds, it stays at the maximum (minimum) value.
    pub(super) fn add_reputation(&mut self, peer_id: &PeerId, change: RepChange) {
        let log_ref = &self.log;
        let info = match self.peers.entry(peer_id.clone()) {
            Entry::Vacant(_) => {
                warn!(log_ref, "Peer is unknown, no reputation change made"; "peer_id" => peer_id.to_string());
                return;
            }
            Entry::Occupied(e) => e.into_mut(),
        };
        // Saturating arithmetic clamps at Rep::min_value()/Rep::max_value().
        info.reputation = if change.is_good {
            info.reputation.saturating_add(change.diff)
        } else {
            info.reputation.saturating_sub(change.diff)
        };
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use slog::{o, Drain};
    use types::MinimalEthSpec;

    type M = MinimalEthSpec;

    /// Builds a terminal logger for tests; when disabled, everything is filtered out.
    pub fn build_log(level: slog::Level, enabled: bool) -> slog::Logger {
        let decorator = slog_term::TermDecorator::new().build();
        let drain = slog_term::FullFormat::new(decorator).build().fuse();
        let drain = slog_async::Async::new(drain).build().fuse();

        if enabled {
            slog::Logger::root(drain.filter_level(level).fuse(), o!())
        } else {
            slog::Logger::root(drain.filter(|_| false).fuse(), o!())
        }
    }

    fn get_db() -> PeerDB<M> {
        let log = build_log(slog::Level::Debug, true);
        PeerDB::new(&log)
    }

    #[test]
    fn test_peer_connected_successfully() {
        let mut pdb = get_db();
        let random_peer = PeerId::random();

        let (n_in, n_out) = (10, 20);
        for _ in 0..n_in {
            pdb.connect_ingoing(&random_peer);
        }
        for _ in 0..n_out {
            pdb.connect_outgoing(&random_peer);
        }

        // the peer is known
        let peer_info = pdb.peer_info(&random_peer);
        assert!(peer_info.is_some());
        // this is the only peer
        assert_eq!(pdb.peers().count(), 1);
        // the peer has the default reputation
        assert_eq!(pdb.reputation(&random_peer), DEFAULT_REPUTATION);
        // it should be connected, and therefore not counted as disconnected
        assert_eq!(pdb.n_dc, 0);
        assert!(peer_info.unwrap().connection_status.is_connected());
        assert_eq!(
            peer_info.unwrap().connection_status.connections(),
            (n_in, n_out)
        );
    }

    #[test]
    fn test_set_reputation() {
        let mut pdb = get_db();
        let random_peer = PeerId::random();
        pdb.connect_ingoing(&random_peer);

        let mut rep = Rep::min_value();
        pdb.set_reputation(&random_peer, rep);
        assert_eq!(pdb.reputation(&random_peer), rep);

        rep = Rep::max_value();
        pdb.set_reputation(&random_peer, rep);
        assert_eq!(pdb.reputation(&random_peer), rep);

        rep = Rep::max_value() / 100;
        pdb.set_reputation(&random_peer, rep);
        assert_eq!(pdb.reputation(&random_peer), rep);
    }

    #[test]
    fn test_reputation_change() {
        let mut pdb = get_db();

        // 0 change does not change the reputation
        let random_peer = PeerId::random();
        let change = RepChange::good(0);
        pdb.connect_ingoing(&random_peer);
        pdb.add_reputation(&random_peer, change);
        assert_eq!(pdb.reputation(&random_peer), DEFAULT_REPUTATION);

        // overflowing change is capped
        let random_peer = PeerId::random();
        let change = RepChange::worst();
        pdb.connect_ingoing(&random_peer);
        pdb.add_reputation(&random_peer, change);
        assert_eq!(pdb.reputation(&random_peer), Rep::min_value());

        let random_peer = PeerId::random();
        let change = RepChange::good(Rep::max_value());
        pdb.connect_ingoing(&random_peer);
        pdb.add_reputation(&random_peer, change);
        assert_eq!(pdb.reputation(&random_peer), Rep::max_value());
    }

    #[test]
    fn test_disconnected_are_bounded() {
        let mut pdb = get_db();

        for _ in 0..MAX_DC_PEERS + 1 {
            let p = PeerId::random();
            pdb.connect_ingoing(&p);
        }
        assert_eq!(pdb.n_dc, 0);

        for p in pdb.connected_peer_ids().cloned().collect::<Vec<_>>() {
            pdb.disconnect(&p);
        }

        // shrink_to_fit keeps the disconnected count at the cap.
        assert_eq!(pdb.n_dc, MAX_DC_PEERS);
    }

    #[test]
    fn test_best_peers() {
        let mut pdb = get_db();

        let p0 = PeerId::random();
        let p1 = PeerId::random();
        let p2 = PeerId::random();
        pdb.connect_ingoing(&p0);
        pdb.connect_ingoing(&p1);
        pdb.connect_ingoing(&p2);
        pdb.set_reputation(&p0, 70);
        pdb.set_reputation(&p1, 100);
        pdb.set_reputation(&p2, 50);

        let best_peers = pdb.best_peers_by_status(PeerConnectionStatus::is_connected);
        assert!(vec![&p1, &p0, &p2]
            .into_iter()
            .eq(best_peers.into_iter().map(|p| p.0)));
    }

    #[test]
    fn test_the_best_peer() {
        let mut pdb = get_db();

        let p0 = PeerId::random();
        let p1 = PeerId::random();
        let p2 = PeerId::random();
        pdb.connect_ingoing(&p0);
        pdb.connect_ingoing(&p1);
        pdb.connect_ingoing(&p2);
        pdb.set_reputation(&p0, 70);
        pdb.set_reputation(&p1, 100);
        pdb.set_reputation(&p2, 50);

        let the_best = pdb.best_by_status(PeerConnectionStatus::is_connected);
        assert!(the_best.is_some());
        // Consistency check
        let best_peers = pdb.best_peers_by_status(PeerConnectionStatus::is_connected);
        assert_eq!(the_best, best_peers.into_iter().map(|p| p.0).next());
    }

    #[test]
    fn test_disconnected_consistency() {
        // n_dc must always equal the number of peers in the disconnected state,
        // through every transition (including repeated disconnects and bans).
        let mut pdb = get_db();

        let random_peer = PeerId::random();

        pdb.connect_ingoing(&random_peer);
        assert_eq!(pdb.n_dc, pdb.disconnected_peers().count());

        pdb.connect_ingoing(&random_peer);
        assert_eq!(pdb.n_dc, pdb.disconnected_peers().count());
        pdb.disconnect(&random_peer);
        assert_eq!(pdb.n_dc, pdb.disconnected_peers().count());
        pdb.connect_outgoing(&random_peer);
        assert_eq!(pdb.n_dc, pdb.disconnected_peers().count());
        pdb.disconnect(&random_peer);
        assert_eq!(pdb.n_dc, pdb.disconnected_peers().count());
        pdb.ban(&random_peer);
        assert_eq!(pdb.n_dc, pdb.disconnected_peers().count());
        pdb.disconnect(&random_peer);
        assert_eq!(pdb.n_dc, pdb.disconnected_peers().count());

        pdb.disconnect(&random_peer);
        assert_eq!(pdb.n_dc, pdb.disconnected_peers().count());
        pdb.disconnect(&random_peer);
        assert_eq!(pdb.n_dc, pdb.disconnected_peers().count());
    }
}
34.366483
112
0.584213
ab916a90ea8f08910f6844f07162f08b68162bf7
1,656
#[rustversion::stable]
const COMPILER: &str = "stable";

#[rustversion::beta]
const COMPILER: &str = "beta";

#[rustversion::nightly]
const COMPILER: &str = "nightly";

/// Runs every trybuild case: `pass` cases must compile and run, `compile_fail`
/// cases must be rejected with the expected diagnostics.
#[test]
fn tests() {
    let t = trybuild::TestCases::new();

    // Cases that must compile and run successfully.
    let pass_cases = [
        "tests/stub/original-trait-maintained.rs",
        "tests/stub/stub-trait-is-generated.rs",
        "tests/stub/custom-prefix-accepted.rs",
        "tests/stub/defaults-impls-are-copied.rs",
        "tests/stub/stub-impls-satisfy-bounds.rs",
        "tests/stub/visibility-is-correct.rs",
        "tests/validate_bindings/valid-bindings-are-accepted.rs",
        "tests/validate_bindings/valid-template-bindings-are-accepted.rs",
        "tests/validate_bindings/templates-work-with-raw-bindings.rs",
    ];
    for case in pass_cases {
        t.pass(case);
    }

    // Cases that must fail to compile.
    let fail_cases = [
        "tests/stub/meta-args-reject-correctly.rs",
        "tests/stub/bounds-are-maintained.rs",
        "tests/stub/test-only-attr-works.rs",
        "tests/validate_bindings/invalid-keys-are-rejected.rs",
        "tests/validate_bindings/invalid-keys-with-modifiers-are-rejected.rs",
        "tests/validate_bindings/invalid-modifiers-are-rejected.rs",
        "tests/validate_bindings/keys-cannot-be-used-as-modifiers.rs",
        "tests/validate_bindings/invalid-templates-are-rejected.rs",
        "tests/validate_bindings/repeated-bindings-are-rejected.rs",
        "tests/validate_bindings/bindings-clashing-with-templates-are-rejected.rs",
    ];
    for case in fail_cases {
        t.compile_fail(case);
    }

    // Diagnostics differ across compiler channels, so this expected-stderr
    // case is only checked on stable.
    if COMPILER == "stable" {
        t.compile_fail("tests/stub/visibility-is-correct-failure.rs");
    }
}
43.578947
95
0.727053
ccd30a82f9e5ad9fa69e9480a2f47e31274015d1
2,446
/**
 * [17] Letter Combinations of a Phone Number
 *
 * Given a string containing digits from 2-9 inclusive, return all possible letter combinations that the number could represent.
 *
 * A mapping of digit to letters (just like on the telephone buttons) is given below. Note that 1 does not map to any letters.
 *
 * <img src="http://upload.wikimedia.org/wikipedia/commons/thumb/7/73/Telephone-keypad2.svg/200px-Telephone-keypad2.svg.png" />
 *
 * Example:
 *
 *
 * Input: "23"
 * Output: ["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"].
 *
 *
 * Note:
 *
 * Although the above answer is in lexicographical order, your answer could be in any order you want.
 *
 */
pub struct Solution {}

// submission codes start here

impl Solution {
    /// Returns every letter combination the digit string can represent.
    ///
    /// Grows the set of partial combinations in place: for each digit, the
    /// last mapped letter is appended to the existing strings while the other
    /// letters spawn cloned copies, avoiding one clone per letter.
    pub fn letter_combinations(digits: String) -> Vec<String> {
        // '0' and '1' as placeholder to avoid index shifting
        let table: Vec<(char, Vec<char>)> = vec![
            ('0', vec![]),
            ('1', vec![]),
            ('2', vec!['a', 'b', 'c']),
            ('3', vec!['d', 'e', 'f']),
            ('4', vec!['g', 'h', 'i']),
            ('5', vec!['j', 'k', 'l']),
            ('6', vec!['m', 'n', 'o']),
            ('7', vec!['p', 'q', 'r', 's']),
            ('8', vec!['t', 'u', 'v']),
            ('9', vec!['w', 'x', 'y', 'z']),
        ];
        if digits.is_empty() {
            return vec![];
        }
        let mut combs: Vec<String> = vec![String::with_capacity(digits.len())];
        for ch in digits.chars() {
            // Panics on non-digit input; the problem guarantees digits only.
            let chs = &table[ch.to_digit(10).unwrap() as usize].1;
            // '0' and '1' map to no letters. Skipping them here also fixes a
            // usize underflow: `chs.len() - 1` below would panic (debug) or
            // wrap to a huge capacity (release) when `chs` is empty.
            if chs.is_empty() {
                continue;
            }
            let mut added: Vec<String> = Vec::with_capacity((chs.len() - 1) * combs.len());
            for comb in combs.iter_mut() {
                for (i, &alphabetic) in chs.iter().enumerate() {
                    if i == chs.len() - 1 {
                        // Reuse the existing string for the final letter.
                        comb.push(alphabetic);
                    } else {
                        let mut new_comb = comb.clone();
                        new_comb.push(alphabetic);
                        added.push(new_comb);
                    }
                }
            }
            combs.append(&mut added);
        }
        combs
    }
}

// submission codes end

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_17() {
        assert_eq!(
            Solution::letter_combinations("23".to_string()),
            ["cf", "af", "bf", "cd", "ce", "ad", "ae", "bd", "be"]
        );
    }
}
30.962025
128
0.474244
89571b9c9135bcafc94113a9ca000e62af4ef55d
6,464
#![allow(clippy::integer_arithmetic)]
#![feature(test)]

extern crate test;

use {
    rand::seq::SliceRandom,
    raptorq::{Decoder, Encoder},
    solana_entry::entry::{create_ticks, Entry},
    solana_ledger::shred::{
        max_entries_per_n_shred, max_ticks_per_n_shreds, ProcessShredsStats, Shred, Shredder,
        MAX_DATA_SHREDS_PER_FEC_BLOCK, SIZE_OF_DATA_SHRED_PAYLOAD,
    },
    solana_perf::test_tx,
    solana_sdk::{hash::Hash, packet::PACKET_DATA_SIZE, signature::Keypair},
    test::Bencher,
};

// Copied these values here to avoid exposing shreds
// internals only for the sake of benchmarks.

// size of nonce: 4
// size of common shred header: 83
// size of coding shred header: 6
const VALID_SHRED_DATA_LEN: usize = PACKET_DATA_SIZE - 4 - 83 - 6;

/// Builds one synthetic entry carrying `txs_per_entry` identical test transactions.
fn make_test_entry(txs_per_entry: u64) -> Entry {
    Entry {
        num_hashes: 100_000,
        hash: Hash::default(),
        transactions: vec![test_tx::test_tx().into(); txs_per_entry as usize],
    }
}

/// Builds `num_entries` unchained test entries (no hash linking between them).
fn make_large_unchained_entries(txs_per_entry: u64, num_entries: u64) -> Vec<Entry> {
    (0..num_entries)
        .map(|_| make_test_entry(txs_per_entry))
        .collect()
}

/// Produces at least `num_shreds` data shreds from synthetic entries.
fn make_shreds(num_shreds: usize) -> Vec<Shred> {
    let shred_size = SIZE_OF_DATA_SHRED_PAYLOAD;
    let txs_per_entry = 128;
    // Oversize the entry count (2x) so the assertion below always holds.
    let num_entries = max_entries_per_n_shred(
        &make_test_entry(txs_per_entry),
        2 * num_shreds as u64,
        Some(shred_size),
    );
    let entries = make_large_unchained_entries(txs_per_entry, num_entries);
    let shredder = Shredder::new(1, 0, 0, 0).unwrap();
    let data_shreds = shredder.entries_to_data_shreds(
        &Keypair::new(),
        &entries,
        true, // is_last_in_slot
        0,    // next_shred_index
        0,    // fec_set_offset
        &mut ProcessShredsStats::default(),
    );
    assert!(data_shreds.len() >= num_shreds);
    data_shreds
}

/// Flattens the first `num_shreds` shred payloads into one contiguous buffer
/// (the input format expected by the raptorq encoder benchmarks).
fn make_concatenated_shreds(num_shreds: usize) -> Vec<u8> {
    let data_shreds = make_shreds(num_shreds);
    let mut data: Vec<u8> = vec![0; num_shreds * VALID_SHRED_DATA_LEN];
    for (i, shred) in (data_shreds[0..num_shreds]).iter().enumerate() {
        data[i * VALID_SHRED_DATA_LEN..(i + 1) * VALID_SHRED_DATA_LEN]
            .copy_from_slice(&shred.payload()[..VALID_SHRED_DATA_LEN]);
    }

    data
}

/// Benchmarks shredding ~1Mb worth of tick entries.
#[bench]
fn bench_shredder_ticks(bencher: &mut Bencher) {
    let kp = Keypair::new();
    let shred_size = SIZE_OF_DATA_SHRED_PAYLOAD;
    let num_shreds = ((1000 * 1000) + (shred_size - 1)) / shred_size;
    // ~1Mb
    let num_ticks = max_ticks_per_n_shreds(1, Some(SIZE_OF_DATA_SHRED_PAYLOAD)) * num_shreds as u64;
    let entries = create_ticks(num_ticks, 0, Hash::default());
    bencher.iter(|| {
        let shredder = Shredder::new(1, 0, 0, 0).unwrap();
        shredder.entries_to_shreds(&kp, &entries, true, 0, 0);
    })
}

/// Benchmarks shredding ~1Mb of large transaction-heavy entries.
#[bench]
fn bench_shredder_large_entries(bencher: &mut Bencher) {
    let kp = Keypair::new();
    let shred_size = SIZE_OF_DATA_SHRED_PAYLOAD;
    let num_shreds = ((1000 * 1000) + (shred_size - 1)) / shred_size;
    let txs_per_entry = 128;
    let num_entries = max_entries_per_n_shred(
        &make_test_entry(txs_per_entry),
        num_shreds as u64,
        Some(shred_size),
    );
    let entries = make_large_unchained_entries(txs_per_entry, num_entries);
    // 1Mb
    bencher.iter(|| {
        let shredder = Shredder::new(1, 0, 0, 0).unwrap();
        shredder.entries_to_shreds(&kp, &entries, true, 0, 0);
    })
}

/// Benchmarks reassembling the original payload from ~10Mb of data shreds.
#[bench]
fn bench_deshredder(bencher: &mut Bencher) {
    let kp = Keypair::new();
    let shred_size = SIZE_OF_DATA_SHRED_PAYLOAD;
    // ~10Mb
    let num_shreds = ((10000 * 1000) + (shred_size - 1)) / shred_size;
    let num_ticks = max_ticks_per_n_shreds(1, Some(shred_size)) * num_shreds as u64;
    let entries = create_ticks(num_ticks, 0, Hash::default());
    let shredder = Shredder::new(1, 0, 0, 0).unwrap();
    let (data_shreds, _) = shredder.entries_to_shreds(&kp, &entries, true, 0, 0);
    bencher.iter(|| {
        let raw = &mut Shredder::deshred(&data_shreds).unwrap();
        assert_ne!(raw.len(), 0);
    })
}

/// Benchmarks deserializing a shred from its serialized payload.
#[bench]
fn bench_deserialize_hdr(bencher: &mut Bencher) {
    let data = vec![0; SIZE_OF_DATA_SHRED_PAYLOAD];

    let shred = Shred::new_from_data(2, 1, 1, Some(&data), true, true, 0, 0, 1);

    bencher.iter(|| {
        let payload = shred.payload().clone();
        let _ = Shred::new_from_serialized_shred(payload).unwrap();
    })
}

/// Benchmarks generating erasure coding shreds for one FEC block.
#[bench]
fn bench_shredder_coding(bencher: &mut Bencher) {
    let symbol_count = MAX_DATA_SHREDS_PER_FEC_BLOCK as usize;
    let data_shreds = make_shreds(symbol_count);
    bencher.iter(|| {
        Shredder::generate_coding_shreds(
            &data_shreds[..symbol_count],
            true, // is_last_in_slot
            0,    // next_code_index
        )
        .len();
    })
}

/// Benchmarks recovering data shreds from coding shreds alone.
#[bench]
fn bench_shredder_decoding(bencher: &mut Bencher) {
    let symbol_count = MAX_DATA_SHREDS_PER_FEC_BLOCK as usize;
    let data_shreds = make_shreds(symbol_count);
    let coding_shreds = Shredder::generate_coding_shreds(
        &data_shreds[..symbol_count],
        true, // is_last_in_slot
        0,    // next_code_index
    );
    bencher.iter(|| {
        Shredder::try_recovery(coding_shreds[..].to_vec()).unwrap();
    })
}

/// Baseline: raptorq encoding of the same FEC-block-sized payload.
#[bench]
fn bench_shredder_coding_raptorq(bencher: &mut Bencher) {
    let symbol_count = MAX_DATA_SHREDS_PER_FEC_BLOCK;
    let data = make_concatenated_shreds(symbol_count as usize);
    bencher.iter(|| {
        let encoder = Encoder::with_defaults(&data, VALID_SHRED_DATA_LEN as u16);
        encoder.get_encoded_packets(symbol_count);
    })
}

/// Baseline: raptorq decoding with just under 50% simulated packet loss.
#[bench]
fn bench_shredder_decoding_raptorq(bencher: &mut Bencher) {
    let symbol_count = MAX_DATA_SHREDS_PER_FEC_BLOCK;
    let data = make_concatenated_shreds(symbol_count as usize);
    let encoder = Encoder::with_defaults(&data, VALID_SHRED_DATA_LEN as u16);
    let mut packets = encoder.get_encoded_packets(symbol_count as u32);
    packets.shuffle(&mut rand::thread_rng());

    // Here we simulate losing 1 less than 50% of the packets randomly
    packets.truncate(packets.len() - packets.len() / 2 + 1);

    bencher.iter(|| {
        let mut decoder = Decoder::new(encoder.get_config());
        let mut result = None;
        for packet in &packets {
            result = decoder.decode(packet.clone());
            if result != None {
                break;
            }
        }
        assert_eq!(result.unwrap(), data);
    })
}
33.148718
100
0.655786
01a1e3d48f23e39978fb5df1b50e582cae54bbf2
604
use thiserror::Error; use solana_program::program_error::ProgramError; #[derive(Error, Debug, Copy, Clone)] pub enum EscrowError { /// Invalid instruction #[error("Invalid Instruction")] InvalidInstruction, /// Not Rent Exempt #[error("Not Rent Exempt")] NotRentExempt, /// Expected Amount Mismatch #[error("Expected Amount Mismatch")] ExpectedAmountMismatch, /// Amount Overflow #[error("Amount Overflow")] AmountOverflow, } impl From<EscrowError> for ProgramError { fn from(e: EscrowError) -> Self { ProgramError::Custom(e as u32) } }
23.230769
48
0.667219
1449366039da267f642f23a13677b9b71e842985
738
use crate::outcome::Outcome;
use crate::state::CoreState;
use async_trait::*;
use kg_tree::serial::to_tree;
use op_engine::operation::OperationResult;
use op_engine::{EngineRef, OperationImpl, OperationRef};
use std::ops::Deref;

/// Operation that reads the engine's current configuration and returns it as
/// a node tree.
pub struct ConfigGetOperation {}

impl ConfigGetOperation {
    /// Creates a new `ConfigGetOperation`.
    pub fn new() -> Self {
        ConfigGetOperation {}
    }
}

// Mirror `new()` so the type satisfies `Default` bounds and clippy's
// `new_without_default` lint; behavior is identical.
impl Default for ConfigGetOperation {
    fn default() -> Self {
        Self::new()
    }
}

#[async_trait]
impl OperationImpl<Outcome> for ConfigGetOperation {
    /// Serializes the engine's `CoreState` config into a tree and wraps it
    /// in an `Outcome::NodeSet`.
    async fn done(
        &mut self,
        engine: &EngineRef<Outcome>,
        _operation: &OperationRef<Outcome>,
    ) -> OperationResult<Outcome> {
        // NOTE(review): `unwrap()` assumes `CoreState` is always registered
        // on the engine before this operation runs — confirm with callers.
        let state = engine.state::<CoreState>().unwrap();
        let cfg = to_tree(state.config().deref())?;
        Ok(Outcome::NodeSet(cfg.into()))
    }
}
25.448276
57
0.662602
f7cd29f2e67ef591e02af4607b812c2961cf5feb
7,222
//! This module contains the functionality to convert from the wacky tcx data
//! structures into the HAIR. The `builder` is generally ignorant of the tcx,
//! etc., and instead goes through the `Cx` for most of its work.

use crate::hair::*;
use crate::hair::util::UserAnnotatedTyHelpers;

use rustc_data_structures::indexed_vec::Idx;
use rustc::hir::def_id::DefId;
use rustc::hir::Node;
use rustc::middle::region;
use rustc::infer::InferCtxt;
use rustc::ty::subst::Subst;
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::subst::{GenericArg, InternalSubsts};
use rustc::ty::layout::VariantIdx;
use syntax::ast;
use syntax::attr;
use syntax::symbol::{Symbol, sym};
use rustc::hir;
use crate::hair::constant::{lit_to_const, LitToConstError};

#[derive(Clone)]
pub struct Cx<'a, 'tcx> {
    tcx: TyCtxt<'tcx>,
    infcx: &'a InferCtxt<'a, 'tcx>,
    pub root_lint_level: hir::HirId,
    pub param_env: ty::ParamEnv<'tcx>,

    /// Identity `InternalSubsts` for use with const-evaluation.
    pub identity_substs: &'tcx InternalSubsts<'tcx>,

    pub region_scope_tree: &'tcx region::ScopeTree,
    pub tables: &'a ty::TypeckTables<'tcx>,

    /// This is `Constness::Const` if we are compiling a `static`,
    /// `const`, or the body of a `const fn`.
    constness: hir::Constness,

    /// The `DefId` of the owner of this body.
    body_owner: DefId,

    /// What kind of body is being compiled.
    pub body_owner_kind: hir::BodyOwnerKind,

    /// Whether this constant/function needs overflow checks.
    check_overflow: bool,

    /// See field with the same name on `mir::Body`.
    control_flow_destroyed: Vec<(Span, String)>,
}

impl<'a, 'tcx> Cx<'a, 'tcx> {
    /// Builds a `Cx` for the body owned by `src_id`, deriving constness and
    /// overflow-check settings from the owner kind, attributes and session.
    pub fn new(infcx: &'a InferCtxt<'a, 'tcx>, src_id: hir::HirId) -> Cx<'a, 'tcx> {
        let tcx = infcx.tcx;
        let src_def_id = tcx.hir().local_def_id(src_id);
        let tables = tcx.typeck_tables_of(src_def_id);
        let body_owner_kind = tcx.hir().body_owner_kind(src_id);

        let constness = match body_owner_kind {
            hir::BodyOwnerKind::Const | hir::BodyOwnerKind::Static(_) => hir::Constness::Const,
            hir::BodyOwnerKind::Closure | hir::BodyOwnerKind::Fn => hir::Constness::NotConst,
        };

        let attrs = tcx.hir().attrs(src_id);

        // Some functions always have overflow checks enabled,
        // however, they may not get codegen'd, depending on
        // the settings for the crate they are codegened in.
        let mut check_overflow = attr::contains_name(attrs, sym::rustc_inherit_overflow_checks);

        // Respect -C overflow-checks.
        check_overflow |= tcx.sess.overflow_checks();

        // Constants always need overflow checks.
        check_overflow |= constness == hir::Constness::Const;

        Cx {
            tcx,
            infcx,
            root_lint_level: src_id,
            param_env: tcx.param_env(src_def_id),
            identity_substs: InternalSubsts::identity_for_item(tcx.global_tcx(), src_def_id),
            region_scope_tree: tcx.region_scope_tree(src_def_id),
            tables,
            constness,
            body_owner: src_def_id,
            body_owner_kind,
            check_overflow,
            control_flow_destroyed: Vec::new(),
        }
    }

    /// Consumes the context, yielding the recorded control-flow-destroying spans.
    pub fn control_flow_destroyed(self) -> Vec<(Span, String)> {
        self.control_flow_destroyed
    }
}

impl<'a, 'tcx> Cx<'a, 'tcx> {
    /// Normalizes `ast` into the appropriate "mirror" type.
    pub fn mirror<M: Mirror<'tcx>>(&mut self, ast: M) -> M::Output {
        ast.make_mirror(self)
    }

    pub fn usize_ty(&mut self) -> Ty<'tcx> {
        self.tcx.types.usize
    }

    pub fn usize_literal(&mut self, value: u64) -> &'tcx ty::Const<'tcx> {
        ty::Const::from_usize(self.tcx, value)
    }

    pub fn bool_ty(&mut self) -> Ty<'tcx> {
        self.tcx.types.bool
    }

    pub fn unit_ty(&mut self) -> Ty<'tcx> {
        self.tcx.mk_unit()
    }

    pub fn true_literal(&mut self) -> &'tcx ty::Const<'tcx> {
        ty::Const::from_bool(self.tcx, true)
    }

    pub fn false_literal(&mut self) -> &'tcx ty::Const<'tcx> {
        ty::Const::from_bool(self.tcx, false)
    }

    /// Lowers a literal AST node to a typed constant; unparseable floats and
    /// already-reported errors recover with a dummy zero constant.
    pub fn const_eval_literal(
        &mut self,
        lit: &'tcx ast::LitKind,
        ty: Ty<'tcx>,
        sp: Span,
        neg: bool,
    ) -> &'tcx ty::Const<'tcx> {
        trace!("const_eval_literal: {:#?}, {:?}, {:?}, {:?}", lit, ty, sp, neg);

        match lit_to_const(lit, self.tcx, ty, neg) {
            Ok(c) => c,
            Err(LitToConstError::UnparseableFloat) => {
                // FIXME(#31407) this is only necessary because float parsing is buggy
                self.tcx.sess.span_err(sp, "could not evaluate float literal (see issue #31407)");
                // create a dummy value and continue compiling
                Const::from_bits(self.tcx, 0, self.param_env.and(ty))
            },
            Err(LitToConstError::Reported) => {
                // create a dummy value and continue compiling
                Const::from_bits(self.tcx, 0, self.param_env.and(ty))
            }
        }
    }

    /// Converts a HIR pattern into a HAIR `Pat`, resolving through the HIR map.
    pub fn pattern_from_hir(&mut self, p: &hir::Pat) -> Pat<'tcx> {
        let tcx = self.tcx.global_tcx();
        let p = match tcx.hir().get(p.hir_id) {
            Node::Pat(p) | Node::Binding(p) => p,
            node => bug!("pattern became {:?}", node)
        };
        Pat::from_hir(tcx, self.param_env.and(self.identity_substs), self.tables(), p)
    }

    /// Resolves `method_name` on `trait_def_id` for `self_ty`/`params`,
    /// returning it as a zero-sized function constant. ICEs if the trait has
    /// no such method.
    pub fn trait_method(&mut self,
                        trait_def_id: DefId,
                        method_name: Symbol,
                        self_ty: Ty<'tcx>,
                        params: &[GenericArg<'tcx>])
                        -> &'tcx ty::Const<'tcx> {
        let substs = self.tcx.mk_substs_trait(self_ty, params);
        for item in self.tcx.associated_items(trait_def_id) {
            if item.kind == ty::AssocKind::Method && item.ident.name == method_name {
                let method_ty = self.tcx.type_of(item.def_id);
                let method_ty = method_ty.subst(self.tcx, substs);
                return ty::Const::zero_sized(self.tcx, method_ty);
            }
        }

        bug!("found no method `{}` in `{:?}`", method_name, trait_def_id);
    }

    /// Enumerates the field indices of `variant_index` within `adt_def`.
    pub fn all_fields(&mut self, adt_def: &ty::AdtDef, variant_index: VariantIdx) -> Vec<Field> {
        (0..adt_def.variants[variant_index].fields.len())
            .map(Field::new)
            .collect()
    }

    pub fn needs_drop(&mut self, ty: Ty<'tcx>) -> bool {
        ty.needs_drop(self.tcx.global_tcx(), self.param_env)
    }

    pub fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx
    }

    pub fn tables(&self) -> &'a ty::TypeckTables<'tcx> {
        self.tables
    }

    pub fn check_overflow(&self) -> bool {
        self.check_overflow
    }

    pub fn type_is_copy_modulo_regions(&self, ty: Ty<'tcx>, span: Span) -> bool {
        self.infcx.type_is_copy_modulo_regions(self.param_env, ty, span)
    }
}

impl UserAnnotatedTyHelpers<'tcx> for Cx<'_, 'tcx> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx()
    }

    fn tables(&self) -> &ty::TypeckTables<'tcx> {
        self.tables()
    }
}

mod block;
mod expr;
mod to_ref;
32.38565
98
0.585433
2912e5a7981b236137ff6fbfc9bb12d91ce8febb
5,237
// Copyright 2021 Contributors to the Parsec project. // SPDX-License-Identifier: Apache-2.0 //! Slot and token management functions use crate::get_pkcs11; use crate::types::function::Rv; use crate::types::mechanism::{MechanismInfo, MechanismType}; use crate::types::slot_token::{Slot, TokenInfo}; use crate::Pkcs11; use crate::Result; use crate::Session; use cryptoki_sys::{CK_MECHANISM_INFO, CK_TOKEN_INFO}; use secrecy::{ExposeSecret, Secret}; use std::convert::TryInto; use std::ffi::CString; impl Pkcs11 { /// Get all slots available with a token pub fn get_slots_with_token(&self) -> Result<Vec<Slot>> { let mut slot_count = 0; unsafe { Rv::from(get_pkcs11!(self, C_GetSlotList)( cryptoki_sys::CK_TRUE, std::ptr::null_mut(), &mut slot_count, )) .into_result()?; } let mut slots = vec![0; slot_count.try_into()?]; unsafe { Rv::from(get_pkcs11!(self, C_GetSlotList)( cryptoki_sys::CK_TRUE, slots.as_mut_ptr(), &mut slot_count, )) .into_result()?; } let mut slots: Vec<Slot> = slots.into_iter().map(Slot::new).collect(); // This should always truncate slots. slots.resize(slot_count.try_into()?, Slot::new(0)); Ok(slots) } /// Get all slots pub fn get_all_slots(&self) -> Result<Vec<Slot>> { let mut slot_count = 0; unsafe { Rv::from(get_pkcs11!(self, C_GetSlotList)( cryptoki_sys::CK_FALSE, std::ptr::null_mut(), &mut slot_count, )) .into_result()?; } let mut slots = vec![0; slot_count.try_into()?]; unsafe { Rv::from(get_pkcs11!(self, C_GetSlotList)( cryptoki_sys::CK_FALSE, slots.as_mut_ptr(), &mut slot_count, )) .into_result()?; } let mut slots: Vec<Slot> = slots.into_iter().map(Slot::new).collect(); // This should always truncate slots. slots.resize(slot_count.try_into()?, Slot::new(0)); Ok(slots) } /// Initialize a token /// /// Currently will use an empty label for all tokens. 
pub fn init_token(&self, slot: Slot, pin: &str) -> Result<()> { let pin = Secret::new(CString::new(pin)?.into_bytes()); // FIXME: make a good conversion to the label format let label = [b' '; 32]; unsafe { Rv::from(get_pkcs11!(self, C_InitToken)( slot.into(), pin.expose_secret().as_ptr() as *mut u8, pin.expose_secret().len().try_into()?, label.as_ptr() as *mut u8, )) .into_result() } } /// Returns information about a specific token pub fn get_token_info(&self, slot: Slot) -> Result<TokenInfo> { unsafe { let mut token_info = CK_TOKEN_INFO::default(); Rv::from(get_pkcs11!(self, C_GetTokenInfo)( slot.into(), &mut token_info, )) .into_result()?; Ok(TokenInfo::new(token_info)) } } /// Get all mechanisms support by a slot pub fn get_mechanism_list(&self, slot: Slot) -> Result<Vec<MechanismType>> { let mut mechanism_count = 0; unsafe { Rv::from(get_pkcs11!(self, C_GetMechanismList)( slot.into(), std::ptr::null_mut(), &mut mechanism_count, )) .into_result()?; } let mut mechanisms = vec![0; mechanism_count.try_into()?]; unsafe { Rv::from(get_pkcs11!(self, C_GetMechanismList)( slot.into(), mechanisms.as_mut_ptr(), &mut mechanism_count, )) .into_result()?; } // Truncate mechanisms if count decreased. 
mechanisms.truncate(mechanism_count.try_into()?); Ok(mechanisms .into_iter() .filter_map(|type_| type_.try_into().ok()) .collect()) } /// Get detailed information about a mechanism for a slot pub fn get_mechanism_info(&self, slot: Slot, type_: MechanismType) -> Result<MechanismInfo> { unsafe { let mut mechanism_info = CK_MECHANISM_INFO::default(); Rv::from(get_pkcs11!(self, C_GetMechanismInfo)( slot.into(), type_.into(), &mut mechanism_info, )) .into_result()?; Ok(MechanismInfo::new(mechanism_info)) } } } impl<'a> Session<'a> { /// Initialize the normal user's pin for a token pub fn init_pin(&self, pin: &str) -> Result<()> { let pin = Secret::new(CString::new(pin)?.into_bytes()); unsafe { Rv::from(get_pkcs11!(self.client(), C_InitPIN)( self.handle(), pin.expose_secret().as_ptr() as *mut u8, pin.expose_secret().len().try_into()?, )) .into_result() } } }
29.925714
97
0.528165
e6c1c9b0e6a6362d039f769751780b62e0050581
1,698
use std::cell::RefCell; use std::pin::Pin; use std::marker::Unpin; use std::task::{Context, Poll}; use tokio::io::{AsyncRead, Stdin, BufReader, Lines, AsyncBufReadExt}; use crate::collections::Single; use crate::hide::{Hide, Delta}; use crate::tag::{SINGLE}; use crate::lattice::set_union::{SetUnionRepr}; use crate::metadata::Order; use super::*; pub struct ReadOp<R: AsyncRead + Unpin> { reader: RefCell<Lines<BufReader<R>>>, } impl ReadOp<Stdin> { pub fn new_stdin() -> Self { Self { reader: RefCell::new(BufReader::new(tokio::io::stdin()).lines()), } } } impl<R: AsyncRead + Unpin> ReadOp<R> { pub fn new(read: R) -> Self { Self { reader: RefCell::new(BufReader::new(read).lines()), } } pub fn from_buf(buf_read: BufReader<R>) -> Self { Self { reader: RefCell::new(buf_read.lines()), } } } impl<R: AsyncRead + Unpin> Op for ReadOp<R> { type LatRepr = SetUnionRepr<SINGLE, String>; fn propegate_saturation(&self) { unimplemented!("TODO?"); } } impl<R: AsyncRead + Unpin> OpDelta for ReadOp<R> { type Ord = UserInputOrder; fn poll_delta(&self, ctx: &mut Context<'_>) -> Poll<Option<Hide<Delta, Self::LatRepr>>> { loop { match Pin::new(&mut *self.reader.borrow_mut()).as_mut().poll_next_line(ctx) { Poll::Pending => return Poll::Pending, Poll::Ready(Result::Ok(opt)) => return Poll::Ready(opt.map(|x| Hide::new(Single(x)))), Poll::Ready(Result::Err(err)) => println!("ERROR: {}", err), } } } } pub struct UserInputOrder; impl Order for UserInputOrder {}
26.123077
102
0.586572
e5f091d873df9c1384207472e4f6061713bf5152
631
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. fn main() { let z = match 3 { x(1) => x(1) //~ ERROR unresolved enum variant //~^ ERROR unresolved name `x` }; assert!(z == 3); }
33.210526
69
0.675119
75962ddb525a2bbd1f5b547cac39c25da2629e26
208
// Copyright 2020 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. pub mod build; pub mod capability_routing;
29.714286
73
0.764423
dd92fdd546643e2fbb7e7d50e2fbc937d358cf81
5,523
#![allow(non_snake_case, non_upper_case_globals)]
#![allow(non_camel_case_types)]
//! Nested Vectored Interrupt Controller
//!
//! Used by: stm32mp153, stm32mp157

#[cfg(not(feature = "nosync"))]
pub use crate::stm32mp::peripherals::nvic::Instance;
pub use crate::stm32mp::peripherals::nvic::{RegisterBlock, ResetValues};
pub use crate::stm32mp::peripherals::nvic::{
    IABR0, IABR1, IABR2, IABR3, IABR4, ICER0, ICER1, ICER2, ICER3, ICER4, ICPR0, ICPR1, ICPR2,
    ICPR3, ICPR4, IPR0, IPR1, IPR10, IPR11, IPR12, IPR13, IPR14, IPR15, IPR16, IPR17, IPR18,
    IPR19, IPR2, IPR20, IPR21, IPR22, IPR23, IPR24, IPR25, IPR26, IPR27, IPR28, IPR29, IPR3,
    IPR30, IPR31, IPR32, IPR33, IPR34, IPR35, IPR36, IPR37, IPR38, IPR4, IPR5, IPR6, IPR7, IPR8,
    IPR9, ISER0, ISER1, ISER2, ISER3, ISER4, ISPR0, ISPR1, ISPR2, ISPR3, ISPR4,
};

/// Access functions for the NVIC peripheral instance
pub mod NVIC {
    use super::ResetValues;

    #[cfg(not(feature = "nosync"))]
    use super::Instance;

    // Singleton descriptor for the memory-mapped peripheral at its fixed
    // Cortex-M system address.
    #[cfg(not(feature = "nosync"))]
    const INSTANCE: Instance = Instance {
        addr: 0xe000e100,
        _marker: ::core::marker::PhantomData,
    };

    /// Reset values for each field in NVIC
    pub const reset: ResetValues = ResetValues {
        ISER0: 0x00000000,
        ISER1: 0x00000000,
        ISER2: 0x00000000,
        ISER3: 0x00000000,
        ICER0: 0x00000000,
        ICER1: 0x00000000,
        ICER2: 0x00000000,
        ICER3: 0x00000000,
        ISPR0: 0x00000000,
        ISPR1: 0x00000000,
        ISPR2: 0x00000000,
        ISPR3: 0x00000000,
        ICPR0: 0x00000000,
        ICPR1: 0x00000000,
        ICPR2: 0x00000000,
        ICPR3: 0x00000000,
        IABR0: 0x00000000,
        IABR1: 0x00000000,
        IABR2: 0x00000000,
        IABR3: 0x00000000,
        IPR0: 0x00000000,
        IPR1: 0x00000000,
        IPR2: 0x00000000,
        IPR3: 0x00000000,
        IPR4: 0x00000000,
        IPR5: 0x00000000,
        IPR6: 0x00000000,
        IPR7: 0x00000000,
        IPR8: 0x00000000,
        IPR9: 0x00000000,
        IPR10: 0x00000000,
        IPR11: 0x00000000,
        IPR12: 0x00000000,
        IPR13: 0x00000000,
        IPR14: 0x00000000,
        IPR15: 0x00000000,
        IPR16: 0x00000000,
        IPR17: 0x00000000,
        IPR18: 0x00000000,
        IPR19: 0x00000000,
        IPR20: 0x00000000,
        IPR21: 0x00000000,
        IPR22: 0x00000000,
        IPR23: 0x00000000,
        IPR24: 0x00000000,
        IPR25: 0x00000000,
        IPR26: 0x00000000,
        IPR27: 0x00000000,
        IPR28: 0x00000000,
        IPR29: 0x00000000,
        IPR30: 0x00000000,
        IPR31: 0x00000000,
        IPR32: 0x00000000,
        IPR33: 0x00000000,
        IPR34: 0x00000000,
        IPR35: 0x00000000,
        IPR36: 0x00000000,
        IPR37: 0x00000000,
        IPR38: 0x00000000,
        ISER4: 0x00000000,
        ICER4: 0x00000000,
        ISPR4: 0x00000000,
        ICPR4: 0x00000000,
        IABR4: 0x00000000,
    };

    // Set to `true` while an `Instance` is handed out. `take` and `release`
    // guard accesses with interrupt-free critical sections below.
    #[cfg(not(feature = "nosync"))]
    #[allow(renamed_and_removed_lints)]
    #[allow(private_no_mangle_statics)]
    #[no_mangle]
    static mut NVIC_TAKEN: bool = false;

    /// Safe access to NVIC
    ///
    /// This function returns `Some(Instance)` if this instance is not
    /// currently taken, and `None` if it is. This ensures that if you
    /// do get `Some(Instance)`, you are ensured unique access to
    /// the peripheral and there cannot be data races (unless other
    /// code uses `unsafe`, of course). You can then pass the
    /// `Instance` around to other functions as required. When you're
    /// done with it, you can call `release(instance)` to return it.
    ///
    /// `Instance` itself dereferences to a `RegisterBlock`, which
    /// provides access to the peripheral's registers.
    #[cfg(not(feature = "nosync"))]
    #[inline]
    pub fn take() -> Option<Instance> {
        external_cortex_m::interrupt::free(|_| unsafe {
            if NVIC_TAKEN {
                None
            } else {
                NVIC_TAKEN = true;
                Some(INSTANCE)
            }
        })
    }

    /// Release exclusive access to NVIC
    ///
    /// This function allows you to return an `Instance` so that it
    /// is available to `take()` again. This function will panic if
    /// you return a different `Instance` or if this instance is not
    /// already taken.
    #[cfg(not(feature = "nosync"))]
    #[inline]
    pub fn release(inst: Instance) {
        external_cortex_m::interrupt::free(|_| unsafe {
            if NVIC_TAKEN && inst.addr == INSTANCE.addr {
                NVIC_TAKEN = false;
            } else {
                panic!("Released a peripheral which was not taken");
            }
        });
    }

    /// Unsafely steal NVIC
    ///
    /// This function is similar to take() but forcibly takes the
    /// Instance, marking it as taken regardless of its previous
    /// state.
    // NOTE(review): unlike take/release, the flag is written here outside a
    // critical section; the caller must ensure no concurrent take()/release().
    #[cfg(not(feature = "nosync"))]
    #[inline]
    pub unsafe fn steal() -> Instance {
        NVIC_TAKEN = true;
        INSTANCE
    }
}

/// Raw pointer to NVIC
///
/// Dereferencing this is unsafe because you are not ensured unique
/// access to the peripheral, so you may encounter data races with
/// other users of this peripheral. It is up to you to ensure you
/// will not cause data races.
///
/// This constant is provided for ease of use in unsafe code: you can
/// simply call for example `write_reg!(gpio, GPIOA, ODR, 1);`.
pub const NVIC: *const RegisterBlock = 0xe000e100 as *const _;
32.298246
99
0.605649
dd02fa7ac151c10186bd633726d7dc2567896f8f
577
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. fn fn1(0: Box) {} //~^ ERROR E0243 //~| NOTE expected 1 type arguments, found 0 fn main() {}
36.0625
68
0.701906
610234d45ce675519d2e95029b2ae6cf022626c3
108,537
//! Type context book-keeping. use crate::arena::Arena; use crate::dep_graph::{DepGraph, DepKind, DepKindStruct}; use crate::hir::place::Place as HirPlace; use crate::infer::canonical::{Canonical, CanonicalVarInfo, CanonicalVarInfos}; use crate::lint::{struct_lint_level, LintDiagnosticBuilder, LintLevelSource}; use crate::middle::codegen_fn_attrs::CodegenFnAttrs; use crate::middle::resolve_lifetime; use crate::middle::stability; use crate::mir::interpret::{self, Allocation, ConstAllocation, ConstValue, Scalar}; use crate::mir::{ Body, BorrowCheckResult, Field, Local, Place, PlaceElem, ProjectionKind, Promoted, }; use crate::thir::Thir; use crate::traits; use crate::ty::query::{self, TyCtxtAt}; use crate::ty::subst::{GenericArg, GenericArgKind, InternalSubsts, Subst, SubstsRef, UserSubsts}; use crate::ty::{ self, AdtDef, AdtDefData, AdtKind, Binder, BindingMode, BoundVar, CanonicalPolyFnSig, ClosureSizeProfileData, Const, ConstS, ConstVid, DefIdTree, ExistentialPredicate, FloatTy, FloatVar, FloatVid, GenericParamDefKind, InferConst, InferTy, IntTy, IntVar, IntVid, List, ParamConst, ParamTy, PolyFnSig, Predicate, PredicateKind, PredicateS, ProjectionTy, Region, RegionKind, ReprOptions, TraitObjectVisitor, Ty, TyKind, TyS, TyVar, TyVid, TypeAndMut, UintTy, }; use rustc_ast as ast; use rustc_data_structures::fingerprint::Fingerprint; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use rustc_data_structures::intern::{Interned, WithStableHash}; use rustc_data_structures::memmap::Mmap; use rustc_data_structures::profiling::SelfProfilerRef; use rustc_data_structures::sharded::{IntoPointer, ShardedHashMap}; use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; use rustc_data_structures::steal::Steal; use rustc_data_structures::sync::{self, Lock, Lrc, WorkerLocal}; use rustc_data_structures::vec_map::VecMap; use rustc_errors::{ErrorGuaranteed, MultiSpan}; use rustc_hir as hir; use rustc_hir::def::{DefKind, Res}; use rustc_hir::def_id::{CrateNum, DefId, 
DefIdMap, LocalDefId, LOCAL_CRATE}; use rustc_hir::intravisit::Visitor; use rustc_hir::lang_items::LangItem; use rustc_hir::{ Constness, ExprKind, HirId, ImplItemKind, ItemKind, ItemLocalId, ItemLocalMap, ItemLocalSet, Node, TraitCandidate, TraitItemKind, }; use rustc_index::vec::{Idx, IndexVec}; use rustc_macros::HashStable; use rustc_middle::mir::FakeReadCause; use rustc_query_system::ich::StableHashingContext; use rustc_serialize::opaque::{FileEncodeResult, FileEncoder}; use rustc_session::config::{CrateType, OutputFilenames}; use rustc_session::lint::{Level, Lint}; use rustc_session::Limit; use rustc_session::Session; use rustc_span::def_id::{DefPathHash, StableCrateId}; use rustc_span::source_map::SourceMap; use rustc_span::symbol::{kw, sym, Ident, Symbol}; use rustc_span::{Span, DUMMY_SP}; use rustc_target::abi::{Layout, LayoutS, TargetDataLayout, VariantIdx}; use rustc_target::spec::abi; use rustc_type_ir::sty::TyKind::*; use rustc_type_ir::{InternAs, InternIteratorElement, Interner, TypeFlags}; use std::any::Any; use std::borrow::Borrow; use std::cmp::Ordering; use std::collections::hash_map::{self, Entry}; use std::fmt; use std::hash::{Hash, Hasher}; use std::iter; use std::mem; use std::ops::{Bound, Deref}; use std::sync::Arc; use super::RvalueScopes; pub trait OnDiskCache<'tcx>: rustc_data_structures::sync::Sync { /// Creates a new `OnDiskCache` instance from the serialized data in `data`. 
fn new(sess: &'tcx Session, data: Mmap, start_pos: usize) -> Self where Self: Sized; fn new_empty(source_map: &'tcx SourceMap) -> Self where Self: Sized; fn drop_serialized_data(&self, tcx: TyCtxt<'tcx>); fn serialize(&self, tcx: TyCtxt<'tcx>, encoder: FileEncoder) -> FileEncodeResult; } #[allow(rustc::usage_of_ty_tykind)] impl<'tcx> Interner for TyCtxt<'tcx> { type AdtDef = ty::AdtDef<'tcx>; type SubstsRef = ty::SubstsRef<'tcx>; type DefId = DefId; type Ty = Ty<'tcx>; type Const = ty::Const<'tcx>; type Region = Region<'tcx>; type TypeAndMut = TypeAndMut<'tcx>; type Mutability = hir::Mutability; type Movability = hir::Movability; type PolyFnSig = PolyFnSig<'tcx>; type ListBinderExistentialPredicate = &'tcx List<Binder<'tcx, ExistentialPredicate<'tcx>>>; type BinderListTy = Binder<'tcx, &'tcx List<Ty<'tcx>>>; type ListTy = &'tcx List<Ty<'tcx>>; type ProjectionTy = ty::ProjectionTy<'tcx>; type ParamTy = ParamTy; type BoundTy = ty::BoundTy; type PlaceholderType = ty::PlaceholderType; type InferTy = InferTy; type DelaySpanBugEmitted = DelaySpanBugEmitted; type PredicateKind = ty::PredicateKind<'tcx>; type AllocId = crate::mir::interpret::AllocId; } /// A type that is not publicly constructable. This prevents people from making [`TyKind::Error`]s /// except through the error-reporting functions on a [`tcx`][TyCtxt]. #[derive(Copy, Clone, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)] #[derive(TyEncodable, TyDecodable, HashStable)] pub struct DelaySpanBugEmitted { pub reported: ErrorGuaranteed, _priv: (), } type InternedSet<'tcx, T> = ShardedHashMap<InternedInSet<'tcx, T>, ()>; pub struct CtxtInterners<'tcx> { /// The arena that types, regions, etc. are allocated from. arena: &'tcx WorkerLocal<Arena<'tcx>>, // Specifically use a speedy hash algorithm for these hash sets, since // they're accessed quite often. 
type_: InternedSet<'tcx, WithStableHash<TyS<'tcx>>>, substs: InternedSet<'tcx, InternalSubsts<'tcx>>, canonical_var_infos: InternedSet<'tcx, List<CanonicalVarInfo<'tcx>>>, region: InternedSet<'tcx, RegionKind>, poly_existential_predicates: InternedSet<'tcx, List<ty::Binder<'tcx, ExistentialPredicate<'tcx>>>>, predicate: InternedSet<'tcx, PredicateS<'tcx>>, predicates: InternedSet<'tcx, List<Predicate<'tcx>>>, projs: InternedSet<'tcx, List<ProjectionKind>>, place_elems: InternedSet<'tcx, List<PlaceElem<'tcx>>>, const_: InternedSet<'tcx, ConstS<'tcx>>, const_allocation: InternedSet<'tcx, Allocation>, bound_variable_kinds: InternedSet<'tcx, List<ty::BoundVariableKind>>, layout: InternedSet<'tcx, LayoutS<'tcx>>, adt_def: InternedSet<'tcx, AdtDefData>, } impl<'tcx> CtxtInterners<'tcx> { fn new(arena: &'tcx WorkerLocal<Arena<'tcx>>) -> CtxtInterners<'tcx> { CtxtInterners { arena, type_: Default::default(), substs: Default::default(), region: Default::default(), poly_existential_predicates: Default::default(), canonical_var_infos: Default::default(), predicate: Default::default(), predicates: Default::default(), projs: Default::default(), place_elems: Default::default(), const_: Default::default(), const_allocation: Default::default(), bound_variable_kinds: Default::default(), layout: Default::default(), adt_def: Default::default(), } } /// Interns a type. #[allow(rustc::usage_of_ty_tykind)] #[inline(never)] fn intern_ty( &self, kind: TyKind<'tcx>, sess: &Session, resolutions: &ty::ResolverOutputs, ) -> Ty<'tcx> { Ty(Interned::new_unchecked( self.type_ .intern(kind, |kind| { let flags = super::flags::FlagComputation::for_kind(&kind); // It's impossible to hash inference regions (and will ICE), so we don't need to try to cache them. // Without incremental, we rarely stable-hash types, so let's not do it proactively. 
let stable_hash = if flags.flags.intersects(TypeFlags::HAS_RE_INFER) || sess.opts.incremental.is_none() { Fingerprint::ZERO } else { let mut hasher = StableHasher::new(); let mut hcx = StableHashingContext::ignore_spans( sess, &resolutions.definitions, &*resolutions.cstore, ); kind.hash_stable(&mut hcx, &mut hasher); hasher.finish() }; let ty_struct = TyS { kind, flags: flags.flags, outer_exclusive_binder: flags.outer_exclusive_binder, }; InternedInSet( self.arena.alloc(WithStableHash { internee: ty_struct, stable_hash }), ) }) .0, )) } #[inline(never)] fn intern_predicate(&self, kind: Binder<'tcx, PredicateKind<'tcx>>) -> Predicate<'tcx> { Predicate(Interned::new_unchecked( self.predicate .intern(kind, |kind| { let flags = super::flags::FlagComputation::for_predicate(kind); let predicate_struct = PredicateS { kind, flags: flags.flags, outer_exclusive_binder: flags.outer_exclusive_binder, }; InternedInSet(self.arena.alloc(predicate_struct)) }) .0, )) } } pub struct CommonTypes<'tcx> { pub unit: Ty<'tcx>, pub bool: Ty<'tcx>, pub char: Ty<'tcx>, pub isize: Ty<'tcx>, pub i8: Ty<'tcx>, pub i16: Ty<'tcx>, pub i32: Ty<'tcx>, pub i64: Ty<'tcx>, pub i128: Ty<'tcx>, pub usize: Ty<'tcx>, pub u8: Ty<'tcx>, pub u16: Ty<'tcx>, pub u32: Ty<'tcx>, pub u64: Ty<'tcx>, pub u128: Ty<'tcx>, pub f32: Ty<'tcx>, pub f64: Ty<'tcx>, pub str_: Ty<'tcx>, pub never: Ty<'tcx>, pub self_param: Ty<'tcx>, /// Dummy type used for the `Self` of a `TraitRef` created for converting /// a trait object, and which gets removed in `ExistentialTraitRef`. /// This type must not appear anywhere in other converted types. pub trait_object_dummy_self: Ty<'tcx>, } pub struct CommonLifetimes<'tcx> { /// `ReEmpty` in the root universe. pub re_root_empty: Region<'tcx>, /// `ReStatic` pub re_static: Region<'tcx>, /// Erased region, used outside of type inference. 
pub re_erased: Region<'tcx>, } pub struct CommonConsts<'tcx> { pub unit: Const<'tcx>, } pub struct LocalTableInContext<'a, V> { hir_owner: LocalDefId, data: &'a ItemLocalMap<V>, } /// Validate that the given HirId (respectively its `local_id` part) can be /// safely used as a key in the maps of a TypeckResults. For that to be /// the case, the HirId must have the same `owner` as all the other IDs in /// this table (signified by `hir_owner`). Otherwise the HirId /// would be in a different frame of reference and using its `local_id` /// would result in lookup errors, or worse, in silently wrong data being /// stored/returned. #[inline] fn validate_hir_id_for_typeck_results(hir_owner: LocalDefId, hir_id: hir::HirId) { if hir_id.owner != hir_owner { invalid_hir_id_for_typeck_results(hir_owner, hir_id); } } #[cold] #[inline(never)] fn invalid_hir_id_for_typeck_results(hir_owner: LocalDefId, hir_id: hir::HirId) { ty::tls::with(|tcx| { bug!( "node {} with HirId::owner {:?} cannot be placed in TypeckResults with hir_owner {:?}", tcx.hir().node_to_string(hir_id), hir_id.owner, hir_owner ) }); } impl<'a, V> LocalTableInContext<'a, V> { pub fn contains_key(&self, id: hir::HirId) -> bool { validate_hir_id_for_typeck_results(self.hir_owner, id); self.data.contains_key(&id.local_id) } pub fn get(&self, id: hir::HirId) -> Option<&V> { validate_hir_id_for_typeck_results(self.hir_owner, id); self.data.get(&id.local_id) } pub fn iter(&self) -> hash_map::Iter<'_, hir::ItemLocalId, V> { self.data.iter() } } impl<'a, V> ::std::ops::Index<hir::HirId> for LocalTableInContext<'a, V> { type Output = V; fn index(&self, key: hir::HirId) -> &V { self.get(key).expect("LocalTableInContext: key not found") } } pub struct LocalTableInContextMut<'a, V> { hir_owner: LocalDefId, data: &'a mut ItemLocalMap<V>, } impl<'a, V> LocalTableInContextMut<'a, V> { pub fn get_mut(&mut self, id: hir::HirId) -> Option<&mut V> { validate_hir_id_for_typeck_results(self.hir_owner, id); 
self.data.get_mut(&id.local_id) } pub fn entry(&mut self, id: hir::HirId) -> Entry<'_, hir::ItemLocalId, V> { validate_hir_id_for_typeck_results(self.hir_owner, id); self.data.entry(id.local_id) } pub fn insert(&mut self, id: hir::HirId, val: V) -> Option<V> { validate_hir_id_for_typeck_results(self.hir_owner, id); self.data.insert(id.local_id, val) } pub fn remove(&mut self, id: hir::HirId) -> Option<V> { validate_hir_id_for_typeck_results(self.hir_owner, id); self.data.remove(&id.local_id) } } /// Whenever a value may be live across a generator yield, the type of that value winds up in the /// `GeneratorInteriorTypeCause` struct. This struct adds additional information about such /// captured types that can be useful for diagnostics. In particular, it stores the span that /// caused a given type to be recorded, along with the scope that enclosed the value (which can /// be used to find the await that the value is live across). /// /// For example: /// /// ```ignore (pseudo-Rust) /// async move { /// let x: T = expr; /// foo.await /// ... /// } /// ``` /// /// Here, we would store the type `T`, the span of the value `x`, the "scope-span" for /// the scope that contains `x`, the expr `T` evaluated from, and the span of `foo.await`. #[derive(TyEncodable, TyDecodable, Clone, Debug, Eq, Hash, PartialEq, HashStable)] #[derive(TypeFoldable)] pub struct GeneratorInteriorTypeCause<'tcx> { /// Type of the captured binding. pub ty: Ty<'tcx>, /// Span of the binding that was captured. pub span: Span, /// Span of the scope of the captured binding. pub scope_span: Option<Span>, /// Span of `.await` or `yield` expression. pub yield_span: Span, /// Expr which the type evaluated from. 
pub expr: Option<hir::HirId>, } // This type holds diagnostic information on generators and async functions across crate boundaries // and is used to provide better error messages #[derive(TyEncodable, TyDecodable, Clone, Debug, HashStable)] pub struct GeneratorDiagnosticData<'tcx> { pub generator_interior_types: ty::Binder<'tcx, Vec<GeneratorInteriorTypeCause<'tcx>>>, pub hir_owner: DefId, pub nodes_types: ItemLocalMap<Ty<'tcx>>, pub adjustments: ItemLocalMap<Vec<ty::adjustment::Adjustment<'tcx>>>, } #[derive(TyEncodable, TyDecodable, Debug, HashStable)] pub struct TypeckResults<'tcx> { /// The `HirId::owner` all `ItemLocalId`s in this table are relative to. pub hir_owner: LocalDefId, /// Resolved definitions for `<T>::X` associated paths and /// method calls, including those of overloaded operators. type_dependent_defs: ItemLocalMap<Result<(DefKind, DefId), ErrorGuaranteed>>, /// Resolved field indices for field accesses in expressions (`S { field }`, `obj.field`) /// or patterns (`S { field }`). The index is often useful by itself, but to learn more /// about the field you also need definition of the variant to which the field /// belongs, but it may not exist if it's a tuple field (`tuple.0`). field_indices: ItemLocalMap<usize>, /// Stores the types for various nodes in the AST. Note that this table /// is not guaranteed to be populated outside inference. See /// typeck::check::fn_ctxt for details. node_types: ItemLocalMap<Ty<'tcx>>, /// Stores the type parameters which were substituted to obtain the type /// of this node. This only applies to nodes that refer to entities /// parameterized by type parameters, such as generic fns, types, or /// other items. node_substs: ItemLocalMap<SubstsRef<'tcx>>, /// This will either store the canonicalized types provided by the user /// or the substitutions that the user explicitly gave (if any) attached /// to `id`. These will not include any inferred values. 
The canonical form /// is used to capture things like `_` or other unspecified values. /// /// For example, if the user wrote `foo.collect::<Vec<_>>()`, then the /// canonical substitutions would include only `for<X> { Vec<X> }`. /// /// See also `AscribeUserType` statement in MIR. user_provided_types: ItemLocalMap<CanonicalUserType<'tcx>>, /// Stores the canonicalized types provided by the user. See also /// `AscribeUserType` statement in MIR. pub user_provided_sigs: DefIdMap<CanonicalPolyFnSig<'tcx>>, adjustments: ItemLocalMap<Vec<ty::adjustment::Adjustment<'tcx>>>, /// Stores the actual binding mode for all instances of hir::BindingAnnotation. pat_binding_modes: ItemLocalMap<BindingMode>, /// Stores the types which were implicitly dereferenced in pattern binding modes /// for later usage in THIR lowering. For example, /// /// ``` /// match &&Some(5i32) { /// Some(n) => {}, /// _ => {}, /// } /// ``` /// leads to a `vec![&&Option<i32>, &Option<i32>]`. Empty vectors are not stored. /// /// See: /// <https://github.com/rust-lang/rfcs/blob/master/text/2005-match-ergonomics.md#definitions> pat_adjustments: ItemLocalMap<Vec<Ty<'tcx>>>, /// Records the reasons that we picked the kind of each closure; /// not all closures are present in the map. closure_kind_origins: ItemLocalMap<(Span, HirPlace<'tcx>)>, /// For each fn, records the "liberated" types of its arguments /// and return type. Liberated means that all bound regions /// (including late-bound regions) are replaced with free /// equivalents. This table is not used in codegen (since regions /// are erased there) and hence is not serialized to metadata. /// /// This table also contains the "revealed" values for any `impl Trait` /// that appear in the signature and whose values are being inferred /// by this function. 
/// /// # Example /// /// ```rust /// # use std::fmt::Debug; /// fn foo(x: &u32) -> impl Debug { *x } /// ``` /// /// The function signature here would be: /// /// ```ignore (illustrative) /// for<'a> fn(&'a u32) -> Foo /// ``` /// /// where `Foo` is an opaque type created for this function. /// /// /// The *liberated* form of this would be /// /// ```ignore (illustrative) /// fn(&'a u32) -> u32 /// ``` /// /// Note that `'a` is not bound (it would be an `ReFree`) and /// that the `Foo` opaque type is replaced by its hidden type. liberated_fn_sigs: ItemLocalMap<ty::FnSig<'tcx>>, /// For each FRU expression, record the normalized types of the fields /// of the struct - this is needed because it is non-trivial to /// normalize while preserving regions. This table is used only in /// MIR construction and hence is not serialized to metadata. fru_field_types: ItemLocalMap<Vec<Ty<'tcx>>>, /// For every coercion cast we add the HIR node ID of the cast /// expression to this set. coercion_casts: ItemLocalSet, /// Set of trait imports actually used in the method resolution. /// This is used for warning unused imports. During type /// checking, this `Lrc` should not be cloned: it must have a ref-count /// of 1 so that we can insert things into the set mutably. pub used_trait_imports: Lrc<FxHashSet<LocalDefId>>, /// If any errors occurred while type-checking this body, /// this field will be set to `Some(ErrorGuaranteed)`. pub tainted_by_errors: Option<ErrorGuaranteed>, /// All the opaque types that have hidden types set /// by this function. For return-position-impl-trait we also store the /// type here, so that mir-borrowck can figure out hidden types, /// even if they are only set in dead code (which doesn't show up in MIR). /// For type-alias-impl-trait, this map is only used to prevent query cycles, /// so the hidden types are all `None`. 
    pub concrete_opaque_types: VecMap<DefId, Option<Ty<'tcx>>>,

    /// Tracks the minimum captures required for a closure;
    /// see `MinCaptureInformationMap` for more details.
    pub closure_min_captures: ty::MinCaptureInformationMap<'tcx>,

    /// Tracks the fake reads required for a closure and the reason for the fake read.
    /// When performing pattern matching for closures, there are times we don't end up
    /// reading places that are mentioned in a closure (because of _ patterns). However,
    /// to ensure the places are initialized, we introduce fake reads.
    /// Consider these two examples:
    /// ``` (discriminant matching with only wildcard arm)
    /// let x: u8;
    /// let c = || match x { _ => () };
    /// ```
    /// In this example, we don't need to actually read/borrow `x` in `c`, and so we don't
    /// want to capture it. However, we do still want an error here, because `x` should have
    /// to be initialized at the point where c is created. Therefore, we add a "fake read"
    /// instead.
    /// ``` (destructured assignments)
    /// let c = || {
    ///     let (t1, t2) = t;
    /// }
    /// ```
    /// In the second example, we capture the disjoint fields of `t` (`t.0` & `t.1`), but
    /// we never capture `t`. This becomes an issue when we build MIR as we require
    /// information on `t` in order to create place `t.0` and `t.1`. We can solve this
    /// issue by fake reading `t`.
    pub closure_fake_reads: FxHashMap<DefId, Vec<(HirPlace<'tcx>, FakeReadCause, hir::HirId)>>,

    /// Tracks the rvalue scoping rules which defines finer scoping for rvalue expressions
    /// by applying extended parameter rules.
    /// Details may be find in `rustc_typeck::check::rvalue_scopes`.
    pub rvalue_scopes: RvalueScopes,

    /// Stores the type, expression, span and optional scope span of all types
    /// that are live across the yield of this generator (if a generator).
    pub generator_interior_types: ty::Binder<'tcx, Vec<GeneratorInteriorTypeCause<'tcx>>>,

    /// We sometimes treat byte string literals (which are of type `&[u8; N]`)
    /// as `&[u8]`, depending on the pattern in which they are used.
    /// This hashset records all instances where we behave
    /// like this to allow `const_to_pat` to reliably handle this situation.
    pub treat_byte_string_as_slice: ItemLocalSet,

    /// Contains the data for evaluating the effect of feature `capture_disjoint_fields`
    /// on closure size.
    pub closure_size_eval: FxHashMap<DefId, ClosureSizeProfileData<'tcx>>,
}

impl<'tcx> TypeckResults<'tcx> {
    /// Creates an empty set of typeck results for the body owned by `hir_owner`;
    /// every side table starts out empty and is populated during type checking.
    pub fn new(hir_owner: LocalDefId) -> TypeckResults<'tcx> {
        TypeckResults {
            hir_owner,
            type_dependent_defs: Default::default(),
            field_indices: Default::default(),
            user_provided_types: Default::default(),
            user_provided_sigs: Default::default(),
            node_types: Default::default(),
            node_substs: Default::default(),
            adjustments: Default::default(),
            pat_binding_modes: Default::default(),
            pat_adjustments: Default::default(),
            closure_kind_origins: Default::default(),
            liberated_fn_sigs: Default::default(),
            fru_field_types: Default::default(),
            coercion_casts: Default::default(),
            used_trait_imports: Lrc::new(Default::default()),
            tainted_by_errors: None,
            concrete_opaque_types: Default::default(),
            closure_min_captures: Default::default(),
            closure_fake_reads: Default::default(),
            rvalue_scopes: Default::default(),
            generator_interior_types: ty::Binder::dummy(Default::default()),
            treat_byte_string_as_slice: Default::default(),
            closure_size_eval: Default::default(),
        }
    }

    /// Returns the final resolution of a `QPath` in an `Expr` or `Pat` node.
    pub fn qpath_res(&self, qpath: &hir::QPath<'_>, id: hir::HirId) -> Res {
        match *qpath {
            hir::QPath::Resolved(_, ref path) => path.res,
            // Type-relative and lang-item paths get their resolution recorded in
            // `type_dependent_defs` during typeck; fall back to `Res::Err` when
            // no entry was recorded (e.g. due to an earlier error).
            hir::QPath::TypeRelative(..) | hir::QPath::LangItem(..) => self
                .type_dependent_def(id)
                .map_or(Res::Err, |(kind, def_id)| Res::Def(kind, def_id)),
        }
    }

    /// Read-only view of the `type_dependent_defs` table, scoped to this body owner.
    pub fn type_dependent_defs(
        &self,
    ) -> LocalTableInContext<'_, Result<(DefKind, DefId), ErrorGuaranteed>> {
        LocalTableInContext { hir_owner: self.hir_owner, data: &self.type_dependent_defs }
    }

    /// Successful type-dependent resolution for `id`, if one was recorded.
    pub fn type_dependent_def(&self, id: HirId) -> Option<(DefKind, DefId)> {
        validate_hir_id_for_typeck_results(self.hir_owner, id);
        self.type_dependent_defs.get(&id.local_id).cloned().and_then(|r| r.ok())
    }

    /// Like `type_dependent_def`, but only the `DefId`.
    pub fn type_dependent_def_id(&self, id: HirId) -> Option<DefId> {
        self.type_dependent_def(id).map(|(_, def_id)| def_id)
    }

    pub fn type_dependent_defs_mut(
        &mut self,
    ) -> LocalTableInContextMut<'_, Result<(DefKind, DefId), ErrorGuaranteed>> {
        LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.type_dependent_defs }
    }

    pub fn field_indices(&self) -> LocalTableInContext<'_, usize> {
        LocalTableInContext { hir_owner: self.hir_owner, data: &self.field_indices }
    }

    pub fn field_indices_mut(&mut self) -> LocalTableInContextMut<'_, usize> {
        LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.field_indices }
    }

    pub fn user_provided_types(&self) -> LocalTableInContext<'_, CanonicalUserType<'tcx>> {
        LocalTableInContext { hir_owner: self.hir_owner, data: &self.user_provided_types }
    }

    pub fn user_provided_types_mut(
        &mut self,
    ) -> LocalTableInContextMut<'_, CanonicalUserType<'tcx>> {
        LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.user_provided_types }
    }

    pub fn node_types(&self) -> LocalTableInContext<'_, Ty<'tcx>> {
        LocalTableInContext { hir_owner: self.hir_owner, data: &self.node_types }
    }

    pub fn node_types_mut(&mut self) -> LocalTableInContextMut<'_, Ty<'tcx>> {
        LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.node_types }
    }

    /// Builds a crate-independent snapshot of the generator interior data
    /// (types, node types, adjustments) for use in cross-crate diagnostics.
    pub fn get_generator_diagnostic_data(&self) -> GeneratorDiagnosticData<'tcx> {
        let generator_interior_type = self.generator_interior_types.map_bound_ref(|vec| {
            vec.iter()
                .map(|item| {
                    GeneratorInteriorTypeCause {
                        ty: item.ty,
                        span: item.span,
                        scope_span: item.scope_span,
                        yield_span: item.yield_span,
                        expr: None, //FIXME: Passing expression over crate boundaries is impossible at the moment
                    }
                })
                .collect::<Vec<_>>()
        });
        GeneratorDiagnosticData {
            generator_interior_types: generator_interior_type,
            hir_owner: self.hir_owner.to_def_id(),
            nodes_types: self.node_types.clone(),
            adjustments: self.adjustments.clone(),
        }
    }

    /// Returns the recorded type of the node; ICEs (via `bug!`) if no type was recorded.
    pub fn node_type(&self, id: hir::HirId) -> Ty<'tcx> {
        self.node_type_opt(id).unwrap_or_else(|| {
            bug!("node_type: no type for node `{}`", tls::with(|tcx| tcx.hir().node_to_string(id)))
        })
    }

    /// Non-panicking variant of `node_type`.
    pub fn node_type_opt(&self, id: hir::HirId) -> Option<Ty<'tcx>> {
        validate_hir_id_for_typeck_results(self.hir_owner, id);
        self.node_types.get(&id.local_id).cloned()
    }

    pub fn node_substs_mut(&mut self) -> LocalTableInContextMut<'_, SubstsRef<'tcx>> {
        LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.node_substs }
    }

    /// Substitutions recorded for `id`; missing entries are treated as empty substs.
    pub fn node_substs(&self, id: hir::HirId) -> SubstsRef<'tcx> {
        validate_hir_id_for_typeck_results(self.hir_owner, id);
        self.node_substs.get(&id.local_id).cloned().unwrap_or_else(|| InternalSubsts::empty())
    }

    pub fn node_substs_opt(&self, id: hir::HirId) -> Option<SubstsRef<'tcx>> {
        validate_hir_id_for_typeck_results(self.hir_owner, id);
        self.node_substs.get(&id.local_id).cloned()
    }

    // Returns the type of a pattern as a monotype. Like @expr_ty, this function
    // doesn't provide type parameter substitutions.
    pub fn pat_ty(&self, pat: &hir::Pat<'_>) -> Ty<'tcx> {
        self.node_type(pat.hir_id)
    }

    // Returns the type of an expression as a monotype.
    //
    // NB (1): This is the PRE-ADJUSTMENT TYPE for the expression. That is, in
    // some cases, we insert `Adjustment` annotations such as auto-deref or
    // auto-ref. The type returned by this function does not consider such
    // adjustments. See `expr_ty_adjusted()` instead.
    //
    // NB (2): This type doesn't provide type parameter substitutions; e.g., if you
    // ask for the type of "id" in "id(3)", it will return "fn(&isize) -> isize"
    // instead of "fn(ty) -> T with T = isize".
    pub fn expr_ty(&self, expr: &hir::Expr<'_>) -> Ty<'tcx> {
        self.node_type(expr.hir_id)
    }

    pub fn expr_ty_opt(&self, expr: &hir::Expr<'_>) -> Option<Ty<'tcx>> {
        self.node_type_opt(expr.hir_id)
    }

    pub fn adjustments(&self) -> LocalTableInContext<'_, Vec<ty::adjustment::Adjustment<'tcx>>> {
        LocalTableInContext { hir_owner: self.hir_owner, data: &self.adjustments }
    }

    pub fn adjustments_mut(
        &mut self,
    ) -> LocalTableInContextMut<'_, Vec<ty::adjustment::Adjustment<'tcx>>> {
        LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.adjustments }
    }

    /// Adjustments recorded for `expr`; empty slice when none were recorded.
    pub fn expr_adjustments(&self, expr: &hir::Expr<'_>) -> &[ty::adjustment::Adjustment<'tcx>] {
        validate_hir_id_for_typeck_results(self.hir_owner, expr.hir_id);
        self.adjustments.get(&expr.hir_id.local_id).map_or(&[], |a| &a[..])
    }

    /// Returns the type of `expr`, considering any `Adjustment`
    /// entry recorded for that expression.
    pub fn expr_ty_adjusted(&self, expr: &hir::Expr<'_>) -> Ty<'tcx> {
        self.expr_adjustments(expr).last().map_or_else(|| self.expr_ty(expr), |adj| adj.target)
    }

    pub fn expr_ty_adjusted_opt(&self, expr: &hir::Expr<'_>) -> Option<Ty<'tcx>> {
        self.expr_adjustments(expr).last().map(|adj| adj.target).or_else(|| self.expr_ty_opt(expr))
    }

    /// Whether `expr` resolved to an associated function (method call or
    /// overloaded operator) during type checking.
    pub fn is_method_call(&self, expr: &hir::Expr<'_>) -> bool {
        // Only paths and method calls/overloaded operators have
        // entries in type_dependent_defs, ignore the former here.
        if let hir::ExprKind::Path(_) = expr.kind {
            return false;
        }

        matches!(self.type_dependent_defs().get(expr.hir_id), Some(Ok((DefKind::AssocFn, _))))
    }

    /// Looks up the binding mode recorded for pattern binding `id`; a missing
    /// entry is reported as a delayed bug rather than an immediate ICE.
    pub fn extract_binding_mode(&self, s: &Session, id: HirId, sp: Span) -> Option<BindingMode> {
        self.pat_binding_modes().get(id).copied().or_else(|| {
            s.delay_span_bug(sp, "missing binding mode");
            None
        })
    }

    pub fn pat_binding_modes(&self) -> LocalTableInContext<'_, BindingMode> {
        LocalTableInContext { hir_owner: self.hir_owner, data: &self.pat_binding_modes }
    }

    pub fn pat_binding_modes_mut(&mut self) -> LocalTableInContextMut<'_, BindingMode> {
        LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.pat_binding_modes }
    }

    pub fn pat_adjustments(&self) -> LocalTableInContext<'_, Vec<Ty<'tcx>>> {
        LocalTableInContext { hir_owner: self.hir_owner, data: &self.pat_adjustments }
    }

    pub fn pat_adjustments_mut(&mut self) -> LocalTableInContextMut<'_, Vec<Ty<'tcx>>> {
        LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.pat_adjustments }
    }

    /// For a given closure, returns the iterator of `ty::CapturedPlace`s that are captured
    /// by the closure.
    pub fn closure_min_captures_flattened(
        &self,
        closure_def_id: DefId,
    ) -> impl Iterator<Item = &ty::CapturedPlace<'tcx>> {
        self.closure_min_captures
            .get(&closure_def_id)
            .map(|closure_min_captures| closure_min_captures.values().flat_map(|v| v.iter()))
            .into_iter()
            .flatten()
    }

    pub fn closure_kind_origins(&self) -> LocalTableInContext<'_, (Span, HirPlace<'tcx>)> {
        LocalTableInContext { hir_owner: self.hir_owner, data: &self.closure_kind_origins }
    }

    pub fn closure_kind_origins_mut(
        &mut self,
    ) -> LocalTableInContextMut<'_, (Span, HirPlace<'tcx>)> {
        LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.closure_kind_origins }
    }

    pub fn liberated_fn_sigs(&self) -> LocalTableInContext<'_, ty::FnSig<'tcx>> {
        LocalTableInContext { hir_owner: self.hir_owner, data: &self.liberated_fn_sigs }
    }

    pub fn liberated_fn_sigs_mut(&mut self) -> LocalTableInContextMut<'_, ty::FnSig<'tcx>> {
        LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.liberated_fn_sigs }
    }

    pub fn fru_field_types(&self) -> LocalTableInContext<'_, Vec<Ty<'tcx>>> {
        LocalTableInContext { hir_owner: self.hir_owner, data: &self.fru_field_types }
    }

    pub fn fru_field_types_mut(&mut self) -> LocalTableInContextMut<'_, Vec<Ty<'tcx>>> {
        LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.fru_field_types }
    }

    /// Whether the cast at `hir_id` was recorded as a coercion cast.
    pub fn is_coercion_cast(&self, hir_id: hir::HirId) -> bool {
        validate_hir_id_for_typeck_results(self.hir_owner, hir_id);
        self.coercion_casts.contains(&hir_id.local_id)
    }

    pub fn set_coercion_cast(&mut self, id: ItemLocalId) {
        self.coercion_casts.insert(id);
    }

    pub fn coercion_casts(&self) -> &ItemLocalSet {
        &self.coercion_casts
    }
}

rustc_index::newtype_index! {
    pub struct UserTypeAnnotationIndex {
        derive [HashStable]
        DEBUG_FORMAT = "UserType({})",
        const START_INDEX = 0,
    }
}

/// Mapping of type annotation indices to canonical user type annotations.
pub type CanonicalUserTypeAnnotations<'tcx> =
    IndexVec<UserTypeAnnotationIndex, CanonicalUserTypeAnnotation<'tcx>>;

/// A canonicalized user type annotation, together with the span it came from
/// and the type that inference produced for the annotated node.
#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, Lift)]
pub struct CanonicalUserTypeAnnotation<'tcx> {
    pub user_ty: CanonicalUserType<'tcx>,
    pub span: Span,
    pub inferred_ty: Ty<'tcx>,
}

/// Canonicalized user type annotation.
pub type CanonicalUserType<'tcx> = Canonical<'tcx, UserType<'tcx>>;

impl<'tcx> CanonicalUserType<'tcx> {
    /// Returns `true` if this represents a substitution of the form `[?0, ?1, ?2]`,
    /// i.e., each thing is mapped to a canonical variable with the same index.
    pub fn is_identity(&self) -> bool {
        match self.value {
            UserType::Ty(_) => false,
            UserType::TypeOf(_, user_substs) => {
                if user_substs.user_self_ty.is_some() {
                    return false;
                }

                // Pair each substitution with the canonical variable at the
                // same position and check that they line up exactly.
                iter::zip(user_substs.substs, BoundVar::new(0)..).all(|(kind, cvar)| {
                    match kind.unpack() {
                        GenericArgKind::Type(ty) => match ty.kind() {
                            ty::Bound(debruijn, b) => {
                                // We only allow a `ty::INNERMOST` index in substitutions.
                                assert_eq!(*debruijn, ty::INNERMOST);
                                cvar == b.var
                            }
                            _ => false,
                        },

                        GenericArgKind::Lifetime(r) => match *r {
                            ty::ReLateBound(debruijn, br) => {
                                // We only allow a `ty::INNERMOST` index in substitutions.
                                assert_eq!(debruijn, ty::INNERMOST);
                                cvar == br.var
                            }
                            _ => false,
                        },

                        GenericArgKind::Const(ct) => match ct.val() {
                            ty::ConstKind::Bound(debruijn, b) => {
                                // We only allow a `ty::INNERMOST` index in substitutions.
                                assert_eq!(debruijn, ty::INNERMOST);
                                cvar == b
                            }
                            _ => false,
                        },
                    }
                })
            }
        }
    }
}

/// A user-given type annotation attached to a constant. These arise
/// from constants that are named via paths, like `Foo::<A>::new` and
/// so forth.
#[derive(Copy, Clone, Debug, PartialEq, TyEncodable, TyDecodable)]
#[derive(HashStable, TypeFoldable, Lift)]
pub enum UserType<'tcx> {
    Ty(Ty<'tcx>),

    /// The canonical type is the result of `type_of(def_id)` with the
    /// given substitutions applied.
    TypeOf(DefId, UserSubsts<'tcx>),
}

impl<'tcx> CommonTypes<'tcx> {
    /// Pre-interns one copy of every primitive type so later lookups are
    /// pointer reads rather than interner queries.
    fn new(
        interners: &CtxtInterners<'tcx>,
        sess: &Session,
        resolutions: &ty::ResolverOutputs,
    ) -> CommonTypes<'tcx> {
        // Shorthand: intern a `TyKind` through the context's type interner.
        let mk = |ty| interners.intern_ty(ty, sess, resolutions);

        CommonTypes {
            unit: mk(Tuple(List::empty())),
            bool: mk(Bool),
            char: mk(Char),
            never: mk(Never),
            isize: mk(Int(ty::IntTy::Isize)),
            i8: mk(Int(ty::IntTy::I8)),
            i16: mk(Int(ty::IntTy::I16)),
            i32: mk(Int(ty::IntTy::I32)),
            i64: mk(Int(ty::IntTy::I64)),
            i128: mk(Int(ty::IntTy::I128)),
            usize: mk(Uint(ty::UintTy::Usize)),
            u8: mk(Uint(ty::UintTy::U8)),
            u16: mk(Uint(ty::UintTy::U16)),
            u32: mk(Uint(ty::UintTy::U32)),
            u64: mk(Uint(ty::UintTy::U64)),
            u128: mk(Uint(ty::UintTy::U128)),
            f32: mk(Float(ty::FloatTy::F32)),
            f64: mk(Float(ty::FloatTy::F64)),
            str_: mk(Str),
            self_param: mk(ty::Param(ty::ParamTy { index: 0, name: kw::SelfUpper })),

            // Placeholder `Self` type for trait objects: a fresh inference type.
            trait_object_dummy_self: mk(Infer(ty::FreshTy(0))),
        }
    }
}

impl<'tcx> CommonLifetimes<'tcx> {
    /// Pre-interns the commonly used regions (`'static`, erased, root-empty).
    fn new(interners: &CtxtInterners<'tcx>) -> CommonLifetimes<'tcx> {
        // Intern a region kind, allocating it in the context arena.
        let mk = |r| {
            Region(Interned::new_unchecked(
                interners.region.intern(r, |r| InternedInSet(interners.arena.alloc(r))).0,
            ))
        };

        CommonLifetimes {
            re_root_empty: mk(ty::ReEmpty(ty::UniverseIndex::ROOT)),
            re_static: mk(ty::ReStatic),
            re_erased: mk(ty::ReErased),
        }
    }
}

impl<'tcx> CommonConsts<'tcx> {
    /// Pre-interns commonly used constants (currently only the unit value).
    fn new(interners: &CtxtInterners<'tcx>, types: &CommonTypes<'tcx>) -> CommonConsts<'tcx> {
        // Intern a const, allocating it in the context arena.
        let mk_const = |c| {
            Const(Interned::new_unchecked(
                interners.const_.intern(c, |c| InternedInSet(interners.arena.alloc(c))).0,
            ))
        };

        CommonConsts {
            unit: mk_const(ty::ConstS {
                val: ty::ConstKind::Value(ConstValue::Scalar(Scalar::ZST)),
                ty: types.unit,
            }),
        }
    }
}

// This struct contains information regarding the `ReFree(FreeRegion)` corresponding to a lifetime
// conflict.
#[derive(Debug)]
pub struct FreeRegionInfo {
    // `LocalDefId` corresponding to FreeRegion
    pub def_id: LocalDefId,
    // the bound region corresponding to FreeRegion
    pub boundregion: ty::BoundRegionKind,
    // checks if bound region is in Impl Item
    pub is_impl_item: bool,
}

/// The central data structure of the compiler. It stores references
/// to the various **arenas** and also houses the results of the
/// various **compiler queries** that have been performed. See the
/// [rustc dev guide] for more details.
///
/// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/ty.html
#[derive(Copy, Clone)]
#[rustc_diagnostic_item = "TyCtxt"]
#[rustc_pass_by_value]
pub struct TyCtxt<'tcx> {
    gcx: &'tcx GlobalCtxt<'tcx>,
}

impl<'tcx> Deref for TyCtxt<'tcx> {
    type Target = &'tcx GlobalCtxt<'tcx>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.gcx
    }
}

/// The actual global context that `TyCtxt` is a cheap, copyable handle to.
pub struct GlobalCtxt<'tcx> {
    pub arena: &'tcx WorkerLocal<Arena<'tcx>>,

    // Interners for types, regions, consts, etc. — the source of the
    // pointer-identity guarantees that `Lift` and the `nop_lift!` impls rely on.
    interners: CtxtInterners<'tcx>,

    pub sess: &'tcx Session,

    /// This only ever stores a `LintStore` but we don't want a dependency on that type here.
    ///
    /// FIXME(Centril): consider `dyn LintStoreMarker` once
    /// we can upcast to `Any` for some additional type safety.
    pub lint_store: Lrc<dyn Any + sync::Sync + sync::Send>,

    pub dep_graph: DepGraph,

    pub prof: SelfProfilerRef,

    /// Common types, pre-interned for your convenience.
    pub types: CommonTypes<'tcx>,

    /// Common lifetimes, pre-interned for your convenience.
    pub lifetimes: CommonLifetimes<'tcx>,

    /// Common consts, pre-interned for your convenience.
    pub consts: CommonConsts<'tcx>,

    /// Output of the resolver.
    pub(crate) untracked_resolutions: ty::ResolverOutputs,

    pub(crate) untracked_crate: &'tcx hir::Crate<'tcx>,

    /// This provides access to the incremental compilation on-disk cache for query results.
    /// Do not access this directly. It is only meant to be used by
    /// `DepGraph::try_mark_green()` and the query infrastructure.
    /// This is `None` if we are not incremental compilation mode
    pub on_disk_cache: Option<&'tcx dyn OnDiskCache<'tcx>>,

    pub queries: &'tcx dyn query::QueryEngine<'tcx>,
    pub query_caches: query::QueryCaches<'tcx>,
    // Per-`DepKind` metadata, indexed by `DepKind as usize` (see `query_kind`).
    query_kinds: &'tcx [DepKindStruct],

    // Internal caches for metadata decoding. No need to track deps on this.
    pub ty_rcache: Lock<FxHashMap<ty::CReaderCacheKey, Ty<'tcx>>>,
    pub pred_rcache: Lock<FxHashMap<ty::CReaderCacheKey, Predicate<'tcx>>>,

    /// Caches the results of trait selection. This cache is used
    /// for things that do not have to do with the parameters in scope.
    pub selection_cache: traits::SelectionCache<'tcx>,

    /// Caches the results of trait evaluation. This cache is used
    /// for things that do not have to do with the parameters in scope.
    /// Merge this with `selection_cache`?
    pub evaluation_cache: traits::EvaluationCache<'tcx>,

    /// The definite name of the current crate after taking into account
    /// attributes, commandline parameters, etc.
    crate_name: Symbol,

    /// Data layout specification for the current target.
    pub data_layout: TargetDataLayout,

    /// Stores memory for globals (statics/consts).
    pub(crate) alloc_map: Lock<interpret::AllocMap<'tcx>>,

    output_filenames: Arc<OutputFilenames>,
}

impl<'tcx> TyCtxt<'tcx> {
    /// Expects a body and returns its codegen attributes.
    ///
    /// Unlike `codegen_fn_attrs`, this returns `CodegenFnAttrs::EMPTY` for
    /// constants.
    pub fn body_codegen_attrs(self, def_id: DefId) -> &'tcx CodegenFnAttrs {
        let def_kind = self.def_kind(def_id);
        if def_kind.has_codegen_attrs() {
            self.codegen_fn_attrs(def_id)
        } else if matches!(
            def_kind,
            DefKind::AnonConst | DefKind::AssocConst | DefKind::Const | DefKind::InlineConst
        ) {
            CodegenFnAttrs::EMPTY
        } else {
            bug!(
                "body_codegen_fn_attrs called on unexpected definition: {:?} {:?}",
                def_id,
                def_kind
            )
        }
    }

    /// Dispatches to `typeck_const_arg` when the definition carries an
    /// optional const parameter, and to plain `typeck` otherwise.
    pub fn typeck_opt_const_arg(
        self,
        def: ty::WithOptConstParam<LocalDefId>,
    ) -> &'tcx TypeckResults<'tcx> {
        if let Some(param_did) = def.const_param_did {
            self.typeck_const_arg((def.did, param_did))
        } else {
            self.typeck(def.did)
        }
    }

    /// Dispatches to `mir_borrowck_const_arg` when the definition carries an
    /// optional const parameter, and to plain `mir_borrowck` otherwise.
    pub fn mir_borrowck_opt_const_arg(
        self,
        def: ty::WithOptConstParam<LocalDefId>,
    ) -> &'tcx BorrowCheckResult<'tcx> {
        if let Some(param_did) = def.const_param_did {
            self.mir_borrowck_const_arg((def.did, param_did))
        } else {
            self.mir_borrowck(def.did)
        }
    }

    /// Arena-allocates `thir` wrapped in `Steal`, so a later pass can take ownership.
    pub fn alloc_steal_thir(self, thir: Thir<'tcx>) -> &'tcx Steal<Thir<'tcx>> {
        self.arena.alloc(Steal::new(thir))
    }

    /// Arena-allocates a MIR body wrapped in `Steal`.
    pub fn alloc_steal_mir(self, mir: Body<'tcx>) -> &'tcx Steal<Body<'tcx>> {
        self.arena.alloc(Steal::new(mir))
    }

    /// Arena-allocates the promoted MIR bodies wrapped in `Steal`.
    pub fn alloc_steal_promoted(
        self,
        promoted: IndexVec<Promoted, Body<'tcx>>,
    ) -> &'tcx Steal<IndexVec<Promoted, Body<'tcx>>> {
        self.arena.alloc(Steal::new(promoted))
    }

    /// Builds and interns an `AdtDef` from its constituent parts.
    pub fn alloc_adt_def(
        self,
        did: DefId,
        kind: AdtKind,
        variants: IndexVec<VariantIdx, ty::VariantDef>,
        repr: ReprOptions,
    ) -> ty::AdtDef<'tcx> {
        self.intern_adt_def(ty::AdtDefData::new(self, did, kind, variants, repr))
    }

    /// Allocates a read-only byte or string literal for `mir::interpret`.
    pub fn allocate_bytes(self, bytes: &[u8]) -> interpret::AllocId {
        // Create an allocation that just contains these bytes.
        let alloc = interpret::Allocation::from_bytes_byte_aligned_immutable(bytes);
        let alloc = self.intern_const_alloc(alloc);
        self.create_memory_alloc(alloc)
    }

    /// Returns a range of the start/end indices specified with the
    /// `rustc_layout_scalar_valid_range` attribute.
    // FIXME(eddyb) this is an awkward spot for this method, maybe move it?
    pub fn layout_scalar_valid_range(self, def_id: DefId) -> (Bound<u128>, Bound<u128>) {
        // Reads one endpoint: missing attribute => unbounded; a malformed
        // attribute is a delayed bug and also treated as unbounded.
        let get = |name| {
            let Some(attr) = self.get_attr(def_id, name) else {
                return Bound::Unbounded;
            };
            debug!("layout_scalar_valid_range: attr={:?}", attr);
            if let Some(
                &[
                    ast::NestedMetaItem::Literal(ast::Lit { kind: ast::LitKind::Int(a, _), .. }),
                ],
            ) = attr.meta_item_list().as_deref()
            {
                Bound::Included(a)
            } else {
                self.sess
                    .delay_span_bug(attr.span, "invalid rustc_layout_scalar_valid_range attribute");
                Bound::Unbounded
            }
        };
        (
            get(sym::rustc_layout_scalar_valid_range_start),
            get(sym::rustc_layout_scalar_valid_range_end),
        )
    }

    /// Lifts `value` into this context via the `Lift` trait; `None` if the
    /// value was not interned in this context.
    pub fn lift<T: Lift<'tcx>>(self, value: T) -> Option<T::Lifted> {
        value.lift_to_tcx(self)
    }

    /// Creates a type context and call the closure with a `TyCtxt` reference
    /// to the context. The closure enforces that the type context and any interned
    /// value (types, substs, etc.) can only be used while `ty::tls` has a valid
    /// reference to the context, to allow formatting values that need it.
    pub fn create_global_ctxt(
        s: &'tcx Session,
        lint_store: Lrc<dyn Any + sync::Send + sync::Sync>,
        arena: &'tcx WorkerLocal<Arena<'tcx>>,
        resolutions: ty::ResolverOutputs,
        krate: &'tcx hir::Crate<'tcx>,
        dep_graph: DepGraph,
        on_disk_cache: Option<&'tcx dyn OnDiskCache<'tcx>>,
        queries: &'tcx dyn query::QueryEngine<'tcx>,
        query_kinds: &'tcx [DepKindStruct],
        crate_name: &str,
        output_filenames: OutputFilenames,
    ) -> GlobalCtxt<'tcx> {
        // A target data-layout parse failure is unrecoverable for the session.
        let data_layout = TargetDataLayout::parse(&s.target).unwrap_or_else(|err| {
            s.fatal(&err);
        });
        let interners = CtxtInterners::new(arena);
        // Pre-intern the common types/lifetimes/consts before any other interning.
        let common_types = CommonTypes::new(&interners, s, &resolutions);
        let common_lifetimes = CommonLifetimes::new(&interners);
        let common_consts = CommonConsts::new(&interners, &common_types);

        GlobalCtxt {
            sess: s,
            lint_store,
            arena,
            interners,
            dep_graph,
            untracked_resolutions: resolutions,
            prof: s.prof.clone(),
            types: common_types,
            lifetimes: common_lifetimes,
            consts: common_consts,
            untracked_crate: krate,
            on_disk_cache,
            queries,
            query_caches: query::QueryCaches::default(),
            query_kinds,
            ty_rcache: Default::default(),
            pred_rcache: Default::default(),
            selection_cache: Default::default(),
            evaluation_cache: Default::default(),
            crate_name: Symbol::intern(crate_name),
            data_layout,
            alloc_map: Lock::new(interpret::AllocMap::new()),
            output_filenames: Arc::new(output_filenames),
        }
    }

    /// Looks up the `DepKindStruct` metadata for dep-kind `k`.
    pub(crate) fn query_kind(self, k: DepKind) -> &'tcx DepKindStruct {
        &self.query_kinds[k as usize]
    }

    /// Constructs a `TyKind::Error` type and registers a `delay_span_bug` to ensure it gets used.
    #[track_caller]
    pub fn ty_error(self) -> Ty<'tcx> {
        self.ty_error_with_message(DUMMY_SP, "TyKind::Error constructed but no error reported")
    }

    /// Constructs a `TyKind::Error` type and registers a `delay_span_bug` with the given `msg` to
    /// ensure it gets used.
    #[track_caller]
    pub fn ty_error_with_message<S: Into<MultiSpan>>(self, span: S, msg: &str) -> Ty<'tcx> {
        let reported = self.sess.delay_span_bug(span, msg);
        self.mk_ty(Error(DelaySpanBugEmitted { reported, _priv: () }))
    }

    /// Like [TyCtxt::ty_error] but for constants.
    #[track_caller]
    pub fn const_error(self, ty: Ty<'tcx>) -> Const<'tcx> {
        self.const_error_with_message(
            ty,
            DUMMY_SP,
            "ty::ConstKind::Error constructed but no error reported",
        )
    }

    /// Like [TyCtxt::ty_error_with_message] but for constants.
    #[track_caller]
    pub fn const_error_with_message<S: Into<MultiSpan>>(
        self,
        ty: Ty<'tcx>,
        span: S,
        msg: &str,
    ) -> Const<'tcx> {
        let reported = self.sess.delay_span_bug(span, msg);
        self.mk_const(ty::ConstS {
            val: ty::ConstKind::Error(DelaySpanBugEmitted { reported, _priv: () }),
            ty,
        })
    }

    /// Forwards to `Session::consider_optimizing` with the local crate's name.
    pub fn consider_optimizing<T: Fn() -> String>(self, msg: T) -> bool {
        let cname = self.crate_name(LOCAL_CRATE);
        self.sess.consider_optimizing(cname.as_str(), msg)
    }

    /// Obtain all lang items of this crate and all dependencies (recursively)
    pub fn lang_items(self) -> &'tcx rustc_hir::lang_items::LanguageItems {
        self.get_lang_items(())
    }

    /// Obtain the given diagnostic item's `DefId`. Use `is_diagnostic_item` if you just want to
    /// compare against another `DefId`, since `is_diagnostic_item` is cheaper.
    pub fn get_diagnostic_item(self, name: Symbol) -> Option<DefId> {
        self.all_diagnostic_items(()).name_to_id.get(&name).copied()
    }

    /// Obtain the diagnostic item's name
    pub fn get_diagnostic_name(self, id: DefId) -> Option<Symbol> {
        self.diagnostic_items(id.krate).id_to_name.get(&id).copied()
    }

    /// Check whether the diagnostic item with the given `name` has the given `DefId`.
    pub fn is_diagnostic_item(self, name: Symbol, did: DefId) -> bool {
        self.diagnostic_items(did.krate).name_to_id.get(&name) == Some(&did)
    }

    /// Returns the crate's stability index (via the `stability_index` query).
    pub fn stability(self) -> &'tcx stability::Index {
        self.stability_index(())
    }

    /// Returns the enabled language features (via the `features_query` query).
    pub fn features(self) -> &'tcx rustc_feature::Features {
        self.features_query(())
    }

    /// Returns the `DefKey` for `id`, consulting the local definitions table or
    /// the crate store depending on where the `DefId` lives.
    pub fn def_key(self, id: DefId) -> rustc_hir::definitions::DefKey {
        // Accessing the DefKey is ok, since it is part of DefPathHash.
        if let Some(id) = id.as_local() {
            self.untracked_resolutions.definitions.def_key(id)
        } else {
            self.untracked_resolutions.cstore.def_key(id)
        }
    }

    /// Converts a `DefId` into its fully expanded `DefPath` (every
    /// `DefId` is really just an interned `DefPath`).
    ///
    /// Note that if `id` is not local to this crate, the result will
    /// be a non-local `DefPath`.
    pub fn def_path(self, id: DefId) -> rustc_hir::definitions::DefPath {
        // Accessing the DefPath is ok, since it is part of DefPathHash.
        if let Some(id) = id.as_local() {
            self.untracked_resolutions.definitions.def_path(id)
        } else {
            self.untracked_resolutions.cstore.def_path(id)
        }
    }

    #[inline]
    pub fn def_path_hash(self, def_id: DefId) -> rustc_hir::definitions::DefPathHash {
        // Accessing the DefPathHash is ok, it is incr. comp. stable.
        if let Some(def_id) = def_id.as_local() {
            self.untracked_resolutions.definitions.def_path_hash(def_id)
        } else {
            self.untracked_resolutions.cstore.def_path_hash(def_id)
        }
    }

    /// Returns the `StableCrateId` for `crate_num`, local or upstream.
    #[inline]
    pub fn stable_crate_id(self, crate_num: CrateNum) -> StableCrateId {
        if crate_num == LOCAL_CRATE {
            self.sess.local_stable_crate_id()
        } else {
            self.untracked_resolutions.cstore.stable_crate_id(crate_num)
        }
    }

    /// Maps a StableCrateId to the corresponding CrateNum. This method assumes
    /// that the crate in question has already been loaded by the CrateStore.
    #[inline]
    pub fn stable_crate_id_to_crate_num(self, stable_crate_id: StableCrateId) -> CrateNum {
        if stable_crate_id == self.sess.local_stable_crate_id() {
            LOCAL_CRATE
        } else {
            self.untracked_resolutions.cstore.stable_crate_id_to_crate_num(stable_crate_id)
        }
    }

    /// Converts a `DefPathHash` to its corresponding `DefId` in the current compilation
    /// session, if it still exists. This is used during incremental compilation to
    /// turn a deserialized `DefPathHash` into its current `DefId`.
    pub fn def_path_hash_to_def_id(self, hash: DefPathHash, err: &mut dyn FnMut() -> !) -> DefId {
        debug!("def_path_hash_to_def_id({:?})", hash);

        let stable_crate_id = hash.stable_crate_id();

        // If this is a DefPathHash from the local crate, we can look up the
        // DefId in the tcx's `Definitions`.
        if stable_crate_id == self.sess.local_stable_crate_id() {
            self.untracked_resolutions
                .definitions
                .local_def_path_hash_to_def_id(hash, err)
                .to_def_id()
        } else {
            // If this is a DefPathHash from an upstream crate, let the CrateStore map
            // it to a DefId.
            let cstore = &self.untracked_resolutions.cstore;
            let cnum = cstore.stable_crate_id_to_crate_num(stable_crate_id);
            cstore.def_path_hash_to_def_id(cnum, hash)
        }
    }

    /// Renders `def_id` as `crate[xxxx]::path` for debug output.
    pub fn def_path_debug_str(self, def_id: DefId) -> String {
        // We are explicitly not going through queries here in order to get
        // crate name and stable crate id since this code is called from debug!()
        // statements within the query system and we'd run into endless
        // recursion otherwise.
        let (crate_name, stable_crate_id) = if def_id.is_local() {
            (self.crate_name, self.sess.local_stable_crate_id())
        } else {
            let cstore = &self.untracked_resolutions.cstore;
            (cstore.crate_name(def_id.krate), cstore.stable_crate_id(def_id.krate))
        };

        format!(
            "{}[{}]{}",
            crate_name,
            // Don't print the whole stable crate id. That's just
            // annoying in debug output.
            &(format!("{:08x}", stable_crate_id.to_u64()))[..4],
            self.def_path(def_id).to_string_no_crate_verbose()
        )
    }

    /// Note that this is *untracked* and should only be used within the query
    /// system if the result is otherwise tracked through queries
    pub fn cstore_untracked(self) -> &'tcx ty::CrateStoreDyn {
        &*self.untracked_resolutions.cstore
    }

    /// Note that this is *untracked* and should only be used within the query
    /// system if the result is otherwise tracked through queries
    pub fn definitions_untracked(self) -> &'tcx hir::definitions::Definitions {
        &self.untracked_resolutions.definitions
    }

    #[inline(always)]
    pub fn create_stable_hashing_context(self) -> StableHashingContext<'tcx> {
        let resolutions = &self.gcx.untracked_resolutions;
        StableHashingContext::new(self.sess, &resolutions.definitions, &*resolutions.cstore)
    }

    /// Like `create_stable_hashing_context`, but configured to ignore spans.
    #[inline(always)]
    pub fn create_no_span_stable_hashing_context(self) -> StableHashingContext<'tcx> {
        let resolutions = &self.gcx.untracked_resolutions;
        StableHashingContext::ignore_spans(
            self.sess,
            &resolutions.definitions,
            &*resolutions.cstore,
        )
    }

    /// Serializes the on-disk query result cache, if one exists; otherwise `Ok(0)`.
    pub fn serialize_query_result_cache(self, encoder: FileEncoder) -> FileEncodeResult {
        self.on_disk_cache.as_ref().map_or(Ok(0), |c| c.serialize(self, encoder))
    }

    /// If `true`, we should use lazy normalization for constants, otherwise
    /// we still evaluate them eagerly.
    #[inline]
    pub fn lazy_normalization(self) -> bool {
        let features = self.features();
        // Note: We only use lazy normalization for generic const expressions.
        features.generic_const_exprs
    }

    #[inline]
    pub fn local_crate_exports_generics(self) -> bool {
        debug_assert!(self.sess.opts.share_generics());

        self.sess.crate_types().iter().any(|crate_type| {
            match crate_type {
                CrateType::Executable
                | CrateType::Staticlib
                | CrateType::ProcMacro
                | CrateType::Cdylib => false,

                // FIXME rust-lang/rust#64319, rust-lang/rust#64872:
                // We want to block export of generics from dylibs,
                // but we must fix rust-lang/rust#65890 before we can
                // do that robustly.
                CrateType::Dylib => true,

                CrateType::Rlib => true,
            }
        })
    }

    // Returns the `DefId` and the `BoundRegionKind` corresponding to the given region.
    pub fn is_suitable_region(self, region: Region<'tcx>) -> Option<FreeRegionInfo> {
        let (suitable_region_binding_scope, bound_region) = match *region {
            ty::ReFree(ref free_region) => {
                (free_region.scope.expect_local(), free_region.bound_region)
            }
            ty::ReEarlyBound(ref ebr) => (
                self.local_parent(ebr.def_id.expect_local()),
                ty::BoundRegionKind::BrNamed(ebr.def_id, ebr.name),
            ),
            _ => return None, // not a free region
        };

        let is_impl_item = match self.hir().find_by_def_id(suitable_region_binding_scope) {
            Some(Node::Item(..) | Node::TraitItem(..)) => false,
            Some(Node::ImplItem(..)) => {
                self.is_bound_region_in_impl_item(suitable_region_binding_scope)
            }
            _ => return None,
        };

        Some(FreeRegionInfo {
            def_id: suitable_region_binding_scope,
            boundregion: bound_region,
            is_impl_item,
        })
    }

    /// Given a `DefId` for an `fn`, return all the `dyn` and `impl` traits in its return type.
    pub fn return_type_impl_or_dyn_traits(
        self,
        scope_def_id: LocalDefId,
    ) -> Vec<&'tcx hir::Ty<'tcx>> {
        let hir_id = self.hir().local_def_id_to_hir_id(scope_def_id);
        // Bail out when there is no fn decl or no explicit return type to inspect.
        let Some(hir::FnDecl { output: hir::FnRetTy::Return(hir_output), .. }) = self.hir().fn_decl_by_hir_id(hir_id) else {
            return vec![];
        };

        let mut v = TraitObjectVisitor(vec![], self.hir());
        v.visit_ty(hir_output);
        v.0
    }

    /// If the `fn`-like item at `scope_def_id` returns an `impl Trait`, returns
    /// the (region-erased) output type and the span of the declared return type.
    pub fn return_type_impl_trait(self, scope_def_id: LocalDefId) -> Option<(Ty<'tcx>, Span)> {
        // `type_of()` will fail on these (#55796, #86483), so only allow `fn`s or closures.
        match self.hir().get_by_def_id(scope_def_id) {
            Node::Item(&hir::Item { kind: ItemKind::Fn(..), .. }) => {}
            Node::TraitItem(&hir::TraitItem { kind: TraitItemKind::Fn(..), .. }) => {}
            Node::ImplItem(&hir::ImplItem { kind: ImplItemKind::Fn(..), .. }) => {}
            Node::Expr(&hir::Expr { kind: ExprKind::Closure(..), .. }) => {}
            _ => return None,
        }

        let ret_ty = self.type_of(scope_def_id);
        match ret_ty.kind() {
            ty::FnDef(_, _) => {
                let sig = ret_ty.fn_sig(self);
                let output = self.erase_late_bound_regions(sig.output());
                if output.is_impl_trait() {
                    let hir_id = self.hir().local_def_id_to_hir_id(scope_def_id);
                    let fn_decl = self.hir().fn_decl_by_hir_id(hir_id).unwrap();
                    Some((output, fn_decl.output.span()))
                } else {
                    None
                }
            }
            _ => None,
        }
    }

    // Checks if the bound region is in Impl Item.
    pub fn is_bound_region_in_impl_item(self, suitable_region_binding_scope: LocalDefId) -> bool {
        let container_id =
            self.associated_item(suitable_region_binding_scope.to_def_id()).container.id();
        if self.impl_trait_ref(container_id).is_some() {
            // For now, we do not try to target impls of traits. This is
            // because this message is going to suggest that the user
            // change the fn signature, but they may not be free to do so,
            // since the signature must match the trait.
            //
            // FIXME(#42706) -- in some cases, we could do better here.
            return true;
        }
        false
    }

    /// Determines whether identifiers in the assembly have strict naming rules.
    /// Currently, only NVPTX* targets need it.
    pub fn has_strict_asm_symbol_naming(self) -> bool {
        self.sess.target.arch.contains("nvptx")
    }

    /// Returns `&'static core::panic::Location<'static>`.
    pub fn caller_location_ty(self) -> Ty<'tcx> {
        self.mk_imm_ref(
            self.lifetimes.re_static,
            self.bound_type_of(self.require_lang_item(LangItem::PanicLocation, None))
                .subst(self, self.mk_substs([self.lifetimes.re_static.into()].iter())),
        )
    }

    /// Returns a displayable description and article for the given `def_id` (e.g. `("a", "struct")`).
    pub fn article_and_description(self, def_id: DefId) -> (&'static str, &'static str) {
        match self.def_kind(def_id) {
            DefKind::Generator => match self.generator_kind(def_id).unwrap() {
                rustc_hir::GeneratorKind::Async(..) => ("an", "async closure"),
                rustc_hir::GeneratorKind::Gen => ("a", "generator"),
            },
            def_kind => (def_kind.article(), def_kind.descr(def_id)),
        }
    }

    /// The type-length limit for the current crate (from the `limits` query).
    pub fn type_length_limit(self) -> Limit {
        self.limits(()).type_length_limit
    }

    /// The recursion limit for the current crate.
    pub fn recursion_limit(self) -> Limit {
        self.limits(()).recursion_limit
    }

    /// The move-size limit for the current crate.
    pub fn move_size_limit(self) -> Limit {
        self.limits(()).move_size_limit
    }

    /// The const-evaluation step limit for the current crate.
    pub fn const_eval_limit(self) -> Limit {
        self.limits(()).const_eval_limit
    }

    /// Iterates over all traits in the local crate and every dependency.
    pub fn all_traits(self) -> impl Iterator<Item = DefId> + 'tcx {
        iter::once(LOCAL_CRATE)
            .chain(self.crates(()).iter().copied())
            .flat_map(move |cnum| self.traits_in_crate(cnum).iter().copied())
    }
}

/// A trait implemented for all `X<'a>` types that can be safely and
/// efficiently converted to `X<'tcx>` as long as they are part of the
/// provided `TyCtxt<'tcx>`.
/// This can be done, for example, for `Ty<'tcx>` or `SubstsRef<'tcx>`
/// by looking them up in their respective interners.
///
/// However, this is still not the best implementation as it does
/// need to compare the components, even for interned values.
/// It would be more efficient if `TypedArena` provided a way to
/// determine whether the address is in the allocated range.
///
/// `None` is returned if the value or one of the components is not part
/// of the provided context.
/// For `Ty`, `None` can be returned if either the type interner doesn't
/// contain the `TyKind` key or if the address of the interned
/// pointer differs. The latter case is possible if a primitive type,
/// e.g., `()` or `u8`, was interned in a different context.
pub trait Lift<'tcx>: fmt::Debug {
    type Lifted: fmt::Debug + 'tcx;
    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted>;
}

// Generates a no-op `Lift` impl for an interned pointer type: membership in
// the interner set justifies transmuting the lifetime.
macro_rules! nop_lift {
    ($set:ident; $ty:ty => $lifted:ty) => {
        impl<'a, 'tcx> Lift<'tcx> for $ty {
            type Lifted = $lifted;
            fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
                if tcx.interners.$set.contains_pointer_to(&InternedInSet(&*self.0.0)) {
                    // SAFETY: `self` is interned and therefore valid
                    // for the entire lifetime of the `TyCtxt`.
                    Some(unsafe { mem::transmute(self) })
                } else {
                    None
                }
            }
        }
    };
}

// Can't use the macros as we have reuse the `substs` here.
//
// See `intern_type_list` for more info.
impl<'a, 'tcx> Lift<'tcx> for &'a List<Ty<'a>> {
    type Lifted = &'tcx List<Ty<'tcx>>;
    fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
        if self.is_empty() {
            return Some(List::empty());
        }
        if tcx.interners.substs.contains_pointer_to(&InternedInSet(self.as_substs())) {
            // SAFETY: `self` is interned and therefore valid
            // for the entire lifetime of the `TyCtxt`.
            Some(unsafe { mem::transmute::<&'a List<Ty<'a>>, &'tcx List<Ty<'tcx>>>(self) })
        } else {
            None
        }
    }
}

// Like `nop_lift!`, but for interned `List`s; the empty list is shared and
// always liftable.
macro_rules! nop_list_lift {
    ($set:ident; $ty:ty => $lifted:ty) => {
        impl<'a, 'tcx> Lift<'tcx> for &'a List<$ty> {
            type Lifted = &'tcx List<$lifted>;
            fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
                if self.is_empty() {
                    return Some(List::empty());
                }
                if tcx.interners.$set.contains_pointer_to(&InternedInSet(self)) {
                    Some(unsafe { mem::transmute(self) })
                } else {
                    None
                }
            }
        }
    };
}

nop_lift! {type_; Ty<'a> => Ty<'tcx>}
nop_lift! {region; Region<'a> => Region<'tcx>}
nop_lift! {const_; Const<'a> => Const<'tcx>}
nop_lift! {const_allocation; ConstAllocation<'a> => ConstAllocation<'tcx>}
nop_lift! {predicate; Predicate<'a> => Predicate<'tcx>}

nop_list_lift! {poly_existential_predicates; ty::Binder<'a, ExistentialPredicate<'a>> => ty::Binder<'tcx, ExistentialPredicate<'tcx>>}
nop_list_lift! {predicates; Predicate<'a> => Predicate<'tcx>}
nop_list_lift! {canonical_var_infos; CanonicalVarInfo<'a> => CanonicalVarInfo<'tcx>}
nop_list_lift! {projs; ProjectionKind => ProjectionKind}
nop_list_lift! {bound_variable_kinds; ty::BoundVariableKind => ty::BoundVariableKind}

// This is the impl for `&'a InternalSubsts<'a>`.
nop_list_lift! {substs; GenericArg<'a> => GenericArg<'tcx>}

CloneLiftImpls! {
    for<'tcx> {
        Constness,
        traits::WellFormedLoc,
    }
}

pub mod tls {
    use super::{ptr_eq, GlobalCtxt, TyCtxt};

    use crate::dep_graph::TaskDepsRef;
    use crate::ty::query;
    use rustc_data_structures::sync::{self, Lock};
    use rustc_data_structures::thin_vec::ThinVec;
    use rustc_errors::Diagnostic;
    use std::mem;

    #[cfg(not(parallel_compiler))]
    use std::cell::Cell;

    #[cfg(parallel_compiler)]
    use rustc_rayon_core as rayon_core;

    /// This is the implicit state of rustc. It contains the current
    /// `TyCtxt` and query. It is updated when creating a local interner or
    /// executing a new query. Whenever there's a `TyCtxt` value available
    /// you should also have access to an `ImplicitCtxt` through the functions
    /// in this module.
    #[derive(Clone)]
    pub struct ImplicitCtxt<'a, 'tcx> {
        /// The current `TyCtxt`.
        pub tcx: TyCtxt<'tcx>,

        /// The current query job, if any. This is updated by `JobOwner::start` in
        /// `ty::query::plumbing` when executing a query.
        pub query: Option<query::QueryJobId>,

        /// Where to store diagnostics for the current query job, if any.
        /// This is updated by `JobOwner::start` in `ty::query::plumbing` when executing a query.
        pub diagnostics: Option<&'a Lock<ThinVec<Diagnostic>>>,

        /// Used to prevent layout from recursing too deeply.
        pub layout_depth: usize,

        /// The current dep graph task. This is used to add dependencies to queries
        /// when executing them.
        pub task_deps: TaskDepsRef<'a>,
    }

    impl<'a, 'tcx> ImplicitCtxt<'a, 'tcx> {
        /// Creates a fresh implicit context with no active query, no diagnostics
        /// sink, zero layout depth, and dep-tracking disabled.
        pub fn new(gcx: &'tcx GlobalCtxt<'tcx>) -> Self {
            let tcx = TyCtxt { gcx };
            ImplicitCtxt {
                tcx,
                query: None,
                diagnostics: None,
                layout_depth: 0,
                task_deps: TaskDepsRef::Ignore,
            }
        }
    }

    /// Sets Rayon's thread-local variable, which is preserved for Rayon jobs
    /// to `value` during the call to `f`. It is restored to its previous value after.
    /// This is used to set the pointer to the new `ImplicitCtxt`.
    #[cfg(parallel_compiler)]
    #[inline]
    fn set_tlv<F: FnOnce() -> R, R>(value: usize, f: F) -> R {
        rayon_core::tlv::with(value, f)
    }

    /// Gets Rayon's thread-local variable, which is preserved for Rayon jobs.
    /// This is used to get the pointer to the current `ImplicitCtxt`.
    #[cfg(parallel_compiler)]
    #[inline]
    pub fn get_tlv() -> usize {
        rayon_core::tlv::get()
    }

    #[cfg(not(parallel_compiler))]
    thread_local! {
        /// A thread local variable that stores a pointer to the current `ImplicitCtxt`.
        /// A value of 0 means no context is set.
        static TLV: Cell<usize> = const { Cell::new(0) };
    }

    /// Sets TLV to `value` during the call to `f`.
    /// It is restored to its previous value after.
    /// This is used to set the pointer to the new `ImplicitCtxt`.
    #[cfg(not(parallel_compiler))]
    #[inline]
    fn set_tlv<F: FnOnce() -> R, R>(value: usize, f: F) -> R {
        let old = get_tlv();
        // Restore the previous value even if `f` unwinds.
        let _reset = rustc_data_structures::OnDrop(move || TLV.with(|tlv| tlv.set(old)));
        TLV.with(|tlv| tlv.set(value));
        f()
    }

    /// Gets the pointer to the current `ImplicitCtxt`.
    #[cfg(not(parallel_compiler))]
    #[inline]
    fn get_tlv() -> usize {
        TLV.with(|tlv| tlv.get())
    }

    /// Sets `context` as the new current `ImplicitCtxt` for the duration of the function `f`.
    #[inline]
    pub fn enter_context<'a, 'tcx, F, R>(context: &ImplicitCtxt<'a, 'tcx>, f: F) -> R
    where
        F: FnOnce(&ImplicitCtxt<'a, 'tcx>) -> R,
    {
        set_tlv(context as *const _ as usize, || f(&context))
    }

    /// Allows access to the current `ImplicitCtxt` in a closure if one is available.
    #[inline]
    pub fn with_context_opt<F, R>(f: F) -> R
    where
        F: for<'a, 'tcx> FnOnce(Option<&ImplicitCtxt<'a, 'tcx>>) -> R,
    {
        let context = get_tlv();
        if context == 0 {
            f(None)
        } else {
            // We could get an `ImplicitCtxt` pointer from another thread.
            // Ensure that `ImplicitCtxt` is `Sync`.
            sync::assert_sync::<ImplicitCtxt<'_, '_>>();

            // SAFETY: the TLV value is only ever set (by `enter_context`) to
            // the address of a live `ImplicitCtxt` for the dynamic extent of
            // `set_tlv`, so a non-zero value is a valid pointer here.
            unsafe { f(Some(&*(context as *const ImplicitCtxt<'_, '_>))) }
        }
    }

    /// Allows access to the current `ImplicitCtxt`.
    /// Panics if there is no `ImplicitCtxt` available.
    #[inline]
    pub fn with_context<F, R>(f: F) -> R
    where
        F: for<'a, 'tcx> FnOnce(&ImplicitCtxt<'a, 'tcx>) -> R,
    {
        with_context_opt(|opt_context| f(opt_context.expect("no ImplicitCtxt stored in tls")))
    }

    /// Allows access to the current `ImplicitCtxt` whose tcx field is the same as the tcx argument
    /// passed in. This means the closure is given an `ImplicitCtxt` with the same `'tcx` lifetime
    /// as the `TyCtxt` passed in.
    /// This will panic if you pass it a `TyCtxt` which is different from the current
    /// `ImplicitCtxt`'s `tcx` field.
    #[inline]
    pub fn with_related_context<'tcx, F, R>(tcx: TyCtxt<'tcx>, f: F) -> R
    where
        F: FnOnce(&ImplicitCtxt<'_, 'tcx>) -> R,
    {
        with_context(|context| unsafe {
            // The pointer-equality assert is what justifies rebranding the
            // context's lifetimes to `'tcx` via `transmute` below.
            assert!(ptr_eq(context.tcx.gcx, tcx.gcx));
            let context: &ImplicitCtxt<'_, '_> = mem::transmute(context);
            f(context)
        })
    }

    /// Allows access to the `TyCtxt` in the current `ImplicitCtxt`.
    /// Panics if there is no `ImplicitCtxt` available.
    #[inline]
    pub fn with<F, R>(f: F) -> R
    where
        F: for<'tcx> FnOnce(TyCtxt<'tcx>) -> R,
    {
        with_context(|context| f(context.tcx))
    }

    /// Allows access to the `TyCtxt` in the current `ImplicitCtxt`.
    /// The closure is passed None if there is no `ImplicitCtxt` available.
    #[inline]
    pub fn with_opt<F, R>(f: F) -> R
    where
        F: for<'tcx> FnOnce(Option<TyCtxt<'tcx>>) -> R,
    {
        with_context_opt(|opt_context| f(opt_context.map(|context| context.tcx)))
    }
}

// Prints per-`TyKind`-variant interner statistics; used by `debug_stats` below.
macro_rules! sty_debug_print {
    ($fmt: expr, $ctxt: expr, $($variant: ident),*) => {{
        // Curious inner module to allow variant names to be used as
        // variable names.
        #[allow(non_snake_case)]
        mod inner {
            use crate::ty::{self, TyCtxt};
            use crate::ty::context::InternedInSet;

            // Counters for one `TyKind` variant: how many interned types of
            // that variant exist, and how many contain each kind of
            // inference variable.
            #[derive(Copy, Clone)]
            struct DebugStat {
                total: usize,
                lt_infer: usize,
                ty_infer: usize,
                ct_infer: usize,
                all_infer: usize,
            }

            pub fn go(fmt: &mut std::fmt::Formatter<'_>, tcx: TyCtxt<'_>) -> std::fmt::Result {
                let mut total = DebugStat {
                    total: 0,
                    lt_infer: 0,
                    ty_infer: 0,
                    ct_infer: 0,
                    all_infer: 0,
                };
                $(let mut $variant = total;)*

                let shards = tcx.interners.type_.lock_shards();
                let types = shards.iter().flat_map(|shard| shard.keys());
                for &InternedInSet(t) in types {
                    let variant = match t.kind {
                        // Primitive types are not broken out per-variant.
                        ty::Bool | ty::Char | ty::Int(..) | ty::Uint(..) |
                            ty::Float(..) | ty::Str | ty::Never => continue,
                        ty::Error(_) => /* unimportant */ continue,
                        $(ty::$variant(..) => &mut $variant,)*
                    };
                    let lt = t.flags.intersects(ty::TypeFlags::HAS_RE_INFER);
                    let ty = t.flags.intersects(ty::TypeFlags::HAS_TY_INFER);
                    let ct = t.flags.intersects(ty::TypeFlags::HAS_CT_INFER);

                    variant.total += 1;
                    total.total += 1;
                    if lt { total.lt_infer += 1; variant.lt_infer += 1 }
                    if ty { total.ty_infer += 1; variant.ty_infer += 1 }
                    if ct { total.ct_infer += 1; variant.ct_infer += 1 }
                    if lt && ty && ct { total.all_infer += 1; variant.all_infer += 1 }
                }
                writeln!(fmt, "Ty interner             total           ty lt ct all")?;
                $(writeln!(fmt, "    {:18}: {uses:6} {usespc:4.1}%, \
                            {ty:4.1}% {lt:5.1}% {ct:4.1}% {all:4.1}%",
                    stringify!($variant),
                    uses = $variant.total,
                    usespc = $variant.total as f64 * 100.0 / total.total as f64,
                    ty = $variant.ty_infer as f64 * 100.0 / total.total as f64,
                    lt = $variant.lt_infer as f64 * 100.0 / total.total as f64,
                    ct = $variant.ct_infer as f64 * 100.0 / total.total as f64,
                    all = $variant.all_infer as f64 * 100.0 / total.total as f64)?;
                )*
                writeln!(fmt, "                  total {uses:6} \
                          {ty:4.1}% {lt:5.1}% {ct:4.1}% {all:4.1}%",
                    uses = total.total,
                    ty = total.ty_infer as f64 * 100.0 / total.total as f64,
                    lt = total.lt_infer as f64 * 100.0 / total.total as f64,
                    ct = total.ct_infer as f64 * 100.0 / total.total as f64,
                    all = total.all_infer as f64 * 100.0 / total.total as f64)
            }
        }

        inner::go($fmt, $ctxt)
    }}
}

impl<'tcx> TyCtxt<'tcx> {
    /// Returns an opaque value whose `Debug` impl prints interner statistics
    /// (per-variant type counts and sizes of the other interner tables).
    pub fn debug_stats(self) -> impl std::fmt::Debug + 'tcx {
        struct DebugStats<'tcx>(TyCtxt<'tcx>);

        impl<'tcx> std::fmt::Debug for DebugStats<'tcx> {
            fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                sty_debug_print!(
                    fmt,
                    self.0,
                    Adt,
                    Array,
                    Slice,
                    RawPtr,
                    Ref,
                    FnDef,
                    FnPtr,
                    Placeholder,
                    Generator,
                    GeneratorWitness,
                    Dynamic,
                    Closure,
                    Tuple,
                    Bound,
                    Param,
                    Infer,
                    Projection,
                    Opaque,
                    Foreign
                )?;

                writeln!(fmt, "InternalSubsts interner: #{}", self.0.interners.substs.len())?;
                writeln!(fmt, "Region interner: #{}", self.0.interners.region.len())?;
                writeln!(
                    fmt,
                    "Const Allocation interner: #{}",
                    self.0.interners.const_allocation.len()
                )?;
                writeln!(fmt, "Layout interner: #{}", self.0.interners.layout.len())?;

                Ok(())
            }
        }

        DebugStats(self)
    }
}

// This type holds a `T` in the interner. The `T` is stored in the arena and
// this type just holds a pointer to it, but it still effectively owns it. It
// impls `Borrow` so that it can be looked up using the original
// (non-arena-memory-owning) types.
struct InternedInSet<'tcx, T: ?Sized>(&'tcx T);

impl<'tcx, T: 'tcx + ?Sized> Clone for InternedInSet<'tcx, T> {
    fn clone(&self) -> Self {
        InternedInSet(self.0)
    }
}

impl<'tcx, T: 'tcx + ?Sized> Copy for InternedInSet<'tcx, T> {}

impl<'tcx, T: 'tcx + ?Sized> IntoPointer for InternedInSet<'tcx, T> {
    fn into_pointer(&self) -> *const () {
        self.0 as *const _ as *const ()
    }
}

#[allow(rustc::usage_of_ty_tykind)]
impl<'tcx> Borrow<TyKind<'tcx>> for InternedInSet<'tcx, WithStableHash<TyS<'tcx>>> {
    fn borrow<'a>(&'a self) -> &'a TyKind<'tcx> {
        &self.0.kind
    }
}

impl<'tcx> PartialEq for InternedInSet<'tcx, WithStableHash<TyS<'tcx>>> {
    fn eq(&self, other: &InternedInSet<'tcx, WithStableHash<TyS<'tcx>>>) -> bool {
        // The `Borrow` trait requires that `x.borrow() == y.borrow()` equals
        // `x == y`.
        self.0.kind == other.0.kind
    }
}

impl<'tcx> Eq for InternedInSet<'tcx, WithStableHash<TyS<'tcx>>> {}

impl<'tcx> Hash for InternedInSet<'tcx, WithStableHash<TyS<'tcx>>> {
    fn hash<H: Hasher>(&self, s: &mut H) {
        // The `Borrow` trait requires that `x.borrow().hash(s) == x.hash(s)`.
        self.0.kind.hash(s)
    }
}

impl<'tcx> Borrow<Binder<'tcx, PredicateKind<'tcx>>> for InternedInSet<'tcx, PredicateS<'tcx>> {
    fn borrow<'a>(&'a self) -> &'a Binder<'tcx, PredicateKind<'tcx>> {
        &self.0.kind
    }
}

impl<'tcx> PartialEq for InternedInSet<'tcx, PredicateS<'tcx>> {
    fn eq(&self, other: &InternedInSet<'tcx, PredicateS<'tcx>>) -> bool {
        // The `Borrow` trait requires that `x.borrow() == y.borrow()` equals
        // `x == y`.
        self.0.kind == other.0.kind
    }
}

impl<'tcx> Eq for InternedInSet<'tcx, PredicateS<'tcx>> {}

impl<'tcx> Hash for InternedInSet<'tcx, PredicateS<'tcx>> {
    fn hash<H: Hasher>(&self, s: &mut H) {
        // The `Borrow` trait requires that `x.borrow().hash(s) == x.hash(s)`.
        self.0.kind.hash(s)
    }
}

impl<'tcx, T> Borrow<[T]> for InternedInSet<'tcx, List<T>> {
    fn borrow<'a>(&'a self) -> &'a [T] {
        &self.0[..]
    }
}

impl<'tcx, T: PartialEq> PartialEq for InternedInSet<'tcx, List<T>> {
    fn eq(&self, other: &InternedInSet<'tcx, List<T>>) -> bool {
        // The `Borrow` trait requires that `x.borrow() == y.borrow()` equals
        // `x == y`.
        self.0[..] == other.0[..]
    }
}

impl<'tcx, T: Eq> Eq for InternedInSet<'tcx, List<T>> {}

impl<'tcx, T: Hash> Hash for InternedInSet<'tcx, List<T>> {
    fn hash<H: Hasher>(&self, s: &mut H) {
        // The `Borrow` trait requires that `x.borrow().hash(s) == x.hash(s)`.
        self.0[..].hash(s)
    }
}

// Generates, for each listed interner, the `Borrow`/`PartialEq`/`Eq`/`Hash`
// impls needed to look values up by content, plus a `TyCtxt` method that
// interns a value (arena-allocating it on first sight) and wraps the shared
// reference in the given constructor.
macro_rules! direct_interners {
    ($($name:ident: $method:ident($ty:ty): $ret_ctor:ident -> $ret_ty:ty,)+) => {
        $(impl<'tcx> Borrow<$ty> for InternedInSet<'tcx, $ty> {
            fn borrow<'a>(&'a self) -> &'a $ty {
                &self.0
            }
        }

        impl<'tcx> PartialEq for InternedInSet<'tcx, $ty> {
            fn eq(&self, other: &Self) -> bool {
                // The `Borrow` trait requires that `x.borrow() == y.borrow()`
                // equals `x == y`.
                self.0 == other.0
            }
        }

        impl<'tcx> Eq for InternedInSet<'tcx, $ty> {}

        impl<'tcx> Hash for InternedInSet<'tcx, $ty> {
            fn hash<H: Hasher>(&self, s: &mut H) {
                // The `Borrow` trait requires that `x.borrow().hash(s) ==
                // x.hash(s)`.
                self.0.hash(s)
            }
        }

        impl<'tcx> TyCtxt<'tcx> {
            pub fn $method(self, v: $ty) -> $ret_ty {
                $ret_ctor(Interned::new_unchecked(self.interners.$name.intern(v, |v| {
                    InternedInSet(self.interners.arena.alloc(v))
                }).0))
            }
        })+
    }
}

direct_interners! {
    region: mk_region(RegionKind): Region -> Region<'tcx>,
    const_: mk_const(ConstS<'tcx>): Const -> Const<'tcx>,
    const_allocation: intern_const_alloc(Allocation): ConstAllocation -> ConstAllocation<'tcx>,
    layout: intern_layout(LayoutS<'tcx>): Layout -> Layout<'tcx>,
    adt_def: intern_adt_def(AdtDefData): AdtDef -> AdtDef<'tcx>,
}

// Generates `TyCtxt` methods that intern slices into shared `List<T>`s.
// Note: the generated methods do NOT handle the empty slice; callers (the
// public `intern_*` wrappers below) return `List::empty()` for that case.
macro_rules! slice_interners {
    ($($field:ident: $method:ident($ty:ty)),+ $(,)?) => (
        impl<'tcx> TyCtxt<'tcx> {
            $(pub fn $method(self, v: &[$ty]) -> &'tcx List<$ty> {
                self.interners.$field.intern_ref(v, || {
                    InternedInSet(List::from_arena(&*self.arena, v))
                }).0
            })+
        }
    );
}

slice_interners!(
    substs: _intern_substs(GenericArg<'tcx>),
    canonical_var_infos: _intern_canonical_var_infos(CanonicalVarInfo<'tcx>),
    poly_existential_predicates:
        _intern_poly_existential_predicates(ty::Binder<'tcx, ExistentialPredicate<'tcx>>),
    predicates: _intern_predicates(Predicate<'tcx>),
    projs: _intern_projs(ProjectionKind),
    place_elems: _intern_place_elems(PlaceElem<'tcx>),
    bound_variable_kinds: _intern_bound_variable_kinds(ty::BoundVariableKind),
);

impl<'tcx> TyCtxt<'tcx> {
    /// Given a `fn` type, returns an equivalent `unsafe fn` type;
    /// that is, a `fn` type that is equivalent in every way for being
    /// unsafe.
    pub fn safe_to_unsafe_fn_ty(self, sig: PolyFnSig<'tcx>) -> Ty<'tcx> {
        assert_eq!(sig.unsafety(), hir::Unsafety::Normal);
        self.mk_fn_ptr(sig.map_bound(|sig| ty::FnSig { unsafety: hir::Unsafety::Unsafe, ..sig }))
    }

    /// Given the def_id of a Trait `trait_def_id` and the name of an associated item `assoc_name`
    /// returns true if the `trait_def_id` defines an associated item of name `assoc_name`.
    /// The search includes all transitive supertraits of `trait_def_id`.
    pub fn trait_may_define_assoc_type(self, trait_def_id: DefId, assoc_name: Ident) -> bool {
        self.super_traits_of(trait_def_id).any(|trait_did| {
            self.associated_items(trait_did)
                .find_by_name_and_kind(self, assoc_name, ty::AssocKind::Type, trait_did)
                .is_some()
        })
    }

    /// Computes the def-ids of the transitive supertraits of `trait_def_id`. This (intentionally)
    /// does not compute the full elaborated super-predicates but just the set of def-ids. It is used
    /// to identify which traits may define a given associated type to help avoid cycle errors.
    /// Returns a `DefId` iterator (lazy depth-first walk, starting with
    /// `trait_def_id` itself; the `set` guards against cycles and duplicates).
    fn super_traits_of(self, trait_def_id: DefId) -> impl Iterator<Item = DefId> + 'tcx {
        let mut set = FxHashSet::default();
        let mut stack = vec![trait_def_id];

        set.insert(trait_def_id);

        iter::from_fn(move || -> Option<DefId> {
            let trait_did = stack.pop()?;
            let generic_predicates = self.super_predicates_of(trait_did);

            for (predicate, _) in generic_predicates.predicates {
                if let ty::PredicateKind::Trait(data) = predicate.kind().skip_binder() {
                    if set.insert(data.def_id()) {
                        stack.push(data.def_id());
                    }
                }
            }

            Some(trait_did)
        })
    }

    /// Given a closure signature, returns an equivalent fn signature. Detuples
    /// and so forth -- so e.g., if we have a sig with `Fn<(u32, i32)>` then
    /// you would get a `fn(u32, i32)`.
    /// `unsafety` determines the unsafety of the fn signature. If you pass
    /// `hir::Unsafety::Unsafe` in the previous example, then you would get
    /// an `unsafe fn (u32, i32)`.
    /// It cannot convert a closure that requires unsafe.
    pub fn signature_unclosure(
        self,
        sig: PolyFnSig<'tcx>,
        unsafety: hir::Unsafety,
    ) -> PolyFnSig<'tcx> {
        sig.map_bound(|s| {
            // A closure sig's sole input is the tupled-up argument types.
            let params_iter = match s.inputs()[0].kind() {
                ty::Tuple(params) => params.into_iter(),
                _ => bug!(),
            };
            self.mk_fn_sig(params_iter, s.output(), s.c_variadic, unsafety, abi::Abi::Rust)
        })
    }

    /// Same as `self.mk_region(kind)`, but avoids accessing the interners if
    /// `*r == kind`.
    #[inline]
    pub fn reuse_or_mk_region(self, r: Region<'tcx>, kind: RegionKind) -> Region<'tcx> {
        if *r == kind { r } else { self.mk_region(kind) }
    }

    /// Interns the given `TyKind` and returns the resulting `Ty`.
    #[allow(rustc::usage_of_ty_tykind)]
    #[inline]
    pub fn mk_ty(self, st: TyKind<'tcx>) -> Ty<'tcx> {
        self.interners.intern_ty(st, self.sess, &self.gcx.untracked_resolutions)
    }

    /// Interns the given predicate binder and returns the resulting `Predicate`.
    #[inline]
    pub fn mk_predicate(self, binder: Binder<'tcx, PredicateKind<'tcx>>) -> Predicate<'tcx> {
        self.interners.intern_predicate(binder)
    }

    /// Same as `self.mk_predicate(binder)`, but avoids accessing the interners
    /// if `pred` already has kind `binder`.
    #[inline]
    pub fn reuse_or_mk_predicate(
        self,
        pred: Predicate<'tcx>,
        binder: Binder<'tcx, PredicateKind<'tcx>>,
    ) -> Predicate<'tcx> {
        if pred.kind() != binder { self.mk_predicate(binder) } else { pred }
    }

    /// Returns the pre-interned type for the given machine signed-integer kind.
    pub fn mk_mach_int(self, tm: IntTy) -> Ty<'tcx> {
        match tm {
            IntTy::Isize => self.types.isize,
            IntTy::I8 => self.types.i8,
            IntTy::I16 => self.types.i16,
            IntTy::I32 => self.types.i32,
            IntTy::I64 => self.types.i64,
            IntTy::I128 => self.types.i128,
        }
    }

    /// Returns the pre-interned type for the given machine unsigned-integer kind.
    pub fn mk_mach_uint(self, tm: UintTy) -> Ty<'tcx> {
        match tm {
            UintTy::Usize => self.types.usize,
            UintTy::U8 => self.types.u8,
            UintTy::U16 => self.types.u16,
            UintTy::U32 => self.types.u32,
            UintTy::U64 => self.types.u64,
            UintTy::U128 => self.types.u128,
        }
    }

    /// Returns the pre-interned type for the given machine float kind.
    pub fn mk_mach_float(self, tm: FloatTy) -> Ty<'tcx> {
        match tm {
            FloatTy::F32 => self.types.f32,
            FloatTy::F64 => self.types.f64,
        }
    }

    /// Returns `&'static str`.
    #[inline]
    pub fn mk_static_str(self) -> Ty<'tcx> {
        self.mk_imm_ref(self.lifetimes.re_static, self.types.str_)
    }

    #[inline]
    pub fn mk_adt(self, def: AdtDef<'tcx>, substs: SubstsRef<'tcx>) -> Ty<'tcx> {
        // Take a copy of substs so that we own the vectors inside.
        self.mk_ty(Adt(def, substs))
    }

    #[inline]
    pub fn mk_foreign(self, def_id: DefId) -> Ty<'tcx> {
        self.mk_ty(Foreign(def_id))
    }

    /// Builds `Wrapper<ty_param>` for an ADT generic over one type parameter
    /// (in position 0); any further type parameters must have defaults, which
    /// are substituted in.
    fn mk_generic_adt(self, wrapper_def_id: DefId, ty_param: Ty<'tcx>) -> Ty<'tcx> {
        let adt_def = self.adt_def(wrapper_def_id);
        let substs =
            InternalSubsts::for_item(self, wrapper_def_id, |param, substs| match param.kind {
                GenericParamDefKind::Lifetime | GenericParamDefKind::Const { .. } => bug!(),
                GenericParamDefKind::Type { has_default, .. } => {
                    if param.index == 0 {
                        ty_param.into()
                    } else {
                        assert!(has_default);
                        self.bound_type_of(param.def_id).subst(self, substs).into()
                    }
                }
            });
        self.mk_ty(Adt(adt_def, substs))
    }

    /// Returns `Box<ty>` (via the `owned_box` lang item).
    #[inline]
    pub fn mk_box(self, ty: Ty<'tcx>) -> Ty<'tcx> {
        let def_id = self.require_lang_item(LangItem::OwnedBox, None);
        self.mk_generic_adt(def_id, ty)
    }

    /// Returns `Item<ty>` for the given lang item, or `None` if the lang item
    /// is not defined.
    #[inline]
    pub fn mk_lang_item(self, ty: Ty<'tcx>, item: LangItem) -> Option<Ty<'tcx>> {
        let def_id = self.lang_items().require(item).ok()?;
        Some(self.mk_generic_adt(def_id, ty))
    }

    /// Returns `Item<ty>` for the given diagnostic item, or `None` if it is
    /// not defined.
    #[inline]
    pub fn mk_diagnostic_item(self, ty: Ty<'tcx>, name: Symbol) -> Option<Ty<'tcx>> {
        let def_id = self.get_diagnostic_item(name)?;
        Some(self.mk_generic_adt(def_id, ty))
    }

    /// Returns `MaybeUninit<ty>`.
    #[inline]
    pub fn mk_maybe_uninit(self, ty: Ty<'tcx>) -> Ty<'tcx> {
        let def_id = self.require_lang_item(LangItem::MaybeUninit, None);
        self.mk_generic_adt(def_id, ty)
    }

    #[inline]
    pub fn mk_ptr(self, tm: TypeAndMut<'tcx>) -> Ty<'tcx> {
        self.mk_ty(RawPtr(tm))
    }

    #[inline]
    pub fn mk_ref(self, r: Region<'tcx>, tm: TypeAndMut<'tcx>) -> Ty<'tcx> {
        self.mk_ty(Ref(r, tm.ty, tm.mutbl))
    }

    #[inline]
    pub fn mk_mut_ref(self, r: Region<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
        self.mk_ref(r, TypeAndMut { ty, mutbl: hir::Mutability::Mut })
    }

    #[inline]
    pub fn mk_imm_ref(self, r: Region<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
        self.mk_ref(r, TypeAndMut { ty, mutbl: hir::Mutability::Not })
    }

    #[inline]
    pub fn mk_mut_ptr(self, ty: Ty<'tcx>) -> Ty<'tcx> {
        self.mk_ptr(TypeAndMut { ty, mutbl: hir::Mutability::Mut })
    }

    #[inline]
    pub fn mk_imm_ptr(self, ty: Ty<'tcx>) -> Ty<'tcx> {
        self.mk_ptr(TypeAndMut { ty, mutbl: hir::Mutability::Not })
    }

    /// Returns `[ty; n]` with the length as a usize const.
    #[inline]
    pub fn mk_array(self, ty: Ty<'tcx>, n: u64) -> Ty<'tcx> {
        self.mk_ty(Array(ty, ty::Const::from_usize(self, n)))
    }

    #[inline]
    pub fn mk_slice(self, ty: Ty<'tcx>) -> Ty<'tcx> {
        self.mk_ty(Slice(ty))
    }

    #[inline]
    pub fn intern_tup(self, ts: &[Ty<'tcx>]) -> Ty<'tcx> {
        self.mk_ty(Tuple(self.intern_type_list(&ts)))
    }

    pub fn mk_tup<I: InternAs<[Ty<'tcx>], Ty<'tcx>>>(self, iter: I) -> I::Output {
        iter.intern_with(|ts| self.mk_ty(Tuple(self.intern_type_list(&ts))))
    }

    #[inline]
    pub fn mk_unit(self) -> Ty<'tcx> {
        self.types.unit
    }

    /// The fallback type for unconstrained diverging expressions: `!` when
    /// the `never_type_fallback` feature is enabled, `()` otherwise.
    #[inline]
    pub fn mk_diverging_default(self) -> Ty<'tcx> {
        if self.features().never_type_fallback { self.types.never } else { self.types.unit }
    }

    #[inline]
    pub fn mk_fn_def(self, def_id: DefId, substs: SubstsRef<'tcx>) -> Ty<'tcx> {
        self.mk_ty(FnDef(def_id, substs))
    }

    #[inline]
    pub fn mk_fn_ptr(self, fty: PolyFnSig<'tcx>) -> Ty<'tcx> {
        self.mk_ty(FnPtr(fty))
    }

    #[inline]
    pub fn mk_dynamic(
        self,
        obj: &'tcx List<ty::Binder<'tcx, ExistentialPredicate<'tcx>>>,
        reg: ty::Region<'tcx>,
    ) -> Ty<'tcx> {
        self.mk_ty(Dynamic(obj, reg))
    }

    #[inline]
    pub fn mk_projection(self, item_def_id: DefId, substs: SubstsRef<'tcx>) -> Ty<'tcx> {
        self.mk_ty(Projection(ProjectionTy { item_def_id, substs }))
    }

    #[inline]
    pub fn mk_closure(self, closure_id: DefId, closure_substs: SubstsRef<'tcx>) -> Ty<'tcx> {
        self.mk_ty(Closure(closure_id, closure_substs))
    }

    #[inline]
    pub fn mk_generator(
        self,
        id: DefId,
        generator_substs: SubstsRef<'tcx>,
        movability: hir::Movability,
    ) -> Ty<'tcx> {
        self.mk_ty(Generator(id, generator_substs, movability))
    }

    #[inline]
    pub fn mk_generator_witness(self, types: ty::Binder<'tcx, &'tcx List<Ty<'tcx>>>) -> Ty<'tcx> {
        self.mk_ty(GeneratorWitness(types))
    }

    /// Returns the inference-variable type for `v`.
    #[inline]
    pub fn mk_ty_var(self, v: TyVid) -> Ty<'tcx> {
        self.mk_ty_infer(TyVar(v))
    }

    /// Returns the inference-variable constant for `v` with type `ty`.
    #[inline]
    pub fn mk_const_var(self, v: ConstVid<'tcx>, ty: Ty<'tcx>) -> Const<'tcx> {
        self.mk_const(ty::ConstS { val: ty::ConstKind::Infer(InferConst::Var(v)), ty })
    }

    #[inline]
    pub fn mk_int_var(self, v: IntVid) -> Ty<'tcx> {
        self.mk_ty_infer(IntVar(v))
    }

    #[inline]
    pub fn mk_float_var(self, v: FloatVid) -> Ty<'tcx> {
        self.mk_ty_infer(FloatVar(v))
    }

    #[inline]
    pub fn mk_ty_infer(self, it: InferTy) -> Ty<'tcx> {
        self.mk_ty(Infer(it))
    }

    #[inline]
    pub fn mk_const_infer(self, ic: InferConst<'tcx>, ty: Ty<'tcx>) -> ty::Const<'tcx> {
        self.mk_const(ty::ConstS { val: ty::ConstKind::Infer(ic), ty })
    }

    /// Returns the type for the generic type parameter at `index` named `name`.
    #[inline]
    pub fn mk_ty_param(self, index: u32, name: Symbol) -> Ty<'tcx> {
        self.mk_ty(Param(ParamTy { index, name }))
    }

    /// Returns the constant for the generic const parameter at `index` named
    /// `name`, with type `ty`.
    #[inline]
    pub fn mk_const_param(self, index: u32, name: Symbol, ty: Ty<'tcx>) -> Const<'tcx> {
        self.mk_const(ty::ConstS { val: ty::ConstKind::Param(ParamConst { index, name }), ty })
    }

    /// Builds the identity generic argument (region, type, or const) for the
    /// given generic parameter definition.
    pub fn mk_param_from_def(self, param: &ty::GenericParamDef) -> GenericArg<'tcx> {
        match param.kind {
            GenericParamDefKind::Lifetime => {
                self.mk_region(ty::ReEarlyBound(param.to_early_bound_region_data())).into()
            }
            GenericParamDefKind::Type { .. } => self.mk_ty_param(param.index, param.name).into(),
            GenericParamDefKind::Const { .. } => {
                self.mk_const_param(param.index, param.name, self.type_of(param.def_id)).into()
            }
        }
    }

    #[inline]
    pub fn mk_opaque(self, def_id: DefId, substs: SubstsRef<'tcx>) -> Ty<'tcx> {
        self.mk_ty(Opaque(def_id, substs))
    }

    pub fn mk_place_field(self, place: Place<'tcx>, f: Field, ty: Ty<'tcx>) -> Place<'tcx> {
        self.mk_place_elem(place, PlaceElem::Field(f, ty))
    }

    pub fn mk_place_deref(self, place: Place<'tcx>) -> Place<'tcx> {
        self.mk_place_elem(place, PlaceElem::Deref)
    }

    pub fn mk_place_downcast(
        self,
        place: Place<'tcx>,
        adt_def: AdtDef<'tcx>,
        variant_index: VariantIdx,
    ) -> Place<'tcx> {
        self.mk_place_elem(
            place,
            PlaceElem::Downcast(Some(adt_def.variant(variant_index).name), variant_index),
        )
    }

    pub fn mk_place_downcast_unnamed(
        self,
        place: Place<'tcx>,
        variant_index: VariantIdx,
    ) -> Place<'tcx> {
        self.mk_place_elem(place, PlaceElem::Downcast(None, variant_index))
    }

    pub fn mk_place_index(self, place: Place<'tcx>, index: Local) -> Place<'tcx> {
        self.mk_place_elem(place, PlaceElem::Index(index))
    }

    /// This method copies `Place`'s projection, add an element and reintern it. Should not be used
    /// to build a full `Place` it's just a convenient way to grab a projection and modify it in
    /// flight.
    pub fn mk_place_elem(self, place: Place<'tcx>, elem: PlaceElem<'tcx>) -> Place<'tcx> {
        let mut projection = place.projection.to_vec();
        projection.push(elem);

        Place { local: place.local, projection: self.intern_place_elems(&projection) }
    }

    /// Interns a non-empty list of existential predicates. The input must
    /// already be sorted by `stable_cmp` (asserted below) so that permutations
    /// intern to the same list.
    pub fn intern_poly_existential_predicates(
        self,
        eps: &[ty::Binder<'tcx, ExistentialPredicate<'tcx>>],
    ) -> &'tcx List<ty::Binder<'tcx, ExistentialPredicate<'tcx>>> {
        assert!(!eps.is_empty());
        assert!(
            eps.array_windows()
                .all(|[a, b]| a.skip_binder().stable_cmp(self, &b.skip_binder())
                    != Ordering::Greater)
        );
        self._intern_poly_existential_predicates(eps)
    }

    pub fn intern_predicates(self, preds: &[Predicate<'tcx>]) -> &'tcx List<Predicate<'tcx>> {
        // FIXME consider asking the input slice to be sorted to avoid
        // re-interning permutations, in which case that would be asserted
        // here.
        if preds.is_empty() {
            // The macro-generated method below asserts we don't intern an empty slice.
            List::empty()
        } else {
            self._intern_predicates(preds)
        }
    }

    pub fn intern_type_list(self, ts: &[Ty<'tcx>]) -> &'tcx List<Ty<'tcx>> {
        if ts.is_empty() {
            List::empty()
        } else {
            // Actually intern type lists as lists of `GenericArg`s.
            //
            // Transmuting from `Ty<'tcx>` to `GenericArg<'tcx>` is sound
            // as explained in `ty_slice_as_generic_arg`. With this, we
            // guarantee that even when transmuting between `List<Ty<'tcx>>`
            // and `List<GenericArg<'tcx>>`, the uniqueness requirement for
            // lists is upheld.
            let substs = self._intern_substs(ty::subst::ty_slice_as_generic_args(ts));
            substs.try_as_type_list().unwrap()
        }
    }

    /// Interns a slice of generic args; the empty slice maps to the shared
    /// empty list without touching the interner.
    pub fn intern_substs(self, ts: &[GenericArg<'tcx>]) -> &'tcx List<GenericArg<'tcx>> {
        if ts.is_empty() { List::empty() } else { self._intern_substs(ts) }
    }

    pub fn intern_projs(self, ps: &[ProjectionKind]) -> &'tcx List<ProjectionKind> {
        if ps.is_empty() { List::empty() } else { self._intern_projs(ps) }
    }

    pub fn intern_place_elems(self, ts: &[PlaceElem<'tcx>]) -> &'tcx List<PlaceElem<'tcx>> {
        if ts.is_empty() { List::empty() } else { self._intern_place_elems(ts) }
    }

    pub fn intern_canonical_var_infos(
        self,
        ts: &[CanonicalVarInfo<'tcx>],
    ) -> CanonicalVarInfos<'tcx> {
        if ts.is_empty() { List::empty() } else { self._intern_canonical_var_infos(ts) }
    }

    pub fn intern_bound_variable_kinds(
        self,
        ts: &[ty::BoundVariableKind],
    ) -> &'tcx List<ty::BoundVariableKind> {
        if ts.is_empty() { List::empty() } else { self._intern_bound_variable_kinds(ts) }
    }

    /// Builds a `FnSig` from the input types plus the output type (stored
    /// together as `inputs_and_output`, with the output last).
    pub fn mk_fn_sig<I>(
        self,
        inputs: I,
        output: I::Item,
        c_variadic: bool,
        unsafety: hir::Unsafety,
        abi: abi::Abi,
    ) -> <I::Item as InternIteratorElement<Ty<'tcx>, ty::FnSig<'tcx>>>::Output
    where
        I: Iterator<Item: InternIteratorElement<Ty<'tcx>, ty::FnSig<'tcx>>>,
    {
        inputs.chain(iter::once(output)).intern_with(|xs| ty::FnSig {
            inputs_and_output: self.intern_type_list(xs),
            c_variadic,
            unsafety,
            abi,
        })
    }

    pub fn mk_poly_existential_predicates<
        I: InternAs<
            [ty::Binder<'tcx, ExistentialPredicate<'tcx>>],
            &'tcx List<ty::Binder<'tcx, ExistentialPredicate<'tcx>>>,
        >,
    >(
        self,
        iter: I,
    ) -> I::Output {
        iter.intern_with(|xs| self.intern_poly_existential_predicates(xs))
    }

    pub fn mk_predicates<I: InternAs<[Predicate<'tcx>], &'tcx List<Predicate<'tcx>>>>(
        self,
        iter: I,
    ) -> I::Output {
        iter.intern_with(|xs| self.intern_predicates(xs))
    }

    pub fn mk_type_list<I: InternAs<[Ty<'tcx>], &'tcx List<Ty<'tcx>>>>(self, iter: I) -> I::Output {
        iter.intern_with(|xs| self.intern_type_list(xs))
    }

    pub fn mk_substs<I: InternAs<[GenericArg<'tcx>], &'tcx List<GenericArg<'tcx>>>>(
        self,
        iter: I,
    ) -> I::Output {
        iter.intern_with(|xs| self.intern_substs(xs))
    }

    pub fn mk_place_elems<I: InternAs<[PlaceElem<'tcx>], &'tcx List<PlaceElem<'tcx>>>>(
        self,
        iter: I,
    ) -> I::Output {
        iter.intern_with(|xs| self.intern_place_elems(xs))
    }

    /// Interns the substs for a trait reference: `self_ty` followed by the
    /// remaining generic args.
    pub fn mk_substs_trait(self, self_ty: Ty<'tcx>, rest: &[GenericArg<'tcx>]) -> SubstsRef<'tcx> {
        self.mk_substs(iter::once(self_ty.into()).chain(rest.iter().cloned()))
    }

    pub fn mk_bound_variable_kinds<
        I: InternAs<[ty::BoundVariableKind], &'tcx List<ty::BoundVariableKind>>,
    >(
        self,
        iter: I,
    ) -> I::Output {
        iter.intern_with(|xs| self.intern_bound_variable_kinds(xs))
    }

    /// Walks upwards from `id` to find a node which might change lint levels with attributes.
    /// It stops at `bound` and just returns it if reached.
    pub fn maybe_lint_level_root_bounded(self, mut id: HirId, bound: HirId) -> HirId {
        let hir = self.hir();
        loop {
            if id == bound {
                return bound;
            }

            if hir.attrs(id).iter().any(|attr| Level::from_attr(attr).is_some()) {
                return id;
            }
            let next = hir.get_parent_node(id);
            if next == id {
                bug!("lint traversal reached the root of the crate");
            }
            id = next;
        }
    }

    /// Computes the level (and its source) configured for `lint` at HIR node
    /// `id`, walking up the parent chain until a level is found.
    pub fn lint_level_at_node(
        self,
        lint: &'static Lint,
        mut id: hir::HirId,
    ) -> (Level, LintLevelSource) {
        let sets = self.lint_levels(());
        loop {
            if let Some(pair) = sets.level_and_source(lint, id, self.sess) {
                return pair;
            }
            let next = self.hir().get_parent_node(id);
            if next == id {
                bug!("lint traversal reached the root of the crate");
            }
            id = next;
        }
    }

    /// Emits `lint` at `span`, with the level and source configured at
    /// `hir_id` (see `lint_level_at_node`).
    pub fn struct_span_lint_hir(
        self,
        lint: &'static Lint,
        hir_id: HirId,
        span: impl Into<MultiSpan>,
        decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>),
    ) {
        let (level, src) = self.lint_level_at_node(lint, hir_id);
        struct_lint_level(self.sess, lint, level, src, Some(span.into()), decorate);
    }

    pub fn struct_lint_node(
        self,
        lint: &'static Lint,
        id: HirId,
        decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>),
    ) {
        let (level, src) =
self.lint_level_at_node(lint, id); struct_lint_level(self.sess, lint, level, src, None, decorate); } pub fn in_scope_traits(self, id: HirId) -> Option<&'tcx [TraitCandidate]> { let map = self.in_scope_traits_map(id.owner)?; let candidates = map.get(&id.local_id)?; Some(&*candidates) } pub fn named_region(self, id: HirId) -> Option<resolve_lifetime::Region> { debug!(?id, "named_region"); self.named_region_map(id.owner).and_then(|map| map.get(&id.local_id).cloned()) } pub fn is_late_bound(self, id: HirId) -> bool { self.is_late_bound_map(id.owner).map_or(false, |set| { let def_id = self.hir().local_def_id(id); set.contains(&def_id) }) } pub fn late_bound_vars(self, id: HirId) -> &'tcx List<ty::BoundVariableKind> { self.mk_bound_variable_kinds( self.late_bound_vars_map(id.owner) .and_then(|map| map.get(&id.local_id).cloned()) .unwrap_or_else(|| { bug!("No bound vars found for {:?} ({:?})", self.hir().node_to_string(id), id) }) .iter(), ) } /// Whether the `def_id` counts as const fn in the current crate, considering all active /// feature gates pub fn is_const_fn(self, def_id: DefId) -> bool { if self.is_const_fn_raw(def_id) { match self.lookup_const_stability(def_id) { Some(stability) if stability.is_const_unstable() => { // has a `rustc_const_unstable` attribute, check whether the user enabled the // corresponding feature gate. self.features() .declared_lib_features .iter() .any(|&(sym, _)| sym == stability.feature) } // functions without const stability are either stable user written // const fn or the user is using feature gates and we thus don't // care what they do _ => true, } } else { false } } /// Whether the trait impl is marked const. This does not consider stability or feature gates. 
pub fn is_const_trait_impl_raw(self, def_id: DefId) -> bool { let Some(local_def_id) = def_id.as_local() else { return false }; let hir_id = self.local_def_id_to_hir_id(local_def_id); let node = self.hir().get(hir_id); matches!( node, hir::Node::Item(hir::Item { kind: hir::ItemKind::Impl(hir::Impl { constness: hir::Constness::Const, .. }), .. }) ) } } impl<'tcx> TyCtxtAt<'tcx> { /// Constructs a `TyKind::Error` type and registers a `delay_span_bug` to ensure it gets used. #[track_caller] pub fn ty_error(self) -> Ty<'tcx> { self.tcx.ty_error_with_message(self.span, "TyKind::Error constructed but no error reported") } /// Constructs a `TyKind::Error` type and registers a `delay_span_bug` with the given `msg to /// ensure it gets used. #[track_caller] pub fn ty_error_with_message(self, msg: &str) -> Ty<'tcx> { self.tcx.ty_error_with_message(self.span, msg) } } // We are comparing types with different invariant lifetimes, so `ptr::eq` // won't work for us. fn ptr_eq<T, U>(t: *const T, u: *const U) -> bool { t as *const () == u as *const () } pub fn provide(providers: &mut ty::query::Providers) { providers.resolutions = |tcx, ()| &tcx.untracked_resolutions; providers.module_reexports = |tcx, id| tcx.resolutions(()).reexport_map.get(&id).map(|v| &v[..]); providers.crate_name = |tcx, id| { assert_eq!(id, LOCAL_CRATE); tcx.crate_name }; providers.maybe_unused_trait_imports = |tcx, ()| &tcx.resolutions(()).maybe_unused_trait_imports; providers.maybe_unused_extern_crates = |tcx, ()| &tcx.resolutions(()).maybe_unused_extern_crates[..]; providers.names_imported_by_glob_use = |tcx, id| { tcx.arena.alloc(tcx.resolutions(()).glob_map.get(&id).cloned().unwrap_or_default()) }; providers.extern_mod_stmt_cnum = |tcx, id| tcx.resolutions(()).extern_crate_map.get(&id).cloned(); providers.output_filenames = |tcx, ()| &tcx.output_filenames; providers.features_query = |tcx, ()| tcx.sess.features_untracked(); providers.is_panic_runtime = |tcx, cnum| { assert_eq!(cnum, LOCAL_CRATE); 
tcx.sess.contains_name(tcx.hir().krate_attrs(), sym::panic_runtime) }; providers.is_compiler_builtins = |tcx, cnum| { assert_eq!(cnum, LOCAL_CRATE); tcx.sess.contains_name(tcx.hir().krate_attrs(), sym::compiler_builtins) }; providers.has_panic_handler = |tcx, cnum| { assert_eq!(cnum, LOCAL_CRATE); // We want to check if the panic handler was defined in this crate tcx.lang_items().panic_impl().map_or(false, |did| did.is_local()) }; }
37.686458
134
0.594848
e6bbe62f66bec1a106d2e283ab1922b60de616e4
10,941
#![allow(non_snake_case, non_upper_case_globals)] #![allow(non_camel_case_types)] //! KPP Registers use crate::RWRegister; #[cfg(not(feature = "nosync"))] use core::marker::PhantomData; /// Keypad Control Register pub mod KPCR { /// Keypad Row Enable pub mod KRE { /// Offset (0 bits) pub const offset: u16 = 0; /// Mask (8 bits: 0xff << 0) pub const mask: u16 = 0xff << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values pub mod RW { /// 0b00000000: Row is not included in the keypad key press detect. pub const KRE_0: u16 = 0b00000000; /// 0b00000001: Row is included in the keypad key press detect. pub const KRE_1: u16 = 0b00000001; } } /// Keypad Column Strobe Open-Drain Enable pub mod KCO { /// Offset (8 bits) pub const offset: u16 = 8; /// Mask (8 bits: 0xff << 8) pub const mask: u16 = 0xff << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values pub mod RW { /// 0b00000000: Column strobe output is totem pole drive. pub const TOTEM_POLE: u16 = 0b00000000; /// 0b00000001: Column strobe output is open drain. 
pub const OPEN_DRAIN: u16 = 0b00000001; } } } /// Keypad Status Register pub mod KPSR { /// Keypad Key Depress pub mod KPKD { /// Offset (0 bits) pub const offset: u16 = 0; /// Mask (1 bit: 1 << 0) pub const mask: u16 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values pub mod RW { /// 0b0: No key presses detected pub const KPKD_0: u16 = 0b0; /// 0b1: A key has been depressed pub const KPKD_1: u16 = 0b1; } } /// Keypad Key Release pub mod KPKR { /// Offset (1 bits) pub const offset: u16 = 1; /// Mask (1 bit: 1 << 1) pub const mask: u16 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values pub mod RW { /// 0b0: No key release detected pub const KPKR_0: u16 = 0b0; /// 0b1: All keys have been released pub const KPKR_1: u16 = 0b1; } } /// Key Depress Synchronizer Clear pub mod KDSC { /// Offset (2 bits) pub const offset: u16 = 2; /// Mask (1 bit: 1 << 2) pub const mask: u16 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values pub mod RW { /// 0b0: No effect pub const KDSC_0: u16 = 0b0; /// 0b1: Set bits that clear the keypad depress synchronizer chain pub const KDSC_1: u16 = 0b1; } } /// Key Release Synchronizer Set pub mod KRSS { /// Offset (3 bits) pub const offset: u16 = 3; /// Mask (1 bit: 1 << 3) pub const mask: u16 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values pub mod RW { /// 0b0: No effect pub const KRSS_0: u16 = 0b0; /// 0b1: Set bits which sets keypad release synchronizer chain pub const KRSS_1: u16 = 0b1; } } /// Keypad Key Depress Interrupt Enable pub mod KDIE { /// Offset (8 bits) pub const offset: u16 = 8; /// Mask (1 bit: 1 << 8) pub const mask: u16 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values pub mod 
RW { /// 0b0: No interrupt request is generated when KPKD is set. pub const KDIE_0: u16 = 0b0; /// 0b1: An interrupt request is generated when KPKD is set. pub const KDIE_1: u16 = 0b1; } } /// Keypad Release Interrupt Enable pub mod KRIE { /// Offset (9 bits) pub const offset: u16 = 9; /// Mask (1 bit: 1 << 9) pub const mask: u16 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values pub mod RW { /// 0b0: No interrupt request is generated when KPKR is set. pub const KRIE_0: u16 = 0b0; /// 0b1: An interrupt request is generated when KPKR is set. pub const KRIE_1: u16 = 0b1; } } } /// Keypad Data Direction Register pub mod KDDR { /// Keypad Row Data Direction pub mod KRDD { /// Offset (0 bits) pub const offset: u16 = 0; /// Mask (8 bits: 0xff << 0) pub const mask: u16 = 0xff << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values pub mod RW { /// 0b00000000: ROWn pin configured as an input. pub const INPUT: u16 = 0b00000000; /// 0b00000001: ROWn pin configured as an output. pub const OUTPUT: u16 = 0b00000001; } } /// Keypad Column Data Direction Register pub mod KCDD { /// Offset (8 bits) pub const offset: u16 = 8; /// Mask (8 bits: 0xff << 8) pub const mask: u16 = 0xff << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values pub mod RW { /// 0b00000000: COLn pin is configured as an input. pub const INPUT: u16 = 0b00000000; /// 0b00000001: COLn pin is configured as an output. 
pub const OUTPUT: u16 = 0b00000001; } } } /// Keypad Data Register pub mod KPDR { /// Keypad Row Data pub mod KRD { /// Offset (0 bits) pub const offset: u16 = 0; /// Mask (8 bits: 0xff << 0) pub const mask: u16 = 0xff << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Keypad Column Data pub mod KCD { /// Offset (8 bits) pub const offset: u16 = 8; /// Mask (8 bits: 0xff << 8) pub const mask: u16 = 0xff << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } } #[repr(C)] pub struct RegisterBlock { /// Keypad Control Register pub KPCR: RWRegister<u16>, /// Keypad Status Register pub KPSR: RWRegister<u16>, /// Keypad Data Direction Register pub KDDR: RWRegister<u16>, /// Keypad Data Register pub KPDR: RWRegister<u16>, } pub struct ResetValues { pub KPCR: u16, pub KPSR: u16, pub KDDR: u16, pub KPDR: u16, } #[cfg(not(feature = "nosync"))] pub struct Instance { pub(crate) addr: u32, pub(crate) _marker: PhantomData<*const RegisterBlock>, } #[cfg(not(feature = "nosync"))] impl ::core::ops::Deref for Instance { type Target = RegisterBlock; #[inline(always)] fn deref(&self) -> &RegisterBlock { unsafe { &*(self.addr as *const _) } } } unsafe impl Send for Instance {} /// Access functions for the KPP peripheral instance pub mod KPP { use super::ResetValues; #[cfg(not(feature = "nosync"))] use super::Instance; #[cfg(not(feature = "nosync"))] const INSTANCE: Instance = Instance { addr: 0x401fc000, _marker: ::core::marker::PhantomData, }; /// Reset values for each field in KPP pub const reset: ResetValues = ResetValues { KPCR: 0x00000000, KPSR: 0x00000400, KDDR: 0x00000000, KPDR: 0x00000000, }; #[cfg(not(feature = "nosync"))] #[allow(renamed_and_removed_lints)] #[allow(private_no_mangle_statics)] #[no_mangle] static mut KPP_TAKEN: bool = false; /// Safe access to KPP /// /// This function returns 
`Some(Instance)` if this instance is not /// currently taken, and `None` if it is. This ensures that if you /// do get `Some(Instance)`, you are ensured unique access to /// the peripheral and there cannot be data races (unless other /// code uses `unsafe`, of course). You can then pass the /// `Instance` around to other functions as required. When you're /// done with it, you can call `release(instance)` to return it. /// /// `Instance` itself dereferences to a `RegisterBlock`, which /// provides access to the peripheral's registers. #[cfg(not(feature = "nosync"))] #[inline] pub fn take() -> Option<Instance> { external_cortex_m::interrupt::free(|_| unsafe { if KPP_TAKEN { None } else { KPP_TAKEN = true; Some(INSTANCE) } }) } /// Release exclusive access to KPP /// /// This function allows you to return an `Instance` so that it /// is available to `take()` again. This function will panic if /// you return a different `Instance` or if this instance is not /// already taken. #[cfg(not(feature = "nosync"))] #[inline] pub fn release(inst: Instance) { external_cortex_m::interrupt::free(|_| unsafe { if KPP_TAKEN && inst.addr == INSTANCE.addr { KPP_TAKEN = false; } else { panic!("Released a peripheral which was not taken"); } }); } /// Unsafely steal KPP /// /// This function is similar to take() but forcibly takes the /// Instance, marking it as taken irregardless of its previous /// state. #[cfg(not(feature = "nosync"))] #[inline] pub unsafe fn steal() -> Instance { KPP_TAKEN = true; INSTANCE } } /// Raw pointer to KPP /// /// Dereferencing this is unsafe because you are not ensured unique /// access to the peripheral, so you may encounter data races with /// other users of this peripheral. It is up to you to ensure you /// will not cause data races. /// /// This constant is provided for ease of use in unsafe code: you can /// simply call for example `write_reg!(gpio, GPIOA, ODR, 1);`. pub const KPP: *const RegisterBlock = 0x401fc000 as *const _;
27.982097
79
0.538708
1199c97a2d95ec390b0b832e803308e5fff2e53d
109,716
#[doc = "Register `PA_INTTYPE` reader"] pub struct R(crate::R<PA_INTTYPE_SPEC>); impl core::ops::Deref for R { type Target = crate::R<PA_INTTYPE_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<PA_INTTYPE_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<PA_INTTYPE_SPEC>) -> Self { R(reader) } } #[doc = "Register `PA_INTTYPE` writer"] pub struct W(crate::W<PA_INTTYPE_SPEC>); impl core::ops::Deref for W { type Target = crate::W<PA_INTTYPE_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<PA_INTTYPE_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<PA_INTTYPE_SPEC>) -> Self { W(writer) } } #[doc = "Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum TYPE0_A { #[doc = "0: Edge trigger interrupt"] _0 = 0, #[doc = "1: Level trigger interrupt"] _1 = 1, } impl From<TYPE0_A> for bool { #[inline(always)] fn from(variant: TYPE0_A) -> Self { variant as u8 != 0 } } #[doc = "Field `TYPE0` reader - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct TYPE0_R(crate::FieldReader<bool, TYPE0_A>); impl TYPE0_R { pub(crate) fn new(bits: bool) -> Self { TYPE0_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> TYPE0_A { match self.bits { false => TYPE0_A::_0, true => TYPE0_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == TYPE0_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == TYPE0_A::_1 } } impl core::ops::Deref for TYPE0_R { type Target = crate::FieldReader<bool, TYPE0_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `TYPE0` writer - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct TYPE0_W<'a> { w: &'a mut W, } impl<'a> TYPE0_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: TYPE0_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Edge trigger interrupt"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(TYPE0_A::_0) } #[doc = "Level trigger interrupt"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(TYPE0_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | (value as u32 & 0x01); self.w } } #[doc = "Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum TYPE1_A { #[doc = "0: Edge trigger interrupt"] _0 = 0, #[doc = "1: Level trigger interrupt"] _1 = 1, } impl From<TYPE1_A> for bool { #[inline(always)] fn from(variant: TYPE1_A) -> Self { variant as u8 != 0 } } #[doc = "Field `TYPE1` reader - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct TYPE1_R(crate::FieldReader<bool, TYPE1_A>); impl TYPE1_R { pub(crate) fn new(bits: bool) -> Self { TYPE1_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> TYPE1_A { match self.bits { false => TYPE1_A::_0, true => TYPE1_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == TYPE1_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == TYPE1_A::_1 } } impl core::ops::Deref for TYPE1_R { type Target = crate::FieldReader<bool, TYPE1_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `TYPE1` writer - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct TYPE1_W<'a> { w: &'a mut W, } impl<'a> TYPE1_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: TYPE1_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Edge trigger interrupt"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(TYPE1_A::_0) } #[doc = "Level trigger interrupt"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(TYPE1_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 1)) | ((value as u32 & 0x01) << 1); self.w } } #[doc = "Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum TYPE2_A { #[doc = "0: Edge trigger interrupt"] _0 = 0, #[doc = "1: Level trigger interrupt"] _1 = 1, } impl From<TYPE2_A> for bool { #[inline(always)] fn from(variant: TYPE2_A) -> Self { variant as u8 != 0 } } #[doc = "Field `TYPE2` reader - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct TYPE2_R(crate::FieldReader<bool, TYPE2_A>); impl TYPE2_R { pub(crate) fn new(bits: bool) -> Self { TYPE2_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> TYPE2_A { match self.bits { false => TYPE2_A::_0, true => TYPE2_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == TYPE2_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == TYPE2_A::_1 } } impl core::ops::Deref for TYPE2_R { type Target = crate::FieldReader<bool, TYPE2_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `TYPE2` writer - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct TYPE2_W<'a> { w: &'a mut W, } impl<'a> TYPE2_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: TYPE2_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Edge trigger interrupt"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(TYPE2_A::_0) } #[doc = "Level trigger interrupt"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(TYPE2_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 2)) | ((value as u32 & 0x01) << 2); self.w } } #[doc = "Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"]
// Trigger-type values for the TYPE3 field: 0 = edge-triggered, 1 = level-triggered interrupt.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TYPE3_A {
    #[doc = "0: Edge trigger interrupt"]
    _0 = 0,
    #[doc = "1: Level trigger interrupt"]
    _1 = 1,
}
// Raw single-bit representation of the variant (`_0` -> false, `_1` -> true).
impl From<TYPE3_A> for bool {
    #[inline(always)]
    fn from(variant: TYPE3_A) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Field `TYPE3` reader - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."]
// Read proxy for the TYPE3 field; wraps the crate's generic `FieldReader` and adds
// enumerated-variant helpers.
pub struct TYPE3_R(crate::FieldReader<bool, TYPE3_A>);
impl TYPE3_R {
    pub(crate) fn new(bits: bool) -> Self {
        TYPE3_R(crate::FieldReader::new(bits))
    }
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> TYPE3_A {
        match self.bits {
            false => TYPE3_A::_0,
            true => TYPE3_A::_1,
        }
    }
    #[doc = "Checks if the value of the field is `_0`"]
    #[inline(always)]
    pub fn is_0(&self) -> bool {
        **self == TYPE3_A::_0
    }
    #[doc = "Checks if the value of the field is `_1`"]
    #[inline(always)]
    pub fn is_1(&self) -> bool {
        **self == TYPE3_A::_1
    }
}
// Deref exposes the inner `FieldReader` API directly on TYPE3_R.
impl core::ops::Deref for TYPE3_R {
    type Target = crate::FieldReader<bool, TYPE3_A>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `TYPE3` writer - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."]
// Write proxy for the TYPE3 field (bit 3); methods return `&mut W` for chaining.
pub struct TYPE3_W<'a> {
    w: &'a mut W,
}
impl<'a> TYPE3_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: TYPE3_A) -> &'a mut W {
        self.bit(variant.into())
    }
    #[doc = "Edge trigger interrupt"]
    #[inline(always)]
    pub fn _0(self) -> &'a mut W {
        self.variant(TYPE3_A::_0)
    }
    #[doc = "Level trigger interrupt"]
    #[inline(always)]
    pub fn _1(self) -> &'a mut W {
        self.variant(TYPE3_A::_1)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 3, then OR the new value into bit 3; all other bits are preserved.
        self.w.bits = (self.w.bits & !(0x01 << 3)) | ((value as u32 & 0x01) << 3);
        self.w
    }
}
#[doc = "Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"]
// Trigger-type values for the TYPE4 field: 0 = edge-triggered, 1 = level-triggered interrupt.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TYPE4_A {
    #[doc = "0: Edge trigger interrupt"]
    _0 = 0,
    #[doc = "1: Level trigger interrupt"]
    _1 = 1,
}
// Raw single-bit representation of the variant (`_0` -> false, `_1` -> true).
impl From<TYPE4_A> for bool {
    #[inline(always)]
    fn from(variant: TYPE4_A) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Field `TYPE4` reader - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."]
// Read proxy for the TYPE4 field; wraps the crate's generic `FieldReader` and adds
// enumerated-variant helpers.
pub struct TYPE4_R(crate::FieldReader<bool, TYPE4_A>);
impl TYPE4_R {
    pub(crate) fn new(bits: bool) -> Self {
        TYPE4_R(crate::FieldReader::new(bits))
    }
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> TYPE4_A {
        match self.bits {
            false => TYPE4_A::_0,
            true => TYPE4_A::_1,
        }
    }
    #[doc = "Checks if the value of the field is `_0`"]
    #[inline(always)]
    pub fn is_0(&self) -> bool {
        **self == TYPE4_A::_0
    }
    #[doc = "Checks if the value of the field is `_1`"]
    #[inline(always)]
    pub fn is_1(&self) -> bool {
        **self == TYPE4_A::_1
    }
}
// Deref exposes the inner `FieldReader` API directly on TYPE4_R.
impl core::ops::Deref for TYPE4_R {
    type Target = crate::FieldReader<bool, TYPE4_A>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `TYPE4` writer - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."]
// Write proxy for the TYPE4 field (bit 4); methods return `&mut W` for chaining.
pub struct TYPE4_W<'a> {
    w: &'a mut W,
}
impl<'a> TYPE4_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: TYPE4_A) -> &'a mut W {
        self.bit(variant.into())
    }
    #[doc = "Edge trigger interrupt"]
    #[inline(always)]
    pub fn _0(self) -> &'a mut W {
        self.variant(TYPE4_A::_0)
    }
    #[doc = "Level trigger interrupt"]
    #[inline(always)]
    pub fn _1(self) -> &'a mut W {
        self.variant(TYPE4_A::_1)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 4, then OR the new value into bit 4; all other bits are preserved.
        self.w.bits = (self.w.bits & !(0x01 << 4)) | ((value as u32 & 0x01) << 4);
        self.w
    }
}
#[doc = "Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"]
// Trigger-type values for the TYPE5 field: 0 = edge-triggered, 1 = level-triggered interrupt.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TYPE5_A {
    #[doc = "0: Edge trigger interrupt"]
    _0 = 0,
    #[doc = "1: Level trigger interrupt"]
    _1 = 1,
}
// Raw single-bit representation of the variant (`_0` -> false, `_1` -> true).
impl From<TYPE5_A> for bool {
    #[inline(always)]
    fn from(variant: TYPE5_A) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Field `TYPE5` reader - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."]
// Read proxy for the TYPE5 field; wraps the crate's generic `FieldReader` and adds
// enumerated-variant helpers.
pub struct TYPE5_R(crate::FieldReader<bool, TYPE5_A>);
impl TYPE5_R {
    pub(crate) fn new(bits: bool) -> Self {
        TYPE5_R(crate::FieldReader::new(bits))
    }
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> TYPE5_A {
        match self.bits {
            false => TYPE5_A::_0,
            true => TYPE5_A::_1,
        }
    }
    #[doc = "Checks if the value of the field is `_0`"]
    #[inline(always)]
    pub fn is_0(&self) -> bool {
        **self == TYPE5_A::_0
    }
    #[doc = "Checks if the value of the field is `_1`"]
    #[inline(always)]
    pub fn is_1(&self) -> bool {
        **self == TYPE5_A::_1
    }
}
// Deref exposes the inner `FieldReader` API directly on TYPE5_R.
impl core::ops::Deref for TYPE5_R {
    type Target = crate::FieldReader<bool, TYPE5_A>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `TYPE5` writer - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."]
// Write proxy for the TYPE5 field (bit 5); methods return `&mut W` for chaining.
pub struct TYPE5_W<'a> {
    w: &'a mut W,
}
impl<'a> TYPE5_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: TYPE5_A) -> &'a mut W {
        self.bit(variant.into())
    }
    #[doc = "Edge trigger interrupt"]
    #[inline(always)]
    pub fn _0(self) -> &'a mut W {
        self.variant(TYPE5_A::_0)
    }
    #[doc = "Level trigger interrupt"]
    #[inline(always)]
    pub fn _1(self) -> &'a mut W {
        self.variant(TYPE5_A::_1)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 5, then OR the new value into bit 5; all other bits are preserved.
        self.w.bits = (self.w.bits & !(0x01 << 5)) | ((value as u32 & 0x01) << 5);
        self.w
    }
}
#[doc = "Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"]
// Trigger-type values for the TYPE6 field: 0 = edge-triggered, 1 = level-triggered interrupt.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TYPE6_A {
    #[doc = "0: Edge trigger interrupt"]
    _0 = 0,
    #[doc = "1: Level trigger interrupt"]
    _1 = 1,
}
// Raw single-bit representation of the variant (`_0` -> false, `_1` -> true).
impl From<TYPE6_A> for bool {
    #[inline(always)]
    fn from(variant: TYPE6_A) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Field `TYPE6` reader - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."]
// Read proxy for the TYPE6 field; wraps the crate's generic `FieldReader` and adds
// enumerated-variant helpers.
pub struct TYPE6_R(crate::FieldReader<bool, TYPE6_A>);
impl TYPE6_R {
    pub(crate) fn new(bits: bool) -> Self {
        TYPE6_R(crate::FieldReader::new(bits))
    }
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> TYPE6_A {
        match self.bits {
            false => TYPE6_A::_0,
            true => TYPE6_A::_1,
        }
    }
    #[doc = "Checks if the value of the field is `_0`"]
    #[inline(always)]
    pub fn is_0(&self) -> bool {
        **self == TYPE6_A::_0
    }
    #[doc = "Checks if the value of the field is `_1`"]
    #[inline(always)]
    pub fn is_1(&self) -> bool {
        **self == TYPE6_A::_1
    }
}
// Deref exposes the inner `FieldReader` API directly on TYPE6_R.
impl core::ops::Deref for TYPE6_R {
    type Target = crate::FieldReader<bool, TYPE6_A>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `TYPE6` writer - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."]
// Write proxy for the TYPE6 field (bit 6); methods return `&mut W` for chaining.
pub struct TYPE6_W<'a> {
    w: &'a mut W,
}
impl<'a> TYPE6_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: TYPE6_A) -> &'a mut W {
        self.bit(variant.into())
    }
    #[doc = "Edge trigger interrupt"]
    #[inline(always)]
    pub fn _0(self) -> &'a mut W {
        self.variant(TYPE6_A::_0)
    }
    #[doc = "Level trigger interrupt"]
    #[inline(always)]
    pub fn _1(self) -> &'a mut W {
        self.variant(TYPE6_A::_1)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 6, then OR the new value into bit 6; all other bits are preserved.
        self.w.bits = (self.w.bits & !(0x01 << 6)) | ((value as u32 & 0x01) << 6);
        self.w
    }
}
#[doc = "Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"]
// Trigger-type values for the TYPE7 field: 0 = edge-triggered, 1 = level-triggered interrupt.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TYPE7_A {
    #[doc = "0: Edge trigger interrupt"]
    _0 = 0,
    #[doc = "1: Level trigger interrupt"]
    _1 = 1,
}
// Raw single-bit representation of the variant (`_0` -> false, `_1` -> true).
impl From<TYPE7_A> for bool {
    #[inline(always)]
    fn from(variant: TYPE7_A) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Field `TYPE7` reader - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."]
// Read proxy for the TYPE7 field; wraps the crate's generic `FieldReader` and adds
// enumerated-variant helpers.
pub struct TYPE7_R(crate::FieldReader<bool, TYPE7_A>);
impl TYPE7_R {
    pub(crate) fn new(bits: bool) -> Self {
        TYPE7_R(crate::FieldReader::new(bits))
    }
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> TYPE7_A {
        match self.bits {
            false => TYPE7_A::_0,
            true => TYPE7_A::_1,
        }
    }
    #[doc = "Checks if the value of the field is `_0`"]
    #[inline(always)]
    pub fn is_0(&self) -> bool {
        **self == TYPE7_A::_0
    }
    #[doc = "Checks if the value of the field is `_1`"]
    #[inline(always)]
    pub fn is_1(&self) -> bool {
        **self == TYPE7_A::_1
    }
}
// Deref exposes the inner `FieldReader` API directly on TYPE7_R.
impl core::ops::Deref for TYPE7_R {
    type Target = crate::FieldReader<bool, TYPE7_A>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `TYPE7` writer - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."]
// Write proxy for the TYPE7 field (bit 7); methods return `&mut W` for chaining.
pub struct TYPE7_W<'a> {
    w: &'a mut W,
}
impl<'a> TYPE7_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: TYPE7_A) -> &'a mut W {
        self.bit(variant.into())
    }
    #[doc = "Edge trigger interrupt"]
    #[inline(always)]
    pub fn _0(self) -> &'a mut W {
        self.variant(TYPE7_A::_0)
    }
    #[doc = "Level trigger interrupt"]
    #[inline(always)]
    pub fn _1(self) -> &'a mut W {
        self.variant(TYPE7_A::_1)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 7, then OR the new value into bit 7; all other bits are preserved.
        self.w.bits = (self.w.bits & !(0x01 << 7)) | ((value as u32 & 0x01) << 7);
        self.w
    }
}
#[doc = "Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"]
// Trigger-type values for the TYPE8 field: 0 = edge-triggered, 1 = level-triggered interrupt.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TYPE8_A {
    #[doc = "0: Edge trigger interrupt"]
    _0 = 0,
    #[doc = "1: Level trigger interrupt"]
    _1 = 1,
}
// Raw single-bit representation of the variant (`_0` -> false, `_1` -> true).
impl From<TYPE8_A> for bool {
    #[inline(always)]
    fn from(variant: TYPE8_A) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Field `TYPE8` reader - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."]
// Read proxy for the TYPE8 field; wraps the crate's generic `FieldReader` and adds
// enumerated-variant helpers.
pub struct TYPE8_R(crate::FieldReader<bool, TYPE8_A>);
impl TYPE8_R {
    pub(crate) fn new(bits: bool) -> Self {
        TYPE8_R(crate::FieldReader::new(bits))
    }
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> TYPE8_A {
        match self.bits {
            false => TYPE8_A::_0,
            true => TYPE8_A::_1,
        }
    }
    #[doc = "Checks if the value of the field is `_0`"]
    #[inline(always)]
    pub fn is_0(&self) -> bool {
        **self == TYPE8_A::_0
    }
    #[doc = "Checks if the value of the field is `_1`"]
    #[inline(always)]
    pub fn is_1(&self) -> bool {
        **self == TYPE8_A::_1
    }
}
// Deref exposes the inner `FieldReader` API directly on TYPE8_R.
impl core::ops::Deref for TYPE8_R {
    type Target = crate::FieldReader<bool, TYPE8_A>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `TYPE8` writer - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."]
// Write proxy for the TYPE8 field (bit 8); methods return `&mut W` for chaining.
pub struct TYPE8_W<'a> {
    w: &'a mut W,
}
impl<'a> TYPE8_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: TYPE8_A) -> &'a mut W {
        self.bit(variant.into())
    }
    #[doc = "Edge trigger interrupt"]
    #[inline(always)]
    pub fn _0(self) -> &'a mut W {
        self.variant(TYPE8_A::_0)
    }
    #[doc = "Level trigger interrupt"]
    #[inline(always)]
    pub fn _1(self) -> &'a mut W {
        self.variant(TYPE8_A::_1)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 8, then OR the new value into bit 8; all other bits are preserved.
        self.w.bits = (self.w.bits & !(0x01 << 8)) | ((value as u32 & 0x01) << 8);
        self.w
    }
}
#[doc = "Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"]
// Trigger-type values for the TYPE9 field: 0 = edge-triggered, 1 = level-triggered interrupt.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TYPE9_A {
    #[doc = "0: Edge trigger interrupt"]
    _0 = 0,
    #[doc = "1: Level trigger interrupt"]
    _1 = 1,
}
// Raw single-bit representation of the variant (`_0` -> false, `_1` -> true).
impl From<TYPE9_A> for bool {
    #[inline(always)]
    fn from(variant: TYPE9_A) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Field `TYPE9` reader - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."]
// Read proxy for the TYPE9 field; wraps the crate's generic `FieldReader` and adds
// enumerated-variant helpers.
pub struct TYPE9_R(crate::FieldReader<bool, TYPE9_A>);
impl TYPE9_R {
    pub(crate) fn new(bits: bool) -> Self {
        TYPE9_R(crate::FieldReader::new(bits))
    }
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> TYPE9_A {
        match self.bits {
            false => TYPE9_A::_0,
            true => TYPE9_A::_1,
        }
    }
    #[doc = "Checks if the value of the field is `_0`"]
    #[inline(always)]
    pub fn is_0(&self) -> bool {
        **self == TYPE9_A::_0
    }
    #[doc = "Checks if the value of the field is `_1`"]
    #[inline(always)]
    pub fn is_1(&self) -> bool {
        **self == TYPE9_A::_1
    }
}
// Deref exposes the inner `FieldReader` API directly on TYPE9_R.
impl core::ops::Deref for TYPE9_R {
    type Target = crate::FieldReader<bool, TYPE9_A>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `TYPE9` writer - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."]
// Write proxy for the TYPE9 field (bit 9); methods return `&mut W` for chaining.
pub struct TYPE9_W<'a> {
    w: &'a mut W,
}
impl<'a> TYPE9_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: TYPE9_A) -> &'a mut W {
        self.bit(variant.into())
    }
    #[doc = "Edge trigger interrupt"]
    #[inline(always)]
    pub fn _0(self) -> &'a mut W {
        self.variant(TYPE9_A::_0)
    }
    #[doc = "Level trigger interrupt"]
    #[inline(always)]
    pub fn _1(self) -> &'a mut W {
        self.variant(TYPE9_A::_1)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 9, then OR the new value into bit 9; all other bits are preserved.
        self.w.bits = (self.w.bits & !(0x01 << 9)) | ((value as u32 & 0x01) << 9);
        self.w
    }
}
#[doc = "Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"]
// Trigger-type values for the TYPE10 field: 0 = edge-triggered, 1 = level-triggered interrupt.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TYPE10_A {
    #[doc = "0: Edge trigger interrupt"]
    _0 = 0,
    #[doc = "1: Level trigger interrupt"]
    _1 = 1,
}
// Raw single-bit representation of the variant (`_0` -> false, `_1` -> true).
impl From<TYPE10_A> for bool {
    #[inline(always)]
    fn from(variant: TYPE10_A) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Field `TYPE10` reader - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."]
// Read proxy for the TYPE10 field; wraps the crate's generic `FieldReader` and adds
// enumerated-variant helpers.
pub struct TYPE10_R(crate::FieldReader<bool, TYPE10_A>);
impl TYPE10_R {
    pub(crate) fn new(bits: bool) -> Self {
        TYPE10_R(crate::FieldReader::new(bits))
    }
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> TYPE10_A {
        match self.bits {
            false => TYPE10_A::_0,
            true => TYPE10_A::_1,
        }
    }
    #[doc = "Checks if the value of the field is `_0`"]
    #[inline(always)]
    pub fn is_0(&self) -> bool {
        **self == TYPE10_A::_0
    }
    #[doc = "Checks if the value of the field is `_1`"]
    #[inline(always)]
    pub fn is_1(&self) -> bool {
        **self == TYPE10_A::_1
    }
}
// Deref exposes the inner `FieldReader` API directly on TYPE10_R.
impl core::ops::Deref for TYPE10_R {
    type Target = crate::FieldReader<bool, TYPE10_A>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `TYPE10` writer - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."]
// Write proxy for the TYPE10 field (bit 10); methods return `&mut W` for chaining.
pub struct TYPE10_W<'a> {
    w: &'a mut W,
}
impl<'a> TYPE10_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: TYPE10_A) -> &'a mut W {
        self.bit(variant.into())
    }
    #[doc = "Edge trigger interrupt"]
    #[inline(always)]
    pub fn _0(self) -> &'a mut W {
        self.variant(TYPE10_A::_0)
    }
    #[doc = "Level trigger interrupt"]
    #[inline(always)]
    pub fn _1(self) -> &'a mut W {
        self.variant(TYPE10_A::_1)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 10, then OR the new value into bit 10; all other bits are preserved.
        self.w.bits = (self.w.bits & !(0x01 << 10)) | ((value as u32 & 0x01) << 10);
        self.w
    }
}
#[doc = "Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum TYPE11_A { #[doc = "0: Edge trigger interrupt"] _0 = 0, #[doc = "1: Level trigger interrupt"] _1 = 1, } impl From<TYPE11_A> for bool { #[inline(always)] fn from(variant: TYPE11_A) -> Self { variant as u8 != 0 } } #[doc = "Field `TYPE11` reader - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct TYPE11_R(crate::FieldReader<bool, TYPE11_A>); impl TYPE11_R { pub(crate) fn new(bits: bool) -> Self { TYPE11_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> TYPE11_A { match self.bits { false => TYPE11_A::_0, true => TYPE11_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == TYPE11_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == TYPE11_A::_1 } } impl core::ops::Deref for TYPE11_R { type Target = crate::FieldReader<bool, TYPE11_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `TYPE11` writer - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct TYPE11_W<'a> { w: &'a mut W, } impl<'a> TYPE11_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: TYPE11_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Edge trigger interrupt"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(TYPE11_A::_0) } #[doc = "Level trigger interrupt"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(TYPE11_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 11)) | ((value as u32 & 0x01) << 11); self.w } } #[doc = "Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum TYPE12_A { #[doc = "0: Edge trigger interrupt"] _0 = 0, #[doc = "1: Level trigger interrupt"] _1 = 1, } impl From<TYPE12_A> for bool { #[inline(always)] fn from(variant: TYPE12_A) -> Self { variant as u8 != 0 } } #[doc = "Field `TYPE12` reader - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct TYPE12_R(crate::FieldReader<bool, TYPE12_A>); impl TYPE12_R { pub(crate) fn new(bits: bool) -> Self { TYPE12_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> TYPE12_A { match self.bits { false => TYPE12_A::_0, true => TYPE12_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == TYPE12_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == TYPE12_A::_1 } } impl core::ops::Deref for TYPE12_R { type Target = crate::FieldReader<bool, TYPE12_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `TYPE12` writer - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct TYPE12_W<'a> { w: &'a mut W, } impl<'a> TYPE12_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: TYPE12_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Edge trigger interrupt"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(TYPE12_A::_0) } #[doc = "Level trigger interrupt"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(TYPE12_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 12)) | ((value as u32 & 0x01) << 12); self.w } } #[doc = "Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum TYPE13_A { #[doc = "0: Edge trigger interrupt"] _0 = 0, #[doc = "1: Level trigger interrupt"] _1 = 1, } impl From<TYPE13_A> for bool { #[inline(always)] fn from(variant: TYPE13_A) -> Self { variant as u8 != 0 } } #[doc = "Field `TYPE13` reader - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct TYPE13_R(crate::FieldReader<bool, TYPE13_A>); impl TYPE13_R { pub(crate) fn new(bits: bool) -> Self { TYPE13_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> TYPE13_A { match self.bits { false => TYPE13_A::_0, true => TYPE13_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == TYPE13_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == TYPE13_A::_1 } } impl core::ops::Deref for TYPE13_R { type Target = crate::FieldReader<bool, TYPE13_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `TYPE13` writer - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct TYPE13_W<'a> { w: &'a mut W, } impl<'a> TYPE13_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: TYPE13_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Edge trigger interrupt"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(TYPE13_A::_0) } #[doc = "Level trigger interrupt"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(TYPE13_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 13)) | ((value as u32 & 0x01) << 13); self.w } } #[doc = "Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum TYPE14_A { #[doc = "0: Edge trigger interrupt"] _0 = 0, #[doc = "1: Level trigger interrupt"] _1 = 1, } impl From<TYPE14_A> for bool { #[inline(always)] fn from(variant: TYPE14_A) -> Self { variant as u8 != 0 } } #[doc = "Field `TYPE14` reader - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct TYPE14_R(crate::FieldReader<bool, TYPE14_A>); impl TYPE14_R { pub(crate) fn new(bits: bool) -> Self { TYPE14_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> TYPE14_A { match self.bits { false => TYPE14_A::_0, true => TYPE14_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == TYPE14_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == TYPE14_A::_1 } } impl core::ops::Deref for TYPE14_R { type Target = crate::FieldReader<bool, TYPE14_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `TYPE14` writer - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct TYPE14_W<'a> { w: &'a mut W, } impl<'a> TYPE14_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: TYPE14_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Edge trigger interrupt"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(TYPE14_A::_0) } #[doc = "Level trigger interrupt"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(TYPE14_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 14)) | ((value as u32 & 0x01) << 14); self.w } } #[doc = "Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum TYPE15_A { #[doc = "0: Edge trigger interrupt"] _0 = 0, #[doc = "1: Level trigger interrupt"] _1 = 1, } impl From<TYPE15_A> for bool { #[inline(always)] fn from(variant: TYPE15_A) -> Self { variant as u8 != 0 } } #[doc = "Field `TYPE15` reader - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct TYPE15_R(crate::FieldReader<bool, TYPE15_A>); impl TYPE15_R { pub(crate) fn new(bits: bool) -> Self { TYPE15_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> TYPE15_A { match self.bits { false => TYPE15_A::_0, true => TYPE15_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == TYPE15_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == TYPE15_A::_1 } } impl core::ops::Deref for TYPE15_R { type Target = crate::FieldReader<bool, TYPE15_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `TYPE15` writer - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control\nTYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt.\nIf the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur.\nThe de-bounce function is valid only for edge triggered interrupt. 
If the interrupt mode is level triggered, the de-bounce enable bit is ignored.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct TYPE15_W<'a> { w: &'a mut W, } impl<'a> TYPE15_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: TYPE15_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Edge trigger interrupt"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(TYPE15_A::_0) } #[doc = "Level trigger interrupt"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(TYPE15_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 15)) | ((value as u32 & 0x01) << 15); self.w } } impl R { #[doc = "Bit 0 - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control TYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt. If the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur. The de-bounce function is valid only for edge triggered interrupt. If the interrupt mode is level triggered, the de-bounce enable bit is ignored. 
Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn type0(&self) -> TYPE0_R { TYPE0_R::new((self.bits & 0x01) != 0) } #[doc = "Bit 1 - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control TYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt. If the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur. The de-bounce function is valid only for edge triggered interrupt. If the interrupt mode is level triggered, the de-bounce enable bit is ignored. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn type1(&self) -> TYPE1_R { TYPE1_R::new(((self.bits >> 1) & 0x01) != 0) } #[doc = "Bit 2 - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control TYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt. If the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur. The de-bounce function is valid only for edge triggered interrupt. If the interrupt mode is level triggered, the de-bounce enable bit is ignored. 
Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn type2(&self) -> TYPE2_R { TYPE2_R::new(((self.bits >> 2) & 0x01) != 0) } #[doc = "Bit 3 - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control TYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt. If the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur. The de-bounce function is valid only for edge triggered interrupt. If the interrupt mode is level triggered, the de-bounce enable bit is ignored. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn type3(&self) -> TYPE3_R { TYPE3_R::new(((self.bits >> 3) & 0x01) != 0) } #[doc = "Bit 4 - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control TYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt. If the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur. The de-bounce function is valid only for edge triggered interrupt. If the interrupt mode is level triggered, the de-bounce enable bit is ignored. 
Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn type4(&self) -> TYPE4_R { TYPE4_R::new(((self.bits >> 4) & 0x01) != 0) } #[doc = "Bit 5 - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control TYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt. If the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur. The de-bounce function is valid only for edge triggered interrupt. If the interrupt mode is level triggered, the de-bounce enable bit is ignored. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn type5(&self) -> TYPE5_R { TYPE5_R::new(((self.bits >> 5) & 0x01) != 0) } #[doc = "Bit 6 - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control TYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt. If the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur. The de-bounce function is valid only for edge triggered interrupt. If the interrupt mode is level triggered, the de-bounce enable bit is ignored. 
Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn type6(&self) -> TYPE6_R { TYPE6_R::new(((self.bits >> 6) & 0x01) != 0) } #[doc = "Bit 7 - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control TYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt. If the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur. The de-bounce function is valid only for edge triggered interrupt. If the interrupt mode is level triggered, the de-bounce enable bit is ignored. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn type7(&self) -> TYPE7_R { TYPE7_R::new(((self.bits >> 7) & 0x01) != 0) } #[doc = "Bit 8 - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control TYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt. If the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur. The de-bounce function is valid only for edge triggered interrupt. If the interrupt mode is level triggered, the de-bounce enable bit is ignored. 
Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn type8(&self) -> TYPE8_R { TYPE8_R::new(((self.bits >> 8) & 0x01) != 0) } #[doc = "Bit 9 - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control TYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt. If the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur. The de-bounce function is valid only for edge triggered interrupt. If the interrupt mode is level triggered, the de-bounce enable bit is ignored. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn type9(&self) -> TYPE9_R { TYPE9_R::new(((self.bits >> 9) & 0x01) != 0) } #[doc = "Bit 10 - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control TYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt. If the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur. The de-bounce function is valid only for edge triggered interrupt. If the interrupt mode is level triggered, the de-bounce enable bit is ignored. 
Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn type10(&self) -> TYPE10_R { TYPE10_R::new(((self.bits >> 10) & 0x01) != 0) } #[doc = "Bit 11 - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control TYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt. If the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur. The de-bounce function is valid only for edge triggered interrupt. If the interrupt mode is level triggered, the de-bounce enable bit is ignored. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn type11(&self) -> TYPE11_R { TYPE11_R::new(((self.bits >> 11) & 0x01) != 0) } #[doc = "Bit 12 - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control TYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt. If the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur. The de-bounce function is valid only for edge triggered interrupt. If the interrupt mode is level triggered, the de-bounce enable bit is ignored. 
Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn type12(&self) -> TYPE12_R { TYPE12_R::new(((self.bits >> 12) & 0x01) != 0) } #[doc = "Bit 13 - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control TYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt. If the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur. The de-bounce function is valid only for edge triggered interrupt. If the interrupt mode is level triggered, the de-bounce enable bit is ignored. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn type13(&self) -> TYPE13_R { TYPE13_R::new(((self.bits >> 13) & 0x01) != 0) } #[doc = "Bit 14 - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control TYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt. If the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur. The de-bounce function is valid only for edge triggered interrupt. If the interrupt mode is level triggered, the de-bounce enable bit is ignored. 
Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn type14(&self) -> TYPE14_R { TYPE14_R::new(((self.bits >> 14) & 0x01) != 0) } #[doc = "Bit 15 - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control TYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt. If the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur. The de-bounce function is valid only for edge triggered interrupt. If the interrupt mode is level triggered, the de-bounce enable bit is ignored. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn type15(&self) -> TYPE15_R { TYPE15_R::new(((self.bits >> 15) & 0x01) != 0) } } impl W { #[doc = "Bit 0 - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control TYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt. If the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur. The de-bounce function is valid only for edge triggered interrupt. If the interrupt mode is level triggered, the de-bounce enable bit is ignored. 
Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn type0(&mut self) -> TYPE0_W { TYPE0_W { w: self } } #[doc = "Bit 1 - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control TYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt. If the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur. The de-bounce function is valid only for edge triggered interrupt. If the interrupt mode is level triggered, the de-bounce enable bit is ignored. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn type1(&mut self) -> TYPE1_W { TYPE1_W { w: self } } #[doc = "Bit 2 - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control TYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt. If the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur. The de-bounce function is valid only for edge triggered interrupt. If the interrupt mode is level triggered, the de-bounce enable bit is ignored. 
Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn type2(&mut self) -> TYPE2_W { TYPE2_W { w: self } } #[doc = "Bit 3 - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control TYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt. If the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur. The de-bounce function is valid only for edge triggered interrupt. If the interrupt mode is level triggered, the de-bounce enable bit is ignored. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn type3(&mut self) -> TYPE3_W { TYPE3_W { w: self } } #[doc = "Bit 4 - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control TYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt. If the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur. The de-bounce function is valid only for edge triggered interrupt. If the interrupt mode is level triggered, the de-bounce enable bit is ignored. 
Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn type4(&mut self) -> TYPE4_W { TYPE4_W { w: self } } #[doc = "Bit 5 - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control TYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt. If the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur. The de-bounce function is valid only for edge triggered interrupt. If the interrupt mode is level triggered, the de-bounce enable bit is ignored. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn type5(&mut self) -> TYPE5_W { TYPE5_W { w: self } } #[doc = "Bit 6 - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control TYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt. If the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur. The de-bounce function is valid only for edge triggered interrupt. If the interrupt mode is level triggered, the de-bounce enable bit is ignored. 
Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn type6(&mut self) -> TYPE6_W { TYPE6_W { w: self } } #[doc = "Bit 7 - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control TYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt. If the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur. The de-bounce function is valid only for edge triggered interrupt. If the interrupt mode is level triggered, the de-bounce enable bit is ignored. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn type7(&mut self) -> TYPE7_W { TYPE7_W { w: self } } #[doc = "Bit 8 - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control TYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt. If the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur. The de-bounce function is valid only for edge triggered interrupt. If the interrupt mode is level triggered, the de-bounce enable bit is ignored. 
Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn type8(&mut self) -> TYPE8_W { TYPE8_W { w: self } } #[doc = "Bit 9 - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control TYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt. If the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur. The de-bounce function is valid only for edge triggered interrupt. If the interrupt mode is level triggered, the de-bounce enable bit is ignored. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn type9(&mut self) -> TYPE9_W { TYPE9_W { w: self } } #[doc = "Bit 10 - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control TYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt. If the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur. The de-bounce function is valid only for edge triggered interrupt. If the interrupt mode is level triggered, the de-bounce enable bit is ignored. 
Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn type10(&mut self) -> TYPE10_W { TYPE10_W { w: self } } #[doc = "Bit 11 - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control TYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt. If the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur. The de-bounce function is valid only for edge triggered interrupt. If the interrupt mode is level triggered, the de-bounce enable bit is ignored. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn type11(&mut self) -> TYPE11_W { TYPE11_W { w: self } } #[doc = "Bit 12 - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control TYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt. If the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur. The de-bounce function is valid only for edge triggered interrupt. If the interrupt mode is level triggered, the de-bounce enable bit is ignored. 
Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn type12(&mut self) -> TYPE12_W { TYPE12_W { w: self } } #[doc = "Bit 13 - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control TYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt. If the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur. The de-bounce function is valid only for edge triggered interrupt. If the interrupt mode is level triggered, the de-bounce enable bit is ignored. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn type13(&mut self) -> TYPE13_W { TYPE13_W { w: self } } #[doc = "Bit 14 - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control TYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt. If the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur. The de-bounce function is valid only for edge triggered interrupt. If the interrupt mode is level triggered, the de-bounce enable bit is ignored. 
Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn type14(&mut self) -> TYPE14_W { TYPE14_W { w: self } } #[doc = "Bit 15 - Port A-H Pin\\[n\\] Edge or Level Detection Interrupt Trigger Type Control TYPE (Px_INTTYPE\\[n\\]) bit is used to control the triggered interrupt is by level trigger or by edge trigger. If the interrupt is by edge trigger, the trigger source can be controlled by de-bounce. If the interrupt is by level trigger, the input source is sampled by one HCLK clock and generates the interrupt. If the pin is set as the level trigger interrupt, only one level can be set on the registers RHIEN (Px_INTEN\\[n+16\\])/FLIEN (Px_INTEN\\[n\\]). If both levels to trigger interrupt are set, the setting is ignored and no interrupt will occur. The de-bounce function is valid only for edge triggered interrupt. If the interrupt mode is level triggered, the de-bounce enable bit is ignored. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn type15(&mut self) -> TYPE15_W { TYPE15_W { w: self } } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "PA Interrupt Trigger Type Control\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). 
See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [pa_inttype](index.html) module"] pub struct PA_INTTYPE_SPEC; impl crate::RegisterSpec for PA_INTTYPE_SPEC { type Ux = u32; } #[doc = "`read()` method returns [pa_inttype::R](R) reader structure"] impl crate::Readable for PA_INTTYPE_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [pa_inttype::W](W) writer structure"] impl crate::Writable for PA_INTTYPE_SPEC { type Writer = W; } #[doc = "`reset()` method sets PA_INTTYPE to value 0"] impl crate::Resettable for PA_INTTYPE_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
67.104587
849
0.678424
aba13943c24fe498249109feb350c311b8ae582c
8,056
// Copyright 2017 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! retry //! //! this is an abstraction over the regular http get request. it allows you to //! have a request retry until it succeeds, with a configurable number of //! of attempts and a backoff strategy. It also takes care of automatically //! deserializing responses and handles headers in a sane way. use std::borrow::Cow; use std::io::Read; use std::time::Duration; use reqwest::header; use reqwest::{self, Method, Request}; use serde; use serde_json; use serde_xml_rs; use crate::errors::*; use crate::retry::Retry; use crate::retry::raw_deserializer; pub trait Deserializer { fn deserialize<T, R>(&self, r: R) -> Result<T> where T: for<'de> serde::Deserialize<'de>, R: Read; fn content_type(&self) -> header::HeaderValue; } #[derive(Debug, Clone, Copy)] pub struct Xml; impl Deserializer for Xml { fn deserialize<T, R>(&self, r: R) -> Result<T> where T: for<'de> serde::Deserialize<'de>, R: Read, { serde_xml_rs::de::from_reader(r).chain_err(|| "failed xml deserialization") } fn content_type(&self) -> header::HeaderValue { header::HeaderValue::from_static("text/xml; charset=utf-8") } } #[derive(Debug, Clone, Copy)] pub struct Json; impl Deserializer for Json { fn deserialize<T, R>(&self, r: R) -> Result<T> where T: serde::de::DeserializeOwned, R: Read, { serde_json::from_reader(r).chain_err(|| "failed json deserialization") } fn content_type(&self) -> header::HeaderValue { 
header::HeaderValue::from_static("application/json") } } #[derive(Debug, Clone, Copy)] pub struct Raw; impl Deserializer for Raw { fn deserialize<T, R>(&self, r: R) -> Result<T> where T: for<'de> serde::Deserialize<'de>, R: Read, { raw_deserializer::from_reader(r).chain_err(|| "failed raw deserialization") } fn content_type(&self) -> header::HeaderValue { header::HeaderValue::from_static("text/plain; charset=utf-8") } } #[derive(Debug, Clone)] pub struct Client { client: reqwest::Client, headers: header::HeaderMap, retry: Retry, return_on_404: bool, } impl Client { pub fn try_new() -> Result<Self> { let client = reqwest::Client::builder() .build() .chain_err(|| "failed to initialize client")?; Ok(Client { client, headers: header::HeaderMap::new(), retry: Retry::new(), return_on_404: false, }) } pub fn header(mut self, k: header::HeaderName, v: header::HeaderValue) -> Self { self.headers.append(k, v); self } pub fn initial_backoff(mut self, initial_backoff: Duration) -> Self { self.retry = self.retry.initial_backoff(initial_backoff); self } pub fn max_backoff(mut self, max_backoff: Duration) -> Self { self.retry = self.retry.max_backoff(max_backoff); self } /// max_attempts will panic if the argument is greater than 500 pub fn max_attempts(mut self, max_attempts: u32) -> Self { self.retry = self.retry.max_attempts(max_attempts); self } pub fn return_on_404(mut self, return_on_404: bool) -> Self { self.return_on_404 = return_on_404; self } pub fn get<D>(&self, d: D, url: String) -> RequestBuilder<D> where D: Deserializer, { RequestBuilder { url, body: None, d, client: self.client.clone(), headers: self.headers.clone(), retry: self.retry.clone(), return_on_404: self.return_on_404, } } pub fn post<D>(&self, d: D, url: String, body: Option<Cow<str>>) -> RequestBuilder<D> where D: Deserializer, { RequestBuilder { url, body: body.map(Cow::into_owned), d, client: self.client.clone(), headers: self.headers.clone(), retry: self.retry.clone(), return_on_404: 
self.return_on_404, } } } pub struct RequestBuilder<D> where D: Deserializer, { url: String, body: Option<String>, d: D, client: reqwest::Client, headers: header::HeaderMap, retry: Retry, return_on_404: bool, } impl<D> RequestBuilder<D> where D: Deserializer, { pub fn header(mut self, k: header::HeaderName, v: header::HeaderValue) -> Self { self.headers.append(k, v); self } pub fn send<T>(self) -> Result<Option<T>> where T: for<'de> serde::Deserialize<'de>, { let url = reqwest::Url::parse(self.url.as_str()).chain_err(|| "failed to parse uri")?; let mut req = Request::new(Method::GET, url); req.headers_mut().extend(self.headers.clone().into_iter()); self.retry.clone().retry(|attempt| { info!("Fetching {}: Attempt #{}", req.url(), attempt + 1); self.dispatch_request(&req) }) } pub fn dispatch_post(self) -> Result<reqwest::StatusCode> { let url = reqwest::Url::parse(self.url.as_str()).chain_err(|| "failed to parse uri")?; self.retry.clone().retry(|attempt| { let mut builder = reqwest::Client::new() .post(url.clone()) .headers(self.headers.clone()) .header(header::CONTENT_TYPE, self.d.content_type()); if let Some(ref content) = self.body { builder = builder.body(content.clone()); }; let req = builder .build() .chain_err(|| "failed to build POST request")?; info!("Posting {}: Attempt #{}", req.url(), attempt + 1); let status = self .client .execute(req) .chain_err(|| "failed to POST request")? 
.status(); if status.is_success() { Ok(status) } else { Err(format!("POST failed: {}", status).into()) } }) } fn dispatch_request<T>(&self, req: &Request) -> Result<Option<T>> where T: for<'de> serde::Deserialize<'de>, { match self.client.execute(clone_request(req)) { Ok(resp) => match (resp.status(), self.return_on_404) { (reqwest::StatusCode::OK, _) => { info!("Fetch successful"); self.d .deserialize(resp) .map(Some) .chain_err(|| "failed to deserialize data") } (reqwest::StatusCode::NOT_FOUND, true) => { info!("Fetch failed with 404: resource not found"); Ok(None) } (s, _) => { info!("Failed to fetch: {}", s); Err(format!("failed to fetch: {}", s).into()) } }, Err(e) => { info!("Failed to fetch: {}", e); Err(Error::with_chain(e, "failed to fetch")) } } } } /// Reqwests Request struct doesn't implement `Clone`, /// so we have to do it here. fn clone_request(req: &Request) -> Request { let mut newreq = Request::new(req.method().clone(), req.url().clone()); newreq .headers_mut() .extend(req.headers().clone().into_iter()); newreq }
29.083032
94
0.561817
8f4d6aa1d61f73515f36f61259f1ee0f414e339d
101
// Entity submodule plus re-exports of its public types, so callers can use
// `BotEntity` / `UserEntity` directly from this module.
mod entities;

pub use entities::{BotEntity, UserEntity};
20.2
43
0.80198
abfefaed099507e56c59b66573d58b1bcafb8e93
2,527
// Copyright 2018 The Exonum Team // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use criterion::{AxisScale, Bencher, Criterion, ParameterizedBenchmark, PlotConfiguration, Throughput}; use exonum::crypto::{gen_keypair, hash, sign, verify}; use num::pow::pow; fn bench_sign(b: &mut Bencher, &count: &usize) { let (_, secret_key) = gen_keypair(); let data = (0..count).map(|x| (x % 255) as u8).collect::<Vec<u8>>(); b.iter(|| sign(&data, &secret_key)) } fn bench_verify(b: &mut Bencher, &count: &usize) { let (public_key, secret_key) = gen_keypair(); let data = (0..count).map(|x| (x % 255) as u8).collect::<Vec<u8>>(); let signature = sign(&data, &secret_key); b.iter(|| verify(&signature, &data, &public_key)) } fn bench_hash(b: &mut Bencher, &count: &usize) { let data = (0..count).map(|x| (x % 255) as u8).collect::<Vec<u8>>(); b.iter(|| hash(&data)) } pub fn bench_crypto(c: &mut Criterion) { ::exonum::crypto::init(); // Testing crypto functions with different data sizes. // // 2^6 = 64 - is relatively small message, and our starting test point. // 2^16 = 65536 - is relatively big message, and our end point. 
c.bench( "hash", ParameterizedBenchmark::new("hash", bench_hash, (6..16).map(|i| pow(2, i))) .throughput(|s| Throughput::Bytes(*s as u32)) .plot_config(PlotConfiguration::default().summary_scale(AxisScale::Logarithmic)), ); c.bench( "sign", ParameterizedBenchmark::new("sign", bench_sign, (6..16).map(|i| pow(2, i))) .throughput(|s| Throughput::Bytes(*s as u32)) .plot_config(PlotConfiguration::default().summary_scale(AxisScale::Logarithmic)), ); c.bench( "verify", ParameterizedBenchmark::new("verify", bench_verify, (6..16).map(|i| pow(2, i))) .throughput(|s| Throughput::Bytes(*s as u32)) .plot_config(PlotConfiguration::default().summary_scale(AxisScale::Logarithmic)), ); }
38.876923
93
0.643055
5dd60b1754dafe2f9808fab34514dbc00c658427
19,258
/// Location referencing as defined in ISO 14819-3:2013 use crate::location::exchange::{ CountryCode, CountryId, ExtendedCountryCode, LanguageId, LocationCode, NameId, TableCode, }; use crate::Error; use log::{debug, trace, warn}; use std::collections::hash_map::Entry; use std::collections::HashMap; use std::fmt::{Debug, Display, Formatter, Write}; use std::io::ErrorKind; use std::path::{Path, PathBuf}; use std::str::FromStr; use std::sync::Arc; mod exchange; #[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)] pub enum Category { Area, Linear, Point, } impl From<Category> for char { fn from(category: Category) -> Self { match category { Category::Area => 'A', Category::Linear => 'L', Category::Point => 'P', } } } impl Display for Category { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { f.write_char((*self).into()) } } impl FromStr for Category { type Err = Error; fn from_str(s: &str) -> Result<Self, Self::Err> { Ok(match s { "A" => Self::Area, "L" => Self::Linear, "P" => Self::Point, _ => return Err("Invalid category".into()), }) } } #[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)] pub struct Subtype(Category, u8, u8); impl Subtype { pub fn category(self) -> Category { self.0 } pub fn r#type(self) -> u8 { self.1 } pub fn subtype(self) -> u8 { self.2 } } impl Display for Subtype { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { write!(f, "{}{}.{}", self.0, self.1, self.2) } } #[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)] pub struct QualifiedCode(CountryCode, TableCode, LocationCode); impl QualifiedCode { pub fn new( country: CountryCode, table: TableCode, location: LocationCode, ) -> Result<Self, Error> { if country > 0xF { return Err("Invalid country code".into()); } Ok(Self(country, table, location)) } } impl From<ExtendedCode> for QualifiedCode { fn from(code: ExtendedCode) -> Self { Self(code.1, code.2, code.3) } } #[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)] pub struct ExtendedCode(ExtendedCountryCode, CountryCode, TableCode, 
LocationCode); impl ExtendedCode { pub fn new( extended_country: ExtendedCountryCode, country: CountryCode, table: TableCode, location: LocationCode, ) -> Result<Self, Error> { if country > 0xF { return Err("Invalid country code".into()); } Ok(Self(extended_country, country, table, location)) } } pub trait Area { type Error; fn code(&self) -> ExtendedCode; fn subtype(&self) -> Subtype; fn area(&self) -> Result<Option<Self>, Self::Error> where Self: Sized; } pub trait Point { type Area; type Error; fn code(&self) -> ExtendedCode; fn subtype(&self) -> Subtype; fn administrative_area(&self) -> Result<Option<Self::Area>, Self::Error>; fn other_area(&self) -> Result<Option<Self::Area>, Self::Error>; fn latitude(&self) -> f32; fn longitude(&self) -> f32; } #[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)] pub enum Location<A, P, R, S> { Area(A), Point(P), Route(R), Segment(S), } impl<A, P, R, S> Location<A, P, R, S> { pub fn category(&self) -> Category { match self { Self::Area(_) => Category::Area, Self::Point(_) => Category::Point, Self::Route(_) => Category::Linear, Self::Segment(_) => Category::Linear, } } } impl<A, P, R, S> Location<A, P, R, S> where A: Area, P: Point, { pub fn code(&self) -> ExtendedCode { match self { Self::Area(area) => area.code(), Self::Point(point) => point.code(), _ => todo!(), } } pub fn subtype(&self) -> Subtype { match self { Self::Area(area) => area.subtype(), Self::Point(point) => point.subtype(), _ => todo!(), } } } pub trait Provider<Q, R> { fn query(&self, query: Q) -> R; } #[derive(Clone, Hash, Eq, PartialEq, Debug)] pub struct AreaRow { pub code: ExtendedCode, pub subtype: Subtype, name: NameId, area: Option<LocationCode>, } #[derive(Clone, Hash, Eq, PartialEq, Debug)] pub struct PointRow { pub code: ExtendedCode, pub subtype: Subtype, pub junction: Option<String>, road_name: Option<NameId>, name1: Option<NameId>, name2: Option<NameId>, administrative_area: Option<LocationCode>, other_area: Option<LocationCode>, segment: 
Option<LocationCode>, road: Option<LocationCode>, pub in_pos: bool, pub in_neg: bool, pub out_pos: bool, pub out_neg: bool, pub present_pos: bool, pub present_neg: bool, pub diversion_pos: Option<String>, pub diversion_neg: Option<String>, pub x: i32, pub y: i32, interrupted_road: Option<LocationCode>, pub urban: bool, pub offset_neg: Option<LocationCode>, pub offset_pos: Option<LocationCode>, } impl PointRow { fn longitude(&self) -> f32 { self.x as f32 / 100000.0 } fn latitude(&self) -> f32 { self.y as f32 / 100000.0 } } pub struct DefaultProvider { tables: HashMap<(u8, u8), Arc<Tables>>, } impl DefaultProvider { pub fn new() -> Self { Self { tables: HashMap::new(), } } pub fn add(&mut self, tables: Tables) -> &mut Self { self.tables .insert((tables.country, tables.table), Arc::new(tables)); self } pub fn update(&mut self, tables: Tables) -> &mut Self { match self.tables.entry((tables.country, tables.table)) { Entry::Occupied(mut entry) => { let existing = entry.get(); if existing.major < tables.major || (existing.major == tables.major && existing.minor < tables.minor) { entry.insert(Arc::new(tables)); } } Entry::Vacant(entry) => { entry.insert(Arc::new(tables)); } } self } pub fn areas(&self) -> impl Iterator<Item = AreaRef> + '_ { self.tables .values() .map(|tables| { tables .administrative_areas .iter() .map(move |(country, area)| { AreaRef(Arc::clone(&tables), country.0, Arc::clone(&area)) }) .chain(tables.other_areas.iter().map(move |(country, area)| { AreaRef(Arc::clone(&tables), country.0, Arc::clone(&area)) })) }) .flatten() } pub fn points(&self) -> impl Iterator<Item = PointRef> + '_ { self.tables .values() .map(|tables| { tables.points.iter().map(move |(country, point)| { PointRef(Arc::clone(&tables), country.0, Arc::clone(&point)) }) }) .flatten() } fn query_by_code(&self, code: QualifiedCode) -> Option<Location<AreaRef, PointRef, (), ()>> { for tables in self.tables.values() { let country = if let Some(country) = tables.country_ids.get(&code.0) { 
*country } else { continue; }; let key = Tables::into_key(code, country, None); trace!("Search for {:?}", key); if let Some(result) = tables.administrative_areas.get(&key) { return Some(Location::Area(AreaRef( Arc::clone(&tables), country, Arc::clone(result), ))); } if let Some(result) = tables.other_areas.get(&key) { return Some(Location::Area(AreaRef( Arc::clone(&tables), country, Arc::clone(result), ))); } if let Some(result) = tables.points.get(&key) { return Some(Location::Point(PointRef( Arc::clone(&tables), country, Arc::clone(result), ))); } } None } } impl From<Tables> for DefaultProvider { fn from(tables: Tables) -> Self { Some(tables).into() } } impl<I> From<I> for DefaultProvider where I: IntoIterator<Item = Tables>, { fn from(iter: I) -> Self { let mut result = DefaultProvider::new(); for tables in iter { result.update(tables); } result } } impl Provider<QualifiedCode, Option<Location<AreaRef, PointRef, (), ()>>> for DefaultProvider { fn query(&self, code: QualifiedCode) -> Option<Location<AreaRef, PointRef, (), ()>> { self.query_by_code(code) } } impl Provider<ExtendedCode, Option<Location<AreaRef, PointRef, (), ()>>> for DefaultProvider { fn query(&self, code: ExtendedCode) -> Option<Location<AreaRef, PointRef, (), ()>> { self.query_by_code(code.into()) } } #[derive(Clone)] pub struct AreaRef(Arc<Tables>, CountryId, Arc<AreaRow>); impl AreaRef { pub fn name(&self) -> Option<&str> { self.0.names.get(&self.2.name).map(|s| s.2.as_ref()) } } impl Area for AreaRef { type Error = Error; fn code(&self) -> ExtendedCode { self.as_ref().code } fn subtype(&self) -> Subtype { self.as_ref().subtype } fn area(&self) -> Result<Option<Self>, Self::Error> { if let Some(area) = self.2.area { let area = self .0 .administrative_areas .get(&Tables::into_key(self.2.code, self.1, area)) .ok_or("Invalid administrative area")?; Ok(Some(Self(Arc::clone(&self.0), self.1, Arc::clone(&area)))) } else { Ok(None) } } } impl AsRef<AreaRow> for AreaRef { fn as_ref(&self) -> &AreaRow 
{ &self.2 } } impl Debug for AreaRef { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { f.debug_tuple("AreaRef") .field(&Arc::as_ptr(&self.0)) .field(&self.1) .field(&self.2) .finish() } } #[derive(Clone)] pub struct PointRef(Arc<Tables>, CountryId, Arc<PointRow>); impl Point for PointRef { type Area = AreaRef; type Error = Error; fn code(&self) -> ExtendedCode { self.as_ref().code } fn subtype(&self) -> Subtype { self.as_ref().subtype } fn administrative_area(&self) -> Result<Option<AreaRef>, Error> { if let Some(area) = self.2.administrative_area { let area = self .0 .administrative_areas .get(&Tables::into_key(self.2.code, self.1, area)) .ok_or("Invalid administrative area")?; Ok(Some(AreaRef( Arc::clone(&self.0), self.1, Arc::clone(&area), ))) } else { Ok(None) } } fn other_area(&self) -> Result<Option<AreaRef>, Error> { if let Some(area) = self.2.other_area { let area = self .0 .other_areas .get(&Tables::into_key(self.2.code, self.1, area)) .ok_or("Invalid other area")?; Ok(Some(AreaRef( Arc::clone(&self.0), self.1, Arc::clone(&area), ))) } else { Ok(None) } } fn latitude(&self) -> f32 { self.as_ref().latitude() } fn longitude(&self) -> f32 { self.as_ref().longitude() } } impl AsRef<PointRow> for PointRef { fn as_ref(&self) -> &PointRow { &self.2 } } impl Debug for PointRef { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { f.debug_tuple("PointRef") .field(&Arc::as_ptr(&self.0)) .field(&self.1) .field(&self.2) .finish() } } #[derive(Debug)] pub struct Tables { extended_country: u8, country: u8, table: u8, major: u8, minor: u8, administrative_areas: HashMap<(CountryId, TableCode, LocationCode), Arc<AreaRow>>, countries: HashMap<CountryId, (ExtendedCountryCode, CountryCode, String)>, names: HashMap<NameId, (CountryId, LanguageId, String)>, languages: HashMap<(CountryId, LanguageId), String>, other_areas: HashMap<(CountryId, TableCode, LocationCode), Arc<AreaRow>>, points: HashMap<(CountryId, TableCode, LocationCode), Arc<PointRow>>, 
country_ids: HashMap<CountryCode, CountryId>, extended_country_ids: HashMap<(ExtendedCountryCode, CountryCode), CountryId>, } impl Tables { pub fn load(directory: impl AsRef<Path>) -> Result<Self, Error> { Self::load_with(|table| { let mut path = PathBuf::from(directory.as_ref()); path.push(table.as_code()); path.set_extension("DAT"); debug!("Read {:?}", path); match std::fs::read(path) { Ok(data) => Ok(Some(data)), Err(err) if err.kind() == ErrorKind::NotFound => Ok(None), Err(err) => Err(err), } }) } pub fn load_with<F, T, E>(mut loader: F) -> Result<Self, Error> where F: FnMut(Table) -> Result<Option<T>, E>, E: Into<Error>, T: AsRef<[u8]>, { let charset = loader(Table::MetaInformation) .map_err(|err| err.into())? .and_then(|data| exchange::parse_meta(data.as_ref())) .unwrap_or_default(); let mut loader = |table| { loader(table) .map(|result| result.map(|data| charset.decode_owned(data))) .map_err(|err| err.into()) }; let (areas, _linears, points) = if let Some(table) = loader(Table::Classes)? 
{ let classes = exchange::parse_classes(&table)?; ( classes.contains(&Category::Area), classes.contains(&Category::Linear), classes.contains(&Category::Point), ) } else { (true, true, true) }; let countries = loader(Table::Countries)?.ok_or("Missing countries table")?; let countries = exchange::parse_countries(&countries)?; let country_codes = |id| countries.get(&id).map(|c| (c.0, c.1)); let country_ids = countries .iter() .map(|(id, (_, code, _))| (*code, *id)) .collect::<HashMap<_, _>>(); let extended_country_ids = countries .iter() .map(|(id, (extended, code, _))| ((*extended, *code), *id)) .collect::<HashMap<_, _>>(); if country_ids.len() < countries.len() || extended_country_ids.len() < countries.len() { warn!("Multiple countries use the same country code"); } let datasets = loader(Table::LocationDatasets)?.ok_or("Missing datasets table")?; let dataset = exchange::parse_datasets(&datasets)?; let dataset_country = countries .get(&dataset.0) .ok_or("Invalid country in dataset")?; let languages = loader(Table::Languages)?.ok_or("Missing languages table")?; let languages = exchange::parse_languages(&languages)?; let names = loader(Table::Names)?.ok_or("Missing names table")?; let names = exchange::parse_names(&names)?; let (administrative_areas, other_areas) = if areas { let administrative_areas = loader(Table::AdministrativeAreas)?.ok_or("Missing administrative areas table")?; let administrative_areas = exchange::parse_areas(&administrative_areas, country_codes)?; let other_areas = loader(Table::OtherAreas)?.ok_or("Missing other areas table")?; let other_areas = exchange::parse_areas(&other_areas, country_codes)?; (administrative_areas, other_areas) } else { (HashMap::new(), HashMap::new()) }; let points = if points { let points = loader(Table::Points)?.ok_or("Missing points table")?; let mut points = exchange::parse_points(&points, country_codes)?; if let Some(poffsets) = loader(Table::PointOffsets)? 
{ exchange::parse_poffsets_into(&poffsets, &mut points)?; } points } else { HashMap::new() }; Ok(Self { extended_country: dataset_country.0, country: dataset_country.1, table: dataset.1, major: dataset.2 .0, minor: dataset.2 .1, administrative_areas, countries, languages, names, other_areas, points, country_ids, extended_country_ids, }) } fn into_key( qualified: impl Into<QualifiedCode>, country: CountryId, code: impl Into<Option<LocationCode>>, ) -> (CountryId, TableCode, LocationCode) { let qualified = qualified.into(); (country, qualified.1, code.into().unwrap_or(qualified.2)) } } #[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)] pub enum Table { AdministrativeAreas, Classes, Countries, EuroRoadCountries, EuroRoadNumbers, Intersections, Languages, LocationCodes, LocationDatasets, Names, NameTranslations, OtherAreas, PointOffsets, Points, Roads, EuroRoadSegments, Segments, SegmentOffsets, Subtypes, SubtypeTranslations, Types, RoadNetworkLevelTypes, MetaInformation, } impl Table { fn as_code(self) -> &'static str { match self { Self::AdministrativeAreas => "ADMINISTRATIVEAREA", Self::Classes => "CLASSES", Self::Countries => "COUNTRIES", Self::EuroRoadCountries => "ERNO_BELONGS_TO_CO", Self::EuroRoadNumbers => "EU ROROADNO", Self::Intersections => "INTERSECTIONS", Self::Languages => "LANGUAGES", Self::LocationCodes => "LOCATIONCODES", Self::LocationDatasets => "LOCATIONDATASETS", Self::Names => "NAMES", Self::NameTranslations => "NAMETRANSLATIONS", Self::OtherAreas => "OTHERAREAS", Self::PointOffsets => "POFFSETS", Self::Points => "POINTS", Self::Roads => "ROADS", Self::EuroRoadSegments => "SEG_HAS_ERNO", Self::Segments => "SEGMENTS", Self::SegmentOffsets => "SOFFSETS", Self::Subtypes => "SUBTYPES", Self::SubtypeTranslations => "SUBTYPETRANSLATION", Self::Types => "TYPES", Self::RoadNetworkLevelTypes => "ROAD_NETWORK_LEVEL_TYPES", Self::MetaInformation => "README", } } }
28.743284
100
0.539828
e502735452735d8dca8264ae20b834953a2acdfd
24,472
use errors::{Diagnostic, DiagnosticBuilder}; use std::panic; use proc_macro::bridge::{server, TokenTree}; use proc_macro::{Delimiter, Level, LineColumn, Spacing}; use rustc_data_structures::sync::Lrc; use std::ascii; use std::ops::Bound; use syntax::ast; use syntax::ext::base::ExtCtxt; use syntax::parse::lexer::comments; use syntax::parse::{self, token, ParseSess}; use syntax::tokenstream::{self, DelimSpan, IsJoint::*, TokenStream, TreeAndJoint}; use syntax_pos::hygiene::{SyntaxContext, Transparency}; use syntax_pos::symbol::{kw, sym, Symbol}; use syntax_pos::{BytePos, FileName, MultiSpan, Pos, SourceFile, Span}; trait FromInternal<T> { fn from_internal(x: T) -> Self; } trait ToInternal<T> { fn to_internal(self) -> T; } impl FromInternal<token::DelimToken> for Delimiter { fn from_internal(delim: token::DelimToken) -> Delimiter { match delim { token::Paren => Delimiter::Parenthesis, token::Brace => Delimiter::Brace, token::Bracket => Delimiter::Bracket, token::NoDelim => Delimiter::None, } } } impl ToInternal<token::DelimToken> for Delimiter { fn to_internal(self) -> token::DelimToken { match self { Delimiter::Parenthesis => token::Paren, Delimiter::Brace => token::Brace, Delimiter::Bracket => token::Bracket, Delimiter::None => token::NoDelim, } } } impl FromInternal<(TreeAndJoint, &'_ ParseSess, &'_ mut Vec<Self>)> for TokenTree<Group, Punct, Ident, Literal> { fn from_internal(((tree, is_joint), sess, stack): (TreeAndJoint, &ParseSess, &mut Vec<Self>)) -> Self { use syntax::parse::token::*; let joint = is_joint == Joint; let Token { kind, span } = match tree { tokenstream::TokenTree::Delimited(span, delim, tts) => { let delimiter = Delimiter::from_internal(delim); return TokenTree::Group(Group { delimiter, stream: tts.into(), span, }); } tokenstream::TokenTree::Token(token) => token, }; macro_rules! tt { ($ty:ident { $($field:ident $(: $value:expr)*),+ $(,)? 
}) => ( TokenTree::$ty(self::$ty { $($field $(: $value)*,)+ span, }) ); ($ty:ident::$method:ident($($value:expr),*)) => ( TokenTree::$ty(self::$ty::$method($($value,)* span)) ); } macro_rules! op { ($a:expr) => { tt!(Punct::new($a, joint)) }; ($a:expr, $b:expr) => {{ stack.push(tt!(Punct::new($b, joint))); tt!(Punct::new($a, true)) }}; ($a:expr, $b:expr, $c:expr) => {{ stack.push(tt!(Punct::new($c, joint))); stack.push(tt!(Punct::new($b, true))); tt!(Punct::new($a, true)) }}; } match kind { Eq => op!('='), Lt => op!('<'), Le => op!('<', '='), EqEq => op!('=', '='), Ne => op!('!', '='), Ge => op!('>', '='), Gt => op!('>'), AndAnd => op!('&', '&'), OrOr => op!('|', '|'), Not => op!('!'), Tilde => op!('~'), BinOp(Plus) => op!('+'), BinOp(Minus) => op!('-'), BinOp(Star) => op!('*'), BinOp(Slash) => op!('/'), BinOp(Percent) => op!('%'), BinOp(Caret) => op!('^'), BinOp(And) => op!('&'), BinOp(Or) => op!('|'), BinOp(Shl) => op!('<', '<'), BinOp(Shr) => op!('>', '>'), BinOpEq(Plus) => op!('+', '='), BinOpEq(Minus) => op!('-', '='), BinOpEq(Star) => op!('*', '='), BinOpEq(Slash) => op!('/', '='), BinOpEq(Percent) => op!('%', '='), BinOpEq(Caret) => op!('^', '='), BinOpEq(And) => op!('&', '='), BinOpEq(Or) => op!('|', '='), BinOpEq(Shl) => op!('<', '<', '='), BinOpEq(Shr) => op!('>', '>', '='), At => op!('@'), Dot => op!('.'), DotDot => op!('.', '.'), DotDotDot => op!('.', '.', '.'), DotDotEq => op!('.', '.', '='), Comma => op!(','), Semi => op!(';'), Colon => op!(':'), ModSep => op!(':', ':'), RArrow => op!('-', '>'), LArrow => op!('<', '-'), FatArrow => op!('=', '>'), Pound => op!('#'), Dollar => op!('$'), Question => op!('?'), SingleQuote => op!('\''), Ident(name, false) if name == kw::DollarCrate => tt!(Ident::dollar_crate()), Ident(name, is_raw) => tt!(Ident::new(name, is_raw)), Lifetime(name) => { let ident = ast::Ident::new(name, span).without_first_quote(); stack.push(tt!(Ident::new(ident.name, false))); tt!(Punct::new('\'', true)) } Literal(lit) => tt!(Literal { lit 
}), DocComment(c) => { let style = comments::doc_comment_style(&c.as_str()); let stripped = comments::strip_doc_comment_decoration(&c.as_str()); let mut escaped = String::new(); for ch in stripped.chars() { escaped.extend(ch.escape_debug()); } let stream = vec![ Ident(sym::doc, false), Eq, TokenKind::lit(token::Str, Symbol::intern(&escaped), None), ] .into_iter() .map(|kind| tokenstream::TokenTree::token(kind, span)) .collect(); stack.push(TokenTree::Group(Group { delimiter: Delimiter::Bracket, stream, span: DelimSpan::from_single(span), })); if style == ast::AttrStyle::Inner { stack.push(tt!(Punct::new('!', false))); } tt!(Punct::new('#', false)) } Interpolated(nt) => { let stream = nt.to_tokenstream(sess, span); TokenTree::Group(Group { delimiter: Delimiter::None, stream, span: DelimSpan::from_single(span), }) } OpenDelim(..) | CloseDelim(..) => unreachable!(), Whitespace | Comment | Shebang(..) | Eof => unreachable!(), } } } impl ToInternal<TokenStream> for TokenTree<Group, Punct, Ident, Literal> { fn to_internal(self) -> TokenStream { use syntax::parse::token::*; let (ch, joint, span) = match self { TokenTree::Punct(Punct { ch, joint, span }) => (ch, joint, span), TokenTree::Group(Group { delimiter, stream, span, }) => { return tokenstream::TokenTree::Delimited( span, delimiter.to_internal(), stream.into(), ) .into(); } TokenTree::Ident(self::Ident { sym, is_raw, span }) => { return tokenstream::TokenTree::token(Ident(sym, is_raw), span).into(); } TokenTree::Literal(self::Literal { lit: token::Lit { kind: token::Integer, symbol, suffix }, span, }) if symbol.as_str().starts_with("-") => { let minus = BinOp(BinOpToken::Minus); let symbol = Symbol::intern(&symbol.as_str()[1..]); let integer = TokenKind::lit(token::Integer, symbol, suffix); let a = tokenstream::TokenTree::token(minus, span); let b = tokenstream::TokenTree::token(integer, span); return vec![a, b].into_iter().collect(); } TokenTree::Literal(self::Literal { lit: token::Lit { kind: token::Float, 
symbol, suffix }, span, }) if symbol.as_str().starts_with("-") => { let minus = BinOp(BinOpToken::Minus); let symbol = Symbol::intern(&symbol.as_str()[1..]); let float = TokenKind::lit(token::Float, symbol, suffix); let a = tokenstream::TokenTree::token(minus, span); let b = tokenstream::TokenTree::token(float, span); return vec![a, b].into_iter().collect(); } TokenTree::Literal(self::Literal { lit, span }) => { return tokenstream::TokenTree::token(Literal(lit), span).into() } }; let kind = match ch { '=' => Eq, '<' => Lt, '>' => Gt, '!' => Not, '~' => Tilde, '+' => BinOp(Plus), '-' => BinOp(Minus), '*' => BinOp(Star), '/' => BinOp(Slash), '%' => BinOp(Percent), '^' => BinOp(Caret), '&' => BinOp(And), '|' => BinOp(Or), '@' => At, '.' => Dot, ',' => Comma, ';' => Semi, ':' => Colon, '#' => Pound, '$' => Dollar, '?' => Question, '\'' => SingleQuote, _ => unreachable!(), }; let tree = tokenstream::TokenTree::token(kind, span); TokenStream::new(vec![(tree, if joint { Joint } else { NonJoint })]) } } impl ToInternal<errors::Level> for Level { fn to_internal(self) -> errors::Level { match self { Level::Error => errors::Level::Error, Level::Warning => errors::Level::Warning, Level::Note => errors::Level::Note, Level::Help => errors::Level::Help, _ => unreachable!("unknown proc_macro::Level variant: {:?}", self), } } } #[derive(Clone)] pub struct TokenStreamIter { cursor: tokenstream::Cursor, stack: Vec<TokenTree<Group, Punct, Ident, Literal>>, } #[derive(Clone)] pub struct Group { delimiter: Delimiter, stream: TokenStream, span: DelimSpan, } #[derive(Copy, Clone, PartialEq, Eq, Hash)] pub struct Punct { ch: char, // NB. not using `Spacing` here because it doesn't implement `Hash`. 
joint: bool, span: Span, } impl Punct { fn new(ch: char, joint: bool, span: Span) -> Punct { const LEGAL_CHARS: &[char] = &['=', '<', '>', '!', '~', '+', '-', '*', '/', '%', '^', '&', '|', '@', '.', ',', ';', ':', '#', '$', '?', '\'']; if !LEGAL_CHARS.contains(&ch) { panic!("unsupported character `{:?}`", ch) } Punct { ch, joint, span } } } #[derive(Copy, Clone, PartialEq, Eq, Hash)] pub struct Ident { sym: Symbol, is_raw: bool, span: Span, } impl Ident { fn is_valid(string: &str) -> bool { let mut chars = string.chars(); if let Some(start) = chars.next() { (start == '_' || start.is_xid_start()) && chars.all(|cont| cont == '_' || cont.is_xid_continue()) } else { false } } fn new(sym: Symbol, is_raw: bool, span: Span) -> Ident { let string = sym.as_str(); if !Self::is_valid(&string) { panic!("`{:?}` is not a valid identifier", string) } // Get rid of gensyms to conservatively check rawness on the string contents only. if is_raw && !sym.as_interned_str().as_symbol().can_be_raw() { panic!("`{}` cannot be a raw identifier", string); } Ident { sym, is_raw, span } } fn dollar_crate(span: Span) -> Ident { // `$crate` is accepted as an ident only if it comes from the compiler. Ident { sym: kw::DollarCrate, is_raw: false, span } } } // FIXME(eddyb) `Literal` should not expose internal `Debug` impls. #[derive(Clone, Debug)] pub struct Literal { lit: token::Lit, span: Span, } pub(crate) struct Rustc<'a> { sess: &'a ParseSess, def_site: Span, call_site: Span, } impl<'a> Rustc<'a> { pub fn new(cx: &'a ExtCtxt<'_>) -> Self { // No way to determine def location for a proc macro right now, so use call location. 
let location = cx.current_expansion.mark.expn_info().unwrap().call_site; let to_span = |transparency| { location.with_ctxt( SyntaxContext::empty() .apply_mark_with_transparency(cx.current_expansion.mark, transparency), ) }; Rustc { sess: cx.parse_sess, def_site: to_span(Transparency::Opaque), call_site: to_span(Transparency::Transparent), } } fn lit(&mut self, kind: token::LitKind, symbol: Symbol, suffix: Option<Symbol>) -> Literal { Literal { lit: token::Lit::new(kind, symbol, suffix), span: server::Span::call_site(self), } } } impl server::Types for Rustc<'_> { type TokenStream = TokenStream; type TokenStreamBuilder = tokenstream::TokenStreamBuilder; type TokenStreamIter = TokenStreamIter; type Group = Group; type Punct = Punct; type Ident = Ident; type Literal = Literal; type SourceFile = Lrc<SourceFile>; type MultiSpan = Vec<Span>; type Diagnostic = Diagnostic; type Span = Span; } impl server::TokenStream for Rustc<'_> { fn new(&mut self) -> Self::TokenStream { TokenStream::empty() } fn is_empty(&mut self, stream: &Self::TokenStream) -> bool { stream.is_empty() } fn from_str(&mut self, src: &str) -> Self::TokenStream { parse::parse_stream_from_source_str( FileName::proc_macro_source_code(src), src.to_string(), self.sess, Some(self.call_site), ) } fn to_string(&mut self, stream: &Self::TokenStream) -> String { stream.to_string() } fn from_token_tree( &mut self, tree: TokenTree<Self::Group, Self::Punct, Self::Ident, Self::Literal>, ) -> Self::TokenStream { tree.to_internal() } fn into_iter(&mut self, stream: Self::TokenStream) -> Self::TokenStreamIter { TokenStreamIter { cursor: stream.trees(), stack: vec![], } } } impl server::TokenStreamBuilder for Rustc<'_> { fn new(&mut self) -> Self::TokenStreamBuilder { tokenstream::TokenStreamBuilder::new() } fn push(&mut self, builder: &mut Self::TokenStreamBuilder, stream: Self::TokenStream) { builder.push(stream); } fn build(&mut self, builder: Self::TokenStreamBuilder) -> Self::TokenStream { builder.build() } } impl 
server::TokenStreamIter for Rustc<'_> { fn next( &mut self, iter: &mut Self::TokenStreamIter, ) -> Option<TokenTree<Self::Group, Self::Punct, Self::Ident, Self::Literal>> { loop { let tree = iter.stack.pop().or_else(|| { let next = iter.cursor.next_with_joint()?; Some(TokenTree::from_internal((next, self.sess, &mut iter.stack))) })?; // HACK: The condition "dummy span + group with empty delimiter" represents an AST // fragment approximately converted into a token stream. This may happen, for // example, with inputs to proc macro attributes, including derives. Such "groups" // need to flattened during iteration over stream's token trees. // Eventually this needs to be removed in favor of keeping original token trees // and not doing the roundtrip through AST. if let TokenTree::Group(ref group) = tree { if group.delimiter == Delimiter::None && group.span.entire().is_dummy() { iter.cursor.append(group.stream.clone()); continue; } } return Some(tree); } } } impl server::Group for Rustc<'_> { fn new(&mut self, delimiter: Delimiter, stream: Self::TokenStream) -> Self::Group { Group { delimiter, stream, span: DelimSpan::from_single(server::Span::call_site(self)), } } fn delimiter(&mut self, group: &Self::Group) -> Delimiter { group.delimiter } fn stream(&mut self, group: &Self::Group) -> Self::TokenStream { group.stream.clone() } fn span(&mut self, group: &Self::Group) -> Self::Span { group.span.entire() } fn span_open(&mut self, group: &Self::Group) -> Self::Span { group.span.open } fn span_close(&mut self, group: &Self::Group) -> Self::Span { group.span.close } fn set_span(&mut self, group: &mut Self::Group, span: Self::Span) { group.span = DelimSpan::from_single(span); } } impl server::Punct for Rustc<'_> { fn new(&mut self, ch: char, spacing: Spacing) -> Self::Punct { Punct::new(ch, spacing == Spacing::Joint, server::Span::call_site(self)) } fn as_char(&mut self, punct: Self::Punct) -> char { punct.ch } fn spacing(&mut self, punct: Self::Punct) -> Spacing { if 
punct.joint { Spacing::Joint } else { Spacing::Alone } } fn span(&mut self, punct: Self::Punct) -> Self::Span { punct.span } fn with_span(&mut self, punct: Self::Punct, span: Self::Span) -> Self::Punct { Punct { span, ..punct } } } impl server::Ident for Rustc<'_> { fn new(&mut self, string: &str, span: Self::Span, is_raw: bool) -> Self::Ident { Ident::new(Symbol::intern(string), is_raw, span) } fn span(&mut self, ident: Self::Ident) -> Self::Span { ident.span } fn with_span(&mut self, ident: Self::Ident, span: Self::Span) -> Self::Ident { Ident { span, ..ident } } } impl server::Literal for Rustc<'_> { // FIXME(eddyb) `Literal` should not expose internal `Debug` impls. fn debug(&mut self, literal: &Self::Literal) -> String { format!("{:?}", literal) } fn integer(&mut self, n: &str) -> Self::Literal { self.lit(token::Integer, Symbol::intern(n), None) } fn typed_integer(&mut self, n: &str, kind: &str) -> Self::Literal { self.lit(token::Integer, Symbol::intern(n), Some(Symbol::intern(kind))) } fn float(&mut self, n: &str) -> Self::Literal { self.lit(token::Float, Symbol::intern(n), None) } fn f32(&mut self, n: &str) -> Self::Literal { self.lit(token::Float, Symbol::intern(n), Some(sym::f32)) } fn f64(&mut self, n: &str) -> Self::Literal { self.lit(token::Float, Symbol::intern(n), Some(sym::f64)) } fn string(&mut self, string: &str) -> Self::Literal { let mut escaped = String::new(); for ch in string.chars() { escaped.extend(ch.escape_debug()); } self.lit(token::Str, Symbol::intern(&escaped), None) } fn character(&mut self, ch: char) -> Self::Literal { let mut escaped = String::new(); escaped.extend(ch.escape_unicode()); self.lit(token::Char, Symbol::intern(&escaped), None) } fn byte_string(&mut self, bytes: &[u8]) -> Self::Literal { let string = bytes .iter() .cloned() .flat_map(ascii::escape_default) .map(Into::<char>::into) .collect::<String>(); self.lit(token::ByteStr, Symbol::intern(&string), None) } fn span(&mut self, literal: &Self::Literal) -> Self::Span { 
literal.span } fn set_span(&mut self, literal: &mut Self::Literal, span: Self::Span) { literal.span = span; } fn subspan( &mut self, literal: &Self::Literal, start: Bound<usize>, end: Bound<usize>, ) -> Option<Self::Span> { let span = literal.span; let length = span.hi().to_usize() - span.lo().to_usize(); let start = match start { Bound::Included(lo) => lo, Bound::Excluded(lo) => lo + 1, Bound::Unbounded => 0, }; let end = match end { Bound::Included(hi) => hi + 1, Bound::Excluded(hi) => hi, Bound::Unbounded => length, }; // Bounds check the values, preventing addition overflow and OOB spans. if start > u32::max_value() as usize || end > u32::max_value() as usize || (u32::max_value() - start as u32) < span.lo().to_u32() || (u32::max_value() - end as u32) < span.lo().to_u32() || start >= end || end > length { return None; } let new_lo = span.lo() + BytePos::from_usize(start); let new_hi = span.lo() + BytePos::from_usize(end); Some(span.with_lo(new_lo).with_hi(new_hi)) } } impl server::SourceFile for Rustc<'_> { fn eq(&mut self, file1: &Self::SourceFile, file2: &Self::SourceFile) -> bool { Lrc::ptr_eq(file1, file2) } fn path(&mut self, file: &Self::SourceFile) -> String { match file.name { FileName::Real(ref path) => path .to_str() .expect("non-UTF8 file path in `proc_macro::SourceFile::path`") .to_string(), _ => file.name.to_string(), } } fn is_real(&mut self, file: &Self::SourceFile) -> bool { file.is_real_file() } } impl server::MultiSpan for Rustc<'_> { fn new(&mut self) -> Self::MultiSpan { vec![] } fn push(&mut self, spans: &mut Self::MultiSpan, span: Self::Span) { spans.push(span) } } impl server::Diagnostic for Rustc<'_> { fn new(&mut self, level: Level, msg: &str, spans: Self::MultiSpan) -> Self::Diagnostic { let mut diag = Diagnostic::new(level.to_internal(), msg); diag.set_span(MultiSpan::from_spans(spans)); diag } fn sub( &mut self, diag: &mut Self::Diagnostic, level: Level, msg: &str, spans: Self::MultiSpan, ) { diag.sub(level.to_internal(), msg, 
MultiSpan::from_spans(spans), None); } fn emit(&mut self, diag: Self::Diagnostic) { DiagnosticBuilder::new_diagnostic(&self.sess.span_diagnostic, diag).emit() } } impl server::Span for Rustc<'_> { fn debug(&mut self, span: Self::Span) -> String { format!("{:?} bytes({}..{})", span.ctxt(), span.lo().0, span.hi().0) } fn def_site(&mut self) -> Self::Span { self.def_site } fn call_site(&mut self) -> Self::Span { self.call_site } fn source_file(&mut self, span: Self::Span) -> Self::SourceFile { self.sess.source_map().lookup_char_pos(span.lo()).file } fn parent(&mut self, span: Self::Span) -> Option<Self::Span> { span.ctxt().outer_expn_info().map(|i| i.call_site) } fn source(&mut self, span: Self::Span) -> Self::Span { span.source_callsite() } fn start(&mut self, span: Self::Span) -> LineColumn { let loc = self.sess.source_map().lookup_char_pos(span.lo()); LineColumn { line: loc.line, column: loc.col.to_usize(), } } fn end(&mut self, span: Self::Span) -> LineColumn { let loc = self.sess.source_map().lookup_char_pos(span.hi()); LineColumn { line: loc.line, column: loc.col.to_usize(), } } fn join(&mut self, first: Self::Span, second: Self::Span) -> Option<Self::Span> { let self_loc = self.sess.source_map().lookup_char_pos(first.lo()); let other_loc = self.sess.source_map().lookup_char_pos(second.lo()); if self_loc.file.name != other_loc.file.name { return None; } Some(first.to(second)) } fn resolved_at(&mut self, span: Self::Span, at: Self::Span) -> Self::Span { span.with_ctxt(at.ctxt()) } fn source_text(&mut self, span: Self::Span) -> Option<String> { self.sess.source_map().span_to_snippet(span).ok() } }
34.083565
97
0.505108
648395ad03188311e25e3c3d206efeb75f2a196c
2,116
use std::fs::File; use anyhow::{anyhow, Context, Result}; use crate::fzf; use crate::kubeconfig; use crate::kubectl; use crate::session::Session; use crate::settings::Settings; use crate::shell::spawn_shell; use crate::vars; pub fn namespace(settings: &Settings, namespace_name: Option<String>, recursive: bool) -> Result<()> { vars::ensure_kubie_active()?; let namespaces = kubectl::get_namespaces(None)?; let enter_namespace = |mut namespace_name: String| -> Result<()> { let mut session = Session::load()?; if namespace_name == "-" { namespace_name = session .get_last_namespace() .context("There is not previous namespace to switch to.")? .to_string(); } else if !namespaces.contains(&namespace_name) { return Err(anyhow!("'{}' is not a valid namespace for the context", namespace_name)); } let mut config = kubeconfig::get_current_config()?; config.contexts[0].context.namespace = namespace_name.clone(); session.add_history_entry(&config.contexts[0].name, namespace_name); if recursive { spawn_shell(settings, config, &session)?; } else { let config_file = File::create(kubeconfig::get_kubeconfig_path()?)?; config.write_to(config_file)?; session.save(None)?; } Ok(()) }; if let Some(namespace_name) = namespace_name { enter_namespace(namespace_name)?; } else { // We only select the context with fzf if stdout is a terminal and if // fzf is present on the machine. if atty::is(atty::Stream::Stdout) && fzf::is_available() { match fzf::select(namespaces.iter())? { Some(namespace_name) => { enter_namespace(namespace_name)?; } None => { println!("Selection cancelled."); } } } else { for ns in namespaces { println!("{}", ns); } } } Ok(()) }
30.666667
102
0.563327
339258d003ecfb22e5ad747de9d8be4159e3909f
442
use std::fmt::Display;
use std::time::Instant;

use aoc_2021::get_input;

/// Placeholder solver for day 24 — both parts currently return 0 and the
/// puzzle input is not yet consumed.
fn solve(input: &[String]) -> (impl Display, impl Display) {
    (0, 0)
}

fn main() {
    let input = get_input("day24.txt");

    // Time only the solver itself, not the input loading.
    let timer = Instant::now();
    let (part1, part2) = solve(input.as_slice());
    let elapsed_ms = timer.elapsed().as_micros() as f64 / 1000.0;

    println!("Part 1: {}", part1);
    println!("Part 2: {}", part2);
    println!("Duration: {:.3}ms", elapsed_ms);
}
19.217391
60
0.567873
0345ef8a6f1cfc23d0371846a67b782566a98fc7
173
// variables4.rs
// Make me compile! Execute the command `rustlings hint variables4` if you want a hint :)

fn main() {
    // A typed integer literal gives `x` a concrete type, which lets the
    // exercise compile.
    let x = 12i32;
    println!("Number {}", x);
}
19.222222
89
0.624277
298af578df9ed29eeeb3fdd5f0add775a7a88c89
766
#![allow(dead_code)]

use std::mem;

// Stack: holds the current function's local variables — fast, but limited in
// size.
// Heap: a manually managed region with program-wide scope, used for longer
// term storage; Rust reaches it through smart pointers such as `Box`.

struct Point {
    x: f64,
    y: f64,
}

/// Builds the point at the coordinate origin (0, 0).
fn origin() -> Point {
    Point { x: 0.0, y: 0.0 }
}

pub fn stack_and_heap() {
    // A plain value lives directly on the stack...
    let stack_point = origin();
    // ...while `Box::new` moves the value to the heap and keeps only a
    // pointer to it on the stack.
    let boxed_point = Box::new(origin());

    println!("p1 takes up to {} bytes", mem::size_of_val(&stack_point));
    println!("p2 takes up to {} bytes", mem::size_of_val(&boxed_point));

    // Dereferencing the box copies the heap value back onto the stack.
    let unboxed_point = *boxed_point;
    println!("p3 takes up to {} bytes", mem::size_of_val(&unboxed_point));
}
22.529412
77
0.620104
ab754b1e1159757fe6deaf94a8ca2298cefec599
3,261
//! Secure random number generator interface.
//!
//! This module implements the [`SecureRandom`] package from the Ruby Standard
//! Library. It is an interface to secure random number generators which are
//! suitable for generating session keys in HTTP cookies, etc.
//!
//! You can use this library in your application by requiring it:
//!
//! ```ruby
//! require 'securerandom'
//! ```
//!
//! This implementation of `SecureRandom` supports the system RNG via the
//! [`getrandom`] crate. This implementation does not depend on OpenSSL.
//!
//! [`SecureRandom`]: https://ruby-doc.org/stdlib-2.6.3/libdoc/securerandom/rdoc/SecureRandom.html
//! [`getrandom`]: https://crates.io/crates/getrandom

use crate::extn::core::exception as exc;
use crate::extn::prelude::*;

pub mod mruby;
pub mod trampoline;

// Re-export the pure-Rust `SecureRandom` implementation so the interpreter
// glue below only has to translate errors and argument types.
#[doc(inline)]
pub use spinoso_securerandom::{
    alphanumeric, base64, hex, random_bytes, random_number, urlsafe_base64, uuid, ArgumentError,
    DomainError, Error as SecureRandomError, Max, Rand, RandomBytesError, SecureRandom,
};

// Fan a `spinoso_securerandom` error out to the matching interpreter error by
// delegating to the per-variant `From` impls below.
impl From<SecureRandomError> for Error {
    fn from(err: SecureRandomError) -> Self {
        match err {
            SecureRandomError::Argument(err) => err.into(),
            SecureRandomError::RandomBytes(err) => err.into(),
        }
    }
}

// Map a spinoso `ArgumentError` to a Ruby `ArgumentError` exception,
// preserving its message.
impl From<ArgumentError> for Error {
    fn from(err: ArgumentError) -> Self {
        exc::ArgumentError::from(err.message()).into()
    }
}

// RNG failures surface as a Ruby `RuntimeError`.
impl From<RandomBytesError> for Error {
    fn from(err: RandomBytesError) -> Self {
        RuntimeError::from(err.message()).into()
    }
}

impl From<DomainError> for Error {
    fn from(err: DomainError) -> Self {
        // TODO: MRI returns `Errno::EDOM` exception class.
        // NB: the unqualified `ArgumentError` here is
        // `spinoso_securerandom::ArgumentError` (re-exported above), which is
        // then routed through the `From<ArgumentError> for Error` impl.
        ArgumentError::from(err.message()).into()
    }
}

// Convert an arbitrary Ruby `Value` into the `Max` bound accepted by
// `random_number`: a nil check first, then the `Option<Value>` impl below.
impl TryConvertMut<Value, Max> for Artichoke {
    type Error = Error;

    fn try_convert_mut(&mut self, max: Value) -> Result<Max, Self::Error> {
        let optional: Option<Value> = self.try_convert(max)?;
        self.try_convert_mut(optional)
    }
}

impl TryConvertMut<Option<Value>, Max> for Artichoke {
    type Error = Error;

    // `None` (Ruby nil) means "no bound"; Fixnum and Float convert directly;
    // any other object must respond to an implicit integer conversion
    // (`#to_int`) or an `ArgumentError` naming the offending value is raised.
    fn try_convert_mut(&mut self, max: Option<Value>) -> Result<Max, Self::Error> {
        if let Some(max) = max {
            match max.ruby_type() {
                Ruby::Fixnum => {
                    let max = max.try_into(self)?;
                    Ok(Max::Integer(max))
                }
                Ruby::Float => {
                    let max = max.try_into(self)?;
                    Ok(Max::Float(max))
                }
                _ => {
                    let max = max.implicitly_convert_to_int(self).map_err(|_| {
                        // Build the message as raw bytes since `inspect`
                        // output is a binary string.
                        let mut message = b"invalid argument - ".to_vec();
                        message.extend(max.inspect(self).as_slice());
                        exc::ArgumentError::from(message)
                    })?;
                    Ok(Max::Integer(max))
                }
            }
        } else {
            Ok(Max::None)
        }
    }
}

// Convert a generated random number back into a Ruby `Value`.
impl ConvertMut<Rand, Value> for Artichoke {
    fn convert_mut(&mut self, from: Rand) -> Value {
        match from {
            Rand::Integer(num) => self.convert(num),
            Rand::Float(num) => self.convert_mut(num),
        }
    }
}
31.057143
109
0.586017
6785a10d103c18c578506b8d4d1a1a5155f4500c
2,380
fn main() {
    // Let - by default declares an immutable (not changeable) variable which can be shadowed.
    let x: i32 = 1111; // With type annotation.
    let x = 1111; // Type will be inferred.

    // Adding a 'mut' keyword before the variable declaration will allow it to be changed.
    let mut x = "I can be mutable!";
    println!("The value of x is {}.", x);

    // Const - used to declare a constant. By default, all constants are immutable.
    // You need to explicitly add a type annotation to a constant.
    // May only be set to a constant expression, not the result of a function call.
    const MY_CONSTANT: bool = true;
    println!("My constant is {}.", MY_CONSTANT);

    // Integers - Numbers without a fractional component.
    // Signed - can have a sign in front of it.
    // Unsigned - Cannot be negative only positive.

    // 8-bit
    let num1: i8 = -10; // Signed, from -128 to 127
    let num2: u8 = 10; // Unsigned, from 0 to 255
    println!("8-bit integer.\nSigned: {}\nUnsigned: {}", num1, num2);

    // 16-bit
    let num1: i16 = -30000; // Signed, from -32,768 to 32,767
    let num2: u16 = 30000; // Unsigned, from 0 to 65,535
    println!("16-bit integer.\nSigned: {}\nUnsigned: {}", num1, num2);

    // 32-bit Integer
    // By default, Rust uses this type when a number is inferred.
    let num1: i32 = -1200000; // Signed, from -2,147,483,648 to 2,147,483,647
    let num2: u32 = 2000000; // Unsigned, from 0 to 4,294,967,295
    println!("32-bit integer.\nSigned: {}\nUnsigned: {}", num1, num2);

    // 64-bit integer
    let num1: i64 = -8000000000000000; // Signed, from −(2^63) to 2^63 − 1
    let num2: u64 = 9000000000000000000; // Unsigned, from 0 to 2^64 − 1
    println!("64-bit integer.\nSigned: {}\nUnsigned: {}", num1, num2);

    // 128-bit integer
    let num1: i128 = -170141183460469231731687303715884105728; // Signed, from -(2^127) to 2^127 - 1
    let num2: u128 = 340282366920938463463374607431768211455; // Unsigned, from 0 to 2^128 - 1
    println!("128-bit integer.\nSigned: {}\nUnsigned: {}", num1, num2);

    // Array - A collection of multiple values.
    let array = [
        "Monday",
        "Tuesday",
        "Wednesday",
        "Thursday",
        "Friday",
        "Saturday",
        "Sunday",
    ];
    println!("{}", array[0]) // Printing first element of array. Arrays are zero indexed.
}
41.754386
103
0.619328
335254e11823dcdeef7b57ca660b9390396a74d9
4,001
// Tock capsule that drives a 5x5 LED matrix as a one-digit text display for
// userspace processes.

use kernel::hil::led::Led;
use kernel::syscall::{SyscallDriver, CommandReturn};
use kernel::{debug, ErrorCode, ProcessId};
use kernel::hil::time::{Alarm, AlarmClient, ConvertTicks};
use kernel::grant::Grant;
use kernel::processbuffer::ReadOnlyProcessBuffer;

// Syscall driver number userspace uses to reach this capsule.
pub const DRIVER_NUM:usize = 0xa0002;

// Per-process grant storage: a read-only buffer shared by the process.
#[derive(Default)]
pub struct AppData {
    buffer: ReadOnlyProcessBuffer
}

// 5x5 bitmaps for the digits 0-9; each digit packs five rows of five bits,
// most significant bit first (bit 24 is the top-left LED).
const DIGITS: [u32; 10] = [
    // 0
    0b11111_10011_10101_11001_11111,
    // 1
    0b00100_01100_00100_00100_01110,
    // 2
    0b11110_00001_01110_10000_11111,
    // 3
    0b11110_00001_11110_00001_11110,
    // 4
    0b10000_10000_10100_11111_00100,
    // 5
    0b11111_10000_11110_00001_11110,
    // 6
    0b11111_10000_11111_10001_11111,
    // 7
    0b11111_00001_00010_00100_00100,
    // 8
    0b11111_10001_11111_10001_11111,
    // 9
    0b11111_10001_11111_00001_11111,
];

pub struct DotsText<'a, L: Led, A: Alarm<'a>> {
    // The 25 LEDs of the matrix, in row-major order.
    leds: &'a [& 'a L; 25],
    // Alarm used to schedule periodic callbacks.
    alarm: &'a A,
    app_data: Grant<AppData, 1> // data type, number of subscriptions
    // the grant is only the accessor and allocator for the memory
}

impl <'a, L: Led, A: Alarm<'a>> DotsText<'a, L, A> {
    // Constructs the capsule; the `[&L; 25]` array type already enforces the
    // LED count at compile time, which is why the runtime check is disabled.
    pub fn new( leds: &'a [& 'a L; 25], alarm: &'a A, app_data: Grant<AppData, 1>) -> DotsText<'a, L, A> {
        // if leds.len() != 25 {
        //     panic!("DotsText needs 25 LEDs, you supplied {}",leds.len())
        // }
        DotsText{
            leds,
            alarm,
            app_data
        }
    }

    // Arms the alarm to fire 1000 ms from now.
    pub fn setup_alarm(&self) {
        self.alarm.set_alarm(self.alarm.now(), self.alarm.ticks_from_ms(1000));
    }

    // Lights the LED matrix with the bitmap for `digit` ('0'..='9').
    // NOTE(review): callers must pass a valid ASCII digit — an out-of-range
    // char would index past DIGITS and panic.
    fn display(&self, digit: char) {
        let digit_index = digit as usize -'0' as usize;
        let crt_digit = DIGITS[digit_index];
        for idx in 0..25 {
            // Walk the 25 bits from MSB (top-left) to LSB (bottom-right).
            let bit = (crt_digit >> (24 -idx)) & 0x1;
            if bit == 1 {
                self.leds[idx].on();
            } else {
                self.leds[idx].off();
            }
        }
    }
}

impl<'a,L: Led, A:Alarm<'a>> SyscallDriver for DotsText<'a, L, A> {
    // Command 0: existence check. Command 1: accept a digit in r2.
    fn command(
        &self, /* note: `self` is not mutable here */
        command_num: usize,
        r2: usize,
        _r3: usize,
        _process_id: ProcessId,
    ) -> CommandReturn {
        match command_num{
            0 => CommandReturn::success(),
            // print digit
            1 => {
                // char::from_u32(r2 as u32)
                let digit = r2 as u8 as char;
                if digit >='0' && digit <= '9' {
                    // NOTE(review): the digit is validated but `self.display(digit)`
                    // is never called, so command 1 currently has no visible
                    // effect — presumably work in progress; confirm intent.
                    CommandReturn::success()
                } else {
                    CommandReturn::failure(ErrorCode::INVAL)
                }
            }
            _ => CommandReturn::failure(ErrorCode::INVAL)
        }
    }

    // Allow slot 0: swap the process-supplied read-only buffer into the
    // grant, returning the previously held buffer to the kernel.
    fn allow_readonly(
        &self,
        process_id: ProcessId,
        allow_num: usize,
        mut buffer: ReadOnlyProcessBuffer,
    ) -> Result<ReadOnlyProcessBuffer, (ReadOnlyProcessBuffer, ErrorCode)> {
        match allow_num{
            0 => {
                let res = self.app_data.enter(process_id, |data, _upcalls| {
                    // called only if the grant can be entered
                    core::mem::swap(&mut data.buffer,&mut buffer);
                });
                match res {
                    Ok(_) => Ok(buffer),
                    Err(error) => Err((buffer, error.into()))
                }
            }
            _ => Err((buffer, ErrorCode::NOSUPPORT))
        }
    }

    fn allocate_grant(
        &self,
        processid: ProcessId
    ) -> Result<(), kernel::process::Error> {
        // we need to allocate the grant region here; entering it forces the
        // allocation
        self.app_data.enter(processid, |_,_|{
            // what else could be done here?
            debug!("alloc grant")})
        // the error message only shows up if trace syscalls is enabled
    }
}

impl <'a, L: Led, A: Alarm<'a>> AlarmClient for DotsText<'a,L,A> {
    // Alarm callback: currently only re-arms the 1-second alarm; the display
    // call is commented out.
    fn alarm(&self) {
        // debug!("fired alarm for dots text");
        // self.display('0');
        self.setup_alarm();
    }
}
27.784722
79
0.526868
5b1a4221f8c958fec1cda81c356aed9b3b04a221
561
use actix::{Context, Handler, ResponseType}; use interpreter::Interpreter; /// The `Registration` signal is sent from the client to the interpreter when registration /// confirmation is received from the event bus. pub struct Registration { pub message: String, } impl ResponseType for Registration { type Item = (); type Error = (); } impl Handler<Registration> for Interpreter { type Result = (); fn handle(&mut self, _message: Registration, _: &mut Context<Self>) { info!("received registration signal from client"); } }
24.391304
90
0.695187
89de5cdc41fc5976cdc3f1437a887cb0e0def5c5
782
// traits2.rs
//
// Implements the `AppendBar` trait for a vector of
// strings: "appending Bar" simply means pushing one
// extra "Bar" element onto the end of the vector.

trait AppendBar {
    fn append_bar(self) -> Self;
}

impl AppendBar for Vec<String> {
    fn append_bar(mut self) -> Self {
        // Push a single owned "Bar" string; equivalent to appending a
        // one-element vector.
        self.push(String::from("Bar"));
        self
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn is_vec_pop_eq_bar() {
        let mut foo = vec![String::from("Foo")].append_bar();
        assert_eq!(foo.pop().unwrap(), String::from("Bar"));
        assert_eq!(foo.pop().unwrap(), String::from("Foo"));
    }
}
19.073171
61
0.584399
0ea3d4990c560c4187daadbb07faa348f009d1b5
13,241
//! Backend using the pure-rust crossplatform crossterm library. //! //! Requires the `crossterm-backend` feature. #![cfg(feature = "crossterm")] #![cfg_attr(feature = "doc-cfg", doc(cfg(feature = "crossterm-backend")))] use std::{ cell::{Cell, RefCell, RefMut}, io::{self, BufWriter, Write}, time::Duration, }; pub use crossterm; use crossterm::{ cursor::{Hide, MoveTo, Show}, event::{ poll, read, DisableMouseCapture, EnableMouseCapture, Event as CEvent, KeyCode, KeyEvent as CKeyEvent, KeyModifiers, MouseButton as CMouseButton, MouseEvent as CMouseEvent, MouseEventKind, }, execute, queue, style::{ Attribute, Color, Print, SetAttribute, SetBackgroundColor, SetForegroundColor, }, terminal::{ self, disable_raw_mode, enable_raw_mode, Clear, ClearType, EnterAlternateScreen, LeaveAlternateScreen, }, }; use crate::{ backend, event::{Event, Key, MouseButton, MouseEvent}, theme, Vec2, }; #[cfg(windows)] type Stdout = io::Stdout; #[cfg(unix)] type Stdout = std::fs::File; /// Backend using crossterm pub struct Backend { current_style: Cell<theme::ColorPair>, stdout: RefCell<BufWriter<Stdout>>, } fn translate_button(button: CMouseButton) -> MouseButton { match button { CMouseButton::Left => MouseButton::Left, CMouseButton::Right => MouseButton::Right, CMouseButton::Middle => MouseButton::Middle, } } fn translate_key(code: KeyCode) -> Key { match code { KeyCode::Esc => Key::Esc, KeyCode::Backspace => Key::Backspace, KeyCode::Left => Key::Left, KeyCode::Right => Key::Right, KeyCode::Up => Key::Up, KeyCode::Down => Key::Down, KeyCode::Home => Key::Home, KeyCode::End => Key::End, KeyCode::PageUp => Key::PageUp, KeyCode::PageDown => Key::PageDown, KeyCode::Delete => Key::Del, KeyCode::Insert => Key::Ins, KeyCode::Enter => Key::Enter, KeyCode::Tab => Key::Tab, KeyCode::F(n) => Key::from_f(n), KeyCode::BackTab => Key::Tab, /* not supported */ KeyCode::Char(_) => Key::Tab, /* is handled at `Event` level, use tab as default */ KeyCode::Null => Key::Tab, /* is handled at `Event` level, 
use tab as default */ } } fn translate_event(event: CKeyEvent) -> Event { const CTRL_ALT: KeyModifiers = KeyModifiers::from_bits_truncate( KeyModifiers::CONTROL.bits() | KeyModifiers::ALT.bits(), ); const CTRL_SHIFT: KeyModifiers = KeyModifiers::from_bits_truncate( KeyModifiers::CONTROL.bits() | KeyModifiers::SHIFT.bits(), ); const ALT_SHIFT: KeyModifiers = KeyModifiers::from_bits_truncate( KeyModifiers::ALT.bits() | KeyModifiers::SHIFT.bits(), ); match event { // Handle Char + modifier. CKeyEvent { modifiers: KeyModifiers::CONTROL, code: KeyCode::Char(c), } => Event::CtrlChar(c), CKeyEvent { modifiers: KeyModifiers::ALT, code: KeyCode::Char(c), } => Event::AltChar(c), CKeyEvent { modifiers: KeyModifiers::SHIFT, code: KeyCode::Char(c), } => Event::Char(c), CKeyEvent { code: KeyCode::Char(c), .. } => Event::Char(c), // From now on, assume the key is never a `Char`. // Explicitly handle 'backtab' since crossterm does not sent SHIFT alongside the back tab key. CKeyEvent { code: KeyCode::BackTab, .. } => Event::Shift(Key::Tab), // Handle key + multiple modifiers CKeyEvent { modifiers: CTRL_ALT, code, } => Event::CtrlAlt(translate_key(code)), CKeyEvent { modifiers: CTRL_SHIFT, code, } => Event::CtrlShift(translate_key(code)), CKeyEvent { modifiers: ALT_SHIFT, code, } => Event::AltShift(translate_key(code)), // Handle key + single modifier CKeyEvent { modifiers: KeyModifiers::CONTROL, code, } => Event::Ctrl(translate_key(code)), CKeyEvent { modifiers: KeyModifiers::ALT, code, } => Event::Alt(translate_key(code)), CKeyEvent { modifiers: KeyModifiers::SHIFT, code, } => Event::Shift(translate_key(code)), // All other keys. CKeyEvent { code, .. 
} => Event::Key(translate_key(code)), } } fn translate_color(base_color: theme::Color) -> Color { match base_color { theme::Color::Dark(theme::BaseColor::Black) => Color::Black, theme::Color::Dark(theme::BaseColor::Red) => Color::DarkRed, theme::Color::Dark(theme::BaseColor::Green) => Color::DarkGreen, theme::Color::Dark(theme::BaseColor::Yellow) => Color::DarkYellow, theme::Color::Dark(theme::BaseColor::Blue) => Color::DarkBlue, theme::Color::Dark(theme::BaseColor::Magenta) => Color::DarkMagenta, theme::Color::Dark(theme::BaseColor::Cyan) => Color::DarkCyan, theme::Color::Dark(theme::BaseColor::White) => Color::Grey, theme::Color::Light(theme::BaseColor::Black) => Color::DarkGrey, theme::Color::Light(theme::BaseColor::Red) => Color::Red, theme::Color::Light(theme::BaseColor::Green) => Color::Green, theme::Color::Light(theme::BaseColor::Yellow) => Color::Yellow, theme::Color::Light(theme::BaseColor::Blue) => Color::Blue, theme::Color::Light(theme::BaseColor::Magenta) => Color::Magenta, theme::Color::Light(theme::BaseColor::Cyan) => Color::Cyan, theme::Color::Light(theme::BaseColor::White) => Color::White, theme::Color::Rgb(r, g, b) => Color::Rgb { r, g, b }, theme::Color::RgbLowRes(r, g, b) => { debug_assert!(r <= 5, "Red color fragment (r = {}) is out of bound. Make sure r ≤ 5.", r); debug_assert!(g <= 5, "Green color fragment (g = {}) is out of bound. Make sure g ≤ 5.", g); debug_assert!(b <= 5, "Blue color fragment (b = {}) is out of bound. Make sure b ≤ 5.", b); Color::AnsiValue(16 + 36 * r + 6 * g + b) } theme::Color::TerminalDefault => Color::Reset, } } impl Backend { /// Creates a new crossterm backend. 
pub fn init() -> Result<Box<dyn backend::Backend>, crossterm::ErrorKind> where Self: Sized, { enable_raw_mode()?; // TODO: Use the stdout we define down there execute!( io::stdout(), EnterAlternateScreen, EnableMouseCapture, Hide )?; #[cfg(unix)] let stdout = RefCell::new(BufWriter::new(std::fs::File::create("/dev/tty")?)); #[cfg(windows)] let stdout = RefCell::new(BufWriter::new(io::stdout())); Ok(Box::new(Backend { current_style: Cell::new(theme::ColorPair::from_256colors(0, 0)), stdout, })) } fn apply_colors(&self, colors: theme::ColorPair) { self.with_stdout(|stdout| { queue!( stdout, SetForegroundColor(translate_color(colors.front)), SetBackgroundColor(translate_color(colors.back)) ) .unwrap() }); } fn stdout_mut(&self) -> RefMut<BufWriter<Stdout>> { self.stdout.borrow_mut() } fn with_stdout(&self, f: impl FnOnce(&mut BufWriter<Stdout>)) { f(&mut *self.stdout_mut()); } fn set_attr(&self, attr: Attribute) { self.with_stdout(|stdout| queue!(stdout, SetAttribute(attr)).unwrap()); } fn map_key(&mut self, event: CEvent) -> Option<Event> { Some(match event { CEvent::Key(key_event) => translate_event(key_event), CEvent::Mouse(CMouseEvent { kind, column, row, modifiers: _, }) => { let position = (column, row).into(); let event = match kind { MouseEventKind::Down(button) => { MouseEvent::Press(translate_button(button)) } MouseEventKind::Up(button) => { MouseEvent::Release(translate_button(button)) } MouseEventKind::Drag(button) => { MouseEvent::Hold(translate_button(button)) } MouseEventKind::Moved => { return None; } MouseEventKind::ScrollDown => MouseEvent::WheelDown, MouseEventKind::ScrollUp => MouseEvent::WheelUp, }; Event::Mouse { event, position, offset: Vec2::zero(), } } CEvent::Resize(_, _) => Event::WindowResize, }) } } impl Drop for Backend { fn drop(&mut self) { // We have to execute the show cursor command at the `stdout`. 
self.with_stdout(|stdout| { execute!(stdout, LeaveAlternateScreen, DisableMouseCapture, Show) .expect("Can not disable mouse capture or show cursor.") }); disable_raw_mode().unwrap(); } } impl backend::Backend for Backend { fn poll_event(&mut self) -> Option<Event> { match poll(Duration::from_millis(1)) { Ok(true) => match read() { Ok(event) => match self.map_key(event) { Some(event) => Some(event), None => return self.poll_event(), }, Err(e) => panic!("{:?}", e), }, _ => None, } } fn set_title(&mut self, title: String) { self.with_stdout(|stdout| { execute!(stdout, terminal::SetTitle(title)).unwrap() }); } fn refresh(&mut self) { self.with_stdout(|stdout| stdout.flush().unwrap()); } fn has_colors(&self) -> bool { // TODO: color support detection? true } fn screen_size(&self) -> Vec2 { let size = terminal::size().unwrap_or((1, 1)); Vec2::from(size) } fn print_at(&self, pos: Vec2, text: &str) { self.with_stdout(|stdout| { queue!(stdout, MoveTo(pos.x as u16, pos.y as u16), Print(text)) .unwrap() }); } fn print_at_rep(&self, pos: Vec2, repetitions: usize, text: &str) { if repetitions > 0 { self.with_stdout(|out| { queue!(out, MoveTo(pos.x as u16, pos.y as u16)).unwrap(); out.write_all(text.as_bytes()).unwrap(); let mut dupes_left = repetitions - 1; while dupes_left > 0 { out.write_all(text.as_bytes()).unwrap(); dupes_left -= 1; } }); } } fn clear(&self, color: theme::Color) { self.apply_colors(theme::ColorPair { front: color, back: color, }); self.with_stdout(|stdout| { queue!(stdout, Clear(ClearType::All)).unwrap() }); } fn set_color(&self, color: theme::ColorPair) -> theme::ColorPair { let current_style = self.current_style.get(); if current_style != color { self.apply_colors(color); self.current_style.set(color); } current_style } fn set_effect(&self, effect: theme::Effect) { match effect { theme::Effect::Simple => (), theme::Effect::Reverse => self.set_attr(Attribute::Reverse), theme::Effect::Dim => self.set_attr(Attribute::Dim), theme::Effect::Bold => 
self.set_attr(Attribute::Bold), theme::Effect::Blink => self.set_attr(Attribute::SlowBlink), theme::Effect::Italic => self.set_attr(Attribute::Italic), theme::Effect::Strikethrough => { self.set_attr(Attribute::CrossedOut) } theme::Effect::Underline => self.set_attr(Attribute::Underlined), } } fn unset_effect(&self, effect: theme::Effect) { match effect { theme::Effect::Simple => (), theme::Effect::Reverse => self.set_attr(Attribute::NoReverse), theme::Effect::Dim | theme::Effect::Bold => { self.set_attr(Attribute::NormalIntensity) } theme::Effect::Blink => self.set_attr(Attribute::NoBlink), theme::Effect::Italic => self.set_attr(Attribute::NoItalic), theme::Effect::Strikethrough => { self.set_attr(Attribute::NotCrossedOut) } theme::Effect::Underline => self.set_attr(Attribute::NoUnderline), } } fn name(&self) -> &str { "crossterm" } }
32.6133
102
0.532739
bb5887bda5a23a6d65fbf7ec24fa8adbcf2ddb10
1,558
use {serde::Serialize, std::fmt::Debug, thiserror::Error}; #[derive(Error, Serialize, Debug, PartialEq)] pub enum IntervalError { #[error("unsupported interval range: {0} to {1}")] UnsupportedRange(String, String), #[error("cannot add between YEAR TO MONTH and HOUR TO SECOND")] AddBetweenYearToMonthAndHourToSecond, #[error("cannot subtract between YEAR TO MONTH and HOUR TO SECOND")] SubtractBetweenYearToMonthAndHourToSecond, #[error("cannot add year or month to TIME: {time} + {interval}")] AddYearOrMonthToTime { time: String, interval: String }, #[error("cannot subtract year or month to TIME: {time} - {interval}")] SubtractYearOrMonthToTime { time: String, interval: String }, #[error("failed to parse integer: {0}")] FailedToParseInteger(String), #[error("failed to parse decimal: {0}")] FailedToParseDecimal(String), #[error("failed to parse time: {0}")] FailedToParseTime(String), #[error("failed to parse YEAR TO MONTH (year-month, ex. 2-8): {0}")] FailedToParseYearToMonth(String), #[error("failed to parse DAY TO HOUR (day hour, ex. 1 23): {0}")] FailedToParseDayToHour(String), #[error("failed to parse DAY TO MINUTE (day hh:mm, ex. 1 12:34): {0}")] FailedToParseDayToMinute(String), #[error("failed to parse DAY TO SECOND (day hh:mm:ss, ex. 1 12:34:55): {0}")] FailedToParseDayToSecond(String), #[error("date overflow: {year}-{month}")] DateOverflow { year: i32, month: i32 }, #[error("unreachable")] Unreachable, }
33.148936
81
0.667522
648d65ce41c918c395abf3733045b3a85fceaf4d
3,165
//! Handles are pointer-like values to objects that *can* accept commands. //! //! Because they're pointers, handles are either 32-bit or 64-bit depending on //! the platform (the same size as `usize`). use super::*; /// [VkInstance](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkInstance.html) /// /// Handle to an Instance (a connection to a Vulkan implementation). /// /// * Parent Object: none /// * ObjectTypeEnum: [`VK_OBJECT_TYPE_INSTANCE`] #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] #[repr(transparent)] pub struct VkInstance(*mut c_void); impl Default for VkInstance { fn default() -> Self { Self::null() } } impl VkInstance { pub const fn null() -> Self { Self(core::ptr::null_mut()) } pub fn is_null(&self) -> bool { self.0.is_null() } } /// [VkPhysicalDevice](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkPhysicalDevice.html) /// /// Handle to a Physical Device. /// /// * Parent Object: [`VkInstance`] /// * ObjectTypeEnum: [`VK_OBJECT_TYPE_PHYSICAL_DEVICE`] #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] #[repr(transparent)] pub struct VkPhysicalDevice(*mut c_void); impl Default for VkPhysicalDevice { fn default() -> Self { Self::null() } } impl VkPhysicalDevice { pub const fn null() -> Self { Self(core::ptr::null_mut()) } pub fn is_null(&self) -> bool { self.0.is_null() } } /// [VkDevice](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkDevice.html) /// /// Handle to a Device. 
/// /// * Parent: [`VkPhysicalDevice`] /// * ObjectTypeEnum: [`VK_OBJECT_TYPE_DEVICE`] #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] #[repr(transparent)] pub struct VkDevice(*mut c_void); impl Default for VkDevice { fn default() -> Self { Self::null() } } impl VkDevice { pub const fn null() -> Self { Self(core::ptr::null_mut()) } pub fn is_null(&self) -> bool { self.0.is_null() } } /// [VkQueue](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkQueue.html) /// /// Handle to a Queue. /// /// * Parent: [`VkDevice`] /// * ObjectTypeEnum: [`VK_OBJECT_TYPE_QUEUE`] #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] #[repr(transparent)] pub struct VkQueue(*mut c_void); impl Default for VkQueue { fn default() -> Self { Self::null() } } impl VkQueue { pub const fn null() -> Self { Self(core::ptr::null_mut()) } pub fn is_null(&self) -> bool { self.0.is_null() } } /// [VkCommandBuffer](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkCommandBuffer.html) /// /// Handle to a command buffer object. /// /// * Parent: [`VkCommandPool`] /// * ObjectTypeEnum: [`VK_OBJECT_TYPE_COMMAND_BUFFER`] #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] #[repr(transparent)] pub struct VkCommandBuffer(*mut c_void); impl Default for VkCommandBuffer { fn default() -> Self { Self::null() } } impl VkCommandBuffer { pub const fn null() -> Self { Self(core::ptr::null_mut()) } pub fn is_null(&self) -> bool { self.0.is_null() } }
25.942623
115
0.662559
03c028e15ba7a6fd09cd1031f9fa16b339984687
903
//! Turns on the 4 user LEDs on the Ultra96 board (PS_MIO[17..20]) #![no_main] #![no_std] #[cfg(not(debug_assertions))] use core::sync::atomic::{self, Ordering}; use panic_dcc as _; use zup_rt::entry; #[entry] fn main() -> ! { let p = unsafe { zup::Peripherals::steal() }; let mask = 0b1111u16 << 1; // [17:20] // configure pins as output p.GPIO.dirm_0.modify(|r, w| unsafe { w.direction_0() .bits(r.direction_0().bits() | ((mask as u32) << 16)) }); // enable output p.GPIO.oen_0.modify(|r, w| unsafe { w.op_enable_0() .bits(r.op_enable_0().bits() | ((mask as u32) << 16)) }); // set pins to 1 p.GPIO .mask_data_0_msw .write(|w| unsafe { w.data_0_msw().bits(mask).mask_0_msw().bits(!mask) }); loop { #[cfg(not(debug_assertions))] atomic::compiler_fence(Ordering::SeqCst); } }
23.763158
82
0.561462
4b3e6308bdef6882485415849c0d694e40a9427e
93,684
use crate::{ accounts_db::{AccountsDb, BankHashInfo, ErrorCounters, LoadedAccount, ScanStorageResult}, accounts_index::{AccountIndex, Ancestors, IndexKey}, bank::{ NonceRollbackFull, NonceRollbackInfo, TransactionCheckResult, TransactionExecutionResult, }, blockhash_queue::BlockhashQueue, rent_collector::RentCollector, system_instruction_processor::{get_system_account_kind, SystemAccountKind}, }; use dashmap::{ mapref::entry::Entry::{Occupied, Vacant}, DashMap, }; use log::*; use rand::{thread_rng, Rng}; use solana_sdk::{ account::{Account, AccountSharedData}, account_utils::StateMut, bpf_loader_upgradeable::{self, UpgradeableLoaderState}, clock::{Slot, INITIAL_RENT_EPOCH}, feature_set::{self, FeatureSet}, fee_calculator::{FeeCalculator, FeeConfig}, genesis_config::ClusterType, hash::Hash, message::Message, native_loader, nonce, pubkey::Pubkey, transaction::Result, transaction::{Transaction, TransactionError}, }; use std::{ cmp::Reverse, collections::{hash_map, BinaryHeap, HashMap, HashSet}, ops::RangeBounds, path::PathBuf, sync::{Arc, Mutex}, }; #[derive(Debug, Default, AbiExample)] pub struct AccountLocks { write_locks: HashSet<Pubkey>, readonly_locks: HashMap<Pubkey, u64>, } impl AccountLocks { fn is_locked_readonly(&self, key: &Pubkey) -> bool { self.readonly_locks .get(key) .map_or(false, |count| *count > 0) } fn is_locked_write(&self, key: &Pubkey) -> bool { self.write_locks.contains(key) } fn insert_new_readonly(&mut self, key: &Pubkey) { assert!(self.readonly_locks.insert(*key, 1).is_none()); } fn lock_readonly(&mut self, key: &Pubkey) -> bool { self.readonly_locks.get_mut(key).map_or(false, |count| { *count += 1; true }) } fn unlock_readonly(&mut self, key: &Pubkey) { if let hash_map::Entry::Occupied(mut occupied_entry) = self.readonly_locks.entry(*key) { let count = occupied_entry.get_mut(); *count -= 1; if *count == 0 { occupied_entry.remove_entry(); } } } fn unlock_write(&mut self, key: &Pubkey) { self.write_locks.remove(key); } } /// This structure 
handles synchronization for db #[derive(Default, Debug, AbiExample)] pub struct Accounts { /// Single global AccountsDb pub accounts_db: Arc<AccountsDb>, /// set of read-only and writable accounts which are currently /// being processed by banking/replay threads pub(crate) account_locks: Mutex<AccountLocks>, } // for the load instructions pub type TransactionAccounts = Vec<AccountSharedData>; pub type TransactionAccountDeps = Vec<(Pubkey, AccountSharedData)>; pub type TransactionRent = u64; pub type TransactionLoaders = Vec<Vec<(Pubkey, AccountSharedData)>>; #[derive(PartialEq, Debug, Clone)] pub struct LoadedTransaction { pub accounts: TransactionAccounts, pub account_deps: TransactionAccountDeps, pub loaders: TransactionLoaders, pub rent: TransactionRent, } pub type TransactionLoadResult = (Result<LoadedTransaction>, Option<NonceRollbackFull>); pub enum AccountAddressFilter { Exclude, // exclude all addresses matching the filter Include, // only include addresses matching the filter } impl Accounts { pub fn new(paths: Vec<PathBuf>, cluster_type: &ClusterType) -> Self { Self::new_with_config(paths, cluster_type, HashSet::new(), false) } pub fn new_with_config( paths: Vec<PathBuf>, cluster_type: &ClusterType, account_indexes: HashSet<AccountIndex>, caching_enabled: bool, ) -> Self { Self { accounts_db: Arc::new(AccountsDb::new_with_config( paths, cluster_type, account_indexes, caching_enabled, )), account_locks: Mutex::new(AccountLocks::default()), } } pub fn new_from_parent(parent: &Accounts, slot: Slot, parent_slot: Slot) -> Self { let accounts_db = parent.accounts_db.clone(); accounts_db.set_hash(slot, parent_slot); Self { accounts_db, account_locks: Mutex::new(AccountLocks::default()), } } pub(crate) fn new_empty(accounts_db: AccountsDb) -> Self { Self { accounts_db: Arc::new(accounts_db), account_locks: Mutex::new(AccountLocks::default()), } } /// Return true if the slice has any duplicate elements pub fn has_duplicates<T: PartialEq>(xs: &[T]) -> bool { // 
Note: This is an O(n^2) algorithm, but requires no heap allocations. The benchmark // `bench_has_duplicates` in benches/message_processor.rs shows that this implementation is // ~50 times faster than using HashSet for very short slices. for i in 1..xs.len() { if xs[i..].contains(&xs[i - 1]) { return true; } } false } fn construct_instructions_account( message: &Message, demote_sysvar_write_locks: bool, ) -> AccountSharedData { let mut data = message.serialize_instructions(demote_sysvar_write_locks); // add room for current instruction index. data.resize(data.len() + 2, 0); AccountSharedData::from(Account { data, ..Account::default() }) } fn load_transaction( &self, ancestors: &Ancestors, tx: &Transaction, fee: u64, error_counters: &mut ErrorCounters, rent_collector: &RentCollector, feature_set: &FeatureSet, ) -> Result<LoadedTransaction> { // Copy all the accounts let message = tx.message(); if tx.signatures.is_empty() && fee != 0 { Err(TransactionError::MissingSignatureForFee) } else { // There is no way to predict what program will execute without an error // If a fee can pay for execution then the program will be scheduled let mut payer_index = None; let mut tx_rent: TransactionRent = 0; let mut accounts = Vec::with_capacity(message.account_keys.len()); let mut account_deps = Vec::with_capacity(message.account_keys.len()); let demote_sysvar_write_locks = feature_set.is_active(&feature_set::demote_sysvar_write_locks::id()); for (i, key) in message.account_keys.iter().enumerate() { let account = if message.is_non_loader_key(key, i) { if payer_index.is_none() { payer_index = Some(i); } if solana_sdk::sysvar::instructions::check_id(key) && feature_set.is_active(&feature_set::instructions_sysvar_enabled::id()) { if message.is_writable(i, demote_sysvar_write_locks) { return Err(TransactionError::InvalidAccountIndex); } Self::construct_instructions_account(message, demote_sysvar_write_locks) } else { let (account, rent) = self .accounts_db .load(ancestors, key) 
.map(|(mut account, _)| { if message.is_writable(i, demote_sysvar_write_locks) { let rent_due = rent_collector .collect_from_existing_account(&key, &mut account); (account, rent_due) } else { (account, 0) } }) .unwrap_or_default(); if account.executable && bpf_loader_upgradeable::check_id(&account.owner) { // The upgradeable loader requires the derived ProgramData account if let Ok(UpgradeableLoaderState::Program { programdata_address, }) = account.state() { if let Some(account) = self .accounts_db .load(ancestors, &programdata_address) .map(|(account, _)| account) { account_deps.push((programdata_address, account)); } else { error_counters.account_not_found += 1; return Err(TransactionError::ProgramAccountNotFound); } } else { error_counters.invalid_program_for_execution += 1; return Err(TransactionError::InvalidProgramForExecution); } } tx_rent += rent; account } } else { // Fill in an empty account for the program slots. AccountSharedData::default() }; accounts.push(account); } debug_assert_eq!(accounts.len(), message.account_keys.len()); if let Some(payer_index) = payer_index { if payer_index != 0 { warn!("Payer index should be 0! {:?}", tx); } if accounts[payer_index].lamports == 0 { error_counters.account_not_found += 1; Err(TransactionError::AccountNotFound) } else { let min_balance = match get_system_account_kind(&accounts[payer_index]) .ok_or_else(|| { error_counters.invalid_account_for_fee += 1; TransactionError::InvalidAccountForFee })? { SystemAccountKind::System => 0, SystemAccountKind::Nonce => { // Should we ever allow a fees charge to zero a nonce account's // balance. 
The state MUST be set to uninitialized in that case rent_collector.rent.minimum_balance(nonce::State::size()) } }; if accounts[payer_index].lamports < fee + min_balance { error_counters.insufficient_funds += 1; Err(TransactionError::InsufficientFundsForFee) } else { accounts[payer_index].lamports -= fee; let message = tx.message(); let loaders = message .instructions .iter() .map(|ix| { if message.account_keys.len() <= ix.program_id_index as usize { error_counters.account_not_found += 1; return Err(TransactionError::AccountNotFound); } let program_id = message.account_keys[ix.program_id_index as usize]; self.load_executable_accounts( ancestors, &program_id, error_counters, ) }) .collect::<Result<TransactionLoaders>>()?; Ok(LoadedTransaction { accounts, account_deps, loaders, rent: tx_rent, }) } } } else { error_counters.account_not_found += 1; Err(TransactionError::AccountNotFound) } } } fn load_executable_accounts( &self, ancestors: &Ancestors, program_id: &Pubkey, error_counters: &mut ErrorCounters, ) -> Result<Vec<(Pubkey, AccountSharedData)>> { let mut accounts = Vec::new(); let mut depth = 0; let mut program_id = *program_id; loop { if native_loader::check_id(&program_id) { // At the root of the chain, ready to dispatch break; } if depth >= 5 { error_counters.call_chain_too_deep += 1; return Err(TransactionError::CallChainTooDeep); } depth += 1; let program = match self .accounts_db .load(ancestors, &program_id) .map(|(account, _)| account) { Some(program) => program, None => { error_counters.account_not_found += 1; return Err(TransactionError::ProgramAccountNotFound); } }; if !program.executable { error_counters.invalid_program_for_execution += 1; return Err(TransactionError::InvalidProgramForExecution); } // Add loader to chain let program_owner = program.owner; if bpf_loader_upgradeable::check_id(&program_owner) { // The upgradeable loader requires the derived ProgramData account if let Ok(UpgradeableLoaderState::Program { programdata_address, }) = 
program.state() { if let Some(program) = self .accounts_db .load(ancestors, &programdata_address) .map(|(account, _)| account) { accounts.insert(0, (programdata_address, program)); } else { error_counters.account_not_found += 1; return Err(TransactionError::ProgramAccountNotFound); } } else { error_counters.invalid_program_for_execution += 1; return Err(TransactionError::InvalidProgramForExecution); } } accounts.insert(0, (program_id, program)); program_id = program_owner; } Ok(accounts) } pub fn load_accounts( &self, ancestors: &Ancestors, txs: &[Transaction], lock_results: Vec<TransactionCheckResult>, hash_queue: &BlockhashQueue, error_counters: &mut ErrorCounters, rent_collector: &RentCollector, feature_set: &FeatureSet, ) -> Vec<TransactionLoadResult> { let fee_config = FeeConfig { secp256k1_program_enabled: feature_set .is_active(&feature_set::secp256k1_program_enabled::id()), }; txs.iter() .zip(lock_results) .map(|etx| match etx { (tx, (Ok(()), nonce_rollback)) => { let fee_calculator = nonce_rollback .as_ref() .map(|nonce_rollback| nonce_rollback.fee_calculator()) .unwrap_or_else(|| { hash_queue .get_fee_calculator(&tx.message().recent_blockhash) .cloned() }); let fee = if let Some(fee_calculator) = fee_calculator { fee_calculator.calculate_fee_with_config(tx.message(), &fee_config) } else { return (Err(TransactionError::BlockhashNotFound), None); }; let loaded_transaction = match self.load_transaction( ancestors, tx, fee, error_counters, rent_collector, feature_set, ) { Ok(loaded_transaction) => loaded_transaction, Err(e) => return (Err(e), None), }; // Update nonce_rollback with fee-subtracted accounts let nonce_rollback = if let Some(nonce_rollback) = nonce_rollback { match NonceRollbackFull::from_partial( nonce_rollback, tx.message(), &loaded_transaction.accounts, ) { Ok(nonce_rollback) => Some(nonce_rollback), Err(e) => return (Err(e), None), } } else { None }; (Ok(loaded_transaction), nonce_rollback) } (_, (Err(e), _nonce_rollback)) => (Err(e), None), 
}) .collect() } /// Slow because lock is held for 1 operation instead of many pub fn load_slow( &self, ancestors: &Ancestors, pubkey: &Pubkey, ) -> Option<(AccountSharedData, Slot)> { let (account, slot) = self.accounts_db.load_slow(ancestors, pubkey)?; if account.lamports > 0 { Some((account, slot)) } else { None } } /// scans underlying accounts_db for this delta (slot) with a map function /// from LoadedAccount to B /// returns only the latest/current version of B for this slot pub fn scan_slot<F, B>(&self, slot: Slot, func: F) -> Vec<B> where F: Fn(LoadedAccount) -> Option<B> + Send + Sync, B: Sync + Send + Default + std::cmp::Eq, { let scan_result = self.accounts_db.scan_account_storage( slot, |loaded_account: LoadedAccount| { // Cache only has one version per key, don't need to worry about versioning func(loaded_account) }, |accum: &DashMap<Pubkey, (u64, B)>, loaded_account: LoadedAccount| { let loaded_account_pubkey = *loaded_account.pubkey(); let loaded_write_version = loaded_account.write_version(); let should_insert = accum .get(&loaded_account_pubkey) .map(|existing_entry| loaded_write_version > existing_entry.value().0) .unwrap_or(true); if should_insert { if let Some(val) = func(loaded_account) { // Detected insertion is necessary, grabs the write lock to commit the write, match accum.entry(loaded_account_pubkey) { // Double check in case another thread interleaved a write between the read + write. 
Occupied(mut occupied_entry) => { if loaded_write_version > occupied_entry.get().0 { occupied_entry.insert((loaded_write_version, val)); } } Vacant(vacant_entry) => { vacant_entry.insert((loaded_write_version, val)); } } } } }, ); match scan_result { ScanStorageResult::Cached(cached_result) => cached_result, ScanStorageResult::Stored(stored_result) => stored_result .into_iter() .map(|(_pubkey, (_latest_write_version, val))| val) .collect(), } } pub fn load_by_program_slot( &self, slot: Slot, program_id: Option<&Pubkey>, ) -> Vec<(Pubkey, AccountSharedData)> { self.scan_slot(slot, |stored_account| { let hit = match program_id { None => true, Some(program_id) => stored_account.owner() == program_id, }; if hit { Some((*stored_account.pubkey(), stored_account.account())) } else { None } }) } pub fn load_largest_accounts( &self, ancestors: &Ancestors, num: usize, filter_by_address: &HashSet<Pubkey>, filter: AccountAddressFilter, ) -> Vec<(Pubkey, u64)> { if num == 0 { return vec![]; } let account_balances = self.accounts_db.scan_accounts( ancestors, |collector: &mut BinaryHeap<Reverse<(u64, Pubkey)>>, option| { if let Some((pubkey, account, _slot)) = option { if account.lamports == 0 { return; } let contains_address = filter_by_address.contains(pubkey); let collect = match filter { AccountAddressFilter::Exclude => !contains_address, AccountAddressFilter::Include => contains_address, }; if !collect { return; } if collector.len() == num { let Reverse(entry) = collector .peek() .expect("BinaryHeap::peek should succeed when len > 0"); if *entry >= (account.lamports, *pubkey) { return; } collector.pop(); } collector.push(Reverse((account.lamports, *pubkey))); } }, ); account_balances .into_sorted_vec() .into_iter() .map(|Reverse((balance, pubkey))| (pubkey, balance)) .collect() } pub fn calculate_capitalization(&self, ancestors: &Ancestors) -> u64 { self.accounts_db.unchecked_scan_accounts( "calculate_capitalization_scan_elapsed", ancestors, |total_capitalization: &mut u64, 
(_pubkey, loaded_account, _slot)| { let lamports = loaded_account.lamports(); if Self::is_loadable(lamports) { let account_cap = AccountsDb::account_balance_for_capitalization( lamports, &loaded_account.owner(), loaded_account.executable(), ); *total_capitalization = AccountsDb::checked_iterative_sum_for_capitalization( *total_capitalization, account_cap, ); } }, ) } #[must_use] pub fn verify_bank_hash_and_lamports( &self, slot: Slot, ancestors: &Ancestors, total_lamports: u64, ) -> bool { if let Err(err) = self.accounts_db .verify_bank_hash_and_lamports(slot, ancestors, total_lamports) { warn!("verify_bank_hash failed: {:?}", err); false } else { true } } fn is_loadable(lamports: u64) -> bool { // Don't ever load zero lamport accounts into runtime because // the existence of zero-lamport accounts are never deterministic!! lamports > 0 } fn load_while_filtering<F: Fn(&AccountSharedData) -> bool>( collector: &mut Vec<(Pubkey, AccountSharedData)>, some_account_tuple: Option<(&Pubkey, AccountSharedData, Slot)>, filter: F, ) { if let Some(mapped_account_tuple) = some_account_tuple .filter(|(_, account, _)| Self::is_loadable(account.lamports) && filter(account)) .map(|(pubkey, account, _slot)| (*pubkey, account)) { collector.push(mapped_account_tuple) } } pub fn load_by_program( &self, ancestors: &Ancestors, program_id: &Pubkey, ) -> Vec<(Pubkey, AccountSharedData)> { self.accounts_db.scan_accounts( ancestors, |collector: &mut Vec<(Pubkey, AccountSharedData)>, some_account_tuple| { Self::load_while_filtering(collector, some_account_tuple, |account| { account.owner == *program_id }) }, ) } pub fn load_by_program_with_filter<F: Fn(&AccountSharedData) -> bool>( &self, ancestors: &Ancestors, program_id: &Pubkey, filter: F, ) -> Vec<(Pubkey, AccountSharedData)> { self.accounts_db.scan_accounts( ancestors, |collector: &mut Vec<(Pubkey, AccountSharedData)>, some_account_tuple| { Self::load_while_filtering(collector, some_account_tuple, |account| { account.owner == *program_id 
&& filter(account) }) }, ) } pub fn load_by_index_key_with_filter<F: Fn(&AccountSharedData) -> bool>( &self, ancestors: &Ancestors, index_key: &IndexKey, filter: F, ) -> Vec<(Pubkey, AccountSharedData)> { self.accounts_db.index_scan_accounts( ancestors, *index_key, |collector: &mut Vec<(Pubkey, AccountSharedData)>, some_account_tuple| { Self::load_while_filtering(collector, some_account_tuple, |account| filter(account)) }, ) } pub fn load_all(&self, ancestors: &Ancestors) -> Vec<(Pubkey, AccountSharedData, Slot)> { self.accounts_db.scan_accounts( ancestors, |collector: &mut Vec<(Pubkey, AccountSharedData, Slot)>, some_account_tuple| { if let Some((pubkey, account, slot)) = some_account_tuple.filter(|(_, account, _)| Self::is_loadable(account.lamports)) { collector.push((*pubkey, account, slot)) } }, ) } pub fn load_to_collect_rent_eagerly<R: RangeBounds<Pubkey>>( &self, ancestors: &Ancestors, range: R, ) -> Vec<(Pubkey, AccountSharedData)> { self.accounts_db.range_scan_accounts( "load_to_collect_rent_eagerly_scan_elapsed", ancestors, range, |collector: &mut Vec<(Pubkey, AccountSharedData)>, option| { Self::load_while_filtering(collector, option, |_| true) }, ) } /// Slow because lock is held for 1 operation instead of many. 
/// WARNING: This noncached version is only to be used for tests/benchmarking
    /// as bypassing the cache in general is not supported
    pub fn store_slow_uncached(&self, slot: Slot, pubkey: &Pubkey, account: &AccountSharedData) {
        self.accounts_db.store_uncached(slot, &[(pubkey, account)]);
    }

    /// Stores a single account for `slot` through the accounts-db write cache.
    pub fn store_slow_cached(&self, slot: Slot, pubkey: &Pubkey, account: &AccountSharedData) {
        self.accounts_db.store_cached(slot, &[(pubkey, account)]);
    }

    /// Attempts to take the locks for one transaction's accounts.
    ///
    /// All-or-nothing: every key is checked before any lock is taken, so on
    /// `Err(AccountInUse)` no lock state has been modified.
    fn lock_account(
        &self,
        account_locks: &mut AccountLocks,
        writable_keys: Vec<&Pubkey>,
        readonly_keys: Vec<&Pubkey>,
    ) -> Result<()> {
        for k in writable_keys.iter() {
            // A write lock conflicts with any existing lock on the key.
            if account_locks.is_locked_write(k) || account_locks.is_locked_readonly(k) {
                debug!("Writable account in use: {:?}", k);
                return Err(TransactionError::AccountInUse);
            }
        }
        for k in readonly_keys.iter() {
            // A read lock only conflicts with an existing write lock.
            if account_locks.is_locked_write(k) {
                debug!("Read-only account in use: {:?}", k);
                return Err(TransactionError::AccountInUse);
            }
        }

        for k in writable_keys {
            account_locks.write_locks.insert(*k);
        }

        for k in readonly_keys {
            // lock_readonly() returns false when no read-lock entry exists yet,
            // in which case a fresh entry is created.
            if !account_locks.lock_readonly(k) {
                account_locks.insert_new_readonly(k);
            }
        }

        Ok(())
    }

    /// Releases the locks held for `tx`, unless `result` indicates the
    /// transaction never acquired them in the first place.
    fn unlock_account(
        &self,
        tx: &Transaction,
        result: &Result<()>,
        locks: &mut AccountLocks,
        demote_sysvar_write_locks: bool,
    ) {
        match result {
            // These errors mean lock_account() never took the locks, so there
            // is nothing to release.
            Err(TransactionError::AccountInUse) => (),
            Err(TransactionError::SanitizeFailure) => (),
            Err(TransactionError::AccountLoadedTwice) => (),
            _ => {
                let (writable_keys, readonly_keys) = &tx
                    .message()
                    .get_account_keys_by_lock_type(demote_sysvar_write_locks);
                for k in writable_keys {
                    locks.unlock_write(k);
                }
                for k in readonly_keys {
                    locks.unlock_readonly(k);
                }
            }
        }
    }

    /// Convenience accessor for just the hash from `bank_hash_info_at`.
    pub fn bank_hash_at(&self, slot: Slot) -> Hash {
        self.bank_hash_info_at(slot).hash
    }

    /// Returns the `BankHashInfo` recorded for `slot`, with its hash replaced
    /// by the freshly computed accounts delta hash.
    ///
    /// Panics if no bank hash was ever recorded for `slot`.
    pub fn bank_hash_info_at(&self, slot: Slot) -> BankHashInfo {
        let delta_hash = self.accounts_db.get_accounts_delta_hash(slot);
        let bank_hashes = self.accounts_db.bank_hashes.read().unwrap();
        let mut hash_info = bank_hashes
            .get(&slot)
            .expect("No bank hash was found for this bank, that should not be possible")
            .clone();
        hash_info.hash = delta_hash;
        hash_info
    }

    /// This function will prevent multiple threads from modifying the same account state at the
    /// same time
    #[must_use]
    pub fn lock_accounts(
        &self,
        txs: &[Transaction],
        demote_sysvar_write_locks: bool,
    ) -> Vec<Result<()>> {
        use solana_sdk::sanitize::Sanitize;
        // Sanitize and pre-compute each transaction's lock sets before
        // entering the account_locks critical section.
        let keys: Vec<Result<_>> = txs
            .iter()
            .map(|tx| {
                tx.sanitize().map_err(TransactionError::from)?;

                // A key appearing twice could be locked twice; reject it.
                if Self::has_duplicates(&tx.message.account_keys) {
                    return Err(TransactionError::AccountLoadedTwice);
                }

                Ok(tx
                    .message()
                    .get_account_keys_by_lock_type(demote_sysvar_write_locks))
            })
            .collect();
        // NOTE(review): `let mut x = &mut guard` is a redundant double
        // reference (relies on auto-deref at the call below); clippy-level.
        let mut account_locks = &mut self.account_locks.lock().unwrap();
        // One Result per input transaction, in order.
        keys.into_iter()
            .map(|result| match result {
                Ok((writable_keys, readonly_keys)) => {
                    self.lock_account(&mut account_locks, writable_keys, readonly_keys)
                }
                Err(e) => Err(e),
            })
            .collect()
    }

    /// Once accounts are unlocked, new transactions that modify that state can enter the pipeline
    pub fn unlock_accounts(
        &self,
        txs: &[Transaction],
        results: &[Result<()>],
        demote_sysvar_write_locks: bool,
    ) {
        let mut account_locks = self.account_locks.lock().unwrap();
        debug!("bank unlock accounts");

        // `results` must pair element-wise with `txs` (same order as lock_accounts).
        for (tx, lock_result) in txs.iter().zip(results) {
            self.unlock_account(
                tx,
                lock_result,
                &mut account_locks,
                demote_sysvar_write_locks,
            );
        }
    }

    /// Store the accounts into the DB
    // allow(clippy) needed for various gating flags
    #[allow(clippy::too_many_arguments)]
    pub fn store_cached(
        &self,
        slot: Slot,
        txs: &[Transaction],
        res: &[TransactionExecutionResult],
        loaded: &mut [TransactionLoadResult],
        rent_collector: &RentCollector,
        last_blockhash_with_fee_calculator: &(Hash, FeeCalculator),
        fix_recent_blockhashes_sysvar_delay: bool,
        demote_sysvar_write_locks: bool,
    ) {
        let accounts_to_store = self.collect_accounts_to_store(
            txs,
            res,
            loaded,
            rent_collector,
            last_blockhash_with_fee_calculator,
            fix_recent_blockhashes_sysvar_delay,
            demote_sysvar_write_locks,
        );
        self.accounts_db.store_cached(slot, &accounts_to_store);
    }

    /// Purge a slot if it is not a root
    /// Root slots cannot be purged
    pub fn purge_slot(&self, slot: Slot) {
        self.accounts_db.purge_slot(slot);
    }

    /// Add a slot to root. Root slots cannot be purged
    pub fn add_root(&self, slot: Slot) {
        self.accounts_db.add_root(slot)
    }

    /// Gathers the `(pubkey, account)` pairs that must be written back after
    /// executing `txs`, applying rent for newly created accounts and
    /// durable-nonce/fee-payer rollback state for failed transactions.
    fn collect_accounts_to_store<'a>(
        &self,
        txs: &'a [Transaction],
        res: &'a [TransactionExecutionResult],
        loaded: &'a mut [TransactionLoadResult],
        rent_collector: &RentCollector,
        last_blockhash_with_fee_calculator: &(Hash, FeeCalculator),
        fix_recent_blockhashes_sysvar_delay: bool,
        demote_sysvar_write_locks: bool,
    ) -> Vec<(&'a Pubkey, &'a AccountSharedData)> {
        let mut accounts = Vec::with_capacity(loaded.len());
        for (i, ((raccs, _nonce_rollback), tx)) in loaded.iter_mut().zip(txs).enumerate() {
            if raccs.is_err() {
                // Transaction failed to load; nothing to write back.
                continue;
            }
            let (res, nonce_rollback) = &res[i];
            // Durable-nonce transactions keep their rollback data both on
            // success and on InstructionError; any other failure skips the
            // transaction entirely.
            let maybe_nonce_rollback = match (res, nonce_rollback) {
                (Ok(_), Some(nonce_rollback)) => {
                    let pubkey = nonce_rollback.nonce_address();
                    let acc = nonce_rollback.nonce_account();
                    let maybe_fee_account = nonce_rollback.fee_account();
                    Some((pubkey, acc, maybe_fee_account))
                }
                (Err(TransactionError::InstructionError(_, _)), Some(nonce_rollback)) => {
                    let pubkey = nonce_rollback.nonce_address();
                    let acc = nonce_rollback.nonce_account();
                    let maybe_fee_account = nonce_rollback.fee_account();
                    Some((pubkey, acc, maybe_fee_account))
                }
                (Ok(_), _nonce_rollback) => None,
                (Err(_), _nonce_rollback) => continue,
            };

            let message = &tx.message();
            let loaded_transaction = raccs.as_mut().unwrap();
            let mut fee_payer_index = None;
            // Walk the non-loader keys paired with their loaded accounts.
            for ((i, key), account) in message
                .account_keys
                .iter()
                .enumerate()
                .zip(loaded_transaction.accounts.iter_mut())
                .filter(|((i, key), _account)| message.is_non_loader_key(key, *i))
            {
                let is_nonce_account = prepare_if_nonce_account(
                    account,
                    key,
                    res,
                    maybe_nonce_rollback,
                    last_blockhash_with_fee_calculator,
                    fix_recent_blockhashes_sysvar_delay,
                );
                // Record the first non-loader key; it is treated as the fee
                // payer below.
                if fee_payer_index.is_none() {
                    fee_payer_index = Some(i);
                }
                let
is_fee_payer = Some(i) == fee_payer_index;
                // Write back writable accounts for successful transactions,
                // plus nonce/fee-payer rollback accounts for failed ones.
                if message.is_writable(i, demote_sysvar_write_locks)
                    && (res.is_ok()
                        || (maybe_nonce_rollback.is_some() && (is_nonce_account || is_fee_payer)))
                {
                    if res.is_err() {
                        match (is_nonce_account, is_fee_payer, maybe_nonce_rollback) {
                            // nonce is fee-payer, state updated in `prepare_if_nonce_account()`
                            (true, true, Some((_, _, None))) => (),
                            // nonce not fee-payer, state updated in `prepare_if_nonce_account()`
                            (true, false, Some((_, _, Some(_)))) => (),
                            // not nonce, but fee-payer. rollback to cached state
                            (false, true, Some((_, _, Some(fee_payer_account)))) => {
                                *account = fee_payer_account.clone();
                            }
                            _ => panic!("unexpected nonce_rollback condition"),
                        }
                    }
                    if account.rent_epoch == INITIAL_RENT_EPOCH {
                        // Rent epoch still at its initial value: treat the
                        // account as newly created for rent collection.
                        loaded_transaction.rent +=
                            rent_collector.collect_from_created_account(&key, account);
                    }
                    accounts.push((key, &*account));
                }
            }
        }
        accounts
    }
}

/// If `account` is this transaction's rollback nonce account, restores or
/// refreshes its state as needed and returns `true`; otherwise returns `false`.
///
/// On a failed transaction the nonce account is rolled back to the cached
/// pre-execution state (`nonce_acc`); when overwriting, the stored blockhash
/// and fee calculator are refreshed to the latest values.
pub fn prepare_if_nonce_account(
    account: &mut AccountSharedData,
    account_pubkey: &Pubkey,
    tx_result: &Result<()>,
    maybe_nonce_rollback: Option<(&Pubkey, &AccountSharedData, Option<&AccountSharedData>)>,
    last_blockhash_with_fee_calculator: &(Hash, FeeCalculator),
    fix_recent_blockhashes_sysvar_delay: bool,
) -> bool {
    if let Some((nonce_key, nonce_acc, _maybe_fee_account)) = maybe_nonce_rollback {
        if account_pubkey == nonce_key {
            let overwrite = if tx_result.is_err() {
                // Nonce TX failed with an InstructionError. Roll back
                // its account state
                *account = nonce_acc.clone();
                true
            } else {
                // Retain overwrite on successful transactions until
                // recent_blockhashes_sysvar_delay fix is activated
                !fix_recent_blockhashes_sysvar_delay
            };
            if overwrite {
                // Since hash_age_kind is DurableNonce, unwrap is safe here
                let state = StateMut::<nonce::state::Versions>::state(nonce_acc)
                    .unwrap()
                    .convert_to_current();
                if let nonce::State::Initialized(ref data) = state {
                    // Refresh the stored blockhash/fee to the latest values
                    // (NOTE(review): presumably so the stale blockhash cannot
                    // be reused — confirm against the nonce processor).
                    let new_data =
                        nonce::state::Versions::new_current(nonce::State::Initialized(
                            nonce::state::Data {
                                blockhash: last_blockhash_with_fee_calculator.0,
                                fee_calculator: last_blockhash_with_fee_calculator.1.clone(),
                                ..data.clone()
                            },
                        ));
                    account.set_state(&new_data).unwrap();
                }
            }
            return true;
        }
    }
    false
}

/// Test/bench helper: creates `num` accounts with lamports `1..=num`, stores
/// them (uncached) at `slot`, and appends their pubkeys to `pubkeys`.
pub fn create_test_accounts(
    accounts: &Accounts,
    pubkeys: &mut Vec<Pubkey>,
    num: usize,
    slot: Slot,
) {
    for t in 0..num {
        let pubkey = solana_sdk::pubkey::new_rand();
        let account =
            AccountSharedData::new((t + 1) as u64, 0, &AccountSharedData::default().owner);
        accounts.store_slow_uncached(slot, &pubkey, &account);
        pubkeys.push(pubkey);
    }
}

// Only used by bench, not safe to call otherwise accounts can conflict with the
// accounts cache!
pub fn update_accounts_bench(accounts: &Accounts, pubkeys: &[Pubkey], slot: u64) {
    // Overwrite each pubkey with a fresh account holding a random 0..10 balance.
    for pubkey in pubkeys {
        let amount = thread_rng().gen_range(0, 10);
        let account = AccountSharedData::new(amount, 0, &AccountSharedData::default().owner);
        accounts.store_slow_uncached(slot, &pubkey, &account);
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::rent_collector::RentCollector;
    use solana_sdk::{
        account::{AccountSharedData, WritableAccount},
        epoch_schedule::EpochSchedule,
        fee_calculator::FeeCalculator,
        genesis_config::ClusterType,
        hash::Hash,
        instruction::{CompiledInstruction, InstructionError},
        message::Message,
        nonce, nonce_account,
        rent::Rent,
        signature::{keypair_from_seed, Keypair, Signer},
        system_instruction, system_program,
    };
    use std::{
        sync::atomic::{AtomicBool, AtomicU64, Ordering},
        {thread, time},
    };

    /// Test helper: registers `tx`'s blockhash with the given fee calculator,
    /// stores the `ka` accounts at slot 0, and runs `load_accounts` with the
    /// supplied rent collector.
    fn load_accounts_with_fee_and_rent(
        tx: Transaction,
        ka: &[(Pubkey, AccountSharedData)],
        fee_calculator: &FeeCalculator,
        rent_collector: &RentCollector,
        error_counters: &mut ErrorCounters,
    ) -> Vec<TransactionLoadResult> {
        let mut hash_queue = BlockhashQueue::new(100);
        hash_queue.register_hash(&tx.message().recent_blockhash, &fee_calculator);
        let accounts =
            Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false);
        for ka in ka.iter() {
            accounts.store_slow_uncached(0, &ka.0, &ka.1);
        }

        let ancestors = vec![(0, 0)].into_iter().collect();
        accounts.load_accounts(
            &ancestors,
            &[tx],
            vec![(Ok(()), None)],
            &hash_queue,
            error_counters,
            rent_collector,
            &FeatureSet::all_enabled(),
        )
    }

    /// Test helper: like `load_accounts_with_fee_and_rent` with a default
    /// rent collector.
    fn load_accounts_with_fee(
        tx: Transaction,
        ka: &[(Pubkey, AccountSharedData)],
        fee_calculator: &FeeCalculator,
        error_counters: &mut ErrorCounters,
    ) -> Vec<TransactionLoadResult> {
        let rent_collector = RentCollector::default();
        load_accounts_with_fee_and_rent(tx, ka, fee_calculator, &rent_collector, error_counters)
    }

    /// Test helper: default fee calculator and default rent collector.
    fn load_accounts(
        tx: Transaction,
        ka: &[(Pubkey, AccountSharedData)],
        error_counters: &mut ErrorCounters,
    ) -> Vec<TransactionLoadResult> {
        let fee_calculator = FeeCalculator::default();
        load_accounts_with_fee(tx, ka, &fee_calculator, error_counters)
    }

    /// A transaction with no signers/keys cannot resolve its accounts.
    #[test]
    fn test_load_accounts_no_key() {
        let accounts: Vec<(Pubkey, AccountSharedData)> = Vec::new();
        let mut error_counters = ErrorCounters::default();

        let instructions = vec![CompiledInstruction::new(0, &(), vec![0])];
        let tx = Transaction::new_with_compiled_instructions::<[&Keypair; 0]>(
            &[],
            &[],
            Hash::default(),
            vec![native_loader::id()],
            instructions,
        );

        let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters);

        assert_eq!(error_counters.account_not_found, 1);
        assert_eq!(loaded_accounts.len(), 1);
        assert_eq!(
            loaded_accounts[0],
            (Err(TransactionError::AccountNotFound), None,)
        );
    }

    /// The signer's account was never stored, so loading fails.
    #[test]
    fn test_load_accounts_no_account_0_exists() {
        let accounts: Vec<(Pubkey, AccountSharedData)> = Vec::new();
        let mut error_counters = ErrorCounters::default();

        let keypair = Keypair::new();

        let instructions = vec![CompiledInstruction::new(1, &(), vec![0])];
        let tx = Transaction::new_with_compiled_instructions(
            &[&keypair],
            &[],
            Hash::default(),
            vec![native_loader::id()],
            instructions,
        );

        let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters);

        assert_eq!(error_counters.account_not_found, 1);
        assert_eq!(loaded_accounts.len(), 1);
        assert_eq!(
            loaded_accounts[0],
            (Err(TransactionError::AccountNotFound), None,),
        );
    }

    /// The referenced program id (Pubkey::default()) has no stored account.
    #[test]
    fn test_load_accounts_unknown_program_id() {
        let mut accounts: Vec<(Pubkey, AccountSharedData)> = Vec::new();
        let mut error_counters = ErrorCounters::default();

        let keypair = Keypair::new();
        let key0 = keypair.pubkey();
        let key1 = Pubkey::new(&[5u8; 32]);

        let account = AccountSharedData::new(1, 0, &Pubkey::default());
        accounts.push((key0, account));

        let account = AccountSharedData::new(2, 1, &Pubkey::default());
        accounts.push((key1, account));

        let instructions = vec![CompiledInstruction::new(1, &(), vec![0])];
        let tx = Transaction::new_with_compiled_instructions(
            &[&keypair],
            &[],
            Hash::default(),
            vec![Pubkey::default()],
instructions,
        );

        let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters);

        assert_eq!(error_counters.account_not_found, 1);
        assert_eq!(loaded_accounts.len(), 1);
        assert_eq!(
            loaded_accounts[0],
            (Err(TransactionError::ProgramAccountNotFound), None,)
        );
    }

    /// The fee payer's 1-lamport balance cannot cover a 10-lamport fee.
    #[test]
    fn test_load_accounts_insufficient_funds() {
        let mut accounts: Vec<(Pubkey, AccountSharedData)> = Vec::new();
        let mut error_counters = ErrorCounters::default();

        let keypair = Keypair::new();
        let key0 = keypair.pubkey();

        let account = AccountSharedData::new(1, 0, &Pubkey::default());
        accounts.push((key0, account));

        let instructions = vec![CompiledInstruction::new(1, &(), vec![0])];
        let tx = Transaction::new_with_compiled_instructions(
            &[&keypair],
            &[],
            Hash::default(),
            vec![native_loader::id()],
            instructions,
        );

        let fee_calculator = FeeCalculator::new(10);
        assert_eq!(fee_calculator.calculate_fee(tx.message()), 10);

        let loaded_accounts =
            load_accounts_with_fee(tx, &accounts, &fee_calculator, &mut error_counters);

        assert_eq!(error_counters.insufficient_funds, 1);
        assert_eq!(loaded_accounts.len(), 1);
        assert_eq!(
            loaded_accounts[0].clone(),
            (Err(TransactionError::InsufficientFundsForFee), None,),
        );
    }

    /// A fee payer owned by an arbitrary program is not a valid fee source.
    #[test]
    fn test_load_accounts_invalid_account_for_fee() {
        let mut accounts: Vec<(Pubkey, AccountSharedData)> = Vec::new();
        let mut error_counters = ErrorCounters::default();

        let keypair = Keypair::new();
        let key0 = keypair.pubkey();

        let account = AccountSharedData::new(1, 1, &solana_sdk::pubkey::new_rand()); // <-- owner is not the system program
        accounts.push((key0, account));

        let instructions = vec![CompiledInstruction::new(1, &(), vec![0])];
        let tx = Transaction::new_with_compiled_instructions(
            &[&keypair],
            &[],
            Hash::default(),
            vec![native_loader::id()],
            instructions,
        );

        let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters);

        assert_eq!(error_counters.invalid_account_for_fee, 1);
        assert_eq!(loaded_accounts.len(), 1);
        assert_eq!(
            loaded_accounts[0],
            (Err(TransactionError::InvalidAccountForFee), None,),
        );
    }

    /// Durable-nonce fee payer: the fee may leave exactly the rent-exempt
    /// minimum, but must not leave zero or a sub-minimum balance.
    #[test]
    fn test_load_accounts_fee_payer_is_nonce() {
        let mut error_counters = ErrorCounters::default();
        let rent_collector = RentCollector::new(
            0,
            &EpochSchedule::default(),
            500_000.0,
            &Rent {
                lamports_per_byte_year: 42,
                ..Rent::default()
            },
        );
        let min_balance = rent_collector.rent.minimum_balance(nonce::State::size());
        let fee_calculator = FeeCalculator::new(min_balance);
        let nonce = Keypair::new();
        let mut accounts = vec![(
            nonce.pubkey(),
            AccountSharedData::new_data(
                min_balance * 2,
                &nonce::state::Versions::new_current(nonce::State::Initialized(
                    nonce::state::Data::default(),
                )),
                &system_program::id(),
            )
            .unwrap(),
        )];
        let instructions = vec![CompiledInstruction::new(1, &(), vec![0])];
        let tx = Transaction::new_with_compiled_instructions(
            &[&nonce],
            &[],
            Hash::default(),
            vec![native_loader::id()],
            instructions,
        );

        // Fee leaves min_balance balance succeeds
        let loaded_accounts = load_accounts_with_fee_and_rent(
            tx.clone(),
            &accounts,
            &fee_calculator,
            &rent_collector,
            &mut error_counters,
        );
        assert_eq!(loaded_accounts.len(), 1);
        let (load_res, _nonce_rollback) = &loaded_accounts[0];
        let loaded_transaction = load_res.as_ref().unwrap();
        assert_eq!(loaded_transaction.accounts[0].lamports, min_balance);

        // Fee leaves zero balance fails
        accounts[0].1.lamports = min_balance;
        let loaded_accounts = load_accounts_with_fee_and_rent(
            tx.clone(),
            &accounts,
            &fee_calculator,
            &rent_collector,
            &mut error_counters,
        );
        assert_eq!(loaded_accounts.len(), 1);
        let (load_res, _nonce_rollback) = &loaded_accounts[0];
        assert_eq!(*load_res, Err(TransactionError::InsufficientFundsForFee));

        // Fee leaves non-zero, but sub-min_balance balance fails
        accounts[0].1.lamports = 3 * min_balance / 2;
        let loaded_accounts = load_accounts_with_fee_and_rent(
            tx,
            &accounts,
            &fee_calculator,
            &rent_collector,
            &mut error_counters,
        );
        assert_eq!(loaded_accounts.len(), 1);
        let (load_res, _nonce_rollback) = &loaded_accounts[0];
        assert_eq!(*load_res,
Err(TransactionError::InsufficientFundsForFee));
    }

    /// Happy path: both keys load, and the single native-loader program
    /// contributes an empty loader chain.
    #[test]
    fn test_load_accounts_no_loaders() {
        let mut accounts: Vec<(Pubkey, AccountSharedData)> = Vec::new();
        let mut error_counters = ErrorCounters::default();

        let keypair = Keypair::new();
        let key0 = keypair.pubkey();
        let key1 = Pubkey::new(&[5u8; 32]);

        let mut account = AccountSharedData::new(1, 0, &Pubkey::default());
        account.rent_epoch = 1;
        accounts.push((key0, account));

        let mut account = AccountSharedData::new(2, 1, &Pubkey::default());
        account.rent_epoch = 1;
        accounts.push((key1, account));

        let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])];
        let tx = Transaction::new_with_compiled_instructions(
            &[&keypair],
            &[key1],
            Hash::default(),
            vec![native_loader::id()],
            instructions,
        );

        let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters);

        assert_eq!(error_counters.account_not_found, 0);
        assert_eq!(loaded_accounts.len(), 1);
        match &loaded_accounts[0] {
            (Ok(loaded_transaction), _nonce_rollback) => {
                assert_eq!(loaded_transaction.accounts.len(), 3);
                assert_eq!(loaded_transaction.accounts[0], accounts[0].1);
                assert_eq!(loaded_transaction.loaders.len(), 1);
                assert_eq!(loaded_transaction.loaders[0].len(), 0);
            }
            (Err(e), _nonce_rollback) => Err(e).unwrap(),
        }
    }

    /// A six-deep chain of owner-linked executable accounts exceeds the
    /// allowed call depth.
    #[test]
    fn test_load_accounts_max_call_depth() {
        let mut accounts: Vec<(Pubkey, AccountSharedData)> = Vec::new();
        let mut error_counters = ErrorCounters::default();

        let keypair = Keypair::new();
        let key0 = keypair.pubkey();
        let key1 = Pubkey::new(&[5u8; 32]);
        let key2 = Pubkey::new(&[6u8; 32]);
        let key3 = Pubkey::new(&[7u8; 32]);
        let key4 = Pubkey::new(&[8u8; 32]);
        let key5 = Pubkey::new(&[9u8; 32]);
        let key6 = Pubkey::new(&[10u8; 32]);

        let account = AccountSharedData::new(1, 0, &Pubkey::default());
        accounts.push((key0, account));

        // Build the owner chain: key6 -> key5 -> ... -> key1 -> native loader.
        let mut account = AccountSharedData::new(40, 1, &Pubkey::default());
        account.executable = true;
        account.owner = native_loader::id();
        accounts.push((key1, account));

        let mut account = AccountSharedData::new(41, 1, &Pubkey::default());
        account.executable = true;
        account.owner = key1;
        accounts.push((key2, account));

        let mut account = AccountSharedData::new(42, 1, &Pubkey::default());
        account.executable = true;
        account.owner = key2;
        accounts.push((key3, account));

        let mut account = AccountSharedData::new(43, 1, &Pubkey::default());
        account.executable = true;
        account.owner = key3;
        accounts.push((key4, account));

        let mut account = AccountSharedData::new(44, 1, &Pubkey::default());
        account.executable = true;
        account.owner = key4;
        accounts.push((key5, account));

        let mut account = AccountSharedData::new(45, 1, &Pubkey::default());
        account.executable = true;
        account.owner = key5;
        accounts.push((key6, account));

        let instructions = vec![CompiledInstruction::new(1, &(), vec![0])];
        let tx = Transaction::new_with_compiled_instructions(
            &[&keypair],
            &[],
            Hash::default(),
            vec![key6],
            instructions,
        );

        let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters);

        assert_eq!(error_counters.call_chain_too_deep, 1);
        assert_eq!(loaded_accounts.len(), 1);
        assert_eq!(
            loaded_accounts[0],
            (Err(TransactionError::CallChainTooDeep), None,)
        );
    }

    /// The instruction's program_ids_index points at the fee payer (index 0),
    /// which is not a valid program for execution.
    #[test]
    fn test_load_accounts_bad_program_id() {
        let mut accounts: Vec<(Pubkey, AccountSharedData)> = Vec::new();
        let mut error_counters = ErrorCounters::default();

        let keypair = Keypair::new();
        let key0 = keypair.pubkey();
        let key1 = Pubkey::new(&[5u8; 32]);

        let account = AccountSharedData::new(1, 0, &Pubkey::default());
        accounts.push((key0, account));

        let mut account = AccountSharedData::new(40, 1, &native_loader::id());
        account.executable = true;
        accounts.push((key1, account));

        let instructions = vec![CompiledInstruction::new(0, &(), vec![0])];
        let tx = Transaction::new_with_compiled_instructions(
            &[&keypair],
            &[],
            Hash::default(),
            vec![key1],
            instructions,
        );

        let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters);

        assert_eq!(error_counters.invalid_program_for_execution, 1);
assert_eq!(loaded_accounts.len(), 1);
        assert_eq!(
            loaded_accounts[0],
            (Err(TransactionError::InvalidProgramForExecution), None,)
        );
    }

    /// The program account exists and is executable, but its owner
    /// (Pubkey::default()) is not a recognized loader.
    #[test]
    fn test_load_accounts_bad_owner() {
        let mut accounts: Vec<(Pubkey, AccountSharedData)> = Vec::new();
        let mut error_counters = ErrorCounters::default();

        let keypair = Keypair::new();
        let key0 = keypair.pubkey();
        let key1 = Pubkey::new(&[5u8; 32]);

        let account = AccountSharedData::new(1, 0, &Pubkey::default());
        accounts.push((key0, account));

        let mut account = AccountSharedData::new(40, 1, &Pubkey::default());
        account.executable = true;
        accounts.push((key1, account));

        let instructions = vec![CompiledInstruction::new(1, &(), vec![0])];
        let tx = Transaction::new_with_compiled_instructions(
            &[&keypair],
            &[],
            Hash::default(),
            vec![key1],
            instructions,
        );

        let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters);

        assert_eq!(error_counters.account_not_found, 1);
        assert_eq!(loaded_accounts.len(), 1);
        assert_eq!(
            loaded_accounts[0],
            (Err(TransactionError::ProgramAccountNotFound), None,)
        );
    }

    /// The program account is not marked executable, so it cannot be invoked.
    #[test]
    fn test_load_accounts_not_executable() {
        let mut accounts: Vec<(Pubkey, AccountSharedData)> = Vec::new();
        let mut error_counters = ErrorCounters::default();

        let keypair = Keypair::new();
        let key0 = keypair.pubkey();
        let key1 = Pubkey::new(&[5u8; 32]);

        let account = AccountSharedData::new(1, 0, &Pubkey::default());
        accounts.push((key0, account));

        let account = AccountSharedData::new(40, 1, &native_loader::id());
        accounts.push((key1, account));

        let instructions = vec![CompiledInstruction::new(1, &(), vec![0])];
        let tx = Transaction::new_with_compiled_instructions(
            &[&keypair],
            &[],
            Hash::default(),
            vec![key1],
            instructions,
        );

        let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters);

        assert_eq!(error_counters.invalid_program_for_execution, 1);
        assert_eq!(loaded_accounts.len(), 1);
        assert_eq!(
            loaded_accounts[0],
            (Err(TransactionError::InvalidProgramForExecution), None,)
        );
    }

    #[test]
    fn test_load_accounts_multiple_loaders() {
        let mut accounts: Vec<(Pubkey, AccountSharedData)> = Vec::new();
        let mut error_counters = ErrorCounters::default();

        let keypair = Keypair::new();
        let key0 = keypair.pubkey();
        let key1 = Pubkey::new(&[5u8; 32]);
        let key2 = Pubkey::new(&[6u8; 32]);

        let mut account = AccountSharedData::new(1, 0, &Pubkey::default());
        account.rent_epoch = 1;
        accounts.push((key0, account));

        // key1 is a loader owned by the native loader; key2 is owned by key1,
        // giving the two programs loader chains of different lengths.
        let mut account = AccountSharedData::new(40, 1, &Pubkey::default());
        account.executable = true;
        account.rent_epoch = 1;
        account.owner = native_loader::id();
        accounts.push((key1, account));

        let mut account = AccountSharedData::new(41, 1, &Pubkey::default());
        account.executable = true;
        account.rent_epoch = 1;
        account.owner = key1;
        accounts.push((key2, account));

        let instructions = vec![
            CompiledInstruction::new(1, &(), vec![0]),
            CompiledInstruction::new(2, &(), vec![0]),
        ];
        let tx = Transaction::new_with_compiled_instructions(
            &[&keypair],
            &[],
            Hash::default(),
            vec![key1, key2],
            instructions,
        );

        let loaded_accounts = load_accounts(tx, &accounts, &mut error_counters);

        assert_eq!(error_counters.account_not_found, 0);
        assert_eq!(loaded_accounts.len(), 1);
        match &loaded_accounts[0] {
            (Ok(loaded_transaction), _nonce_rollback) => {
                assert_eq!(loaded_transaction.accounts.len(), 3);
                assert_eq!(loaded_transaction.accounts[0], accounts[0].1);
                assert_eq!(loaded_transaction.loaders.len(), 2);
                assert_eq!(loaded_transaction.loaders[0].len(), 1);
                assert_eq!(loaded_transaction.loaders[1].len(), 2);
                for loaders in loaded_transaction.loaders.iter() {
                    for (i, accounts_subset) in loaders.iter().enumerate() {
                        // +1 to skip first not loader account
                        assert_eq!(*accounts_subset, accounts[i + 1]);
                    }
                }
            }
            (Err(e), _nonce_rollback) => Err(e).unwrap(),
        }
    }

    /// `load_by_program_slot` returns only accounts whose owner matches.
    #[test]
    fn test_load_by_program_slot() {
        let accounts =
            Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false);

        // Load accounts owned by various programs into AccountsDb
        let pubkey0 =
solana_sdk::pubkey::new_rand();
        let account0 = AccountSharedData::new(1, 0, &Pubkey::new(&[2; 32]));
        accounts.store_slow_uncached(0, &pubkey0, &account0);
        let pubkey1 = solana_sdk::pubkey::new_rand();
        let account1 = AccountSharedData::new(1, 0, &Pubkey::new(&[2; 32]));
        accounts.store_slow_uncached(0, &pubkey1, &account1);
        let pubkey2 = solana_sdk::pubkey::new_rand();
        let account2 = AccountSharedData::new(1, 0, &Pubkey::new(&[3; 32]));
        accounts.store_slow_uncached(0, &pubkey2, &account2);

        // Two accounts owned by [2;32], one by [3;32], none by [4;32].
        let loaded = accounts.load_by_program_slot(0, Some(&Pubkey::new(&[2; 32])));
        assert_eq!(loaded.len(), 2);
        let loaded = accounts.load_by_program_slot(0, Some(&Pubkey::new(&[3; 32])));
        assert_eq!(loaded, vec![(pubkey2, account2)]);
        let loaded = accounts.load_by_program_slot(0, Some(&Pubkey::new(&[4; 32])));
        assert_eq!(loaded, vec![]);
    }

    /// Looking up an executable account that was never stored fails with
    /// ProgramAccountNotFound and bumps the counter.
    #[test]
    fn test_accounts_account_not_found() {
        let accounts =
            Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false);
        let mut error_counters = ErrorCounters::default();
        let ancestors = vec![(0, 0)].into_iter().collect();

        assert_eq!(
            accounts.load_executable_accounts(
                &ancestors,
                &solana_sdk::pubkey::new_rand(),
                &mut error_counters
            ),
            Err(TransactionError::ProgramAccountNotFound)
        );
        assert_eq!(error_counters.account_not_found, 1);
    }

    /// bank_hash_at() panics (via expect) when no hash exists for the slot.
    #[test]
    #[should_panic]
    fn test_accounts_empty_bank_hash() {
        let accounts =
            Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false);
        accounts.bank_hash_at(1);
    }

    /// Exercises read/write account-lock semantics across several
    /// transactions (shared read locks, write/read conflicts, refcounts).
    #[test]
    fn test_accounts_locks() {
        let keypair0 = Keypair::new();
        let keypair1 = Keypair::new();
        let keypair2 = Keypair::new();
        let keypair3 = Keypair::new();

        let account0 = AccountSharedData::new(1, 0, &Pubkey::default());
        let account1 = AccountSharedData::new(2, 0, &Pubkey::default());
        let account2 = AccountSharedData::new(3, 0, &Pubkey::default());
        let account3 = AccountSharedData::new(4, 0, &Pubkey::default());

        let accounts =
            Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false);
        accounts.store_slow_uncached(0, &keypair0.pubkey(), &account0);
        accounts.store_slow_uncached(0, &keypair1.pubkey(), &account1);
        accounts.store_slow_uncached(0, &keypair2.pubkey(), &account2);
        accounts.store_slow_uncached(0, &keypair3.pubkey(), &account3);

        let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])];
        let message = Message::new_with_compiled_instructions(
            1,
            0,
            2,
            vec![keypair0.pubkey(), keypair1.pubkey(), native_loader::id()],
            Hash::default(),
            instructions,
        );
        let tx = Transaction::new(&[&keypair0], message, Hash::default());
        let results0 = accounts.lock_accounts(
            &[tx.clone()],
            true, // demote_sysvar_write_locks
        );

        assert!(results0[0].is_ok());
        // keypair1 is read-locked with refcount 1.
        assert_eq!(
            *accounts
                .account_locks
                .lock()
                .unwrap()
                .readonly_locks
                .get(&keypair1.pubkey())
                .unwrap(),
            1
        );

        let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])];
        let message = Message::new_with_compiled_instructions(
            1,
            0,
            2,
            vec![keypair2.pubkey(), keypair1.pubkey(), native_loader::id()],
            Hash::default(),
            instructions,
        );
        let tx0 = Transaction::new(&[&keypair2], message, Hash::default());
        let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])];
        let message = Message::new_with_compiled_instructions(
            1,
            0,
            2,
            vec![keypair1.pubkey(), keypair3.pubkey(), native_loader::id()],
            Hash::default(),
            instructions,
        );
        let tx1 = Transaction::new(&[&keypair1], message, Hash::default());
        let txs = vec![tx0, tx1];
        let results1 = accounts.lock_accounts(
            &txs,
            true, // demote_sysvar_write_locks
        );

        assert!(results1[0].is_ok()); // Read-only account (keypair1) can be referenced multiple times
        assert!(results1[1].is_err()); // Read-only account (keypair1) cannot also be locked as writable
        // Read-lock refcount bumped to 2 by the second reader.
        assert_eq!(
            *accounts
                .account_locks
                .lock()
                .unwrap()
                .readonly_locks
                .get(&keypair1.pubkey())
                .unwrap(),
            2
        );

        accounts.unlock_accounts(
            &[tx],
            &results0,
            true, // demote_sysvar_write_locks
        );
        accounts.unlock_accounts(
            &txs,
            &results1,
            true, // demote_sysvar_write_locks
        );
        let
instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])];
        let message = Message::new_with_compiled_instructions(
            1,
            0,
            2,
            vec![keypair1.pubkey(), keypair3.pubkey(), native_loader::id()],
            Hash::default(),
            instructions,
        );
        let tx = Transaction::new(&[&keypair1], message, Hash::default());
        let results2 = accounts.lock_accounts(
            &[tx],
            true, // demote_sysvar_write_locks
        );

        assert!(results2[0].is_ok()); // Now keypair1 account can be locked as writable

        // Check that read-only lock with zero references is deleted
        assert!(accounts
            .account_locks
            .lock()
            .unwrap()
            .readonly_locks
            .get(&keypair1.pubkey())
            .is_none());
    }

    /// Races a writer thread against a reader loop on overlapping accounts:
    /// while the main thread holds the read lock on keypair1, the writer must
    /// not make progress (its success counter stays unchanged).
    #[test]
    fn test_accounts_locks_multithreaded() {
        let counter = Arc::new(AtomicU64::new(0));
        let exit = Arc::new(AtomicBool::new(false));

        let keypair0 = Keypair::new();
        let keypair1 = Keypair::new();
        let keypair2 = Keypair::new();

        let account0 = AccountSharedData::new(1, 0, &Pubkey::default());
        let account1 = AccountSharedData::new(2, 0, &Pubkey::default());
        let account2 = AccountSharedData::new(3, 0, &Pubkey::default());

        let accounts =
            Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false);
        accounts.store_slow_uncached(0, &keypair0.pubkey(), &account0);
        accounts.store_slow_uncached(0, &keypair1.pubkey(), &account1);
        accounts.store_slow_uncached(0, &keypair2.pubkey(), &account2);
        let accounts_arc = Arc::new(accounts);

        // Read-locks keypair1; write-locks keypair0.
        let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])];
        let readonly_message = Message::new_with_compiled_instructions(
            1,
            0,
            2,
            vec![keypair0.pubkey(), keypair1.pubkey(), native_loader::id()],
            Hash::default(),
            instructions,
        );
        let readonly_tx = Transaction::new(&[&keypair0], readonly_message, Hash::default());

        // Write-locks keypair1 — conflicts with the read lock above.
        let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])];
        let writable_message = Message::new_with_compiled_instructions(
            1,
            0,
            2,
            vec![keypair1.pubkey(), keypair2.pubkey(), native_loader::id()],
            Hash::default(),
            instructions,
        );
        let writable_tx = Transaction::new(&[&keypair1], writable_message, Hash::default());

        let counter_clone = counter.clone();
        let accounts_clone = accounts_arc.clone();
        let exit_clone = exit.clone();
        thread::spawn(move || {
            let counter_clone = counter_clone.clone();
            let exit_clone = exit_clone.clone();
            loop {
                let txs = vec![writable_tx.clone()];
                let results = accounts_clone.clone().lock_accounts(
                    &txs,
                    true, // demote_sysvar_write_locks
                );
                for result in results.iter() {
                    if result.is_ok() {
                        counter_clone.clone().fetch_add(1, Ordering::SeqCst);
                    }
                }
                accounts_clone.unlock_accounts(
                    &txs,
                    &results,
                    true, // demote_sysvar_write_locks
                );
                if exit_clone.clone().load(Ordering::Relaxed) {
                    break;
                }
            }
        });
        let counter_clone = counter;
        for _ in 0..5 {
            let txs = vec![readonly_tx.clone()];
            let results = accounts_arc.clone().lock_accounts(
                &txs,
                true, // demote_sysvar_write_locks
            );
            if results[0].is_ok() {
                // Holding the read lock: the writer cannot succeed, so the
                // counter must stay put across the sleep.
                let counter_value = counter_clone.clone().load(Ordering::SeqCst);
                thread::sleep(time::Duration::from_millis(50));
                assert_eq!(counter_value, counter_clone.clone().load(Ordering::SeqCst));
            }
            accounts_arc.unlock_accounts(
                &txs,
                &results,
                true, // demote_sysvar_write_locks
            );
            thread::sleep(time::Duration::from_millis(50));
        }
        exit.store(true, Ordering::Relaxed);
    }

    /// Two successful transactions sharing a read-only key: only the writable
    /// (fee payer) accounts are collected for write-back.
    #[test]
    fn test_collect_accounts_to_store() {
        let keypair0 = Keypair::new();
        let keypair1 = Keypair::new();
        let pubkey = solana_sdk::pubkey::new_rand();

        let rent_collector = RentCollector::default();

        let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])];
        let message = Message::new_with_compiled_instructions(
            1,
            0,
            2,
            vec![keypair0.pubkey(), pubkey, native_loader::id()],
            Hash::default(),
            instructions,
        );
        let tx0 = Transaction::new(&[&keypair0], message, Hash::default());

        let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])];
        let message = Message::new_with_compiled_instructions(
            1,
            0,
            2,
            vec![keypair1.pubkey(), pubkey, native_loader::id()],
            Hash::default(),
            instructions,
        );
        let tx1 = Transaction::new(&[&keypair1],
message, Hash::default()); let txs = vec![tx0, tx1]; let loaders = vec![(Ok(()), None), (Ok(()), None)]; let account0 = AccountSharedData::new(1, 0, &Pubkey::default()); let account1 = AccountSharedData::new(2, 0, &Pubkey::default()); let account2 = AccountSharedData::new(3, 0, &Pubkey::default()); let transaction_accounts0 = vec![account0, account2.clone()]; let transaction_loaders0 = vec![]; let transaction_rent0 = 0; let loaded0 = ( Ok(LoadedTransaction { accounts: transaction_accounts0, account_deps: vec![], loaders: transaction_loaders0, rent: transaction_rent0, }), None, ); let transaction_accounts1 = vec![account1, account2]; let transaction_loaders1 = vec![]; let transaction_rent1 = 0; let loaded1 = ( Ok(LoadedTransaction { accounts: transaction_accounts1, account_deps: vec![], loaders: transaction_loaders1, rent: transaction_rent1, }), None, ); let mut loaded = vec![loaded0, loaded1]; let accounts = Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false); { accounts .account_locks .lock() .unwrap() .insert_new_readonly(&pubkey); } let collected_accounts = accounts.collect_accounts_to_store( &txs, &loaders, loaded.as_mut_slice(), &rent_collector, &(Hash::default(), FeeCalculator::default()), true, true, // demote_sysvar_write_locks ); assert_eq!(collected_accounts.len(), 2); assert!(collected_accounts .iter() .any(|(pubkey, _account)| *pubkey == &keypair0.pubkey())); assert!(collected_accounts .iter() .any(|(pubkey, _account)| *pubkey == &keypair1.pubkey())); // Ensure readonly_lock reflects lock assert_eq!( *accounts .account_locks .lock() .unwrap() .readonly_locks .get(&pubkey) .unwrap(), 1 ); } #[test] fn test_has_duplicates() { assert!(!Accounts::has_duplicates(&[1, 2])); assert!(Accounts::has_duplicates(&[1, 2, 1])); } #[test] fn huge_clean() { solana_logger::setup(); let accounts = Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false); let mut old_pubkey = Pubkey::default(); let 
zero_account = AccountSharedData::new(0, 0, &AccountSharedData::default().owner); info!("storing.."); for i in 0..2_000 { let pubkey = solana_sdk::pubkey::new_rand(); let account = AccountSharedData::new((i + 1) as u64, 0, &AccountSharedData::default().owner); accounts.store_slow_uncached(i, &pubkey, &account); accounts.store_slow_uncached(i, &old_pubkey, &zero_account); old_pubkey = pubkey; accounts.add_root(i); if i % 1_000 == 0 { info!(" store {}", i); } } info!("done..cleaning.."); accounts.accounts_db.clean_accounts(None); } fn load_accounts_no_store(accounts: &Accounts, tx: Transaction) -> Vec<TransactionLoadResult> { let rent_collector = RentCollector::default(); let fee_calculator = FeeCalculator::new(10); let mut hash_queue = BlockhashQueue::new(100); hash_queue.register_hash(&tx.message().recent_blockhash, &fee_calculator); let ancestors = vec![(0, 0)].into_iter().collect(); let mut error_counters = ErrorCounters::default(); accounts.load_accounts( &ancestors, &[tx], vec![(Ok(()), None)], &hash_queue, &mut error_counters, &rent_collector, &FeatureSet::all_enabled(), ) } #[test] fn test_instructions() { solana_logger::setup(); let accounts = Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false); let instructions_key = solana_sdk::sysvar::instructions::id(); let keypair = Keypair::new(); let instructions = vec![CompiledInstruction::new(1, &(), vec![0, 1])]; let tx = Transaction::new_with_compiled_instructions( &[&keypair], &[solana_sdk::pubkey::new_rand(), instructions_key], Hash::default(), vec![native_loader::id()], instructions, ); let loaded_accounts = load_accounts_no_store(&accounts, tx); assert_eq!(loaded_accounts.len(), 1); assert!(loaded_accounts[0].0.is_err()); } fn create_accounts_prepare_if_nonce_account() -> ( Pubkey, AccountSharedData, AccountSharedData, Hash, FeeCalculator, Option<AccountSharedData>, ) { let data = nonce::state::Versions::new_current(nonce::State::Initialized( nonce::state::Data::default(), 
)); let account = AccountSharedData::new_data(42, &data, &system_program::id()).unwrap(); let mut pre_account = account.clone(); pre_account.set_lamports(43); ( Pubkey::default(), pre_account, account, Hash::new(&[1u8; 32]), FeeCalculator { lamports_per_signature: 1234, }, None, ) } fn run_prepare_if_nonce_account_test( account: &mut AccountSharedData, account_pubkey: &Pubkey, tx_result: &Result<()>, maybe_nonce_rollback: Option<(&Pubkey, &AccountSharedData, Option<&AccountSharedData>)>, last_blockhash_with_fee_calculator: &(Hash, FeeCalculator), expect_account: &AccountSharedData, ) -> bool { // Verify expect_account's relationship match maybe_nonce_rollback { Some((nonce_pubkey, _nonce_account, _maybe_fee_account)) if nonce_pubkey == account_pubkey && tx_result.is_ok() => { assert_eq!(expect_account, account) // Account update occurs in system_instruction_processor } Some((nonce_pubkey, nonce_account, _maybe_fee_account)) if nonce_pubkey == account_pubkey => { assert_ne!(expect_account, nonce_account) } _ => assert_eq!(expect_account, account), } prepare_if_nonce_account( account, account_pubkey, tx_result, maybe_nonce_rollback, last_blockhash_with_fee_calculator, true, ); expect_account == account } #[test] fn test_prepare_if_nonce_account_expected() { let ( pre_account_pubkey, pre_account, mut post_account, last_blockhash, last_fee_calculator, maybe_fee_account, ) = create_accounts_prepare_if_nonce_account(); let post_account_pubkey = pre_account_pubkey; let mut expect_account = post_account.clone(); let data = nonce::state::Versions::new_current(nonce::State::Initialized( nonce::state::Data::default(), )); expect_account.set_state(&data).unwrap(); assert!(run_prepare_if_nonce_account_test( &mut post_account, &post_account_pubkey, &Ok(()), Some(( &pre_account_pubkey, &pre_account, maybe_fee_account.as_ref() )), &(last_blockhash, last_fee_calculator), &expect_account, )); } #[test] fn test_prepare_if_nonce_account_not_nonce_tx() { let ( pre_account_pubkey, 
_pre_account, _post_account, last_blockhash, last_fee_calculator, _maybe_fee_account, ) = create_accounts_prepare_if_nonce_account(); let post_account_pubkey = pre_account_pubkey; let mut post_account = AccountSharedData::default(); let expect_account = post_account.clone(); assert!(run_prepare_if_nonce_account_test( &mut post_account, &post_account_pubkey, &Ok(()), None, &(last_blockhash, last_fee_calculator), &expect_account, )); } #[test] fn test_prepare_if_nonce_account_not_nonce_pubkey() { let ( pre_account_pubkey, pre_account, mut post_account, last_blockhash, last_fee_calculator, maybe_fee_account, ) = create_accounts_prepare_if_nonce_account(); let expect_account = post_account.clone(); // Wrong key assert!(run_prepare_if_nonce_account_test( &mut post_account, &Pubkey::new(&[1u8; 32]), &Ok(()), Some(( &pre_account_pubkey, &pre_account, maybe_fee_account.as_ref() )), &(last_blockhash, last_fee_calculator), &expect_account, )); } #[test] fn test_prepare_if_nonce_account_tx_error() { let ( pre_account_pubkey, pre_account, mut post_account, last_blockhash, last_fee_calculator, maybe_fee_account, ) = create_accounts_prepare_if_nonce_account(); let post_account_pubkey = pre_account_pubkey; let mut expect_account = pre_account.clone(); expect_account .set_state(&nonce::state::Versions::new_current( nonce::State::Initialized(nonce::state::Data { blockhash: last_blockhash, fee_calculator: last_fee_calculator.clone(), ..nonce::state::Data::default() }), )) .unwrap(); assert!(run_prepare_if_nonce_account_test( &mut post_account, &post_account_pubkey, &Err(TransactionError::InstructionError( 0, InstructionError::InvalidArgument, )), Some(( &pre_account_pubkey, &pre_account, maybe_fee_account.as_ref() )), &(last_blockhash, last_fee_calculator), &expect_account, )); } #[test] fn test_nonced_failure_accounts_rollback_from_pays() { let rent_collector = RentCollector::default(); let nonce_address = Pubkey::new_unique(); let nonce_authority = keypair_from_seed(&[0; 
32]).unwrap(); let from = keypair_from_seed(&[1; 32]).unwrap(); let from_address = from.pubkey(); let to_address = Pubkey::new_unique(); let instructions = vec![ system_instruction::advance_nonce_account(&nonce_address, &nonce_authority.pubkey()), system_instruction::transfer(&from_address, &to_address, 42), ]; let message = Message::new(&instructions, Some(&from_address)); let blockhash = Hash::new_unique(); let tx = Transaction::new(&[&nonce_authority, &from], message, blockhash); let txs = vec![tx]; let nonce_state = nonce::state::Versions::new_current(nonce::State::Initialized(nonce::state::Data { authority: nonce_authority.pubkey(), blockhash, fee_calculator: FeeCalculator::default(), })); let nonce_account_pre = AccountSharedData::new_data(42, &nonce_state, &system_program::id()).unwrap(); let from_account_pre = AccountSharedData::new(4242, 0, &Pubkey::default()); let nonce_rollback = Some(NonceRollbackFull::new( nonce_address, nonce_account_pre.clone(), Some(from_account_pre.clone()), )); let loaders = vec![( Err(TransactionError::InstructionError( 1, InstructionError::InvalidArgument, )), nonce_rollback.clone(), )]; let nonce_state = nonce::state::Versions::new_current(nonce::State::Initialized(nonce::state::Data { authority: nonce_authority.pubkey(), blockhash: Hash::new_unique(), fee_calculator: FeeCalculator::default(), })); let nonce_account_post = AccountSharedData::new_data(43, &nonce_state, &system_program::id()).unwrap(); let from_account_post = AccountSharedData::new(4199, 0, &Pubkey::default()); let to_account = AccountSharedData::new(2, 0, &Pubkey::default()); let nonce_authority_account = AccountSharedData::new(3, 0, &Pubkey::default()); let recent_blockhashes_sysvar_account = AccountSharedData::new(4, 0, &Pubkey::default()); let transaction_accounts = vec![ from_account_post, nonce_authority_account, nonce_account_post, to_account, recent_blockhashes_sysvar_account, ]; let transaction_loaders = vec![]; let transaction_rent = 0; let loaded = ( 
Ok(LoadedTransaction { accounts: transaction_accounts, account_deps: vec![], loaders: transaction_loaders, rent: transaction_rent, }), nonce_rollback, ); let mut loaded = vec![loaded]; let next_blockhash = Hash::new_unique(); let accounts = Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false); let collected_accounts = accounts.collect_accounts_to_store( &txs, &loaders, loaded.as_mut_slice(), &rent_collector, &(next_blockhash, FeeCalculator::default()), true, true, // demote_sysvar_write_locks ); assert_eq!(collected_accounts.len(), 2); assert_eq!( collected_accounts .iter() .find(|(pubkey, _account)| *pubkey == &from_address) .map(|(_pubkey, account)| *account) .cloned() .unwrap(), from_account_pre, ); let collected_nonce_account = collected_accounts .iter() .find(|(pubkey, _account)| *pubkey == &nonce_address) .map(|(_pubkey, account)| *account) .cloned() .unwrap(); assert_eq!(collected_nonce_account.lamports, nonce_account_pre.lamports,); assert!(nonce_account::verify_nonce_account( &collected_nonce_account, &next_blockhash )); } #[test] fn test_nonced_failure_accounts_rollback_nonce_pays() { let rent_collector = RentCollector::default(); let nonce_authority = keypair_from_seed(&[0; 32]).unwrap(); let nonce_address = nonce_authority.pubkey(); let from = keypair_from_seed(&[1; 32]).unwrap(); let from_address = from.pubkey(); let to_address = Pubkey::new_unique(); let instructions = vec![ system_instruction::advance_nonce_account(&nonce_address, &nonce_authority.pubkey()), system_instruction::transfer(&from_address, &to_address, 42), ]; let message = Message::new(&instructions, Some(&nonce_address)); let blockhash = Hash::new_unique(); let tx = Transaction::new(&[&nonce_authority, &from], message, blockhash); let txs = vec![tx]; let nonce_state = nonce::state::Versions::new_current(nonce::State::Initialized(nonce::state::Data { authority: nonce_authority.pubkey(), blockhash, fee_calculator: FeeCalculator::default(), })); let 
nonce_account_pre = AccountSharedData::new_data(42, &nonce_state, &system_program::id()).unwrap(); let nonce_rollback = Some(NonceRollbackFull::new( nonce_address, nonce_account_pre.clone(), None, )); let loaders = vec![( Err(TransactionError::InstructionError( 1, InstructionError::InvalidArgument, )), nonce_rollback.clone(), )]; let nonce_state = nonce::state::Versions::new_current(nonce::State::Initialized(nonce::state::Data { authority: nonce_authority.pubkey(), blockhash: Hash::new_unique(), fee_calculator: FeeCalculator::default(), })); let nonce_account_post = AccountSharedData::new_data(43, &nonce_state, &system_program::id()).unwrap(); let from_account_post = AccountSharedData::new(4200, 0, &Pubkey::default()); let to_account = AccountSharedData::new(2, 0, &Pubkey::default()); let nonce_authority_account = AccountSharedData::new(3, 0, &Pubkey::default()); let recent_blockhashes_sysvar_account = AccountSharedData::new(4, 0, &Pubkey::default()); let transaction_accounts = vec![ from_account_post, nonce_authority_account, nonce_account_post, to_account, recent_blockhashes_sysvar_account, ]; let transaction_loaders = vec![]; let transaction_rent = 0; let loaded = ( Ok(LoadedTransaction { accounts: transaction_accounts, account_deps: vec![], loaders: transaction_loaders, rent: transaction_rent, }), nonce_rollback, ); let mut loaded = vec![loaded]; let next_blockhash = Hash::new_unique(); let accounts = Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false); let collected_accounts = accounts.collect_accounts_to_store( &txs, &loaders, loaded.as_mut_slice(), &rent_collector, &(next_blockhash, FeeCalculator::default()), true, true, // demote_sysvar_write_locks ); assert_eq!(collected_accounts.len(), 1); let collected_nonce_account = collected_accounts .iter() .find(|(pubkey, _account)| *pubkey == &nonce_address) .map(|(_pubkey, account)| *account) .cloned() .unwrap(); assert_eq!(collected_nonce_account.lamports, 
nonce_account_pre.lamports); assert!(nonce_account::verify_nonce_account( &collected_nonce_account, &next_blockhash )); } #[test] fn test_load_largest_accounts() { let accounts = Accounts::new_with_config(Vec::new(), &ClusterType::Development, HashSet::new(), false); let pubkey0 = Pubkey::new_unique(); let account0 = AccountSharedData::new(42, 0, &Pubkey::default()); accounts.store_slow_uncached(0, &pubkey0, &account0); let pubkey1 = Pubkey::new_unique(); let account1 = AccountSharedData::new(42, 0, &Pubkey::default()); accounts.store_slow_uncached(0, &pubkey1, &account1); let pubkey2 = Pubkey::new_unique(); let account2 = AccountSharedData::new(41, 0, &Pubkey::default()); accounts.store_slow_uncached(0, &pubkey2, &account2); let ancestors = vec![(0, 0)].into_iter().collect(); let all_pubkeys: HashSet<_> = vec![pubkey0, pubkey1, pubkey2].into_iter().collect(); // num == 0 should always return empty set assert_eq!( accounts.load_largest_accounts( &ancestors, 0, &HashSet::new(), AccountAddressFilter::Exclude ), vec![] ); assert_eq!( accounts.load_largest_accounts( &ancestors, 0, &all_pubkeys, AccountAddressFilter::Include ), vec![] ); // list should be sorted by balance, then pubkey, descending assert!(pubkey1 > pubkey0); assert_eq!( accounts.load_largest_accounts( &ancestors, 1, &HashSet::new(), AccountAddressFilter::Exclude ), vec![(pubkey1, 42)] ); assert_eq!( accounts.load_largest_accounts( &ancestors, 2, &HashSet::new(), AccountAddressFilter::Exclude ), vec![(pubkey1, 42), (pubkey0, 42)] ); assert_eq!( accounts.load_largest_accounts( &ancestors, 3, &HashSet::new(), AccountAddressFilter::Exclude ), vec![(pubkey1, 42), (pubkey0, 42), (pubkey2, 41)] ); // larger num should not affect results assert_eq!( accounts.load_largest_accounts( &ancestors, 6, &HashSet::new(), AccountAddressFilter::Exclude ), vec![(pubkey1, 42), (pubkey0, 42), (pubkey2, 41)] ); // AccountAddressFilter::Exclude should exclude entry let exclude1: HashSet<_> = 
vec![pubkey1].into_iter().collect(); assert_eq!( accounts.load_largest_accounts(&ancestors, 1, &exclude1, AccountAddressFilter::Exclude), vec![(pubkey0, 42)] ); assert_eq!( accounts.load_largest_accounts(&ancestors, 2, &exclude1, AccountAddressFilter::Exclude), vec![(pubkey0, 42), (pubkey2, 41)] ); assert_eq!( accounts.load_largest_accounts(&ancestors, 3, &exclude1, AccountAddressFilter::Exclude), vec![(pubkey0, 42), (pubkey2, 41)] ); // AccountAddressFilter::Include should limit entries let include1_2: HashSet<_> = vec![pubkey1, pubkey2].into_iter().collect(); assert_eq!( accounts.load_largest_accounts( &ancestors, 1, &include1_2, AccountAddressFilter::Include ), vec![(pubkey1, 42)] ); assert_eq!( accounts.load_largest_accounts( &ancestors, 2, &include1_2, AccountAddressFilter::Include ), vec![(pubkey1, 42), (pubkey2, 41)] ); assert_eq!( accounts.load_largest_accounts( &ancestors, 3, &include1_2, AccountAddressFilter::Include ), vec![(pubkey1, 42), (pubkey2, 41)] ); } }
36.695652
123
0.540626
33d0039360b4941b0be460d0de7c849c7dec99e0
3,127
//! Client-side HTTP connection wrapper shared by the h1 and h2 protocol
//! implementations.
use std::{cell::RefCell, fmt, rc::Rc, time};

use h2::client::SendRequest;
use ntex_tls::types::HttpProtocol;

use crate::http::body::MessageBody;
use crate::http::message::{RequestHeadType, ResponseHead};
use crate::http::payload::Payload;
use crate::io::IoBoxed;
use crate::util::Bytes;

use super::error::SendRequestError;
use super::pool::Acquired;
use super::{h1proto, h2proto};

/// Transport behind a client connection: a boxed io object for HTTP/1,
/// or a shared h2 send-handle for HTTP/2.
pub(super) enum ConnectionType {
    H1(IoBoxed),
    H2(H2Sender),
}

impl fmt::Debug for ConnectionType {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            ConnectionType::H1(_) => write!(f, "http/1"),
            ConnectionType::H2(_) => write!(f, "http/2"),
        }
    }
}

/// Cheaply clonable handle to an HTTP/2 connection; all clones share the
/// same underlying `SendRequest` and `closed` flag via `Rc<RefCell<..>>`.
#[derive(Clone)]
pub(super) struct H2Sender(Rc<RefCell<H2SenderInner>>);

struct H2SenderInner {
    // h2 handle used to open new streams on this connection
    io: SendRequest<Bytes>,
    // set once the connection has been marked unusable; never reset
    closed: bool,
}

impl H2Sender {
    /// Wraps a fresh h2 send-handle; starts in the open (not closed) state.
    pub(super) fn new(io: SendRequest<Bytes>) -> Self {
        Self(Rc::new(RefCell::new(H2SenderInner { io, closed: false })))
    }

    /// Returns `true` if `close()` has been called on any clone of this handle.
    pub(super) fn is_closed(&self) -> bool {
        self.0.borrow().closed
    }

    /// Marks the connection closed for every clone of this handle.
    pub(super) fn close(&self) {
        self.0.borrow_mut().closed = true;
    }

    /// Returns a clone of the underlying h2 send-handle.
    pub(super) fn get_sender(&self) -> SendRequest<Bytes> {
        self.0.borrow().io.clone()
    }
}

#[doc(hidden)]
/// HTTP client connection
pub struct Connection {
    // `None` only after the transport has been taken by `send_request`
    io: Option<ConnectionType>,
    // when the connection was created; forwarded to the h1 protocol code
    created: time::Instant,
    // pool this connection was acquired from, if any; consumed by `release`
    pool: Option<Acquired>,
}

impl fmt::Debug for Connection {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self.io {
            Some(ConnectionType::H1(_)) => write!(f, "H1Connection"),
            Some(ConnectionType::H2(_)) => write!(f, "H2Connection"),
            None => write!(f, "Connection(Empty)"),
        }
    }
}

impl Connection {
    /// Builds a connection around an already-established transport.
    pub(super) fn new(
        io: ConnectionType,
        created: time::Instant,
        pool: Option<Acquired>,
    ) -> Self {
        Self {
            pool,
            created,
            io: Some(io),
        }
    }

    /// Returns the connection (transport and creation time intact) to the
    /// pool it was acquired from; a no-op for non-pooled connections.
    pub(super) fn release(self) {
        if let Some(mut pool) = self.pool {
            pool.release(Self {
                io: self.io,
                created: self.created,
                // the re-pooled copy must not hold a pool reference itself
                pool: None,
            });
        }
    }

    /// Consumes the connection, yielding the raw transport and creation time.
    ///
    /// Panics (via `unwrap`) if the transport slot is empty; it is only
    /// emptied by `send_request`, which consumes the connection.
    pub(super) fn into_inner(self) -> (ConnectionType, time::Instant) {
        (self.io.unwrap(), self.created)
    }

    /// Protocol of this connection, derived from the transport variant.
    pub fn protocol(&self) -> HttpProtocol {
        match self.io {
            Some(ConnectionType::H1(_)) => HttpProtocol::Http1,
            Some(ConnectionType::H2(_)) => HttpProtocol::Http2,
            None => HttpProtocol::Unknown,
        }
    }

    /// Sends a request over this connection, dispatching to the matching
    /// protocol implementation, and resolves to the response head + payload.
    pub(super) async fn send_request<B: MessageBody + 'static, H: Into<RequestHeadType>>(
        mut self,
        head: H,
        body: B,
    ) -> Result<(ResponseHead, Payload), SendRequestError> {
        match self.io.take().unwrap() {
            ConnectionType::H1(io) => {
                h1proto::send_request(io, head.into(), body, self.created, self.pool).await
            }
            ConnectionType::H2(io) => h2proto::send_request(io, head.into(), body).await,
        }
    }
}
25.631148
91
0.563479
efe8f8ccd3d7284d508859b4ba47247df944d16e
2,794
use std::iter::FromIterator; use super::{Color, ColorStyle, ColorType, Effect, PaletteColor}; use enumset::EnumSet; /// Combine a color and an effect. /// /// Represents any transformation that can be applied to text. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub struct Style { /// Effect to apply. /// /// `None` to keep using previous effects. pub effects: EnumSet<Effect>, /// Color style to apply. /// /// `None` to keep using the previous colors. pub color: ColorStyle, } impl Default for Style { fn default() -> Self { Self::none() } } impl Style { /// Returns a new `Style` that doesn't apply anything. pub fn none() -> Self { Style { effects: EnumSet::new(), color: ColorStyle::inherit_parent(), } } /// Returns a new `Style` by merging all given styles. /// /// Will use the last non-`None` color, and will combine all effects. pub fn merge(styles: &[Style]) -> Self { styles.iter().collect() } /// Returns a combination of `self` and `other`. pub fn combine<S>(self, other: S) -> Self where S: Into<Style>, { Self::merge(&[self, other.into()]) } } impl From<Effect> for Style { fn from(effect: Effect) -> Self { Style { effects: EnumSet::only(effect), color: ColorStyle::inherit_parent(), } } } impl From<ColorStyle> for Style { fn from(color: ColorStyle) -> Self { Style { effects: EnumSet::new(), color, } } } impl From<Color> for Style { fn from(color: Color) -> Self { ColorStyle::from(color).into() } } impl From<PaletteColor> for Style { fn from(color: PaletteColor) -> Self { ColorStyle::from(color).into() } } impl From<ColorType> for Style { fn from(color: ColorType) -> Self { ColorStyle::from(color).into() } } /// Creates a new `Style` by merging all given styles. /// /// Will use the last non-`None` color, and will combine all effects. 
impl<'a> FromIterator<&'a Style> for Style { fn from_iter<I: IntoIterator<Item = &'a Style>>(iter: I) -> Style { let mut color = ColorStyle::inherit_parent(); let mut effects = EnumSet::new(); for style in iter { color = ColorStyle::merge(color, style.color); effects.insert_all(style.effects); } Style { color, effects } } } /// Creates a new `Style` by merging all given styles. /// /// Will use the last non-`None` color, and will combine all effects. impl<T: Into<Style>> FromIterator<T> for Style { fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Style { iter.into_iter().map(Into::into).collect() } }
24.508772
73
0.583035
16b85627efd6252ebaa507a61b6c4165c319f1d4
851
/* Copyright 2017 Christopher Bacher
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Nightly feature gate required by the procedural-macro attributes below.
#![feature(proc_macro)]

extern crate galvanic_mock;
extern crate galvanic_assert;

use galvanic_mock::{mockable, use_mocks};

// A trait with no items; `#[mockable]` makes it available to `new_mock!`.
#[mockable]
trait EmptyTrait { }

// Smoke test: a mock can be created even for a trait with no methods.
// The binding is unused on purpose — the point is that creation succeeds.
#[test]
#[use_mocks]
fn create_mock_with_empty_trait() {
    let mock = new_mock!(EmptyTrait);
}
30.392857
75
0.748531
e21b7df0a76c02336a70f1b1f9b679706d66c72b
34,241
use std::fs::File; use git2; use crate::support::git; use crate::support::{basic_manifest, clippy_is_available, is_nightly, project}; use std::io::Write; #[cargo_test] fn do_not_fix_broken_builds() { let p = project() .file( "src/lib.rs", r#" pub fn foo() { let mut x = 3; drop(x); } pub fn foo2() { let _x: u32 = "a"; } "#, ) .build(); p.cargo("fix --allow-no-vcs") .env("__CARGO_FIX_YOLO", "1") .with_status(101) .with_stderr_contains("[ERROR] Could not compile `foo`.") .run(); assert!(p.read_file("src/lib.rs").contains("let mut x = 3;")); } #[cargo_test] fn fix_broken_if_requested() { let p = project() .file( "src/lib.rs", r#" fn foo(a: &u32) -> u32 { a + 1 } pub fn bar() { foo(1); } "#, ) .build(); p.cargo("fix --allow-no-vcs --broken-code") .env("__CARGO_FIX_YOLO", "1") .run(); } #[cargo_test] fn broken_fixes_backed_out() { // This works as follows: // - Create a `rustc` shim (the "foo" project) which will pretend that the // verification step fails. // - There is an empty build script so `foo` has `OUT_DIR` to track the steps. // - The first "check", `foo` creates a file in OUT_DIR, and it completes // successfully with a warning diagnostic to remove unused `mut`. // - rustfix removes the `mut`. // - The second "check" to verify the changes, `foo` swaps out the content // with something that fails to compile. It creates a second file so it // won't do anything in the third check. // - cargo fix discovers that the fix failed, and it backs out the changes. // - The third "check" is done to display the original diagnostics of the // original code. let p = project() .file( "foo/Cargo.toml", r#" [package] name = 'foo' version = '0.1.0' [workspace] "#, ) .file( "foo/src/main.rs", r##" use std::env; use std::fs; use std::io::Write; use std::path::{Path, PathBuf}; use std::process::{self, Command}; fn main() { // Ignore calls to things like --print=file-names and compiling build.rs. 
let is_lib_rs = env::args_os() .map(PathBuf::from) .any(|l| l == Path::new("src/lib.rs")); if is_lib_rs { let path = PathBuf::from(env::var_os("OUT_DIR").unwrap()); let first = path.join("first"); let second = path.join("second"); if first.exists() && !second.exists() { fs::write("src/lib.rs", b"not rust code").unwrap(); fs::File::create(&second).unwrap(); } else { fs::File::create(&first).unwrap(); } } let status = Command::new("rustc") .args(env::args().skip(1)) .status() .expect("failed to run rustc"); process::exit(status.code().unwrap_or(2)); } "##, ) .file( "bar/Cargo.toml", r#" [package] name = 'bar' version = '0.1.0' [workspace] "#, ) .file("bar/build.rs", "fn main() {}") .file( "bar/src/lib.rs", r#" pub fn foo() { let mut x = 3; drop(x); } "#, ) .build(); // Build our rustc shim p.cargo("build").cwd("foo").run(); // Attempt to fix code, but our shim will always fail the second compile p.cargo("fix --allow-no-vcs --lib") .cwd("bar") .env("__CARGO_FIX_YOLO", "1") .env("RUSTC", p.root().join("foo/target/debug/foo")) .with_stderr_contains( "warning: failed to automatically apply fixes suggested by rustc \ to crate `bar`\n\ \n\ after fixes were automatically applied the compiler reported \ errors within these files:\n\ \n \ * src/lib.rs\n\ \n\ This likely indicates a bug in either rustc or cargo itself,\n\ and we would appreciate a bug report! You're likely to see \n\ a number of compiler warnings after this message which cargo\n\ attempted to fix but failed. 
If you could open an issue at\n\ [..]\n\ quoting the full output of this command we'd be very appreciative!\n\ Note that you may be able to make some more progress in the near-term\n\ fixing code with the `--broken-code` flag\n\ \n\ The following errors were reported:\n\ error: expected one of `!` or `::`, found `rust`\n\ ", ) .with_stderr_contains("Original diagnostics will follow.") .with_stderr_contains("[WARNING] variable does not need to be mutable") .with_stderr_does_not_contain("[..][FIXING][..]") .run(); // Make sure the fix which should have been applied was backed out assert!(p.read_file("bar/src/lib.rs").contains("let mut x = 3;")); } #[cargo_test] fn fix_path_deps() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" [dependencies] bar = { path = 'bar' } [workspace] "#, ) .file( "src/lib.rs", r#" extern crate bar; pub fn foo() -> u32 { let mut x = 3; x } "#, ) .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) .file( "bar/src/lib.rs", r#" pub fn foo() -> u32 { let mut x = 3; x } "#, ) .build(); p.cargo("fix --allow-no-vcs -p foo -p bar") .env("__CARGO_FIX_YOLO", "1") .with_stdout("") .with_stderr_unordered( "\ [CHECKING] bar v0.1.0 ([..]) [FIXING] bar/src/lib.rs (1 fix) [CHECKING] foo v0.1.0 ([..]) [FIXING] src/lib.rs (1 fix) [FINISHED] [..] 
", ) .run(); } #[cargo_test] fn do_not_fix_non_relevant_deps() { let p = project() .no_manifest() .file( "foo/Cargo.toml", r#" [package] name = "foo" version = "0.1.0" [dependencies] bar = { path = '../bar' } [workspace] "#, ) .file("foo/src/lib.rs", "") .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) .file( "bar/src/lib.rs", r#" pub fn foo() -> u32 { let mut x = 3; x } "#, ) .build(); p.cargo("fix --allow-no-vcs") .env("__CARGO_FIX_YOLO", "1") .cwd("foo") .run(); assert!(p.read_file("bar/src/lib.rs").contains("mut")); } #[cargo_test] fn prepare_for_2018() { let p = project() .file( "src/lib.rs", r#" #![allow(unused)] mod foo { pub const FOO: &str = "fooo"; } mod bar { use ::foo::FOO; } fn main() { let x = ::foo::FOO; } "#, ) .build(); let stderr = "\ [CHECKING] foo v0.0.1 ([..]) [FIXING] src/lib.rs (2 fixes) [FINISHED] [..] "; p.cargo("fix --edition --allow-no-vcs") .with_stderr(stderr) .with_stdout("") .run(); println!("{}", p.read_file("src/lib.rs")); assert!(p.read_file("src/lib.rs").contains("use crate::foo::FOO;")); assert!(p .read_file("src/lib.rs") .contains("let x = crate::foo::FOO;")); } #[cargo_test] fn local_paths() { let p = project() .file( "src/lib.rs", r#" use test::foo; mod test { pub fn foo() {} } pub fn f() { foo(); } "#, ) .build(); let stderr = "\ [CHECKING] foo v0.0.1 ([..]) [FIXING] src/lib.rs (1 fix) [FINISHED] [..] 
"; p.cargo("fix --edition --allow-no-vcs") .with_stderr(stderr) .with_stdout("") .run(); println!("{}", p.read_file("src/lib.rs")); assert!(p.read_file("src/lib.rs").contains("use crate::test::foo;")); } #[cargo_test] fn upgrade_extern_crate() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" edition = '2018' [workspace] [dependencies] bar = { path = 'bar' } "#, ) .file( "src/lib.rs", r#" #![warn(rust_2018_idioms)] extern crate bar; use bar::bar; pub fn foo() { ::bar::bar(); bar(); } "#, ) .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) .file("bar/src/lib.rs", "pub fn bar() {}") .build(); let stderr = "\ [CHECKING] bar v0.1.0 ([..]) [CHECKING] foo v0.1.0 ([..]) [FIXING] src/lib.rs (1 fix) [FINISHED] [..] "; p.cargo("fix --allow-no-vcs") .env("__CARGO_FIX_YOLO", "1") .with_stderr(stderr) .with_stdout("") .run(); println!("{}", p.read_file("src/lib.rs")); assert!(!p.read_file("src/lib.rs").contains("extern crate")); } #[cargo_test] fn specify_rustflags() { let p = project() .file( "src/lib.rs", r#" #![allow(unused)] mod foo { pub const FOO: &str = "fooo"; } fn main() { let x = ::foo::FOO; } "#, ) .build(); let stderr = "\ [CHECKING] foo v0.0.1 ([..]) [FIXING] src/lib.rs (1 fix) [FINISHED] [..] "; p.cargo("fix --edition --allow-no-vcs") .env("RUSTFLAGS", "-C target-cpu=native") .with_stderr(stderr) .with_stdout("") .run(); } #[cargo_test] fn no_changes_necessary() { let p = project().file("src/lib.rs", "").build(); let stderr = "\ [CHECKING] foo v0.0.1 ([..]) [FINISHED] [..] "; p.cargo("fix --allow-no-vcs") .with_stderr(stderr) .with_stdout("") .run(); } #[cargo_test] fn fixes_extra_mut() { let p = project() .file( "src/lib.rs", r#" pub fn foo() -> u32 { let mut x = 3; x } "#, ) .build(); let stderr = "\ [CHECKING] foo v0.0.1 ([..]) [FIXING] src/lib.rs (1 fix) [FINISHED] [..] 
"; p.cargo("fix --allow-no-vcs") .env("__CARGO_FIX_YOLO", "1") .with_stderr(stderr) .with_stdout("") .run(); } #[cargo_test] fn fixes_two_missing_ampersands() { let p = project() .file( "src/lib.rs", r#" pub fn foo() -> u32 { let mut x = 3; let mut y = 3; x + y } "#, ) .build(); let stderr = "\ [CHECKING] foo v0.0.1 ([..]) [FIXING] src/lib.rs (2 fixes) [FINISHED] [..] "; p.cargo("fix --allow-no-vcs") .env("__CARGO_FIX_YOLO", "1") .with_stderr(stderr) .with_stdout("") .run(); } #[cargo_test] fn tricky() { let p = project() .file( "src/lib.rs", r#" pub fn foo() -> u32 { let mut x = 3; let mut y = 3; x + y } "#, ) .build(); let stderr = "\ [CHECKING] foo v0.0.1 ([..]) [FIXING] src/lib.rs (2 fixes) [FINISHED] [..] "; p.cargo("fix --allow-no-vcs") .env("__CARGO_FIX_YOLO", "1") .with_stderr(stderr) .with_stdout("") .run(); } #[cargo_test] fn preserve_line_endings() { let p = project() .file( "src/lib.rs", "fn add(a: &u32) -> u32 { a + 1 }\r\n\ pub fn foo() -> u32 { let mut x = 3; add(&x) }\r\n\ ", ) .build(); p.cargo("fix --allow-no-vcs") .env("__CARGO_FIX_YOLO", "1") .run(); assert!(p.read_file("src/lib.rs").contains("\r\n")); } #[cargo_test] fn fix_deny_warnings() { let p = project() .file( "src/lib.rs", "#![deny(warnings)] pub fn foo() { let mut x = 3; drop(x); } ", ) .build(); p.cargo("fix --allow-no-vcs") .env("__CARGO_FIX_YOLO", "1") .run(); } #[cargo_test] fn fix_deny_warnings_but_not_others() { let p = project() .file( "src/lib.rs", " #![deny(warnings)] pub fn foo() -> u32 { let mut x = 3; x } fn bar() {} ", ) .build(); p.cargo("fix --allow-no-vcs") .env("__CARGO_FIX_YOLO", "1") .run(); assert!(!p.read_file("src/lib.rs").contains("let mut x = 3;")); assert!(p.read_file("src/lib.rs").contains("fn bar() {}")); } #[cargo_test] fn fix_two_files() { let p = project() .file( "src/lib.rs", " pub mod bar; pub fn foo() -> u32 { let mut x = 3; x } ", ) .file( "src/bar.rs", " pub fn foo() -> u32 { let mut x = 3; x } ", ) .build(); p.cargo("fix --allow-no-vcs") 
.env("__CARGO_FIX_YOLO", "1") .with_stderr_contains("[FIXING] src/bar.rs (1 fix)") .with_stderr_contains("[FIXING] src/lib.rs (1 fix)") .run(); assert!(!p.read_file("src/lib.rs").contains("let mut x = 3;")); assert!(!p.read_file("src/bar.rs").contains("let mut x = 3;")); } #[cargo_test] fn fixes_missing_ampersand() { let p = project() .file("src/main.rs", "fn main() { let mut x = 3; drop(x); }") .file( "src/lib.rs", r#" pub fn foo() { let mut x = 3; drop(x); } #[test] pub fn foo2() { let mut x = 3; drop(x); } "#, ) .file( "tests/a.rs", r#" #[test] pub fn foo() { let mut x = 3; drop(x); } "#, ) .file("examples/foo.rs", "fn main() { let mut x = 3; drop(x); }") .file("build.rs", "fn main() { let mut x = 3; drop(x); }") .build(); p.cargo("fix --all-targets --allow-no-vcs") .env("__CARGO_FIX_YOLO", "1") .with_stdout("") .with_stderr_contains("[COMPILING] foo v0.0.1 ([..])") .with_stderr_contains("[FIXING] build.rs (1 fix)") // Don't assert number of fixes for this one, as we don't know if we're // fixing it once or twice! We run this all concurrently, and if we // compile (and fix) in `--test` mode first, we get two fixes. Otherwise // we'll fix one non-test thing, and then fix another one later in // test mode. 
.with_stderr_contains("[FIXING] src/lib.rs[..]") .with_stderr_contains("[FIXING] src/main.rs (1 fix)") .with_stderr_contains("[FIXING] examples/foo.rs (1 fix)") .with_stderr_contains("[FIXING] tests/a.rs (1 fix)") .with_stderr_contains("[FINISHED] [..]") .run(); p.cargo("build").run(); p.cargo("test").run(); } #[cargo_test] fn fix_features() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" [features] bar = [] [workspace] "#, ) .file( "src/lib.rs", r#" #[cfg(feature = "bar")] pub fn foo() -> u32 { let mut x = 3; x } "#, ) .build(); p.cargo("fix --allow-no-vcs").run(); p.cargo("build").run(); p.cargo("fix --features bar --allow-no-vcs").run(); p.cargo("build --features bar").run(); } #[cargo_test] fn shows_warnings() { let p = project() .file( "src/lib.rs", "#[deprecated] fn bar() {} pub fn foo() { let _ = bar(); }", ) .build(); p.cargo("fix --allow-no-vcs") .with_stderr_contains("[..]warning: use of deprecated item[..]") .run(); } #[cargo_test] fn warns_if_no_vcs_detected() { let p = project().file("src/lib.rs", "pub fn foo() {}").build(); p.cargo("fix") .with_status(101) .with_stderr( "error: no VCS found for this package and `cargo fix` can potentially perform \ destructive changes; if you'd like to suppress this error pass `--allow-no-vcs`\ ", ) .run(); p.cargo("fix --allow-no-vcs").run(); } #[cargo_test] fn warns_about_dirty_working_directory() { let p = project().file("src/lib.rs", "pub fn foo() {}").build(); let repo = git2::Repository::init(&p.root()).unwrap(); let mut cfg = t!(repo.config()); t!(cfg.set_str("user.email", "[email protected]")); t!(cfg.set_str("user.name", "Foo Bar")); drop(cfg); git::add(&repo); git::commit(&repo); File::create(p.root().join("src/lib.rs")).unwrap(); p.cargo("fix") .with_status(101) .with_stderr( "\ error: the working directory of this package has uncommitted changes, \ and `cargo fix` can potentially perform destructive changes; if you'd \ like to suppress this error pass `--allow-dirty`, 
`--allow-staged`, or \ commit the changes to these files: * src/lib.rs (dirty) ", ) .run(); p.cargo("fix --allow-dirty").run(); } #[cargo_test] fn warns_about_staged_working_directory() { let p = project().file("src/lib.rs", "pub fn foo() {}").build(); let repo = git2::Repository::init(&p.root()).unwrap(); let mut cfg = t!(repo.config()); t!(cfg.set_str("user.email", "[email protected]")); t!(cfg.set_str("user.name", "Foo Bar")); drop(cfg); git::add(&repo); git::commit(&repo); File::create(&p.root().join("src/lib.rs")) .unwrap() .write_all("pub fn bar() {}".to_string().as_bytes()) .unwrap(); git::add(&repo); p.cargo("fix") .with_status(101) .with_stderr( "\ error: the working directory of this package has uncommitted changes, \ and `cargo fix` can potentially perform destructive changes; if you'd \ like to suppress this error pass `--allow-dirty`, `--allow-staged`, or \ commit the changes to these files: * src/lib.rs (staged) ", ) .run(); p.cargo("fix --allow-staged").run(); } #[cargo_test] fn does_not_warn_about_clean_working_directory() { let p = project().file("src/lib.rs", "pub fn foo() {}").build(); let repo = git2::Repository::init(&p.root()).unwrap(); let mut cfg = t!(repo.config()); t!(cfg.set_str("user.email", "[email protected]")); t!(cfg.set_str("user.name", "Foo Bar")); drop(cfg); git::add(&repo); git::commit(&repo); p.cargo("fix").run(); } #[cargo_test] fn does_not_warn_about_dirty_ignored_files() { let p = project() .file("src/lib.rs", "pub fn foo() {}") .file(".gitignore", "bar\n") .build(); let repo = git2::Repository::init(&p.root()).unwrap(); let mut cfg = t!(repo.config()); t!(cfg.set_str("user.email", "[email protected]")); t!(cfg.set_str("user.name", "Foo Bar")); drop(cfg); git::add(&repo); git::commit(&repo); File::create(p.root().join("bar")).unwrap(); p.cargo("fix").run(); } #[cargo_test] fn fix_all_targets_by_default() { let p = project() .file("src/lib.rs", "pub fn foo() { let mut x = 3; drop(x); }") .file("tests/foo.rs", "pub fn foo() { 
let mut x = 3; drop(x); }") .build(); p.cargo("fix --allow-no-vcs") .env("__CARGO_FIX_YOLO", "1") .run(); assert!(!p.read_file("src/lib.rs").contains("let mut x")); assert!(!p.read_file("tests/foo.rs").contains("let mut x")); } #[cargo_test] fn prepare_for_and_enable() { let p = project() .file( "Cargo.toml", r#" [package] name = 'foo' version = '0.1.0' edition = '2018' "#, ) .file("src/lib.rs", "") .build(); let stderr = "\ error: cannot prepare for the 2018 edition when it is enabled, so cargo cannot automatically fix errors in `src/lib.rs` To prepare for the 2018 edition you should first remove `edition = '2018'` from your `Cargo.toml` and then rerun this command. Once all warnings have been fixed then you can re-enable the `edition` key in `Cargo.toml`. For some more information about transitioning to the 2018 edition see: https://[..] "; p.cargo("fix --edition --allow-no-vcs") .with_stderr_contains(stderr) .with_status(101) .run(); } #[cargo_test] fn fix_overlapping() { let p = project() .file( "src/lib.rs", r#" pub fn foo<T>() {} pub struct A; pub mod bar { pub fn baz() { ::foo::<::A>(); } } "#, ) .build(); let stderr = "\ [CHECKING] foo [..] [FIXING] src/lib.rs (2 fixes) [FINISHED] dev [..] "; p.cargo("fix --allow-no-vcs --prepare-for 2018 --lib") .with_stderr(stderr) .run(); let contents = p.read_file("src/lib.rs"); println!("{}", contents); assert!(contents.contains("crate::foo::<crate::A>()")); } #[cargo_test] fn fix_idioms() { let p = project() .file( "Cargo.toml", r#" [package] name = 'foo' version = '0.1.0' edition = '2018' "#, ) .file( "src/lib.rs", r#" use std::any::Any; pub fn foo() { let _x: Box<Any> = Box::new(3); } "#, ) .build(); let stderr = "\ [CHECKING] foo [..] [FIXING] src/lib.rs (1 fix) [FINISHED] [..] 
"; p.cargo("fix --edition-idioms --allow-no-vcs") .with_stderr(stderr) .run(); assert!(p.read_file("src/lib.rs").contains("Box<dyn Any>")); } #[cargo_test] fn idioms_2015_ok() { let p = project().file("src/lib.rs", "").build(); p.cargo("fix --edition-idioms --allow-no-vcs").run(); } #[cargo_test] fn both_edition_migrate_flags() { let p = project().file("src/lib.rs", "").build(); let stderr = "\ error: The argument '--edition' cannot be used with '--prepare-for <prepare-for>' USAGE: cargo[..] fix --edition --message-format <FMT> For more information try --help "; p.cargo("fix --prepare-for 2018 --edition") .with_status(1) .with_stderr(stderr) .run(); } #[cargo_test] fn shows_warnings_on_second_run_without_changes() { let p = project() .file( "src/lib.rs", r#" #[deprecated] fn bar() {} pub fn foo() { let _ = bar(); } "#, ) .build(); p.cargo("fix --allow-no-vcs") .with_stderr_contains("[..]warning: use of deprecated item[..]") .run(); p.cargo("fix --allow-no-vcs") .with_stderr_contains("[..]warning: use of deprecated item[..]") .run(); } #[cargo_test] fn shows_warnings_on_second_run_without_changes_on_multiple_targets() { let p = project() .file( "src/lib.rs", r#" #[deprecated] fn bar() {} pub fn foo() { let _ = bar(); } "#, ) .file( "src/main.rs", r#" #[deprecated] fn bar() {} fn main() { let _ = bar(); } "#, ) .file( "tests/foo.rs", r#" #[deprecated] fn bar() {} #[test] fn foo_test() { let _ = bar(); } "#, ) .file( "tests/bar.rs", r#" #[deprecated] fn bar() {} #[test] fn foo_test() { let _ = bar(); } "#, ) .file( "examples/fooxample.rs", r#" #[deprecated] fn bar() {} fn main() { let _ = bar(); } "#, ) .build(); p.cargo("fix --allow-no-vcs --all-targets") .with_stderr_contains(" --> examples/fooxample.rs:6:29") .with_stderr_contains(" --> src/lib.rs:6:29") .with_stderr_contains(" --> src/main.rs:6:29") .with_stderr_contains(" --> tests/bar.rs:7:29") .with_stderr_contains(" --> tests/foo.rs:7:29") .run(); p.cargo("fix --allow-no-vcs --all-targets") 
.with_stderr_contains(" --> examples/fooxample.rs:6:29") .with_stderr_contains(" --> src/lib.rs:6:29") .with_stderr_contains(" --> src/main.rs:6:29") .with_stderr_contains(" --> tests/bar.rs:7:29") .with_stderr_contains(" --> tests/foo.rs:7:29") .run(); } #[cargo_test] fn doesnt_rebuild_dependencies() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" [dependencies] bar = { path = 'bar' } [workspace] "#, ) .file("src/lib.rs", "extern crate bar;") .file("bar/Cargo.toml", &basic_manifest("bar", "0.1.0")) .file("bar/src/lib.rs", "") .build(); p.cargo("fix --allow-no-vcs -p foo") .env("__CARGO_FIX_YOLO", "1") .with_stdout("") .with_stderr( "\ [CHECKING] bar v0.1.0 ([..]) [CHECKING] foo v0.1.0 ([..]) [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] ", ) .run(); p.cargo("fix --allow-no-vcs -p foo") .env("__CARGO_FIX_YOLO", "1") .with_stdout("") .with_stderr( "\ [CHECKING] foo v0.1.0 ([..]) [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] ", ) .run(); } #[cargo_test] fn does_not_crash_with_rustc_wrapper() { // We don't have /usr/bin/env on Windows. if cfg!(windows) { return; } let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" "#, ) .file("src/lib.rs", "") .build(); p.cargo("fix --allow-no-vcs") .env("RUSTC_WRAPPER", "/usr/bin/env") .run(); } #[cargo_test] fn only_warn_for_relevant_crates() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" [dependencies] a = { path = 'a' } "#, ) .file("src/lib.rs", "") .file( "a/Cargo.toml", r#" [package] name = "a" version = "0.1.0" "#, ) .file( "a/src/lib.rs", " pub fn foo() {} pub mod bar { use foo; pub fn baz() { foo() } } ", ) .build(); p.cargo("fix --allow-no-vcs --edition") .with_stderr( "\ [CHECKING] a v0.1.0 ([..]) [CHECKING] foo v0.1.0 ([..]) [FINISHED] dev [unoptimized + debuginfo] target(s) in [..] 
", ) .run(); } #[cargo_test] fn fix_to_broken_code() { let p = project() .file( "foo/Cargo.toml", r#" [package] name = 'foo' version = '0.1.0' [workspace] "#, ) .file( "foo/src/main.rs", r##" use std::env; use std::fs; use std::io::Write; use std::path::{Path, PathBuf}; use std::process::{self, Command}; fn main() { let is_lib_rs = env::args_os() .map(PathBuf::from) .any(|l| l == Path::new("src/lib.rs")); if is_lib_rs { let path = PathBuf::from(env::var_os("OUT_DIR").unwrap()); let path = path.join("foo"); if path.exists() { panic!() } else { fs::File::create(&path).unwrap(); } } let status = Command::new("rustc") .args(env::args().skip(1)) .status() .expect("failed to run rustc"); process::exit(status.code().unwrap_or(2)); } "##, ) .file( "bar/Cargo.toml", r#" [package] name = 'bar' version = '0.1.0' [workspace] "#, ) .file("bar/build.rs", "fn main() {}") .file("bar/src/lib.rs", "pub fn foo() { let mut x = 3; drop(x); }") .build(); // Build our rustc shim p.cargo("build").cwd("foo").run(); // Attempt to fix code, but our shim will always fail the second compile p.cargo("fix --allow-no-vcs --broken-code") .cwd("bar") .env("RUSTC", p.root().join("foo/target/debug/foo")) .with_status(101) .with_stderr_contains("[WARNING] failed to automatically apply fixes [..]") .run(); assert_eq!( p.read_file("bar/src/lib.rs"), "pub fn foo() { let x = 3; drop(x); }" ); } #[cargo_test] fn fix_with_common() { let p = project() .file("src/lib.rs", "") .file( "tests/t1.rs", "mod common; #[test] fn t1() { common::try(); }", ) .file( "tests/t2.rs", "mod common; #[test] fn t2() { common::try(); }", ) .file("tests/common/mod.rs", "pub fn try() {}") .build(); p.cargo("fix --edition --allow-no-vcs").run(); assert_eq!(p.read_file("tests/common/mod.rs"), "pub fn r#try() {}"); } #[cargo_test] fn fix_in_existing_repo_weird_ignore() { // Check that ignore doesn't ignore the repo itself. 
let p = git::new("foo", |project| { project .file("src/lib.rs", "") .file(".gitignore", "foo\ninner\n") .file("inner/file", "") }) .unwrap(); p.cargo("fix").run(); // This is questionable about whether it is the right behavior. It should // probably be checking if any source file for the current project is // ignored. p.cargo("fix") .cwd("inner") .with_stderr_contains("[ERROR] no VCS found[..]") .with_status(101) .run(); p.cargo("fix").cwd("src").run(); } #[cargo_test] fn fix_with_clippy() { if !is_nightly() { // fix --clippy is unstable eprintln!("skipping test: requires nightly"); return; } if !clippy_is_available() { return; } let p = project() .file( "src/lib.rs", " pub fn foo() { let mut v = Vec::<String>::new(); let _ = v.iter_mut().filter(|&ref a| a.is_empty()); } ", ) .build(); let stderr = "\ [CHECKING] foo v0.0.1 ([..]) [FIXING] src/lib.rs (1 fix) [FINISHED] [..] "; p.cargo("fix -Zunstable-options --clippy --allow-no-vcs") .masquerade_as_nightly_cargo() .with_stderr(stderr) .with_stdout("") .run(); assert_eq!( p.read_file("src/lib.rs"), " pub fn foo() { let mut v = Vec::<String>::new(); let _ = v.iter_mut().filter(|a| a.is_empty()); } " ); }
25.72577
93
0.437487
bbc960848052cacb66546d07fea44eb3a4af1d4d
3,209
#[doc = "Register `uart_fifo_wdata` reader"] pub struct R(crate::R<UART_FIFO_WDATA_SPEC>); impl core::ops::Deref for R { type Target = crate::R<UART_FIFO_WDATA_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::convert::From<crate::R<UART_FIFO_WDATA_SPEC>> for R { fn from(reader: crate::R<UART_FIFO_WDATA_SPEC>) -> Self { R(reader) } } #[doc = "Register `uart_fifo_wdata` writer"] pub struct W(crate::W<UART_FIFO_WDATA_SPEC>); impl core::ops::Deref for W { type Target = crate::W<UART_FIFO_WDATA_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl core::convert::From<crate::W<UART_FIFO_WDATA_SPEC>> for W { fn from(writer: crate::W<UART_FIFO_WDATA_SPEC>) -> Self { W(writer) } } #[doc = "Field `uart_fifo_wdata` reader - "] pub struct UART_FIFO_WDATA_R(crate::FieldReader<u8, u8>); impl UART_FIFO_WDATA_R { pub(crate) fn new(bits: u8) -> Self { UART_FIFO_WDATA_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for UART_FIFO_WDATA_R { type Target = crate::FieldReader<u8, u8>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `uart_fifo_wdata` writer - "] pub struct UART_FIFO_WDATA_W<'a> { w: &'a mut W, } impl<'a> UART_FIFO_WDATA_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !0xff) | ((value as u32) & 0xff); self.w } } impl R { #[doc = "Bits 0:7"] #[inline(always)] pub fn uart_fifo_wdata(&self) -> UART_FIFO_WDATA_R { UART_FIFO_WDATA_R::new((self.bits & 0xff) as u8) } } impl W { #[doc = "Bits 0:7"] #[inline(always)] pub fn uart_fifo_wdata(&mut self) -> UART_FIFO_WDATA_W { UART_FIFO_WDATA_W { w: self } } #[doc = "Writes raw bits to the register."] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "uart_fifo_wdata.\n\nThis register 
you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [uart_fifo_wdata](index.html) module"] pub struct UART_FIFO_WDATA_SPEC; impl crate::RegisterSpec for UART_FIFO_WDATA_SPEC { type Ux = u32; } #[doc = "`read()` method returns [uart_fifo_wdata::R](R) reader structure"] impl crate::Readable for UART_FIFO_WDATA_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [uart_fifo_wdata::W](W) writer structure"] impl crate::Writable for UART_FIFO_WDATA_SPEC { type Writer = W; } #[doc = "`reset()` method sets uart_fifo_wdata to value 0"] impl crate::Resettable for UART_FIFO_WDATA_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
32.09
412
0.631973
0956fa7225f20ad74775ba03aa1d31bd8026608a
9,812
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use core::prelude::*; use ast; use codemap::{BytePos, spanned}; use parse::lexer::reader; use parse::parser::Parser; use parse::token::keywords; use parse::token; use parse::token::{get_ident_interner}; use opt_vec; use opt_vec::OptVec; // SeqSep : a sequence separator (token) // and whether a trailing separator is allowed. pub struct SeqSep { sep: Option<token::Token>, trailing_sep_allowed: bool } pub fn seq_sep_trailing_disallowed(t: token::Token) -> SeqSep { SeqSep { sep: Some(t), trailing_sep_allowed: false, } } pub fn seq_sep_trailing_allowed(t: token::Token) -> SeqSep { SeqSep { sep: Some(t), trailing_sep_allowed: true, } } pub fn seq_sep_none() -> SeqSep { SeqSep { sep: None, trailing_sep_allowed: false, } } // maps any token back to a string. not necessary if you know it's // an identifier.... pub fn token_to_str(token: &token::Token) -> ~str { token::to_str(get_ident_interner(), token) } impl Parser { // convert a token to a string using self's reader pub fn token_to_str(&self, token: &token::Token) -> ~str { token::to_str(get_ident_interner(), token) } // convert the current token to a string using self's reader pub fn this_token_to_str(&self) -> ~str { self.token_to_str(self.token) } pub fn unexpected_last(&self, t: &token::Token) -> ! { self.span_fatal( *self.last_span, fmt!( "unexpected token: `%s`", self.token_to_str(t) ) ); } pub fn unexpected(&self) -> ! { self.fatal( fmt!( "unexpected token: `%s`", self.this_token_to_str() ) ); } // expect and consume the token t. 
Signal an error if // the next token is not t. pub fn expect(&self, t: &token::Token) { if *self.token == *t { self.bump(); } else { self.fatal( fmt!( "expected `%s` but found `%s`", self.token_to_str(t), self.this_token_to_str() ) ) } } pub fn parse_ident(&self) -> ast::ident { self.check_strict_keywords(); self.check_reserved_keywords(); match *self.token { token::IDENT(i, _) => { self.bump(); i } token::INTERPOLATED(token::nt_ident(*)) => { self.bug("ident interpolation not converted to real token"); } _ => { self.fatal( fmt!( "expected ident, found `%s`", self.this_token_to_str() ) ); } } } pub fn parse_path_list_ident(&self) -> ast::path_list_ident { let lo = self.span.lo; let ident = self.parse_ident(); let hi = self.last_span.hi; spanned(lo, hi, ast::path_list_ident_ { name: ident, id: self.get_id() }) } // consume token 'tok' if it exists. Returns true if the given // token was present, false otherwise. pub fn eat(&self, tok: &token::Token) -> bool { return if *self.token == *tok { self.bump(); true } else { false }; } pub fn is_keyword(&self, kw: keywords::Keyword) -> bool { token::is_keyword(kw, self.token) } // if the next token is the given keyword, eat it and return // true. Otherwise, return false. pub fn eat_keyword(&self, kw: keywords::Keyword) -> bool { let is_kw = match *self.token { token::IDENT(sid, false) => kw.to_ident().name == sid.name, _ => false }; if is_kw { self.bump() } is_kw } // if the given word is not a keyword, signal an error. // if the next token is not the given word, signal an error. // otherwise, eat it. 
pub fn expect_keyword(&self, kw: keywords::Keyword) { if !self.eat_keyword(kw) { self.fatal( fmt!( "expected `%s`, found `%s`", self.id_to_str(kw.to_ident()), self.this_token_to_str() ) ); } } // signal an error if the given string is a strict keyword pub fn check_strict_keywords(&self) { if token::is_strict_keyword(self.token) { self.span_err(*self.last_span, fmt!("found `%s` in ident position", self.this_token_to_str())); } } // signal an error if the current token is a reserved keyword pub fn check_reserved_keywords(&self) { if token::is_reserved_keyword(self.token) { self.fatal(fmt!("`%s` is a reserved keyword", self.this_token_to_str())); } } // expect and consume a GT. if a >> is seen, replace it // with a single > and continue. If a GT is not seen, // signal an error. pub fn expect_gt(&self) { if *self.token == token::GT { self.bump(); } else if *self.token == token::BINOP(token::SHR) { self.replace_token( token::GT, self.span.lo + BytePos(1u), self.span.hi ); } else { let mut s: ~str = ~"expected `"; s += self.token_to_str(&token::GT); s += "`, found `"; s += self.this_token_to_str(); s += "`"; self.fatal(s); } } // parse a sequence bracketed by '<' and '>', stopping // before the '>'. pub fn parse_seq_to_before_gt<T: Copy>(&self, sep: Option<token::Token>, f: &fn(&Parser) -> T) -> OptVec<T> { let mut first = true; let mut v = opt_vec::Empty; while *self.token != token::GT && *self.token != token::BINOP(token::SHR) { match sep { Some(ref t) => { if first { first = false; } else { self.expect(t); } } _ => () } v.push(f(self)); } return v; } pub fn parse_seq_to_gt<T: Copy>(&self, sep: Option<token::Token>, f: &fn(&Parser) -> T) -> OptVec<T> { let v = self.parse_seq_to_before_gt(sep, f); self.expect_gt(); return v; } // parse a sequence, including the closing delimiter. The function // f must consume tokens until reaching the next separator or // closing bracket. 
pub fn parse_seq_to_end<T: Copy>(&self, ket: &token::Token, sep: SeqSep, f: &fn(&Parser) -> T) -> ~[T] { let val = self.parse_seq_to_before_end(ket, sep, f); self.bump(); val } // parse a sequence, not including the closing delimiter. The function // f must consume tokens until reaching the next separator or // closing bracket. pub fn parse_seq_to_before_end<T: Copy>(&self, ket: &token::Token, sep: SeqSep, f: &fn(&Parser) -> T) -> ~[T] { let mut first: bool = true; let mut v: ~[T] = ~[]; while *self.token != *ket { match sep.sep { Some(ref t) => { if first { first = false; } else { self.expect(t); } } _ => () } if sep.trailing_sep_allowed && *self.token == *ket { break; } v.push(f(self)); } return v; } // parse a sequence, including the closing delimiter. The function // f must consume tokens until reaching the next separator or // closing bracket. pub fn parse_unspanned_seq<T: Copy>(&self, bra: &token::Token, ket: &token::Token, sep: SeqSep, f: &fn(&Parser) -> T) -> ~[T] { self.expect(bra); let result = self.parse_seq_to_before_end(ket, sep, f); self.bump(); result } // NB: Do not use this function unless you actually plan to place the // spanned list in the AST. pub fn parse_seq<T: Copy>(&self, bra: &token::Token, ket: &token::Token, sep: SeqSep, f: &fn(&Parser) -> T) -> spanned<~[T]> { let lo = self.span.lo; self.expect(bra); let result = self.parse_seq_to_before_end(ket, sep, f); let hi = self.span.hi; self.bump(); spanned(lo, hi, result) } }
32.276316
90
0.483082
5ded5aaf4d52798280b5bd11d73339c5b7594d93
2,911
//! Implements helpters for the sections target related to finding //! things in the ast structure. use preamble::*; /// Collect the names of all beginning sections in a document. #[derive(Default)] pub struct SectionNameCollector<'e> { path: Vec<&'e Element>, pub sections: Vec<String>, } impl<'e> Traversion<'e, ()> for SectionNameCollector<'e> { path_methods!('e); fn work(&mut self, root: &'e Element, _: (), _: &mut io::Write) -> io::Result<bool> { if let Element::HtmlTag(ref tag) = *root { if tag.name.to_lowercase() == "section" { for attr in &tag.attributes { if attr.key == "begin" { self.sections.push(attr.value.trim().into()); } } } }; Ok(true) } } impl<'e> SectionNameCollector<'e> { pub fn collect_from(root: &Element) -> Vec<String> { let mut collector = SectionNameCollector::default(); if collector.run(root, (), &mut vec![]).is_ok() { collector.sections } else { vec![] } } } /// Return a path to the start / end of a section #[derive(Default)] pub struct SectionFinder<'e, 'a> { /// label of the section to find. pub label: &'a str, /// get start or end of section? pub begin: bool, path: Vec<&'e Element>, /// the resulting path. 
pub result: Vec<&'e Element>, } impl<'e, 'a> Traversion<'e, ()> for SectionFinder<'e, 'a> { path_methods!('e); fn work(&mut self, root: &'e Element, _: (), _: &mut io::Write) -> io::Result<bool> { // end recursion if result is found if !self.result.is_empty() { return Ok(false); } if let Element::HtmlTag(ref tag) = *root { if tag.name.to_lowercase() == "section" { for attr in &tag.attributes { if attr.key.to_lowercase() == if self.begin { "begin" } else { "end" } && attr.value.to_lowercase() == self.label.to_lowercase() { self.result = self.path.clone(); } } } }; Ok(true) } } impl<'a, 'e> SectionFinder<'e, 'a> { fn find_path(root: &'e Element, label: &'a str, begin: bool) -> Vec<&'e Element> { let mut finder = SectionFinder { label, begin, path: vec![], result: vec![], }; if finder.run(root, (), &mut vec![]).is_ok() { finder.result } else { vec![] } } pub fn get_start(root: &'e Element, label: &'a str) -> Vec<&'e Element> { SectionFinder::find_path(root, label, true) } pub fn get_end(root: &'e Element, label: &'a str) -> Vec<&'e Element> { SectionFinder::find_path(root, label, false) } }
29.40404
90
0.507729
5be73e630a3229a6ae81956e1ba911652256342f
120
use dade::model; #[model] enum TestModel { Value { #[field(ge = 2)] value: u8 }, } fn main() {}
12
24
0.466667
7108ef07c18b5e50a12fbc374dd5dcc5fb5276bb
1,662
/* * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ mod deprecated_fields; mod disallow_circular_no_inline_fragments; mod disallow_reserved_aliases; mod disallow_typename_on_root; mod validate_connections; mod validate_global_variables; mod validate_module_names; mod validate_no_inline_with_raw_response_type; mod validate_relay_directives; mod validate_required_arguments; mod validate_selection_conflict; mod validate_server_only_directives; mod validate_unused_fragment_variables; mod validate_unused_variables; pub use deprecated_fields::{deprecated_fields, deprecated_fields_for_executable_definition}; pub use disallow_circular_no_inline_fragments::disallow_circular_no_inline_fragments; pub use disallow_reserved_aliases::disallow_reserved_aliases; pub use disallow_typename_on_root::disallow_typename_on_root; pub use validate_connections::validate_connections; pub use validate_global_variables::validate_global_variables; pub use validate_module_names::{extract_module_name, validate_module_names}; pub use validate_no_inline_with_raw_response_type::validate_no_inline_fragments_with_raw_response_type; pub use validate_relay_directives::validate_relay_directives; pub use validate_required_arguments::validate_required_arguments; pub use validate_selection_conflict::validate_selection_conflict; pub use validate_server_only_directives::validate_server_only_directives; pub use validate_unused_fragment_variables::validate_unused_fragment_variables; pub use validate_unused_variables::validate_unused_variables;
44.918919
103
0.88568
e46cc31a79a472e8b2d2db089a2f7afb181d55cb
47,223
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. /*! The region check is a final pass that runs over the AST after we have inferred the type constraints but before we have actually finalized the types. Its purpose is to embed some final region constraints. The reason that this is not done earlier is that sometimes we don't know whether a given type will be a region pointer or not until this phase. In particular, we ensure that, if the type of an expression or variable is `&'r T`, then the expression or variable must occur within the region scope `r`. Note that in some cases `r` may still be a region variable, so this gives us a chance to influence the value for `r` that we infer to ensure we choose a value large enough to enclose all uses. There is a lengthy comment in visit_node() that explains this point a bit better. 
*/ use middle::freevars::get_freevars; use middle::ty::{re_scope}; use middle::ty; use middle::typeck::check::FnCtxt; use middle::typeck::check::regionmanip::relate_nested_regions; use middle::typeck::infer::resolve_and_force_all_but_regions; use middle::typeck::infer::resolve_type; use middle::typeck::infer; use util::ppaux::{ty_to_str, region_to_str}; use middle::pat_util; use std::uint; use syntax::ast::{ManagedSigil, OwnedSigil, BorrowedSigil}; use syntax::ast::{def_arg, def_binding, def_local, def_self, def_upvar}; use syntax::ast; use syntax::codemap::span; use syntax::visit; pub struct Rcx { fcx: @mut FnCtxt, errors_reported: uint, // id of innermost fn or loop repeating_scope: ast::node_id, } pub type rvt = visit::vt<@mut Rcx>; fn encl_region_of_def(fcx: @mut FnCtxt, def: ast::def) -> ty::Region { let tcx = fcx.tcx(); match def { def_local(node_id, _) | def_arg(node_id, _) | def_self(node_id, _) | def_binding(node_id, _) => { tcx.region_maps.encl_region(node_id) } def_upvar(_, subdef, closure_id, body_id) => { match ty::ty_closure_sigil(fcx.node_ty(closure_id)) { BorrowedSigil => encl_region_of_def(fcx, *subdef), ManagedSigil | OwnedSigil => re_scope(body_id) } } _ => { tcx.sess.bug(fmt!("unexpected def in encl_region_of_def: %?", def)) } } } impl Rcx { pub fn tcx(&self) -> ty::ctxt { self.fcx.ccx.tcx } pub fn set_repeating_scope(&mut self, scope: ast::node_id) -> ast::node_id { let old_scope = self.repeating_scope; self.repeating_scope = scope; old_scope } pub fn resolve_type(&mut self, unresolved_ty: ty::t) -> ty::t { /*! * Try to resolve the type for the given node, returning * t_err if an error results. Note that we never care * about the details of the error, the same error will be * detected and reported in the writeback phase. * * Note one important point: we do not attempt to resolve * *region variables* here. This is because regionck is * essentially adding constraints to those region variables * and so may yet influence how they are resolved. 
* * Consider this silly example: * * fn borrow(x: &int) -> &int {x} * fn foo(x: @int) -> int { // block: B * let b = borrow(x); // region: <R0> * *b * } * * Here, the region of `b` will be `<R0>`. `<R0>` is * constrainted to be some subregion of the block B and some * superregion of the call. If we forced it now, we'd choose * the smaller region (the call). But that would make the *b * illegal. Since we don't resolve, the type of b will be * `&<R0>.int` and then `*b` will require that `<R0>` be * bigger than the let and the `*b` expression, so we will * effectively resolve `<R0>` to be the block B. */ match resolve_type(self.fcx.infcx(), unresolved_ty, resolve_and_force_all_but_regions) { Ok(t) => t, Err(_) => ty::mk_err() } } /// Try to resolve the type for the given node. pub fn resolve_node_type(@mut self, id: ast::node_id) -> ty::t { self.resolve_type(self.fcx.node_ty(id)) } /// Try to resolve the type for the given node. pub fn resolve_expr_type_adjusted(@mut self, expr: @ast::expr) -> ty::t { let ty_unadjusted = self.resolve_node_type(expr.id); if ty::type_is_error(ty_unadjusted) || ty::type_is_bot(ty_unadjusted) { ty_unadjusted } else { let tcx = self.fcx.tcx(); let adjustments = self.fcx.inh.adjustments; ty::adjust_ty(tcx, expr.span, ty_unadjusted, adjustments.find_copy(&expr.id)) } } } pub fn regionck_expr(fcx: @mut FnCtxt, e: @ast::expr) { let rcx = @mut Rcx { fcx: fcx, errors_reported: 0, repeating_scope: e.id }; if fcx.err_count_since_creation() == 0 { // regionck assumes typeck succeeded let v = regionck_visitor(); (v.visit_expr)(e, (rcx, v)); } fcx.infcx().resolve_regions(); } pub fn regionck_fn(fcx: @mut FnCtxt, blk: &ast::blk) { let rcx = @mut Rcx { fcx: fcx, errors_reported: 0, repeating_scope: blk.id }; if fcx.err_count_since_creation() == 0 { // regionck assumes typeck succeeded let v = regionck_visitor(); (v.visit_block)(blk, (rcx, v)); } fcx.infcx().resolve_regions(); } fn regionck_visitor() -> rvt { // (*) FIXME(#3238) should use visit_pat, 
not visit_arm/visit_local, // However, right now we run into an issue whereby some free // regions are not properly related if they appear within the // types of arguments that must be inferred. This could be // addressed by deferring the construction of the region // hierarchy, and in particular the relationships between free // regions, until regionck, as described in #3238. visit::mk_vt(@visit::Visitor {visit_item: visit_item, visit_expr: visit_expr, //visit_pat: visit_pat, // (*) see above visit_arm: visit_arm, visit_local: visit_local, visit_block: visit_block, .. *visit::default_visitor()}) } fn visit_item(_item: @ast::item, (_rcx, _v): (@mut Rcx, rvt)) { // Ignore items } fn visit_block(b: &ast::blk, (rcx, v): (@mut Rcx, rvt)) { rcx.fcx.tcx().region_maps.record_cleanup_scope(b.id); visit::visit_block(b, (rcx, v)); } fn visit_arm(arm: &ast::arm, (rcx, v): (@mut Rcx, rvt)) { // see above for arm.pats.iter().advance |&p| { constrain_bindings_in_pat(p, rcx); } visit::visit_arm(arm, (rcx, v)); } fn visit_local(l: @ast::local, (rcx, v): (@mut Rcx, rvt)) { // see above constrain_bindings_in_pat(l.node.pat, rcx); visit::visit_local(l, (rcx, v)); } fn constrain_bindings_in_pat(pat: @ast::pat, rcx: @mut Rcx) { let tcx = rcx.fcx.tcx(); debug!("regionck::visit_pat(pat=%s)", pat.repr(tcx)); do pat_util::pat_bindings(tcx.def_map, pat) |_, id, span, _| { // If we have a variable that contains region'd data, that // data will be accessible from anywhere that the variable is // accessed. We must be wary of loops like this: // // // from src/test/compile-fail/borrowck-lend-flow.rs // let mut v = ~3, w = ~4; // let mut x = &mut w; // loop { // **x += 1; // (2) // borrow(v); //~ ERROR cannot borrow // x = &mut v; // (1) // } // // Typically, we try to determine the region of a borrow from // those points where it is dereferenced. In this case, one // might imagine that the lifetime of `x` need only be the // body of the loop. 
But of course this is incorrect because // the pointer that is created at point (1) is consumed at // point (2), meaning that it must be live across the loop // iteration. The easiest way to guarantee this is to require // that the lifetime of any regions that appear in a // variable's type enclose at least the variable's scope. let encl_region = tcx.region_maps.encl_region(id); constrain_regions_in_type_of_node( rcx, id, encl_region, infer::BindingTypeIsNotValidAtDecl(span)); } } fn visit_expr(expr: @ast::expr, (rcx, v): (@mut Rcx, rvt)) { debug!("regionck::visit_expr(e=%s, repeating_scope=%?)", expr.repr(rcx.fcx.tcx()), rcx.repeating_scope); let has_method_map = rcx.fcx.inh.method_map.contains_key(&expr.id); // Record cleanup scopes, which are used by borrowck to decide the // maximum lifetime of a temporary rvalue. These were derived by // examining where trans creates block scopes, not because this // reflects some principled decision around temporary lifetimes. // Ordinarily this would seem like something that should be setup // in region, but we need to know which uses of operators are // overloaded. See #3511. let tcx = rcx.fcx.tcx(); match expr.node { // You'd think that x += y where `+=` is overloaded would be a // cleanup scope. You'd be... kind of right. In fact the // handling of `+=` and friends in trans for overloaded // operators is a hopeless mess and I can't figure out how to // represent it. 
- ndm // // ast::expr_assign_op(*) | ast::expr_index(*) | ast::expr_binary(*) | ast::expr_unary(*) if has_method_map => { tcx.region_maps.record_cleanup_scope(expr.id); } ast::expr_binary(_, ast::and, lhs, rhs) | ast::expr_binary(_, ast::or, lhs, rhs) => { tcx.region_maps.record_cleanup_scope(lhs.id); tcx.region_maps.record_cleanup_scope(rhs.id); } ast::expr_call(*) | ast::expr_method_call(*) => { tcx.region_maps.record_cleanup_scope(expr.id); } ast::expr_match(_, ref arms) => { tcx.region_maps.record_cleanup_scope(expr.id); for arms.iter().advance |arm| { for arm.guard.iter().advance |guard| { tcx.region_maps.record_cleanup_scope(guard.id); } } } ast::expr_loop(ref body, _) => { tcx.region_maps.record_cleanup_scope(body.id); } ast::expr_while(cond, ref body) => { tcx.region_maps.record_cleanup_scope(cond.id); tcx.region_maps.record_cleanup_scope(body.id); } _ => {} } // Check any autoderefs or autorefs that appear. { let r = rcx.fcx.inh.adjustments.find(&expr.id); for r.iter().advance |&adjustment| { debug!("adjustment=%?", adjustment); match *adjustment { @ty::AutoDerefRef( ty::AutoDerefRef {autoderefs: autoderefs, autoref: opt_autoref}) => { let expr_ty = rcx.resolve_node_type(expr.id); constrain_derefs(rcx, expr, autoderefs, expr_ty); for opt_autoref.iter().advance |autoref| { guarantor::for_autoref(rcx, expr, autoderefs, autoref); // Require that the resulting region encompasses // the current node. 
// // FIXME(#6268) remove to support nested method calls constrain_regions_in_type_of_node( rcx, expr.id, ty::re_scope(expr.id), infer::AutoBorrow(expr.span)); } } _ => {} } } } match expr.node { ast::expr_call(callee, ref args, _) => { constrain_callee(rcx, callee.id, expr, callee); constrain_call(rcx, callee.id, expr, None, *args, false); visit::visit_expr(expr, (rcx, v)); } ast::expr_method_call(callee_id, arg0, _, _, ref args, _) => { constrain_call(rcx, callee_id, expr, Some(arg0), *args, false); visit::visit_expr(expr, (rcx, v)); } ast::expr_index(callee_id, lhs, rhs) | ast::expr_assign_op(callee_id, _, lhs, rhs) | ast::expr_binary(callee_id, _, lhs, rhs) if has_method_map => { // As `expr_method_call`, but the call is via an // overloaded op. Note that we (sadly) currently use an // implicit "by ref" sort of passing style here. This // should be converted to an adjustment! constrain_call(rcx, callee_id, expr, Some(lhs), [rhs], true); visit::visit_expr(expr, (rcx, v)); } ast::expr_unary(callee_id, _, lhs) if has_method_map => { // As above. constrain_call(rcx, callee_id, expr, Some(lhs), [], true); visit::visit_expr(expr, (rcx, v)); } ast::expr_unary(_, ast::deref, base) => { // For *a, the lifetime of a must enclose the deref let base_ty = rcx.resolve_node_type(base.id); constrain_derefs(rcx, expr, 1, base_ty); visit::visit_expr(expr, (rcx, v)); } ast::expr_index(_, vec_expr, _) => { // For a[b], the lifetime of a must enclose the deref let vec_type = rcx.resolve_expr_type_adjusted(vec_expr); constrain_index(rcx, expr, vec_type); visit::visit_expr(expr, (rcx, v)); } ast::expr_cast(source, _) => { // Determine if we are casting `source` to an trait // instance. If so, we have to be sure that the type of // the source obeys the trait's region bound. // // Note: there is a subtle point here concerning type // parameters. 
It is possible that the type of `source` // contains type parameters, which in turn may contain // regions that are not visible to us (only the caller // knows about them). The kind checker is ultimately // responsible for guaranteeing region safety in that // particular case. There is an extensive comment on the // function check_cast_for_escaping_regions() in kind.rs // explaining how it goes about doing that. let target_ty = rcx.resolve_node_type(expr.id); match ty::get(target_ty).sty { ty::ty_trait(_, _, ty::RegionTraitStore(trait_region), _, _) => { let source_ty = rcx.fcx.expr_ty(source); constrain_regions_in_type( rcx, trait_region, infer::RelateObjectBound(expr.span), source_ty); } _ => () } visit::visit_expr(expr, (rcx, v)); } ast::expr_addr_of(_, base) => { guarantor::for_addr_of(rcx, expr, base); // Require that when you write a `&expr` expression, the // resulting pointer has a lifetime that encompasses the // `&expr` expression itself. Note that we constraining // the type of the node expr.id here *before applying // adjustments*. 
// // FIXME(#6268) nested method calls requires that this rule change let ty0 = rcx.resolve_node_type(expr.id); constrain_regions_in_type(rcx, ty::re_scope(expr.id), infer::AddrOf(expr.span), ty0); visit::visit_expr(expr, (rcx, v)); } ast::expr_match(discr, ref arms) => { guarantor::for_match(rcx, discr, *arms); visit::visit_expr(expr, (rcx, v)); } ast::expr_loop_body(subexpr) => { check_expr_fn_block(rcx, subexpr, v, true); } ast::expr_fn_block(*) => { check_expr_fn_block(rcx, expr, v, false); } ast::expr_loop(ref body, _) => { let repeating_scope = rcx.set_repeating_scope(body.id); visit::visit_expr(expr, (rcx, v)); rcx.set_repeating_scope(repeating_scope); } ast::expr_while(cond, ref body) => { let repeating_scope = rcx.set_repeating_scope(cond.id); (v.visit_expr)(cond, (rcx, v)); rcx.set_repeating_scope(body.id); (v.visit_block)(body, (rcx, v)); rcx.set_repeating_scope(repeating_scope); } _ => { visit::visit_expr(expr, (rcx, v)); } } } fn check_expr_fn_block(rcx: @mut Rcx, expr: @ast::expr, v: rvt, is_loop_body: bool) { let tcx = rcx.fcx.tcx(); match expr.node { ast::expr_fn_block(_, ref body) => { let function_type = rcx.resolve_node_type(expr.id); match ty::get(function_type).sty { ty::ty_closure( ty::ClosureTy { sigil: ast::BorrowedSigil, region: region, _}) => { if get_freevars(tcx, expr.id).is_empty() && !is_loop_body { // No free variables means that the environment // will be NULL at runtime and hence the closure // has static lifetime. } else { // Otherwise, the closure must not outlive the // variables it closes over, nor can it // outlive the innermost repeating scope // (since otherwise that would require // infinite stack). 
constrain_free_variables(rcx, region, expr); let repeating_scope = ty::re_scope(rcx.repeating_scope); rcx.fcx.mk_subr(true, infer::InfStackClosure(expr.span), region, repeating_scope); } } _ => () } let repeating_scope = rcx.set_repeating_scope(body.id); visit::visit_expr(expr, (rcx, v)); rcx.set_repeating_scope(repeating_scope); } _ => { tcx.sess.span_bug( expr.span, "Expected expr_fn_block"); } } } fn constrain_callee(rcx: @mut Rcx, callee_id: ast::node_id, call_expr: @ast::expr, callee_expr: @ast::expr) { let call_region = ty::re_scope(call_expr.id); let callee_ty = rcx.resolve_node_type(callee_id); match ty::get(callee_ty).sty { ty::ty_bare_fn(*) => { } ty::ty_closure(ref closure_ty) => { rcx.fcx.mk_subr(true, infer::InvokeClosure(callee_expr.span), call_region, closure_ty.region); } _ => { // this should not happen, but it does if the program is // erroneous // // tcx.sess.span_bug( // callee_expr.span, // fmt!("Calling non-function: %s", callee_ty.repr(tcx))); } } } fn constrain_call(rcx: @mut Rcx, // might be expr_call, expr_method_call, or an overloaded // operator callee_id: ast::node_id, call_expr: @ast::expr, receiver: Option<@ast::expr>, arg_exprs: &[@ast::expr], implicitly_ref_args: bool) { //! Invoked on every call site (i.e., normal calls, method calls, //! and overloaded operators). Constrains the regions which appear //! in the type of the function. Also constrains the regions that //! appear in the arguments appropriately. let tcx = rcx.fcx.tcx(); debug!("constrain_call(call_expr=%s, implicitly_ref_args=%?)", call_expr.repr(tcx), implicitly_ref_args); let callee_ty = rcx.resolve_node_type(callee_id); if ty::type_is_error(callee_ty) { // Bail, as function type is unknown return; } let fn_sig = ty::ty_fn_sig(callee_ty); // `callee_region` is the scope representing the time in which the // call occurs. 
// // FIXME(#6268) to support nested method calls, should be callee_id let callee_scope = call_expr.id; let callee_region = ty::re_scope(callee_scope); for arg_exprs.iter().advance |&arg_expr| { // ensure that any regions appearing in the argument type are // valid for at least the lifetime of the function: constrain_regions_in_type_of_node( rcx, arg_expr.id, callee_region, infer::CallArg(arg_expr.span)); // unfortunately, there are two means of taking implicit // references, and we need to propagate constraints as a // result. modes are going away and the "DerefArgs" code // should be ported to use adjustments if implicitly_ref_args { guarantor::for_by_ref(rcx, arg_expr, callee_scope); } } // as loop above, but for receiver for receiver.iter().advance |&r| { constrain_regions_in_type_of_node( rcx, r.id, callee_region, infer::CallRcvr(r.span)); if implicitly_ref_args { guarantor::for_by_ref(rcx, r, callee_scope); } } // constrain regions that may appear in the return type to be // valid for the function call: constrain_regions_in_type( rcx, callee_region, infer::CallReturn(call_expr.span), fn_sig.output); } fn constrain_derefs(rcx: @mut Rcx, deref_expr: @ast::expr, derefs: uint, mut derefd_ty: ty::t) { /*! * Invoked on any dereference that occurs, whether explicitly * or through an auto-deref. Checks that if this is a region * pointer being derefenced, the lifetime of the pointer includes * the deref expr. */ let tcx = rcx.fcx.tcx(); let r_deref_expr = ty::re_scope(deref_expr.id); for uint::range(0, derefs) |i| { debug!("constrain_derefs(deref_expr=?, derefd_ty=%s, derefs=%?/%?", rcx.fcx.infcx().ty_to_str(derefd_ty), i, derefs); match ty::get(derefd_ty).sty { ty::ty_rptr(r_ptr, _) => { mk_subregion_due_to_derefence(rcx, deref_expr.span, r_deref_expr, r_ptr); } _ => {} } match ty::deref(tcx, derefd_ty, true) { Some(mt) => derefd_ty = mt.ty, /* if this type can't be dereferenced, then there's already an error in the session saying so. 
Just bail out for now */ None => break } } } pub fn mk_subregion_due_to_derefence(rcx: @mut Rcx, deref_span: span, minimum_lifetime: ty::Region, maximum_lifetime: ty::Region) { rcx.fcx.mk_subr(true, infer::DerefPointer(deref_span), minimum_lifetime, maximum_lifetime) } fn constrain_index(rcx: @mut Rcx, index_expr: @ast::expr, indexed_ty: ty::t) { /*! * Invoked on any index expression that occurs. Checks that if * this is a slice being indexed, the lifetime of the pointer * includes the deref expr. */ debug!("constrain_index(index_expr=?, indexed_ty=%s", rcx.fcx.infcx().ty_to_str(indexed_ty)); let r_index_expr = ty::re_scope(index_expr.id); match ty::get(indexed_ty).sty { ty::ty_estr(ty::vstore_slice(r_ptr)) | ty::ty_evec(_, ty::vstore_slice(r_ptr)) => { rcx.fcx.mk_subr(true, infer::IndexSlice(index_expr.span), r_index_expr, r_ptr); } _ => {} } } fn constrain_free_variables(rcx: @mut Rcx, region: ty::Region, expr: @ast::expr) { /*! * Make sure that all free variables referenced inside the closure * outlive the closure itself. */ let tcx = rcx.fcx.ccx.tcx; debug!("constrain_free_variables(%s, %s)", region.repr(tcx), expr.repr(tcx)); for get_freevars(tcx, expr.id).iter().advance |freevar| { debug!("freevar def is %?", freevar.def); let def = freevar.def; let en_region = encl_region_of_def(rcx.fcx, def); debug!("en_region = %s", en_region.repr(tcx)); rcx.fcx.mk_subr(true, infer::FreeVariable(freevar.span), region, en_region); } } fn constrain_regions_in_type_of_node( rcx: @mut Rcx, id: ast::node_id, minimum_lifetime: ty::Region, origin: infer::SubregionOrigin) -> bool { //! Guarantees that any lifetimes which appear in the type of //! the node `id` (after applying adjustments) are valid for at //! least `minimum_lifetime` let tcx = rcx.fcx.tcx(); // Try to resolve the type. If we encounter an error, then typeck // is going to fail anyway, so just stop here and let typeck // report errors later on in the writeback phase. 
let ty0 = rcx.resolve_node_type(id); let adjustment = rcx.fcx.inh.adjustments.find_copy(&id); let ty = ty::adjust_ty(tcx, origin.span(), ty0, adjustment); debug!("constrain_regions_in_type_of_node(\ ty=%s, ty0=%s, id=%d, minimum_lifetime=%?, adjustment=%?)", ty_to_str(tcx, ty), ty_to_str(tcx, ty0), id, minimum_lifetime, adjustment); constrain_regions_in_type(rcx, minimum_lifetime, origin, ty) } fn constrain_regions_in_type( rcx: @mut Rcx, minimum_lifetime: ty::Region, origin: infer::SubregionOrigin, ty: ty::t) -> bool { /*! * Requires that any regions which appear in `ty` must be * superregions of `minimum_lifetime`. Also enforces the constraint * that given a pointer type `&'r T`, T must not contain regions * that outlive 'r, as well as analogous constraints for other * lifetime'd types. * * This check prevents regions from being used outside of the block in * which they are valid. Recall that regions represent blocks of * code or expressions: this requirement basically says "any place * that uses or may use a region R must be within the block of * code that R corresponds to." */ let e = rcx.errors_reported; let tcx = rcx.fcx.ccx.tcx; debug!("constrain_regions_in_type(minimum_lifetime=%s, ty=%s)", region_to_str(tcx, "", false, minimum_lifetime), ty_to_str(tcx, ty)); do relate_nested_regions(tcx, Some(minimum_lifetime), ty) |r_sub, r_sup| { debug!("relate(r_sub=%s, r_sup=%s)", region_to_str(tcx, "", false, r_sub), region_to_str(tcx, "", false, r_sup)); if r_sup.is_bound() || r_sub.is_bound() { // a bound region is one which appears inside an fn type. // (e.g., the `&` in `fn(&T)`). Such regions need not be // constrained by `minimum_lifetime` as they are placeholders // for regions that are as-yet-unknown. } else if r_sub == minimum_lifetime { rcx.fcx.mk_subr( true, origin, r_sub, r_sup); } else { rcx.fcx.mk_subr( true, infer::ReferenceOutlivesReferent(ty, origin.span()), r_sub, r_sup); } } return (e == rcx.errors_reported); } pub mod guarantor { /*! 
* The routines in this module are aiming to deal with the case * where a the contents of a borrowed pointer are re-borrowed. * Imagine you have a borrowed pointer `b` with lifetime L1 and * you have an expression `&*b`. The result of this borrow will * be another borrowed pointer with lifetime L2 (which is an * inference variable). The borrow checker is going to enforce * the constraint that L2 < L1, because otherwise you are * re-borrowing data for a lifetime larger than the original loan. * However, without the routines in this module, the region * inferencer would not know of this dependency and thus it might * infer the lifetime of L2 to be greater than L1 (issue #3148). * * There are a number of troublesome scenarios in the tests * `region-dependent-*.rs`, but here is one example: * * struct Foo { i: int } * struct Bar { foo: Foo } * fn get_i(x: &'a Bar) -> &'a int { * let foo = &x.foo; // Lifetime L1 * &foo.i // Lifetime L2 * } * * Note that this comes up either with `&` expressions, `ref` * bindings, and `autorefs`, which are the three ways to introduce * a borrow. * * The key point here is that when you are borrowing a value that * is "guaranteed" by a borrowed pointer, you must link the * lifetime of that borrowed pointer (L1, here) to the lifetime of * the borrow itself (L2). What do I mean by "guaranteed" by a * borrowed pointer? I mean any data that is reached by first * dereferencing a borrowed pointer and then either traversing * interior offsets or owned pointers. We say that the guarantor * of such data it the region of the borrowed pointer that was * traversed. This is essentially the same as the ownership * relation, except that a borrowed pointer never owns its * contents. * * NB: I really wanted to use the `mem_categorization` code here * but I cannot because final type resolution hasn't happened yet, * and `mem_categorization` requires that all types be known. 
* So this is very similar logic to what you would find there, * but more special purpose. */ use middle::typeck::check::regionck::Rcx; use middle::typeck::check::regionck::mk_subregion_due_to_derefence; use middle::typeck::infer; use middle::ty; use syntax::ast; use syntax::codemap::span; use util::ppaux::{ty_to_str}; use std::uint; pub fn for_addr_of(rcx: @mut Rcx, expr: @ast::expr, base: @ast::expr) { /*! * Computes the guarantor for an expression `&base` and then * ensures that the lifetime of the resulting pointer is linked * to the lifetime of its guarantor (if any). */ debug!("guarantor::for_addr_of(base=?)"); let guarantor = guarantor(rcx, base); link(rcx, expr.span, expr.id, guarantor); } pub fn for_match(rcx: @mut Rcx, discr: @ast::expr, arms: &[ast::arm]) { /*! * Computes the guarantors for any ref bindings in a match and * then ensures that the lifetime of the resulting pointer is * linked to the lifetime of its guarantor (if any). */ debug!("regionck::for_match()"); let discr_guarantor = guarantor(rcx, discr); debug!("discr_guarantor=%s", discr_guarantor.repr(rcx.tcx())); for arms.iter().advance |arm| { for arm.pats.iter().advance |pat| { link_ref_bindings_in_pat(rcx, *pat, discr_guarantor); } } } pub fn for_autoref(rcx: @mut Rcx, expr: @ast::expr, autoderefs: uint, autoref: &ty::AutoRef) { /*! * Computes the guarantor for an expression that has an * autoref adjustment and links it to the lifetime of the * autoref. This is only important when auto re-borrowing * region pointers. */ debug!("guarantor::for_autoref(autoref=%?)", autoref); let mut expr_ct = categorize_unadjusted(rcx, expr); debug!(" unadjusted cat=%?", expr_ct.cat); expr_ct = apply_autoderefs( rcx, expr, autoderefs, expr_ct); match *autoref { ty::AutoPtr(r, _) => { // In this case, we are implicitly adding an `&`. 
maybe_make_subregion(rcx, expr, r, expr_ct.cat.guarantor); } ty::AutoBorrowVec(r, _) | ty::AutoBorrowVecRef(r, _) | ty::AutoBorrowFn(r) => { // In each of these cases, what is being borrowed is // not the (autoderef'd) expr itself but rather the // contents of the autoderef'd expression (i.e., what // the pointer points at). maybe_make_subregion(rcx, expr, r, guarantor_of_deref(&expr_ct.cat)); } ty::AutoUnsafe(_) => {} } fn maybe_make_subregion( rcx: @mut Rcx, expr: @ast::expr, sub_region: ty::Region, sup_region: Option<ty::Region>) { for sup_region.iter().advance |r| { rcx.fcx.mk_subr(true, infer::Reborrow(expr.span), sub_region, *r); } } } pub fn for_by_ref(rcx: @mut Rcx, expr: @ast::expr, callee_scope: ast::node_id) { /*! * Computes the guarantor for cases where the `expr` is * being passed by implicit reference and must outlive * `callee_scope`. */ let tcx = rcx.tcx(); debug!("guarantor::for_by_ref(expr=%s, callee_scope=%?)", expr.repr(tcx), callee_scope); let expr_cat = categorize(rcx, expr); debug!("guarantor::for_by_ref(expr=%?, callee_scope=%?) category=%?", expr.id, callee_scope, expr_cat); let minimum_lifetime = ty::re_scope(callee_scope); for expr_cat.guarantor.iter().advance |guarantor| { mk_subregion_due_to_derefence(rcx, expr.span, minimum_lifetime, *guarantor); } } fn link( rcx: @mut Rcx, span: span, id: ast::node_id, guarantor: Option<ty::Region>) { /*! * * Links the lifetime of the borrowed pointer resulting from a borrow * to the lifetime of its guarantor (if any). */ debug!("link(id=%?, guarantor=%?)", id, guarantor); let bound = match guarantor { None => { // If guarantor is None, then the value being borrowed // is not guaranteed by a region pointer, so there are // no lifetimes to link. return; } Some(r) => { r } }; // this routine is used for the result of ref bindings and & // expressions, both of which always yield a region variable, so // mk_subr should never fail. 
let rptr_ty = rcx.resolve_node_type(id); if !ty::type_is_bot(rptr_ty) { let tcx = rcx.fcx.ccx.tcx; debug!("rptr_ty=%s", ty_to_str(tcx, rptr_ty)); let r = ty::ty_region(tcx, span, rptr_ty); rcx.fcx.mk_subr(true, infer::Reborrow(span), r, bound); } } /// Categorizes types based on what kind of pointer they are. /// Note that we don't bother to distinguish between rptrs (&T) /// and slices (&[T], &str)---they are all just `BorrowedPointer`. enum PointerCategorization { NotPointer, OwnedPointer, BorrowedPointer(ty::Region), OtherPointer } /// Guarantor of an expression paired with the /// PointerCategorization` of its type. struct ExprCategorization { guarantor: Option<ty::Region>, pointer: PointerCategorization } /// ExprCategorization paired with the full type of the expr struct ExprCategorizationType { cat: ExprCategorization, ty: ty::t } fn guarantor(rcx: @mut Rcx, expr: @ast::expr) -> Option<ty::Region> { /*! * * Computes the guarantor of `expr`, or None if `expr` is * not guaranteed by any region. Here `expr` is some expression * whose address is being taken (e.g., there is an expression * `&expr`). */ debug!("guarantor()"); match expr.node { ast::expr_unary(_, ast::deref, b) => { let cat = categorize(rcx, b); guarantor_of_deref(&cat) } ast::expr_field(b, _, _) => { categorize(rcx, b).guarantor } ast::expr_index(_, b, _) => { let cat = categorize(rcx, b); guarantor_of_deref(&cat) } ast::expr_paren(e) => { guarantor(rcx, e) } ast::expr_path(*) | ast::expr_self => { // Either a variable or constant and hence resides // in constant memory or on the stack frame. Either way, // not guaranteed by a region pointer. None } // All of these expressions are rvalues and hence their // value is not guaranteed by a region pointer. 
ast::expr_inline_asm(*) | ast::expr_mac(*) | ast::expr_lit(_) | ast::expr_unary(*) | ast::expr_addr_of(*) | ast::expr_binary(*) | ast::expr_vstore(*) | ast::expr_break(*) | ast::expr_again(*) | ast::expr_ret(*) | ast::expr_log(*) | ast::expr_while(*) | ast::expr_loop(*) | ast::expr_assign(*) | ast::expr_assign_op(*) | ast::expr_cast(*) | ast::expr_call(*) | ast::expr_method_call(*) | ast::expr_struct(*) | ast::expr_tup(*) | ast::expr_if(*) | ast::expr_match(*) | ast::expr_fn_block(*) | ast::expr_loop_body(*) | ast::expr_do_body(*) | ast::expr_block(*) | ast::expr_copy(*) | ast::expr_repeat(*) | ast::expr_vec(*) => { assert!(!ty::expr_is_lval( rcx.fcx.tcx(), rcx.fcx.inh.method_map, expr)); None } } } fn categorize(rcx: @mut Rcx, expr: @ast::expr) -> ExprCategorization { debug!("categorize()"); let mut expr_ct = categorize_unadjusted(rcx, expr); debug!("before adjustments, cat=%?", expr_ct.cat); match rcx.fcx.inh.adjustments.find(&expr.id) { Some(&@ty::AutoAddEnv(*)) => { // This is basically an rvalue, not a pointer, no regions // involved. expr_ct.cat = ExprCategorization { guarantor: None, pointer: NotPointer }; } Some(&@ty::AutoDerefRef(ref adjustment)) => { debug!("adjustment=%?", adjustment); expr_ct = apply_autoderefs( rcx, expr, adjustment.autoderefs, expr_ct); match adjustment.autoref { None => { } Some(ty::AutoUnsafe(_)) => { expr_ct.cat.guarantor = None; expr_ct.cat.pointer = OtherPointer; debug!("autoref, cat=%?", expr_ct.cat); } Some(ty::AutoPtr(r, _)) | Some(ty::AutoBorrowVec(r, _)) | Some(ty::AutoBorrowVecRef(r, _)) | Some(ty::AutoBorrowFn(r)) => { // If there is an autoref, then the result of this // expression will be some sort of borrowed pointer. 
expr_ct.cat.guarantor = None; expr_ct.cat.pointer = BorrowedPointer(r); debug!("autoref, cat=%?", expr_ct.cat); } } } None => {} } debug!("result=%?", expr_ct.cat); return expr_ct.cat; } fn categorize_unadjusted(rcx: @mut Rcx, expr: @ast::expr) -> ExprCategorizationType { debug!("categorize_unadjusted()"); let guarantor = { if rcx.fcx.inh.method_map.contains_key(&expr.id) { None } else { guarantor(rcx, expr) } }; let expr_ty = rcx.resolve_node_type(expr.id); ExprCategorizationType { cat: ExprCategorization { guarantor: guarantor, pointer: pointer_categorize(expr_ty) }, ty: expr_ty } } fn apply_autoderefs( rcx: @mut Rcx, expr: @ast::expr, autoderefs: uint, ct: ExprCategorizationType) -> ExprCategorizationType { let mut ct = ct; let tcx = rcx.fcx.ccx.tcx; if (ty::type_is_error(ct.ty)) { ct.cat.pointer = NotPointer; return ct; } for uint::range(0, autoderefs) |_| { ct.cat.guarantor = guarantor_of_deref(&ct.cat); match ty::deref(tcx, ct.ty, true) { Some(mt) => { ct.ty = mt.ty; ct.cat.pointer = pointer_categorize(ct.ty); } None => { tcx.sess.span_bug( expr.span, fmt!("Autoderef but type not derefable: %s", ty_to_str(tcx, ct.ty))); } } debug!("autoderef, cat=%?", ct.cat); } return ct; } fn pointer_categorize(ty: ty::t) -> PointerCategorization { match ty::get(ty).sty { ty::ty_rptr(r, _) | ty::ty_evec(_, ty::vstore_slice(r)) | ty::ty_estr(ty::vstore_slice(r)) => { BorrowedPointer(r) } ty::ty_uniq(*) | ty::ty_estr(ty::vstore_uniq) | ty::ty_evec(_, ty::vstore_uniq) => { OwnedPointer } ty::ty_box(*) | ty::ty_ptr(*) | ty::ty_evec(_, ty::vstore_box) | ty::ty_estr(ty::vstore_box) => { OtherPointer } ty::ty_closure(ref closure_ty) => { match closure_ty.sigil { ast::BorrowedSigil => BorrowedPointer(closure_ty.region), ast::OwnedSigil => OwnedPointer, ast::ManagedSigil => OtherPointer, } } _ => { NotPointer } } } fn guarantor_of_deref(cat: &ExprCategorization) -> Option<ty::Region> { match cat.pointer { NotPointer => cat.guarantor, BorrowedPointer(r) => Some(r), OwnedPointer => 
cat.guarantor, OtherPointer => None } } fn link_ref_bindings_in_pat( rcx: @mut Rcx, pat: @ast::pat, guarantor: Option<ty::Region>) { /*! * * Descends through the pattern, tracking the guarantor * of the value being matched. When a ref binding is encountered, * links the lifetime of that ref binding to the lifetime of * the guarantor. We begin with the guarantor of the * discriminant but of course as we go we may pass through * other pointers. */ debug!("link_ref_bindings_in_pat(pat=%s, guarantor=%?)", rcx.fcx.pat_to_str(pat), guarantor); match pat.node { ast::pat_wild => {} ast::pat_ident(ast::bind_by_ref(_), _, opt_p) => { link(rcx, pat.span, pat.id, guarantor); for opt_p.iter().advance |p| { link_ref_bindings_in_pat(rcx, *p, guarantor); } } ast::pat_ident(_, _, opt_p) => { for opt_p.iter().advance |p| { link_ref_bindings_in_pat(rcx, *p, guarantor); } } ast::pat_enum(_, None) => {} ast::pat_enum(_, Some(ref pats)) => { link_ref_bindings_in_pats(rcx, pats, guarantor); } ast::pat_struct(_, ref fpats, _) => { for fpats.iter().advance |fpat| { link_ref_bindings_in_pat(rcx, fpat.pat, guarantor); } } ast::pat_tup(ref ps) => { link_ref_bindings_in_pats(rcx, ps, guarantor) } ast::pat_box(p) => { link_ref_bindings_in_pat(rcx, p, None) } ast::pat_uniq(p) => { link_ref_bindings_in_pat(rcx, p, guarantor) } ast::pat_region(p) => { let rptr_ty = rcx.resolve_node_type(pat.id); let r = ty::ty_region(rcx.fcx.tcx(), pat.span, rptr_ty); link_ref_bindings_in_pat(rcx, p, Some(r)); } ast::pat_lit(*) => {} ast::pat_range(*) => {} ast::pat_vec(ref before, ref slice, ref after) => { let vec_ty = rcx.resolve_node_type(pat.id); let vstore = ty::ty_vstore(vec_ty); let guarantor1 = match vstore { ty::vstore_fixed(_) | ty::vstore_uniq => guarantor, ty::vstore_slice(r) => Some(r), ty::vstore_box => None }; link_ref_bindings_in_pats(rcx, before, guarantor1); for slice.iter().advance |&p| { link_ref_bindings_in_pat(rcx, p, guarantor); } link_ref_bindings_in_pats(rcx, after, guarantor1); } } } fn 
link_ref_bindings_in_pats(rcx: @mut Rcx, pats: &~[@ast::pat], guarantor: Option<ty::Region>) { for pats.iter().advance |pat| { link_ref_bindings_in_pat(rcx, *pat, guarantor); } } }
36.921814
87
0.543972
ebccdc4539de535d18f2913b21020ab323dd1eb2
9,161
use crate::contract::Context; use crate::util::expand_doc; use ethcontract_common::{Address, DeploymentInformation}; use proc_macro2::{Literal, TokenStream}; use quote::quote; pub(crate) fn expand(cx: &Context) -> TokenStream { let artifact_json = &cx.artifact_json; let contract_name = &cx.contract_name; let doc_str = cx .artifact .devdoc .details .as_deref() .unwrap_or("Generated by `ethcontract`"); let doc = expand_doc(doc_str); let deployments = cx.deployments.iter().map(|(network_id, deployment)| { let network_id = Literal::string(&network_id.to_string()); let address = expand_address(deployment.address); let deployment_information = expand_deployment_information(deployment.deployment_information); quote! { artifact.networks.insert( #network_id.to_owned(), self::ethcontract::common::truffle::Network { address: #address, deployment_information: #deployment_information, }, ); } }); quote! { #doc #[derive(Clone)] pub struct Contract { methods: Methods, } impl Contract { /// Retrieves the truffle artifact used to generate the type safe /// API for this contract. pub fn artifact() -> &'static self::ethcontract::Artifact { use self::ethcontract::private::lazy_static; use self::ethcontract::Artifact; lazy_static! { pub static ref ARTIFACT: Artifact = { #[allow(unused_mut)] let mut artifact = Artifact::from_json(#artifact_json) .expect("valid artifact JSON"); #( #deployments )* artifact }; } &ARTIFACT } /// Creates a new contract instance with the specified `web3` /// provider at the given `Address`. /// /// Note that this does not verify that a contract with a maching /// `Abi` is actually deployed at the given address. 
pub fn at<F, T>( web3: &self::ethcontract::web3::api::Web3<T>, address: self::ethcontract::Address, ) -> Self where F: std::future::Future< Output = Result<self::ethcontract::json::Value, self::ethcontract::web3::Error> > + Send + Unpin + 'static, T: self::ethcontract::web3::Transport<Out = F> + Send + Sync + 'static, { Contract::with_deployment_info(web3, address, None) } /// Creates a new contract instance with the specified `web3` provider with /// the given `Abi` at the given `Address` and an optional transaction hash. /// This hash is used to retrieve contract related information such as the /// creation block (which is useful for fetching all historic events). /// /// Note that this does not verify that a contract with a matching `Abi` is /// actually deployed at the given address nor that the transaction hash, /// when provided, is actually for this contract deployment. pub fn with_deployment_info<F, T>( web3: &self::ethcontract::web3::api::Web3<T>, address: self::ethcontract::Address, deployment_information: Option<ethcontract::common::DeploymentInformation>, ) -> Self where F: std::future::Future< Output = Result<self::ethcontract::json::Value, self::ethcontract::web3::Error> > + Send + Unpin + 'static, T: self::ethcontract::web3::Transport<Out = F> + Send + Sync + 'static, { use self::ethcontract::Instance; use self::ethcontract::transport::DynTransport; use self::ethcontract::web3::api::Web3; let transport = DynTransport::new(web3.transport().clone()); let web3 = Web3::new(transport); let abi = Self::artifact().abi.clone(); let instance = Instance::with_deployment_info(web3, abi, address, deployment_information); Contract::from_raw(instance) } /// Creates a contract from a raw instance. fn from_raw(instance: self::ethcontract::dyns::DynInstance) -> Self { let methods = Methods { instance }; Contract { methods } } /// Returns the contract address being used by this instance. 
pub fn address(&self) -> self::ethcontract::Address { self.raw_instance().address() } /// Returns the deployment information of the contract /// if it is known, `None` otherwise. pub fn deployment_information(&self) -> Option<ethcontract::common::DeploymentInformation> { self.raw_instance().deployment_information() } /// Returns a reference to the default method options used by this /// contract. pub fn defaults(&self) -> &self::ethcontract::contract::MethodDefaults { &self.raw_instance().defaults } /// Returns a mutable reference to the default method options used /// by this contract. pub fn defaults_mut(&mut self) -> &mut self::ethcontract::contract::MethodDefaults { &mut self.raw_instance_mut().defaults } /// Returns a reference to the raw runtime instance used by this /// contract. pub fn raw_instance(&self) -> &self::ethcontract::dyns::DynInstance { &self.methods.instance } /// Returns a mutable reference to the raw runtime instance used by /// this contract. fn raw_instance_mut(&mut self) -> &mut self::ethcontract::dyns::DynInstance { &mut self.methods.instance } } impl std::fmt::Debug for Contract { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { f.debug_tuple(stringify!(#contract_name)) .field(&self.address()) .finish() } } } } /// Expands an `Address` into a literal representation that can be used with /// quasi-quoting for code generation. fn expand_address(address: Address) -> TokenStream { let bytes = address .as_bytes() .iter() .copied() .map(Literal::u8_unsuffixed); quote! { self::ethcontract::H160([#( #bytes ),*]) } } /// Expands a deployment info into a literal representation that can be used /// with quasi-quoting for code generation. fn expand_deployment_information(deployment: Option<DeploymentInformation>) -> TokenStream { match deployment { Some(DeploymentInformation::BlockNumber(block)) => quote! 
{ Some(ethcontract::common::DeploymentInformation::BlockNumber(#block)) }, Some(DeploymentInformation::TransactionHash(hash)) => { let bytes = hash.as_bytes().iter().copied().map(Literal::u8_unsuffixed); quote! { Some(ethcontract::common::DeploymentInformation::TransactionHash([#( #bytes ),*].into())) } } None => return quote! { None }, } } #[cfg(test)] mod tests { use super::*; #[test] #[rustfmt::skip] fn expand_address_value() { assert_quote!( expand_address(Address::zero()), { self::ethcontract::H160([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]) }, ); assert_quote!( expand_address("000102030405060708090a0b0c0d0e0f10111213".parse().unwrap()), { self::ethcontract::H160([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]) }, ); } #[test] #[rustfmt::skip] fn expand_deployment_information_value() { assert_quote!(expand_deployment_information(None), { None }); assert_quote!( expand_deployment_information(Some(DeploymentInformation::TransactionHash("000102030405060708090a0b0c0d0e0f10111213000000000000000000000000".parse().unwrap()))), { Some(ethcontract::common::DeploymentInformation::TransactionHash([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0].into())) }, ); assert_quote!( expand_deployment_information(Some(DeploymentInformation::BlockNumber(42))), { Some(ethcontract::common::DeploymentInformation::BlockNumber(42u64)) }, ); } }
38.170833
196
0.549722
11896a415a40355b18d9f5b234fa5458630ee6d5
9,226
#[doc = "Writer for register HSTPIPIER0_INTPIPES"] pub type W = crate::W<u32, super::HSTPIPIER0_INTPIPES>; #[doc = "Write proxy for field `RXINES`"] pub struct RXINES_W<'a> { w: &'a mut W, } impl<'a> RXINES_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01); self.w } } #[doc = "Write proxy for field `TXOUTES`"] pub struct TXOUTES_W<'a> { w: &'a mut W, } impl<'a> TXOUTES_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u32) & 0x01) << 1); self.w } } #[doc = "Write proxy for field `UNDERFIES`"] pub struct UNDERFIES_W<'a> { w: &'a mut W, } impl<'a> UNDERFIES_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u32) & 0x01) << 2); self.w } } #[doc = "Write proxy for field `PERRES`"] pub struct PERRES_W<'a> { w: &'a mut W, } impl<'a> PERRES_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] 
#[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 3)) | (((value as u32) & 0x01) << 3); self.w } } #[doc = "Write proxy for field `NAKEDES`"] pub struct NAKEDES_W<'a> { w: &'a mut W, } impl<'a> NAKEDES_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u32) & 0x01) << 4); self.w } } #[doc = "Write proxy for field `OVERFIES`"] pub struct OVERFIES_W<'a> { w: &'a mut W, } impl<'a> OVERFIES_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 5)) | (((value as u32) & 0x01) << 5); self.w } } #[doc = "Write proxy for field `RXSTALLDES`"] pub struct RXSTALLDES_W<'a> { w: &'a mut W, } impl<'a> RXSTALLDES_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 6)) | (((value as u32) & 0x01) << 6); self.w } } #[doc = "Write proxy for field `SHORTPACKETIES`"] pub struct SHORTPACKETIES_W<'a> { w: &'a mut W, } impl<'a> SHORTPACKETIES_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn 
clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 7)) | (((value as u32) & 0x01) << 7); self.w } } #[doc = "Write proxy for field `NBUSYBKES`"] pub struct NBUSYBKES_W<'a> { w: &'a mut W, } impl<'a> NBUSYBKES_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 12)) | (((value as u32) & 0x01) << 12); self.w } } #[doc = "Write proxy for field `PDISHDMAS`"] pub struct PDISHDMAS_W<'a> { w: &'a mut W, } impl<'a> PDISHDMAS_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 16)) | (((value as u32) & 0x01) << 16); self.w } } #[doc = "Write proxy for field `PFREEZES`"] pub struct PFREEZES_W<'a> { w: &'a mut W, } impl<'a> PFREEZES_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 17)) | (((value as u32) & 0x01) << 17); self.w } } #[doc = "Write proxy for field `RSTDTS`"] pub struct RSTDTS_W<'a> { w: &'a mut W, } impl<'a> RSTDTS_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { 
self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 18)) | (((value as u32) & 0x01) << 18); self.w } } impl W { #[doc = "Bit 0 - Received IN Data Interrupt Enable"] #[inline(always)] pub fn rxines(&mut self) -> RXINES_W { RXINES_W { w: self } } #[doc = "Bit 1 - Transmitted OUT Data Interrupt Enable"] #[inline(always)] pub fn txoutes(&mut self) -> TXOUTES_W { TXOUTES_W { w: self } } #[doc = "Bit 2 - Underflow Interrupt Enable"] #[inline(always)] pub fn underfies(&mut self) -> UNDERFIES_W { UNDERFIES_W { w: self } } #[doc = "Bit 3 - Pipe Error Interrupt Enable"] #[inline(always)] pub fn perres(&mut self) -> PERRES_W { PERRES_W { w: self } } #[doc = "Bit 4 - NAKed Interrupt Enable"] #[inline(always)] pub fn nakedes(&mut self) -> NAKEDES_W { NAKEDES_W { w: self } } #[doc = "Bit 5 - Overflow Interrupt Enable"] #[inline(always)] pub fn overfies(&mut self) -> OVERFIES_W { OVERFIES_W { w: self } } #[doc = "Bit 6 - Received STALLed Interrupt Enable"] #[inline(always)] pub fn rxstalldes(&mut self) -> RXSTALLDES_W { RXSTALLDES_W { w: self } } #[doc = "Bit 7 - Short Packet Interrupt Enable"] #[inline(always)] pub fn shortpacketies(&mut self) -> SHORTPACKETIES_W { SHORTPACKETIES_W { w: self } } #[doc = "Bit 12 - Number of Busy Banks Enable"] #[inline(always)] pub fn nbusybkes(&mut self) -> NBUSYBKES_W { NBUSYBKES_W { w: self } } #[doc = "Bit 16 - Pipe Interrupts Disable HDMA Request Enable"] #[inline(always)] pub fn pdishdmas(&mut self) -> PDISHDMAS_W { PDISHDMAS_W { w: self } } #[doc = "Bit 17 - Pipe Freeze Enable"] #[inline(always)] pub fn pfreezes(&mut self) -> PFREEZES_W { PFREEZES_W { w: self } } #[doc = "Bit 18 - Reset Data Toggle Enable"] #[inline(always)] pub fn rstdts(&mut self) -> RSTDTS_W { RSTDTS_W { w: self } } }
28.042553
86
0.531975
ab2d78994c60e1061158e157cc5088a3a0dbdaac
2,532
use byteorder::{ByteOrder, LittleEndian}; use ring::aead; use ring::digest; use ring::error::Unspecified; use ring::rand::{SystemRandom, SecureRandom}; use std; // Authentication utilities for a single entry pub fn generate_key(target: i32, master: &[u8]) -> Result<String, Unspecified> { // Convert to bytes let mut buf: [u8; 4 + aead::MAX_TAG_LEN] = [0; 4 + aead::MAX_TAG_LEN]; // 4 bytes for input, MAX_LAG_LEN for cap LittleEndian::write_i32(&mut buf, target); // Create a 256-bit hash of the master secret let hash = digest::digest(&digest::SHA256, master); // Creating sealing key let sealing_key = aead::SealingKey::new(&aead::AES_256_GCM, hash.as_ref())?; // Generate a 96-bit nonce let mut nonce: [u8; 12] = [0; 12]; let rng = SystemRandom::new(); rng.fill(&mut nonce)?; let len = aead::seal_in_place( &sealing_key, &nonce, &[], &mut buf, aead::MAX_TAG_LEN)?; let mut result = String::with_capacity(12 * 2 + len * 2); for byte in &nonce { write!(&mut result as &mut std::fmt::Write, "{:02x}", byte).map_err(|_| Unspecified)? } for byte in &buf[..len] { write!(&mut result as &mut std::fmt::Write, "{:02x}", byte).map_err(|_| Unspecified)? 
} Ok(result) } pub fn try_decrypt_key(key: &str, master: &[u8]) -> Option<i32> { if key.len() % 2 != 0 || key.len() < 12 * 2 { // Got no nounce return None; } let data = &key[12*2..]; let mut nonce_vec: [u8; 12] = [0; 12]; let mut data_vec: Vec<u8> = Vec::with_capacity(data.len() / 2); let result: Result<(), std::num::ParseIntError> = do catch { for i in 0..12 { nonce_vec[i] = u8::from_str_radix(&key[i*2..i*2+2], 16)?; } for i in 0..data.len() / 2 { data_vec.push(u8::from_str_radix(&data[i*2..i*2+2], 16)?); } Ok(()) }; if result.is_err() { return None; } let result: Result<(), Unspecified> = do catch { // Create a 256-bit hash of the master secret let hash = digest::digest(&digest::SHA256, master); // Creating opening key let opening_key = aead::OpeningKey::new(&aead::AES_256_GCM, hash.as_ref())?; aead::open_in_place( &opening_key, &nonce_vec, &[], 0, &mut data_vec)?; Ok(()) }; if result.is_err() { return None; } Some(LittleEndian::read_i32(data_vec.as_slice())) }
28.133333
116
0.562006
16206136190e7b3688902374903ce7ec1ab61d50
12,975
use crate::rpc::methods::*; use crate::rpc::{ codec::base::OutboundCodec, protocol::{Encoding, Protocol, ProtocolId, RPCError, Version}, }; use crate::rpc::{RPCCodedResponse, RPCRequest, RPCResponse}; use libp2p::bytes::BytesMut; use snap::read::FrameDecoder; use snap::write::FrameEncoder; use std::io::Cursor; use std::io::ErrorKind; use std::io::{Read, Write}; use tokio_util::codec::{Decoder, Encoder}; use unsigned_varint::codec::Uvi; /* Inbound Codec */ pub struct SnappyInboundCodec { protocol: ProtocolId, inner: Uvi<usize>, len: Option<usize>, /// Maximum bytes that can be sent in one req/resp chunked responses. max_packet_size: usize, } impl SnappyInboundCodec { pub fn new(protocol: ProtocolId, max_packet_size: usize) -> Self { let uvi_codec = Uvi::default(); // this encoding only applies to ssz_snappy. debug_assert_eq!(protocol.encoding, Encoding::Snappy); SnappyInboundCodec { inner: uvi_codec, protocol, len: None, max_packet_size, } } } // Encoder for inbound streams: Encodes RPC Responses sent to peers. 
impl Encoder<RPCCodedResponse> for SnappyInboundCodec {
    type Error = RPCError;

    /// Frames one response chunk as a varint length prefix (of the
    /// uncompressed bytes) followed by the snappy-compressed payload.
    fn encode(&mut self, item: RPCCodedResponse, dst: &mut BytesMut) -> Result<(), Self::Error> {
        let bytes = match item {
            RPCCodedResponse::Success(resp) => match resp {
                RPCResponse::Status(res) => res,
                RPCResponse::Pong(res) => res,
                RPCResponse::MetaData(res) => res,
            },
            // Error variants carry their message bytes directly.
            RPCCodedResponse::InvalidRequest(err) => err.to_vec(),
            RPCCodedResponse::ServerError(err) => err.to_vec(),
            RPCCodedResponse::Unknown(err) => err.to_vec(),
        };
        // encoded bytes should be within `max_packet_size`
        if bytes.len() > self.max_packet_size {
            return Err(RPCError::InternalError(
                "attempting to encode data > max_packet_size".into(),
            ));
        }

        // Inserts the length prefix of the uncompressed bytes into dst
        // encoded as a unsigned varint
        self.inner
            .encode(bytes.len(), dst)
            .map_err(RPCError::from)?;

        let mut writer = FrameEncoder::new(Vec::new());
        writer.write_all(&bytes).map_err(RPCError::from)?;
        writer.flush().map_err(RPCError::from)?;

        // Write compressed bytes to `dst`
        dst.extend_from_slice(writer.get_ref());
        Ok(())
    }
}

// Decoder for inbound streams: Decodes RPC requests from peers
impl Decoder for SnappyInboundCodec {
    type Item = RPCRequest;
    type Error = RPCError;

    /// Decodes one request chunk. Returns `Ok(None)` until enough bytes
    /// have arrived; the varint length is cached in `self.len` across calls.
    fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
        if self.len.is_none() {
            // Decode the length of the uncompressed bytes from an unsigned varint
            match self.inner.decode(src).map_err(RPCError::from)? {
                Some(length) => {
                    self.len = Some(length);
                }
                None => return Ok(None), // need more bytes to decode length
            }
        };

        let length = self.len.expect("length should be Some");

        // Should not attempt to decode rpc chunks with length > max_packet_size
        if length > self.max_packet_size {
            return Err(RPCError::InvalidData);
        }
        let mut reader = FrameDecoder::new(Cursor::new(&src));
        let mut decoded_buffer = vec![0; length];

        match reader.read_exact(&mut decoded_buffer) {
            Ok(()) => {
                // `n` is how many bytes the reader read in the compressed stream
                let n = reader.get_ref().position();
                // Reset cached length and drop the consumed compressed bytes
                // so the next call starts at the next chunk's varint.
                self.len = None;
                let _read_bytes = src.split_to(n as usize);
                // An empty body is only valid for MetaData requests.
                match self.protocol.message_name {
                    Protocol::Status => match self.protocol.version {
                        Version::V1 => {
                            if decoded_buffer.len() > 0 {
                                Ok(Some(RPCRequest::Status(decoded_buffer)))
                            } else {
                                Err(RPCError::InvalidData)
                            }
                        }
                    },
                    Protocol::Goodbye => match self.protocol.version {
                        Version::V1 => {
                            if decoded_buffer.len() > 0 {
                                Ok(Some(RPCRequest::Goodbye(decoded_buffer)))
                            } else {
                                Err(RPCError::InvalidData)
                            }
                        }
                    },
                    Protocol::Ping => match self.protocol.version {
                        Version::V1 => {
                            if decoded_buffer.len() > 0 {
                                Ok(Some(RPCRequest::Ping(decoded_buffer)))
                            } else {
                                Err(RPCError::InvalidData)
                            }
                        }
                    },
                    Protocol::MetaData => match self.protocol.version {
                        Version::V1 => {
                            if decoded_buffer.len() > 0 {
                                Err(RPCError::InvalidData)
                            } else {
                                Ok(Some(RPCRequest::MetaData))
                            }
                        }
                    },
                }
            }
            Err(e) => match e.kind() {
                // Haven't received enough bytes to decode yet
                // TODO: check if this is the only Error variant where we return `Ok(None)`
                ErrorKind::UnexpectedEof => {
                    return Ok(None);
                }
                _ => return Err(e).map_err(RPCError::from),
            },
        }
    }
}

/* Outbound Codec: Codec for initiating RPC requests */

/// Codec for the outbound (dialing) side of an RPC stream: encodes
/// requests we send and decodes responses (or error strings) we receive.
pub struct SnappyOutboundCodec {
    inner: Uvi<usize>,
    len: Option<usize>,
    protocol: ProtocolId,
    /// Maximum bytes that can be sent in one req/resp chunked responses.
    max_packet_size: usize,
}

impl SnappyOutboundCodec {
    /// Builds a codec for `protocol`, rejecting any chunk whose declared
    /// uncompressed length exceeds `max_packet_size`.
    pub fn new(protocol: ProtocolId, max_packet_size: usize) -> Self {
        let uvi_codec = Uvi::default();
        // this encoding only applies to ssz_snappy.
        debug_assert_eq!(protocol.encoding, Encoding::Snappy);
        SnappyOutboundCodec {
            inner: uvi_codec,
            protocol,
            max_packet_size,
            len: None,
        }
    }
}

// Encoder for outbound streams: Encodes RPC Requests to peers
impl Encoder<RPCRequest> for SnappyOutboundCodec {
    type Error = RPCError;

    /// Frames one request as `varint(uncompressed_len) || snappy_frame(body)`.
    /// MetaData requests have no body and produce no bytes at all.
    fn encode(&mut self, item: RPCRequest, dst: &mut BytesMut) -> Result<(), Self::Error> {
        let bytes = match item {
            RPCRequest::Status(req) => req,
            RPCRequest::Goodbye(req) => req,
            RPCRequest::Ping(req) => req,
            RPCRequest::MetaData => return Ok(()), // no metadata to encode
        };
        // encoded bytes should be within `max_packet_size`
        if bytes.len() > self.max_packet_size {
            return Err(RPCError::InternalError(
                "attempting to encode data > max_packet_size",
            ));
        }

        // Inserts the length prefix of the uncompressed bytes into dst
        // encoded as a unsigned varint
        self.inner
            .encode(bytes.len(), dst)
            .map_err(RPCError::from)?;

        let mut writer = FrameEncoder::new(Vec::new());
        writer.write_all(&bytes).map_err(RPCError::from)?;
        writer.flush().map_err(RPCError::from)?;

        // Write compressed bytes to `dst`
        dst.extend_from_slice(writer.get_ref());
        Ok(())
    }
}

// Decoder for outbound streams: Decodes RPC responses from peers.
//
// The majority of the decoding has now been pushed upstream due to the changing specification.
// We prefer to decode blocks and attestations with extra knowledge about the chain to perform
// faster verification checks before decoding entire blocks/attestations.
impl Decoder for SnappyOutboundCodec {
    type Item = RPCResponse;
    type Error = RPCError;

    /// Decodes one response chunk; mirrors the inbound decoder, except
    /// Goodbye never carries a response and MetaData responses have a body.
    fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
        if self.len.is_none() {
            // Decode the length of the uncompressed bytes from an unsigned varint
            match self.inner.decode(src).map_err(RPCError::from)? {
                Some(length) => {
                    self.len = Some(length as usize);
                }
                None => return Ok(None), // need more bytes to decode length
            }
        };

        let length = self.len.expect("length should be Some");

        // Should not attempt to decode rpc chunks with length > max_packet_size
        if length > self.max_packet_size {
            return Err(RPCError::InvalidData);
        }
        let mut reader = FrameDecoder::new(Cursor::new(&src));
        let mut decoded_buffer = vec![0; length];
        match reader.read_exact(&mut decoded_buffer) {
            Ok(()) => {
                // `n` is how many bytes the reader read in the compressed stream
                let n = reader.get_ref().position();
                self.len = None;
                // NOTE(review): `_read_byts` is a typo for `_read_bytes`;
                // harmless since the binding is intentionally unused.
                let _read_byts = src.split_to(n as usize);
                match self.protocol.message_name {
                    Protocol::Status => match self.protocol.version {
                        Version::V1 => {
                            if decoded_buffer.len() > 0 {
                                Ok(Some(RPCResponse::Status(decoded_buffer)))
                            } else {
                                Err(RPCError::InvalidData)
                            }
                        }
                    },
                    // Goodbye messages never receive a response.
                    Protocol::Goodbye => Err(RPCError::InvalidData),
                    Protocol::Ping => match self.protocol.version {
                        Version::V1 => {
                            if decoded_buffer.len() > 0 {
                                Ok(Some(RPCResponse::Pong(decoded_buffer)))
                            } else {
                                Err(RPCError::InvalidData)
                            }
                        }
                    },
                    Protocol::MetaData => match self.protocol.version {
                        Version::V1 => {
                            if decoded_buffer.len() > 0 {
                                Ok(Some(RPCResponse::MetaData(decoded_buffer)))
                            } else {
                                Err(RPCError::InvalidData)
                            }
                        }
                    },
                }
            }
            Err(e) => match e.kind() {
                // Haven't received enough bytes to decode yet
                // TODO: check if this is the only Error variant where we return `Ok(None)`
                ErrorKind::UnexpectedEof => {
                    return Ok(None);
                }
                _ => return Err(e).map_err(RPCError::from),
            },
        }
    }
}

impl OutboundCodec<RPCRequest> for SnappyOutboundCodec {
    type ErrorType = String;

    /// Decodes an error-response chunk into a lossy UTF-8 string, using the
    /// same varint + snappy framing as `decode`.
    fn decode_error(&mut self, src: &mut BytesMut) -> Result<Option<Self::ErrorType>, RPCError> {
        if self.len.is_none() {
            // Decode the length of the uncompressed bytes from an unsigned varint
            match self.inner.decode(src).map_err(RPCError::from)? {
                Some(length) => {
                    self.len = Some(length as usize);
                }
                None => return Ok(None), // need more bytes to decode length
            }
        };

        let length = self.len.expect("length should be Some");

        // Should not attempt to decode rpc chunks with length > max_packet_size
        if length > self.max_packet_size {
            return Err(RPCError::InvalidData);
        }
        let mut reader = FrameDecoder::new(Cursor::new(&src));
        let mut decoded_buffer = vec![0; length];
        match reader.read_exact(&mut decoded_buffer) {
            Ok(()) => {
                // `n` is how many bytes the reader read in the compressed stream
                let n = reader.get_ref().position();
                self.len = None;
                let _read_bytes = src.split_to(n as usize);
                Ok(Some(String::from_utf8_lossy(&decoded_buffer).into()))
            }
            Err(e) => match e.kind() {
                // Haven't received enough bytes to decode yet
                // TODO: check if this is the only Error variant where we return `Ok(None)`
                ErrorKind::UnexpectedEof => {
                    return Ok(None);
                }
                _ => return Err(e).map_err(RPCError::from),
            },
        }
    }
}
38.616071
97
0.512987
de430ce8fff1568289cb3e0b67929bcdc9af46c4
638
use std::io::Write; use serde_json::{Result, Value}; fn t1() -> Result<()> { let data = r#" [ { "a": "jim", "b": "susie" }, { "a": 3, "b": 4 } ] "#; let mut json1 = String::from(data); //println!("{:?}", json1); json1.retain(|c| !c.is_whitespace()); // println!("{:?}", json1); let stdout = std::io::stdout(); match stdout.lock().write_all(json1.as_bytes()) { Ok(_) => (), Err(err) => eprintln!("{}", err), }; let _v: Value = serde_json::from_str(&json1)?; //println!("{:?}", v); Ok(()) } fn main() { println!("Hello, Bill!"); let _v = t1(); }
16.358974
53
0.459248
fe70fe6cd511cc16c6b6af3ab7147c04d3610124
3,630
use super::*;
use crate::element::*;
use crate::error::{Error, Result};
use serde::{Deserialize, Serialize};

/// Serde helper: allows the `query` field to be skipped when `false`
/// (its default), keeping serialized validators minimal.
#[inline]
fn is_false(v: &bool) -> bool {
    !v
}

/// Validator for boolean values.
///
/// This validator type will only pass booleans. Validation only passes if the value also
/// meets the `in`/`nin` requirements.
///
/// # Defaults
///
/// Fields that aren't specified for the validator use their defaults instead. The defaults for
/// each field are:
/// - comment: ""
/// - in_list: empty
/// - nin_list: empty
/// - query: false
///
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
#[serde(deny_unknown_fields, default)]
pub struct BoolValidator {
    /// An optional comment explaining the validator.
    #[serde(skip_serializing_if = "String::is_empty")]
    pub comment: String,
    /// A vector of specific allowed values, stored under the `in` field. If empty, this vector is not checked against.
    #[serde(rename = "in", skip_serializing_if = "Vec::is_empty")]
    pub in_list: Vec<bool>,
    /// A vector of specific unallowed values, stored under the `nin` field.
    #[serde(rename = "nin", skip_serializing_if = "Vec::is_empty")]
    pub nin_list: Vec<bool>,
    /// If true, queries against matching spots may have values in the `in` or `nin` lists.
    #[serde(skip_serializing_if = "is_false")]
    pub query: bool,
}

impl BoolValidator {
    /// Make a new validator with the default configuration.
    pub fn new() -> Self {
        Self::default()
    }

    /// Set a comment for the validator.
    pub fn comment(mut self, comment: impl Into<String>) -> Self {
        self.comment = comment.into();
        self
    }

    /// Add a value to the `in` list.
    pub fn in_add(mut self, add: bool) -> Self {
        self.in_list.push(add);
        self
    }

    /// Add a value to the `nin` list.
    pub fn nin_add(mut self, add: bool) -> Self {
        self.nin_list.push(add);
        self
    }

    /// Set whether or not queries can use the `in` and `nin` lists.
    pub fn query(mut self, query: bool) -> Self {
        self.query = query;
        self
    }

    /// Build this into a [`Validator`] enum.
pub fn build(self) -> Validator { Validator::Bool(self) } pub(crate) fn validate(&self, parser: &mut Parser) -> Result<()> { let elem = parser .next() .ok_or_else(|| Error::FailValidate("Expected a boolean".to_string()))??; let elem = if let Element::Bool(v) = elem { v } else { return Err(Error::FailValidate(format!( "Expected Bool, got {}", elem.name() ))); }; if !self.in_list.is_empty() && !self.in_list.iter().any(|v| *v == elem) { return Err(Error::FailValidate( "Boolean is not on `in` list".to_string(), )); } if self.nin_list.iter().any(|v| *v == elem) { return Err(Error::FailValidate("Boolean is on `nin` list".to_string())); } Ok(()) } fn query_check_bool(&self, other: &Self) -> bool { self.query || (other.in_list.is_empty() && other.nin_list.is_empty()) } pub(crate) fn query_check(&self, other: &Validator) -> bool { match other { Validator::Bool(other) => self.query_check_bool(other), Validator::Multi(list) => list.iter().all(|other| match other { Validator::Bool(other) => self.query_check_bool(other), _ => false, }), Validator::Any => true, _ => false, } } }
31.293103
119
0.575758
d7dec613e72ae8935a24e9ef47a8b12e7dc6be59
2,211
pub mod transient;
pub mod stateful;

use ethereum_types::{Address, U256};
use bytes::Bytes;

use crate::{model::{evmc::{
    Message, Output, TxContext, AccessStatus, StorageStatus
}, code::Code}, executor::journal::Snapshot};

use self::stateful::Account;

/// EVMC Host interface
/// https://evmc.ethereum.org/structevmc__host__interface.html
pub trait Host {
    /// Returns true if an account exists at `address`.
    fn account_exists(&self, address: Address) -> bool;

    /// Reads the storage slot `key` of `address`.
    fn get_storage(&self, address: Address, key: U256) -> U256;

    // slightly modified StorageStatus struct for the ease of gas cost/refund calculation
    fn set_storage(&mut self, address: Address, key: U256, value: U256) -> StorageStatus;

    /// Balance of `address` in wei.
    fn get_balance(&self, address: Address) -> U256;

    /// Size in bytes of the code stored at `address`.
    fn get_code_size(&self, address: Address) -> U256;

    /// Hash of the code stored at `address`.
    fn get_code_hash(&self, address: Address) -> U256;

    /// Copies `size` bytes of `address`'s code starting at `code_offset`
    /// to `memory_offset` (presumably into execution memory — the
    /// destination is implementation-defined here; confirm in impls).
    fn copy_code(&self, address: Address, code_offset: usize, memory_offset: usize, size: usize);

    /// Self-destructs `address`, crediting its balance to `beneficiary`.
    fn self_destruct(&mut self, address: Address, beneficiary: Address);

    /// Executes a nested call/create described by `msg`.
    fn call(&mut self, msg: &Message) -> Output;

    /// Transaction-level context (origin, gas price, block info, ...).
    fn get_tx_context(&self) -> TxContext;

    /// Emits a log record for `address` with `data` and `topics`.
    fn emit_log(&mut self, address: Address, data: &[u8], topics: &[U256]);

    /// Marks `address` accessed (EIP-2929), returning its prior warm/cold status.
    fn access_account(&mut self, address: Address) -> AccessStatus;

    /// Marks storage slot `key` of `address` accessed (EIP-2929),
    /// returning its prior warm/cold status.
    fn access_storage(&mut self, address: Address, key: U256) -> AccessStatus;

    // extensions

    /// Inserts `account` at `address`.
    fn add_account(&mut self, address: Address, account: Account);

    /// Test helper: raw storage read, bypassing access tracking.
    fn debug_get_storage(&self, address: Address, key: U256) -> U256;

    /// Test helper: raw storage write, bypassing access tracking.
    fn debug_set_storage(&mut self, address: Address, key: U256, new_value: U256);

    /// Test helper: force all storage to warm access status.
    fn debug_set_storage_as_warm(&mut self);

    /// Test helper: deploys `code` with `balance` at a hex-string address.
    fn debug_deploy_contract(&mut self, address_hex: &str, code: Code, balance: U256);

    /// Test helper: same as `debug_deploy_contract` but takes a typed address.
    fn debug_deploy_contract2(&mut self, address: Address, code: Code, balance: U256);

    /// Hash of the block at `height`.
    fn get_blockhash(&self, height: usize) -> U256;

    /// Returns `size` bytes of `address`'s code starting at `offset`.
    fn get_code(&self, address: Address, offset: usize, size: usize) -> Bytes;

    /// Credits `amount` to `address`.
    fn add_balance(&mut self, address: Address, amount: U256);

    /// Debits `amount` from `address`.
    fn subtract_balance(&mut self, address: Address, amount: U256);

    /// Captures the current journal state for later rollback.
    fn take_snapshot(&self) -> Snapshot;

    /// Reverts state to a previously taken `snapshot`.
    fn rollback(&mut self, snapshot: &Snapshot);

    /// Unconditionally writes a storage slot, skipping status bookkeeping.
    fn force_update_storage(&mut self, address: Address, key: U256, value: U256);
}
47.042553
97
0.701945
3383b40f5932b7bbd9c5647995931ec60355c877
144
fn main() {
    // Lifetime demo: a string literal is `&'static str`, and `'static`
    // outlives any caller-chosen `'a`, so the literal coerces to the
    // requested `Box<&'a str>` return type.
    fn get_box<'a>(input: &'a str) -> Box<&'a str> {
        Box::new("hello")
    }
}
20.571429
50
0.472222
2f53e0830ed69a95236fe77afd3e072f82663194
431
mod services;
use crate::services::start_container;
use actix_web::{middleware, App, HttpServer};

/// Entry point: configures request logging and serves the
/// `start_container` endpoint on all interfaces, port 17456.
#[actix_web::main]
async fn main() -> std::io::Result<()> {
    // Force actix-web request logging on, regardless of the environment.
    std::env::set_var("RUST_LOG", "actix_web=info");
    env_logger::init();

    HttpServer::new(|| {
        App::new()
            // Log every request using the RUST_LOG config above.
            .wrap(middleware::Logger::default())
            .service(start_container)
    })
    .bind(("0.0.0.0", 17456))?
    .run()
    .await
}
21.55
52
0.577726
2f3d36d3ae58359498b471b53bf839675251d0d2
6,573
use na::{DVector, RealField};
use std::ops::Range;

use crate::joint::JointConstraint;
use crate::math::{AngularVector, Point, Rotation, ANGULAR_DIM};
use crate::object::{BodyHandle, BodyPartHandle, BodySet};
use crate::solver::helper;
use crate::solver::{
    GenericNonlinearConstraint, IntegrationParameters, LinearConstraints,
    NonlinearConstraintGenerator,
};

/// A constraint that removes all relative angular motion between two body parts.
pub struct CartesianConstraint<N: RealField, Handle: BodyHandle> {
    // Handles to the two constrained body parts.
    b1: BodyPartHandle<Handle>,
    b2: BodyPartHandle<Handle>,
    // Local attach points and reference-frame rotations on each body.
    anchor1: Point<N>,
    ref_frame1: Rotation<N>,
    anchor2: Point<N>,
    ref_frame2: Rotation<N>,
    // Angular impulses cached between solver steps (warm starting).
    ang_impulses: AngularVector<N>,
    // Squared torque above which the joint breaks; defaults to N::max_value().
    break_torque_squared: N,
    broken: bool,
    // Index ranges of this constraint's rows inside the solver's
    // ground/non-ground bilateral constraint arrays.
    bilateral_ground_rng: Range<usize>,
    bilateral_rng: Range<usize>,
}

impl<N: RealField, Handle: BodyHandle> CartesianConstraint<N, Handle> {
    /// Creates a cartesian constraint between two body parts.
    ///
    /// This will ensure the rotational parts of the frames given identified by `ref_frame1` and
    /// `ref_frame2` and attached to the corresponding bodies will coincide.
    pub fn new(
        b1: BodyPartHandle<Handle>,
        b2: BodyPartHandle<Handle>,
        anchor1: Point<N>,
        ref_frame1: Rotation<N>,
        anchor2: Point<N>,
        ref_frame2: Rotation<N>,
    ) -> Self {
        CartesianConstraint {
            b1,
            b2,
            anchor1,
            ref_frame1,
            anchor2,
            ref_frame2,
            // Effectively unbreakable until set_break_torque is called.
            break_torque_squared: N::max_value(),
            broken: false,
            ang_impulses: AngularVector::zeros(),
            bilateral_ground_rng: 0..0,
            bilateral_rng: 0..0,
        }
    }

    /// Changes the reference frame for the first body part.
    pub fn set_reference_frame_1(&mut self, ref_frame1: Rotation<N>) {
        self.ref_frame1 = ref_frame1
    }

    /// Changes the reference frame for the second body part.
    pub fn set_reference_frame_2(&mut self, frame2: Rotation<N>) {
        self.ref_frame2 = frame2
    }

    /// Changes the attach point for the first body part.
    pub fn set_anchor_1(&mut self, anchor1: Point<N>) {
        self.anchor1 = anchor1
    }

    /// Changes the attach point for the second body part.
    pub fn set_anchor_2(&mut self, anchor2: Point<N>) {
        self.anchor2 = anchor2
    }

    /// The maximum torque this joint can absorb before breaking.
    pub fn set_break_torque(&mut self, break_torque: N) {
        // Stored squared so the break test avoids a square root.
        self.break_torque_squared = break_torque * break_torque;
    }
}

impl<N: RealField, Handle: BodyHandle> JointConstraint<N, Handle>
    for CartesianConstraint<N, Handle>
{
    fn is_broken(&self) -> bool {
        self.broken
    }

    fn num_velocity_constraints(&self) -> usize {
        // One velocity row per angular degree of freedom.
        ANGULAR_DIM
    }

    fn anchors(&self) -> (BodyPartHandle<Handle>, BodyPartHandle<Handle>) {
        (self.b1, self.b2)
    }

    /// Builds the velocity-level rows that cancel relative angular velocity
    /// between the two parts, recording where they land in `constraints`.
    fn velocity_constraints(
        &mut self,
        _: &IntegrationParameters<N>,
        bodies: &dyn BodySet<N, Handle = Handle>,
        ext_vels: &DVector<N>,
        ground_j_id: &mut usize,
        j_id: &mut usize,
        jacobians: &mut [N],
        constraints: &mut LinearConstraints<N, usize>,
    ) {
        // Silently skip if any referenced body/part no longer exists.
        let body1 = try_ret!(bodies.get(self.b1.0));
        let body2 = try_ret!(bodies.get(self.b2.0));
        let part1 = try_ret!(body1.part(self.b1.1));
        let part2 = try_ret!(body2.part(self.b2.1));

        // World-space frames of both anchors, including the reference rotations.
        let pos1 = body1.position_at_material_point(part1, &self.anchor1) * self.ref_frame1;
        let pos2 = body2.position_at_material_point(part2, &self.anchor2) * self.ref_frame2;

        let anchor1 = Point::from(pos1.translation.vector);
        let anchor2 = Point::from(pos2.translation.vector);

        let assembly_id1 = body1.companion_id();
        let assembly_id2 = body2.companion_id();

        // Remember where our rows start so cache_impulses can find them later.
        let first_bilateral_ground = constraints.bilateral_ground.len();
        let first_bilateral = constraints.bilateral.len();

        helper::cancel_relative_angular_velocity(
            body1,
            part1,
            self.b1,
            body2,
            part2,
            self.b2,
            assembly_id1,
            assembly_id2,
            &anchor1,
            &anchor2,
            ext_vels,
            &self.ang_impulses,
            0,
            ground_j_id,
            j_id,
            jacobians,
            constraints,
        );

        self.bilateral_ground_rng = first_bilateral_ground..constraints.bilateral_ground.len();
        self.bilateral_rng = first_bilateral..constraints.bilateral.len();
    }

    /// Stores solved impulses for warm starting and flags the joint as broken
    /// when the applied torque exceeds the configured break threshold.
    fn cache_impulses(&mut self, constraints: &LinearConstraints<N, usize>, inv_dt: N) {
        for c in &constraints.bilateral_ground[self.bilateral_ground_rng.clone()] {
            self.ang_impulses[c.impulse_id] = c.impulse;
        }

        for c in &constraints.bilateral[self.bilateral_rng.clone()] {
            self.ang_impulses[c.impulse_id] = c.impulse;
        }

        // impulse * inv_dt is torque; compare squared magnitudes.
        if self.ang_impulses.norm_squared() * inv_dt * inv_dt > self.break_torque_squared {
            self.broken = true;
        }
    }
}

impl<N: RealField, Handle: BodyHandle> NonlinearConstraintGenerator<N, Handle>
    for CartesianConstraint<N, Handle>
{
    fn num_position_constraints(&self, bodies: &dyn BodySet<N, Handle = Handle>) -> usize {
        // FIXME: calling this at each iteration of the non-linear resolution is costly.
        if self.is_active(bodies) {
            1
        } else {
            0
        }
    }

    /// Builds the position-level correction that re-aligns the two reference
    /// frames' rotations; returns `None` if any body/part is gone.
    fn position_constraint(
        &self,
        parameters: &IntegrationParameters<N>,
        _: usize,
        bodies: &mut dyn BodySet<N, Handle = Handle>,
        jacobians: &mut [N],
    ) -> Option<GenericNonlinearConstraint<N, Handle>> {
        let body1 = bodies.get(self.b1.0)?;
        let body2 = bodies.get(self.b2.0)?;
        let part1 = body1.part(self.b1.1)?;
        let part2 = body2.part(self.b2.1)?;

        let pos1 = body1.position_at_material_point(part1, &self.anchor1) * self.ref_frame1;
        let pos2 = body2.position_at_material_point(part2, &self.anchor2) * self.ref_frame2;

        let anchor1 = Point::from(pos1.translation.vector);
        let anchor2 = Point::from(pos2.translation.vector);
        let rotation1 = pos1.rotation;
        let rotation2 = pos2.rotation;

        helper::cancel_relative_rotation(
            parameters,
            body1,
            part1,
            self.b1,
            body2,
            part2,
            self.b2,
            &anchor1,
            &anchor2,
            &rotation1,
            &rotation2,
            jacobians,
        )
    }
}
32.539604
96
0.621634
ab40799ae0688147a99788849cb621aeff3d17ec
78,085
use ast::*; use env::Env; use span::{Node, Span}; fn ident<T: From<Identifier>>(i: &str) -> T { Identifier { name: i.to_string(), } .into() } impl<T> From<T> for Node<T> { fn from(t: T) -> Node<T> { Node::new(t, Span::none()) } } impl<T> From<T> for Box<Node<T>> { fn from(t: T) -> Box<Node<T>> { Box::new(t.into()) } } impl<'a> From<&'a str> for Node<String> { fn from(t: &'a str) -> Node<String> { t.to_owned().into() } } macro_rules! mk_from_inner { ( $( $i:ident => $p:ident :: $v:ident ; )* ) => ( $( impl From<$i> for $p { fn from(i: $i) -> $p { $p::$v(i.into()) } } impl From<$i> for Node<$p> { fn from(i: $i) -> Node<$p> { $p::$v(i.into()).into() } } impl From<$i> for Box<Node<$p>> { fn from(i: $i) -> Box<Node<$p>> { $p::$v(i.into()).into() } } )* ); } mk_from_inner! { ArrayDeclarator => DerivedDeclarator::Array; AsmStatement => Statement::Asm; Attribute => Extension::Attribute; AvailabilityAttribute => Extension::AvailabilityAttribute; CallExpression => Expression::Call; CallExpression => Initializer::Expression; CallExpression => TypeOf::Expression; CastExpression => Expression::Cast; CompoundLiteral => Expression::CompoundLiteral; Constant => Expression::Constant; Constant => Initializer::Expression; Declaration => BlockItem::Declaration; Declaration => ExternalDeclaration::Declaration; Declarator => DeclaratorKind::Declarator; DoWhileStatement => Statement::DoWhile; EnumType => DeclarationSpecifier::TypeSpecifier; EnumType => TypeSpecifier::Enum; Expression => Initializer::Expression; FunctionDeclarator => DerivedDeclarator::Function; FunctionDefinition => ExternalDeclaration::FunctionDefinition; FunctionSpecifier => DeclarationSpecifier::Function; GnuExtendedAsmStatement => AsmStatement::GnuExtended; GnuExtendedAsmStatement => Statement::Asm; Identifier => DeclaratorKind::Identifier; Identifier => Expression::Identifier; IfStatement => Statement::If; Statement => BlockItem::Statement; Statement => Expression::Statement; StaticAssert => 
ExternalDeclaration::StaticAssert; StorageClassSpecifier => DeclarationSpecifier::StorageClass; StructField => StructDeclaration::Field; StructType => DeclarationSpecifier::TypeSpecifier; StructType => SpecifierQualifier::TypeSpecifier; StructType => TypeSpecifier::Struct; TS18661FloatType => DeclarationSpecifier::TypeSpecifier; TS18661FloatType => TypeSpecifier::TS18661Float; TypeQualifier => DeclarationSpecifier::TypeQualifier; TypeQualifier => PointerQualifier::TypeQualifier; TypeSpecifier => DeclarationSpecifier::TypeSpecifier; TypeSpecifier => SpecifierQualifier::TypeSpecifier; } mod expr { use ast::*; use span::Node; pub fn string<T: From<Expression>>(i: &str) -> T { Expression::StringLiteral(vec![i.to_string()].into()).into() } pub fn unop<T: From<Expression>>(op: UnaryOperator, e: Expression) -> T { Expression::UnaryOperator( UnaryOperatorExpression { operator: op.into(), operand: e.into(), } .into(), ) .into() } pub fn binop<T: From<Expression>>(op: BinaryOperator, a: Expression, b: Expression) -> T { Expression::BinaryOperator( BinaryOperatorExpression { operator: op.into(), lhs: a.into(), rhs: b.into(), } .into(), ) .into() } pub fn member<T: From<Expression>>( op: MemberOperator, e: Expression, i: Node<Identifier>, ) -> T { Expression::Member( MemberExpression { operator: op.into(), expression: Box::new(e.into()), identifier: i, } .into(), ) .into() } } mod int { use ast::*; pub fn num<T: From<Constant>>(base: IntegerBase, number: &str, suffix: IntegerSuffix) -> T { Constant::Integer(Integer { base: base, number: number.to_string().into_boxed_str(), suffix: suffix, }) .into() } pub const NONE: IntegerSuffix = IntegerSuffix { size: IntegerSize::Int, unsigned: false, imaginary: false, }; pub const UL: IntegerSuffix = IntegerSuffix { size: IntegerSize::Long, unsigned: true, imaginary: false, }; pub fn zero<T: From<Constant>>() -> T { num(IntegerBase::Decimal, "0", NONE.clone()) } pub fn dec<T: From<Constant>>(n: &str) -> T { num(IntegerBase::Decimal, n, 
NONE.clone()) } } mod float { use ast::*; pub fn num<T: From<Constant>>(base: FloatBase, number: &str, suffix: FloatSuffix) -> T { Constant::Float(Float { base: base, number: number.to_string().into_boxed_str(), suffix: suffix, }) .into() } pub const NONE: FloatSuffix = FloatSuffix { format: FloatFormat::Double, imaginary: false, }; pub fn dec<T: From<Constant>>(n: &str) -> T { num(FloatBase::Decimal, n, NONE.clone()) } } fn cchar(i: &str) -> Constant { Constant::Character(i.to_string()) } fn cstr<T: From<StringLiteral>>(i: &[&str]) -> T { i.into_iter() .map(|s| String::from(*s)) .collect::<Vec<String>>() .into() } #[test] fn test_integer() { use self::int::{num, NONE, UL}; use ast::IntegerBase::*; use parser::constant; let env = &mut Env::new(); assert_eq!(constant("0", env), Ok(num(Decimal, "0", NONE.clone()))); assert_eq!(constant("1", env), Ok(num(Decimal, "1", NONE.clone()))); assert_eq!( constant("1234567890", env), Ok(num(Decimal, "1234567890", NONE.clone())) ); assert_eq!( constant("01234567", env), Ok(num(Octal, "1234567", NONE.clone())) ); assert_eq!( constant("0x1234567890abdefABCDEF", env), Ok(num(Hexadecimal, "1234567890abdefABCDEF", NONE.clone())) ); assert_eq!( constant("0b0001001000110100", env), Ok(num(Binary, "0001001000110100", NONE.clone())) ); assert_eq!(constant("042lu", env), Ok(num(Octal, "42", UL.clone()))); assert_eq!(constant("042ul", env), Ok(num(Octal, "42", UL.clone()))); assert_eq!(constant("042uL", env), Ok(num(Octal, "42", UL.clone()))); assert!(constant("1a", env).is_err()); assert!(constant("08", env).is_err()); assert!(constant("0xX", env).is_err()); assert!(constant("1lul", env).is_err()); assert!(constant("2lL", env).is_err()); assert!(constant("0b2", env).is_err()); } #[test] fn test_floating() { use self::float::*; use ast::FloatBase::*; use parser::constant; let env = &mut Env::new(); const F: FloatSuffix = FloatSuffix { format: FloatFormat::Float, imaginary: false, }; const L: FloatSuffix = FloatSuffix { format: 
FloatFormat::LongDouble, imaginary: false, }; assert_eq!(constant("2.", env), Ok(num(Decimal, "2.", NONE.clone()))); assert_eq!( constant("2.e2", env), Ok(num(Decimal, "2.e2", NONE.clone())) ); assert_eq!(constant(".2", env), Ok(num(Decimal, ".2", NONE.clone()))); assert_eq!( constant(".2e2", env), Ok(num(Decimal, ".2e2", NONE.clone())) ); assert_eq!(constant("2.0", env), Ok(num(Decimal, "2.0", NONE.clone()))); assert_eq!(constant("2.0f", env), Ok(num(Decimal, "2.0", F.clone()))); assert_eq!( constant("24.01e100", env), Ok(num(Decimal, "24.01e100", NONE.clone())) ); assert_eq!( constant("24.01e+100", env), Ok(num(Decimal, "24.01e+100", NONE.clone())) ); assert_eq!( constant("24.01e-100", env), Ok(num(Decimal, "24.01e-100", NONE.clone())) ); assert_eq!( constant("24.01e100f", env), Ok(num(Decimal, "24.01e100", F.clone())) ); assert_eq!( constant("0x2Ap19L", env), Ok(num(Hexadecimal, "2Ap19", L.clone())) ); assert_eq!( constant("0x2A.p19L", env), Ok(num(Hexadecimal, "2A.p19", L.clone())) ); assert_eq!( constant("0x.DEp19L", env), Ok(num(Hexadecimal, ".DEp19", L.clone())) ); assert_eq!( constant("0x2A.DEp19L", env), Ok(num(Hexadecimal, "2A.DEp19", L.clone())) ); } #[test] fn ts18661_literal() { use self::float::*; use ast::FloatBase::*; use parser::constant; let env = &mut Env::new(); const F16: FloatSuffix = FloatSuffix { format: FloatFormat::TS18661Format(TS18661FloatType { format: TS18661FloatFormat::BinaryInterchange, width: 16, }), imaginary: false, }; const F64: FloatSuffix = FloatSuffix { format: FloatFormat::TS18661Format(TS18661FloatType { format: TS18661FloatFormat::BinaryInterchange, width: 64, }), imaginary: false, }; assert_eq!( constant("1.0f64", env), Ok(num(Decimal, "1.0", F64.clone())) ); assert_eq!( constant("0xAp1f16", env), Ok(num(Hexadecimal, "Ap1", F16.clone())) ); } #[test] fn test_character() { use parser::constant; let env = &mut Env::new(); assert_eq!(constant("'a'", env), Ok(cchar("'a'"))); assert_eq!(constant(r"'\n'", env), 
Ok(cchar(r"'\n'"))); assert_eq!(constant(r"'\\'", env), Ok(cchar(r"'\\'"))); assert_eq!(constant(r"'\''", env), Ok(cchar(r"'\''"))); assert_eq!(constant(r"'\1'", env), Ok(cchar(r"'\1'"))); assert_eq!(constant(r"'\02'", env), Ok(cchar(r"'\02'"))); assert_eq!(constant(r"'\027'", env), Ok(cchar(r"'\027'"))); assert_eq!(constant(r"'\xde'", env), Ok(cchar(r"'\xde'"))); } #[test] fn test_string() { use self::expr::*; use parser::expression; let env = &mut Env::new(); assert_eq!(expression(r#""foo""#, env), Ok(string(r#""foo""#))); assert_eq!(expression(r#""foo\n""#, env), Ok(string(r#""foo\n""#))); assert_eq!(expression(r#""\'\"""#, env), Ok(string(r#""\'\"""#))); assert_eq!(expression(r#""\xaf""#, env), Ok(string(r#""\xaf""#))); } #[test] fn test_postfix() { use self::expr::*; use ast::BinaryOperator::Index; use ast::MemberOperator::{Direct, Indirect}; use ast::UnaryOperator::PostIncrement; use parser::expression; let env = &mut Env::new(); assert_eq!( expression("a ++", env), Ok(unop(PostIncrement, ident("a"))) ); assert_eq!( expression("a.b->c[ d[ e ] ] ++", env), Ok(unop( PostIncrement, binop( Index, member(Indirect, member(Direct, ident("a"), ident("b")), ident("c")), binop(Index, ident("d"), ident("e")), ), )) ); } #[test] fn test_multiplicative() { use self::expr::*; use ast::BinaryOperator::{Divide, Multiply}; use ast::UnaryOperator::{PostDecrement, PreIncrement}; use parser::expression; let env = &mut Env::new(); assert_eq!( expression("a-- * ++b / c", env), Ok(binop( Divide, binop( Multiply, unop(PostDecrement, ident("a")), unop(PreIncrement, ident("b")), ), ident("c"), )) ); } #[test] fn test_logical_and() { use self::expr::*; use ast::BinaryOperator::LogicalAnd; use parser::expression; let env = &mut Env::new(); assert_eq!( expression("a && b", env), Ok(binop(LogicalAnd, ident("a"), ident("b"))) ); } #[test] fn test_chained_and() { use self::expr::*; use ast::BinaryOperator::LogicalAnd; use parser::expression; let env = &mut Env::new(); assert_eq!( 
expression("a && b && c", env), Ok(binop( LogicalAnd, binop(LogicalAnd, ident("a"), ident("b")), ident("c"), )) ); } #[test] fn test_chained_or() { use self::expr::*; use ast::BinaryOperator::LogicalOr; use parser::expression; let env = &mut Env::new(); assert_eq!( expression("a || b || c", env), Ok(binop( LogicalOr, binop(LogicalOr, ident("a"), ident("b")), ident("c"), )) ); } #[test] fn test_chained_shl() { use self::expr::*; use ast::BinaryOperator::ShiftLeft; use parser::expression; let env = &mut Env::new(); assert_eq!( expression("a << b << c", env), Ok(binop( ShiftLeft, binop(ShiftLeft, ident("a"), ident("b")), ident("c"), )) ); } #[test] fn test_chained_shr() { use self::expr::*; use ast::BinaryOperator::ShiftRight; use parser::expression; let env = &mut Env::new(); assert_eq!( expression("a >> b >> c", env), Ok(binop( ShiftRight, binop(ShiftRight, ident("a"), ident("b")), ident("c"), )) ); } #[test] fn test_comma() { use ast::Expression::Comma; use parser::expression; let env = &mut Env::new(); assert_eq!(expression("a", env), Ok(ident("a"))); assert_eq!( expression("a, a, a,a\n,a", env), Ok(Comma(Box::new(vec![ident("a"); 5].into())).into()) ); } #[test] fn test_cast() { use ast::TypeName; use ast::TypeSpecifier::Int; use env::Env; use parser::expression; let env = &mut Env::new(); assert_eq!( expression("(int) 1", env), Ok(CastExpression { type_name: TypeName { specifiers: vec![Int.into()], declarator: None, } .into(), expression: int::dec("1"), } .into()) ); assert!(expression("(foo) 1", env).is_err()); } #[test] fn test_declaration1() { use ast::ArraySize::{StaticExpression, VariableUnknown}; use ast::DerivedDeclarator::Pointer; use ast::StorageClassSpecifier::Typedef; use ast::TypeQualifier::Const; use ast::TypeSpecifier::Int; use parser::declaration; let env = &mut Env::new(); assert_eq!( declaration("int typedef * foo, baz[static 10][const *];", env), Ok(Declaration { specifiers: vec![Int.into(), Typedef.into()], declarators: vec![ InitDeclarator { 
declarator: Declarator { kind: ident("foo"), derived: vec![Pointer(vec![]).into()], extensions: vec![], } .into(), initializer: None, } .into(), InitDeclarator { declarator: Declarator { kind: ident("baz"), derived: vec![ ArrayDeclarator { qualifiers: vec![], size: StaticExpression(int::dec("10")), } .into(), ArrayDeclarator { qualifiers: vec![Const.into()], size: VariableUnknown, } .into(), ], extensions: vec![], } .into(), initializer: None, } .into(), ], } .into()) ); assert!(env.is_typename("foo")); assert!(env.is_typename("baz")); } #[test] fn test_declaration2() { use ast::DerivedDeclarator::Pointer; use ast::Enumerator; use ast::StorageClassSpecifier::Typedef; use ast::TypeQualifier::Const; use parser::declaration; let env = &mut Env::new(); assert_eq!( declaration("typedef enum { FOO, BAR = 1 } * const foobar;", env), Ok(Declaration { specifiers: vec![ Typedef.into(), EnumType { identifier: None, enumerators: vec![ Enumerator { identifier: ident("FOO"), expression: None, } .into(), Enumerator { identifier: ident("BAR"), expression: Some(int::dec("1")), } .into(), ], } .into(), ], declarators: vec![InitDeclarator { declarator: Declarator { kind: ident("foobar"), derived: vec![Pointer(vec![Const.into()]).into()], extensions: vec![], } .into(), initializer: None, } .into()], } .into()) ); assert!(env.is_typename("foobar")); } #[test] fn test_declaration3() { use ast::TypeSpecifier::{Float, Int}; use parser::declaration; let env = &mut Env::new(); assert_eq!( declaration("struct { int a, b; float c; } S;", env).unwrap(), Declaration { specifiers: vec![StructType { kind: StructKind::Struct.into(), identifier: None, declarations: Some(vec![ StructField { specifiers: vec![Int.into()], declarators: vec![ StructDeclarator { declarator: Some( Declarator { kind: ident("a"), derived: vec![], extensions: vec![], } .into(), ), bit_width: None, } .into(), StructDeclarator { declarator: Some( Declarator { kind: ident("b"), derived: vec![], extensions: vec![], } .into(), ), 
bit_width: None, } .into(), ], } .into(), StructField { specifiers: vec![Float.into()], declarators: vec![StructDeclarator { declarator: Some( Declarator { kind: ident("c"), derived: vec![], extensions: vec![], } .into(), ), bit_width: None, } .into()], } .into(), ]), } .into()], declarators: vec![InitDeclarator { declarator: Declarator { kind: ident("S"), derived: vec![], extensions: vec![], } .into(), initializer: None, } .into()], } .into() ); } #[test] fn test_declaration4() { use ast::TypeQualifier::Restrict; use ast::TypeSpecifier::Int; use parser::declaration; assert_eq!( declaration("int __restrict__;", &mut Env::with_core()), Ok(Declaration { specifiers: vec![Int.into()], declarators: vec![InitDeclarator { declarator: Declarator { kind: ident("__restrict__"), derived: vec![], extensions: vec![], } .into(), initializer: None, } .into()], } .into()) ); assert_eq!( declaration("int __restrict__;", &mut Env::with_gnu()), Ok(Declaration { specifiers: vec![Int.into(), Restrict.into()], declarators: vec![], } .into()) ); } #[test] fn test_declaration5() { use self::int::dec; use ast::ArraySize::VariableExpression; use ast::DeclaratorKind::Abstract; use ast::DerivedDeclarator::Pointer; use ast::TypeQualifier::Const; use ast::TypeSpecifier::{Char, Int, TypedefName}; use parser::declaration; let env = &mut Env::new(); env.add_typename("FILE"); env.add_typename("size_t"); assert_eq!( declaration( "char *fparseln(FILE *, size_t *, size_t *, const char[3], int);", env ), Ok(Declaration { specifiers: vec![Char.into()], declarators: vec![InitDeclarator { declarator: Declarator { kind: ident("fparseln"), derived: vec![ Pointer(vec![]).into(), FunctionDeclarator { parameters: vec![ ParameterDeclaration { specifiers: vec![TypedefName(ident("FILE")).into()], declarator: Some( Declarator { kind: Abstract.into(), derived: vec![Pointer(vec![]).into()], extensions: vec![], } .into(), ), extensions: vec![], } .into(), ParameterDeclaration { specifiers: 
vec![TypedefName(ident("size_t")).into()], declarator: Some( Declarator { kind: Abstract.into(), derived: vec![Pointer(vec![]).into()], extensions: vec![], } .into(), ), extensions: vec![], } .into(), ParameterDeclaration { specifiers: vec![TypedefName(ident("size_t")).into()], declarator: Some( Declarator { kind: Abstract.into(), derived: vec![Pointer(vec![]).into()], extensions: vec![], } .into(), ), extensions: vec![], } .into(), ParameterDeclaration { specifiers: vec![Const.into(), Char.into()], declarator: Some( Declarator { kind: Abstract.into(), derived: vec![ArrayDeclarator { qualifiers: vec![], size: VariableExpression(dec("3")), } .into()], extensions: vec![], } .into(), ), extensions: vec![], } .into(), ParameterDeclaration { specifiers: vec![Int.into()], declarator: None, extensions: vec![], } .into(), ], ellipsis: Ellipsis::None, } .into(), ], extensions: vec![], } .into(), initializer: None, } .into()], } .into()) ); } #[test] fn test_attribute() { use ast::DerivedDeclarator::Pointer; use ast::Extension::AsmLabel; use ast::StorageClassSpecifier::Extern; use ast::TypeSpecifier::{Char, Int, TypedefName}; use parser::declaration; let env = &mut Env::new(); env.add_typename("size_t"); assert_eq!( declaration( concat!( "extern int strerror_r (int __errnum, char *__buf, size_t __buflen)\n", "__asm__ (\"\" \"__xpg_strerror_r\") __attribute__ ((__nothrow__ , __leaf__))\n", "__attribute__ ((__nonnull__ (2)));", ), env, ), Ok(Declaration { specifiers: vec![Extern.into(), Int.into()], declarators: vec![InitDeclarator { declarator: Declarator { kind: ident("strerror_r"), derived: vec![FunctionDeclarator { parameters: vec![ ParameterDeclaration { specifiers: vec![Int.into()], declarator: Some( Declarator { kind: ident("__errnum"), derived: vec![], extensions: vec![], } .into(), ), extensions: vec![], } .into(), ParameterDeclaration { specifiers: vec![Char.into()], declarator: Some( Declarator { kind: ident("__buf"), derived: vec![Pointer(vec![]).into()], 
extensions: vec![], } .into(), ), extensions: vec![], } .into(), ParameterDeclaration { specifiers: vec![TypedefName(ident("size_t")).into()], declarator: Some( Declarator { kind: ident("__buflen"), derived: vec![], extensions: vec![], } .into(), ), extensions: vec![], } .into(), ], ellipsis: Ellipsis::None, } .into()], extensions: vec![ AsmLabel(cstr(&[r#""""#, r#""__xpg_strerror_r""#])).into(), Attribute { name: "__nothrow__".into(), arguments: vec![], } .into(), Attribute { name: "__leaf__".into(), arguments: vec![], } .into(), Attribute { name: "__nonnull__".into(), arguments: vec![int::dec("2")], } .into(), ], } .into(), initializer: None, } .into()], } .into()) ); } #[test] fn test_attribute2() { use self::int::dec; use ast::DeclarationSpecifier::Extension; use ast::DeclaratorKind::Abstract; use ast::DerivedDeclarator::Pointer; use ast::TypeQualifier::Const; use ast::TypeSpecifier::{Char, Void}; use parser::declaration; assert_eq!( declaration( r#"__attribute__((noreturn)) void d0 (void), __attribute__((format(printf, 1, 2))) d1 (const char *, ...), d2 (void);"#, &mut Env::new() ), Ok(Declaration { specifiers: vec![ Extension(vec![Attribute { name: "noreturn".into(), arguments: vec![], } .into()]) .into(), Void.into(), ], declarators: vec![ InitDeclarator { declarator: Declarator { kind: ident("d0"), derived: vec![FunctionDeclarator { parameters: vec![ParameterDeclaration { specifiers: vec![Void.into()], declarator: None, extensions: vec![], } .into()], ellipsis: Ellipsis::None, } .into()], extensions: vec![], } .into(), initializer: None, } .into(), InitDeclarator { declarator: Declarator { kind: ident("d1"), derived: vec![FunctionDeclarator { parameters: vec![ParameterDeclaration { specifiers: vec![Const.into(), Char.into()], declarator: Some( Declarator { kind: Abstract.into(), derived: vec![Pointer(vec![]).into()], extensions: vec![], } .into(), ), extensions: vec![], } .into()], ellipsis: Ellipsis::Some, } .into()], extensions: vec![Attribute { name: 
"format".into(), arguments: vec![ident("printf"), dec("1"), dec("2")], } .into()], } .into(), initializer: None, } .into(), InitDeclarator { declarator: Declarator { kind: ident("d2"), derived: vec![FunctionDeclarator { parameters: vec![ParameterDeclaration { specifiers: vec![Void.into()], declarator: None, extensions: vec![], } .into()], ellipsis: Ellipsis::None, } .into()], extensions: vec![], } .into(), initializer: None, } .into(), ], } .into()) ); } #[test] fn test_attribute3() { use ast::DeclarationSpecifier::Extension; use ast::DerivedDeclarator::Pointer; use ast::FunctionSpecifier::Inline; use ast::Statement::Compound; use ast::StorageClassSpecifier::Extern; use ast::TypeQualifier::{Const, Restrict}; use ast::TypeSpecifier::Char; use parser::translation_unit; assert_eq!( translation_unit( concat!( "extern __inline __attribute__ ((__always_inline__)) __attribute__ \n", "((__artificial__)) __attribute__ ((__warn_unused_result__)) char *\n", "__attribute__ ((__nothrow__ , __leaf__)) realpath (const char *__restrict\n", "__name, char *__restrict __resolved) {}" ), &mut Env::new() ), Ok(TranslationUnit(vec![ ExternalDeclaration::FunctionDefinition( FunctionDefinition { specifiers: vec![ Extern.into(), Inline.into(), Extension(vec![Attribute { name: "__always_inline__".into(), arguments: vec![], } .into()]) .into(), Extension(vec![Attribute { name: "__artificial__".into(), arguments: vec![], } .into()]) .into(), Extension(vec![Attribute { name: "__warn_unused_result__".into(), arguments: vec![], } .into()]) .into(), Char.into(), ], declarator: Declarator { kind: ident("realpath"), derived: vec![ Pointer(vec![PointerQualifier::Extension(vec![ Attribute { name: "__nothrow__".into(), arguments: vec![], } .into(), Attribute { name: "__leaf__".into(), arguments: vec![], } .into(), ]) .into()]) .into(), FunctionDeclarator { parameters: vec![ ParameterDeclaration { specifiers: vec![Const.into(), Char.into()], declarator: Some( Declarator { kind: ident("__name"), 
derived: vec![ Pointer(vec![Restrict.into()]).into() ], extensions: vec![], } .into(), ), extensions: vec![], } .into(), ParameterDeclaration { specifiers: vec![Char.into()], declarator: Some( Declarator { kind: ident("__resolved"), derived: vec![ Pointer(vec![Restrict.into()]).into() ], extensions: vec![], } .into(), ), extensions: vec![], } .into(), ], ellipsis: Ellipsis::None, } .into(), ], extensions: vec![], } .into(), declarations: vec![], statement: Compound(vec![]).into(), } .into(), ) .into() ])) .into() ); } #[test] fn test_alignof() { use ast::Expression::AlignOf; use ast::TypeSpecifier::Long; use parser::expression; assert_eq!( expression("_Alignof(long long)", &mut Env::new()), Ok(AlignOf( TypeName { specifiers: vec![Long.into(), Long.into()], declarator: None, } .into(), ) .into()) ); assert_eq!( expression("__alignof(long long)", &mut Env::new()), Ok(AlignOf( TypeName { specifiers: vec![Long.into(), Long.into()], declarator: None, } .into(), ) .into()) ); assert_eq!( expression("__alignof__(long long)", &mut Env::new()), Ok(AlignOf( TypeName { specifiers: vec![Long.into(), Long.into()], declarator: None, } .into(), ) .into()) ); } #[test] fn test_stmt_expr() { use ast::Statement::{Compound, Expression}; use ast::TypeSpecifier::Int; use parser::expression; assert_eq!( expression("({ int p = 0; p; })", &mut Env::new()), Ok(Compound(vec![ Declaration { specifiers: vec![Int.into()], declarators: vec![InitDeclarator { declarator: Declarator { kind: ident("p"), derived: vec![], extensions: vec![], } .into(), initializer: Some(int::zero()), } .into()], } .into(), Expression(Some(ident("p"))).into(), ]) .into()) ); } #[test] fn test_expr_cast() { use ast::TypeName; use ast::TypeSpecifier::TypedefName; use parser::expression; let env = &mut Env::new(); env.add_typename("U64"); assert_eq!( expression("(U64)foo", env), Ok(CastExpression { type_name: TypeName { specifiers: vec![TypedefName(ident("U64")).into()], declarator: None, } .into(), expression: 
ident("foo"), } .into()) ); } #[test] fn test_directives() { use parser::translation_unit; assert_eq!( translation_unit( r#"# 1 "<stdin>" # 1 "<built-in>" # 1 "<command-line>" # 31 "<command-line>" # 1 "/usr/include/stdc-predef.h" 1 3 4 # 32 "<command-line>" 2 # 1 "<stdin>" "#, &mut Env::new() ), Ok(TranslationUnit(vec![])) ); } #[test] fn test_gnu_asm() { use parser::statement; assert_eq!( statement( r#"__asm ("pmovmskb %1, %0" : "=r" (__m) : "x" (__x));"#, &mut Env::new() ), Ok(GnuExtendedAsmStatement { qualifier: None, template: cstr(&[r#""pmovmskb %1, %0""#]), outputs: vec![GnuAsmOperand { symbolic_name: None, constraints: cstr(&[r#""=r""#]), variable_name: ident("__m"), } .into()], inputs: vec![GnuAsmOperand { symbolic_name: None, constraints: cstr(&[r#""x""#]), variable_name: ident("__x"), } .into()], clobbers: vec![], } .into()) ); } #[test] fn test_union() { use self::int::dec; use ast::ArraySize::VariableExpression; use ast::Designator::Member; use ast::Initializer::{Expression, List}; use ast::TypeSpecifier::{Double, Int, Long}; use parser::declaration; assert_eq!( declaration( "union { long double __l; int __i[3]; } __u = { __l: __x };", &mut Env::new() ), Ok(Declaration { specifiers: vec![StructType { kind: StructKind::Union.into(), identifier: None, declarations: Some(vec![ StructField { specifiers: vec![Long.into(), Double.into()], declarators: vec![StructDeclarator { declarator: Some( Declarator { kind: ident("__l"), derived: vec![], extensions: vec![], } .into(), ), bit_width: None, } .into()], } .into(), StructField { specifiers: vec![Int.into()], declarators: vec![StructDeclarator { declarator: Some( Declarator { kind: ident("__i"), derived: vec![ArrayDeclarator { qualifiers: vec![], size: VariableExpression(dec("3")), } .into()], extensions: vec![], } .into(), ), bit_width: None, } .into()], } .into(), ]), } .into()], declarators: vec![InitDeclarator { declarator: Declarator { kind: ident("__u"), derived: vec![], extensions: vec![], } .into(), 
initializer: Some( List(vec![InitializerListItem { designation: vec![Member(ident("__l")).into()], initializer: Expression(ident("__x")).into(), } .into()]) .into(), ), } .into()], } .into()) ); } #[test] fn test_offsetof() { use self::int::dec; use ast::ArraySize::VariableExpression; use ast::Expression::OffsetOf; use ast::OffsetMember::IndirectMember; use ast::TypeSpecifier::Int; use parser::expression; assert_eq!( expression( "__builtin_offsetof(struct { struct { int b; } a[2]; }, a->b)", &mut Env::new() ), Ok(OffsetOf( OffsetOfExpression { type_name: TypeName { specifiers: vec![StructType { kind: StructKind::Struct.into(), identifier: None, declarations: Some(vec![StructField { specifiers: vec![StructType { kind: StructKind::Struct.into(), identifier: None, declarations: Some(vec![StructField { specifiers: vec![Int.into()], declarators: vec![StructDeclarator { declarator: Some( Declarator { kind: ident("b"), derived: vec![], extensions: vec![], } .into(), ), bit_width: None, } .into()], } .into()]), } .into()], declarators: vec![StructDeclarator { declarator: Some( Declarator { kind: ident("a"), derived: vec![ArrayDeclarator { qualifiers: vec![], size: VariableExpression(dec("2")), } .into()], extensions: vec![], } .into(), ), bit_width: None, } .into()], } .into()]), } .into()], declarator: None, } .into(), designator: OffsetDesignator { base: ident("a"), members: vec![IndirectMember(ident("b")).into()], } .into(), } .into() ) .into()) ); } #[test] fn test_call() { use parser::expression; assert_eq!( expression("foo(bar, baz)", &mut Env::new()), Ok(CallExpression { callee: ident("foo"), arguments: vec![ident("bar"), ident("baz")], } .into()) ); } #[test] fn test_typeof() { use ast::TypeSpecifier::TypeOf; use parser::declaration; assert_eq!( declaration( "__typeof__(foo(bar, baz)) ook = foo(bar, baz);", &mut Env::new() ), Ok(Declaration { specifiers: vec![TypeOf( CallExpression { callee: ident("foo"), arguments: vec![ident("bar"), ident("baz")], } .into(), ) 
.into()], declarators: vec![InitDeclarator { declarator: Declarator { kind: ident("ook"), derived: vec![], extensions: vec![], } .into(), initializer: Some( CallExpression { callee: ident("foo"), arguments: vec![ident("bar"), ident("baz")], } .into(), ), } .into()], } .into()) ); } #[test] fn test_if() { use ast::Statement::Compound; use parser::statement; assert_eq!( statement("if (x) do {} while(y); else z();", &mut Env::new()), Ok(IfStatement { condition: ident("x"), then_statement: DoWhileStatement { statement: Compound(vec![]).into(), expression: ident("y"), } .into(), else_statement: Some( Statement::Expression(Some( CallExpression { callee: ident("z"), arguments: vec![], } .into() )) .into() ), } .into()) ); } // Check that a typedef that can be mistaken for a K&R-style argument declaration is correctly // parsed as an external declaration. What went wrong: until we encounter bar, the thing looks like // a function definition, where the name is followed by a two declarations K&R-style, similar to: // // ``` // int foo(i) // int i; // <-- __attribute__ and typedef occupy this slot, since both are valid declarations. 
// { } // ```: #[test] fn test_attribute4() { use ast::Statement::Compound; use ast::StorageClassSpecifier::Typedef; use ast::TypeSpecifier::Int; use parser::translation_unit; let env = &mut Env::new(); assert_eq!( translation_unit( r#" int foo (int) __attribute__ ((__nothrow__)); typedef int named; int bar (int f) { } "#, env ), Ok(TranslationUnit(vec![ Declaration { specifiers: vec![Int.into()], declarators: vec![InitDeclarator { declarator: Declarator { kind: ident("foo"), derived: vec![FunctionDeclarator { parameters: vec![ParameterDeclaration { specifiers: vec![Int.into()], declarator: None, extensions: vec![], } .into()], ellipsis: Ellipsis::None, } .into()], extensions: vec![Attribute { name: "__nothrow__".into(), arguments: vec![], } .into()], } .into(), initializer: None, } .into()], } .into(), Declaration { specifiers: vec![Typedef.into(), Int.into()], declarators: vec![InitDeclarator { declarator: Declarator { kind: ident("named"), derived: vec![], extensions: vec![], } .into(), initializer: None, } .into()], } .into(), FunctionDefinition { specifiers: vec![Int.into()], declarator: Declarator { kind: ident("bar"), derived: vec![FunctionDeclarator { parameters: vec![ParameterDeclaration { specifiers: vec![Int.into()], declarator: Some( Declarator { kind: ident("f"), derived: vec![], extensions: vec![], } .into(), ), extensions: vec![], } .into()], ellipsis: Ellipsis::None, } .into()], extensions: vec![], } .into(), declarations: vec![], statement: Compound(vec![]).into(), } .into(), ])) ); } #[test] fn test_attribute5() { use ast::Statement::Compound; use ast::TypeSpecifier::Int; use parser::translation_unit; assert_eq!( translation_unit( "int foo(int a __attribute__((unused)), int b __attribute__((unused))) {}", &mut Env::new(), ), Ok(TranslationUnit(vec![FunctionDefinition { specifiers: vec![Int.into()], declarator: Declarator { kind: ident("foo"), derived: vec![FunctionDeclarator { parameters: vec![ ParameterDeclaration { specifiers: vec![Int.into()], 
declarator: Some( Declarator { kind: ident("a"), derived: vec![], extensions: vec![], } .into(), ), extensions: vec![Attribute { name: "unused".into(), arguments: vec![], } .into()], } .into(), ParameterDeclaration { specifiers: vec![Int.into()], declarator: Some( Declarator { kind: ident("b"), derived: vec![], extensions: vec![], } .into(), ), extensions: vec![Attribute { name: "unused".into(), arguments: vec![], } .into()], } .into(), ], ellipsis: Ellipsis::None, } .into()], extensions: vec![], } .into(), declarations: vec![], statement: Compound(vec![]).into(), } .into()])) ); } #[test] fn test_declaration6() { use ast::Expression::AlignOf; use ast::StorageClassSpecifier::Typedef; use ast::TypeSpecifier::{Double, Long}; use parser::declaration; assert_eq!( declaration( r"typedef struct { long long __max_align_ll __attribute__((__aligned__(__alignof__(long long)))); long double __max_align_ld __attribute__((__aligned__(__alignof__(long double)))); } max_align_t;", &mut Env::new() ), Ok(Declaration { specifiers: vec![ Typedef.into(), StructType { kind: StructKind::Struct.into(), identifier: None, declarations: Some(vec![ StructField { specifiers: vec![Long.into(), Long.into()], declarators: vec![StructDeclarator { declarator: Some( Declarator { kind: ident("__max_align_ll"), derived: vec![], extensions: vec![Attribute { name: "__aligned__".into(), arguments: vec![AlignOf( TypeName { specifiers: vec![Long.into(), Long.into()], declarator: None, } .into(), ) .into()], } .into()], } .into(), ), bit_width: None, } .into()], } .into(), StructField { specifiers: vec![Long.into(), Double.into()], declarators: vec![StructDeclarator { declarator: Some( Declarator { kind: ident("__max_align_ld"), derived: vec![], extensions: vec![Attribute { name: "__aligned__".into(), arguments: vec![AlignOf( TypeName { specifiers: vec![Long.into(), Double.into()], declarator: None, } .into(), ) .into()], } .into()], } .into(), ), bit_width: None, } .into()], } .into(), ]), } .into(), ], 
declarators: vec![InitDeclarator { declarator: Declarator { kind: ident("max_align_t"), derived: vec![], extensions: vec![], } .into(), initializer: None, } .into()], } .into()) ); } fn make_declaration(name: &str, specifiers: &[Node<DeclarationSpecifier>]) -> Declaration { Declaration { specifiers: specifiers.to_vec(), declarators: vec![InitDeclarator { declarator: Declarator { kind: ident(name), derived: vec![], extensions: vec![], } .into(), initializer: None, } .into()], } } #[test] fn test_ambiguous_declaration1() { use ast::DerivedDeclarator::KRFunction; use ast::StorageClassSpecifier::Typedef; use ast::TypeSpecifier::Int; use ast::{FunctionDefinition, TranslationUnit}; use parser::translation_unit; let env = &mut Env::new(); assert_eq!( translation_unit( r" typedef int a; int foo() { int a; }", env ), Ok(TranslationUnit(vec![ make_declaration("a", &[Typedef.into(), Int.into()]).into(), FunctionDefinition { specifiers: vec![Int.into()], declarator: Declarator { kind: ident("foo"), derived: vec![KRFunction(vec![]).into()], extensions: vec![] } .into(), declarations: vec![], statement: Statement::Compound(vec![make_declaration("a", &[Int.into()]).into()]) .into() } .into() ])) ); } #[test] fn test_ambiguous_declaration2() { use parser::translation_unit; let env = &mut Env::new(); assert!(translation_unit( r" typedef int a; void foo() { unsigned int; const a; a x; unsigned a; a = 1; }", env ) .is_ok()); } #[test] fn test_ambiguous_parameter_field_declaration() { use parser::translation_unit; let env = &mut Env::new(); // If parameter list treated "a" as a type specifier instead of identifier, this would succeed. assert!(translation_unit( r" typedef int a; int foo(int a* b) {}", env ) .is_err()); } #[test] fn test_ambiguous_struct_field_declaration() { use parser::translation_unit; let env = &mut Env::new(); // If struct field treated "a" as a type specifier instead of identifier, this would succeed. 
assert!(translation_unit( r" typedef int a; struct a { a a, b; };", env ) .is_ok()); } #[test] fn test_struct_name_scope() { use parser::translation_unit; let env = &mut Env::new(); // Struct fields maintain a separate assert!(translation_unit( r" typedef int a; struct a { a a; a b; };", env ) .is_ok()); } #[test] fn test_typedef_redefinition() { use parser::translation_unit; let env = &mut Env::new(); assert!(translation_unit( r" typedef int a; void foo() { a a; _Atomic (a) b; }", env ) .is_err()); assert!(translation_unit( r" typedef int a; void foo(int a, _Atomic (a) b) {}", env ) .is_err()); } #[test] fn test_defines_symbol_before_initializer() { // This test is currently broken, and should be enabled once symbols are defined at the // end of a declarator (not declaration). use parser::translation_unit; let env = &mut Env::new(); // Technically, "a" is defined as a symbol before the "= .." part of the initializer is parsed. assert!(translation_unit( r" typedef int a; int foo() { int a = sizeof(_Atomic(a)); }", env ) .is_err()); } #[test] fn test_enum_modifies_scope() { // Enable once enum correctly modifies scope. use parser::translation_unit; let env = &mut Env::new(); // enum {a} defines a new variable "a" into the current scope. So the next _Atomic(a) must fail. assert!(translation_unit( r" typedef int a; int foo() { int x = (enum {a})1; _Atomic(a) b; }", env ) .is_err()); // Similarly, "a" is defined immediately after its declaration. 
assert!(translation_unit( r" typedef int a; int foo() { int x = (enum {a, b = (a)1})1; }", env ) .is_err()); } #[test] fn test_restores_scope_after_function_decl() { use parser::translation_unit; let env = &mut Env::new(); assert!(translation_unit( r" typedef int a; int foo(a a) {} int bar(int a); _Atomic (a) b; ", env ) .is_ok()); } #[test] fn test_restores_scope_after_block() { use parser::translation_unit; let env = &mut Env::new(); assert!(translation_unit( r" void foo() { typedef int a; { a a; } _Atomic (a) b; }", env ) .is_ok()); } #[test] fn test_restores_scope_after_loops() { use parser::translation_unit; let env = &mut Env::new(); assert!(translation_unit( r" typedef int a; void foo() { for (a a;;) a = a; while (true) {int a;} do { int a; } while(true); _Atomic (a) b; }", env ) .is_ok()); } #[test] fn test_restores_scope_after_selections() { // Enable once enum constants modify scope. use parser::translation_unit; let env = &mut Env::new(); // Test that scope of "if" condition and statement is cleaned up. assert!(translation_unit( r" typedef int a, b; int x; void foo() { if (sizeof(enum {a})) x = sizeof(enum{b}); else x = b; switch (sizeof(enum {b})) x = b; a x, y; b z, w; }", env ) .is_ok()); // Test that "if" condition enum constants are defined within its scope. 
assert!(translation_unit( r" typedef int a; void foo() { int x; if (sizeof(enum {a})) x = (_Atomic(a))1; }", env ) .is_err()); } #[test] fn test_keyword_expr() { use parser::expression; assert_eq!( expression("__func__", &mut Env::new()), Ok(ident("__func__")) ); assert_eq!( expression("__FUNCTION__", &mut Env::new()), Ok(ident("__FUNCTION__")) ); assert_eq!( expression("__PRETTY_FUNCTION__", &mut Env::new()), Ok(ident("__PRETTY_FUNCTION__")) ); } #[test] fn test_ts18661_float() { use parser::declaration; assert_eq!( declaration("_Float64 foo = 1.5;", &mut Env::new()), Ok(Declaration { specifiers: vec![TS18661FloatType { format: TS18661FloatFormat::BinaryInterchange, width: 64, } .into()], declarators: vec![InitDeclarator { declarator: Declarator { kind: ident("foo"), derived: vec![], extensions: vec![], } .into(), initializer: Some(float::dec("1.5")), } .into()], } .into()) ); } #[test] fn test_gnu_extension() { use ast::TypeSpecifier::Long; use parser::translation_unit; assert_eq!( translation_unit("__extension__ union { long l; };", &mut Env::with_gnu()), Ok(TranslationUnit(vec![Declaration { specifiers: vec![StructType { kind: StructKind::Union.into(), identifier: None, declarations: Some(vec![StructField { specifiers: vec![Long.into()], declarators: vec![StructDeclarator { declarator: Some( Declarator { kind: ident("l"), derived: vec![], extensions: vec![], } .into(), ), bit_width: None, } .into()], } .into()]), } .into()], declarators: vec![], } .into()]) .into()) ); assert_eq!( translation_unit(r#"__extension__ _Static_assert(1,"ERR");"#, &mut Env::new()), Ok(TranslationUnit(vec![StaticAssert { expression: int::dec("1"), message: cstr(&[r#""ERR""#]), } .into()]) .into()) ); } #[test] fn test_declaration7() { use ast::DeclaratorKind::Abstract; use ast::DerivedDeclarator::Pointer; use ast::TypeQualifier::Nullable; use ast::TypeSpecifier::{Int, Void}; use parser::declaration; let env = &mut Env::with_clang(); assert_eq!( // This is the first Clang-specific 
declaration you'll encounter in macOS // if you #include <stdio.h>. declaration("int (* _Nullable _close)(void *);", env), Ok(Declaration { specifiers: vec![Int.into()], declarators: vec![InitDeclarator { declarator: Declarator { kind: Declarator { kind: ident("_close"), derived: vec![Pointer(vec![Nullable.into()]).into()], extensions: vec![], } .into(), derived: vec![FunctionDeclarator { parameters: vec![ParameterDeclaration { specifiers: vec![Void.into()], declarator: Some( Declarator { kind: Abstract.into(), derived: vec![Pointer(vec![]).into()], extensions: vec![], } .into(), ), extensions: vec![], } .into()], ellipsis: Ellipsis::None, } .into()], extensions: vec![], } .into(), initializer: None, } .into()], } .into()) ); } #[test] fn test_kr_definition1() { use ast::DerivedDeclarator::{KRFunction, Pointer}; use ast::Statement::Compound; use ast::TranslationUnit; use ast::TypeSpecifier::{Char, Int}; use parser::translation_unit; let env = &mut Env::new(); assert_eq!( translation_unit("int main(argc, argv) int argc; char **argv; { }", env), Ok(TranslationUnit(vec![FunctionDefinition { specifiers: vec![Int.into()], declarator: Declarator { kind: ident("main"), derived: vec![KRFunction(vec![ident("argc"), ident("argv")]).into()], extensions: vec![], } .into(), declarations: vec![ Declaration { specifiers: vec![Int.into()], declarators: vec![InitDeclarator { declarator: Declarator { kind: ident("argc"), derived: vec![], extensions: vec![], } .into(), initializer: None } .into()], } .into(), Declaration { specifiers: vec![Char.into()], declarators: vec![InitDeclarator { declarator: Declarator { kind: ident("argv"), derived: vec![Pointer(vec![]).into(), Pointer(vec![]).into()], extensions: vec![], } .into(), initializer: None } .into()], } .into(), ], statement: Compound(vec![]).into(), } .into()])) ); } #[test] fn test_clang_availability_attr() { use ast::AvailabilityClause::*; use ast::TypeSpecifier::Int; use parser::declaration; let env = &mut Env::with_clang(); 
let src = r#"int f __attribute__((availability(p1,introduced=1.2.3))) __attribute__((availability(p2,unavailable,replacement="f2")));"#; assert_eq!( declaration(src, env), Ok(Declaration { specifiers: vec![Int.into(),], declarators: vec![InitDeclarator { declarator: Declarator { kind: ident("f"), derived: vec![], extensions: vec![ AvailabilityAttribute { platform: ident("p1"), clauses: vec![Introduced( AvailabilityVersion { major: "1".into(), minor: Some("2".into()), subminor: Some("3".into()), } .into() ) .into()], } .into(), AvailabilityAttribute { platform: ident("p2"), clauses: vec![ Unavailable.into(), Replacement(cstr(&["\"f2\""])).into(), ], } .into(), ], } .into(), initializer: None, } .into(),], } .into()) ); } #[test] fn test_struct_decl() { use ast::Declaration; use parser::declaration; let env = &mut Env::new(); assert_eq!( declaration("struct foo S;", env).unwrap(), Declaration { specifiers: vec![StructType { kind: StructKind::Struct.into(), identifier: Some(ident("foo")), declarations: None, } .into()], declarators: vec![InitDeclarator { declarator: Declarator { kind: ident("S"), derived: vec![], extensions: vec![], } .into(), initializer: None, } .into()], } .into() ); } #[test] fn test_struct_empty_decl() { use ast::Declaration; use parser::declaration; let env = &mut Env::with_core(); assert!(declaration("struct foo { } S;", env).is_err()); let env = &mut Env::with_gnu(); assert_eq!( declaration("struct foo { } S;", env).unwrap(), Declaration { specifiers: vec![StructType { kind: StructKind::Struct.into(), identifier: Some(ident("foo")), declarations: Some(Vec::new()), } .into()], declarators: vec![InitDeclarator { declarator: Declarator { kind: ident("S"), derived: vec![], extensions: vec![], } .into(), initializer: None, } .into()], } .into() ); } #[test] fn test_compound_literal() { use parser::expression; use ast::{CompoundLiteral, StructType}; use ast::Designator::Member; use self::int::dec; let env = &mut Env::with_gnu(); assert_eq!( 
expression("(struct test_struct) { 1, .x = 2, 3 }", env), Ok(CompoundLiteral { type_name: TypeName { specifiers: vec![StructType { kind: StructKind::Struct.into(), identifier: Some(ident("test_struct")), declarations: None, }.into()], declarator: None, } .into(), initializer_list: vec![ InitializerListItem { designation: vec![], initializer: dec("1"), } .into(), InitializerListItem { designation: vec![Member(ident("x")).into()], initializer: dec("2"), } .into(), InitializerListItem { designation: vec![], initializer: dec("3"), } .into(), ], } .into()) ); }
31.728972
140
0.382378
11fe42bde047da272c0ec86652d7e6c308e1a5d8
9,883
use crate::message::pty::{MainShutdown, PtyOptions, PtyRequest, PtyResponse, PtyShutdown}; use crate::prelude::*; use super::pty::PtyService; use lifeline::dyn_bus::DynBus; use std::{ collections::{hash_map::DefaultHasher, HashMap}, hash::{Hash, Hasher}, path::PathBuf, }; use tab_api::{ config::history_path, env::is_raw_mode, pty::{PtyWebsocketRequest, PtyWebsocketResponse}, }; use time::Duration; use tokio::time; /// Drives messages between the pty, and the websocket connection to the daemon /// Handles startup & shutdown events, including daemon termination commands. /// Spawns the ClientSessionService, which handles the active tab session. pub struct ClientService { _run: Lifeline, _carrier: MainPtyCarrier, } impl Service for ClientService { type Bus = MainBus; type Lifeline = anyhow::Result<Self>; fn spawn(bus: &Self::Bus) -> Self::Lifeline { let pty_bus = PtyBus::default(); let _carrier = pty_bus.carry_from(bus)?; let tx_shutdown = bus.tx::<MainShutdown>()?; let _run = { let rx = bus.rx::<PtyWebsocketRequest>()?; let tx = bus.tx::<PtyWebsocketResponse>()?; Self::try_task("run", Self::run(rx, tx, tx_shutdown, pty_bus)) }; Ok(Self { _run, _carrier }) } } impl ClientService { async fn run( mut rx: impl Receiver<PtyWebsocketRequest>, mut tx: impl Sender<PtyWebsocketResponse> + Clone + Send + 'static, mut tx_shutdown: impl Sender<MainShutdown>, pty_bus: PtyBus, ) -> anyhow::Result<()> { // TODO: handle ptyshutdown here. 
// it should cancel the session lifeline let mut _session = None; while let Some(msg) = rx.recv().await { match msg { PtyWebsocketRequest::Init(create) => { debug!("initializing on tab {}", create.id); let name = create.name.clone(); let mut env = HashMap::new(); env.insert("SHELL".to_string(), create.shell.clone()); env.insert("TAB".to_string(), create.name.clone()); env.insert("TAB_ID".to_string(), create.id.0.to_string()); let shell = resolve_shell(create.shell.as_str()); debug!("shell detection: {:?}", shell); match shell { Shell::Sh => { let home = history_path("sh", create.name.as_str())?; std::fs::create_dir_all(home.parent().unwrap())?; env.insert("HISTFILE".to_string(), home.to_string_lossy().to_string()); } Shell::Zsh => { // this doesn't work on OSX. /etc/zshrc overwrites it let home = history_path("zsh", create.name.as_str())?; std::fs::create_dir_all(home.parent().unwrap())?; env.insert("HISTFILE".to_string(), home.to_string_lossy().to_string()); } Shell::Bash => { let home = history_path("bash", create.name.as_str())?; std::fs::create_dir_all(home.parent().unwrap())?; env.insert("HISTFILE".to_string(), home.to_string_lossy().to_string()); } Shell::Fish => { let mut hasher = DefaultHasher::new(); name.hash(&mut hasher); let id = hasher.finish(); let history = format!("tab_{}", id); env.insert("fish_history".to_string(), history); } Shell::Unknown => {} } let mut args = vec![]; // todo: better resolution of shells if let Shell::Fish = shell { args.push("--interactive".to_string()); } if !is_raw_mode() { // if we are in test mode, try to make the terminal as predictable as possible info!("Raw mode is disabled. 
Launching in non-interactive debug mode."); env.insert("PS1".into(), "$ ".into()); if let Shell::Bash = shell { args.push("--noprofile".into()); args.push("--norc".into()); args.push("--noediting".into()); env.insert("BASH_SILENCE_DEPRECATION_WARNING".into(), "1".into()); } } let working_directory = PathBuf::from(create.dir.clone()); let options = PtyOptions { dimensions: create.dimensions, command: create.shell.clone(), args, working_directory: working_directory.clone(), env, }; pty_bus.store_resource::<PtyOptions>(options); let session = ClientSessionService::spawn(&pty_bus)?; _session = Some(session); debug!("tab initialized, name {}", name); tx.send(PtyWebsocketResponse::Started(create)).await?; } PtyWebsocketRequest::Input(_) => {} PtyWebsocketRequest::Resize(_) => {} PtyWebsocketRequest::Terminate => { // in case we somehow get a pty termination request, but don't have a session running, // send a main shutdown message time::delay_for(Duration::from_millis(100)).await; tx_shutdown.send(MainShutdown {}).await?; } } } Ok(()) } } /// Drives an active tab session, forwarding input/output events betweeen the pty & daemon. /// Handles termination requests (from the daemon), and termination events (from the pty). 
pub struct ClientSessionService { _pty: PtyService, _output: Lifeline, _input: Lifeline, } impl Service for ClientSessionService { type Bus = PtyBus; type Lifeline = anyhow::Result<Self>; fn spawn(bus: &Self::Bus) -> Self::Lifeline { let _pty = PtyService::spawn(&bus)?; let _output = { let rx_response = bus.rx::<PtyResponse>()?; let tx_websocket = bus.tx::<PtyWebsocketResponse>()?; let tx_shutdown = bus.tx::<PtyShutdown>()?; Self::try_task( "output", Self::output(rx_response, tx_websocket, tx_shutdown), ) }; let _input = { let rx_request = bus.rx::<PtyWebsocketRequest>()?; let tx_pty = bus.tx::<PtyRequest>()?; let tx_shutdown = bus.tx::<PtyShutdown>()?; Self::try_task("input", Self::input(rx_request, tx_pty, tx_shutdown)) }; Ok(Self { _pty, _output, _input, }) } } impl ClientSessionService { async fn input( mut rx: impl Receiver<PtyWebsocketRequest>, mut tx_pty: impl Sender<PtyRequest>, mut tx_shutdown: impl Sender<PtyShutdown>, ) -> anyhow::Result<()> { while let Some(request) = rx.recv().await { match request { PtyWebsocketRequest::Input(input) => { let message = PtyRequest::Input(input); tx_pty.send(message).await.ok(); } PtyWebsocketRequest::Terminate => { info!("Terminating due to command request."); tx_pty.send(PtyRequest::Shutdown).await.ok(); time::delay_for(Duration::from_millis(20)).await; tx_shutdown.send(PtyShutdown {}).await?; } PtyWebsocketRequest::Resize(dimensions) => { debug!("received resize request: {:?}", dimensions); tx_pty.send(PtyRequest::Resize(dimensions)).await.ok(); } _ => {} } } Ok(()) } async fn output( mut rx: impl Receiver<PtyResponse>, mut tx: impl Sender<PtyWebsocketResponse>, mut tx_shutdown: impl Sender<PtyShutdown>, ) -> anyhow::Result<()> { while let Some(msg) = rx.recv().await { match msg { PtyResponse::Output(out) => { tx.send(PtyWebsocketResponse::Output(out)).await?; } PtyResponse::Terminated(code) => { debug!("pty child process terminated with status: {:?}", &code); tx.send(PtyWebsocketResponse::Stopped).await?; 
time::delay_for(Duration::from_millis(500)).await; tx_shutdown.send(PtyShutdown {}).await?; } } } Ok(()) } } #[derive(Debug, Clone)] pub enum Shell { Sh, Zsh, Bash, Fish, Unknown, } pub fn resolve_shell(command: &str) -> Shell { for fragment in command.split(|c| c == '/' || c == ' ' || c == '\\') { let fragment = fragment.trim(); if fragment.eq_ignore_ascii_case("sh") { return Shell::Sh; } else if fragment.eq_ignore_ascii_case("zsh") { return Shell::Zsh; } else if fragment.eq_ignore_ascii_case("bash") { return Shell::Bash; } else if fragment.eq_ignore_ascii_case("fish") { return Shell::Fish; } } Shell::Unknown }
36.069343
106
0.500759
79694a7c45372d13116791c6f7b54b81d42d4db5
4,443
use super::*; use crate::AtoFinanceLedger; #[test] fn test_do_bonus() { new_test_ext().execute_with(|| { System::set_block_number(5); const ACCOUNT_ID_1: u64 = 1; const ACCOUNT_ID_2: u64 = 2; // Dispatch a signed extrinsic. assert_eq!(Balances::free_balance(ACCOUNT_ID_1), 100_000_000_000_000); assert_eq!(Balances::free_balance(ACCOUNT_ID_2), 200_000_000_000_000); // let puzzle_hash = toVec("TEST_PUZZLE_HASH"); // assert_noop!( // AtochaPot::do_bonus(puzzle_hash.clone(), ACCOUNT_ID_1, 150000000000000), // Error::<Test>::InsufficientBalance // ); // Get Error::<Test>::InsufficientBalance let res = AtochaPot::do_bonus( puzzle_hash.clone(), ACCOUNT_ID_1, 150_000_000_000_000, 5u32.into(), ); assert!(res.is_err()); // pid: PuzzleSubjectHash, // who: T::AccountId, // amount: BalanceOf<T>, assert_ok!(AtochaPot::do_bonus( puzzle_hash.clone(), ACCOUNT_ID_1, 50_000_000_000_000, 5u32.into() )); assert_eq!(Balances::free_balance(ACCOUNT_ID_1), 50_000_000_000_000); let pot_ledger = AtoFinanceLedger::<Test>::get(&puzzle_hash); assert_eq!(pot_ledger.funds, 50_000_000_000_000); assert_eq!(pot_ledger.total, 50_000_000_000_000); // Change owner not allowed. assert_noop!( AtochaPot::do_bonus(puzzle_hash.clone(), ACCOUNT_ID_2, 50_000_000_000_000, 5u32.into()), Error::<Test>::LedgerOwnerNotMatch ); // Additional bound assert_ok!(AtochaPot::do_bonus( puzzle_hash.clone(), ACCOUNT_ID_1, 10_000_000_000_000, 5u32.into() )); assert_eq!(Balances::free_balance(ACCOUNT_ID_1), 40_000_000_000_000); let pot_ledger = AtoFinanceLedger::<Test>::get(&puzzle_hash); assert_eq!(pot_ledger.funds, 60_000_000_000_000); assert_eq!(pot_ledger.total, 60_000_000_000_000); }); } #[test] fn test_do_sponsorship() { new_test_ext().execute_with(|| { const ACCOUNT_ID_1: u64 = 1; const ACCOUNT_ID_2: u64 = 2; // Dispatch a signed extrinsic. 
assert_eq!(Balances::free_balance(ACCOUNT_ID_1), 100_000_000_000_000); assert_eq!(Balances::free_balance(ACCOUNT_ID_2), 200_000_000_000_000); // let puzzle_hash = toVec("TEST_PUZZLE_HASH"); // puzzle must exists. assert_noop!( AtochaPot::do_sponsorship( puzzle_hash.clone(), ACCOUNT_ID_1, 20_000_000_000_000, 1u32.into(), "Some-Things".as_bytes().to_vec() ), Error::<Test>::PuzzleNotExists ); assert_ok!(AtochaPot::do_bonus( puzzle_hash.clone(), ACCOUNT_ID_1, 10_000_000_000_000, 5u32.into() )); assert_eq!(Balances::free_balance(ACCOUNT_ID_1), 90_000_000_000_000); let pot_ledger = AtoFinanceLedger::<Test>::get(&puzzle_hash); assert_eq!(pot_ledger.funds, 10_000_000_000_000); assert_eq!(pot_ledger.total, 10_000_000_000_000); assert_ok!(AtochaPot::do_sponsorship( puzzle_hash.clone(), ACCOUNT_ID_1, 20_000_000_000_000, 5u32.into(), // block number "Some-Things-1".as_bytes().to_vec() )); let pot_ledger = AtoFinanceLedger::<Test>::get(&puzzle_hash); assert_eq!(pot_ledger.funds, 10_000_000_000_000); assert_eq!(pot_ledger.total, 30_000_000_000_000); assert_eq!(pot_ledger.sponsor_list.len(), 2); assert_eq!( pot_ledger.sponsor_list[0], SponsorData { sponsor: ACCOUNT_ID_1, funds: 20_000_000_000_000, create_bn: 5, reason: toVec("Some-Things-1") } ); assert_eq!( pot_ledger.sponsor_list[1], SponsorData { sponsor: ACCOUNT_ID_1, funds: 10_000_000_000_000, create_bn: 5, reason: Vec::new(), } ); assert_ok!(AtochaPot::do_sponsorship( puzzle_hash.clone(), ACCOUNT_ID_2, 30_000_000_000_000, 6u32.into(), // block number "Some-Things-2".as_bytes().to_vec() )); let pot_ledger = AtoFinanceLedger::<Test>::get(&puzzle_hash); assert_eq!(pot_ledger.funds, 10_000_000_000_000); assert_eq!(pot_ledger.total, 60_000_000_000_000); assert_eq!(pot_ledger.sponsor_list.len(), 3); assert_eq!( pot_ledger.sponsor_list[0], SponsorData { sponsor: ACCOUNT_ID_2, funds: 30_000_000_000_000, create_bn: 6, reason: toVec("Some-Things-2") } ); assert_eq!( pot_ledger.sponsor_list[1], SponsorData { sponsor: ACCOUNT_ID_1, 
funds: 20_000_000_000_000, create_bn: 5, reason: toVec("Some-Things-1") } ); assert_eq!( pot_ledger.sponsor_list[2], SponsorData { sponsor: ACCOUNT_ID_1, funds: 10_000_000_000_000, create_bn: 5, reason: Vec::new(), } ); }); }
25.982456
91
0.699977
626fbaada5cbf1e6745f2c246d1aa8a12d984e39
9,042
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! A mini version of ast::Ty, which is easier to use, and features an explicit `Self` type to use //! when specifying impls to be derived. pub use self::PtrTy::*; pub use self::Ty::*; use syntax::ast; use syntax::ast::{Expr, Generics, Ident, SelfKind}; use syntax::ext::base::ExtCtxt; use syntax::ext::build::AstBuilder; use syntax::codemap::respan; use syntax::ptr::P; use syntax_pos::Span; /// The types of pointers #[derive(Clone, Eq, PartialEq)] #[allow(dead_code)] pub enum PtrTy<'a> { /// &'lifetime mut Borrowed(Option<&'a str>, ast::Mutability), /// *mut Raw(ast::Mutability), } /// A path, e.g. `::std::option::Option::<i32>` (global). Has support /// for type parameters and a lifetime. 
#[derive(Clone, Eq, PartialEq)] pub struct Path<'a> { pub path: Vec<&'a str> , pub lifetime: Option<&'a str>, pub params: Vec<Box<Ty<'a>>>, pub global: bool, } impl<'a> Path<'a> { pub fn new<'r>(path: Vec<&'r str> ) -> Path<'r> { Path::new_(path, None, Vec::new(), true) } pub fn new_local<'r>(path: &'r str) -> Path<'r> { Path::new_(vec!( path ), None, Vec::new(), false) } pub fn new_<'r>(path: Vec<&'r str> , lifetime: Option<&'r str>, params: Vec<Box<Ty<'r>>>, global: bool) -> Path<'r> { Path { path: path, lifetime: lifetime, params: params, global: global } } pub fn to_ty(&self, cx: &ExtCtxt, span: Span, self_ty: Ident, self_generics: &Generics) -> P<ast::Ty> { cx.ty_path(self.to_path(cx, span, self_ty, self_generics)) } pub fn to_path(&self, cx: &ExtCtxt, span: Span, self_ty: Ident, self_generics: &Generics) -> ast::Path { let idents = self.path.iter().map(|s| cx.ident_of(*s)).collect(); let lt = mk_lifetimes(cx, span, &self.lifetime); let tys = self.params.iter().map(|t| t.to_ty(cx, span, self_ty, self_generics)).collect(); cx.path_all(span, self.global, idents, lt, tys, Vec::new()) } } /// A type. 
Supports pointers, Self, and literals #[derive(Clone, Eq, PartialEq)] pub enum Ty<'a> { Self_, /// &/Box/ Ty Ptr(Box<Ty<'a>>, PtrTy<'a>), /// mod::mod::Type<[lifetime], [Params...]>, including a plain type /// parameter, and things like `i32` Literal(Path<'a>), /// includes unit Tuple(Vec<Ty<'a>> ) } pub fn borrowed_ptrty<'r>() -> PtrTy<'r> { Borrowed(None, ast::Mutability::Immutable) } pub fn borrowed<'r>(ty: Box<Ty<'r>>) -> Ty<'r> { Ptr(ty, borrowed_ptrty()) } pub fn borrowed_explicit_self<'r>() -> Option<Option<PtrTy<'r>>> { Some(Some(borrowed_ptrty())) } pub fn borrowed_self<'r>() -> Ty<'r> { borrowed(Box::new(Self_)) } pub fn nil_ty<'r>() -> Ty<'r> { Tuple(Vec::new()) } fn mk_lifetime(cx: &ExtCtxt, span: Span, lt: &Option<&str>) -> Option<ast::Lifetime> { match *lt { Some(ref s) => Some(cx.lifetime(span, cx.ident_of(*s).name)), None => None } } fn mk_lifetimes(cx: &ExtCtxt, span: Span, lt: &Option<&str>) -> Vec<ast::Lifetime> { match *lt { Some(ref s) => vec!(cx.lifetime(span, cx.ident_of(*s).name)), None => vec!() } } impl<'a> Ty<'a> { pub fn to_ty(&self, cx: &ExtCtxt, span: Span, self_ty: Ident, self_generics: &Generics) -> P<ast::Ty> { match *self { Ptr(ref ty, ref ptr) => { let raw_ty = ty.to_ty(cx, span, self_ty, self_generics); match *ptr { Borrowed(ref lt, mutbl) => { let lt = mk_lifetime(cx, span, lt); cx.ty_rptr(span, raw_ty, lt, mutbl) } Raw(mutbl) => cx.ty_ptr(span, raw_ty, mutbl) } } Literal(ref p) => { p.to_ty(cx, span, self_ty, self_generics) } Self_ => { cx.ty_path(self.to_path(cx, span, self_ty, self_generics)) } Tuple(ref fields) => { let ty = ast::TyKind::Tup(fields.iter() .map(|f| f.to_ty(cx, span, self_ty, self_generics)) .collect()); cx.ty(span, ty) } } } pub fn to_path(&self, cx: &ExtCtxt, span: Span, self_ty: Ident, self_generics: &Generics) -> ast::Path { match *self { Self_ => { let self_params = self_generics.ty_params.iter().map(|ty_param| { cx.ty_ident(span, ty_param.ident) }).collect(); let lifetimes = 
self_generics.lifetimes.iter() .map(|d| d.lifetime) .collect(); cx.path_all(span, false, vec![self_ty], lifetimes, self_params, Vec::new()) } Literal(ref p) => { p.to_path(cx, span, self_ty, self_generics) } Ptr(..) => { cx.span_bug(span, "pointer in a path in generic `derive`") } Tuple(..) => { cx.span_bug(span, "tuple in a path in generic `derive`") } } } } fn mk_ty_param(cx: &ExtCtxt, span: Span, name: &str, bounds: &[Path], self_ident: Ident, self_generics: &Generics) -> ast::TyParam { let bounds = bounds.iter().map(|b| { let path = b.to_path(cx, span, self_ident, self_generics); cx.typarambound(path) }).collect(); cx.typaram(span, cx.ident_of(name), bounds, None) } fn mk_generics(lifetimes: Vec<ast::LifetimeDef>, ty_params: Vec<ast::TyParam>) -> Generics { Generics { lifetimes: lifetimes, ty_params: P::from_vec(ty_params), where_clause: ast::WhereClause { id: ast::DUMMY_NODE_ID, predicates: Vec::new(), }, } } /// Lifetimes and bounds on type parameters #[derive(Clone)] pub struct LifetimeBounds<'a> { pub lifetimes: Vec<(&'a str, Vec<&'a str>)>, pub bounds: Vec<(&'a str, Vec<Path<'a>>)>, } impl<'a> LifetimeBounds<'a> { pub fn empty() -> LifetimeBounds<'a> { LifetimeBounds { lifetimes: Vec::new(), bounds: Vec::new() } } pub fn to_generics(&self, cx: &ExtCtxt, span: Span, self_ty: Ident, self_generics: &Generics) -> Generics { let lifetimes = self.lifetimes.iter().map(|&(ref lt, ref bounds)| { let bounds = bounds.iter().map( |b| cx.lifetime(span, cx.ident_of(*b).name)).collect(); cx.lifetime_def(span, cx.ident_of(*lt).name, bounds) }).collect(); let ty_params = self.bounds.iter().map(|t| { match *t { (ref name, ref bounds) => { mk_ty_param(cx, span, *name, bounds, self_ty, self_generics) } } }).collect(); mk_generics(lifetimes, ty_params) } } pub fn get_explicit_self(cx: &ExtCtxt, span: Span, self_ptr: &Option<PtrTy>) -> (P<Expr>, ast::ExplicitSelf) { // this constructs a fresh `self` path let self_path = cx.expr_self(span); match *self_ptr { None => { 
(self_path, respan(span, SelfKind::Value(ast::Mutability::Immutable))) } Some(ref ptr) => { let self_ty = respan( span, match *ptr { Borrowed(ref lt, mutbl) => { let lt = lt.map(|s| cx.lifetime(span, cx.ident_of(s).name)); SelfKind::Region(lt, mutbl) } Raw(_) => cx.span_bug(span, "attempted to use *self in deriving definition") }); let self_expr = cx.expr_deref(span, self_path); (self_expr, self_ty) } } }
32.06383
98
0.493364
ed382406efacf7d8a9cbee69cc593760bca33275
25,520
//~ NOTE not a function //~| NOTE not a foreign function or static //~| NOTE not a function or static //~| NOTE not an `extern` block // This test enumerates as many compiler-builtin ungated attributes as // possible (that is, all the mutually compatible ones), and checks // that we get "expected" (*) warnings for each in the various weird // places that users might put them in the syntax. // // (*): The word "expected" is in quotes above because the cases where // warnings are and are not emitted might not match a user's intuition // nor the rustc developers' intent. I am really just trying to // capture today's behavior in a test, not so that it become enshrined // as the absolute behavior going forward, but rather so that we do // not change the behavior in the future without even being *aware* of // the change when it happens. // // At the time of authoring, the attributes here are listed in the // order that they occur in `librustc_feature`. // // Any builtin attributes that: // // - are not stable, or // // - could not be included here covering the same cases as the other // attributes without raising an *error* from rustc (note though // that warnings are of course expected) // // have their own test case referenced by filename in an inline // comment. // // The test feeds numeric inputs to each attribute that accepts them // without error. We do this for two reasons: (1.) to exercise how // inputs are handled by each, and (2.) to ease searching for related // occurrences in the source text. 
// check-pass #![feature(test)] #![warn(unused_attributes, unknown_lints)] //~^ NOTE the lint level is defined here //~| NOTE the lint level is defined here // UNGATED WHITE-LISTED BUILT-IN ATTRIBUTES #![warn(x5400)] //~ WARN unknown lint: `x5400` #![allow(x5300)] //~ WARN unknown lint: `x5300` #![forbid(x5200)] //~ WARN unknown lint: `x5200` #![deny(x5100)] //~ WARN unknown lint: `x5100` #![macro_use] // (allowed if no argument; see issue-43160-gating-of-macro_use.rs) // skipping testing of cfg // skipping testing of cfg_attr #![should_panic] //~ WARN `#[should_panic]` only has an effect #![ignore] //~ WARN `#[ignore]` only has an effect on functions #![no_implicit_prelude] #![reexport_test_harness_main = "2900"] // see gated-link-args.rs // see issue-43106-gating-of-macro_escape.rs for crate-level; but non crate-level is below at "2700" // (cannot easily test gating of crate-level #[no_std]; but non crate-level is below at "2600") #![proc_macro_derive()] //~ WARN `#[proc_macro_derive]` only has an effect #![doc = "2400"] #![cold] //~ WARN attribute should be applied to a function //~^ WARN this was previously accepted #![link()] //~ WARN attribute should be applied to an `extern` block //~^ WARN this was previously accepted #![link_name = "1900"] //~^ WARN attribute should be applied to a foreign function //~^^ WARN this was previously accepted by the compiler #![link_section = "1800"] //~^ WARN attribute should be applied to a function or static //~^^ WARN this was previously accepted by the compiler // see issue-43106-gating-of-rustc_deprecated.rs #![must_use] // see issue-43106-gating-of-stable.rs // see issue-43106-gating-of-unstable.rs // see issue-43106-gating-of-deprecated.rs #![windows_subsystem = "windows"] // UNGATED CRATE-LEVEL BUILT-IN ATTRIBUTES #![crate_name = "0900"] #![crate_type = "bin"] // cannot pass "0800" here #![crate_id = "10"] //~^ WARN use of deprecated attribute //~| HELP remove this attribute //~| NOTE `#[warn(deprecated)]` on by 
default // FIXME(#44232) we should warn that this isn't used. #![feature(rust1)] //~^ WARN no longer requires an attribute to enable //~| NOTE `#[warn(stable_features)]` on by default #![no_start] //~^ WARN use of deprecated attribute //~| HELP remove this attribute // (cannot easily gating state of crate-level #[no_main]; but non crate-level is below at "0400") #![no_builtins] #![recursion_limit = "0200"] #![type_length_limit = "0100"] // USES OF BUILT-IN ATTRIBUTES IN OTHER ("UNUSUAL") PLACES #[warn(x5400)] //~^ WARN unknown lint: `x5400` mod warn { mod inner { #![warn(x5400)] } //~^ WARN unknown lint: `x5400` #[warn(x5400)] fn f() { } //~^ WARN unknown lint: `x5400` #[warn(x5400)] struct S; //~^ WARN unknown lint: `x5400` #[warn(x5400)] type T = S; //~^ WARN unknown lint: `x5400` #[warn(x5400)] impl S { } //~^ WARN unknown lint: `x5400` } #[allow(x5300)] //~^ WARN unknown lint: `x5300` mod allow { mod inner { #![allow(x5300)] } //~^ WARN unknown lint: `x5300` #[allow(x5300)] fn f() { } //~^ WARN unknown lint: `x5300` #[allow(x5300)] struct S; //~^ WARN unknown lint: `x5300` #[allow(x5300)] type T = S; //~^ WARN unknown lint: `x5300` #[allow(x5300)] impl S { } //~^ WARN unknown lint: `x5300` } #[forbid(x5200)] //~^ WARN unknown lint: `x5200` mod forbid { mod inner { #![forbid(x5200)] } //~^ WARN unknown lint: `x5200` #[forbid(x5200)] fn f() { } //~^ WARN unknown lint: `x5200` #[forbid(x5200)] struct S; //~^ WARN unknown lint: `x5200` #[forbid(x5200)] type T = S; //~^ WARN unknown lint: `x5200` #[forbid(x5200)] impl S { } //~^ WARN unknown lint: `x5200` } #[deny(x5100)] //~^ WARN unknown lint: `x5100` mod deny { mod inner { #![deny(x5100)] } //~^ WARN unknown lint: `x5100` #[deny(x5100)] fn f() { } //~^ WARN unknown lint: `x5100` #[deny(x5100)] struct S; //~^ WARN unknown lint: `x5100` #[deny(x5100)] type T = S; //~^ WARN unknown lint: `x5100` #[deny(x5100)] impl S { } //~^ WARN unknown lint: `x5100` } #[macro_use] mod macro_use { mod inner { #![macro_use] } 
#[macro_use] fn f() { } //~^ `#[macro_use]` only has an effect #[macro_use] struct S; //~^ `#[macro_use]` only has an effect #[macro_use] type T = S; //~^ `#[macro_use]` only has an effect #[macro_use] impl S { } //~^ `#[macro_use]` only has an effect } #[macro_export] //~^ WARN `#[macro_export]` only has an effect on macro definitions mod macro_export { mod inner { #![macro_export] } //~^ WARN `#[macro_export]` only has an effect on macro definitions #[macro_export] fn f() { } //~^ WARN `#[macro_export]` only has an effect on macro definitions #[macro_export] struct S; //~^ WARN `#[macro_export]` only has an effect on macro definitions #[macro_export] type T = S; //~^ WARN `#[macro_export]` only has an effect on macro definitions #[macro_export] impl S { } //~^ WARN `#[macro_export]` only has an effect on macro definitions } // At time of unit test authorship, if compiling without `--test` then // non-crate-level #[test] attributes seem to be ignored. #[test] mod test { mod inner { #![test] } fn f() { } struct S; type T = S; impl S { } } // At time of unit test authorship, if compiling without `--test` then // non-crate-level #[bench] attributes seem to be ignored. 
#[bench] mod bench { mod inner { #![bench] } #[bench] struct S; #[bench] type T = S; #[bench] impl S { } } #[path = "3800"] mod path { mod inner { #![path="3800"] } #[path = "3800"] fn f() { } //~^ WARN `#[path]` only has an effect #[path = "3800"] struct S; //~^ WARN `#[path]` only has an effect #[path = "3800"] type T = S; //~^ WARN `#[path]` only has an effect #[path = "3800"] impl S { } //~^ WARN `#[path]` only has an effect } #[automatically_derived] //~^ WARN `#[automatically_derived]` only has an effect mod automatically_derived { mod inner { #![automatically_derived] } //~^ WARN `#[automatically_derived] #[automatically_derived] fn f() { } //~^ WARN `#[automatically_derived] #[automatically_derived] struct S; //~^ WARN `#[automatically_derived] #[automatically_derived] type T = S; //~^ WARN `#[automatically_derived] #[automatically_derived] impl S { } } #[no_mangle] //~^ WARN attribute should be applied to a free function, impl method or static [unused_attributes] //~| WARN this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! mod no_mangle { //~^ NOTE not a free function, impl method or static mod inner { #![no_mangle] } //~^ WARN attribute should be applied to a free function, impl method or static [unused_attributes] //~| WARN this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! //~| NOTE not a free function, impl method or static #[no_mangle] fn f() { } #[no_mangle] struct S; //~^ WARN attribute should be applied to a free function, impl method or static [unused_attributes] //~| WARN this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! 
//~| NOTE not a free function, impl method or static #[no_mangle] type T = S; //~^ WARN attribute should be applied to a free function, impl method or static [unused_attributes] //~| WARN this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! //~| NOTE not a free function, impl method or static #[no_mangle] impl S { } //~^ WARN attribute should be applied to a free function, impl method or static [unused_attributes] //~| WARN this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! //~| NOTE not a free function, impl method or static trait Tr { #[no_mangle] fn foo(); //~^ WARN attribute should be applied to a free function, impl method or static [unused_attributes] //~| WARN this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! //~| NOTE not a free function, impl method or static #[no_mangle] fn bar() {} //~^ WARN attribute should be applied to a free function, impl method or static [unused_attributes] //~| WARN this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! 
//~| NOTE not a free function, impl method or static } } #[should_panic] //~^ WARN `#[should_panic]` only has an effect on mod should_panic { mod inner { #![should_panic] } //~^ WARN `#[should_panic]` only has an effect on #[should_panic] fn f() { } #[should_panic] struct S; //~^ WARN `#[should_panic]` only has an effect on #[should_panic] type T = S; //~^ WARN `#[should_panic]` only has an effect on #[should_panic] impl S { } //~^ WARN `#[should_panic]` only has an effect on } #[ignore] //~^ WARN `#[ignore]` only has an effect on functions mod ignore { mod inner { #![ignore] } //~^ WARN `#[ignore]` only has an effect on functions #[ignore] fn f() { } #[ignore] struct S; //~^ WARN `#[ignore]` only has an effect on functions #[ignore] type T = S; //~^ WARN `#[ignore]` only has an effect on functions #[ignore] impl S { } //~^ WARN `#[ignore]` only has an effect on functions } #[no_implicit_prelude] mod no_implicit_prelude { mod inner { #![no_implicit_prelude] } #[no_implicit_prelude] fn f() { } //~^ WARN `#[no_implicit_prelude]` only has an effect #[no_implicit_prelude] struct S; //~^ WARN `#[no_implicit_prelude]` only has an effect #[no_implicit_prelude] type T = S; //~^ WARN `#[no_implicit_prelude]` only has an effect #[no_implicit_prelude] impl S { } //~^ WARN `#[no_implicit_prelude]` only has an effect } #[reexport_test_harness_main = "2900"] //~^ WARN crate-level attribute should be mod reexport_test_harness_main { mod inner { #![reexport_test_harness_main="2900"] } //~^ WARN crate-level attribute should be #[reexport_test_harness_main = "2900"] fn f() { } //~^ WARN crate-level attribute should be #[reexport_test_harness_main = "2900"] struct S; //~^ WARN crate-level attribute should be #[reexport_test_harness_main = "2900"] type T = S; //~^ WARN crate-level attribute should be #[reexport_test_harness_main = "2900"] impl S { } //~^ WARN crate-level attribute should be } // Cannot feed "2700" to `#[macro_escape]` without signaling an error. 
#[macro_escape] //~^ WARN `#[macro_escape]` is a deprecated synonym for `#[macro_use]` mod macro_escape { mod inner { #![macro_escape] } //~^ WARN `#[macro_escape]` is a deprecated synonym for `#[macro_use]` //~| HELP try an outer attribute: `#[macro_use]` #[macro_escape] fn f() { } //~^ WARN `#[macro_escape]` only has an effect #[macro_escape] struct S; //~^ WARN `#[macro_escape]` only has an effect #[macro_escape] type T = S; //~^ WARN `#[macro_escape]` only has an effect #[macro_escape] impl S { } //~^ WARN `#[macro_escape]` only has an effect } #[no_std] //~^ WARN crate-level attribute should be an inner attribute mod no_std { mod inner { #![no_std] } //~^ WARN crate-level attribute should be in the root module #[no_std] fn f() { } //~^ WARN crate-level attribute should be an inner attribute #[no_std] struct S; //~^ WARN crate-level attribute should be an inner attribute #[no_std] type T = S; //~^ WARN crate-level attribute should be an inner attribute #[no_std] impl S { } //~^ WARN crate-level attribute should be an inner attribute } // At time of authorship, #[proc_macro_derive = "2500"] signals error // when it occurs on a mod (apart from crate-level). Therefore it goes // into its own file; see issue-43106-gating-of-proc_macro_derive.rs #[doc = "2400"] mod doc { mod inner { #![doc="2400"] } #[doc = "2400"] fn f() { } #[doc = "2400"] struct S; #[doc = "2400"] type T = S; #[doc = "2400"] impl S { } } #[cold] //~^ WARN attribute should be applied to a function //~| WARN this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! mod cold { //~^ NOTE not a function mod inner { #![cold] } //~^ WARN attribute should be applied to a function //~| WARN this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! 
//~| NOTE not a function #[cold] fn f() { } #[cold] struct S; //~^ WARN attribute should be applied to a function //~| WARN this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! //~| NOTE not a function #[cold] type T = S; //~^ WARN attribute should be applied to a function //~| WARN this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! //~| NOTE not a function #[cold] impl S { } //~^ WARN attribute should be applied to a function //~| WARN this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! //~| NOTE not a function } #[link_name = "1900"] //~^ WARN attribute should be applied to a foreign function or static [unused_attributes] //~| WARN this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! mod link_name { //~^ NOTE not a foreign function or static #[link_name = "1900"] //~^ WARN attribute should be applied to a foreign function or static [unused_attributes] //~| WARN this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! //~| HELP try `#[link(name = "1900")]` instead extern "C" { } //~^ NOTE not a foreign function or static mod inner { #![link_name="1900"] } //~^ WARN attribute should be applied to a foreign function or static [unused_attributes] //~| WARN this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! //~| NOTE not a foreign function or static #[link_name = "1900"] fn f() { } //~^ WARN attribute should be applied to a foreign function or static [unused_attributes] //~| WARN this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! 
//~| NOTE not a foreign function or static #[link_name = "1900"] struct S; //~^ WARN attribute should be applied to a foreign function or static [unused_attributes] //~| WARN this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! //~| NOTE not a foreign function or static #[link_name = "1900"] type T = S; //~^ WARN attribute should be applied to a foreign function or static [unused_attributes] //~| WARN this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! //~| NOTE not a foreign function or static #[link_name = "1900"] impl S { } //~^ WARN attribute should be applied to a foreign function or static [unused_attributes] //~| WARN this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! //~| NOTE not a foreign function or static } #[link_section = "1800"] //~^ WARN attribute should be applied to a function or static [unused_attributes] //~| WARN this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! mod link_section { //~^ NOTE not a function or static mod inner { #![link_section="1800"] } //~^ WARN attribute should be applied to a function or static [unused_attributes] //~| WARN this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! //~| NOTE not a function or static #[link_section = "1800"] fn f() { } #[link_section = "1800"] struct S; //~^ WARN attribute should be applied to a function or static [unused_attributes] //~| WARN this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! 
//~| NOTE not a function or static #[link_section = "1800"] type T = S; //~^ WARN attribute should be applied to a function or static [unused_attributes] //~| WARN this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! //~| NOTE not a function or static #[link_section = "1800"] impl S { } //~^ WARN attribute should be applied to a function or static [unused_attributes] //~| WARN this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! //~| NOTE not a function or static } // Note that this is a `check-pass` test, so it will never invoke the linker. #[link()] //~^ WARN attribute should be applied to an `extern` block //~| WARN this was previously accepted mod link { //~^ NOTE not an `extern` block mod inner { #![link()] } //~^ WARN attribute should be applied to an `extern` block //~| WARN this was previously accepted //~| NOTE not an `extern` block #[link()] fn f() { } //~^ WARN attribute should be applied to an `extern` block //~| WARN this was previously accepted //~| NOTE not an `extern` block #[link()] struct S; //~^ WARN attribute should be applied to an `extern` block //~| WARN this was previously accepted //~| NOTE not an `extern` block #[link()] type T = S; //~^ WARN attribute should be applied to an `extern` block //~| WARN this was previously accepted //~| NOTE not an `extern` block #[link()] impl S { } //~^ WARN attribute should be applied to an `extern` block //~| WARN this was previously accepted //~| NOTE not an `extern` block } struct StructForDeprecated; #[deprecated] mod deprecated { mod inner { #![deprecated] } #[deprecated] fn f() { } #[deprecated] struct S1; #[deprecated] type T = super::StructForDeprecated; #[deprecated] impl super::StructForDeprecated { } } #[must_use] mod must_use { mod inner { #![must_use] } #[must_use] fn f() { } #[must_use] struct S; #[must_use] type T = S; #[must_use] impl S { } } 
#[windows_subsystem = "windows"] //~^ WARN crate-level attribute should be an inner attribute mod windows_subsystem { mod inner { #![windows_subsystem="windows"] } //~^ WARN crate-level attribute should be in the root module #[windows_subsystem = "windows"] fn f() { } //~^ WARN crate-level attribute should be an inner attribute #[windows_subsystem = "windows"] struct S; //~^ WARN crate-level attribute should be an inner attribute #[windows_subsystem = "windows"] type T = S; //~^ WARN crate-level attribute should be an inner attribute #[windows_subsystem = "windows"] impl S { } //~^ WARN crate-level attribute should be an inner attribute } // BROKEN USES OF CRATE-LEVEL BUILT-IN ATTRIBUTES #[crate_name = "0900"] //~^ WARN crate-level attribute should be an inner attribute mod crate_name { mod inner { #![crate_name="0900"] } //~^ WARN crate-level attribute should be in the root module #[crate_name = "0900"] fn f() { } //~^ WARN crate-level attribute should be an inner attribute #[crate_name = "0900"] struct S; //~^ WARN crate-level attribute should be an inner attribute #[crate_name = "0900"] type T = S; //~^ WARN crate-level attribute should be an inner attribute #[crate_name = "0900"] impl S { } //~^ WARN crate-level attribute should be an inner attribute } #[crate_type = "0800"] //~^ WARN crate-level attribute should be an inner attribute mod crate_type { mod inner { #![crate_type="0800"] } //~^ WARN crate-level attribute should be in the root module #[crate_type = "0800"] fn f() { } //~^ WARN crate-level attribute should be an inner attribute #[crate_type = "0800"] struct S; //~^ WARN crate-level attribute should be an inner attribute #[crate_type = "0800"] type T = S; //~^ WARN crate-level attribute should be an inner attribute #[crate_type = "0800"] impl S { } //~^ WARN crate-level attribute should be an inner attribute } #[feature(x0600)] //~^ WARN crate-level attribute should be an inner attribute mod feature { mod inner { #![feature(x0600)] } //~^ WARN 
crate-level attribute should be in the root module #[feature(x0600)] fn f() { } //~^ WARN crate-level attribute should be an inner attribute #[feature(x0600)] struct S; //~^ WARN crate-level attribute should be an inner attribute #[feature(x0600)] type T = S; //~^ WARN crate-level attribute should be an inner attribute #[feature(x0600)] impl S { } //~^ WARN crate-level attribute should be an inner attribute } #[no_main] //~^ WARN crate-level attribute should be an inner attribute mod no_main_1 { mod inner { #![no_main] } //~^ WARN crate-level attribute should be in the root module #[no_main] fn f() { } //~^ WARN crate-level attribute should be an inner attribute #[no_main] struct S; //~^ WARN crate-level attribute should be an inner attribute #[no_main] type T = S; //~^ WARN crate-level attribute should be an inner attribute #[no_main] impl S { } //~^ WARN crate-level attribute should be an inner attribute } #[no_builtins] //~^ WARN crate-level attribute should be an inner attribute mod no_builtins { mod inner { #![no_builtins] } //~^ WARN crate-level attribute should be in the root module #[no_builtins] fn f() { } //~^ WARN crate-level attribute should be an inner attribute #[no_builtins] struct S; //~^ WARN crate-level attribute should be an inner attribute #[no_builtins] type T = S; //~^ WARN crate-level attribute should be an inner attribute #[no_builtins] impl S { } //~^ WARN crate-level attribute should be an inner attribute } #[recursion_limit="0200"] //~^ WARN crate-level attribute should be an inner attribute mod recursion_limit { mod inner { #![recursion_limit="0200"] } //~^ WARN crate-level attribute should be in the root module #[recursion_limit="0200"] fn f() { } //~^ WARN crate-level attribute should be an inner attribute #[recursion_limit="0200"] struct S; //~^ WARN crate-level attribute should be an inner attribute #[recursion_limit="0200"] type T = S; //~^ WARN crate-level attribute should be an inner attribute #[recursion_limit="0200"] impl S { } 
//~^ WARN crate-level attribute should be an inner attribute } #[type_length_limit="0100"] //~^ WARN crate-level attribute should be an inner attribute mod type_length_limit { mod inner { #![type_length_limit="0100"] } //~^ WARN crate-level attribute should be in the root module #[type_length_limit="0100"] fn f() { } //~^ WARN crate-level attribute should be an inner attribute #[type_length_limit="0100"] struct S; //~^ WARN crate-level attribute should be an inner attribute #[type_length_limit="0100"] type T = S; //~^ WARN crate-level attribute should be an inner attribute #[type_length_limit="0100"] impl S { } //~^ WARN crate-level attribute should be an inner attribute } fn main() {}
33.185956
136
0.659522
fb7841fbed54b9ed954317afcaba037356d13053
12,132
// Copyright Materialize, Inc. and contributors. All rights reserved. // // Use of this software is governed by the Business Source License // included in the LICENSE file. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0. //! Hoist projections through operators. //! //! Projections can be re-introduced in the physical planning stage. use std::collections::HashMap; use std::mem; use crate::TransformArgs; use expr::{Id, MirRelationExpr}; /// Hoist projections through operators. #[derive(Debug)] pub struct ProjectionLifting; impl crate::Transform for ProjectionLifting { fn transform( &self, relation: &mut MirRelationExpr, _: TransformArgs, ) -> Result<(), crate::TransformError> { self.action(relation, &mut HashMap::new()); Ok(()) } } impl ProjectionLifting { /// Hoist projections through operators. pub fn action( &self, relation: &mut MirRelationExpr, // Map from names to new get type and projection required at use. gets: &mut HashMap<Id, (repr::RelationType, Vec<usize>)>, ) { match relation { MirRelationExpr::Constant { .. } => {} MirRelationExpr::Get { id, .. 
} => { if let Some((typ, columns)) = gets.get(id) { *relation = MirRelationExpr::Get { id: *id, typ: typ.clone(), } .project(columns.clone()); } } MirRelationExpr::Let { id, value, body } => { self.action(value, gets); let id = Id::Local(*id); if let MirRelationExpr::Project { input, outputs } = &mut **value { let typ = input.typ(); let prior = gets.insert(id, (typ, outputs.clone())); assert!(!prior.is_some()); **value = input.take_dangerous(); } self.action(body, gets); gets.remove(&id); } MirRelationExpr::Project { input, outputs } => { self.action(input, gets); if let MirRelationExpr::Project { input: inner, outputs: inner_outputs, } = &mut **input { for output in outputs.iter_mut() { *output = inner_outputs[*output]; } **input = inner.take_dangerous(); } } MirRelationExpr::Map { input, scalars } => { self.action(input, gets); if let MirRelationExpr::Project { input: inner, outputs, } = &mut **input { // Retain projected columns and scalar columns. let mut new_outputs = outputs.clone(); let inner_arity = inner.arity(); new_outputs.extend(inner_arity..(inner_arity + scalars.len())); // Rewrite scalar expressions using inner columns. for scalar in scalars.iter_mut() { scalar.permute(&new_outputs); } *relation = inner .take_dangerous() .map(scalars.clone()) .project(new_outputs); } } MirRelationExpr::FlatMap { input, func, exprs } => { self.action(input, gets); if let MirRelationExpr::Project { input: inner, outputs, } = &mut **input { // Retain projected columns and scalar columns. let mut new_outputs = outputs.clone(); let inner_arity = inner.arity(); new_outputs.extend(inner_arity..(inner_arity + func.output_arity())); // Rewrite scalar expression using inner columns. 
for expr in exprs.iter_mut() { expr.permute(&new_outputs); } *relation = inner .take_dangerous() .flat_map(func.clone(), exprs.clone()) .project(new_outputs); } } MirRelationExpr::Filter { input, predicates } => { self.action(input, gets); if let MirRelationExpr::Project { input: inner, outputs, } = &mut **input { // Rewrite scalar expressions using inner columns. for predicate in predicates.iter_mut() { predicate.permute(outputs); } *relation = inner .take_dangerous() .filter(predicates.clone()) .project(outputs.clone()); } } MirRelationExpr::Join { inputs, equivalences, implementation, } => { for input in inputs.iter_mut() { self.action(input, gets); } // Track the location of the projected columns in the un-projected join. let mut projection = Vec::new(); let mut temp_arity = 0; for join_input in inputs.iter_mut() { if let MirRelationExpr::Project { input, outputs } = join_input { for output in outputs.iter() { projection.push(temp_arity + *output); } temp_arity += input.arity(); *join_input = input.take_dangerous(); } else { let arity = join_input.arity(); projection.extend(temp_arity..(temp_arity + arity)); temp_arity += arity; } } if projection.len() != temp_arity || (0..temp_arity).any(|i| projection[i] != i) { // Update equivalences and implementation. for equivalence in equivalences.iter_mut() { for expr in equivalence { expr.permute(&projection[..]); } } *implementation = expr::JoinImplementation::Unimplemented; *relation = relation.take_dangerous().project(projection); } } MirRelationExpr::Reduce { input, group_key, aggregates, monotonic: _, expected_group_size: _, } => { // Reduce *absorbs* projections, which is amazing! 
self.action(input, gets); if let MirRelationExpr::Project { input: inner, outputs, } = &mut **input { for key in group_key.iter_mut() { key.permute(outputs); } for aggregate in aggregates.iter_mut() { aggregate.expr.permute(outputs); } **input = inner.take_dangerous(); } } MirRelationExpr::TopK { input, group_key, order_key, limit, offset, monotonic: _, } => { self.action(input, gets); if let MirRelationExpr::Project { input: inner, outputs, } = &mut **input { for key in group_key.iter_mut() { *key = outputs[*key]; } for key in order_key.iter_mut() { key.column = outputs[key.column]; } *relation = inner .take_dangerous() .top_k( group_key.clone(), order_key.clone(), limit.clone(), offset.clone(), ) .project(outputs.clone()); } } MirRelationExpr::Negate { input } => { self.action(input, gets); if let MirRelationExpr::Project { input: inner, outputs, } = &mut **input { *relation = inner.take_dangerous().negate().project(outputs.clone()); } } MirRelationExpr::Threshold { input } => { // We cannot, in general, lift projections out of threshold. // If we could reason that the input cannot be negative, we // would be able to lift the projection, but otherwise our // action on weights need to accumulate the restricted rows. self.action(input, gets); } MirRelationExpr::DeclareKeys { input, .. } => { self.action(input, gets); } MirRelationExpr::Union { base, inputs } => { // We cannot, in general, lift projections out of unions. 
self.action(base, gets); for input in &mut *inputs { self.action(input, gets); } if let MirRelationExpr::Project { input: base_input, outputs: base_outputs, } = &mut **base { let base_typ = base_input.typ(); let mut can_lift = true; for input in &mut *inputs { match input { MirRelationExpr::Project { input, outputs } if input.typ() == base_typ && outputs == base_outputs => {} _ => { can_lift = false; break; } } } if can_lift { let base_outputs = mem::take(base_outputs); **base = base_input.take_dangerous(); for inp in inputs { match inp { MirRelationExpr::Project { input, .. } => { *inp = input.take_dangerous(); } _ => unreachable!(), } } *relation = relation.take_dangerous().project(base_outputs); } } } MirRelationExpr::ArrangeBy { input, keys } => { self.action(input, gets); if let MirRelationExpr::Project { input: inner, outputs, } = &mut **input { for key_set in keys.iter_mut() { for key in key_set.iter_mut() { key.permute(outputs); } } *relation = inner .take_dangerous() .arrange_by(keys) .project(outputs.clone()); } } } } }
38.031348
98
0.415101
56b60c6be4ebe0659868f2bd8882e56f9e33db56
552
use crate::error::prelude::*; use crate::handlers::proof_presentation::verifier::messages::VerifierMessages; use crate::settings; pub mod verifier; mod messages; mod state_machine; mod states; pub fn verify_thread_id(thread_id: &str, message: &VerifierMessages) -> VcxResult<()> { if !settings::indy_mocks_enabled() && !message.thread_id_matches(thread_id) { return Err(VcxError::from_msg(VcxErrorKind::InvalidJson, format!("Cannot handle message {:?}: thread id does not match, expected {:?}", message, thread_id))); }; Ok(()) }
34.5
166
0.719203
7127bee245717feb0bd8fdac6c1aadb477b3a409
761
#![allow( clippy::unneeded_field_pattern, clippy::same_item_push, clippy::unknown_clippy_lints )] #[macro_use] mod display_object; #[macro_use] extern crate smallvec; #[macro_use] extern crate downcast_rs; #[macro_use] mod avm1; mod avm2; mod bounding_box; mod character; mod collect; pub mod color_transform; pub mod context; mod drawing; mod ecma_conversions; pub mod events; mod font; mod html; mod library; pub mod loader; mod player; mod prelude; pub mod property_map; pub mod shape_utils; pub mod string_utils; pub mod tag_utils; mod transform; mod types; mod vminterface; mod xml; pub mod backend; pub mod external; pub use chrono; pub use events::PlayerEvent; pub use indexmap; pub use player::Player; pub use swf; pub use swf::Color;
14.921569
35
0.760841
56b8ed3d61fa4716241ce5ea616e578bb9a01a65
82
//! Amazon EC2 Container Service include!(concat!(env!("OUT_DIR"), "/ecs.rs"));
16.4
46
0.646341
e8a40b4e74f7e1bde36626894648ea8b54cc13c3
16,463
use architecture::*; use goblin; use loader::*; use memory::backing::Memory; use std::collections::BTreeMap; use std::fs::File; use std::io::Read; use std::path::{Path, PathBuf}; /// The address where the first library will be loaded const DEFAULT_LIB_BASE: u64 = 0x4000_0000; /// The step in address between where we will load libraries. const LIB_BASE_STEP: u64 = 0x0200_0000; // Some MIPS-specific DT entries. This will eventually land in Goblin. const DT_MIPS_LOCAL_GOTNO: u64 = 0x7000000a; const DT_MIPS_GOTSYM: u64 = 0x70000013; const DT_MIPS_SYMTABNO: u64 = 0x70000011; /// Loader which links together multiple Elf files. /// /// Can do some rudimentary linking of binaries. #[derive(Debug)] pub struct ElfLinker { /// The filename (path included) of the file we're loading. filename: PathBuf, /// A mapping from lib name (for example `libc.so.6`) to Elf. loaded: BTreeMap<String, Elf>, /// The current memory mapping. memory: Memory, /// A mapping of function symbol names to addresses symbols: BTreeMap<String, u64>, /// The address we will place the next library at. next_lib_address: u64, /// Functions as specified by the user user_functions: Vec<u64>, /// If set, we will do relocations as we link do_relocations: bool } impl ElfLinker { /// Takes a path to an Elf and loads the Elf, its dependencies, and links /// them together. pub fn new(filename: &Path, do_relocations: bool) -> Result<ElfLinker> { let mut file = File::open(filename)?; let mut buf = Vec::new(); file.read_to_end(&mut buf)?; // get the endianness of this elf for the memory model let mut endian = Endian::Big; if let goblin::Object::Elf(elf_peek) = goblin::Object::parse(&buf)? 
{ if elf_peek.header.endianness()?.is_little() { endian = Endian::Little; } } else { bail!(format!("{} was not an Elf", filename.to_str().unwrap())); } let mut elf_linker = ElfLinker { filename: filename.to_owned(), loaded: BTreeMap::new(), memory: Memory::new(endian), symbols: BTreeMap::new(), next_lib_address: DEFAULT_LIB_BASE, user_functions: Vec::new(), do_relocations }; elf_linker.load_elf(filename, 0)?; Ok(elf_linker) } /// Get the ELFs loaded and linked in this loader pub fn loaded(&self) -> &BTreeMap<String, Elf> { &self.loaded } /// Get the filename of the ELF we're loading pub fn filename(&self) -> &Path { &self.filename } /// Takes the path to an Elf, and a base address the Elf should be loaded /// at. Loads the Elf, all it's dependencies (DT_NEEDED), and then handles /// the supported relocations. pub fn load_elf(&mut self, filename: &Path, base_address: u64) -> Result<()> { // Does this file exist in the same directory as the original file? let mut base_path = match self.filename.as_path().parent() { Some(base_path) => base_path.to_path_buf(), None => PathBuf::new() }; base_path.push(filename); let filename = if base_path.exists() { &base_path } else { filename }; info!("Loading {} with base_address 0x{:x}", filename.to_str().unwrap(), base_address); let elf = Elf::from_file_with_base_address(filename, base_address)?; // Update our memory map based on what's in the Elf for (address, section) in elf.memory()?.sections() { self.memory.set_memory(*address, section.data().to_owned(), section.permissions()); } // Add this Elf to the loaded Elfs let filename = filename.file_name() .unwrap() .to_str() .unwrap() .to_string(); self.loaded.insert(filename.clone(), elf); { let ref elf = self.loaded[&filename]; // Add its exported symbols to our symbols for symbol in elf.exported_symbols() { if self.symbols.get(symbol.name()).is_some() { continue; } self.symbols.insert( symbol.name().to_string(), elf.base_address() + symbol.address() ); } } // Ensure all shared 
objects we rely on are loaded for so_name in self.loaded[&filename].dt_needed()?.clone() { if self.loaded.get(&so_name).is_none() { self.next_lib_address += LIB_BASE_STEP; let next_lib_address = self.next_lib_address; self.load_elf(Path::new(&so_name), next_lib_address)?; } } if self.do_relocations { match self.loaded[&filename].elf().header.e_machine { goblin::elf::header::EM_386 => self.relocations_x86(&filename)?, goblin::elf::header::EM_MIPS => self.relocations_mips(&filename)?, _ => bail!("relocations unsupported for target architecture") } } Ok(()) } /// Get the `Elf` for the primary elf loaded. pub fn get_elf(&self) -> Result<&Elf> { let loaded = self.loaded(); let filename = self.filename() .file_name() .and_then(|filename| filename.to_str()) .ok_or("Could not get filename for ElfLinker's primary program")?; let elf = loaded.get(filename) .ok_or(format!("Could not get {} from ElfLinker", filename))?; Ok(elf) } /// If the primary `Elf` we're loading has an interpreter designated in its /// dynamic sectino, get the `Elf` for the interpreter. pub fn get_interpreter(&self) -> Result<Option<&Elf>> { let elf = self.get_elf()?; let interpreter_elf = match elf.elf().interpreter { Some(interpreter_filename) => { let interpreter_filename = Path::new(interpreter_filename) .file_name() .and_then(|filename| filename.to_str()) .ok_or(format!("Failed to get filename portion of interpreter filename"))?; Some(self.loaded().get(interpreter_filename) .ok_or(format!("Could not find interpreter {}", interpreter_filename))?) 
} None => None }; Ok(interpreter_elf) } /// Perform x86-specific relocations fn relocations_x86(&mut self, filename: &str) -> Result<()> { // Process relocations let ref elf = self.loaded[filename]; let dynsyms = elf.elf().dynsyms; let dynstrtab = elf.elf().dynstrtab; for reloc in elf.elf().dynrelas.iter().chain( elf.elf().dynrels.iter().chain( elf.elf().pltrelocs.iter())) { match reloc.r_type { goblin::elf::reloc::R_386_32 => { let ref sym = dynsyms.get(reloc.r_sym) .expect("Unable to resolve relocation symbol"); let sym_name = &dynstrtab[sym.st_name]; let value = match self.symbols.get(sym_name) { Some(v) => v.to_owned() as u32, None => bail!("Could not resolve symbol {}", sym_name) }; self.memory.set32( reloc.r_offset as u64 + elf.base_address(), value )?; } goblin::elf::reloc::R_386_GOT32 => { bail!("R_386_GOT32"); }, goblin::elf::reloc::R_386_PLT32 => { let ref sym = dynsyms.get(reloc.r_sym) .expect("Unable to resolve relocation symbol"); let sym_name = &dynstrtab[sym.st_name]; bail!("R_386_PLT32 {:?}:0x{:x}:{}", self.filename, reloc.r_offset, sym_name); }, goblin::elf::reloc::R_386_COPY => { bail!("R_386_COPY"); }, goblin::elf::reloc::R_386_GLOB_DAT => { let ref sym = dynsyms.get(reloc.r_sym) .expect("Unable to resolve relocation symbol"); let sym_name = &dynstrtab[sym.st_name]; let value = match self.symbols.get(sym_name) { Some(v) => v.to_owned() as u32, None => { warn!("Could not resolve symbol {}", sym_name); continue } }; self.memory.set32( reloc.r_offset as u64 + elf.base_address(), value )?; }, goblin::elf::reloc::R_386_JMP_SLOT => { let ref sym = dynsyms.get(reloc.r_sym) .expect("Unable to resolve relocation symbol"); let sym_name = &dynstrtab[sym.st_name]; let value = match self.symbols.get(sym_name) { Some(v) => v.to_owned() as u32, None => bail!("Could not resolve symbol {}", sym_name) }; self.memory.set32( reloc.r_offset as u64 + elf.base_address(), value )?; }, goblin::elf::reloc::R_386_RELATIVE => { let value = self.memory.get32(reloc.r_offset 
as u64 + elf.base_address()); let value = match value { Some(value) => elf.base_address() as u32 + value, None => bail!("Invalid address for R_386_RELATIVE {:?}:{:x}", self.filename, reloc.r_offset) }; self.memory.set32(reloc.r_offset as u64 + elf.base_address(), value)?; }, goblin::elf::reloc::R_386_GOTPC => { bail!("R_386_GOT_PC"); }, goblin::elf::reloc::R_386_TLS_TPOFF => { warn!("Ignoring R_386_TLS_TPOFF Relocation"); }, goblin::elf::reloc::R_386_IRELATIVE => { warn!("R_386_IRELATIVE {:?}:0x{:x} going unprocessed", self.filename, reloc.r_offset); } _ => bail!("unhandled relocation type {}", reloc.r_type) } } Ok(()) } /// Perform MIPS-specific relocations fn relocations_mips(&mut self, filename: &str) -> Result<()> { let elf = &self.loaded[filename]; fn get_dynamic(elf: &Elf, tag: u64) -> Option<u64> { elf.elf().dynamic.and_then(|dynamic| dynamic.dyns .iter() .find(|dyn| dyn.d_tag == tag) .map(|dyn| dyn.d_val)) } // The number of local GOT entries. Also an index into the GOT // for the first external GOT entry. let local_gotno = get_dynamic(elf, DT_MIPS_LOCAL_GOTNO) .ok_or("Could not get DT_MIPS_LOCAL_GOTNO")?; // Index of the first dynamic symbol table entry that corresponds // to an entry in the GOT. 
let gotsym = get_dynamic(elf, DT_MIPS_GOTSYM) .ok_or("Could not get DT_MIPS_GOTSYM")?; // The number of entries in the dynamic symbol table let symtabno = get_dynamic(elf, DT_MIPS_SYMTABNO) .ok_or("Could not get DT_MIPS_SYMTABNO")?; // The address of the GOT section let pltgot = get_dynamic(elf, goblin::elf::dyn::DT_PLTGOT) .ok_or("Could not get DT_PLTGOT")?; // Start by adding the base address to all entries in the GOT for i in 0..(local_gotno + (symtabno - gotsym)) { let address = elf.base_address() + (i * 4) + pltgot; let value = self.memory.get32(address) .ok_or(format!("Could not get memory at address 0x{:x} for adding base address", address))?; self.memory.set32(address, value.wrapping_add(elf.base_address() as u32))?; } let dynstrtab = elf.elf().dynstrtab; let dynsyms = elf.elf().dynsyms; let mut address = pltgot + elf.base_address() + (local_gotno * 4); for i in gotsym..(symtabno) { let sym = dynsyms.get(i as usize) .ok_or(format!("Could not get symbol {}", i))?; let symbol_name = dynstrtab.get(sym.st_name) .ok_or(format!("Could not get symbol name for {}", i))??; // Internal entries have already been relocated, so we only need to // relocate external entries if sym.st_shndx == 0 { if let Some(value) = self.symbols.get(symbol_name) { self.memory.set32(address, *value as u32)?; if symbol_name == "_rtld_global" { println!("0x{:08x} symbol {} 0x{:08x} {:02} {} 0x{:x}", address, i, sym.st_value, sym.st_shndx, symbol_name, value); } } else { format!("Could not get symbol with name: \"{}\"", symbol_name); } } address += 4; } // handle all relocation entries for dynrel in elf.elf().dynrels { if dynrel.r_type == goblin::elf::reloc::R_MIPS_REL32 { let value = self.memory .get32(dynrel.r_offset + elf.base_address()) .ok_or(format!("Could not load R_MIPS_REL32 at 0x{:x}", dynrel.r_offset + elf.base_address()))?; self.memory.set32( dynrel.r_offset + elf.base_address(), value + (elf.base_address() as u32) )?; } } Ok(()) } /// Inform the linker of a function at the given 
address. /// /// This function will be added to calls to `function_entries` and will be automatically /// lifted when calling `to_program`. pub fn add_user_function(&mut self, address: u64) { self.user_functions.push(address); } } impl Loader for ElfLinker { fn memory(&self) -> Result<Memory> { Ok(self.memory.clone()) } fn function_entries(&self) -> Result<Vec<FunctionEntry>> { let mut function_entries = Vec::new(); for loaded in &self.loaded { // let fe = loaded.1.function_entries()?; // for e in &fe { // println!("{} 0x{:x}", loaded.0, e.address()); // } function_entries.append(&mut loaded.1.function_entries()?); } for address in &self.user_functions { function_entries.push(FunctionEntry::new(*address, None)); } Ok(function_entries) } // TODO Just maybe a bit too much unwrapping here. fn program_entry(&self) -> u64 { let filename = self.filename .as_path() .file_name() .unwrap() .to_str() .unwrap(); self.loaded[filename].program_entry() } fn architecture(&self) -> &Architecture { let filename = self.filename .as_path() .file_name() .unwrap() .to_str() .unwrap(); self.loaded[filename].architecture() } }
37.246606
99
0.501913
229c6bb4f64c741f4a8a799b914f285fdf040cdf
38,913
//! The `SegmentAccountant` is an allocator for equally- //! sized chunks of the underlying storage file (segments). //! //! It must maintain these critical safety properties: //! //! A. We must not overwrite existing segments when they //! contain the most-recent stable state for a page. //! B. We must not overwrite existing segments when active //! threads may have references to `LogOffset`'s that point //! into those segments. //! //! To complicate matters, the `PageCache` only knows //! when it has put a page into an IO buffer, but it //! doesn't keep track of when that IO buffer is //! stabilized (until write coalescing is implemented). //! //! To address these safety concerns, we rely on //! these techniques: //! //! 1. We delay the reuse of any existing segment //! by ensuring that we never deactivate a //! segment until all data written into it, as //! well as all data written to earlier segments, //! has been written to disk and fsynced. //! 2. we use a `epoch::Guard::defer()` from //! `IoBufs::write_to_log` that guarantees //! that we defer all segment deactivation //! until all threads are finished that //! may have witnessed pointers into a segment //! that will be marked for reuse in the future. //! //! Another concern that arises due to the fact that //! IO buffers may be written out-of-order is the //! correct recovery of segments. If there is data //! loss in recently written segments, we must be //! careful to preserve linearizability in the log. //! To do this, we must detect "torn segments" that //! were not able to be fully written before a crash //! happened. //! //! But what if we wrote a later segment before we //! were able to write its immediate predecessor segment, //! and then a crash happened? We must preserve //! linearizability, so we must not recover the later //! segment when its predecessor was lost in the crash. //! //! 3. This case is solved again by having a concept of //! an "unstable tail" of segments that, during recovery, //! 
must appear consecutively among the recovered //! segments with the highest LSN numbers. We //! prevent reuse of segments while they remain in //! this "unstable tail" by only allowing them to be //! reallocated after another later segment has written //! a "stable consecutive lsn" into its own header //! that is higher than ours. #![allow(unused_results)] use std::mem; use super::PageState; use crate::pagecache::*; use crate::*; /// The segment accountant keeps track of the logical blocks /// of storage. It scans through all segments quickly during /// recovery and attempts to locate torn segments. #[derive(Debug)] pub(crate) struct SegmentAccountant { // static or one-time set config: RunningConfig, // TODO these should be sharded to improve performance segments: Vec<Segment>, clean_counter: usize, // TODO put behind a single mutex // NB MUST group pause_rewriting with ordering // and free! free: VecSet<LogOffset>, tip: LogOffset, max_stabilized_lsn: Lsn, to_clean: VecSet<LogOffset>, pause_rewriting: bool, ordering: BTreeMap<Lsn, LogOffset>, async_truncations: BTreeMap<LogOffset, OneShot<Result<()>>>, } /// A `Segment` holds the bookkeeping information for /// a contiguous block of the disk. It may contain many /// fragments from different pages. Over time, we track /// when segments become reusable and allow them to be /// overwritten for new data. #[derive(Default, Clone, Debug, PartialEq)] struct Segment { present: FastSet8<PageId>, // a copy of present that lets us make decisions // about draining without waiting for // deferred_replacements to take effect during // segment deactivation. 
not_yet_replaced: FastSet8<PageId>, deferred_rm_blob: FastSet8<BlobPointer>, // set of pages that we replaced from other segments deferred_replacements: FastSet8<(PageId, SegmentId)>, lsn: Option<Lsn>, state: SegmentState, } #[derive(Debug, Copy, Eq, Hash, Ord, PartialOrd, PartialEq, Clone)] pub(crate) enum SegmentState { /// the segment is marked for reuse, should never receive /// new pids, Free, /// the segment is being written to or actively recovered, and /// will have pages assigned to it Active, /// the segment is no longer being written to or recovered, and /// will have pages marked as relocated from it Inactive, /// the segment is having its resident pages relocated before /// becoming free Draining, } use self::SegmentState::{Active, Draining, Free, Inactive}; impl Default for SegmentState { fn default() -> Self { Free } } impl Segment { fn len(&self) -> usize { self.present.len() } fn is_free(&self) -> bool { self.state == Free } fn is_inactive(&self) -> bool { self.state == Inactive } fn _is_active(&self) -> bool { self.state == Active } fn is_draining(&self) -> bool { self.state == Draining } fn free_to_active(&mut self, new_lsn: Lsn) { trace!( "setting Segment to Active with new lsn {:?}, was {:?}", new_lsn, self.lsn ); assert_eq!(self.state, Free); self.present.clear(); self.not_yet_replaced.clear(); self.deferred_rm_blob.clear(); self.deferred_replacements.clear(); self.lsn = Some(new_lsn); self.state = Active; } /// Transitions a segment to being in the `Inactive` state. 
/// Returns the set of page replacements that happened /// while this Segment was Active fn active_to_inactive( &mut self, lsn: Lsn, from_recovery: bool, config: &Config, ) -> Result<FastSet8<(PageId, usize)>> { trace!("setting Segment with lsn {:?} to Inactive", self.lsn()); assert!( self.state == Active || self.state == Draining, "segment {} should have been \ Active or Draining, before deactivating, but was {:?}", self.lsn(), self.state ); if from_recovery { assert!(lsn >= self.lsn()); } else { assert_eq!(self.lsn(), lsn); } self.state = Inactive; // now we can push any deferred blob removals to the removed set let deferred_rm_blob = mem::replace(&mut self.deferred_rm_blob, FastSet8::default()); for ptr in deferred_rm_blob { trace!( "removing blob {} while transitioning \ segment lsn {:?} to Inactive", ptr, self.lsn, ); remove_blob(ptr, config)?; } let deferred_replacements = mem::replace(&mut self.deferred_replacements, FastSet8::default()); Ok(deferred_replacements) } fn inactive_to_draining(&mut self, lsn: Lsn) { trace!("setting Segment with lsn {:?} to Draining", self.lsn()); assert_eq!( self.state, Inactive, "segment with lsn {:?} should have been \ Inactive before draining", self.lsn ); assert!(lsn >= self.lsn()); self.state = Draining; } fn draining_to_free(&mut self, lsn: Lsn) { trace!("setting Segment with lsn {:?} to Free", self.lsn()); assert!(self.is_draining()); assert!(lsn >= self.lsn()); self.present.clear(); self.not_yet_replaced.clear(); self.state = Free; } fn recovery_ensure_initialized(&mut self, lsn: Lsn) { if let Some(current_lsn) = self.lsn { if current_lsn != lsn { trace!("(snapshot) recovering segment with base lsn {}", lsn); assert!(lsn > current_lsn); self.state = Free; self.free_to_active(lsn); } } else { trace!("(snapshot) recovering segment with base lsn {}", lsn); self.free_to_active(lsn); } } fn lsn(&self) -> Lsn { self.lsn.unwrap() } /// Add a pid to the Segment. The caller must provide /// the Segment's LSN. 
fn insert_pid(&mut self, pid: PageId, lsn: Lsn) { assert_eq!(lsn, self.lsn()); // if this breaks, maybe we didn't implement the transition // logic right in write_to_log, and maybe a thread is // using the SA to add pids AFTER their calls to // res.complete() worked. assert_eq!( self.state, Active, "expected segment with lsn {} to be Active", lsn ); self.not_yet_replaced.insert(pid); self.present.insert(pid); } /// Mark that a pid in this Segment has been relocated. /// The caller must provide the LSN of the removal. fn remove_pid(&mut self, pid: PageId, lsn: Lsn, in_recovery: bool) { assert!(lsn >= self.lsn()); match self.state { Active => { // we have received a removal before // transferring this segment to Inactive. // This should have been deferred by the // segment that actually replaced this, // and if we're still Active, something is // wrong. if !in_recovery { panic!("remove_pid called on Active segment"); } assert!( !self.present.contains(&pid), "did not expect present to contain pid {} during recovery", pid, ); } Inactive | Draining => { self.present.remove(&pid); } Free => panic!("remove_pid called on a Free Segment"), } } fn defer_replace_pids( &mut self, deferred: FastSet8<(PageId, SegmentId)>, lsn: Lsn, ) { assert!(lsn >= self.lsn()); self.deferred_replacements.extend(deferred); } fn remove_blob( &mut self, blob_ptr: BlobPointer, config: &Config, ) -> Result<()> { match self.state { Active => { // we have received a removal before // transferring this segment to Inactive, so // we defer this pid's removal until the transfer. 
                self.deferred_rm_blob.insert(blob_ptr);
            }
            Inactive | Draining => {
                // No new references can be taken on a segment in these
                // states, so the blob file can be removed right away.
                trace!(
                    "directly removing blob {} that was referred-to \
                     in a segment that has already been marked as Inactive \
                     or Draining.",
                    blob_ptr,
                );
                remove_blob(blob_ptr, config)?;
            }
            Free => panic!("remove_blob called on a Free Segment"),
        }

        Ok(())
    }

    // The live percentage between 0 and 100
    //
    // NOTE(review): both `total` and `live` are computed from
    // `self.present.len()`, so this returns 100 for every non-empty
    // segment — confirm whether `live` was meant to exclude
    // not-yet-replaced or removed pages.
    fn live_pct(&self) -> u8 {
        let total = self.present.len();
        if total == 0 {
            return 100;
        }

        let live = self.present.len() * 100 / total;
        assert!(live <= 100);
        u8::try_from(live).unwrap()
    }

    // A segment may be reused only once it is Draining and no longer
    // holds any live pages.
    fn can_free(&self) -> bool {
        self.state == Draining && self.is_empty()
    }

    fn is_empty(&self) -> bool {
        self.present.is_empty()
    }
}

impl SegmentAccountant {
    /// Create a new `SegmentAccountant` from previously recovered segments.
    pub(super) fn start(
        config: RunningConfig,
        snapshot: &Snapshot,
    ) -> Result<Self> {
        let mut ret = Self {
            config,
            segments: vec![],
            clean_counter: 0,
            free: VecSet::default(),
            tip: 0,
            max_stabilized_lsn: -1,
            to_clean: VecSet::default(),
            pause_rewriting: false,
            ordering: BTreeMap::default(),
            async_truncations: BTreeMap::default(),
        };

        if let SegmentMode::Linear = ret.config.segment_mode {
            // this is a hack to prevent segments from being overwritten
            // when operating without a `PageCache`
            ret.pause_rewriting();
        }

        ret.initialize_from_snapshot(snapshot)?;

        Ok(ret)
    }

    // Rebuild one `Segment` (plus its recovered byte count) per segment
    // slot in the file, by replaying the snapshot's page table entries
    // into their owning segments.
    fn initial_segments(
        &self,
        snapshot: &Snapshot,
    ) -> Result<(Vec<Segment>, Vec<u64>)> {
        let segment_size = self.config.segment_size;
        let file_len = self.config.file.metadata()?.len();
        let empty_snapshot = snapshot.pt.is_empty();
        // A trailing partial segment only counts if it is large enough
        // to hold at least a segment header.
        let number_of_segments =
            usize::try_from(file_len / segment_size as u64).unwrap()
                + if empty_snapshot
                    || file_len % u64::try_from(segment_size).unwrap()
                        < u64::try_from(SEG_HEADER_LEN).unwrap()
                {
                    0
                } else {
                    1
                };

        if empty_snapshot {
            assert_eq!(number_of_segments, 0);
        }

        // generate segments from snapshot lids
        let mut segments = vec![Segment::default(); number_of_segments];
        let mut segment_sizes = vec![0_u64; number_of_segments];

        // Record that `pid`'s state at `lsn` lives at `lid`, initializing
        // the owning segment if this is the first page seen for it.
        let mut add =
            |pid, lsn, sz, lid: LogOffset, segments: &mut Vec<Segment>| {
                let idx = assert_usize(lid / segment_size as LogOffset);
                trace!(
                    "adding lsn: {} lid: {} for pid {} to segment {} during SA recovery",
                    lsn,
                    lid,
                    pid,
                    idx
                );
                let segment_lsn = lsn / segment_size as Lsn * segment_size as Lsn;
                segments[idx].recovery_ensure_initialized(segment_lsn);
                segments[idx].insert_pid(pid, segment_lsn);
                segment_sizes[idx] += sz;
            };

        for (pid, state) in &snapshot.pt {
            match state {
                PageState::Present(coords) => {
                    for (lsn, ptr, sz) in coords {
                        add(*pid, lsn, *sz, ptr.lid(), &mut segments);
                    }
                }
                PageState::Free(lsn, ptr) => {
                    // Free pages still occupy a (maximum-size) message
                    // header in the log.
                    add(
                        *pid,
                        lsn,
                        u64::try_from(MAX_MSG_HEADER_LEN).unwrap(),
                        ptr.lid(),
                        &mut segments,
                    );
                }
            }
        }

        Ok((segments, segment_sizes))
    }

    // Populate `segments`, `ordering`, `free`, `to_clean`, and `tip`
    // from the recovered snapshot, freeing or draining segments whose
    // contents have already been superseded.
    fn initialize_from_snapshot(&mut self, snapshot: &Snapshot) -> Result<()> {
        let segment_size = self.config.segment_size;
        let (mut segments, segment_sizes) = self.initial_segments(snapshot)?;

        let currently_active_segment = {
            // this logic allows us to free the last
            // active segment if it was empty.
            let prospective_currently_active_segment =
                usize::try_from(snapshot.last_lid / segment_size as LogOffset)
                    .unwrap();
            if let Some(segment) =
                segments.get(prospective_currently_active_segment)
            {
                if segment.is_empty() {
                    // we want to add this to the free list below,
                    // so don't skip freeing it for being active
                    usize::max_value()
                } else {
                    prospective_currently_active_segment
                }
            } else {
                // segment was not used yet
                usize::max_value()
            }
        };

        let cleanup_threshold =
            u64::from(self.config.segment_cleanup_threshold);
        let drain_sz =
            u64::try_from(segment_size).unwrap() * 100 / cleanup_threshold;

        for (idx, segment) in segments.iter_mut().enumerate() {
            let segment_base = idx as LogOffset * segment_size as LogOffset;

            if segment_base >= self.tip {
                // set tip above the beginning of any
                self.tip = segment_base + segment_size as LogOffset;
                trace!(
                    "raised self.tip to {} during SA initialization",
                    self.tip
                );
            }

            let segment_lsn = if let Some(lsn) = segment.lsn {
                lsn
            } else {
                // this segment was not used in the recovered
                // snapshot, so we can assume it is free
                let idx = self.segment_id(segment_base);
                self.segments[idx].state = Free;
                self.free_segment(segment_base, false);
                continue;
            };

            // Only segments that are fully below the stable header LSN
            // (and not the active tail) are eligible for free/drain.
            if idx != currently_active_segment
                && segment_lsn + segment_size as Lsn
                    <= snapshot.max_header_stable_lsn
            {
                if segment_sizes[idx] == 0 {
                    // can free
                    trace!(
                        "freeing segment with lid {} during SA initialization",
                        segment_base
                    );
                    if self.tip == segment_base + segment_size as LogOffset {
                        // The empty segment is at the very end of the
                        // file, so just pull the tip back instead of
                        // putting it on the free list.
                        self.tip -= segment_size as LogOffset;
                    } else {
                        segment.state = Free;
                        self.free_segment(segment_base, true);
                    }
                } else if segment_sizes[idx] <= drain_sz {
                    trace!(
                        "SA draining segment at {} during startup \
                         with size {} being < drain size of {}",
                        segment_base,
                        segment_sizes[idx],
                        drain_sz
                    );
                    segment.state = Draining;
                    self.to_clean.insert(segment_base);
                }
            }
        }

        trace!("initialized self.segments to {:?}", segments);
        self.segments = segments;

        // Index every initialized segment by its LSN so that ordered
        // iteration and deactivation can find it.
        self.ordering = self
            .segments
            .iter()
            .enumerate()
            .filter_map(|(id, s)| {
                if s.lsn.is_some() {
                    Some((s.lsn(), id as LogOffset * segment_size as LogOffset))
                } else {
                    None
                }
            })
            .collect();
        trace!("initialized self.ordering to {:?}", self.ordering);

        Ok(())
    }

    // Put `lid` on the free list. `in_recovery` additionally drops the
    // segment's LSN mapping immediately (safe only during recovery —
    // see the comment below).
    fn free_segment(&mut self, lid: LogOffset, in_recovery: bool) {
        debug!("freeing segment {}", lid);
        debug!("free list before free {:?}", self.free);

        let idx = self.segment_id(lid);
        assert!(
            self.tip > lid,
            "freed a segment above our current file tip, \
             please report this bug!"
        );
        assert_eq!(self.segments[idx].state, Free);
        assert!(!self.free.contains(&lid), "double-free of a segment occurred");

        if in_recovery {
            // We only want to immediately remove the segment
            // mapping if we're in recovery because otherwise
            // we may be acting on updates relating to things
            // in IO buffers, before they have been flushed.
            // The latter will be removed from the mapping
            // before being reused, in the next() method.
            if let Some(old_lsn) = self.segments[idx].lsn {
                trace!(
                    "removing segment {} with lsn {} from ordering",
                    lid,
                    old_lsn
                );
                self.ordering.remove(&old_lsn);
            }
        }

        self.free.insert(lid);
    }

    /// Causes all new allocations to occur at the end of the file, which
    /// is necessary to preserve consistency while concurrently iterating
    /// through the log during snapshot creation.
    pub(super) fn pause_rewriting(&mut self) {
        self.pause_rewriting = true;
    }

    /// Re-enables segment rewriting after iteration is complete.
    pub(super) fn resume_rewriting(&mut self) {
        // we never want to resume segment rewriting in Linear mode
        if self.config.segment_mode != SegmentMode::Linear {
            self.pause_rewriting = false;
        }
    }

    /// Called by the `PageCache` when a page has been rewritten completely.
    /// We mark all of the old segments that contained the previous state
    /// from the page, and if the old segments are empty or clear enough to
    /// begin accelerated cleaning we mark them as so.
    pub(super) fn mark_replace(
        &mut self,
        pid: PageId,
        lsn: Lsn,
        old_ptrs: Vec<DiskPtr>,
        new_ptr: DiskPtr,
    ) -> Result<()> {
        let _measure = Measure::new(&M.accountant_mark_replace);

        trace!(
            "mark_replace pid {} from ptrs {:?} to ptr {} with lsn {}",
            pid,
            old_ptrs,
            new_ptr,
            lsn
        );

        let new_idx = self.segment_id(new_ptr.lid());

        // make sure we're not actively trying to replace the destination
        let new_segment_start =
            new_idx as LogOffset * self.config.segment_size as LogOffset;

        assert!(!self.to_clean.contains(&new_segment_start));

        // Do we need to schedule any blob cleanups?
        // Not if we just moved the pointer without changing
        // the underlying blob, as is the case with a single Blob
        // with nothing else.
        let schedule_rm_blob = !(old_ptrs.len() == 1 && old_ptrs[0].is_blob());

        let mut deferred_replacements = FastSet8::default();

        for old_ptr in old_ptrs {
            let old_lid = old_ptr.lid();

            if schedule_rm_blob && old_ptr.is_blob() {
                trace!(
                    "queueing blob removal for {} in our own segment",
                    old_ptr
                );
                self.segments[new_idx]
                    .remove_blob(old_ptr.blob().1, &self.config)?;
            }

            let old_idx = self.segment_id(old_lid);

            if new_idx == old_idx {
                // we probably haven't flushed this segment yet, so don't
                // mark the pid as being removed from it
                continue;
            }

            if self.segments[old_idx].lsn() > lsn {
                // has been replaced after this call already,
                // quite a big race happened
                panic!(
                    "mark_replace called on previous version of segment. \
                     this means it was reused while other threads still \
                     had references to it."
                );
            }

            if self.segments[old_idx].state == Free {
                // this segment is already reused
                panic!(
                    "mark_replace called on Free segment with lid {}. \
                     this means it was dropped while other threads still had \
                     references to it.",
                    old_idx * self.config.segment_size
                );
            }

            self.segments[old_idx].not_yet_replaced.remove(&pid);

            deferred_replacements.insert((pid, old_idx));
        }

        // The removals from the old segments are deferred until the new
        // segment is deactivated, so that the data is durable first.
        self.segments[new_idx].defer_replace_pids(deferred_replacements, lsn);

        self.mark_link(pid, lsn, new_ptr);

        Ok(())
    }

    // Transition the segment at `idx` toward reuse: Inactive -> Draining
    // when it is drainable, and Draining -> Free once it holds no pages.
    fn possibly_clean_or_free_segment(&mut self, idx: usize, lsn: Lsn) {
        let can_drain = segment_is_drainable(
            idx,
            self.segments.len(),
            self.segments[idx].live_pct(),
            self.segments[idx].len(),
            &self.config,
        ) && self.segments[idx].is_inactive();

        let segment_start = (idx * self.config.segment_size) as LogOffset;

        if can_drain {
            // can be cleaned
            trace!(
                "SA inserting {} into to_clean from possibly_clean_or_free_segment",
                segment_start
            );
            self.segments[idx].inactive_to_draining(lsn);
            self.to_clean.insert(segment_start);
        }

        if self.segments[idx].can_free() {
            // can be reused immediately
            self.segments[idx].draining_to_free(lsn);
            self.to_clean.remove(&segment_start);
            trace!(
                "freed segment {} in possibly_clean_or_free_segment",
                segment_start
            );
            self.free_segment(segment_start, false);
        }
    }

    /// Called by the `PageCache` to find pages that are in
    /// segments eligible for cleaning that it should
    /// try to rewrite elsewhere.
    pub(super) fn clean(
        &mut self,
        ignore_pid: Option<PageId>,
    ) -> Option<PageId> {
        // Rotate through cleanable segments via clean_counter so that
        // repeated calls don't hammer a single segment.
        let seg_offset = if self.to_clean.is_empty() || self.to_clean.len() == 1
        {
            0
        } else {
            self.clean_counter % self.to_clean.len()
        };

        let item = self.to_clean.get(seg_offset).cloned();
        if let Some(lid) = item {
            let idx = self.segment_id(lid);
            let segment = &self.segments[idx];
            assert!(segment.state == Draining || segment.state == Inactive);

            let present = &segment.not_yet_replaced;

            if present.is_empty() {
                // This could legitimately be empty if it's completely
                // filled with failed flushes.
                return None;
            }

            self.clean_counter += 1;

            // Likewise rotate through the segment's remaining pages.
            let offset = if present.len() == 1 {
                0
            } else {
                self.clean_counter % present.len()
            };

            let pid = present.iter().nth(offset).unwrap();
            if Some(*pid) == ignore_pid {
                return None;
            }

            trace!("telling caller to clean {} from segment at {}", pid, lid,);

            return Some(*pid);
        }

        None
    }

    /// Called from `PageCache` when some state has been added
    /// to a logical page at a particular offset. We ensure the
    /// page is present in the segment's page set.
    pub(super) fn mark_link(&mut self, pid: PageId, lsn: Lsn, ptr: DiskPtr) {
        let _measure = Measure::new(&M.accountant_mark_link);

        trace!("mark_link pid {} at ptr {}", pid, ptr);

        let idx = self.segment_id(ptr.lid());

        // make sure we're not actively trying to replace the destination
        let new_segment_start =
            idx as LogOffset * self.config.segment_size as LogOffset;

        assert!(!self.to_clean.contains(&new_segment_start));

        let segment = &mut self.segments[idx];

        let segment_lsn = lsn / self.config.segment_size as Lsn
            * self.config.segment_size as Lsn;

        // a race happened, and our Lsn does not apply anymore
        assert_eq!(
            segment.lsn(),
            segment_lsn,
            "segment somehow got reused by the time a link was \
             marked on it. expected lsn: {} actual: {}",
            segment_lsn,
            segment.lsn()
        );

        segment.insert_pid(pid, segment_lsn);
    }

    // Deactivate every segment whose LSN range is entirely at or below
    // the newly stable LSN (exclusive of the previous high-water mark).
    pub(super) fn stabilize(&mut self, stable_lsn: Lsn) -> Result<()> {
        let segment_size = self.config.segment_size as Lsn;
        // Normalize down to the base of the segment *before* the one
        // containing stable_lsn, since that one may still be written to.
        let lsn = ((stable_lsn / segment_size) - 1) * segment_size;
        trace!(
            "stabilize({}), normalized: {}, last: {}",
            stable_lsn,
            lsn,
            self.max_stabilized_lsn
        );
        if self.max_stabilized_lsn >= lsn {
            trace!(
                "expected stabilization lsn {} \
                 to be greater than the previous value of {}",
                lsn,
                self.max_stabilized_lsn
            );
            return Ok(());
        }

        let bounds = (
            std::ops::Bound::Excluded(self.max_stabilized_lsn),
            std::ops::Bound::Included(lsn),
        );

        let can_deactivate = self
            .ordering
            .range(bounds)
            .map(|(lsn, _lid)| *lsn)
            .collect::<Vec<_>>();

        self.max_stabilized_lsn = lsn;

        for lsn in can_deactivate {
            self.deactivate_segment(lsn)?;
        }

        Ok(())
    }

    /// Called after the trailer of a segment has been written to disk,
    /// indicating that no more pids will be added to a segment. Moves
    /// the segment into the Inactive state.
    ///
    /// # Panics
    /// The provided lsn and lid must exactly match the existing segment.
    fn deactivate_segment(&mut self, lsn: Lsn) -> Result<()> {
        let lid = self.ordering[&lsn];
        let idx = self.segment_id(lid);

        trace!(
            "deactivating segment with lsn {}: {:?}",
            lsn,
            self.segments[idx]
        );

        let replacements = if self.segments[idx].state == Active {
            self.segments[idx].active_to_inactive(lsn, false, &self.config)?
        } else {
            Default::default()
        };

        let mut old_segments = FastSet8::default();

        // Apply the replacements that were deferred by mark_replace now
        // that this segment's contents are durable.
        for &(pid, old_idx) in &replacements {
            old_segments.insert(old_idx);

            let old_segment = &mut self.segments[old_idx];

            assert!(
                old_segment.state != Active && old_segment.state != Free,
                "segment {} is processing pid {} replacements for \
                 old segment {}, which is in the {:?} state. \
                 all replacements for pid: {:?}",
                lid,
                pid,
                old_idx * self.config.segment_size,
                old_segment.state,
                replacements
                    .iter()
                    .filter(|(p, _)| p == &pid)
                    .collect::<Vec<_>>()
            );

            #[cfg(feature = "event_log")]
            assert!(
                old_segment.present.contains(&pid),
                "we expect deferred replacements to provide \
                 all previous segments so we can clean them. \
                 pid {} old_ptr segment: {} segments with (offset, state, present): {:?}",
                pid,
                old_idx * self.config.segment_size,
                self.segments
                    .iter()
                    .enumerate()
                    .filter_map(|(i, s)| {
                        if s.present.contains(&pid) {
                            Some((
                                i * self.config.segment_size,
                                s.state,
                                s.present.clone(),
                            ))
                        } else {
                            None
                        }
                    })
                    .collect::<Vec<_>>()
            );

            old_segment.remove_pid(pid, lsn, false);
        }

        for old_idx in old_segments {
            self.possibly_clean_or_free_segment(old_idx, lsn);
        }

        // if we have a lot of free segments in our whole file,
        // let's start relocating the current tip to boil it down
        let free_segs = self.segments.iter().filter(|s| s.is_free()).count();
        let inactive_segs =
            self.segments.iter().filter(|s| s.is_inactive()).count();
        let free_ratio = (free_segs * 100) / (1 + free_segs + inactive_segs);

        if free_ratio >= usize::from(self.config.segment_cleanup_threshold)
            && inactive_segs > 5
        {
            let last_index =
                self.segments.iter().rposition(Segment::is_inactive).unwrap();

            let segment_start =
                (last_index * self.config.segment_size) as LogOffset;

            self.to_clean.insert(segment_start);
        }

        Ok(())
    }

    // Advance the file tip by one segment, first waiting on any async
    // truncations at or above the old tip, and return the old tip as
    // the newly allocated segment base.
    fn bump_tip(&mut self) -> LogOffset {
        let lid = self.tip;

        let truncations = self.async_truncations.split_off(&lid);

        for (_at, truncation) in truncations {
            match truncation.wait() {
                Some(Ok(())) => {}
                error => {
                    error!("failed to shrink file: {:?}", error);
                }
            }
        }

        self.tip += self.config.segment_size as LogOffset;

        trace!("advancing file tip from {} to {}", lid, self.tip);

        lid
    }

    /// Returns the next offset to write a new segment in.
    pub(super) fn next(&mut self, lsn: Lsn) -> Result<LogOffset> {
        let _measure = Measure::new(&M.accountant_next);

        assert_eq!(
            lsn % self.config.segment_size as Lsn,
            0,
            "unaligned Lsn provided to next!"
        );

        // Only reuse segments whose last LSN is already stable; others
        // may still be referenced by in-flight IO buffers.
        let free: Vec<LogOffset> = self
            .free
            .iter()
            .filter(|lid| {
                let idx = usize::try_from(
                    *lid / self.config.segment_size as LogOffset,
                )
                .unwrap();
                if let Some(last_lsn) = self.segments[idx].lsn {
                    last_lsn < self.max_stabilized_lsn
                } else {
                    true
                }
            })
            .copied()
            .collect();

        trace!("evaluating free list {:?} in SA::next", free);

        // truncate if possible
        while self.tip != 0 && self.free.len() > 1 {
            let last_segment = self.tip - self.config.segment_size as LogOffset;
            if free.contains(&last_segment) {
                self.free.remove(&last_segment);
                self.truncate(last_segment)?;
            } else {
                break;
            }
        }

        // pop free or add to end
        let safe = free.first();

        let lid = match (self.pause_rewriting, safe) {
            (true, _) | (_, None) => self.bump_tip(),
            (_, Some(&next)) => {
                self.free.remove(&next);
                next
            }
        };

        // pin lsn to this segment
        let idx = self.segment_id(lid);

        assert_eq!(self.segments[idx].state, Free);

        // remove the old ordering from our list
        if let Some(old_lsn) = self.segments[idx].lsn {
            self.ordering.remove(&old_lsn);
        }

        self.segments[idx].free_to_active(lsn);

        self.ordering.insert(lsn, lid);

        debug!(
            "segment accountant returning offset: {} \
             paused: {} on deck: {:?}",
            lid, self.pause_rewriting, self.free,
        );

        assert!(
            lsn >= Lsn::try_from(lid).unwrap(),
            "lsn {} should always be greater than or equal to lid {}",
            lsn,
            lid
        );

        Ok(lid)
    }

    /// Returns an iterator over a snapshot of current segment
    /// log sequence numbers and their corresponding file offsets.
    pub(super) fn segment_snapshot_iter_from(
        &mut self,
        lsn: Lsn,
    ) -> Box<dyn Iterator<Item = (Lsn, LogOffset)>> {
        assert!(
            !self.ordering.is_empty(),
            "expected ordering to have been initialized already"
        );

        assert!(
            self.pause_rewriting,
            "must pause rewriting before \
             iterating over segments"
        );

        let segment_len = self.config.segment_size as Lsn;
        let normalized_lsn = lsn / segment_len * segment_len;

        trace!(
            "generated iterator over {:?} where lsn >= {}",
            self.ordering,
            normalized_lsn
        );

        // Iterate over a clone so the iterator stays valid while the
        // accountant continues to be mutated.
        Box::new(
            self.ordering
                .clone()
                .into_iter()
                .filter(move |&(l, _)| l >= normalized_lsn),
        )
    }

    // truncate the file to the desired length
    fn truncate(&mut self, at: LogOffset) -> Result<()> {
        assert_eq!(
            at % self.config.segment_size as LogOffset,
            0,
            "new length must be io-buf-len aligned"
        );

        self.tip = at;

        assert!(!self.free.contains(&at), "double-free of a segment occurred");

        trace!("asynchronously truncating file to length {}", at);

        let (completer, promise) = OneShot::pair();

        let config = self.config.clone();

        // The actual set_len + sync happens on the threadpool; callers
        // synchronize on the promise via bump_tip().
        let _result = threadpool::spawn(move || {
            debug!("truncating file to length {}", at);
            let res = config
                .file
                .set_len(at)
                .and_then(|_| config.file.sync_all())
                .map_err(|e| e.into());
            completer.fill(res);
        });

        #[cfg(test)]
        _result.unwrap();

        if self.async_truncations.insert(at, promise).is_some() {
            panic!(
                "somehow segment {} was truncated before \
                 the previous truncation completed",
                at
            );
        }

        Ok(())
    }

    // Map a log offset to its segment index, growing the segments table
    // on demand.
    fn segment_id(&mut self, lid: LogOffset) -> SegmentId {
        let idx = assert_usize(lid / self.config.segment_size as LogOffset);

        // TODO never resize like this, make it a single
        // responsibility when the tip is bumped / truncated.
        if self.segments.len() < idx + 1 {
            self.segments.resize(idx + 1, Segment::default());
        }

        idx
    }
}

/// The log may be configured to write data
/// in several different ways, depending on
/// the constraints of the system using it.
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum SegmentMode {
    /// Write to the end of the log, always.
    Linear,
    /// Keep track of segment utilization, and
    /// reuse segments when their contents are
    /// fully relocated elsewhere.
    /// Will try to copy data out of segments
    /// once they reach a configurable threshold.
    Gc,
}

// Decide whether the segment at `idx` should be drained, based on its
// live percentage and item count against a position-skewed threshold.
fn segment_is_drainable(
    idx: usize,
    num_segments: usize,
    live_pct: u8,
    len: usize,
    config: &Config,
) -> bool {
    // we calculate the cleanup threshold in a skewed way,
    // which encourages earlier segments to be rewritten
    // more frequently.
    let base_cleanup_threshold = usize::from(config.segment_cleanup_threshold);
    let cleanup_skew = config.segment_cleanup_skew;

    let relative_prop =
        if num_segments == 0 { 50 } else { (idx * 100) / num_segments };

    // we bias to having a higher threshold closer to segment 0
    let inverse_prop = 100 - relative_prop;
    let relative_threshold = cleanup_skew * inverse_prop / 100;
    let computed_threshold = base_cleanup_threshold + relative_threshold;

    // We should always be below 100, or we will rewrite everything
    let cleanup_threshold = if computed_threshold == 0 {
        1
    } else if computed_threshold > 99 {
        99
    } else {
        computed_threshold
    };

    let segment_low_pct = live_pct as usize <= cleanup_threshold;

    let segment_low_count =
        len < MINIMUM_ITEMS_PER_SEGMENT * 100 / cleanup_threshold;

    segment_low_pct || segment_low_count
}
32.7
90
0.5345
7273ad8dad4cc5b515ddb9e58333301a102f1b79
6,328
#![feature(get_mut_unchecked)]

use core::any::Any;
use rcore_fs::vfs::*;
use std::io::{Read, Seek, SeekFrom, Write};
use std::path::{Path, PathBuf};
use std::string::String;
use std::sync::{Arc, Weak};
use std::sync::{Mutex, MutexGuard};

#[macro_use]
extern crate log;

/// File system at host
pub struct HostFS {
    /// Host directory that backs the root of this filesystem.
    path: PathBuf,
    /// Weak back-reference to the owning Arc, set up in `wrap()`.
    self_ref: Weak<HostFS>,
}

/// INode for `HostFS`
pub struct HNode {
    /// Host path of this node.
    path: PathBuf,
    /// Lazily-opened host file handle, populated by `open_file()`.
    file: Mutex<Option<std::fs::File>>,
    fs: Arc<HostFS>,
}

impl FileSystem for HostFS {
    fn sync(&self) -> Result<()> {
        // Individual nodes can still be synced via HNode::sync_all/sync_data.
        warn!("HostFS: sync is unimplemented");
        Ok(())
    }

    fn root_inode(&self) -> Arc<dyn INode> {
        Arc::new(HNode {
            path: self.path.clone(),
            file: Mutex::new(None),
            fs: self.self_ref.upgrade().unwrap(),
        })
    }

    fn info(&self) -> FsInfo {
        unimplemented!()
    }
}

impl HostFS {
    /// Create a new `HostFS` from host `path`
    pub fn new(path: impl AsRef<Path>) -> Arc<HostFS> {
        HostFS {
            path: path.as_ref().to_path_buf(),
            self_ref: Weak::default(),
        }
        .wrap()
    }

    /// Wrap pure `HostFS` with Arc
    /// Used in constructors
    fn wrap(self) -> Arc<Self> {
        // Create an Arc, make a Weak from it, then put it into the struct.
        let mut fs = Arc::new(self);
        // SAFETY: `fs` was just created and has not been shared yet, so
        // this is the only reference and mutating through it is sound.
        unsafe {
            Arc::get_mut_unchecked(&mut fs).self_ref = Arc::downgrade(&fs);
        }
        fs
    }
}

impl INode for HNode {
    // Read up to `buf.len()` bytes starting at `offset`; returns bytes read.
    fn read_at(&self, offset: usize, buf: &mut [u8]) -> Result<usize> {
        let mut guard = self.open_file()?;
        let file = guard.as_mut().unwrap();
        file.seek(SeekFrom::Start(offset as u64))?;
        let len = file.read(buf)?;
        Ok(len)
    }

    // Write `buf` starting at `offset`; returns bytes written.
    fn write_at(&self, offset: usize, buf: &[u8]) -> Result<usize> {
        let mut guard = self.open_file()?;
        let file = guard.as_mut().unwrap();
        file.seek(SeekFrom::Start(offset as u64))?;
        let len = file.write(buf)?;
        Ok(len)
    }

    fn poll(&self) -> Result<PollStatus> {
        unimplemented!()
    }

    fn metadata(&self) -> Result<Metadata> {
        let metadata = self.path.metadata()?;
        Ok(metadata.into())
    }

    fn set_metadata(&self, _metadata: &Metadata) -> Result<()> {
        warn!("HostFS: set_metadata() is unimplemented");
        Ok(())
    }

    fn sync_all(&self) -> Result<()> {
        let mut guard = self.open_file()?;
        let file = guard.as_mut().unwrap();
        file.sync_all()?;
        Ok(())
    }

    fn sync_data(&self) -> Result<()> {
        let mut guard = self.open_file()?;
        let file = guard.as_mut().unwrap();
        file.sync_data()?;
        Ok(())
    }

    fn resize(&self, len: usize) -> Result<()> {
        let mut guard = self.open_file()?;
        let file = guard.as_mut().unwrap();
        file.set_len(len as u64)?;
        Ok(())
    }

    // Create a file or directory named `name` under this directory.
    // NOTE(review): the exists()/create sequence is not atomic on the
    // host — a concurrent creator can race past the EntryExist check.
    fn create(&self, name: &str, type_: FileType, _mode: u32) -> Result<Arc<dyn INode>> {
        let new_path = self.path.join(name);
        if new_path.exists() {
            return Err(FsError::EntryExist);
        }
        match type_ {
            FileType::File => {
                std::fs::File::create(&new_path)?;
            }
            FileType::Dir => {
                std::fs::create_dir(&new_path)?;
            }
            _ => unimplemented!("only support creating file or dir in HostFS"),
        }
        Ok(Arc::new(HNode {
            path: new_path,
            file: Mutex::new(None),
            fs: self.fs.clone(),
        }))
    }

    fn link(&self, name: &str, other: &Arc<dyn INode>) -> Result<()> {
        // Hard links only work between nodes of the same HostFS.
        let other = other.downcast_ref::<Self>().ok_or(FsError::NotSameFs)?;
        std::fs::hard_link(&other.path, &self.path.join(name))?;
        Ok(())
    }

    fn unlink(&self, name: &str) -> Result<()> {
        let new_path = self.path.join(name);
        if new_path.is_file() {
            std::fs::remove_file(new_path)?;
        } else if new_path.is_dir() {
            std::fs::remove_dir(new_path)?;
        } else {
            return Err(FsError::EntryNotFound);
        }
        Ok(())
    }

    fn move_(&self, old_name: &str, target: &Arc<dyn INode>, new_name: &str) -> Result<()> {
        let target = target.downcast_ref::<Self>().ok_or(FsError::NotSameFs)?;
        let old_path = self.path.join(old_name);
        let new_path = target.path.join(new_name);
        std::fs::rename(old_path, new_path)?;
        Ok(())
    }

    fn find(&self, name: &str) -> Result<Arc<dyn INode>> {
        let new_path = self.path.join(name);
        if !new_path.exists() {
            return Err(FsError::EntryNotFound);
        }
        Ok(Arc::new(HNode {
            path: new_path,
            file: Mutex::new(None),
            fs: self.fs.clone(),
        }))
    }

    // Return the name of the `id`-th directory entry.
    // Each call re-reads the directory, so enumerating n entries is O(n^2).
    fn get_entry(&self, id: usize) -> Result<String> {
        if !self.path.is_dir() {
            return Err(FsError::NotDir);
        }
        self.path
            .read_dir()?
            .nth(id)
            .ok_or(FsError::EntryNotFound)??
            .file_name()
            .into_string()
            .map_err(|_| FsError::InvalidParam)
    }

    fn io_control(&self, _cmd: u32, _data: usize) -> Result<()> {
        Err(FsError::NotSupported)
    }

    fn mmap(&self, _area: MMapArea) -> Result<()> {
        Err(FsError::NotSupported)
    }

    fn fs(&self) -> Arc<dyn FileSystem> {
        self.fs.clone()
    }

    fn as_any_ref(&self) -> &dyn Any {
        self
    }
}

impl HNode {
    /// Ensure to open the file and store a `File` into `self.file`,
    /// return the `MutexGuard`.
    /// If the type of `self.path` is not file, then return Err
    fn open_file(&self) -> Result<MutexGuard<Option<std::fs::File>>> {
        if !self.path.exists() {
            return Err(FsError::EntryNotFound);
        }
        if !self.path.is_file() {
            return Err(FsError::NotFile);
        }
        let mut maybe_file = self.file.lock().unwrap();
        if maybe_file.is_none() {
            // NOTE(review): create(true) is redundant after the exists()
            // check above, and could silently recreate a file removed in
            // between — confirm whether it is intentional.
            let file = std::fs::OpenOptions::new()
                .read(true)
                .write(true)
                .create(true)
                .open(&self.path)?;
            *maybe_file = Some(file);
        }
        Ok(maybe_file)
    }
}
27.275862
92
0.517383
1ee9f333080dc61188779d863b3c027887ba67ec
1,769
// Copyright 2020-2021 the Deno authors. All rights reserved. MIT license. use crate::decorators::decorators_to_defs; use crate::decorators::DecoratorDef; use crate::params::param_to_param_def; use crate::ts_type::ts_type_ann_to_def; use crate::ts_type::TsTypeDef; use crate::ts_type_param::maybe_type_param_decl_to_type_param_defs; use crate::ts_type_param::TsTypeParamDef; use crate::ParamDef; use deno_ast::ParsedSource; use serde::{Deserialize, Serialize}; #[derive(Debug, Serialize, Deserialize, Clone)] #[serde(rename_all = "camelCase")] pub struct FunctionDef { pub params: Vec<ParamDef>, pub return_type: Option<TsTypeDef>, pub is_async: bool, pub is_generator: bool, pub type_params: Vec<TsTypeParamDef>, #[serde(skip_serializing_if = "Vec::is_empty")] pub decorators: Vec<DecoratorDef>, } pub fn function_to_function_def( parsed_source: &ParsedSource, function: &deno_ast::swc::ast::Function, ) -> FunctionDef { let params = function .params .iter() .map(|param| param_to_param_def(parsed_source, param)) .collect(); let maybe_return_type = function.return_type.as_ref().map(ts_type_ann_to_def); let type_params = maybe_type_param_decl_to_type_param_defs(function.type_params.as_ref()); let decorators = decorators_to_defs(parsed_source, &function.decorators); FunctionDef { params, return_type: maybe_return_type, is_async: function.is_async, is_generator: function.is_generator, type_params, decorators, } } pub fn get_doc_for_fn_decl( parsed_source: &ParsedSource, fn_decl: &deno_ast::swc::ast::FnDecl, ) -> (String, FunctionDef) { let name = fn_decl.ident.sym.to_string(); let fn_def = function_to_function_def(parsed_source, &fn_decl.function); (name, fn_def) }
29
80
0.751837
7a6755ea913766b860fdbb224ca69f89bbef4dd0
205
//! Ethtool feature support: netlink attribute definitions and parsing,
//! GET request construction, and the handle used to issue requests.

mod attr;
mod get;
mod handle;

// Attribute parsing stays crate-internal; the types below are public API.
pub(crate) use attr::parse_feature_nlas;
pub use attr::{EthtoolFeatureAttr, EthtoolFeatureBit};
pub use get::EthtoolFeatureGetRequest;
pub use handle::EthtoolFeatureHandle;
22.777778
54
0.804878
39d194ac1f6b6e9881ace30ab6090c10098ea389
9,702
use std::borrow::Cow; use std::cell::RefCell; use std::io; use std::net::ToSocketAddrs; use std::rc::Rc; use std::str; use std::sync::Arc; use err_derive::Error; use futures::sync::mpsc; use quinn_proto as quinn; use rustls::{KeyLogFile, ProtocolVersion, TLSError}; use slog::Logger; use quinn_proto::{EndpointConfig, ServerConfig, TransportConfig}; use crate::tls::{Certificate, CertificateChain, PrivateKey}; use crate::udp::UdpSocket; use crate::{Driver, Endpoint, EndpointInner, Incoming}; /// A helper for constructing an `Endpoint`. pub struct EndpointBuilder<'a> { reactor: Option<&'a tokio_reactor::Handle>, logger: Logger, server_config: Option<ServerConfig>, config: EndpointConfig, client_config: ClientConfig, } #[allow(missing_docs)] impl<'a> EndpointBuilder<'a> { /// Start a builder with a specific initial low-level configuration. pub fn new(config: EndpointConfig) -> Self { Self { config, ..Self::default() } } /// Build an endpoint bound to `addr`. pub fn bind<T: ToSocketAddrs>( self, addr: T, ) -> Result<(Endpoint, Driver, Incoming), EndpointError> { let socket = std::net::UdpSocket::bind(addr).map_err(EndpointError::Socket)?; self.from_socket(socket) } /// Build an endpoint around a pre-configured socket. pub fn from_socket( self, socket: std::net::UdpSocket, ) -> Result<(Endpoint, Driver, Incoming), EndpointError> { let reactor = if let Some(x) = self.reactor { Cow::Borrowed(x) } else { Cow::Owned(tokio_reactor::Handle::default()) }; let addr = socket.local_addr().map_err(EndpointError::Socket)?; let socket = UdpSocket::from_std(socket, &reactor).map_err(EndpointError::Socket)?; let (send, recv) = mpsc::channel(4); let rc = Rc::new(RefCell::new(EndpointInner::new( self.logger.clone(), socket, quinn::Endpoint::new( self.logger, Arc::new(self.config), self.server_config.map(Arc::new), )?, send, addr.is_ipv6(), ))); Ok(( Endpoint { inner: rc.clone(), default_client_config: self.client_config, }, Driver(rc), recv, )) } /// Accept incoming connections. 
pub fn listen(&mut self, config: ServerConfig) -> &mut Self { self.server_config = Some(config); self } pub fn reactor(&mut self, handle: &'a tokio_reactor::Handle) -> &mut Self { self.reactor = Some(handle); self } pub fn logger(&mut self, logger: Logger) -> &mut Self { self.logger = logger; self } /// Set the default configuration used for outgoing connections. /// /// The default can be overriden by using `Endpoint::connect_with`. pub fn default_client_config(&mut self, config: ClientConfig) -> &mut Self { self.client_config = config; self } } impl<'a> Default for EndpointBuilder<'a> { fn default() -> Self { Self { reactor: None, logger: Logger::root(slog::Discard, o!()), server_config: None, config: EndpointConfig::default(), client_config: ClientConfig::default(), } } } /// Errors that can occur during the construction of an `Endpoint`. #[derive(Debug, Error)] pub enum EndpointError { /// An error during setup of the underlying UDP socket. #[error(display = "failed to set up UDP socket: {}", _0)] Socket(io::Error), /// An error configuring TLS. #[error(display = "failed to set up TLS: {}", _0)] Tls(TLSError), /// Errors relating to web PKI infrastructure #[error(display = "webpki failed: {:?}", _0)] WebPki(webpki::Error), /// An error in the Quinn transport configuration #[error(display = "configuration error: {:?}", _0)] Config(quinn::ConfigError), } impl From<quinn::ConfigError> for EndpointError { fn from(x: quinn::ConfigError) -> Self { EndpointError::Config(x) } } impl From<webpki::Error> for EndpointError { fn from(e: webpki::Error) -> Self { EndpointError::WebPki(e) } } /// Helper for constructing a `ServerConfig` to be passed to `EndpointBuilder::listen` to enable /// incoming connections. pub struct ServerConfigBuilder { config: ServerConfig, } impl ServerConfigBuilder { /// Construct a builder using `config` as the initial state. pub fn new(config: ServerConfig) -> Self { Self { config } } /// Construct the complete `ServerConfig`. 
pub fn build(self) -> ServerConfig { self.config } /// Enable NSS-compatible cryptographic key logging to the `SSLKEYLOGFILE` environment variable. /// /// Useful for debugging encrypted communications with protocol analyzers such as Wireshark. pub fn enable_keylog(&mut self) -> &mut Self { Arc::make_mut(&mut self.config.tls_config).key_log = Arc::new(KeyLogFile::new()); self } /// Set the certificate chain that will be presented to clients. pub fn certificate( &mut self, cert_chain: CertificateChain, key: PrivateKey, ) -> Result<&mut Self, TLSError> { Arc::make_mut(&mut self.config.tls_config).set_single_cert(cert_chain.certs, key.inner)?; Ok(self) } /// Set the application-layer protocols to accept, in order of descending preference. /// /// When set, clients which don't declare support for at least one of the supplied protocols will be rejected. /// /// The IANA maintains a [registry] of standard protocol IDs, but custom IDs may be used as well. /// /// [registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids pub fn protocols(&mut self, protocols: &[&[u8]]) -> &mut Self { Arc::make_mut(&mut self.config.tls_config).alpn_protocols = protocols.iter().map(|x| x.to_vec()).collect(); self } /// Whether to require clients to prove they can receive packets before accepting a connection pub fn use_stateless_retry(&mut self, enabled: bool) -> &mut Self { self.config.use_stateless_retry = enabled; self } } impl Default for ServerConfigBuilder { fn default() -> Self { Self { config: ServerConfig::default(), } } } /// Helper for creating new outgoing connections. pub struct ClientConfigBuilder { transport: TransportConfig, crypto: quinn::ClientConfig, } impl ClientConfigBuilder { /// Create a new builder with default options set. 
pub fn new() -> Self { let mut crypto = quinn::ClientConfig::new(); crypto .root_store .add_server_trust_anchors(&webpki_roots::TLS_SERVER_ROOTS); crypto.ct_logs = Some(&ct_logs::LOGS); crypto.versions = vec![ProtocolVersion::TLSv1_3]; crypto.enable_early_data = true; Self { transport: TransportConfig::default(), crypto, } } /// Add a trusted certificate authority. /// /// For more advanced/less secure certificate verification, construct a [`ClientConfig`] /// manually and use rustls's `dangerous_configuration` feature to override the certificate /// verifier. pub fn add_certificate_authority( &mut self, cert: Certificate, ) -> Result<&mut Self, EndpointError> { { let anchor = webpki::trust_anchor_util::cert_der_as_trust_anchor( untrusted::Input::from(&cert.inner.0), )?; self.crypto .root_store .add_server_trust_anchors(&webpki::TLSServerTrustAnchors(&[anchor])); } Ok(self) } /// Enable NSS-compatible cryptographic key logging to the `SSLKEYLOGFILE` environment variable. /// /// Useful for debugging encrypted communications with protocol analyzers such as Wireshark. pub fn enable_keylog(&mut self) -> &mut Self { self.crypto.key_log = Arc::new(KeyLogFile::new()); self } /// Set the application-layer protocols to accept, in order of descending preference. /// /// When set, clients which don't declare support for at least one of the supplied protocols will be rejected. /// /// The IANA maintains a [registry] of standard protocol IDs, but custom IDs may be used as well. /// /// [registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids pub fn protocols(&mut self, protocols: &[&[u8]]) -> &mut Self { self.crypto.alpn_protocols = protocols.iter().map(|x| x.to_vec()).collect(); self } /// Begin connecting from `endpoint` to `addr`. 
pub fn build(self) -> ClientConfig { ClientConfig { transport: Arc::new(self.transport), tls_config: Arc::new(self.crypto), } } } impl Default for ClientConfigBuilder { fn default() -> Self { Self::new() } } /// Configuration for outgoing connections #[derive(Clone)] pub struct ClientConfig { /// Transport configuration to use pub transport: Arc<TransportConfig>, /// TLS configuration to use. /// /// `versions` *must* be `vec![ProtocolVersion::TLSv1_3]`. pub tls_config: Arc<quinn::ClientConfig>, } impl Default for ClientConfig { fn default() -> Self { ClientConfigBuilder::default().build() } }
31.809836
126
0.620491
d7ed2e8a9162f142270f65bce18774c2aa356690
144
use crate::Publisher; impl<'a, T: 'a + Send> Publisher<'a, T> { pub fn filter(self) -> Publisher<'a, T> { unimplemented!() } }
18
45
0.541667
8758d738b0123a84eef1d517c6cf4df4ed53540a
1,103
use serde::{Deserialize, Serialize}; use tendermint::block::signed_header::SignedHeader; use tendermint_light_client::ClientId; /// TMHeader serializes to the same form as TMHeader in wormhole, but is using Tendermint types, /// not tendermint_light_client types - although structurewise, these are compatible. /// Light client is only compatible with tendermint v0.33.6 #[derive(Serialize, Deserialize, Clone, Debug)] pub struct TMHeader { pub signed_header: SignedHeader, pub validator_set: Vec<tendermint::validator::Info>, } /// Create client payload for initializing tendermint light client #[derive(Serialize, Deserialize, Clone, Debug)] pub struct TMCreateClientPayload { pub header: TMHeader, pub trusting_period: u64, pub max_clock_drift: u64, pub unbonding_period: u64, pub client_id: ClientId, } /// Update client payload for updating tendermint light client #[derive(Serialize, Deserialize, Clone, Debug)] pub struct TMUpdateClientPayload { pub header: TMHeader, pub client_id: ClientId, pub next_validator_set: Vec<tendermint::validator::Info>, }
35.580645
96
0.764279
166ba20f124e6d9629dabd04890a4890611acbf3
1,822
#![feature(const_mut_refs)] #![feature(const_fn)] #![feature(raw_ref_op)] #![feature(const_raw_ptr_deref)] const NULL: *mut i32 = std::ptr::null_mut(); const A: *const i32 = &4; // It could be made sound to allow it to compile, // but we do not want to allow this to compile, // as that would be an enormous footgun in oli-obk's opinion. const B: *mut i32 = &mut 4; //~ ERROR mutable references are not allowed // Ok, no actual mutable allocation exists const B2: Option<&mut i32> = None; // Not ok, can't prove that no mutable allocation ends up in final value const B3: Option<&mut i32> = Some(&mut 42); //~ ERROR temporary value dropped while borrowed const fn helper(x: &mut i32) -> Option<&mut i32> { Some(x) } const B4: Option<&mut i32> = helper(&mut 42); //~ ERROR temporary value dropped while borrowed // Ok, because no references to mutable data exist here, since the `{}` moves // its value and then takes a reference to that. const C: *const i32 = &{ let mut x = 42; x += 3; x }; use std::cell::UnsafeCell; struct NotAMutex<T>(UnsafeCell<T>); unsafe impl<T> Sync for NotAMutex<T> {} const FOO: NotAMutex<&mut i32> = NotAMutex(UnsafeCell::new(&mut 42)); //~^ ERROR temporary value dropped while borrowed static FOO2: NotAMutex<&mut i32> = NotAMutex(UnsafeCell::new(&mut 42)); //~^ ERROR temporary value dropped while borrowed static mut FOO3: NotAMutex<&mut i32> = NotAMutex(UnsafeCell::new(&mut 42)); //~^ ERROR temporary value dropped while borrowed // `BAR` works, because `&42` promotes immediately instead of relying on // the enclosing scope rule. const BAR: NotAMutex<&i32> = NotAMutex(UnsafeCell::new(&42)); fn main() { println!("{}", unsafe { *A }); unsafe { *B = 4 } // Bad news unsafe { **FOO.0.get() = 99; assert_eq!(**FOO.0.get(), 99); } }
31.413793
94
0.673985
eb20c34c95d8d70ee46ad92a16d43b366e69380a
3,448
#![feature(test)] extern crate quickcheck; extern crate rand; extern crate test; use quickcheck::{Arbitrary, StdGen}; use rand::isaac::IsaacRng; use test::Bencher; macro_rules! bench_shrink { ($(($fn_name:ident, $type:ty),)*) => { $( #[bench] fn $fn_name(b: &mut Bencher) { // Use a deterministic generator to benchmark on the same data let mut gen = StdGen::new(IsaacRng::new_unseeded(), 100); let value: $type = Arbitrary::arbitrary(&mut gen); b.iter(|| { for _ in value.shrink() { // Do nothing } }); } )* }; } bench_shrink! { (shrink_string_1_tuple, (String,)), (shrink_string_2_tuple, (String, String)), (shrink_string_3_tuple, (String, String, String)), (shrink_string_4_tuple, (String, String, String, String)), (shrink_string_5_tuple, (String, String, String, String, String)), (shrink_string_6_tuple, (String, String, String, String, String, String)), (shrink_string_7_tuple, (String, String, String, String, String, String, String)), (shrink_string_8_tuple, (String, String, String, String, String, String, String, String)), (shrink_vec_u8_1_tuple, (Vec<u8>,)), (shrink_vec_u8_2_tuple, (Vec<u8>, Vec<u8>)), (shrink_vec_u8_3_tuple, (Vec<u8>, Vec<u8>, Vec<u8>)), (shrink_vec_u8_4_tuple, (Vec<u8>, Vec<u8>, Vec<u8>, Vec<u8>)), (shrink_vec_u8_5_tuple, (Vec<u8>, Vec<u8>, Vec<u8>, Vec<u8>, Vec<u8>)), (shrink_vec_u8_6_tuple, (Vec<u8>, Vec<u8>, Vec<u8>, Vec<u8>, Vec<u8>, Vec<u8>)), (shrink_vec_u8_7_tuple, (Vec<u8>, Vec<u8>, Vec<u8>, Vec<u8>, Vec<u8>, Vec<u8>, Vec<u8>)), (shrink_vec_u8_8_tuple, (Vec<u8>, Vec<u8>, Vec<u8>, Vec<u8>, Vec<u8>, Vec<u8>, Vec<u8>, Vec<u8>)), (shrink_u64_1_tuple, (u64,)), (shrink_u64_2_tuple, (u64, u64)), (shrink_u64_3_tuple, (u64, u64, u64)), (shrink_u64_4_tuple, (u64, u64, u64, u64)), (shrink_u64_5_tuple, (u64, u64, u64, u64, u64)), (shrink_u64_6_tuple, (u64, u64, u64, u64, u64, u64)), (shrink_u64_7_tuple, (u64, u64, u64, u64, u64, u64, u64)), (shrink_u64_8_tuple, (u64, u64, u64, u64, u64, u64, u64, u64)), (shrink_i64_1_tuple, (i64,)), (shrink_i64_2_tuple, 
(i64, i64)), (shrink_i64_3_tuple, (i64, i64, i64)), (shrink_i64_4_tuple, (i64, i64, i64, i64)), (shrink_i64_5_tuple, (i64, i64, i64, i64, i64)), (shrink_i64_6_tuple, (i64, i64, i64, i64, i64, i64)), (shrink_i64_7_tuple, (i64, i64, i64, i64, i64, i64, i64)), (shrink_i64_8_tuple, (i64, i64, i64, i64, i64, i64, i64, i64)), (shrink_f64_1_tuple, (f64,)), (shrink_f64_2_tuple, (f64, f64)), (shrink_f64_3_tuple, (f64, f64, f64)), (shrink_f64_4_tuple, (f64, f64, f64, f64)), (shrink_f64_5_tuple, (f64, f64, f64, f64, f64)), (shrink_f64_6_tuple, (f64, f64, f64, f64, f64, f64)), (shrink_f64_7_tuple, (f64, f64, f64, f64, f64, f64, f64)), (shrink_f64_8_tuple, (f64, f64, f64, f64, f64, f64, f64, f64)), (shrink_unit_1_tuple, ((),)), (shrink_unit_2_tuple, ((), ())), (shrink_unit_3_tuple, ((), (), ())), (shrink_unit_4_tuple, ((), (), (), ())), (shrink_unit_5_tuple, ((), (), (), (), ())), (shrink_unit_6_tuple, ((), (), (), (), (), ())), (shrink_unit_7_tuple, ((), (), (), (), (), (), ())), (shrink_unit_8_tuple, ((), (), (), (), (), (), (), ())), }
40.564706
102
0.580046
215980474b1a64aa56d3f8e165a8ed6b05d8d674
20,137
use crate::Stream; use std::borrow::Borrow; use std::hash::Hash; use std::pin::Pin; use std::task::{Context, Poll}; /// Combine many streams into one, indexing each source stream with a unique /// key. /// /// `StreamMap` is similar to [`StreamExt::merge`] in that it combines source /// streams into a single merged stream that yields values in the order that /// they arrive from the source streams. However, `StreamMap` has a lot more /// flexibility in usage patterns. /// /// `StreamMap` can: /// /// * Merge an arbitrary number of streams. /// * Track which source stream the value was received from. /// * Handle inserting and removing streams from the set of managed streams at /// any point during iteration. /// /// All source streams held by `StreamMap` are indexed using a key. This key is /// included with the value when a source stream yields a value. The key is also /// used to remove the stream from the `StreamMap` before the stream has /// completed streaming. /// /// # `Unpin` /// /// Because the `StreamMap` API moves streams during runtime, both streams and /// keys must be `Unpin`. In order to insert a `!Unpin` stream into a /// `StreamMap`, use [`pin!`] to pin the stream to the stack or [`Box::pin`] to /// pin the stream in the heap. /// /// # Implementation /// /// `StreamMap` is backed by a `Vec<(K, V)>`. There is no guarantee that this /// internal implementation detail will persist in future versions, but it is /// important to know the runtime implications. In general, `StreamMap` works /// best with a "smallish" number of streams as all entries are scanned on /// insert, remove, and polling. In cases where a large number of streams need /// to be merged, it may be advisable to use tasks sending values on a shared /// [`mpsc`] channel. 
/// /// [`StreamExt::merge`]: crate::StreamExt::merge /// [`mpsc`]: https://docs.rs/tokio/1.0/tokio/sync/mpsc/index.html /// [`pin!`]: https://docs.rs/tokio/1.0/tokio/macro.pin.html /// [`Box::pin`]: std::boxed::Box::pin /// /// # Examples /// /// Merging two streams, then remove them after receiving the first value /// /// ``` /// use tokio_stream::{StreamExt, StreamMap, Stream}; /// use tokio::sync::mpsc; /// use std::pin::Pin; /// /// #[tokio::main] /// async fn main() { /// let (tx1, mut rx1) = mpsc::channel::<usize>(10); /// let (tx2, mut rx2) = mpsc::channel::<usize>(10); /// /// // Convert the channels to a `Stream`. /// let rx1 = Box::pin(async_stream::stream! { /// while let Some(item) = rx1.recv().await { /// yield item; /// } /// }) as Pin<Box<dyn Stream<Item = usize> + Send>>; /// /// let rx2 = Box::pin(async_stream::stream! { /// while let Some(item) = rx2.recv().await { /// yield item; /// } /// }) as Pin<Box<dyn Stream<Item = usize> + Send>>; /// /// tokio::spawn(async move { /// tx1.send(1).await.unwrap(); /// /// // This value will never be received. The send may or may not return /// // `Err` depending on if the remote end closed first or not. /// let _ = tx1.send(2).await; /// }); /// /// tokio::spawn(async move { /// tx2.send(3).await.unwrap(); /// let _ = tx2.send(4).await; /// }); /// /// let mut map = StreamMap::new(); /// /// // Insert both streams /// map.insert("one", rx1); /// map.insert("two", rx2); /// /// // Read twice /// for _ in 0..2 { /// let (key, val) = map.next().await.unwrap(); /// /// if key == "one" { /// assert_eq!(val, 1); /// } else { /// assert_eq!(val, 3); /// } /// /// // Remove the stream to prevent reading the next value /// map.remove(key); /// } /// } /// ``` /// /// This example models a read-only client to a chat system with channels. The /// client sends commands to join and leave channels. `StreamMap` is used to /// manage active channel subscriptions. 
/// /// For simplicity, messages are displayed with `println!`, but they could be /// sent to the client over a socket. /// /// ```no_run /// use tokio_stream::{Stream, StreamExt, StreamMap}; /// /// enum Command { /// Join(String), /// Leave(String), /// } /// /// fn commands() -> impl Stream<Item = Command> { /// // Streams in user commands by parsing `stdin`. /// # tokio_stream::pending() /// } /// /// // Join a channel, returns a stream of messages received on the channel. /// fn join(channel: &str) -> impl Stream<Item = String> + Unpin { /// // left as an exercise to the reader /// # tokio_stream::pending() /// } /// /// #[tokio::main] /// async fn main() { /// let mut channels = StreamMap::new(); /// /// // Input commands (join / leave channels). /// let cmds = commands(); /// tokio::pin!(cmds); /// /// loop { /// tokio::select! { /// Some(cmd) = cmds.next() => { /// match cmd { /// Command::Join(chan) => { /// // Join the channel and add it to the `channels` /// // stream map /// let msgs = join(&chan); /// channels.insert(chan, msgs); /// } /// Command::Leave(chan) => { /// channels.remove(&chan); /// } /// } /// } /// Some((chan, msg)) = channels.next() => { /// // Received a message, display it on stdout with the channel /// // it originated from. /// println!("{}: {}", chan, msg); /// } /// // Both the `commands` stream and the `channels` stream are /// // complete. There is no more work to do, so leave the loop. /// else => break, /// } /// } /// } /// ``` #[derive(Debug)] pub struct StreamMap<K, V> { /// Streams stored in the map entries: Vec<(K, V)>, } impl<K, V> StreamMap<K, V> { /// An iterator visiting all key-value pairs in arbitrary order. /// /// The iterator element type is &'a (K, V). 
/// /// # Examples /// /// ``` /// use tokio_stream::{StreamMap, pending}; /// /// let mut map = StreamMap::new(); /// /// map.insert("a", pending::<i32>()); /// map.insert("b", pending()); /// map.insert("c", pending()); /// /// for (key, stream) in map.iter() { /// println!("({}, {:?})", key, stream); /// } /// ``` pub fn iter(&self) -> impl Iterator<Item = &(K, V)> { self.entries.iter() } /// An iterator visiting all key-value pairs mutably in arbitrary order. /// /// The iterator element type is &'a mut (K, V). /// /// # Examples /// /// ``` /// use tokio_stream::{StreamMap, pending}; /// /// let mut map = StreamMap::new(); /// /// map.insert("a", pending::<i32>()); /// map.insert("b", pending()); /// map.insert("c", pending()); /// /// for (key, stream) in map.iter_mut() { /// println!("({}, {:?})", key, stream); /// } /// ``` pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut (K, V)> { self.entries.iter_mut() } /// Creates an empty `StreamMap`. /// /// The stream map is initially created with a capacity of `0`, so it will /// not allocate until it is first inserted into. /// /// # Examples /// /// ``` /// use tokio_stream::{StreamMap, Pending}; /// /// let map: StreamMap<&str, Pending<()>> = StreamMap::new(); /// ``` pub fn new() -> StreamMap<K, V> { StreamMap { entries: vec![] } } /// Creates an empty `StreamMap` with the specified capacity. /// /// The stream map will be able to hold at least `capacity` elements without /// reallocating. If `capacity` is 0, the stream map will not allocate. /// /// # Examples /// /// ``` /// use tokio_stream::{StreamMap, Pending}; /// /// let map: StreamMap<&str, Pending<()>> = StreamMap::with_capacity(10); /// ``` pub fn with_capacity(capacity: usize) -> StreamMap<K, V> { StreamMap { entries: Vec::with_capacity(capacity), } } /// Returns an iterator visiting all keys in arbitrary order. /// /// The iterator element type is &'a K. 
/// /// # Examples /// /// ``` /// use tokio_stream::{StreamMap, pending}; /// /// let mut map = StreamMap::new(); /// /// map.insert("a", pending::<i32>()); /// map.insert("b", pending()); /// map.insert("c", pending()); /// /// for key in map.keys() { /// println!("{}", key); /// } /// ``` pub fn keys(&self) -> impl Iterator<Item = &K> { self.iter().map(|(k, _)| k) } /// An iterator visiting all values in arbitrary order. /// /// The iterator element type is &'a V. /// /// # Examples /// /// ``` /// use tokio_stream::{StreamMap, pending}; /// /// let mut map = StreamMap::new(); /// /// map.insert("a", pending::<i32>()); /// map.insert("b", pending()); /// map.insert("c", pending()); /// /// for stream in map.values() { /// println!("{:?}", stream); /// } /// ``` pub fn values(&self) -> impl Iterator<Item = &V> { self.iter().map(|(_, v)| v) } /// An iterator visiting all values mutably in arbitrary order. /// /// The iterator element type is &'a mut V. /// /// # Examples /// /// ``` /// use tokio_stream::{StreamMap, pending}; /// /// let mut map = StreamMap::new(); /// /// map.insert("a", pending::<i32>()); /// map.insert("b", pending()); /// map.insert("c", pending()); /// /// for stream in map.values_mut() { /// println!("{:?}", stream); /// } /// ``` pub fn values_mut(&mut self) -> impl Iterator<Item = &mut V> { self.iter_mut().map(|(_, v)| v) } /// Returns the number of streams the map can hold without reallocating. /// /// This number is a lower bound; the `StreamMap` might be able to hold /// more, but is guaranteed to be able to hold at least this many. /// /// # Examples /// /// ``` /// use tokio_stream::{StreamMap, Pending}; /// /// let map: StreamMap<i32, Pending<()>> = StreamMap::with_capacity(100); /// assert!(map.capacity() >= 100); /// ``` pub fn capacity(&self) -> usize { self.entries.capacity() } /// Returns the number of streams in the map. 
/// /// # Examples /// /// ``` /// use tokio_stream::{StreamMap, pending}; /// /// let mut a = StreamMap::new(); /// assert_eq!(a.len(), 0); /// a.insert(1, pending::<i32>()); /// assert_eq!(a.len(), 1); /// ``` pub fn len(&self) -> usize { self.entries.len() } /// Returns `true` if the map contains no elements. /// /// # Examples /// /// ``` /// use tokio_stream::{StreamMap, pending}; /// /// let mut a = StreamMap::new(); /// assert!(a.is_empty()); /// a.insert(1, pending::<i32>()); /// assert!(!a.is_empty()); /// ``` pub fn is_empty(&self) -> bool { self.entries.is_empty() } /// Clears the map, removing all key-stream pairs. Keeps the allocated /// memory for reuse. /// /// # Examples /// /// ``` /// use tokio_stream::{StreamMap, pending}; /// /// let mut a = StreamMap::new(); /// a.insert(1, pending::<i32>()); /// a.clear(); /// assert!(a.is_empty()); /// ``` pub fn clear(&mut self) { self.entries.clear(); } /// Insert a key-stream pair into the map. /// /// If the map did not have this key present, `None` is returned. /// /// If the map did have this key present, the new `stream` replaces the old /// one and the old stream is returned. /// /// # Examples /// /// ``` /// use tokio_stream::{StreamMap, pending}; /// /// let mut map = StreamMap::new(); /// /// assert!(map.insert(37, pending::<i32>()).is_none()); /// assert!(!map.is_empty()); /// /// map.insert(37, pending()); /// assert!(map.insert(37, pending()).is_some()); /// ``` pub fn insert(&mut self, k: K, stream: V) -> Option<V> where K: Hash + Eq, { let ret = self.remove(&k); self.entries.push((k, stream)); ret } /// Removes a key from the map, returning the stream at the key if the key was previously in the map. /// /// The key may be any borrowed form of the map's key type, but `Hash` and /// `Eq` on the borrowed form must match those for the key type. 
/// /// # Examples /// /// ``` /// use tokio_stream::{StreamMap, pending}; /// /// let mut map = StreamMap::new(); /// map.insert(1, pending::<i32>()); /// assert!(map.remove(&1).is_some()); /// assert!(map.remove(&1).is_none()); /// ``` pub fn remove<Q: ?Sized>(&mut self, k: &Q) -> Option<V> where K: Borrow<Q>, Q: Hash + Eq, { for i in 0..self.entries.len() { if self.entries[i].0.borrow() == k { return Some(self.entries.swap_remove(i).1); } } None } /// Returns `true` if the map contains a stream for the specified key. /// /// The key may be any borrowed form of the map's key type, but `Hash` and /// `Eq` on the borrowed form must match those for the key type. /// /// # Examples /// /// ``` /// use tokio_stream::{StreamMap, pending}; /// /// let mut map = StreamMap::new(); /// map.insert(1, pending::<i32>()); /// assert_eq!(map.contains_key(&1), true); /// assert_eq!(map.contains_key(&2), false); /// ``` pub fn contains_key<Q: ?Sized>(&self, k: &Q) -> bool where K: Borrow<Q>, Q: Hash + Eq, { for i in 0..self.entries.len() { if self.entries[i].0.borrow() == k { return true; } } false } } impl<K, V> StreamMap<K, V> where K: Unpin, V: Stream + Unpin, { /// Polls the next value, includes the vec entry index fn poll_next_entry(&mut self, cx: &mut Context<'_>) -> Poll<Option<(usize, V::Item)>> { use Poll::*; let start = self::rand::thread_rng_n(self.entries.len() as u32) as usize; let mut idx = start; for _ in 0..self.entries.len() { let (_, stream) = &mut self.entries[idx]; match Pin::new(stream).poll_next(cx) { Ready(Some(val)) => return Ready(Some((idx, val))), Ready(None) => { // Remove the entry self.entries.swap_remove(idx); // Check if this was the last entry, if so the cursor needs // to wrap if idx == self.entries.len() { idx = 0; } else if idx < start && start <= self.entries.len() { // The stream being swapped into the current index has // already been polled, so skip it. 
idx = idx.wrapping_add(1) % self.entries.len(); } } Pending => { idx = idx.wrapping_add(1) % self.entries.len(); } } } // If the map is empty, then the stream is complete. if self.entries.is_empty() { Ready(None) } else { Pending } } } impl<K, V> Default for StreamMap<K, V> { fn default() -> Self { Self::new() } } impl<K, V> Stream for StreamMap<K, V> where K: Clone + Unpin, V: Stream + Unpin, { type Item = (K, V::Item); fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { if let Some((idx, val)) = ready!(self.poll_next_entry(cx)) { let key = self.entries[idx].0.clone(); Poll::Ready(Some((key, val))) } else { Poll::Ready(None) } } fn size_hint(&self) -> (usize, Option<usize>) { let mut ret = (0, Some(0)); for (_, stream) in &self.entries { let hint = stream.size_hint(); ret.0 += hint.0; match (ret.1, hint.1) { (Some(a), Some(b)) => ret.1 = Some(a + b), (Some(_), None) => ret.1 = None, _ => {} } } ret } } impl<K, V> std::iter::FromIterator<(K, V)> for StreamMap<K, V> where K: Hash + Eq, { fn from_iter<T: IntoIterator<Item = (K, V)>>(iter: T) -> Self { let iterator = iter.into_iter(); let (lower_bound, _) = iterator.size_hint(); let mut stream_map = Self::with_capacity(lower_bound); for (key, value) in iterator { stream_map.insert(key, value); } stream_map } } impl<K, V> Extend<(K, V)> for StreamMap<K, V> { fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item = (K, V)>, { self.entries.extend(iter); } } mod rand { use std::cell::Cell; mod loom { #[cfg(not(loom))] pub(crate) mod rand { use std::collections::hash_map::RandomState; use std::hash::{BuildHasher, Hash, Hasher}; use std::sync::atomic::AtomicU32; use std::sync::atomic::Ordering::Relaxed; static COUNTER: AtomicU32 = AtomicU32::new(1); pub(crate) fn seed() -> u64 { let rand_state = RandomState::new(); let mut hasher = rand_state.build_hasher(); // Hash some unique-ish data to generate some new state COUNTER.fetch_add(1, Relaxed).hash(&mut hasher); // Get the seed 
hasher.finish() } } #[cfg(loom)] pub(crate) mod rand { pub(crate) fn seed() -> u64 { 1 } } } /// Fast random number generate /// /// Implement xorshift64+: 2 32-bit xorshift sequences added together. /// Shift triplet `[17,7,16]` was calculated as indicated in Marsaglia's /// Xorshift paper: <https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf> /// This generator passes the SmallCrush suite, part of TestU01 framework: /// <http://simul.iro.umontreal.ca/testu01/tu01.html> #[derive(Debug)] pub(crate) struct FastRand { one: Cell<u32>, two: Cell<u32>, } impl FastRand { /// Initialize a new, thread-local, fast random number generator. pub(crate) fn new(seed: u64) -> FastRand { let one = (seed >> 32) as u32; let mut two = seed as u32; if two == 0 { // This value cannot be zero two = 1; } FastRand { one: Cell::new(one), two: Cell::new(two), } } pub(crate) fn fastrand_n(&self, n: u32) -> u32 { // This is similar to fastrand() % n, but faster. // See https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/ let mul = (self.fastrand() as u64).wrapping_mul(n as u64); (mul >> 32) as u32 } fn fastrand(&self) -> u32 { let mut s1 = self.one.get(); let s0 = self.two.get(); s1 ^= s1 << 17; s1 = s1 ^ s0 ^ s1 >> 7 ^ s0 >> 16; self.one.set(s0); self.two.set(s1); s0.wrapping_add(s1) } } // Used by `StreamMap` pub(crate) fn thread_rng_n(n: u32) -> u32 { thread_local! { static THREAD_RNG: FastRand = FastRand::new(loom::rand::seed()); } THREAD_RNG.with(|rng| rng.fastrand_n(n)) } }
29.141823
105
0.507523
e673c58191c14870f17115d145acd8b89d8d35f5
2,334
// Based on https://btrfs.wiki.kernel.org/index.php/Btrfs_design use alloc::{boxed::Box, rc::Rc, string::String, string::ToString, vec, vec::Vec}; use crate::write_uart; // --------------- // BTRFS B-TREE // --------------- #[repr(C)] struct BtrfsHeader { csum: [u8; 32], fsid: [u8; 16], bytenr: u64, flags: u64, chunk_tree_uid: [u8; 16], generation: u64, owner: u64, nritems: u32, level: u8, } #[repr(C)] struct BtrfsDiskKey { object_id: u64, _type: u8, offset: u64, } #[repr(C)] struct BtrfsItem { key: BtrfsDiskKey, offset: u32, size: u32, } // Directories/Files are BtrfsItems and each item has an associated key // The BtrfsHeader specifies a new btrfs block, based on a hierarchical view. E.g. a partition, virtual partition, top level directory, mounter partition // ------------------ // Filesystem Viewer // ------------------ struct FileInfo; // BTRFS FILES and DIRS struct File { metadata: FileInfo, } // A file is contrasted to a directory since dirs have builtin children/pointers to `.` and `..` struct Dir { metadata: FileInfo, } // Read: LBA, length, buffer // Write: LBA, length, buffer // given disk N, Partition P, use the underlying partition format functions to retrieve and edit the data // from an SSD pub trait FileOperations { // no async fn create_new(path: &str) -> Self; // no async fn delete(); // async fn get_from_disk(disk_num: u64, block_address: u64, buffer: &str); // no async fn write_to_disk(disk_num: u64, block_address: u64, buffer: &str); } impl FileOperations for File { fn create_new(path: &str) -> Self { Self { metadata: FileInfo {}, } } fn delete() {} fn get_from_disk(disk_num: u64, block_address: u64, buffer: &str) {} fn write_to_disk(disk_num: u64, block_address: u64, buffer: &str) {} } impl FileOperations for Dir { fn create_new(path: &str) -> Self { Self { metadata: FileInfo {}, } } fn delete() {} fn get_from_disk(disk_num: u64, block_address: u64, buffer: &str) {} fn write_to_disk(disk_num: u64, block_address: u64, buffer: &str) {} } #[test] 
fn test_files() { let _file = File::create_new("path"); std::println!("Successfully created a btrfs file!\n"); }
23.108911
153
0.61868
e500ab638bfd4ea60836270e26de781e501aa93b
828
use std::sync::atomic::AtomicBool; pub mod actions; pub mod download_handle; pub mod events; pub mod session; pub(crate) static ARIA_STARTED: AtomicBool = AtomicBool::new(false); pub mod errors { use thiserror::Error; pub type Result<T> = std::result::Result<T, AriaError>; #[derive(Debug, Error)] pub enum AriaError { #[error("Aria2 has already been started once in this process !")] AlreadyInitialized, #[error("The given handle ({0}) couldn't be used to unregister the listener, it must be invalid !")] InvalidCallbackHandle(usize), #[error("Run error code")] RunError(i32), #[error("Add error: {0}")] AddError(i32), } } pub mod prelude { pub use crate::{ errors::Result, session::{Aria2Context, Session}, }; }
24.352941
108
0.624396