language
stringlengths
0
24
filename
stringlengths
9
214
code
stringlengths
99
9.93M
Rust
hhvm/hphp/hack/src/hackrs/datastore/non_evicting.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use std::hash::Hash; use anyhow::Result; pub struct NonEvictingStore<K: Hash + Eq, V> { store: hash::DashMap<K, V>, } pub struct NonEvictingLocalStore<K: Hash + Eq, V> { store: hash::HashMap<K, V>, } impl<K: Hash + Eq, V> Default for NonEvictingStore<K, V> { fn default() -> Self { Self { store: Default::default(), } } } impl<K: Hash + Eq, V> NonEvictingStore<K, V> { pub fn new() -> Self { Default::default() } } impl<K, V> crate::Store<K, V> for NonEvictingStore<K, V> where K: Copy + Send + Sync + Hash + Eq, V: Clone + Send + Sync, { fn contains_key(&self, key: K) -> Result<bool> { Ok(self.store.contains_key(&key)) } fn get(&self, key: K) -> Result<Option<V>> { Ok(self.store.get(&key).map(|x| V::clone(&*x))) } fn insert(&self, key: K, val: V) -> Result<()> { self.store.insert(key, val); Ok(()) } fn remove_batch(&self, keys: &mut dyn Iterator<Item = K>) -> Result<()> { for key in keys { if self.get(key)?.is_some() { self.store.remove(&key); } } Ok(()) } } impl<K: Hash + Eq, V> std::fmt::Debug for NonEvictingStore<K, V> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("NonEvictingStore").finish() } } impl<K: Hash + Eq, V> Default for NonEvictingLocalStore<K, V> { fn default() -> Self { Self { store: Default::default(), } } } impl<K: Hash + Eq, V> NonEvictingLocalStore<K, V> { pub fn new() -> Self { Default::default() } } impl<K, V> crate::LocalStore<K, V> for NonEvictingLocalStore<K, V> where K: Copy + Hash + Eq, V: Clone, { fn get(&self, key: K) -> Option<V> { self.store.get(&key).map(|x| V::clone(x)) } fn insert(&mut self, key: K, val: V) { self.store.insert(key, val); } fn remove_batch(&mut self, keys: &mut dyn Iterator<Item = K>) { for key in keys { self.store.remove(&key); } } } impl<K: Hash + Eq, V> std::fmt::Debug for NonEvictingLocalStore<K, 
V> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("NonEvictingLocalStore").finish() } }
TOML
hhvm/hphp/hack/src/hackrs/decl_enforceability/Cargo.toml
# @generated by autocargo [package] name = "decl_enforceability" version = "0.0.0" edition = "2021" [lib] path = "decl_enforceability.rs" [dependencies] oxidized = { version = "0.0.0", path = "../../oxidized" } pos = { version = "0.0.0", path = "../pos/cargo/pos" } special_names = { version = "0.0.0", path = "../special_names/cargo/special_names" } ty = { version = "0.0.0", path = "../ty/cargo/ty" }
Rust
hhvm/hphp/hack/src/hackrs/decl_enforceability/decl_enforceability.rs
// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.

use pos::Positioned;
use special_names as sn;
use ty::decl;
use ty::decl::Ty;
use ty::reason::Reason;

/// Build the decl type `supportdyn<mixed>` at the given position.
pub fn supportdyn_mixed<R: Reason>(pos: R::Pos, reason: R) -> Ty<R> {
    make_supportdyn_type(pos, reason.clone(), Ty::mixed(reason))
}

/// Wrap `ty` in an application of the `SupportDyn` class, i.e. `supportdyn<ty>`.
fn make_supportdyn_type<R: Reason>(pos: R::Pos, reason: R, ty: Ty<R>) -> Ty<R> {
    Ty::apply(
        reason,
        Positioned::new(pos, *sn::classes::cSupportDyn),
        [ty].into(),
    )
}

/// Add `as supportdyn<mixed>` constraints to the type parameters
pub fn add_supportdyn_constraints<R: Reason>(pos: &R::Pos, tparams: &mut [decl::Tparam<R, Ty<R>>]) {
    for tparam in tparams {
        // Skip compiler-generated coeffect generics, and params that opt out
        // via the NoAutoBound attribute.
        if !sn::coeffects::is_generated_generic(tparam.name.id()) && !noautobound(tparam) {
            // Prepend the new constraint; existing constraints follow.
            let mut constraints = Vec::with_capacity(1 + tparam.constraints.len());
            constraints.push((
                decl::ty::ConstraintKind::ConstraintAs,
                supportdyn_mixed(pos.clone(), R::witness_from_decl(pos.clone())),
            ));
            constraints.extend(tparam.constraints.iter().cloned());
            tparam.constraints = constraints.into_boxed_slice()
        }
    }
}

/// Add `as supportdyn<mixed>` constraints to the type parameters if in implicit
/// pessimisation mode.
pub fn maybe_add_supportdyn_constraints<R: Reason>(
    opts: &oxidized::typechecker_options::TypecheckerOptions,
    this_class: Option<&decl::ShallowClass<R>>,
    pos: &R::Pos,
    tparams: &mut [decl::Tparam<R, Ty<R>>],
) {
    if implicit_sdt_for_class(opts, this_class) {
        add_supportdyn_constraints(pos, tparams)
    }
}

/// True iff the class carries the NoAutoDynamic user attribute.
/// A missing class (`None`) is treated as *not* opting out.
fn noautodynamic<R: Reason>(this_class: Option<&decl::ShallowClass<R>>) -> bool {
    match this_class {
        None => false,
        Some(sc) => sc
            .user_attributes
            .iter()
            .any(|ua| ua.name.id() == *sn::user_attributes::uaNoAutoDynamic),
    }
}

/// True iff the type parameter carries the NoAutoBound user attribute.
fn noautobound<R: Reason>(tp: &decl::Tparam<R, Ty<R>>) -> bool {
    tp.user_attributes
        .iter()
        .any(|ua| ua.name.id() == *sn::user_attributes::uaNoAutoBound)
}

/// Whether implicit pessimisation applies to this class: the global
/// everything-SDT option must be on and the class must not opt out via
/// NoAutoDynamic.
fn implicit_sdt_for_class<R: Reason>(
    opts: &oxidized::typechecker_options::TypecheckerOptions,
    this_class: Option<&decl::ShallowClass<R>>,
) -> bool {
    opts.tco_everything_sdt && !noautodynamic(this_class)
}
Rust
hhvm/hphp/hack/src/hackrs/decl_parser/decl_parser.rs
// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.

use std::marker::PhantomData;
use std::sync::Arc;

use file_provider::FileProvider;
use names::FileSummary;
pub use oxidized::decl_parser_options::DeclParserOptions;
use oxidized_by_ref::direct_decl_parser::ParsedFileWithHashes;
use pos::RelativePath;
use ty::decl::shallow;
use ty::decl::shallow::NamedDecl;
use ty::reason::Reason;

/// Parses Hack source files (fetched through a `FileProvider`) into shallow
/// decls.
#[derive(Debug, Clone)]
pub struct DeclParser<R: Reason> {
    file_provider: Arc<dyn FileProvider>,
    // Passed through to `ParsedFileWithHashes::new`, which drops stdlib decls
    // when this is set (per its contract — see `parse_impl` below).
    deregister_php_stdlib: bool,
    decl_parser_opts: DeclParserOptions,
    // We could make our parse methods generic over `R` instead, but it's
    // usually more convenient for callers (especially tests) to pin the decl
    // parser to a single Reason type.
    _phantom: PhantomData<R>,
}

impl<R: Reason> DeclParser<R> {
    pub fn new(
        file_provider: Arc<dyn FileProvider>,
        decl_parser_opts: DeclParserOptions,
        deregister_php_stdlib: bool,
    ) -> Self {
        Self {
            file_provider,
            decl_parser_opts,
            deregister_php_stdlib,
            _phantom: PhantomData,
        }
    }

    /// Fetch `path` from the file provider, parse it, and return its named
    /// decls (per-decl hashes are discarded).
    pub fn parse(&self, path: RelativePath) -> anyhow::Result<Vec<shallow::NamedDecl<R>>> {
        // Arena is local: parsed data is copied out into owned `NamedDecl`s.
        let arena = bumpalo::Bump::new();
        let text = self.file_provider.get(path)?;
        let hashed_file = self.parse_impl(path, &text, &arena);
        Ok(hashed_file
            .into_iter()
            .map(|(name, decl, _)| NamedDecl::from(&(name, decl)))
            .collect())
    }

    /// Like `parse`, but also build a `FileSummary` from the parsed file
    /// before converting the decls.
    pub fn parse_and_summarize(
        &self,
        path: RelativePath,
    ) -> anyhow::Result<(Vec<shallow::NamedDecl<R>>, FileSummary)> {
        let arena = bumpalo::Bump::new();
        let text = self.file_provider.get(path)?;
        let hashed_file = self.parse_impl(path, &text, &arena);
        let summary = names::FileSummary::new(&hashed_file);
        let decls = hashed_file
            .into_iter()
            .map(|(name, decl, _)| NamedDecl::from(&(name, decl)))
            .collect();
        Ok((decls, summary))
    }

    /// Parse and hash decls, removing stdlib decls if that's what parser-options say.
    pub fn parse_impl<'a>(
        &self,
        path: RelativePath,
        text: &'a [u8],
        arena: &'a bumpalo::Bump,
    ) -> ParsedFileWithHashes<'a> {
        let prefix = path.prefix();
        let deregister_php_stdlib_if_hhi = self.deregister_php_stdlib;
        let opts = &self.decl_parser_opts;
        let parsed_file =
            direct_decl_parser::parse_decls_for_typechecking(opts, path.into(), text, arena);
        ParsedFileWithHashes::new(parsed_file, deregister_php_stdlib_if_hhi, prefix, arena)
    }
}
TOML
hhvm/hphp/hack/src/hackrs/decl_parser/cargo/decl_parser/Cargo.toml
# @generated by autocargo [package] name = "decl_parser" version = "0.0.0" edition = "2021" [lib] path = "../../decl_parser.rs" [dependencies] anyhow = "1.0.71" bumpalo = { version = "3.11.1", features = ["collections"] } direct_decl_parser = { version = "0.0.0", path = "../../../../parser/api/cargo/direct_decl_parser" } file_provider = { version = "0.0.0", path = "../../../file_provider/cargo/file_provider" } names = { version = "0.0.0", path = "../../../../naming/names_rust" } oxidized = { version = "0.0.0", path = "../../../../oxidized" } oxidized_by_ref = { version = "0.0.0", path = "../../../../oxidized_by_ref" } pos = { version = "0.0.0", path = "../../../pos/cargo/pos" } ty = { version = "0.0.0", path = "../../../ty/cargo/ty" }
Rust
hhvm/hphp/hack/src/hackrs/depgraph_api/depgraph_api.rs
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.

use std::fmt::Debug;

use dep::Dep;
use hh24_types::DependencyHash;
use pos::ClassConstName;
use pos::ConstName;
use pos::FunName;
use pos::MethodName;
use pos::ModuleName;
use pos::PropName;
use pos::TypeName;
use typing_deps_hash::DepType;

pub type Result<T, E = Error> = std::result::Result<T, E>;

// No failure modes yet; the `Result` alias above keeps trait signatures
// stable if variants are added later.
#[derive(thiserror::Error, Debug)]
pub enum Error {}

// Implementations of `FoldedDeclProvider` need to be able to record
// dependencies (if needed). We do this by having the functions of this
// trait take a "who's asking?" symbol of this type.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum DeclName {
    Fun(FunName),
    Const(ConstName),
    Type(TypeName),
    Module(ModuleName),
}

impl DeclName {
    /// Hash this name into a dependency-graph node, using the per-kind
    /// `typing_deps_hash::hash1` scheme.
    pub fn to_dep(&self) -> Dep {
        Dep::new(match self {
            DeclName::Fun(n) => typing_deps_hash::hash1(DepType::Fun, n.as_str().as_bytes()),
            DeclName::Const(n) => typing_deps_hash::hash1(DepType::GConst, n.as_str().as_bytes()),
            DeclName::Type(n) => typing_deps_hash::hash1(DepType::Type, n.as_str().as_bytes()),
            DeclName::Module(n) => typing_deps_hash::hash1(DepType::Module, n.as_str().as_bytes()),
        })
    }
}

impl From<FunName> for DeclName {
    fn from(name: FunName) -> Self {
        Self::Fun(name)
    }
}

impl From<ConstName> for DeclName {
    fn from(name: ConstName) -> Self {
        Self::Const(name)
    }
}

impl From<ModuleName> for DeclName {
    fn from(name: ModuleName) -> Self {
        Self::Module(name)
    }
}

impl From<TypeName> for DeclName {
    fn from(name: TypeName) -> Self {
        Self::Type(name)
    }
}

// nb(sf, 2022-03-15): c.f. ` Typing_deps.Dep.variant`
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
/// A node in the dependency graph that, when changed, must recheck all of its
/// dependents.
pub enum DependencyName {
    /// Represents another class depending on a class via an inheritance-like
    /// mechanism (`extends`, `implements`, `use`, `require extends`, `require
    /// implements`, etc.)
    Extends(TypeName),
    /// Represents something depending on a class constant.
    Const(TypeName, ClassConstName),
    /// Represents something depending on a class constructor.
    Constructor(TypeName),
    /// Represents something depending on a class's instance property.
    Prop(TypeName, PropName),
    /// Represents something depending on a class's static property.
    StaticProp(TypeName, PropName),
    /// Represents something depending on a class's instance method.
    Method(TypeName, MethodName),
    /// Represents something depending on a class's static method.
    StaticMethod(TypeName, MethodName),
    /// Represents something depending on all members of a class. Particularly
    /// useful for switch exhaustiveness-checking. We establish a dependency on
    /// all members of an enum in that case.
    AllMembers(TypeName),
    /// Represents something depending on a global constant.
    GConst(ConstName),
    /// Represents something depending on a global function.
    Fun(FunName),
    /// Represents something depending on a class/typedef/trait/interface
    Type(TypeName),
    /// Represents something depending on a module.
    Module(ModuleName),
}

impl From<FunName> for DependencyName {
    fn from(name: FunName) -> Self {
        Self::Fun(name)
    }
}

impl From<ConstName> for DependencyName {
    fn from(name: ConstName) -> Self {
        Self::GConst(name)
    }
}

impl From<ModuleName> for DependencyName {
    fn from(name: ModuleName) -> Self {
        Self::Module(name)
    }
}

impl From<TypeName> for DependencyName {
    fn from(name: TypeName) -> Self {
        Self::Type(name)
    }
}

impl From<DeclName> for DependencyName {
    // Every decl name maps to the "plain" dependency node of the same kind
    // (never to member-level or Extends nodes).
    fn from(name: DeclName) -> Self {
        match name {
            DeclName::Fun(name) => DependencyName::Fun(name),
            DeclName::Const(name) => DependencyName::GConst(name),
            DeclName::Type(name) => DependencyName::Type(name),
            DeclName::Module(name) => DependencyName::Module(name),
        }
    }
}

impl From<DependencyName> for DependencyHash {
    // Keep in sync with Dep.make in typing_deps.ml
    fn from(name: DependencyName) -> Self {
        match name {
            DependencyName::Extends(t) => Self::of_symbol(DepType::Extends, &t),
            DependencyName::Const(t, c) => Self::of_member(DepType::Const, t.into(), &c),
            // Constructor and AllMembers hash with an empty member name.
            DependencyName::Constructor(t) => Self::of_member(DepType::Constructor, t.into(), ""),
            DependencyName::Prop(t, p) => Self::of_member(DepType::Prop, t.into(), &p),
            DependencyName::StaticProp(t, p) => Self::of_member(DepType::SProp, t.into(), &p),
            DependencyName::Method(t, m) => Self::of_member(DepType::Method, t.into(), &m),
            DependencyName::StaticMethod(t, m) => Self::of_member(DepType::SMethod, t.into(), &m),
            DependencyName::AllMembers(t) => Self::of_member(DepType::AllMembers, t.into(), ""),
            DependencyName::GConst(c) => Self::of_symbol(DepType::GConst, &c),
            DependencyName::Fun(f) => Self::of_symbol(DepType::Fun, &f),
            DependencyName::Type(t) => Self::of_symbol(DepType::Type, &t),
            DependencyName::Module(m) => Self::of_symbol(DepType::Module, &m),
        }
    }
}

impl DependencyName {
    /// Hash this dependency into its graph-node id.
    pub fn to_dep(&self) -> Dep {
        Dep::new(hh24_types::DependencyHash::from(*self).0)
    }

    // Keep in sync with Dep.extract_name in typing_deps.ml
    pub fn extract_name(&self) -> String {
        use core_utils_rust::strip_ns;
        match self {
            DependencyName::Type(t)
            | DependencyName::Extends(t)
            | DependencyName::Constructor(t)
            | DependencyName::AllMembers(t) => strip_ns(t).into(),
            DependencyName::Const(t, c) => format!("{}::{}", strip_ns(t), c),
            DependencyName::Prop(t, p) | DependencyName::StaticProp(t, p) => {
                format!("{}::{}", strip_ns(t), p)
            }
            DependencyName::Method(t, m) | DependencyName::StaticMethod(t, m) => {
                format!("{}::{}", strip_ns(t), m)
            }
            DependencyName::GConst(c) => strip_ns(c).into(),
            DependencyName::Fun(f) => strip_ns(f).into(),
            // Module names keep their namespace as-is.
            DependencyName::Module(m) => m.as_str().into(),
        }
    }

    /// The `DepType` tag used when hashing this variant; must agree with
    /// `From<DependencyName> for DependencyHash` above.
    pub fn dep_type(&self) -> DepType {
        match self {
            DependencyName::Extends(_) => DepType::Extends,
            DependencyName::Const(_, _) => DepType::Const,
            DependencyName::Constructor(_) => DepType::Constructor,
            DependencyName::Prop(_, _) => DepType::Prop,
            DependencyName::StaticProp(_, _) => DepType::SProp,
            DependencyName::Method(_, _) => DepType::Method,
            DependencyName::StaticMethod(_, _) => DepType::SMethod,
            DependencyName::AllMembers(_) => DepType::AllMembers,
            DependencyName::GConst(_) => DepType::GConst,
            DependencyName::Fun(_) => DepType::Fun,
            DependencyName::Type(_) => DepType::Type,
            DependencyName::Module(_) => DepType::Module,
        }
    }
}

/// Organize and administer dependency records.
pub trait DepGraphWriter: Debug + Send + Sync {
    /// Record a dependency.
    // e.g. If class B extends A {} then A <- B (B depends on A). So here,
    // dependent is B and dependency is A.
    fn add_dependency(&self, dependent: DeclName, dependency: DependencyName) -> Result<()>;
}

/// Query dependency records.
pub trait DepGraphReader: Debug + Send + Sync {
    /// Retrieve dependents of a name.
    fn get_dependents(&self, dependency: DependencyName) -> Box<dyn Iterator<Item = Dep> + '_>;
}

/// A no-op implementation of the `DepGraphReader` & `DepGraphWriter` traits.
/// All registered edges are thrown away.
#[derive(Debug, Clone, Default)]
pub struct NoDepGraph;

impl DepGraphWriter for NoDepGraph {
    fn add_dependency(&self, _dependent: DeclName, _dependency: DependencyName) -> Result<()> {
        Ok(())
    }
}

impl DepGraphReader for NoDepGraph {
    fn get_dependents(&self, _dependency: DependencyName) -> Box<dyn Iterator<Item = Dep>> {
        Box::new(std::iter::empty())
    }
}

/// Read & write dependency records.
pub trait DepGraph: DepGraphReader + DepGraphWriter {}

// Blanket impl: anything that can both read and write is a `DepGraph`.
impl<T: DepGraphReader + DepGraphWriter> DepGraph for T {}
TOML
hhvm/hphp/hack/src/hackrs/depgraph_api/cargo/depgraph_api/Cargo.toml
# @generated by autocargo [package] name = "depgraph_api" version = "0.0.0" edition = "2021" [lib] path = "../../depgraph_api.rs" [dependencies] core_utils_rust = { version = "0.0.0", path = "../../../../utils/core" } dep = { version = "0.0.0", path = "../../../../depgraph/cargo/dep" } hh24_types = { version = "0.0.0", path = "../../../../utils/hh24_types" } pos = { version = "0.0.0", path = "../../../pos/cargo/pos" } thiserror = "1.0.43" typing_deps_hash = { version = "0.0.0", path = "../../../../deps/cargo/typing_deps_hash" }
Rust
hhvm/hphp/hack/src/hackrs/file_provider/file_provider.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use std::fmt::Debug; use anyhow::Result; use bstr::BString; use pos::RelativePath; mod provider; pub use provider::DiskProvider; /// The interface through which the typechecker can access the contents of the /// repository and HHI files. /// /// The implementation may load file contents from the filesystem, or from a /// cache in front of the filesystem, or from in-memory IDE buffers. pub trait FileProvider: Debug + Send + Sync { /// Return the contents of the given file. May return `Ok("")` if the file /// can't be found in the underlying store, or may return a `std::io::Error` /// with `ErrorKind::NotFound` (wrapped in `anyhow::Error`), depending on /// the use case. fn get(&self, file: RelativePath) -> Result<BString>; }
Rust
hhvm/hphp/hack/src/hackrs/file_provider/provider.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use std::sync::Arc; use anyhow::Result; use bstr::BString; use pos::RelativePath; use pos::RelativePathCtx; use tempdir::TempDir; #[derive(Debug)] pub struct DiskProvider { relative_path_ctx: Arc<RelativePathCtx>, // Drop the tempdir when the disk provider goes out of scope _hhi_root: Option<TempDir>, } impl DiskProvider { pub fn new(relative_path_ctx: Arc<RelativePathCtx>, hhi_root: Option<TempDir>) -> Self { Self { relative_path_ctx, _hhi_root: hhi_root, } } pub fn read(&self, file: RelativePath) -> std::io::Result<BString> { let absolute_path = file.to_absolute(&self.relative_path_ctx); Ok(std::fs::read(absolute_path)?.into()) } } impl super::FileProvider for DiskProvider { fn get(&self, file: RelativePath) -> Result<BString> { Ok(self.read(file)?) } }
TOML
hhvm/hphp/hack/src/hackrs/file_provider/cargo/file_provider/Cargo.toml
# @generated by autocargo [package] name = "file_provider" version = "0.0.0" edition = "2021" [lib] path = "../../file_provider.rs" [dependencies] anyhow = "1.0.71" bstr = { version = "1.4.0", features = ["serde", "std", "unicode"] } pos = { version = "0.0.0", path = "../../../pos/cargo/pos" } tempdir = "0.3"
Rust
hhvm/hphp/hack/src/hackrs/folded_decl_provider/eager.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use std::sync::Arc; use datastore::Store; use oxidized::naming_types::KindOfType; use pos::TypeName; use shallow_decl_provider::ShallowDeclProvider; use ty::decl::FoldedClass; use ty::reason::Reason; use super::Error; use super::Result; use super::TypeDecl; /// A `FoldedDeclProvider` which assumes that all extant decls have eagerly been /// inserted in its store. It returns `None` when asked for a decl which is not /// present in the store. #[derive(Debug)] pub struct EagerFoldedDeclProvider<R: Reason> { store: Arc<dyn Store<TypeName, Arc<FoldedClass<R>>>>, shallow_decl_provider: Arc<dyn ShallowDeclProvider<R>>, } impl<R: Reason> EagerFoldedDeclProvider<R> { pub fn new( store: Arc<dyn Store<TypeName, Arc<FoldedClass<R>>>>, shallow_decl_provider: Arc<dyn ShallowDeclProvider<R>>, ) -> Self { Self { store, shallow_decl_provider, } } } impl<R: Reason> super::FoldedDeclProvider<R> for EagerFoldedDeclProvider<R> { fn get_type(&self, name: TypeName) -> Result<Option<TypeDecl<R>>> { match self.shallow_decl_provider.get_type_kind(name)? { None => Ok(None), Some(KindOfType::TTypedef) => Ok(Some(TypeDecl::Typedef( self.shallow_decl_provider.get_typedef(name)?.expect( "got None after get_type_kind indicated a typedef with this name exists", ), ))), Some(KindOfType::TClass) => match self.store.get(name).map_err(Error::Store)? { Some(c) => Ok(Some(TypeDecl::Class(c))), None => Ok(None), }, } } }
Rust
hhvm/hphp/hack/src/hackrs/folded_decl_provider/fold.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use std::sync::Arc; use eq_modulo_pos::EqModuloPos; use hash::IndexMap; use hash::IndexSet; use oxidized::global_options::GlobalOptions; use pos::ClassConstName; use pos::MethodName; use pos::ModuleName; use pos::Positioned; use pos::PropName; use pos::TypeConstName; use pos::TypeName; use special_names as sn; use ty::decl::folded::Constructor; use ty::decl::subst::Subst; use ty::decl::AbstractTypeconst; use ty::decl::Abstraction; use ty::decl::CeVisibility; use ty::decl::ClassConst; use ty::decl::ClassConstKind; use ty::decl::ClassEltFlags; use ty::decl::ClassEltFlagsArgs; use ty::decl::ClassishKind; use ty::decl::ConcreteTypeconst; use ty::decl::ConsistentKind; use ty::decl::FoldedClass; use ty::decl::FoldedElement; use ty::decl::Requirement; use ty::decl::ShallowClass; use ty::decl::ShallowClassConst; use ty::decl::ShallowMethod; use ty::decl::ShallowProp; use ty::decl::ShallowTypeconst; use ty::decl::TaccessType; use ty::decl::Ty; use ty::decl::TypeConst; use ty::decl::Typeconst; use ty::decl::Visibility; use ty::decl_error::DeclError; use ty::reason::Reason; use super::inherit::Inherited; use super::Result; use super::Substitution; mod decl_enum; // note(sf, 2022-02-03): c.f. hphp/hack/src/decl/decl_folded_class.ml #[derive(Debug)] pub struct DeclFolder<'a, R: Reason> { /// Options affecting typechecking behaviors. opts: &'a GlobalOptions, /// The class whose folded decl we are producing. child: &'a ShallowClass<R>, /// The folded decls of all (recursive) ancestors of `child`. parents: &'a IndexMap<TypeName, Arc<FoldedClass<R>>>, /// Hack errors which will be written to `child`'s folded decl. 
errors: Vec<DeclError<R::Pos>>, } #[derive(PartialEq)] enum Pass { Extends, Traits, Xhp, } impl<'a, R: Reason> DeclFolder<'a, R> { pub fn decl_class( opts: &'a GlobalOptions, child: &'a ShallowClass<R>, parents: &'a IndexMap<TypeName, Arc<FoldedClass<R>>>, errors: Vec<DeclError<R::Pos>>, ) -> Result<Arc<FoldedClass<R>>> { let this = Self { opts, child, parents, errors, }; this.decl_class_impl() } fn visibility( &self, cls: TypeName, module: Option<ModuleName>, vis: Visibility, ) -> CeVisibility { match vis { Visibility::Public => CeVisibility::Public, Visibility::Private => CeVisibility::Private(cls), Visibility::Protected => CeVisibility::Protected(cls), Visibility::Internal => module.map_or(CeVisibility::Public, |module_name| { CeVisibility::Internal(module_name) }), } } fn synthesize_const_defaults(&self, consts: &mut IndexMap<ClassConstName, ClassConst<R>>) { for c in consts.values_mut() { if c.kind == ClassConstKind::CCAbstract(true) { c.kind = ClassConstKind::CCConcrete; } } } /// When all type constants have been inherited and declared, this step /// synthesizes the defaults of abstract type constants into concrete type /// constants. fn synthesize_type_const_defaults( &self, type_consts: &mut IndexMap<TypeConstName, TypeConst<R>>, consts: &mut IndexMap<ClassConstName, ClassConst<R>>, ) { for (name, tc) in type_consts.iter_mut() { if let Typeconst::TCAbstract(atc) = &mut tc.kind { if let Some(ty) = atc.default.take() { tc.kind = Typeconst::TCConcrete(ConcreteTypeconst { ty }); tc.is_concretized = true; if let Some(c) = consts.get_mut(&ClassConstName(name.as_symbol())) { c.kind = ClassConstKind::CCConcrete; } } } } } /// Every class, interface, and trait implicitly defines a `::class` to allow /// accessing its fully qualified name as a string. fn decl_class_class(&self, consts: &mut IndexMap<ClassConstName, ClassConst<R>>) { // note(sf, 2022-02-08): c.f. 
Decl_folded_class.class_class_decl let pos = self.child.name.pos(); let name = self.child.name.id(); let reason = R::class_class(pos.clone(), name); let classname_ty = Ty::apply( reason.clone(), Positioned::new(pos.clone(), *sn::classes::cClassname), [Ty::this(reason)].into(), ); let class_const = ClassConst { is_synthesized: true, kind: ClassConstKind::CCConcrete, pos: pos.clone(), ty: classname_ty, origin: name, refs: Box::default(), }; consts.insert(*sn::members::mClass, class_const); } /// Each concrete type constant `T = τ` implicitly defines a class /// constant of the same name `T` having type `TypeStructure<τ>`. fn type_const_structure(&self, stc: &ShallowTypeconst<R>) -> ClassConst<R> { let pos = stc.name.pos(); let r = R::witness_from_decl(pos.clone()); let tsid = Positioned::new(pos.clone(), *sn::fb::cTypeStructure); // The type `this`. let tthis = Ty::this(r.clone()); // The type `this::T`. let taccess = Ty::access( r.clone(), TaccessType { ty: tthis, type_const: stc.name.clone(), }, ); // The type `TypeStructure<this::T>`. let ts_ty = Ty::apply(r, tsid, [taccess].into()); let kind = match &stc.kind { Typeconst::TCAbstract(AbstractTypeconst { default, .. }) => { ClassConstKind::CCAbstract(default.is_some()) } Typeconst::TCConcrete(_) => ClassConstKind::CCConcrete, }; // A class constant (which will be associated with the name `T`) of type // `TypeStructure<this::T>`. ClassConst { is_synthesized: true, kind, pos: pos.clone(), ty: ts_ty, origin: self.child.name.id(), refs: Default::default(), } } fn maybe_add_supportdyn_bound(&self, p: &R::Pos, kind: &mut Typeconst<R>) { if self.opts.tco_everything_sdt { if let Typeconst::TCAbstract(AbstractTypeconst { as_constraint: as_constraint @ None, .. 
}) = kind { *as_constraint = Some(decl_enforceability::supportdyn_mixed( p.clone(), R::witness_from_decl(p.clone()), )); } } } fn decl_type_const( &self, type_consts: &mut IndexMap<TypeConstName, TypeConst<R>>, class_consts: &mut IndexMap<ClassConstName, ClassConst<R>>, stc: &ShallowTypeconst<R>, ) { // note(sf, 2022-02-10): c.f. Decl_folded_class.typeconst_fold match self.child.kind { ClassishKind::Cenum => return, ClassishKind::CenumClass(_) | ClassishKind::Ctrait | ClassishKind::Cinterface | ClassishKind::Cclass(_) => {} } let TypeConstName(name) = stc.name.id(); let ptc = type_consts.get(stc.name.id_ref()); let ptc_enforceable = ptc.and_then(|tc| tc.enforceable.as_ref()); let ptc_reifiable = ptc.and_then(|tc| tc.reifiable.as_ref()); let mut kind = stc.kind.clone(); if !stc.is_ctx { self.maybe_add_supportdyn_bound(stc.name.pos(), &mut kind); } let type_const = TypeConst { is_synthesized: false, name: stc.name.clone(), kind, origin: self.child.name.id(), enforceable: ty::decl::ty::Enforceable( stc.enforceable.as_ref().or(ptc_enforceable).cloned(), ), reifiable: stc.reifiable.as_ref().or(ptc_reifiable).cloned(), is_concretized: false, is_ctx: stc.is_ctx, }; type_consts.insert(TypeConstName(name), type_const); class_consts.insert(ClassConstName(name), self.type_const_structure(stc)); } fn decl_class_const( &self, consts: &mut IndexMap<ClassConstName, ClassConst<R>>, c: &ShallowClassConst<R>, ) { // note(sf, 2022-02-10): c.f. Decl_folded_class.class_const_fold let class_const = ClassConst { is_synthesized: false, kind: c.kind, pos: c.name.pos().clone(), ty: c.ty.clone(), origin: self.child.name.id(), refs: c.refs.clone(), }; consts.insert(c.name.id(), class_const); } fn decl_prop(&self, props: &mut IndexMap<PropName, FoldedElement>, sp: &ShallowProp<R>) { // note(sf, 2022-02-08): c.f. 
Decl_folded_class.prop_decl let cls = self.child.name.id(); let prop = sp.name.id(); let vis = self.visibility( cls, self.child.module.as_ref().map(Positioned::id), sp.visibility, ); let prop_flags = &sp.flags; let flag_args = ClassEltFlagsArgs { xhp_attr: sp.xhp_attr, is_abstract: prop_flags.is_abstract(), is_final: true, is_superfluous_override: false, is_lsb: false, is_synthesized: false, is_const: prop_flags.is_const(), is_lateinit: prop_flags.is_lateinit(), is_dynamicallycallable: false, is_readonly_prop: prop_flags.is_readonly(), supports_dynamic_type: false, needs_init: prop_flags.needs_init(), safe_global_variable: false, }; let elt = FoldedElement { origin: self.child.name.id(), visibility: vis, deprecated: None, flags: ClassEltFlags::new(flag_args), }; props.insert(prop, elt); } fn decl_static_prop( &self, static_props: &mut IndexMap<PropName, FoldedElement>, sp: &ShallowProp<R>, ) { let cls = self.child.name.id(); let prop = sp.name.id(); let vis = self.visibility( cls, self.child.module.as_ref().map(Positioned::id), sp.visibility, ); let prop_flags = &sp.flags; let flag_args = ClassEltFlagsArgs { xhp_attr: sp.xhp_attr, is_abstract: prop_flags.is_abstract(), is_final: true, is_superfluous_override: false, is_lsb: prop_flags.is_lsb(), is_synthesized: false, is_const: prop_flags.is_const(), is_lateinit: prop_flags.is_lateinit(), is_dynamicallycallable: false, is_readonly_prop: prop_flags.is_readonly(), supports_dynamic_type: false, needs_init: false, safe_global_variable: prop_flags.is_safe_global_variable(), }; let elt = FoldedElement { origin: self.child.name.id(), visibility: vis, deprecated: None, flags: ClassEltFlags::new(flag_args), }; static_props.insert(prop, elt); } fn decl_method( &self, methods: &mut IndexMap<MethodName, FoldedElement>, sm: &ShallowMethod<R>, ) { let cls = self.child.name.id(); let meth = sm.name.id(); let vis = match (methods.get(&meth), sm.visibility) { ( Some(FoldedElement { visibility: CeVisibility::Protected(cls), .. 
}), Visibility::Protected, ) => CeVisibility::Protected(*cls), (_, v) => self.visibility(cls, self.child.module.as_ref().map(Positioned::id), v), }; let meth_flags = &sm.flags; let flag_args = ClassEltFlagsArgs { xhp_attr: None, is_abstract: meth_flags.is_abstract(), is_final: meth_flags.is_final(), is_superfluous_override: meth_flags.is_override() && !methods.contains_key(&meth), is_lsb: false, is_synthesized: false, is_const: false, is_lateinit: false, is_dynamicallycallable: meth_flags.is_dynamicallycallable(), is_readonly_prop: false, supports_dynamic_type: meth_flags.supports_dynamic_type(), needs_init: false, safe_global_variable: false, }; let elt = FoldedElement { origin: cls, visibility: vis, deprecated: sm.deprecated, flags: ClassEltFlags::new(flag_args), }; methods.insert(meth, elt); } fn decl_constructor(&self, constructor: &mut Constructor) { // Constructors in children of `self.child` must be consistent? let consistency = if self.child.is_final { ConsistentKind::FinalClass } else if (self.child.user_attributes.iter()) .any(|ua| ua.name.id() == *sn::user_attributes::uaConsistentConstruct) { ConsistentKind::ConsistentConstruct } else { ConsistentKind::Inconsistent }; let elt = self.child.constructor.as_ref().map(|sm| { let cls = self.child.name.id(); let vis = self.visibility( cls, self.child.module.as_ref().map(Positioned::id), sm.visibility, ); let meth_flags = &sm.flags; let flag_args = ClassEltFlagsArgs { xhp_attr: None, is_abstract: meth_flags.is_abstract(), is_final: meth_flags.is_final(), is_superfluous_override: false, is_lsb: false, is_synthesized: false, is_const: false, is_lateinit: false, is_dynamicallycallable: false, is_readonly_prop: false, supports_dynamic_type: false, needs_init: false, safe_global_variable: false, }; FoldedElement { origin: self.child.name.id(), visibility: vis, deprecated: sm.deprecated, flags: ClassEltFlags::new(flag_args), } }); let consistency = ConsistentKind::coalesce(constructor.consistency, consistency); if 
elt.is_none() { // Child class doesn't define ctor; just update consistency. constructor.consistency = consistency; } else { // Child constructor exists, replace wholesale. *constructor = Constructor::new(elt, consistency) } } fn get_implements(&self, ty: &Ty<R>, ancestors: &mut IndexMap<TypeName, Ty<R>>) { let (_, pos_id, tyl) = ty.unwrap_class_type(); match self.parents.get(&pos_id.id()) { None => { // The class lives in PHP land. ancestors.insert(pos_id.id(), ty.clone()); } Some(cls) => { let subst = Subst::new(&cls.tparams, tyl); let substitution = Substitution { subst: &subst }; // Update `ancestors`. for (&anc_name, anc_ty) in &cls.ancestors { ancestors.insert(anc_name, substitution.instantiate(anc_ty)); } // Now add `ty`. ancestors.insert(pos_id.id(), ty.clone()); } } } // HHVM implicitly adds StringishObject interface for every class/interface/trait // with a __toString method; "string" also implements this interface pub fn stringish_object_parent(cls: &ShallowClass<R>) -> Option<Ty<R>> { if cls.name.id() != *sn::classes::cStringishObject { (cls.methods.iter()) .find(|meth| meth.name.id() == *sn::members::__toString) .map(|meth| { Ty::apply( R::hint(meth.name.pos().clone()), Positioned::new( meth.name.pos().clone(), sn::classes::cStringishObject.clone(), ), Box::new([]), ) }) } else { None } } /// Check that the kind of a class is compatible with its parent /// For example, a class cannot extend an interface, an interface cannot /// extend a trait etc ... 
/// TODO: T87242856 fn check_extend_kind( &mut self, parent_pos: &R::Pos, parent_kind: ClassishKind, parent_name: TypeName, ) { match (parent_kind, self.child.kind) { // What is allowed (ClassishKind::Cclass(_), ClassishKind::Cclass(_)) | (ClassishKind::Ctrait, ClassishKind::Ctrait) | (ClassishKind::Cinterface, ClassishKind::Cinterface) | (ClassishKind::CenumClass(_), ClassishKind::CenumClass(_)) => {} // enums extend `BuiltinEnum` under the hood (ClassishKind::Cclass(k), ClassishKind::Cenum | ClassishKind::CenumClass(_)) if k.is_abstract() => {} // What is disallowed _ => self.errors.push(DeclError::WrongExtendKind { parent_pos: parent_pos.clone(), parent_kind, parent_name, pos: self.child.name.pos().clone(), kind: self.child.kind, name: self.child.name.id(), }), } } fn add_class_parent_or_trait( &mut self, pass: Pass, extends: &mut IndexSet<TypeName>, ty: &Ty<R>, ) { let (_, pos_id, _) = ty.unwrap_class_type(); extends.insert(pos_id.id()); if let Some(cls) = self.parents.get(&pos_id.id()) { if pass == Pass::Extends { self.check_extend_kind(pos_id.pos(), cls.kind, cls.name); } if pass == Pass::Xhp { // If we are crawling the xhp attribute deps, need to merge their xhp deps as well // XHP attribute dependencies don't actually pull the trait into the class, // so we need to track them totally separately. 
extends.extend(cls.xhp_attr_deps.iter().cloned()); } extends.extend(cls.extends.iter().cloned()); } } fn get_extends(&mut self) -> IndexSet<TypeName> { let mut extends = IndexSet::default(); for extend in self.child.extends.iter() { self.add_class_parent_or_trait(Pass::Extends, &mut extends, extend) } for use_ in self.child.uses.iter() { self.add_class_parent_or_trait(Pass::Traits, &mut extends, use_) } extends } fn get_xhp_attr_deps(&mut self) -> IndexSet<TypeName> { let mut xhp_attr_deps = IndexSet::default(); for xhp_attr_use in self.child.xhp_attr_uses.iter() { self.add_class_parent_or_trait(Pass::Xhp, &mut xhp_attr_deps, xhp_attr_use) } xhp_attr_deps } /// Accumulate requirements so that we can successfully check the bodies /// of trait methods / check that classes satisfy these requirements fn flatten_parent_class_reqs( &self, req_ancestors: &mut Vec<Requirement<R>>, req_ancestors_extends: &mut IndexSet<TypeName>, parent_ty: &Ty<R>, ) { let (_, pos_id, parent_params) = parent_ty.unwrap_class_type(); if let Some(cls) = self.parents.get(&pos_id.id()) { let subst = Subst::new(&cls.tparams, parent_params); let substitution = Substitution { subst: &subst }; req_ancestors.extend( cls.req_ancestors .iter() .map(|req| substitution.instantiate(&req.ty)) .map(|ty| Requirement::new(pos_id.pos().clone(), ty)), ); match self.child.kind { ClassishKind::Cclass(_) => { // Not necessary to accumulate req_ancestors_extends for classes -- // it's not used } ClassishKind::Ctrait | ClassishKind::Cinterface => { req_ancestors_extends.extend(cls.req_ancestors_extends.iter().cloned()); } ClassishKind::Cenum | ClassishKind::CenumClass(_) => { panic!(); } } } } fn flatten_parent_class_class_reqs( &self, req_class_ancestors: &mut Vec<Requirement<R>>, parent_ty: &Ty<R>, ) { let (_, pos_id, parent_params) = parent_ty.unwrap_class_type(); if let Some(parent_type) = self.parents.get(&pos_id.id()) { let subst = Subst::new(&parent_type.tparams, parent_params); let substitution = Substitution 
{ subst: &subst }; req_class_ancestors.extend( (parent_type.req_class_ancestors.iter()) .map(|req| substitution.instantiate(&req.ty)) .map(|ty| Requirement::new(pos_id.pos().clone(), ty)), ); } } fn declared_class_req( &self, req_ancestors: &mut Vec<Requirement<R>>, req_ancestors_extends: &mut IndexSet<TypeName>, req_ty: &Ty<R>, ) { let (_, pos_id, _) = req_ty.unwrap_class_type(); // Since the req is declared on this class, we should // emphatically *not* substitute: a require extends Foo<T> is // going to be this class's <T> req_ancestors.push(Requirement::new(pos_id.pos().clone(), req_ty.clone())); req_ancestors_extends.insert(pos_id.id()); if let Some(cls) = self.parents.get(&pos_id.id()) { req_ancestors_extends.extend(cls.extends.iter().cloned()); req_ancestors_extends.extend(cls.xhp_attr_deps.iter().cloned()); // The req may be on an interface that has reqs of its own; the // flattened ancestry required by *those* reqs need to be added // in to, e.g., interpret accesses to protected functions inside // traits req_ancestors_extends.extend(cls.req_ancestors_extends.iter().cloned()); } } /// Cheap hack: we cannot do unification / subtyping in the decl phase because /// the type arguments of the types that we are trying to unify may not have /// been declared yet. See the test iface_require_circular.php for details. /// /// However, we don't want a super long req_extends list because of the perf /// overhead. And while we can't do proper unification we can dedup types /// that are syntactically equivalent. /// /// A nicer solution might be to add a phase in between type-decl and type-check /// that prunes the list via proper subtyping, but that's a little more work /// than I'm willing to do now. 
    /// Deduplicate `req_ancestors` in place, keeping requirements whose type
    /// arguments differ (modulo position/reason) from the last-seen entry for
    /// the same class name. The double `reverse` preserves parity with the
    /// OCaml implementation's `rev_filter_map`-based ordering.
    fn naive_dedup(&self, req_ancestors: &mut Vec<Requirement<R>>) {
        // Maps each required class name to the targs of the requirement
        // most recently kept for it.
        let mut seen_reqs: IndexMap<TypeName, Vec<Ty<R>>> = IndexMap::default();
        // Reverse to match the OCaml ordering for building the seen_reqs map
        // (since OCaml uses `rev_filter_map` for perf reasons)
        req_ancestors.reverse();
        req_ancestors.retain(|req_extend| {
            let (_, pos_id, targs) = req_extend.ty.unwrap_class_type();
            if let Some(seen_targs) = seen_reqs.get(&pos_id.id()) {
                if targs.eq_modulo_pos_and_reason(seen_targs) {
                    // Syntactically equivalent to a requirement we already
                    // kept: drop it.
                    false
                } else {
                    // Seems odd to replace the existing targs list when we
                    // see a different one, but the OCaml does it, so we
                    // need to as well
                    seen_reqs.insert(pos_id.id(), targs.to_vec());
                    true
                }
            } else {
                seen_reqs.insert(pos_id.id(), targs.to_vec());
                true
            }
        });
        // Reverse again to match the OCaml ordering for the returned list
        req_ancestors.reverse();
    }

    /// Compute the class's requirements:
    /// `(req_ancestors, req_ancestors_extends, req_class_ancestors)` —
    /// declared `require extends` / `require implements` plus requirements
    /// flattened from used traits (and, for interfaces, extended interfaces;
    /// otherwise implemented interfaces), and `require class` requirements.
    fn get_class_requirements(
        &self,
    ) -> (
        Box<[Requirement<R>]>,
        IndexSet<TypeName>,
        Box<[Requirement<R>]>,
    ) {
        let mut req_ancestors = vec![];
        let mut req_ancestors_extends = IndexSet::default();
        // Requirements declared directly on this class...
        for req_extend in self.child.req_extends.iter() {
            self.declared_class_req(&mut req_ancestors, &mut req_ancestors_extends, req_extend);
        }
        for req_implement in self.child.req_implements.iter() {
            self.declared_class_req(
                &mut req_ancestors,
                &mut req_ancestors_extends,
                req_implement,
            );
        }
        // ...plus requirements inherited from used traits.
        for use_ in self.child.uses.iter() {
            self.flatten_parent_class_reqs(&mut req_ancestors, &mut req_ancestors_extends, use_);
        }
        // Interfaces propagate requirements from the interfaces they extend;
        // other kinds propagate from the interfaces they implement.
        if self.child.kind.is_cinterface() {
            for extend in self.child.extends.iter() {
                self.flatten_parent_class_reqs(
                    &mut req_ancestors,
                    &mut req_ancestors_extends,
                    extend,
                );
            }
        } else {
            for implement in self.child.implements.iter() {
                self.flatten_parent_class_reqs(
                    &mut req_ancestors,
                    &mut req_ancestors_extends,
                    implement,
                );
            }
        }
        self.naive_dedup(&mut req_ancestors);
        // `require class` requirements: declared ones (unsubstituted), then
        // those flattened from used traits.
        let mut req_class_ancestors: Vec<_> = (self.child.req_class.iter())
            .map(|req_ty| {
                let (_, pos_id, _) = req_ty.unwrap_class_type();
                Requirement::new(pos_id.pos().clone(), req_ty.clone())
            })
            .collect();
        for ty in self.child.uses.iter() {
            self.flatten_parent_class_class_reqs(&mut req_class_ancestors, ty);
        }
        self.naive_dedup(&mut req_class_ancestors);
        (
            req_ancestors.into_boxed_slice(),
            req_ancestors_extends,
            req_class_ancestors.into_boxed_slice(),
        )
    }

    /// If the child carries the `__Sealed` attribute, return the set of
    /// class names listed in it; otherwise `None`.
    fn get_sealed_whitelist(&self) -> Option<IndexSet<TypeName>> {
        (self.child.user_attributes.iter())
            .find(|ua| ua.name.id() == *sn::user_attributes::uaSealed)
            .map(|ua| ua.classname_params().iter().copied().collect())
    }

    /// Collect members that still need initialization: the child's own
    /// non-XHP, non-lateinit props flagged `needs_init`, deferred-init
    /// members of folded parents, and (outside .hhi files) a synthetic
    /// `parent::__construct` entry when a parent has a concrete constructor
    /// and the child declares its own.
    fn get_deferred_init_members_helper(&self) -> IndexSet<PropName> {
        let shallow_props = (self.child.props.iter())
            .filter(|prop| prop.xhp_attr.is_none())
            .filter(|prop| !prop.flags.is_lateinit())
            .filter(|prop| prop.flags.needs_init())
            .map(|prop| prop.name.id());
        let extends_props = (self.child.extends.iter())
            .map(|extend| extend.unwrap_class_type())
            .filter_map(|(_, pos_id, _)| self.parents.get(&pos_id.id()))
            .flat_map(|ty| ty.deferred_init_members.iter().copied());
        let parent_construct = if self.child.mode == oxidized::file_info::Mode::Mhhi {
            None
        } else {
            // Traits look through their `require extends`; classes look
            // through their `extends`.
            if self.child.kind == ClassishKind::Ctrait {
                self.child.req_extends.iter()
            } else {
                self.child.extends.iter()
            }
            .map(|ty| ty.unwrap_class_type())
            .filter_map(|(_, pos_id, _)| self.parents.get(&pos_id.id()))
            .find(|parent| parent.has_concrete_constructor() && self.child.constructor.is_some())
            .map(|_| *sn::members::parentConstruct)
        };
        shallow_props
            .chain(extends_props)
            .chain(parent_construct.into_iter())
            .collect()
    }

    /// Return all init-requiring props of the class and its ancestors from the
    /// given shallow class decl and the ancestors' folded decls.
    fn get_deferred_init_members(&self, cstr: &Option<FoldedElement>) -> IndexSet<PropName> {
        // Deferred-init tracking only applies to abstract classes without
        // their own concrete constructor, and to traits.
        let has_concrete_cstr = match cstr {
            Some(e) if !e.is_abstract() => true,
            _ => false,
        };
        let has_own_cstr = has_concrete_cstr && self.child.constructor.is_some();
        match self.child.kind {
            ClassishKind::Cclass(cls) if cls.is_abstract() && !has_own_cstr => {
                self.get_deferred_init_members_helper()
            }
            ClassishKind::Ctrait => self.get_deferred_init_members_helper(),
            _ => IndexSet::default(),
        }
    }

    /// Fold `self.child` (a shallow class decl) together with the folded
    /// decls of its parents into a complete `FoldedClass`. Consumes `self`;
    /// decl errors accumulated along the way end up in `decl_errors`.
    fn decl_class_impl(mut self) -> Result<Arc<FoldedClass<R>>> {
        // Start from everything inherited from parents/traits/interfaces...
        let Inherited {
            substs,
            mut props,
            mut static_props,
            mut methods,
            mut static_methods,
            mut constructor,
            mut consts,
            mut type_consts,
        } = Inherited::make(self.opts, self.child, self.parents)?;
        // ...then layer the child's own members on top.
        for sp in self.child.props.iter() {
            self.decl_prop(&mut props, sp);
        }
        for sp in self.child.static_props.iter() {
            self.decl_static_prop(&mut static_props, sp);
        }
        for sm in self.child.methods.iter() {
            self.decl_method(&mut methods, sm);
        }
        for sm in self.child.static_methods.iter() {
            self.decl_method(&mut static_methods, sm);
        }
        self.decl_constructor(&mut constructor);
        for c in self.child.consts.iter() {
            self.decl_class_const(&mut consts, c);
        }
        self.decl_class_class(&mut consts);
        for tc in self.child.typeconsts.iter() {
            self.decl_type_const(&mut type_consts, &mut consts, tc);
        }
        // Concrete classes / enum classes synthesize defaults for abstract
        // constants and type constants.
        if self.child.kind == ClassishKind::Cclass(Abstraction::Concrete)
            || self.child.kind == ClassishKind::CenumClass(Abstraction::Concrete)
        {
            self.synthesize_const_defaults(&mut consts);
            self.synthesize_type_const_defaults(&mut type_consts, &mut consts);
        }
        let stringish_object_opt = DeclFolder::stringish_object_parent(self.child);
        // Order matters - the earlier, the higher precedence its ancestors will have
        let direct_ancestors = (stringish_object_opt.iter())
            .chain(self.child.extends.iter())
            .chain(self.child.implements.iter())
            .chain(self.child.uses.iter());
        let mut ancestors = Default::default();
        for ty in direct_ancestors.rev() {
            self.get_implements(ty, &mut ancestors);
        }
        let extends = self.get_extends();
        let xhp_attr_deps = self.get_xhp_attr_deps();
        let (req_ancestors, req_ancestors_extends, req_class_ancestors) =
            self.get_class_requirements();
        // TODO(T88552052) can make logic more explicit now, enum members appear to
        // only need abstract without default and concrete type consts
        let enum_inner_ty = type_consts
            .get(&*sn::fb::tInner)
            .and_then(|tc| match &tc.kind {
                Typeconst::TCConcrete(tc) => Some(&tc.ty),
                Typeconst::TCAbstract(atc) => atc.default.as_ref(),
            });
        self.rewrite_class_consts_for_enum(enum_inner_ty, &ancestors, &mut consts);
        let sealed_whitelist = self.get_sealed_whitelist();
        let deferred_init_members = self.get_deferred_init_members(&constructor.elt);
        let mut tparams = self.child.tparams.clone();
        decl_enforceability::maybe_add_supportdyn_constraints(
            self.opts,
            Some(self.child),
            self.child.name.pos(),
            &mut tparams,
        );
        let fc = Arc::new(FoldedClass {
            name: self.child.name.id(),
            pos: self.child.name.pos().clone(),
            kind: self.child.kind,
            is_final: self.child.is_final,
            is_const: (self.child.user_attributes.iter())
                .any(|ua| ua.name.id() == *sn::user_attributes::uaConst),
            // Support both attribute and keyword for now, until typechecker changes are made
            is_internal: self.child.is_internal,
            is_xhp: self.child.is_xhp,
            support_dynamic_type: self.child.support_dynamic_type,
            enum_type: self.child.enum_type.clone(),
            has_xhp_keyword: self.child.has_xhp_keyword,
            module: self.child.module.clone(),
            is_module_level_trait: (self.child.user_attributes.iter())
                .any(|ua| ua.name.id() == *sn::user_attributes::uaModuleLevelTrait),
            tparams,
            where_constraints: self.child.where_constraints.clone(),
            substs,
            ancestors,
            props,
            static_props,
            methods,
            static_methods,
            constructor,
            consts,
            type_consts,
            xhp_enum_values: self.child.xhp_enum_values.clone(),
            extends,
            xhp_attr_deps,
            xhp_marked_empty: self.child.xhp_marked_empty,
            req_ancestors,
            req_ancestors_extends,
            req_class_ancestors,
            sealed_whitelist,
            deferred_init_members,
            decl_errors: self.errors.into_boxed_slice(),
            docs_url: self.child.docs_url.clone(),
        });
        Ok(fc)
    }
}
Rust
hhvm/hphp/hack/src/hackrs/folded_decl_provider/folded_decl_provider.rs
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.

use std::fmt::Debug;
use std::sync::Arc;

use itertools::Itertools;
use pos::TypeName;
use ty::decl::FoldedClass;
use ty::decl::TypedefDecl;
use ty::reason::Reason;

mod eager;
mod fold;
mod inherit;
mod provider;
mod subst;

pub use eager::EagerFoldedDeclProvider;
pub use fold::DeclFolder;
pub use provider::LazyFoldedDeclProvider;
pub use subst::Substitution;

/// Crate-local `Result` defaulting to this module's `Error`.
pub type Result<T, E = Error> = std::result::Result<T, E>;

/// Errors that can arise while producing a folded declaration.
#[derive(thiserror::Error, Debug)]
pub enum Error {
    /// An error from the underlying shallow decl provider.
    #[error("{0}")]
    Shallow(#[from] shallow_decl_provider::Error),

    /// Folding `class` failed because folding one of its (transitive)
    /// ancestors failed; `parents` records the chain of ancestors from the
    /// failing one outward (hence the `rev()` when displaying the "via"
    /// path).
    #[error(
        "Failed to declare {class} because of error in ancestor {} (via {}): {error}",
        .parents.first().unwrap(),
        .parents.iter().rev().join(", "),
    )]
    Parent {
        class: TypeName,
        parents: Vec<TypeName>,
        #[source]
        error: Box<Error>,
    },

    /// A failure in the provider's backing datastore.
    #[error("Error in FoldedDeclProvider datastore: {0}")]
    Store(#[source] anyhow::Error),
}

/// The two kinds of type-level declaration a name can resolve to.
#[derive(Clone, Debug)]
pub enum TypeDecl<R: Reason> {
    Class(Arc<FoldedClass<R>>),
    Typedef(Arc<TypedefDecl<R>>),
}

/// A get-or-compute interface for folded declarations. A folded class
/// declaration represents the near-complete type signature of that class; it
/// includes information about ancestors and metadata for all of the class'
/// members (including inherited members), but omits the types of all methods
/// and properties.
pub trait FoldedDeclProvider<R: Reason>: Debug + Send + Sync {
    /// Fetch the declaration of the class or typedef with the given name.
    fn get_type(&self, name: TypeName) -> Result<Option<TypeDecl<R>>>;

    /// Fetch the declaration of the typedef with the given name. If the given
    /// name is bound to a class rather than a typedef, return `None`.
    fn get_typedef(&self, name: TypeName) -> Result<Option<Arc<TypedefDecl<R>>>> {
        Ok(self.get_type(name)?.and_then(|decl| match decl {
            TypeDecl::Typedef(td) => Some(td),
            TypeDecl::Class(..) => None,
        }))
    }

    /// Fetch the declaration of the class with the given name. If the given
    /// name is bound to a typedef rather than a class, return `None`.
    fn get_class(&self, name: TypeName) -> Result<Option<Arc<FoldedClass<R>>>> {
        Ok(self.get_type(name)?.and_then(|decl| match decl {
            TypeDecl::Class(cls) => Some(cls),
            TypeDecl::Typedef(..) => None,
        }))
    }
}
Rust
hhvm/hphp/hack/src/hackrs/folded_decl_provider/inherit.rs
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.

use std::sync::Arc;

use hash::IndexMap;
use indexmap::map::Entry;
use oxidized::global_options::GlobalOptions;
use pos::ClassConstName;
use pos::MethodName;
use pos::PropName;
use pos::TypeConstName;
use pos::TypeName;
use ty::decl::folded::Constructor;
use ty::decl::subst::Subst;
use ty::decl::ty::ConsistentKind;
use ty::decl::AbstractTypeconst;
use ty::decl::Abstraction;
use ty::decl::CeVisibility;
use ty::decl::ClassConst;
use ty::decl::ClassConstKind;
use ty::decl::ClassishKind;
use ty::decl::FoldedClass;
use ty::decl::FoldedElement;
use ty::decl::ShallowClass;
use ty::decl::SubstContext;
use ty::decl::Ty;
use ty::decl::TypeConst;
use ty::decl::Typeconst;
use ty::reason::Reason;

use super::subst::Substitution;
use super::Result;

// note(sf, 2022-02-03): c.f. hphp/hack/src/decl/decl_inherit.ml

/// Everything a class inherits from its parents, traits, and interfaces,
/// accumulated member-by-member before the class's own members are layered
/// on top.
// note(sf, 2022-01-27): c.f. `Decl_inherit.inherited`
#[derive(Debug)]
pub struct Inherited<R: Reason> {
    pub substs: IndexMap<TypeName, SubstContext<R>>,
    pub props: IndexMap<PropName, FoldedElement>,
    pub static_props: IndexMap<PropName, FoldedElement>,
    pub methods: IndexMap<MethodName, FoldedElement>,
    pub static_methods: IndexMap<MethodName, FoldedElement>,
    pub constructor: Constructor,
    pub consts: IndexMap<ClassConstName, ClassConst<R>>,
    pub type_consts: IndexMap<TypeConstName, TypeConst<R>>,
}

impl<R: Reason> Default for Inherited<R> {
    fn default() -> Self {
        Self {
            substs: Default::default(),
            props: Default::default(),
            static_props: Default::default(),
            methods: Default::default(),
            static_methods: Default::default(),
            // No constructor seen yet, and nothing requires consistency.
            constructor: Constructor::new(None, ConsistentKind::Inconsistent),
            consts: Default::default(),
            type_consts: Default::default(),
        }
    }
}

impl<R: Reason> Inherited<R> {
    // Reasons to keep the old signature:
    // - We don't want to override a concrete method with an
    // abstract one;
    // - We don't want to override a method that's actually
    // implemented by the programmer with one that's "synthetic",
    // e.g. arising merely from a require-extends declaration in a
    // trait.
    // When these two considerations conflict, we give precedence to
    // abstractness for determining priority of the method.
    /// Decide whether an already-recorded member signature should win over a
    /// newly encountered one (see the precedence notes above).
    fn should_keep_old_sig(new_sig: &FoldedElement, old_sig: &FoldedElement) -> bool {
        !old_sig.is_abstract() && new_sig.is_abstract()
            || old_sig.is_abstract() == new_sig.is_abstract()
                && !old_sig.is_synthesized()
                && new_sig.is_synthesized()
    }

    /// Merge an inherited constructor into the accumulator, applying the
    /// same old-vs-new precedence as ordinary methods and coalescing
    /// consistency requirements.
    fn add_constructor(&mut self, constructor: Constructor) {
        let elt = match (constructor.elt.as_ref(), self.constructor.elt.take()) {
            (None, self_ctor) => self_ctor,
            (Some(other_ctor), Some(self_ctor))
                if Self::should_keep_old_sig(other_ctor, &self_ctor) =>
            {
                Some(self_ctor)
            }
            (_, _) => constructor.elt,
        };
        self.constructor = Constructor::new(
            elt,
            ConsistentKind::coalesce(self.constructor.consistency, constructor.consistency),
        );
    }

    /// Merge inherited type-parameter substitution contexts; a context from
    /// an actual `extends` beats one that came via `require extends`.
    fn add_substs(&mut self, other_substs: IndexMap<TypeName, SubstContext<R>>) {
        for (key, new_subst) in other_substs {
            match self.substs.entry(key) {
                Entry::Vacant(e) => {
                    e.insert(new_subst);
                }
                Entry::Occupied(mut e) => {
                    if !new_subst.from_req_extends || e.get().from_req_extends {
                        // If the old substitution context came via require
                        // extends, then we want to use the substitutions from
                        // the actual extends instead. e.g.,
                        // ```
                        // class Base<+T> {}
                        // trait MyTrait { require extends Base<mixed>; }
                        // class Child extends Base<int> { use MyTrait; }
                        // ```
                        // Here the substitution context `{MyTrait/[T -> mixed]}`
                        // should be overridden by `{Child/[T -> int]}`, because
                        // it's the actual extension of class `Base`.
                        e.insert(new_subst);
                    }
                }
            }
        }
    }

    /// Merge a single inherited method into `methods`, respecting
    /// `should_keep_old_sig` precedence.
    fn add_method(
        methods: &mut IndexMap<MethodName, FoldedElement>,
        (key, mut fe): (MethodName, FoldedElement),
    ) {
        match methods.entry(key) {
            Entry::Vacant(entry) => {
                // The method didn't exist so far, let's add it.
                entry.insert(fe);
            }
            Entry::Occupied(mut entry) => {
                if !Self::should_keep_old_sig(&fe, entry.get()) {
                    fe.set_is_superfluous_override(false);
                    entry.insert(fe);
                } else {
                    // Otherwise, we *are* overwriting a method
                    // definition. This is OK when a naming
                    // conflict is parent class vs trait (trait
                    // wins!), but not really OK when the naming
                    // conflict is trait vs trait (we rely on HHVM
                    // to catch the error at runtime).
                }
            }
        }
    }

    /// Merge a batch of inherited instance methods.
    fn add_methods(&mut self, other_methods: IndexMap<MethodName, FoldedElement>) {
        for (key, fe) in other_methods {
            Self::add_method(&mut self.methods, (key, fe))
        }
    }

    /// Merge a batch of inherited static methods.
    fn add_static_methods(&mut self, other_static_methods: IndexMap<MethodName, FoldedElement>) {
        for (key, fe) in other_static_methods {
            Self::add_method(&mut self.static_methods, (key, fe))
        }
    }

    /// Merge inherited instance properties; later entries simply replace
    /// earlier ones.
    fn add_props(&mut self, other_props: IndexMap<PropName, FoldedElement>) {
        self.props.extend(other_props)
    }

    /// Merge inherited static properties; later entries simply replace
    /// earlier ones.
    fn add_static_props(&mut self, other_static_props: IndexMap<PropName, FoldedElement>) {
        self.static_props.extend(other_static_props)
    }

    /// Merge inherited class constants, with special cases for synthesized
    /// and abstract constants.
    fn add_consts(&mut self, other_consts: IndexMap<ClassConstName, ClassConst<R>>) {
        for (name, new_const) in other_consts {
            match self.consts.entry(name) {
                Entry::Vacant(e) => {
                    e.insert(new_const);
                }
                Entry::Occupied(mut e) => {
                    let old_const = e.get();
                    match (
                        new_const.is_synthesized,
                        old_const.is_synthesized,
                        new_const.kind,
                        old_const.kind,
                    ) {
                        // Don't replace a constant with a synthesized constant.
                        // This covers the following case:
                        // ```
                        // class HasFoo { abstract const int FOO; }
                        // trait T { require extends Foo; }
                        // class Child extends HasFoo {
                        //      use T;
                        // }
                        // ```
                        // In this case, `Child` still doesn't have a value for
                        // the `FOO` constant.
                        (true, false, _, _) => {}
                        // Don't replace a concrete constant with an
                        // abstract constant found later in the MRO.
                        (
                            _,
                            _,
                            ClassConstKind::CCAbstract(false),
                            ClassConstKind::CCAbstract(true),
                        )
                        | (_, _, ClassConstKind::CCAbstract(_), ClassConstKind::CCConcrete) => {}
                        _ => {
                            e.insert(new_const);
                        }
                    }
                }
            }
        }
    }

    /// Merge inherited type constants; concreteness, defaults, and
    /// enforceability all influence which declaration wins.
    fn add_type_consts(
        &mut self,
        opts: &GlobalOptions,
        child: &ShallowClass<R>,
        other_type_consts: IndexMap<TypeConstName, TypeConst<R>>,
    ) {
        let fix_synthesized = opts.tco_enable_strict_const_semantics > 3;
        for (name, mut new_const) in other_type_consts {
            match self.type_consts.entry(name) {
                Entry::Vacant(e) => {
                    // The type constant didn't exist so far, let's add it.
                    e.insert(new_const);
                }
                Entry::Occupied(mut e) => {
                    let old_const = e.get();
                    if new_const.is_enforceable() && !old_const.is_enforceable() {
                        // If some typeconst in some ancestor was enforceable,
                        // then the child class' typeconst will be enforceable
                        // too, even if we didn't take that ancestor typeconst.
                        e.get_mut().enforceable = new_const.enforceable.clone();
                    }
                    let old_const = e.get();
                    let is_class = || match child.kind {
                        ClassishKind::Cclass(_) => true,
                        ClassishKind::Ctrait
                        | ClassishKind::Cinterface
                        | ClassishKind::Cenum
                        | ClassishKind::CenumClass(_) => false,
                    };
                    match (
                        old_const.is_synthesized,
                        new_const.is_synthesized,
                        &old_const.kind,
                        &new_const.kind,
                    ) {
                        // Under strict const semantics, a class keeps a
                        // non-synthesized typeconst over a synthesized one.
                        (false, true, _, _) if is_class() && fix_synthesized => {}
                        // This covers the following case
                        // ```
                        // interface I1 { abstract const type T; }
                        // interface I2 { const type T = int; }
                        // class C implements I1, I2 {}
                        // ```
                        // Then `C::T == I2::T` since `I2::T `is not abstract
                        (_, _, Typeconst::TCConcrete(_), Typeconst::TCAbstract(_)) => {}
                        // This covers the following case
                        // ```
                        // interface I {
                        //   abstract const type T as arraykey;
                        // }
                        //
                        // abstract class A {
                        //   abstract const type T as arraykey = string;
                        // }
                        //
                        // final class C extends A implements I {}
                        // ```
                        // `C::T` must come from `A`, not `I`, as `A`
                        // provides the default that will synthesize into a
                        // concrete type constant in `C`.
                        (
                            _,
                            _,
                            Typeconst::TCAbstract(AbstractTypeconst {
                                default: Some(_), ..
                            }),
                            Typeconst::TCAbstract(AbstractTypeconst { default: None, .. }),
                        ) => {}
                        // When a type constant is declared in multiple
                        // parents we need to make a subtle choice of what
                        // type we inherit. For example in:
                        // ```
                        // interface I1 { abstract const type t as Container<int>; }
                        // interface I2 { abstract const type t as KeyedContainer<int, int>; }
                        // abstract class C implements I1, I2 {}
                        // ```
                        // Depending on the order the interfaces are
                        // declared, we may report an error. Since this
                        // could be confusing there is special logic in
                        // `Typing_extends` that checks for this potentially
                        // ambiguous situation and warns the programmer to
                        // explicitly declare `T` in `C`.
                        _ => {
                            if old_const.is_enforceable() && !new_const.is_enforceable() {
                                // If a typeconst we already inherited from some
                                // other ancestor was enforceable, then the one
                                // we inherit here will be enforceable too.
                                new_const.enforceable = old_const.enforceable.clone();
                            }
                            e.insert(new_const);
                        }
                    }
                }
            }
        }
    }

    /// Merge everything inherited from one parent into this accumulator.
    fn add_inherited(&mut self, opts: &GlobalOptions, child: &ShallowClass<R>, other: Self) {
        let Self {
            substs,
            props,
            static_props,
            methods,
            static_methods,
            constructor,
            consts,
            type_consts,
        } = other;
        self.add_substs(substs);
        self.add_props(props);
        self.add_static_props(static_props);
        self.add_methods(methods);
        self.add_static_methods(static_methods);
        self.add_constructor(constructor);
        self.add_consts(consts);
        self.add_type_consts(opts, child, type_consts);
    }

    /// Flag every accumulated member as synthesized (used for members
    /// brought in via `require extends`, which don't count as real
    /// definitions).
    fn mark_as_synthesized(&mut self) {
        (self.substs.values_mut()).for_each(|s| s.set_from_req_extends(true));
        (self.constructor.elt.iter_mut()).for_each(|e| e.set_is_synthesized(true));
        (self.props.values_mut()).for_each(|p| p.set_is_synthesized(true));
        (self.static_props.values_mut()).for_each(|p| p.set_is_synthesized(true));
        (self.methods.values_mut()).for_each(|m| m.set_is_synthesized(true));
        (self.static_methods.values_mut()).for_each(|m| m.set_is_synthesized(true));
        (self.consts.values_mut()).for_each(|c| c.set_is_synthesized(true));
        (self.type_consts.values_mut()).for_each(|c| c.set_is_synthesized(true));
    }
}

/// Walks a child's parents/traits/interfaces, accumulating an `Inherited`.
struct MemberFolder<'a, R: Reason> {
    opts: &'a GlobalOptions,
    child: &'a ShallowClass<R>,
    parents: &'a IndexMap<TypeName, Arc<FoldedClass<R>>>,
    members: Inherited<R>,
}

impl<'a, R: Reason> MemberFolder<'a, R> {
    /// Compute everything `self.child` inherits from the single parent type
    /// `parent_ty` (its members instantiated at the use-site type arguments).
    // c.f. `Decl_inherit.from_class` and `Decl_inherit.inherit_hack_class`.
    fn members_from_class(&self, parent_ty: &Ty<R>) -> Result<Inherited<R>> {
        // Private members are not inherited from classes/interfaces, except
        // for `__LSB` statics.
        fn is_not_private<N>((_, elt): &(&N, &FoldedElement)) -> bool {
            match elt.visibility {
                CeVisibility::Private(_) if elt.is_lsb() => true,
                CeVisibility::Private(_) => false,
                _ => true,
            }
        }
        // Trait members are re-owned by the using class: private (and
        // non-synthesized protected) members get the child as their
        // visibility scope.
        fn chown(elt: FoldedElement, owner: TypeName) -> FoldedElement {
            match elt.visibility {
                CeVisibility::Private(_) => FoldedElement {
                    visibility: CeVisibility::Private(owner),
                    ..elt
                },
                CeVisibility::Protected(_) if !elt.is_synthesized() => FoldedElement {
                    visibility: CeVisibility::Protected(owner),
                    ..elt
                },
                _ => elt,
            }
        }

        let (_, parent_pos_id, parent_tyl) = parent_ty.unwrap_class_type();
        if let Some(parent_folded_decl) = self.parents.get(&parent_pos_id.id()) {
            let sig = Subst::new(&parent_folded_decl.tparams, parent_tyl);
            let subst = Substitution { subst: &sig };
            let consts = (parent_folded_decl.consts.iter())
                .map(|(name, cc)| (*name, subst.instantiate_class_const(cc)))
                .collect();
            let type_consts = (parent_folded_decl.type_consts.iter())
                .map(|(name, tc)| (*name, subst.instantiate_type_const(tc)))
                .collect();
            let parent_inh = match parent_folded_decl.kind {
                // Traits: take everything, re-owning visibility.
                ClassishKind::Ctrait => Inherited {
                    consts,
                    type_consts,
                    props: (parent_folded_decl.props.iter())
                        .map(|(k, v)| (*k, chown(v.clone(), self.child.name.id())))
                        .collect(),
                    static_props: (parent_folded_decl.static_props.iter())
                        .map(|(k, v)| (*k, chown(v.clone(), self.child.name.id())))
                        .collect(),
                    methods: (parent_folded_decl.methods)
                        .iter()
                        .map(|(k, v)| (*k, chown(v.clone(), self.child.name.id())))
                        .collect(),
                    static_methods: (parent_folded_decl.static_methods.iter())
                        .map(|(k, v)| (*k, chown(v.clone(), self.child.name.id())))
                        .collect(),
                    ..Default::default()
                },
                // Classes/interfaces: take everything non-private.
                ClassishKind::Cclass(_) | ClassishKind::Cinterface => Inherited {
                    consts,
                    type_consts,
                    props: (parent_folded_decl.props.iter())
                        .filter(is_not_private)
                        .map(|(k, v)| (*k, v.clone()))
                        .collect(),
                    static_props: (parent_folded_decl.static_props.iter())
                        .filter(is_not_private)
                        .map(|(k, v)| (*k, v.clone()))
                        .collect(),
                    methods: (parent_folded_decl.methods.iter())
                        .filter(is_not_private)
                        .map(|(k, v)| (*k, v.clone()))
                        .collect(),
                    static_methods: (parent_folded_decl.static_methods.iter())
                        .filter(is_not_private)
                        .map(|(k, v)| (*k, v.clone()))
                        .collect(),
                    ..Default::default()
                },
                // Enums/enum classes: take everything as-is.
                ClassishKind::Cenum | ClassishKind::CenumClass(_) => Inherited {
                    consts,
                    type_consts,
                    props: parent_folded_decl.props.clone(),
                    static_props: parent_folded_decl.static_props.clone(),
                    methods: parent_folded_decl.methods.clone(),
                    static_methods: parent_folded_decl.static_methods.clone(),
                    ..Default::default()
                },
            };
            // TODO(hrust): Do we need sharing?
            let mut substs = parent_folded_decl.substs.clone();
            substs.insert(
                parent_folded_decl.name,
                SubstContext {
                    subst: sig,
                    class_context: self.child.name.id(),
                    from_req_extends: false,
                },
            );
            let constructor = parent_folded_decl.constructor.clone();
            return Ok(Inherited {
                substs,
                constructor,
                ..parent_inh
            });
        }
        Ok(Default::default())
    }

    /// Inherit only the (type) constants of `ty`, instantiated at the
    /// use-site type arguments.
    fn class_constants_from_class(&self, ty: &Ty<R>) -> Result<Inherited<R>> {
        let (_, pos_id, tyl) = ty.unwrap_class_type();
        if let Some(parent) = self.parents.get(&pos_id.id()) {
            let sig = Subst::new(&parent.tparams, tyl);
            let subst = Substitution { subst: &sig };
            return Ok(Inherited {
                consts: (parent.consts.iter())
                    .map(|(name, cc)| (*name, subst.instantiate_class_const(cc)))
                    .collect(),
                type_consts: (parent.type_consts.iter())
                    .map(|(name, tc)| (*name, subst.instantiate_type_const(tc)))
                    .collect(),
                ..Default::default()
            });
        }
        Ok(Default::default())
    }

    // This logic deals with importing XHP attributes from an XHP class via the
    // "attribute :foo" syntax.
    // c.f. Decl_inherit.from_class_xhp_attrs_only
    fn xhp_attrs_from_class(&self, ty: &Ty<R>) -> Result<Inherited<R>> {
        let (_, pos_id, _tyl) = ty.unwrap_class_type();
        if let Some(parent) = self.parents.get(&pos_id.id()) {
            // Filter out properties that are not XHP attributes.
            return Ok(Inherited {
                props: (parent.props.iter())
                    .filter(|(_, prop)| prop.get_xhp_attr().is_some())
                    .map(|(name, prop)| (name.clone(), prop.clone()))
                    .collect(),
                ..Default::default()
            });
        }
        Ok(Default::default())
    }

    /// Constants from interfaces the child `require implements`.
    fn add_from_interface_constants(&mut self) -> Result<()> {
        for ty in self.child.req_implements.iter() {
            self.members
                .add_inherited(self.opts, self.child, self.class_constants_from_class(ty)?)
        }
        Ok(())
    }

    /// Constants from interfaces the child `implements`.
    fn add_from_implements_constants(&mut self) -> Result<()> {
        for ty in self.child.implements.iter() {
            self.members
                .add_inherited(self.opts, self.child, self.class_constants_from_class(ty)?)
        }
        Ok(())
    }

    /// XHP attributes imported via `attribute :foo` uses.
    fn add_from_xhp_attr_uses(&mut self) -> Result<()> {
        for ty in self.child.xhp_attr_uses.iter() {
            self.members
                .add_inherited(self.opts, self.child, self.xhp_attrs_from_class(ty)?)
        }
        Ok(())
    }

    /// Members from extended classes / implemented interfaces (which
    /// interfaces count depends on the child's kind). The `rev()` gives
    /// earlier-listed parents higher precedence, since later merges win.
    fn add_from_parents(&mut self) -> Result<()> {
        let mut tys: Vec<&Ty<R>> = Vec::new();
        match self.child.kind {
            ClassishKind::Cclass(Abstraction::Abstract) => {
                tys.extend(self.child.implements.iter());
                tys.extend(self.child.extends.iter());
            }
            ClassishKind::Ctrait => {
                tys.extend(self.child.implements.iter());
                tys.extend(self.child.extends.iter());
                tys.extend(self.child.req_implements.iter());
            }
            ClassishKind::Cclass(_)
            | ClassishKind::Cinterface
            | ClassishKind::Cenum
            | ClassishKind::CenumClass(_) => {
                tys.extend(self.child.extends.iter());
            }
        };
        // Interfaces implemented, classes extended and interfaces required to
        // be implemented.
        for ty in tys.iter().rev() {
            self.members
                .add_inherited(self.opts, self.child, self.members_from_class(ty)?);
        }
        Ok(())
    }

    /// Members from `require extends` targets, marked as synthesized since
    /// they are not real definitions on the child.
    fn add_from_requirements(&mut self) -> Result<()> {
        for ty in self.child.req_extends.iter() {
            let mut inherited = self.members_from_class(ty)?;
            inherited.mark_as_synthesized();
            self.members.add_inherited(self.opts, self.child, inherited);
        }
        Ok(())
    }

    /// Members from used traits.
    fn add_from_traits(&mut self) -> Result<()> {
        for ty in self.child.uses.iter() {
            self.members
                .add_inherited(self.opts, self.child, self.members_from_class(ty)?);
        }
        Ok(())
    }

    /// Constants from enums included via `enum ... includes ...`.
    fn add_from_included_enums_constants(&mut self) -> Result<()> {
        if let Some(et) = self.child.enum_type.as_ref() {
            for ty in et.includes.iter() {
                self.members.add_inherited(
                    self.opts,
                    self.child,
                    self.class_constants_from_class(ty)?,
                );
            }
        }
        Ok(())
    }
}

impl<R: Reason> Inherited<R> {
    /// Compute everything `child` inherits, given the folded decls of its
    /// `parents`. The order of the `add_from_*` calls determines precedence:
    /// later merges can override earlier ones.
    pub fn make(
        opts: &GlobalOptions,
        child: &ShallowClass<R>,
        parents: &IndexMap<TypeName, Arc<FoldedClass<R>>>,
    ) -> Result<Self> {
        let mut folder = MemberFolder {
            opts,
            child,
            parents,
            members: Self::default(),
        };
        folder.add_from_parents()?; // Members inherited from parents ...
        folder.add_from_requirements()?;
        folder.add_from_traits()?; // ... can be overridden by traits.
        folder.add_from_xhp_attr_uses()?;
        folder.add_from_interface_constants()?;
        folder.add_from_included_enums_constants()?;
        folder.add_from_implements_constants()?;

        Ok(folder.members)
    }
}
Rust
hhvm/hphp/hack/src/hackrs/folded_decl_provider/provider.rs
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.

use std::sync::Arc;

use datastore::Store;
use hash::IndexMap;
use hash::IndexSet;
use oxidized::global_options::GlobalOptions;
use oxidized::naming_types::KindOfType;
use pos::Positioned;
use pos::TypeName;
use shallow_decl_provider::ShallowDeclProvider;
use ty::decl::FoldedClass;
use ty::decl::ShallowClass;
use ty::decl::Ty;
use ty::decl_error::DeclError;
use ty::reason::Reason;

use super::fold::DeclFolder;
use super::Error;
use super::Result;
use super::TypeDecl;

// note(sf, 2022-02-03): c.f. hphp/hack/src/decl/decl_folded_class.ml

/// A `FoldedDeclProvider` which, if the requested class name is not present in
/// its store, recursively computes the folded decl for that class by requesting
/// the shallow decls of that class and its ancestors from its
/// `ShallowDeclProvider`.
#[derive(Debug)]
pub struct LazyFoldedDeclProvider<R: Reason> {
    opts: Arc<GlobalOptions>,
    // Cache of already-folded classes, keyed by type name.
    store: Arc<dyn Store<TypeName, Arc<FoldedClass<R>>>>,
    shallow_decl_provider: Arc<dyn ShallowDeclProvider<R>>,
}

impl<R: Reason> LazyFoldedDeclProvider<R> {
    /// Construct a provider over the given folded-decl store and shallow
    /// decl source.
    pub fn new(
        opts: Arc<GlobalOptions>,
        store: Arc<dyn Store<TypeName, Arc<FoldedClass<R>>>>,
        shallow_decl_provider: Arc<dyn ShallowDeclProvider<R>>,
    ) -> Self {
        Self {
            opts,
            store,
            shallow_decl_provider,
        }
    }
}

impl<R: Reason> super::FoldedDeclProvider<R> for LazyFoldedDeclProvider<R> {
    /// Look up `name`: typedefs are returned directly from the shallow
    /// provider; classes go through the (possibly recursive) folding path with
    /// a fresh cycle-detection stack.
    fn get_type(&self, name: TypeName) -> Result<Option<TypeDecl<R>>> {
        match self.shallow_decl_provider.get_type_kind(name)? {
            None => Ok(None),
            Some(KindOfType::TTypedef) => Ok(self
                .shallow_decl_provider
                .get_typedef(name)?
                .map(TypeDecl::Typedef)),
            Some(KindOfType::TClass) => {
                let mut stack = Default::default();
                Ok(self
                    .get_folded_class_impl(&mut stack, name)?
                    .map(TypeDecl::Class))
            }
        }
    }
}

impl<R: Reason> LazyFoldedDeclProvider<R> {
    /// Return `true` (and record a `CyclicClassDef` error) if `pos_id` is
    /// already on the in-progress folding stack, i.e. we are inside our own
    /// ancestry.
    fn detect_cycle(
        &self,
        stack: &mut IndexSet<TypeName>,
        errors: &mut Vec<DeclError<R::Pos>>,
        pos_id: &Positioned<TypeName, R::Pos>,
    ) -> bool {
        if stack.contains(&pos_id.id()) {
            errors.push(DeclError::CyclicClassDef(
                pos_id.pos().clone(),
                stack.iter().copied().collect(),
            ));
            true
        } else {
            false
        }
    }

    /// Fold the class named by `ty` (a parent of the class being declared),
    /// returning `None` if it forms a cycle or cannot be found.
    fn decl_class_type(
        &self,
        stack: &mut IndexSet<TypeName>,
        errors: &mut Vec<DeclError<R::Pos>>,
        ty: &Ty<R>,
    ) -> Result<Option<(TypeName, Arc<FoldedClass<R>>)>> {
        let (_, pos_id, _) = ty.unwrap_class_type();
        if !self.detect_cycle(stack, errors, &pos_id) {
            if let Some(folded_decl) = self.get_folded_class_impl(stack, pos_id.id())? {
                return Ok(Some((pos_id.id(), folded_decl)));
            }
        }
        Ok(None)
    }

    fn parent_error(sc: &ShallowClass<R>, parent: &Ty<R>, err: Error) -> Error {
        // We tried to produce a decl of the parent of the given class but
        // failed. We capture this chain of events as a `Parent` error. This
        // has the effect of explaining that "we couldn't decl 'class' because
        // we couldn't decl 'parent' because ... x" (where 'x' is the
        // underlying error like, the parent's php file is missing).
        let (_, parent_name, _) = parent.unwrap_class_type();
        match err {
            // Already a `Parent` error: extend its ancestor chain.
            Error::Parent {
                class: _,
                mut parents,
                error,
            } => {
                parents.push(parent_name.id());
                Error::Parent {
                    class: sc.name.id(),
                    parents,
                    error,
                }
            }
            // Any other error: wrap it as the root cause of a new chain.
            _ => Error::Parent {
                class: sc.name.id(),
                parents: vec![parent_name.id()],
                error: Box::new(err),
            },
        }
    }

    /// Produce a stream of a class's parent types that will be folded
    /// recursively before folding the class itself.
    pub fn parents_to_fold(sc: &ShallowClass<R>) -> impl Iterator<Item = &Ty<R>> {
        (sc.extends.iter())
            .chain(sc.implements.iter())
            .chain(sc.uses.iter())
            .chain(sc.xhp_attr_uses.iter())
            .chain(sc.req_extends.iter())
            .chain(sc.req_implements.iter())
            .chain(
                (sc.enum_type.as_ref())
                    .map_or([].as_slice(), |et| &et.includes)
                    .iter(),
            )
    }

    // note(sf, 2022-03-02): c.f. Decl_folded_class.class_parents_decl
    /// Fold every parent of `sc`, wrapping any failure in a `Parent` error
    /// chain. Parents that fold to `None` (cycles, missing decls) are skipped.
    fn decl_class_parents(
        &self,
        stack: &mut IndexSet<TypeName>,
        errors: &mut Vec<DeclError<R::Pos>>,
        sc: &ShallowClass<R>,
    ) -> Result<IndexMap<TypeName, Arc<FoldedClass<R>>>> {
        Self::parents_to_fold(sc)
            .chain(DeclFolder::stringish_object_parent(sc).iter())
            .map(|ty| {
                self.decl_class_type(stack, errors, ty)
                    .map_err(|err| Self::parent_error(sc, ty, err))
            })
            .filter_map(Result::transpose)
            .collect()
    }

    /// Fold the class `name`: fetch its shallow decl, recursively fold its
    /// parents (with `name` pushed on the cycle stack), then combine.
    fn decl_class(
        &self,
        stack: &mut IndexSet<TypeName>,
        name: TypeName,
    ) -> Result<Option<Arc<FoldedClass<R>>>> {
        let mut errors = vec![];
        let shallow_class = match self.shallow_decl_provider.get_class(name)? {
            None => return Ok(None),
            Some(c) => c,
        };
        stack.insert(name);
        // NOTE(review): if `decl_class_parents` errors, `?` returns before the
        // `stack.remove` below, leaving `name` on the stack — presumably fine
        // because the whole fold is abandoned on error; confirm callers never
        // reuse the stack after an Err.
        let parents = self.decl_class_parents(stack, &mut errors, &shallow_class)?;
        stack.remove(&name);
        Ok(Some(DeclFolder::decl_class(
            &self.opts,
            &shallow_class,
            &parents,
            errors,
        )?))
    }

    /// Cache-through lookup: return the folded class from the store if
    /// present, otherwise fold it, insert it into the store, and return it.
    fn get_folded_class_impl(
        &self,
        stack: &mut IndexSet<TypeName>,
        name: TypeName,
    ) -> Result<Option<Arc<FoldedClass<R>>>> {
        match self.store.get(name).map_err(Error::Store)? {
            Some(rc) => Ok(Some(rc)),
            None => match self.decl_class(stack, name)? {
                None => Ok(None),
                Some(rc) => {
                    self.store
                        .insert(name, Arc::clone(&rc))
                        .map_err(Error::Store)?;
                    Ok(Some(rc))
                }
            },
        }
    }
}
Rust
hhvm/hphp/hack/src/hackrs/folded_decl_provider/subst.rs
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.

use std::collections::BTreeMap;

use pos::TypeName;
use ty::decl::subst::Subst;
use ty::decl::ty::ShapeType;
use ty::decl::AbstractTypeconst;
use ty::decl::ClassConst;
use ty::decl::ClassRefinement;
use ty::decl::ConcreteTypeconst;
use ty::decl::FunParam;
use ty::decl::FunType;
use ty::decl::PossiblyEnforcedTy;
use ty::decl::RefinedConst;
use ty::decl::RefinedConstBound;
use ty::decl::ShapeFieldType;
use ty::decl::TaccessType;
use ty::decl::Tparam;
use ty::decl::TrefinementType;
use ty::decl::Ty;
use ty::decl::Ty_;
use ty::decl::TypeConst;
use ty::decl::Typeconst;
use ty::decl::WhereConstraint;
use ty::reason::Reason;

// note(sf, 2022-02-14): c.f. `Decl_subst`, `Decl_instantiate`

/// Applies a type-parameter substitution (`Subst`: generic name -> decl type)
/// to decl types, i.e. instantiates generics.
#[derive(Debug, Clone)]
pub struct Substitution<'a, R: Reason> {
    pub subst: &'a Subst<R>,
}

impl<'a, R: Reason> Substitution<'a, R> {
    /// Substitute a higher-kinded generic: `ty` replaces the generic named
    /// `orig_var`, and any type arguments that were applied to the generic
    /// (`args`) are appended to `ty`'s own argument list. The resulting
    /// reason records the instantiation site.
    fn merge_hk_type(
        &self,
        orig_r: R,
        orig_var: TypeName,
        ty: &Ty<R>,
        args: impl Iterator<Item = Ty<R>>,
    ) -> Ty<R> {
        let ty_: &Ty_<R> = ty.node();
        let res_ty_ = match ty_ {
            Ty_::Tapply(params) => {
                // We could insist on `existing_args.is_empty()` here
                // unless we want to support partial application.
                let (name, existing_args) = &**params;
                Ty_::Tapply(Box::new((
                    name.clone(),
                    existing_args.iter().cloned().chain(args).collect(),
                )))
            }
            Ty_::Tgeneric(params) => {
                // Same here.
                let (name, ref existing_args) = **params;
                Ty_::Tgeneric(Box::new((
                    name,
                    existing_args.iter().cloned().chain(args).collect(),
                )))
            }
            // We could insist on existing_args = [] here unless we want to
            // support partial application.
            _ => ty_.clone(),
        };
        let r = ty.reason().clone();
        Ty::new(R::instantiate(r, orig_var, orig_r), res_ty_)
    }

    /// Instantiate `ty` under this substitution: replace each `Tgeneric`
    /// bound in the substitution by its image, recursing into all other
    /// type constructors.
    pub fn instantiate(&self, ty: &Ty<R>) -> Ty<R> {
        // PERF: If subst is empty then instantiation is a no-op. We can save a
        // significant amount of CPU by avoiding recursively deconstructing the
        // `ty` data type.
        if self.subst.0.is_empty() {
            return ty.clone();
        }
        let r = ty.reason().clone();
        let ty_: &Ty_<R> = ty.node();
        match ty_ {
            Ty_::Tgeneric(params) => {
                let (x, ref existing_args) = **params;
                // Instantiate the generic's own arguments first.
                let args = existing_args.iter().map(|arg| self.instantiate(arg));
                match self.subst.0.get(&x) {
                    Some(x_ty) => self.merge_hk_type(r, x, x_ty, args),
                    None => Ty::generic(r, x, args.collect()),
                }
            }
            _ => Ty::new(r, self.instantiate_(ty_)),
        }
    }

    /// Structural recursion over every non-`Tgeneric` type constructor
    /// (`Tgeneric` is handled by `instantiate` and must not reach here).
    fn instantiate_(&self, x: &Ty_<R>) -> Ty_<R> {
        match x {
            Ty_::Tgeneric(_) => panic!("subst.rs: instantiate_: impossible!"),
            // IMPORTANT: We cannot expand `Taccess` during instantiation
            // because this can be called before all type consts have been
            // declared and inherited.
            Ty_::Taccess(ta) => Ty_::Taccess(Box::new(TaccessType {
                ty: self.instantiate(&ta.ty),
                type_const: ta.type_const.clone(),
            })),
            Ty_::TvecOrDict(tys) => Ty_::TvecOrDict(Box::new((
                self.instantiate(&tys.0),
                self.instantiate(&tys.1),
            ))),
            // Leaf types: nothing to substitute.
            Ty_::Tthis
            | Ty_::Tmixed
            | Ty_::Twildcard
            | Ty_::Tdynamic
            | Ty_::Tnonnull
            | Ty_::Tany
            | Ty_::Tprim(_) => x.clone(),
            Ty_::Ttuple(tys) => Ty_::Ttuple(
                tys.iter()
                    .map(|t| self.instantiate(t))
                    .collect::<Box<[_]>>(),
            ),
            Ty_::Tunion(tys) => Ty_::Tunion(
                tys.iter()
                    .map(|t| self.instantiate(t))
                    .collect::<Box<[_]>>(),
            ),
            Ty_::Tintersection(tys) => Ty_::Tintersection(
                tys.iter()
                    .map(|t| self.instantiate(t))
                    .collect::<Box<[_]>>(),
            ),
            Ty_::Toption(ty) => {
                let ty = self.instantiate(ty);
                // We want to avoid double option: `??T`.
                match ty.node() as &Ty_<R> {
                    ty_node @ Ty_::Toption(_) => ty_node.clone(),
                    _ => Ty_::Toption(ty),
                }
            }
            Ty_::Tlike(ty) => Ty_::Tlike(self.instantiate(ty)),
            Ty_::Tfun(ft) => {
                let tparams = &ft.tparams;
                let outer_subst = self;
                // The function's own type parameters shadow any equally-named
                // generics in the outer substitution, so remove them before
                // instantiating the parts they scope over.
                let mut subst = self.subst.clone();
                for tp in tparams.iter() {
                    subst.0.remove(tp.name.id_ref());
                }
                let subst = Substitution { subst: &subst };
                let params = ft
                    .params
                    .iter()
                    .map(|fp| FunParam {
                        ty: subst.instantiate_possibly_enforced_ty(&fp.ty),
                        pos: fp.pos.clone(),
                        name: fp.name,
                        flags: fp.flags,
                    })
                    .collect::<Box<[_]>>();
                let ret = subst.instantiate_possibly_enforced_ty(&ft.ret);
                let tparams = tparams
                    .iter()
                    .map(|tp| Tparam {
                        constraints: tp
                            .constraints
                            .iter()
                            .map(|(ck, ty)| (*ck, subst.instantiate(ty)))
                            .collect::<Box<[_]>>(),
                        variance: tp.variance,
                        name: tp.name.clone(),
                        tparams: tp.tparams.clone(),
                        reified: tp.reified,
                        user_attributes: tp.user_attributes.clone(),
                    })
                    .collect::<Box<[_]>>();
                // NOTE(review): the left side of a where-constraint uses the
                // shadowing-aware `subst`, the right side the outer
                // substitution — presumably mirroring the OCaml
                // `Decl_instantiate`; confirm against that source.
                let where_constraints = ft
                    .where_constraints
                    .iter()
                    .map(|WhereConstraint(ty1, ck, ty2)| {
                        WhereConstraint(subst.instantiate(ty1), *ck, outer_subst.instantiate(ty2))
                    })
                    .collect::<Box<[_]>>();
                Ty_::Tfun(Box::new(FunType {
                    params,
                    ret,
                    tparams,
                    where_constraints,
                    flags: ft.flags,
                    implicit_params: ft.implicit_params.clone(),
                    ifc_decl: ft.ifc_decl.clone(),
                    cross_package: ft.cross_package.clone(),
                }))
            }
            Ty_::Tapply(params) => {
                let (name, tys) = &**params;
                Ty_::Tapply(Box::new((
                    name.clone(),
                    tys.iter()
                        .map(|ty| self.instantiate(ty))
                        .collect::<Box<[_]>>(),
                )))
            }
            Ty_::Tshape(params) => {
                let ShapeType(ref shape_kind, ref fdm) = **params;
                let shape_kind = self.instantiate(shape_kind);
                let fdm = fdm
                    .iter()
                    .map(|(f, sft)| {
                        (
                            *f,
                            ShapeFieldType {
                                field_name_pos: sft.field_name_pos.clone(),
                                ty: self.instantiate(&sft.ty),
                                optional: sft.optional,
                            },
                        )
                    })
                    .collect::<BTreeMap<_, _>>();
                Ty_::Tshape(Box::new(ShapeType(shape_kind, fdm)))
            }
            Ty_::Trefinement(tr) => Ty_::Trefinement(Box::new(TrefinementType {
                ty: self.instantiate(&tr.ty),
                refinement: ClassRefinement {
                    consts: (tr.refinement.consts.iter())
                        .map(|(k, v)| (*k, self.instantiate_class_type_refinement(v)))
                        .collect(),
                },
            })),
        }
    }

    /// Instantiate the bound(s) of a refined type constant (`T as X`,
    /// `T super Y`, or an exact bound).
    fn instantiate_class_type_refinement(&self, rc: &RefinedConst<Ty<R>>) -> RefinedConst<Ty<R>> {
        use RefinedConstBound::*;
        let bound = match &rc.bound {
            Exact(ty) => Exact(self.instantiate(ty)),
            Loose(bounds) => Loose(ty::decl::RefinedConstBounds {
                lower: bounds.lower.iter().map(|ty| self.instantiate(ty)).collect(),
                upper: bounds.upper.iter().map(|ty| self.instantiate(ty)).collect(),
            }),
        };
        RefinedConst {
            bound,
            is_ctx: rc.is_ctx,
        }
    }

    /// Instantiate the inner type, preserving the enforcement flag.
    fn instantiate_possibly_enforced_ty(
        &self,
        et: &PossiblyEnforcedTy<Ty<R>>,
    ) -> PossiblyEnforcedTy<Ty<R>> {
        PossiblyEnforcedTy {
            ty: self.instantiate(&et.ty),
            enforced: et.enforced,
        }
    }

    /// Instantiate a class constant's type; all other fields are copied.
    pub fn instantiate_class_const(&self, cc: &ClassConst<R>) -> ClassConst<R> {
        ClassConst {
            is_synthesized: cc.is_synthesized,
            kind: cc.kind,
            pos: cc.pos.clone(),
            ty: self.instantiate(&cc.ty),
            origin: cc.origin,
            refs: cc.refs.clone(),
        }
    }

    /// Instantiate the constraint/default types of an abstract type constant,
    /// or the concrete type of a concrete one.
    fn instantiate_type_const_kind(&self, kind: &Typeconst<R>) -> Typeconst<R> {
        match kind {
            Typeconst::TCAbstract(k) => Typeconst::TCAbstract(AbstractTypeconst {
                as_constraint: k.as_constraint.as_ref().map(|ty| self.instantiate(ty)),
                super_constraint: k.super_constraint.as_ref().map(|ty| self.instantiate(ty)),
                default: k.default.as_ref().map(|ty| self.instantiate(ty)),
            }),
            Typeconst::TCConcrete(k) => Typeconst::TCConcrete(ConcreteTypeconst {
                ty: self.instantiate(&k.ty),
            }),
        }
    }

    /// Instantiate a type constant's kind; all other fields are copied.
    pub fn instantiate_type_const(&self, tc: &TypeConst<R>) -> TypeConst<R> {
        TypeConst {
            is_synthesized: tc.is_synthesized,
            name: tc.name.clone(),
            kind: self.instantiate_type_const_kind(&tc.kind),
            origin: tc.origin,
            enforceable: tc.enforceable.clone(),
            reifiable: tc.reifiable.clone(),
            is_concretized: tc.is_concretized,
            is_ctx: tc.is_ctx,
        }
    }
}
TOML
hhvm/hphp/hack/src/hackrs/folded_decl_provider/cargo/folded_decl_provider/Cargo.toml
# @generated by autocargo [package] name = "folded_decl_provider" version = "0.0.0" edition = "2021" [lib] path = "../../folded_decl_provider.rs" [dependencies] anyhow = "1.0.71" datastore = { version = "0.0.0", path = "../../../datastore" } decl_enforceability = { version = "0.0.0", path = "../../../decl_enforceability" } eq_modulo_pos = { version = "0.0.0", path = "../../../../utils/eq_modulo_pos" } hash = { version = "0.0.0", path = "../../../../utils/hash" } indexmap = { version = "1.9.2", features = ["arbitrary", "rayon", "serde-1"] } itertools = "0.10.3" oxidized = { version = "0.0.0", path = "../../../../oxidized" } pos = { version = "0.0.0", path = "../../../pos/cargo/pos" } shallow_decl_provider = { version = "0.0.0", path = "../../../shallow_decl_provider/cargo/shallow_decl_provider" } special_names = { version = "0.0.0", path = "../../../special_names/cargo/special_names" } thiserror = "1.0.43" ty = { version = "0.0.0", path = "../../../ty/cargo/ty" }
Rust
hhvm/hphp/hack/src/hackrs/folded_decl_provider/fold/decl_enum.rs
// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.

use hash::IndexMap;
use pos::ClassConstName;
use pos::Positioned;
use pos::TypeName;
use special_names as sn;
use ty::decl::folded::ClassConst;
use ty::decl::Prim;
use ty::decl::Ty;
use ty::decl::Ty_;
use ty::reason::Reason;

use super::DeclFolder;

/// How a class behaves as an enum, as computed by `enum_kind`.
struct EnumKind<R: Reason> {
    /// Underlying type of the enum, e.g. int or string. For subclasses of
    /// `Enum`, this is the type parameter of the Enum. For enum classes, this
    /// is `HH\MemberOf<E, I>`.
    // NB(jakebailey, 2022-03-11): `base` is copied from OCaml but not used here.
    // base: Ty<R>,
    /// Type containing the enum name.
    /// For subclasses of Enum, this is also the type parameter of Enum.
    ty: Ty<R>,
    /// Reflects what's after the `as` keyword in the enum definition.
    // NB(jakebailey, 2022-03-11): `constraint` is copied from OCaml but not used here.
    // constraint: Option<Ty<R>>,
    /// For enum classes, this is the raw interface I, as provided by the user.
    interface: Option<Ty<R>>,
}

impl<'a, R: Reason> DeclFolder<'a, R> {
    /// Figures out if `self.child` needs to be treated like an enum.
    ///
    /// Two routes lead to "yes": the class has an explicit `enum_type`
    /// (source-level `enum`/`enum class`), or it has `HH\BuiltinEnum` among
    /// its ancestors. Returns `None` otherwise.
    fn enum_kind(
        &self,
        inner_ty: Option<&Ty<R>>,
        ancestors: &IndexMap<TypeName, Ty<R>>,
    ) -> Option<EnumKind<R>> {
        let is_enum_class = matches!(self.child.kind, ty::decl::ty::ClassishKind::CenumClass(..));
        match &self.child.enum_type {
            None => {
                // Not declared as an enum: check for an `Enum` ancestor.
                let enum_ty = match ancestors.get(&*sn::fb::cEnum) {
                    None => return None,
                    Some(ty) => ty,
                };
                match enum_ty.unwrap_class_type() {
                    // `Enum<T>` with exactly one type argument: T is the
                    // member type.
                    (_, name, [ty_exp]) if name.id() == *sn::fb::cEnum => Some(EnumKind {
                        // base: ty_exp.clone(),
                        ty: ty_exp.clone(),
                        // constraint: None,
                        interface: None,
                    }),
                    (_, name, _) if name.id() == *sn::fb::cEnum => {
                        // The fallback if the class does not declare `TInner` (i.e.
                        // it is abstract) is to use `this::TInner`
                        let r = || enum_ty.reason().clone();
                        let ty_exp = match inner_ty {
                            Some(ty) => ty.clone(),
                            None => Ty::access(
                                r(),
                                ty::decl::TaccessType {
                                    ty: Ty::this(r()),
                                    type_const: Positioned::new(
                                        enum_ty.pos().clone(),
                                        *sn::fb::tInner,
                                    ),
                                },
                            ),
                        };
                        Some(EnumKind {
                            // base: ty_exp.clone(),
                            ty: ty_exp,
                            // constraint: None,
                            interface: None,
                        })
                    }
                    _ => None,
                }
            }
            Some(enum_type) => {
                // Declared enum (or enum class).
                let reason = enum_type.base.reason();
                let pos = reason.pos();
                let enum_ty = Ty::apply(reason.clone(), self.child.name.clone(), [].into());
                let (te_base, te_interface) = if is_enum_class {
                    // Enum class: base becomes `HH\MemberOf<E, I>` where I is
                    // the user-written interface.
                    let te_interface = enum_type.base.clone();
                    // TODO(T77095784) make a new reason !
                    let te_base = Ty::apply(
                        reason.clone(),
                        Positioned::new(pos.clone(), *sn::classes::cMemberOf),
                        [enum_ty, enum_type.base.clone()].into(),
                    );
                    (te_base, Some(te_interface))
                } else {
                    (enum_type.base.clone(), None)
                };
                Some(EnumKind {
                    ty: Ty::apply(te_base.reason().clone(), self.child.name.clone(), [].into()),
                    // base: te_base,
                    // constraint: enum_type.constraint.clone(),
                    interface: te_interface,
                })
            }
        }
    }

    /// If `self.child` is an Enum, we give all of the constants in the class
    /// the type of the Enum. We don't do this for `Enum<mixed>` and
    /// `Enum<arraykey>`, since that could *lose* type information.
    pub fn rewrite_class_consts_for_enum(
        &self,
        inner_ty: Option<&Ty<R>>,
        ancestors: &IndexMap<TypeName, Ty<R>>,
        consts: &mut IndexMap<ClassConstName, ClassConst<R>>,
    ) {
        let EnumKind {
            // base: _,
            ty,
            // constraint: _,
            interface,
        } = match self.enum_kind(inner_ty, ancestors) {
            None => return,
            Some(kind) => kind,
        };
        // Don't rewrite enum classes.
        if interface.is_some() {
            return;
        }
        // Don't rewrite `Enum<mixed>` or `Enum<arraykey>`.
        if matches!(ty.node_ref(), Ty_::Tmixed | Ty_::Tprim(Prim::Tarraykey)) {
            return;
        }
        // A special constant called "class" gets added, and we don't
        // want to rewrite its type.
        // Also for enum class, the type is set in the lowerer.
        for (&name, c) in consts.iter_mut() {
            if name != *sn::members::mClass {
                c.ty = ty.clone();
            }
        }
    }
}
Rust
hhvm/hphp/hack/src/hackrs/hackrs_test/hackrs_test.rs
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.

#![cfg(test)]

use std::collections::BTreeMap;
use std::path::PathBuf;
use std::sync::Arc;

use anyhow::Result;
use datastore::NonEvictingStore;
use decl_parser::DeclParser;
use fbinit::FacebookInit;
use folded_decl_provider::FoldedDeclProvider;
use folded_decl_provider::LazyFoldedDeclProvider;
use hackrs_test_utils::serde_store::StoreOpts::Unserialized;
use hackrs_test_utils::store::make_shallow_decl_store;
use hh24_test::TestRepo;
use naming_provider::SqliteNamingTable;
use oxidized::decl_parser_options::DeclParserOptions;
use oxidized::parser_options::ParserOptions;
use pos::RelativePathCtx;
use shallow_decl_provider::LazyShallowDeclProvider;
use tempdir::TempDir;
use ty::reason::BReason;

mod folded_decl_provider_test;
mod pos_test;

/// Shared fixture for hackrs integration tests: a temporary repo of Hack
/// files plus a decl-parsing / folded-decl-provider pipeline over it.
struct TestContext {
    root: TestRepo,
    decl_parser: DeclParser<BReason>,
    folded_decl_provider: Arc<dyn FoldedDeclProvider<BReason>>,
}

impl TestContext {
    /// Build a context from `files` (path -> contents): writes the files to a
    /// temp repo, creates a SQLite naming table for them, and wires up a lazy
    /// shallow + folded decl provider stack over non-evicting in-memory
    /// stores.
    fn new(_fb: FacebookInit, files: BTreeMap<&str, &str>) -> Result<Self> {
        let root = TestRepo::new(&files)?;
        let tmpdir = TempDir::new("rupro_test")?;
        let naming_db = tmpdir.path().join("names.sql");
        hh24_test::create_naming_table(&naming_db, &files)?;
        let naming_provider = Arc::new(SqliteNamingTable::new(&naming_db).unwrap());
        let path_ctx = Arc::new(RelativePathCtx {
            root: root.path().to_path_buf(),
            hhi: PathBuf::new(),
            dummy: PathBuf::new(),
            tmp: tmpdir.path().to_path_buf(),
        });
        let parser_opts = ParserOptions::default();
        let decl_parser = DeclParser::new(
            Arc::new(file_provider::DiskProvider::new(path_ctx, None)),
            DeclParserOptions::from_parser_options(&parser_opts),
            parser_opts.po_deregister_php_stdlib,
        );
        let shallow_decl_provider = Arc::new(LazyShallowDeclProvider::new(
            Arc::new(make_shallow_decl_store::<BReason>(Unserialized)),
            naming_provider,
            decl_parser.clone(),
        ));
        let folded_decl_provider = Arc::new(LazyFoldedDeclProvider::new(
            Arc::new(Default::default()), // TODO: remove?
            Arc::new(NonEvictingStore::new()),
            shallow_decl_provider,
        ));

        Ok(Self {
            root,
            decl_parser,
            folded_decl_provider,
        })
    }
}
TOML
hhvm/hphp/hack/src/hackrs/hackrs_test/cargo/hackrs_test/Cargo.toml
# @generated by autocargo [package] name = "hackrs_test" version = "0.0.0" edition = "2021" [lib] path = "../../hackrs_test.rs" [dev-dependencies] anyhow = "1.0.71" datastore = { version = "0.0.0", path = "../../../datastore" } decl_parser = { version = "0.0.0", path = "../../../decl_parser/cargo/decl_parser" } fbinit = { version = "0.1.2", git = "https://github.com/facebookexperimental/rust-shed.git", branch = "main" } file_provider = { version = "0.0.0", path = "../../../file_provider/cargo/file_provider" } folded_decl_provider = { version = "0.0.0", path = "../../../folded_decl_provider/cargo/folded_decl_provider" } hackrs_test_utils = { version = "0.0.0", path = "../../../hackrs_test_utils/cargo/hackrs_test_utils" } hh24_test = { version = "0.0.0", path = "../../../../utils/cargo/hh24_test" } itertools = "0.10.3" maplit = "1.0" naming_provider = { version = "0.0.0", path = "../../../naming_provider/cargo/naming_provider" } ocamlrep = { version = "0.1.0", git = "https://github.com/facebook/ocamlrep/", branch = "main" } oxidized = { version = "0.0.0", path = "../../../../oxidized" } pos = { version = "0.0.0", path = "../../../pos/cargo/pos" } relative_path = { version = "0.0.0", path = "../../../../utils/rust/relative_path" } shallow_decl_provider = { version = "0.0.0", path = "../../../shallow_decl_provider/cargo/shallow_decl_provider" } tempdir = "0.3" ty = { version = "0.0.0", path = "../../../ty/cargo/ty" }
Rust
hhvm/hphp/hack/src/hackrs/hackrs_test/folded_decl_provider_test/mod.rs
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.

#![cfg(test)]

use std::fs;

use anyhow::Result;
use maplit::btreemap;
use pos::Prefix;
use pos::RelativePath;
use pos::TypeName;
use ty::decl::shallow;
use ty::decl_error::DeclError;

use crate::FacebookInit;
use crate::TestContext;

/// Folding a class whose ancestry is cyclic must record a
/// `CyclicClassDef` error on the class rather than loop forever.
#[fbinit::test]
fn when_cyclic_class_error(fb: FacebookInit) -> Result<()> {
    let ctx = TestContext::new(
        fb,
        btreemap! {
            "a.php" => "class A extends B {}",
            "b.php" => "class B extends A {}"
        },
    )?;
    let (a, b) = (TypeName::new(r#"\A"#), TypeName::new(r#"\B"#));
    // To declare B, we'll first declare A. During the declaring of A, the
    // dependency on B will be noted as a cycle in A's errors.
    ctx.folded_decl_provider.get_class(b)?;
    // Since we already declared A incidentally above, this next line will
    // simply pull it from cache.
    let decl = ctx.folded_decl_provider.get_class(a)?.unwrap();
    // Now check that A has recorded the cyclic class error as we predict.
    match decl.decl_errors.first().unwrap() {
        DeclError::CyclicClassDef(_, ts) => {
            itertools::assert_equal(ts.iter().copied(), [b, a].into_iter())
        }
        _ => panic!(),
    };
    Ok(())
}

/// Folding the same hierarchy repeatedly must yield ancestors in a stable
/// order.
#[fbinit::test]
fn results_stable(fb: FacebookInit) -> Result<()> {
    // Our use of `index_map::IndexMap` in strategic places implies folded class
    // maps are stable.
    for _ in 1..5 {
        let ctx = TestContext::new(
            fb,
            btreemap! {
                "a.php" => "class A {}",
                "b.php" => "class B extends A {}",
                "c.php" => "class C extends B {}",
                "d.php" => "class D extends C {}",
            },
        )?;
        let (a, b, c, d) = (
            TypeName::new(r#"\A"#),
            TypeName::new(r#"\B"#),
            TypeName::new(r#"\C"#),
            TypeName::new(r#"\D"#),
        );
        let decl = ctx.folded_decl_provider.get_class(d)?.unwrap();
        itertools::assert_equal(decl.ancestors.keys().copied(), [a, b, c].into_iter())
    }
    Ok(())
}

/// Deleting an ancestor's file after naming must surface a `Parent` error
/// chain naming every ancestor on the failing path.
#[fbinit::test]
fn when_file_missing_error(fb: FacebookInit) -> Result<()> {
    let ctx = TestContext::new(
        fb,
        btreemap! {
            "a.php" => "class A {}",
            "b.php" => "class B extends A {}",
            "c.php" => "class C extends B {}",
            "d.php" => "class D extends C {}",
        },
    )?;
    let (a, b, c, d) = (
        TypeName::new(r#"\A"#),
        TypeName::new(r#"\B"#),
        TypeName::new(r#"\C"#),
        TypeName::new(r#"\D"#),
    );

    // check we can decl parse 'd.php'
    for decl in ctx
        .decl_parser
        .parse(RelativePath::new(Prefix::Root, "d.php"))?
    {
        match decl {
            shallow::NamedDecl::Class(cls, _) => {
                assert_eq!(cls, d);
            }
            _ => panic!("unexpected decl in 'd.php'"),
        }
    }

    // remove 'a.php'
    fs::remove_file(ctx.root.path().join("a.php").as_path())?;

    // try getting a folded decl for 'D'
    use ::folded_decl_provider::Error;
    match ctx.folded_decl_provider.get_class(d) {
        Err(
            ref err @ Error::Parent {
                ref class,
                ref parents,
                ..
            },
        ) => {
            // check the error is about 'D'
            assert_eq!(*class, d);
            // check we enumerated all 'D's parents
            assert!([&a, &b, &c].iter().all(|p| parents.contains(p)));
            // check the error text
            assert_eq!(
                format!("{}", err),
                "Failed to declare \\D because of error in ancestor \\A (via \\C, \\B, \\A): Failed to parse decls in root|a.php: No such file or directory (os error 2)"
            )
        }
        _ => panic!("failure folding 'D' expected"),
    }

    Ok(())
}
Rust
hhvm/hphp/hack/src/hackrs/hackrs_test/pos_test/mod.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. #![cfg(test)] use anyhow::Result; use ocamlrep::FromOcamlRep; use ocamlrep::ToOcamlRep; use oxidized::file_pos_large::FilePosLarge; use pos::BPos; use pos::RelativePath; use relative_path::Prefix; #[test] fn bpos_from_ocamlrep() -> Result<()> { // make a pos let file = RelativePath::new(Prefix::Root, std::path::Path::new("yellow/brick/road")); let begin = FilePosLarge::from_line_column_offset(0usize, 0usize, 0usize); let until = FilePosLarge::from_line_column_offset(10usize, 0usize, 1024usize); let pos = BPos::new(file, begin, until); // convert it to an ocamlrep let alloc = &ocamlrep::Arena::new(); let word: usize = pos.to_ocamlrep(alloc).to_bits(); let value: ocamlrep::Value<'_> = unsafe { ocamlrep::Value::from_bits(word) }; // convert it back from an ocamlrep & check it "round trips" assert_eq!(pos, BPos::from_ocamlrep(value).unwrap()); Ok(()) }
Rust
hhvm/hphp/hack/src/hackrs/hackrs_test_utils/decl_provider.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use std::path::PathBuf; use std::sync::Arc; use datastore::NonEvictingStore; use decl_parser::DeclParser; use folded_decl_provider::FoldedDeclProvider; use folded_decl_provider::LazyFoldedDeclProvider; use naming_provider::SqliteNamingTable; use oxidized::parser_options::ParserOptions; use shallow_decl_provider::EagerShallowDeclProvider; use shallow_decl_provider::LazyShallowDeclProvider; use shallow_decl_provider::ShallowDeclProvider; use shallow_decl_provider::ShallowDeclStore; use ty::reason::Reason; use crate::serde_store::StoreOpts; use crate::SerializingStore; pub fn make_folded_decl_provider<R: Reason>( store_opts: StoreOpts, naming_table: Option<&PathBuf>, shallow_decl_store: ShallowDeclStore<R>, opts: Arc<ParserOptions>, decl_parser: DeclParser<R>, ) -> impl FoldedDeclProvider<R> { let shallow_decl_provider: Arc<dyn ShallowDeclProvider<R>> = if let Some(naming_table_path) = naming_table { Arc::new(LazyShallowDeclProvider::new( Arc::new(shallow_decl_store), Arc::new(SqliteNamingTable::new(naming_table_path).unwrap()), decl_parser, )) } else { Arc::new(EagerShallowDeclProvider::new(Arc::new(shallow_decl_store))) }; LazyFoldedDeclProvider::new( opts, match store_opts { StoreOpts::Serialized(compression_type) => { Arc::new(SerializingStore::with_compression(compression_type)) } StoreOpts::Unserialized => Arc::new(NonEvictingStore::new()), }, shallow_decl_provider, ) }
Rust
hhvm/hphp/hack/src/hackrs/hackrs_test_utils/mod.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. pub mod decl_provider; pub mod serde_store; pub mod store; pub use decl_provider::*; pub use serde_store::*; pub use store::*;
Rust
hhvm/hphp/hack/src/hackrs/hackrs_test_utils/serde_store.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use std::fmt::Debug; use std::hash::Hash; use anyhow::Result; use hash::DashMap; use serde::de::DeserializeOwned; use serde::Serialize; pub struct SerializingStore<K: Hash + Eq, V: Serialize + DeserializeOwned> { /// A non-evicting store for serialized values. store: DashMap<K, Box<[u8]>>, /// An LRU cache of hashconsed values, in front of the non-evicting /// serialized store. cache: moka::sync::SegmentedCache<K, V>, compression: Compression, } #[derive(Copy, Clone, Debug)] pub enum Compression { None, Zstd, Lz4, } #[derive(Copy, Clone, Debug)] pub enum StoreOpts { Unserialized, Serialized(Compression), } impl<K, V> Default for SerializingStore<K, V> where K: Copy + Hash + Eq + Send + Sync + 'static, V: Clone + Serialize + DeserializeOwned + Send + Sync + 'static, { fn default() -> Self { Self { store: Default::default(), cache: moka::sync::SegmentedCache::new(1024, 32), compression: Default::default(), } } } impl Default for Compression { fn default() -> Self { Self::Zstd } } impl<K, V> SerializingStore<K, V> where K: Copy + Hash + Eq + Send + Sync + 'static, V: Clone + Serialize + DeserializeOwned + Send + Sync + 'static, { pub fn new() -> Self { Default::default() } pub fn with_compression(compression: Compression) -> Self { Self { compression, ..Default::default() } } } impl<K, V> datastore::Store<K, V> for SerializingStore<K, V> where K: Copy + Hash + Eq + Send + Sync + 'static, V: Clone + Serialize + DeserializeOwned + Send + Sync + 'static, { fn contains_key(&self, key: K) -> Result<bool> { if self.cache.contains_key(&key) { return Ok(true); } Ok(self.store.contains_key(&key)) } fn get(&self, key: K) -> Result<Option<V>> { if let val @ Some(..) 
= self.cache.get(&key) { return Ok(val); } let val_opt: Option<V> = self .store .get(&key) .map(|val| match self.compression { Compression::None => deserialize(&val), Compression::Zstd => { let serialized = zstd_decompress(&val)?; deserialize(&serialized) } Compression::Lz4 => { let serialized = lz4_decompress(&val)?; deserialize(&serialized) } }) .transpose()?; Ok(val_opt.map(|val| self.cache.get_with(key, || val))) } fn insert(&self, key: K, val: V) -> Result<()> { let serialized = serialize(&val)?; self.cache.insert(key, val); let compressed = match self.compression { Compression::None => serialized, Compression::Zstd => zstd_compress(&serialized)?, Compression::Lz4 => lz4_compress(&serialized)?, }; self.store.insert(key, compressed.into_boxed_slice()); Ok(()) } fn remove_batch(&self, keys: &mut dyn Iterator<Item = K>) -> Result<()> { for key in keys { if self.get(key)?.is_some() { self.store.remove(&key); self.cache.invalidate(&key); } } Ok(()) } } fn serialize<T: Serialize>(val: &T) -> Result<Vec<u8>> { let mut serialized = Vec::new(); bincode::serialize_into(&mut serialized, &intern::WithIntern(val))?; Ok(serialized) } fn deserialize<T: DeserializeOwned>(serialized: &[u8]) -> Result<T> { Ok(intern::WithIntern::strip(bincode::deserialize(serialized))?) 
} fn zstd_compress(mut bytes: &[u8]) -> Result<Vec<u8>> { let mut compressed = vec![]; zstd::stream::copy_encode(&mut bytes, &mut compressed, 0)?; Ok(compressed) } fn zstd_decompress(mut compressed: &[u8]) -> Result<Vec<u8>> { let mut decompressed = vec![]; zstd::stream::copy_decode(&mut compressed, &mut decompressed)?; Ok(decompressed) } fn lz4_compress(mut bytes: &[u8]) -> Result<Vec<u8>> { let mut encoder = lz4::EncoderBuilder::new().level(1).build(vec![])?; std::io::copy(&mut bytes, &mut encoder)?; let (compressed, result) = encoder.finish(); result?; Ok(compressed) } fn lz4_decompress(compressed: &[u8]) -> Result<Vec<u8>> { let mut decompressed = vec![]; let mut decoder = lz4::Decoder::new(compressed)?; std::io::copy(&mut decoder, &mut decompressed)?; Ok(decompressed) } impl<K, V> Debug for SerializingStore<K, V> where K: Hash + Eq, V: Serialize + DeserializeOwned, { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("SerializingStore").finish() } } impl std::str::FromStr for Compression { type Err = &'static str; fn from_str(s: &str) -> Result<Self, Self::Err> { match s { "none" => Ok(Self::None), "zstd" => Ok(Self::Zstd), "lz4" => Ok(Self::Lz4), _ => Err("compression must be one of 'none', 'zstd', 'lz4'"), } } } impl std::fmt::Display for Compression { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::None => write!(f, "none"), Self::Zstd => write!(f, "zstd"), Self::Lz4 => write!(f, "lz4"), } } }
Rust
hhvm/hphp/hack/src/hackrs/hackrs_test_utils/store.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use std::sync::Arc; use datastore::NonEvictingStore; use decl_parser::DeclParser; use indicatif::ParallelProgressIterator; use pos::RelativePath; use pos::TypeName; use rayon::iter::IntoParallelRefIterator; use rayon::iter::ParallelIterator; use shallow_decl_provider::ShallowDeclStore; use ty::reason::Reason; use crate::serde_store::StoreOpts; use crate::SerializingStore; pub fn make_shallow_decl_store<R: Reason>(opts: StoreOpts) -> ShallowDeclStore<R> { match opts { StoreOpts::Serialized(compression_type) => { ShallowDeclStore::new( Arc::new(SerializingStore::with_compression(compression_type)), // classes Arc::new(SerializingStore::with_compression(compression_type)), // typedefs Arc::new(SerializingStore::with_compression(compression_type)), // funs Arc::new(SerializingStore::with_compression(compression_type)), // consts Arc::new(SerializingStore::with_compression(compression_type)), // modules Arc::new(SerializingStore::with_compression(compression_type)), // properties Arc::new(SerializingStore::with_compression(compression_type)), // static_properties Arc::new(SerializingStore::with_compression(compression_type)), // methods Arc::new(SerializingStore::with_compression(compression_type)), // static_methods Arc::new(SerializingStore::with_compression(compression_type)), // constructors ) } StoreOpts::Unserialized => ShallowDeclStore::with_no_member_stores( Arc::new(NonEvictingStore::default()), // classes Arc::new(NonEvictingStore::default()), // typedefs Arc::new(NonEvictingStore::default()), // funs Arc::new(NonEvictingStore::default()), // consts Arc::new(NonEvictingStore::default()), // modules ), } } pub fn make_non_evicting_shallow_decl_store<R: Reason>() -> ShallowDeclStore<R> { make_shallow_decl_store(StoreOpts::Unserialized) } pub fn populate_shallow_decl_store<R: 
Reason>( shallow_decl_store: &ShallowDeclStore<R>, decl_parser: DeclParser<R>, filenames: &[RelativePath], ) -> Vec<TypeName> { let len = filenames.len(); filenames .par_iter() .progress_count(len as u64) .flat_map_iter(|path| { let (mut decls, summary) = decl_parser.parse_and_summarize(*path).unwrap(); decls.reverse(); // To match OCaml behavior for name collisions shallow_decl_store.add_decls(decls).unwrap(); summary .classes() .map(|decl| TypeName::new(&decl.symbol)) .collect::<Vec<_>>() .into_iter() }) .collect() }
TOML
hhvm/hphp/hack/src/hackrs/hackrs_test_utils/cargo/hackrs_test_utils/Cargo.toml
# @generated by autocargo [package] name = "hackrs_test_utils" version = "0.0.0" edition = "2021" [lib] path = "../../mod.rs" [dependencies] anyhow = "1.0.71" bincode = "1.3.3" datastore = { version = "0.0.0", path = "../../../datastore" } decl_parser = { version = "0.0.0", path = "../../../decl_parser/cargo/decl_parser" } folded_decl_provider = { version = "0.0.0", path = "../../../folded_decl_provider/cargo/folded_decl_provider" } hash = { version = "0.0.0", path = "../../../../utils/hash" } indicatif = { version = "0.17.3", features = ["improved_unicode", "rayon", "tokio"] } intern = { version = "0.1.0", path = "../../../../utils/intern" } lz4 = "1.24.0" moka = { version = "0.10.0", features = ["future"] } naming_provider = { version = "0.0.0", path = "../../../naming_provider/cargo/naming_provider" } oxidized = { version = "0.0.0", path = "../../../../oxidized" } pos = { version = "0.0.0", path = "../../../pos/cargo/pos" } rayon = "1.2" serde = { version = "1.0.176", features = ["derive", "rc"] } shallow_decl_provider = { version = "0.0.0", path = "../../../shallow_decl_provider/cargo/shallow_decl_provider" } ty = { version = "0.0.0", path = "../../../ty/cargo/ty" } zstd = { version = "0.11.2+zstd.1.5.2", features = ["experimental", "zstdmt"] }
Rust
hhvm/hphp/hack/src/hackrs/naming_provider/naming_provider.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use std::fmt; use std::fmt::Debug; use std::path::Path; use anyhow::Result; use hh24_types::ToplevelCanonSymbolHash; use hh24_types::ToplevelSymbolHash; use oxidized::file_info::NameType; use oxidized::naming_types::KindOfType; use parking_lot::Mutex; use pos::ConstName; use pos::FunName; use pos::ModuleName; use pos::RelativePath; use pos::TypeName; /// An abstraction over the global symbol table. Should be used by /// `LazyShallowDeclProvider` only, since folding and typechecking logic should /// have no need for a `NamingProvider`. pub trait NamingProvider: Debug + Send + Sync { fn get_type_path_and_kind(&self, name: TypeName) -> Result<Option<(RelativePath, KindOfType)>>; fn get_type_path(&self, name: TypeName) -> Result<Option<RelativePath>> { Ok(self.get_type_path_and_kind(name)?.map(|(path, _kind)| path)) } fn get_fun_path(&self, name: FunName) -> Result<Option<RelativePath>>; fn get_const_path(&self, name: ConstName) -> Result<Option<RelativePath>>; fn get_module_path(&self, name: ModuleName) -> Result<Option<RelativePath>>; /// Case-insensitive lookup. Fetch the correct casing according to the /// symbol table. fn get_canon_type_name(&self, name: TypeName) -> Result<Option<TypeName>>; fn get_canon_fun_name(&self, name: FunName) -> Result<Option<FunName>>; } /// A naming table in a SQLite database (with the same database schema as /// hh_server's SQLite saved states). 
pub struct SqliteNamingTable { names: Mutex<names::Names>, } impl SqliteNamingTable { pub fn new(path: impl AsRef<Path>) -> anyhow::Result<Self> { Ok(Self { names: Mutex::new(names::Names::from_file(path)?), }) } } impl NamingProvider for SqliteNamingTable { fn get_type_path_and_kind(&self, name: TypeName) -> Result<Option<(RelativePath, KindOfType)>> { let path_opt = self .names .lock() .get_filename(ToplevelSymbolHash::from_type(name.as_str()))?; Ok(path_opt.and_then(|(path, name_type)| { let kind = match name_type { NameType::Class => KindOfType::TClass, NameType::Typedef => KindOfType::TTypedef, _ => return None, }; Some((RelativePath::from(&path), kind)) })) } fn get_fun_path(&self, name: FunName) -> Result<Option<RelativePath>> { let path_opt = self .names .lock() .get_path_by_symbol_hash(ToplevelSymbolHash::from_fun(name.as_str()))?; Ok(path_opt.map(|path| RelativePath::new(path.prefix(), path.path()))) } fn get_const_path(&self, name: ConstName) -> Result<Option<RelativePath>> { let path_opt = self .names .lock() .get_path_by_symbol_hash(ToplevelSymbolHash::from_const(name.as_str()))?; Ok(path_opt.map(|path| RelativePath::new(path.prefix(), path.path()))) } fn get_module_path(&self, name: ModuleName) -> Result<Option<RelativePath>> { let path_opt = self .names .lock() .get_path_by_symbol_hash(ToplevelSymbolHash::from_module(name.as_str()))?; Ok(path_opt.map(|path| RelativePath::new(path.prefix(), path.path()))) } fn get_canon_type_name(&self, name: TypeName) -> Result<Option<TypeName>> { Ok(self .names .lock() .get_type_name_case_insensitive(ToplevelCanonSymbolHash::from(name))? .map(Into::into)) } fn get_canon_fun_name(&self, name: FunName) -> Result<Option<FunName>> { Ok(self .names .lock() .get_fun_name_case_insensitive(ToplevelCanonSymbolHash::from(name))? .map(Into::into)) } } impl Debug for SqliteNamingTable { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "SqliteNamingTable") } }
TOML
hhvm/hphp/hack/src/hackrs/naming_provider/cargo/naming_provider/Cargo.toml
# @generated by autocargo [package] name = "naming_provider" version = "0.0.0" edition = "2021" [lib] path = "../../naming_provider.rs" [dependencies] anyhow = "1.0.71" hh24_types = { version = "0.0.0", path = "../../../../utils/hh24_types" } names = { version = "0.0.0", path = "../../../../naming/names_rust" } oxidized = { version = "0.0.0", path = "../../../../oxidized" } parking_lot = { version = "0.12.1", features = ["send_guard"] } pos = { version = "0.0.0", path = "../../../pos/cargo/pos" }
Rust
hhvm/hphp/hack/src/hackrs/pos/pos.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use std::fmt; use std::hash::Hash; use eq_modulo_pos::EqModuloPos; use ocamlrep::FromOcamlRep; use ocamlrep::ToOcamlRep; use oxidized::file_pos_small::FilePosSmall; use oxidized::pos_span_raw::PosSpanRaw; use oxidized::pos_span_tiny::PosSpanTiny; use serde::de::DeserializeOwned; use serde::Deserialize; use serde::Serialize; mod relative_path; mod symbol; mod to_oxidized; pub use oxidized::file_pos_large::FilePosLarge; pub use symbol::*; pub use to_oxidized::ToOxidized; pub use crate::relative_path::*; pub trait Pos: Eq + Hash + Clone + std::fmt::Debug + Serialize + DeserializeOwned + for<'a> From<&'a oxidized::pos::Pos> + for<'a> From<&'a oxidized_by_ref::pos::Pos<'a>> + for<'a> ToOxidized<'a, Output = &'a oxidized_by_ref::pos::Pos<'a>> + ToOcamlRep + FromOcamlRep + EqModuloPos + 'static { /// Make a new instance. If the implementing Pos is stateful, /// it will call cons() to obtain interned values to construct the instance. fn mk(cons: impl FnOnce() -> (RelativePath, FilePosLarge, FilePosLarge)) -> Self; fn none() -> Self; fn from_ast(pos: &oxidized::pos::Pos) -> Self { Self::mk(|| { let PosSpanRaw { start, end } = pos.to_raw_span(); (pos.filename().into(), start, end) }) } fn from_decl(pos: &oxidized_by_ref::pos::Pos<'_>) -> Self { Self::mk(|| { let PosSpanRaw { start, end } = pos.to_raw_span(); (pos.filename().into(), start, end) }) } fn is_hhi(&self) -> bool; } /// Represents a closed-ended range [start, end] in a file. 
// Internal storage for BPos. Three encodings, tried smallest-first by
// `BPos::new`: `Tiny` packs the whole span inline, `Small` boxes two
// `FilePosSmall`, `Large` boxes two `FilePosLarge`.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
enum PosImpl {
    Small {
        prefix: Prefix,
        suffix: Bytes,
        span: Box<(FilePosSmall, FilePosSmall)>,
    },
    Large {
        prefix: Prefix,
        suffix: Bytes,
        span: Box<(FilePosLarge, FilePosLarge)>,
    },
    Tiny {
        prefix: Prefix,
        suffix: Bytes,
        span: PosSpanTiny,
    },
}

// Keep PosImpl (and therefore BPos) at 16 bytes.
static_assertions::assert_eq_size!(PosImpl, u128);

/// A position holding a real file span, stored in the most compact
/// `PosImpl` encoding that can represent it.
#[derive(Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct BPos(PosImpl);

impl Pos for BPos {
    fn mk(cons: impl FnOnce() -> (RelativePath, FilePosLarge, FilePosLarge)) -> Self {
        let (file, start, end) = cons();
        Self::new(file, start, end)
    }

    fn none() -> Self {
        BPos::none()
    }

    fn is_hhi(&self) -> bool {
        // hhi-ness is determined solely by the path prefix.
        let BPos(pos_impl) = self;
        let prefix = match *pos_impl {
            PosImpl::Small { prefix, .. } => prefix,
            PosImpl::Large { prefix, .. } => prefix,
            PosImpl::Tiny { prefix, .. } => prefix,
        };
        prefix == Prefix::Hhi
    }
}

impl BPos {
    /// Construct with the smallest encoding that fits: Tiny, then Small,
    /// falling back to Large.
    pub fn new(file: RelativePath, start: FilePosLarge, end: FilePosLarge) -> Self {
        let prefix = file.prefix();
        let suffix = file.suffix();
        if let Some(span) = PosSpanTiny::make(&start, &end) {
            return BPos(PosImpl::Tiny {
                prefix,
                suffix,
                span,
            });
        }
        let (lnum, bol, offset) = start.line_beg_offset();
        if let Some(start) = FilePosSmall::from_lnum_bol_offset(lnum, bol, offset) {
            let (lnum, bol, offset) = end.line_beg_offset();
            if let Some(end) = FilePosSmall::from_lnum_bol_offset(lnum, bol, offset) {
                let span = Box::new((start, end));
                return BPos(PosImpl::Small {
                    prefix,
                    suffix,
                    span,
                });
            }
        }
        let span = Box::new((start, end));
        BPos(PosImpl::Large {
            prefix,
            suffix,
            span,
        })
    }

    /// The sentinel position: empty (dummy) path with a dummy tiny span.
    pub const fn none() -> Self {
        let file = RelativePath::empty();
        Self(PosImpl::Tiny {
            prefix: file.prefix(),
            suffix: file.suffix(),
            span: PosSpanTiny::make_dummy(),
        })
    }

    /// True only for the sentinel produced by `none()`.
    pub fn is_none(&self) -> bool {
        match self {
            BPos(PosImpl::Tiny { span, .. }) => span.is_dummy() && self.file().is_empty(),
            _ => false,
        }
    }

    /// The file this position points into, reassembled from the stored
    /// (prefix, suffix) pair.
    pub const fn file(&self) -> RelativePath {
        match self.0 {
            PosImpl::Small { prefix, suffix, .. }
            | PosImpl::Large { prefix, suffix, .. }
            | PosImpl::Tiny { prefix, suffix, .. } => RelativePath::from_bytes(prefix, suffix),
        }
    }
}

impl fmt::Debug for BPos {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Columns are printed 1-based (stored values are incremented);
        // single-line spans collapse to "line:startcol-endcol".
        let mut do_fmt = |start_line, start_col, end_line, end_col| {
            if start_line == end_line {
                write!(
                    f,
                    "Pos({:?}, {}:{}-{})",
                    &self.file(),
                    &start_line,
                    &(start_col + 1),
                    &(end_col + 1),
                )
            } else {
                write!(
                    f,
                    "Pos({:?}, {}:{}-{}:{})",
                    &self.file(),
                    &start_line,
                    &(start_col + 1),
                    &end_line,
                    &(end_col + 1),
                )
            }
        };
        if self.is_none() {
            return write!(f, "Pos(None)");
        }
        match &self.0 {
            PosImpl::Small { span, .. } => {
                let (start, end) = &**span;
                do_fmt(start.line(), start.column(), end.line(), end.column())
            }
            PosImpl::Large { span, .. } => {
                let (start, end) = &**span;
                do_fmt(start.line(), start.column(), end.line(), end.column())
            }
            PosImpl::Tiny { span, .. } => {
                let span = span.to_raw_span();
                do_fmt(
                    span.start.line(),
                    span.start.column(),
                    span.end.line(),
                    span.end.column(),
                )
            }
        }
    }
}

// All positions compare equal "modulo pos", by definition.
impl EqModuloPos for BPos {
    fn eq_modulo_pos(&self, _rhs: &Self) -> bool {
        true
    }
    fn eq_modulo_pos_and_reason(&self, _rhs: &Self) -> bool {
        true
    }
}

impl From<BPos> for oxidized::pos::Pos {
    fn from(pos: BPos) -> Self {
        let file = std::sync::Arc::new(pos.file().into());
        Self::from_raw_span(
            file,
            match &pos.0 {
                PosImpl::Small { span, .. } => {
                    let (start, end) = **span;
                    PosSpanRaw {
                        start: start.into(),
                        end: end.into(),
                    }
                }
                PosImpl::Large { span, .. } => {
                    let (start, end) = **span;
                    PosSpanRaw { start, end }
                }
                PosImpl::Tiny { span, .. } => span.to_raw_span(),
            },
        )
    }
}

impl<'a> From<&'a oxidized::pos::Pos> for BPos {
    fn from(pos: &'a oxidized::pos::Pos) -> Self {
        Self::from_ast(pos)
    }
}

impl<'a> From<&'a oxidized_by_ref::pos::Pos<'a>> for BPos {
    fn from(pos: &'a oxidized_by_ref::pos::Pos<'a>) -> Self {
        Self::from_decl(pos)
    }
}

impl<'a> ToOxidized<'a> for BPos {
    type Output = &'a oxidized_by_ref::pos::Pos<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        let file = self.file().to_oxidized(arena);
        arena.alloc(match &self.0 {
            PosImpl::Small { span, .. } => {
                let (start, end) = **span;
                oxidized_by_ref::pos::Pos::from_raw_span(
                    arena,
                    file,
                    PosSpanRaw {
                        start: start.into(),
                        end: end.into(),
                    },
                )
            }
            PosImpl::Large { span, .. } => {
                let (start, end) = **span;
                oxidized_by_ref::pos::Pos::from_raw_span(arena, file, PosSpanRaw { start, end })
            }
            PosImpl::Tiny { span, .. } => {
                let span = span.to_raw_span();
                oxidized_by_ref::pos::Pos::from_raw_span(arena, file, span)
            }
        })
    }
}

// Hand-written OCaml conversion. The tags mirror the OCaml variants named
// in `FromOcamlRep` below: Pos_small = 0, Pos_large = 1, Pos_tiny = 2;
// the file is encoded as a 2-field (prefix, suffix) block.
impl ToOcamlRep for BPos {
    fn to_ocamlrep<'a, A: ocamlrep::Allocator>(&'a self, alloc: &'a A) -> ocamlrep::Value<'a> {
        let file = match &self.0 {
            PosImpl::Small { prefix, suffix, .. }
            | PosImpl::Large { prefix, suffix, .. }
            | PosImpl::Tiny { prefix, suffix, .. } => {
                let mut file = alloc.block_with_size(2);
                alloc.set_field(&mut file, 0, prefix.to_ocamlrep(alloc));
                alloc.set_field(&mut file, 1, suffix.to_ocamlrep(alloc));
                file.build()
            }
        };
        match &self.0 {
            PosImpl::Small { span, .. } => {
                let (start, end) = &**span;
                let mut pos = alloc.block_with_size_and_tag(3usize, 0u8);
                alloc.set_field(&mut pos, 0, file);
                alloc.set_field(&mut pos, 1, start.to_ocamlrep(alloc));
                alloc.set_field(&mut pos, 2, end.to_ocamlrep(alloc));
                pos.build()
            }
            PosImpl::Large { span, .. } => {
                let (start, end) = &**span;
                let mut pos = alloc.block_with_size_and_tag(3usize, 1u8);
                alloc.set_field(&mut pos, 0, file);
                alloc.set_field(&mut pos, 1, start.to_ocamlrep(alloc));
                alloc.set_field(&mut pos, 2, end.to_ocamlrep(alloc));
                pos.build()
            }
            PosImpl::Tiny { span, .. } => {
                let mut pos = alloc.block_with_size_and_tag(2usize, 2u8);
                alloc.set_field(&mut pos, 0, file);
                alloc.set_field(&mut pos, 1, span.to_ocamlrep(alloc));
                pos.build()
            }
        }
    }
}

impl FromOcamlRep for BPos {
    fn from_ocamlrep(value: ocamlrep::Value<'_>) -> Result<Self, ocamlrep::FromError> {
        let block = ocamlrep::from::expect_block(value)?;
        match block.tag() {
            0u8 /* Pos_small */ => {
                ocamlrep::from::expect_block_size(block, 3usize)?;
                let path = ocamlrep::from::expect_tuple(block[0], 2)?;
                let (prefix, suffix) = (
                    Prefix::from_ocamlrep(path[0])?,
                    Bytes::from_ocamlrep(path[1])?
                );
                let span = Box::new((
                    FilePosSmall::from_ocamlrep(block[1])?,
                    FilePosSmall::from_ocamlrep(block[2])?
                ));
                Ok(BPos(PosImpl::Small{prefix, suffix, span}))
            },
            1u8 /* Pos_large */ => {
                ocamlrep::from::expect_block_size(block, 3usize)?;
                let path = ocamlrep::from::expect_tuple(block[0], 2)?;
                let (prefix, suffix) = (
                    Prefix::from_ocamlrep(path[0])?,
                    Bytes::from_ocamlrep(path[1])?
                );
                let span = Box::new((
                    FilePosLarge::from_ocamlrep(block[1])?,
                    FilePosLarge::from_ocamlrep(block[2])?
                ));
                Ok(BPos(PosImpl::Large{prefix, suffix, span}))
            },
            2u8 /* Pos_tiny */ => {
                ocamlrep::from::expect_block_size(block, 2usize)?;
                let path = ocamlrep::from::expect_tuple(block[0], 2)?;
                let (prefix, suffix) = (
                    Prefix::from_ocamlrep(path[0])?,
                    Bytes::from_ocamlrep(path[1])?
                );
                let span = PosSpanTiny::from_ocamlrep(block[1])?;
                Ok(BPos(PosImpl::Tiny{prefix, suffix, span}))
            },
            // Pos_from_reason (or any other tag) is not representable here.
            tag /* Pos_from_reason */ => {
                Err(ocamlrep::FromError::BlockTagOutOfRange{max:2u8, actual:tag})
            }
        }
    }
}

/// A stateless sentinel Pos.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct NPos;

impl Pos for NPos {
    // Zero-sized: the constructor closure is never invoked.
    fn mk(_cons: impl FnOnce() -> (RelativePath, FilePosLarge, FilePosLarge)) -> Self {
        NPos
    }

    fn none() -> Self {
        NPos
    }

    fn is_hhi(&self) -> bool {
        false // See T81321312.
        // Note(SF, 2022-03-23): Jake advises "This definition will lead to a
        // small behavior difference between `NPos` and `BPos`: when
        // typechecking in posisition-free mode we'll register depedencies on
        // hhi files but in positioned mode we won't. If this turns out to be
        // problematic, one solution is to make `NPos` store a `u8` rather than
        // being zero-sized and in that we can store a bit for whether the
        // position is in a hhi file."
    }
}

impl EqModuloPos for NPos {
    fn eq_modulo_pos(&self, _rhs: &Self) -> bool {
        true
    }
    fn eq_modulo_pos_and_reason(&self, _rhs: &Self) -> bool {
        true
    }
}

impl<'a> From<&'a oxidized::pos::Pos> for NPos {
    fn from(pos: &'a oxidized::pos::Pos) -> Self {
        Self::from_ast(pos)
    }
}

impl<'a> From<&'a oxidized_by_ref::pos::Pos<'a>> for NPos {
    fn from(pos: &'a oxidized_by_ref::pos::Pos<'a>) -> Self {
        Self::from_decl(pos)
    }
}

impl<'a> ToOxidized<'a> for NPos {
    type Output = &'a oxidized_by_ref::pos::Pos<'a>;

    // Always converts to the shared "none" position.
    fn to_oxidized(&self, _arena: &'a bumpalo::Bump) -> Self::Output {
        oxidized_by_ref::pos::Pos::none()
    }
}

impl ToOcamlRep for NPos {
    // Serialized as the "none" position.
    fn to_ocamlrep<'a, A: ocamlrep::Allocator>(&'a self, alloc: &'a A) -> ocamlrep::Value<'a> {
        oxidized_by_ref::pos::Pos::none().to_ocamlrep(alloc)
    }
}

impl FromOcamlRep for NPos {
    // Any OCaml position deserializes to the zero-sized sentinel.
    fn from_ocamlrep(_value: ocamlrep::Value<'_>) -> Result<Self, ocamlrep::FromError> {
        Ok(Self)
    }
}

/// An identifier-like value `id` paired with its position `pos`.
#[derive(Clone, PartialEq, Eq, EqModuloPos, Hash)]
#[derive(Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
pub struct Positioned<S, P> {
    // Caution: field order matters because we derive
    // `ToOcamlRep`/`FromOcamlRep` for this type.
    pos: P,
    id: S,
}

impl<S: fmt::Debug, P: fmt::Debug> fmt::Debug for Positioned<S, P> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // For zero-sized positions (e.g. NPos), print just the id.
        if std::mem::size_of::<P>() == 0 {
            write!(f, "{:?}", &self.id)
        } else {
            f.debug_tuple("").field(&self.pos).field(&self.id).finish()
        }
    }
}

impl<S, P> Positioned<S, P> {
    pub fn new(pos: P, id: S) -> Self {
        Self { pos, id }
    }

    pub fn pos(&self) -> &P {
        &self.pos
    }

    pub fn into_pos(self) -> P {
        self.pos
    }

    pub fn id_ref(&self) -> &S {
        &self.id
    }
}

impl<S: Copy, P> Positioned<S, P> {
    pub fn id(&self) -> S {
        self.id
    }
}

impl<'a, S: From<&'a str>, P: Pos> From<&'a oxidized::ast_defs::Id> for Positioned<S, P> {
    fn from(pos_id: &'a oxidized::ast_defs::Id) -> Self {
        let oxidized::ast_defs::Id(pos, id) = pos_id;
        Self::new(Pos::from_ast(pos), S::from(id))
    }
}

impl<'a, S: From<&'a str>, P: Pos> From<oxidized_by_ref::ast_defs::Id<'a>> for Positioned<S, P> {
    fn from(pos_id: oxidized_by_ref::ast_defs::Id<'a>) -> Self {
        let oxidized_by_ref::ast_defs::Id(pos, id) = pos_id;
        Self::new(Pos::from_decl(pos), S::from(id))
    }
}

impl<'a, S: From<&'a str>, P: Pos> From<oxidized_by_ref::typing_defs::PosId<'a>>
    for Positioned<S, P>
{
    fn from(pos_id: oxidized_by_ref::typing_defs::PosId<'a>) -> Self {
        let (pos, id) = pos_id;
        Self::new(Pos::from_decl(pos), S::from(id))
    }
}

impl<'a, S: ToOxidized<'a, Output = &'a str>, P: Pos> ToOxidized<'a> for Positioned<S, P> {
    type Output = oxidized_by_ref::typing_reason::PosId<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        (self.pos.to_oxidized(arena), self.id.to_oxidized(arena))
    }
}
Rust
hhvm/hphp/hack/src/hackrs/pos/relative_path.rs
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.

use std::ffi::OsStr;
use std::fmt;
use std::os::unix::ffi::OsStrExt;
use std::path::Path;
use std::path::PathBuf;

use ocamlrep::FromOcamlRep;
use ocamlrep::FromOcamlRepIn;
use ocamlrep::ToOcamlRep;
pub use relative_path::Prefix;
pub use relative_path::RelativePathCtx;

use crate::Bytes;
use crate::ToOxidized;

/// A path relative to one of a fixed set of roots (`Prefix`), stored as the
/// prefix tag plus interned suffix bytes — `Copy` and cheap to compare.
#[derive(Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)]
#[derive(serde::Serialize, serde::Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
pub struct RelativePath {
    prefix: Prefix,
    suffix: Bytes,
}

impl RelativePath {
    /// Intern `suffix`'s bytes and pair them with `prefix`.
    pub fn new<P: AsRef<Path>>(prefix: Prefix, suffix: P) -> Self {
        let suffix = Bytes::new(suffix.as_ref().as_os_str().as_bytes());
        Self::from_bytes(prefix, suffix)
    }

    /// The sentinel empty path: dummy prefix, empty suffix.
    pub const fn empty() -> Self {
        Self {
            prefix: Prefix::Dummy,
            suffix: Bytes::EMPTY,
        }
    }

    pub fn is_empty(&self) -> bool {
        self.prefix == Prefix::Dummy && self.suffix == Bytes::EMPTY
    }

    pub const fn from_bytes(prefix: Prefix, suffix: Bytes) -> Self {
        Self { prefix, suffix }
    }

    #[inline]
    pub const fn prefix(&self) -> Prefix {
        self.prefix
    }

    #[inline]
    pub const fn suffix(&self) -> Bytes {
        self.suffix
    }

    /// Whether the path lives under the hhi root.
    #[inline]
    pub fn is_hhi(&self) -> bool {
        self.prefix() == Prefix::Hhi
    }

    /// Resolve to an absolute path using `ctx`'s root for this prefix.
    pub fn to_absolute(&self, ctx: &RelativePathCtx) -> PathBuf {
        let mut buf = ctx.prefix_path(self.prefix).to_owned();
        buf.push(OsStr::from_bytes(self.suffix.as_bytes()));
        buf
    }

    /// The suffix alone, as an owned `PathBuf`.
    pub fn suffix_buf(&self) -> PathBuf {
        PathBuf::from(OsStr::from_bytes(self.suffix.as_bytes()))
    }
}

impl arena_trait::TrivialDrop for RelativePath {}

impl<'a> ToOxidized<'a> for RelativePath {
    type Output = &'a oxidized_by_ref::relative_path::RelativePath<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        arena.alloc(oxidized_by_ref::relative_path::RelativePath::new(
            self.prefix,
            Path::new(OsStr::from_bytes(self.suffix.as_bytes())),
        ))
    }
}

impl<'a> FromOcamlRepIn<'a> for RelativePath {
    fn from_ocamlrep_in(
        value: ocamlrep::Value<'_>,
        _arena: &'a bumpalo::Bump,
    ) -> Result<Self, ocamlrep::FromError> {
        // Deserialize via the non-interned representation, then intern.
        let path = relative_path::RelativePath::from_ocamlrep(value)?;
        Ok(Self::from(&path))
    }
}

impl From<RelativePath> for relative_path::RelativePath {
    fn from(path: RelativePath) -> Self {
        Self::make(
            path.prefix,
            OsStr::from_bytes(path.suffix.as_bytes()).into(),
        )
    }
}

impl From<&relative_path::RelativePath> for RelativePath {
    fn from(path: &relative_path::RelativePath) -> Self {
        Self::new(path.prefix(), path.path())
    }
}

impl From<&oxidized_by_ref::relative_path::RelativePath<'_>> for RelativePath {
    fn from(path: &oxidized_by_ref::relative_path::RelativePath<'_>) -> Self {
        Self::new(path.prefix(), path.path())
    }
}

impl fmt::Debug for RelativePath {
    // Printed as "prefix|suffix".
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "{}|{}",
            self.prefix,
            Path::new(OsStr::from_bytes(self.suffix.as_bytes())).display()
        )
    }
}
Rust
hhvm/hphp/hack/src/hackrs/pos/symbol.rs
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.

use eq_modulo_pos::EqModuloPos;
use hh24_types::ToplevelCanonSymbolHash;
use hh24_types::ToplevelSymbolHash;
use intern::string::BytesId;
use intern::string::IntoUtf8Bytes;
use intern::string::StringId;
use ocamlrep::FromOcamlRep;
use ocamlrep::FromOcamlRepIn;
use ocamlrep::ToOcamlRep;
use serde::Deserialize;
use serde::Serialize;

use crate::ToOxidized;

/// An interned UTF-8 string. Copyable; equality and hashing use the
/// intern id rather than the string contents.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Ord, PartialOrd)]
#[derive(Serialize, Deserialize)]
pub struct Symbol(pub StringId);
// nb: StringId implements Hash & Eq using the u32 id, and Ord
// using the underlying string after a fast check for equal ids.

impl Symbol {
    /// Intern `s` (anything convertible to UTF-8 bytes).
    pub fn new<S: IntoUtf8Bytes>(s: S) -> Self {
        Self(intern::string::intern(s))
    }
}

impl Symbol {
    // Interned strings live forever, hence the 'static lifetimes.
    pub fn as_str(&self) -> &'static str {
        self.0.as_str()
    }

    pub fn as_bytes(&self) -> &'static [u8] {
        self.0.as_str().as_bytes()
    }
}

impl std::ops::Deref for Symbol {
    type Target = str;

    fn deref(&self) -> &str {
        self.as_str()
    }
}

impl std::convert::AsRef<str> for Symbol {
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}

impl std::fmt::Debug for Symbol {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{:?}", self.as_str())
    }
}

impl std::fmt::Display for Symbol {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.as_str())
    }
}

impl<T: IntoUtf8Bytes> From<T> for Symbol {
    fn from(s: T) -> Self {
        Self::new(s)
    }
}

// Symbols carry no position, so "modulo pos" equality is plain equality.
impl EqModuloPos for Symbol {
    fn eq_modulo_pos(&self, rhs: &Self) -> bool {
        self == rhs
    }
    fn eq_modulo_pos_and_reason(&self, rhs: &Self) -> bool {
        self == rhs
    }
}

impl<'a> ToOxidized<'a> for Symbol {
    type Output = &'a str;

    fn to_oxidized(&self, _arena: &'a bumpalo::Bump) -> &'a str {
        self.as_str()
    }
}

impl ToOcamlRep for Symbol {
    fn to_ocamlrep<'a, A: ocamlrep::Allocator>(&'a self, alloc: &'a A) -> ocamlrep::Value<'a> {
        alloc.add_copy(self.as_str())
    }
}

impl FromOcamlRep for Symbol {
    fn from_ocamlrep(value: ocamlrep::Value<'_>) -> Result<Self, ocamlrep::FromError> {
        Ok(Self::new(ocamlrep::str_from_ocamlrep(value)?))
    }
}

impl<'a> FromOcamlRepIn<'a> for Symbol {
    fn from_ocamlrep_in(
        value: ocamlrep::Value<'_>,
        _arena: &'a bumpalo::Bump,
    ) -> Result<Self, ocamlrep::FromError> {
        Ok(Self::new(ocamlrep::str_from_ocamlrep(value)?))
    }
}

impl arena_trait::TrivialDrop for Symbol {}

/// An interned byte string (not necessarily valid UTF-8); the byte-string
/// counterpart of `Symbol`.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Ord, PartialOrd)]
#[derive(Serialize, Deserialize)]
pub struct Bytes(pub BytesId);
// nb: BytesId implements Hash & Eq using the u32 id, and Ord
// using the underlying bytestring after a fast check for equal ids.

impl Bytes {
    pub const EMPTY: Bytes = Bytes(BytesId::EMPTY);

    pub fn new<S: AsRef<[u8]>>(s: S) -> Self {
        Self(intern::string::intern_bytes(s.as_ref()))
    }
}

impl Bytes {
    pub fn as_bytes(&self) -> &'static [u8] {
        self.0.as_bytes()
    }

    pub fn as_bstr(&self) -> &bstr::BStr {
        self.0.as_bytes().into()
    }
}

impl std::ops::Deref for Bytes {
    type Target = [u8];

    fn deref(&self) -> &[u8] {
        self.as_bytes()
    }
}

impl std::convert::AsRef<[u8]> for Bytes {
    fn as_ref(&self) -> &[u8] {
        self.as_bytes()
    }
}

impl std::fmt::Debug for Bytes {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{:?}", self.as_bstr())
    }
}

impl std::fmt::Display for Bytes {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.as_bstr())
    }
}

impl From<&[u8]> for Bytes {
    fn from(s: &[u8]) -> Self {
        Self::new(s)
    }
}

impl From<&bstr::BStr> for Bytes {
    fn from(s: &bstr::BStr) -> Self {
        Self::new(s)
    }
}

impl From<&str> for Bytes {
    fn from(s: &str) -> Self {
        Self::new(s)
    }
}

impl EqModuloPos for Bytes {
    fn eq_modulo_pos(&self, rhs: &Self) -> bool {
        self == rhs
    }
    fn eq_modulo_pos_and_reason(&self, rhs: &Self) -> bool {
        self == rhs
    }
}

impl<'a> ToOxidized<'a> for Bytes {
    type Output = &'a [u8];

    fn to_oxidized(&self, _arena: &'a bumpalo::Bump) -> &'a [u8] {
        self.0.as_bytes()
    }
}

impl ToOcamlRep for Bytes {
    fn to_ocamlrep<'a, A: ocamlrep::Allocator>(&'a self, alloc: &'a A) -> ocamlrep::Value<'a> {
        alloc.add_copy(self.as_bytes())
    }
}

impl FromOcamlRep for Bytes {
    fn from_ocamlrep(value: ocamlrep::Value<'_>) -> Result<Self, ocamlrep::FromError> {
        Ok(Self::new(ocamlrep::bytes_from_ocamlrep(value)?))
    }
}

impl<'a> FromOcamlRepIn<'a> for Bytes {
    fn from_ocamlrep_in(
        value: ocamlrep::Value<'_>,
        _arena: &'a bumpalo::Bump,
    ) -> Result<Self, ocamlrep::FromError> {
        Ok(Self::new(ocamlrep::bytes_from_ocamlrep(value)?))
    }
}

impl arena_trait::TrivialDrop for Bytes {}

// Shared impls for the name-newtype wrappers below. Each wrapper is a
// transparent newtype over `Symbol`, picking up construction, string
// access, formatting, and OCaml/oxidized conversions.
macro_rules! common_impls {
    ($name:ident) => {
        impl $name {
            pub fn new<S: IntoUtf8Bytes>(s: S) -> Self {
                Self(Symbol::new(s))
            }

            pub fn as_str(&self) -> &'static str {
                self.0.as_str()
            }

            pub fn as_symbol(self) -> Symbol {
                self.0
            }
        }

        impl std::ops::Deref for $name {
            type Target = str;

            fn deref(&self) -> &str {
                self.as_str()
            }
        }

        impl std::convert::AsRef<str> for $name {
            fn as_ref(&self) -> &str {
                self.as_str()
            }
        }

        impl std::borrow::Borrow<str> for $name {
            fn borrow(&self) -> &str {
                self.as_str()
            }
        }

        impl std::borrow::Borrow<[u8]> for $name {
            fn borrow(&self) -> &[u8] {
                self.as_bytes()
            }
        }

        impl std::fmt::Debug for $name {
            fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                write!(f, "{:?}", self.as_str())
            }
        }

        impl std::fmt::Display for $name {
            fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                write!(f, "{}", self.as_str())
            }
        }

        impl<T: IntoUtf8Bytes> From<T> for $name {
            fn from(s: T) -> Self {
                Self::new(s)
            }
        }

        impl<'a> ToOxidized<'a> for $name {
            type Output = &'a str;

            fn to_oxidized(&self, _arena: &'a bumpalo::Bump) -> &'a str {
                self.as_str()
            }
        }

        impl ToOcamlRep for $name {
            fn to_ocamlrep<'a, A: ocamlrep::Allocator>(
                &'a self,
                alloc: &'a A,
            ) -> ocamlrep::Value<'a> {
                alloc.add_copy(self.as_str())
            }
        }

        impl FromOcamlRep for $name {
            fn from_ocamlrep(value: ocamlrep::Value<'_>) -> Result<Self, ocamlrep::FromError> {
                Ok(Self::new(ocamlrep::str_from_ocamlrep(value)?))
            }
        }

        impl<'a> FromOcamlRepIn<'a> for $name {
            fn from_ocamlrep_in(
                value: ocamlrep::Value<'_>,
                _arena: &'a bumpalo::Bump,
            ) -> Result<Self, ocamlrep::FromError> {
                Ok(Self::new(ocamlrep::str_from_ocamlrep(value)?))
            }
        }

        impl arena_trait::TrivialDrop for $name {}
    };
}

// The following newtype wrappers are all for name categories that are
// disjoint from each other.
// Toplevel names can have namespace qualifiers, unlike member names.
// Toplevel names are not case sensitive in HHVM
//
// Any one of these name wrappers could turn into an enum if necessary
// to avoid stringly typed mangled names during compilation.

/// A TypeName is the name of a class, interface, trait, type parameter,
/// type alias, newtype, or primitive type names like int, arraykey, etc.
#[derive(Eq, PartialEq, EqModuloPos, Clone, Copy, Hash, Ord, PartialOrd)]
#[derive(Serialize, Deserialize)]
pub struct TypeName(pub Symbol);
common_impls!(TypeName);

impl From<TypeName> for ToplevelSymbolHash {
    fn from(symbol: TypeName) -> Self {
        Self::from_type(symbol.as_str())
    }
}

impl From<TypeName> for ToplevelCanonSymbolHash {
    fn from(symbol: TypeName) -> Self {
        Self::from_type(symbol.as_str().to_owned())
    }
}

/// ModuleName is introduced by the experimental Modules feature and `internal`
/// visibility. ModuleNames are not bindable names and are not intended
/// to be interchangeable with any other kind of name.
#[derive(Eq, PartialEq, EqModuloPos, Clone, Copy, Hash, Ord, PartialOrd)]
#[derive(Serialize, Deserialize)]
pub struct ModuleName(pub Symbol);
common_impls!(ModuleName);

impl From<ModuleName> for ToplevelSymbolHash {
    fn from(symbol: ModuleName) -> Self {
        Self::from_module(symbol.as_str())
    }
}

/// Name of a top level constant.
#[derive(Eq, PartialEq, EqModuloPos, Clone, Copy, Hash, Ord, PartialOrd)]
#[derive(Serialize, Deserialize)]
pub struct ConstName(pub Symbol);
common_impls!(ConstName);

impl From<ConstName> for ToplevelSymbolHash {
    fn from(symbol: ConstName) -> Self {
        Self::from_const(symbol.as_str())
    }
}

/// Name of a top level function.
#[derive(Eq, PartialEq, EqModuloPos, Clone, Copy, Hash, Ord, PartialOrd)]
#[derive(Serialize, Deserialize)]
pub struct FunName(pub Symbol);
common_impls!(FunName);

impl From<FunName> for ToplevelSymbolHash {
    fn from(symbol: FunName) -> Self {
        Self::from_fun(symbol.as_str())
    }
}

impl From<FunName> for ToplevelCanonSymbolHash {
    fn from(symbol: FunName) -> Self {
        Self::from_fun(symbol.as_str().to_owned())
    }
}

/// ClassConstName is the name of a class const, which are disjoint from
/// global constants, type constants, and other class members.
#[derive(Eq, PartialEq, EqModuloPos, Clone, Copy, Hash, Ord, PartialOrd)]
#[derive(Serialize, Deserialize)]
pub struct ClassConstName(pub Symbol);
common_impls!(ClassConstName);

/// Newtype over `Symbol` for type-constant names.
#[derive(Eq, PartialEq, EqModuloPos, Clone, Copy, Hash, Ord, PartialOrd)]
#[derive(Serialize, Deserialize)]
pub struct TypeConstName(pub Symbol);
common_impls!(TypeConstName);

/// Newtype over `Symbol` for method names.
#[derive(Eq, PartialEq, EqModuloPos, Clone, Copy, Hash, Ord, PartialOrd)]
#[derive(Serialize, Deserialize)]
pub struct MethodName(pub Symbol);
common_impls!(MethodName);

/// Newtype over `Symbol` for property names.
#[derive(Eq, PartialEq, EqModuloPos, Clone, Copy, Hash, Ord, PartialOrd)]
#[derive(Serialize, Deserialize)]
pub struct PropName(pub Symbol);
common_impls!(PropName);
Rust
hhvm/hphp/hack/src/hackrs/pos/to_oxidized.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use std::collections::BTreeMap; use std::collections::HashMap; use arena_trait::TrivialDrop; use indexmap::IndexMap; use indexmap::IndexSet; use ocamlrep::ToOcamlRep; use oxidized_by_ref::i_map::IMap; use oxidized_by_ref::s_map::SMap; use oxidized_by_ref::s_set::SSet; pub trait ToOxidized<'a> { type Output: TrivialDrop + Clone + ToOcamlRep + 'a; fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output; fn to_oxidized_ref(&self, arena: &'a bumpalo::Bump) -> &'a Self::Output { &*arena.alloc(self.to_oxidized(arena)) } } impl<'a, T: ToOxidized<'a>> ToOxidized<'a> for std::sync::Arc<T> { type Output = T::Output; fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output { (**self).to_oxidized(arena) } } impl<'a, T: ToOxidized<'a>> ToOxidized<'a> for Box<[T]> { type Output = &'a [T::Output]; fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output { arena.alloc_slice_fill_iter(self.iter().map(|x| x.to_oxidized(arena))) } } impl<'a, T: ToOxidized<'a>> ToOxidized<'a> for [T] { type Output = &'a [T::Output]; fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output { arena.alloc_slice_fill_iter(self.iter().map(|x| x.to_oxidized(arena))) } } impl<'a> ToOxidized<'a> for &str { type Output = &'a str; fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output { arena.alloc_str(self) } } impl<'a, T1: ToOxidized<'a>, T2: ToOxidized<'a>> ToOxidized<'a> for (T1, T2) { type Output = &'a (T1::Output, T2::Output); fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output { arena.alloc((self.0.to_oxidized(arena), self.1.to_oxidized(arena))) } } impl<'a, V: ToOxidized<'a>> ToOxidized<'a> for Option<V> { type Output = Option<V::Output>; fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output { self.as_ref().map(|x| x.to_oxidized(arena)) } } impl<'a, K: 
ToOxidized<'a, Output = &'a str>, V: ToOxidized<'a>> ToOxidized<'a> for BTreeMap<K, V> { type Output = SMap<'a, V::Output>; fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output { SMap::from( arena, self.iter() .map(|(k, v)| (k.to_oxidized(arena), v.to_oxidized(arena))), ) } } impl<'a, T: ToOxidized<'a, Output = &'a str>, S> ToOxidized<'a> for IndexSet<T, S> { type Output = SSet<'a>; fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output { SSet::from(arena, self.iter().map(|s| s.to_oxidized(arena))) } } impl<'a, K: ToOxidized<'a, Output = &'a str>, V: ToOxidized<'a>, S> ToOxidized<'a> for IndexMap<K, V, S> { type Output = SMap<'a, V::Output>; fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output { SMap::from( arena, self.iter() .map(|(k, v)| (k.to_oxidized(arena), v.to_oxidized(arena))), ) } } impl<'a, K: ToOxidized<'a, Output = &'a str>, V: ToOxidized<'a>, S> ToOxidized<'a> for HashMap<K, V, S> { type Output = SMap<'a, V::Output>; fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output { SMap::from( arena, self.iter() .map(|(k, v)| (k.to_oxidized(arena), v.to_oxidized(arena))), ) } } impl<'a, K: ToOxidized<'a, Output = isize>, V: ToOxidized<'a>> ToOxidized<'a> for &im::HashMap<K, V> { type Output = IMap<'a, &'a V::Output>; fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output { IMap::from( arena, self.iter() .map(|(k, v)| (k.to_oxidized(arena), &*arena.alloc(v.to_oxidized(arena)))), ) } }
TOML
hhvm/hphp/hack/src/hackrs/pos/cargo/pos/Cargo.toml
# @generated by autocargo [package] name = "pos" version = "0.0.0" edition = "2021" [lib] path = "../../pos.rs" [dependencies] arena_trait = { version = "0.0.0", path = "../../../../arena_trait" } bstr = { version = "1.4.0", features = ["serde", "std", "unicode"] } bumpalo = { version = "3.11.1", features = ["collections"] } eq_modulo_pos = { version = "0.0.0", path = "../../../../utils/eq_modulo_pos" } hh24_types = { version = "0.0.0", path = "../../../../utils/hh24_types" } im = { version = "15.1", features = ["rayon", "serde"] } indexmap = { version = "1.9.2", features = ["arbitrary", "rayon", "serde-1"] } intern = { version = "0.1.0", path = "../../../../utils/intern" } ocamlrep = { version = "0.1.0", git = "https://github.com/facebook/ocamlrep/", branch = "main" } oxidized = { version = "0.0.0", path = "../../../../oxidized" } oxidized_by_ref = { version = "0.0.0", path = "../../../../oxidized_by_ref" } relative_path = { version = "0.0.0", path = "../../../../utils/rust/relative_path" } serde = { version = "1.0.176", features = ["derive", "rc"] } static_assertions = "1.1.0"
Rust
hhvm/hphp/hack/src/hackrs/shallow_decl_provider/provider.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use std::sync::Arc; use decl_parser::DeclParser; use itertools::Itertools; use naming_provider::NamingProvider; use oxidized::naming_types::KindOfType; use pos::ConstName; use pos::FunName; use pos::ModuleName; use pos::RelativePath; use pos::TypeName; use ty::decl::shallow::NamedDecl; use ty::decl::ConstDecl; use ty::decl::FunDecl; use ty::decl::ModuleDecl; use ty::decl::ShallowClass; use ty::decl::TypedefDecl; use ty::reason::Reason; use super::Error; use super::Result; use super::ShallowDeclStore; use super::TypeDecl; /// A `ShallowDeclProvider` which, if the requested name is not present in its /// store, uses the given naming table to find the file containing the requested /// symbol, parses it with the given `DeclParser`, and inserts the parsed decls /// into its store. #[derive(Debug)] pub struct LazyShallowDeclProvider<R: Reason> { store: Arc<ShallowDeclStore<R>>, naming_provider: Arc<dyn NamingProvider>, parser: DeclParser<R>, } impl<R: Reason> LazyShallowDeclProvider<R> { pub fn new( store: Arc<ShallowDeclStore<R>>, naming_provider: Arc<dyn NamingProvider>, parser: DeclParser<R>, ) -> Self { Self { store, naming_provider, parser, } } pub fn parse_and_cache_decls_in(&self, path: RelativePath) -> Result<()> { let decls_result = self.parser.parse(path); let decls = decls_result.map_err(|file_provider_error| Error::DeclParse { path, file_provider_error, })?; self.dedup_and_add_decls(path, decls)?; Ok(()) } // NB: Must be manually kept in sync with // `hackrs_provider_backend::HhServerProviderBackend::dedup_and_add_decls` and // OCaml function `Direct_decl_utils.dedup_decls`. 
pub fn dedup_and_add_decls( &self, path: RelativePath, decls: impl IntoIterator<Item = NamedDecl<R>>, ) -> Result<()> { // dedup, taking the decl which was declared first syntactically let decls = decls .into_iter() .unique_by(|decl| (decl.name(), decl.name_kind())); // dedup with symbols declared in other files let decls = self.remove_naming_conflict_losers(path, decls)?; self.store.add_decls(decls)?; Ok(()) } /// If a symbol was also declared in another file, and that file /// was determined to be the winner in the naming table, remove /// its decl from the list. // // NB: Must be manually kept in sync with // `hackrs_provider_backend::HhServerProviderBackend::remove_naming_conflict_losers` // and OCaml function `Direct_decl_utils.remove_naming_conflict_losers`. fn remove_naming_conflict_losers( &self, path: RelativePath, decls: impl Iterator<Item = NamedDecl<R>>, ) -> Result<Vec<NamedDecl<R>>> { let mut winners = vec![]; for decl in decls { let path_opt = match decl { NamedDecl::Class(name, _) | NamedDecl::Typedef(name, _) => { self.naming_provider.get_type_path(name)? } NamedDecl::Fun(name, _) => self.naming_provider.get_fun_path(name)?, NamedDecl::Const(name, _) => self.naming_provider.get_const_path(name)?, NamedDecl::Module(name, _) => self.naming_provider.get_module_path(name)?, }; if path_opt.map_or(true, |p| p == path) { winners.push(decl) } } Ok(winners) } } impl<R: Reason> super::ShallowDeclProvider<R> for LazyShallowDeclProvider<R> { fn get_fun(&self, name: FunName) -> Result<Option<Arc<FunDecl<R>>>> { if let res @ Some(..) = self.store.get_fun(name)? { return Ok(res); } if let Some(path) = self.naming_provider.get_fun_path(name)? { self.parse_and_cache_decls_in(path)?; return Ok(self.store.get_fun(name)?); } Ok(None) } fn get_const(&self, name: ConstName) -> Result<Option<Arc<ConstDecl<R>>>> { if let res @ Some(..) = self.store.get_const(name)? { return Ok(res); } if let Some(path) = self.naming_provider.get_const_path(name)? 
{ self.parse_and_cache_decls_in(path)?; return Ok(self.store.get_const(name)?); } Ok(None) } fn get_module(&self, name: ModuleName) -> Result<Option<Arc<ModuleDecl<R>>>> { if let res @ Some(..) = self.store.get_module(name)? { return Ok(res); } if let Some(path) = self.naming_provider.get_module_path(name)? { self.parse_and_cache_decls_in(path)?; return Ok(self.store.get_module(name)?); } Ok(None) } fn get_type_kind(&self, name: TypeName) -> Result<Option<KindOfType>> { Ok(self .naming_provider .get_type_path_and_kind(name)? .map(|(_path, kind)| kind)) } fn get_type(&self, name: TypeName) -> Result<Option<TypeDecl<R>>> { if let Some(kind) = self.get_type_kind(name)? { match kind { KindOfType::TClass => Ok(self.get_class(name)?.map(|decl| TypeDecl::Class(decl))), KindOfType::TTypedef => { Ok(self.get_typedef(name)?.map(|decl| TypeDecl::Typedef(decl))) } } } else { Ok(None) } } fn get_typedef(&self, name: TypeName) -> Result<Option<Arc<TypedefDecl<R>>>> { if let res @ Some(..) = self.store.get_typedef(name)? { return Ok(res); } if let Some(path) = self.naming_provider.get_type_path(name)? { self.parse_and_cache_decls_in(path)?; return Ok(self.store.get_typedef(name)?); } Ok(None) } fn get_class(&self, name: TypeName) -> Result<Option<Arc<ShallowClass<R>>>> { if let res @ Some(..) = self.store.get_class(name)? { return Ok(res); } if let Some(path) = self.naming_provider.get_type_path(name)? { self.parse_and_cache_decls_in(path)?; return Ok(self.store.get_class(name)?); } Ok(None) } } /// A `ShallowDeclProvider` which assumes its store never evicts values and is /// fully populated with all shallow decls in the repository (i.e., the store /// must be eagerly populated in advance). 
#[derive(Debug)] pub struct EagerShallowDeclProvider<R: Reason> { store: Arc<ShallowDeclStore<R>>, } impl<R: Reason> EagerShallowDeclProvider<R> { pub fn new(store: Arc<ShallowDeclStore<R>>) -> Self { Self { store } } } impl<R: Reason> super::ShallowDeclProvider<R> for EagerShallowDeclProvider<R> { fn get_fun(&self, name: FunName) -> Result<Option<Arc<FunDecl<R>>>> { Ok(self.store.get_fun(name)?) } fn get_const(&self, name: ConstName) -> Result<Option<Arc<ConstDecl<R>>>> { Ok(self.store.get_const(name)?) } fn get_module(&self, name: ModuleName) -> Result<Option<Arc<ModuleDecl<R>>>> { Ok(self.store.get_module(name)?) } fn get_type_kind(&self, name: TypeName) -> Result<Option<KindOfType>> { if self.get_class(name)?.is_some() { Ok(Some(KindOfType::TClass)) } else if self.get_typedef(name)?.is_some() { Ok(Some(KindOfType::TTypedef)) } else { Ok(None) } } fn get_type(&self, name: TypeName) -> Result<Option<TypeDecl<R>>> { if let Some(class) = self.get_class(name)? { Ok(Some(TypeDecl::Class(class))) } else { Ok(self.get_typedef(name)?.map(TypeDecl::Typedef)) } } fn get_typedef(&self, name: TypeName) -> Result<Option<Arc<TypedefDecl<R>>>> { Ok(self.store.get_typedef(name)?) } fn get_class(&self, name: TypeName) -> Result<Option<Arc<ShallowClass<R>>>> { Ok(self.store.get_class(name)?) } }
Rust
hhvm/hphp/hack/src/hackrs/shallow_decl_provider/shallow_decl_provider.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use std::fmt::Debug; use std::sync::Arc; use oxidized::naming_types::KindOfType; use pos::ConstName; use pos::FunName; use pos::ModuleName; use pos::RelativePath; use pos::TypeName; use ty::decl::shallow::ConstDecl; use ty::decl::shallow::FunDecl; use ty::decl::shallow::ModuleDecl; use ty::decl::shallow::TypedefDecl; use ty::decl::ShallowClass; use ty::reason::Reason; mod provider; mod store; pub use provider::EagerShallowDeclProvider; pub use provider::LazyShallowDeclProvider; pub use store::ShallowDeclStore; pub type Result<T, E = Error> = std::result::Result<T, E>; #[derive(thiserror::Error, Debug)] pub enum Error { #[error("Failed to parse decls in {path:?}: {file_provider_error}")] DeclParse { path: RelativePath, #[source] file_provider_error: anyhow::Error, }, #[error("Unexpected error: {0}")] Unexpected(#[from] anyhow::Error), } #[derive(Clone, Debug, serde::Serialize, serde::Deserialize)] #[serde(bound = "R: Reason")] pub enum TypeDecl<R: Reason> { Class(Arc<ShallowClass<R>>), Typedef(Arc<TypedefDecl<R>>), } /// A get-or-compute interface for shallow decls (i.e., the type signature /// information syntactically available in a file; the output of the /// decl-parser). /// /// Consumers of a `ShallowDeclProvider` expect the member-type accessors /// (`get_method_type`, `get_constructor_type`, etc.) to be performant. For /// instance, if our `Store` implementations store data in a serialized format, /// looking up a method type should only deserialize that individual method, not /// the entire `ShallowClass` containing that method declaration. /// /// Quick lookup of method and property types is useful because types of methods /// and properties are omitted in the `FoldedClass` representation. 
This is done /// to reduce copying and overfetching in `FoldedDeclProvider` implementations /// which store data in a serialized format: we want to avoid copying the type /// of a method which is inherited by 1000 classes into 1000 separate serialized /// blobs. To avoid this, we serialize method types in a separate data store /// keyed by `(TypeName, MethodName)`. Each method is stored only once, for the /// class which defined the method. Inheritors must look up the method type /// using the name of the "origin" class which defined it. pub trait ShallowDeclProvider<R: Reason>: Debug + Send + Sync { /// Fetch the declaration of the toplevel function with the given name. fn get_fun(&self, name: FunName) -> Result<Option<Arc<FunDecl<R>>>>; /// Fetch the declaration of the global constant with the given name. fn get_const(&self, name: ConstName) -> Result<Option<Arc<ConstDecl<R>>>>; /// Fetch the declaration of the module with the given name. fn get_module(&self, name: ModuleName) -> Result<Option<Arc<ModuleDecl<R>>>>; /// Indicate whether the type with the given name is a typedef or class. fn get_type_kind(&self, name: TypeName) -> Result<Option<KindOfType>>; /// Fetch the declaration of the class or typedef with the given name. fn get_type(&self, name: TypeName) -> Result<Option<TypeDecl<R>>>; /// Fetch the declaration of the typedef with the given name. If the given /// name is bound to a class rather than a typedef, return `None`. fn get_typedef(&self, name: TypeName) -> Result<Option<Arc<TypedefDecl<R>>>>; /// Fetch the declaration of the class with the given name. If the given /// name is bound to a typedef rather than a class, return `None`. fn get_class(&self, name: TypeName) -> Result<Option<Arc<ShallowClass<R>>>>; }
Rust
hhvm/hphp/hack/src/hackrs/shallow_decl_provider/store.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use std::sync::Arc; use anyhow::Result; use datastore::Store; use pos::ConstName; use pos::FunName; use pos::MethodName; use pos::ModuleName; use pos::PropName; use pos::TypeName; use ty::decl::shallow::ModuleDecl; use ty::decl::shallow::NamedDecl; use ty::decl::ConstDecl; use ty::decl::FunDecl; use ty::decl::ShallowClass; use ty::decl::Ty; use ty::decl::TypedefDecl; use ty::reason::Reason; /// A datastore for shallow declarations (i.e., the information we get from /// decl-parsing a file). The backing datastores are permitted to evict their /// contents at any time. /// /// Consumers of a `ShallowDeclStore` expect the member-type accessors /// (`get_method_type`, `get_constructor_type`, etc.) to be performant. For /// instance, if our `Store` implementations store data in a serialized format, /// looking up a method type should only deserialize that individual method, not /// the entire `ShallowClass` containing that method declaration. #[derive(Debug)] pub struct ShallowDeclStore<R: Reason> { classes: Arc<dyn Store<TypeName, Arc<ShallowClass<R>>>>, typedefs: Arc<dyn Store<TypeName, Arc<TypedefDecl<R>>>>, funs: Arc<dyn Store<FunName, Arc<FunDecl<R>>>>, consts: Arc<dyn Store<ConstName, Arc<ConstDecl<R>>>>, modules: Arc<dyn Store<ModuleName, Arc<ModuleDecl<R>>>>, // The below tables are intended to be index tables for information stored // in the `classes` table (the underlying data is shared via the `Hc` in // `Ty`). When inserting or removing from the `classes` table, these // indices must be updated. 
properties: Arc<dyn Store<(TypeName, PropName), Ty<R>>>, static_properties: Arc<dyn Store<(TypeName, PropName), Ty<R>>>, methods: Arc<dyn Store<(TypeName, MethodName), Ty<R>>>, static_methods: Arc<dyn Store<(TypeName, MethodName), Ty<R>>>, constructors: Arc<dyn Store<TypeName, Ty<R>>>, } impl<R: Reason> ShallowDeclStore<R> { pub fn new( classes: Arc<dyn Store<TypeName, Arc<ShallowClass<R>>>>, typedefs: Arc<dyn Store<TypeName, Arc<TypedefDecl<R>>>>, funs: Arc<dyn Store<FunName, Arc<FunDecl<R>>>>, consts: Arc<dyn Store<ConstName, Arc<ConstDecl<R>>>>, modules: Arc<dyn Store<ModuleName, Arc<ModuleDecl<R>>>>, properties: Arc<dyn Store<(TypeName, PropName), Ty<R>>>, static_properties: Arc<dyn Store<(TypeName, PropName), Ty<R>>>, methods: Arc<dyn Store<(TypeName, MethodName), Ty<R>>>, static_methods: Arc<dyn Store<(TypeName, MethodName), Ty<R>>>, constructors: Arc<dyn Store<TypeName, Ty<R>>>, ) -> Self { Self { classes, typedefs, funs, consts, modules, properties, static_properties, methods, static_methods, constructors, } } /// Construct a `ShallowDeclStore` which looks up class members from the /// given `classes` table rather than maintaining separate member stores. /// Intended to be used with `Store` implementations which hold on to /// hash-consed `Ty`s in memory (rather than storing them in a /// serialized format), so that looking up individual members doesn't /// involve deserializing an entire `ShallowClass`. 
pub fn with_no_member_stores( classes: Arc<dyn Store<TypeName, Arc<ShallowClass<R>>>>, typedefs: Arc<dyn Store<TypeName, Arc<TypedefDecl<R>>>>, funs: Arc<dyn Store<FunName, Arc<FunDecl<R>>>>, consts: Arc<dyn Store<ConstName, Arc<ConstDecl<R>>>>, modules: Arc<dyn Store<ModuleName, Arc<ModuleDecl<R>>>>, ) -> Self { Self { properties: Arc::new(PropFinder { classes: Arc::clone(&classes), }), static_properties: Arc::new(StaticPropFinder { classes: Arc::clone(&classes), }), methods: Arc::new(MethodFinder { classes: Arc::clone(&classes), }), static_methods: Arc::new(StaticMethodFinder { classes: Arc::clone(&classes), }), constructors: Arc::new(ConstructorFinder { classes: Arc::clone(&classes), }), classes, typedefs, funs, consts, modules, } } pub fn add_decls(&self, decls: impl IntoIterator<Item = NamedDecl<R>>) -> Result<()> { for decl in decls.into_iter() { match decl { NamedDecl::Class(name, decl) => self.add_class(name, Arc::new(decl))?, NamedDecl::Fun(name, decl) => self.funs.insert(name, Arc::new(decl))?, NamedDecl::Typedef(name, decl) => self.typedefs.insert(name, Arc::new(decl))?, NamedDecl::Const(name, decl) => self.consts.insert(name, Arc::new(decl))?, NamedDecl::Module(name, decl) => self.modules.insert(name, Arc::new(decl))?, } } Ok(()) } pub fn get_fun(&self, name: FunName) -> Result<Option<Arc<FunDecl<R>>>> { self.funs.get(name) } pub fn get_const(&self, name: ConstName) -> Result<Option<Arc<ConstDecl<R>>>> { self.consts.get(name) } pub fn get_module(&self, name: ModuleName) -> Result<Option<Arc<ModuleDecl<R>>>> { self.modules.get(name) } pub fn get_class(&self, name: TypeName) -> Result<Option<Arc<ShallowClass<R>>>> { self.classes.get(name) } pub fn get_typedef(&self, name: TypeName) -> Result<Option<Arc<TypedefDecl<R>>>> { self.typedefs.get(name) } pub fn get_property_type( &self, class_name: TypeName, property_name: PropName, ) -> Result<Option<Ty<R>>> { self.properties.get((class_name, property_name)) } pub fn get_static_property_type( &self, class_name: 
TypeName, property_name: PropName, ) -> Result<Option<Ty<R>>> { self.static_properties.get((class_name, property_name)) } pub fn get_method_type( &self, class_name: TypeName, method_name: MethodName, ) -> Result<Option<Ty<R>>> { self.methods.get((class_name, method_name)) } pub fn get_static_method_type( &self, class_name: TypeName, method_name: MethodName, ) -> Result<Option<Ty<R>>> { self.static_methods.get((class_name, method_name)) } pub fn get_constructor_type(&self, class_name: TypeName) -> Result<Option<Ty<R>>> { self.constructors.get(class_name) } fn add_class(&self, name: TypeName, cls: Arc<ShallowClass<R>>) -> Result<()> { let cid = cls.name.id(); for prop in cls.props.iter().rev() { self.properties .insert((cid, prop.name.id()), prop.ty.clone())? } for prop in cls.static_props.iter().rev() { self.static_properties .insert((cid, prop.name.id()), prop.ty.clone())? } for meth in cls.methods.iter().rev() { self.methods .insert((cid, meth.name.id()), meth.ty.clone())? } for meth in cls.static_methods.iter().rev() { self.static_methods .insert((cid, meth.name.id()), meth.ty.clone())? } if let Some(constructor) = &cls.constructor { self.constructors.insert(cid, constructor.ty.clone())? } self.classes.insert(name, cls)?; Ok(()) } } /// Looks up props from the `classes` Store instead of storing them separately. 
#[derive(Debug)] struct PropFinder<R: Reason> { classes: Arc<dyn Store<TypeName, Arc<ShallowClass<R>>>>, } impl<R: Reason> Store<(TypeName, PropName), Ty<R>> for PropFinder<R> { fn get(&self, (class_name, property_name): (TypeName, PropName)) -> Result<Option<Ty<R>>> { Ok(self.classes.get(class_name)?.and_then(|cls| { cls.props.iter().rev().find_map(|prop| { if prop.name.id() == property_name { Some(prop.ty.clone()) } else { None } }) })) } fn insert(&self, _: (TypeName, PropName), _: Ty<R>) -> Result<()> { Ok(()) } fn remove_batch(&self, _: &mut dyn Iterator<Item = (TypeName, PropName)>) -> Result<()> { Ok(()) } } /// Looks up props from the `classes` Store instead of storing them separately. #[derive(Debug)] struct StaticPropFinder<R: Reason> { classes: Arc<dyn Store<TypeName, Arc<ShallowClass<R>>>>, } impl<R: Reason> Store<(TypeName, PropName), Ty<R>> for StaticPropFinder<R> { fn get(&self, (class_name, property_name): (TypeName, PropName)) -> Result<Option<Ty<R>>> { Ok(self.classes.get(class_name)?.and_then(|cls| { cls.static_props.iter().rev().find_map(|prop| { if prop.name.id() == property_name { Some(prop.ty.clone()) } else { None } }) })) } fn insert(&self, _: (TypeName, PropName), _: Ty<R>) -> Result<()> { Ok(()) } fn remove_batch(&self, _: &mut dyn Iterator<Item = (TypeName, PropName)>) -> Result<()> { Ok(()) } } /// Looks up methods from the `classes` Store instead of storing them separately. 
#[derive(Debug)] struct MethodFinder<R: Reason> { classes: Arc<dyn Store<TypeName, Arc<ShallowClass<R>>>>, } impl<R: Reason> Store<(TypeName, MethodName), Ty<R>> for MethodFinder<R> { fn get(&self, (class_name, method_name): (TypeName, MethodName)) -> Result<Option<Ty<R>>> { Ok(self.classes.get(class_name)?.and_then(|cls| { cls.methods.iter().rev().find_map(|meth| { if meth.name.id() == method_name { Some(meth.ty.clone()) } else { None } }) })) } fn insert(&self, _: (TypeName, MethodName), _: Ty<R>) -> Result<()> { Ok(()) } fn remove_batch(&self, _: &mut dyn Iterator<Item = (TypeName, MethodName)>) -> Result<()> { Ok(()) } } /// Looks up methods from the `classes` Store instead of storing them separately. #[derive(Debug)] struct StaticMethodFinder<R: Reason> { classes: Arc<dyn Store<TypeName, Arc<ShallowClass<R>>>>, } impl<R: Reason> Store<(TypeName, MethodName), Ty<R>> for StaticMethodFinder<R> { fn get(&self, (class_name, method_name): (TypeName, MethodName)) -> Result<Option<Ty<R>>> { Ok(self.classes.get(class_name)?.and_then(|cls| { cls.static_methods.iter().rev().find_map(|meth| { if meth.name.id() == method_name { Some(meth.ty.clone()) } else { None } }) })) } fn insert(&self, _: (TypeName, MethodName), _: Ty<R>) -> Result<()> { Ok(()) } fn remove_batch(&self, _: &mut dyn Iterator<Item = (TypeName, MethodName)>) -> Result<()> { Ok(()) } } /// Looks up constructors from the `classes` Store instead of storing them separately. #[derive(Debug)] struct ConstructorFinder<R: Reason> { classes: Arc<dyn Store<TypeName, Arc<ShallowClass<R>>>>, } impl<R: Reason> Store<TypeName, Ty<R>> for ConstructorFinder<R> { fn get(&self, class_name: TypeName) -> Result<Option<Ty<R>>> { Ok(self .classes .get(class_name)? .and_then(|cls| cls.constructor.as_ref().map(|meth| meth.ty.clone()))) } fn insert(&self, _: TypeName, _: Ty<R>) -> Result<()> { Ok(()) } fn remove_batch(&self, _: &mut dyn Iterator<Item = TypeName>) -> Result<()> { Ok(()) } }
TOML
hhvm/hphp/hack/src/hackrs/shallow_decl_provider/cargo/shallow_decl_provider/Cargo.toml
# @generated by autocargo [package] name = "shallow_decl_provider" version = "0.0.0" edition = "2021" [lib] path = "../../shallow_decl_provider.rs" [dependencies] anyhow = "1.0.71" datastore = { version = "0.0.0", path = "../../../datastore" } decl_parser = { version = "0.0.0", path = "../../../decl_parser/cargo/decl_parser" } itertools = "0.10.3" naming_provider = { version = "0.0.0", path = "../../../naming_provider/cargo/naming_provider" } oxidized = { version = "0.0.0", path = "../../../../oxidized" } pos = { version = "0.0.0", path = "../../../pos/cargo/pos" } serde = { version = "1.0.176", features = ["derive", "rc"] } thiserror = "1.0.43" ty = { version = "0.0.0", path = "../../../ty/cargo/ty" }
Rust
hhvm/hphp/hack/src/hackrs/special_names/special_names.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. // These use the same casing as naming_special_names.ml for now. #![allow(non_upper_case_globals)] use hash::HashSet; use naming_special_names_rust as sn; use once_cell::sync::Lazy; use pos::ClassConstName; use pos::ConstName; use pos::FunName; use pos::MethodName; use pos::PropName; use pos::Symbol; use pos::TypeConstName; use pos::TypeName; macro_rules! lazy { ($value:expr) => { Lazy::new(|| $value.into()) }; } fn concat<S1: AsRef<str>, S2: AsRef<str>>(s1: S1, s2: S2) -> String { format!("{}{}", s1.as_ref(), s2.as_ref()) } pub fn types() -> impl Iterator<Item = TypeName> { classes::iter() .chain(collections::iter()) .chain(attribute_kinds::iter()) .chain(user_attributes::iter()) .chain(fb::types()) .chain(shapes::types()) .chain(regex::types()) .chain(coeffects::types()) .chain(capabilities::iter()) } pub fn functions() -> impl Iterator<Item = FunName> { autoimported_functions::iter() .chain(pseudo_functions::iter()) .chain(stdlib_functions::iter()) .chain(fb::functions()) .chain(hh::iter()) } pub mod classes { use super::*; pub static cParent: Lazy<TypeName> = lazy!(sn::classes::PARENT); pub static cStatic: Lazy<TypeName> = lazy!(sn::classes::STATIC); pub static cSelf: Lazy<TypeName> = lazy!(sn::classes::SELF); pub static cUnknown: Lazy<TypeName> = lazy!(sn::classes::UNKNOWN); pub static cAwaitable: Lazy<TypeName> = lazy!(sn::classes::AWAITABLE); pub static cGenerator: Lazy<TypeName> = lazy!(sn::classes::GENERATOR); pub static cAsyncGenerator: Lazy<TypeName> = lazy!(sn::classes::ASYNC_GENERATOR); pub static cHHFormatString: Lazy<TypeName> = lazy!(sn::classes::HH_FORMAT_STRING); pub static cHH_BuiltinEnum: Lazy<TypeName> = lazy!(sn::classes::HH_BUILTIN_ENUM); pub static cHH_BuiltinEnumClass: Lazy<TypeName> = lazy!(sn::classes::HH_BUILTIN_ENUM_CLASS); pub static 
cHH_BuiltinAbstractEnumClass: Lazy<TypeName> = lazy!(sn::classes::HH_BUILTIN_ABSTRACT_ENUM_CLASS); pub static cThrowable: Lazy<TypeName> = lazy!(sn::classes::THROWABLE); pub static cStdClass: Lazy<TypeName> = lazy!(sn::classes::STD_CLASS); pub static cDateTime: Lazy<TypeName> = lazy!(sn::classes::DATE_TIME); pub static cDateTimeImmutable: Lazy<TypeName> = lazy!(sn::classes::DATE_TIME_IMMUTABLE); pub static cAsyncIterator: Lazy<TypeName> = lazy!(sn::classes::ASYNC_ITERATOR); pub static cAsyncKeyedIterator: Lazy<TypeName> = lazy!(sn::classes::ASYNC_KEYED_ITERATOR); pub static cStringish: Lazy<TypeName> = lazy!(sn::classes::STRINGISH); pub static cStringishObject: Lazy<TypeName> = lazy!(sn::classes::STRINGISH_OBJECT); pub static cXHPChild: Lazy<TypeName> = lazy!(sn::classes::XHP_CHILD); pub static cIMemoizeParam: Lazy<TypeName> = lazy!(sn::classes::IMEMOIZE_PARAM); pub static cUNSAFESingletonMemoizeParam: Lazy<TypeName> = lazy!(sn::classes::UNSAFE_SINGLETON_MEMOIZE_PARAM); pub static cClassname: Lazy<TypeName> = lazy!(sn::classes::CLASS_NAME); pub static cTypename: Lazy<TypeName> = lazy!(sn::classes::TYPE_NAME); pub static cIDisposable: Lazy<TypeName> = lazy!(sn::classes::IDISPOSABLE); pub static cIAsyncDisposable: Lazy<TypeName> = lazy!(sn::classes::IASYNC_DISPOSABLE); pub static cMemberOf: Lazy<TypeName> = lazy!(sn::classes::MEMBER_OF); pub static cEnumClassLabel: Lazy<TypeName> = lazy!(sn::classes::ENUM_CLASS_LABEL); pub static cSpliceable: Lazy<TypeName> = lazy!(sn::classes::SPLICEABLE); pub static cSupportDyn: Lazy<TypeName> = lazy!(sn::classes::SUPPORT_DYN); pub fn iter_keywords() -> impl Iterator<Item = TypeName> { [*cParent, *cStatic, *cSelf].into_iter() } pub fn iter() -> impl Iterator<Item = TypeName> { [ *cUnknown, *cAwaitable, *cGenerator, *cAsyncGenerator, *cHHFormatString, *cHH_BuiltinEnum, *cHH_BuiltinEnumClass, *cHH_BuiltinAbstractEnumClass, *cThrowable, *cStdClass, *cDateTime, *cDateTimeImmutable, *cAsyncIterator, *cAsyncKeyedIterator, *cStringish, 
*cStringishObject, *cXHPChild, *cIMemoizeParam, *cUNSAFESingletonMemoizeParam, *cClassname, *cTypename, *cIDisposable, *cIAsyncDisposable, *cMemberOf, *cEnumClassLabel, *cSpliceable, *cSupportDyn, ] .into_iter() } } pub mod collections { use super::*; // concrete classes pub static cVector: Lazy<TypeName> = lazy!(sn::collections::VECTOR); pub static cMutableVector: Lazy<TypeName> = lazy!(sn::collections::MUTABLE_VECTOR); pub static cImmVector: Lazy<TypeName> = lazy!(sn::collections::IMM_VECTOR); pub static cSet: Lazy<TypeName> = lazy!(sn::collections::SET); pub static cConstSet: Lazy<TypeName> = lazy!(sn::collections::CONST_SET); pub static cMutableSet: Lazy<TypeName> = lazy!(sn::collections::MUTABLE_SET); pub static cImmSet: Lazy<TypeName> = lazy!(sn::collections::IMM_SET); pub static cMap: Lazy<TypeName> = lazy!(sn::collections::MAP); pub static cMutableMap: Lazy<TypeName> = lazy!(sn::collections::MUTABLE_MAP); pub static cImmMap: Lazy<TypeName> = lazy!(sn::collections::IMM_MAP); pub static cPair: Lazy<TypeName> = lazy!(sn::collections::PAIR); // interfaces pub static cContainer: Lazy<TypeName> = lazy!(sn::collections::CONTAINER); pub static cKeyedContainer: Lazy<TypeName> = lazy!(sn::collections::KEYED_CONTAINER); pub static cTraversable: Lazy<TypeName> = lazy!(sn::collections::TRAVERSABLE); pub static cKeyedTraversable: Lazy<TypeName> = lazy!(sn::collections::KEYED_TRAVERSABLE); pub static cCollection: Lazy<TypeName> = lazy!(sn::collections::COLLECTION); pub static cConstVector: Lazy<TypeName> = lazy!(sn::collections::CONST_VECTOR); pub static cConstMap: Lazy<TypeName> = lazy!(sn::collections::CONST_MAP); pub static cConstCollection: Lazy<TypeName> = lazy!(sn::collections::CONST_COLLECTION); pub static cAnyArray: Lazy<TypeName> = lazy!(sn::collections::ANY_ARRAY); pub static cDict: Lazy<TypeName> = lazy!(sn::collections::DICT); pub static cVec: Lazy<TypeName> = lazy!(sn::collections::VEC); pub static cKeyset: Lazy<TypeName> = lazy!(sn::collections::KEYSET); pub 
fn iter() -> impl Iterator<Item = TypeName> { [ *cVector, *cMutableVector, *cImmVector, *cSet, *cConstSet, *cMutableSet, *cImmSet, *cMap, *cMutableMap, *cImmMap, *cPair, *cContainer, *cKeyedContainer, *cTraversable, *cKeyedTraversable, *cCollection, *cConstVector, *cConstMap, *cConstCollection, *cAnyArray, *cDict, *cVec, *cKeyset, ] .into_iter() } } pub mod members { use super::*; pub static mGetInstanceKey: Lazy<MethodName> = lazy!(sn::members::M_GET_INSTANCE_KEY); pub static mClass: Lazy<ClassConstName> = lazy!(sn::members::M_CLASS); pub static __construct: Lazy<MethodName> = lazy!(sn::members::__CONSTRUCT); pub static __destruct: Lazy<MethodName> = lazy!(sn::members::__DESTRUCT); pub static __call: Lazy<MethodName> = lazy!(sn::members::__CALL); pub static __callStatic: Lazy<MethodName> = lazy!(sn::members::__CALL_STATIC); pub static __clone: Lazy<MethodName> = lazy!(sn::members::__CLONE); pub static __debugInfo: Lazy<MethodName> = lazy!(sn::members::__DEBUG_INFO); pub static __dispose: Lazy<MethodName> = lazy!(sn::members::__DISPOSE); pub static __disposeAsync: Lazy<MethodName> = lazy!(sn::members::__DISPOSE_ASYNC); pub static __get: Lazy<MethodName> = lazy!(sn::members::__GET); pub static __invoke: Lazy<MethodName> = lazy!(sn::members::__INVOKE); pub static __isset: Lazy<MethodName> = lazy!(sn::members::__ISSET); pub static __set: Lazy<MethodName> = lazy!(sn::members::__SET); pub static __set_state: Lazy<MethodName> = lazy!(sn::members::__SET_STATE); pub static __sleep: Lazy<MethodName> = lazy!(sn::members::__SLEEP); pub static __toString: Lazy<MethodName> = lazy!(sn::members::__TO_STRING); pub static __unset: Lazy<MethodName> = lazy!(sn::members::__UNSET); pub static __wakeup: Lazy<MethodName> = lazy!(sn::members::__WAKEUP); /// Not really a PropName, but it's treated as one in deferred_init_members /// of folded decls. 
pub static parentConstruct: Lazy<PropName> = lazy!(concat("parent::", *__construct)); } pub mod attribute_kinds { use super::*; pub static cls: Lazy<TypeName> = lazy!(sn::attribute_kinds::CLS); pub static clscst: Lazy<TypeName> = lazy!(sn::attribute_kinds::CLS_CST); pub static enum_: Lazy<TypeName> = lazy!(sn::attribute_kinds::ENUM); pub static typealias: Lazy<TypeName> = lazy!(sn::attribute_kinds::TYPE_ALIAS); pub static fn_: Lazy<TypeName> = lazy!(sn::attribute_kinds::FN); pub static mthd: Lazy<TypeName> = lazy!(sn::attribute_kinds::MTHD); pub static instProperty: Lazy<TypeName> = lazy!(sn::attribute_kinds::INST_PROPERTY); pub static staticProperty: Lazy<TypeName> = lazy!(sn::attribute_kinds::STATIC_PROPERTY); pub static parameter: Lazy<TypeName> = lazy!(sn::attribute_kinds::PARAMETER); pub static typeparam: Lazy<TypeName> = lazy!(sn::attribute_kinds::TYPE_PARAM); pub static file: Lazy<TypeName> = lazy!(sn::attribute_kinds::FILE); pub static typeconst: Lazy<TypeName> = lazy!(sn::attribute_kinds::TYPE_CONST); pub static lambda: Lazy<TypeName> = lazy!(sn::attribute_kinds::LAMBDA); pub static enumcls: Lazy<TypeName> = lazy!(sn::attribute_kinds::ENUM_CLS); pub fn iter() -> impl Iterator<Item = TypeName> { [ *cls, *clscst, *enum_, *typealias, *fn_, *mthd, *instProperty, *staticProperty, *parameter, *typeparam, *file, *typeconst, *lambda, *enumcls, ] .into_iter() } } pub mod user_attributes { use super::*; pub static uaOverride: Lazy<TypeName> = lazy!(sn::user_attributes::OVERRIDE); pub static uaConsistentConstruct: Lazy<TypeName> = lazy!(sn::user_attributes::CONSISTENT_CONSTRUCT); pub static uaConst: Lazy<TypeName> = lazy!(sn::user_attributes::CONST); pub static uaDeprecated: Lazy<TypeName> = lazy!(sn::user_attributes::DEPRECATED); pub static uaEntryPoint: Lazy<TypeName> = lazy!(sn::user_attributes::ENTRY_POINT); pub static uaMemoize: Lazy<TypeName> = lazy!(sn::user_attributes::MEMOIZE); pub static uaMemoizeLSB: Lazy<TypeName> = 
lazy!(sn::user_attributes::MEMOIZE_LSB); pub static uaPHPStdLib: Lazy<TypeName> = lazy!(sn::user_attributes::PHP_STD_LIB); pub static uaAcceptDisposable: Lazy<TypeName> = lazy!(sn::user_attributes::ACCEPT_DISPOSABLE); pub static uaReturnDisposable: Lazy<TypeName> = lazy!(sn::user_attributes::RETURN_DISPOSABLE); pub static uaLSB: Lazy<TypeName> = lazy!(sn::user_attributes::LSB); pub static uaSealed: Lazy<TypeName> = lazy!(sn::user_attributes::SEALED); pub static uaLateInit: Lazy<TypeName> = lazy!(sn::user_attributes::LATE_INIT); pub static uaNewable: Lazy<TypeName> = lazy!(sn::user_attributes::NEWABLE); pub static uaEnforceable: Lazy<TypeName> = lazy!(sn::user_attributes::ENFORCEABLE); pub static uaExplicit: Lazy<TypeName> = lazy!(sn::user_attributes::EXPLICIT); pub static uaNonDisjoint: Lazy<TypeName> = lazy!(sn::user_attributes::NON_DISJOINT); pub static uaSoft: Lazy<TypeName> = lazy!(sn::user_attributes::SOFT); pub static uaWarn: Lazy<TypeName> = lazy!(sn::user_attributes::WARN); pub static uaMockClass: Lazy<TypeName> = lazy!(sn::user_attributes::MOCK_CLASS); pub static uaProvenanceSkipFrame: Lazy<TypeName> = lazy!(sn::user_attributes::PROVENANCE_SKIP_FRAME); pub static uaDynamicallyCallable: Lazy<TypeName> = lazy!(sn::user_attributes::DYNAMICALLY_CALLABLE); pub static uaDynamicallyConstructible: Lazy<TypeName> = lazy!(sn::user_attributes::DYNAMICALLY_CONSTRUCTIBLE); pub static uaReifiable: Lazy<TypeName> = lazy!(sn::user_attributes::REIFIABLE); pub static uaNeverInline: Lazy<TypeName> = lazy!(sn::user_attributes::NEVER_INLINE); pub static uaDisableTypecheckerInternal: Lazy<TypeName> = lazy!(sn::user_attributes::DISABLE_TYPECHECKER_INTERNAL); pub static uaHasTopLevelCode: Lazy<TypeName> = lazy!(sn::user_attributes::HAS_TOP_LEVEL_CODE); pub static uaIsFoldable: Lazy<TypeName> = lazy!(sn::user_attributes::IS_FOLDABLE); pub static uaNative: Lazy<TypeName> = lazy!(sn::user_attributes::NATIVE); pub static uaOutOnly: Lazy<TypeName> = 
lazy!(sn::user_attributes::OUT_ONLY); pub static uaAlwaysInline: Lazy<TypeName> = lazy!(sn::user_attributes::ALWAYS_INLINE); pub static uaEnableUnstableFeatures: Lazy<TypeName> = lazy!(sn::user_attributes::ENABLE_UNSTABLE_FEATURES); pub static uaEnumClass: Lazy<TypeName> = lazy!(sn::user_attributes::ENUM_CLASS); pub static uaPolicied: Lazy<TypeName> = lazy!(sn::user_attributes::POLICIED); pub static uaInferFlows: Lazy<TypeName> = lazy!(sn::user_attributes::INFERFLOWS); pub static uaExternal: Lazy<TypeName> = lazy!(sn::user_attributes::EXTERNAL); pub static uaCanCall: Lazy<TypeName> = lazy!(sn::user_attributes::CAN_CALL); pub static uaSupportDynamicType: Lazy<TypeName> = lazy!(sn::user_attributes::SUPPORT_DYNAMIC_TYPE); pub static uaNoAutoDynamic: Lazy<TypeName> = lazy!(sn::user_attributes::NO_AUTO_DYNAMIC); pub static uaNoAutoBound: Lazy<TypeName> = lazy!(sn::user_attributes::NO_AUTO_BOUND); pub static uaRequireDynamic: Lazy<TypeName> = lazy!(sn::user_attributes::REQUIRE_DYNAMIC); pub static uaEnableMethodTraitDiamond: Lazy<TypeName> = lazy!(sn::user_attributes::ENABLE_METHOD_TRAIT_DIAMOND); pub static uaIgnoreReadonlyLocalErrors: Lazy<TypeName> = lazy!(sn::user_attributes::IGNORE_READONLY_LOCAL_ERRORS); pub static uaIgnoreCoeffectLocalErrors: Lazy<TypeName> = lazy!(sn::user_attributes::IGNORE_COEFFECT_LOCAL_ERRORS); pub static uaModuleLevelTrait: Lazy<TypeName> = lazy!(sn::user_attributes::MODULE_LEVEL_TRAIT); pub fn iter() -> impl Iterator<Item = TypeName> { [ *uaOverride, *uaConsistentConstruct, *uaConst, *uaDeprecated, *uaEntryPoint, *uaMemoize, *uaMemoizeLSB, *uaPHPStdLib, *uaAcceptDisposable, *uaReturnDisposable, *uaLSB, *uaSealed, *uaLateInit, *uaNewable, *uaEnforceable, *uaExplicit, *uaNonDisjoint, *uaSoft, *uaWarn, *uaMockClass, *uaProvenanceSkipFrame, *uaDynamicallyCallable, *uaDynamicallyConstructible, *uaReifiable, *uaNeverInline, *uaDisableTypecheckerInternal, *uaHasTopLevelCode, *uaIsFoldable, *uaNative, *uaOutOnly, *uaAlwaysInline, 
*uaEnableUnstableFeatures, *uaEnumClass, *uaPolicied, *uaInferFlows, *uaExternal, *uaCanCall, *uaSupportDynamicType, *uaRequireDynamic, *uaEnableMethodTraitDiamond, *uaIgnoreReadonlyLocalErrors, *uaIgnoreCoeffectLocalErrors, ] .into_iter() } } pub mod special_functions { use super::*; pub static echo: Lazy<FunName> = lazy!(sn::special_functions::ECHO); } pub mod autoimported_functions { use super::*; pub static invariant_violation: Lazy<FunName> = lazy!(sn::autoimported_functions::INVARIANT_VIOLATION); pub static invariant: Lazy<FunName> = lazy!(sn::autoimported_functions::INVARIANT); pub static meth_caller: Lazy<FunName> = lazy!(sn::autoimported_functions::METH_CALLER); pub fn iter() -> impl Iterator<Item = FunName> { [*invariant_violation, *invariant, *meth_caller].into_iter() } } pub mod special_idents { use super::*; pub static this: Lazy<Symbol> = lazy!(sn::special_idents::THIS); pub static placeholder: Lazy<Symbol> = lazy!(sn::special_idents::PLACEHOLDER); pub static dollardollar: Lazy<Symbol> = lazy!(sn::special_idents::DOLLAR_DOLLAR); pub static tmp_var_prefix: Lazy<Symbol> = lazy!(sn::special_idents::TMP_VAR_PREFIX); } pub mod pseudo_functions { use super::*; pub static isset: Lazy<FunName> = lazy!(sn::pseudo_functions::ISSET); pub static unset: Lazy<FunName> = lazy!(sn::pseudo_functions::UNSET); pub static hh_show: Lazy<FunName> = lazy!(sn::pseudo_functions::HH_SHOW); pub static hh_expect: Lazy<FunName> = lazy!(sn::pseudo_functions::HH_EXPECT); pub static hh_expect_equivalent: Lazy<FunName> = lazy!(sn::pseudo_functions::HH_EXPECT_EQUIVALENT); pub static hh_show_env: Lazy<FunName> = lazy!(sn::pseudo_functions::HH_SHOW_ENV); pub static hh_log_level: Lazy<FunName> = lazy!(sn::pseudo_functions::HH_LOG_LEVEL); pub static hh_force_solve: Lazy<FunName> = lazy!(sn::pseudo_functions::HH_FORCE_SOLVE); pub static hh_loop_forever: Lazy<FunName> = lazy!(sn::pseudo_functions::HH_LOOP_FOREVER); pub static echo: Lazy<FunName> = lazy!(sn::pseudo_functions::ECHO); pub 
static empty: Lazy<FunName> = lazy!(sn::pseudo_functions::EMPTY); pub static exit: Lazy<FunName> = lazy!(sn::pseudo_functions::EXIT); pub static die: Lazy<FunName> = lazy!(sn::pseudo_functions::DIE); pub static unsafe_cast: Lazy<FunName> = lazy!(sn::pseudo_functions::UNSAFE_CAST); pub static unsafe_nonnull_cast: Lazy<FunName> = lazy!(sn::pseudo_functions::UNSAFE_NONNULL_CAST); pub static enforced_cast: Lazy<FunName> = lazy!(sn::pseudo_functions::ENFORCED_CAST); pub fn iter() -> impl Iterator<Item = FunName> { [ *isset, *unset, *hh_show, *hh_expect, *hh_expect_equivalent, *hh_show_env, *hh_log_level, *hh_force_solve, *hh_loop_forever, *echo, *empty, *exit, *die, *unsafe_cast, *unsafe_nonnull_cast, *enforced_cast, ] .into_iter() } } pub mod stdlib_functions { use super::*; pub static is_array: Lazy<FunName> = lazy!(sn::std_lib_functions::IS_ARRAY); pub static is_null: Lazy<FunName> = lazy!(sn::std_lib_functions::IS_NULL); pub static get_class: Lazy<FunName> = lazy!(sn::std_lib_functions::GET_CLASS); pub static array_filter: Lazy<FunName> = lazy!(sn::std_lib_functions::ARRAY_FILTER); pub static call_user_func: Lazy<FunName> = lazy!(sn::std_lib_functions::CALL_USER_FUNC); pub static type_structure: Lazy<FunName> = lazy!(sn::std_lib_functions::TYPE_STRUCTURE); pub static array_mark_legacy: Lazy<FunName> = lazy!(sn::std_lib_functions::ARRAY_MARK_LEGACY); pub static array_unmark_legacy: Lazy<FunName> = lazy!(sn::std_lib_functions::ARRAY_UNMARK_LEGACY); pub static is_php_array: Lazy<FunName> = lazy!(sn::std_lib_functions::IS_PHP_ARRAY); pub static is_any_array: Lazy<FunName> = lazy!(sn::std_lib_functions::IS_ANY_ARRAY); pub static is_dict_or_darray: Lazy<FunName> = lazy!(sn::std_lib_functions::IS_DICT_OR_DARRAY); pub static is_vec_or_varray: Lazy<FunName> = lazy!(sn::std_lib_functions::IS_VEC_OR_VARRAY); pub fn iter() -> impl Iterator<Item = FunName> { [ *is_array, *is_null, *get_class, *array_filter, *call_user_func, *type_structure, *array_mark_legacy, 
*array_unmark_legacy, *is_php_array, *is_any_array, *is_dict_or_darray, *is_vec_or_varray, ] .into_iter() } } pub mod typehints { use super::*; pub static null: Lazy<TypeName> = lazy!(sn::typehints::NULL); pub static void: Lazy<TypeName> = lazy!(sn::typehints::VOID); pub static resource: Lazy<TypeName> = lazy!(sn::typehints::RESOURCE); pub static num: Lazy<TypeName> = lazy!(sn::typehints::NUM); pub static arraykey: Lazy<TypeName> = lazy!(sn::typehints::ARRAYKEY); pub static noreturn: Lazy<TypeName> = lazy!(sn::typehints::NORETURN); pub static mixed: Lazy<TypeName> = lazy!(sn::typehints::MIXED); pub static nonnull: Lazy<TypeName> = lazy!(sn::typehints::NONNULL); pub static this: Lazy<TypeName> = lazy!(sn::typehints::THIS); pub static dynamic: Lazy<TypeName> = lazy!(sn::typehints::DYNAMIC); pub static nothing: Lazy<TypeName> = lazy!(sn::typehints::NOTHING); pub static int: Lazy<TypeName> = lazy!(sn::typehints::INT); pub static bool: Lazy<TypeName> = lazy!(sn::typehints::BOOL); pub static float: Lazy<TypeName> = lazy!(sn::typehints::FLOAT); pub static string: Lazy<TypeName> = lazy!(sn::typehints::STRING); pub static darray: Lazy<TypeName> = lazy!(sn::typehints::DARRAY); pub static varray: Lazy<TypeName> = lazy!(sn::typehints::VARRAY); pub static varray_or_darray: Lazy<TypeName> = lazy!(sn::typehints::VARRAY_OR_DARRAY); pub static vec_or_dict: Lazy<TypeName> = lazy!(sn::typehints::VEC_OR_DICT); pub static callable: Lazy<TypeName> = lazy!(sn::typehints::CALLABLE); pub static object_cast: Lazy<TypeName> = lazy!(sn::typehints::OBJECT_CAST); pub static supportdyn: Lazy<TypeName> = lazy!(sn::typehints::SUPPORTDYN); pub static hh_sypportdyn: Lazy<TypeName> = lazy!(sn::typehints::HH_SUPPORTDYN); pub static wildcard: Lazy<TypeName> = lazy!(sn::typehints::WILDCARD); pub static reserved_typehints: Lazy<HashSet<TypeName>> = Lazy::new(|| { [ *null, *void, *resource, *num, *arraykey, *noreturn, *mixed, *nonnull, *this, *dynamic, *nothing, *int, *bool, *float, *string, *darray, 
*varray, *varray_or_darray, *vec_or_dict, *callable, *wildcard, ] .into_iter() .collect() }); } pub mod pseudo_consts { use super::*; pub static g__LINE__: Lazy<ConstName> = lazy!(sn::pseudo_consts::G__LINE__); pub static g__CLASS__: Lazy<ConstName> = lazy!(sn::pseudo_consts::G__CLASS__); pub static g__TRAIT__: Lazy<ConstName> = lazy!(sn::pseudo_consts::G__TRAIT__); pub static g__FILE__: Lazy<ConstName> = lazy!(sn::pseudo_consts::G__FILE__); pub static g__DIR__: Lazy<ConstName> = lazy!(sn::pseudo_consts::G__DIR__); pub static g__FUNCTION__: Lazy<ConstName> = lazy!(sn::pseudo_consts::G__FUNCTION__); pub static g__METHOD__: Lazy<ConstName> = lazy!(sn::pseudo_consts::G__METHOD__); pub static g__NAMESPACE__: Lazy<ConstName> = lazy!(sn::pseudo_consts::G__NAMESPACE__); pub static g__COMPILER_FRONTEND__: Lazy<ConstName> = lazy!(sn::pseudo_consts::G__COMPILER_FRONTEND__); pub static g__FUNCTION_CREDENTIAL__: Lazy<ConstName> = lazy!(sn::pseudo_consts::G__FUNCTION_CREDENTIAL__); pub static exit: Lazy<ConstName> = lazy!(sn::pseudo_consts::EXIT); pub static die: Lazy<ConstName> = lazy!(sn::pseudo_consts::DIE); } pub mod fb { use super::*; pub static cEnum: Lazy<TypeName> = lazy!(sn::fb::ENUM); pub static tInner: Lazy<TypeConstName> = lazy!(sn::fb::INNER); pub static idx: Lazy<FunName> = lazy!(sn::fb::IDX); pub static cTypeStructure: Lazy<TypeName> = lazy!(sn::fb::TYPE_STRUCTURE); pub fn types() -> impl Iterator<Item = TypeName> { [*cEnum, *cTypeStructure].into_iter() } pub fn functions() -> impl Iterator<Item = FunName> { [*idx].into_iter() } } pub mod hh { use super::*; pub static contains: Lazy<FunName> = lazy!(sn::hh::CONTAINS); pub static contains_key: Lazy<FunName> = lazy!(sn::hh::CONTAINS_KEY); pub fn iter() -> impl Iterator<Item = FunName> { [*contains, *contains_key].into_iter() } } pub mod shapes { use super::*; pub static cShapes: Lazy<TypeName> = lazy!(sn::shapes::SHAPES); pub static idx: Lazy<MethodName> = lazy!(sn::shapes::IDX); pub static at: Lazy<MethodName> = 
lazy!(sn::shapes::AT); pub static keyExists: Lazy<MethodName> = lazy!(sn::shapes::KEY_EXISTS); pub static removeKey: Lazy<MethodName> = lazy!(sn::shapes::REMOVE_KEY); pub static toArray: Lazy<MethodName> = lazy!(sn::shapes::TO_ARRAY); pub static toDict: Lazy<MethodName> = lazy!(sn::shapes::TO_DICT); pub fn types() -> impl Iterator<Item = TypeName> { [*cShapes].into_iter() } } pub mod superglobals { use super::*; pub static globals: Lazy<Symbol> = lazy!(sn::superglobals::GLOBALS); } pub mod regex { use super::*; pub static tPattern: Lazy<TypeName> = lazy!(sn::regex::T_PATTERN); pub fn types() -> impl Iterator<Item = TypeName> { [*tPattern].into_iter() } } pub mod emitter_special_functions { use super::*; pub static eval: Lazy<FunName> = lazy!(sn::emitter_special_functions::EVAL); pub static set_frame_metadata: Lazy<FunName> = lazy!(sn::emitter_special_functions::SET_FRAME_METADATA); pub static systemlib_reified_generics: Lazy<FunName> = lazy!(sn::emitter_special_functions::SYSTEMLIB_REIFIED_GENERICS); } pub mod xhp { use super::*; pub static pcdata: Lazy<Symbol> = lazy!(sn::xhp::PCDATA); pub static any: Lazy<Symbol> = lazy!(sn::xhp::ANY); pub static empty: Lazy<Symbol> = lazy!(sn::xhp::EMPTY); } pub mod unstable_features { use super::*; pub static coeffects_provisional: Lazy<Symbol> = lazy!(sn::unstable_features::COEFFECTS_PROVISIONAL); pub static ifc: Lazy<Symbol> = lazy!(sn::unstable_features::IFC); pub static readonly: Lazy<Symbol> = lazy!(sn::unstable_features::READONLY); pub static expression_trees: Lazy<Symbol> = lazy!(sn::unstable_features::EXPRESSION_TREES); pub static modules: Lazy<Symbol> = lazy!(sn::unstable_features::MODULES); } pub mod coeffects { use super::*; pub static capability: Lazy<Symbol> = lazy!("$#capability"); pub static local_capability: Lazy<Symbol> = lazy!("$#local_capability"); pub static contexts: Lazy<TypeName> = lazy!("\\HH\\Contexts"); pub static unsafe_contexts: Lazy<TypeName> = lazy!(concat(*contexts, "\\Unsafe")); pub static 
generated_generic_prefix: Lazy<Symbol> = lazy!("T/"); pub fn types() -> impl Iterator<Item = TypeName> { [*contexts, *unsafe_contexts].into_iter() } pub fn is_generated_generic(x: impl AsRef<str>) -> bool { x.as_ref().starts_with(generated_generic_prefix.as_str()) } } pub mod readonly { use super::*; pub static idx: Lazy<FunName> = lazy!(sn::readonly::IDX); pub static as_mut: Lazy<FunName> = lazy!(sn::readonly::AS_MUT); } pub mod capabilities { use super::*; pub static defaults: Lazy<TypeName> = lazy!(concat(*coeffects::contexts, "\\defaults")); pub static write_props: Lazy<TypeName> = lazy!(concat(*coeffects::contexts, "\\write_props")); const prefix: &str = "\\HH\\Capabilities\\"; pub static writeProperty: Lazy<TypeName> = lazy!(concat(prefix, "WriteProperty")); pub static accessGlobals: Lazy<TypeName> = lazy!(concat(prefix, "AccessGlobals")); pub static readGlobals: Lazy<TypeName> = lazy!(concat(prefix, "ReadGlobals")); pub static system: Lazy<TypeName> = lazy!(concat(prefix, "System")); pub static systemLocal: Lazy<TypeName> = lazy!(concat(prefix, "SystemLocal")); pub static implicitPolicy: Lazy<TypeName> = lazy!(concat(prefix, "ImplicitPolicy")); pub static implicitPolicyLocal: Lazy<TypeName> = lazy!(concat(prefix, "ImplicitPolicyLocal")); pub static io: Lazy<TypeName> = lazy!(concat(prefix, "IO")); pub static rx: Lazy<TypeName> = lazy!(concat(prefix, "Rx")); pub static rxLocal: Lazy<TypeName> = lazy!(concat(*rx, "Local")); pub fn iter() -> impl Iterator<Item = TypeName> { [ *defaults, *write_props, *writeProperty, *accessGlobals, *readGlobals, *system, *systemLocal, *implicitPolicy, *implicitPolicyLocal, *io, *rx, *rxLocal, ] .into_iter() } } pub mod expression_trees { use super::*; pub static makeTree: Lazy<MethodName> = lazy!(sn::expression_trees::MAKE_TREE); pub static intType: Lazy<MethodName> = lazy!(sn::expression_trees::INT_TYPE); pub static floatType: Lazy<MethodName> = lazy!(sn::expression_trees::FLOAT_TYPE); pub static boolType: Lazy<MethodName> = 
lazy!(sn::expression_trees::BOOL_TYPE); pub static stringType: Lazy<MethodName> = lazy!(sn::expression_trees::STRING_TYPE); pub static nullType: Lazy<MethodName> = lazy!(sn::expression_trees::NULL_TYPE); pub static voidType: Lazy<MethodName> = lazy!(sn::expression_trees::VOID_TYPE); pub static symbolType: Lazy<MethodName> = lazy!(sn::expression_trees::SYMBOL_TYPE); pub static visitInt: Lazy<MethodName> = lazy!(sn::expression_trees::VISIT_INT); pub static visitFloat: Lazy<MethodName> = lazy!(sn::expression_trees::VISIT_FLOAT); pub static visitBool: Lazy<MethodName> = lazy!(sn::expression_trees::VISIT_BOOL); pub static visitString: Lazy<MethodName> = lazy!(sn::expression_trees::VISIT_STRING); pub static visitNull: Lazy<MethodName> = lazy!(sn::expression_trees::VISIT_NULL); pub static visitBinop: Lazy<MethodName> = lazy!(sn::expression_trees::VISIT_BINOP); pub static visitUnop: Lazy<MethodName> = lazy!(sn::expression_trees::VISIT_UNOP); pub static visitLocal: Lazy<MethodName> = lazy!(sn::expression_trees::VISIT_LOCAL); pub static visitLambda: Lazy<MethodName> = lazy!(sn::expression_trees::VISIT_LAMBDA); pub static visitGlobalFunction: Lazy<MethodName> = lazy!(sn::expression_trees::VISIT_GLOBAL_FUNCTION); pub static visitStaticMethod: Lazy<MethodName> = lazy!(sn::expression_trees::VISIT_STATIC_METHOD); pub static visitCall: Lazy<MethodName> = lazy!(sn::expression_trees::VISIT_CALL); pub static visitAssign: Lazy<MethodName> = lazy!(sn::expression_trees::VISIT_ASSIGN); pub static visitTernary: Lazy<MethodName> = lazy!(sn::expression_trees::VISIT_TERNARY); pub static visitIf: Lazy<MethodName> = lazy!(sn::expression_trees::VISIT_IF); pub static visitWhile: Lazy<MethodName> = lazy!(sn::expression_trees::VISIT_WHILE); pub static visitReturn: Lazy<MethodName> = lazy!(sn::expression_trees::VISIT_RETURN); pub static visitFor: Lazy<MethodName> = lazy!(sn::expression_trees::VISIT_FOR); pub static visitBreak: Lazy<MethodName> = lazy!(sn::expression_trees::VISIT_BREAK); pub static 
visitContinue: Lazy<MethodName> = lazy!(sn::expression_trees::VISIT_CONTINUE); pub static splice: Lazy<MethodName> = lazy!(sn::expression_trees::SPLICE); pub static dollardollarTmpVar: Lazy<Symbol> = lazy!(sn::expression_trees::DOLLARDOLLAR_TMP_VAR); }
TOML
hhvm/hphp/hack/src/hackrs/special_names/cargo/special_names/Cargo.toml
# @generated by autocargo [package] name = "special_names" version = "0.0.0" edition = "2021" [lib] path = "../../special_names.rs" [dependencies] hash = { version = "0.0.0", path = "../../../../utils/hash" } naming_special_names_rust = { version = "0.0.0", path = "../../../../naming" } once_cell = "1.12" pos = { version = "0.0.0", path = "../../../pos/cargo/pos" }
Rust
hhvm/hphp/hack/src/hackrs/ty/decl.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. mod debug; pub mod folded; mod from_oxidized; mod ocamlrep; mod printer; pub mod shallow; pub mod subst; mod to_oxidized; pub mod ty; pub use folded::ClassConst; pub use folded::FoldedClass; pub use folded::FoldedElement; pub use folded::Requirement; pub use folded::SubstContext; pub use folded::TypeConst; pub use shallow::ConstDecl; pub use shallow::FunDecl; pub use shallow::ModuleDecl; pub use shallow::ShallowClass; pub use shallow::ShallowClassConst; pub use shallow::ShallowMethod; pub use shallow::ShallowProp; pub use shallow::ShallowTypeconst; pub use shallow::TypedefDecl; pub use ty::AbstractTypeconst; pub use ty::Abstraction; pub use ty::CeVisibility; pub use ty::ClassConstFrom; pub use ty::ClassConstKind; pub use ty::ClassConstRef; pub use ty::ClassEltFlags; pub use ty::ClassEltFlagsArgs; pub use ty::ClassRefinement; pub use ty::ClassishKind; pub use ty::ConcreteTypeconst; pub use ty::ConsistentKind; pub use ty::EnumType; pub use ty::FunParam; pub use ty::FunType; pub use ty::PossiblyEnforcedTy; pub use ty::Prim; pub use ty::RefinedConst; pub use ty::RefinedConstBound; pub use ty::RefinedConstBounds; pub use ty::ShapeFieldType; pub use ty::TaccessType; pub use ty::Tparam; pub use ty::TrefinementType; pub use ty::Ty; pub use ty::Ty_; pub use ty::Typeconst; pub use ty::UserAttribute; pub use ty::Visibility; pub use ty::WhereConstraint; pub use ty::XhpAttribute;
Rust
hhvm/hphp/hack/src/hackrs/ty/decl_error.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use eq_modulo_pos::EqModuloPos; use pos::TypeName; use serde::Deserialize; use serde::Serialize; #[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)] pub enum DeclError<P> { WrongExtendKind { pos: P, kind: oxidized::ast_defs::ClassishKind, name: TypeName, parent_pos: P, parent_kind: oxidized::ast_defs::ClassishKind, parent_name: TypeName, }, CyclicClassDef(P, Vec<TypeName>), }
Rust
hhvm/hphp/hack/src/hackrs/ty/local.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. mod decl; mod kind; mod to_ocamlrep; mod ty; mod tyvar; mod variance; pub use decl::ClassElt; pub use kind::Kind; pub use ty::Exact; pub use ty::FunParam; pub use ty::FunType; pub use ty::ParamMode; pub use ty::Prim; pub use ty::Tparam; pub use ty::Ty; pub use ty::Ty_; pub use tyvar::Tyvar; pub use variance::Variance;
Rust
hhvm/hphp/hack/src/hackrs/ty/local_error.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. mod error_code; mod error_primary; mod error_reason; use eq_modulo_pos::EqModuloPos; pub use error_code::TypingErrorCode; pub use error_primary::Primary; pub use error_reason::ReasonsCallback; use serde::Deserialize; use serde::Serialize; use crate::decl_error::DeclError; use crate::reason::Reason; #[derive(Clone, Debug)] pub struct ErrorMessage<P>(P, String); #[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)] #[serde(bound = "R: Reason")] pub enum TypingError<R: Reason> { Primary(Primary<R>), } impl<R: Reason> TypingError<R> { pub fn primary(primary: Primary<R>) -> Self { TypingError::Primary(primary) } } impl<R: Reason> From<&DeclError<R::Pos>> for TypingError<R> { fn from(decl_error: &DeclError<R::Pos>) -> Self { match decl_error { &DeclError::WrongExtendKind { ref pos, kind, name, ref parent_pos, parent_kind, parent_name, } => Self::Primary(Primary::WrongExtendKind { pos: pos.clone(), kind, name, parent_pos: parent_pos.clone(), parent_kind, parent_name, }), DeclError::CyclicClassDef(pos, stack) => { Self::Primary(Primary::CyclicClassDef(pos.clone(), stack.clone())) } } } }
Rust
hhvm/hphp/hack/src/hackrs/ty/ocamlrep.rs
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.

use ocamlrep::Allocator;
use ocamlrep::FromOcamlRep;
use ocamlrep::ToOcamlRep;

use super::decl_error::*;

// Hand-written OCaml-rep conversions for `DeclError`. The block tags (0 for
// `WrongExtendKind`, 1 for `CyclicClassDef`) and the field order must match
// the corresponding OCaml variant declaration; keep both impls below in sync
// with each other and with the enum definition in `decl_error.rs`.
impl<P: ToOcamlRep> ToOcamlRep for DeclError<P> {
    fn to_ocamlrep<'a, A: Allocator>(&'a self, alloc: &'a A) -> ocamlrep::Value<'a> {
        match self {
            Self::WrongExtendKind {
                pos,
                kind,
                name,
                parent_pos,
                parent_kind,
                parent_name,
            } => {
                // Six fields, tag 0: laid out in declaration order.
                let mut block = alloc.block_with_size_and_tag(6usize, 0u8);
                alloc.set_field(&mut block, 0, alloc.add(pos));
                alloc.set_field(&mut block, 1, alloc.add(kind));
                alloc.set_field(&mut block, 2, alloc.add(name));
                alloc.set_field(&mut block, 3, alloc.add(parent_pos));
                alloc.set_field(&mut block, 4, alloc.add(parent_kind));
                alloc.set_field(&mut block, 5, alloc.add(parent_name));
                block.build()
            }
            Self::CyclicClassDef(pos, stack) => {
                // The stack is an SSet rather than a list in OCaml, so we need
                // to construct a tree set here. One way is sorting the list and
                // passing it to `sorted_iter_to_ocaml_set`.
                // Sort + dedup first: `sorted_iter_to_ocaml_set` requires a
                // strictly ascending, duplicate-free sequence.
                let mut stack = stack.clone();
                stack.sort_unstable();
                stack.dedup();
                let mut iter = stack.iter().copied().map(|s| alloc.add(s.as_str()));
                let (stack, _) =
                    ocamlrep::sorted_iter_to_ocaml_set(&mut iter, alloc, stack.len());
                let mut block = alloc.block_with_size_and_tag(2usize, 1u8);
                alloc.set_field(&mut block, 0, alloc.add(pos));
                alloc.set_field(&mut block, 1, stack);
                block.build()
            }
        }
    }
}

impl<P: FromOcamlRep> FromOcamlRep for DeclError<P> {
    fn from_ocamlrep(value: ocamlrep::Value<'_>) -> Result<Self, ocamlrep::FromError> {
        let block = ocamlrep::from::expect_block(value)?;
        // Dispatch on the block tag; mirrors the layout produced above.
        match block.tag() {
            0 => {
                ocamlrep::from::expect_block_size(block, 6)?;
                Ok(Self::WrongExtendKind {
                    pos: ocamlrep::from::field(block, 0)?,
                    kind: ocamlrep::from::field(block, 1)?,
                    name: ocamlrep::from::field(block, 2)?,
                    parent_pos: ocamlrep::from::field(block, 3)?,
                    parent_kind: ocamlrep::from::field(block, 4)?,
                    parent_name: ocamlrep::from::field(block, 5)?,
                })
            }
            1 => {
                ocamlrep::from::expect_block_size(block, 2)?;
                Ok(Self::CyclicClassDef(
                    ocamlrep::from::field(block, 0)?,
                    // The OCaml side stores the cycle as a set; flatten it
                    // back into a Vec.
                    ocamlrep::vec_from_ocaml_set(block[1])?,
                ))
            }
            t => Err(ocamlrep::FromError::BlockTagOutOfRange { max: 1, actual: t }),
        }
    }
}
Rust
hhvm/hphp/hack/src/hackrs/ty/prop.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. mod constraint; use std::ops::Deref; pub use constraint::Cstr; use hcons::Conser; use hcons::Hc; use crate::local::Ty; use crate::local_error::TypingError; use crate::reason::Reason; #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub enum PropF<R: Reason, A> { Atom(Cstr<R>), Conj(Vec<A>), Disj(TypingError<R>, Vec<A>), } impl<R: Reason> PropF<R, Prop<R>> { pub fn inj(self) -> Prop<R> { Prop(Hc::new(self)) } } impl<R: Reason> hcons::Consable for PropF<R, Prop<R>> { #[inline] fn conser() -> &'static Conser<PropF<R, Prop<R>>> { R::prop_conser() } } #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct Prop<R: Reason>(Hc<PropF<R, Prop<R>>>); impl<R: Reason> Deref for Prop<R> { type Target = PropF<R, Prop<R>>; fn deref(&self) -> &Self::Target { let Prop(hc_prop_f) = self; Deref::deref(hc_prop_f) } } impl<R: Reason> Prop<R> { pub fn conjs(ps: Vec<Prop<R>>) -> Self { PropF::Conj(ps).inj() } pub fn conj(self, other: Self) -> Self { Self::conjs(vec![self, other]) } pub fn disjs(ps: Vec<Prop<R>>, fail: TypingError<R>) -> Self { PropF::Disj(fail, ps).inj() } pub fn disj(self, other: Self, fail: TypingError<R>) -> Self { Self::disjs(vec![self, other], fail) } pub fn subtype(ty_sub: Ty<R>, ty_sup: Ty<R>) -> Self { PropF::Atom(Cstr::subtype(ty_sub, ty_sup)).inj() } pub fn valid() -> Self { PropF::Conj(vec![]).inj() } pub fn invalid(fail: TypingError<R>) -> Self { PropF::Disj(fail, vec![]).inj() } pub fn is_valid(&self) -> bool { match self.deref() { PropF::Atom(_) => false, PropF::Conj(ps) => ps.iter().all(|p| p.is_valid()), PropF::Disj(_, ps) => ps.iter().any(|p| p.is_valid()), } } pub fn is_unsat(&self) -> bool { match self.deref() { PropF::Atom(_) => false, PropF::Conj(ps) => ps.iter().any(|p| p.is_unsat()), PropF::Disj(_, ps) => ps.iter().all(|p| p.is_unsat()), } } }
Rust
hhvm/hphp/hack/src/hackrs/ty/reason.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use std::hash::Hash; use std::sync::Arc; use eq_modulo_pos::EqModuloPos; use hcons::Conser; use ocamlrep::FromOcamlRep; use ocamlrep::ToOcamlRep; use once_cell::sync::Lazy; pub use oxidized::typing_reason::ArgPosition; pub use oxidized::typing_reason::BlameSource; use pos::BPos; use pos::NPos; use pos::Pos; use pos::Positioned; use pos::Symbol; use pos::ToOxidized; use pos::TypeConstName; use pos::TypeName; use serde::de::DeserializeOwned; use serde::Deserialize; use serde::Serialize; use crate::decl; use crate::local; use crate::prop::Prop; use crate::prop::PropF; use crate::visitor::Walkable; pub trait Reason: Eq + EqModuloPos + Hash + Clone + Walkable<Self> + std::fmt::Debug + Send + Sync + Serialize + DeserializeOwned + for<'a> From<oxidized_by_ref::typing_reason::T_<'a>> + for<'a> ToOxidized<'a, Output = oxidized_by_ref::typing_reason::T_<'a>> + ToOcamlRep + FromOcamlRep + 'static { /// Position type. type Pos: Pos + Send + Sync + 'static; /// Make a new instance. If the implementing Reason is stateful, /// it will call cons() to obtain the ReasonImpl to construct the instance. 
fn mk(cons: impl FnOnce() -> ReasonImpl<Self, Self::Pos>) -> Self; fn none() -> Self; fn witness(pos: Self::Pos) -> Self { Self::mk(|| ReasonImpl::Rwitness(pos)) } fn witness_from_decl(pos: Self::Pos) -> Self { Self::mk(|| ReasonImpl::RwitnessFromDecl(pos)) } fn hint(pos: Self::Pos) -> Self { Self::mk(|| ReasonImpl::Rhint(pos)) } fn instantiate(r1: Self, ty_name: TypeName, r2: Self) -> Self { Self::mk(|| ReasonImpl::Rinstantiate(r1, ty_name, r2)) } fn class_class(pos: Self::Pos, ty_name: TypeName) -> Self { Self::mk(|| ReasonImpl::RclassClass(pos, ty_name)) } fn no_return(pos: Self::Pos) -> Self { Self::mk(|| ReasonImpl::RnoReturn(pos)) } fn implicit_upper_bound(pos: Self::Pos, sym: Symbol) -> Self { Self::mk(|| ReasonImpl::RimplicitUpperBound(pos, sym)) } fn tyvar(pos: Self::Pos) -> Self { Self::mk(|| ReasonImpl::RtypeVariable(pos)) } fn early_solve_failed(pos: Self::Pos) -> Self { Self::mk(|| ReasonImpl::RsolveFail(pos)) } fn type_variable_generics(pos: Self::Pos, kind_name: Symbol, use_name: Symbol) -> Self { Self::mk(|| ReasonImpl::RtypeVariableGenerics(pos, kind_name, use_name)) } fn pos(&self) -> &Self::Pos; fn decl_ty_conser() -> &'static Conser<decl::Ty_<Self>>; fn local_ty_conser() -> &'static Conser<local::Ty_<Self, local::Ty<Self>>>; fn prop_conser() -> &'static Conser<PropF<Self, Prop<Self>>>; fn from_oxidized(reason: oxidized_by_ref::typing_reason::T_<'_>) -> Self { Self::mk(|| { use oxidized_by_ref::typing_reason::Blame as OBlame; use oxidized_by_ref::typing_reason::T_ as OR; use ReasonImpl as RI; match reason { OR::Rnone => RI::Rnone, OR::RmissingClass(pos) => RI::RmissingClass(pos.into()), OR::Rwitness(pos) => RI::Rwitness(pos.into()), OR::RwitnessFromDecl(pos) => RI::RwitnessFromDecl(pos.into()), OR::Ridx(&(pos, r)) => RI::Ridx(pos.into(), r.into()), OR::RidxVector(pos) => RI::RidxVector(pos.into()), OR::RidxVectorFromDecl(pos) => RI::RidxVectorFromDecl(pos.into()), OR::Rforeach(pos) => RI::Rforeach(pos.into()), OR::Rasyncforeach(pos) => 
RI::Rasyncforeach(pos.into()), OR::Rarith(pos) => RI::Rarith(pos.into()), OR::RarithRet(pos) => RI::RarithRet(pos.into()), OR::RarithRetFloat(&(pos, r, arg_position)) => { RI::RarithRetFloat(pos.into(), r.into(), arg_position) } OR::RarithRetNum(&(pos, r, arg_position)) => { RI::RarithRetNum(pos.into(), r.into(), arg_position) } OR::RarithRetInt(pos) => RI::RarithRetInt(pos.into()), OR::RarithDynamic(pos) => RI::RarithDynamic(pos.into()), OR::RbitwiseDynamic(pos) => RI::RbitwiseDynamic(pos.into()), OR::RincdecDynamic(pos) => RI::RincdecDynamic(pos.into()), OR::Rcomp(pos) => RI::Rcomp(pos.into()), OR::RconcatRet(pos) => RI::RconcatRet(pos.into()), OR::RlogicRet(pos) => RI::RlogicRet(pos.into()), OR::Rbitwise(pos) => RI::Rbitwise(pos.into()), OR::RbitwiseRet(pos) => RI::RbitwiseRet(pos.into()), OR::RnoReturn(pos) => RI::RnoReturn(pos.into()), OR::RnoReturnAsync(pos) => RI::RnoReturnAsync(pos.into()), OR::RretFunKind(&(pos, fun_kind)) => RI::RretFunKind(pos.into(), fun_kind), OR::RretFunKindFromDecl(&(pos, fun_kind)) => { RI::RretFunKindFromDecl(pos.into(), fun_kind) } OR::Rhint(pos) => RI::Rhint(pos.into()), OR::Rthrow(pos) => RI::Rthrow(pos.into()), OR::Rplaceholder(pos) => RI::Rplaceholder(pos.into()), OR::RretDiv(pos) => RI::RretDiv(pos.into()), OR::RyieldGen(pos) => RI::RyieldGen(pos.into()), OR::RyieldAsyncgen(pos) => RI::RyieldAsyncgen(pos.into()), OR::RyieldAsyncnull(pos) => RI::RyieldAsyncnull(pos.into()), OR::RyieldSend(pos) => RI::RyieldSend(pos.into()), OR::RlostInfo(&(sym, r, OBlame::Blame(&(pos, blame_source)))) => { RI::RlostInfo(Symbol::new(sym), r.into(), Blame(pos.into(), blame_source)) } OR::Rformat(&(pos, sym, r)) => RI::Rformat(pos.into(), Symbol::new(sym), r.into()), OR::RclassClass(&(pos, s)) => RI::RclassClass(pos.into(), TypeName(Symbol::new(s))), OR::RunknownClass(pos) => RI::RunknownClass(pos.into()), OR::RvarParam(pos) => RI::RvarParam(pos.into()), OR::RvarParamFromDecl(pos) => RI::RvarParamFromDecl(pos.into()), OR::RunpackParam(&(pos1, 
pos2, i)) => RI::RunpackParam(pos1.into(), pos2.into(), i), OR::RinoutParam(pos) => RI::RinoutParam(pos.into()), OR::Rinstantiate(&(r1, sym, r2)) => { RI::Rinstantiate(r1.into(), TypeName(Symbol::new(sym)), r2.into()) } OR::Rtypeconst(&(r1, pos_id, sym, r2)) => RI::Rtypeconst( r1.into(), pos_id.into(), Symbol::new(sym.0.unwrap()), r2.into(), ), OR::RtypeAccess(&(r, list)) => RI::RtypeAccess( r.into(), list.iter() .map(|(&r, s)| (r.into(), Symbol::new(s.0.unwrap()))) .collect(), ), OR::RexprDepType(&(r, pos, edt_reason)) => { RI::RexprDepType(r.into(), pos.into(), edt_reason.into()) } OR::RnullsafeOp(pos) => RI::RnullsafeOp(pos.into()), OR::RtconstNoCstr(&pos_id) => RI::RtconstNoCstr(pos_id.into()), OR::Rpredicated(&(pos, s)) => RI::Rpredicated(pos.into(), Symbol::new(s)), OR::Ris(pos) => RI::Ris(pos.into()), OR::Ras(pos) => RI::Ras(pos.into()), OR::Requal(pos) => RI::Requal(pos.into()), OR::RvarrayOrDarrayKey(pos) => RI::RvarrayOrDarrayKey(pos.into()), OR::RvecOrDictKey(pos) => RI::RvecOrDictKey(pos.into()), OR::Rusing(pos) => RI::Rusing(pos.into()), OR::RdynamicProp(pos) => RI::RdynamicProp(pos.into()), OR::RdynamicCall(pos) => RI::RdynamicCall(pos.into()), OR::RdynamicConstruct(pos) => RI::RdynamicConstruct(pos.into()), OR::RidxDict(pos) => RI::RidxDict(pos.into()), OR::RsetElement(pos) => RI::RsetElement(pos.into()), OR::RmissingOptionalField(&(pos, s)) => { RI::RmissingOptionalField(pos.into(), Symbol::new(s)) } OR::RunsetField(&(pos, s)) => RI::RunsetField(pos.into(), Symbol::new(s)), OR::RcontravariantGeneric(&(r, s)) => { RI::RcontravariantGeneric(r.into(), Symbol::new(s)) } OR::RinvariantGeneric(&(r, s)) => RI::RinvariantGeneric(r.into(), Symbol::new(s)), OR::Rregex(pos) => RI::Rregex(pos.into()), OR::RimplicitUpperBound(&(pos, s)) => { RI::RimplicitUpperBound(pos.into(), Symbol::new(s)) } OR::RtypeVariable(pos) => RI::RtypeVariable(pos.into()), OR::RtypeVariableGenerics(&(pos, s1, s2)) => { RI::RtypeVariableGenerics(pos.into(), Symbol::new(s1), 
Symbol::new(s2)) } OR::RtypeVariableError(pos) => RI::RtypeVariableError(pos.into()), OR::RglobalTypeVariableGenerics(&(pos, s1, s2)) => { RI::RglobalTypeVariableGenerics(pos.into(), Symbol::new(s1), Symbol::new(s2)) } OR::RsolveFail(pos) => RI::RsolveFail(pos.into()), OR::RcstrOnGenerics(&(pos, pos_id)) => { RI::RcstrOnGenerics(pos.into(), pos_id.into()) } OR::RlambdaParam(&(pos, r)) => RI::RlambdaParam(pos.into(), r.into()), OR::Rshape(&(pos, s)) => RI::Rshape(pos.into(), Symbol::new(s)), OR::RshapeLiteral(pos) => RI::RshapeLiteral(pos.into()), OR::Renforceable(pos) => RI::Renforceable(pos.into()), OR::Rdestructure(pos) => RI::Rdestructure(pos.into()), OR::RkeyValueCollectionKey(pos) => RI::RkeyValueCollectionKey(pos.into()), OR::RglobalClassProp(pos) => RI::RglobalClassProp(pos.into()), OR::RglobalFunParam(pos) => RI::RglobalFunParam(pos.into()), OR::RglobalFunRet(pos) => RI::RglobalFunRet(pos.into()), OR::Rsplice(pos) => RI::Rsplice(pos.into()), OR::RetBoolean(pos) => RI::RetBoolean(pos.into()), OR::RdefaultCapability(pos) => RI::RdefaultCapability(pos.into()), OR::RconcatOperand(pos) => RI::RconcatOperand(pos.into()), OR::RinterpOperand(pos) => RI::RinterpOperand(pos.into()), OR::RdynamicCoercion(&r) => RI::RdynamicCoercion(r.into()), OR::RsupportDynamicType(pos) => RI::RsupportDynamicType(pos.into()), OR::RdynamicPartialEnforcement(&(pos, s, r)) => { RI::RdynamicPartialEnforcement(pos.into(), Symbol::new(s), r.into()) } OR::RrigidTvarEscape(&(pos, s1, s2, r)) => { RI::RrigidTvarEscape(pos.into(), Symbol::new(s1), Symbol::new(s2), r.into()) } OR::RopaqueTypeFromModule(&(pos, s, r)) => { RI::RopaqueTypeFromModule(pos.into(), Symbol::new(s), r.into()) } OR::Rinvalid => RI::Rinvalid, } }) } } #[derive(Debug, Clone, PartialEq, Eq, EqModuloPos, Hash, Serialize, Deserialize)] #[derive(ToOcamlRep, FromOcamlRep)] pub struct Blame<P>(pub P, pub BlameSource); #[derive(Debug, Clone, PartialEq, Eq, EqModuloPos, Hash, Serialize, Deserialize)] #[derive(ToOcamlRep, 
FromOcamlRep)] pub enum ExprDepTypeReason { ERexpr(isize), ERstatic, ERclass(Symbol), ERparent(Symbol), ERself(Symbol), ERpu(Symbol), } impl<'a> From<oxidized_by_ref::typing_reason::ExprDepTypeReason<'a>> for ExprDepTypeReason { fn from(edtr: oxidized_by_ref::typing_reason::ExprDepTypeReason<'a>) -> Self { use oxidized_by_ref::typing_reason::ExprDepTypeReason as Obr; match edtr { Obr::ERexpr(i) => ExprDepTypeReason::ERexpr(i), Obr::ERstatic => ExprDepTypeReason::ERstatic, Obr::ERclass(s) => ExprDepTypeReason::ERclass(Symbol::new(s)), Obr::ERparent(s) => ExprDepTypeReason::ERparent(Symbol::new(s)), Obr::ERself(s) => ExprDepTypeReason::ERself(Symbol::new(s)), Obr::ERpu(s) => ExprDepTypeReason::ERpu(Symbol::new(s)), } } } impl<'a> ToOxidized<'a> for ExprDepTypeReason { type Output = oxidized_by_ref::typing_reason::ExprDepTypeReason<'a>; fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output { use oxidized_by_ref::typing_reason::ExprDepTypeReason as Obr; match self { ExprDepTypeReason::ERexpr(i) => Obr::ERexpr(*i), ExprDepTypeReason::ERstatic => Obr::ERstatic, ExprDepTypeReason::ERclass(s) => Obr::ERclass(s.to_oxidized(arena)), ExprDepTypeReason::ERparent(s) => Obr::ERparent(s.to_oxidized(arena)), ExprDepTypeReason::ERself(s) => Obr::ERself(s.to_oxidized(arena)), ExprDepTypeReason::ERpu(s) => Obr::ERpu(s.to_oxidized(arena)), } } } #[derive(Debug, Clone, PartialEq, Eq, EqModuloPos, Hash, Serialize, Deserialize)] #[derive(ToOcamlRep, FromOcamlRep)] pub enum ReasonImpl<R, P> { Rnone, Rwitness(P), RwitnessFromDecl(P), /// Used as an index into a vector-like array or string. 
/// Position of indexing, reason for the indexed type Ridx(P, R), RidxVector(P), /// Used as an index, in the Vector case RidxVectorFromDecl(P), /// Because it is iterated in a foreach loop Rforeach(P), /// Because it is iterated "await as" in foreach Rasyncforeach(P), Rarith(P), RarithRet(P), /// pos, arg float typing reason, arg position RarithRetFloat(P, R, oxidized::typing_reason::ArgPosition), /// pos, arg num typing reason, arg position RarithRetNum(P, R, oxidized::typing_reason::ArgPosition), RarithRetInt(P), RarithDynamic(P), RbitwiseDynamic(P), RincdecDynamic(P), Rcomp(P), RconcatRet(P), RlogicRet(P), Rbitwise(P), RbitwiseRet(P), RnoReturn(P), RnoReturnAsync(P), RretFunKind(P, oxidized::ast_defs::FunKind), RretFunKindFromDecl(P, oxidized::ast_defs::FunKind), Rhint(P), Rthrow(P), Rplaceholder(P), RretDiv(P), RyieldGen(P), RyieldAsyncgen(P), RyieldAsyncnull(P), RyieldSend(P), RlostInfo(Symbol, R, Blame<P>), Rformat(P, Symbol, R), RclassClass(P, TypeName), RunknownClass(P), RvarParam(P), RvarParamFromDecl(P), /// splat pos, fun def pos, number of args before splat RunpackParam(P, P, isize), RinoutParam(P), Rinstantiate(R, TypeName, R), Rtypeconst(R, Positioned<TypeConstName, P>, Symbol, R), RtypeAccess(R, Vec<(R, Symbol)>), RexprDepType(R, P, ExprDepTypeReason), /// ?-> operator is used RnullsafeOp(P), RtconstNoCstr(Positioned<TypeConstName, P>), Rpredicated(P, Symbol), Ris(P), Ras(P), Requal(P), RvarrayOrDarrayKey(P), RvecOrDictKey(P), Rusing(P), RdynamicProp(P), RdynamicCall(P), RdynamicConstruct(P), RidxDict(P), RsetElement(P), RmissingOptionalField(P, Symbol), RunsetField(P, Symbol), RcontravariantGeneric(R, Symbol), RinvariantGeneric(R, Symbol), Rregex(P), RimplicitUpperBound(P, Symbol), RtypeVariable(P), RtypeVariableGenerics(P, Symbol, Symbol), RtypeVariableError(P), RglobalTypeVariableGenerics(P, Symbol, Symbol), RsolveFail(P), RcstrOnGenerics(P, Positioned<TypeName, P>), RlambdaParam(P, R), Rshape(P, Symbol), RshapeLiteral(P), Renforceable(P), 
Rdestructure(P), RkeyValueCollectionKey(P), RglobalClassProp(P), RglobalFunParam(P), RglobalFunRet(P), Rsplice(P), RetBoolean(P), RdefaultCapability(P), RconcatOperand(P), RinterpOperand(P), RdynamicCoercion(R), RsupportDynamicType(P), RdynamicPartialEnforcement(P, Symbol, R), RrigidTvarEscape(P, Symbol, Symbol, R), RopaqueTypeFromModule(P, Symbol, R), RmissingClass(P), Rinvalid, } #[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] #[derive(ToOcamlRep, FromOcamlRep)] pub struct BReason(Arc<ReasonImpl<BReason, BPos>>); impl Reason for BReason { type Pos = BPos; fn mk(cons: impl FnOnce() -> ReasonImpl<Self, Self::Pos>) -> Self { let x = cons(); Self(Arc::new(x)) } fn none() -> Self { BReason(Arc::new(ReasonImpl::Rnone)) } fn pos(&self) -> &BPos { use ReasonImpl::*; match &*self.0 { Rnone => unimplemented!(), Rwitness(p) | RwitnessFromDecl(p) | Rhint(p) => p, r => unimplemented!("BReason::pos: {:?}", r), } } #[inline] fn decl_ty_conser() -> &'static Conser<decl::Ty_<BReason>> { static CONSER: Lazy<Conser<decl::Ty_<BReason>>> = Lazy::new(Conser::new); &CONSER } #[inline] fn local_ty_conser() -> &'static Conser<local::Ty_<BReason, local::Ty<BReason>>> { static CONSER: Lazy<Conser<local::Ty_<BReason, local::Ty<BReason>>>> = Lazy::new(Conser::new); &CONSER } #[inline] fn prop_conser() -> &'static Conser<PropF<BReason, Prop<BReason>>> { static CONSER: Lazy<Conser<PropF<BReason, Prop<BReason>>>> = Lazy::new(Conser::new); &CONSER } } impl Walkable<BReason> for BReason {} impl<'a> From<oxidized_by_ref::typing_reason::Reason<'a>> for BReason { fn from(reason: oxidized_by_ref::typing_reason::Reason<'a>) -> Self { Self::from_oxidized(reason) } } impl<'a> ToOxidized<'a> for BReason { type Output = oxidized_by_ref::typing_reason::Reason<'a>; fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output { use oxidized_by_ref::typing_reason::Blame as OBlame; use oxidized_by_ref::typing_reason::Reason as OR; use ReasonImpl as RI; match &*self.0 { RI::Rnone => 
OR::Rnone, RI::Rwitness(pos) => OR::Rwitness(pos.to_oxidized(arena)), RI::RwitnessFromDecl(pos) => OR::RwitnessFromDecl(pos.to_oxidized(arena)), RI::Ridx(pos, r) => { OR::Ridx(arena.alloc((pos.to_oxidized(arena), r.to_oxidized(arena)))) } RI::RidxVector(pos) => OR::RidxVector(pos.to_oxidized(arena)), RI::RidxVectorFromDecl(pos) => OR::RidxVectorFromDecl(pos.to_oxidized(arena)), RI::Rforeach(pos) => OR::Rforeach(pos.to_oxidized(arena)), RI::Rasyncforeach(pos) => OR::Rasyncforeach(pos.to_oxidized(arena)), RI::Rarith(pos) => OR::Rarith(pos.to_oxidized(arena)), RI::RarithRet(pos) => OR::RarithRet(pos.to_oxidized(arena)), RI::RarithRetFloat(pos, r, arg_position) => OR::RarithRetFloat(arena.alloc(( pos.to_oxidized(arena), r.to_oxidized(arena), *arg_position, ))), RI::RarithRetNum(pos, r, arg_position) => OR::RarithRetNum(arena.alloc(( pos.to_oxidized(arena), r.to_oxidized(arena), *arg_position, ))), RI::RarithRetInt(pos) => OR::RarithRetInt(pos.to_oxidized(arena)), RI::RarithDynamic(pos) => OR::RarithDynamic(pos.to_oxidized(arena)), RI::RbitwiseDynamic(pos) => OR::RbitwiseDynamic(pos.to_oxidized(arena)), RI::RincdecDynamic(pos) => OR::RincdecDynamic(pos.to_oxidized(arena)), RI::Rcomp(pos) => OR::Rcomp(pos.to_oxidized(arena)), RI::RconcatRet(pos) => OR::RconcatRet(pos.to_oxidized(arena)), RI::RlogicRet(pos) => OR::RlogicRet(pos.to_oxidized(arena)), RI::Rbitwise(pos) => OR::Rbitwise(pos.to_oxidized(arena)), RI::RbitwiseRet(pos) => OR::RbitwiseRet(pos.to_oxidized(arena)), RI::RnoReturn(pos) => OR::RnoReturn(pos.to_oxidized(arena)), RI::RnoReturnAsync(pos) => OR::RnoReturnAsync(pos.to_oxidized(arena)), RI::RretFunKind(pos, fun_kind) => { OR::RretFunKind(arena.alloc((pos.to_oxidized(arena), *fun_kind))) } RI::RretFunKindFromDecl(pos, fun_kind) => { OR::RretFunKindFromDecl(arena.alloc((pos.to_oxidized(arena), *fun_kind))) } RI::Rhint(pos) => OR::Rhint(pos.to_oxidized(arena)), RI::Rthrow(pos) => OR::Rthrow(pos.to_oxidized(arena)), RI::Rplaceholder(pos) => 
OR::Rplaceholder(pos.to_oxidized(arena)), RI::RretDiv(pos) => OR::RretDiv(pos.to_oxidized(arena)), RI::RyieldGen(pos) => OR::RyieldGen(pos.to_oxidized(arena)), RI::RyieldAsyncgen(pos) => OR::RyieldAsyncgen(pos.to_oxidized(arena)), RI::RyieldAsyncnull(pos) => OR::RyieldAsyncnull(pos.to_oxidized(arena)), RI::RyieldSend(pos) => OR::RyieldSend(pos.to_oxidized(arena)), RI::RlostInfo(sym, r, Blame(pos, blame_source)) => OR::RlostInfo(arena.alloc(( sym.to_oxidized(arena), r.to_oxidized(arena), OBlame::Blame(arena.alloc((pos.to_oxidized(arena), *blame_source))), ))), RI::Rformat(pos, sym, r) => OR::Rformat(arena.alloc(( pos.to_oxidized(arena), sym.to_oxidized(arena), r.to_oxidized(arena), ))), RI::RclassClass(pos, s) => { OR::RclassClass(arena.alloc((pos.to_oxidized(arena), s.to_oxidized(arena)))) } RI::RunknownClass(pos) => OR::RunknownClass(pos.to_oxidized(arena)), RI::RvarParam(pos) => OR::RvarParam(pos.to_oxidized(arena)), RI::RvarParamFromDecl(pos) => OR::RvarParamFromDecl(pos.to_oxidized(arena)), RI::RunpackParam(pos1, pos2, i) => OR::RunpackParam(arena.alloc(( pos1.to_oxidized(arena), pos2.to_oxidized(arena), *i, ))), RI::RinoutParam(pos) => OR::RinoutParam(pos.to_oxidized(arena)), RI::Rinstantiate(r1, type_name, r2) => OR::Rinstantiate(arena.alloc(( r1.to_oxidized(arena), type_name.to_oxidized(arena), r2.to_oxidized(arena), ))), RI::Rtypeconst(r1, pos_id, sym, r2) => OR::Rtypeconst(arena.alloc(( r1.to_oxidized(arena), pos_id.to_oxidized(arena), &*arena.alloc(oxidized_by_ref::lazy::Lazy(Some(sym.to_oxidized(arena)))), r2.to_oxidized(arena), ))), RI::RtypeAccess(r, list) => OR::RtypeAccess(arena.alloc(( r.to_oxidized(arena), &*arena.alloc_slice_fill_iter(list.iter().map(|(r, s)| { ( &*arena.alloc(r.to_oxidized(arena)), &*arena.alloc(oxidized_by_ref::lazy::Lazy(Some(s.to_oxidized(arena)))), ) })), ))), RI::RexprDepType(r, pos, edt_reason) => OR::RexprDepType(arena.alloc(( r.to_oxidized(arena), pos.to_oxidized(arena), edt_reason.to_oxidized(arena), ))), 
RI::RnullsafeOp(pos) => OR::RnullsafeOp(pos.to_oxidized(arena)), RI::RtconstNoCstr(pos_id) => OR::RtconstNoCstr(arena.alloc(pos_id.to_oxidized(arena))), RI::Rpredicated(pos, s) => { OR::Rpredicated(arena.alloc((pos.to_oxidized(arena), s.to_oxidized(arena)))) } RI::Ris(pos) => OR::Ris(pos.to_oxidized(arena)), RI::Ras(pos) => OR::Ras(pos.to_oxidized(arena)), RI::Requal(pos) => OR::Requal(pos.to_oxidized(arena)), RI::RvarrayOrDarrayKey(pos) => OR::RvarrayOrDarrayKey(pos.to_oxidized(arena)), RI::RvecOrDictKey(pos) => OR::RvecOrDictKey(pos.to_oxidized(arena)), RI::Rusing(pos) => OR::Rusing(pos.to_oxidized(arena)), RI::RdynamicProp(pos) => OR::RdynamicProp(pos.to_oxidized(arena)), RI::RdynamicCall(pos) => OR::RdynamicCall(pos.to_oxidized(arena)), RI::RdynamicConstruct(pos) => OR::RdynamicConstruct(pos.to_oxidized(arena)), RI::RidxDict(pos) => OR::RidxDict(pos.to_oxidized(arena)), RI::RsetElement(pos) => OR::RsetElement(pos.to_oxidized(arena)), RI::RmissingOptionalField(pos, s) => OR::RmissingOptionalField( arena.alloc((pos.to_oxidized(arena), s.to_oxidized(arena))), ), RI::RunsetField(pos, s) => { OR::RunsetField(arena.alloc((pos.to_oxidized(arena), s.to_oxidized(arena)))) } RI::RcontravariantGeneric(r, s) => { OR::RcontravariantGeneric(arena.alloc((r.to_oxidized(arena), s.to_oxidized(arena)))) } RI::RinvariantGeneric(r, s) => { OR::RinvariantGeneric(arena.alloc((r.to_oxidized(arena), s.to_oxidized(arena)))) } RI::Rregex(pos) => OR::Rregex(pos.to_oxidized(arena)), RI::RimplicitUpperBound(pos, s) => { OR::RimplicitUpperBound(arena.alloc((pos.to_oxidized(arena), s.to_oxidized(arena)))) } RI::RtypeVariable(pos) => OR::RtypeVariable(pos.to_oxidized(arena)), RI::RtypeVariableGenerics(pos, s1, s2) => OR::RtypeVariableGenerics(arena.alloc(( pos.to_oxidized(arena), s1.to_oxidized(arena), s2.to_oxidized(arena), ))), RI::RglobalTypeVariableGenerics(pos, s1, s2) => { OR::RglobalTypeVariableGenerics(arena.alloc(( pos.to_oxidized(arena), s1.to_oxidized(arena), s2.to_oxidized(arena), 
))) } RI::RtypeVariableError(pos) => OR::RtypeVariableError(pos.to_oxidized(arena)), RI::RsolveFail(pos) => OR::RsolveFail(pos.to_oxidized(arena)), RI::RcstrOnGenerics(pos, pos_id) => OR::RcstrOnGenerics( arena.alloc((pos.to_oxidized(arena), pos_id.to_oxidized(arena))), ), RI::RlambdaParam(pos, r) => { OR::RlambdaParam(arena.alloc((pos.to_oxidized(arena), r.to_oxidized(arena)))) } RI::Rshape(pos, s) => { OR::Rshape(arena.alloc((pos.to_oxidized(arena), s.to_oxidized(arena)))) } RI::RshapeLiteral(pos) => OR::RshapeLiteral(pos.to_oxidized(arena)), RI::Renforceable(pos) => OR::Renforceable(pos.to_oxidized(arena)), RI::Rdestructure(pos) => OR::Rdestructure(pos.to_oxidized(arena)), RI::RkeyValueCollectionKey(pos) => OR::RkeyValueCollectionKey(pos.to_oxidized(arena)), RI::RglobalClassProp(pos) => OR::RglobalClassProp(pos.to_oxidized(arena)), RI::RglobalFunParam(pos) => OR::RglobalFunParam(pos.to_oxidized(arena)), RI::RglobalFunRet(pos) => OR::RglobalFunRet(pos.to_oxidized(arena)), RI::Rsplice(pos) => OR::Rsplice(pos.to_oxidized(arena)), RI::RetBoolean(pos) => OR::RetBoolean(pos.to_oxidized(arena)), RI::RdefaultCapability(pos) => OR::RdefaultCapability(pos.to_oxidized(arena)), RI::RconcatOperand(pos) => OR::RconcatOperand(pos.to_oxidized(arena)), RI::RinterpOperand(pos) => OR::RinterpOperand(pos.to_oxidized(arena)), RI::RdynamicCoercion(r) => OR::RdynamicCoercion(arena.alloc(r.to_oxidized(arena))), RI::RsupportDynamicType(pos) => OR::RsupportDynamicType(pos.to_oxidized(arena)), RI::RdynamicPartialEnforcement(pos, s, r) => { OR::RdynamicPartialEnforcement(arena.alloc(( pos.to_oxidized(arena), s.to_oxidized(arena), r.to_oxidized(arena), ))) } RI::RrigidTvarEscape(pos, s1, s2, r) => OR::RrigidTvarEscape(arena.alloc(( pos.to_oxidized(arena), s1.to_oxidized(arena), s2.to_oxidized(arena), r.to_oxidized(arena), ))), RI::RopaqueTypeFromModule(pos, s1, r) => OR::RopaqueTypeFromModule(arena.alloc(( pos.to_oxidized(arena), s1.to_oxidized(arena), r.to_oxidized(arena), ))), 
RI::RmissingClass(pos) => OR::RmissingClass(pos.to_oxidized(arena)), RI::Rinvalid => OR::Rinvalid, } } } impl EqModuloPos for BReason { fn eq_modulo_pos(&self, rhs: &Self) -> bool { self.0.eq_modulo_pos(&rhs.0) } fn eq_modulo_pos_and_reason(&self, _rhs: &Self) -> bool { true } } /// A stateless sentinal Reason. #[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct NReason; impl Reason for NReason { type Pos = NPos; fn mk(_cons: impl FnOnce() -> ReasonImpl<Self, Self::Pos>) -> Self { NReason } fn none() -> Self { NReason } fn pos(&self) -> &NPos { &NPos } #[inline] fn decl_ty_conser() -> &'static Conser<decl::Ty_<NReason>> { static CONSER: Lazy<Conser<decl::Ty_<NReason>>> = Lazy::new(Conser::new); &CONSER } #[inline] fn local_ty_conser() -> &'static Conser<local::Ty_<NReason, local::Ty<NReason>>> { static CONSER: Lazy<Conser<local::Ty_<NReason, local::Ty<NReason>>>> = Lazy::new(Conser::new); &CONSER } #[inline] fn prop_conser() -> &'static Conser<PropF<NReason, Prop<NReason>>> { static CONSER: Lazy<Conser<PropF<NReason, Prop<NReason>>>> = Lazy::new(Conser::new); &CONSER } } impl Walkable<NReason> for NReason {} impl<'a> From<oxidized_by_ref::typing_reason::T_<'a>> for NReason { fn from(reason: oxidized_by_ref::typing_reason::T_<'a>) -> Self { Self::from_oxidized(reason) } } impl<'a> ToOxidized<'a> for NReason { type Output = oxidized_by_ref::typing_reason::Reason<'a>; fn to_oxidized(&self, _arena: &'a bumpalo::Bump) -> Self::Output { oxidized_by_ref::typing_reason::Reason::Rnone } } impl ToOcamlRep for NReason { fn to_ocamlrep<'a, A: ocamlrep::Allocator>(&'a self, alloc: &'a A) -> ocamlrep::Value<'a> { oxidized_by_ref::typing_reason::Reason::Rnone.to_ocamlrep(alloc) } } impl FromOcamlRep for NReason { fn from_ocamlrep(_value: ocamlrep::Value<'_>) -> Result<Self, ocamlrep::FromError> { Ok(Self) } } impl EqModuloPos for NReason { fn eq_modulo_pos(&self, _rhs: &Self) -> bool { true } fn eq_modulo_pos_and_reason(&self, _rhs: &Self) -> bool { 
true } }
Rust
hhvm/hphp/hack/src/hackrs/ty/ty.rs
// Crate root: module declarations only.
//
// `#[macro_use]` makes the `walkable!` macro defined in `visitor` available
// throughout the rest of the crate.
#[macro_use]
pub mod visitor;

pub mod decl;
pub mod decl_error;
pub mod local;
pub mod local_error;
// Private: OCaml interop glue, not part of the public API.
mod ocamlrep;
pub mod prop;
pub mod reason;
Rust
hhvm/hphp/hack/src/hackrs/ty/visitor.rs
// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.

use crate::decl;
use crate::local;
use crate::reason::Reason;

/// A type which can be traversed by a `Visitor`.
///
/// `accept` is the dispatch point: implementations generated with
/// `walkable!(T as visit_t => ...)` override it to call the corresponding
/// `Visitor::visit_*` method; the default simply recurses. `recurse` walks
/// the node's children (default: a leaf with no children).
pub trait Walkable<R: Reason> {
    fn accept(&self, v: &mut dyn Visitor<R>) {
        self.recurse(v);
    }
    fn recurse(&self, _v: &mut dyn Visitor<R>) {}
}

/// A visitor over data structures containing decls or types.
///
/// Each `visit_*` method defaults to recursing into the node's children;
/// override to handle (and optionally stop at) nodes of that type.
pub trait Visitor<R: Reason> {
    /// Must return `self`.
    // NOTE(review): presumably exists so default methods can obtain a
    // `&mut dyn Visitor<R>` from `&mut self` — confirm.
    fn object(&mut self) -> &mut dyn Visitor<R>;

    fn visit_pos(&mut self, _: &R::Pos) {}
    fn visit_symbol(&mut self, _: &pos::Symbol) {}
    fn visit_type_name(&mut self, o: &pos::TypeName) {
        o.recurse(self.object());
    }
    fn visit_module_name(&mut self, o: &pos::ModuleName) {
        o.recurse(self.object());
    }
    fn visit_const_name(&mut self, o: &pos::ConstName) {
        o.recurse(self.object());
    }
    fn visit_fun_name(&mut self, o: &pos::FunName) {
        o.recurse(self.object());
    }
    fn visit_class_const_name(&mut self, o: &pos::ClassConstName) {
        o.recurse(self.object());
    }
    fn visit_type_const_name(&mut self, o: &pos::TypeConstName) {
        o.recurse(self.object());
    }
    fn visit_method_name(&mut self, o: &pos::MethodName) {
        o.recurse(self.object());
    }
    fn visit_prop_name(&mut self, o: &pos::PropName) {
        o.recurse(self.object());
    }
    fn visit_decl_ty(&mut self, o: &decl::Ty<R>) {
        o.recurse(self.object());
    }
    fn visit_local_ty(&mut self, o: &local::Ty<R>) {
        o.recurse(self.object());
    }
    fn visit_decl(&mut self, o: &crate::decl::shallow::Decl<R>) {
        o.recurse(self.object());
    }
    fn visit_named_decl(&mut self, o: &crate::decl::shallow::NamedDecl<R>) {
        o.recurse(self.object());
    }
    fn visit_shallow_class(&mut self, o: &crate::decl::ShallowClass<R>) {
        o.recurse(self.object());
    }
}

// Blanket impls: containers walk their contents; smart pointers and
// references walk their referents.

impl<R: Reason, T: Walkable<R>> Walkable<R> for Option<T> {
    fn recurse(&self, v: &mut dyn Visitor<R>) {
        match self {
            Some(some) => some.accept(v),
            None => {}
        }
    }
}

impl<R: Reason, T: Walkable<R> + ?Sized> Walkable<R> for &T {
    fn recurse(&self, v: &mut dyn Visitor<R>) {
        let obj: &T = self;
        obj.accept(v)
    }
}

impl<R: Reason, T: Walkable<R> + ?Sized> Walkable<R> for Box<T> {
    fn recurse(&self, v: &mut dyn Visitor<R>) {
        let obj: &T = self;
        obj.accept(v)
    }
}

impl<R: Reason, T: Walkable<R>> Walkable<R> for [T] {
    fn recurse(&self, v: &mut dyn Visitor<R>) {
        for obj in self {
            obj.accept(v);
        }
    }
}

impl<R: Reason, T: Walkable<R>> Walkable<R> for Vec<T> {
    fn recurse(&self, v: &mut dyn Visitor<R>) {
        for obj in self {
            obj.accept(v);
        }
    }
}

impl<R: Reason, K: Walkable<R>, V: Walkable<R>> Walkable<R> for std::collections::BTreeMap<K, V> {
    fn recurse(&self, v: &mut dyn Visitor<R>) {
        for (key, val) in self {
            key.accept(v);
            val.accept(v);
        }
    }
}

impl<R: Reason, T: Walkable<R>> Walkable<R> for hcons::Hc<T> {
    fn recurse(&self, v: &mut dyn Visitor<R>) {
        let obj: &T = self;
        obj.accept(v)
    }
}

impl<R: Reason, S: Copy + Walkable<R>> Walkable<R> for pos::Positioned<S, R::Pos> {
    // Visit the position first, then walk the identifier it annotates.
    fn recurse(&self, v: &mut dyn Visitor<R>) {
        v.visit_pos(self.pos());
        self.id_ref().accept(v);
    }
}

/// Generate an impl of `Walkable<R>` for the given type which recurses on the
/// given fields.
///
/// # Examples
///
/// Suppose we have this struct definition:
///
///     struct Foo<R: Reason> {
///         pos: R::Pos,
///         ty: Ty<R>,
///         constraint: Ty<R>,
///     }
///
/// We can generate an impl of `Walkable<R>` for `Foo<R>` like this:
///
///     walkable!(Foo<R> => [ty, constraint]);
///
/// The macro will expand to something like the following:
///
///     impl<R: Reason> Walkable<R> for Foo<R> {
///         fn recurse(&self, v: &mut dyn Visitor<R>) {
///             self.ty.accept(v);
///             self.constraint.accept(v);
///         }
///     }
///
/// Note that the macro implicitly introduces the type parameter `R`.
///
/// If the type is one which a `Visitor` may be interested in handling, add a
/// `visit_` method to the `Visitor` trait, and reference that method with the
/// `as` keyword in the `walkable!` macro:
///
///     walkable!(Foo<R> as visit_foo => [ty, constraint]);
///
/// This will expand to:
///
///     impl<R: Reason> Walkable<R> for Foo<R> {
///         fn accept(&self, v: &mut dyn crate::visitor::Visitor<R>) {
///             v.visit_foo(self);
///         }
///         fn recurse(&self, v: &mut dyn Visitor<R>) {
///             self.ty.accept(v);
///             self.constraint.accept(v);
///         }
///     }
///
/// If the type has type parameters other than `R`:
///
///     struct Foo<R: Reason, T> {
///         pos: R::Pos,
///         ty: T,
///         constraint: T,
///     }
///
/// Use the `impl` and `for` keywords to introduce all type parameters. Note
/// that the `R: Reason` parameter is no longer implicitly introduced:
///
///     walkable!(impl<R: Reason, T> for Foo<R, T> as visit_foo => [ty, constraint]);
///
/// For enums:
///
///     enum Typeconst<R: Reason> {
///         Abstract(AbstractTypeconst<R>),
///         Concrete(ConcreteTypeconst<R>),
///     }
///
/// Write a list of `pattern => [fields]` arms in curly braces:
///
///     walkable!(Typeconst<R> as visit_typeconst => {
///         Self::Abstract(at) => [at],
///         Self::Concrete(ct) => [ct],
///     });
///
/// For leaves (structures which cannot contain the types we are interested in
/// visiting), either 1) don't implement `Walkable<R>`, and don't specify fields
/// of that type in implementations of `Walkable<R>` for other types (as done
/// with the field `pos` in `Foo<R>` in the example above), or 2) use
/// `walkable!` to generate a no-op implementation of `Walkable<R>` (when not
/// implementing `Walkable<R>` would be inconvenient):
///
///     #[derive(Ord, PartialOrd)]
///     enum Kind { A, B, C, D }
///     struct Bar<R> { map: BTreeMap<Kind, Ty<R>> }
///     walkable!(Bar<R> => [map]); // requires Kind : Walkable<R>
///     walkable!(Kind);
///
/// This leaf-node use expands to:
///
///     impl<R: Reason> Walkable<R> for Kind {}
macro_rules!
walkable { ( @ACCEPT($r:ident, $visit:ident) ) => { fn accept(& self, v: &mut dyn $crate::visitor::Visitor<$r>) { v.$visit(self); } }; ( @STRUCT($r:ident, $reason_bound:path, [$($gen:ident)*], $name:ty, $({$accept:item},)? [$($e:tt)*]) ) => { impl<$r: $reason_bound $( , $gen: $crate::visitor::Walkable<$r> )* > $crate::visitor::Walkable<$r> for $name { $($accept)* #[allow(unused_variables)] fn recurse(&self, v: &mut dyn $crate::visitor::Visitor<$r>) { $( self.$e.accept(v); )* } } }; ( @ENUM($r:ident, $reason_bound:path, [$($gen:ident)*], $name:ty, $({$accept:item},)? [$( $variant:pat, [$($e:tt)*] )*]) ) => { impl<$r: $reason_bound $( , $gen: $crate::visitor::Walkable<$r> )* > $crate::visitor::Walkable<$r> for $name { $($accept)* #[allow(unused_variables)] fn recurse(& self, v: &mut dyn $crate::visitor::Visitor<$r>) { match self { $( $variant => { $( $e.accept(v); )* } )* } } } }; ( impl < $r:ident : $bound:path $( , $gen:ident )* $(,)? > for $name:ty as $visit:ident => [ $($e:tt),* $(,)? ] ) => { walkable! { @STRUCT($r, $bound, [$($gen)*], $name, {walkable!{ @ACCEPT($r, $visit) }}, [$($e)*]) } }; ( impl < $r:ident : $bound:path $( , $gen:ident )* $(,)? > for $name:ty => [ $($e:tt),* $(,)? ] ) => { walkable! { @STRUCT($r, $bound, [$($gen)*], $name, [$($e)*]) } }; ( impl < $r:ident : $bound:path $( , $gen:ident )* $(,)? > for $name:ty as $visit:ident => { $( $variant:pat => [ $($e:tt),* $(,)? ] ),* $(,)? } ) => { walkable! { @ENUM($r, $crate::reason::Reason, [$($gen)*], $name, {walkable!{ @ACCEPT($r, $visit) }}, [$($variant, [$($e)*])*]) } }; ( impl < $r:ident : $bound:path $( , $gen:ident )* $(,)? > for $name:ty => { $( $variant:pat => [ $($e:tt),* $(,)? ] ),* $(,)? } ) => { walkable! { @ENUM($r, $crate::reason::Reason, [$($gen)*], $name, [$($variant, [$($e)*])*]) } }; ( $name:ty as $visit:ident => [ $($e:tt),* $(,)? ] ) => { walkable! { @STRUCT(R, $crate::reason::Reason, [], $name, {walkable!{ @ACCEPT(R, $visit) }}, [$($e)*]) } }; ( $name:ty => [ $($e:tt),* $(,)? 
] ) => { walkable! { @STRUCT(R, $crate::reason::Reason, [], $name, [$($e)*]) } }; ( $name:ty as $visit:ident => { $( $variant:pat => [ $($e:tt),* $(,)? ] ),* $(,)? } ) => { walkable! { @ENUM(R, $crate::reason::Reason, [], $name, {walkable!{ @ACCEPT(R, $visit) }}, [$($variant, [$($e)*])*]) } }; ( $name:ty => { $( $variant:pat => [ $($e:tt),* $(,)? ] ),* $(,)? } ) => { walkable! { @ENUM(R, $crate::reason::Reason, [], $name, [$($variant, [$($e)*])*]) } }; ( $name:ty as $visit:ident) => { walkable! { @STRUCT(R, $crate::reason::Reason, [], $name, {walkable!{ @ACCEPT(R, $visit) }}, []) } }; ( $name:ty ) => { walkable! { @STRUCT(R, $crate::reason::Reason, [], $name, []) } }; } walkable!(isize); walkable!(bool); walkable!(String); walkable!(impl<R: Reason, A, B> for (A, B) => [0, 1]); walkable!(impl<R: Reason, A, B, C> for (A, B, C) => [0, 1, 2]); walkable!(impl<R: Reason, A, B, C, D> for (A, B, C, D) => [0, 1, 2, 3]); walkable!(oxidized::file_info::Mode); walkable!(pos::Symbol as visit_symbol => []); walkable!(pos::TypeName as visit_type_name => [0]); walkable!(pos::ModuleName as visit_module_name => [0]); walkable!(pos::ConstName as visit_const_name => [0]); walkable!(pos::FunName as visit_fun_name => [0]); walkable!(pos::ClassConstName as visit_class_const_name => [0]); walkable!(pos::TypeConstName as visit_type_const_name => [0]); walkable!(pos::MethodName as visit_method_name => [0]); walkable!(pos::PropName as visit_prop_name => [0]); walkable!(crate::decl::ty::ClassishKind);
TOML
hhvm/hphp/hack/src/hackrs/ty/cargo/ty/Cargo.toml
# @generated by autocargo [package] name = "ty" version = "0.0.0" edition = "2021" [lib] path = "../../ty.rs" [dependencies] arena_collections = { version = "0.0.0", path = "../../../../arena_collections" } bumpalo = { version = "3.11.1", features = ["collections"] } eq_modulo_pos = { version = "0.0.0", path = "../../../../utils/eq_modulo_pos" } hash = { version = "0.0.0", path = "../../../../utils/hash" } hcons = { version = "0.0.0", path = "../../../../hcons" } im = { version = "15.1", features = ["rayon", "serde"] } ocamlrep = { version = "0.1.0", git = "https://github.com/facebook/ocamlrep/", branch = "main" } once_cell = "1.12" oxidized = { version = "0.0.0", path = "../../../../oxidized" } oxidized_by_ref = { version = "0.0.0", path = "../../../../oxidized_by_ref" } pos = { version = "0.0.0", path = "../../../pos/cargo/pos" } serde = { version = "1.0.176", features = ["derive", "rc"] } static_assertions = "1.1.0" utils = { version = "0.0.0", path = "../../../utils/cargo/utils" }
Rust
hhvm/hphp/hack/src/hackrs/ty/decl/debug.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use std::fmt; use super::folded::FoldedClass; use super::shallow::ShallowClass; use crate::reason::Reason; // Our Class structs have a lot of fields, but in a lot of cases, most of them // will have empty or default values, making Debug output very noisy. These // manual Debug impls omit fields with empty values, hopefully making the Debug // output easier to read. impl<R: Reason> fmt::Debug for ShallowClass<R> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let ShallowClass { mode, is_final, is_abstract, is_internal, is_xhp, has_xhp_keyword, kind, module, name, tparams, where_constraints, extends, uses, xhp_attr_uses, xhp_enum_values, xhp_marked_empty, req_extends, req_implements, req_class, implements, support_dynamic_type, consts, typeconsts, props, static_props, constructor, static_methods, methods, user_attributes, enum_type, docs_url, } = self; let mut s = f.debug_struct("ShallowClass"); if *mode != oxidized::file_info::Mode::Mstrict { s.field("mode", mode); } if *is_final { s.field("is_final", is_final); } if *is_abstract { s.field("is_abstract", is_abstract); } if *is_internal { s.field("is_internal", is_internal); } if *is_xhp { s.field("is_xhp", is_xhp); } if *has_xhp_keyword { s.field("has_xhp_keyword", has_xhp_keyword); } s.field("kind", kind); if let Some(module) = module { s.field("module", module); } s.field("name", name); if !tparams.is_empty() { s.field("tparams", tparams); } if !where_constraints.is_empty() { s.field("where_constraints", where_constraints); } if !extends.is_empty() { s.field("extends", extends); } if !uses.is_empty() { s.field("uses", uses); } if !xhp_attr_uses.is_empty() { s.field("xhp_attr_uses", xhp_attr_uses); } if !xhp_enum_values.is_empty() { s.field("xhp_enum_values", xhp_enum_values); } if *xhp_marked_empty { 
s.field("xhp_marked_empty", xhp_marked_empty); } if !req_extends.is_empty() { s.field("req_extends", req_extends); } if !req_implements.is_empty() { s.field("req_implements", req_implements); } if !req_class.is_empty() { s.field("req_class", req_class); } if !implements.is_empty() { s.field("implements", implements); } if *support_dynamic_type { s.field("support_dynamic_type", support_dynamic_type); } if !consts.is_empty() { s.field("consts", consts); } if !typeconsts.is_empty() { s.field("typeconsts", typeconsts); } if !props.is_empty() { s.field("props", props); } if !static_props.is_empty() { s.field("static_props", static_props); } if let Some(constructor) = constructor { s.field("constructor", constructor); } if !static_methods.is_empty() { s.field("static_methods", static_methods); } if !methods.is_empty() { s.field("methods", methods); } if !user_attributes.is_empty() { s.field("user_attributes", user_attributes); } if let Some(enum_type) = enum_type { s.field("enum_type", enum_type); } if let Some(docs_url) = docs_url { s.field("docs_url", docs_url); } s.finish() } } impl<R: Reason> fmt::Debug for FoldedClass<R> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let FoldedClass { name, pos, kind, is_final, is_const, is_internal, is_xhp, has_xhp_keyword, support_dynamic_type, enum_type, module, is_module_level_trait, tparams, where_constraints, substs, ancestors, props, static_props, methods, static_methods, constructor, consts, type_consts, xhp_enum_values, xhp_marked_empty, extends, xhp_attr_deps, req_ancestors, req_ancestors_extends, req_class_ancestors, sealed_whitelist, deferred_init_members, decl_errors, docs_url, } = self; let mut s = f.debug_struct("FoldedClass"); s.field("name", name); if std::mem::size_of::<R::Pos>() != 0 { s.field("pos", pos); } s.field("kind", kind); if *is_final { s.field("is_final", is_final); } if *is_const { s.field("is_const", is_const); } if *is_internal { s.field("is_internal", is_internal); } if *is_xhp { 
s.field("is_xhp", is_xhp); } if *has_xhp_keyword { s.field("has_xhp_keyword", has_xhp_keyword); } if *support_dynamic_type { s.field("support_dynamic_type", support_dynamic_type); } if let Some(enum_type) = enum_type { s.field("enum_type", enum_type); } if let Some(module) = module { s.field("module", module); } if *is_module_level_trait { s.field("is_module_level_trait", is_module_level_trait); } if !tparams.is_empty() { s.field("tparams", tparams); } if !where_constraints.is_empty() { s.field("where_constraints", where_constraints); } if !substs.is_empty() { s.field("substs", substs); } if !ancestors.is_empty() { s.field("ancestors", ancestors); } if !props.is_empty() { s.field("props", props); } if !static_props.is_empty() { s.field("static_props", static_props); } if !methods.is_empty() { s.field("methods", methods); } if !static_methods.is_empty() { s.field("static_methods", static_methods); } if let Some(elt) = &constructor.elt { s.field("constructor", &(elt, constructor.consistency)); } if !consts.is_empty() { s.field("consts", consts); } if !type_consts.is_empty() { s.field("type_consts", type_consts); } if !xhp_enum_values.is_empty() { s.field("xhp_enum_values", xhp_enum_values); } if *xhp_marked_empty { s.field("xhp_marked_empty", xhp_marked_empty); } if !extends.is_empty() { s.field("extends", extends); } if !xhp_attr_deps.is_empty() { s.field("xhp_attr_deps", xhp_attr_deps); } if !req_ancestors.is_empty() { s.field("req_ancestors", req_ancestors); } if !req_ancestors_extends.is_empty() { s.field("req_ancestors_extends", req_ancestors_extends); } if !req_class_ancestors.is_empty() { s.field("req_class_ancestors", req_class_ancestors); } if let Some(sealed_whitelist) = sealed_whitelist { s.field("sealed_whitelist", sealed_whitelist); } if !deferred_init_members.is_empty() { s.field("deferred_init_members", deferred_init_members); } if !decl_errors.is_empty() { s.field("decl_errors", decl_errors); } if let Some(docs_url) = docs_url { s.field("docs_url", 
docs_url); } s.finish() } }
Rust
hhvm/hphp/hack/src/hackrs/ty/decl/folded.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use std::collections::BTreeMap; use std::fmt; use eq_modulo_pos::EqModuloPos; use hash::IndexMap; use hash::IndexSet; use ocamlrep::FromOcamlRep; use ocamlrep::ToOcamlRep; pub use oxidized::ast_defs::Abstraction; pub use oxidized::ast_defs::ClassishKind; use pos::Bytes; use pos::ClassConstName; use pos::MethodName; use pos::ModuleName; use pos::Positioned; use pos::PropName; use pos::Symbol; use pos::TypeConstName; use pos::TypeName; use serde::Deserialize; use serde::Serialize; pub use crate::decl::subst::Subst; use crate::decl::ty::ConsistentKind; use crate::decl::ty::Enforceable; use crate::decl::ty::XhpEnumValue; use crate::decl::CeVisibility; use crate::decl::ClassConstKind; use crate::decl::ClassConstRef; use crate::decl::ClassEltFlags; use crate::decl::EnumType; use crate::decl::Tparam; use crate::decl::Ty; use crate::decl::Typeconst; use crate::decl::WhereConstraint; use crate::decl::XhpAttribute; use crate::decl_error::DeclError; use crate::reason::Reason; #[derive(Debug, Clone, Eq, EqModuloPos, PartialEq, Serialize, Deserialize)] #[derive(ToOcamlRep, FromOcamlRep)] pub struct FoldedElement { // note(sf, 2022-01-28): c.f. `Decl_defs.element` pub flags: ClassEltFlags, pub origin: TypeName, pub visibility: CeVisibility, /// If the element is deprecated, this holds the deprecation message. pub deprecated: Option<Bytes>, } /// A substitution context contains all the information necessary for changing /// the type of an inherited class element to the class that is inheriting the /// class element. It's best illustrated via an example. 
/// ``` /// class A<Ta1, Ta2> { public function test(Ta1 $x, Ta2 $y): void {} } /// class B<Tb> extends A<Tb, int> {} /// class C extends B<string> {} /// ``` /// The method `A::test()` has the type `(function(Ta1, Ta2): void)` in the /// context of class `A`. However in the context of class `B`, it will have type /// `(function(Tb, int): void)`. /// /// The substitution that leads to this change is [Ta1 -> Tb, Ta2 -> int], which /// will produce a new type in the context of class B. It's subst_context would /// then be: /// /// ``` /// { subst = [Ta1 -> Tb, Ta2 -> int]; /// class_context = 'B'; /// from_req_extends = false; /// } /// ``` /// /// The `from_req_extends` field is set to` true` if the context was inherited /// via a require extends type. This information is relevant when folding /// `substs` during inheritance. See the `inherit` module. #[derive(Debug, Clone, Eq, EqModuloPos, PartialEq, Serialize, Deserialize)] #[derive(ToOcamlRep, FromOcamlRep)] #[serde(bound = "R: Reason")] pub struct SubstContext<R: Reason> { // note(sf, 2022-01-28): c.f. `Decl_defs.subst_context` pub subst: Subst<R>, pub class_context: TypeName, pub from_req_extends: bool, } impl<R: Reason> SubstContext<R> { pub fn set_from_req_extends(&mut self, p: bool) { self.from_req_extends = p; } } #[derive(Debug, Clone, Eq, EqModuloPos, PartialEq, Serialize, Deserialize)] #[derive(ToOcamlRep, FromOcamlRep)] #[serde(bound = "R: Reason")] pub struct TypeConst<R: Reason> { // note(sf, 2022-02-08): c.f. 
`Typing_defs.typeconst_type` pub is_synthesized: bool, pub name: Positioned<TypeConstName, R::Pos>, pub kind: Typeconst<R>, // abstract or concrete pub origin: TypeName, pub enforceable: Enforceable<R::Pos>, pub reifiable: Option<R::Pos>, // When Some, points to __Reifiable attribute pub is_concretized: bool, pub is_ctx: bool, } impl<R: Reason> TypeConst<R> { pub fn is_enforceable(&self) -> bool { self.enforceable.is_some() } pub fn is_reifiable(&self) -> bool { self.reifiable.is_some() } } #[derive(Debug, Clone, Eq, EqModuloPos, PartialEq, Serialize, Deserialize)] #[derive(ToOcamlRep, FromOcamlRep)] #[serde(bound = "R: Reason")] pub struct ClassConst<R: Reason> { // note(sf, 2022-02-08): c.f. `Typing_defs.class_const` pub is_synthesized: bool, pub kind: ClassConstKind, pub pos: R::Pos, pub ty: Ty<R>, pub origin: TypeName, // Identifies the class from which this const originates pub refs: Box<[ClassConstRef]>, } impl<R: Reason> ClassConst<R> { pub fn set_is_synthesized(&mut self, p: bool) { self.is_synthesized = p; } } impl<R: Reason> TypeConst<R> { pub fn set_is_synthesized(&mut self, p: bool) { self.is_synthesized = p; } } /// The position is that of the hint in the `use` / `implements` AST node /// that causes a class to have this requirement applied to it. E.g. 
/// /// ``` /// class Foo {} /// /// interface Bar { /// require extends Foo; <- position of the decl_phase ty /// } /// /// class Baz extends Foo implements Bar { <- position of the `implements` /// } /// ``` #[derive(Clone, Eq, EqModuloPos, PartialEq, Serialize, Deserialize)] #[derive(ToOcamlRep, FromOcamlRep)] #[serde(bound = "R: Reason")] pub struct Requirement<R: Reason> { pub pos: R::Pos, pub ty: Ty<R>, } #[derive(Clone, Debug, Eq, EqModuloPos, PartialEq, Serialize, Deserialize)] #[derive(ToOcamlRep, FromOcamlRep)] pub struct Constructor { pub elt: Option<FoldedElement>, pub consistency: ConsistentKind, } #[derive(Clone, Eq, EqModuloPos, PartialEq, Serialize, Deserialize)] #[serde(bound = "R: Reason")] pub struct FoldedClass<R: Reason> { // note(sf, 2022-01-27): c.f. `Decl_defs.decl_class_type` pub name: TypeName, pub pos: R::Pos, pub kind: ClassishKind, pub is_final: bool, pub is_const: bool, pub is_internal: bool, pub is_xhp: bool, pub has_xhp_keyword: bool, pub support_dynamic_type: bool, pub enum_type: Option<EnumType<R>>, pub module: Option<Positioned<ModuleName, R::Pos>>, pub is_module_level_trait: bool, pub tparams: Box<[Tparam<R, Ty<R>>]>, pub where_constraints: Box<[WhereConstraint<Ty<R>>]>, pub substs: IndexMap<TypeName, SubstContext<R>>, pub ancestors: IndexMap<TypeName, Ty<R>>, pub props: IndexMap<PropName, FoldedElement>, pub static_props: IndexMap<PropName, FoldedElement>, pub methods: IndexMap<MethodName, FoldedElement>, pub static_methods: IndexMap<MethodName, FoldedElement>, pub constructor: Constructor, pub consts: IndexMap<ClassConstName, ClassConst<R>>, pub type_consts: IndexMap<TypeConstName, TypeConst<R>>, pub xhp_enum_values: BTreeMap<Symbol, Box<[XhpEnumValue]>>, pub xhp_marked_empty: bool, pub extends: IndexSet<TypeName>, pub xhp_attr_deps: IndexSet<TypeName>, pub req_ancestors: Box<[Requirement<R>]>, pub req_ancestors_extends: IndexSet<TypeName>, /// `req_class_ancestors` gathers all the `require class` requirements /// declared in 
self and ancestors. Note that `require class` requirements /// are _not_ stored in `req_ancestors` or `req_ancestors_extends` fields. pub req_class_ancestors: Box<[Requirement<R>]>, pub sealed_whitelist: Option<IndexSet<TypeName>>, pub deferred_init_members: IndexSet<PropName>, pub decl_errors: Box<[DeclError<R::Pos>]>, pub docs_url: Option<String>, } impl<R: Reason> FoldedClass<R> { // c.f. `Decl_folded_class.class_is_abstract` pub fn is_abstract(&self) -> bool { match self.kind { ClassishKind::Cclass(abstraction) | ClassishKind::CenumClass(abstraction) => { abstraction.is_abstract() } ClassishKind::Cinterface | ClassishKind::Ctrait | ClassishKind::Cenum => true, } } // c.f. `Decl_defs.dc_need_init`, via `has_concrete_cstr` in `Decl_folded_class.class_decl` pub fn has_concrete_constructor(&self) -> bool { match &self.constructor.elt { Some(elt) => elt.is_concrete(), None => false, } } } impl FoldedElement { pub fn is_concrete(&self) -> bool { !self.is_abstract() } pub fn is_abstract(&self) -> bool { self.flags.contains(ClassEltFlags::ABSTRACT) } pub fn set_is_abstract(&mut self, p: bool) { self.flags.set(ClassEltFlags::ABSTRACT, p) } pub fn is_final(&self) -> bool { self.flags.contains(ClassEltFlags::FINAL) } pub fn set_is_final(&mut self, p: bool) { self.flags.set(ClassEltFlags::FINAL, p) } pub fn is_superfluous_override(&self) -> bool { self.flags.contains(ClassEltFlags::SUPERFLUOUS_OVERRIDE) } pub fn set_is_superfluous_override(&mut self, p: bool) { self.flags.set(ClassEltFlags::SUPERFLUOUS_OVERRIDE, p) } pub fn is_lsb(&self) -> bool { self.flags.contains(ClassEltFlags::LSB) } pub fn set_is_lsb(&mut self, p: bool) { self.flags.set(ClassEltFlags::LSB, p) } pub fn is_synthesized(&self) -> bool { self.flags.contains(ClassEltFlags::SYNTHESIZED) } pub fn set_is_synthesized(&mut self, p: bool) { self.flags.set(ClassEltFlags::SYNTHESIZED, p) } pub fn is_const(&self) -> bool { self.flags.contains(ClassEltFlags::CONST) } pub fn set_is_const(&mut self, p: bool) { 
self.flags.set(ClassEltFlags::CONST, p) } pub fn is_lateinit(&self) -> bool { self.flags.contains(ClassEltFlags::LATEINIT) } pub fn set_is_lateinit(&mut self, p: bool) { self.flags.set(ClassEltFlags::LATEINIT, p) } pub fn is_dynamicallycallable(&self) -> bool { self.flags.contains(ClassEltFlags::DYNAMICALLYCALLABLE) } pub fn set_is_dynamicallycallable(&mut self, p: bool) { self.flags.set(ClassEltFlags::DYNAMICALLYCALLABLE, p) } pub fn supports_dynamic_type(&self) -> bool { self.flags.contains(ClassEltFlags::SUPPORT_DYNAMIC_TYPE) } pub fn set_supports_dynamic_type(&mut self, p: bool) { self.flags.set(ClassEltFlags::SUPPORT_DYNAMIC_TYPE, p) } pub fn is_readonly_prop(&self) -> bool { self.flags.contains(ClassEltFlags::READONLY_PROP) } pub fn set_is_readonly_prop(&mut self, p: bool) { self.flags.set(ClassEltFlags::READONLY_PROP, p) } pub fn needs_init(&self) -> bool { self.flags.contains(ClassEltFlags::NEEDS_INIT) } pub fn set_needs_init(&mut self, p: bool) { self.flags.set(ClassEltFlags::NEEDS_INIT, p) } pub fn get_xhp_attr(&self) -> Option<XhpAttribute> { self.flags.get_xhp_attr() } } impl<R: Reason> Requirement<R> { pub fn new(pos: R::Pos, ty: Ty<R>) -> Self { Self { pos, ty } } } impl<R: Reason> fmt::Debug for Requirement<R> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("Requirement") .field(&self.pos) .field(&self.ty) .finish() } } impl Constructor { pub fn new(elt: Option<FoldedElement>, consistency: ConsistentKind) -> Self { Self { elt, consistency } } }
Rust
hhvm/hphp/hack/src/hackrs/ty/decl/from_oxidized.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use oxidized_by_ref as obr; use pos::Pos; use crate::decl; use crate::decl::folded; use crate::decl::shallow; use crate::decl::ty; use crate::decl::Ty; use crate::decl::Ty_; use crate::reason::Reason; #[inline] fn slice<T: Copy + Into<U>, U>(items: &[T]) -> Box<[U]> { items.iter().copied().map(Into::into).collect() } #[inline] fn map<'a, K1, V1, K2, V2, M>(items: impl Iterator<Item = (&'a K1, &'a V1)>) -> M where K1: Copy + Into<K2> + 'a, V1: Copy + Into<V2> + 'a, M: FromIterator<(K2, V2)>, { items.map(|(&k, &v)| (k.into(), v.into())).collect() } impl From<obr::ast_defs::XhpEnumValue<'_>> for ty::XhpEnumValue { fn from(x: obr::ast_defs::XhpEnumValue<'_>) -> Self { use obr::ast_defs::XhpEnumValue as Obr; match x { Obr::XEVInt(i) => Self::XEVInt(i), Obr::XEVString(s) => Self::XEVString(s.into()), } } } impl From<obr::typing_defs::CeVisibility<'_>> for ty::CeVisibility { fn from(x: obr::typing_defs::CeVisibility<'_>) -> Self { use obr::typing_defs::CeVisibility as Obr; match x { Obr::Vpublic => Self::Public, Obr::Vprivate(s) => Self::Private(s.into()), Obr::Vprotected(s) => Self::Protected(s.into()), Obr::Vinternal(s) => Self::Internal(s.into()), } } } impl From<obr::typing_defs::IfcFunDecl<'_>> for ty::IfcFunDecl { fn from(x: obr::typing_defs::IfcFunDecl<'_>) -> Self { use obr::typing_defs_core::IfcFunDecl as Obr; match x { Obr::FDPolicied(s) => Self::FDPolicied(s.map(Into::into)), Obr::FDInferFlows => Self::FDInferFlows, } } } fn tshape_field_name_from_decl<P: Pos>( x: obr::typing_defs::TshapeFieldName<'_>, ) -> (ty::ShapeFieldNamePos<P>, ty::TshapeFieldName) { use obr::typing_defs_core::TshapeFieldName as Obr; use ty::ShapeFieldNamePos as SfnPos; use ty::TshapeFieldName; match x { Obr::TSFlitInt(&pos_id) => ( SfnPos::Simple(pos_id.0.into()), 
TshapeFieldName::TSFlitInt(pos_id.1.into()), ), Obr::TSFlitStr(&pos_bytes) => ( SfnPos::Simple(pos_bytes.0.into()), TshapeFieldName::TSFlitStr(pos_bytes.1.into()), ), Obr::TSFclassConst(&(pos_id1, pos_id2)) => ( SfnPos::ClassConst(pos_id1.0.into(), pos_id2.0.into()), TshapeFieldName::TSFclassConst(pos_id1.1.into(), pos_id2.1.into()), ), } } impl From<&obr::typing_defs::UserAttributeParam<'_>> for ty::UserAttributeParam { fn from(attr: &obr::typing_defs::UserAttributeParam<'_>) -> Self { use obr::typing_defs::UserAttributeParam as UAP; match *attr { UAP::Classname(cn) => Self::Classname(cn.into()), UAP::EnumClassLabel(l) => Self::EnumClassLabel(l.into()), UAP::String(s) => Self::String(s.into()), UAP::Int(i) => Self::Int(i.into()), } } } impl<P: Pos> From<&obr::typing_defs::UserAttribute<'_>> for ty::UserAttribute<P> { fn from(attr: &obr::typing_defs::UserAttribute<'_>) -> Self { Self { name: attr.name.into(), params: (attr.params.iter()).map(Into::into).collect(), } } } impl<R: Reason> From<&obr::typing_defs::Tparam<'_>> for ty::Tparam<R, Ty<R>> { fn from(tparam: &obr::typing_defs::Tparam<'_>) -> Self { Self { variance: tparam.variance, name: tparam.name.into(), tparams: slice(tparam.tparams), constraints: (tparam.constraints.iter()) .map(|(kind, ty)| (*kind, (*ty).into())) .collect(), reified: tparam.reified, user_attributes: slice(tparam.user_attributes), } } } impl<R: Reason> From<&obr::typing_defs::WhereConstraint<'_>> for ty::WhereConstraint<Ty<R>> { fn from(x: &obr::typing_defs::WhereConstraint<'_>) -> Self { Self(x.0.into(), x.1, x.2.into()) } } fn decl_shape_field_type<R: Reason>( field_name_pos: ty::ShapeFieldNamePos<R::Pos>, sft: &obr::typing_defs::ShapeFieldType<'_>, ) -> ty::ShapeFieldType<R> { ty::ShapeFieldType { field_name_pos, optional: sft.optional, ty: sft.ty.into(), } } impl<R: Reason> From<&obr::typing_defs::Ty<'_>> for Ty<R> { fn from(ty: &obr::typing_defs::Ty<'_>) -> Self { use obr::typing_defs_core; use Ty_::*; let reason = R::from(*ty.0); 
let ty_ = match ty.1 { typing_defs_core::Ty_::Tthis => Tthis, typing_defs_core::Ty_::Tapply(&(pos_id, tys)) => { Tapply(Box::new((pos_id.into(), slice(tys)))) } typing_defs_core::Ty_::Tmixed => Tmixed, typing_defs_core::Ty_::Twildcard => Twildcard, typing_defs_core::Ty_::Tlike(ty) => Tlike(ty.into()), typing_defs_core::Ty_::Tany(_) => Tany, typing_defs_core::Ty_::Tnonnull => Tnonnull, typing_defs_core::Ty_::Tdynamic => Tdynamic, typing_defs_core::Ty_::Toption(ty) => Toption(ty.into()), typing_defs_core::Ty_::Tprim(prim) => Tprim(*prim), typing_defs_core::Ty_::Tfun(ft) => Tfun(Box::new(ft.into())), typing_defs_core::Ty_::Ttuple(tys) => Ttuple(slice(tys)), typing_defs_core::Ty_::Tshape(&typing_defs_core::ShapeType { origin: _, unknown_value: kind, fields, }) => Tshape(Box::new(ty::ShapeType( kind.into(), fields .iter() .map(|(name, ty)| { let (field_name_pos, name) = tshape_field_name_from_decl(name.0); (name, decl_shape_field_type(field_name_pos, ty)) }) .collect(), ))), typing_defs_core::Ty_::Trefinement(&(ty, cr)) => { Trefinement(Box::new(decl::TrefinementType { ty: ty.into(), refinement: ty::ClassRefinement { consts: (cr.cr_consts.iter()) .map(|(k, v)| ((*k).into(), (*v).into())) .collect(), }, })) } typing_defs_core::Ty_::Tgeneric(&(pos_id, tys)) => { Tgeneric(Box::new((pos_id.into(), slice(tys)))) } typing_defs_core::Ty_::Tunion(tys) => Tunion(slice(tys)), typing_defs_core::Ty_::Tintersection(tys) => Tintersection(slice(tys)), typing_defs_core::Ty_::TvecOrDict(&(ty1, ty2)) => { TvecOrDict(Box::new((ty1.into(), ty2.into()))) } typing_defs_core::Ty_::Taccess(taccess_type) => Taccess(Box::new(taccess_type.into())), typing_defs_core::Ty_::TunappliedAlias(_) | typing_defs_core::Ty_::Tnewtype(_) | typing_defs_core::Ty_::Tdependent(_) | typing_defs_core::Ty_::Tclass(_) | typing_defs_core::Ty_::Tneg(_) | typing_defs_core::Ty_::Tvar(_) => { unreachable!("Not used in decl tys") } }; Ty::new(reason, ty_) } } impl<R: Reason> From<obr::typing_defs::RefinedConst<'_>> for 
ty::RefinedConst<Ty<R>> { fn from(rc: obr::typing_defs::RefinedConst<'_>) -> Self { Self { bound: rc.bound.into(), is_ctx: rc.is_ctx, } } } impl<R: Reason> From<obr::typing_defs::RefinedConstBound<'_>> for ty::RefinedConstBound<Ty<R>> { fn from(ctr: obr::typing_defs::RefinedConstBound<'_>) -> Self { use obr::typing_defs::RefinedConstBound::*; match ctr { TRexact(ty) => Self::Exact(ty.into()), TRloose(bounds) => Self::Loose(bounds.into()), } } } impl<R: Reason> From<&obr::typing_defs::RefinedConstBounds<'_>> for ty::RefinedConstBounds<Ty<R>> { fn from(bounds: &obr::typing_defs::RefinedConstBounds<'_>) -> Self { Self { lower: slice(bounds.lower), upper: slice(bounds.upper), } } } impl<R: Reason> From<&obr::typing_defs::TaccessType<'_>> for ty::TaccessType<R, Ty<R>> { fn from(taccess_type: &obr::typing_defs::TaccessType<'_>) -> Self { Self { ty: taccess_type.0.into(), type_const: taccess_type.1.into(), } } } impl<R: Reason> From<obr::typing_defs::Capability<'_>> for ty::Capability<R, Ty<R>> { fn from(cap: obr::typing_defs::Capability<'_>) -> Self { use obr::typing_defs_core::Capability as Obr; match cap { Obr::CapDefaults(pos) => Self::CapDefaults(pos.into()), Obr::CapTy(ty) => Self::CapTy(ty.into()), } } } impl<R: Reason> From<&obr::typing_defs::FunImplicitParams<'_>> for ty::FunImplicitParams<R, Ty<R>> { fn from(x: &obr::typing_defs::FunImplicitParams<'_>) -> Self { Self { capability: x.capability.into(), } } } impl<R: Reason> From<&obr::typing_defs::FunType<'_>> for ty::FunType<R, Ty<R>> { fn from(ft: &obr::typing_defs::FunType<'_>) -> Self { Self { tparams: slice(ft.tparams), where_constraints: slice(ft.where_constraints), params: slice(ft.params), implicit_params: ft.implicit_params.into(), ret: ft.ret.into(), flags: ft.flags, ifc_decl: ft.ifc_decl.into(), cross_package: ft.cross_package.as_ref().map(|s| (*s).into()), } } } impl<R: Reason> From<&obr::typing_defs_core::PossiblyEnforcedTy<'_>> for decl::ty::PossiblyEnforcedTy<Ty<R>> { fn from(ty: 
// NOTE(review): this chunk begins mid-definition — the lines below complete a
// `From<&obr::typing_defs_core::PossiblyEnforcedTy<'_>>` impl whose header
// lies above this view.
&obr::typing_defs_core::PossiblyEnforcedTy<'_>) -> Self {
        Self {
            ty: ty.type_.into(),
            enforced: ty.enforced,
        }
    }
}

// Convert an oxidized-by-ref function parameter into the hackrs representation.
// Position, optional name, and type are converted; `flags` is copied verbatim.
impl<R: Reason> From<&obr::typing_defs_core::FunParam<'_>> for ty::FunParam<R, Ty<R>> {
    fn from(fp: &obr::typing_defs_core::FunParam<'_>) -> Self {
        Self {
            pos: fp.pos.into(),
            name: fp.name.map(Into::into),
            ty: fp.type_.into(),
            flags: fp.flags,
        }
    }
}

// Variant-for-variant conversion of the "where did this class const come from"
// marker (`Self_` vs. a named origin).
impl From<obr::typing_defs::ClassConstFrom<'_>> for ty::ClassConstFrom {
    fn from(x: obr::typing_defs::ClassConstFrom<'_>) -> Self {
        use obr::typing_defs::ClassConstFrom as Obr;
        match x {
            Obr::Self_ => Self::Self_,
            Obr::From(s) => Self::From(s.into()),
        }
    }
}

// A class-const reference is an (origin, name) pair; convert both components.
impl From<obr::typing_defs::ClassConstRef<'_>> for ty::ClassConstRef {
    fn from(x: obr::typing_defs::ClassConstRef<'_>) -> Self {
        Self(x.0.into(), x.1.into())
    }
}

// Abstract type constant: optional `as`/`super` constraints and an optional
// default type, each converted when present.
impl<R: Reason> From<&obr::typing_defs::AbstractTypeconst<'_>> for ty::AbstractTypeconst<R> {
    fn from(x: &obr::typing_defs::AbstractTypeconst<'_>) -> Self {
        Self {
            as_constraint: x.as_constraint.map(Into::into),
            super_constraint: x.super_constraint.map(Into::into),
            default: x.default.map(Into::into),
        }
    }
}

// Concrete type constant: only carries the bound type (obr calls it `tc_type`).
impl<R: Reason> From<&obr::typing_defs::ConcreteTypeconst<'_>> for ty::ConcreteTypeconst<R> {
    fn from(x: &obr::typing_defs::ConcreteTypeconst<'_>) -> Self {
        Self {
            ty: x.tc_type.into(),
        }
    }
}

// Dispatch on the abstract/concrete type-constant variants.
impl<R: Reason> From<obr::typing_defs::Typeconst<'_>> for ty::Typeconst<R> {
    fn from(x: obr::typing_defs::Typeconst<'_>) -> Self {
        use obr::typing_defs::Typeconst as Obr;
        match x {
            Obr::TCAbstract(atc) => Self::TCAbstract(atc.into()),
            Obr::TCConcrete(ctc) => Self::TCConcrete(ctc.into()),
        }
    }
}

// Enum declaration info: base type, optional `as` constraint, and the list of
// included enums.
impl<R: Reason> From<&obr::typing_defs::EnumType<'_>> for ty::EnumType<R> {
    fn from(x: &obr::typing_defs::EnumType<'_>) -> Self {
        Self {
            base: x.base.into(),
            constraint: x.constraint.map(Into::into),
            includes: slice(x.includes),
        }
    }
}

// obr represents enforceability as a (position, bool) pair; hackrs collapses
// that into `Enforceable(Option<Pos>)` — the position is kept only when the
// flag is set.
impl<P: Pos> From<(&obr::pos::Pos<'_>, bool)> for ty::Enforceable<P> {
    fn from((pos, is_enforceable): (&obr::pos::Pos<'_>, bool)) -> Self {
        if is_enforceable {
            Self(Some(pos.into()))
        } else {
            Self(None)
        }
    }
}

// --- Shallow decl conversions (obr::shallow_decl_defs -> shallow::*) ---

// Class constant: obr's `abstract_` field maps to hackrs's `kind`.
impl<R: Reason> From<&obr::shallow_decl_defs::ShallowClassConst<'_>>
    for shallow::ShallowClassConst<R>
{
    fn from(scc: &obr::shallow_decl_defs::ShallowClassConst<'_>) -> Self {
        Self {
            kind: scc.abstract_,
            name: scc.name.into(),
            ty: scc.type_.into(),
            refs: slice(scc.refs),
        }
    }
}

// Type constant: `enforceable` goes through the (pos, bool) -> Enforceable
// conversion defined above.
impl<R: Reason> From<&obr::shallow_decl_defs::ShallowTypeconst<'_>>
    for shallow::ShallowTypeconst<R>
{
    fn from(stc: &obr::shallow_decl_defs::ShallowTypeconst<'_>) -> Self {
        Self {
            name: stc.name.into(),
            kind: stc.kind.into(),
            enforceable: <ty::Enforceable<R::Pos>>::from(stc.enforceable),
            reifiable: stc.reifiable.map(Into::into),
            is_ctx: stc.is_ctx,
        }
    }
}

// Method: visibility and flags are copied verbatim; everything else converted.
impl<R: Reason> From<&obr::shallow_decl_defs::ShallowMethod<'_>> for shallow::ShallowMethod<R> {
    fn from(sm: &obr::shallow_decl_defs::ShallowMethod<'_>) -> Self {
        Self {
            name: sm.name.into(),
            ty: sm.type_.into(),
            visibility: sm.visibility,
            deprecated: sm.deprecated.map(Into::into),
            attributes: slice(sm.attributes),
            flags: sm.flags,
        }
    }
}

// Property: `xhp_attr`, `visibility`, and `flags` are copied verbatim.
impl<R: Reason> From<&obr::shallow_decl_defs::ShallowProp<'_>> for shallow::ShallowProp<R> {
    fn from(sp: &obr::shallow_decl_defs::ShallowProp<'_>) -> Self {
        Self {
            name: sp.name.into(),
            xhp_attr: sp.xhp_attr,
            ty: sp.type_.into(),
            visibility: sp.visibility,
            flags: sp.flags,
        }
    }
}

impl<R: Reason> From<&obr::shallow_decl_defs::ClassDecl<'_>> for shallow::ShallowClass<R> {
    fn from(sc: &obr::shallow_decl_defs::ClassDecl<'_>) -> Self {
        // Destructure to help ensure we convert every field.
        let obr::shallow_decl_defs::ClassDecl {
            mode,
            final_,
            abstract_,
            is_xhp,
            internal,
            has_xhp_keyword,
            kind,
            module,
            name,
            tparams,
            where_constraints,
            extends,
            uses,
            xhp_attr_uses,
            xhp_enum_values,
            xhp_marked_empty,
            req_extends,
            req_implements,
            req_class,
            implements,
            support_dynamic_type,
            consts,
            typeconsts,
            props,
            sprops,
            constructor,
            static_methods,
            methods,
            user_attributes,
            enum_type,
            docs_url,
        } = sc;
        Self {
            mode: *mode,
            // obr's `final_`/`abstract_`/`internal` become `is_*` here.
            is_final: *final_,
            is_abstract: *abstract_,
            is_internal: *internal,
            is_xhp: *is_xhp,
            has_xhp_keyword: *has_xhp_keyword,
            kind: *kind,
            module: module.map(Into::into),
            name: (*name).into(),
            tparams: slice(tparams),
            where_constraints: slice(where_constraints),
            extends: slice(extends),
            uses: slice(uses),
            xhp_attr_uses: slice(xhp_attr_uses),
            // Arena map of symbol -> enum values, converted to an owned map.
            xhp_enum_values: (xhp_enum_values.iter())
                .map(|(&k, v)| (k.into(), slice(v)))
                .collect(),
            xhp_marked_empty: *xhp_marked_empty,
            req_extends: slice(req_extends),
            req_implements: slice(req_implements),
            req_class: slice(req_class),
            implements: slice(implements),
            support_dynamic_type: *support_dynamic_type,
            consts: slice(consts),
            typeconsts: slice(typeconsts),
            props: slice(props),
            static_props: slice(sprops),
            constructor: constructor.map(Into::into),
            static_methods: slice(static_methods),
            methods: slice(methods),
            user_attributes: slice(user_attributes),
            enum_type: enum_type.map(Into::into),
            docs_url: docs_url.map(Into::into),
        }
    }
}

// Top-level function declaration; scalar flags copied verbatim.
impl<R: Reason> From<&obr::shallow_decl_defs::FunDecl<'_>> for shallow::FunDecl<R> {
    fn from(sf: &obr::shallow_decl_defs::FunDecl<'_>) -> Self {
        Self {
            pos: sf.pos.into(),
            ty: sf.type_.into(),
            deprecated: sf.deprecated.map(Into::into),
            module: sf.module.map(Into::into),
            internal: sf.internal,
            php_std_lib: sf.php_std_lib,
            support_dynamic_type: sf.support_dynamic_type,
            no_auto_dynamic: sf.no_auto_dynamic,
            no_auto_likes: sf.no_auto_likes,
        }
    }
}

// Type alias declaration.
impl<R: Reason> From<&obr::shallow_decl_defs::TypedefDecl<'_>> for shallow::TypedefDecl<R> {
    fn from(x: &obr::shallow_decl_defs::TypedefDecl<'_>) -> Self {
        Self {
            module: x.module.map(Into::into),
            pos: x.pos.into(),
            vis: x.vis,
            tparams: slice(x.tparams),
            as_constraint: x.as_constraint.map(Into::into),
            super_constraint: x.super_constraint.map(Into::into),
            ty: x.type_.into(),
            is_ctx: x.is_ctx,
            attributes: slice(x.attributes),
            internal: x.internal,
            docs_url: x.docs_url.map(Into::into),
        }
    }
}

// Top-level constant declaration: just a position and a type.
impl<R: Reason> From<&obr::shallow_decl_defs::ConstDecl<'_>> for shallow::ConstDecl<R> {
    fn from(x: &obr::shallow_decl_defs::ConstDecl<'_>) -> Self {
        Self {
            pos: x.pos.into(),
            ty: x.type_.into(),
        }
    }
}

// Module references: global, prefix match, or exact match.
impl From<obr::typing_defs::ModuleReference<'_>> for ty::ModuleReference {
    fn from(x: obr::typing_defs::ModuleReference<'_>) -> Self {
        use obr::typing_defs::ModuleReference as Obr;
        match x {
            Obr::MRGlobal => Self::MRGlobal,
            Obr::MRPrefix(m) => Self::MRPrefix(m.into()),
            Obr::MRExact(m) => Self::MRExact(m.into()),
        }
    }
}

// Module declaration: optional export/import lists.
impl<R: Reason> From<&obr::shallow_decl_defs::ModuleDefType<'_>> for shallow::ModuleDecl<R> {
    fn from(x: &obr::shallow_decl_defs::ModuleDefType<'_>) -> Self {
        Self {
            pos: x.pos.into(),
            exports: x.exports.map(slice),
            imports: x.imports.map(slice),
        }
    }
}

// Dispatch a shallow decl to the matching hackrs decl variant.
impl<R: Reason> From<&obr::shallow_decl_defs::Decl<'_>> for shallow::Decl<R> {
    fn from(decl: &obr::shallow_decl_defs::Decl<'_>) -> Self {
        use obr::shallow_decl_defs::Decl as Obr;
        match *decl {
            Obr::Class(x) => Self::Class(x.into()),
            Obr::Fun(x) => Self::Fun(x.into()),
            Obr::Typedef(x) => Self::Typedef(x.into()),
            Obr::Const(x) => Self::Const(x.into()),
            Obr::Module(x) => Self::Module(x.into()),
        }
    }
}

// Same as above but pairs each decl with its name, producing `NamedDecl`.
impl<R: Reason> From<&(&str, obr::shallow_decl_defs::Decl<'_>)> for shallow::NamedDecl<R> {
    fn from(decl: &(&str, obr::shallow_decl_defs::Decl<'_>)) -> Self {
        use obr::shallow_decl_defs::Decl as Obr;
        match *decl {
            (name, Obr::Class(x)) => Self::Class(name.into(), x.into()),
            (name, Obr::Fun(x)) => Self::Fun(name.into(), x.into()),
            (name, Obr::Typedef(x)) => Self::Typedef(name.into(), x.into()),
            (name, Obr::Const(x)) => Self::Const(name.into(), x.into()),
            (name, Obr::Module(x)) => Self::Module(name.into(), x.into()),
        }
    }
}

// --- Folded decl conversions (obr::decl_defs -> folded::*) ---

// Class member element; `flags` copied verbatim.
impl From<&obr::decl_defs::Element<'_>> for folded::FoldedElement {
    fn from(x: &obr::decl_defs::Element<'_>) -> Self {
        Self {
            flags: x.flags,
            origin: x.origin.into(),
            visibility: x.visibility.into(),
            deprecated: x.deprecated.map(Into::into),
        }
    }
}

// Type-parameter substitution context used when folding ancestors.
impl<R: Reason> From<&obr::decl_defs::SubstContext<'_>> for folded::SubstContext<R> {
    fn from(x: &obr::decl_defs::SubstContext<'_>) -> Self {
        Self {
            subst: folded::Subst(map(x.subst.iter())),
            class_context: x.class_context.into(),
            from_req_extends: x.from_req_extends,
        }
    }
}

// Folded type constant; obr's `synthesized`/`concretized` become
// `is_synthesized`/`is_concretized`.
impl<R: Reason> From<&obr::typing_defs::TypeconstType<'_>> for folded::TypeConst<R> {
    fn from(x: &obr::typing_defs::TypeconstType<'_>) -> Self {
        Self {
            is_synthesized: x.synthesized,
            name: x.name.into(),
            kind: x.kind.into(),
            origin: x.origin.into(),
            enforceable: x.enforceable.into(),
            reifiable: x.reifiable.map(Into::into),
            is_concretized: x.concretized,
            is_ctx: x.is_ctx,
        }
    }
}

// Folded class constant; obr's `abstract_` field maps to `kind`.
impl<R: Reason> From<&obr::typing_defs::ClassConst<'_>> for folded::ClassConst<R> {
    fn from(x: &obr::typing_defs::ClassConst<'_>) -> Self {
        Self {
            is_synthesized: x.synthesized,
            kind: x.abstract_,
            pos: x.pos.into(),
            ty: x.type_.into(),
            origin: x.origin.into(),
            refs: slice(x.refs),
        }
    }
}

// A `require extends`/`require implements` entry is a (pos, ty) tuple in obr.
impl<R: Reason> From<&obr::decl_defs::Requirement<'_>> for folded::Requirement<R> {
    fn from(req: &obr::decl_defs::Requirement<'_>) -> Self {
        Self {
            pos: req.0.into(),
            ty: req.1.into(),
        }
    }
}

// Constructor element plus its consistency requirement.
impl From<(Option<&obr::decl_defs::Element<'_>>, ty::ConsistentKind)> for folded::Constructor {
    fn from(construct: (Option<&obr::decl_defs::Element<'_>>, ty::ConsistentKind)) -> Self {
        Self::new(construct.0.map(Into::into), construct.1)
    }
}

// Decl-time errors; generic over the position type so any pos convertible
// from an obr pos works.
impl<P> From<obr::decl_defs::DeclError<'_>> for crate::decl_error::DeclError<P>
where
    P: for<'a> From<&'a obr::pos::Pos<'a>>,
{
    fn from(decl_error: obr::decl_defs::DeclError<'_>) -> Self {
        use obr::decl_defs::DeclError as Obr;
        match decl_error {
            Obr::WrongExtendKind {
                pos,
                kind,
                name,
                parent_pos,
                parent_kind,
                parent_name,
            } => Self::WrongExtendKind {
                pos: pos.into(),
                kind,
                name: name.into(),
                parent_pos: parent_pos.into(),
                parent_kind,
                parent_name: parent_name.into(),
            },
            Obr::CyclicClassDef { pos, stack } => {
                Self::CyclicClassDef(pos.into(), stack.iter().copied().map(Into::into).collect())
            }
        }
    }
}

impl<R: Reason> From<&obr::decl_defs::DeclClassType<'_>> for folded::FoldedClass<R> {
    fn from(cls: &obr::decl_defs::DeclClassType<'_>) -> Self {
        // Destructure to help ensure we convert every field. A couple fields
        // are ignored because they're redundant with other fields (and
        // `folded::FoldedClass` just omits the redundant fields).
        let obr::decl_defs::DeclClassType {
            name,
            pos,
            kind,
            abstract_: _, // `Self::is_abstract()` just reads the `kind` field
            final_,
            const_,
            internal,
            is_xhp,
            has_xhp_keyword,
            support_dynamic_type,
            module,
            is_module_level_trait,
            tparams,
            where_constraints,
            substs,
            ancestors,
            props,
            sprops,
            methods,
            smethods,
            consts,
            typeconsts,
            xhp_enum_values,
            xhp_marked_empty,
            construct,
            need_init: _, // `Self::has_concrete_constructor()` reads the `constructor` field
            deferred_init_members,
            req_ancestors,
            req_ancestors_extends,
            req_class_ancestors,
            extends,
            sealed_whitelist,
            xhp_attr_deps,
            enum_type,
            decl_errors,
            docs_url,
        } = cls;
        Self {
            name: (*name).into(),
            pos: (*pos).into(),
            kind: *kind,
            is_final: *final_,
            is_const: *const_,
            is_internal: *internal,
            is_xhp: *is_xhp,
            has_xhp_keyword: *has_xhp_keyword,
            support_dynamic_type: *support_dynamic_type,
            enum_type: enum_type.map(Into::into),
            module: module.map(Into::into),
            is_module_level_trait: *is_module_level_trait,
            tparams: slice(tparams),
            where_constraints: slice(where_constraints),
            substs: map(substs.iter()),
            ancestors: map(ancestors.iter()),
            props: map(props.iter()),
            static_props: map(sprops.iter()),
            methods: map(methods.iter()),
            static_methods: map(smethods.iter()),
            constructor: (*construct).into(),
            consts: map(consts.iter()),
            type_consts: map(typeconsts.iter()),
            xhp_enum_values: (xhp_enum_values.iter())
                .map(|(&s, &evs)| (s.into(), slice(evs)))
                .collect(),
            xhp_marked_empty: *xhp_marked_empty,
            // The remaining collections are arena slices/sets of names;
            // convert element-wise into owned collections.
            extends: extends.iter().copied().map(Into::into).collect(),
            xhp_attr_deps: xhp_attr_deps.iter().copied().map(Into::into).collect(),
            req_ancestors: req_ancestors.iter().copied().map(Into::into).collect(),
            req_ancestors_extends: (req_ancestors_extends.iter())
                .copied()
                .map(Into::into)
                .collect(),
            req_class_ancestors: (req_class_ancestors.iter())
                .copied()
                .map(Into::into)
                .collect(),
            sealed_whitelist: (sealed_whitelist)
                .map(|l| l.iter().copied().map(Into::into).collect()),
            deferred_init_members: (deferred_init_members.iter())
                .copied()
                .map(Into::into)
                .collect(),
            decl_errors: slice(decl_errors),
            docs_url: docs_url.map(Into::into),
        }
    }
}
Rust
hhvm/hphp/hack/src/hackrs/ty/decl/ocamlrep.rs
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.

use ocamlrep::FromOcamlRep;
use ocamlrep::ToOcamlRep;

use super::folded::*;
use super::ty::*;
use crate::reason::Reason;

// See comment on definition of `Enforceable`
//
// Serialized as a 2-field tuple: (position, bool). When the option is `None`
// we emit `Pos::none()` so the OCaml side always sees a position.
impl<P: pos::Pos> ToOcamlRep for Enforceable<P> {
    fn to_ocamlrep<'a, A: ocamlrep::Allocator>(&'a self, alloc: &'a A) -> ocamlrep::Value<'a> {
        let mut block = alloc.block_with_size(2);
        let pos = self.as_ref().map_or_else(
            || alloc.add(oxidized_by_ref::pos::Pos::none()),
            |p| alloc.add(p),
        );
        alloc.set_field(&mut block, 0, pos);
        alloc.set_field(&mut block, 1, alloc.add_copy(self.is_some()));
        block.build()
    }
}

// Inverse of the impl above: keep the position only when the bool is set.
impl<P: pos::Pos> FromOcamlRep for Enforceable<P> {
    fn from_ocamlrep(value: ocamlrep::Value<'_>) -> Result<Self, ocamlrep::FromError> {
        let block = ocamlrep::from::expect_tuple(value, 2)?;
        let pos: P = ocamlrep::from::field(block, 0)?;
        let is_enforceable: bool = ocamlrep::from::field(block, 1)?;
        if is_enforceable {
            Ok(Self(Some(pos)))
        } else {
            Ok(Self(None))
        }
    }
}

// We need to hand-roll a ToOcamlRep impl for FoldedClass instead of deriving it
// in order to synthesize the `need_init` and `abstract` fields, which we derive
// from other information in the class in hackrs (whereas OCaml stores it
// redundantly).
//
// NOTE: field indices 0-35 below are the OCaml record layout; they must stay
// in sync with the matching FromOcamlRep impl further down.
impl<R: Reason> ToOcamlRep for FoldedClass<R> {
    fn to_ocamlrep<'a, A: ocamlrep::Allocator>(&'a self, alloc: &'a A) -> ocamlrep::Value<'a> {
        // Destructure to help ensure we convert every field.
        let Self {
            name,
            pos,
            kind,
            is_final,
            is_const,
            is_internal,
            is_xhp,
            has_xhp_keyword,
            support_dynamic_type,
            module,
            is_module_level_trait,
            tparams,
            where_constraints,
            substs,
            ancestors,
            props,
            static_props,
            methods,
            static_methods,
            consts,
            type_consts,
            xhp_enum_values,
            xhp_marked_empty,
            constructor,
            deferred_init_members,
            req_ancestors,
            req_ancestors_extends,
            req_class_ancestors,
            extends,
            sealed_whitelist,
            xhp_attr_deps,
            enum_type,
            decl_errors,
            docs_url,
        } = self;
        // Synthesized fields (not stored in FoldedClass itself).
        let need_init = self.has_concrete_constructor();
        let abstract_ = self.is_abstract();
        let mut block = alloc.block_with_size(36);
        alloc.set_field(&mut block, 0, alloc.add_copy(need_init));
        alloc.set_field(&mut block, 1, alloc.add_copy(abstract_));
        alloc.set_field(&mut block, 2, alloc.add(is_final));
        alloc.set_field(&mut block, 3, alloc.add(is_const));
        alloc.set_field(&mut block, 4, alloc.add(is_internal));
        alloc.set_field(&mut block, 5, alloc.add(deferred_init_members));
        alloc.set_field(&mut block, 6, alloc.add(kind));
        alloc.set_field(&mut block, 7, alloc.add(is_xhp));
        alloc.set_field(&mut block, 8, alloc.add(has_xhp_keyword));
        alloc.set_field(&mut block, 9, alloc.add(module));
        alloc.set_field(&mut block, 10, alloc.add(is_module_level_trait));
        alloc.set_field(&mut block, 11, alloc.add(name));
        alloc.set_field(&mut block, 12, alloc.add(pos));
        alloc.set_field(&mut block, 13, alloc.add(tparams));
        alloc.set_field(&mut block, 14, alloc.add(where_constraints));
        alloc.set_field(&mut block, 15, alloc.add(substs));
        alloc.set_field(&mut block, 16, alloc.add(consts));
        alloc.set_field(&mut block, 17, alloc.add(type_consts));
        alloc.set_field(&mut block, 18, alloc.add(props));
        alloc.set_field(&mut block, 19, alloc.add(static_props));
        alloc.set_field(&mut block, 20, alloc.add(methods));
        alloc.set_field(&mut block, 21, alloc.add(static_methods));
        alloc.set_field(&mut block, 22, alloc.add(constructor));
        alloc.set_field(&mut block, 23, alloc.add(ancestors));
        alloc.set_field(&mut block, 24, alloc.add(support_dynamic_type));
        alloc.set_field(&mut block, 25, alloc.add(req_ancestors));
        alloc.set_field(&mut block, 26, alloc.add(req_ancestors_extends));
        alloc.set_field(&mut block, 27, alloc.add(req_class_ancestors));
        alloc.set_field(&mut block, 28, alloc.add(extends));
        alloc.set_field(&mut block, 29, alloc.add(sealed_whitelist));
        alloc.set_field(&mut block, 30, alloc.add(xhp_attr_deps));
        alloc.set_field(&mut block, 31, alloc.add(xhp_enum_values));
        alloc.set_field(&mut block, 32, alloc.add(xhp_marked_empty));
        alloc.set_field(&mut block, 33, alloc.add(enum_type));
        alloc.set_field(&mut block, 34, alloc.add(decl_errors));
        alloc.set_field(&mut block, 35, alloc.add(docs_url));
        block.build()
    }
}

// Hand-written here because we lack the `need_init` and `abstract` fields.
// See comment on impl of ToOcamlRep for FoldedClass.
//
// Fields 0 (`need_init`) and 1 (`abstract`) are intentionally skipped: they
// are recomputed on demand by `has_concrete_constructor()`/`is_abstract()`.
impl<R: Reason> FromOcamlRep for FoldedClass<R> {
    fn from_ocamlrep(value: ocamlrep::Value<'_>) -> Result<Self, ocamlrep::FromError> {
        let block = ocamlrep::from::expect_tuple(value, 36)?;
        Ok(Self {
            is_final: ocamlrep::from::field(block, 2)?,
            is_const: ocamlrep::from::field(block, 3)?,
            is_internal: ocamlrep::from::field(block, 4)?,
            deferred_init_members: ocamlrep::from::field(block, 5)?,
            kind: ocamlrep::from::field(block, 6)?,
            is_xhp: ocamlrep::from::field(block, 7)?,
            has_xhp_keyword: ocamlrep::from::field(block, 8)?,
            module: ocamlrep::from::field(block, 9)?,
            is_module_level_trait: ocamlrep::from::field(block, 10)?,
            name: ocamlrep::from::field(block, 11)?,
            pos: ocamlrep::from::field(block, 12)?,
            tparams: ocamlrep::from::field(block, 13)?,
            where_constraints: ocamlrep::from::field(block, 14)?,
            substs: ocamlrep::from::field(block, 15)?,
            consts: ocamlrep::from::field(block, 16)?,
            type_consts: ocamlrep::from::field(block, 17)?,
            props: ocamlrep::from::field(block, 18)?,
            static_props: ocamlrep::from::field(block, 19)?,
            methods: ocamlrep::from::field(block, 20)?,
            static_methods: ocamlrep::from::field(block, 21)?,
            constructor: ocamlrep::from::field(block, 22)?,
            ancestors: ocamlrep::from::field(block, 23)?,
            support_dynamic_type: ocamlrep::from::field(block, 24)?,
            req_ancestors: ocamlrep::from::field(block, 25)?,
            req_ancestors_extends: ocamlrep::from::field(block, 26)?,
            req_class_ancestors: ocamlrep::from::field(block, 27)?,
            extends: ocamlrep::from::field(block, 28)?,
            sealed_whitelist: ocamlrep::from::field(block, 29)?,
            xhp_attr_deps: ocamlrep::from::field(block, 30)?,
            xhp_enum_values: ocamlrep::from::field(block, 31)?,
            xhp_marked_empty: ocamlrep::from::field(block, 32)?,
            enum_type: ocamlrep::from::field(block, 33)?,
            decl_errors: ocamlrep::from::field(block, 34)?,
            docs_url: ocamlrep::from::field(block, 35)?,
        })
    }
}

/// It's not possible for us to derive ToOcamlRep for TshapeFieldName because we
/// represent it differently: OCaml includes positions in TshapeFieldName, but
/// we cannot (see the documentation on `TshapeFieldName` for rationale).
///
/// Instead, we store the positions in shape-map values, and feed them into this
/// function to produce the OCaml representation of `TshapeFieldName`.
///
/// Panics if `name` and `field_name_pos` disagree on variant shape (literal
/// names require `Simple`, class-const names require `ClassConst`).
fn shape_field_name_to_ocamlrep<'a, A: ocamlrep::Allocator, P: ToOcamlRep>(
    alloc: &'a A,
    name: &'a TshapeFieldName,
    field_name_pos: &'a ShapeFieldNamePos<P>,
) -> ocamlrep::Value<'a> {
    let simple_pos = || match field_name_pos {
        ShapeFieldNamePos::Simple(p) => p.to_ocamlrep(alloc),
        ShapeFieldNamePos::ClassConst(..) => panic!("expected ShapeFieldNamePos::Simple"),
    };
    match name {
        // Tag 0: integer literal field name, as (pos, string).
        TshapeFieldName::TSFlitInt(x) => {
            let mut pos_string = alloc.block_with_size(2);
            alloc.set_field(&mut pos_string, 0, simple_pos());
            alloc.set_field(&mut pos_string, 1, alloc.add(x));
            let pos_string = pos_string.build();
            let mut block = alloc.block_with_size_and_tag(1usize, 0u8);
            alloc.set_field(&mut block, 0, pos_string);
            block.build()
        }
        // Tag 1: string literal field name, as (pos, string).
        TshapeFieldName::TSFlitStr(x) => {
            let mut pos_string = alloc.block_with_size(2);
            alloc.set_field(&mut pos_string, 0, simple_pos());
            alloc.set_field(&mut pos_string, 1, alloc.add(x));
            let pos_string = pos_string.build();
            let mut block = alloc.block_with_size_and_tag(1usize, 1u8);
            alloc.set_field(&mut block, 0, pos_string);
            block.build()
        }
        // Tag 2: `Cls::CONST` field name, as ((pos, cls), (pos, const)).
        TshapeFieldName::TSFclassConst(cls, name) => {
            let (pos1, pos2) = match field_name_pos {
                ShapeFieldNamePos::ClassConst(p1, p2) => {
                    (p1.to_ocamlrep(alloc), p2.to_ocamlrep(alloc))
                }
                ShapeFieldNamePos::Simple(..) => panic!("expected ShapeFieldNamePos::ClassConst"),
            };
            let mut cls_pos_id = alloc.block_with_size(2);
            alloc.set_field(&mut cls_pos_id, 0, pos1);
            alloc.set_field(&mut cls_pos_id, 1, alloc.add(cls));
            let cls_pos_id = cls_pos_id.build();
            let mut const_pos_string = alloc.block_with_size(2);
            alloc.set_field(&mut const_pos_string, 0, pos2);
            alloc.set_field(&mut const_pos_string, 1, alloc.add(name));
            let const_pos_string = const_pos_string.build();
            let mut block = alloc.block_with_size_and_tag(2usize, 2u8);
            alloc.set_field(&mut block, 0, cls_pos_id);
            alloc.set_field(&mut block, 1, const_pos_string);
            block.build()
        }
    }
}

// See comment on `shape_field_name_to_ocamlrep`.
// Decoding-side mirror of `shape_field_name_to_ocamlrep`: variant order
// (Int=0, Str=1, ClassConst=2) must match the tags written there.
#[derive(FromOcamlRep)]
enum OcamlShapeFieldName<P> {
    Int(pos::Positioned<pos::Symbol, P>),
    Str(pos::Positioned<pos::Bytes, P>),
    ClassConst(
        pos::Positioned<pos::TypeName, P>,
        pos::Positioned<pos::Symbol, P>,
    ),
}

// Serialized as a 2-field block: (optional, ty). `field_name_pos` is
// deliberately dropped — it is emitted as part of the shape-map key instead
// (see `shape_field_name_to_ocamlrep`).
impl<R: Reason> ToOcamlRep for ShapeFieldType<R> {
    fn to_ocamlrep<'a, A: ocamlrep::Allocator>(&'a self, alloc: &'a A) -> ocamlrep::Value<'a> {
        let mut block = alloc.block_with_size_and_tag(2usize, 0u8);
        let ShapeFieldType {
            optional,
            ty,
            field_name_pos: _,
        } = self;
        alloc.set_field(&mut block, 0, alloc.add(optional));
        alloc.set_field(&mut block, 1, alloc.add(ty));
        block.build()
    }
}

// Serialized as a 3-field block: (origin, shape_kind, field map). The field
// map is an OCaml map built from pre-sorted (key, value) pairs; an empty map
// is the immediate int 0.
impl<R: Reason> ToOcamlRep for ShapeType<R> {
    fn to_ocamlrep<'a, A: ocamlrep::Allocator>(&'a self, alloc: &'a A) -> ocamlrep::Value<'a> {
        let Self(shape_kind, shape_field_type_map) = &self;
        let map = if shape_field_type_map.is_empty() {
            ocamlrep::Value::int(0)
        } else {
            let len = shape_field_type_map.len();
            let mut iter = shape_field_type_map.iter().map(|(k, v)| {
                // Positions stored in the value are folded into the key here.
                let k = shape_field_name_to_ocamlrep(alloc, k, &v.field_name_pos);
                (k, v.to_ocamlrep(alloc))
            });
            let (map, _) = ocamlrep::sorted_iter_to_ocaml_map(&mut iter, alloc, len);
            map
        };
        let mut block = alloc.block_with_size(3);
        // Note: we always set decl shapes to Missing_origin (0) as it is only for type aliases
        alloc.set_field(&mut block, 0, ocamlrep::Value::int(0));
        alloc.set_field(&mut block, 1, alloc.add(shape_kind));
        alloc.set_field(&mut block, 2, map);
        block.build()
    }
}

// Inverse of the impl above: field 0 (origin) is ignored, field 1 is the
// shape kind, field 2 is the map. Positions are moved back out of the keys
// into each value's `field_name_pos`.
impl<R: Reason> FromOcamlRep for ShapeType<R> {
    fn from_ocamlrep(value: ocamlrep::Value<'_>) -> Result<Self, ocamlrep::FromError> {
        let block = ocamlrep::from::expect_tuple(value, 3)?;
        Ok(ShapeType(
            ocamlrep::from::field(block, 1)?,
            ocamlrep::vec_from_ocaml_map(block[2])?
                .into_iter()
                .map(|(k, (optional, ty))| match k {
                    OcamlShapeFieldName::Int(pos_id) => (
                        TshapeFieldName::TSFlitInt(pos_id.id()),
                        ShapeFieldType {
                            optional,
                            ty,
                            field_name_pos: ShapeFieldNamePos::Simple(pos_id.into_pos()),
                        },
                    ),
                    OcamlShapeFieldName::Str(pos_id) => (
                        TshapeFieldName::TSFlitStr(pos_id.id()),
                        ShapeFieldType {
                            optional,
                            ty,
                            field_name_pos: ShapeFieldNamePos::Simple(pos_id.into_pos()),
                        },
                    ),
                    OcamlShapeFieldName::ClassConst(cls_id, const_id) => (
                        TshapeFieldName::TSFclassConst(cls_id.id(), const_id.id()),
                        ShapeFieldType {
                            optional,
                            ty,
                            field_name_pos: ShapeFieldNamePos::ClassConst(
                                cls_id.into_pos(),
                                const_id.into_pos(),
                            ),
                        },
                    ),
                })
                .collect(),
        ))
    }
}

// Hand-written because we represent shape field names differently (see comment
// on `shape_field_name_to_ocamlrep`) and don't represent TanySentinel.
//
// Nullary constructors are emitted as immediate ints (0-4); payload-carrying
// constructors as blocks tagged 0-13. These tags must match the OCaml `ty_`
// declaration and the FromOcamlRep impl below.
impl<R: Reason> ToOcamlRep for Ty_<R> {
    fn to_ocamlrep<'a, A: ocamlrep::Allocator>(&'a self, alloc: &'a A) -> ocamlrep::Value<'a> {
        match self {
            Ty_::Tthis => ocamlrep::Value::int(0),
            Ty_::Tapply(x) => {
                let mut block = alloc.block_with_size_and_tag(2usize, 0u8);
                alloc.set_field(&mut block, 0, alloc.add(&x.0));
                alloc.set_field(&mut block, 1, alloc.add(&x.1));
                block.build()
            }
            Ty_::Trefinement(x) => {
                let mut block = alloc.block_with_size_and_tag(2usize, 1u8);
                alloc.set_field(&mut block, 0, alloc.add(&x.ty));
                alloc.set_field(&mut block, 1, alloc.add(&x.refinement));
                block.build()
            }
            Ty_::Tmixed => ocamlrep::Value::int(1),
            Ty_::Twildcard => ocamlrep::Value::int(2),
            Ty_::Tlike(x) => {
                let mut block = alloc.block_with_size_and_tag(1usize, 2u8);
                alloc.set_field(&mut block, 0, alloc.add(x));
                block.build()
            }
            Ty_::Tany => {
                // hackrs has no TanySentinel payload; synthesize a unit here.
                let mut block = alloc.block_with_size_and_tag(1usize, 3u8);
                alloc.set_field(&mut block, 0, alloc.add(&())); // TanySentinel
                block.build()
            }
            Ty_::Tnonnull => ocamlrep::Value::int(3),
            Ty_::Tdynamic => ocamlrep::Value::int(4),
            Ty_::Toption(x) => {
                let mut block = alloc.block_with_size_and_tag(1usize, 4u8);
                alloc.set_field(&mut block, 0, alloc.add(x));
                block.build()
            }
            Ty_::Tprim(x) => {
                let mut block = alloc.block_with_size_and_tag(1usize, 5u8);
                alloc.set_field(&mut block, 0, alloc.add(x));
                block.build()
            }
            Ty_::Tfun(x) => {
                let mut block = alloc.block_with_size_and_tag(1usize, 6u8);
                alloc.set_field(&mut block, 0, alloc.add(&**x));
                block.build()
            }
            Ty_::Ttuple(x) => {
                let mut block = alloc.block_with_size_and_tag(1usize, 7u8);
                alloc.set_field(&mut block, 0, alloc.add(&**x));
                block.build()
            }
            Ty_::Tshape(shape) => {
                let mut block = alloc.block_with_size_and_tag(1usize, 8u8);
                alloc.set_field(&mut block, 0, alloc.add(&**shape));
                block.build()
            }
            Ty_::Tgeneric(x) => {
                let mut block = alloc.block_with_size_and_tag(2usize, 9u8);
                alloc.set_field(&mut block, 0, alloc.add(&x.0));
                alloc.set_field(&mut block, 1, alloc.add(&x.1));
                block.build()
            }
            Ty_::Tunion(x) => {
                let mut block = alloc.block_with_size_and_tag(1usize, 10u8);
                alloc.set_field(&mut block, 0, alloc.add(&**x));
                block.build()
            }
            Ty_::Tintersection(x) => {
                let mut block = alloc.block_with_size_and_tag(1usize, 11u8);
                alloc.set_field(&mut block, 0, alloc.add(&**x));
                block.build()
            }
            Ty_::TvecOrDict(x) => {
                let mut block = alloc.block_with_size_and_tag(2usize, 12u8);
                alloc.set_field(&mut block, 0, alloc.add(&x.0));
                alloc.set_field(&mut block, 1, alloc.add(&x.1));
                block.build()
            }
            Ty_::Taccess(x) => {
                let mut block = alloc.block_with_size_and_tag(1usize, 13u8);
                alloc.set_field(&mut block, 0, alloc.add(&**x));
                block.build()
            }
        }
    }
}

// Hand-written because we represent shape field names differently (see comment
// on `shape_field_name_to_ocamlrep`) and don't represent TanySentinel.
impl<R: Reason> FromOcamlRep for Ty_<R> { fn from_ocamlrep(value: ocamlrep::Value<'_>) -> Result<Self, ocamlrep::FromError> { if value.is_int() { match value.as_int().unwrap() { 0 => Ok(Ty_::Tthis), 1 => Ok(Ty_::Tmixed), 2 => Ok(Ty_::Twildcard), 3 => Ok(Ty_::Tnonnull), 4 => Ok(Ty_::Tdynamic), t => Err(ocamlrep::FromError::NullaryVariantTagOutOfRange { max: 4, actual: t }), } } else { let block = ocamlrep::from::expect_block(value)?; match block.tag() { 0 => { ocamlrep::from::expect_block_size(block, 2)?; Ok(Ty_::Tapply(Box::new(( ocamlrep::from::field(block, 0)?, ocamlrep::from::field(block, 1)?, )))) } 1 => { ocamlrep::from::expect_block_size(block, 2)?; Ok(Ty_::Trefinement(Box::new(TrefinementType { ty: ocamlrep::from::field(block, 0)?, refinement: ocamlrep::from::field(block, 1)?, }))) } 2 => { ocamlrep::from::expect_block_size(block, 1)?; Ok(Ty_::Tlike(ocamlrep::from::field(block, 0)?)) } 3 => { ocamlrep::from::expect_block_size(block, 1)?; let () = ocamlrep::from::field(block, 0)?; // TanySentinel Ok(Ty_::Tany) } 4 => { ocamlrep::from::expect_block_size(block, 1)?; Ok(Ty_::Toption(ocamlrep::from::field(block, 0)?)) } 5 => { ocamlrep::from::expect_block_size(block, 1)?; Ok(Ty_::Tprim(ocamlrep::from::field(block, 0)?)) } 6 => { ocamlrep::from::expect_block_size(block, 1)?; Ok(Ty_::Tfun(ocamlrep::from::field(block, 0)?)) } 7 => { ocamlrep::from::expect_block_size(block, 1)?; Ok(Ty_::Ttuple(ocamlrep::from::field(block, 0)?)) } 8 => { ocamlrep::from::expect_block_size(block, 1)?; Ok(Ty_::Tshape(ocamlrep::from::field(block, 0)?)) } 9 => { ocamlrep::from::expect_block_size(block, 2)?; Ok(Ty_::Tgeneric(Box::new(( ocamlrep::from::field(block, 0)?, ocamlrep::from::field(block, 1)?, )))) } 10 => { ocamlrep::from::expect_block_size(block, 1)?; Ok(Ty_::Tunion(ocamlrep::from::field(block, 0)?)) } 11 => { ocamlrep::from::expect_block_size(block, 1)?; Ok(Ty_::Tintersection(ocamlrep::from::field(block, 0)?)) } 12 => { ocamlrep::from::expect_block_size(block, 2)?; 
Ok(Ty_::TvecOrDict(Box::new(( ocamlrep::from::field(block, 0)?, ocamlrep::from::field(block, 1)?, )))) } 13 => { ocamlrep::from::expect_block_size(block, 1)?; Ok(Ty_::Taccess(ocamlrep::from::field(block, 0)?)) } t => Err(ocamlrep::FromError::BlockTagOutOfRange { max: 14, actual: t }), } } } }
Rust
hhvm/hphp/hack/src/hackrs/ty/decl/shallow.rs
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.

use std::collections::BTreeMap;

use eq_modulo_pos::EqModuloPos;
use ocamlrep::FromOcamlRep;
use ocamlrep::ToOcamlRep;
pub use oxidized::ast_defs::Visibility;
pub use oxidized_by_ref::method_flags::MethodFlags;
pub use oxidized_by_ref::prop_flags::PropFlags;
use pos::Bytes;
use pos::ClassConstName;
use pos::ConstName;
use pos::FunName;
use pos::MethodName;
use pos::ModuleName;
use pos::Positioned;
use pos::PropName;
use pos::Symbol;
use pos::TypeConstName;
use pos::TypeName;
use serde::Deserialize;
use serde::Serialize;

use crate::decl::ty::ClassConstKind;
use crate::decl::ty::ClassConstRef;
pub use crate::decl::ty::ConstDecl;
use crate::decl::ty::Enforceable;
use crate::decl::ty::EnumType;
use crate::decl::ty::FunElt;
use crate::decl::ty::ModuleDefType;
use crate::decl::ty::Tag;
use crate::decl::ty::Tparam;
use crate::decl::ty::Ty;
use crate::decl::ty::Typeconst;
use crate::decl::ty::TypedefType;
use crate::decl::ty::UserAttribute;
use crate::decl::ty::WhereConstraint;
use crate::decl::ty::XhpAttribute;
use crate::decl::ty::XhpEnumValue;
use crate::reason::Reason;

/// A class constant as it appears directly in source, before folding.
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
#[serde(bound = "R: Reason")]
pub struct ShallowClassConst<R: Reason> {
    // Abstract vs. concrete constant.
    pub kind: ClassConstKind,
    pub name: Positioned<ClassConstName, R::Pos>,
    /// This field is used for two different meanings in two different places:
    ///
    ///   enum class A:arraykey { int X = "a"; }
    ///
    /// In an enum class, X.ty = \HH\MemberOf<A,int>.
    ///
    ///   enum B:int as arraykey { X = "a"; Y = 1; Z = B::X; }
    ///
    /// In a legacy enum, X.ty = string, Y.ty = int, and Z.ty = TAny, and ty is
    /// just a simple syntactic attempt to retrieve the type from the initializer.
    pub ty: Ty<R>,
    /// This is a list of all scope-resolution operators "A::B" that are mentioned
    /// in the const initializer, for members of regular-enums and enum-class-enums
    /// to detect circularity of initializers. We don't yet have a similar mechanism
    /// for top-level const initializers.
    pub refs: Box<[ClassConstRef]>,
}

walkable!(ShallowClassConst<R> => [ty]);

/// A type constant as it appears directly in source, before folding.
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
#[serde(bound = "R: Reason")]
pub struct ShallowTypeconst<R: Reason> {
    pub name: Positioned<TypeConstName, R::Pos>,
    // Abstract (with constraints) or concrete (with a bound type).
    pub kind: Typeconst<R>,
    pub enforceable: Enforceable<R::Pos>,
    pub reifiable: Option<R::Pos>, // When Some, points to __Reifiable attribute
    pub is_ctx: bool,
}

walkable!(ShallowTypeconst<R> => [kind]);

impl<R: Reason> ShallowTypeconst<R> {
    /// True when `enforceable` carries a position (see `Enforceable`).
    pub fn is_enforceable(&self) -> bool {
        self.enforceable.is_some()
    }

    /// True when the `__Reifiable` attribute position is present.
    pub fn is_reifiable(&self) -> bool {
        self.reifiable.is_some()
    }
}

/// A (possibly XHP) property as it appears directly in source.
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
#[serde(bound = "R: Reason")]
pub struct ShallowProp<R: Reason> {
    pub name: Positioned<PropName, R::Pos>,
    // Some(..) iff this property is an XHP attribute.
    pub xhp_attr: Option<XhpAttribute>,
    pub ty: Ty<R>,
    pub visibility: Visibility,
    pub flags: PropFlags,
}

walkable!(ShallowProp<R> => [ty]);

impl<R: Reason> ShallowProp<R> {
    /// True iff this is an XHP attribute tagged `@required`
    /// (`@lateinit`-tagged and untagged attributes are not required).
    pub fn is_required_xhp_attribute(&self) -> bool {
        match self.xhp_attr {
            None => false,
            Some(attr) => match attr.tag {
                None => false,
                Some(tag) => match tag {
                    Tag::Required => true,
                    Tag::LateInit => false,
                },
            },
        }
    }
}

/// A method as it appears directly in source, before folding.
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
#[serde(bound = "R: Reason")]
pub struct ShallowMethod<R: Reason> {
    // note(sf, 2022-01-27):
    // - c.f.
    //   - `Shallow_decl_defs.shallow_method`
    //   - `oxidized_by_ref::shallow_decl_defs::ShallowMethod<'_>`
    pub name: Positioned<MethodName, R::Pos>,
    pub ty: Ty<R>,
    pub visibility: Visibility,
    pub deprecated: Option<Bytes>, // e.g. "The method foo is deprecated: ..."
    pub flags: MethodFlags,
    pub attributes: Box<[UserAttribute<R::Pos>]>,
}

walkable!(ShallowMethod<R> => [ty]);

/// A class/interface/trait/enum declaration as it appears directly in one
/// source file, before inheritance is folded in.
#[derive(Clone, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
#[serde(bound = "R: Reason")]
pub struct ShallowClass<R: Reason> {
    // note(sf, 2022-01-27):
    // - c.f.
    //   - `Shallow_decl_defs.shallow_class`
    //   - `oxidized_by_ref::shallow_decl_defs::ShallowClass<'_>`
    pub mode: oxidized::file_info::Mode,
    pub is_final: bool,
    pub is_abstract: bool,
    pub is_xhp: bool,
    pub is_internal: bool,
    pub has_xhp_keyword: bool,
    pub kind: oxidized::ast_defs::ClassishKind,
    pub module: Option<Positioned<ModuleName, R::Pos>>,
    pub name: Positioned<TypeName, R::Pos>,
    pub tparams: Box<[Tparam<R, Ty<R>>]>,
    pub where_constraints: Box<[WhereConstraint<Ty<R>>]>,
    pub extends: Box<[Ty<R>]>,
    pub uses: Box<[Ty<R>]>,
    pub xhp_attr_uses: Box<[Ty<R>]>,
    pub xhp_enum_values: BTreeMap<Symbol, Box<[XhpEnumValue]>>,
    pub xhp_marked_empty: bool,
    pub req_extends: Box<[Ty<R>]>,
    pub req_implements: Box<[Ty<R>]>,
    pub req_class: Box<[Ty<R>]>,
    pub implements: Box<[Ty<R>]>,
    pub support_dynamic_type: bool,
    pub consts: Box<[ShallowClassConst<R>]>,
    pub typeconsts: Box<[ShallowTypeconst<R>]>,
    pub props: Box<[ShallowProp<R>]>,
    pub static_props: Box<[ShallowProp<R>]>,
    pub constructor: Option<ShallowMethod<R>>,
    pub static_methods: Box<[ShallowMethod<R>]>,
    pub methods: Box<[ShallowMethod<R>]>,
    pub user_attributes: Box<[UserAttribute<R::Pos>]>,
    pub enum_type: Option<EnumType<R>>,
    pub docs_url: Option<String>,
}

// NOTE(review): `xhp_marked_empty` is the only field not listed below —
// likely harmless (it's a bare bool), but confirm this omission is deliberate.
walkable!(ShallowClass<R> as visit_shallow_class => [
    mode, is_final, is_abstract, is_xhp, is_internal, has_xhp_keyword,
    kind, module, name, tparams, where_constraints, extends, uses, xhp_attr_uses,
    xhp_enum_values, req_extends, req_implements, req_class, implements,
    support_dynamic_type, consts, typeconsts, props, static_props,
    constructor, static_methods, methods, user_attributes, enum_type, docs_url
]);

// Aliases mapping the generic decl names onto their shallow representations.
pub type FunDecl<R> = FunElt<R>;
pub type ClassDecl<R> = ShallowClass<R>;
pub type TypedefDecl<R> = TypedefType<R>;
pub type ModuleDecl<R> = ModuleDefType<R>;

/// A top-level declaration paired with its name.
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
#[serde(bound = "R: Reason")]
pub enum NamedDecl<R: Reason> {
    Class(TypeName, ClassDecl<R>),
    Fun(FunName, FunDecl<R>),
    Typedef(TypeName, TypedefDecl<R>),
    Const(ConstName, ConstDecl<R>),
    Module(ModuleName, ModuleDecl<R>),
}

walkable!(NamedDecl<R> as visit_named_decl => {
    Self::Class(_, x) => [x],
    Self::Fun(_, x) => [x],
    Self::Typedef(_, x) => [x],
    Self::Const(_, x) => [x],
    Self::Module(_, x) => [x],
});

impl<R: Reason> NamedDecl<R> {
    /// The declaration's name, erased to a plain `Symbol`.
    pub fn name(&self) -> Symbol {
        match self {
            Self::Class(name, _) => name.as_symbol(),
            Self::Fun(name, _) => name.as_symbol(),
            Self::Typedef(name, _) => name.as_symbol(),
            Self::Const(name, _) => name.as_symbol(),
            Self::Module(name, _) => name.as_symbol(),
        }
    }

    /// The naming-table kind for this declaration (classes and typedefs both
    /// live in the type namespace).
    // NOTE(review): this chunk ends mid-definition — the `Module` arm's body
    // continues beyond this view.
    pub fn name_kind(&self) -> oxidized::naming_types::NameKind {
        use oxidized::naming_types::KindOfType;
        use oxidized::naming_types::NameKind;
        match self {
            Self::Class(..) => NameKind::TypeKind(KindOfType::TClass),
            Self::Typedef(..) => NameKind::TypeKind(KindOfType::TTypedef),
            Self::Fun(..) => NameKind::FunKind,
            Self::Const(..) => NameKind::ConstKind,
            Self::Module(..)
=> NameKind::ModuleKind, } } } #[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)] #[derive(ToOcamlRep, FromOcamlRep)] #[serde(bound = "R: Reason")] pub enum Decl<R: Reason> { Class(ClassDecl<R>), Fun(FunDecl<R>), Typedef(TypedefDecl<R>), Const(ConstDecl<R>), Module(ModuleDecl<R>), } walkable!(Decl<R> as visit_decl => { Self::Class(x) => [x], Self::Fun(x) => [x], Self::Typedef(x) => [x], Self::Const(x) => [x], Self::Module(x) => [x], }); impl<R: Reason> Decl<R> { pub fn name_kind(&self) -> oxidized::naming_types::NameKind { use oxidized::naming_types::KindOfType; use oxidized::naming_types::NameKind; match self { Self::Class(..) => NameKind::TypeKind(KindOfType::TClass), Self::Typedef(..) => NameKind::TypeKind(KindOfType::TTypedef), Self::Fun(..) => NameKind::FunKind, Self::Const(..) => NameKind::ConstKind, Self::Module(..) => NameKind::ModuleKind, } } }
Rust
hhvm/hphp/hack/src/hackrs/ty/decl/subst.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use eq_modulo_pos::EqModuloPos; use hash::IndexMap; use ocamlrep::FromOcamlRep; use ocamlrep::ToOcamlRep; use pos::TypeName; use serde::Deserialize; use serde::Serialize; use crate::decl::Tparam; use crate::decl::Ty; use crate::reason::Reason; /// Maps type names to types with which to replace them. #[derive(Debug, Clone, Eq, EqModuloPos, PartialEq, Serialize, Deserialize)] #[derive(ToOcamlRep, FromOcamlRep)] #[serde(bound = "R: Reason")] pub struct Subst<R: Reason>(pub IndexMap<TypeName, Ty<R>>); impl<R: Reason> From<IndexMap<TypeName, Ty<R>>> for Subst<R> { fn from(map: IndexMap<TypeName, Ty<R>>) -> Self { Self(map) } } impl<R: Reason> From<Subst<R>> for IndexMap<TypeName, Ty<R>> { fn from(subst: Subst<R>) -> Self { subst.0 } } impl<R: Reason> Subst<R> { pub fn new(tparams: &[Tparam<R, Ty<R>>], targs: &[Ty<R>]) -> Self { // If there are fewer type arguments than type parameters, we'll have // emitted an error elsewhere. We bind missing types to `Tany` // here to keep parity with the OCaml implementation, which // produces `Tany` because of a now-dead feature called "silent_mode". let targs = targs .iter() .cloned() .chain(std::iter::repeat(Ty::any(R::none()))); Self( tparams .iter() .map(|tparam| tparam.name.id()) .zip(targs) .collect(), ) } }
Rust
hhvm/hphp/hack/src/hackrs/ty/decl/to_oxidized.rs
// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.

//! Conversions from the hackrs decl representation into the arena-allocated
//! `oxidized_by_ref` representation. Every impl allocates its output in the
//! provided `bumpalo::Bump` arena.

use oxidized_by_ref as obr;
use pos::Pos;
use pos::ToOxidized;

use super::folded;
use super::shallow;
use super::ty::*;
use crate::reason::Reason;

/// Convert member visibility, oxidizing the class/module name carried by the
/// non-public variants.
impl<'a> ToOxidized<'a> for CeVisibility {
    type Output = obr::typing_defs::CeVisibility<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        use obr::typing_defs::CeVisibility as Obr;
        match self {
            CeVisibility::Public => Obr::Vpublic,
            CeVisibility::Private(v) => Obr::Vprivate(v.to_oxidized(arena)),
            CeVisibility::Protected(v) => Obr::Vprotected(v.to_oxidized(arena)),
            CeVisibility::Internal(v) => Obr::Vinternal(v.to_oxidized(arena)),
        }
    }
}

/// Convert an IFC (information flow control) function declaration.
impl<'a> ToOxidized<'a> for IfcFunDecl {
    type Output = obr::typing_defs::IfcFunDecl<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        use obr::typing_defs::IfcFunDecl as Obr;
        match self {
            IfcFunDecl::FDPolicied(x) => Obr::FDPolicied(x.to_oxidized(arena)),
            IfcFunDecl::FDInferFlows => Obr::FDInferFlows,
        }
    }
}

/// Convert a user-attribute parameter, variant by variant.
impl<'a> ToOxidized<'a> for UserAttributeParam {
    type Output = obr::typing_defs::UserAttributeParam<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        use obr::typing_defs::UserAttributeParam as P;
        match self {
            UserAttributeParam::Classname(cn) => P::Classname(cn.to_oxidized(arena)),
            UserAttributeParam::EnumClassLabel(l) => P::EnumClassLabel(l.to_oxidized(arena)),
            UserAttributeParam::String(s) => P::String(s.to_oxidized(arena).into()),
            UserAttributeParam::Int(i) => P::Int(i.to_oxidized(arena)),
        }
    }
}

/// Convert a user attribute (name plus parameters), allocated in the arena.
impl<'a, P: Pos> ToOxidized<'a> for UserAttribute<P> {
    type Output = &'a obr::typing_defs::UserAttribute<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        arena.alloc(obr::typing_defs::UserAttribute {
            name: self.name.to_oxidized(arena),
            params: self.params.to_oxidized(arena),
        })
    }
}

/// Convert a type parameter, including its nested tparams and constraints.
impl<'a, R: Reason> ToOxidized<'a> for Tparam<R, Ty<R>> {
    type Output = &'a obr::typing_defs::Tparam<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        arena.alloc(obr::typing_defs::Tparam {
            variance: self.variance,
            name: self.name.to_oxidized(arena),
            tparams: self.tparams.to_oxidized(arena),
            // Constraint kinds are Copy; only the constraint types need
            // oxidizing.
            constraints: arena.alloc_slice_fill_iter(
                self.constraints
                    .iter()
                    .map(|(x, y)| (*x, y.to_oxidized(arena))),
            ),
            reified: self.reified,
            user_attributes: self.user_attributes.to_oxidized(arena),
        })
    }
}

/// Convert a `where` constraint (ty, constraint kind, ty).
impl<'a, R: Reason> ToOxidized<'a> for WhereConstraint<Ty<R>> {
    type Output = &'a obr::typing_defs::WhereConstraint<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        let WhereConstraint(tvar_ty, kind, as_ty) = self;
        arena.alloc(obr::typing_defs::WhereConstraint(
            tvar_ty.to_oxidized(arena),
            *kind,
            as_ty.to_oxidized(arena),
        ))
    }
}

/// Convert a type: the reason and the type node are oxidized separately and
/// paired in the arena.
impl<'a, R: Reason> ToOxidized<'a> for Ty<R> {
    type Output = &'a obr::typing_defs::Ty<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        arena.alloc(obr::typing_defs::Ty(
            arena.alloc(self.reason().to_oxidized(arena)),
            self.node().to_oxidized(arena),
        ))
    }
}

/// Convert a shape field's type (optionality flag plus type).
impl<'a, R: Reason> ToOxidized<'a> for ShapeFieldType<R> {
    type Output = &'a obr::typing_defs::ShapeFieldType<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        arena.alloc(obr::typing_defs::ShapeFieldType {
            optional: self.optional,
            ty: self.ty.to_oxidized(arena),
        })
    }
}

/// Reattach a shape field name to its position(s).
///
/// hackrs stores shape field names without positions (see the comment on
/// `TshapeFieldName` in `ty.rs`) and keeps the position in the map value as a
/// `ShapeFieldNamePos`; the oxidized representation carries positions inline,
/// so this function recombines them. Panics if the name variant and the
/// position variant disagree — that pairing is an invariant of the decl data.
fn oxidize_shape_field_name<'a, P: Pos>(
    arena: &'a bumpalo::Bump,
    name: TshapeFieldName,
    field_name_pos: &ShapeFieldNamePos<P>,
) -> obr::typing_defs::TshapeFieldName<'a> {
    use obr::typing_defs::TshapeFieldName as Obr;
    use ShapeFieldNamePos as SfnPos;
    let simple_pos = || match field_name_pos {
        SfnPos::Simple(p) => p.to_oxidized(arena),
        SfnPos::ClassConst(..) => panic!("expected ShapeFieldNamePos::Simple"),
    };
    match name {
        TshapeFieldName::TSFlitInt(x) => Obr::TSFlitInt(arena.alloc(obr::typing_defs::PosString(
            simple_pos(),
            x.to_oxidized(arena),
        ))),
        TshapeFieldName::TSFlitStr(x) => Obr::TSFlitStr(arena.alloc(
            obr::typing_defs::PosByteString(simple_pos(), x.to_oxidized(arena).into()),
        )),
        TshapeFieldName::TSFclassConst(cls, name) => {
            // Class-const field names carry two positions: the class name and
            // the const name.
            let (pos1, pos2) = match field_name_pos {
                SfnPos::ClassConst(p1, p2) => (p1.to_oxidized(arena), p2.to_oxidized(arena)),
                SfnPos::Simple(..) => panic!("expected ShapeFieldNamePos::ClassConst"),
            };
            Obr::TSFclassConst(arena.alloc((
                (pos1, cls.to_oxidized(arena)),
                obr::typing_defs::PosString(pos2, name.to_oxidized(arena)),
            )))
        }
    }
}

/// Convert a type node. Most variants delegate to the inner types' impls; the
/// `Tshape` case rebuilds the field map with positions reattached.
impl<'a, R: Reason> ToOxidized<'a> for Ty_<R> {
    type Output = obr::typing_defs::Ty_<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        use obr::t_shape_map::TShapeField;
        use obr::t_shape_map::TShapeMap;
        use obr::typing_defs;
        match self {
            Ty_::Tthis => typing_defs::Ty_::Tthis,
            Ty_::Tapply(x) => typing_defs::Ty_::Tapply(x.to_oxidized(arena)),
            Ty_::Tmixed => typing_defs::Ty_::Tmixed,
            Ty_::Twildcard => typing_defs::Ty_::Twildcard,
            Ty_::Tlike(x) => typing_defs::Ty_::Tlike(x.to_oxidized(arena)),
            Ty_::Tany => typing_defs::Ty_::Tany(obr::tany_sentinel::TanySentinel),
            Ty_::Tnonnull => typing_defs::Ty_::Tnonnull,
            Ty_::Tdynamic => typing_defs::Ty_::Tdynamic,
            Ty_::Toption(x) => typing_defs::Ty_::Toption(x.to_oxidized(arena)),
            Ty_::Tprim(x) => typing_defs::Ty_::Tprim(arena.alloc(*x)),
            Ty_::Tfun(x) => typing_defs::Ty_::Tfun(x.to_oxidized(arena)),
            Ty_::Ttuple(x) => typing_defs::Ty_::Ttuple(x.to_oxidized(arena)),
            Ty_::Tshape(shape) => {
                // Rebuild the field map: each key is re-paired with the
                // position stored in its value (see oxidize_shape_field_name).
                let mut shape_fields = arena_collections::AssocListMut::new_in(arena);
                let ShapeType(shape_kind, shape_field_type_map) = &**shape;
                for (k, v) in shape_field_type_map.iter() {
                    let k = oxidize_shape_field_name(arena, *k, &v.field_name_pos);
                    shape_fields.insert_or_replace(TShapeField(k), v.to_oxidized(arena));
                }
                let shape_kind = shape_kind.to_oxidized(arena);
                // hackrs does not track shape origins; mark it missing.
                let shape_origin = typing_defs::TypeOrigin::MissingOrigin;
                typing_defs::Ty_::Tshape(arena.alloc(typing_defs::ShapeType {
                    origin: shape_origin,
                    unknown_value: shape_kind,
                    fields: TShapeMap::from(shape_fields),
                }))
            }
            Ty_::Tgeneric(x) => typing_defs::Ty_::Tgeneric(x.to_oxidized(arena)),
            Ty_::Tunion(x) => typing_defs::Ty_::Tunion(x.to_oxidized(arena)),
            Ty_::Tintersection(x) => typing_defs::Ty_::Tintersection(x.to_oxidized(arena)),
            Ty_::TvecOrDict(x) => typing_defs::Ty_::TvecOrDict(x.to_oxidized(arena)),
            Ty_::Taccess(x) => typing_defs::Ty_::Taccess(x.to_oxidized(arena)),
            Ty_::Trefinement(tr) => typing_defs::Ty_::Trefinement(
                arena.alloc((tr.ty.to_oxidized(arena), tr.refinement.to_oxidized(arena))),
            ),
        }
    }
}

/// Convert a class refinement (`with { ... }`): its refined-const map.
impl<'a, R: Reason> ToOxidized<'a> for ClassRefinement<Ty<R>> {
    type Output = obr::typing_defs::ClassRefinement<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        obr::typing_defs::ClassRefinement {
            cr_consts: self.consts.to_oxidized(arena),
        }
    }
}

/// Convert a refined-const bound (exact type or loose bounds).
impl<'a, R: Reason> ToOxidized<'a> for RefinedConstBound<Ty<R>> {
    type Output = &'a obr::typing_defs::RefinedConstBound<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        use obr::typing_defs::RefinedConstBound::*;
        arena.alloc(match self {
            Self::Exact(ty) => TRexact(ty.to_oxidized(arena)),
            Self::Loose(bounds) => TRloose(bounds.to_oxidized(arena)),
        })
    }
}

/// Convert a refined const (bound plus is-context flag).
impl<'a, R: Reason> ToOxidized<'a> for RefinedConst<Ty<R>> {
    type Output = obr::typing_defs::RefinedConst<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        obr::typing_defs::RefinedConst {
            bound: *self.bound.to_oxidized(arena),
            is_ctx: self.is_ctx,
        }
    }
}

/// Convert loose refined-const bounds (lower and upper bound lists).
impl<'a, R: Reason> ToOxidized<'a> for RefinedConstBounds<Ty<R>> {
    type Output = &'a obr::typing_defs::RefinedConstBounds<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        arena.alloc(obr::typing_defs::RefinedConstBounds {
            lower: self.lower.to_oxidized(arena),
            upper: self.upper.to_oxidized(arena),
        })
    }
}

/// Convert a type-constant access (`T::C`): root type plus const name.
impl<'a, R: Reason> ToOxidized<'a> for TaccessType<R, Ty<R>> {
    type Output = &'a obr::typing_defs::TaccessType<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        arena.alloc(obr::typing_defs::TaccessType(
            self.ty.to_oxidized(arena),
            self.type_const.to_oxidized(arena),
        ))
    }
}

/// Convert a function's implicit parameters (its capability/context).
impl<'a, R: Reason> ToOxidized<'a> for FunImplicitParams<R, Ty<R>> {
    type Output = &'a obr::typing_defs::FunImplicitParams<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        arena.alloc(obr::typing_defs::FunImplicitParams {
            capability: match &self.capability {
                Capability::CapDefaults(p) => {
                    obr::typing_defs::Capability::CapDefaults(p.to_oxidized(arena))
                }
                Capability::CapTy(ty) => obr::typing_defs::Capability::CapTy(ty.to_oxidized(arena)),
            },
        })
    }
}

/// Convert a function type (tparams, params, return, flags, etc.).
impl<'a, R: Reason> ToOxidized<'a> for FunType<R, Ty<R>> {
    type Output = &'a obr::typing_defs::FunType<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        arena.alloc(obr::typing_defs::FunType {
            tparams: self.tparams.to_oxidized(arena),
            where_constraints: self.where_constraints.to_oxidized(arena),
            params: self.params.to_oxidized(arena),
            implicit_params: self.implicit_params.to_oxidized(arena),
            ret: self.ret.to_oxidized(arena),
            flags: self.flags,
            ifc_decl: self.ifc_decl.to_oxidized(arena),
            cross_package: self.cross_package.to_oxidized(arena),
        })
    }
}

/// Convert a possibly-enforced type; note the field rename `ty` -> `type_`.
impl<'a, R: Reason> ToOxidized<'a> for PossiblyEnforcedTy<Ty<R>> {
    type Output = &'a obr::typing_defs::PossiblyEnforcedTy<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        arena.alloc(obr::typing_defs::PossiblyEnforcedTy {
            enforced: self.enforced,
            type_: self.ty.to_oxidized(arena),
        })
    }
}

/// Convert a function parameter; note the field rename `ty` -> `type_`.
impl<'a, R: Reason> ToOxidized<'a> for FunParam<R, Ty<R>> {
    type Output = &'a obr::typing_defs::FunParam<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        arena.alloc(obr::typing_defs::FunParam {
            pos: self.pos.to_oxidized(arena),
            name: self.name.to_oxidized(arena),
            type_: self.ty.to_oxidized(arena),
            flags: self.flags,
        })
    }
}
/// Convert an XHP enum attribute value (int or string).
impl<'a> ToOxidized<'a> for XhpEnumValue {
    type Output = obr::ast_defs::XhpEnumValue<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        use obr::ast_defs::XhpEnumValue as Obr;
        match self {
            Self::XEVInt(i) => Obr::XEVInt(*i),
            Self::XEVString(s) => Obr::XEVString(s.to_oxidized(arena)),
        }
    }
}

/// Convert the provenance of a class constant (self or an ancestor).
impl<'a> ToOxidized<'a> for ClassConstFrom {
    type Output = obr::typing_defs::ClassConstFrom<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        use obr::typing_defs::ClassConstFrom as Obr;
        match self {
            Self::Self_ => Obr::Self_,
            Self::From(ty) => Obr::From(ty.to_oxidized(arena)),
        }
    }
}

/// Convert a scope-resolution reference "Class::Const".
impl<'a> ToOxidized<'a> for ClassConstRef {
    type Output = obr::typing_defs::ClassConstRef<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        let ClassConstRef(class, symbol) = self;
        obr::typing_defs::ClassConstRef(class.to_oxidized(arena), symbol.to_oxidized(arena))
    }
}

/// Convert an abstract type constant (constraints plus optional default).
impl<'a, R: Reason> ToOxidized<'a> for AbstractTypeconst<R> {
    type Output = &'a obr::typing_defs::AbstractTypeconst<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        arena.alloc(obr::typing_defs::AbstractTypeconst {
            as_constraint: self.as_constraint.to_oxidized(arena),
            super_constraint: self.super_constraint.to_oxidized(arena),
            default: self.default.to_oxidized(arena),
        })
    }
}

/// Convert a concrete type constant; note the field rename `ty` -> `tc_type`.
impl<'a, R: Reason> ToOxidized<'a> for ConcreteTypeconst<R> {
    type Output = &'a obr::typing_defs::ConcreteTypeconst<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        arena.alloc(obr::typing_defs::ConcreteTypeconst {
            tc_type: self.ty.to_oxidized(arena),
        })
    }
}

/// Convert a type constant (abstract or concrete).
impl<'a, R: Reason> ToOxidized<'a> for Typeconst<R> {
    type Output = obr::typing_defs::Typeconst<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        use obr::typing_defs::Typeconst as Obr;
        match self {
            Self::TCAbstract(x) => Obr::TCAbstract(x.to_oxidized(arena)),
            Self::TCConcrete(x) => Obr::TCConcrete(x.to_oxidized(arena)),
        }
    }
}

/// Convert a legacy-enum type (base, optional constraint, includes).
impl<'a, R: Reason> ToOxidized<'a> for EnumType<R> {
    type Output = &'a obr::typing_defs::EnumType<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        arena.alloc(obr::typing_defs::EnumType {
            base: self.base.to_oxidized(arena),
            constraint: self.constraint.as_ref().map(|c| c.to_oxidized(arena)),
            includes: self.includes.to_oxidized(arena),
        })
    }
}

/// Convert an `Enforceable` marker: oxidized represents it as a
/// `(position, bool)` pair, using the none-position when absent.
impl<'a, P: Pos> ToOxidized<'a> for Enforceable<P> {
    type Output = (&'a obr::pos::Pos<'a>, bool);

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        self.0.as_ref().map_or_else(
            || (obr::pos::Pos::none(), false),
            |x| (x.to_oxidized(arena), true),
        )
    }
}

/// Convert a substitution context from inheritance folding.
impl<'a, R: Reason> ToOxidized<'a> for folded::SubstContext<R> {
    type Output = &'a obr::decl_defs::SubstContext<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        // Reach through the Subst newtype to its underlying map.
        let subst = &self.subst.0;
        arena.alloc(obr::decl_defs::SubstContext {
            subst: subst.to_oxidized(arena),
            class_context: self.class_context.to_oxidized(arena),
            from_req_extends: self.from_req_extends,
        })
    }
}

/// Convert a folded type constant.
impl<'a, R: Reason> ToOxidized<'a> for folded::TypeConst<R> {
    type Output = &'a obr::typing_defs::TypeconstType<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        arena.alloc(obr::typing_defs::TypeconstType {
            synthesized: self.is_synthesized,
            concretized: self.is_concretized,
            is_ctx: self.is_ctx,
            // Same (position, bool) encoding as the Enforceable impl above.
            enforceable: self.enforceable.as_ref().map_or_else(
                || (obr::pos::Pos::none(), false),
                |x| (x.to_oxidized(arena), true),
            ),
            reifiable: self.reifiable.as_ref().map(|x| x.to_oxidized(arena)),
            origin: self.origin.to_oxidized(arena),
            kind: self.kind.to_oxidized(arena),
            name: self.name.to_oxidized(arena),
        })
    }
}

/// Convert a folded class constant; note renames `is_synthesized` ->
/// `synthesized`, `kind` -> `abstract_`, `ty` -> `type_`.
impl<'a, R: Reason> ToOxidized<'a> for folded::ClassConst<R> {
    type Output = &'a obr::typing_defs::ClassConst<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        arena.alloc(obr::typing_defs::ClassConst {
            synthesized: self.is_synthesized,
            abstract_: self.kind,
            origin: self.origin.to_oxidized(arena),
            refs: self.refs.to_oxidized(arena),
            type_: self.ty.to_oxidized(arena),
            pos: self.pos.to_oxidized(arena),
        })
    }
}

/// Convert a `require extends`/`require implements` requirement.
impl<'a, R: Reason> ToOxidized<'a> for folded::Requirement<R> {
    type Output = &'a obr::decl_defs::Requirement<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        arena.alloc(obr::decl_defs::Requirement(
            self.pos.to_oxidized(arena),
            self.ty.to_oxidized(arena),
        ))
    }
}

/// Convert a folded constructor: oxidized stores it as an
/// (optional element, consistency) pair rather than a struct.
impl<'a> ToOxidized<'a> for folded::Constructor {
    type Output = (Option<&'a obr::decl_defs::Element<'a>>, ConsistentKind);

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        (self.elt.to_oxidized(arena), self.consistency)
    }
}

/// Convert a fully folded class to `DeclClassType`. Field names differ in
/// places (`is_final` -> `final_`, `static_props` -> `sprops`, etc.), and
/// `abstract_`/`need_init` are derived via methods rather than stored fields.
impl<'a, R: Reason> ToOxidized<'a> for folded::FoldedClass<R> {
    type Output = &'a obr::decl_defs::DeclClassType<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        // Destructure to help ensure we convert every field.
        let Self {
            name,
            pos,
            kind,
            is_final,
            is_const,
            is_internal,
            is_xhp,
            has_xhp_keyword,
            support_dynamic_type,
            module,
            is_module_level_trait,
            tparams,
            where_constraints,
            substs,
            ancestors,
            props,
            static_props,
            methods,
            static_methods,
            consts,
            type_consts,
            xhp_enum_values,
            xhp_marked_empty,
            constructor,
            deferred_init_members,
            req_ancestors,
            req_ancestors_extends,
            req_class_ancestors,
            extends,
            sealed_whitelist,
            xhp_attr_deps,
            enum_type,
            decl_errors,
            docs_url,
        } = self;
        arena.alloc(obr::decl_defs::DeclClassType {
            name: name.to_oxidized(arena),
            pos: pos.to_oxidized(arena),
            kind: *kind,
            abstract_: self.is_abstract(),
            final_: *is_final,
            const_: *is_const,
            internal: *is_internal,
            is_xhp: *is_xhp,
            has_xhp_keyword: *has_xhp_keyword,
            support_dynamic_type: *support_dynamic_type,
            module: module.as_ref().map(|m| {
                let (pos, id) = m.to_oxidized(arena);
                obr::ast_defs::Id(pos, id)
            }),
            is_module_level_trait: *is_module_level_trait,
            tparams: tparams.to_oxidized(arena),
            where_constraints: where_constraints.to_oxidized(arena),
            substs: substs.to_oxidized(arena),
            ancestors: ancestors.to_oxidized(arena),
            props: props.to_oxidized(arena),
            sprops: static_props.to_oxidized(arena),
            methods: methods.to_oxidized(arena),
            smethods: static_methods.to_oxidized(arena),
            consts: consts.to_oxidized(arena),
            typeconsts: type_consts.to_oxidized(arena),
            xhp_enum_values: xhp_enum_values.to_oxidized(arena),
            xhp_marked_empty: *xhp_marked_empty,
            construct: constructor.to_oxidized(arena),
            need_init: self.has_concrete_constructor(),
            deferred_init_members: deferred_init_members.to_oxidized(arena),
            req_ancestors: req_ancestors.to_oxidized(arena),
            req_ancestors_extends: req_ancestors_extends.to_oxidized(arena),
            req_class_ancestors: req_class_ancestors.to_oxidized(arena),
            extends: extends.to_oxidized(arena),
            sealed_whitelist: sealed_whitelist.to_oxidized(arena),
            xhp_attr_deps: xhp_attr_deps.to_oxidized(arena),
            enum_type: enum_type.as_ref().map(|et| et.to_oxidized(arena)),
            decl_errors: decl_errors.to_oxidized(arena),
            docs_url: docs_url.as_deref().to_oxidized(arena),
        })
    }
}

/// Convert a folded class element (method/prop metadata).
impl<'a> ToOxidized<'a> for folded::FoldedElement {
    type Output = &'a obr::decl_defs::Element<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        arena.alloc(obr::decl_defs::Element {
            origin: self.origin.to_oxidized(arena),
            visibility: self.visibility.to_oxidized(arena),
            // Deprecation text is stored as bytes; convert lossily into an
            // arena-allocated str.
            deprecated: self.deprecated.map(|x| {
                bumpalo::collections::String::from_utf8_lossy_in(x.as_bytes(), arena)
                    .into_bump_str()
            }),
            flags: self.flags,
        })
    }
}

/// Convert a decl error, generic over the position type.
impl<'a, P: ToOxidized<'a, Output = &'a obr::pos::Pos<'a>>> ToOxidized<'a>
    for crate::decl_error::DeclError<P>
{
    type Output = obr::decl_defs::DeclError<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        use obr::decl_defs::DeclError;
        match self {
            &Self::WrongExtendKind {
                ref pos,
                kind,
                name,
                ref parent_pos,
                parent_kind,
                parent_name,
            } => DeclError::WrongExtendKind {
                pos: pos.to_oxidized(arena),
                kind,
                name: name.to_oxidized(arena),
                parent_pos: parent_pos.to_oxidized(arena),
                parent_kind,
                parent_name: parent_name.to_oxidized(arena),
            },
            Self::CyclicClassDef(pos, stack) => DeclError::CyclicClassDef {
                pos: pos.to_oxidized(arena),
                stack: obr::s_set::SSet::from(arena, stack.iter().map(|s| s.to_oxidized(arena))),
            },
        }
    }
}

/// Convert a shallow method. Destructures to ensure every field is handled.
impl<'a, R: Reason> ToOxidized<'a> for shallow::ShallowMethod<R> {
    type Output = &'a obr::shallow_decl_defs::ShallowMethod<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        let Self {
            name,
            ty,
            visibility,
            deprecated,
            flags,
            attributes,
        } = self;
        arena.alloc(obr::shallow_decl_defs::ShallowMethod {
            name: name.to_oxidized(arena),
            type_: ty.to_oxidized(arena),
            visibility: *visibility,
            deprecated: deprecated.as_ref().map(|s| {
                bumpalo::collections::String::from_utf8_lossy_in(s.as_bytes(), arena)
                    .into_bump_str()
            }),
            flags: *flags,
            attributes: attributes.to_oxidized(arena),
        })
    }
}

/// Convert a shallow property; note the field rename `ty` -> `type_`.
impl<'a, R: Reason> ToOxidized<'a> for shallow::ShallowProp<R> {
    type Output = &'a obr::shallow_decl_defs::ShallowProp<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        let Self {
            name,
            xhp_attr,
            ty,
            visibility,
            flags,
        } = self;
        arena.alloc(obr::shallow_decl_defs::ShallowProp {
            name: name.to_oxidized(arena),
            xhp_attr: *xhp_attr,
            type_: ty.to_oxidized(arena),
            visibility: *visibility,
            flags: *flags,
        })
    }
}

/// Convert a shallow class constant; note the rename `kind` -> `abstract_`.
impl<'a, R: Reason> ToOxidized<'a> for shallow::ShallowClassConst<R> {
    type Output = &'a obr::shallow_decl_defs::ShallowClassConst<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        let Self {
            kind,
            name,
            ty,
            refs,
        } = self;
        arena.alloc(obr::shallow_decl_defs::ShallowClassConst {
            abstract_: *kind,
            name: name.to_oxidized(arena),
            type_: ty.to_oxidized(arena),
            refs: refs.to_oxidized(arena),
        })
    }
}

/// Convert a shallow type constant.
impl<'a, R: Reason> ToOxidized<'a> for shallow::ShallowTypeconst<R> {
    type Output = &'a obr::shallow_decl_defs::ShallowTypeconst<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        let Self {
            name,
            kind,
            enforceable,
            reifiable,
            is_ctx,
        } = self;
        arena.alloc(obr::shallow_decl_defs::ShallowTypeconst {
            name: name.to_oxidized(arena),
            kind: kind.to_oxidized(arena),
            enforceable: enforceable.to_oxidized(arena),
            reifiable: reifiable.as_ref().map(|p| p.to_oxidized(arena)),
            is_ctx: *is_ctx,
        })
    }
}

/// Convert a shallow class declaration. Several fields are renamed
/// (`is_final` -> `final_`, `static_props` -> `sprops`, etc.).
impl<'a, R: Reason> ToOxidized<'a> for shallow::ClassDecl<R> {
    type Output = &'a obr::shallow_decl_defs::ClassDecl<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        let Self {
            mode,
            is_final,
            is_abstract,
            is_internal,
            is_xhp,
            has_xhp_keyword,
            kind,
            module,
            name,
            tparams,
            where_constraints,
            extends,
            uses,
            xhp_attr_uses,
            xhp_enum_values,
            xhp_marked_empty,
            req_extends,
            req_implements,
            req_class,
            implements,
            support_dynamic_type,
            consts,
            typeconsts,
            props,
            static_props,
            constructor,
            static_methods,
            methods,
            user_attributes,
            enum_type,
            docs_url,
        } = self;
        arena.alloc(obr::shallow_decl_defs::ClassDecl {
            mode: *mode,
            final_: *is_final,
            abstract_: *is_abstract,
            is_xhp: *is_xhp,
            internal: *is_internal,
            has_xhp_keyword: *has_xhp_keyword,
            kind: *kind,
            module: module.as_ref().map(|m| {
                let (pos, id) = m.to_oxidized(arena);
                obr::ast_defs::Id(pos, id)
            }),
            name: name.to_oxidized(arena),
            tparams: tparams.to_oxidized(arena),
            where_constraints: where_constraints.to_oxidized(arena),
            extends: extends.to_oxidized(arena),
            uses: uses.to_oxidized(arena),
            xhp_attr_uses: xhp_attr_uses.to_oxidized(arena),
            xhp_enum_values: xhp_enum_values.to_oxidized(arena),
            xhp_marked_empty: *xhp_marked_empty,
            req_extends: req_extends.to_oxidized(arena),
            req_implements: req_implements.to_oxidized(arena),
            req_class: req_class.to_oxidized(arena),
            implements: implements.to_oxidized(arena),
            support_dynamic_type: *support_dynamic_type,
            consts: consts.to_oxidized(arena),
            typeconsts: typeconsts.to_oxidized(arena),
            props: props.to_oxidized(arena),
            sprops: static_props.to_oxidized(arena),
            constructor: constructor.as_ref().map(|c| c.to_oxidized(arena)),
            static_methods: static_methods.to_oxidized(arena),
            methods: methods.to_oxidized(arena),
            user_attributes: user_attributes.to_oxidized(arena),
            enum_type: enum_type.as_ref().map(|e| e.to_oxidized(arena)),
            docs_url: docs_url
                .as_ref()
                .map(|s| bumpalo::collections::String::from_str_in(s, arena).into_bump_str()),
        })
    }
}

/// Convert a shallow function declaration.
impl<'a, R: Reason> ToOxidized<'a> for shallow::FunDecl<R> {
    type Output = &'a obr::shallow_decl_defs::FunDecl<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        let Self {
            deprecated,
            module,
            internal,
            ty,
            pos,
            php_std_lib,
            support_dynamic_type,
            no_auto_dynamic,
            no_auto_likes,
        } = self;
        arena.alloc(obr::shallow_decl_defs::FunDecl {
            deprecated: deprecated.as_ref().map(|s| {
                bumpalo::collections::String::from_utf8_lossy_in(s.as_bytes(), arena)
                    .into_bump_str()
            }),
            internal: *internal,
            type_: ty.to_oxidized(arena),
            pos: pos.to_oxidized(arena),
            php_std_lib: *php_std_lib,
            support_dynamic_type: *support_dynamic_type,
            no_auto_dynamic: *no_auto_dynamic,
            no_auto_likes: *no_auto_likes,
            module: module.as_ref().map(|m| {
                let (pos, id) = m.to_oxidized(arena);
                obr::ast_defs::Id(pos, id)
            }),
        })
    }
}

/// Convert a typedef declaration.
impl<'a, R: Reason> ToOxidized<'a> for shallow::TypedefDecl<R> {
    type Output = &'a obr::shallow_decl_defs::TypedefDecl<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        let Self {
            module,
            pos,
            vis,
            tparams,
            as_constraint,
            super_constraint,
            ty,
            is_ctx,
            attributes,
            internal,
            docs_url,
        } = self;
        arena.alloc(obr::shallow_decl_defs::TypedefDecl {
            module: module.as_ref().map(|m| {
                let (pos, id) = m.to_oxidized(arena);
                obr::ast_defs::Id(pos, id)
            }),
            pos: pos.to_oxidized(arena),
            vis: *vis,
            tparams: tparams.to_oxidized(arena),
            as_constraint: as_constraint.as_ref().map(|t| t.to_oxidized(arena)),
            super_constraint: super_constraint.as_ref().map(|t| t.to_oxidized(arena)),
            type_: ty.to_oxidized(arena),
            is_ctx: *is_ctx,
            attributes: attributes.to_oxidized(arena),
            internal: *internal,
            docs_url: docs_url.as_deref().to_oxidized(arena),
        })
    }
}

/// Convert a top-level constant declaration.
impl<'a, R: Reason> ToOxidized<'a> for shallow::ConstDecl<R> {
    type Output = &'a obr::shallow_decl_defs::ConstDecl<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        let Self { pos, ty } = self;
        arena.alloc(obr::shallow_decl_defs::ConstDecl {
            pos: pos.to_oxidized(arena),
            type_: ty.to_oxidized(arena),
        })
    }
}

/// Convert a module reference (global, prefix, or exact).
impl<'a> ToOxidized<'a> for ModuleReference {
    type Output = obr::typing_defs::ModuleReference<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        use obr::typing_defs::ModuleReference as Obr;
        match self {
            ModuleReference::MRGlobal => Obr::MRGlobal,
            ModuleReference::MRPrefix(m) => Obr::MRPrefix(m.to_oxidized(arena)),
            ModuleReference::MRExact(m) => Obr::MRExact(m.to_oxidized(arena)),
        }
    }
}

/// Convert a module declaration (position, exports, imports).
impl<'a, R: Reason> ToOxidized<'a> for shallow::ModuleDecl<R> {
    type Output = &'a obr::shallow_decl_defs::ModuleDecl<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        let Self {
            pos,
            exports,
            imports,
        } = self;
        arena.alloc(obr::shallow_decl_defs::ModuleDecl {
            pos: pos.to_oxidized(arena),
            exports: exports.to_oxidized(arena),
            imports: imports.to_oxidized(arena),
        })
    }
}
Rust
hhvm/hphp/hack/src/hackrs/ty/decl/ty.rs
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.

//! Declaration-phase type representation: the hash-consed decl `Ty` and the
//! supporting declaration structures (functions, typedefs, typeconsts,
//! modules, refinements).

use std::collections::BTreeMap;
use std::fmt;

use eq_modulo_pos::EqModuloPos;
use hcons::Hc;
use ocamlrep::FromOcamlRep;
use ocamlrep::ToOcamlRep;
use oxidized::aast;
pub use oxidized::aast_defs::ReifyKind;
pub use oxidized::aast_defs::Tprim as Prim;
use oxidized::ast_defs;
pub use oxidized::ast_defs::Abstraction;
pub use oxidized::ast_defs::ClassishKind;
pub use oxidized::ast_defs::ConstraintKind;
pub use oxidized::ast_defs::Visibility;
pub use oxidized::typing_defs::ClassConstKind;
pub use oxidized::typing_defs_core::ConsistentKind;
pub use oxidized::typing_defs_core::Enforcement;
pub use oxidized::typing_defs_core::ParamMode;
pub use oxidized::typing_defs_flags;
pub use oxidized::typing_defs_flags::ClassEltFlags;
pub use oxidized::typing_defs_flags::ClassEltFlagsArgs;
pub use oxidized::typing_defs_flags::FunParamFlags;
pub use oxidized::typing_defs_flags::FunTypeFlags;
pub use oxidized::xhp_attribute::Tag;
pub use oxidized::xhp_attribute::XhpAttribute;
use pos::Bytes;
use pos::ModuleName;
use pos::Positioned;
use pos::Symbol;
use pos::TypeConstName;
use pos::TypeName;
use serde::de::DeserializeOwned;
use serde::Deserialize;
use serde::Serialize;
use utils::core::Ident;

use crate::reason;
use crate::reason::Reason;

/// Whether a class type denotes instances of exactly that class (`Exact`) or
/// may also include subclasses (`Nonexact`).
#[derive(
    Copy, Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize
)]
pub enum Exact {
    Exact,
    Nonexact,
}

// c.f. ast_defs::XhpEnumValue
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
pub enum XhpEnumValue {
    XEVInt(isize),
    XEVString(Symbol),
}

walkable!(XhpEnumValue => {
    Self::XEVInt(i) => [i],
    Self::XEVString(s) => [s],
});

/// Visibility of a class element. `Private`/`Protected` carry a class name
/// and `Internal` a module name (presumably the class/module the visibility
/// is relative to — confirm against the folding code).
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
pub enum CeVisibility {
    Public,
    Private(TypeName),
    Protected(TypeName),
    Internal(ModuleName),
}

walkable!(CeVisibility => {
    Self::Public => [],
    Self::Private(t) => [t],
    Self::Protected(t) => [t],
    Self::Internal(m) => [m],
});

#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
pub enum IfcFunDecl {
    FDPolicied(Option<Symbol>),
    FDInferFlows,
}

// The OCaml type `tshape_field_name` includes positions, but ignores those
// positions in its `ord` implementation. We can't do the same, though: Rust
// hash tables require impls of Hash and Eq to agree, and our Hash impl must
// take positions into account (else hash-consing will produce bad results). We
// could write a custom Ord impl which disagrees with the Eq impl, but it would
// violate the [PartialOrd requirement][] that `a == b` if and only if
// `partial_cmp(a, b) == Some(Equal)`, and the [Ord requirement][] for a strict
// total order.
//
// [PartialOrd requirement]: https://doc.rust-lang.org/std/cmp/trait.PartialOrd.html
// [Ord requirement]: https://doc.rust-lang.org/std/cmp/trait.Ord.html#corollaries
//
// Instead, we omit the positions from these keys, and store the field name's
// position as part of the map's value (in a `ShapeFieldNamePos`).
#[derive(Copy, Clone, Debug, Eq, EqModuloPos, Hash, Ord, PartialEq, PartialOrd)]
#[derive(Serialize, Deserialize)]
pub enum TshapeFieldName {
    TSFlitInt(Symbol),
    TSFlitStr(Bytes),
    TSFclassConst(TypeName, Symbol),
}

walkable!(TshapeFieldName);

/// The position of a shape field name; e.g., the position of `'a'` in
/// `shape('a' => int)`, or the positions of `Foo` and `X` in
/// `shape(Foo::X => int)`.
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, Ord, PartialEq, PartialOrd)]
#[derive(Serialize, Deserialize)]
pub enum ShapeFieldNamePos<P> {
    Simple(P),
    ClassConst(P, P),
}

#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
pub enum DependentType {
    Texpr(Ident),
}

/// A single argument of a user attribute, e.g. a classname literal or an
/// enum-class label.
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
pub enum UserAttributeParam {
    Classname(TypeName),
    EnumClassLabel(Symbol),
    String(Bytes),
    Int(Symbol),
}

walkable!(UserAttributeParam);

/// A user attribute (e.g. `<<__Memoize>>`) with its parameters.
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
pub struct UserAttribute<P> {
    pub name: Positioned<TypeName, P>,
    pub params: Box<[UserAttributeParam]>,
}

impl<P> UserAttribute<P> {
    /// The `Classname` parameters of this attribute, in order; all other
    /// parameter kinds are skipped.
    pub fn classname_params(&self) -> Vec<TypeName> {
        (self.params.iter())
            .filter_map(|p| match p {
                UserAttributeParam::Classname(cn) => Some(*cn),
                _ => None,
            })
            .collect()
    }
}

walkable!(impl<R: Reason> for UserAttribute<R::Pos> => [name, params]);

/// A declared type parameter: variance, name, nested type parameters (for
/// higher-kinded parameters), constraints, reification, and attributes.
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
#[serde(bound = "R: Reason, TY: Serialize + DeserializeOwned")]
pub struct Tparam<R: Reason, TY> {
    pub variance: ast_defs::Variance,
    pub name: Positioned<TypeName, R::Pos>,
    pub tparams: Box<[Tparam<R, TY>]>,
    pub constraints: Box<[(ConstraintKind, TY)]>,
    pub reified: ReifyKind,
    pub user_attributes: Box<[UserAttribute<R::Pos>]>,
}

walkable!(impl<R: Reason, TY> for Tparam<R, TY> => [tparams, constraints]);

/// A `where` clause constraint: `T <kind> U`.
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
pub struct WhereConstraint<TY>(pub TY, pub ast_defs::ConstraintKind, pub TY);

walkable!(impl<R: Reason, TY> for WhereConstraint<TY> => [0, 1, 2]);

/// A declaration-phase type: a reason paired with a hash-consed `Ty_` node.
#[derive(Clone, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
#[serde(bound = "R: Reason")]
pub struct Ty<R: Reason>(R, Hc<Ty_<R>>);

walkable!(Ty<R> as visit_decl_ty => [0, 1]);

impl<R: Reason> Ty<R> {
    #[inline]
    pub fn new(reason: R, ty: Ty_<R>) -> Self {
        Self(reason, Hc::new(ty))
    }

    // Convenience constructors for common type nodes.

    pub fn prim(r: R, prim: Prim) -> Self {
        Self::new(r, Ty_::Tprim(prim))
    }

    pub fn void(r: R) -> Self {
        Self::prim(r, Prim::Tvoid)
    }

    pub fn mixed(r: R) -> Self {
        Self::new(r, Ty_::Tmixed)
    }

    pub fn any(r: R) -> Self {
        Self::new(r, Ty_::Tany)
    }

    pub fn this(r: R) -> Self {
        Self::new(r, Ty_::Tthis)
    }

    pub fn apply(
        reason: R,
        type_name: Positioned<TypeName, R::Pos>,
        tparams: Box<[Ty<R>]>,
    ) -> Self {
        Self::new(reason, Ty_::Tapply(Box::new((type_name, tparams))))
    }

    pub fn generic(reason: R, name: TypeName, tparams: Box<[Ty<R>]>) -> Self {
        Self::new(reason, Ty_::Tgeneric(Box::new((name, tparams))))
    }

    #[inline]
    pub fn access(reason: R, taccess: TaccessType<R, Ty<R>>) -> Self {
        Self::new(reason, Ty_::Taccess(Box::new(taccess)))
    }

    /// The position carried by this type's reason.
    pub fn pos(&self) -> &R::Pos {
        self.0.pos()
    }

    pub fn reason(&self) -> &R {
        &self.0
    }

    pub fn node(&self) -> &Hc<Ty_<R>> {
        &self.1
    }

    pub fn node_ref(&self) -> &Ty_<R> {
        &self.1
    }

    /// Views this type as a class type `(reason, name, type args)`.
    /// For a non-`Tapply` type this does not fail: it returns an empty type
    /// name and no arguments.
    pub fn unwrap_class_type(&self) -> (&R, Positioned<TypeName, R::Pos>, &[Ty<R>]) {
        use Ty_::*;
        let r = self.reason();
        match &**self.node() {
            Tapply(id_and_args) => {
                let (pos_id, args) = &**id_and_args;
                (r, pos_id.clone(), args)
            }
            _ => (r, Positioned::new(r.pos().clone(), TypeName::from("")), &[]),
        }
    }
}

/// A shape may specify whether or not fields are required. For example, consider
/// this typedef:
///
/// ```
/// type ShapeWithOptionalField = shape(?'a' => ?int);
/// ```
///
/// With this definition, the field 'a' may be unprovided in a shape. In this
/// case, the field 'a' would have sf_optional set to true.
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[serde(bound = "R: Reason")]
pub struct ShapeFieldType<R: Reason> {
    pub field_name_pos: ShapeFieldNamePos<R::Pos>,
    pub optional: bool,
    pub ty: Ty<R>,
}

walkable!(ShapeFieldType<R> => [ty]);

/// A shape type: its "kind" type plus a map from field name to field type.
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[serde(bound = "R: Reason")]
pub struct ShapeType<R: Reason>(pub Ty<R>, pub BTreeMap<TshapeFieldName, ShapeFieldType<R>>);

walkable!(ShapeType<R> => [0, 1]);

#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[serde(bound = "R: Reason")]
pub enum Ty_<R: Reason> {
    /// The late static bound type of a class
    Tthis,
    /// Either an object type or a type alias, ty list are the arguments
    Tapply(Box<(Positioned<TypeName, R::Pos>, Box<[Ty<R>]>)>),
    /// 'With' refinements of the form `_ with { type T as int; type TC = C; }`.
    Trefinement(Box<TrefinementType<Ty<R>>>),
    /// "Any" is the type of a variable with a missing annotation, and "mixed" is
    /// the type of a variable annotated as "mixed". THESE TWO ARE VERY DIFFERENT!
    /// Any unifies with anything, i.e., it is both a supertype and subtype of any
    /// other type. You can do literally anything to it; it's the "trust me" type.
    /// Mixed, on the other hand, is only a supertype of everything. You need to do
    /// a case analysis to figure out what it is (i.e., its elimination form).
    ///
    /// Here's an example to demonstrate:
    ///
    /// ```
    /// function f($x): int {
    ///   return $x + 1;
    /// }
    /// ```
    ///
    /// In that example, $x has type Tany. This unifies with anything, so adding
    /// one to it is allowed, and returning that as int is allowed.
    ///
    /// In contrast, if $x were annotated as mixed, adding one to that would be
    /// a type error -- mixed is not a subtype of int, and you must be a subtype
    /// of int to take part in addition. (The converse is true though -- int is a
    /// subtype of mixed.) A case analysis would need to be done on $x, via
    /// is_int or similar.
    ///
    /// mixed exists only in the decl_phase phase because it is desugared into ?nonnull
    /// during the localization phase.
    Tmixed,
    Twildcard,
    Tlike(Ty<R>),
    Tany,
    Tnonnull,
    /// A dynamic type is a special type which sometimes behaves as if it were a
    /// top type; roughly speaking, where a specific value of a particular type is
    /// expected and that type is dynamic, anything can be given. We call this
    /// behaviour "coercion", in that the types "coerce" to dynamic. In other ways it
    /// behaves like a bottom type; it can be used in any sort of binary expression
    /// or even have object methods called from it. However, it is in fact neither.
    ///
    /// it captures dynamicism within function scope.
    /// See tests in typecheck/dynamic/ for more examples.
    Tdynamic,
    /// Nullable, called "option" in the ML parlance.
    Toption(Ty<R>),
    /// All the primitive types: int, string, void, etc.
    Tprim(aast::Tprim),
    /// A wrapper around fun_type, which contains the full type information for a
    /// function, method, lambda, etc.
    Tfun(Box<FunType<R, Ty<R>>>),
    /// Tuple, with ordered list of the types of the elements of the tuple.
    Ttuple(Box<[Ty<R>]>),
    /// Whether all fields of this shape are known, types of each of the
    /// known arms.
    Tshape(Box<ShapeType<R>>),
    /// The type of a generic parameter. The constraints on a generic parameter
    /// are accessed through the lenv.tpenv component of the environment, which
    /// is set up when checking the body of a function or method. See uses of
    /// Typing_phase.add_generic_parameters_and_constraints. The list denotes
    /// type arguments.
    Tgeneric(Box<(TypeName, Box<[Ty<R>]>)>),
    /// Union type.
    /// The values that are members of this type are the union of the values
    /// that are members of the components of the union.
    /// Some examples (writing | for binary union)
    ///   Tunion [] is the "nothing" type, with no values
    ///   Tunion [int;float] is the same as num
    ///   Tunion [null;t] is the same as Toption t
    Tunion(Box<[Ty<R>]>),
    Tintersection(Box<[Ty<R>]>),
    /// Tvec_or_dict (ty1, ty2) => "vec_or_dict<ty1, ty2>"
    TvecOrDict(Box<(Ty<R>, Ty<R>)>),
    Taccess(Box<TaccessType<R, Ty<R>>>),
}

// We've boxed all variants of Ty_ which are larger than two usizes, so the
// total size should be equal to `[usize; 3]` (one more for the discriminant).
// This is important because all variants use the same amount of memory and are
// passed around by value, so adding a large unboxed variant can cause a large
// regression.
static_assertions::assert_eq_size!(Ty_<reason::NReason>, [usize; 3]);
static_assertions::assert_eq_size!(Ty_<reason::BReason>, [usize; 3]);

// Hash-consing: all decl `Ty_` nodes for a given reason flavor share one
// interning table.
impl<R: Reason> hcons::Consable for Ty_<R> {
    #[inline]
    fn conser() -> &'static hcons::Conser<Ty_<R>> {
        R::decl_ty_conser()
    }
}

impl<R: Reason> crate::visitor::Walkable<R> for Ty_<R> {
    fn recurse(&self, v: &mut dyn crate::visitor::Visitor<R>) {
        use Ty_::*;
        match self {
            // Leaf nodes: nothing to recurse into.
            Tthis | Tmixed | Twildcard | Tany | Tnonnull | Tdynamic | Tprim(_) => {}
            Tapply(id_and_args) => {
                let (_, args) = &**id_and_args;
                args.accept(v)
            }
            Tlike(ty) | Toption(ty) => ty.accept(v),
            Tfun(ft) => ft.accept(v),
            Ttuple(tys) | Tunion(tys) | Tintersection(tys) => tys.accept(v),
            Tshape(kind_and_fields) => {
                let ShapeType(_, fields) = &**kind_and_fields;
                fields.accept(v)
            }
            Tgeneric(id_and_args) => {
                let (_, args) = &**id_and_args;
                args.accept(v)
            }
            TvecOrDict(key_and_val_tys) => {
                let (kty, vty) = &**key_and_val_tys;
                kty.accept(v);
                vty.accept(v)
            }
            Taccess(tt) => tt.accept(v),
            Trefinement(tr) => tr.accept(v),
        }
    }
}

/// A Type const access expression of the form <type expr>::C.
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
#[serde(bound = "R: Reason, TY: Serialize + DeserializeOwned")]
pub struct TaccessType<R: Reason, TY> {
    /// Type expression to the left of `::`
    pub ty: TY,
    /// Name of type const to the right of `::`
    pub type_const: Positioned<TypeConstName, R::Pos>,
}

walkable!(impl<R: Reason, TY> for TaccessType<R, TY> => [ty]);

/// A decl refinement type of the form 'T with { Refinements }'
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq)]
#[derive(Serialize, Deserialize)]
#[serde(bound = "TY: Serialize + DeserializeOwned")]
pub struct TrefinementType<TY> {
    /// The type being refined (to the left of `with`)
    pub ty: TY,
    /// The refinement
    pub refinement: ClassRefinement<TY>,
}

walkable!(impl<R: Reason> for TrefinementType<Ty<R>> => [ty, refinement]);

/// The body of a refinement: a map from type-constant name to its refinement.
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq)]
#[derive(Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
#[serde(bound = "TY: Serialize + DeserializeOwned")]
pub struct ClassRefinement<TY> {
    pub consts: BTreeMap<TypeConstName, RefinedConst<TY>>,
}

walkable!(impl<R: Reason> for ClassRefinement<Ty<R>> => [consts]);

/// Constant refinements (either `type` or `ctx`)
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq)]
#[derive(Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
#[serde(bound = "TY: Serialize + DeserializeOwned")]
pub struct RefinedConst<TY> {
    pub bound: RefinedConstBound<TY>,
    pub is_ctx: bool,
}

walkable!(impl<R: Reason> for RefinedConst<Ty<R>> => [bound]);

/// Either an exact bound (`type T = X`) or loose bounds (`type T as X super Y`).
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq)]
#[derive(Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
#[serde(bound = "TY: Serialize + DeserializeOwned")]
pub enum RefinedConstBound<TY> {
    Exact(TY),
    Loose(RefinedConstBounds<TY>),
}

walkable!(impl<R: Reason, TY> for RefinedConstBound<TY> => {
    Self::Exact(ty) => [ty],
    Self::Loose(bounds) => [bounds],
});

/// Lower (`super`) and upper (`as`) bounds of a loose refinement.
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq)]
#[derive(Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
#[serde(bound = "TY: Serialize + DeserializeOwned")]
pub struct RefinedConstBounds<TY> {
    pub lower: Box<[TY]>,
    pub upper: Box<[TY]>,
}

walkable!(impl<R: Reason, TY> for RefinedConstBounds<TY> => [lower, upper]);

/// A capability (coeffect): either the defaults (positioned) or an explicit
/// capability type.
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
#[serde(bound = "R: Reason, TY: Serialize + DeserializeOwned")]
pub enum Capability<R: Reason, TY> {
    CapDefaults(R::Pos),
    CapTy(TY),
}

walkable!(impl<R: Reason, TY> for Capability<R, TY> => {
    Self::CapDefaults(..) => [],
    Self::CapTy(ty) => [ty],
});

/// Companion to fun_params type, intended to consolidate checking of
/// implicit params for functions.
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
#[serde(bound = "R: Reason, TY: Serialize + DeserializeOwned")]
pub struct FunImplicitParams<R: Reason, TY> {
    pub capability: Capability<R, TY>,
}

walkable!(impl<R: Reason, TY> for FunImplicitParams<R, TY> => [capability]);

/// The type of a function AND a method.
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
#[serde(bound = "R: Reason, TY: Serialize + DeserializeOwned")]
pub struct FunType<R: Reason, TY> {
    pub tparams: Box<[Tparam<R, TY>]>,
    pub where_constraints: Box<[WhereConstraint<TY>]>,
    pub params: FunParams<R, TY>,
    pub implicit_params: FunImplicitParams<R, TY>,
    /// Carries through the sync/async information from the aast
    pub ret: PossiblyEnforcedTy<TY>,
    pub flags: typing_defs_flags::FunTypeFlags,
    pub ifc_decl: IfcFunDecl,
    pub cross_package: Option<Symbol>,
}

walkable!(impl<R: Reason, TY> for FunType<R, TY> => [
    tparams, where_constraints, params, implicit_params, ret
]);

#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
#[serde(bound = "TY: Serialize + DeserializeOwned")]
pub struct PossiblyEnforcedTy<TY> {
    /// True if consumer of this type enforces it at runtime
    pub enforced: Enforcement,
    pub ty: TY,
}

walkable!(impl<R: Reason, TY> for PossiblyEnforcedTy<TY> => [ty]);

/// A single declared function/method parameter.
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
#[serde(bound = "R: Reason, TY: Serialize + DeserializeOwned")]
pub struct FunParam<R: Reason, TY> {
    pub pos: R::Pos,
    pub name: Option<Symbol>,
    pub ty: PossiblyEnforcedTy<TY>,
    pub flags: FunParamFlags,
}

walkable!(impl<R: Reason, TY> for FunParam<R, TY> => [ty]);

pub type FunParams<R, TY> = Box<[FunParam<R, TY>]>;

/// Origin of Class Constant References:
/// In order to be able to detect cycle definitions like
/// class C {
///   const int A = D::A;
/// }
/// class D {
///   const int A = C::A;
/// }
/// we need to remember which constants were used during initialization.
///
/// Currently the syntax of constants allows direct references to another class
/// like D::A, or self references using self::A.
///
/// class_const_from encodes the origin (class vs self).
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
pub enum ClassConstFrom {
    Self_,
    From(TypeName),
}

/// Class Constant References:
/// In order to be able to detect cycle definitions like
/// class C {
///   const int A = D::A;
/// }
/// class D {
///   const int A = C::A;
/// }
/// we need to remember which constants were used during initialization.
///
/// Currently the syntax of constants allows direct references to another class
/// like D::A, or self references using self::A.
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
pub struct ClassConstRef(pub ClassConstFrom, pub Symbol);

/// A top-level constant declaration: its position and type.
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
#[serde(bound = "R: Reason")]
pub struct ConstDecl<R: Reason> {
    pub pos: R::Pos,
    pub ty: Ty<R>,
}

walkable!(ConstDecl<R> => [ty]);

/// A top-level function declaration.
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
#[serde(bound = "R: Reason")]
pub struct FunElt<R: Reason> {
    pub deprecated: Option<Bytes>,
    pub module: Option<Positioned<ModuleName, R::Pos>>,
    /// Top-level functions have limited visibilities
    pub internal: bool,
    pub ty: Ty<R>,
    pub pos: R::Pos,
    pub php_std_lib: bool,
    pub support_dynamic_type: bool,
    pub no_auto_dynamic: bool,
    pub no_auto_likes: bool,
}

walkable!(FunElt<R> => [ty]);

/// An abstract type constant: optional `as`/`super` constraints and an
/// optional default.
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
#[serde(bound = "R: Reason")]
pub struct AbstractTypeconst<R: Reason> {
    pub as_constraint: Option<Ty<R>>,
    pub super_constraint: Option<Ty<R>>,
    pub default: Option<Ty<R>>,
}

walkable!(AbstractTypeconst<R> => [as_constraint, super_constraint, default]);

/// A concrete type constant: exactly one assigned type.
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
#[serde(bound = "R: Reason")]
pub struct ConcreteTypeconst<R: Reason> {
    pub ty: Ty<R>,
}

walkable!(ConcreteTypeconst<R> => [ty]);

#[derive(Clone, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
#[serde(bound = "R: Reason")]
pub enum Typeconst<R: Reason> {
    TCAbstract(AbstractTypeconst<R>),
    TCConcrete(ConcreteTypeconst<R>),
}

walkable!(Typeconst<R> => {
    Self::TCAbstract(x) => [x],
    Self::TCConcrete(x) => [x],
});

// Debug is written by hand so the variant wrapper is elided and only the
// payload is printed.
impl<R: Reason> fmt::Debug for Typeconst<R> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::TCAbstract(x) => x.fmt(f),
            Self::TCConcrete(x) => x.fmt(f),
        }
    }
}

/// The enum-specific parts of an enum class declaration: base type,
/// optional constraint, and included enums.
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
#[serde(bound = "R: Reason")]
pub struct EnumType<R: Reason> {
    pub base: Ty<R>,
    pub constraint: Option<Ty<R>>,
    pub includes: Box<[Ty<R>]>,
}

walkable!(EnumType<R> => [base, constraint, includes]);

/// A typedef (type alias) declaration.
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
#[serde(bound = "R: Reason")]
pub struct TypedefType<R: Reason> {
    pub module: Option<Positioned<ModuleName, R::Pos>>,
    pub pos: R::Pos,
    pub vis: aast::TypedefVisibility,
    pub tparams: Box<[Tparam<R, Ty<R>>]>,
    pub as_constraint: Option<Ty<R>>,
    pub super_constraint: Option<Ty<R>>,
    pub ty: Ty<R>,
    pub is_ctx: bool,
    pub attributes: Box<[UserAttribute<R::Pos>]>,
    pub internal: bool,
    pub docs_url: Option<String>,
}

walkable!(TypedefType<R> => [tparams, as_constraint, super_constraint, ty]);

walkable!(ast_defs::ConstraintKind);

/// A reference in a module's export/import list: everything, an exact
/// module, or a module-name prefix.
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
pub enum ModuleReference {
    MRGlobal,
    MRPrefix(ModuleName),
    MRExact(ModuleName),
}

/// A module declaration: position plus optional export/import lists.
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
#[derive(ToOcamlRep, FromOcamlRep)]
pub struct ModuleDefType<R: Reason> {
    pub pos: R::Pos,
    pub exports: Option<Box<[ModuleReference]>>,
    pub imports: Option<Box<[ModuleReference]>>,
}

walkable!(ModuleDefType<R> => []);

/// When the option is `Some`, it points to the location of the `__Enforceable`
/// attribute which caused the containing typeconst to be enforceable.
///
/// The newtype allows us to implement ToOxidized and ToOcamlRep in such a way
/// that we produce `(Pos, bool)` tuples, which is how this is represented on
/// the OCaml side.
#[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)]
pub struct Enforceable<P>(pub Option<P>);

impl<P> Enforceable<P> {
    pub fn is_some(&self) -> bool {
        self.0.is_some()
    }

    pub fn is_none(&self) -> bool {
        self.0.is_none()
    }

    pub fn as_ref(&self) -> Option<&P> {
        self.0.as_ref()
    }
}
Rust
hhvm/hphp/hack/src/hackrs/ty/local/decl.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use pos::TypeName; use crate::decl::folded; use crate::decl::Ty; use crate::reason::Reason; #[derive(Debug)] pub struct ClassElt<R: Reason> { ty: Ty<R>, #[allow(dead_code)] origin: TypeName, } impl<R: Reason> ClassElt<R> { pub fn new(folded_elt: &folded::FoldedElement, ty: Ty<R>) -> Self { Self { ty, origin: folded_elt.origin, } } pub fn ty(&self) -> &Ty<R> { &self.ty } pub fn pos(&self) -> &R::Pos { self.ty.pos() } }
Rust
hhvm/hphp/hack/src/hackrs/ty/local/kind.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use im::HashSet; use super::ty::Ty; use crate::reason::Reason; #[derive(Debug, Clone, PartialEq, Eq, Hash, Default)] pub struct KindFlags { enforcable: bool, newable: bool, require_dynamic: bool, } #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct Kind<R: Reason> { lower_bounds: HashSet<Ty<R>>, upper_bounds: HashSet<Ty<R>>, flags: KindFlags, } impl<R: Reason> Kind<R> { pub fn size(&self) -> usize { self.lower_bounds.len() + self.upper_bounds.len() } pub fn add_upper_bound(&mut self, ty: Ty<R>) { self.upper_bounds.insert(ty); } pub fn remove_upper_bound(&mut self, ty: &Ty<R>) { self.upper_bounds.remove(ty); } pub fn set_upper_bounds(&mut self, upper_bounds: HashSet<Ty<R>>) { self.upper_bounds = upper_bounds; } pub fn upper_bounds(&self) -> &HashSet<Ty<R>> { &self.upper_bounds } pub fn add_lower_bound(&mut self, ty: Ty<R>) { self.lower_bounds.insert(ty); } pub fn remove_lower_bound(&mut self, ty: &Ty<R>) { self.lower_bounds.remove(ty); } pub fn set_lower_bounds(&mut self, lower_bounds: HashSet<Ty<R>>) { self.lower_bounds = lower_bounds; } pub fn lower_bounds(&self) -> &HashSet<Ty<R>> { &self.lower_bounds } } impl<R: Reason> Default for Kind<R> { fn default() -> Self { Kind { lower_bounds: HashSet::default(), upper_bounds: HashSet::default(), flags: KindFlags::default(), } } }
Rust
hhvm/hphp/hack/src/hackrs/ty/local/to_ocamlrep.rs
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.

use pos::ToOxidized;

// Converts a local `Ty` to OCaml by first lowering it into a temporary
// oxidized_by_ref arena. See the caveat below about `add_root`.
impl<R: crate::reason::Reason> ocamlrep::ToOcamlRep for super::ty::Ty<R> {
    fn to_ocamlrep<'a, A: ocamlrep::Allocator>(&'a self, alloc: &'a A) -> ocamlrep::Value<'a> {
        // This implementation of `to_ocamlrep` (which allocates in an arena,
        // converts to OCaml, then drops the arena) violates a `ToOcamlRep`
        // requirement: we may not drop values after passing them to `alloc.add`
        // or invoking `to_ocamlrep` (else memoization will behave incorrectly
        // in `add_root`). This leads to bizarre behavior (particularly in
        // optimized builds).
        //
        // For example, suppose we're converting a typed AST via ToOcamlRep, and
        // it contains the types `int` and `float`. When converting `int`, we'll
        // construct an arena and arena-allocate Tint, in order to construct the
        // oxidized_by_ref value `Tprim(&Tint)`, and convert that to OCaml. The
        // `ocamlrep::Allocator` will remember that the address of the `&Tint`
        // pointer corresponds to a certain OCaml value, so that when it
        // encounters future instances of that pointer, it can use that same
        // OCaml value rather than allocating a new one. We'd then free the
        // arena once we're finished converting that type. When converting the
        // second type, we construct a new arena, arena-allocate Tfloat, and
        // attempt to construct `Tprim(&Tfloat)`. But if the new arena was
        // allocated in the same location as the old, it may choose the same
        // address for our arena-allocated `Tfloat` as our `Tint` was, and our
        // ocamlrep Allocator will incorrectly use the `Tint` OCaml value.
        //
        // This memoization behavior is only enabled if we invoke
        // `ocamlrep::Allocator::add_root`, so we must take care not to use it
        // (including indirectly, through macros like `ocaml_ffi`) on values
        // containing this type.
        let arena = &bumpalo::Bump::new();
        let ty = self.to_oxidized(arena);
        // SAFETY: Transmute away the lifetime to allow the arena-allocated
        // value to be converted to OCaml. Won't break type safety in Rust, but
        // will produce broken OCaml values if used with `add_root` (see above
        // comment).
        let ty = unsafe {
            std::mem::transmute::<
                &'_ oxidized_by_ref::typing_defs::Ty<'_>,
                &'a oxidized_by_ref::typing_defs::Ty<'a>,
            >(&ty)
        };
        ty.to_ocamlrep(alloc)
    }
}
Rust
hhvm/hphp/hack/src/hackrs/ty/local/ty.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use std::ops::Deref; use hcons::Hc; use im::HashSet; use oxidized::aast_defs::ReifyKind; use oxidized::ast_defs::ConstraintKind; use oxidized::ast_defs::Variance; use oxidized::typing_defs_flags::FunTypeFlags; use pos::Positioned; use pos::Symbol; use pos::ToOxidized; use pos::TypeName; pub use crate::decl; pub use crate::decl::ty::Exact; pub use crate::decl::ty::Prim; use crate::decl::UserAttribute; use crate::local::tyvar::Tyvar; use crate::reason::Reason; use crate::visitor::Visitor; use crate::visitor::Walkable; // TODO: Share the representation from decl_defs #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct Tparam<R: Reason> { pub variance: Variance, pub name: Positioned<TypeName, R::Pos>, pub tparams: Vec<Tparam<R>>, pub constraints: Vec<(ConstraintKind, Ty<R>)>, pub reified: ReifyKind, pub user_attributes: Vec<UserAttribute<R::Pos>>, } // TODO: Share the representation from decl_defs #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct FunParam<R: Reason> { pub pos: R::Pos, pub name: Option<Symbol>, pub ty: Ty<R>, } walkable!(FunParam<R> => [ty]); // TODO: Share the representation from decl_defs #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct FunType<R: Reason> { pub tparams: Box<[Tparam<R>]>, pub params: Vec<FunParam<R>>, pub ret: Ty<R>, pub flags: FunTypeFlags, } walkable!(FunType<R> => [params, ret]); #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum ParamMode { FPnormal, FPinout, } impl From<&oxidized::ast_defs::ParamKind> for ParamMode { fn from(pk: &oxidized::ast_defs::ParamKind) -> Self { match pk { oxidized::ast::ParamKind::Pinout(_) => Self::FPinout, oxidized::ast::ParamKind::Pnormal => Self::FPnormal, } } } #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub enum Ty_<R: Reason, TY> { /// A primitive type Tprim(Prim), /// A wrapper around 
/// `FunType`, which contains the full type information
/// for a function, method, lambda, etc.
Tfun(FunType<R>),
/// Any type
/// TODO: any and err are a bit weird in that they are not actually types
/// but rather they represent a set of inconsistent bounds on a tyvar
/// we might want to rethink them prefering a sum type _or_
/// distinguishing types with `Tany` from those without
Tany,
/// The type of a generic parameter. The constraints on a generic parameter
/// are accessed through the lenv.tpenv component of the environment, which
/// is set up when checking the body of a function or method. See uses of
/// Typing_phase.add_generic_parameters_and_constraints. The list denotes
/// type arguments.
Tgeneric(TypeName, Vec<TY>),
/// An instance of a class or interface, ty list are the arguments
/// If exact=Exact, then this represents instances of *exactly* this class
/// If exact=Nonexact, this also includes subclasses
Tclass(Positioned<TypeName, R::Pos>, Exact, Vec<TY>),
/// A type variable, to be solved during inference.
Tvar(Tyvar),
/// A union of types; the empty union is the bottom type ("nothing").
Tunion(Vec<TY>),
/// A nullable type (`?T`).
Toption(TY),
/// An intersection of types.
Tintersection(Vec<TY>),
/// The nonnull type; its nullable form (`?nonnull`) is "mixed", the top type.
Tnonnull,
}

// Declare which children each variant exposes to the visitor framework.
walkable!(impl<R: Reason, TY> for Ty_<R, TY> => {
    Ty_::Tprim(_) => [],
    Ty_::Tfun(fun_type) => [fun_type],
    Ty_::Tany => [],
    Ty_::Tgeneric(_, args) => [args],
    Ty_::Tclass(_, _, args) => [args],
    Ty_::Tunion(args) => [args],
    Ty_::Toption(arg) => [arg],
    Ty_::Tintersection(args) => [args],
    Ty_::Tvar(_) => [],
    Ty_::Tnonnull => [],
});

// Hash-cons local type nodes through the reason-supplied global conser so
// that structurally equal `Ty_` nodes share a single allocation.
impl<R: Reason> hcons::Consable for Ty_<R, Ty<R>> {
    #[inline]
    fn conser() -> &'static hcons::Conser<Ty_<R, Ty<R>>> {
        R::local_ty_conser()
    }
}

/// A local (inference-time) type: a reason paired with a hash-consed node.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct Ty<R: Reason>(R, Hc<Ty_<R, Ty<R>>>);

walkable!(Ty<R> as visit_local_ty => [0, 1]);

impl<R: Reason> Ty<R> {
    /// Construct a type from a reason and a node, hash-consing the node.
    #[inline]
    pub fn new(reason: R, ty: Ty_<R, Ty<R>>) -> Self {
        Self(reason, Hc::new(ty))
    }

    /// Replace the reason, keeping the (shared) node.
    pub fn with_reason(self, reason: R) -> Self {
        Self(reason, self.1)
    }

    /// Transform the reason, keeping the (shared) node.
    pub fn map_reason<F>(self, f: F) -> Self
    where
        F: FnOnce(R) -> R,
    {
        Self(f(self.0), self.1)
    }

    /// Compare only the outermost constructors (plus class name and
    /// exactness for classes); type arguments are deliberately ignored.
    pub fn shallow_match(&self, other: &Self) -> bool {
        match (self.deref(), other.deref()) {
            (Ty_::Tnonnull, Ty_::Tnonnull) | (Ty_::Tany, Ty_::Tany) => true,
            (Ty_::Tprim(p1), Ty_::Tprim(p2)) => p1 == p2,
            (Ty_::Tclass(cn_sub, exact_sub, _), Ty_::Tclass(cn_sup, exact_sup, _)) => {
                cn_sub.id() == cn_sup.id() && exact_sub == exact_sup
            }
            // TODO[mjt] compares function flags here
            (Ty_::Tfun(_fty_sub), Ty_::Tfun(_fty_sup)) => true,
            _ => false,
        }
    }

    // -- Convenience constructors -------------------------------------------

    pub fn prim(r: R, prim: Prim) -> Ty<R> {
        Self::new(r, Ty_::Tprim(prim))
    }

    pub fn null(r: R) -> Ty<R> {
        Self::prim(r, Prim::Tnull)
    }

    pub fn void(r: R) -> Ty<R> {
        Self::prim(r, Prim::Tvoid)
    }

    pub fn bool(r: R) -> Ty<R> {
        Self::prim(r, Prim::Tbool)
    }

    pub fn int(r: R) -> Ty<R> {
        Self::prim(r, Prim::Tint)
    }

    pub fn float(r: R) -> Ty<R> {
        Self::prim(r, Prim::Tfloat)
    }

    pub fn string(r: R) -> Ty<R> {
        Self::prim(r, Prim::Tstring)
    }

    pub fn num(r: R) -> Ty<R> {
        Self::prim(r, Prim::Tnum)
    }

    pub fn arraykey(r: R) -> Ty<R> {
        Self::prim(r, Prim::Tarraykey)
    }

    pub fn fun(r: R, ft: FunType<R>) -> Ty<R> {
        Self::new(r, Ty_::Tfun(ft))
    }

    /// Smart constructor for `?T`: collapses `??T` to `?T` and turns the
    /// nullable empty union (`?nothing`) into plain `null`.
    pub fn option(r: R, ty: Ty<R>) -> Self {
        match ty.deref() {
            Ty_::Toption(_) => ty,
            Ty_::Tunion(tys) if tys.is_empty() => Self::null(r),
            _ => Self::new(r, Ty_::Toption(ty)),
        }
    }

    pub fn class(r: R, cname: Positioned<TypeName, R::Pos>, exact: Exact, tys: Vec<Self>) -> Self {
        Self::new(r, Ty_::Tclass(cname, exact, tys))
    }

    /// Smart constructor for unions: a singleton union is just its element.
    pub fn union(r: R, tys: Vec<Ty<R>>) -> Self {
        if tys.len() == 1 {
            tys.into_iter().next().unwrap()
        } else {
            Self::new(r, Ty_::Tunion(tys))
        }
    }

    /// Smart constructor for intersections: a singleton is just its element.
    pub fn intersection(r: R, tys: Vec<Ty<R>>) -> Self {
        if tys.len() == 1 {
            tys.into_iter().next().unwrap()
        } else {
            Self::new(r, Ty_::Tintersection(tys))
        }
    }

    pub fn is_intersection(&self) -> bool {
        matches!(self.deref(), Ty_::Tintersection(_))
    }

    pub fn is_union(&self) -> bool {
        matches!(self.deref(), Ty_::Tunion(_))
    }

    pub fn var(r: R, tv: Tyvar) -> Self {
        Self::new(r, Ty_::Tvar(tv))
    }

    pub fn any(r: R) -> Ty<R> {
        Self::new(r, Ty_::Tany)
    }

    pub fn generic(r: R, ty_name: TypeName, args: Vec<Ty<R>>) -> Self {
        Self::new(r, Ty_::Tgeneric(ty_name, args))
    }

    pub fn nonnull(r: R) -> Ty<R> {
        Self::new(r, Ty_::Tnonnull)
    }

    pub fn is_nonnull(&self) -> bool {
        matches!(self.deref(), Ty_::Tnonnull)
    }

    pub fn is_var(&self) -> bool {
        matches!(self.deref(), Ty_::Tvar(_))
    }

    /// `mixed` is encoded as `?nonnull`.
    pub fn mixed(r: R) -> Ty<R> {
        let inner = Self::nonnull(r.clone());
        Self::option(r, inner)
    }

    /// `nothing` (the bottom type) is encoded as the empty union.
    pub fn nothing(r: R) -> Ty<R> {
        Self::union(r, vec![])
    }

    pub fn reason(&self) -> &R {
        &self.0
    }

    /// The generic's name, if this is a `Tgeneric`.
    pub fn generic_name(&self) -> Option<&TypeName> {
        match self.deref() {
            Ty_::Tgeneric(name, _) => Some(name),
            _ => None,
        }
    }

    /// The type variable, if this is a `Tvar`.
    pub fn tyvar_opt(&self) -> Option<&Tyvar> {
        match self.deref() {
            Ty_::Tvar(tv) => Some(tv),
            _ => None,
        }
    }

    /// The underlying hash-consed node.
    pub fn node(&self) -> &Hc<Ty_<R, Ty<R>>> {
        &self.1
    }

    /// Occurs check: does the type variable `tv` appear anywhere in this type?
    pub fn occurs(&self, tv: Tyvar) -> bool {
        TyvarOccurs::new(tv, self).occurs
    }

    /// Collect this type's type variables, split into those appearing in
    /// covariant positions and those in contravariant positions (invariant
    /// occurrences land in both sets).
    pub fn tyvars<F>(&self, get_tparam_variance: F) -> (HashSet<Tyvar>, HashSet<Tyvar>)
    where
        F: Fn(TypeName) -> Option<Vec<Variance>>,
    {
        let mut covs = HashSet::default();
        let mut contravs = HashSet::default();
        self.tyvars_help(
            Variance::Covariant,
            &mut covs,
            &mut contravs,
            &get_tparam_variance,
        );
        (covs, contravs)
    }

    // Recursive worker for `tyvars`; `variance` is the variance of the
    // position currently being visited.
    fn tyvars_help<F>(
        &self,
        variance: Variance,
        covs: &mut HashSet<Tyvar>,
        contravs: &mut HashSet<Tyvar>,
        get_tparam_variance: &F,
    ) where
        F: Fn(TypeName) -> Option<Vec<Variance>>,
    {
        match self.deref() {
            Ty_::Tvar(tv) => match variance {
                Variance::Covariant => {
                    covs.insert(*tv);
                }
                Variance::Contravariant => {
                    contravs.insert(*tv);
                }
                Variance::Invariant => {
                    covs.insert(*tv);
                    contravs.insert(*tv);
                }
            },
            Ty_::Toption(ty) => ty.tyvars_help(variance, covs, contravs, get_tparam_variance),
            Ty_::Tunion(tys) | Ty_::Tintersection(tys) => tys
                .iter()
                .for_each(|ty| ty.tyvars_help(variance, covs, contravs, get_tparam_variance)),
            Ty_::Tfun(ft) => {
                for fp in &ft.params {
                    // TODO[mjt] handle inout params when we have them
                    // for now treat all contravariantly
                    fp.ty
                        .tyvars_help(variance.negate(), covs, contravs, get_tparam_variance);
                }
                ft.ret
                    .tyvars_help(variance, covs, contravs, get_tparam_variance)
            }
            Ty_::Tclass(cn, _, typarams) if !typarams.is_empty() => {
                // Recurse into class type arguments at the variance declared
                // by the class's type parameters, when it is known.
                if let Some(vars) = get_tparam_variance(cn.id()) {
                    typarams.iter().zip(vars.iter()).for_each(|(tp, variance)| {
                        tp.tyvars_help(*variance, covs, contravs, get_tparam_variance)
                    })
                }
            }
            // NOTE(review): `Tgeneric`'s type arguments are not traversed
            // here -- confirm this is intentional.
            Ty_::Tclass(_, _, _)
            | Ty_::Tgeneric(_, _)
            | Ty_::Tany
            | Ty_::Tnonnull
            | Ty_::Tprim(_) => {}
        }
    }
}

impl<R: Reason> Deref for Ty<R> {
    type Target = Ty_<R, Ty<R>>;
    fn deref(&self) -> &Self::Target {
        &self.1
    }
}

// Visitor accumulator backing `Ty::occurs`.
struct TyvarOccurs {
    tv: Tyvar,
    occurs: bool,
}

impl TyvarOccurs {
    fn new<R: Reason>(tv: Tyvar, ty: &Ty<R>) -> Self {
        let mut acc = TyvarOccurs { tv, occurs: false };
        ty.accept(&mut acc);
        acc
    }
}

impl<R: Reason> Visitor<R> for TyvarOccurs {
    fn object(&mut self) -> &mut dyn Visitor<R> {
        self
    }

    fn visit_local_ty(&mut self, ty: &Ty<R>) {
        match ty.deref() {
            Ty_::Tvar(tv2) if self.tv == *tv2 => {
                self.occurs = true;
            }
            _ => {}
        }
        // Short-circuit: no need to keep walking once a hit is recorded.
        if !self.occurs {
            ty.recurse(self.object())
        }
    }
}

impl<R: Reason> FunType<R> {
    pub fn is_variadic(&self) -> bool {
        self.flags.contains(FunTypeFlags::VARIADIC)
    }

    /// Split the parameter list into the fixed parameters and, when the
    /// function is variadic, the trailing variadic parameter.
    pub fn non_variadic_and_variadic_arguments(&self) -> (&[FunParam<R>], Option<&FunParam<R>>) {
        if self.is_variadic() {
            let (var, non_var) = self.params.split_last().unwrap();
            (non_var, Some(var))
        } else {
            (&self.params, None)
        }
    }
}

impl<'a> ToOxidized<'a> for Exact {
    type Output = oxidized_by_ref::typing_defs::Exact<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        use oxidized_by_ref::typing_defs::Exact as E;
        match &self {
            Exact::Exact => E::Exact,
            Exact::Nonexact => {
                // The oxidized `Nonexact` carries a class refinement; we emit
                // an empty one.
                let r = oxidized_by_ref::decl_defs::ClassRefinement {
                    cr_consts: arena_collections::map::Map::empty(),
                };
                E::Nonexact(&*arena.alloc(r))
            }
        }
    }
}

impl<'a, R: Reason> ToOxidized<'a> for Ty<R> {
    type Output = oxidized_by_ref::typing_defs::Ty<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        use oxidized_by_ref::typing_defs::Ty_ as OTy_;
        let r = arena.alloc(self.reason().to_oxidized(arena));
        let ty = match &**self.node() {
            // Conversion to the arena-allocated ("oxidized-by-ref")
            // representation; `todo!()` arms are not implemented yet.
            Ty_::Tvar(tv) => OTy_::Tvar((*tv).into()),
            Ty_::Tprim(x) => OTy_::Tprim(arena.alloc(*x)),
            Ty_::Toption(_) => todo!(),
            Ty_::Tunion(tys) => {
                OTy_::Tunion(&*arena.alloc_slice_fill_iter(tys.iter().map(|_ty| todo!())))
            }
            Ty_::Tintersection(_) => todo!(),
            Ty_::Tfun(ft) => OTy_::Tfun(&*arena.alloc(ft.to_oxidized(arena))),
            Ty_::Tany => todo!(),
            Ty_::Tnonnull => todo!(),
            Ty_::Tgeneric(x, argl) => OTy_::Tgeneric(&*arena.alloc((
                &*arena.alloc_str(x.as_str()),
                &*arena.alloc_slice_fill_iter(argl.iter().map(|_ty| todo!())),
            ))),
            Ty_::Tclass(pos_id, exact, tys) => OTy_::Tclass(&*arena.alloc((
                pos_id.to_oxidized(arena),
                exact.to_oxidized(arena),
                &*arena.alloc_slice_fill_iter(
                    tys.iter().map(|ty| &*arena.alloc(ty.to_oxidized(arena))),
                ),
            ))),
        };
        oxidized_by_ref::typing_defs::Ty(r, ty)
    }
}

impl<'a, R: Reason> ToOxidized<'a> for FunType<R> {
    type Output = oxidized_by_ref::typing_defs::FunType<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        use oxidized_by_ref::typing_defs::FunType as OFunType;
        use oxidized_by_ref::typing_defs::PossiblyEnforcedTy as OPossiblyEnforcedTy;
        let FunType {
            tparams,
            params,
            ret,
            flags,
        } = self;
        OFunType {
            tparams: tparams.to_oxidized(arena),
            // NOTE(review): where-constraints, implicit capability,
            // enforcement, ifc_decl and cross_package are filled with fixed
            // defaults -- confirm callers do not need the real values.
            where_constraints: &[],
            params: &*arena
                .alloc_slice_fill_iter(params.iter().map(|p| &*arena.alloc(p.to_oxidized(arena)))),
            implicit_params: &*arena.alloc(oxidized_by_ref::typing_defs_core::FunImplicitParams {
                capability: oxidized_by_ref::typing_defs_core::Capability::CapDefaults(
                    oxidized_by_ref::pos::Pos::none(),
                ),
            }),
            ret: &*arena.alloc(OPossiblyEnforcedTy {
                enforced: oxidized::typing_defs_core::Enforcement::Enforced,
                type_: &*arena.alloc(ret.to_oxidized(arena)),
            }),
            flags: flags.clone(),
            ifc_decl: oxidized_by_ref::typing_defs_core::IfcFunDecl::FDInferFlows,
            cross_package: None,
        }
    }
}

impl<'a, R: Reason> ToOxidized<'a> for FunParam<R> {
    type Output = oxidized_by_ref::typing_defs::FunParam<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        use oxidized_by_ref::typing_defs::FunParam as OFunParam;
        use oxidized_by_ref::typing_defs::PossiblyEnforcedTy as OPossiblyEnforcedTy;
        OFunParam {
            pos: self.pos.to_oxidized(arena),
            name: self.name.map(|n| n.to_oxidized(arena)),
            type_: &*arena.alloc(OPossiblyEnforcedTy {
                enforced: oxidized::typing_defs_core::Enforcement::Enforced,
                type_: &*arena.alloc(self.ty.to_oxidized(arena)),
            }),
            // No local representation for param flags yet; emit empty flags.
            flags: oxidized::typing_defs_flags::FunParamFlags::from_bits_truncate(0),
        }
    }
}

impl<'a, R: Reason> ToOxidized<'a> for Tparam<R> {
    type Output = &'a oxidized_by_ref::typing_defs::Tparam<'a>;

    fn to_oxidized(&self, arena: &'a bumpalo::Bump) -> Self::Output {
        use oxidized_by_ref::typing_defs::Tparam as OTparam;
        let Tparam {
            variance,
            name,
            tparams,
            constraints,
            reified,
            user_attributes,
        } = self;
        // Constraint conversion is not implemented yet.
        assert!(constraints.is_empty(), "TODO");
        let tp = OTparam {
            variance: variance.clone(),
            name: name.to_oxidized(arena),
            tparams: tparams.to_oxidized(arena),
            constraints: &[],
            reified: reified.clone(),
            user_attributes: user_attributes.to_oxidized(arena),
        };
        arena.alloc(tp)
    }
}

#[cfg(test)]
mod tests {
    use pos::NPos;
    use pos::Pos;
    use utils::core::IdentGen;

    use super::*;
    use crate::reason::NReason;

    #[test]
    fn test_non_var() {
        // A primitive type contains no type variables.
        let ty_int = Ty::int(NReason::none());
        let (covs, contravs) = ty_int.tyvars(|_| None);
        assert!(covs.is_empty());
        assert!(contravs.is_empty());
    }

    #[test]
    fn test_var() {
        // A bare tyvar occurs covariantly.
        let gen = IdentGen::new();
        let tv0: Tyvar = gen.make().into();
        let ty_v0 = Ty::var(NReason::none(), tv0.clone());
        let (covs, contravs) = ty_v0.tyvars(|_| None);
        assert!(covs.contains(&tv0));
        assert!(contravs.is_empty());
    }

    #[test]
    fn test_union() {
        let gen = IdentGen::new();
        let tv0: Tyvar = gen.make().into();
        let ty_v0 = Ty::var(NReason::none(), tv0.clone());
        let ty_union = Ty::union(NReason::none(), vec![ty_v0]);
        let (covs, contravs) = ty_union.tyvars(|_| None);
        assert!(covs.contains(&tv0));
        assert!(contravs.is_empty());
    }

    #[test]
    fn test_intersection() {
        let gen = IdentGen::new();
        let tv0: Tyvar = gen.make().into();
        let ty_v0 = Ty::var(NReason::none(), tv0.clone());
        let ty_intersection = Ty::intersection(NReason::none(), vec![ty_v0]);
        let (covs, contravs) = ty_intersection.tyvars(|_| None);
        assert!(covs.contains(&tv0));
        assert!(contravs.is_empty());
    }

    #[test]
    fn test_fn_ty() {
        let gen = IdentGen::new();
        let tv0: Tyvar = gen.make().into();
        let tv1: Tyvar = gen.make().into();
        let tv2: Tyvar = gen.make().into();

        let params = vec![FunParam {
            pos: NPos::none(),
            name: None,
            ty: Ty::var(NReason::none(), tv0.clone()),
        }];
        let ret = Ty::var(NReason::none(), tv1.clone());

        // #0 -> #1: parameters are contravariant, the return is covariant.
        let ty_fn1 = Ty::fun(
            NReason::none(),
            FunType {
                tparams: vec![].into_boxed_slice(),
                params,
                flags: FunTypeFlags::empty(),
                ret,
            },
        );
        let (covs, contravs) = ty_fn1.tyvars(|_| None);
        assert!(covs.contains(&tv1));
        assert!(contravs.contains(&tv0));

        // (#0 -> #1) -> #2: nesting on the left flips variances again.
        let ty_fn2 = Ty::fun(
            NReason::none(),
            FunType {
                tparams: vec![].into_boxed_slice(),
                params: vec![FunParam {
                    pos: NPos::none(),
                    name: None,
                    ty: ty_fn1,
                }],
                flags: FunTypeFlags::empty(),
                ret: Ty::var(NReason::none(), tv2.clone()),
            },
        );
        let (covs, contravs) = ty_fn2.tyvars(|_| None);
        assert!(covs.contains(&tv0));
        assert!(covs.contains(&tv2));
        assert!(contravs.contains(&tv1));
    }

    #[test]
    fn test_occurs() {
        let gen = IdentGen::new();
        let tv: Tyvar = gen.make().into();
        let ty_v = Ty::var(NReason::none(), tv.clone());
        let tint = Ty::int(NReason::none());
        assert!(!tint.occurs(tv));

        let tunion = Ty::union(NReason::none(), vec![ty_v, tint]);
        assert!(tunion.occurs(tv));

        let tclass = Ty::class(
            NReason::none(),
            Positioned::new(Pos::none(), TypeName::new("C")),
            Exact::Exact,
            vec![tunion],
        );
        assert!(tclass.occurs(tv));
    }
}
Rust
hhvm/hphp/hack/src/hackrs/ty/local/tyvar.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use pos::ToOxidized; use serde::Deserialize; use serde::Serialize; use utils::core::Ident; #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct Tyvar(Ident); impl From<Ident> for Tyvar { fn from(x: Ident) -> Self { Self(x) } } impl From<Tyvar> for Ident { fn from(x: Tyvar) -> Self { x.0 } } impl From<Tyvar> for isize { fn from(x: Tyvar) -> Self { x.0.into() } } impl<'a> ToOxidized<'a> for Tyvar { type Output = isize; fn to_oxidized(&self, _bump: &'a bumpalo::Bump) -> Self::Output { self.0.into() } }
Rust
hhvm/hphp/hack/src/hackrs/ty/local/variance.rs
// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.

/// Variance of a type wrt to a given type parameter.
///
/// Standard variance lattice.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum Variance {
    /// R is bivariant (or constant) in X when [S/X]R <: [T/X]R for every S and T.
    Bivariant,
    /// R is covariant in X when Γ ⊢ [S/X]R <: [T/X]R iff Γ ⊢ S <: T.
    Covariant,
    /// R is contravariant in X when Γ ⊢ [T/X]R <: [S/X]R iff Γ ⊢ S <: T.
    Contravariant,
    /// R is invariant in X when Γ ⊢ [S/X]R <: [T/X]R iff both Γ ⊢ S <: T and Γ ⊢ T <: S
    Invariant,
}

impl Variance {
    /// Top of the lattice: no constraint at all.
    pub const TOP: Self = Variance::Bivariant;
    /// Bottom of the lattice: the most restrictive variance.
    pub const BOTTOM: Self = Variance::Invariant;

    /// Does the parameter occur in a covariant (or invariant) position?
    pub fn appears_covariantly(&self) -> bool {
        matches!(self, Variance::Covariant | Variance::Invariant)
    }

    /// Does the parameter occur in a contravariant (or invariant) position?
    pub fn appears_contravariantly(&self) -> bool {
        matches!(self, Variance::Contravariant | Variance::Invariant)
    }

    /// True exactly for `Bivariant`.
    pub fn is_bivariant(&self) -> bool {
        *self == Variance::Bivariant
    }

    /// The least upper bound of two variances.
    pub fn join(&self, other: &Self) -> Self {
        use Variance::*;
        match (*self, *other) {
            // `Invariant` is the bottom element, so it is absorbed.
            (Invariant, v) | (v, Invariant) => v,
            (a, b) if a == b => a,
            // Covariant vs Contravariant: only `Bivariant` is above both.
            _ => Bivariant,
        }
    }

    /// The greatest lower bound of two variances.
    pub fn meet(&self, other: &Self) -> Self {
        use Variance::*;
        match (*self, *other) {
            // `Bivariant` is the top element, so it is absorbed.
            (Bivariant, v) | (v, Bivariant) => v,
            (a, b) if a == b => a,
            // Covariant vs Contravariant: only `Invariant` is below both.
            _ => Invariant,
        }
    }
}

impl Default for Variance {
    /// Absent any information, a parameter is unconstrained.
    fn default() -> Self {
        Variance::Bivariant
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    /// All lattice elements, for table-driven checks.
    const ALL: [Variance; 4] = [
        Variance::Bivariant,
        Variance::Covariant,
        Variance::Contravariant,
        Variance::Invariant,
    ];

    #[test]
    fn test_join_bottom() {
        // Joining with bottom is the identity.
        for v in ALL {
            assert_eq!(Variance::BOTTOM.join(&v), v);
        }
    }

    #[test]
    fn test_meet_bottom() {
        // Meeting with bottom yields bottom.
        for v in ALL {
            assert_eq!(Variance::BOTTOM.meet(&v), Variance::BOTTOM);
        }
    }

    #[test]
    fn test_meet_top() {
        // Meeting with top is the identity.
        for v in ALL {
            assert_eq!(Variance::TOP.meet(&v), v);
        }
    }

    #[test]
    fn test_join_top() {
        // Joining with top yields top.
        for v in ALL {
            assert_eq!(Variance::TOP.join(&v), Variance::TOP);
        }
    }

    #[test]
    fn test_cov_contra() {
        // Covariant and Contravariant are incomparable: their join is top
        // and their meet is bottom.
        let (cov, contrav) = (Variance::Covariant, Variance::Contravariant);
        assert_eq!(cov.join(&contrav), Variance::TOP);
        assert_eq!(contrav.join(&cov), Variance::TOP);
        assert_eq!(cov.meet(&contrav), Variance::BOTTOM);
        assert_eq!(contrav.meet(&cov), Variance::BOTTOM);
    }
}
Rust
hhvm/hphp/hack/src/hackrs/ty/local_error/error_code.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use oxidized::error_codes::Typing; pub type TypingErrorCode = Typing;
Rust
hhvm/hphp/hack/src/hackrs/ty/local_error/error_primary.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use eq_modulo_pos::EqModuloPos; use pos::TypeName; use serde::Deserialize; use serde::Serialize; use crate::reason::Reason; #[derive(Clone, Debug, Eq, EqModuloPos, Hash, PartialEq, Serialize, Deserialize)] #[serde(bound = "R: Reason")] pub enum Primary<R: Reason> { Subtype, OccursCheck, InvalidTypeHint(R::Pos), ExpectingTypeHint(R::Pos), ExpectingReturnTypeHint(R::Pos), CyclicClassDef(R::Pos, Vec<TypeName>), WrongExtendKind { parent_pos: R::Pos, parent_kind: oxidized::ast_defs::ClassishKind, parent_name: TypeName, pos: R::Pos, kind: oxidized::ast_defs::ClassishKind, name: TypeName, }, }
Rust
hhvm/hphp/hack/src/hackrs/ty/local_error/error_reason.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. #![allow(dead_code)] use utils::Lazy; use crate::local_error::Primary; use crate::local_error::TypingError; use crate::local_error::TypingErrorCode; use crate::reason::Reason; #[derive(Clone, Debug)] enum Component { Code, Reasons, Quickfixes, } #[derive(Clone, Debug)] enum Enum<R: Reason> { Ignore, Always(TypingError<R>), OfError(TypingError<R>), WithCode(Box<Inner<R>>, TypingErrorCode), Retain(Box<Inner<R>>, Component), } // Wrapping here allows Enum variants and their fields to remain private. #[derive(Clone, Debug)] pub struct Inner<R: Reason>(Enum<R>); #[derive(Clone, Debug)] pub struct ReasonsCallback<'a, R: Reason>(Lazy<'a, Inner<R>>); impl<R: Reason> From<Enum<R>> for Inner<R> { fn from(other: Enum<R>) -> Self { Self(other) } } impl<R: Reason> From<Inner<R>> for Enum<R> { fn from(other: Inner<R>) -> Self { other.0 } } impl<'a, R: Reason + 'a> ReasonsCallback<'a, R> { pub fn new(mk: &'a dyn Fn() -> Inner<R>) -> Self { Self(Lazy::new(mk)) } pub fn ignore() -> Inner<R> { Enum::Ignore.into() } pub fn invalid_type_hint(pos: R::Pos) -> Inner<R> { Self::retain_quickfixes(Self::of_primary_error(Primary::InvalidTypeHint(pos))) } fn of_primary_error(prim_err: Primary<R>) -> Inner<R> { Enum::OfError(TypingError::primary(prim_err)).into() } fn retain_quickfixes(cb: Inner<R>) -> Inner<R> { Enum::Retain(Box::new(cb), Component::Quickfixes).into() } }
Rust
hhvm/hphp/hack/src/hackrs/ty/prop/constraint.rs
// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.

#![allow(dead_code)]

use im::HashSet;
use oxidized::ast_defs::Variance;
use pos::Symbol;
use pos::TypeName;

use crate::local::Ty;
use crate::local::Tyvar;
use crate::reason::Reason;

/// A constraint over local types, produced during inference.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum Cstr<R: Reason> {
    /// The first type must be a subtype of the second.
    Subtype(Ty<R>, Ty<R>),
    /// `ty` must provide a method `name`, accessed from class `class_id`.
    HasMethod {
        name: Symbol,
        ty: Ty<R>,
        class_id: Symbol,
        // TODO: oxidized::aast::TypeHint (along with most oxidized::aast types)
        // can't be used in hash-consed values because it contains `Rc`s, which
        // can't be shared across threads (and we share hash-consed values
        // across threads). We'll need to map the oxidized `TypeHint` to our own
        // representation which uses `R::Pos` (probably also `pos::Symbol`,
        // `pos::TypeName`, etc).
        ty_args: Vec<Ty<R>>, // was Vec<TypeHint<R>>
    },
    /// `ty` must provide a property `name`, accessed from class `class_id`.
    HasProp {
        name: Symbol,
        ty: Ty<R>,
        class_id: Symbol,
    },
}

impl<R: Reason> Cstr<R> {
    pub fn subtype(ty_sub: Ty<R>, ty_sup: Ty<R>) -> Self {
        Self::Subtype(ty_sub, ty_sup)
    }

    pub fn has_method(
        name: Symbol,
        ty: Ty<R>,
        class_id: Symbol,
        ty_args: Vec<Ty<R>>, // TODO
    ) -> Self {
        Self::HasMethod {
            name,
            ty,
            class_id,
            ty_args,
        }
    }

    pub fn has_prop(name: Symbol, ty: Ty<R>, class_id: Symbol) -> Self {
        Self::HasProp { name, ty, class_id }
    }

    /// The type variables of the constraint's component types, split into
    /// covariant and contravariant occurrences (see `Ty::tyvars`).
    pub fn tyvars<F>(&self, get_tparam_variance: &F) -> (HashSet<Tyvar>, HashSet<Tyvar>)
    where
        F: Fn(TypeName) -> Option<Vec<Variance>>,
    {
        match self {
            Cstr::Subtype(ty_sub, ty_sup) => {
                // Union the tyvar sets from both sides of the subtyping.
                let (pos_sub, neg_sub) = ty_sub.tyvars(get_tparam_variance);
                let (pos_sup, neg_sup) = ty_sup.tyvars(get_tparam_variance);
                (pos_sub.union(pos_sup), neg_sub.union(neg_sup))
            }
            Cstr::HasMethod { ty, .. } | Cstr::HasProp { ty, .. } => ty.tyvars(get_tparam_variance),
        }
    }
}
Rust
hhvm/hphp/hack/src/hackrs/utils/core.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. mod ident; mod local_id; pub mod ns; pub use ident::Ident; pub use ident::IdentGen; pub use local_id::LocalId;
Rust
hhvm/hphp/hack/src/hackrs/utils/lazy.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. //! `once_cell::unsync::Lazy` equivalent that takes an arbitrary closure, //! instead of a function pointer. use std::cell::Cell; use std::cell::UnsafeCell; /// A lazy value that accepts a `dyn FnOnce`. pub struct Lazy<'a, T> { cell: UnsafeCell<Option<T>>, init: Cell<Option<&'a dyn Fn() -> T>>, } impl<'a, T: std::fmt::Debug> std::fmt::Debug for Lazy<'a, T> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { Self::force(self).fmt(f) } } impl<'a, T: Clone> Clone for Lazy<'a, T> { fn clone(&self) -> Self { Self { cell: UnsafeCell::new(self.get().cloned()), init: self.init.clone(), } } } impl<'a, T> Lazy<'a, T> { pub fn new(f: &'a dyn Fn() -> T) -> Self { Self { cell: UnsafeCell::new(None), init: Cell::new(Some(f)), } } fn get(&self) -> Option<&T> { unsafe { &*self.cell.get() }.as_ref() } pub fn force(this: &Self) -> &T { if let Some(val) = this.get() { return val; } let f = this .init .take() .expect("Lazy value was previously poisoned"); let val = f(); let slot = unsafe { &mut *this.cell.get() }; // Safety: slot was None before. *slot = Some(val); this.get().unwrap() } }
Rust
hhvm/hphp/hack/src/hackrs/utils/utils.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. pub mod core; mod lazy; pub use lazy::Lazy;
TOML
hhvm/hphp/hack/src/hackrs/utils/cargo/utils/Cargo.toml
# @generated by autocargo [package] name = "utils" version = "0.0.0" edition = "2021" [lib] path = "../../utils.rs" [dependencies] eq_modulo_pos = { version = "0.0.0", path = "../../../../utils/eq_modulo_pos" } ocamlrep = { version = "0.1.0", git = "https://github.com/facebook/ocamlrep/", branch = "main" } oxidized = { version = "0.0.0", path = "../../../../oxidized" } pos = { version = "0.0.0", path = "../../../pos/cargo/pos" } serde = { version = "1.0.176", features = ["derive", "rc"] }
Rust
hhvm/hphp/hack/src/hackrs/utils/core/ident.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use std::cell::RefCell; use std::rc::Rc; use eq_modulo_pos::EqModuloPos; use ocamlrep::FromOcamlRep; use ocamlrep::ToOcamlRep; use serde::Deserialize; use serde::Serialize; #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, EqModuloPos, PartialOrd, Ord)] #[derive(Serialize, Deserialize)] #[derive(ToOcamlRep, FromOcamlRep)] pub struct Ident(u64); impl From<u64> for Ident { fn from(x: u64) -> Self { Self(x) } } impl From<isize> for Ident { fn from(x: isize) -> Self { Self(x.try_into().unwrap()) } } impl From<Ident> for isize { fn from(x: Ident) -> isize { x.0 as isize } } impl std::fmt::Display for Ident { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "#{}", self.0) } } #[derive(Debug, Clone)] pub struct IdentGen { next: Rc<RefCell<u64>>, } impl IdentGen { pub fn new() -> Self { Self { next: Rc::new(RefCell::new(1)), } } pub fn make(&self) -> Ident { let mut r = self.next.borrow_mut(); let v = *r; *r += 1; Ident(v) } }
Rust
hhvm/hphp/hack/src/hackrs/utils/core/local_id.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use pos::Symbol; #[derive(Debug, Clone, Eq, Hash, PartialEq)] pub struct LocalId(u64, Symbol); impl LocalId { pub fn new(x: u64, name: Symbol) -> Self { LocalId(x, name) } pub fn new_unscoped(x: Symbol) -> Self { Self(0, x) } pub fn to_int(&self) -> u64 { self.0 } pub fn to_string(&self) -> &Symbol { &self.1 } } impl From<&oxidized::local_id::LocalId> for LocalId { fn from(li: &oxidized::local_id::LocalId) -> Self { LocalId::new(li.0.try_into().unwrap(), Symbol::new(&li.1)) } }
Rust
hhvm/hphp/hack/src/hackrs/utils/core/ns.rs
// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.

/// Strip a single leading `\` from `s`, if present; otherwise return `s`
/// unchanged. Never allocates.
pub fn strip_ns(s: &str) -> &str {
    s.strip_prefix('\\').unwrap_or(s)
}

/// Ensure `s` carries a leading `\`, prepending one only when missing.
pub fn add_ns(s: &str) -> String {
    // `starts_with` expresses the intent directly; no need to materialize
    // the stripped prefix just to test for its existence.
    if s.starts_with('\\') {
        s.to_string()
    } else {
        format!("\\{}", s)
    }
}
TOML
hhvm/hphp/hack/src/hcons/Cargo.toml
# @generated by autocargo [package] name = "hcons" version = "0.0.0" edition = "2021" [lib] path = "lib.rs" [dependencies] dashmap = { version = "5.4", features = ["rayon", "serde"] } fnv = "1.0" ocamlrep = { version = "0.1.0", git = "https://github.com/facebook/ocamlrep/", branch = "main" } once_cell = "1.12" serde = { version = "1.0.176", features = ["derive", "rc"] }
Rust
hhvm/hphp/hack/src/hcons/lib.rs
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.

//! Hash-consing: intern equal values so they share one allocation and can
//! be compared by pointer.

use std::fmt;
use std::hash::Hash;
use std::hash::Hasher;
use std::ops::Deref;
use std::sync::Arc;
use std::sync::Weak;

use dashmap::mapref::entry::Entry;
use dashmap::DashMap;
use ocamlrep::FromOcamlRep;
use ocamlrep::ToOcamlRep;
pub use once_cell::sync::Lazy;
use serde::Deserialize;
use serde::Deserializer;
use serde::Serialize;
use serde::Serializer;

/// A hash-consed pointer.
pub struct Hc<T>(Arc<T>);

impl<T: Consable> Hc<T> {
    /// Intern `value` in its type's global conser, returning the canonical
    /// shared pointer for it.
    #[inline]
    pub fn new(value: T) -> Self {
        T::conser().mk(value)
    }
}

/// Types that can be hash-consed; each supplies its own global intern table.
pub trait Consable: Eq + Hash + Sized + 'static {
    fn conser() -> &'static Conser<Self>;
}

impl<T> Clone for Hc<T> {
    // Cloning is just an Arc refcount bump.
    #[inline]
    fn clone(&self) -> Self {
        Hc(Arc::clone(&self.0))
    }
}

impl<T: fmt::Debug> fmt::Debug for Hc<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        self.0.fmt(f)
    }
}

impl<T: fmt::Display> fmt::Display for Hc<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.0.fmt(f)
    }
}

impl<T> Deref for Hc<T> {
    type Target = T;
    fn deref(&self) -> &T {
        &self.0
    }
}

impl<T: Eq> Eq for Hc<T> {}

impl<T: Hash> Hash for Hc<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // hashbrown-based hash tables use the upper byte of the hash code as a
        // tag which drives SIMD parallelism. If `state` is a Hasher which
        // doesn't distribute pointer hashes well (e.g., nohash_hasher), then
        // all upper bytes of Hc hash codes will be the same, and perf of
        // hashbrown tables containing Hc keys will suffer. If we carefully
        // avoid such hashers, we could probably just hash the (fat) pointer
        // here. But as a precaution for now, run it through FNV first.
        state.write_u64(fnv_hash(&Arc::as_ptr(&self.0)));
    }
}

// Equality is pointer equality: `Conser::mk` interns equal values into one
// allocation. NOTE(review): this relies on the unhandled hash-collision TODO
// in `mk` never firing.
impl<T: PartialEq> PartialEq for Hc<T> {
    fn eq(&self, other: &Self) -> bool {
        std::ptr::eq(self.0.as_ref(), other.0.as_ref())
    }
}

impl<T: PartialEq> PartialEq<&Hc<T>> for Hc<T> {
    fn eq(&self, other: &&Hc<T>) -> bool {
        std::ptr::eq(self.0.as_ref(), other.0.as_ref())
    }
}

impl<T: PartialEq> PartialEq<Hc<T>> for &Hc<T> {
    fn eq(&self, other: &Hc<T>) -> bool {
        std::ptr::eq(self.0.as_ref(), other.0.as_ref())
    }
}

// Ordering delegates to the pointed-to values, not the pointers.
impl<T: PartialOrd> PartialOrd for Hc<T> {
    #[inline]
    fn partial_cmp(&self, other: &Hc<T>) -> Option<std::cmp::Ordering> {
        (**self).partial_cmp(&**other)
    }
}

impl<T: Ord> Ord for Hc<T> {
    #[inline]
    fn cmp(&self, other: &Hc<T>) -> std::cmp::Ordering {
        (**self).cmp(&**other)
    }
}

impl<T: Serialize> Serialize for Hc<T> {
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        // TODO: The `intern` crate has a way of preserving sharing of interned
        // values in serde output; we may want to do the same here.
        (**self).serialize(serializer)
    }
}

impl<'de, T: Deserialize<'de> + Consable> Deserialize<'de> for Hc<T> {
    // Deserialized values are re-interned through `Hc::new`.
    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        Deserialize::deserialize(deserializer).map(Hc::new)
    }
}

impl<T: ToOcamlRep + Consable> ToOcamlRep for Hc<T> {
    fn to_ocamlrep<'a, A: ocamlrep::Allocator>(&'a self, alloc: &'a A) -> ocamlrep::Value<'a> {
        self.0.to_ocamlrep(alloc)
    }
}

impl<T: FromOcamlRep + Consable> FromOcamlRep for Hc<T> {
    fn from_ocamlrep(value: ocamlrep::Value<'_>) -> Result<Self, ocamlrep::FromError> {
        Ok(Hc::new(T::from_ocamlrep(value)?))
    }
}

/// Hash `value` with FNV; used both for intern-table keys and `Hc`'s `Hash`.
fn fnv_hash<T: Hash>(value: &T) -> u64 {
    let mut hasher = fnv::FnvHasher::default();
    value.hash(&mut hasher);
    hasher.finish()
}

/// An intern table: maps a value's FNV hash to a weak reference to its
/// canonical allocation (weak so the table does not keep values alive).
#[derive(Debug)]
pub struct Conser<T> {
    table: DashMap<u64, Weak<T>>,
}

impl<T: Consable> Conser<T> {
    pub fn new() -> Self {
        Conser {
            table: DashMap::new(),
        }
    }

    /// Drop entries whose value has no remaining strong references; returns
    /// true if anything was removed.
    pub fn gc(&self) -> bool {
        let l = self.table.len();
        self.table.retain(|_, v| v.strong_count() != 0);
        l != self.table.len()
    }

    pub fn clear(&self) {
        self.table.clear()
    }

    /// Intern `x`: return the existing canonical pointer for an equal value,
    /// or make `x` the new canonical value.
    fn mk(&self, x: T) -> Hc<T> {
        let hash = fnv_hash(&x);
        let rc = match self.table.entry(hash) {
            Entry::Occupied(mut o) => match o.get().upgrade() {
                Some(rc) => {
                    // TODO: handle collisions
                    debug_assert!(x == *rc);
                    rc
                }
                None => {
                    // The previous value was dropped; replace the dead weak
                    // reference with a fresh allocation.
                    let rc = Arc::new(x);
                    o.insert(Arc::downgrade(&rc));
                    rc
                }
            },
            Entry::Vacant(v) => {
                let rc = Arc::new(x);
                v.insert(Arc::downgrade(&rc));
                rc
            }
        };
        Hc(rc)
    }
}

#[cfg(test)]
mod test {
    #[test]
    fn shared_hcs() {
        use ocamlrep::Arena;

        use super::*;

        impl Consable for (i32, i32) {
            fn conser() -> &'static Conser<Self> {
                static CONSER: Lazy<Conser<(i32, i32)>> = Lazy::new(Conser::new);
                &CONSER
            }
        }

        impl Consable for (Hc<(i32, i32)>, Hc<(i32, i32)>) {
            fn conser() -> &'static Conser<Self> {
                static CONSER: Lazy<Conser<(Hc<(i32, i32)>, Hc<(i32, i32)>)>> =
                    Lazy::new(Conser::new);
                &CONSER
            }
        }

        let inner_tuple = Hc::new((1, 2));
        let outer_tuple = Hc::new((Hc::clone(&inner_tuple), inner_tuple));

        let arena = Arena::new();
        let ocaml_tuple = arena.add_root(&outer_tuple);
        let outer_tuple = ocaml_tuple.as_block().unwrap();

        // The tuple pointer in the first field is physically equal to the tuple
        // pointer in the second field.
        assert_eq!(outer_tuple[0].to_bits(), outer_tuple[1].to_bits());
    }
}
TOML
hhvm/hphp/hack/src/heap/Cargo.toml
# @generated by autocargo [package] name = "dump_saved_state_depgraph" version = "0.0.0" edition = "2021" [[bin]] name = "dump_saved_state_depgraph" path = "dump_saved_state_depgraph.rs" test = false [dependencies] clap = { version = "4.3.5", features = ["derive", "env", "string", "unicode", "wrap_help"] } depgraph_reader = { version = "0.0.0", path = "../depgraph/cargo/depgraph_reader" } indicatif = { version = "0.17.3", features = ["improved_unicode", "rayon", "tokio"] }
C/C++
hhvm/hphp/hack/src/heap/dictionary_data.h
/**
 * Copyright (c) 2015, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the "hack" directory of this source tree.
 *
 */
#ifndef DICTIONARY_DATA_H
#define DICTIONARY_DATA_H

/* Dictionary byte blob and its length, defined in a separate (presumably
 * generated) translation unit -- confirm semantics at the defining source. */
extern const unsigned char dictionary_data[];
extern const unsigned int dictionary_data_len;

#endif // DICTIONARY_DATA_H
Rust
hhvm/hphp/hack/src/heap/dump_saved_state_depgraph.rs
// Copyright (c) 2021, Facebook, Inc. // All rights reserved. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use depgraph_reader::Dep; use depgraph_reader::DepGraph; #[derive(Debug)] enum Error { IoError(std::io::Error), DepgraphError(String), Other(String), } impl std::fmt::Display for Error { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match *self { Error::IoError(ref e) => ::std::fmt::Display::fmt(e, f), Error::DepgraphError(ref e) => f.write_str(e), Error::Other(ref e) => f.write_str(e), } } } impl std::error::Error for Error {} impl std::convert::From<std::io::Error> for Error { fn from(error: std::io::Error) -> Self { Error::IoError(error) } } type Result<T> = std::result::Result<T, Error>; const MAX_DIGITS_IN_HASH: usize = 20; // (u64::MAX as f64).log10() as usize + 1; fn print_edges_header() { println!( " {:>width$} dependent", "dependency", width = MAX_DIGITS_IN_HASH ); } /// Auxiliary function to print a 64-bit edge fn print_edge_u64(dependency: Dep, dependent: Dep, hex_dump: bool) { if hex_dump { println!( " {dependency:#016x} {dependent:#016x}", dependent = dependent, dependency = dependency ); } else { println!( " {dependency:>width$} {dependent}", dependent = dependent, dependency = dependency, width = MAX_DIGITS_IN_HASH ); } } /// Add edges to `es` given source vertex `k` and dest vertices `vs`. fn add_edges<T: Ord + Clone>(es: &mut Vec<(T, T)>, k: T, vs: &std::collections::BTreeSet<T>) { es.extend(vs.iter().map(|v| (k.clone(), v.clone()))); } /// Retrieve the adjacency list for `k` in `g`. /// /// This is the analog of `value_vertex` for 64-bit depgraphs. fn hashes(g: &DepGraph, k: Dep) -> std::collections::BTreeSet<Dep> { match g.hash_list_for(k) { None => std::collections::BTreeSet::new(), Some(hashes) => g.hash_list_hashes(hashes).collect(), } } /// Print an ASCII representation of a 64-bit depgraph to stdout. 
// Dump every edge of the graph (or only those of `dependency_hash`, when
// given) to stdout, one edge per line.
fn dump_depgraph64(file: &str, dependency_hash: Option<Dep>, hex_dump: bool) -> Result<()> {
    let dg = DepGraph::from_path(file)?;
    let () = dg.validate_hash_lists().map_err(Error::DepgraphError)?;
    // Prints `key -> dst` for every dst adjacent to `key`.
    let print_edges_for_key = |key: Dep| {
        let dests = hashes(&dg, key);
        for dst in dests {
            print_edge_u64(key, dst, hex_dump);
        }
    };
    match dependency_hash {
        None => {
            // No filter: walk the whole graph.
            for key in dg.all_hashes() {
                print_edges_for_key(key)
            }
        }
        Some(dependency_hash) => print_edges_for_key(dependency_hash),
    };
    Ok(())
}

/// Compare two 64-bit dependency graphs.
///
/// Calculate the edges in `control_file` not in `test_file` (missing edges) and
/// the edges in `test_file` not in `control_file` (extraneous edges).
fn comp_depgraph64(
    no_progress_bar: bool,
    test_file: &str,
    control_file: &str,
    hex_dump: bool,
) -> Result<()> {
    let mut num_edges_missing = 0;
    // Convention throughout: `l`/left = 'test' graph, `r`/right = 'control'.
    let l_depgraph = DepGraph::from_path(test_file)?;
    let r_depgraph = DepGraph::from_path(control_file)?;
    // The closure does the actual work; its error type is String, converted
    // to Error::DepgraphError at the bottom.
    match (|| {
        let ((), ()) = (
            l_depgraph.validate_hash_lists()?,
            r_depgraph.validate_hash_lists()?,
        );
        // `all_hashes()` yields dependency hashes in sorted order, which is
        // what makes the two-pointer merge below valid.
        let (mut l_dependencies_iter, mut r_dependencies_iter) =
            (l_depgraph.all_hashes(), r_depgraph.all_hashes());
        let (lnum_keys, rnum_keys) = (l_dependencies_iter.len(), r_dependencies_iter.len());
        // Counts of keys processed on each side (for the summary printout).
        let (mut lproc, mut rproc) = (0, 0);
        // Accumulated edge differences: (dependency, dependent) pairs.
        let (mut in_r_not_l, mut in_l_not_r) = (vec![], vec![]);
        // Current head of each sorted key stream; None once exhausted.
        let (mut l_dependency_opt, mut r_dependency_opt) =
            (l_dependencies_iter.next(), r_dependencies_iter.next());
        let (mut ledge_count, mut redge_count) = (0, 0);
        // Progress bar sized to the longer key stream, so one tick per
        // advance of the longer side reaches 100%.
        let bar = if no_progress_bar {
            None
        } else {
            Some(indicatif::ProgressBar::new(
                std::cmp::max(lnum_keys, rnum_keys) as u64,
            ))
        };
        if let Some(bar) = bar.as_ref() {
            bar.println("Comparing graphs. Patience...")
        };
        // Classic sorted-merge: advance whichever side has the smaller key,
        // both when equal. Each arm must (a) tally edge counts, (b) record
        // the difference edges, and (c) advance the right iterator(s) —
        // statement order within the arms matters only for readability, the
        // invariant is that exactly the consumed side(s) get re-`next()`ed.
        while l_dependency_opt.is_some() || r_dependency_opt.is_some() {
            match (l_dependency_opt, r_dependency_opt) {
                (None, Some(r_dependency)) => {
                    // These edges are in `r` and not in `l`.
                    let dependency = r_dependency;
                    let dependents = hashes(&r_depgraph, dependency);
                    redge_count += dependents.len();
                    add_edges(&mut in_r_not_l, dependency, &dependents);
                    r_dependency_opt = r_dependencies_iter.next();
                    rproc += 1;
                    if bar.is_some() && rnum_keys > lnum_keys {
                        bar.as_ref().unwrap().inc(1); // We advanced `r` and there are more keys in `r` than `l`.
                    }
                }
                (Some(l_dependency), None) => {
                    // These edges are in `l` and not in `r`.
                    let dependency = l_dependency;
                    let dependents = hashes(&l_depgraph, dependency);
                    l_dependency_opt = l_dependencies_iter.next();
                    ledge_count += dependents.len();
                    add_edges(&mut in_l_not_r, dependency, &dependents);
                    lproc += 1;
                    if bar.is_some() && lnum_keys > rnum_keys {
                        bar.as_ref().unwrap().inc(1); // We advanced `l` and there are more keys in `l` than `r`.
                    }
                }
                (Some(l_dependency), Some(r_dependency)) => {
                    let (l_dependencies, r_dependencies) = (
                        hashes(&l_depgraph, l_dependency),
                        hashes(&r_depgraph, r_dependency),
                    );
                    if l_dependency < r_dependency {
                        // These edges are in `l` but not in `r`.
                        ledge_count += l_dependencies.len();
                        add_edges(&mut in_l_not_r, l_dependency, &l_dependencies);
                        l_dependency_opt = l_dependencies_iter.next();
                        lproc += 1;
                        if bar.is_some() && lnum_keys >= rnum_keys {
                            bar.as_ref().unwrap().inc(1); // We advanced `l` and there are more keys in `l` than `r`.
                        }
                        continue;
                    }
                    if l_dependency > r_dependency {
                        // These edges are in `r` but not in `l`.
                        redge_count += r_dependencies.len();
                        add_edges(&mut in_r_not_l, r_dependency, &r_dependencies);
                        r_dependency_opt = r_dependencies_iter.next();
                        rproc += 1;
                        if bar.is_some() && rnum_keys > lnum_keys {
                            bar.as_ref().unwrap().inc(1); // We advanced `r` and there are more keys in `r` than `l`.
                        }
                        continue;
                    }
                    // Keys are equal: diff the two adjacency sets both ways,
                    // reusing one scratch BTreeSet for the two set differences.
                    ledge_count += l_dependencies.len();
                    redge_count += r_dependencies.len();
                    let mut dests: std::collections::BTreeSet<Dep> =
                        std::collections::BTreeSet::new();
                    dests.extend(
                        r_dependencies
                            .iter()
                            .filter(|&v| !l_dependencies.contains(v)),
                    );
                    add_edges(&mut in_r_not_l, l_dependency, &dests);
                    dests.clear();
                    dests.extend(
                        l_dependencies
                            .iter()
                            .filter(|&v| !r_dependencies.contains(v)),
                    );
                    add_edges(&mut in_l_not_r, l_dependency, &dests);
                    l_dependency_opt = l_dependencies_iter.next();
                    r_dependency_opt = r_dependencies_iter.next();
                    lproc += 1;
                    rproc += 1;
                    if bar.is_some() {
                        bar.as_ref().unwrap().inc(1)
                    }; // No matter whether `l` or `r` has more keys, progress was made.
                }
                // Unreachable: guarded by the `while` condition above.
                (None, None) => panic!("The impossible happened!"),
            }
        }
        if let Some(bar) = bar {
            bar.finish_and_clear()
        };
        // Only edges missing from 'test' make the run fail; extraneous
        // edges in 'test' are reported but tolerated.
        num_edges_missing = in_r_not_l.len();
        println!("\nResults\n=======");
        println!("Processed {}/{} of nodes in 'test'", lproc, lnum_keys);
        println!("Processed {}/{} of nodes in 'control'", rproc, rnum_keys);
        println!("Edges in 'test': {}", ledge_count);
        println!("Edges in 'control': {}", redge_count);
        println!(
            "Edges in 'control' missing in 'test' (there are {}):",
            in_r_not_l.len()
        );
        print_edges_header();
        for (key, dst) in in_r_not_l {
            print_edge_u64(key, dst, hex_dump);
        }
        println!(
            "Edges in 'test' missing in 'control' (there are {}):",
            in_l_not_r.len()
        );
        print_edges_header();
        for (key, dst) in in_l_not_r {
            print_edge_u64(key, dst, hex_dump);
        }
        Ok(())
    })() {
        Ok(()) => {
            if num_edges_missing == 0 {
                Ok(())
            } else {
                // Rust 2018 semantics are such that this will result in a
                // non-zero error code
                // (https://doc.rust-lang.org/edition-guide/rust-2018/error-handling-and-panics/question-mark-in-main-and-tests.html).
                Err(Error::Other(format!(
                    "{} missing edges detected",
                    num_edges_missing
                )))
            }
        }
        Err(msg) => Err(Error::DepgraphError(msg)),
    }
}

use clap::Parser;

// Parse a u64 given either as decimal ("123") or 0x-prefixed hex ("0x7b").
// NOTE(review): `trim_start_matches` strips *repeated* leading "0x"
// occurrences, so "0x0x7b" also parses as hex 0x7b — presumably harmless
// for CLI input, but confirm if strictness matters.
fn parse_hex_or_decimal(src: &str) -> std::result::Result<u64, std::num::ParseIntError> {
    let src_trim = src.trim_start_matches("0x");
    if src_trim.len() != src.len() {
        // Something was stripped, so the input carried a hex prefix.
        u64::from_str_radix(src_trim, 16)
    } else {
        src_trim.parse::<u64>()
    }
}

// Command-line interface, derived via clap. Either `--dump FILE`
// (render one graph) or `--test FILE --control FILE` (compare two).
#[derive(Debug, Parser)]
#[clap(
    name = "dump_saved_state_depgraph",
    about = "
Common usage is to provide two file arguments to compare, 'test' and
'control'.

Example invocation:

    dump_saved_state_depgraph --bitness 32 \\
        --test path/to/test.bin --control path/to/control.bin

Exit code will be 0 if 'test' >= 'control' and 1 if 'test' < 'control'."
)]
struct Opt {
    #[clap(long = "with-progress-bar", help = "Enable progress bar display")]
    with_progress_bar: bool,

    #[clap(long = "dump", help = "graph to render as text")]
    dump: Option<String>,

    #[clap(
        long = "dependency-hash",
        help = "(with --dump; only for 64-bit) only dump edges for the given dependency hash",
        value_parser = parse_hex_or_decimal
    )]
    dependency_hash: Option<u64>,

    #[clap(long = "print-hex", help = "print hexadecimal hashes")]
    print_hex: bool,

    #[clap(long = "test", help = "'test' graph")]
    test: Option<String>,

    #[clap(long = "control", help = "'control' graph")]
    control: Option<String>,
}

fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
    let opt = Opt::try_parse()?;
    // Dispatch on which flags were supplied: --dump wins over
    // --test/--control; anything else is a silent no-op success.
    match match opt {
        Opt {
            dump: Some(file),
            dependency_hash,
            print_hex,
            ..
        } => dump_depgraph64(&file, dependency_hash.map(Dep::new), print_hex),
        Opt {
            with_progress_bar,
            test: Some(test),
            control: Some(control),
            print_hex,
            ..
        } => comp_depgraph64(!with_progress_bar, &test, &control, print_hex),
        _ => Ok(()),
    } {
        Ok(()) => Ok(()),
        // Box the error so main's return type satisfies the Termination trait.
        Err(e) => Err(Box::new(e)),
    }
}
hhvm/hphp/hack/src/heap/dune
(library (name heap_libc) (wrapped false) (modules) (foreign_stubs (language c) (names hh_assert hh_shared) (flags (:standard (:include config/c_flags.sexp)))) (c_library_flags (:standard (:include config/c_library_flags.sexp))) (preprocess (pps ppx_deriving.std visitors.ppx)) (libraries shmffi utils_core dictionary_data)) (library (name heap_ident) (wrapped false) (modules ident) (libraries collections utils_core) (preprocess (pps ppx_deriving.std ppx_hash visitors.ppx))) (library (name heap_shared_mem_hash) (wrapped false) (modules sharedMemHash) (preprocess (pps ppx_deriving.std visitors.ppx)) (libraries heap_libc shmffi)) (library (name heap_shared_mem) (wrapped false) (modules prefix sharedMem) (preprocess (pps ppx_deriving.std visitors.ppx)) (libraries global_config heap_libc logging_common shmffi utils_core worker_cancel)) (library (name worker_cancel) (wrapped false) (modules workerCancel) (preprocess (pps ppx_deriving.std visitors.ppx)) (libraries heap_libc shmffi utils_core)) (library (name heap_global_storage) (wrapped false) (modules globalStorage) (preprocess (pps ppx_deriving.std visitors.ppx)) (libraries heap_libc shmffi))
OCaml
hhvm/hphp/hack/src/heap/globalStorage.ml
(*
 * Copyright (c) 2015, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the "hack" directory of this source tree.
 *
 *)

(*****************************************************************************)
(* Module implementing a global storage system, an efficient way for the
 * master to communicate data with the workers (cf hh_shared.c for the
 * underlying C implementation).
 *
 * The master can store data in the global storage, after that, the data
 * is visible to all the workers.
 *)
(*****************************************************************************)

module Make : functor
  (Value : sig
     type t
   end)
  -> sig
  (* "store v" stores the value v in the global storage.
   * Can only be called by the master.
   * 'hh_shared_init' must have been called prior to the first call.
   * The store must be empty. *)
  val store : Value.t -> unit

  (* "load()" returns the value stored in the global storage.
   * Can be called by any process (master or workers), "store" must have
   * been called by the master before the call. *)
  val load : unit -> Value.t

  (* "clear()" empties the global storage.
   * Can only be called by the master. *)
  val clear : unit -> unit
end =
functor
  (Value : sig
     type t
   end)
  ->
  struct
    (* C stubs implemented in hh_shared.c; they move marshalled bytes in and
     * out of the shared-memory global storage area. *)
    external hh_shared_store : string -> unit = "hh_shared_store"

    external hh_shared_load : unit -> string = "hh_shared_load"

    external hh_shared_clear : unit -> unit = "hh_shared_clear"

    (* Serialize with Marshal (no flags) before handing the bytes to C. *)
    let store (x : Value.t) = hh_shared_store (Marshal.to_string x [])

    (* Deserialize from offset 0 of the stored string. Unsafe in the usual
     * Marshal sense: the caller must instantiate Make with the same Value.t
     * that was stored. *)
    let load () : Value.t = Marshal.from_string (hh_shared_load ()) 0

    let clear () = hh_shared_clear ()
  end
C
hhvm/hphp/hack/src/heap/hh_assert.c
/**
 * Copyright (c) 2015, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the "hack" directory of this source tree.
 *
 */
#include "hh_assert.h"

#define CAML_NAME_SPACE
#include <caml/callback.h>
#include <caml/fail.h>

/* Raise the OCaml exception registered (via Callback.register_exception)
 * under the name "c_assertion_failure", carrying `msg` (typically a
 * file:line string from the LOCATION macro). Does not return. */
void raise_assertion_failure(char * msg) {
  const value *exn = caml_named_value("c_assertion_failure");
  if (exn == NULL) {
    /* The OCaml side never registered the exception. The original code
     * dereferenced the NULL return of caml_named_value and crashed; fall
     * back to a generic Failure so the message still surfaces. */
    caml_failwith(msg);
  }
  caml_raise_with_string(*exn, msg);
}
C/C++
hhvm/hphp/hack/src/heap/hh_assert.h
/**
 * Copyright (c) 2015, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the "hack" directory of this source tree.
 *
 */
#ifndef HH_ASSERT_H
#define HH_ASSERT_H

/* Raise the OCaml exception registered as "c_assertion_failure", carrying
 * `msg`. Defined in hh_assert.c. */
void raise_assertion_failure(char * msg);

/**
 * Concatenate the __LINE__ and __FILE__ strings in a macro.
 */
#define S1(x) #x
#define S2(x) S1(x)
#define LOCATION __FILE__ " : " S2(__LINE__)

/* NOTE: deliberately shadows the standard <assert.h> macro: a failed check
 * raises an OCaml exception naming the file and line instead of aborting
 * the process. Unlike standard assert, it is NOT disabled by NDEBUG.
 *
 * The comma operator gives the false arm type int to match the true arm;
 * the original `(f) ? 0 : raise_assertion_failure(LOCATION)` mixed int and
 * void arms, which violates the C conditional-operator constraints
 * (C11 6.5.15). The whole thing stays an expression with value 0, like
 * before. */
#define assert(f) ((f) ? 0 : (raise_assertion_failure(LOCATION), 0))

#endif
OCaml
hhvm/hphp/hack/src/heap/hh_dummy.ml
(*
 * From OCaml 4.12 on, libraries containing any .ml modules no longer
 * generate libfoo.a files.
 * Buck v1 still expects these, so the easiest workaround until
 * Buck v2 is to provide an empty module to trigger the generation
 * of libfoo.a.
 *)

(* Deliberate no-op: this module only needs to exist. *)
let () = ()
C
hhvm/hphp/hack/src/heap/hh_shared.c
/** * Copyright (c) 2015, Facebook, Inc. * All rights reserved. * * This source code is licensed under the MIT license found in the * LICENSE file in the "hack" directory of this source tree. * */ #include "hh_shared.h" /* For some reason this header file is not on path in OSS builds. * But we only lose the ability to trim the OCaml heap after a GC */ #if __has_include("malloc.h") # define MALLOC_TRIM # include <malloc.h> #endif /*****************************************************************************/ /* File Implementing the shared memory system for Hack. * * THIS CODE ONLY WORKS WITH HACK, IT MAY LOOK LIKE A GENERIC ATOMIC * HASHTABLE FOR OCAML: IT IS NOT! * BUT ... YOU WERE GOING TO SAY BUT? BUT ... * THERE IS NO BUT! DONNY YOU'RE OUT OF YOUR ELEMENT! * * The lock-free data structures implemented here only work because of how * the Hack phases are synchronized. * * There are 2 kinds of storage implemented in this file. * I) The global storage. Used by the master to efficiently transfer a blob * of data to the workers. This is used to share an environment in * read-only mode with all the workers. * The master stores, the workers read. * Only concurrent reads allowed. No concurrent write/read and write/write. * There are a few different OCaml modules that act as interfaces to this * global storage. They all use the same area of memory, so only one can be * active at any one time. The first word indicates the size of the global * storage currently in use; callers are responsible for setting it to zero * once they are done. * * II) The hashtable that maps string keys to string values. (The strings * are really serialized / marshalled representations of OCaml structures.) * Key observation of the table is that data with the same key are * considered equivalent, and so you can arbitrarily get any copy of it; * furthermore if data is missing it can be recomputed, so incorrectly * saying data is missing when it is being written is only a potential perf * loss. 
Note that "equivalent" doesn't necessarily mean "identical", e.g., * two alpha-converted types are "equivalent" though not literally byte- * identical. (That said, I'm pretty sure the Hack typechecker actually does * always write identical data, but the hashtable doesn't need quite that * strong of an invariant.) * * The operations implemented, and their limitations: * * -) Concurrent writes: SUPPORTED * One will win and the other will get dropped on the floor. There is no * way to tell which happened. Only promise is that after a write, the * one thread which did the write will see data in the table (though it * may be slightly different data than what was written, see above about * equivalent data). * * -) Concurrent reads: SUPPORTED * If interleaved with a concurrent write, the read will arbitrarily * say that there is no data at that slot or return the entire new data * written by the concurrent writer. * * -) Concurrent removes: NOT SUPPORTED * Only the master can remove, and can only do so if there are no other * concurrent operations (reads or writes). * * Since the values are variably sized and can get quite large, they are * stored separately from the hashes in a garbage-collected heap. * * Both II and III resolve hash collisions via linear probing. 
*/ /*****************************************************************************/ /* For printing uint64_t * http://jhshi.me/2014/07/11/print-uint64-t-properly-in-c/index.html */ #define __STDC_FORMAT_MACROS /* define CAML_NAME_SPACE to ensure all the caml imports are prefixed with * 'caml_' */ #define CAML_NAME_SPACE #include <caml/mlvalues.h> #include <caml/callback.h> #include <caml/memory.h> #include <caml/alloc.h> #include <caml/fail.h> #include <caml/unixsupport.h> #include <caml/intext.h> #ifdef _WIN32 # include <windows.h> #else # include <fcntl.h> # include <pthread.h> # include <signal.h> # include <stdint.h> # include <stdio.h> # include <string.h> # include <sys/errno.h> # include <sys/mman.h> # include <sys/resource.h> # include <sys/stat.h> # include <sys/syscall.h> # include <sys/types.h> # include <unistd.h> #endif #include <inttypes.h> #include <lz4.h> #include <sys/time.h> #include <time.h> #include <zstd.h> #include "dictionary_data.h" // Some OCaml utility functions (introduced only in 4.12.0) // // TODO(hverr): Remove these when we move to 4.12.0 static value hh_shared_caml_alloc_some(value v) { CAMLparam1(v); value some = caml_alloc_small(1, 0); Store_field(some, 0, v); CAMLreturn(some); } # define Val_none Val_int(0) #include "hh_assert.h" #define UNUSED(x) \ ((void)(x)) #define UNUSED1 UNUSED #define UNUSED2(a, b) \ (UNUSED(a), UNUSED(b)) #define UNUSED3(a, b, c) \ (UNUSED(a), UNUSED(b), UNUSED(c)) #define UNUSED4(a, b, c, d) \ (UNUSED(a), UNUSED(b), UNUSED(c), UNUSED(d)) #define UNUSED5(a, b, c, d, e) \ (UNUSED(a), UNUSED(b), UNUSED(c), UNUSED(d), UNUSED(e)) // Ideally these would live in a handle.h file but our internal build system // can't support that at the moment. 
These are shared with handle_stubs.c #ifdef _WIN32 # define Val_handle(fd) (win_alloc_handle(fd)) #else # define Handle_val(fd) (Long_val(fd)) # define Val_handle(fd) (Val_long(fd)) #endif #define HASHTBL_WRITE_IN_PROGRESS ((heap_entry_t*)1) /**************************************************************************** * Quoting the linux manpage: memfd_create() creates an anonymous file * and returns a file descriptor that refers to it. The file behaves * like a regular file, and so can be modified, truncated, * memory-mapped, and so on. However, unlike a regular file, it lives * in RAM and has a volatile backing storage. Once all references to * the file are dropped, it is automatically released. Anonymous * memory is used for all backing pages of the file. Therefore, files * created by memfd_create() have the same semantics as other * anonymous memory allocations such as those allocated using mmap(2) * with the MAP_ANONYMOUS flag. The memfd_create() system call first * appeared in Linux 3.17. ****************************************************************************/ #ifdef __linux__ # define MEMFD_CREATE 1 // glibc only added support for memfd_create in version 2.27. # ifndef MFD_CLOEXEC // Linux version for the architecture must support syscall // memfd_create # ifndef SYS_memfd_create # if defined(__x86_64__) # define SYS_memfd_create 319 # elif defined(__aarch64__) # define SYS_memfd_create 385 # else # error "hh_shared.c requires an architecture that supports memfd_create" # endif //#if defined(__x86_64__) # endif //#ifndef SYS_memfd_create # include <asm/unistd.h> /* Originally this function would call uname(), parse the linux * kernel release version and make a decision based on whether * the kernel was >= 3.17 or not. However, syscall will return -1 * with an strerr(errno) of "Function not implemented" if the * kernel is < 3.17, and that's good enough. 
*/ static int memfd_create(const char *name, unsigned int flags) { return syscall(SYS_memfd_create, name, flags); } # endif // #ifndef MFD_CLOEXEC #endif //#ifdef __linux__ #ifndef MAP_NORESERVE // This flag was unimplemented in FreeBSD and then later removed # define MAP_NORESERVE 0 #endif // The following 'typedef' won't be required anymore // when dropping support for OCaml < 4.03 #ifdef __MINGW64__ typedef unsigned __int32 uint32_t; typedef unsigned __int64 uint64_t; #endif #ifdef _WIN32 static int win32_getpagesize(void) { SYSTEM_INFO siSysInfo; GetSystemInfo(&siSysInfo); return siSysInfo.dwPageSize; } # define getpagesize win32_getpagesize #endif /*****************************************************************************/ /* API to shmffi */ /*****************************************************************************/ extern void shmffi_init(void* mmap_address, size_t file_size, ssize_t max_evictable_bytes); extern void shmffi_attach(void* mmap_address, size_t file_size); extern value shmffi_add(_Bool evictable, uint64_t hash, value data); extern value shmffi_mem(uint64_t hash); extern value shmffi_get_and_deserialize(uint64_t hash); extern value shmffi_mem_status(uint64_t hash); extern value shmffi_get_size(uint64_t hash); extern void shmffi_move(uint64_t hash1, uint64_t hash2); extern value shmffi_remove(uint64_t hash); extern value shmffi_allocated_bytes(); extern value shmffi_num_entries(); extern value shmffi_add_raw(uint64_t hash, value data); extern value shmffi_get_raw(uint64_t hash); extern value shmffi_deserialize_raw(value data); extern value shmffi_serialize_raw(value data); /*****************************************************************************/ /* Config settings (essentially constants, so they don't need to live in shared * memory), initialized in hh_shared_init */ /*****************************************************************************/ /* Convention: .*_b = Size in bytes. 
*/ static size_t global_size_b; static size_t global_size; static size_t heap_size; static size_t hash_table_pow; static size_t shm_use_sharded_hashtbl; static ssize_t shm_cache_size_b; /* Used for the shared hashtable */ static uint64_t hashtbl_size; static size_t hashtbl_size_b; /* Used for worker-local data */ static size_t locals_size_b; typedef enum { KIND_STRING = 1, KIND_SERIALIZED = !KIND_STRING } storage_kind; /* Too lazy to use getconf */ #define CACHE_LINE_SIZE (1 << 6) #define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask)) #define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1) #define CACHE_ALIGN(x) ALIGN(x,CACHE_LINE_SIZE) /* Align heap entries on 64-bit boundaries */ #define HEAP_ALIGN(x) ALIGN(x,8) /* Fix the location of our shared memory so we can save and restore the * hashtable easily */ #ifdef _WIN32 /* We have to set differently our shared memory location on Windows. */ # define SHARED_MEM_INIT ((char *) 0x48047e00000ll) #elif defined __aarch64__ /* CentOS 7.3.1611 kernel does not support a full 48-bit VA space, so choose a * value low enough that the 100 GB's mmapped in do not interfere with anything * growing down from the top. 1 << 36 works. 
*/ # define SHARED_MEM_INIT ((char *) 0x1000000000ll) # define SHARDED_HASHTBL_MEM_ADDR ((char *) 0x2000000000ll) # define SHARDED_HASHTBL_MEM_SIZE ((size_t)100 * 1024 * 1024 * 1024) #else # define SHARED_MEM_INIT ((char *) 0x500000000000ll) # define SHARDED_HASHTBL_MEM_ADDR ((char *) 0x510000000000ll) # define SHARDED_HASHTBL_MEM_SIZE ((size_t)200 * 1024 * 1024 * 1024) #endif /* As a sanity check when loading from a file */ static const uint64_t MAGIC_CONSTANT = 0xfacefacefaceb000ull; /* The VCS identifier (typically a git hash) of the build */ extern const char* const BuildInfo_kRevision; /*****************************************************************************/ /* Types */ /*****************************************************************************/ /* Per-worker data which can be quickly updated non-atomically. Will be placed * in cache-aligned array in the first few pages of shared memory, indexed by * worker id. */ typedef struct { uint64_t counter; } local_t; // Every heap entry starts with a 64-bit header with the following layout: // // 6 3 3 3 0 0 // 3 3 2 1 1 0 // +----------------------------------+-+-----------------------------------+-+ // |11111111 11111111 11111111 1111111|0| 11111111 11111111 11111111 1111111|1| // +----------------------------------+-+-----------------------------------+-+ // | | | | // | | | * 0 tag // | | | // | | * 31-1 uncompressed size (0 if uncompressed) // | | // | * 32 kind (0 = serialized, 1 = string) // | // * 63-33 size of heap entry // // The tag bit is always 1 and is used to differentiate headers from pointers // during garbage collection (see hh_collect). typedef uint64_t hh_header_t; #define Entry_size(x) ((x) >> 33) #define Entry_kind(x) (((x) >> 32) & 1) #define Entry_uncompressed_size(x) (((x) >> 1) & 0x7FFFFFFF) #define Heap_entry_total_size(header) sizeof(heap_entry_t) + Entry_size(header) /* Shared memory structures. hh_shared.h typedefs this to heap_entry_t. 
*/ typedef struct { hh_header_t header; char data[]; } heap_entry_t; /* Cells of the Hashtable */ typedef struct { uint64_t hash; heap_entry_t* addr; } helt_t; /*****************************************************************************/ /* Globals */ /*****************************************************************************/ /* Total size of allocated shared memory */ static size_t shared_mem_size = 0; /* Beginning of shared memory */ static char* shared_mem = NULL; /* ENCODING: The first element is the size stored in bytes, the rest is * the data. The size is set to zero when the storage is empty. */ static value* global_storage = NULL; /* The hashtable containing the shared values. */ static helt_t* hashtbl = NULL; /* The number of nonempty slots in the hashtable. A nonempty slot has a * non-zero hash. We never clear hashes so this monotonically increases */ static uint64_t* hcounter = NULL; /* The number of nonempty filled slots in the hashtable. A nonempty filled slot * has a non-zero hash AND a non-null addr. It increments when we write data * into a slot with addr==NULL and decrements when we clear data from a slot */ static uint64_t* hcounter_filled = NULL; /* A counter increasing globally across all forks. */ static uintptr_t* counter = NULL; /* Each process reserves a range of values at a time from the shared counter. * Should be a power of two for more efficient modulo calculation. */ #define COUNTER_RANGE 2048 /* Logging level for shared memory statistics * 0 = nothing * 1 = log totals, averages, min, max bytes marshalled and unmarshalled */ static size_t* log_level = NULL; static double* sample_rate = NULL; static size_t* compression = NULL; static size_t* workers_should_exit = NULL; static size_t* allow_removes = NULL; /* Worker-local storage is cache line aligned. 
*/ static char* locals; #define LOCAL(id) ((local_t *)(locals + id * CACHE_ALIGN(sizeof(local_t)))) /* This should only be used before forking */ static uintptr_t early_counter = 0; /* The top of the heap */ static char** heap = NULL; /* Useful to add assertions */ static pid_t* master_pid = NULL; static pid_t my_pid = 0; static size_t num_workers; /* This is a process-local value. The master process is 0, workers are numbered * starting at 1. This is an offset into the worker local values in the heap. */ static size_t worker_id; static size_t allow_hashtable_writes_by_current_process = 1; static size_t worker_can_exit = 1; /* Where the heap started (bottom) */ static char* heap_init = NULL; /* Where the heap will end (top) */ static char* heap_max = NULL; static size_t* wasted_heap_size = NULL; static size_t used_heap_size(void) { return *heap - heap_init; } static long removed_count = 0; static ZSTD_CCtx* zstd_cctx = NULL; static ZSTD_DCtx* zstd_dctx = NULL; /* Expose so we can display diagnostics */ CAMLprim value hh_used_heap_size(void) { if (shm_use_sharded_hashtbl) { return shmffi_allocated_bytes(); } return Val_long(used_heap_size()); } /* Part of the heap not reachable from hashtable entries. Can be reclaimed with * hh_collect. */ CAMLprim value hh_wasted_heap_size(void) { // TODO(hverr): Support sharded hash tables assert(wasted_heap_size != NULL); return Val_long(*wasted_heap_size); } CAMLprim value hh_log_level(void) { return Val_long(*log_level); } CAMLprim value hh_sample_rate(void) { CAMLparam0(); CAMLreturn(caml_copy_double(*sample_rate)); } CAMLprim value hh_hash_used_slots(void) { // TODO(hverr): For some reason this returns a tuple. // Fix this when the migration is complete. 
CAMLparam0(); CAMLlocal2(connector, num_entries); connector = caml_alloc_tuple(2); if (shm_use_sharded_hashtbl) { num_entries = shmffi_num_entries(); Store_field(connector, 0, num_entries); Store_field(connector, 1, num_entries); } else { Store_field(connector, 0, Val_long(*hcounter_filled)); Store_field(connector, 1, Val_long(*hcounter)); } CAMLreturn(connector); } CAMLprim value hh_hash_slots(void) { CAMLparam0(); if (shm_use_sharded_hashtbl) { // In the sharded hash table implementation, we dynamically resize // the tables. As such, this doesn't really make sense. Return the // number of entries for now. CAMLreturn(shmffi_num_entries()); } CAMLreturn(Val_long(hashtbl_size)); } #ifdef _WIN32 static struct timeval log_duration(const char *prefix, struct timeval start_t) { return start_t; // TODO } #else static struct timeval log_duration(const char *prefix, struct timeval start_t) { struct timeval end_t = {0}; gettimeofday(&end_t, NULL); time_t secs = end_t.tv_sec - start_t.tv_sec; suseconds_t usecs = end_t.tv_usec - start_t.tv_usec; double time_taken = secs + ((double)usecs / 1000000); fprintf(stderr, "%s took %.2lfs\n", prefix, time_taken); return end_t; } #endif #ifdef _WIN32 static HANDLE memfd; /************************************************************************** * We create an anonymous memory file, whose `handle` might be * inherited by subprocesses. * * This memory file is tagged "reserved" but not "committed". This * means that the memory space will be reserved in the virtual memory * table but the pages will not be bound to any physical memory * yet. Further calls to 'VirtualAlloc' will "commit" pages, meaning * they will be bound to physical memory. * * This is behavior that should reflect the 'MAP_NORESERVE' flag of * 'mmap' on Unix. But, on Unix, the "commit" is implicit. * * Committing the whole shared heap at once would require the same * amount of free space in memory (or in swap file). 
 **************************************************************************/

/* Windows: create the pagefile-backed, inheritable mapping for the shared
 * heap. SEC_RESERVE matches the Unix MAP_NORESERVE behavior described above;
 * pages are committed later via VirtualAlloc (see win_reserve below). */
static void memfd_init(const char *shm_dir, size_t shared_mem_size, uint64_t minimum_avail) {
  memfd = CreateFileMapping(
    INVALID_HANDLE_VALUE,
    NULL,
    PAGE_READWRITE | SEC_RESERVE,
    shared_mem_size >> 32,                 // size, high 32 bits
    shared_mem_size & ((1ll << 32) - 1),   // size, low 32 bits
    NULL);
  if (memfd == NULL) {
    win32_maperr(GetLastError());
    uerror("CreateFileMapping", Nothing);
  }
  // Make the handle inheritable so spawned workers can reconnect.
  if (!SetHandleInformation(memfd, HANDLE_FLAG_INHERIT, HANDLE_FLAG_INHERIT)) {
    win32_maperr(GetLastError());
    uerror("SetHandleInformation", Nothing);
  }
}

#else

// File descriptors of the two shared memory files; -1 until memfd_init runs.
static int memfd_shared_mem = -1;
static int memfd_shmffi = -1;

/* Raises the OCaml exception registered as "failed_anonymous_memfd_init". */
static void raise_failed_anonymous_memfd_init(void) {
  static const value *exn = NULL;
  if (!exn) exn = caml_named_value("failed_anonymous_memfd_init");
  caml_raise_constant(*exn);
}

/* Raises "less_than_minimum_available", carrying the observed free space. */
static void raise_less_than_minimum_available(uint64_t avail) {
  value arg;
  static const value *exn = NULL;
  if (!exn) exn = caml_named_value("less_than_minimum_available");
  arg = Val_long(avail);
  caml_raise_with_arg(*exn, arg);
}

#include <sys/statvfs.h>

/* Checks that the filesystem holding shm_dir has at least minimum_avail bytes
 * free; raises an OCaml exception otherwise. */
static void assert_avail_exceeds_minimum(const char *shm_dir, uint64_t minimum_avail) {
  struct statvfs stats;
  uint64_t avail;
  if (statvfs(shm_dir, &stats)) {
    uerror("statvfs", caml_copy_string(shm_dir));
  }
  avail = stats.f_bsize * stats.f_bavail;
  if (avail < minimum_avail) {
    raise_less_than_minimum_available(avail);
  }
}

/* Creates one shared memory file of shared_mem_size bytes and returns its fd.
 * With shm_dir == NULL, anonymous mechanisms are tried (memfd_create where
 * available, then shm_open on macOS); otherwise mkstemp/unlink in shm_dir,
 * after optionally checking free space against minimum_avail. */
static int memfd_create_helper(const char *name, const char *shm_dir, size_t shared_mem_size, uint64_t minimum_avail) {
  int memfd = -1;

  if (shm_dir == NULL) {
    // This means that we should try to use the anonymous-y system calls
#if defined(MEMFD_CREATE)
    memfd = memfd_create(name, 0);
#endif
#if defined(__APPLE__)
    if (memfd < 0) {
      char memname[255];
      snprintf(memname, sizeof(memname), "/%s.%d", name, getpid());
      // the ftruncate below will fail with errno EINVAL if you try to
      // ftruncate the same sharedmem fd more than once. We're seeing this in
      // some tests, which might imply that two flow processes with the same
      // pid are starting up. This shm_unlink should prevent that from
      // happening. Here's a stackoverflow about it
      // http://stackoverflow.com/questions/25502229/ftruncate-not-working-on-posix-shared-memory-in-mac-os-x
      shm_unlink(memname);
      memfd = shm_open(memname, O_CREAT | O_RDWR, 0666);
      if (memfd < 0) {
        uerror("shm_open", Nothing);
      }

      // shm_open sets FD_CLOEXEC automatically. This is undesirable, because
      // we want this fd to be open for other processes, so that they can
      // reconnect to the shared memory.
      int fcntl_flags = fcntl(memfd, F_GETFD);
      if (fcntl_flags == -1) {
        printf("Error with fcntl(memfd): %s\n", strerror(errno));
        uerror("fcntl", Nothing);
      }
      // Unset close-on-exec
      fcntl(memfd, F_SETFD, fcntl_flags & ~FD_CLOEXEC);
    }
#endif
    if (memfd < 0) {
      raise_failed_anonymous_memfd_init();
    }
  } else {
    if (minimum_avail > 0) {
      assert_avail_exceeds_minimum(shm_dir, minimum_avail);
    }
    if (memfd < 0) {
      char template[1024];
      if (!snprintf(template, 1024, "%s/%s-XXXXXX", shm_dir, name)) {
        uerror("snprintf", Nothing);
      };
      memfd = mkstemp(template);
      if (memfd < 0) {
        uerror("mkstemp", caml_copy_string(template));
      }
      // Unlink immediately: the file lives on only through the open fd.
      unlink(template);
    }
  }
  // Size the file; the actual pages are only faulted in / reserved on demand.
  if(ftruncate(memfd, shared_mem_size) == -1) {
    uerror("ftruncate", Nothing);
  }
  return memfd;
}

/**************************************************************************
 * The memdfd_init function creates a anonymous memory file that might
 * be inherited by `Daemon.spawned` processus (contrary to a simple
 * anonymous mmap).
 *
 * The preferred mechanism is memfd_create(2) (see the upper
 * description). Then we try shm_open(2) (on Apple OS X). As a safe fallback,
 * we use `mkstemp/unlink`.
 *
 * mkstemp is preferred over shm_open on Linux as it allows to
 * choose another directory that `/dev/shm` on system where this
 * partition is too small (e.g. the Travis containers).
 *
 * The resulting file descriptor should be mmaped with the memfd_map
 * function (see below).
 ****************************************************************************/

/* Unix: create the main shared-heap file, plus the sharded-hashtable file
 * when that feature is enabled. */
static void memfd_init(const char *shm_dir, size_t shared_mem_size, uint64_t minimum_avail) {
  memfd_shared_mem = memfd_create_helper("fb_heap", shm_dir, shared_mem_size, minimum_avail);
  if (shm_use_sharded_hashtbl) {
    memfd_shmffi = memfd_create_helper("fb_sharded_hashtbl", shm_dir, SHARDED_HASHTBL_MEM_SIZE, 0);
  }
}

#endif

/*****************************************************************************/
/* Given a pointer to the shared memory address space, initializes all
 * the globals that live in shared memory.
 */
/*****************************************************************************/

#ifdef _WIN32

/* Maps the memory object at the fixed address mem_addr; errors out if the OS
 * could not honor the requested address. */
static char *memfd_map(HANDLE memfd, char *mem_addr, size_t shared_mem_size) {
  char *mem = NULL;
  mem = MapViewOfFileEx(
    memfd,
    FILE_MAP_ALL_ACCESS,
    0, 0, 0,
    (char *)mem_addr);
  if (mem != mem_addr) {
    win32_maperr(GetLastError());
    uerror("MapViewOfFileEx", Nothing);
  }
  return mem;
}

#else

/* Maps the memfd file at the fixed address mem_addr (MAP_FIXED, so every
 * process sees the segment at the same address). Exits the process outright
 * on failure, since nothing can proceed without the shared heap. */
static char *memfd_map(int memfd, char *mem_addr, size_t shared_mem_size) {
  char *mem = NULL;
  /* MAP_NORESERVE is because we want a lot more virtual memory than what
   * we are actually going to use.
   */
  int flags = MAP_SHARED | MAP_NORESERVE | MAP_FIXED;
  int prot = PROT_READ | PROT_WRITE;
  mem = (char*)mmap((void *)mem_addr, shared_mem_size, prot, flags, memfd, 0);
  if(mem == MAP_FAILED) {
    printf("Error initializing: %s\n", strerror(errno));
    exit(2);
  }
  return mem;
}

#endif

/****************************************************************************
 * The function memfd_reserve force allocation of (mem -> mem+sz) in
 * the shared heap. This is mandatory on Windows. This is optional on
 * Linux but it allows to have explicit "Out of memory" error
 * messages. Otherwise, the kernel might terminate the process with
 * `SIGBUS`.
 ****************************************************************************/

/* Raises the OCaml exception registered as "out_of_shared_memory". */
static void raise_out_of_shared_memory(void) {
  static const value *exn = NULL;
  if (!exn) exn = caml_named_value("out_of_shared_memory");
  caml_raise_constant(*exn);
}

#ifdef _WIN32

/* Reserves memory. This is required on Windows */
static void win_reserve(char * mem, size_t sz) {
  if (!VirtualAlloc(mem, sz, MEM_COMMIT, PAGE_READWRITE)) {
    win32_maperr(GetLastError());
    raise_out_of_shared_memory();
  }
}

/* On Linux, memfd_reserve is only used to reserve memory that is mmap'd to the
 * memfd file. Memory outside of that mmap does not need to be reserved, so we
 * don't call memfd_reserve on things like the temporary mmap used by
 * hh_collect. Instead, they use win_reserve() */
static void memfd_reserve(int memfd, char * mem, size_t sz) {
  (void)memfd;
  win_reserve(mem, sz);
}

#elif defined(__APPLE__)

/* So OSX lacks fallocate, but in general you can do
 * fcntl(fd, F_PREALLOCATE, &store)
 * however it doesn't seem to work for a shm_open fd, so this function is
 * currently a no-op. This means that our OOM handling for OSX is a little
 * weaker than the other OS's */
static void memfd_reserve(int memfd, char * mem, size_t sz) {
  (void)memfd;
  (void)mem;
  (void)sz;
}

#else

/* Forces physical backing for [mem, mem+sz) via posix_fallocate, retrying on
 * EINTR; raises out_of_shared_memory if the kernel cannot satisfy it. */
static void memfd_reserve(int memfd, char *mem, size_t sz) {
  off_t offset = (off_t)(mem - shared_mem);
  int err;
  do {
    err = posix_fallocate(memfd, offset, sz);
  } while (err == EINTR);
  if (err) {
    raise_out_of_shared_memory();
  }
}

#endif

// DON'T WRITE TO THE SHARED MEMORY IN THIS FUNCTION!!! This function just
// calculates where the memory is and sets local globals. The shared memory
// might not be ready for writing yet!
// If you want to initialize a bit of
// shared memory, check out init_shared_globals

/* Computes the process-local pointers into the shared segment (heap top
 * pointer, counters, per-worker locals, global storage, hashtable, and heap
 * bounds). Pure address arithmetic: nothing is written to shared memory. */
static void define_globals(char * shared_mem_init) {
  size_t page_size = getpagesize();
  char *mem = shared_mem_init;

  // Beginning of the shared memory
  shared_mem = mem;

#ifdef MADV_DONTDUMP
  // We are unlikely to get much useful information out of the shared heap in
  // a core file. Moreover, it can be HUGE, and the extensive work done dumping
  // it once for each CPU can mean that the user will reboot their machine
  // before the much more useful stack gets dumped!
  madvise(shared_mem, shared_mem_size, MADV_DONTDUMP);
#endif

  /* BEGINNING OF THE SMALL OBJECTS PAGE
   * We keep all the small objects in this page.
   * They are on different cache lines because we modify them atomically.
   */

  /* The pointer to the top of the heap.
   * We will atomically increment *heap every time we want to allocate.
   */
  heap = (char**)mem;

  // The number of elements in the hashtable
  assert(CACHE_LINE_SIZE >= sizeof(uint64_t));
  hcounter = (uint64_t*)(mem + CACHE_LINE_SIZE);

  assert (CACHE_LINE_SIZE >= sizeof(uintptr_t));
  counter = (uintptr_t*)(mem + 2*CACHE_LINE_SIZE);

  assert (CACHE_LINE_SIZE >= sizeof(pid_t));
  master_pid = (pid_t*)(mem + 3*CACHE_LINE_SIZE);

  assert (CACHE_LINE_SIZE >= sizeof(size_t));
  log_level = (size_t*)(mem + 4*CACHE_LINE_SIZE);

  assert (CACHE_LINE_SIZE >= sizeof(double));
  sample_rate = (double*)(mem + 5*CACHE_LINE_SIZE);

  assert (CACHE_LINE_SIZE >= sizeof(size_t));
  compression = (size_t*)(mem + 6*CACHE_LINE_SIZE);

  assert (CACHE_LINE_SIZE >= sizeof(size_t));
  workers_should_exit = (size_t*)(mem + 7*CACHE_LINE_SIZE);

  assert (CACHE_LINE_SIZE >= sizeof(size_t));
  wasted_heap_size = (size_t*)(mem + 8*CACHE_LINE_SIZE);

  assert (CACHE_LINE_SIZE >= sizeof(size_t));
  allow_removes = (size_t*)(mem + 9*CACHE_LINE_SIZE);

  assert (CACHE_LINE_SIZE >= sizeof(size_t));
  hcounter_filled = (size_t*)(mem + 10*CACHE_LINE_SIZE);

  mem += page_size;
  // Just checking that the page is large enough.
  assert(page_size > 11*CACHE_LINE_SIZE + (int)sizeof(int));

  assert (CACHE_LINE_SIZE >= sizeof(local_t));
  locals = mem;
  mem += locals_size_b;
  /* END OF THE SMALL OBJECTS PAGE */

  /* Global storage initialization */
  global_storage = (value*)mem;
  mem += global_size_b;

  /* Hashtable */
  hashtbl = (helt_t*)mem;
  mem += hashtbl_size_b;

  /* Heap */
  heap_init = mem;
  heap_max = heap_init + heap_size;

#ifdef _WIN32
  /* Reserve all memory space except the "huge" `global_size_b`. This is
   * required for Windows but we don't do this for Linux since it lets us run
   * more processes in parallel without running out of memory immediately
   * (though we do risk it later on) */
  memfd_reserve(memfd_shared_mem, (char *)global_storage, sizeof(global_storage[0]));
  memfd_reserve(memfd_shared_mem, (char *)heap, heap_init - (char *)heap);
#endif
}

/* The total size of the shared memory. Most of it is going to remain
 * virtual. */
static size_t get_shared_mem_size(void) {
  size_t page_size = getpagesize();
  return (global_size_b + hashtbl_size_b + heap_size + page_size + locals_size_b);
}

// Must be called AFTER init_shared_globals / define_globals
// once per process, during hh_shared_init / hh_connect
static void init_zstd_compression() {
  // if use ZSTD
  if (*compression) {
    /* The resources below (dictionaries, contexts) technically leak,
     * we don't free them as there is no proper API from workers.
     * However, they are in use till the end of the process live.
*/
    zstd_cctx = ZSTD_createCCtx();
    zstd_dctx = ZSTD_createDCtx();
    {
      // Compression context: attach the embedded dictionary at the configured
      // compression level (*compression).
      ZSTD_CDict* zstd_cdict = ZSTD_createCDict(dictionary_data, dictionary_data_len, *compression);
      const size_t result = ZSTD_CCtx_refCDict(zstd_cctx, zstd_cdict);
      assert(!ZSTD_isError(result));
    }
    {
      // Decompression context: attach the matching dictionary.
      ZSTD_DDict* zstd_ddict = ZSTD_createDDict(dictionary_data, dictionary_data_len);
      const size_t result = ZSTD_DCtx_refDDict(zstd_dctx, zstd_ddict);
      assert(!ZSTD_isError(result));
    }
  }
}

/* Writes the initial values of all shared globals. Master only; called once
 * from hh_shared_init after define_globals has computed the addresses. */
static void init_shared_globals(
  size_t config_log_level,
  double config_sample_rate,
  size_t config_compression
) {
  // Initial size is zero for global storage is zero
  global_storage[0] = 0;
  // Initialize the number of element in the table
  *hcounter = 0;
  *hcounter_filled = 0;
  // Ensure the global counter starts on a COUNTER_RANGE boundary
  *counter = ALIGN(early_counter + 1, COUNTER_RANGE);
  *log_level = config_log_level;
  *sample_rate = config_sample_rate;
  *compression = config_compression;
  *workers_should_exit = 0;
  *wasted_heap_size = 0;
  *allow_removes = 1;

  // Reset every per-process counter slot (master + each worker).
  for (uint64_t i = 0; i <= num_workers; i++) {
    LOCAL(i)->counter = 0;
  }

  // Initialize top heap pointers
  *heap = heap_init;
}

/* Derives all size globals (heap, hashtable, locals page, total mapping size)
 * from the user-supplied configuration. */
static void set_sizes(
  uint64_t config_global_size,
  uint64_t config_heap_size,
  uint64_t config_hash_table_pow,
  uint64_t config_num_workers) {

  size_t page_size = getpagesize();

  global_size = config_global_size;
  global_size_b = sizeof(global_storage[0]) + config_global_size;
  heap_size = config_heap_size;
  hash_table_pow = config_hash_table_pow;

  hashtbl_size = 1ul << config_hash_table_pow;
  hashtbl_size_b = hashtbl_size * sizeof(hashtbl[0]);

  // We will allocate a cache line for the master process and each worker
  // process, then pad that out to the nearest page.
  num_workers = config_num_workers;
  locals_size_b = ALIGN((1 + num_workers) * CACHE_LINE_SIZE, page_size);

  shared_mem_size = get_shared_mem_size();
}

/*****************************************************************************/
/* Must be called by the master BEFORE forking the workers! */
/*****************************************************************************/

CAMLprim value hh_shared_init(
  value config_val,
  value shm_dir_val,
  value num_workers_val
) {
  CAMLparam3(config_val, shm_dir_val, num_workers_val);
  CAMLlocal4(
    config_global_size_val,
    config_heap_size_val,
    config_hash_table_pow_val,
    config_shm_use_sharded_hashtbl
  );
  CAMLlocal1(
    config_shm_cache_size
  );

  // Unpack the OCaml config record by field index.
  config_global_size_val = Field(config_val, 0);
  config_heap_size_val = Field(config_val, 1);
  config_hash_table_pow_val = Field(config_val, 2);
  config_shm_use_sharded_hashtbl = Field(config_val, 3);
  config_shm_cache_size = Field(config_val, 4);

  set_sizes(
    Long_val(config_global_size_val),
    Long_val(config_heap_size_val),
    Long_val(config_hash_table_pow_val),
    Long_val(num_workers_val)
  );
  shm_use_sharded_hashtbl = Bool_val(config_shm_use_sharded_hashtbl);
  shm_cache_size_b = Long_val(config_shm_cache_size);

  // None -> NULL
  // Some str -> String_val(str)
  const char *shm_dir = NULL;
  if (shm_dir_val != Val_int(0)) {
    shm_dir = String_val(Field(shm_dir_val, 0));
  }

  memfd_init(
    shm_dir,
    shared_mem_size,
    Long_val(Field(config_val, 6))
  );
  assert(memfd_shared_mem >= 0);
  char *shared_mem_init = memfd_map(memfd_shared_mem, SHARED_MEM_INIT, shared_mem_size);
  define_globals(shared_mem_init);

  if (shm_use_sharded_hashtbl) {
    assert(memfd_shmffi >= 0);
    // The sharded table lives in its own mapping, above the main segment.
    assert(SHARED_MEM_INIT + shared_mem_size <= SHARDED_HASHTBL_MEM_ADDR);
    char *mem_addr = memfd_map(memfd_shmffi, SHARDED_HASHTBL_MEM_ADDR, SHARDED_HASHTBL_MEM_SIZE);
    shmffi_init(mem_addr, SHARDED_HASHTBL_MEM_SIZE, shm_cache_size_b);
  }

  // Keeping the pids around to make asserts.
#ifdef _WIN32
  *master_pid = 0;
  my_pid = *master_pid;
#else
  *master_pid = getpid();
  my_pid = *master_pid;
#endif

  init_shared_globals(
    Long_val(Field(config_val, 7)),
    Double_val(Field(config_val, 8)),
    Long_val(Field(config_val, 9))
  );
  init_zstd_compression();
  // Checking that we did the maths correctly.
assert(*heap + heap_size == shared_mem + shared_mem_size);

#ifndef _WIN32
  // Uninstall ocaml's segfault handler. It's supposed to throw an exception on
  // stack overflow, but we don't actually handle that exception, so what
  // happens in practice is we terminate at toplevel with an unhandled exception
  // and a useless ocaml backtrace. A core dump is actually more useful. Sigh.
  struct sigaction sigact = { 0 };
  sigact.sa_handler = SIG_DFL;
  sigemptyset(&sigact.sa_mask);
  sigact.sa_flags = 0;
  sigaction(SIGSEGV, &sigact, NULL);
#endif

  CAMLreturn(hh_get_handle());
}

/* Must be called by every worker before any operation is performed */
value hh_connect(value connector, value worker_id_val) {
  CAMLparam2(connector, worker_id_val);
  // Unpack the handle tuple produced by hh_get_handle in the master.
  memfd_shared_mem = Handle_val(Field(connector, 0));
  set_sizes(
    Long_val(Field(connector, 1)),
    Long_val(Field(connector, 2)),
    Long_val(Field(connector, 3)),
    Long_val(Field(connector, 4))
  );
  shm_use_sharded_hashtbl = Bool_val(Field(connector, 5));
  shm_cache_size_b = Long_val(Field(connector, 6));
  memfd_shmffi = Handle_val(Field(connector, 7));
  worker_id = Long_val(worker_id_val);
#ifdef _WIN32
  my_pid = 1; // Trick
#else
  my_pid = getpid();
#endif
  assert(memfd_shared_mem >= 0);
  char *shared_mem_init = memfd_map(memfd_shared_mem, SHARED_MEM_INIT, shared_mem_size);
  define_globals(shared_mem_init);
  init_zstd_compression();

  if (shm_use_sharded_hashtbl) {
    assert(memfd_shmffi >= 0);
    char *mem_addr = memfd_map(memfd_shmffi, SHARDED_HASHTBL_MEM_ADDR, SHARDED_HASHTBL_MEM_SIZE);
    shmffi_attach(mem_addr, SHARDED_HASHTBL_MEM_SIZE);
  }

  CAMLreturn(Val_unit);
}

/* Can only be called after init or after earlier connect. */
value hh_get_handle(void) {
  CAMLparam0();
  CAMLlocal1(
      connector
  );
  // 8-field tuple; field order is mirrored by hh_connect above.
  connector = caml_alloc_tuple(8);
  Store_field(connector, 0, Val_handle(memfd_shared_mem));
  Store_field(connector, 1, Val_long(global_size));
  Store_field(connector, 2, Val_long(heap_size));
  Store_field(connector, 3, Val_long(hash_table_pow));
  Store_field(connector, 4, Val_long(num_workers));
  Store_field(connector, 5, Val_bool(shm_use_sharded_hashtbl));
  Store_field(connector, 6, Val_long(shm_cache_size_b));
  Store_field(connector, 7, Val_handle(memfd_shmffi));

  CAMLreturn(connector);
}

/*****************************************************************************/
/* Counter
 *
 * Provides a counter intended to be increasing over the lifetime of the program
 * including all forks. Uses a global variable until hh_shared_init is called,
 * so it's safe to use in the early init stages of the program (as long as you
 * fork after hh_shared_init of course). Wraps around at the maximum value of an
 * ocaml int, which is something like 30 or 62 bits on 32 and 64-bit
 * architectures respectively.
 */
/*****************************************************************************/

CAMLprim value hh_counter_next(void) {
  CAMLparam0();
  CAMLlocal1(result);

  uintptr_t v = 0;
  if (counter) {
    // Shared-memory mode: each process claims COUNTER_RANGE ids at a time
    // from the global counter, then hands them out from its local slot.
    v = LOCAL(worker_id)->counter;
    if (v % COUNTER_RANGE == 0) {
      v = __atomic_fetch_add(counter, COUNTER_RANGE, __ATOMIC_RELAXED);
    }
    ++v;
    LOCAL(worker_id)->counter = v;
  } else {
    // Early-init mode: shared memory is not mapped yet.
    v = ++early_counter;
  }

  result = Val_long(v % Max_long); // Wrap around.
  CAMLreturn(result);
}

/*****************************************************************************/
/* There are a bunch of operations that only the designated master thread is
 * allowed to do.
 * This assert will fail if the current process is not the master
 * process
 */
/*****************************************************************************/

static void assert_master(void) {
  assert(my_pid == *master_pid);
}

static void assert_not_master(void) {
  assert(my_pid != *master_pid);
}

/* Removals are only legal while *allow_removes is set
 * (see hh_set_allow_removes below). */
static void assert_allow_removes(void) {
  assert(*allow_removes);
}

static void assert_allow_hashtable_writes_by_current_process(void) {
  assert(allow_hashtable_writes_by_current_process);
}

CAMLprim value hh_assert_master(void) {
  CAMLparam0();
  assert_master();
  CAMLreturn(Val_unit);
}

/*****************************************************************************/

/* Master-only: asks workers to raise worker_should_exit at their next
 * check_should_exit call. */
CAMLprim value hh_stop_workers(void) {
  CAMLparam0();
  assert_master();
  *workers_should_exit = 1;
  CAMLreturn(Val_unit);
}

/* Master-only: clears the stop flag set by hh_stop_workers. */
CAMLprim value hh_resume_workers(void) {
  CAMLparam0();
  assert_master();
  *workers_should_exit = 0;
  CAMLreturn(Val_unit);
}

CAMLprim value hh_set_can_worker_stop(value val) {
  CAMLparam1(val);
  worker_can_exit = Bool_val(val);
  CAMLreturn(Val_unit);
}

CAMLprim value hh_set_allow_removes(value val) {
  CAMLparam1(val);
  *allow_removes = Bool_val(val);
  CAMLreturn(Val_unit);
}

CAMLprim value hh_set_allow_hashtable_writes_by_current_process(value val) {
  CAMLparam1(val);
  allow_hashtable_writes_by_current_process = Bool_val(val);
  CAMLreturn(Val_unit);
}

/* Raises worker_should_exit if the master has asked workers to stop; fails
 * hard if shared memory was never initialized in this process. */
static void check_should_exit(void) {
  if (workers_should_exit == NULL) {
    caml_failwith(
      "`check_should_exit` failed: `workers_should_exit` was uninitialized. "
      "Did you forget to call one of `hh_connect` or `hh_shared_init` "
      "to initialize shared memory before accessing it?"
    );
  } else if (*workers_should_exit) {
    static const value *exn = NULL;
    if (!exn) exn = caml_named_value("worker_should_exit");
    caml_raise_constant(*exn);
  }
}

CAMLprim value hh_check_should_exit (void) {
  CAMLparam0();
  check_should_exit();
  CAMLreturn(Val_unit);
}

/*****************************************************************************/
/* Global storage */
/*****************************************************************************/

/* Master-only: copies an OCaml string into the single global storage slot.
 * The slot must currently be empty (see hh_shared_clear). */
void hh_shared_store(value data) {
  CAMLparam1(data);
  size_t size = caml_string_length(data);

  assert_master();                // only the master can store
  assert(global_storage[0] == 0); // Is it clear?
  // Do we have enough space?
  assert(size < global_size_b - sizeof(global_storage[0]));

  global_storage[0] = size;
  memfd_reserve(memfd_shared_mem, (char *)&global_storage[1], size);
  memcpy(&global_storage[1], &Field(data, 0), size);

  CAMLreturn0;
}

/*****************************************************************************/
/* We are allocating ocaml values. The OCaml GC must know about them.
 * caml_alloc_string might trigger the GC, when that happens, the GC needs
 * to scan the stack to find the OCaml roots. The macros CAMLparam0 and
 * CAMLlocal1 register the roots.
 */
/*****************************************************************************/

/* Copies the global storage slot back out as a fresh OCaml string. */
CAMLprim value hh_shared_load(void) {
  CAMLparam0();
  CAMLlocal1(result);

  size_t size = global_storage[0];
  assert(size != 0);
  result = caml_alloc_string(size);
  memcpy(&Field(result, 0), &global_storage[1], size);

  CAMLreturn(result);
}

/* Master-only: marks the global storage slot as empty. */
void hh_shared_clear(void) {
  assert_master();
  global_storage[0] = 0;
}

/* True when the bump-pointer heap has grown past the reserved mapping. */
value hh_check_heap_overflow(void) {
  if (shm_use_sharded_hashtbl) {
    return Val_bool(0);
  }
  if (*heap >= shared_mem + shared_mem_size) {
    return Val_bool(1);
  }
  return Val_bool(0);
}

/*****************************************************************************/
/* We compact the heap when it gets twice as large as its initial size.
 * Step one, copy the live values in a new heap.
 * Step two, memcopy the values back into the shared heap.
 * We could probably use something smarter, but this is fast enough.
 *
 * The collector should only be called by the master.
 */
/*****************************************************************************/

CAMLprim value hh_collect(void) {
  if (shm_use_sharded_hashtbl != 0) {
    // The sharded table manages its own memory; nothing to compact here.
    return Val_unit;
  }
  // NOTE: explicitly do NOT call CAMLparam or any of the other functions/macros
  // defined in caml/memory.h .
  // This function takes a boolean and returns unit.
  // Those are both immediates in the OCaml runtime.
  assert_master();
  assert_allow_removes();

  // Step 1: Walk the hashtbl entries, which are the roots of our marking pass.

  for (size_t i = 0; i < hashtbl_size; i++) {

    // Skip empty slots
    if (hashtbl[i].addr == NULL) { continue; }

    // No workers should be writing at the moment. If a worker died in the
    // middle of a write, that is also very bad
    assert(hashtbl[i].addr != HASHTBL_WRITE_IN_PROGRESS);

    // The hashtbl addr will be wrong after we relocate the heap entry, but we
    // don't know where the heap entry will relocate to yet. We need to first
    // move the heap entry, then fix up the hashtbl addr.
    //
    // We accomplish this by storing the heap header in the now useless addr
    // field and storing a pointer to the addr field where the header used to
    // be. Then, after moving the heap entry, we can follow the pointer to
    // restore our original header and update the addr field to our relocated
    // address.
    //
    // This is all super unsafe and only works because we constrain the size of
    // an hh_header_t struct to the size of a pointer.

    // Location of the addr field (8 bytes) in the hashtable
    char **hashtbl_addr = (char **)&hashtbl[i].addr;

    // Location of the header (8 bytes) in the heap
    char *heap_addr = (char *)hashtbl[i].addr;

    // Swap
    hh_header_t header = *(hh_header_t *)heap_addr;
    *(hh_header_t *)hashtbl_addr = header;
    *(uintptr_t *)heap_addr = (uintptr_t)hashtbl_addr;
  }

  // Step 2: Walk the heap and relocate entries, updating the hashtbl to point
  // to relocated addresses.

  // Pointer to free space in the heap where moved values will move to.
  char *dest = heap_init;

  // Pointer that walks the heap from bottom to top.
  char *src = heap_init;

  size_t aligned_size;
  hh_header_t header;
  while (src < *heap) {
    if (*(uint64_t *)src & 1) {
      // If the lsb is set, this is a header. If it's a header, that means the
      // entry was not marked in the first pass and should be collected. Don't
      // move dest pointer, but advance src pointer to next heap entry.
      header = *(hh_header_t *)src;
      aligned_size = HEAP_ALIGN(Heap_entry_total_size(header));
    } else {
      // If the lsb is 0, this is a pointer to the addr field of the hashtable
      // element, which holds the header bytes. This entry is live.
      char *hashtbl_addr = *(char **)src;
      header = *(hh_header_t *)hashtbl_addr;
      aligned_size = HEAP_ALIGN(Heap_entry_total_size(header));

      // Fix the hashtbl addr field to point to our new location and restore the
      // heap header data temporarily stored in the addr field bits.
      *(uintptr_t *)hashtbl_addr = (uintptr_t)dest;
      *(hh_header_t *)src = header;

      // Move the entry as far to the left as possible.
      memmove(dest, src, aligned_size);
      dest += aligned_size;
    }

    src += aligned_size;
  }

  // TODO: Space between dest and *heap is unused, but will almost certainly
  // become used again soon. Currently we will never decommit, which may cause
  // issues when there is memory pressure.
// // If the kernel supports it, we might consider using madvise(MADV_FREE), // which allows the kernel to reclaim the memory lazily under pressure, but // would not force page faults under healthy operation. *heap = dest; *wasted_heap_size = 0; return Val_unit; } CAMLprim value hh_malloc_trim(void) { #ifdef MALLOC_TRIM malloc_trim(0); #endif return Val_unit; } static void raise_heap_full(void) { static const value *exn = NULL; if (!exn) exn = caml_named_value("heap_full"); caml_raise_constant(*exn); } /*****************************************************************************/ /* Allocates in the shared heap. The chunks are cache aligned. */ /*****************************************************************************/ static heap_entry_t* hh_alloc(hh_header_t header, /*out*/size_t *total_size) { // the size of this allocation needs to be kept in sync with wasted_heap_size // modification in hh_remove size_t slot_size = HEAP_ALIGN(Heap_entry_total_size(header)); *total_size = slot_size; char *chunk = __sync_fetch_and_add(heap, (char*) slot_size); if (chunk + slot_size > heap_max) { raise_heap_full(); } memfd_reserve(memfd_shared_mem, chunk, slot_size); ((heap_entry_t *)chunk)->header = header; return (heap_entry_t *)chunk; } /*****************************************************************************/ /* Serializes an ocaml value into an Ocaml raw heap_entry (bytes) */ /*****************************************************************************/ value hh_serialize_raw(value data) { CAMLparam1(data); CAMLlocal1(result); char* data_value = NULL; size_t size = 0; size_t uncompressed_size = 0; storage_kind kind = 0; if (shm_use_sharded_hashtbl != 0) { CAMLreturn(shmffi_serialize_raw(data)); } // If the data is an Ocaml string it is more efficient to copy its contents // directly instead of serializing it. 
if (Is_block(data) && Tag_val(data) == String_tag) { size = caml_string_length(data); data_value = malloc(size); memcpy(data_value, String_val(data), size); kind = KIND_STRING; } else { intnat serialized_size; // We are responsible for freeing the memory allocated by this function // After copying data_value we need to make sure to free data_value caml_output_value_to_malloc( data, Val_int(0)/*flags*/, &data_value, &serialized_size); assert(serialized_size >= 0); size = (size_t) serialized_size; kind = KIND_SERIALIZED; } // We limit the size of elements we will allocate to our heap to ~2GB assert(size < 0x80000000); size_t max_compression_size = 0; char* compressed_data = NULL; size_t compressed_size = 0; if (*compression) { max_compression_size = ZSTD_compressBound(size); compressed_data = malloc(max_compression_size); compressed_size = ZSTD_compress2(zstd_cctx, compressed_data, max_compression_size, data_value, size); } else { max_compression_size = LZ4_compressBound(size); compressed_data = malloc(max_compression_size); compressed_size = LZ4_compress_default( data_value, compressed_data, size, max_compression_size); } if (compressed_size != 0 && compressed_size < size) { uncompressed_size = size; size = compressed_size; } // Both size and uncompressed_size will certainly fit in 31 bits, as the // original size fits per the assert above and we check that the compressed // size is less than the original size. hh_header_t header = size << 33 | (uint64_t)kind << 32 | uncompressed_size << 1 | 1; size_t ocaml_size = Heap_entry_total_size(header); result = caml_alloc_string(ocaml_size); heap_entry_t *addr = (heap_entry_t *)Bytes_val(result); addr->header = header; memcpy(&addr->data, uncompressed_size ? compressed_data : data_value, size); free(compressed_data); // We temporarily allocate memory using malloc to serialize the Ocaml object. // When we have finished copying the serialized data we need to free the // memory we allocated to avoid a leak. 
free(data_value); CAMLreturn(result); } /*****************************************************************************/ /* Allocates an ocaml value in the shared heap. * Any ocaml value is valid, except closures. It returns the address of * the allocated chunk. */ /*****************************************************************************/ static heap_entry_t* hh_store_ocaml( value data, /*out*/size_t *alloc_size, /*out*/size_t *orig_size, /*out*/size_t *total_size ) { char* data_value = NULL; size_t size = 0; size_t uncompressed_size = 0; storage_kind kind = 0; // If the data is an Ocaml string it is more efficient to copy its contents // directly in our heap instead of serializing it. if (Is_block(data) && Tag_val(data) == String_tag) { size = caml_string_length(data); data_value = malloc(size); memcpy(data_value, String_val(data), size); kind = KIND_STRING; } else { intnat serialized_size; // We are responsible for freeing the memory allocated by this function // After copying data_value into our object heap we need to make sure to free // data_value caml_output_value_to_malloc( data, Val_int(0)/*flags*/, &data_value, &serialized_size); assert(serialized_size >= 0); size = (size_t) serialized_size; kind = KIND_SERIALIZED; } // We limit the size of elements we will allocate to our heap to ~2GB assert(size < 0x80000000); *orig_size = size; size_t max_compression_size = 0; char* compressed_data = NULL; size_t compressed_size = 0; if (*compression) { max_compression_size = ZSTD_compressBound(size); compressed_data = malloc(max_compression_size); compressed_size = ZSTD_compress2(zstd_cctx, compressed_data, max_compression_size, data_value, size); } else { max_compression_size = LZ4_compressBound(size); compressed_data = malloc(max_compression_size); compressed_size = LZ4_compress_default( data_value, compressed_data, size, max_compression_size); } if (compressed_size != 0 && compressed_size < size) { uncompressed_size = size; size = compressed_size; } *alloc_size 
= size; // Both size and uncompressed_size will certainly fit in 31 bits, as the // original size fits per the assert above and we check that the compressed // size is less than the original size. hh_header_t header = size << 33 | (uint64_t)kind << 32 | uncompressed_size << 1 | 1; heap_entry_t* addr = hh_alloc(header, total_size); memcpy(&addr->data, uncompressed_size ? compressed_data : data_value, size); free(compressed_data); // We temporarily allocate memory using malloc to serialize the Ocaml object. // When we have finished copying the serialized data into our heap we need // to free the memory we allocated to avoid a leak. free(data_value); return addr; } /*****************************************************************************/ /* Given an OCaml string, returns the 8 first bytes in an unsigned long. * The key is generated using MD5, but we only use the first 8 bytes because * it allows us to use atomic operations. */ /*****************************************************************************/ static uint64_t get_hash(value key) { return *((uint64_t*)String_val(key)); } CAMLprim value hh_get_hash_ocaml(value key) { return caml_copy_int64(*((uint64_t*)String_val(key))); } /*****************************************************************************/ /* Writes the data in one of the slots of the hashtable. There might be * concurrent writers, when that happens, the first writer wins. * * Returns a tuple (compressed_size, original_size, total_size) where... * original_size ("orig_size") is the size in bytes of the marshalled value, * compressed_size ("alloc_size") is byte size after that blob has been compressed by ZSTD or LZ4, * total_size ("total_size") is byte size for that compressed blob, plus header, aligned. * If the slot was already written to, a negative value is returned for each element of the tuple. 
*/ /*****************************************************************************/ static value write_at(unsigned int slot, value data) { CAMLparam1(data); CAMLlocal1(result); result = caml_alloc_tuple(3); // Try to write in a value to indicate that the data is being written. if( __sync_bool_compare_and_swap( &(hashtbl[slot].addr), NULL, HASHTBL_WRITE_IN_PROGRESS ) ) { assert_allow_hashtable_writes_by_current_process(); size_t alloc_size = 0; size_t orig_size = 0; size_t total_size = 0; hashtbl[slot].addr = hh_store_ocaml(data, &alloc_size, &orig_size, &total_size); Store_field(result, 0, Val_long(alloc_size)); Store_field(result, 1, Val_long(orig_size)); Store_field(result, 2, Val_long(total_size)); __sync_fetch_and_add(hcounter_filled, 1); } else { Store_field(result, 0, Min_long); Store_field(result, 1, Min_long); Store_field(result, 2, Min_long); } CAMLreturn(result); } static void raise_hash_table_full(void) { static const value *exn = NULL; if (!exn) exn = caml_named_value("hash_table_full"); caml_raise_constant(*exn); } /*****************************************************************************/ /* Adds a key value to the hashtable. This code is perf sensitive, please * check the perf before modifying. * * Returns a tuple (compressed_size, original_size, total_size) where * original_size ("orig_size") is the size in bytes of the marshalled value, * compressed_size ("alloc_size") is byte size after that blob has been compressed by ZSTD or LZ4, * total_size ("total_size") is byte size for that compressed blob, plus header, aligned. * But if nothing new was added, then all three numbers returned are negative. 
*/ /*****************************************************************************/ value hh_add(value evictable, value key, value data) { CAMLparam3(evictable, key, data); uint64_t hash = get_hash(key); if (shm_use_sharded_hashtbl != 0) { _Bool eviction_enabled = shm_cache_size_b >= 0; CAMLreturn(shmffi_add(Bool_val(evictable) && eviction_enabled, hash, data)); } check_should_exit(); unsigned int slot = hash & (hashtbl_size - 1); unsigned int init_slot = slot; while(1) { uint64_t slot_hash = hashtbl[slot].hash; if(slot_hash == hash) { // overwrite previous value for this hash CAMLreturn(write_at(slot, data)); } if (*hcounter >= hashtbl_size) { // We're never going to find a spot raise_hash_table_full(); } if(slot_hash == 0) { // We think we might have a free slot, try to atomically grab it. if(__sync_bool_compare_and_swap(&(hashtbl[slot].hash), 0, hash)) { uint64_t size = __sync_fetch_and_add(hcounter, 1); // Sanity check assert(size < hashtbl_size); CAMLreturn(write_at(slot, data)); } // Grabbing it failed -- why? If someone else is trying to insert // the data we were about to, try to insert it ourselves too. // Otherwise, keep going. // Note that this read relies on the __sync call above preventing the // compiler from caching the value read out of memory. (And of course // isn't safe on any arch that requires memory barriers.) if(hashtbl[slot].hash == hash) { // Some other thread already grabbed this slot to write this // key, but they might not have written the address (or even // the sigil value) yet. We can't return from hh_add until we // know that hh_mem would succeed, which is to say that addr is // no longer null. To make sure hh_mem will work, we try // writing the value ourselves; either we insert it ourselves or // we know the address is now non-NULL. 
CAMLreturn(write_at(slot, data)); } } slot = (slot + 1) & (hashtbl_size - 1); if (slot == init_slot) { // We're never going to find a spot raise_hash_table_full(); } } } /*****************************************************************************/ /* Stores a raw bytes representation of an heap_entry in the shared heap. It * returns the address of the allocated chunk. */ /*****************************************************************************/ static heap_entry_t* hh_store_raw_entry( value data ) { size_t size = caml_string_length(data) - sizeof(heap_entry_t); size_t total_size = 0; heap_entry_t* entry = (heap_entry_t*)Bytes_val(data); hh_header_t header = entry->header; heap_entry_t* addr = hh_alloc(header, &total_size); memcpy(&addr->data, entry->data, size); return addr; } /*****************************************************************************/ /* Writes the raw serialized data in one of the slots of the hashtable. There * might be concurrent writers, when that happens, the first writer wins. * */ /*****************************************************************************/ static value write_raw_at(unsigned int slot, value data) { CAMLparam1(data); // Try to write in a value to indicate that the data is being written. if( __sync_bool_compare_and_swap( &(hashtbl[slot].addr), NULL, HASHTBL_WRITE_IN_PROGRESS ) ) { assert_allow_hashtable_writes_by_current_process(); hashtbl[slot].addr = hh_store_raw_entry(data); __sync_fetch_and_add(hcounter_filled, 1); } CAMLreturn(Val_unit); } /*****************************************************************************/ /* Adds a key and raw heap_entry (represented as bytes) to the hashtable. Used * for over the network proxying. * * Returns unit. 
*/ /*****************************************************************************/ CAMLprim value hh_add_raw(value key, value data) { CAMLparam2(key, data); uint64_t hash = get_hash(key); if (shm_use_sharded_hashtbl != 0) { CAMLreturn(shmffi_add_raw(hash, data)); } check_should_exit(); unsigned int slot = hash & (hashtbl_size - 1); unsigned int init_slot = slot; while(1) { uint64_t slot_hash = hashtbl[slot].hash; if(slot_hash == hash) { CAMLreturn(write_raw_at(slot, data)); } if (*hcounter >= hashtbl_size) { // We're never going to find a spot raise_hash_table_full(); } if(slot_hash == 0) { // We think we might have a free slot, try to atomically grab it. if(__sync_bool_compare_and_swap(&(hashtbl[slot].hash), 0, hash)) { uint64_t size = __sync_fetch_and_add(hcounter, 1); // Sanity check assert(size < hashtbl_size); CAMLreturn(write_raw_at(slot, data)); } // Grabbing it failed -- why? If someone else is trying to insert // the data we were about to, try to insert it ourselves too. // Otherwise, keep going. // Note that this read relies on the __sync call above preventing the // compiler from caching the value read out of memory. (And of course // isn't safe on any arch that requires memory barriers.) if(hashtbl[slot].hash == hash) { // Some other thread already grabbed this slot to write this // key, but they might not have written the address (or even // the sigil value) yet. We can't return from hh_add until we // know that hh_mem would succeed, which is to say that addr is // no longer null. To make sure hh_mem will work, we try // writing the value ourselves; either we insert it ourselves or // we know the address is now non-NULL. CAMLreturn(write_raw_at(slot, data)); } } slot = (slot + 1) & (hashtbl_size - 1); if (slot == init_slot) { // We're never going to find a spot raise_hash_table_full(); } } CAMLreturn(Val_unit); } /*****************************************************************************/ /* Finds the slot corresponding to the key in a hash table. 
The returned slot * is either free or points to the key. */ /*****************************************************************************/ static unsigned int find_slot(value key) { uint64_t hash = get_hash(key); unsigned int slot = hash & (hashtbl_size - 1); unsigned int init_slot = slot; while(1) { if(hashtbl[slot].hash == hash) { return slot; } if(hashtbl[slot].hash == 0) { return slot; } slot = (slot + 1) & (hashtbl_size - 1); if (slot == init_slot) { raise_hash_table_full(); } } } static _Bool hh_is_slot_taken_for_key(unsigned int slot, value key) { _Bool good_hash = hashtbl[slot].hash == get_hash(key); _Bool non_null_addr = hashtbl[slot].addr != NULL; if (good_hash && non_null_addr) { // The data is currently in the process of being written, wait until it // actually is ready to be used before returning. time_t start = 0; while (hashtbl[slot].addr == HASHTBL_WRITE_IN_PROGRESS) { #if defined(__aarch64__) asm volatile("yield" : : : "memory"); #else asm volatile("pause" : : : "memory"); #endif // if the worker writing the data dies, we can get stuck. Timeout check // to prevent it. time_t now = time(0); if (start == 0 || start > now) { start = now; } else if (now - start > 60) { caml_failwith("hh_mem busy-wait loop stuck for 60s"); } } return 1; } return 0; } _Bool hh_mem_inner(value key) { check_should_exit(); unsigned int slot = find_slot(key); return hh_is_slot_taken_for_key(slot, key); } /*****************************************************************************/ /* Returns true if the key is present. We need to check both the hash and * the address of the data. This is due to the fact that we remove by setting * the address slot to NULL (we never remove a hash from the table, outside * of garbage collection). 
*/ /*****************************************************************************/ value hh_mem(value key) { CAMLparam1(key); if (shm_use_sharded_hashtbl != 0) { CAMLreturn(shmffi_mem(get_hash(key))); } CAMLreturn(Val_bool(hh_mem_inner(key) == 1)); } /*****************************************************************************/ /* Deserializes the value pointed to by elt. */ /*****************************************************************************/ static CAMLprim value hh_deserialize(heap_entry_t *elt) { CAMLparam0(); CAMLlocal1(result); size_t size = Entry_size(elt->header); size_t uncompressed_size_exp = Entry_uncompressed_size(elt->header); char *src = elt->data; char *data = elt->data; if (uncompressed_size_exp) { data = malloc(uncompressed_size_exp); size_t uncompressed_size = 0; if (*compression) { uncompressed_size = ZSTD_decompressDCtx(zstd_dctx, data, uncompressed_size_exp, src, size); } else { uncompressed_size = LZ4_decompress_safe( src, data, size, uncompressed_size_exp); } assert(uncompressed_size == uncompressed_size_exp); size = uncompressed_size; } if (Entry_kind(elt->header) == KIND_STRING) { result = caml_alloc_initialized_string(size, data); } else { result = caml_input_value_from_block(data, size); } if (data != src) { free(data); } CAMLreturn(result); } /*****************************************************************************/ /* Returns the value associated to a given key, and deserialize it. */ /* Returns [None] if the slot for the key is empty. 
*/ /*****************************************************************************/ CAMLprim value hh_get_and_deserialize(value key) { CAMLparam1(key); check_should_exit(); CAMLlocal2(deserialized_value, result); if (shm_use_sharded_hashtbl != 0) { CAMLreturn(shmffi_get_and_deserialize(get_hash(key))); } unsigned int slot = find_slot(key); if (!hh_is_slot_taken_for_key(slot, key)) { CAMLreturn(Val_none); } deserialized_value = hh_deserialize(hashtbl[slot].addr); result = hh_shared_caml_alloc_some(deserialized_value); CAMLreturn(result); } /*****************************************************************************/ /* Returns Ocaml bytes representing the raw heap_entry. */ /* Returns [None] if the slot for the key is empty. */ /*****************************************************************************/ CAMLprim value hh_get_raw(value key) { CAMLparam1(key); if (shm_use_sharded_hashtbl != 0) { CAMLreturn(shmffi_get_raw(get_hash(key))); } check_should_exit(); CAMLlocal2(result, bytes); unsigned int slot = find_slot(key); if (!hh_is_slot_taken_for_key(slot, key)) { CAMLreturn(Val_none); } heap_entry_t *elt = hashtbl[slot].addr; size_t size = Heap_entry_total_size(elt->header); char *data = (char *)elt; bytes = caml_alloc_string(size); memcpy(Bytes_val(bytes), data, size); result = hh_shared_caml_alloc_some(bytes); CAMLreturn(result); } /*****************************************************************************/ /* Returns result of deserializing and possibly uncompressing a raw heap_entry * passed in as Ocaml bytes. 
*/ /*****************************************************************************/ CAMLprim value hh_deserialize_raw(value heap_entry) { CAMLparam1(heap_entry); CAMLlocal1(result); if (shm_use_sharded_hashtbl != 0) { CAMLreturn(shmffi_deserialize_raw(heap_entry)); } heap_entry_t* entry = (heap_entry_t*)Bytes_val(heap_entry); result = hh_deserialize(entry); CAMLreturn(result); } /*****************************************************************************/ /* Returns the compressed_size of the value associated to a given key. */ /* The key MUST be present. */ /*****************************************************************************/ CAMLprim value hh_get_size(value key) { CAMLparam1(key); if (shm_use_sharded_hashtbl != 0) { CAMLreturn(shmffi_get_size(get_hash(key))); } unsigned int slot = find_slot(key); assert(hashtbl[slot].hash == get_hash(key)); CAMLreturn(Val_long(Entry_size(hashtbl[slot].addr->header))); } /*****************************************************************************/ /* Moves the data associated to key1 to key2. * key1 must be present. * key2 must be free. * Only the master can perform this operation. */ /*****************************************************************************/ void hh_move(value key1, value key2) { if (shm_use_sharded_hashtbl != 0) { shmffi_move(get_hash(key1), get_hash(key2)); return; } unsigned int slot1 = find_slot(key1); unsigned int slot2 = find_slot(key2); assert_master(); assert_allow_removes(); assert(hashtbl[slot1].hash == get_hash(key1)); assert(hashtbl[slot2].addr == NULL); // We are taking up a previously empty slot. Let's increment the counter. // hcounter_filled doesn't change, since slot1 becomes empty and slot2 becomes // filled. 
if (hashtbl[slot2].hash == 0) { __sync_fetch_and_add(hcounter, 1); } hashtbl[slot2].hash = get_hash(key2); hashtbl[slot2].addr = hashtbl[slot1].addr; hashtbl[slot1].addr = NULL; } /*****************************************************************************/ /* Removes a key from the hash table, and returns the compressed_size that thing used to take. * Undefined behavior if the key doesn't exist. * Only the master can perform this operation. */ /*****************************************************************************/ CAMLprim value hh_remove(value key) { CAMLparam1(key); if (shm_use_sharded_hashtbl != 0) { CAMLreturn(shmffi_remove(get_hash(key))); } unsigned int slot = find_slot(key); assert_master(); assert_allow_removes(); assert(hashtbl[slot].hash == get_hash(key)); size_t entry_size = Entry_size(hashtbl[slot].addr->header); // see hh_alloc for the source of this size size_t slot_size = HEAP_ALIGN(Heap_entry_total_size(hashtbl[slot].addr->header)); __sync_fetch_and_add(wasted_heap_size, slot_size); hashtbl[slot].addr = NULL; removed_count += 1; __sync_fetch_and_sub(hcounter_filled, 1); CAMLreturn(Val_long(entry_size)); } CAMLprim value hh_removed_count(value ml_unit) { // TODO(hverr): Support sharded hash tables CAMLparam1(ml_unit); UNUSED(ml_unit); return Val_long(removed_count); }
C/C++
hhvm/hphp/hack/src/heap/hh_shared.h
#ifndef HH_SHARED_H #define HH_SHARED_H #define CAML_NAME_SPACE #include <caml/mlvalues.h> /*****************************************************************************/ /* Initialization & connection. */ /*****************************************************************************/ /* Initializes the shared heap. */ /* Must be called by the master BEFORE forking the workers! */ CAMLprim value hh_shared_init( value config_val, value shm_dir_val, value num_workers_val ); value hh_check_heap_overflow(void); /* Must be called by every worker before any operation is performed. */ value hh_connect(value connector, value worker_id_val); /* Can only be called after init or after earlier connect. */ value hh_get_handle(void); /*****************************************************************************/ /* Heap diagnostics. */ /*****************************************************************************/ CAMLprim value hh_used_heap_size(void); CAMLprim value hh_wasted_heap_size(void); CAMLprim value hh_log_level(void); CAMLprim value hh_sample_rate(void); CAMLprim value hh_hash_used_slots(void); CAMLprim value hh_hash_slots(void); /* Provides a counter which increases over the lifetime of the program * including all forks. Uses a global until hh_shared_init is called. * Safe to use in the early init stages of the program, as long as you fork * after hh_shared_init. Wraps around at the maximum value of an ocaml int. */ CAMLprim value hh_counter_next(void); /*****************************************************************************/ /* Worker management. 
*/ /*****************************************************************************/ CAMLprim value hh_stop_workers(void); CAMLprim value hh_resume_workers(void); CAMLprim value hh_check_should_exit(void); CAMLprim value hh_set_can_worker_stop(value val); CAMLprim value hh_malloc_trim(void); CAMLprim value hh_set_allow_removes(value val); CAMLprim value hh_set_allow_hashtable_writes_by_current_process(value val); /*****************************************************************************/ /* Global storage. */ /*****************************************************************************/ void hh_shared_store(value data); CAMLprim value hh_shared_load(void); void hh_shared_clear(void); /*****************************************************************************/ /* Garbage collection. */ /*****************************************************************************/ CAMLprim value hh_collect(void); /*****************************************************************************/ /* Deserialization. */ /*****************************************************************************/ /* Returns the value associated to a given key, and deserialize it. */ /* The key MUST be present. */ CAMLprim value hh_get_and_deserialize(value key); /*****************************************************************************/ /* Raw access for network proxying. hh_get_raw key |> hh_deserialize_raw = hh_get key hh_serialize_raw data |> hh_add_raw key = hh_add key data */ /*****************************************************************************/ /* The key MUST be present. */ CAMLprim value hh_get_raw(value key); CAMLprim value hh_add_raw(value key, value heap_entry); CAMLprim value hh_serialize_raw(value data); CAMLprim value hh_deserialize_raw(value heap_entry); /*****************************************************************************/ /* Hashtable operations. 
*/ /*****************************************************************************/ /* Returns the size of the value associated to a given key. * The key MUST be present. */ CAMLprim value hh_get_size(value key); /* Adds a key/value pair to the hashtable. Returns the number of bytes * allocated in the heap, or a negative number if no memory was allocated. */ value hh_add(value evictable, value key, value data); /* Returns true if the key is present in the hashtable. */ value hh_mem(value key); /* The following operations are only to be performed by the master. */ /* Moves the data associated to key1 to key2. * key1 must be present. key2 must be free. */ void hh_move(value key1, value key2); /* Removes a key from the hash table. */ CAMLprim value hh_remove(value key); /*****************************************************************************/ /* Utility */ /*****************************************************************************/ /* Get the hash of a string, based on MD5. */ CAMLprim value hh_get_hash_ocaml(value key); /* This assert will fail if the current process is not the master process. */ CAMLprim value hh_assert_master(void); #endif
OCaml
hhvm/hphp/hack/src/heap/ident.ml
(* * Copyright (c) 2015, Facebook, Inc. * All rights reserved. * * This source code is licensed under the MIT license found in the * LICENSE file in the "hack" directory of this source tree. * *) open Hh_prelude external hh_counter_next : unit -> int = "hh_counter_next" type t = int [@@deriving eq, hash] let compare = Int.compare let track_names = ref false let trace = ref IMap.empty let tmp () = let res = hh_counter_next () in if !track_names then trace := IMap.add res ("__tmp" ^ string_of_int res) !trace; res let to_string x = match IMap.find_opt x !trace with | Some res -> res | None -> "v" ^ string_of_int x let debug ?normalize:(f = (fun x -> x)) x = let normalized_x = string_of_int (f x) in match IMap.find_opt x !trace with | Some result -> result ^ "[" ^ normalized_x ^ "]" | None -> "tvar_" ^ normalized_x [@@@warning "+3"] let get_name x = assert !track_names; IMap.find x !trace let set_name x y = trace := IMap.add x y !trace let make x = let res = hh_counter_next () in if !track_names then set_name res x; res let pp = Format.pp_print_int let not_equal x y = not @@ equal x y let hash_range_min = 100_000_000_000_000 let hash_range_max = 1_000_000_000_000_000 (* Probability of collision: if N = hash_range_max - hash_range_min and k is the * number of values to hash, the probability of collision is 1 - e^((-k*(k-1)/(2*N))). * So if k = 1'000'000, N = 10^14 gives a collision probability of 0.005 *) let from_string_hash s = let hash = Base.String.hash s in (* make this an int between hash_range_min and hash_range_max - 1 *) let hash = (hash % (hash_range_max - hash_range_min)) + hash_range_min in hash let immutable_mask = 1 lsl 62 let is_immutable i = i land immutable_mask <> 0 let make_immutable i = i lor immutable_mask
OCaml Interface
hhvm/hphp/hack/src/heap/ident.mli
(* * Copyright (c) 2015, Facebook, Inc. * All rights reserved. * * This source code is licensed under the MIT license found in the * LICENSE file in the "hack" directory of this source tree. * *) type t = int [@@deriving eq, hash] val compare : t -> t -> int val track_names : bool ref val tmp : unit -> t val to_string : t -> string val debug : ?normalize:(int -> int) -> t -> string val get_name : t -> string val set_name : t -> string -> unit val make : string -> t val pp : Format.formatter -> t -> unit val not_equal : t -> t -> bool val from_string_hash : string -> t val is_immutable : int -> bool val make_immutable : int -> int
OCaml
hhvm/hphp/hack/src/heap/prefix.ml
(* * Copyright (c) 2015, Facebook, Inc. * All rights reserved. * * This source code is licensed under the MIT license found in the * LICENSE file in the "hack" directory of this source tree. * *) (*****************************************************************************) (* The prefix is used to guarantee that we are not mixing different kind of * keys in the heap. * It just creates a new prefix every time its called. * The $ at the end of the prefix ensures that we don't have ambiguities if a key * happens to start with a digit. *) (*****************************************************************************) type t = string let make = let prefix_count = ref 0 in fun () -> incr prefix_count; string_of_int !prefix_count ^ "$" let make_key prefix k = prefix ^ k let remove prefix k = let prefix_size = String.length prefix in assert (String.sub k 0 prefix_size = prefix); String.sub k prefix_size (String.length k - prefix_size)
OCaml Interface
hhvm/hphp/hack/src/heap/prefix.mli
(* * Copyright (c) 2015, Facebook, Inc. * All rights reserved. * * This source code is licensed under the MIT license found in the * LICENSE file in the "hack" directory of this source tree. * *) (*****************************************************************************) (* The prefix is used to guarantee that we are not mixing different kind of * keys in the heap. * It just creates a new prefix every time its called. *) (*****************************************************************************) type t val make : unit -> t (* Given a prefix and a key make me a prefixed key *) val make_key : t -> string -> string (* Removes the prefix from a key *) val remove : t -> string -> string
OCaml
hhvm/hphp/hack/src/heap/sharedMem.ml
(* * Copyright (c) 2015, Facebook, Inc. * All rights reserved. * * This source code is licensed under the MIT license found in the * LICENSE file in the "hack" directory of this source tree. * *) open Hh_prelude module Hashtbl = Stdlib.Hashtbl module Queue = Stdlib.Queue module Set = Stdlib.Set type uses = Uses let ref_has_done_init = ref false (* Don't change the ordering of this record without updating hh_shared_init in * hh_shared.c, which indexes into config objects *) type config = { global_size: int; heap_size: int; hash_table_pow: int; shm_use_sharded_hashtbl: bool; shm_cache_size: int; shm_dirs: string list; shm_min_avail: int; log_level: int; sample_rate: float; (* 0 - lz4, others -- compression level for zstd*) compression: int; } [@@deriving show] let default_config = let gig = 1024 * 1024 * 1024 in { global_size = gig; heap_size = 20 * gig; hash_table_pow = 18; (* 1 << 18 *) shm_dirs = [GlobalConfig.shm_dir; GlobalConfig.tmp_dir]; shm_use_sharded_hashtbl = false; shm_cache_size = -1 (* eviction disabled *); shm_min_avail = gig / 2; (* Half a gig by default *) log_level = 0; sample_rate = 0.0; compression = 0; } let empty_config = { global_size = 0; heap_size = 0; hash_table_pow = 0; shm_dirs = []; shm_use_sharded_hashtbl = false; shm_cache_size = -1; shm_min_avail = 0; log_level = 0; sample_rate = 0.0; compression = 0; } (* Allocated in C only. NOTE: If you change the order, update hh_shared.c! 
*) type internal_handle = private { h_fd: Unix.file_descr; h_global_size: int; h_heap_size: int; _h_hash_table_pow_val: int; _h_num_workers_val: int; h_shm_use_sharded_hashtbl: bool; _h_shm_cache_size: int; h_sharded_hashtbl_fd: Unix.file_descr; } type handle = internal_handle * internal_handle option let get_heap_size ({ h_heap_size; _ }, _) = h_heap_size let get_global_size ({ h_global_size; _ }, _) = h_global_size let apply_on_pair ~f ({ h_fd; h_shm_use_sharded_hashtbl; h_sharded_hashtbl_fd; _ }, h2) = f h_fd; if h_shm_use_sharded_hashtbl then f h_sharded_hashtbl_fd; match h2 with (* We don't support 2 shared hashtables, so ignore it in the second part *) | Some { h_fd; _ } -> f h_fd | None -> () let set_close_on_exec handle = apply_on_pair ~f:Unix.set_close_on_exec handle let clear_close_on_exec handle = apply_on_pair ~f:Unix.clear_close_on_exec handle exception Out_of_shared_memory exception Hash_table_full exception Dep_table_full exception Heap_full exception Revision_length_is_zero exception Sql_assertion_failure of int exception Failed_anonymous_memfd_init exception Less_than_minimum_available of int exception Failed_to_use_shm_dir of string exception C_assertion_failure of string let () = Callback.register_exception "out_of_shared_memory" Out_of_shared_memory; Callback.register_exception "hash_table_full" Hash_table_full; Callback.register_exception "dep_table_full" Dep_table_full; Callback.register_exception "heap_full" Heap_full; Callback.register_exception "revision_length_is_zero" Revision_length_is_zero; Callback.register_exception "sql_assertion_failure" (Sql_assertion_failure 0); Callback.register_exception "failed_anonymous_memfd_init" Failed_anonymous_memfd_init; Callback.register_exception "less_than_minimum_available" (Less_than_minimum_available 0); Callback.register_exception "c_assertion_failure" (C_assertion_failure "dummy string") external hh_shared_init : config:config -> shm_dir:string option -> num_workers:int -> internal_handle = 
"hh_shared_init" let ref_shared_mem_callbacks = ref None let register_callbacks init_flash connect_flash get_handle_flash = ref_shared_mem_callbacks := Some (init_flash, connect_flash, get_handle_flash); () let anonymous_init config ~num_workers = hh_shared_init ~config ~shm_dir:None ~num_workers let rec shm_dir_init config ~num_workers = function | [] -> Hh_logger.log "We've run out of filesystems to use for shared memory"; raise Out_of_shared_memory | shm_dir :: shm_dirs -> let shm_min_avail = config.shm_min_avail in (* For some reason statvfs is segfaulting when the directory doesn't * exist, instead of returning -1 and an errno *) begin try if not (Sys.file_exists shm_dir) then raise (Failed_to_use_shm_dir "shm_dir does not exist"); hh_shared_init ~config ~shm_dir:(Some shm_dir) ~num_workers with | Less_than_minimum_available avail -> EventLogger.( log_if_initialized (fun () -> sharedmem_less_than_minimum_available ~shm_dir ~shm_min_avail ~avail)); Hh_logger.log "Filesystem %s only has %d bytes available, which is less than the minimum %d bytes" shm_dir avail config.shm_min_avail; shm_dir_init config ~num_workers shm_dirs | Unix.Unix_error (e, fn, arg) -> let fn_string = if String.equal fn "" then "" else Utils.spf " thrown by %s(%s)" fn arg in let reason = Utils.spf "Unix error%s: %s" fn_string (Unix.error_message e) in EventLogger.( log_if_initialized (fun () -> sharedmem_failed_to_use_shm_dir ~shm_dir ~reason)); Hh_logger.log "Failed to use shm dir `%s`: %s" shm_dir reason; shm_dir_init config ~num_workers shm_dirs | Failed_to_use_shm_dir reason -> EventLogger.( log_if_initialized (fun () -> sharedmem_failed_to_use_shm_dir ~shm_dir ~reason)); Hh_logger.log "Failed to use shm dir `%s`: %s" shm_dir reason; shm_dir_init config ~num_workers shm_dirs end let init config ~num_workers = ref_has_done_init := true; let fst = try anonymous_init config ~num_workers with | Failed_anonymous_memfd_init -> EventLogger.( log_if_initialized (fun () -> 
sharedmem_failed_anonymous_memfd_init ())); Hh_logger.log "Failed to use anonymous memfd init"; shm_dir_init config ~num_workers config.shm_dirs in let snd = match !ref_shared_mem_callbacks with | Some (init, _connect, _get) -> init config ~num_workers | None -> None in (fst, snd) external connect_internal_handle : internal_handle -> worker_id:int -> unit = "hh_connect" let connect (handle, maybe_handle) ~worker_id = let () = connect_internal_handle handle ~worker_id in let () = match !ref_shared_mem_callbacks with | Some (_init, connect, _get) -> connect maybe_handle ~worker_id | _ -> () in () external get_handle_internal_handle : unit -> internal_handle = "hh_get_handle" let get_handle () = let snd = match !ref_shared_mem_callbacks with | Some (_init, _connect, get) -> get () | None -> None in (get_handle_internal_handle (), snd) external set_allow_removes : bool -> unit = "hh_set_allow_removes" external set_allow_hashtable_writes_by_current_process : bool -> unit = "hh_set_allow_hashtable_writes_by_current_process" module RawAccess = struct (* Allocated in C only. *) type serialized = private bytes external mem_raw : string -> bool = "hh_mem" external get_raw : string -> serialized option = "hh_get_raw" external add_raw : string -> serialized -> unit = "hh_add_raw" external deserialize_raw : serialized -> 'a = "hh_deserialize_raw" external serialize_raw : 'a -> serialized = "hh_serialize_raw" end module SMTelemetry = struct (*****************************************************************************) (* Each cache can write telemetry about its current occupancy. * - Immediate caches - only records its existence * - WithLocalChanges caches - they do Obj.reachable_words to count up the stack * - Local caches - they do Obj.reachable_words * In the case of compound caches, e.g. HeapWithLocalCache which includes all three, * it doesn't have to report telemetry since each of its constituents already * reports telemetry on its own. 
* Anyway, each cache registers in the global "get_telemetry_list" so that * callers can do SharedMem.get_telemetry and pick up from all caches. * * Caveats: * Note that Obj.reachable_words may double-count stuff if it's in both * Local and WithLocalChanges cache. It may also take time, up to ~300ms. * And it will be meaningless if the items in the Local cache have references * into other parts of the system. It's up to the reader to make sense of it. * * The "WithLocalChanges" doesn't have a straightforward count of elements. * Instead it counts how many "actions" there are across all change-stacks: * how many adds, removes, replaces. *) (*****************************************************************************) external heap_size : unit -> int = "hh_used_heap_size" [@@noalloc] external wasted_heap_size : unit -> int = "hh_wasted_heap_size" [@@noalloc] external hh_log_level : unit -> int = "hh_log_level" [@@noalloc] external hh_sample_rate : unit -> float = "hh_sample_rate" external hash_used_slots : unit -> int * int = "hh_hash_used_slots" external hash_slots : unit -> int = "hh_hash_slots" let get_telemetry_list = ref [] let get_telemetry () : Telemetry.t = (* This function gets called by compute_tast, even in places which deliberately don't initialize shared memory. In these places, no-op, since otherwise reading from hh_log_level would segfault. 
*) if not !ref_has_done_init then Telemetry.create () else let start_time = Unix.gettimeofday () in let (hcounter, hcounter_filled) = hash_used_slots () in let telemetry = Telemetry.create () |> Telemetry.int_ ~key:"heap_size" ~value:(heap_size ()) |> Telemetry.int_ ~key:"wasted_heap_size" ~value:(wasted_heap_size ()) |> Telemetry.int_ ~key:"hash_used_slots" ~value:hcounter |> Telemetry.int_ ~key:"hash_used_slots_filled" ~value:hcounter_filled |> Telemetry.int_ ~key:"hash_slots" ~value:(hash_slots ()) in let telemetry = List.fold !get_telemetry_list ~init:telemetry ~f:(fun acc get_telemetry -> get_telemetry acc) in telemetry |> Telemetry.duration ~start_time type table_stats = { nonempty_slots: int; used_slots: int; slots: int; } let hash_stats () = let (used_slots, nonempty_slots) = hash_used_slots () in { nonempty_slots; used_slots; slots = hash_slots () } external hh_removed_count : unit -> int = "hh_removed_count" external is_heap_overflow : unit -> bool = "hh_check_heap_overflow" let value_size r = let w = Obj.reachable_words r in w * (Sys.word_size / 8) let init_done () = EventLogger.sharedmem_init_done (heap_size ()) end module GC = struct external hh_collect : unit -> unit = "hh_collect" [@@noalloc] let should_collect (effort : [ `gentle | `aggressive | `always_TEST ]) = let overhead = match effort with | `always_TEST -> 1.0 | `aggressive -> 1.2 | `gentle -> 2.0 in let used = SMTelemetry.heap_size () in let wasted = SMTelemetry.wasted_heap_size () in let reachable = used - wasted in used >= Float.iround_towards_zero_exn (float reachable *. overhead) let collect (effort : [ `gentle | `aggressive | `always_TEST ]) = let old_size = SMTelemetry.heap_size () in Stats.update_max_heap_size old_size; let start_t = Unix.gettimeofday () in if should_collect effort then hh_collect (); let new_size = SMTelemetry.heap_size () in let time_taken = Unix.gettimeofday () -. 
start_t in if old_size <> new_size then ( Hh_logger.log "Sharedmem GC: %d bytes before; %d bytes after; in %f seconds" old_size new_size time_taken; EventLogger.sharedmem_gc_ran effort old_size new_size time_taken ) end module type Key = sig type t val to_string : t -> string val compare : t -> t -> int end module type KeyHasher = sig type key type hash val hash : key -> hash val hash_old : key -> hash val to_bytes : hash -> string end module MakeKeyHasher (Key : Key) : KeyHasher with type key = Key.t = struct type key = Key.t type hash = string let prefix = Prefix.make () (* The prefix we use for old keys. The prefix guarantees that we never * mix old and new data, because a key can never start with the prefix * "old_", it always starts with a number (cf Prefix.make()). *) let old_prefix = "old_" let full_key (x : key) : string = Prefix.make_key prefix (Key.to_string x) let full_key_old (x : key) : string = old_prefix ^ Prefix.make_key prefix (Key.to_string x) let hash (key : key) : hash = Stdlib.Digest.string (full_key key) let hash_old (key : key) : hash = Stdlib.Digest.string (full_key_old key) let to_bytes (hash : hash) : string = hash end module type Value = sig type t val description : string end module type Evictability = sig val evictable : bool end module Evictable : Evictability = struct let evictable = true end module NonEvictable : Evictability = struct let evictable = false end module type Backend = functor (KeyHasher : KeyHasher) (Value : Value) -> sig val add : KeyHasher.hash -> Value.t -> unit val mem : KeyHasher.hash -> bool val get : KeyHasher.hash -> Value.t option val remove : KeyHasher.hash -> unit val move : KeyHasher.hash -> KeyHasher.hash -> unit end module type Capacity = sig val capacity : int end module ImmediateBackend (Evictability : Evictability) : Backend = functor (KeyHasher : KeyHasher) (Value : Value) -> struct (** Returns a tuple (compressed_size, original_size, total_size) where - original_size is number of bytes that the value 
takes after marshalling, - compressed_size is number of bytes after compressing that marshalled blob, - total_size is compressed_size plus hh_shared header plus byte alignment. If nothing was allocated (e.g. because the key already existed) then all three are negative. *) external hh_add : evictable:bool -> KeyHasher.hash -> Value.t -> int * int * int = "hh_add" external hh_mem : KeyHasher.hash -> bool = "hh_mem" (** Returns the compressed_size for this item. Undefined behavior if the key doesn't already exist. *) external hh_get_size : KeyHasher.hash -> int = "hh_get_size" external hh_get_and_deserialize : KeyHasher.hash -> Value.t option = "hh_get_and_deserialize" (** Removes the key. Undefined behavior if it doesn't exist. Returns the compressed_size of what was just removed. *) external hh_remove : KeyHasher.hash -> int = "hh_remove" external hh_move : KeyHasher.hash -> KeyHasher.hash -> unit = "hh_move" let measure_add = Value.description ^ " (bytes serialized into shared heap)" let measure_remove = Value.description ^ " (compressed bytes removed from shared heap)" let measure_get = Value.description ^ " (bytes deserialized from shared heap)" let log_serialize compressed original total = let compressed = float compressed in let original = float original in let total = float total in let saved = original -. compressed in let ratio = compressed /. 
original in Measure.sample (Value.description ^ " (total bytes including header and padding)") total; Measure.sample "ALL bytes (total bytes including header and padding)" total; Measure.sample measure_add compressed; Measure.sample "ALL bytes serialized into shared heap" compressed; Measure.sample (Value.description ^ " (bytes saved in shared heap due to compression)") saved; Measure.sample "ALL bytes saved in shared heap due to compression" saved; Measure.sample (Value.description ^ " (shared heap compression ratio)") ratio; Measure.sample "ALL bytes shared heap compression ratio" ratio let log_deserialize l r = let sharedheap = float l in Measure.sample measure_get sharedheap; Measure.sample "ALL bytes deserialized from shared heap" sharedheap; if SMTelemetry.hh_log_level () > 1 then ( (* value_size is a bit expensive to call this often, so only run with log levels >= 2 *) let localheap = float (SMTelemetry.value_size r) in Measure.sample (Value.description ^ " (bytes allocated for deserialized value)") localheap; Measure.sample "ALL bytes allocated for deserialized value" localheap ) let log_remove compressed = let compressed = float compressed in Measure.sample measure_remove compressed; Measure.sample "ALL compressed bytes removed from shared heap" compressed; () let add key value = let (compressed_size, original_size, total_size) = hh_add ~evictable:Evictability.evictable key value in (* compressed_size is a negative number if nothing new was added *) if SMTelemetry.hh_log_level () > 0 && compressed_size > 0 then log_serialize compressed_size original_size total_size let mem key = hh_mem key let log_hit_rate ~hit = Measure.sample (Value.description ^ " (shmem cache hit rate)") (if hit then 1. else 0.); Measure.sample "ALL shmem cache hit rate" (if hit then 1. else 0.) 
let get (key : KeyHasher.hash) : Value.t option = let v = hh_get_and_deserialize key in if SMTelemetry.hh_log_level () > 0 then begin log_hit_rate ~hit:(Option.is_some v); Option.iter ~f:(fun v -> log_deserialize (hh_get_size key) (Obj.repr v)) v end; v let remove key = let compressed_size = hh_remove key in (* hh_remove assumes the key is present *) if SMTelemetry.hh_log_level () > 0 then log_remove compressed_size; () let move from_key to_key = hh_move from_key to_key let get_telemetry (telemetry : Telemetry.t) : Telemetry.t = let simple_metric name = (Measure.get_count name, Measure.get_sum name) in let diff_metric left_name right_name = let diff left right = Option.merge left ~f:( -. ) right in let (left_count, left_bytes) = simple_metric left_name in let (right_count, right_bytes) = simple_metric right_name in (diff left_count right_count, diff left_bytes right_bytes) in (* Gather counts and sums for these metrics *) let metrics = [ ("get", simple_metric measure_get); ("add", simple_metric measure_add); ("remove", simple_metric measure_remove); ("entries", diff_metric measure_add measure_remove); ] in let is_none = function | (_, (None, None)) -> true | _ -> false in if List.for_all ~f:is_none metrics then telemetry else let make_obj t (key, (count, bytes)) = let count_val = Option.value_map ~default:0 ~f:int_of_float count in let bytes_val = Option.value_map ~default:0 ~f:int_of_float bytes in Telemetry.object_ ~key ~value: (Telemetry.create () |> Telemetry.int_ ~key:"count" ~value:count_val |> Telemetry.int_ ~key:"bytes" ~value:bytes_val) t in let value = List.fold ~f:make_obj ~init:(Telemetry.create ()) metrics in telemetry |> Telemetry.object_ ~key:(Value.description ^ "__shared") ~value let () = SMTelemetry.get_telemetry_list := get_telemetry :: !SMTelemetry.get_telemetry_list; () end (** Heap that provides direct access to shared memory, but with a layer of local changes that allows us to decide whether or not to commit specific values. 
*) module BackendWithLocalChanges : functor (Backend : Backend) (KeyHasher : KeyHasher) (Value : Value) -> sig include module type of Backend (KeyHasher) (Value) module LocalChanges : sig val push_stack : unit -> unit val pop_stack : unit -> unit end end = functor (Backend : Backend) (KeyHasher : KeyHasher) (Value : Value) -> struct module Backend = Backend (KeyHasher) (Value) (** Represents a set of local changes to the view of the shared memory heap WITHOUT materializing to the changes in the actual heap. This allows us to make speculative changes to the view of the world that can be reverted quickly and correctly. A LocalChanges maintains the same invariants as the shared heap. Except add are allowed to overwrite filled keys. This is for convenience so we do not need to remove filled keys upfront. *) module LocalChanges = struct type action = (* The value does not exist in the current stack. When committed this * action will invoke remove on the previous stack. *) | Remove (* The value is added to a previously empty slot. When committed this * action will invoke add on the previous stack. *) | Add of Value.t (* The value is replacing a value already associated with a key in the * previous stack. When committed this action will invoke remove then * add on the previous stack. *) | Replace of Value.t type t = { current: (KeyHasher.hash, action) Hashtbl.t; prev: t option; } let stack : t option ref = ref None let rec mem stack_opt key = match stack_opt with | None -> Backend.mem key | Some stack -> (match Hashtbl.find_opt stack.current key with | Some Remove -> false | Some _ -> true | None -> mem stack.prev key) let rec get stack_opt key = match stack_opt with | None -> Backend.get key | Some stack -> (match Hashtbl.find_opt stack.current key with | Some Remove -> None | Some (Replace value | Add value) -> Some value | None -> get stack.prev key) (* * For remove/add it is best to think of them in terms of a state machine. 
* A key can be in the following states: * * Remove: * Local changeset removes a key from the previous stack * Replace: * Local changeset replaces value of a key in previous stack * Add: * Local changeset associates a value with a key. The key is not * present in the previous stacks * Empty: * No local changes and key is not present in previous stack * Filled: * No local changes and key has an associated value in previous stack * *Error*: * This means an exception will occur *) (* * Transitions table: * Remove -> *Error* * Replace -> Remove * Add -> Empty * Empty -> *Error* * Filled -> Remove *) let remove stack_opt key = match stack_opt with | None -> Backend.remove key | Some stack -> (match Hashtbl.find_opt stack.current key with | Some Remove -> failwith "Trying to remove a non-existent value" | Some (Replace _) -> Hashtbl.replace stack.current key Remove | Some (Add _) -> Hashtbl.remove stack.current key | None -> if mem stack.prev key then Hashtbl.replace stack.current key Remove else failwith "Trying to remove a non-existent value") (* * Transitions table: * Remove -> Replace * Replace -> Replace * Add -> Add * Empty -> Add * Filled -> Replace *) let add stack_opt key value = match stack_opt with | None -> Backend.add key value | Some stack -> (match Hashtbl.find_opt stack.current key with | Some (Remove | Replace _) -> Hashtbl.replace stack.current key (Replace value) | Some (Add _) -> Hashtbl.replace stack.current key (Add value) | None -> if mem stack.prev key then Hashtbl.replace stack.current key (Replace value) else Hashtbl.replace stack.current key (Add value)) let move stack_opt from_key to_key = match stack_opt with | None -> Backend.move from_key to_key | Some _stack -> assert (mem stack_opt from_key); assert (not @@ mem stack_opt to_key); let value = Option.value_exn (get stack_opt from_key) in remove stack_opt from_key; add stack_opt to_key value (* Public API **) let push_stack () = stack := Some { current = Hashtbl.create 128; prev = !stack } 
let pop_stack () = match !stack with | None -> failwith "There are no active local change stacks. Nothing to pop!" | Some { prev; _ } -> stack := prev let get_telemetry (telemetry : Telemetry.t) : Telemetry.t = let rec rec_actions_and_depth acc_count acc_depth changeset_opt = match changeset_opt with | Some changeset -> rec_actions_and_depth (acc_count + Hashtbl.length changeset.current) (acc_depth + 1) changeset.prev | None -> (acc_count, acc_depth) in let (actions, depth) = rec_actions_and_depth 0 0 !stack in (* We count reachable words of the entire stack, to avoid double- counting in cases where a value appears in multiple stack frames. If instead we added up reachable words from each frame separately, then an item reachable from two frames would be double-counted. *) let bytes = if SMTelemetry.hh_log_level () > 0 then Some (Obj.reachable_words (Obj.repr !stack) * (Sys.word_size / 8)) else None in if actions = 0 then telemetry else telemetry |> Telemetry.object_ ~key:(Value.description ^ "__stack") ~value: (Telemetry.create () |> Telemetry.int_ ~key:"actions" ~value:actions |> Telemetry.int_opt ~key:"bytes" ~value:bytes |> Telemetry.int_ ~key:"depth" ~value:depth) let () = SMTelemetry.get_telemetry_list := get_telemetry :: !SMTelemetry.get_telemetry_list; () end let add key value = LocalChanges.(add !stack key value) let mem key = LocalChanges.(mem !stack key) let get key = LocalChanges.(get !stack key) let remove key = LocalChanges.(remove !stack key) let move from_key to_key = LocalChanges.(move !stack from_key to_key) end module type Heap = sig type key type value module KeyHasher : KeyHasher with type key = key module KeySet : Set.S with type elt = key module KeyMap : WrappedMap.S with type key = key val add : key -> value -> unit val get : key -> value option val get_old : key -> value option val get_batch : KeySet.t -> value option KeyMap.t val get_old_batch : KeySet.t -> value option KeyMap.t val remove : key -> unit val remove_old : key -> unit val 
remove_batch : KeySet.t -> unit val remove_old_batch : KeySet.t -> unit val mem : key -> bool val mem_old : key -> bool (** Equivalent to moving a set of entries (= key + value) to some heap of old entries. *) val oldify_batch : KeySet.t -> unit val revive_batch : KeySet.t -> unit module LocalChanges : sig val push_stack : unit -> unit val pop_stack : unit -> unit end end module type LocalCacheLayer = sig type key type value val add : key -> value -> unit val get : key -> value option val remove : key -> unit val clear : unit -> unit val get_telemetry_items_and_keys : unit -> Obj.t * key Seq.t end module Heap (Backend : Backend) (Key : Key) (Value : Value) : Heap with type key = Key.t and type value = Value.t and module KeyHasher = MakeKeyHasher(Key) and module KeySet = Set.Make(Key) and module KeyMap = WrappedMap.Make(Key) = struct module KeyHasher = MakeKeyHasher (Key) module KeySet = Set.Make (Key) module KeyMap = WrappedMap.Make (Key) (** Stacks that keeps track of local, non-committed changes. 
If no stacks are active, changs will be committed immediately to the shared-memory backend *) module WithLocalChanges = BackendWithLocalChanges (Backend) (KeyHasher) (Value) type key = Key.t type value = Value.t let hash_of_key x = KeyHasher.hash x let old_hash_of_key x = KeyHasher.hash_old x let add x y = WithLocalChanges.add (hash_of_key x) y let get x = let hash = hash_of_key x in WithLocalChanges.get hash let get_old x = let old_hash = old_hash_of_key x in WithLocalChanges.get old_hash let get_batch xs = KeySet.fold begin (fun key acc -> KeyMap.add key (get key) acc) end xs KeyMap.empty let get_old_batch xs = KeySet.fold begin (fun key acc -> KeyMap.add key (get_old key) acc) end xs KeyMap.empty let remove x = let hash = hash_of_key x in if WithLocalChanges.mem hash then WithLocalChanges.remove hash else () let remove_old x = let old_hash = old_hash_of_key x in if WithLocalChanges.mem old_hash then WithLocalChanges.remove old_hash else () let remove_batch xs = KeySet.iter remove xs let remove_old_batch xs = KeySet.iter remove_old xs let mem x = WithLocalChanges.mem (hash_of_key x) let mem_old x = WithLocalChanges.mem (old_hash_of_key x) (** Equivalent to moving an entry (= key + value) to some heap of old entries. *) let oldify x = if mem x then WithLocalChanges.move (hash_of_key x) (old_hash_of_key x) else () let revive x = if mem_old x then ( remove x; WithLocalChanges.move (old_hash_of_key x) (hash_of_key x) ) (** Equivalent to moving a set of entries (= key + value) to some heap of old entries. 
*) let oldify_batch xs = KeySet.iter begin fun key -> if mem key then oldify key else (* this is weird, semantics of `oldify x` and `oldify_batch {x}` are different for some mysterious reason *) remove_old key end xs let revive_batch xs = KeySet.iter begin fun key -> if mem_old key then revive key else (* this is weird, semantics of `revive x` and `revive {x}` are different for some mysterious reason *) remove key end xs module LocalChanges = struct include WithLocalChanges.LocalChanges end end (** Every time a new worker-local cache is created, a clearing function is registered for it here. **) let invalidate_local_caches_callback_list = ref [] let invalidate_local_caches () = List.iter !invalidate_local_caches_callback_list ~f:(fun callback -> callback ()) module FreqCache (Key : Key) (Value : Value) (Capacity : Capacity) : LocalCacheLayer with type key = Key.t and type value = Value.t = struct type key = Key.t type value = Value.t let (cache : (key, int ref * value) Hashtbl.t) = Hashtbl.create (2 * Capacity.capacity) let get_telemetry_items_and_keys () = (Obj.repr cache, Hashtbl.to_seq_keys cache) let clear () = Hashtbl.clear cache (** The collection function is called when we reach twice original capacity in size. When the collection is triggered, we only keep the most frequently used objects. 
So before collection: size = 2 * capacity After collection: size = capacity (with the most frequently used objects) *) let collect () = if Hashtbl.length cache < 2 * Capacity.capacity then () else let l = ref [] in Hashtbl.iter begin (fun key (freq, v) -> l := (key, !freq, v) :: !l) end cache; Hashtbl.clear cache; l := List.sort ~compare:(fun (_, x, _) (_, y, _) -> y - x) !l; let i = ref 0 in while !i < Capacity.capacity do match !l with | [] -> i := Capacity.capacity | (k, _freq, v) :: rl -> Hashtbl.replace cache k (ref 0, v); l := rl; incr i done; () let add x y = collect (); match Hashtbl.find_opt cache x with | Some (freq, y') -> incr freq; if phys_equal y' y then () else Hashtbl.replace cache x (freq, y) | None -> let elt = (ref 0, y) in Hashtbl.replace cache x elt; () let get x = match Hashtbl.find_opt cache x with | None -> None | Some (freq, value) -> incr freq; Some value let remove = Hashtbl.remove cache let get_telemetry (telemetry : Telemetry.t) : Telemetry.t = let (objs, keys) = get_telemetry_items_and_keys () in let count = Seq.fold_left (fun a _ -> a + 1) 0 keys in if count = 0 then telemetry else let bytes = if SMTelemetry.hh_log_level () > 0 then Some (Obj.reachable_words (Obj.repr objs) * Sys.word_size / 8) else None in telemetry |> Telemetry.object_ ~key:(Value.description ^ "__local") ~value: (Telemetry.create () |> Telemetry.int_ ~key:"count" ~value:count |> Telemetry.int_opt ~key:"bytes" ~value:bytes) let () = SMTelemetry.get_telemetry_list := get_telemetry :: !SMTelemetry.get_telemetry_list; invalidate_local_caches_callback_list := clear :: !invalidate_local_caches_callback_list end module OrderedCache (Key : Key) (Value : Value) (Capacity : Capacity) : LocalCacheLayer with type key = Key.t and type value = Value.t = struct type key = Key.t type value = Value.t let (cache : (key, value) Hashtbl.t) = Hashtbl.create Capacity.capacity let queue = Queue.create () let size = ref 0 let get_telemetry_items_and_keys () = (Obj.repr cache, 
Hashtbl.to_seq_keys cache) let clear () = Hashtbl.clear cache; size := 0; Queue.clear queue; () let add x y = (if !size >= Capacity.capacity then (* Remove oldest element - if it's still around. *) let elt = Queue.pop queue in if Hashtbl.mem cache elt then ( decr size; Hashtbl.remove cache elt )); (* Add the new element, but bump the size only if it's a new addition. *) Queue.push x queue; if not (Hashtbl.mem cache x) then incr size; Hashtbl.replace cache x y let get x = Hashtbl.find_opt cache x let remove x = if Hashtbl.mem cache x then begin decr size; Hashtbl.remove cache x end let get_telemetry (telemetry : Telemetry.t) : Telemetry.t = let (objs, keys) = get_telemetry_items_and_keys () in let count = Seq.fold_left (fun a _ -> a + 1) 0 keys in if count = 0 then telemetry else let bytes = if SMTelemetry.hh_log_level () > 0 then Some (Obj.reachable_words (Obj.repr objs) * Sys.word_size / 8) else None in telemetry |> Telemetry.object_ ~key:(Value.description ^ "__local") ~value: (Telemetry.create () |> Telemetry.int_ ~key:"count" ~value:count |> Telemetry.int_opt ~key:"bytes" ~value:bytes) let () = SMTelemetry.get_telemetry_list := get_telemetry :: !SMTelemetry.get_telemetry_list; invalidate_local_caches_callback_list := clear :: !invalidate_local_caches_callback_list end (** Create a new value, but append the "__cache" prefix to its description *) module ValueForCache (Value : Value) = struct include Value let description = Value.description ^ "__cache" end module HeapWithLocalCache (Backend : Backend) (Key : Key) (Value : Value) (Capacity : Capacity) : sig include Heap with type key = Key.t and type value = Value.t and module KeyHasher = MakeKeyHasher(Key) and module KeySet = Set.Make(Key) and module KeyMap = WrappedMap.Make(Key) val write_around : key -> value -> unit val get_no_cache : key -> value option module Cache : LocalCacheLayer with type key = key and type value = value end = struct module Direct = Heap (Backend) (Key) (Value) type key = Direct.key type 
value = Direct.value module KeyHasher = Direct.KeyHasher module KeySet = Direct.KeySet module KeyMap = Direct.KeyMap module Cache = FreqCache (Key) (ValueForCache (Value)) (Capacity) let add x y = Direct.add x y; Cache.add x y let get_no_cache = Direct.get let write_around x y = (* Note that we do not need to do any cache invalidation here because * Direct.add is a no-op if the key already exists. *) Direct.add x y let log_hit_rate ~hit = Measure.sample (Value.description ^ " (cache hit rate)") (if hit then 1. else 0.); Measure.sample "(ALL cache hit rate)" (if hit then 1. else 0.) let get x = match Cache.get x with | None -> let result = match Direct.get x with | None -> None | Some v as result -> Cache.add x v; result in if SMTelemetry.hh_log_level () > 0 then log_hit_rate ~hit:false; result | Some _ as result -> if SMTelemetry.hh_log_level () > 0 then log_hit_rate ~hit:true; result (* We don't cache old objects, they are not accessed often enough. *) let get_old = Direct.get_old let get_old_batch = Direct.get_old_batch let mem_old = Direct.mem_old let mem x = match get x with | None -> false | Some _ -> true let get_batch keys = KeySet.fold begin (fun key acc -> KeyMap.add key (get key) acc) end keys KeyMap.empty (** Equivalent to moving a set of entries (= key + value) to some heap of old entries. 
*) let oldify_batch keys = Direct.oldify_batch keys; KeySet.iter Cache.remove keys let revive_batch keys = Direct.revive_batch keys; KeySet.iter Cache.remove keys let remove x = Direct.remove x; Cache.remove x let remove_old x : unit = Direct.remove_old x let remove_batch xs = Direct.remove_batch xs; KeySet.iter Cache.remove xs let remove_old_batch = Direct.remove_old_batch let () = invalidate_local_caches_callback_list := begin (fun () -> Cache.clear ()) end :: !invalidate_local_caches_callback_list module LocalChanges = struct let push_stack () = Direct.LocalChanges.push_stack (); Cache.clear () let pop_stack () = Direct.LocalChanges.pop_stack (); Cache.clear () end end
OCaml Interface
hhvm/hphp/hack/src/heap/sharedMem.mli
(*
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the "hack" directory of this source tree.
 *
 *)

(** This is just a sentinel for self-documenting purposes which some parts
    of the codebase use. They take a parameter "uses_sharedmem :
    SharedMem.uses" as a way to indicate to their callers that they
    read/write sharedmem. *)
type uses = Uses

exception Out_of_shared_memory

exception Hash_table_full

exception Heap_full

exception Sql_assertion_failure of int

exception C_assertion_failure of string

(** Configuration object that initializes shared memory. *)
type config = {
  global_size: int;
  heap_size: int;
  hash_table_pow: int;
  shm_use_sharded_hashtbl: bool;
  shm_cache_size: int;
  shm_dirs: string list;
  shm_min_avail: int;
  log_level: int;
  sample_rate: float;
  (* 0 - lz4, others -- compression level for zstd *)
  compression: int;
}
[@@deriving show]

(** Default configuration object *)
val default_config : config

(** Empty configuration object.

    There are places where we don't expect to write to shared memory, and
    doing so would be a memory leak. But since shared memory is global, it's
    very easy to accidentally call a function that will attempt such write.
    This config initializes shared memory with zero sizes. As such,
    attempting to write to shared memory that was initialized with this
    config, will make the program fail immediately. *)
val empty_config : config

(** A handle to initialized shared memory. Used to connect other workers to
    shared memory. *)
type handle

(** Internal type for a handle, to enable additional low-level heaps
    attachments *)
type internal_handle

(** Exposed for testing *)
val get_heap_size : handle -> int

(** Exposed for testing *)
val get_global_size : handle -> int

val clear_close_on_exec : handle -> unit

val set_close_on_exec : handle -> unit

val register_callbacks :
  (config -> num_workers:int -> internal_handle option) ->
  (internal_handle option -> worker_id:int -> unit) ->
  (unit -> internal_handle option) ->
  unit

(** Initialize shared memory. Must be called before forking. *)
val init : config -> num_workers:int -> handle

(** Connect other workers to shared memory *)
val connect : handle -> worker_id:int -> unit

(** Get the handle to shared memory. Returns nonsense if the current
    process hasn't yet connected to shared memory *)
val get_handle : unit -> handle

(** Allow or disallow remove operations. *)
val set_allow_removes : bool -> unit

(** Allow or disallow shared memory writes for the current process. *)
val set_allow_hashtable_writes_by_current_process : bool -> unit

(** Directly access the shared memory table. This can be used to provide
    proxying across the network *)
module RawAccess : sig
  type serialized = private bytes

  val mem_raw : string -> bool

  val get_raw : string -> serialized option

  val add_raw : string -> serialized -> unit

  val deserialize_raw : serialized -> 'a

  val serialize_raw : 'a -> serialized
end

(** Some telemetry utilities *)
module SMTelemetry : sig
  (** Get some shared-memory telemetry. Even works when shared memory hasn't
      been initialized yet. *)
  val get_telemetry : unit -> Telemetry.t

  (** Return the number of bytes allocated in shared memory. This includes
      bytes that were free'd but are not yet available for reuse. *)
  val heap_size : unit -> int

  (** Returns the number of bytes not reachable from hashtable entries. *)
  val wasted_heap_size : unit -> int

  (** The logging level for shared memory statistics:
      - 0 = nothing
      - 1 = log totals, averages, min, max bytes marshalled and
        unmarshalled *)
  val hh_log_level : unit -> int

  (** Get the sample rate for shared memory statistics. *)
  val hh_sample_rate : unit -> float

  (** Get the number of used slots in our hashtable. *)
  val hash_used_slots : unit -> int * int

  (** Get the number of total slots in our hashtable. *)
  val hash_slots : unit -> int

  type table_stats = {
    nonempty_slots: int;
    used_slots: int;
    slots: int;
  }

  (** Combine [hash_used_slots] and [hash_slots] *)
  val hash_stats : unit -> table_stats

  (* NOTE(review): original comment: "Not sure. Return the removed number of
     entries?" — semantics unverified from here. *)
  val hh_removed_count : unit -> int

  (** Did we overflow the heap? *)
  val is_heap_overflow : unit -> bool

  (** Compute the size of values in the garbage-collected heap. (???) *)
  val value_size : Obj.t -> int

  (** Log to our telemetry infra that we successfully initialized shared
      memory *)
  val init_done : unit -> unit
end

(** Interface to the garbage collector *)
module GC : sig
  val should_collect : [ `aggressive | `always_TEST | `gentle ] -> bool

  val collect : [ `aggressive | `always_TEST | `gentle ] -> unit
end

(** A hasher that hashes user-defined keys. The resulting hash can be used
    to index the big shared-memory table.

    Each hash is built by concatenating an optional "old" prefix, a
    heap-prefix and an object-specific key, then hashing the concatenation.
    The unique heap-prefix is automatically generated when calling
    `MakeKeyHasher`.

    Currently we use MD5 as the hashing algorithm. Note that only the first
    8 bytes are used to index the shared memory table. *)
module type KeyHasher = sig
  (** The type of keys that OCaml-land callers try to insert. This key will
      be object-specific (unique within a heap), but might not be unique
      across heaps. *)
  type key

  (** The hash of an old or new key. This hash will be unique across all
      heaps. *)
  type hash

  val hash : key -> hash

  val hash_old : key -> hash

  (** Return the raw bytes of the digest. Note that this is not hex
      encoded. *)
  val to_bytes : hash -> string
end

(** The interface that all keys need to implement *)
module type Key = sig
  type t

  val to_string : t -> string

  val compare : t -> t -> int
end

(** Make a new key that can be stored in shared-memory. *)
module MakeKeyHasher (Key : Key) : KeyHasher with type key = Key.t

(** The interface that all values need to implement *)
module type Value = sig
  type t

  val description : string
end

(** Whether or not a backend is evictable. *)
module type Evictability = sig
  val evictable : bool
end

(** Used to indicate that values can be evicted at all times. *)
module Evictable : Evictability

(** Used to indicate that values must never be evicted from the backend. *)
module NonEvictable : Evictability

(** Module type for a shared-memory backend for a heap. Each backend
    provides raw access to the underlying shared hash table. *)
module type Backend = functor (KeyHasher : KeyHasher) (Value : Value) -> sig
  val add : KeyHasher.hash -> Value.t -> unit

  val mem : KeyHasher.hash -> bool

  val get : KeyHasher.hash -> Value.t option

  val remove : KeyHasher.hash -> unit

  val move : KeyHasher.hash -> KeyHasher.hash -> unit
end

(** Backend that provides immediate access to the underlying hashtable. *)
module ImmediateBackend (_ : Evictability) : Backend

(** A heap for a user-defined type.

    Each heap supports "old" and "new" values. There are several cases where
    we need to compare the old and the new representations of objects to
    determine what has changed. The "old" representation is the value that
    was bound to that key in the last round of type-checking. *)
module type Heap = sig
  type key

  type value

  (** [KeyHasher] created for this heap. A new [KeyHasher] with a unique
      prefix is automatically generated for each heap. Normally, you
      shouldn't have to use the [KeyHasher] directly, but Zoncolan does. *)
  module KeyHasher : KeyHasher with type key = key

  module KeySet : Set.S with type elt = key

  module KeyMap : WrappedMap.S with type key = key

  (** Adds a binding to the table.

      Note: TODO(hverr), currently the semantics of inserting a value for a
      key that's already in the heap are unclear and depend on whether you
      have a local-changes stack or not. *)
  val add : key -> value -> unit

  val get : key -> value option

  val get_old : key -> value option

  val get_batch : KeySet.t -> value option KeyMap.t

  val get_old_batch : KeySet.t -> value option KeyMap.t

  val remove : key -> unit

  val remove_old : key -> unit

  val remove_batch : KeySet.t -> unit

  val remove_old_batch : KeySet.t -> unit

  val mem : key -> bool

  val mem_old : key -> bool

  (** Equivalent to moving a set of entries (= key + value) to some heap of
      old entries. *)
  val oldify_batch : KeySet.t -> unit

  val revive_batch : KeySet.t -> unit

  module LocalChanges : sig
    val push_stack : unit -> unit

    val pop_stack : unit -> unit
  end
end

(** A heap for a user-defined type. Provides no worker-local caching.
    Directly stores to and queries from shared memory. *)
module Heap (_ : Backend) (Key : Key) (Value : Value) :
  Heap
    with type key = Key.t
     and type value = Value.t
     and module KeyHasher = MakeKeyHasher(Key)
     and module KeySet = Set.Make(Key)
     and module KeyMap = WrappedMap.Make(Key)

(** A worker-local cache layer. Each local cache defines its own eviction
    strategy. For example, we currently have [FreqCache] and
    [OrderedCache]. *)
module type LocalCacheLayer = sig
  type key

  type value

  val add : key -> value -> unit

  val get : key -> value option

  val remove : key -> unit

  val clear : unit -> unit

  val get_telemetry_items_and_keys : unit -> Obj.t * key Seq.t
end

(** Invalidate all worker-local caches *)
val invalidate_local_caches : unit -> unit

(** Capacity of a worker-local cache. In number of elements. *)
module type Capacity = sig
  val capacity : int
end

(** FreqCache is an LFU (Least Frequently Used) cache.

    It keeps count of how many times each item in the cache has been
    added/replaced/fetched and, when it reaches 2*capacity, then it flushes
    1*capacity items and they lose their counts. This might result in a
    lucky few early items getting to stay in the cache while newcomers get
    evicted...

    It is Hashtbl.t-based with a bounded number of elements. *)
module FreqCache (Key : Key) (Value : Value) (_ : Capacity) :
  LocalCacheLayer with type key = Key.t and type value = Value.t

(** OrderedCache is an LRA (Least Recently Added) cache.

    Whenever you add an item beyond capacity, it will evict the oldest one
    to be added.

    It is Hashtbl.t-based with a bounded number of elements. *)
module OrderedCache (Key : Key) (Value : Value) (_ : Capacity) :
  LocalCacheLayer with type key = Key.t and type value = Value.t

(** Same as [Heap] but provides a layer of worker-local caching. *)
module HeapWithLocalCache
    (_ : Backend)
    (Key : Key)
    (Value : Value)
    (_ : Capacity) : sig
  include
    Heap
      with type key = Key.t
       and type value = Value.t
       and module KeyHasher = MakeKeyHasher(Key)
       and module KeySet = Set.Make(Key)
       and module KeyMap = WrappedMap.Make(Key)

  val write_around : key -> value -> unit

  val get_no_cache : key -> value option

  module Cache : LocalCacheLayer with type key = key and type value = value
end
OCaml
hhvm/hphp/hack/src/heap/sharedMemHash.ml
(* * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the "hack" directory of this source tree. * *) external get_hash : string -> int64 = "hh_get_hash_ocaml" let hash_string s = get_hash (Digest.string s)
OCaml Interface
hhvm/hphp/hack/src/heap/sharedMemHash.mli
(* * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the "hack" directory of this source tree. * *) (** Get the hash of a string, based on MD5. *) val hash_string : string -> int64
OCaml
hhvm/hphp/hack/src/heap/workerCancel.ml
(* * Copyright (c) 2015, Facebook, Inc. * All rights reserved. * * This source code is licensed under the MIT license found in the * LICENSE file in the "hack" directory of this source tree. * *) (* Please read the documentation in the .mli file. *) exception Worker_should_exit let () = Callback.register_exception "worker_should_exit" Worker_should_exit external stop_workers : unit -> unit = "hh_stop_workers" external resume_workers : unit -> unit = "hh_resume_workers" external set_can_worker_stop : bool -> unit = "hh_set_can_worker_stop" let with_no_cancellations_nested = ref false let with_no_cancellations f = if !with_no_cancellations_nested then failwith "with_no_cancellations cannot be nested"; with_no_cancellations_nested := true; Utils.try_finally ~f: begin fun () -> set_can_worker_stop false; f () end ~finally:(fun () -> set_can_worker_stop true; with_no_cancellations_nested := false)
OCaml Interface
hhvm/hphp/hack/src/heap/workerCancel.mli
(* * Copyright (c) 2015, Facebook, Inc. * All rights reserved. * * This source code is licensed under the MIT license found in the * LICENSE file in the "hack" directory of this source tree. * *) (* This exception might be thrown in code which executes in MultiWorker * workers. If you happen to catch it, the best course of action is to * re-throw it to guarantee speedy cancellation. *) exception Worker_should_exit val stop_workers : unit -> unit val resume_workers : unit -> unit val with_no_cancellations : (unit -> 'a) -> 'a
OCaml
hhvm/hphp/hack/src/heap/config/discover.ml
(** This is a dune configurator: https://jbuilder.readthedocs.io/en/latest/configurator.html *) module C = Configurator.V1 (* cmake should have prepared some information for us in the env: HACK_EXTRA_INCLUDE_PATHS HACK_EXTRA_LIB_PATHS HACK_EXTRA_NATIVE_LIBRARIES HACK_EXTRA_LINK_OPTS *) let query_env s = match Sys.getenv s with | "" -> [] | s -> String.split_on_char ';' s | exception Not_found -> [] let abs = let current_dir = Sys.getcwd () in (* we are in ./src/heap/config, locate . *) let root_dir = Filename.(dirname @@ dirname @@ dirname current_dir) in fun s -> if Filename.is_relative s then Filename.concat root_dir s else s let process_env () = let includes = query_env "HACK_EXTRA_INCLUDE_PATHS" |> List.map (fun s -> "-I" ^ abs s) in let dirs = query_env "HACK_EXTRA_LIB_PATHS" |> List.map (fun s -> "-L" ^ abs s) in let names = query_env "HACK_EXTRA_NATIVE_LIBRARIES" |> List.map (fun s -> "-l" ^ s) in let opaque_opts = query_env "HACK_EXTRA_LINK_OPTS" in (includes, dirs @ names @ opaque_opts) let () = C.main ~name:"heap" (fun (_ : C.t) -> let (cflags, cldflags) = process_env () in C.Flags.write_sexp "c_flags.sexp" cflags; C.Flags.write_sexp "c_library_flags.sexp" cldflags)
hhvm/hphp/hack/src/heap/config/dune
(executable (name discover) (libraries dune.configurator)) (rule (targets c_flags.sexp c_library_flags.sexp) (deps (env_var HACK_EXTRA_INCLUDE_PATHS) (env_var HACK_EXTRA_LIB_PATHS) (env_var HACK_EXTRA_NATIVE_LIBRARIES) (env_var HACK_EXTRA_LINK_OPTS)) (action (run ./discover.exe)))
hhvm/hphp/hack/src/heap/dictionary/dune
(* -*- tuareg -*- *) let library_entry name suffix = Printf.sprintf "(library (name %s) (wrapped false) (modules) (libraries %s_%s))" name name suffix let fb_entry name = library_entry name "fb" let stubs_entry name = library_entry name "stubs" let entry is_fb name = if is_fb then fb_entry name else stubs_entry name let () = (* test presence of fb subfolder *) let current_dir = Sys.getcwd () in (* we are in dictionary, locate dictionary/facebook *) let fb_dir = Filename.concat current_dir "facebook" in (* locate src/facebook/dune *) let fb_dune = Filename.concat fb_dir "dune" in let is_fb = Sys.file_exists fb_dune in let dictionary_data = entry is_fb "dictionary_data" in Jbuild_plugin.V1.send dictionary_data