language | filename | code |
---|---|---|
OCaml | hhvm/hphp/hack/src/decl/pos/positioned.ml | (*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the "hack" directory of this source tree.
*
*)
let make_for_decl : type a. Pos.t * a -> Decl_reference.t -> Pos_or_decl.t * a =
(fun (p, x) decl -> (Pos_or_decl.make_decl_pos p decl, x))
let make_for_decl_of_option :
type a. Pos.t * a -> Decl_reference.t option -> Pos_or_decl.t * a =
(fun (p, x) decl -> (Pos_or_decl.make_decl_pos_of_option p decl, x))
let unsafe_to_raw_positioned : type a. Pos_or_decl.t * a -> Pos.t * a =
(fun (p, x) -> (Pos_or_decl.unsafe_to_raw_pos p, x))
let of_raw_positioned (p, x) = (Pos_or_decl.of_raw_pos p, x) |
OCaml Interface | hhvm/hphp/hack/src/decl/pos/positioned.mli | (*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the "hack" directory of this source tree.
*
*)
(** Compress the position of a positioned value to be stored in the decl heap. *)
val make_for_decl : Pos.t * 'a -> Decl_reference.t -> Pos_or_decl.t * 'a
(** Compress the position of a positioned value to be stored in the decl heap.
If no decl reference is given, the position is not compressed but
simply converted. *)
val make_for_decl_of_option :
Pos.t * 'a -> Decl_reference.t option -> Pos_or_decl.t * 'a
(** This may become unsafe in the future as we change the implementation
of positions in the decl heap. Avoid using in new code.
Use an id from an AST instead of from a decl or type,
or resolve decl position to a raw position using a provider context. *)
val unsafe_to_raw_positioned : Pos_or_decl.t * 'a -> Pos.t * 'a
(** This is essentially an upcast. *)
val of_raw_positioned : Pos.t * 'a -> Pos_or_decl.t * 'a |
OCaml | hhvm/hphp/hack/src/decl/pos/pos_or_decl.ml | (*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the "hack" directory of this source tree.
*
*)
type t = Pos.t [@@deriving eq, hash, ord, show]
module Map = Pos.Map
(** The decl and file of a position. *)
type ctx = {
decl: Decl_reference.t option;
file: Relative_path.t;
}
[@@deriving show]
let none : t = Pos.none
let btw = Pos.btw
let get_raw_pos : t -> Pos.t option = (fun p -> Some p)
let of_raw_pos : Pos.t -> t = (fun p -> p)
let make_decl_pos : Pos.t -> Decl_reference.t -> t =
(fun p _decl -> (* TODO *) of_raw_pos p)
let make_decl_pos_of_option : Pos.t -> Decl_reference.t option -> t =
(fun p _decl -> (* TODO *) of_raw_pos p)
let is_hhi : t -> bool =
fun p ->
match get_raw_pos p with
| None -> (* TODO T81321312 *) false
| Some p -> Pos.is_hhi p
let set_from_reason : t -> t = Pos.set_from_reason
let unsafe_to_raw_pos : t -> Pos.t = (fun p -> p)
let filename : t -> Relative_path.t = (fun p -> Pos.filename p)
let line_start_end_columns : t -> int * int * int = Pos.info_pos
let json : t -> Hh_json.json = (fun p -> p |> Pos.to_absolute |> Pos.json)
let show_as_absolute_file_line_characters : t -> string =
(fun p -> p |> Pos.to_absolute |> Pos.string)
let fill_in_filename : Relative_path.t -> t -> Pos.t = (fun _filename p -> p)
let fill_in_filename_if_in_current_decl :
current_decl_and_file:ctx -> t -> Pos.t option =
fun ~current_decl_and_file p ->
let { file = current_file; decl = _current_decl } = current_decl_and_file in
(* TODO use current_decl *)
if Relative_path.equal (Pos.filename p) current_file then
Some p
else
None
let get_raw_pos_or_decl_reference :
t -> [> `Raw of Pos.t | `Decl_ref of Decl_reference.t ] =
(fun p -> `Raw p)
let to_span p = Pos.set_file () p |
OCaml Interface | hhvm/hphp/hack/src/decl/pos/pos_or_decl.mli | (*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the "hack" directory of this source tree.
*
*)
(** There are two kinds of positions: AST positions provided by the parser,
which are fully qualified with filename, line and column ranges, and
decl positions, which may be compressed and need resolving before being
used or printed.
AST positions are represented by Pos.t. Decl positions don't have their own
type yet but may have in the future.
This type is for either of these positions.
It's used in the decl heap and in places that can take any kind of position,
e.g. error secondary positions. *)
type t [@@deriving eq, hash, ord, show]
module Map : WrappedMap.S with type key = t
(** The decl and file of a position. *)
type ctx = {
decl: Decl_reference.t option;
file: Relative_path.t;
}
[@@deriving show]
val none : t
(** Fill in the gap "between" first position and second position.
Not valid if the positions are from different files or the second position precedes the first. *)
val btw : t -> t -> t
(** Essentially an upcast. *)
val of_raw_pos : Pos.t -> t
val set_from_reason : t -> t
(** Compress a position to be stored in the decl heap. *)
val make_decl_pos : Pos.t -> Decl_reference.t -> t
(** Compress a position to be stored in the decl heap.
If no decl reference is given, the position is not compressed but
simply converted. *)
val make_decl_pos_of_option : Pos.t -> Decl_reference.t option -> t
val is_hhi : t -> bool
val filename : t -> Relative_path.t
(** This may become unsafe in the future as we change the implementation
of positions in the decl heap. Avoid using in new code.
Use a position from an AST instead of from a decl or type,
or resolve decl position to a raw position using a provider context.
TODO: get rid of unsafe_to_raw_pos before changing implementation of t. T87777740 *)
val unsafe_to_raw_pos : t -> Pos.t
(** For spans over just one line, return the line number, start column and end column.
This returns a closed interval.
Undefined for multi-line spans. *)
val line_start_end_columns : t -> int * int * int
val json : t -> Hh_json.json
val show_as_absolute_file_line_characters : t -> string
(** Replace the decl reference part of the position with a filename. *)
val fill_in_filename : Relative_path.t -> t -> Pos.t
(** Check that the position is in the current decl and if it is, resolve
it with the current file. *)
val fill_in_filename_if_in_current_decl :
current_decl_and_file:ctx -> t -> Pos.t option
(** Returns either a raw position equivalent to this position or the decl
that this position belongs to. *)
val get_raw_pos_or_decl_reference :
t -> [> `Raw of Pos.t | `Decl_ref of Decl_reference.t ]
val to_span : t -> unit Pos.pos |
Rust | hhvm/hphp/hack/src/decl/rust_decl_ffi/rust_decl_ffi.rs | // Copyright (c) 2019, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use ast_and_decl_parser::Env;
use bumpalo::Bump;
use ocamlrep::bytes_from_ocamlrep;
use ocamlrep::ptr::UnsafeOcamlPtr;
use ocamlrep_caml_builtins::Int64;
use ocamlrep_ocamlpool::ocaml_ffi;
use ocamlrep_ocamlpool::ocaml_ffi_arena_result;
use ocamlrep_ocamlpool::ocaml_ffi_with_arena;
use oxidized::decl_parser_options::DeclParserOptions;
use oxidized_by_ref::direct_decl_parser::Decls;
use oxidized_by_ref::direct_decl_parser::ParsedFile;
use oxidized_by_ref::direct_decl_parser::ParsedFileWithHashes;
use parser_core_types::indexed_source_text::IndexedSourceText;
use relative_path::RelativePath;
#[derive(Debug, Clone)]
pub struct OcamlParsedFileWithHashes<'a>(ParsedFileWithHashes<'a>);
impl<'a> From<ParsedFileWithHashes<'a>> for OcamlParsedFileWithHashes<'a> {
fn from(file: ParsedFileWithHashes<'a>) -> Self {
Self(file)
}
}
// NB: Must keep in sync with OCaml type `Direct_decl_parser.parsed_file_with_hashes`.
// Written manually because the underlying type doesn't implement ToOcamlRep;
// even if it did, its self.0.decls structure stores hh24_types::DeclHash
// but here we need an Int64. Writing manually is slicker than constructing
// a temporary vec.
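// Rough shape of the OCaml value this produces, inferred from the block layout
// below rather than copied from the OCaml definition (so treat it as an
// assumption): a 3-field block holding the file mode, the file-level decl hash
// as an Int64, and a list of (name, decl, Int64 hash) tuples.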
impl ocamlrep::ToOcamlRep for OcamlParsedFileWithHashes<'_> {
fn to_ocamlrep<'a, A: ocamlrep::Allocator>(&'a self, alloc: &'a A) -> ocamlrep::Value<'a> {
let mut block = alloc.block_with_size(3);
alloc.set_field(&mut block, 0, alloc.add(&self.0.mode));
alloc.set_field(
&mut block,
1,
alloc.add_copy(Int64(self.0.file_decls_hash.as_u64() as i64)),
);
let mut hd = alloc.add(&());
for (name, decl, hash) in self.0.iter() {
let mut tuple = alloc.block_with_size(3);
alloc.set_field(&mut tuple, 0, alloc.add(name));
alloc.set_field(&mut tuple, 1, alloc.add(decl));
alloc.set_field(&mut tuple, 2, alloc.add_copy(Int64(hash.as_u64() as i64)));
let mut cons_cell = alloc.block_with_size(2);
alloc.set_field(&mut cons_cell, 0, tuple.build());
alloc.set_field(&mut cons_cell, 1, hd);
hd = cons_cell.build();
}
alloc.set_field(&mut block, 2, hd);
block.build()
}
}
ocaml_ffi_arena_result! {
fn hh_parse_decls_ffi<'a>(
arena: &'a Bump,
opts: DeclParserOptions,
filename: RelativePath,
text: UnsafeOcamlPtr,
) -> ParsedFile<'a> {
// SAFETY: Borrow the contents of the source file from the value on the
// OCaml heap rather than copying it over. This is safe as long as we
// don't call into OCaml within this function scope.
let text_value: ocamlrep::Value<'a> = unsafe { text.as_value() };
let text = bytes_from_ocamlrep(text_value).expect("expected string");
direct_decl_parser::parse_decls_for_typechecking(&opts, filename, text, arena)
}
fn hh_parse_and_hash_decls_ffi<'a>(
arena: &'a Bump,
opts: DeclParserOptions,
deregister_php_stdlib_if_hhi: bool,
filename: RelativePath,
text: UnsafeOcamlPtr,
) -> OcamlParsedFileWithHashes<'a> {
let prefix = filename.prefix();
// SAFETY: Borrow the contents of the source file from the value on the
// OCaml heap rather than copying it over. This is safe as long as we
// don't call into OCaml within this function scope.
let text_value: ocamlrep::Value<'a> = unsafe { text.as_value() };
let text = bytes_from_ocamlrep(text_value).expect("expected string");
let parsed_file = direct_decl_parser::parse_decls_for_typechecking(&opts, filename, text, arena);
let with_hashes = ParsedFileWithHashes::new(parsed_file, deregister_php_stdlib_if_hhi, prefix, arena);
with_hashes.into()
}
}
ocaml_ffi_with_arena! {
fn decls_hash<'a>(arena: &'a Bump, decls: Decls<'a>) -> Int64 {
Int64(hh_hash::position_insensitive_hash(&decls) as i64)
}
}
ocaml_ffi! {
fn checksum_addremove_ffi(
checksum: Int64,
symbol: Int64,
decl_hash: Int64,
path: relative_path::RelativePath
) -> Int64 {
// CARE! This implementation must be identical to the strongly-typed one in hh24_types.rs
// I wrote it out as a separate copy because I didn't want hh_server to take a dependency
// upon hh24_types
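// Note: because the update is a plain XOR, applying it twice with the same
// (symbol, decl_hash, path) triple restores the original checksum, which is
// presumably why one FFI can serve both the add and the remove direction.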
let checksum = checksum.0 as u64;
let checksum = checksum ^ hh_hash::hash(&(symbol, decl_hash, path));
Int64(checksum as i64)
}
}
#[no_mangle]
unsafe extern "C" fn hh_parse_ast_and_decls_ffi(env: usize, source_text: usize) -> usize {
fn inner(env: usize, source_text: usize) -> usize {
use ocamlrep::FromOcamlRep;
use ocamlrep_ocamlpool::to_ocaml;
use parser_core_types::source_text::SourceText;
// SAFETY: We can't call into OCaml while these values created via
// `from_ocaml` exist.
let env = unsafe { Env::from_ocaml(env).unwrap() };
let source_text = unsafe { SourceText::from_ocaml(source_text).unwrap() };
let indexed_source_text = IndexedSourceText::new(source_text);
let arena = &Bump::new();
let (ast_result, decls) = ast_and_decl_parser::from_text(&env, &indexed_source_text, arena);
// WARNING! this doesn't respect deregister_php_stdlib and is likely wrong.
let decls = ParsedFileWithHashes::new_without_deregistering_do_not_use(decls);
let decls = OcamlParsedFileWithHashes::from(decls);
// SAFETY: Requires no concurrent interaction with the OCaml runtime
unsafe { to_ocaml(&(ast_result, decls)) }
}
ocamlrep_ocamlpool::catch_unwind(|| inner(env, source_text))
} |
hhvm/hphp/hack/src/depgraph/dune | (data_only_dirs cargo depgraph_reader depgraph_writer)
(library
(name depgraph_reader)
(modules)
(wrapped false)
(foreign_archives depgraph_reader))
(rule
(targets libdepgraph_reader.a)
(deps
(source_tree %{workspace_root}/hack/src))
(locks /cargo)
(action
(run %{workspace_root}/hack/scripts/invoke_cargo.sh depgraph_reader depgraph_reader))) |
|
Rust | hhvm/hphp/hack/src/depgraph/balanced_partition/balance.rs | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use std::cmp::Reverse;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::AtomicU32;
use std::sync::atomic::Ordering;
use rayon::prelude::*;
use crate::config::BalanceConfig;
use crate::timers::TimerGuard;
use crate::timers::Timers;
/// This is an opaque identifier for a Doc supplied by the user of this crate to
/// identify which Doc is which after `optimize_doc_order` permutes them.
#[derive(Default)]
pub struct ExternalId(pub u32);
pub struct Doc {
/// This is a sorted list of what we're calling row numbers. You can think of them as
/// representing a set of indexed terms in a Doc, or alternatively as in-edges in a
/// directed graph.
///
/// Our ultimate goal is to compress a set of Docs indexed by these row numbers. That
/// data structure would look like an array of Doc numbers for each row (indicating all
/// Docs that contain that row number). If we permute Doc numbers so that the numbers in
/// a row are clustered together, delta coding will make the representation of that row
/// smaller.
///
/// As an optimization, the exact meaning of row numbers changes throughout the
/// algorithm and ultimately this vec gets clobbered. As we recursively partition the
/// optimization space, we'll renumber the edge_lists for Docs in that partition to keep
/// the row numbering dense. This improves cache locality and reduces memory
/// utilization. For example, if we originally had 1000 different values across all
/// 10000 Docs, this would contain some number of values in the range [0, 1000). As we
/// recurse and (say) get down to 2500 docs, we may find ourselves with only 500 rows
/// mentioned by any of those Docs. So we renumber the edge_lists in those docs to only
/// contain [0, 500).
pub edge_list: Vec<u32>,
/// How many identical docs this represents.
pub weight: u32,
/// Unique identifier for this doc, so the caller can recognize it.
pub id: ExternalId,
}
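// An illustrative toy example (not taken from a real dep graph): three Docs with
// edge_lists [0, 2], [0, 1] and [2] describe rows 0 -> docs {0, 1}, 1 -> {1},
// and 2 -> {0, 2}. Permuting Doc numbers so that docs sharing a row end up
// adjacent makes each row's sorted doc-number list delta-code into smaller values.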
/// A lookup table used to calculate `N * log2(N + 1)` quickly, where N is an integer.
///
/// The results it produces are not the floating-point answer, but rather that answer
/// scaled up then rounded to an integer.
struct CostTable {
/// Holds N * log2(N + 1), but normalized so the largest table entry equals u32::MAX.
///
/// The intent here is to let us use integer arithmetic everywhere, as it is associative
/// and can be used with atomics if we want.
table: Vec<u32>,
}
impl CostTable {
/// Create a `CostTable` whose `cost` method accepts arguments up to and including `max_arg`.
fn new(max_arg: usize) -> Self {
// Compute the answer for `n` as an f64, which we'll normalize to a `u32`.
fn f64_value(n: usize) -> f64 {
let n = n as f64;
n * (n + 1.0).log2()
}
// Compute a scaling factor that lets us normalize everything to u32::MAX.
let max_value = f64_value(max_arg);
let scale = if max_value == 0.0 {
0.0
} else {
u32::MAX as f64 / max_value
};
let table: Vec<u32> = (0..=max_arg)
.into_par_iter()
.map(move |n| {
let f = f64_value(n) * scale;
let neg_cost_u64: u64 = f.round() as u64;
// Use try_into just in case rounding pushes us over the edge.
neg_cost_u64.try_into().unwrap_or(u32::MAX)
})
.collect();
Self { table }
}
/// Returns the cost (an integer proportional to `-(N * log2(N + 1))`).
fn lookup(&self, n: u32) -> i64 {
-(self.table[n as usize] as i64)
}
/// An estimate for the encoding cost for a sequence of sorted integers,
/// where `deg_a` values are on the left half, and `deg_b` are on the
/// right half.
///
/// The idea here is that placing more values on one half or the other is
/// cheaper to encode, because the average delta between values gets smaller,
/// meaning fewer bits are needed to encode each delta.
///
/// Stepping up a level, minimizing costs corresponds to numbering edge lists
/// densely in the original, untransposed graph.
fn cost(&self, deg_a: u32, deg_b: u32) -> i64 {
self.lookup(deg_a) + self.lookup(deg_b)
}
}
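// A worked example of the cost intuition (ignoring the u32 scaling): splitting
// 10 values 5/5 gives f(5) + f(5) = 2 * 5 * log2(6) ~= 25.8, while a 9/1 split
// gives f(9) + f(1) = 9 * log2(10) + 1 ~= 30.9. Since `cost` negates these sums,
// the skewed split scores lower, matching the goal of clustering values on one
// side of the partition.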
/// Similar to `vec![Default::default(); len]`, but for when `T` does not support `clone()`.
fn vec_with_default<T: Default>(len: usize) -> Vec<T> {
let mut v = Vec::with_capacity(len);
v.resize_with(len, Default::default);
v
}
fn compute_row_move_gains(
partition: &mut Partition,
cost_table: &CostTable,
) -> (i64, Vec<i64>, Vec<i64>) {
let mut total_cur_cost = 0i64;
// We could do this in parallel, but we don't spend enough time doing it to
// make it worthwhile.
let (l_to_r_gains, r_to_l_gains): (Vec<i64>, Vec<i64>) = partition
.total_degrees
.iter_mut()
.zip(partition.right_degrees.iter_mut())
.map(|(t, r)| {
let total_deg = *t.get_mut();
let right_deg = *r.get_mut();
let left_deg = total_deg - right_deg;
let cur_cost = cost_table.cost(left_deg, right_deg);
total_cur_cost += cur_cost;
let cost_after_move_l_to_r = cost_table.cost(left_deg.saturating_sub(1), right_deg + 1);
let l_to_r_move_gain = cur_cost - cost_after_move_l_to_r;
let cost_after_move_r_to_l = cost_table.cost(left_deg + 1, right_deg.saturating_sub(1));
let r_to_l_move_gain = cur_cost - cost_after_move_r_to_l;
(l_to_r_move_gain, r_to_l_move_gain)
})
.unzip();
(total_cur_cost, l_to_r_gains, r_to_l_gains)
}
fn compute_doc_move_gains<'a>(
docs: &'a mut [Doc],
l_to_r_gains: Vec<i64>,
r_to_l_gains: Vec<i64>,
left_partition_len: usize,
is_parallel: bool,
) -> Vec<(i64, &mut Doc)> {
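// For each doc, the gain of moving it across the partition is the sum of the
// per-row gains over its edge list; which gain table applies depends on which
// side of the partition the doc currently sits on.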
let sum_gains = move |(i, doc): (usize, &'a mut Doc)| {
let mut gain = 0;
let gains = if i < left_partition_len {
&l_to_r_gains[..]
} else {
&r_to_l_gains[..]
};
for &row in doc.edge_list.iter() {
gain += gains[row as usize];
}
(gain, doc)
};
if is_parallel {
docs.par_iter_mut().enumerate().map(sum_gains).collect()
} else {
docs.iter_mut().enumerate().map(sum_gains).collect()
}
}
/// This approximately sorts `left_gains` and `right_gains` by their i64
/// cost field, putting the largest values first. But not quite, for speed.
///
/// If we didn't care about speed it would be completely correct to use
/// this implementation:
///
/// ```
/// left_gains.sort_unstable_by_key(|a| Reverse(a.0));
/// right_gains.sort_unstable_by_key(|a| Reverse(a.0));
/// ```
///
/// But we can do better, because we know the caller doesn't need a full sort.
/// The caller iterates through values in both arrays pairwise until the sum
/// is nonpositive, then stops. Any values it visits before stopping are swapped
/// to the other side of the partition, and the order in which this happens
/// does not matter.
///
/// Suppose we did fully sort both arrays. We would end up with three regions
/// (P == Positive, N = Nonpositive). We'll assume without loss of generality that
/// right has fewer positive values:
///
/// left right
/// ---- -----
/// P P |
/// ... ... |--- Positive/Positive
/// P P |
///
/// P N |
/// ... ... |--- Positive/Negative (this size is called `overlap` below)
/// P N |
///
/// N N |
/// ... ... |--- Negative/Negative
/// N N |
///
/// We only need a proper sort for the P/N section in the middle. Everything else
/// can just be roughly bucketed.
///
/// - In the P/P section, we're going to swap everything in both arrays across the partition
/// because their sums are positive. It's not important exactly which pairs get switched,
/// we only care that everything on left moves to right and vice versa.
/// - Similarly, we know in the N/N section we're going to swap nothing, so we don't care
/// about the exact ordering within either N section.
/// - For P/N, we fully sort both sides (technically even this isn't required, but that
/// starts to get complicated).
///
fn partially_sort_move_gains<'a, 'b: 'a>(
left_gains: &'a mut [(i64, &'b mut Doc)],
right_gains: &'a mut [(i64, &'b mut Doc)],
) {
let left_num_pos = itertools::partition(left_gains.iter_mut(), |g| g.0 > 0);
let right_num_pos = itertools::partition(right_gains.iter_mut(), |g| g.0 > 0);
if left_num_pos == right_num_pos {
// There is no P/N section, we are done.
return;
}
// Figure out which side has more P values, and call its positive values `pos`.
// Call the other side's negative values `neg`.
//
// These correspond to the left column P values and right column N values in
// the top-of-function comment.
let (mut neg, mut pos, overlap) = if left_num_pos > right_num_pos {
(
&mut right_gains[right_num_pos..],
&mut left_gains[..left_num_pos],
left_num_pos - right_num_pos,
)
} else {
(
&mut left_gains[left_num_pos..],
&mut right_gains[..right_num_pos],
right_num_pos - left_num_pos,
)
};
// Move the smallest P values into the overlap section. We could legally put the largest ones
// there, but empirically that seems to do a bit worse.
if overlap < pos.len() {
let (_, _, smallest) =
pos.select_nth_unstable_by_key(pos.len() - overlap, |a| Reverse(a.0));
pos = smallest;
}
// Sort to place the largest P values in the overlap section first.
pos.sort_unstable_by_key(|a| Reverse(a.0));
// Move the largest (that is, closest to zero) N values into the overlap section.
if overlap < neg.len() {
// Grab just the largest N values for the overlap section.
let (largest, _, _) = neg.select_nth_unstable_by_key(overlap - 1, |a| Reverse(a.0));
neg = largest;
}
// Sort to place the largest N values in the overlap section first.
neg.sort_unstable_by_key(|a| Reverse(a.0));
}
/// Update counters for the docs that moved.
fn par_update_partition_for_swaps(swaps: Vec<(&mut Doc, &mut Doc)>, partitions: &Partition) {
let right_degrees = &partitions.right_degrees[..];
swaps
.into_par_iter()
.for_each(move |(r_to_l_doc, l_to_r_doc)| {
let sub_weight = r_to_l_doc.weight;
for &edge in r_to_l_doc.edge_list.iter() {
right_degrees[edge as usize].fetch_sub(sub_weight, Ordering::Relaxed);
}
let add_weight = l_to_r_doc.weight;
for &edge in l_to_r_doc.edge_list.iter() {
right_degrees[edge as usize].fetch_add(add_weight, Ordering::Relaxed);
}
});
}
/// Serial version of par_update_partition_for_swaps. Avoids atomics.
fn ser_update_partition_for_swaps(swaps: Vec<(&mut Doc, &mut Doc)>, partitions: &mut Partition) {
let right_degrees = &mut partitions.right_degrees[..];
for (r_to_l_doc, l_to_r_doc) in swaps {
let sub_weight = r_to_l_doc.weight;
for &edge in r_to_l_doc.edge_list.iter() {
let n = right_degrees[edge as usize].get_mut();
*n = n.wrapping_sub(sub_weight);
}
let add_weight = l_to_r_doc.weight;
for &edge in l_to_r_doc.edge_list.iter() {
let n = right_degrees[edge as usize].get_mut();
*n = n.wrapping_add(add_weight);
}
}
}
/// Remove any trailing zeros from the Vec.
fn remove_trailing_zeros(v: &mut Vec<AtomicU32>) {
let last_nonzero = v.iter_mut().rposition(|n| *n.get_mut() != 0);
v.truncate(last_nonzero.map_or(0, |n| n + 1));
}
/// This semantically holds the total weights on the left and right halves of
/// the partition, which we need for our cost function.
///
/// For efficiency, we physically represent it as `(total, right)` rather
/// than `(left, right)`, with `left` implicitly being `total - right`.
/// This is so hot operations can just increment or decrement `right`, rather
/// than both a `left` and `right` vec.
#[derive(Default)]
struct Partition {
/// For each row, the sum of Doc weights across that row for Docs that have that row in
/// their `edge_list`.
total_degrees: Vec<AtomicU32>,
/// Like `total_degrees`, but only sums Docs on the right half of the partition.
///
/// The degrees for the left half can of course be computed as `total - right`.
right_degrees: Vec<AtomicU32>,
}
/// Compute the sum of weights across each row.
fn compute_degrees(docs: &[Doc], num_rows: usize, is_parallel: bool) -> Vec<AtomicU32> {
let mut degrees: Vec<AtomicU32> = vec_with_default(num_rows);
if is_parallel {
docs.par_iter().for_each(|doc| {
let weight = doc.weight;
for &row in doc.edge_list.iter() {
degrees[row as usize].fetch_add(weight, Ordering::Relaxed);
}
});
} else {
for doc in docs.iter() {
let weight = doc.weight;
for &row in doc.edge_list.iter() {
*degrees[row as usize].get_mut() += weight;
}
}
}
degrees
}
/// Simplify the problem as we recurse by deleting rows.
fn remove_useless_rows<'a>(
total_degrees: &mut Vec<AtomicU32>,
docs: &'a mut [Doc],
config: &BalanceConfig,
is_parallel: bool,
) -> &'a mut [Doc] {
// TODO: Only bother doing anything if we find X% of rows are useless?
// The full rows are of course costliest, they could be weighted more.
let num_docs = docs.len() as u32;
if num_docs == 0 {
return docs;
}
// A row with too few docs isn't worth optimizing. Similarly, a row containing
// every possible doc isn't interesting, as permuting never has any effect.
//
// TODO: Every doc except one is equivalently useless to one doc, so
// treat them the same?
let min_deg = config.min_row_degree;
let max_deg = std::cmp::min(num_docs - 1, config.max_row_degree);
let is_useless = move |total_deg: u32| total_deg < min_deg || total_deg > max_deg;
// Skip ahead to the first useless row, hoping we find none and can return early.
let first_discard_index = match total_degrees
.iter_mut()
.map(|n| *n.get_mut())
.position(is_useless)
{
Some(i) => i,
None => return docs,
};
// Because we're removing at least one row, we'll need to renumber the remaining rows
// to keep the numbering dense.
const NO_REMAP: u32 = u32::MAX;
let mut remap = vec![NO_REMAP; total_degrees.len()];
for (i, r) in remap.iter_mut().take(first_discard_index).enumerate() {
*r = i as u32;
}
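// Rows before the first useless one keep their existing numbers (handled above);
// the loop below compacts the rest, mapping kept rows to the next free number
// and leaving discarded rows marked NO_REMAP.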
let mut out = first_discard_index;
for (i, remap_ref) in remap.iter_mut().enumerate().skip(first_discard_index + 1) {
if !is_useless(*total_degrees[i].get_mut()) {
// Keep this row, but remap it later.
let n = *total_degrees[i].get_mut();
*total_degrees[out].get_mut() = n;
*remap_ref = out as u32;
out += 1;
}
}
total_degrees.truncate(out);
// Renumber and shrink docs. If we ever see an empty doc, we'll have extra cleanup to do.
let found_empty_doc = if is_parallel {
let found_empty_doc = AtomicBool::new(false);
docs.par_iter_mut().for_each(|doc| {
if doc
.edge_list
.last()
.map_or(false, |&n| (n as usize) >= first_discard_index)
{
doc.edge_list.retain_mut(|n| {
let r = remap[*n as usize];
*n = r;
r != NO_REMAP
});
}
if doc.edge_list.is_empty() {
found_empty_doc.store(true, Ordering::Relaxed);
}
});
found_empty_doc.into_inner()
} else {
let mut found_empty_doc = false;
for doc in docs.iter_mut() {
if doc
.edge_list
.last()
.map_or(false, |&n| (n as usize) >= first_discard_index)
{
doc.edge_list.retain_mut(|n| {
let r = remap[*n as usize];
*n = r;
r != NO_REMAP
});
}
if doc.edge_list.is_empty() {
found_empty_doc = true;
}
}
found_empty_doc
};
if found_empty_doc {
// We found some empty docs, move them to the end and forget about them.
let split = itertools::partition(docs.iter_mut(), |doc| !doc.edge_list.is_empty());
&mut docs[..split]
} else {
docs
}
}
/// Permute a slice of Docs using a greedy sliding window algorithm rather than
/// recursive bisection. We use this at the leaves of our recursion to try to maximize
/// the effectiveness of RLE.
fn optimize_base_case(docs: &mut [Doc], num_rows: usize, config: &BalanceConfig, timers: &Timers) {
if docs.len() <= 2 {
return;
}
let _timer_guard = TimerGuard::new(&timers.leaf_nanos);
// This is u32::MAX - 1 so that we can do `prev_index + 1` below without wraparound.
const NOT_YET_SEEN: u32 = u32::MAX - 1;
let mut latest: Vec<(u32, bool)> = vec![(NOT_YET_SEEN, false); num_rows];
// This is a simple greedy algorithm that just finds the next Doc in a sliding window that seems
// to be cheapest to emit, emits it, and moves on to the next. The intent is to place very similar
// Docs next to each other, or nearby, even if recursively partitioning didn't do it (e.g. because
// it didn't recurse all the way down).
for i in 0..docs.len() - 1 {
let mut best_index = i;
let mut best_score = i64::MIN;
for (j, doc) in docs
.iter()
.enumerate()
.skip(i)
.take(config.max_leaf_window_size)
{
let mut score = 0i64;
// This implementation is completely unprincipled and has little empirical basis,
// it's just some intuitive rough guess for a cost function that might help.
for &row in doc.edge_list.iter() {
let (prev_index, prev_is_rle) = latest[row as usize];
if prev_index == NOT_YET_SEEN {
// We've never seen this before (no recent repeats), so penalize it a bit.
score -= docs.len() as i64;
} else {
let distance = i as i64 - prev_index as i64;
if distance == 1 {
if prev_is_rle {
// Continuing an RLE block is ideal.
score += 10_000_000;
} else {
// Starting a new RLE block is good too.
score += 9_000_000;
}
} else {
// No RLE, but bias toward repeating recent occurrences
// since that will yield smaller deltas.
// Really this should involve a log2, but meh.
score -= distance;
}
}
}
if score > best_score {
best_score = score;
best_index = j;
}
}
docs.swap(i, best_index);
// We've made our choice, update `latest` to indicate what's been emitted.
let newly_placed_index = i as u32;
// Go ahead and grab the edge_list here, so we'll free it when we iterate
// through it below. No one should be looking at this Vec any more, we've
// been modifying its contents as we've recursed, and the contents aren't
// needed any more.
let edges = std::mem::take(&mut docs[i].edge_list);
for edge in edges {
// Read the old state.
let l = &mut latest[edge as usize];
let (prev_index, _was_rle) = *l;
// Write the new state.
let is_now_rle = prev_index + 1 == newly_placed_index;
*l = (newly_placed_index, is_now_rle);
}
}
}
/// This is the heart of the balanced partitioning algorithm; see
/// https://www.kdd.org/kdd2016/papers/files/rpp0883-dhulipalaAemb.pdf for
/// an explanation.
///
/// This permutes `docs` to try to make the left and right halves more
/// self-similar, so they compress better, then recurses over the left
/// and right halves.
///
/// For speed, rather than actually recursing we loop down the left side
/// and post a task for the right side to a `rayon::Scope`, so an idle
/// thread can steal that work if it wants.
fn recursively_balance<'a>(
scope: &rayon::Scope<'a>,
mut docs: &'a mut [Doc],
cost_table: &'a CostTable,
mut total_degrees: Vec<AtomicU32>,
mut depth: u32,
config: &'a BalanceConfig,
timers_per_depth: &'a [Timers],
mut num_log_tokens: usize,
) {
// This will loop to recurse down the left hand side.
loop {
let timers = &timers_per_depth[std::cmp::min(depth as usize, timers_per_depth.len() - 1)];
// Use parallel algos within this depth? See BalanceConfig::max_par_depth docs.
let is_parallel = depth < config.max_par_depth;
let remove_useless_timer = TimerGuard::new(&timers.remove_useless_nanos);
docs = remove_useless_rows(&mut total_degrees, docs, config, is_parallel);
let setup_timer = TimerGuard::handoff(remove_useless_timer, &timers.setup_nanos);
let left_partition_len = docs.len() / 2;
let right_partition_len = docs.len() - left_partition_len;
remove_trailing_zeros(&mut total_degrees);
let right_degrees = compute_degrees(
&docs[left_partition_len..],
total_degrees.len(),
is_parallel,
);
drop(setup_timer);
let mut partitions = Partition {
total_degrees,
right_degrees,
};
if num_log_tokens != 0 {
log::info!(
"Starting depth {}, {} rows, {} docs",
depth,
partitions.total_degrees.len(),
docs.len()
);
}
// We use this to notice when the cost stops improving.
let mut prev_cost = i64::MAX;
let mut swaps_capacity_guess = left_partition_len / 2;
// Repeatedly try to improve the left/right partitioning by swapping Docs across the boundary.
for round_number in 0..config.max_rounds {
if num_log_tokens != 0 {
log::info!("Starting depth {} round {}", depth, round_number);
}
let row_move_gains_timer = TimerGuard::new(&timers.row_move_gains_nanos);
let (cur_cost, l_to_r_gains, r_to_l_gains) =
compute_row_move_gains(&mut partitions, cost_table);
if cur_cost >= prev_cost {
// We didn't improve since last time, so just stop looking.
break;
}
prev_cost = cur_cost;
let doc_move_gains_timer =
TimerGuard::handoff(row_move_gains_timer, &timers.doc_move_gains_nanos);
let mut doc_move_gains = compute_doc_move_gains(
docs,
l_to_r_gains,
r_to_l_gains,
left_partition_len,
is_parallel,
);
// Place the largest values first.
let sort_timer = TimerGuard::handoff(doc_move_gains_timer, &timers.sort_nanos);
let (left_gains, right_gains) = doc_move_gains.split_at_mut(left_partition_len);
partially_sort_move_gains(left_gains, right_gains);
// Swap values across the partition as long as it seems net-profitable to do so.
// We'll start with the value that most wants to swap left to right, and the one
// that most wants to swap right to left. If the sum of the gains of this swap
// is positive, we do it. Then we move on to the second-most eager docs, etc.
let _swap_timer = TimerGuard::handoff(sort_timer, &timers.swap_nanos);
// Scratch space for our swaps array.
let mut swaps: Vec<(&mut Doc, &mut Doc)> = Vec::with_capacity(swaps_capacity_guess);
for ((left_gain, left_doc), (right_gain, right_doc)) in
left_gains.iter_mut().zip(right_gains)
{
if *left_gain + *right_gain <= 0 {
break;
}
let left_doc: &mut Doc = left_doc;
let right_doc: &mut Doc = right_doc;
std::mem::swap(left_doc, right_doc);
// Record the pair so the partition degree counters can be updated below.
swaps.push((left_doc, right_doc));
}
swaps_capacity_guess = swaps.len();
let num_docs_swapped = swaps.len() * 2;
if num_log_tokens != 0 {
log::info!(
"Depth {}, round {}, moved {} values",
depth,
round_number,
num_docs_swapped
);
}
if num_docs_swapped != 0 {
if is_parallel {
par_update_partition_for_swaps(swaps, &partitions);
} else {
ser_update_partition_for_swaps(swaps, &mut partitions);
}
}
if num_docs_swapped as f64 <= docs.len() as f64 * config.quiesced_fraction {
// If we didn't swap very much this time, just stop.
break;
}
}
let mut right_total_degrees = partitions.right_degrees;
let num_rows = right_total_degrees.len();
depth += 1;
if depth > config.max_depth
|| std::cmp::min(left_partition_len, right_partition_len) < config.min_num_docs
{
// Base case: if we're not recursing both ways, just stop.
//
// Someday, if we support imbalanced partitions, we might want to loop down
// just one side or the other.
optimize_base_case(docs, num_rows, config, timers);
break;
}
// Prepare to recurse down the left and right halves.
//
// We will manually tail-call down the left half by looping.
let (left_docs, right_docs) = docs.split_at_mut(left_partition_len);
// Create `left_total_degrees`. It starts as the total degrees from the previous
// recursion depth, so we need to subtract off the right degrees to get the totals
// just for the left side.
let mut left_total_degrees = partitions.total_degrees;
for (l, r) in left_total_degrees
.iter_mut()
.zip(right_total_degrees.iter_mut())
{
*l.get_mut() -= *r.get_mut();
}
// Divvy up the permission-to-log token so we log a limited amount overall.
let orig_num_tokens = num_log_tokens;
num_log_tokens = (num_log_tokens + 1) / 2;
let right_num_log_tokens = orig_num_tokens - num_log_tokens;
// Defer the right-side work for later.
scope.spawn(move |scope| {
recursively_balance(
scope,
right_docs,
cost_table,
right_total_degrees,
depth,
config,
timers_per_depth,
right_num_log_tokens,
);
});
// Recurse-via-looping to just the left subproblem.
total_degrees = left_total_degrees;
docs = left_docs;
}
}
/// Create a CostTable big enough to hold any lookup for `total_degrees`.
fn create_cost_table(total_degrees: &mut [AtomicU32]) -> CostTable {
let max_degree = total_degrees
.iter_mut()
.map(|n| *n.get_mut())
.max()
.unwrap_or(0);
// Add 1 because `compute_row_move_gains` will try adding 1.
CostTable::new(max_degree as usize + 1)
}
/// Permutes `docs` to be more compressible, a la
/// https://www.kdd.org/kdd2016/papers/files/rpp0883-dhulipalaAemb.pdf
pub fn optimize_doc_order(docs: &mut [Doc], config: &BalanceConfig) {
if config.max_rounds == 0 || docs.len() <= 2 {
return;
}
log::info!("Choosing balanced doc order with config {:?}", config);
let num_timers = docs.len().next_power_of_two().trailing_zeros() as usize;
let mut timers: Vec<Timers> = vec_with_default(num_timers);
// Logging at every level as we recurse would be way too much, so instead
// we use these log tokens to limit the total number of places we'll log while still
// giving a rough idea what's going on at various recursion levels.
let num_log_tokens = 4;
let setup_timer = TimerGuard::new(&timers[0].setup_nanos);
let num_rows = docs
.par_iter()
.map(|d| d.edge_list.last().map_or(0, |&n| n + 1))
.max()
.unwrap_or(0);
let mut total_degrees = compute_degrees(docs, num_rows as usize, true);
let cost_table = create_cost_table(&mut total_degrees);
drop(setup_timer);
rayon::scope(|s| {
recursively_balance(
s,
docs,
&cost_table,
total_degrees,
0,
config,
&timers,
num_log_tokens,
);
});
if log::log_enabled!(log::Level::Info) {
// Delete timers for depths where we have no data.
while timers.last().map_or(false, |t| t.is_empty()) {
timers.pop();
}
for (depth, timer) in timers.iter().enumerate() {
let title = if depth + 1 == num_timers {
format!("depth >= {}", depth)
} else {
format!("depth {}", depth)
};
timer.log(&title);
}
log::info!("Choosing balanced doc order done");
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_reorder() {
let config = BalanceConfig {
max_par_depth: 2,
min_num_docs: 8,
max_leaf_window_size: 4,
..BalanceConfig::default()
};
fn create_weight(i: u32) -> u32 {
if i % 37 != 0 { 1 } else { i % 7 }
}
// Create a dummy test case.
let mut docs: Vec<Doc> = (0..1024)
.map(|i| {
let id = i as u32;
let weight = create_weight(id);
let edge_list: Vec<u32> = (0..32).filter(|j| (id >> j) & 1 != 0).collect();
Doc {
edge_list,
weight,
id: ExternalId(id),
}
})
.collect();
optimize_doc_order(&mut docs, &config);
// Guarantee it resulted in some permutation of what we gave it.
let mut seen = vec![false; docs.len()];
for doc in docs.iter() {
assert_eq!(doc.weight, create_weight(doc.id.0));
let old = std::mem::replace(&mut seen[doc.id.0 as usize], true);
assert!(!old);
}
}
} |
Rust | hhvm/hphp/hack/src/depgraph/balanced_partition/config.rs | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
/// Configuration for the balanced partitioning algorithm.
#[derive(Debug)]
pub struct BalanceConfig {
/// Maximum recursion depth.
pub max_depth: u32,
/// Maximum number of optimization rounds at each step.
pub max_rounds: u32,
/// Smallest row worth optimizing.
pub min_row_degree: u32,
/// Largest row worth optimizing (inclusive).
pub max_row_degree: u32,
/// Stop recursing once we get to this many docs. Must be >= 2.
pub min_num_docs: usize,
/// How many of the top levels of recursion should we run in
/// "parallel mode" before switching to "serial mode"?
///
/// We have both serial and parallel algorithms for some
/// steps. The serial ones are faster if there's only one thread
/// (e.g. don't use atomic ops, avoid rayon overhead), so we only
/// want to use parallel algos up to the point where all of our
/// threads have something useful to do. For example, at the top
/// level, before we have split at all, it's best for threads to
/// all help out there. But after enough bisections we know
/// there's enough work for all threads, so we don't need to use
/// parallelism within a step.
///
/// The default value for this attempts to maximize the amount of
/// work done in serial mode while keeping all threads busy, which
/// is about twice as fast as doing everything in parallel mode.
///
/// Even after going beyond parallel depth, each left+right recursion
/// always makes the right recursion available for Rayon work stealing,
/// so we don't give up on high-level parallelism, just on parallel
/// algorithms within a single step.
pub max_par_depth: u32,
/// A number in [0.0, 1.0] indicating when we should stop swapping docs
/// early. When <= this fraction of docs gets swapped, we stop and proceed
/// to recurse down the left and right sides.
pub quiesced_fraction: f64,
/// When each step of the recursion finishes, we run a greedy algorithm on a
/// sliding window for whatever remains. This is the size of that window.
/// Larger windows will optimize better, but run slower.
pub max_leaf_window_size: usize,
}
impl Default for BalanceConfig {
fn default() -> Self {
// By default, run the top few levels of the recursion in
// parallel, so idle threads can help out. But once we've
// recursed enough, switch to running many single-threaded
// algorithms in parallel for different parts of the recursion
// tree, as those algorithms run faster than equivalent
// parallel algorithms (using atomics etc.) run on one thread.
//
// Note that each time we recurse down the tree we can always
// spawn off new subtrees in parallel, this only affects
// parallelism within a single tree node.
let max_par_depth = rayon::current_num_threads()
.next_power_of_two()
.trailing_zeros();
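// Illustrative example (assuming a 16-thread machine, not a measured value):
// current_num_threads() = 16 yields max_par_depth = 4, so the top 4 recursion
// levels use the parallel algorithms before each subtree runs serially.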
Self {
max_depth: u32::MAX,
max_rounds: 20,
min_row_degree: 2,
max_row_degree: u32::MAX,
min_num_docs: 64,
max_par_depth,
quiesced_fraction: 0.02,
max_leaf_window_size: 64,
}
}
} |
Rust | hhvm/hphp/hack/src/depgraph/balanced_partition/lib.rs | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
//! This crate implements the balanced graph partitioning algorithm described at
//! https://www.kdd.org/kdd2016/papers/files/rpp0883-dhulipalaAemb.pdf
//!
//! At a high level, it permutes an array of Doc to be more compressible by
//! swapping pairs of docs across the left and right halves of the array to reduce
//! a cost function, essentially clustering similar docs in each half. Then it
//! recurses on both halves to do the same thing there.
mod balance;
mod config;
mod timers;
pub use balance::optimize_doc_order;
pub use balance::Doc;
pub use balance::ExternalId;
pub use config::BalanceConfig; |
Rust | hhvm/hphp/hack/src/depgraph/balanced_partition/timers.rs | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use std::cmp::Reverse;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering;
use std::time::SystemTime;
/// These are some nanosecond counters we use to roughly bucket where time is going.
///
/// They just collect wall clock time, so can't distinguish between 10 threads working
/// on something versus just one. Use a real profiler if you want that kind of detail.
#[derive(Default)]
pub(crate) struct Timers {
pub(crate) doc_move_gains_nanos: AtomicU64,
pub(crate) leaf_nanos: AtomicU64,
pub(crate) remove_useless_nanos: AtomicU64,
pub(crate) row_move_gains_nanos: AtomicU64,
pub(crate) setup_nanos: AtomicU64,
pub(crate) sort_nanos: AtomicU64,
pub(crate) swap_nanos: AtomicU64,
}
impl Timers {
fn all_times(&self) -> Vec<(&'static str, u64)> {
let result: Vec<_> = [
("doc_move_gains", &self.doc_move_gains_nanos),
("leaf", &self.leaf_nanos),
("remove_useless", &self.remove_useless_nanos),
("row_move_gains", &self.row_move_gains_nanos),
("setup", &self.setup_nanos),
("sort", &self.sort_nanos),
("swap", &self.swap_nanos),
]
.into_iter()
.map(|(name, counter)| (name, counter.load(Ordering::Relaxed)))
.collect();
// Make sure we didn't forget any newly added fields above.
let num_timer_fields = std::mem::size_of::<Timers>() / std::mem::size_of::<AtomicU64>();
assert_eq!(result.len(), num_timer_fields);
result
}
pub(crate) fn is_empty(&self) -> bool {
self.all_times().into_iter().all(|(_, n)| n == 0)
}
pub(crate) fn log(&self, title: &str) {
let mut times = self.all_times();
times.sort_by_key(|x| (Reverse(x.1), x.0));
let total_nanos: u64 = times.iter().map(|t| t.1).sum();
let mut running_nanos = 0u64;
log::info!(
"Time breakdown, {}: {:.2}s total, {}",
title,
total_nanos as f64 * 1e-9,
times
.into_iter()
.map(|(name, nanos)| {
running_nanos += nanos;
// Avoid division by zero.
let denom = std::cmp::max(total_nanos, 1) as f64;
format!(
"{}: {:.2}s ({:.2}%, {:.2}% so far)",
name,
nanos as f64 * 1e-9,
nanos as f64 * 100.0 / denom,
running_nanos as f64 * 100.0 / denom
)
})
.collect::<Vec<_>>()
.join(", ")
);
}
}
pub(crate) struct TimerGuard<'a> {
start: SystemTime,
counter: Option<&'a AtomicU64>,
}
impl<'a> TimerGuard<'a> {
pub(crate) fn new(counter: &'a AtomicU64) -> Self {
Self {
start: SystemTime::now(),
counter: Some(counter),
}
}
fn finish(&mut self, now: &SystemTime) {
if let Some(counter) = self.counter.take() {
if let Ok(duration) = now.duration_since(self.start) {
counter.fetch_add(duration.as_nanos() as u64, Ordering::Relaxed);
}
}
}
/// End one timer at the same moment we start this one.
pub(crate) fn handoff(mut ending: TimerGuard<'_>, counter: &'a AtomicU64) -> Self {
let start = SystemTime::now();
ending.finish(&start);
Self {
start,
counter: Some(counter),
}
}
}
impl Drop for TimerGuard<'_> {
fn drop(&mut self) {
self.finish(&SystemTime::now());
}
} |
TOML | hhvm/hphp/hack/src/depgraph/balanced_partition/cargo/balanced_partition/Cargo.toml | # @generated by autocargo
[package]
name = "balanced_partition"
version = "0.0.0"
edition = "2021"
[lib]
path = "../../lib.rs"
crate-type = ["lib", "staticlib"]
[dependencies]
itertools = "0.10.3"
log = { version = "0.4.17", features = ["kv_unstable", "kv_unstable_std"] }
rayon = "1.2" |
TOML | hhvm/hphp/hack/src/depgraph/cargo/dep/Cargo.toml | # @generated by autocargo
[package]
name = "dep"
version = "0.0.0"
edition = "2021"
[lib]
path = "../../dep/dep.rs"
crate-type = ["lib", "staticlib"]
[dependencies]
bytemuck = { version = "1.12.3", features = ["derive"] }
ocamlrep = { version = "0.1.0", git = "https://github.com/facebook/ocamlrep/", branch = "main" }
serde = { version = "1.0.176", features = ["derive", "rc"] } |
TOML | hhvm/hphp/hack/src/depgraph/cargo/depgraph_reader/Cargo.toml | # @generated by autocargo
[package]
name = "depgraph_reader"
version = "0.0.0"
edition = "2021"
[lib]
path = "../../depgraph_reader/reader.rs"
crate-type = ["lib", "staticlib"]
[dependencies]
bytemuck = { version = "1.12.3", features = ["derive"] }
dep = { version = "0.0.0", path = "../dep" }
memmap2 = "0.5.10"
rayon = "1.2"
rpds = "0.11.0"
static_assertions = "1.1.0"
vint64 = "1.0.1" |
TOML | hhvm/hphp/hack/src/depgraph/cargo/depgraph_writer/Cargo.toml | # @generated by autocargo
[package]
name = "depgraph_writer"
version = "0.0.0"
edition = "2021"
[lib]
path = "../../depgraph_writer/writer.rs"
crate-type = ["lib", "staticlib"]
[dependencies]
bytemuck = { version = "1.12.3", features = ["derive"] }
dep = { version = "0.0.0", path = "../dep" }
log = { version = "0.4.17", features = ["kv_unstable", "kv_unstable_std"] }
newtype = { version = "0.0.0", path = "../../../utils/newtype" } |
TOML | hhvm/hphp/hack/src/depgraph/cargo/human_readable_dep_map/Cargo.toml | # @generated by autocargo
[package]
name = "human_readable_dep_map"
version = "0.0.0"
edition = "2021"
[lib]
path = "../../human_readable_dep_map/human_readable_dep_map.rs"
crate-type = ["lib", "staticlib"]
[dependencies]
anyhow = "1.0.71"
depgraph_reader = { version = "0.0.0", path = "../depgraph_reader" }
hash = { version = "0.0.0", path = "../../../utils/hash" }
typing_deps_hash = { version = "0.0.0", path = "../../../deps/cargo/typing_deps_hash" }
[dev-dependencies]
tempfile = "3.5" |
Rust | hhvm/hphp/hack/src/depgraph/dep/dep.rs | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use std::fmt::Display;
use std::fmt::Formatter;
use std::fmt::LowerHex;
use ocamlrep::from;
use ocamlrep::Allocator;
use ocamlrep::FromError;
use ocamlrep::FromOcamlRep;
use ocamlrep::ToOcamlRep;
use ocamlrep::Value;
use serde::Deserialize;
use serde::Serialize;
#[repr(transparent)]
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)]
#[derive(bytemuck::Pod, bytemuck::Zeroable)]
#[derive(Serialize, Deserialize)]
pub struct Dep(u64);
impl Dep {
pub fn new(x: u64) -> Self {
Dep(x)
}
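// Bit layout as inferred from the two methods below (not from a separate spec):
// class dep hashes have the low bit set, and clearing that bit yields the
// corresponding "extends" dependency hash.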
pub fn is_class(self) -> bool {
(self.0 & 1) != 0
}
pub fn class_to_extends(self) -> Option<Self> {
if !self.is_class() {
None
} else {
Some(Dep(self.0 & (!1)))
}
}
#[inline]
pub fn from_u64_slice(s: &[u64]) -> &[Dep] {
bytemuck::cast_slice(s)
}
}
impl Display for Dep {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
Display::fmt(&self.0, f)
}
}
impl LowerHex for Dep {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
LowerHex::fmt(&self.0, f)
}
}
impl From<Dep> for u64 {
fn from(dep: Dep) -> Self {
dep.0
}
}
impl FromOcamlRep for Dep {
fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> {
let x: isize = from::expect_int(value)?;
// In Rust, a numeric cast between two integers of the same size
// is a no-op. We require a 64-bit word size.
let x = x as u64;
// The conversion from an OCaml integer to a Rust integer involves a
// right arithmetic bitshift. Due to sign extension, if the OCaml
// integer was negative, the resulting Rust integer's MSB will be set.
// We don't want that, because we are disguising unsigned 63-bit Rust
// integers as signed 63-bit integers in OCaml.
let x = x & !(1 << 63);
Ok(Dep(x))
}
}
impl ToOcamlRep for Dep {
fn to_ocamlrep<'a, A: Allocator>(&'a self, _alloc: &'a A) -> Value<'a> {
let x: u64 = self.0;
// In Rust, a numeric cast between two integers of the same size
// is a no-op. We require a 64-bit word size.
let x = x as isize;
Value::int(x)
}
} |
Rust | hhvm/hphp/hack/src/depgraph/depgraph_compress/compress.rs | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use std::path::Path;
use depgraph_writer::MemDepGraph;
use crate::*;
/// Write a `MemDepGraph` to disk, optionally optimizing it first.
pub fn write_dep_graph(
path: &Path,
mut m: MemDepGraph,
write_config: &WriteConfig,
optimize_config: &OptimizeConfig,
) -> std::io::Result<()> {
match optimize_config {
OptimizeConfig::Bisect(config) => {
let mut tg = transpose::transpose(&m);
balanced_partition::optimize_doc_order(&mut tg.docs, config);
renumber::apply_node_renumbering(&mut m, tg);
}
OptimizeConfig::Copy(path) => copy::copy_node_order(&mut m, path)?,
OptimizeConfig::None => log::info!("Skipping graph compression"),
}
write::write_to_disk(path, m, write_config)
} |
Rust | hhvm/hphp/hack/src/depgraph/depgraph_compress/config.rs | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use std::path::PathBuf;
use balanced_partition::BalanceConfig;
pub enum OptimizeConfig {
None,
Bisect(BalanceConfig),
Copy(PathBuf),
}
impl Default for OptimizeConfig {
fn default() -> Self {
OptimizeConfig::Bisect(BalanceConfig::default())
}
}
pub enum WriteConfig {
/// Simple is experimental, and does not produce a usable output file.
/// The intent is to produce something without bespoke compression
/// (delta coding, varint, move to front) so we can try external compression
/// programs on it.
///
/// This will produce a file with a different magic number so you don't
/// accidentally try to use it.
Simple,
/// Normal write mode, uses some bespoke compression then runs it through zstd.
Zstd { compression_level: i32 },
}
impl Default for WriteConfig {
fn default() -> Self {
WriteConfig::Zstd {
compression_level: 12,
}
}
} |
Rust | hhvm/hphp/hack/src/depgraph/depgraph_compress/copy.rs | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use std::path::Path;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use depgraph_reader::DepGraph;
use depgraph_writer::HashIndex;
use depgraph_writer::MemDepGraph;
use newtype::IdVec;
use rayon::prelude::*;
use crate::*;
pub(crate) fn copy_node_order(m: &mut MemDepGraph, path: &Path) -> std::io::Result<()> {
log::info!("Copying node order from existing file {}", path.display());
let dep_graph = DepGraph::from_path(path)?;
let old_dep_graph = match dep_graph {
DepGraph::New(g) => g,
DepGraph::Old(..) => {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidData,
"Can only copy node ordering from new-format .hhdg files",
));
}
};
let num_hashes = m.hashes.len();
let in_use: IdVec<HashIndex, AtomicBool> =
IdVec::new_from_vec((0..num_hashes).map(|_| AtomicBool::new(false)).collect());
let mut remap_old_to_new: IdVec<HashIndex, HashIndex> =
IdVec::new_from_vec(vec![HashIndex::NONE; num_hashes]);
// Collect the HashIndex slots where we weren't able to assign a mapping.
let gaps: Vec<&mut HashIndex> = m
.hashes
.par_iter()
.zip(remap_old_to_new.par_iter_mut())
.filter_map(|(&dep, remap_ref)| {
if let Some(index) = old_dep_graph.get_index(dep) {
// `old_graph` may have more HashIndexes than `m`, but we want to keep
// `m`'s numbering dense so if we see an out-of-range HashIndex just
// pretend we didn't see it.
let h = HashIndex(index);
if let Some(in_use_ref) = in_use.get(h) {
in_use_ref.store(true, Ordering::Relaxed);
*remap_ref = h;
return None;
}
}
// We failed to map, so remember that this slot needs to be filled in.
Some(remap_ref)
})
.collect();
// Find all HashIndex values not in use.
let available: Vec<HashIndex> = in_use
.vec
.into_par_iter()
.enumerate()
.filter_map(|(index, in_use)| {
if in_use.into_inner() {
None
} else {
Some(HashIndex::from_usize(index))
}
})
.collect();
// Fill in each gap with an unused HashIndex in increasing order.
assert_eq!(gaps.len(), available.len());
gaps.into_par_iter()
.zip(available.into_par_iter())
.for_each(|(gap, h)| *gap = h);
// Apply the new numbering we just chose to `m`.
renumber::renumber(m, remap_old_to_new);
Ok(())
} |
Rust | hhvm/hphp/hack/src/depgraph/depgraph_compress/lib.rs | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
mod compress;
mod config;
mod copy;
mod renumber;
mod transpose;
mod write;
pub use balanced_partition::BalanceConfig;
pub use compress::write_dep_graph;
pub use config::OptimizeConfig;
pub use config::WriteConfig; |
Rust | hhvm/hphp/hack/src/depgraph/depgraph_compress/renumber.rs | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use std::sync::atomic::AtomicU32;
use std::sync::atomic::Ordering;
use depgraph_writer::HashIndex;
use depgraph_writer::HashListIndex;
use depgraph_writer::MemDepGraph;
use newtype::IdVec;
use rayon::prelude::*;
use transpose::TransposedMemDepGraph;
use crate::*;
/// Apply the proposed node renumbering.
pub(crate) fn renumber(g: &mut MemDepGraph, remap_old_to_new: IdVec<HashIndex, HashIndex>) {
assert_eq!(g.hashes.len(), remap_old_to_new.len());
log::info!("Updating edge lists to use the new numbering system");
// Update edge lists to use the new numbering system.
g.edge_lists.par_iter_mut().for_each(|edge_list| {
for h in edge_list.iter_mut() {
*h = remap_old_to_new[*h];
}
edge_list.sort_unstable()
});
log::info!("Permuting hashes and edge_list_indices tables");
// Create the inverse mapping so we can permute hashes and edge_list_indices.
let remap_new_to_old: IdVec<HashIndex, AtomicU32> = IdVec::new_from_vec(
(0..remap_old_to_new.len())
.map(|_| AtomicU32::new(!0))
.collect(),
);
remap_old_to_new
.vec
.into_par_iter()
.enumerate()
.for_each(|(old_index, new_index)| {
remap_new_to_old[new_index].store(old_index as u32, Ordering::Relaxed)
});
// Permute `hashes` and `edge_list_indices` to use the new numbering system.
//
// I initially used an in-place permute, which doesn't need these temp arrays,
// or even `remap_new_to_old`, but that algorithm was single-threaded and thus
// much slower overall.
let old_hashes = g.hashes.clone();
let old_edge_list_indices = g.edge_list_indices.clone();
remap_new_to_old
.vec
.into_par_iter()
.zip(g.hashes.par_iter_mut())
.zip(g.edge_list_indices.par_iter_mut())
.for_each(move |((old_index, hash), edge_list_index)| {
let old_index = HashIndex(old_index.into_inner());
*hash = old_hashes[old_index];
*edge_list_index = old_edge_list_indices[old_index]
});
}
/// Take the node ordering from `tg` and apply it to `g`.
///
/// `tg` only cares about nodes with in-edges, so we need to also determine numbers
/// for the rest.
pub(crate) fn apply_node_renumbering(g: &mut MemDepGraph, tg: TransposedMemDepGraph) {
log::info!("Applying node numbering");
log::info!("Assigning node numbers for nodes with in-edges");
let num_hashes = g.hashes.len();
let mut remap_old_to_new: IdVec<HashIndex, HashIndex> =
IdVec::new_from_vec(vec![HashIndex(!0u32); num_hashes]);
// Number all of the nodes with any in-edges consecutively. We want those to have
// the smallest numbers because they get named using variable-length integers.
let mut out = 0;
for n in tg.docs.iter() {
for &e in tg.get_hash_indexes(n) {
remap_old_to_new[e] = HashIndex(out);
out += 1;
}
}
log::info!("Assigning node numbers for nodes with out-edges but no in-edges");
// Count how many not-yet-numbered nodes share each edge list.
let num_edge_list_users: IdVec<HashListIndex, AtomicU32> =
IdVec::new_from_vec((0..g.edge_lists.len()).map(|_| AtomicU32::new(0)).collect());
g.edge_list_indices
.par_iter()
.zip(remap_old_to_new.par_iter())
.for_each(|(&hash_list_index, &remap)| {
if remap == HashIndex(!0u32) {
num_edge_list_users[hash_list_index].fetch_add(1, Ordering::Relaxed);
}
});
// Figure out the range for each edge list, so we can group nodes that use the same
// edge list together with consecutive values.
let mut edge_list_users_start = num_edge_list_users;
edge_list_users_start
.iter_mut()
.for_each(|n| out += std::mem::replace(n.get_mut(), out));
// Next, number everything else not already numbered. We do it in such a way that nodes
// that share the same edge list tend to be numbered consecutively, which makes the way
// we later write out `MemDepGraph::edge_list_indices` more compact.
for (&hash_list_index, remap) in g.edge_list_indices.iter().zip(remap_old_to_new.iter_mut()) {
if *remap == HashIndex(!0u32) {
let next_index = edge_list_users_start[hash_list_index].get_mut();
*remap = HashIndex(*next_index);
*next_index += 1;
}
}
drop(edge_list_users_start);
renumber::renumber(g, remap_old_to_new);
log::info!("Applying node numbering done");
} |
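// Illustrative sketch (not used by the production code): the bucketing above is a
// counting-sort-style exclusive prefix sum. After the scan, each slot holds the first
// number assigned to the group of nodes sharing that edge list.
#[cfg(test)]
mod grouping_prefix_sum_sketch {
    #[test]
    fn exclusive_prefix_sum() {
        // counts[i] = number of not-yet-numbered nodes using edge list i.
        let mut counts = [2usize, 0, 3];
        let mut out = 0;
        // Same shape as the `std::mem::replace` scan in `apply_node_renumbering`.
        counts.iter_mut().for_each(|n| out += std::mem::replace(n, out));
        assert_eq!(counts, [0, 2, 2]); // group start numbers
        assert_eq!(out, 5); // total nodes numbered by the scan
    }
}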
Rust | hhvm/hphp/hack/src/depgraph/depgraph_compress/transpose.rs | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use std::cmp::Reverse;
use std::sync::atomic::AtomicU32;
use std::sync::atomic::Ordering;
use balanced_partition::Doc;
use balanced_partition::ExternalId;
use depgraph_writer::HashIndex;
use depgraph_writer::MemDepGraph;
use hash::DashMap;
use newtype::IdVec;
use rayon::prelude::*;
use smallvec::SmallVec;
pub(crate) struct TransposedMemDepGraph {
pub(crate) docs: Vec<Doc>,
/// Concatenated slices of duplicated doc data.
dups: Vec<HashIndex>,
}
impl TransposedMemDepGraph {
pub(crate) fn get_hash_indexes<'a>(&'a self, doc: &'a Doc) -> &'a [HashIndex] {
let weight = doc.weight;
if weight == 1 {
let r: &HashIndex = bytemuck::cast_ref(&doc.id.0);
std::slice::from_ref(r)
} else {
let start = doc.id.0 as usize;
&self.dups[start..start + weight as usize]
}
}
}
pub(crate) fn transpose(g: &MemDepGraph) -> TransposedMemDepGraph {
log::info!("Starting to transpose graph");
// Count how many in-edges each doc has.
let mut num_in_edges: Vec<AtomicU32> = (0..g.hashes.len()).map(|_| AtomicU32::new(0)).collect();
g.edge_lists.par_iter().flat_map(|b| &b[..]).for_each(|&n| {
num_in_edges[n.as_usize()].fetch_add(1, Ordering::Relaxed);
});
// Figure out the starting index for each doc when all concatenated into
// a single array. For example if the first doc has 4 in edges, and the second
// has 3, then the slice for the third doc will start at array index 7.
let mut num_edges = 0;
let next_index: IdVec<HashIndex, AtomicU32> = num_in_edges
.iter_mut()
.map(|p| {
let r = num_edges;
num_edges += *p.get_mut();
AtomicU32::new(r)
})
.collect();
// Allocate a single block of storage to hold all the in-edges.
let in_edges: Vec<AtomicU32> = (0..num_edges).map(|_| AtomicU32::new(u32::MAX)).collect();
// Insert all of the edges (in nondeterministic order due to threading).
// So the in-edges for each doc will be in a known slice, but the order of
// the values within that slice will be arbitrarily permuted.
g.edge_lists
.par_iter()
.enumerate()
.for_each(|(i, edge_list)| {
for &to_doc in edge_list.iter() {
let slot = next_index[to_doc].fetch_add(1, Ordering::Relaxed);
in_edges[slot as usize].store(i as u32, Ordering::Relaxed);
}
});
// TODO: Once the API has stabilized, use `in_edges.get_mut_slice()` since we don't need
// atomic operations on the `in_edges` values any more.
// Bucket identical docs together.
let canonical_doc: DashMap<Box<[u32]>, SmallVec<[HashIndex; 4]>> = DashMap::default();
next_index
.vec
.into_par_iter()
.zip(num_in_edges.into_par_iter())
.enumerate()
.for_each(|(i, (end, size))| {
let size = size.into_inner() as usize;
if size == 0 {
// Ignore docs with no in-edges.
return;
}
let end = end.into_inner() as usize;
let mut v: Box<[u32]> = in_edges[end - size..end]
.iter()
.map(|n| n.load(Ordering::Relaxed))
.collect();
v.sort_unstable();
canonical_doc
.entry(v)
.or_default()
.push(HashIndex::from_usize(i));
});
// `dups` is a single block of storage to hold slices for all of our duplicate doc slices.
//
// This capacity guess isn't exactly right, but doesn't need to be.
let mut dups = Vec::with_capacity(g.hashes.len() - canonical_doc.len());
let mut docs: Vec<Doc> = canonical_doc
.into_iter()
.map(|(edge_list, mut dups_with_edge_list)| {
let weight = dups_with_edge_list.len() as u32;
let id = if weight == 1 {
// Don't allocate a slice in `dups` if we have just one (common case).
dups_with_edge_list[0].0
} else {
// Canonicalize away the nondeterministic insertion order.
dups_with_edge_list.sort_unstable();
let start = dups.len() as u32;
dups.extend(dups_with_edge_list);
start
};
Doc {
edge_list: edge_list.into(),
weight,
id: ExternalId(id),
}
})
.collect();
dups.shrink_to_fit();
assert!(dups.len() <= u32::MAX as usize);
// Sort to put longest edge lists first.
docs.par_sort_unstable_by_key(|n| {
let r = Reverse(n.edge_list.len());
let id = n.id.0;
// For determinism, break ties using this doc's smallest hash_index.
let smallest_hash_index = if n.weight == 1 {
HashIndex(id)
} else {
dups[id as usize]
};
(r, smallest_hash_index)
});
log::info!("Graph transpose done");
TransposedMemDepGraph { docs, dups }
} |
Rust | hhvm/hphp/hack/src/depgraph/depgraph_compress/write.rs | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use std::fs::File;
use std::fs::OpenOptions;
use std::io::BufWriter;
use std::io::Seek;
use std::io::SeekFrom;
use std::io::Write;
use std::ops::Range;
use std::path::Path;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use depgraph_reader::compress::CompressedHeader;
use depgraph_writer::HashIndex;
use depgraph_writer::HashListIndex;
use depgraph_writer::MemDepGraph;
use newtype::IdVec;
use rayon::prelude::*;
use zstd::stream::write::Encoder;
use crate::WriteConfig;
/// Convert an edge list into its final, RLE+delta-encoded form.
///
/// See `depgraph_reader/compress.rs` for a file format description.
fn serialize_edge_list(
edge_list: &[HashIndex],
most_rle_blocks: &AtomicUsize,
total_rle_blocks: &AtomicUsize,
config: &WriteConfig,
) -> Box<[u8]> {
// Rough capacity guess, probably wrong.
let mut rle_blocks: Vec<u8> = Vec::with_capacity(1 + edge_list.len() * 10);
let mut num_rle_blocks = 0usize;
let mut prev = 0u32;
let mut flush_range = |b: Range<u32>| {
match config {
WriteConfig::Simple => {
rle_blocks.extend(b.start.to_ne_bytes());
rle_blocks.extend(b.end.to_ne_bytes());
}
WriteConfig::Zstd { .. } => {
// Use our own delta-coding.
let delta = b.start - prev;
let has_repeat_count = b.len() > 1;
let delta_and_has_repeat_count: u64 =
(delta as u64) << 1 | (has_repeat_count as u64);
rle_blocks.extend(vint64::encode(delta_and_has_repeat_count).as_ref());
if has_repeat_count {
rle_blocks.extend(vint64::encode((b.len() - 2) as u64).as_ref());
}
// There must be a gap, or it would have been a larger block, so
// we can move `prev` one past `b.end`.
prev = b.end + 1;
}
}
num_rle_blocks += 1;
};
// Convert the edge_list into a sequence of Ranges, which we RLE-encode.
let mut edge_list_iter = edge_list.iter().copied();
if let Some(HashIndex(first_edge)) = edge_list_iter.next() {
// Establish a starting point.
let mut cur = Range {
start: first_edge,
end: first_edge + 1,
};
// Process the remaining edges, hopefully combining them into RLE blocks.
for HashIndex(edge) in edge_list_iter {
if cur.end == edge {
// They are contiguous, so expand the current block.
cur.end += 1;
} else {
// Noncontiguous; flush the previous block and start a new one.
// These had better be sorted.
assert!(edge > cur.end);
flush_range(cur);
cur = Range {
start: edge,
end: edge + 1,
}
}
}
// Flush the final range.
flush_range(cur);
}
// Update stats.
total_rle_blocks.fetch_add(num_rle_blocks, Ordering::Relaxed);
most_rle_blocks.fetch_max(num_rle_blocks, Ordering::Relaxed);
// For the Zstd config, prepend each edge list with its byte count rather than its block
// count. This is a bit larger, but makes decompression easier since the reader can
// immediately tell how many bytes each record needs. The Simple config keeps the block count.
let encoded_len = vint64::encode(rle_blocks.len() as u64);
let encoded_num_rle_blocks = (num_rle_blocks as u32).to_ne_bytes();
let encoded_len = match config {
WriteConfig::Simple => &encoded_num_rle_blocks,
WriteConfig::Zstd { .. } => encoded_len.as_ref(),
};
// Create a final Vec of exactly the right size, so no additional copying
// happens when we convert it to a Boxed slice.
let mut result = Vec::with_capacity(encoded_len.len() + rle_blocks.len());
result.extend(encoded_len);
result.extend(rle_blocks);
result.into()
}
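// Illustrative sketch (not used by the production code): with the `Simple` config the
// output layout is fully deterministic, so a tiny edge list makes the RLE structure easy
// to see: [3, 4, 5, 9] collapses into the two blocks 3..6 and 9..10.
#[cfg(test)]
mod serialize_edge_list_sketch {
    use super::*;
    #[test]
    fn tiny_edge_list_simple_config() {
        let edges = [HashIndex(3), HashIndex(4), HashIndex(5), HashIndex(9)];
        let most = AtomicUsize::new(0);
        let total = AtomicUsize::new(0);
        let bytes = serialize_edge_list(&edges, &most, &total, &WriteConfig::Simple);
        // 4-byte block count followed by two (start, end) u32 pairs.
        assert_eq!(bytes.len(), 4 + 2 * 8);
        assert_eq!(total.into_inner(), 2);
        assert_eq!(most.into_inner(), 2);
    }
}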
fn write_hashes(file: &mut File, m: &MemDepGraph) -> std::io::Result<u64> {
let mut out = BufWriter::new(file);
// Leave padding for the CompressedHeader we'll fill in later.
for _ in 0..std::mem::size_of::<CompressedHeader>() {
out.write_all(&[0])?;
}
out.write_all(bytemuck::cast_slice(&m.hashes))?;
// Let `into_inner()` flush here, so we notice disk full etc.
let file = out.into_inner()?;
// Return the number of bytes written (including the header padding).
file.stream_position()
}
/// This writes out the edge list index, which has one entry for every hash.
/// Entries are variable-length encoded; they aren't intended to be random-accessed
/// in .zhhdg files, only decompressed.
///
/// There are two possibilities for each entry:
///
/// - 0: This is the first time a particular edge list has appeared.
/// A new entry has been appended to the `edge_lists` section to match.
/// - Other (N >= 1): This is the same as some previous edge list. Look backwards N
/// slots to find the previous edge list with the same contents.
///
/// This is related to move-to-front coding: the idea is to use small numbers for consecutive
/// or nearby repeats, which are common due to how `apply_node_renumbering` assigns values.
fn write_compressed_edge_map<W: Write>(
out: &mut W,
m: &MemDepGraph,
) -> std::io::Result<Vec<HashListIndex>> {
let mut ser_order = Vec::with_capacity(m.edge_lists.len());
const NOT_EMITTED: u32 = u32::MAX;
let mut most_recent: IdVec<HashListIndex, u32> =
IdVec::new_from_vec(vec![NOT_EMITTED; m.edge_lists.len()]);
for (i, &edge_list_index) in m.edge_list_indices.iter().enumerate() {
let slot = &mut most_recent[edge_list_index];
let latest = std::mem::replace(slot, i as u32);
let delta = if latest == NOT_EMITTED {
ser_order.push(edge_list_index);
// A delta of 0 means we are introducing a new edge list here.
0
} else {
// This is a back-reference to the last time we emitted this `HashListIndex`.
i as u32 - latest
};
out.write_all(vint64::encode(delta as u64).as_ref())?;
}
Ok(ser_order)
}
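// Illustrative sketch (not used by the production code): the back-reference scheme above,
// shown on plain values. An edge_list_indices sequence of [A, B, A, B] is emitted as the
// deltas [0, 0, 2, 2]: zeros introduce new edge lists, and a non-zero value N points back
// N slots to the previous node with the same list.
#[cfg(test)]
mod edge_map_back_reference_sketch {
    #[test]
    fn deltas_for_repeating_lists() {
        let edge_list_indices = [7usize, 9, 7, 9]; // stand-ins for HashListIndex values
        let mut most_recent = std::collections::HashMap::new();
        let deltas: Vec<usize> = edge_list_indices
            .iter()
            .enumerate()
            .map(|(i, &idx)| match most_recent.insert(idx, i) {
                None => 0,
                Some(prev) => i - prev,
            })
            .collect();
        assert_eq!(deltas, vec![0, 0, 2, 2]);
    }
}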
/// For experimentation we don't compress here, just write out simple u32 values.
fn write_simple_edge_list_index(file: &mut File, m: &MemDepGraph) -> std::io::Result<()> {
file.write_all(bytemuck::cast_slice(&m.edge_list_indices.vec[..]))
}
/// Append both the edge lists index and the edge lists themselves to `path`.
///
/// Returns the byte size of the edges_index section.
fn write_edges(
path: &Path,
edges_index_start: u64,
m: &MemDepGraph,
config: &WriteConfig,
) -> std::io::Result<u64> {
// Open the file again, so we can write to a different section in parallel.
let mut file = OpenOptions::new().write(true).open(path)?;
file.seek(SeekFrom::Start(edges_index_start))?;
let (edge_index_status, ser_edge_lists) = rayon::join(
|| -> std::io::Result<(Option<Vec<HashListIndex>>, File)> {
// Write out the edge list index section.
match config {
WriteConfig::Simple => {
write_simple_edge_list_index(&mut file, m)?;
Ok((None, file))
}
WriteConfig::Zstd { compression_level } => {
let mut z = Encoder::new(file, *compression_level)?;
z.multithread((std::cmp::max(rayon::current_num_threads(), 2) - 1) as u32)?;
let ser_order = write_compressed_edge_map(&mut z, m)?;
let file = z.finish()?;
Ok((Some(ser_order), file))
}
}
},
|| {
// Compute the serialized edge_lists. We'll write them out below, after
// the file writes above have finished.
// Gather some statistics for logging.
let most_rle_blocks = AtomicUsize::new(0);
let total_rle_blocks = AtomicUsize::new(0);
let total_size = AtomicUsize::new(0);
let ser_edge_lists_vec: Vec<Box<[u8]>> = m
.edge_lists
.par_iter()
.with_min_len(1)
.with_max_len(1)
.map(|edge_list| {
let edges =
serialize_edge_list(edge_list, &most_rle_blocks, &total_rle_blocks, config);
total_size.fetch_add(edges.len(), Ordering::Relaxed);
edges
})
.collect();
let ser_edge_lists: IdVec<HashListIndex, Box<[u8]>> =
IdVec::new_from_vec(ser_edge_lists_vec);
log::info!(
"Uncompressed edge list size {}, RLE blocks {}, most RLE blocks in an edge list is {}",
total_size.into_inner(),
total_rle_blocks.into_inner(),
most_rle_blocks.into_inner()
);
ser_edge_lists
},
);
let (ser_order, mut file) = edge_index_status?;
// Remember where the edge index ends, and therefore the edge list starts.
let edges_index_end = file.stream_position()?;
let edge_map_size = edges_index_end - edges_index_start;
if log::log_enabled!(log::Level::Info) {
log::info!("Compressed edge list indices take {} bytes", edge_map_size);
if ser_edge_lists.len() > 10 {
// Find the biggest 10. No need to sort the entire vec.
let mut sizes: Vec<_> = ser_edge_lists.iter().map(|s| s.len()).collect();
let (_, _, biggest_10) = sizes.select_nth_unstable(ser_edge_lists.len() - 11);
biggest_10.sort_by_key(|&i| std::cmp::Reverse(i));
log::info!(
"Top serialized edge_list byte sizes are {}",
biggest_10
.iter()
.map(|&x| format!("{}", x))
.collect::<Vec<_>>()
.join(", ")
);
}
}
// Write a separate compressed section for the edge lists.
let mut file = match config {
WriteConfig::Simple => {
let mut buf = BufWriter::new(file);
for ser_edge_list in ser_edge_lists.iter() {
buf.write_all(ser_edge_list)?;
}
buf.into_inner()?
}
WriteConfig::Zstd { compression_level } => {
let mut z = Encoder::new(file, *compression_level)?;
z.multithread((std::cmp::max(rayon::current_num_threads(), 2) - 1) as u32)?;
for ser_index in ser_order.unwrap() {
z.write_all(&ser_edge_lists[ser_index])?;
}
z.finish()?
}
};
let edge_lists_end = file.stream_position()?;
drop(file);
log::info!(
"Compressed edge lists take {} bytes",
edge_lists_end - edges_index_end
);
Ok(edge_map_size)
}
fn write_header(
file: &mut File,
num_deps: u64,
edge_map_size: u64,
config: &WriteConfig,
) -> std::io::Result<()> {
let magic = match config {
WriteConfig::Simple => {
// Use a nonstandard magic number so readers can tell this experimental format isn't readable.
*b"HHZS"
}
WriteConfig::Zstd { .. } => CompressedHeader::MAGIC,
};
let header = CompressedHeader {
magic,
version: CompressedHeader::LATEST_VERSION,
num_deps,
edge_map_size,
};
// Write the header at the very beginning of the file.
file.rewind()?;
file.write_all(bytemuck::bytes_of(&header))
}
/// Write a `MemDepGraph` to the given path as a `.zhhdg` file.
///
/// See `depgraph_reader/compress.rs` for a file format description.
pub(crate) fn write_to_disk(
path: &Path,
m: MemDepGraph,
config: &WriteConfig,
) -> std::io::Result<()> {
log::info!("Writing file contents");
let header_size = std::mem::size_of::<CompressedHeader>() as u64;
let num_deps = m.hashes.len() as u64;
let edge_data_start = header_size + num_deps * 8;
// Create the file, and grow it now so we can write deps and append edge_list data in parallel.
let mut file = std::fs::File::create(path)?;
file.set_len(edge_data_start)?;
// Write the dep table (which is most of the file) and the edge data (which needs compression)
// in parallel, to different sections of the file.
let (num_dep_table_bytes_written, edge_lists_write_result) = rayon::join(
|| write_hashes(&mut file, &m),
|| write_edges(path, edge_data_start, &m, config),
);
// Sanity check that the dep section contains exactly the expected number of bytes.
let num_dep_table_bytes_written = num_dep_table_bytes_written?;
assert_eq!(num_dep_table_bytes_written, edge_data_start);
let edge_map_size = edge_lists_write_result?;
// Now that we know various file parameters, go back and write the header at the file start.
write_header(&mut file, num_deps, edge_map_size, config)?;
let file_size = file.metadata().unwrap().len();
drop(file);
log::info!(
"Writing depgraph file done. File size is {} bytes",
file_size
);
Ok(())
} |
TOML | hhvm/hphp/hack/src/depgraph/depgraph_compress/cargo/depgraph_compress/Cargo.toml | # @generated by autocargo
[package]
name = "depgraph_compress"
version = "0.0.0"
edition = "2021"
[lib]
path = "../../lib.rs"
crate-type = ["lib", "staticlib"]
[dependencies]
balanced_partition = { version = "0.0.0", path = "../../../balanced_partition/cargo/balanced_partition" }
bytemuck = { version = "1.12.3", features = ["derive"] }
depgraph_reader = { version = "0.0.0", path = "../../../cargo/depgraph_reader" }
depgraph_writer = { version = "0.0.0", path = "../../../cargo/depgraph_writer" }
hash = { version = "0.0.0", path = "../../../../utils/hash" }
log = { version = "0.4.17", features = ["kv_unstable", "kv_unstable_std"] }
newtype = { version = "0.0.0", path = "../../../../utils/newtype" }
rayon = "1.2"
smallvec = { version = "1.6.1", features = ["serde", "specialization", "union"] }
vint64 = "1.0.1"
zstd = { version = "0.11.2+zstd.1.5.2", features = ["experimental", "zstdmt"] } |
Rust | hhvm/hphp/hack/src/depgraph/depgraph_decompress/main.rs | use std::path::PathBuf;
use clap::Parser;
#[derive(Parser)]
struct Options {
input: PathBuf,
#[clap(short)]
output: PathBuf,
}
fn main() -> std::io::Result<()> {
let options = Options::parse();
decompress::decompress(&options.input, &options.output)
} |
TOML | hhvm/hphp/hack/src/depgraph/depgraph_decompress/cargo/depgraph_decompress/Cargo.toml | # @generated by autocargo
[package]
name = "depgraph_decompress"
version = "0.0.0"
edition = "2021"
[[bin]]
name = "depgraph_decompress"
path = "../../main.rs"
[dependencies]
clap = { version = "3.2.25", features = ["derive", "env", "regex", "unicode", "wrap_help"] }
decompress = { version = "0.0.0", path = "../../decompress" } |
TOML | hhvm/hphp/hack/src/depgraph/depgraph_decompress/decompress/Cargo.toml | # @generated by autocargo
[package]
name = "decompress"
version = "0.0.0"
edition = "2021"
[lib]
path = "lib.rs"
[dependencies]
bytemuck = { version = "1.12.3", features = ["derive"] }
depgraph_reader = { version = "0.0.0", path = "../../cargo/depgraph_reader" }
memmap2 = "0.5.10"
rayon = "1.2"
vint64 = "1.0.1"
zstd = { version = "0.11.2+zstd.1.5.2", features = ["experimental", "zstdmt"] } |
Rust | hhvm/hphp/hack/src/depgraph/depgraph_decompress/decompress/lib.rs | use std::fs::File;
use std::fs::OpenOptions;
use std::io::BufWriter;
use std::io::ErrorKind;
use std::io::Read;
use std::io::Write;
use std::ops::Deref;
use std::os::unix::fs::FileExt;
use std::path::Path;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use depgraph_reader::compress::ByteApproximatedLen;
use depgraph_reader::compress::CompressedHeader;
use depgraph_reader::compress::RleBlock;
use depgraph_reader::compress::UncompressedHeader;
use depgraph_reader::Dep;
use rayon::prelude::*;
const IN_HEADER_SIZE: usize = std::mem::size_of::<CompressedHeader>();
/// Write the file header to the start of the output file.
///
/// See depgraph/depgraph_reader/compress.rs for the file format.
fn write_header(
file: &File,
num_deps: usize,
adjacency_list_alignment_shift: u8,
) -> std::io::Result<()> {
let final_header = UncompressedHeader {
magic: UncompressedHeader::MAGIC,
version: UncompressedHeader::LATEST_VERSION,
num_deps: num_deps as u64,
adjacency_list_alignment_shift,
_alignment_padding: Default::default(),
};
file.write_all_at(bytemuck::bytes_of(&final_header), 0)
}
/// Write the `deps` and `deps_sort_order` sections.
///
/// See depgraph/depgraph_reader/compress.rs for the file format.
fn write_deps(file: &File, deps_bytes: &[u8]) -> std::io::Result<()> {
const OUT_HEADER_SIZE: u64 = std::mem::size_of::<UncompressedHeader>() as u64;
let (deps_write_status, sort_order_write_status) = rayon::join(
|| file.write_all_at(deps_bytes, OUT_HEADER_SIZE),
|| {
// Create a table giving the sorted order of the Deps, so we can binary search straight from the mmap file.
let in_deps: &[Dep] = bytemuck::cast_slice(deps_bytes);
let mut deps_sort_order: Vec<u32> = (0..in_deps.len() as u32).collect();
deps_sort_order.par_sort_unstable_by_key(|&i| in_deps[i as usize]);
file.write_all_at(
bytemuck::cast_slice(&deps_sort_order),
OUT_HEADER_SIZE + deps_bytes.len() as u64,
)
},
);
deps_write_status?;
sort_order_write_status
}
/// Read in one variable-length integer from a stream.
fn read_vint64<R: Read>(mut r: R) -> std::io::Result<u64> {
let mut buf = [0u8; 9];
r.read_exact(&mut buf[..1])?;
let len = vint64::decoded_len(buf[0]);
r.read_exact(&mut buf[1..len])?;
let mut rbuf = &buf[..];
vint64::decode(&mut rbuf).map_err(|e| {
std::io::Error::new(
std::io::ErrorKind::InvalidData,
format!("Corrupt vint64: {}", e),
)
})
}
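// Illustrative sketch (not used by the production code): `read_vint64` pulls one
// variable-length integer at a time from any `Read` impl, advancing the reader as it goes.
#[cfg(test)]
mod read_vint64_sketch {
    use super::*;
    #[test]
    fn reads_values_in_sequence() {
        let mut bytes = Vec::new();
        bytes.extend(vint64::encode(0).as_ref());
        bytes.extend(vint64::encode(300).as_ref());
        bytes.extend(vint64::encode(u64::MAX).as_ref());
        let mut r = &bytes[..];
        assert_eq!(read_vint64(&mut r).unwrap(), 0);
        assert_eq!(read_vint64(&mut r).unwrap(), 300);
        assert_eq!(read_vint64(&mut r).unwrap(), u64::MAX);
        assert!(r.is_empty());
    }
}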
/// Convert a list of edges from the format used in `.zhhdg` files to the larger but random-access
/// format used in `.hhdg` files.
///
/// See `serialize_edge_list` for the writer.
fn decode_one_edge_list(mut b: &[u8]) -> Box<[u8]> {
// Rough guess.
let mut result = Vec::with_capacity(b.len() / 8);
// Track the sum of sizes of all RLE blocks.
let mut total_rle_decoded_len = 0u32;
let mut num_rle_blocks = 0usize;
let mut delta_base = 0u32;
while !b.is_empty() {
// Decode the (start, len) pair.
let delta_and_has_repeat_count = vint64::decode(&mut b).unwrap();
let has_repeat_count = (delta_and_has_repeat_count & 1) != 0;
let delta = (delta_and_has_repeat_count >> 1) as u32;
let mut len = if has_repeat_count {
2 + vint64::decode(&mut b).unwrap() as u32
} else {
1
};
let mut start = delta_base + delta;
delta_base = start + len + 1;
total_rle_decoded_len += len;
// Turn that into RleBlocks (usually just one, but each one's size is limited, so maybe more).
while len != 0 {
let encoded_len = ByteApproximatedLen::encode(len);
let r = RleBlock { start, encoded_len };
result.extend(bytemuck::bytes_of(&r));
num_rle_blocks += 1;
let num_consumed = encoded_len.decode();
start += num_consumed;
len -= num_consumed;
}
}
// Prepend the RLE block count and the total length count.
let old_len = result.len();
result.extend(vint64::encode(num_rle_blocks as u64).as_ref());
result.extend(vint64::encode(total_rle_decoded_len as u64).as_ref());
result.rotate_left(old_len);
result.into()
}
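// Illustrative sketch (not used by the production code): hand-encode the writer's delta
// format for the edge list [3, 4, 5, 9] (two runs, 3..6 and 9..10) and check that it
// decodes into two `RleBlock`s prefixed by the block count and total length.
#[cfg(test)]
mod decode_one_edge_list_sketch {
    use super::*;
    #[test]
    fn two_runs() {
        let mut input = Vec::new();
        input.extend(vint64::encode((3 << 1) | 1).as_ref()); // start 3, repeat count follows
        input.extend(vint64::encode(3 - 2).as_ref()); // run length 3, stored as len - 2
        input.extend(vint64::encode((9 - 7) << 1).as_ref()); // start 9, delta from implicit base 7
        let out = decode_one_edge_list(&input);
        let mut rest = &out[..];
        assert_eq!(vint64::decode(&mut rest).unwrap(), 2); // two RLE blocks
        assert_eq!(vint64::decode(&mut rest).unwrap(), 4); // four edges in total
        let blocks: &[RleBlock] = bytemuck::cast_slice(rest);
        assert_eq!(blocks.len(), 2);
        assert_eq!({ blocks[0].start }, 3);
        assert_eq!(blocks[0].len(), 3);
        assert_eq!({ blocks[1].start }, 9);
        assert_eq!(blocks[1].len(), 1);
    }
}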
/// Decompress and parse the edge lists.
///
/// There are two levels of compression to undo here:
/// - Everything is compressed with zstd, so we need to decompress that first.
/// - Each edge array is delta-coded by the `.zhhdg` file writer. We need to
/// convert each one into the more verbose format that we use in `.hhdg` files.
fn decompress_edge_lists(in_edge_lists: &[u8]) -> std::io::Result<(Vec<Box<[u8]>>, usize)> {
let mut z = zstd::stream::read::Decoder::with_buffer(in_edge_lists)?;
// First zstd-uncompress the raw edge list bytes. This is an inherently serial process.
let mut edge_lists: Vec<Box<[u8]>> = vec![];
loop {
let num_bytes = match read_vint64(&mut z) {
Ok(n) => n,
Err(e) if e.kind() == ErrorKind::UnexpectedEof => break,
Err(e) => return Err(e),
};
let mut v = vec![0; num_bytes as usize];
z.read_exact(&mut v).unwrap();
edge_lists.push(v.into());
}
drop(z);
// In parallel, convert each edge list from .zhhdg to .hhdg format.
let total_bytes = AtomicUsize::new(0);
edge_lists.par_iter_mut().for_each(|v| {
let d = decode_one_edge_list(v.as_ref());
total_bytes.fetch_add(d.len(), Ordering::Relaxed);
*v = d;
});
Ok((edge_lists, total_bytes.into_inner()))
}
/// Guarantee that all edge list section offsets will be able to fit in 32 bits,
/// by choosing how many low bits of their section offsets are required to be zero.
/// Those zero bits can be shifted away in our section offset table.
///
/// The graph would have to be exotically large for this to return anything
/// other than zero, but it's possible.
fn compute_alignment_shift(num_edge_lists: usize, total_bytes: usize) -> u8 {
// Increase the guaranteed alignment of each serialized edge list so that
// scaled offsets fit in 32 bits. We expect to typically have no shifting at
// all, or maybe 1 bit someday (even-byte alignment), so this won't be very
// wasteful.
//
// For example if we had 7GB of edge lists, and we wanted to be able to
// reference each one with a 32-bit offset, we would align each edge list on
// an even byte boundary and use an alignment_shift of 1, allowing 32 bits to
// span 8GB of storage. In other words, (unscaled_section_offset << 1) would
// be the true section offset.
//
// The math here is slightly subtle -- increasing alignment also increases the
// worst-case file size due to the new alignment padding, which in turn could
// require increased alignment. For example, if our section were 8 GiB minus 1 byte,
// and we decided to increase alignment to make every offset even so we could
// address 8GiB, that alignment padding may push us over 8GiB, so we need even
// more alignment. We conservatively take that into account in our calculations by
// assuming the worst-case padding is introduced for every edge list.
let mut alignment_shift = 0;
while (total_bytes + (num_edge_lists * ((1 << alignment_shift) - 1))) >> alignment_shift
> std::u32::MAX as usize
{
alignment_shift += 1;
}
alignment_shift
}
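// Illustrative sketch (not used by the production code, and it assumes a 64-bit usize):
// for realistic sizes the shift is zero; it only becomes non-zero once the (padded) edge
// list section could no longer be addressed with 32-bit offsets.
#[cfg(test)]
mod alignment_shift_sketch {
    use super::*;
    #[test]
    fn small_and_huge_sections() {
        assert_eq!(compute_alignment_shift(10, 1_000), 0);
        // ~6 GiB of edge lists needs one bit of alignment so offsets fit in a u32.
        assert_eq!(compute_alignment_shift(1_000, 6usize << 30), 1);
    }
}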
/// Write the edge map, which maps a `HashIndex` to the section offset for its
/// list of edges.
fn write_edge_map(
file: &File,
num_deps: usize,
in_edges_index: &[u8],
edges_index_file_offset: u64,
edge_lists: &[Box<[u8]>],
alignment_shift: u8,
) -> std::io::Result<()> {
// Start decompressing the edge indices.
let mut z = zstd::stream::read::Decoder::with_buffer(in_edges_index)?;
let mut buf = [0u8; 9];
// Byte offset in the edge_list section for the next edge list.
let mut out_offset = 0u64;
let mut result: Vec<u32> = Vec::with_capacity(num_deps);
let mut edge_list_index = 0;
for i in 0..num_deps {
// Read in the value that tells us whether this is a new edge list or a reference
// to an earlier one. See `write_compressed_edge_map`.
let n = read_vint64(&mut z)?;
let val = if n != 0 {
// This is a back reference, copy whatever the past entry was.
result[i - n as usize]
} else {
// A new edge list; the corresponding edge_list section of the file will have
// a new edge list appended to match.
// Skip over any alignment padding to find the next serialized edge list.
let pad = out_offset.wrapping_neg() & ((1 << alignment_shift) - 1);
out_offset += pad;
// Drop always-zero alignment bits, so the offset still fits in 32 bits.
let shifted_offset: u32 = (out_offset >> alignment_shift).try_into().unwrap();
// Advance the file offset to take into account this edge list's contents.
out_offset += edge_lists[edge_list_index].len() as u64;
edge_list_index += 1;
shifted_offset
};
result.push(val);
}
// We should be exactly at EOF.
if z.read(&mut buf[..1])? != 0 {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidData,
"Too many edge list index bytes",
));
}
file.write_all_at(bytemuck::cast_slice(&result), edges_index_file_offset)
}
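// Illustrative sketch (not used by the production code): the `wrapping_neg() & mask`
// expression above is the usual trick for "bytes of padding needed to round an offset
// up to the next multiple of 1 << alignment_shift".
#[cfg(test)]
mod padding_math_sketch {
    #[test]
    fn wrapping_neg_padding() {
        let pad = |offset: u64, shift: u8| offset.wrapping_neg() & ((1u64 << shift) - 1);
        assert_eq!(pad(0, 3), 0);
        assert_eq!(pad(5, 3), 3); // 5 + 3 == 8
        assert_eq!(pad(8, 3), 0);
        assert_eq!(pad(7, 0), 0); // shift 0 never pads
    }
}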
/// Append the edge lists to the file at `path`.
fn write_edge_lists(
path: &Path,
edge_lists: &[Box<[u8]>],
alignment_shift: u8,
) -> std::io::Result<()> {
let file = OpenOptions::new().append(true).open(path)?;
let mut buf = BufWriter::new(file);
// Current byte offset into the `edge_lists` section of the file.
let mut offset = 0u64;
for edges in edge_lists.iter() {
// Insert alignment padding. `write_edge_map` does the equivalent
// math, so it computes the same section offsets chosen here.
while offset & ((1 << alignment_shift) - 1) != 0 {
buf.write_all(&[0])?;
offset += 1;
}
// It would be nice to use `write_all_vectored` and skip a layer
// of copying, but it's still a nightly-only API.
buf.write_all(edges)?;
offset += edges.len() as u64;
}
// Flush `buf` and check for errors.
buf.into_inner()?;
Ok(())
}
/// Write both the `edge_lists_index` and the `edge_list` file sections.
///
/// Returns the alignment shift it computed, which needs to be added to the output
/// file header so readers know how to interpret the `edge_lists_index` section.
fn write_edges(
file: &File,
path: &Path,
num_deps: usize,
edges_index_file_offset: u64,
in_edges_index: &[u8],
in_edge_lists: &[u8],
) -> std::io::Result<u8> {
// Decompress the edge list section, which we need to decompress the edge list index.
let (edge_lists, total_edge_list_size) = decompress_edge_lists(in_edge_lists)?;
// Figure out what kind of alignment padding we're going to need.
let alignment_shift = compute_alignment_shift(edge_lists.len(), total_edge_list_size);
let (edge_list_index_status, edge_list_status) = rayon::join(
|| {
write_edge_map(
file,
num_deps,
in_edges_index,
edges_index_file_offset,
&edge_lists,
alignment_shift,
)
},
|| write_edge_lists(path, &edge_lists, alignment_shift),
);
edge_list_index_status?;
edge_list_status?;
Ok(alignment_shift)
}
/// Decompress the `.zhhdg` file at `in_path`, writing it to the `.hhdg` at `out_path`.
pub fn decompress(in_path: &Path, out_path: &Path) -> std::io::Result<()> {
let in_file = File::open(in_path)?;
// Safety: we rely on the memmap library to provide safety.
let in_mmap = unsafe { memmap2::Mmap::map(&in_file) }?;
drop(in_file);
let in_bytes = in_mmap.deref();
if in_bytes.len() < IN_HEADER_SIZE {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidData,
"Missing file header",
));
}
let (header_bytes, in_rest) = in_bytes.split_at(std::mem::size_of::<CompressedHeader>());
let header: &CompressedHeader = bytemuck::from_bytes(header_bytes);
if header.magic != CompressedHeader::MAGIC {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidData,
"Did not find expected file magic number",
));
}
let expected_version = CompressedHeader::LATEST_VERSION;
if header.version != expected_version {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidData,
format!(
"Expected file version number {expected_version}, got {}",
header.version
),
));
}
let num_deps = header.num_deps as usize;
let deps_size = num_deps * std::mem::size_of::<Dep>();
let (in_deps_bytes, in_rest) = in_rest.split_at(deps_size);
let (in_edges_index, in_edge_lists) = in_rest.split_at(header.edge_map_size as usize);
// The output file is divided into 5 sections.
let out_header_size = std::mem::size_of::<UncompressedHeader>();
let deps_order_size = num_deps * 4;
let edge_map_size = num_deps * 4;
// Create the output file. This size does not include the edge lists which we'll append;
// we don't know their sizes yet.
let edges_index_offset = out_header_size + deps_size + deps_order_size;
let out_file_size = edges_index_offset + edge_map_size;
let out_file = File::create(out_path)?;
out_file.set_len(out_file_size as u64)?;
// Write out the deps and edges sections in parallel.
let (deps_status, edges_status) = rayon::join(
|| write_deps(&out_file, in_deps_bytes),
|| {
write_edges(
&out_file,
out_path,
num_deps,
edges_index_offset as u64,
in_edges_index,
in_edge_lists,
)
},
);
deps_status?;
let adjacency_list_alignment_shift = edges_status?;
write_header(&out_file, num_deps, adjacency_list_alignment_shift)
} |
TOML | hhvm/hphp/hack/src/depgraph/depgraph_decompress/ffi/Cargo.toml | # @generated by autocargo
[package]
name = "depgraph_decompress_rust_ffi"
version = "0.0.0"
edition = "2021"
[lib]
path = "depgraph_decompress_rust_ffi.rs"
[dependencies]
anyhow = "1.0.71"
decompress = { version = "0.0.0", path = "../decompress" }
ocamlrep_ocamlpool = { version = "0.1.0", git = "https://github.com/facebook/ocamlrep/", branch = "main" } |
OCaml | hhvm/hphp/hack/src/depgraph/depgraph_decompress/ffi/depgraph_decompress_ffi.ml | (*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the "hack" directory of this source tree.
*
*)
external decompress : compressed_dg_path:string -> (string, string) result
= "depgraph_decompress_ffi" |
Rust | hhvm/hphp/hack/src/depgraph/depgraph_decompress/ffi/depgraph_decompress_rust_ffi.rs | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use std::path::Path;
use anyhow::Result;
use ocamlrep_ocamlpool::ocaml_ffi;
const DECOMPRESSED_DG_FILE_NAME: &str = "hh_mini_saved_state_64bit_dep_graph_decompressed.hhdg";
ocaml_ffi! {
fn depgraph_decompress_ffi(compressed_dg_path: String) -> Result<String, String> {
let compressed_dg_path = Path::new(&compressed_dg_path);
let mut decompressed_dg_path = compressed_dg_path.to_path_buf();
decompressed_dg_path.set_file_name(DECOMPRESSED_DG_FILE_NAME);
if decompressed_dg_path.exists() {
return Ok(decompressed_dg_path.display().to_string());
}
decompress::decompress(compressed_dg_path, &decompressed_dg_path)
.map_err(|e| format!("{e:?}"))?;
let decompressed_dg_path = decompressed_dg_path.as_path().to_str()
.ok_or_else(|| "Failed to convert path to string".to_string())?
.to_string();
Ok(decompressed_dg_path)
}
} |
Rust | hhvm/hphp/hack/src/depgraph/depgraph_reader/byteutils.rs | use std::slice::SliceIndex;
#[inline(always)]
pub fn subslice<'a, I, T>(
bytes: &'a [T],
index: I,
descr: &str,
) -> Result<&'a <I as SliceIndex<[T]>>::Output, String>
where
I: SliceIndex<[T]>,
{
bytes
.get(index)
.ok_or_else(|| format!("not enough bytes while reading {}", descr))
}
/// Align a byte array to an u32-array.
///
/// Returns `None` if the byte array was not properly aligned.
///
/// Panics if `slice::align_to` behavior is unexpected.
#[inline(always)]
pub fn as_u32_slice(bytes: &[u8]) -> Option<&[u32]> {
// Safety: Safe because:
//
// 1. The u32 has no invalid states
// 2. The return type is well-defined
// 3. We don't transmute to a mutable type
// 4. We don't produce unbounded lifetimes
// 5. We explicitly check the behavior of `align_to`
let (prefix, slice, suffix) = unsafe { bytes.align_to::<u32>() };
if !prefix.is_empty() {
return None;
}
if suffix.len() >= std::mem::size_of::<u32>() {
panic!("suffix too long");
}
Some(slice)
}
/// Align a byte array to an u64-array.
///
/// Returns `None` if the byte array was not properly aligned.
///
/// Panics if `slice::align_to` behavior is unexpected.
#[inline(always)]
pub fn as_u64_slice(bytes: &[u8]) -> Option<&[u64]> {
// Safety: Safe because:
//
// 1. The u64 has no invalid states
// 2. The return type is well-defined
// 3. We don't transmute to a mutable type
// 4. We don't produce unbounded lifetimes
// 5. We explicitly check the behavior of `align_to`
let (prefix, slice, suffix) = unsafe { bytes.align_to::<u64>() };
if !prefix.is_empty() {
return None;
}
if suffix.len() >= std::mem::size_of::<u64>() {
panic!("suffix too long");
}
Some(slice)
}
#[inline(always)]
pub fn read_u32_ne(bytes: &[u8]) -> u32 {
u32::from_ne_bytes(bytes[0..4].try_into().unwrap())
}
#[inline(always)]
pub fn read_u64_ne(bytes: &[u8]) -> u64 {
u64::from_ne_bytes(bytes[0..8].try_into().unwrap())
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_read_u32_ne() {
let v: u32 = 0x45831546;
let b: [u8; 4] = v.to_ne_bytes();
assert_eq!(v, read_u32_ne(&b));
}
#[test]
fn test_read_u64_ne() {
let v: u64 = 0x45831546;
let b: [u8; 8] = v.to_ne_bytes();
assert_eq!(v, read_u64_ne(&b));
}
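    // Illustrative sketch: `as_u32_slice` is expected to succeed only when the byte slice
    // starts at a 4-byte-aligned address, and to report misalignment with `None`. This
    // leans on the same `align_to` behavior the production code relies on.
    #[test]
    fn test_as_u32_slice_alignment() {
        let v: Vec<u32> = vec![1, 2, 3];
        let bytes: &[u8] = bytemuck::cast_slice(&v);
        assert_eq!(as_u32_slice(bytes), Some(&v[..]));
        assert_eq!(as_u32_slice(&bytes[1..]), None);
    }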
} |
Rust | hhvm/hphp/hack/src/depgraph/depgraph_reader/compress.rs | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use static_assertions::const_assert_eq;
// Compressed file (.zhhdg) file layout:
//
// ```txt
// CompressedHeader
// deps: [u64; num_deps]
// edge_map: [varint; num_deps]
// One varint-encoded integer per dep (num_deps of them), indicating which edge array is used.
// - Edge arrays appear in the adjacency_list section in order of first mention in the edge map.
// - A value of 0 in the edge_map means this node is the first user of
// an edge array, so that edge list is the next one in the adjacency list
// section, and the decompressor implicitly knows which one that is.
// - Any other value N means "this is a repeat use of an edge array, look
// back N nodes in the edge map to find the most recent use of the same array."
// adjacency_list: [u8; ...]
// Each node's array of neighboring nodes has:
// - varint length
// - array of RLE blocks, where each RLE block is:
// - varint+delta coded offset from 1 past the end of the previous block, or from 0 for first block,
// except this number is doubled and the low bit set to indicate whether a repeat count follows.
// - if the low bit above is set, then a varint encoded length minus two. Else length is implicitly one.
// ```
//
// Uncompressed file (.hhdg) file layout:
//
// ```txt
// UncompressedHeader
// deps: [u64; num_deps]
// deps_sort_order: [u32; num_deps]
// edge_map: [u32; num_deps]
// adjacency_list: [struct { length: varint, rle_blocks: [RleBlock; length] }; ...]
// ```
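//
// Worked example (illustrative, not a normative part of the format notes above): the
// sorted neighbor list [3, 4, 5, 9] becomes two RLE blocks in a .zhhdg adjacency_list
// entry. The first covers 3..6: the delta from 0 is 3 and a repeat count follows, so we
// store varint(3*2 + 1) then varint(3 - 2). The second covers 9..10: the implicit base is
// one past the previous block's end (7), so we store varint((9-7)*2 + 0) with no repeat
// count. The whole entry is prefixed with its byte length as a varint.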
/// Header in a .zhhdg file.
///
/// This is repr(C) so we can use its raw representation directly
/// from a memory-mapped file. Use explicit padding to avoid
/// "struct holes".
#[repr(C)]
#[derive(Clone, Copy, bytemuck::Pod, bytemuck::Zeroable)]
pub struct CompressedHeader {
pub magic: [u8; 4], // HHDZ
pub version: u32,
pub num_deps: u64,
// Number of bytes used for the edge_map.
pub edge_map_size: u64,
}
impl CompressedHeader {
/// Magic number at the start of the file.
pub const MAGIC: [u8; 4] = *b"HHDZ";
/// Latest version number, in the file header.
pub const LATEST_VERSION: u32 = 1;
}
const_assert_eq!(std::mem::size_of::<CompressedHeader>() % 8, 0);
/// Header in a .hhdg file.
///
/// This is repr(C) so we can use its raw representation directly
/// from a memory-mapped file. Use explicit padding to avoid
/// "struct holes".
#[repr(C)]
#[derive(Clone, Copy, bytemuck::Pod, bytemuck::Zeroable)]
pub struct UncompressedHeader {
pub magic: [u8; 4],
pub version: u32,
pub num_deps: u64,
/// Each edge list is stored at an offset in its section such that this many low bits
/// of the offset are zero. This allows section offsets to be stored in 32 bits even if
/// there are more than 4GB of edge lists.
pub adjacency_list_alignment_shift: u8,
pub _alignment_padding: [u8; 7],
}
impl UncompressedHeader {
/// Magic number at the start of the file.
pub const MAGIC: [u8; 4] = *b"HHDG";
/// Latest version number, in the file header.
pub const LATEST_VERSION: u32 = 1;
}
const_assert_eq!(std::mem::size_of::<UncompressedHeader>() % 8, 0);
/// This is a compact approximation for a u32 stored in a u8.
///
/// Any input <= 0x7f is represented precisely, but larger values are
/// approximated, (poorly, for values >= 2**27, but this is optimized for
/// smaller values). The approximation is never numerically larger than the
/// input.
///
/// This byte holds either a 7-bit value, or a 5-bit value that's left
/// shifted. Specifically, it's capable of representing any of the following
/// u32 values:
///
/// ```txt
/// 0000000000000000000000000xxxxxxx
/// 00000000000000000000xxxxx0000000
/// 000000000000000xxxxx000000000000
/// 0000000000xxxxx00000000000000000
/// 00000xxxxx0000000000000000000000
/// ```
///
/// Large values will often need to be represented as the sum of several encoded bytes.
///
/// The purpose of this type is to encode RLE block lengths using only a
/// single byte for the size (for compactness, because 0x7f is almost always
/// enough). We can't use a variable-length integer encoding because we need
/// a fixed size, so we can store RLE blocks in an array and binary search
/// them. When an RLE block size is so large we can't represent it exactly
/// as a `ByteApproximatedLen`, we create multiple consecutive RLE blocks
/// whose decoded sizes add up to the desired value; thus, the set of those
/// RLE blocks exactly covers the desired range.
///
/// Needing multiple blocks is not common so the overhead in practice is small.
#[derive(Clone, Copy, bytemuck::Pod, bytemuck::Zeroable)]
#[repr(transparent)]
pub struct ByteApproximatedLen(u8);
impl ByteApproximatedLen {
/// Return the largest encodable integer <= n.
pub fn encode(n: u32) -> Self {
let v = if n <= 0x7f {
n as u8
} else {
let max_representable: u32 = 0x1f << Self::exp_to_shift(0x3);
let n = std::cmp::min(n, max_representable);
let exp = (4 - n.leading_zeros() / 5) as u8;
let mantissa = (n >> Self::exp_to_shift(exp)) & 0x1f;
0x80 | (exp << 5) | mantissa as u8
};
Self(v)
}
/// Convert this value back to the `u32` it represents.
#[inline]
pub fn decode(self) -> u32 {
let n = self.0;
if n <= 0x7f {
n as u32
} else {
let exp = (n >> 5) & 0x3;
let mantissa = n & 0x1f;
(mantissa as u32) << Self::exp_to_shift(exp)
}
}
/// Converts our two-bit exponent to a shift count.
const fn exp_to_shift(exp: u8) -> u8 {
// Shift past the low 7 bits, reserved for the <= 0x7f case.
7 + 5 * exp
}
}
/// A run-length-encoded consecutive range of integers.
///
/// This is packed so we can store them in memory-mapped files as arrays of 5-byte values.
#[repr(C, packed(1))]
#[derive(Clone, Copy, bytemuck::Pod, bytemuck::Zeroable)]
pub struct RleBlock {
pub start: u32,
pub encoded_len: ByteApproximatedLen,
}
impl RleBlock {
#[inline]
pub fn len(&self) -> u32 {
self.encoded_len.decode()
}
pub fn is_empty(&self) -> bool {
self.len() == 0
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_byte_approximated_len() {
// Encode all the values we can losslessly.
for i in 0..128 {
assert_eq!(i, ByteApproximatedLen::encode(i).decode());
}
for shift in 0..4 {
for bits in 0..32 {
let n = bits << (7 + shift * 5);
assert_eq!(n, ByteApproximatedLen::encode(n).decode());
}
}
// Clamp at the max.
assert_eq!(ByteApproximatedLen::encode(!0u32).decode(), 0x1f << 22);
// Decoded value should never be bigger than the input.
for shift in 0..32 {
let n = !0u32 >> shift;
assert!(ByteApproximatedLen::encode(n).decode() <= n);
}
}
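    // Concrete worked examples (illustrative): 127 fits in the precise 7-bit range,
    // 200 rounds down to 128 (1 << 7), and 4096 is exactly representable as 1 << 12.
    #[test]
    fn test_byte_approximated_len_examples() {
        assert_eq!(ByteApproximatedLen::encode(127).decode(), 127);
        assert_eq!(ByteApproximatedLen::encode(200).decode(), 128);
        assert_eq!(ByteApproximatedLen::encode(4096).decode(), 4096);
    }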
} |
Rust | hhvm/hphp/hack/src/depgraph/depgraph_reader/reader.rs | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
pub mod byteutils;
pub mod compress;
use std::fs::File;
use std::iter::FusedIterator;
use std::ops::Deref;
use std::ops::Range;
pub use dep::Dep;
use memmap2::Mmap;
use rayon::iter::Either;
use rayon::prelude::*;
use rpds::HashTrieSet;
use crate::compress::*;
/// An opaque token that identifies a hash list.
///
/// This can be used to see whether two Deps have the same HashList.
/// It can also be used to get the actual HashList.
#[derive(Clone, Copy, Hash, PartialEq, Eq)]
pub struct HashListId(u32);
pub trait DepGraphTrait {
/// Make sure the database is not corrupt.
///
/// If you got this far, the indexer and lookup table were
/// successfully initialized. This function checks whether
/// all hash lists can be properly read from disk.
fn validate_hash_lists(&self) -> Result<(), String>;
/// Query the hash list for a given hash.
///
/// Returns `None` if there is no hash list related to the hash.
///
/// # Panics
///
/// Panics if the file is corrupt. Use `validate_hash_lists` when
/// initializing the reader to avoid these panics.
fn hash_list_for(&self, hash: Dep) -> Option<HashList<'_>> {
self.hash_list_id_for_dep(hash)
.map(|id| self.hash_list_for_id(id))
}
fn hash_list_id_for_dep(&self, hash: Dep) -> Option<HashListId>;
fn hash_list_id_for_index(&self, index: u32) -> Option<HashListId>;
fn hash_list_for_id(&self, id: HashListId) -> HashList<'_>;
/// Query the hash list for a given hash index.
///
/// Returns `None` if there is no hash list related to the hash.
fn hash_list_for_index(&self, index: u32) -> Option<HashList<'_>> {
self.hash_list_id_for_index(index)
.map(|id| self.hash_list_for_id(id))
}
fn dependent_dependency_edge_exists(&self, dependent: Dep, dependency: Dep) -> bool;
fn contains(&self, dep: Dep) -> bool;
}
pub enum DepGraph {
New(NewDepGraph),
Old(OldDepGraph),
}
impl DepGraph {
/// Open the dependency graph, or return an error description.
pub fn from_mmap(mmap: Mmap) -> Result<DepGraph, String> {
let in_bytes: &[u8] = mmap.as_ref();
if in_bytes.len() >= std::mem::size_of::<UncompressedHeader>() {
if let Ok(maybe_header) = bytemuck::try_from_bytes::<UncompressedHeader>(
&in_bytes[..std::mem::size_of::<UncompressedHeader>()],
) {
// It's technically possible for the first four bytes of an old file
// to randomly be "HHDG", but the version won't look like a small number.
// By the time we get to a large version number we'll have deleted OldDepGraph.
if maybe_header.magic == UncompressedHeader::MAGIC && maybe_header.version < 100 {
return Ok(DepGraph::New(NewDepGraph::from_mmap(mmap)?));
}
}
}
Ok(DepGraph::Old(OldDepGraph::from_mmap(mmap)?))
}
/// Create a dependency graph opener given an open file handle.
///
/// The file handle can be safely closed afterwards.
pub fn from_file(file: &std::fs::File) -> std::io::Result<Self> {
// Safety: we rely on the memmap library to provide safety.
let mmap = unsafe { Mmap::map(file) }?;
Self::from_mmap(mmap).map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))
}
/// Create a dependency graph opener given a file path.
pub fn from_path<P: AsRef<std::path::Path>>(path: P) -> std::io::Result<Self> {
let f = File::open(path)?;
Self::from_file(&f)
}
/// Return an iterator over all hashes in a hash list.
pub fn hash_list_hashes<'a>(
&'a self,
hash_list: HashList<'a>,
) -> impl Iterator<Item = Dep> + 'a {
match (self, hash_list) {
(DepGraph::New(dg), HashList::New(h)) => Either::Left(dg.hash_list_hashes(h)),
(DepGraph::Old(dg), HashList::Old(h)) => Either::Right(dg.hash_list_hashes(h)),
_ => panic!("HashList type mismatch!"),
}
}
/// All unique dependency hashes in the graph.
pub fn all_hashes(&self) -> impl DoubleEndedIterator<Item = Dep> + ExactSizeIterator + '_ {
match self {
DepGraph::New(dg) => Either::Left(dg.all_hashes()),
DepGraph::Old(dg) => Either::Right(dg.all_hashes()),
}
}
/// All unique dependency hashes in the graph.
pub fn par_all_hashes(&self) -> impl IndexedParallelIterator<Item = Dep> + '_ {
match self {
DepGraph::New(dg) => Either::Left(dg.par_all_hashes()),
DepGraph::Old(dg) => Either::Right(dg.par_all_hashes()),
}
}
/// Add the direct typing dependents for one dependency (i.e. the fanout of
/// that one dependency).
pub fn add_typing_deps_for_dep(&self, acc: &mut HashTrieSet<Dep>, dep: Dep) {
if let Some(dept_hash_list) = self.hash_list_for(dep) {
for dept in self.hash_list_hashes(dept_hash_list) {
acc.insert_mut(dept);
}
}
}
/// Query the direct typing dependents for the given set of dependencies.
pub fn query_typing_deps_multi(&self, deps: &HashTrieSet<Dep>) -> HashTrieSet<Dep> {
let mut acc = deps.clone();
for dep in deps {
self.add_typing_deps_for_dep(&mut acc, *dep);
}
acc
}
}
impl Deref for DepGraph {
type Target = dyn DepGraphTrait + Send + Sync;
fn deref(&self) -> &Self::Target {
match self {
DepGraph::New(dg) => dg,
DepGraph::Old(dg) => dg,
}
}
}
/// A memory-mapped dependency graph.
pub struct NewDepGraph {
/// The file holding the storage for this graph.
storage: Mmap,
/// All Deps in the graph. These are NOT sorted -- use `deps_order` if you need sorting.
///
/// This holds the byte range in the mmap file for this data -- use `deps()` to access.
deps_range: Range<usize>,
/// Indices into `deps` providing sorted order, e.g. deps[deps_order[0]] is first.
/// One entry per entry in `deps`.
///
/// This holds the byte range in the mmap file for this data -- use `deps_order()` to access.
deps_order_range: Range<usize>,
/// Indices in `adjacency_lists` for the serialized edge list for the corresponding `deps`
/// entry. One entry per entry in `deps`.
///
/// Each entry in this array must be left shifted by `adjacency_list_alignment_shift`
/// before being used as an index. This is to support `edge_lists` larger than 4GB.
///
/// This holds the byte range in the mmap file for this data -- use `unshifted_edge_list_offset()` to access.
unshifted_edge_list_offset_range: Range<usize>,
/// Amount to left-shift unshifted_edge_list_offset to get a byte index into `adjacency_lists`.
adjacency_list_alignment_shift: u8,
/// Individually serialized edge lists. `NewHashList` knows how to deserialize.
/// This holds the byte range in the mmap file for this data -- use `adjacency_lists()` to access.
adjacency_lists_range: Range<usize>,
}
impl NewDepGraph {
fn from_mmap(mmap: Mmap) -> Result<Self, String> {
let data: &[u8] = mmap.as_ref();
let hlen = std::mem::size_of::<UncompressedHeader>();
if data.len() < hlen {
return Err("Missing header".to_string());
}
let header_bytes = &data[..hlen];
let header: &UncompressedHeader =
bytemuck::try_from_bytes(header_bytes).map_err(|e| format!("{}", e))?;
if header.magic != UncompressedHeader::MAGIC {
return Err("Incorrect header magic number".to_string());
}
let expected_version = UncompressedHeader::LATEST_VERSION;
if header.version != expected_version {
return Err(format!(
"Incorrect header version; expected {}, got {}",
expected_version, header.version
));
}
let num_deps = header.num_deps as usize;
let g = NewDepGraph {
deps_range: hlen..hlen + num_deps * 8,
deps_order_range: hlen + num_deps * 8..hlen + num_deps * 12,
unshifted_edge_list_offset_range: hlen + num_deps * 12..hlen + num_deps * 16,
adjacency_list_alignment_shift: header.adjacency_list_alignment_shift,
adjacency_lists_range: hlen + num_deps * 16..mmap.len(),
storage: mmap,
};
Ok(g)
}
/// Return `true` iff the given hash list contains the index for the given hash.
fn hash_list_contains(&self, hash_list: HashList<'_>, dep: Dep) -> bool {
if let Some(index) = self.get_index(dep) {
hash_list.has_index(index)
} else {
false
}
}
fn deps(&self) -> &[Dep] {
bytemuck::cast_slice(&self.storage[self.deps_range.clone()])
}
fn deps_order(&self) -> &[u32] {
bytemuck::cast_slice(&self.storage[self.deps_order_range.clone()])
}
fn unshifted_edge_list_offset(&self) -> &[u32] {
bytemuck::cast_slice(&self.storage[self.unshifted_edge_list_offset_range.clone()])
}
fn adjacency_lists(&self) -> &[u8] {
&self.storage[self.adjacency_lists_range.clone()]
}
/// Implementation helper for `DepGraph::hash_list_hashes`.
fn hash_list_hashes<'a>(
&'a self,
hash_list: NewHashList<'a>,
) -> impl Iterator<Item = Dep> + 'a {
let deps = self.deps();
hash_list.hash_indices().map(move |i| deps[i as usize])
}
/// Returns the internal, physical order for a Dep, or None if not found.
pub fn get_index(&self, dep: Dep) -> Option<u32> {
let deps = self.deps();
let deps_order = self.deps_order();
deps_order
.binary_search_by_key(&dep, move |&i| deps[i as usize])
.map_or(None, move |x| Some(deps_order[x]))
}
/// All unique dependency hashes in the graph, in sorted order.
pub fn all_hashes(
&self,
) -> impl DoubleEndedIterator<Item = Dep> + ExactSizeIterator + FusedIterator + '_ {
let deps = self.deps();
self.deps_order().iter().map(move |&i| deps[i as usize])
}
/// All unique dependency hashes in the graph, in sorted order, in parallel.
pub fn par_all_hashes(&self) -> impl IndexedParallelIterator<Item = Dep> + '_ {
let deps = self.deps();
self.deps_order().par_iter().map(move |&i| deps[i as usize])
}
/// Returns all hashes in internal node order. More efficient than `par_all_hashes`.
pub fn par_all_hashes_in_physical_order(
&self,
) -> impl IndexedParallelIterator<Item = Dep> + '_ {
self.deps().par_iter().copied()
}
}
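// Illustrative sketch (not used by the production code): `get_index` above binary-searches
// through the `deps_order` permutation rather than the unsorted `deps` array. The same
// indirection with plain vectors:
#[cfg(test)]
mod sorted_order_lookup_sketch {
    #[test]
    fn lookup_through_permutation() {
        let deps = [50u64, 10, 30]; // physical (file) order
        let deps_order = [1usize, 2, 0]; // indices of `deps` in sorted order
        let found = deps_order
            .binary_search_by_key(&30u64, |&i| deps[i])
            .ok()
            .map(|pos| deps_order[pos]);
        assert_eq!(found, Some(2)); // 30 lives at physical index 2
    }
}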
impl DepGraphTrait for NewDepGraph {
fn validate_hash_lists(&self) -> Result<(), String> {
// TODO: What to check here?
Ok(())
}
fn hash_list_id_for_dep(&self, dep: Dep) -> Option<HashListId> {
self.hash_list_id_for_index(self.get_index(dep)?)
}
fn hash_list_id_for_index(&self, index: u32) -> Option<HashListId> {
// This function cannot fail, because we assume an index is always valid.
// It would be crazy to be asking about some random unknown index.
// Once OldDepGraph is gone, make this infallible.
let id = HashListId(self.unshifted_edge_list_offset()[index as usize]);
Some(id)
}
fn hash_list_for_id(&self, id: HashListId) -> HashList<'_> {
let start = (id.0 as usize) << self.adjacency_list_alignment_shift;
let bytes = &self.adjacency_lists()[start..];
HashList::New(NewHashList::new(bytes))
}
/// Return whether the given dependent-to-dependency edge is in the graph.
fn dependent_dependency_edge_exists(&self, dependent: Dep, dependency: Dep) -> bool {
match self.hash_list_for(dependency) {
Some(hash_list) => self.hash_list_contains(hash_list, dependent),
None => false,
}
}
fn contains(&self, dep: Dep) -> bool {
self.get_index(dep).is_some()
}
}
/// A memory-mapped dependency graph.
pub struct OldDepGraph {
/// The file holding the storage for this graph.
storage: Mmap,
/// The byte range for the Indexer in `storage`.
indexer_range: Range<usize>,
/// The byte range for the LookupTable in `storage`.
lookup_table_range: Range<usize>,
}
impl OldDepGraph {
/// Initialize a dependency graph using the byte array
/// from a memory map.
fn from_mmap(mmap: Mmap) -> Result<Self, String> {
let data = mmap.deref();
if data.len() < 4 * 2 {
return Err("not enough bytes to read header".to_string());
}
// Parse the header of the structure.
//
// Contains the offset to the indexer and the lookup table.
//
// Memory layout:
//
// ```txt
// 32 bits
// +-----------------------+
// | indexer offset |
// +-----------------------+
// | lookup table offset |
// +-----------------------+
// ```
let indexer_offset = byteutils::read_u32_ne(data);
let lookup_table_offset = byteutils::read_u32_ne(&data[4..]);
let indexer_offset: usize = indexer_offset.try_into().unwrap();
let lookup_table_offset: usize = lookup_table_offset.try_into().unwrap();
let (indexer_range, num_hashes) = Indexer::find_mmap_byte_range(data, indexer_offset)?;
let lookup_table_range =
LookupTable::find_mmap_byte_range(data, lookup_table_offset, num_hashes)?;
let g = OldDepGraph {
indexer_range,
lookup_table_range,
storage: mmap,
};
Ok(g)
}
fn indexer(&self) -> Indexer<'_> {
let hashes = bytemuck::cast_slice(&self.storage[self.indexer_range.clone()]);
Indexer { hashes }
}
fn lookup_table(&self) -> LookupTable<'_> {
let hash_list_offsets =
bytemuck::cast_slice(&self.storage[self.lookup_table_range.clone()]);
LookupTable { hash_list_offsets }
}
/// Return `true` iff the given hash list contains the index for the given hash.
fn hash_list_contains(&self, hash_list: HashList<'_>, hash: Dep) -> bool {
if let Some(index) = self.indexer().find(hash.into()) {
hash_list.has_index(index)
} else {
false
}
}
/// Implementation helper for `DepGraph::hash_list_hashes`.
fn hash_list_hashes<'a>(
&'a self,
hash_list: OldHashList<'a>,
) -> impl Iterator<Item = Dep> + ExactSizeIterator + FusedIterator + 'a {
let indexer = self.indexer();
hash_list
.indices
.iter()
.map(move |&index| Dep::new(indexer.hashes[index as usize]))
}
/// All unique dependency hashes in the graph.
fn all_hashes(&self) -> impl DoubleEndedIterator<Item = Dep> + ExactSizeIterator + '_ {
self.indexer().hashes.iter().copied().map(Dep::new)
}
/// All unique dependency hashes in the graph, in parallel.
fn par_all_hashes(&self) -> impl IndexedParallelIterator<Item = Dep> + '_ {
self.indexer().hashes.par_iter().copied().map(Dep::new)
}
}
impl DepGraphTrait for OldDepGraph {
fn validate_hash_lists(&self) -> Result<(), String> {
let len: usize = self.indexer().len();
let lookup_table = self.lookup_table();
let storage_bytes = &self.storage[..];
for index in 0..len {
match lookup_table.get(index as u32) {
Some(list_offset) => {
let data = byteutils::subslice(
storage_bytes,
list_offset as usize..,
"hash list data during validation",
)?;
let _ = OldHashList::new(data)?;
}
None => {}
}
}
Ok(())
}
/// Query the hash list for a given hash.
///
/// Returns `None` if there is no hash list related to the hash.
///
/// # Panics
///
/// Panics if the file is corrupt. Use `validate_hash_lists` when
/// initializing the reader to avoid these panics.
fn hash_list_for(&self, hash: Dep) -> Option<HashList<'_>> {
self.hash_list_id_for_dep(hash)
.map(|id| self.hash_list_for_id(id))
}
/// Map a `Dep` to the `HashListId` that uniquely identifies its `HashList`.
///
/// Unless you are interested in `HashList` identity, you want to call
/// `hash_list_for` instead.
fn hash_list_id_for_dep(&self, hash: Dep) -> Option<HashListId> {
let index = self.indexer().find(hash.into())?;
self.hash_list_id_for_index(index)
}
fn hash_list_id_for_index(&self, index: u32) -> Option<HashListId> {
Some(HashListId(self.lookup_table().get(index)?))
}
/// Maps a `HashListId` to its `HashList`.
fn hash_list_for_id(&self, id: HashListId) -> HashList<'_> {
let list_offset = id.0;
HashList::Old(OldHashList::new(&self.storage[list_offset as usize..]).unwrap())
}
/// Return whether the given dependent-to-dependency edge is in the graph.
fn dependent_dependency_edge_exists(&self, dependent: Dep, dependency: Dep) -> bool {
match self.hash_list_for(dependency) {
Some(hash_list) => self.hash_list_contains(hash_list, dependent),
None => false,
}
}
fn contains(&self, dep: Dep) -> bool {
self.indexer().find(dep.into()).is_some()
}
}
/// The indexer table.
///
/// The indexer table maps a hash to an index.
///
/// Memory layout:
///
/// ```txt
/// 64 bits
/// +===========+
/// | length |
/// +===========+
/// | hash1 |
/// +-----------+
/// | hash2 |
/// +-----------+
/// | ... |
/// +===========+
/// ```
#[derive(Clone, Copy)]
struct Indexer<'bytes> {
/// All hashes.
hashes: &'bytes [u64],
}
impl<'bytes> Indexer<'bytes> {
/// Determine which byte range in the given block of bytes holds the `Indexer`.
fn find_mmap_byte_range(
data: &[u8],
start_byte_offset: usize,
) -> Result<(Range<usize>, usize), String> {
if start_byte_offset % 8 != 0 {
return Err("indexer start offset misaligned".to_string());
}
let len_end = start_byte_offset + 8;
if data.len() < len_end {
return Err("not enough bytes to read indexer".to_string());
}
// Read in table length.
let num_hashes = bytemuck::pod_read_unaligned::<u64>(&data[start_byte_offset..len_end]);
if num_hashes > (1 << 32) - 1 {
return Err("indexer: length is too big".to_string());
}
let num_hashes = num_hashes as usize;
// Verify that the u64 array is big enough.
let table_start_offset = len_end;
let table_end_offset = table_start_offset + num_hashes * 8;
if table_end_offset > data.len() {
return Err("indexer: not enough hashes".to_string());
}
let file_byte_range = table_start_offset..table_end_offset;
Ok((file_byte_range, num_hashes))
}
/// The number of hashes in the indexer.
#[inline]
fn len(self) -> usize {
self.hashes.len()
}
/// Binary search the indexer to find the index of a hash.
#[inline]
fn find(self, hash: u64) -> Option<u32> {
if let Ok(index) = self.hashes.binary_search(&hash) {
Some(index as u32)
} else {
None
}
}
}
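// A minimal sketch (illustrative only): builds the length-prefixed hash array
// described above and checks that `find_mmap_byte_range` and `find` behave as
// documented. The hash values are arbitrary.
#[cfg(test)]
mod indexer_layout_example {
    use super::*;

    #[test]
    fn length_prefix_then_sorted_hashes() {
        // Two hashes follow the 8-byte length field, all native-endian u64.
        let mut data = Vec::new();
        data.extend_from_slice(&2u64.to_ne_bytes());
        data.extend_from_slice(&10u64.to_ne_bytes());
        data.extend_from_slice(&42u64.to_ne_bytes());
        let (range, num_hashes) = Indexer::find_mmap_byte_range(&data, 0).unwrap();
        assert_eq!(range, 8..24);
        assert_eq!(num_hashes, 2);

        // `find` binary-searches the sorted hash array for a hash's index.
        let indexer = Indexer { hashes: &[10, 42] };
        assert_eq!(indexer.find(42), Some(1));
        assert_eq!(indexer.find(7), None);
    }
}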
/// The actual lookup table.
///
/// Currently this is a list of pointers to dependent-lists.
///
/// To index into a lookup table with a hash, you should first find
/// the hash's index using the indexer.
///
/// Memory layout:
///
/// ```txt
/// 32 bits
/// +========================+
/// | pointer for hash 1 |
/// +------------------------+
/// | pointer for hash 2 |
/// +------------------------+
/// | ... |
/// +========================+
/// ```
#[derive(Clone, Copy)]
struct LookupTable<'bytes> {
/// All hash list offsets in the table.
hash_list_offsets: &'bytes [u32],
}
impl<'bytes> LookupTable<'bytes> {
/// Determine which byte range in the given block of bytes holds the `LookupTable`.
fn find_mmap_byte_range(
data: &[u8],
start_byte_offset: usize,
num_hashes: usize,
) -> Result<Range<usize>, String> {
// Read in u32 array
if start_byte_offset % 4 != 0 {
return Err("lookup table: data is not properly aligned".to_string());
}
let end_byte_offset = start_byte_offset + num_hashes * 4;
if end_byte_offset > data.len() {
return Err("lookup table: not enough pointers".to_string());
}
Ok(start_byte_offset..end_byte_offset)
}
#[inline]
fn get(self, index: u32) -> Option<u32> {
let offset = self.hash_list_offsets.get(index as usize).copied()?;
if offset == 0 { None } else { Some(offset) }
}
}
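// Illustrative sketch: a lookup table where the second hash has no edge list.
// Offset 0 is the sentinel that `get` above treats as "no hash list"; the
// non-zero offsets here are arbitrary.
#[cfg(test)]
mod lookup_table_example {
    use super::*;

    #[test]
    fn zero_offset_means_no_hash_list() {
        let table = LookupTable {
            hash_list_offsets: &[64, 0, 128],
        };
        assert_eq!(table.get(0), Some(64));
        assert_eq!(table.get(1), None); // offset 0 => no dependents recorded
        assert_eq!(table.get(3), None); // index out of range
    }
}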
/// A pointer to a list of hashes.
///
/// This data structure is read lazily.
///
/// Memory layout:
///
///
/// ```txt
/// 32 bits
/// +========================+
/// | length |
/// +========================+
/// | index of hash 1 |
/// +------------------------+
/// | index of hash 2 |
/// +------------------------+
/// | ... |
/// +========================+
/// ```
#[derive(Clone, Copy)]
pub struct OldHashList<'bytes> {
indices: &'bytes [u32],
}
impl<'bytes> OldHashList<'bytes> {
fn new(data: &'bytes [u8]) -> Result<Self, String> {
let data = byteutils::as_u32_slice(data)
.ok_or_else(|| "hash list: not properly aligned".to_string())?;
let len: u32 = *data
.first()
.ok_or_else(|| "hash list: couldn't read length".to_string())?;
let len: usize = len as usize;
let indices = byteutils::subslice(data, 1.., "hash list.data")?;
if indices.len() < len {
return Err("hash list: not enough indices".to_string());
}
let indices = &indices[..len];
Ok(OldHashList { indices })
}
#[inline]
pub fn len(&self) -> u32 {
self.indices.len() as u32
}
pub fn is_empty(&self) -> bool {
// The old format stores indices directly, so this is just an O(1) length check.
self.len() == 0
}
#[inline]
fn has_index(&self, index: u32) -> bool {
self.indices.binary_search(&index).is_ok()
}
/// Return all raw hash indices in this list.
pub fn hash_indices(&self) -> impl Iterator<Item = u32> + FusedIterator + '_ {
self.indices.iter().copied()
}
}
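// Illustrative sketch of the old hash-list layout documented above: a 32-bit
// length followed by sorted 32-bit indices. Building the bytes from a `u32`
// buffer keeps them 4-byte aligned, as `OldHashList::new` requires; the index
// values are arbitrary.
#[cfg(test)]
mod old_hash_list_example {
    use super::*;

    #[test]
    fn length_prefix_then_sorted_indices() {
        let words: Vec<u32> = vec![3, 2, 5, 9]; // len = 3, indices 2, 5, 9
        let bytes: &[u8] = bytemuck::cast_slice(&words);
        let list = OldHashList::new(bytes).unwrap();
        assert_eq!(list.len(), 3);
        assert!(list.has_index(5));
        assert!(!list.has_index(4));
        assert_eq!(list.hash_indices().collect::<Vec<_>>(), vec![2, 5, 9]);
    }
}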
pub struct NewHashList<'bytes> {
blocks: &'bytes [RleBlock],
// The total number of indices that walking `hash_indices()` will yield.
//
// This is identical to the sum of all the `blocks` lengths, but it's
// precomputed here to keep the `len()` method O(1).
num_indices: u32,
}
impl<'bytes> NewHashList<'bytes> {
fn new(mut b: &'bytes [u8]) -> Self {
// The raw memory representation looks like two lengths followed by an array:
// num_blocks: vint64
// num_indices: vint64
// [RleBlock; num_blocks]
let num_blocks = vint64::decode(&mut b).unwrap() as usize;
// If the list is long enough, we'll have precomputed the number of indices.
// This way self.len() is O(1) not O(N).
let num_indices = vint64::decode(&mut b).unwrap() as u32;
Self {
blocks: bytemuck::cast_slice(&b[..num_blocks * std::mem::size_of::<RleBlock>()]),
num_indices,
}
}
pub fn is_empty(&self) -> bool {
self.num_indices == 0
}
/// Returns the number of indices that `hash_indices()` will visit. This may be far more than
/// the length of `self.blocks`, due to run-length encoding.
pub fn len(&self) -> u32 {
self.num_indices
}
fn has_index(&self, index: u32) -> bool {
match self.blocks.binary_search_by_key(&index, |b| b.start) {
Ok(_) => true,
Err(slot) => {
// Not an exact match for a block start, but maybe it's contained in a block.
// `slot` is the insertion point, so we actually want the previous block.
slot.checked_sub(1).map_or(false, |i| {
let b = &self.blocks[i];
// We already know b.start < index since binary_search got here,
// and we have no zero-length blocks so there's no ambiguity.
index - b.start < b.len()
})
}
}
}
/// Return raw hash indices in this list.
pub fn hash_indices(
&self,
) -> impl DoubleEndedIterator<Item = u32> + std::iter::FusedIterator + 'bytes {
// If we even care, we could create an ExactSizeIterator type using `self.num_indices`.
self.blocks.iter().flat_map(|b| Range {
start: b.start,
end: b.start + b.len(),
})
}
}
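// Minimal standalone sketch of the run-length containment check performed by
// `NewHashList::has_index` above. `Block` is a stand-in for the real
// `RleBlock` (defined elsewhere); only the binary-search-then-check-previous-
// block logic is illustrated.
#[cfg(test)]
mod rle_containment_example {
    struct Block {
        start: u32,
        len: u32,
    }

    fn contains(blocks: &[Block], index: u32) -> bool {
        match blocks.binary_search_by_key(&index, |b| b.start) {
            // Exact match on a block start.
            Ok(_) => true,
            // Otherwise check whether the previous block's run covers `index`.
            Err(slot) => slot
                .checked_sub(1)
                .map_or(false, |i| index - blocks[i].start < blocks[i].len),
        }
    }

    #[test]
    fn finds_indices_inside_runs() {
        // Two runs: indices 10..13 and index 20.
        let blocks = [Block { start: 10, len: 3 }, Block { start: 20, len: 1 }];
        assert!(contains(&blocks, 11));
        assert!(contains(&blocks, 20));
        assert!(!contains(&blocks, 13));
        assert!(!contains(&blocks, 5));
    }
}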
pub enum HashList<'bytes> {
Old(OldHashList<'bytes>),
New(NewHashList<'bytes>),
}
impl<'bytes> HashList<'bytes> {
// FIXME: Can we delete this? It's O(n) for NewHashList.
pub fn len(&self) -> u32 {
match self {
HashList::Old(x) => x.len(),
HashList::New(x) => x.len(),
}
}
pub fn is_empty(&self) -> bool {
match self {
HashList::Old(x) => x.is_empty(),
HashList::New(x) => x.is_empty(),
}
}
fn has_index(&self, index: u32) -> bool {
match self {
HashList::Old(x) => x.has_index(index),
HashList::New(x) => x.has_index(index),
}
}
/// Return all raw hash indices in this list.
pub fn hash_indices(&self) -> impl Iterator<Item = u32> + '_ {
match self {
HashList::Old(x) => Either::Left(x.hash_indices()),
HashList::New(x) => Either::Right(x.hash_indices()),
}
}
} |
Rust | hhvm/hphp/hack/src/depgraph/depgraph_writer/writer.rs | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use std::fs::File;
use std::io::BufWriter;
use std::io::Write;
use std::path::Path;
use bytemuck::Pod;
use bytemuck::Zeroable;
pub use dep::Dep;
use log::info;
use newtype::newtype_int;
use newtype::IdVec;
// A 32-bit index into a table of Deps.
newtype_int!(HashIndex, u32, HashIndexMap, HashIndexSet, Pod, Zeroable);
// Type-safe hash list index wrapper
newtype_int!(
HashListIndex,
u32,
HashListIndexMap,
HashListIndexSet,
Pod,
Zeroable
);
pub struct MemDepGraph {
/// Unique hashes, in sorted order.
pub hashes: IdVec<HashIndex, Dep>,
/// Which edge list each member of `hashes` has (index into `edge_lists`).
/// Same length as `hashes`.
pub edge_list_indices: IdVec<HashIndex, HashListIndex>,
/// Unique edge lists in some canonical order.
pub edge_lists: IdVec<HashListIndex, Box<[HashIndex]>>,
}
impl MemDepGraph {
/// Returns a succession of dependency -> dependents mappings, containing all of the
/// dependents to which that dependency has an edge.
pub fn all_edges(&self) -> impl Iterator<Item = (Dep, impl Iterator<Item = Dep> + '_)> + '_ {
self.edge_list_indices
.iter()
.zip(self.hashes.iter())
.filter_map(|(&edge_list_index, &dependency)| {
let edges = &self.edge_lists[edge_list_index];
if edges.is_empty() {
None
} else {
Some((dependency, edges.iter().map(|&h| self.hashes[h])))
}
})
}
}
/// Write the given `MemDepGraph` to disk.
pub fn write_dep_graph(output: &Path, g: &MemDepGraph) -> std::io::Result<()> {
info!("Opening output file at {:?}", output);
let f = File::create(output)?;
let num_deps = g.hashes.len() as u64;
let header_size = 8;
let indexer_size = 8 + num_deps * 8;
let lookup_size = num_deps * 4;
info!("Calculating hash list offsets");
let hash_list_start = header_size + indexer_size + lookup_size;
let mut cur_offset = hash_list_start;
// These are where the hash lists end up in memory.
let edge_list_offsets: Vec<u32> = g
.edge_lists
.iter()
.map(|edges| {
if edges.is_empty() {
// As a special case, the empty list pretends to have file offset 0.
0
} else {
let offset = cur_offset;
cur_offset += 4 + 4 * edges.len() as u64;
offset as u32
}
})
.collect();
// Guarantee we didn't blow past the 4GiB file size limit.
assert!(cur_offset <= !0u32 as u64 + 1);
let mut out = BufWriter::new(f);
// Write out the sections other than the hash lists.
// Write the header and the size field at the start of the indexer.
info!("Writing header");
let indexer_offset = header_size;
let lookup_offset = indexer_offset + indexer_size;
out.write_all(&(indexer_offset as u32).to_ne_bytes())?;
out.write_all(&(lookup_offset as u32).to_ne_bytes())?;
// Write indexer.
info!("Writing indexer");
out.write_all(&num_deps.to_ne_bytes())?;
out.write_all(bytemuck::cast_slice(&g.hashes))?;
// Write hash list file offsets.
info!("Writing hash list lookup table");
for &i in g.edge_list_indices.iter() {
out.write_all(&edge_list_offsets[i.0 as usize].to_ne_bytes())?;
}
// Write edge lists.
info!("Writing edge lists");
for h in g.edge_lists.iter() {
if h.is_empty() {
// As a special case, empty edge lists aren't stored in the file.
continue;
}
out.write_all(&(h.len() as u32).to_ne_bytes())?;
out.write_all(bytemuck::cast_slice(&h[..]))?;
}
// Flush the file and close it before logging we are done.
drop(out.into_inner()?);
info!(".hhdg write complete");
Ok(())
} |
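// Back-of-envelope check (illustrative only) of the offset arithmetic used in
// `write_dep_graph`: with 3 deps, hash lists start at 8 (header) + 8 + 3*8
// (indexer) + 3*4 (lookup table) = 52 bytes into the file.
#[cfg(test)]
mod layout_arithmetic_example {
    #[test]
    fn hash_list_start_for_three_deps() {
        let num_deps: u64 = 3;
        let header_size = 8u64;
        let indexer_size = 8 + num_deps * 8;
        let lookup_size = num_deps * 4;
        assert_eq!(header_size + indexer_size + lookup_size, 52);
    }
}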
Rust | hhvm/hphp/hack/src/depgraph/hhdg/diff.rs | // Copyright (c) Meta Platforms, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use std::path::PathBuf;
use anyhow::Context;
use anyhow::Result;
use clap::Parser;
use depgraph_reader::Dep;
use depgraph_reader::DepGraph;
use hash::HashSet;
use human_readable_dep_map::HumanReadableDepMap;
use rayon::prelude::*;
/// A tool for comparing hhdg files
#[derive(Parser, Debug)]
pub struct Opts {
/// The first .hhdg file
dg1: PathBuf,
/// The second .hhdg file
dg2: PathBuf,
/// Optional .txt files providing human-readable hash -> (kind, Dependency) pairs
depmaps: Vec<PathBuf>,
}
/// Report nodes and edges in dg1 but not in dg2.
///
/// Returns the total number of differences found.
fn compare(dg1: &DepGraph, dg2: &DepGraph, prefix: &str, dep_map: &HumanReadableDepMap) -> usize {
let mut num_different = 0;
// list nodes in dg1 that are not in dg2
for h in dg1.all_hashes() {
if !dg2.contains(h) {
num_different += 1;
println!("{prefix} {h} {}", dep_map.fmt(h));
}
}
// list edges in dg1 that are not in dg2
num_different += dg1
.par_all_hashes()
.with_min_len(1)
.with_max_len(1)
.filter(|&dependency| {
let mut different = false;
if let Some(dg1_hash_list) = dg1.hash_list_for(dependency) {
let dg2_hashes: HashSet<Dep> = dg2
.hash_list_for(dependency)
.map_or_else(HashSet::default, |hl| dg2.hash_list_hashes(hl).collect());
for dependent in dg1.hash_list_hashes(dg1_hash_list) {
if !dg2_hashes.contains(&dependent) {
different = true;
println!(
"{prefix} {} -> {}",
dep_map.fmt(dependent),
dep_map.fmt(dependency)
);
}
}
}
different
})
.count();
num_different
}
pub(crate) fn run(opts: Opts) -> Result<usize> {
let dg1 = DepGraph::from_path(&opts.dg1).with_context(|| opts.dg1.display().to_string())?;
let dg2 = DepGraph::from_path(&opts.dg2).with_context(|| opts.dg2.display().to_string())?;
let dep_map = HumanReadableDepMap::default();
if !opts.depmaps.is_empty() {
// Only list unknown nodes if depmaps were provided
let lists_of_collisions = (opts.depmaps.par_iter())
.map(|path| dep_map.load(path))
.collect::<Result<Vec<Vec<_>>>>()?;
for collision in lists_of_collisions.into_iter().flatten() {
println!("{collision}");
}
// list unknown nodes in dg1
for h in dg1.all_hashes() {
if !dep_map.contains(h) {
println!("{}: {h:016x} ({h}): unknown hash", opts.dg1.display());
}
}
// list unknown nodes in dg2
for h in dg2.all_hashes() {
if !dep_map.contains(h) {
println!("{}: {h:016x} ({h}): unknown hash", opts.dg2.display());
}
}
}
Ok(compare(&dg1, &dg2, "-", &dep_map) + compare(&dg2, &dg1, "+", &dep_map))
} |
Rust | hhvm/hphp/hack/src/depgraph/hhdg/hhdg.rs | // Copyright (c) Meta Platforms, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
mod diff;
use anyhow::Result;
use clap::Parser;
/// A multi-tool for working with hhdg files
#[derive(Parser, Debug)]
pub enum Command {
/// Compare two hhdg files and show differences with human readable hash labels
Diff(diff::Opts),
}
fn main() -> Result<()> {
match Command::parse() {
Command::Diff(opts) => {
let num_different = diff::run(opts)?;
if num_different != 0 {
std::process::exit(1);
}
Ok(())
}
}
} |
TOML | hhvm/hphp/hack/src/depgraph/hhdg/cargo/hhdg/Cargo.toml | # @generated by autocargo
[package]
name = "hhdg"
version = "0.0.0"
edition = "2021"
[[bin]]
name = "hhdg"
path = "../../hhdg.rs"
[dependencies]
anyhow = "1.0.71"
clap = { version = "3.2.25", features = ["derive", "env", "regex", "unicode", "wrap_help"] }
depgraph_reader = { version = "0.0.0", path = "../../../cargo/depgraph_reader" }
hash = { version = "0.0.0", path = "../../../../utils/hash" }
human_readable_dep_map = { version = "0.0.0", path = "../../../cargo/human_readable_dep_map" }
rayon = "1.2" |
Rust | hhvm/hphp/hack/src/depgraph/human_readable_dep_map/human_readable_dep_map.rs | // Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use std::fmt;
use std::fs::File;
use std::io::BufRead;
use std::io::BufReader;
use std::path::Path;
use depgraph_reader::Dep;
use hash::DashMap;
use typing_deps_hash::DepType;
/// A map of `Dep`s to human-readable names.
/// E.g. 9f886cce32ff8ed -> Type Child A
#[derive(Default)]
pub struct HumanReadableDepMap {
map: DashMap<Dep, (DepType, String)>,
}
impl HumanReadableDepMap {
pub fn contains(&self, dep: Dep) -> bool {
self.map.contains_key(&dep)
}
/// Parse a text file where each line has the format:
/// DEP_HASH DEP_TYPE SYMBOL_NAME
/// and store the entry. Returns a list of collisions if the same hash is
/// loaded with a different symbol or kind.
///
/// The contents of a file might look like this:
///
/// 718470156085360007 Type ChildB
/// 1154245141631872205 Type Base
/// 8150603439003883592 Constructor Base
/// 1154245141631872204 Extends Base
/// 718472355108616429 Type ChildA
pub fn load(&self, path: &Path) -> anyhow::Result<Vec<DepMapCollision>> {
use std::str::FromStr;
let mut collisions = vec![];
for line in BufReader::new(File::open(path)?).lines() {
let line = line?;
let mut parts = line.split(' ');
let hash: u64 = match parts.next() {
Some(s) => s.parse()?,
None => anyhow::bail!("expected hash"),
};
let kind: DepType = match parts.next() {
Some(s) => DepType::from_str(s)?,
None => anyhow::bail!("expected DepType"),
};
let sym: &str = match parts.next() {
Some(s) => s,
None => anyhow::bail!("expected symbol"),
};
match self.map.insert(Dep::new(hash), (kind, sym.into())) {
Some((old_kind, old_sym)) if old_kind != kind || old_sym != sym => {
collisions.push(DepMapCollision {
hash,
old_kind,
old_symbol: old_sym.to_owned(),
new_kind: kind,
new_symbol: sym.to_owned(),
});
}
_ => {}
}
}
Ok(collisions)
}
/// Get the human readable name of a `Dep`. If the human readable name isn't
/// found, simply return the `Dep` hash.
pub fn fmt(&self, dep: Dep) -> String {
match self.map.get(&dep) {
Some(e) => {
let (kind, sym) = &*e;
format!("{kind:?} {sym}")
}
None => {
format!("{dep:016x} ({dep})")
}
}
}
}
#[derive(Clone, Debug, PartialEq)]
pub struct DepMapCollision {
pub hash: u64,
pub old_kind: DepType,
pub old_symbol: String,
pub new_kind: DepType,
pub new_symbol: String,
}
impl fmt::Display for DepMapCollision {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"(Hash Collision {}: ({:?},{}) != ({:?},{}))",
&self.hash, &self.old_kind, &self.old_symbol, &self.new_kind, &self.new_symbol,
)
}
}
#[cfg(test)]
mod tests {
use std::io::Write;
use tempfile::NamedTempFile;
use super::*;
#[test]
fn test_load_and_collisions() -> anyhow::Result<()> {
let dep_map = HumanReadableDepMap::default();
let mut f1 = NamedTempFile::new()?;
let mut f2 = NamedTempFile::new()?;
writeln!(f1, "123456 Type Foo")?;
writeln!(f2, "123456 Constructor Base")?;
assert_eq!(dep_map.load(f1.path())?, vec![]);
assert_eq!(
dep_map.load(f2.path())?,
vec![DepMapCollision {
hash: 123456u64,
old_kind: DepType::Type,
old_symbol: "Foo".to_owned(),
new_kind: DepType::Constructor,
new_symbol: "Base".to_owned(),
}],
);
Ok(())
}
} |
TOML | hhvm/hphp/hack/src/deps/Cargo.toml | # @generated by autocargo
[package]
name = "dep_graph_delta"
version = "0.0.0"
edition = "2021"
[lib]
path = "deps_rust/dep_graph_delta.rs"
[dependencies]
dep = { version = "0.0.0", path = "../depgraph/cargo/dep" }
hash = { version = "0.0.0", path = "../utils/hash" }
serde = { version = "1.0.176", features = ["derive", "rc"] } |
hhvm/hphp/hack/src/deps/dune | (library
(name file_info)
(wrapped false)
(modules fileInfo)
(libraries opaque_digest pos symbol_name utils_core)
(preprocess
(pps ppx_hash ppx_sexp_conv ppx_deriving.std)))
(library
(name symbol_name)
(wrapped false)
(modules symbol_name)
(libraries collections utils_core)
(preprocess
(pps ppx_deriving.std)))
(library
(name typing_deps)
(wrapped false)
(modules typing_deps typing_deps_mode)
(libraries
collections
decl_reference
depgraph_reader
file_info
heap_shared_mem
heap_shared_mem_hash
hh_fanout_rust_ffi_externs
hh_json
logging
relative_path
sqlite3
sqlite_utils
typing_deps_rust
worker_cancel)
(preprocess
(pps ppx_deriving.std)))
(library
(name typing_deps_rust)
(modules)
(wrapped false)
(foreign_archives deps_rust_ffi))
(library
(name typing_pessimisation_deps)
(wrapped false)
(modules typing_pessimisation_deps)
(libraries
typing_deps
provider_context
relative_path
sqlite3
sqlite_utils)
(preprocess
(pps ppx_deriving.std)))
(data_only_dirs cargo deps_rust)
(rule
(targets libdeps_rust_ffi.a)
(deps
(source_tree %{workspace_root}/hack/src))
(locks /cargo)
(action
(run %{workspace_root}/hack/scripts/invoke_cargo.sh deps_rust_ffi deps_rust_ffi)))
(library
(name hh_fanout_rust_ffi_externs)
(wrapped false)
(modules hh_fanout_rust_ffi_externs)
(libraries
hh_fanout_rust_ffi
file_scuba_logger_ffi_externs))
(library
(name hh_fanout_rust_ffi)
(wrapped false)
(modules)
(foreign_archives hh_fanout_rust_ffi))
(rule
(targets libhh_fanout_rust_ffi.a)
(deps
(source_tree %{workspace_root}/hack/src))
(locks /cargo)
(action
(run
%{workspace_root}/hack/scripts/invoke_cargo.sh
hh_fanout_rust_ffi
hh_fanout_rust_ffi))) |
|
OCaml | hhvm/hphp/hack/src/deps/fileInfo.ml | (*
* Copyright (c) 2015, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the "hack" directory of this source tree.
*
*)
(*****************************************************************************)
(* This module defines the data structures used to describe the content of
* a file.
* The parser constructs FileInfo.t structs, which contain names and positions
* plus some extra info required for the build.
* After the names have been checked (Naming.make_env), we "simplify" the
* struct and only keep the names defined in the files we know about.
*)
(*****************************************************************************)
open Hh_prelude
open Prim_defs
(*****************************************************************************)
(* Parsing modes *)
(*****************************************************************************)
type mode =
| Mhhi (** just declare signatures, don't check anything *)
| Mstrict (** check everything! *)
[@@deriving eq, hash, show, enum, ord, sexp_of]
let is_strict = function
| Mstrict -> true
| Mhhi -> false
let is_hhi = function
| Mstrict -> false
| Mhhi -> true
let string_of_mode = function
| Mhhi -> "hhi"
| Mstrict -> "strict"
let pp_mode fmt mode =
Format.pp_print_string fmt
@@
match mode with
| Mhhi -> "Mhhi"
| Mstrict -> "Mstrict"
(*****************************************************************************)
(* Positions of names in a file *)
(*****************************************************************************)
type name_type =
| Fun [@value 3]
| Class [@value 0]
| Typedef [@value 1]
| Const [@value 4]
| Module [@value 5]
[@@deriving eq, show { with_path = false }, enum, ord]
(** We define two types of positions establishing the location of a given name:
* a Full position contains the exact position of a name in a file, and a
* File position contains just the file and the type of toplevel entity,
* allowing us to lazily retrieve the name's exact location if necessary.
*)
type pos =
| Full of Pos.t
| File of name_type * Relative_path.t
[@@deriving eq, show]
(** An id contains a pos, a name and an optional decl hash. The decl hash is None
* only when we didn't compute it, for performance reasons.
*)
type id = pos * string * Int64.t option [@@deriving eq, show]
let id_name (_, x, _) = x
(*****************************************************************************)
(* The record produced by the parsing phase. *)
(*****************************************************************************)
type hash_type = Int64.t option [@@deriving eq]
let pp_hash_type fmt hash =
match hash with
| None -> Format.fprintf fmt "None"
| Some hash -> Format.fprintf fmt "Some (%s)" (Int64.to_string hash)
(* NB: Type [t] must be manually kept in sync with Rust type [hackrs_provider_backend::FileInfo] *)
(** The record produced by the parsing phase. *)
type t = {
hash: hash_type;
file_mode: mode option;
funs: id list;
classes: id list;
typedefs: id list;
consts: id list;
modules: id list;
comments: (Pos.t * comment) list option;
(** None if loaded from saved state *)
}
[@@deriving show]
let empty_t =
{
hash = None;
file_mode = None;
funs = [];
classes = [];
typedefs = [];
consts = [];
modules = [];
comments = Some [];
}
let pos_full (p, name, hash) = (Full p, name, hash)
let get_pos_filename = function
| Full p -> Pos.filename p
| File (_, fn) -> fn
(*****************************************************************************)
(* The simplified record used after parsing. *)
(*****************************************************************************)
(** The simplified record used after parsing. *)
type names = {
n_funs: SSet.t;
n_classes: SSet.t;
n_types: SSet.t;
n_consts: SSet.t;
n_modules: SSet.t;
}
[@@deriving show]
(** The simplified record stored in saved-state.*)
type saved_names = {
sn_funs: SSet.t;
sn_classes: SSet.t;
sn_types: SSet.t;
sn_consts: SSet.t;
sn_modules: SSet.t;
}
(** Data structure stored in the saved state *)
type saved = {
s_names: saved_names;
s_hash: Int64.t option;
s_mode: mode option;
}
let empty_names =
{
n_funs = SSet.empty;
n_classes = SSet.empty;
n_types = SSet.empty;
n_consts = SSet.empty;
n_modules = SSet.empty;
}
(*****************************************************************************)
(* Functions simplifying the file information. *)
(*****************************************************************************)
let name_set_of_idl idl =
List.fold_left idl ~f:(fun acc (_, x, _) -> SSet.add x acc) ~init:SSet.empty
let simplify info =
let {
funs;
classes;
typedefs;
consts;
modules;
file_mode = _;
comments = _;
hash = _;
} =
info
in
let n_funs = name_set_of_idl funs in
let n_classes = name_set_of_idl classes in
let n_types = name_set_of_idl typedefs in
let n_consts = name_set_of_idl consts in
let n_modules = name_set_of_idl modules in
{ n_funs; n_classes; n_types; n_consts; n_modules }
let to_saved info =
let {
funs;
classes;
typedefs;
consts;
modules;
file_mode = s_mode;
hash = s_hash;
comments = _;
} =
info
in
let sn_funs = name_set_of_idl funs in
let sn_classes = name_set_of_idl classes in
let sn_types = name_set_of_idl typedefs in
let sn_consts = name_set_of_idl consts in
let sn_modules = name_set_of_idl modules in
let s_names = { sn_funs; sn_classes; sn_types; sn_consts; sn_modules } in
{ s_names; s_mode; s_hash }
let from_saved fn saved =
let { s_names; s_mode; s_hash } = saved in
let { sn_funs; sn_classes; sn_types; sn_consts; sn_modules } = s_names in
let funs =
List.map (SSet.elements sn_funs) ~f:(fun x -> (File (Fun, fn), x, None))
in
let classes =
List.map (SSet.elements sn_classes) ~f:(fun x ->
(File (Class, fn), x, None))
in
let typedefs =
List.map (SSet.elements sn_types) ~f:(fun x ->
(File (Typedef, fn), x, None))
in
let consts =
List.map (SSet.elements sn_consts) ~f:(fun x -> (File (Const, fn), x, None))
in
let modules =
List.map (SSet.elements sn_modules) ~f:(fun m ->
(File (Module, fn), m, None))
in
{
file_mode = s_mode;
hash = s_hash;
funs;
classes;
typedefs;
consts;
modules;
comments = None;
}
let saved_to_names saved =
{
n_funs = saved.s_names.sn_funs;
n_classes = saved.s_names.sn_classes;
n_types = saved.s_names.sn_types;
n_consts = saved.s_names.sn_consts;
n_modules = saved.s_names.sn_modules;
}
let merge_names t_names1 t_names2 =
let { n_funs; n_classes; n_types; n_consts; n_modules } = t_names1 in
{
n_funs = SSet.union n_funs t_names2.n_funs;
n_classes = SSet.union n_classes t_names2.n_classes;
n_types = SSet.union n_types t_names2.n_types;
n_consts = SSet.union n_consts t_names2.n_consts;
n_modules = SSet.union n_modules t_names2.n_modules;
}
let to_string defs_per_file =
let funs = List.map ~f:(fun (a, b, _) -> (a, b, None)) defs_per_file.funs in
let classes =
List.map ~f:(fun (a, b, _) -> (a, b, None)) defs_per_file.classes
in
let typedefs =
List.map ~f:(fun (a, b, _) -> (a, b, None)) defs_per_file.typedefs
in
let consts =
List.map ~f:(fun (a, b, _) -> (a, b, None)) defs_per_file.consts
in
let modules =
List.map ~f:(fun (a, b, _) -> (a, b, None)) defs_per_file.modules
in
[
("funs", funs);
("classes", classes);
("typedefs", typedefs);
("consts", consts);
("modules", modules);
]
|> List.filter ~f:(fun (_, l) -> not @@ List.is_empty l)
|> List.map ~f:(fun (kind, l) ->
Printf.sprintf
"%s: %s %s"
kind
(List.map l ~f:(fun (_, x, _) -> x) |> String.concat ~sep:",")
(List.map l ~f:(fun (_, _, hash) ->
Int64.to_string (Option.value hash ~default:Int64.zero))
|> String.concat ~sep:","))
|> String.concat ~sep:";"
type diff = {
removed_funs: SSet.t;
added_funs: SSet.t;
removed_classes: SSet.t;
added_classes: SSet.t;
removed_types: SSet.t;
added_types: SSet.t;
removed_consts: SSet.t;
added_consts: SSet.t;
removed_modules: SSet.t;
added_modules: SSet.t;
}
let diff f1 f2 =
let matches_hash =
match (f1.hash, f2.hash) with
| (Some h1, Some h2) -> Int64.equal h1 h2
| _ -> false
in
if matches_hash then
None
else
let diff_ids ids1 ids2 =
let removed_ids = SSet.diff ids1 ids2 in
let added_ids = SSet.diff ids2 ids1 in
(removed_ids, added_ids)
in
let f1 = simplify f1 in
let f2 = simplify f2 in
let (removed_funs, added_funs) = diff_ids f1.n_funs f2.n_funs in
let (removed_classes, added_classes) = diff_ids f1.n_classes f2.n_classes in
let (removed_types, added_types) = diff_ids f1.n_types f2.n_types in
let (removed_consts, added_consts) = diff_ids f1.n_consts f2.n_consts in
let (removed_modules, added_modules) = diff_ids f1.n_modules f2.n_modules in
let is_empty =
List.fold
~f:(fun acc s -> SSet.is_empty s && acc)
[
removed_funs;
added_funs;
removed_classes;
added_classes;
removed_types;
added_types;
removed_consts;
added_consts;
removed_modules;
added_modules;
]
~init:true
in
if is_empty then
None
else
Some
{
removed_funs;
added_funs;
removed_classes;
added_classes;
removed_types;
added_types;
removed_consts;
added_consts;
removed_modules;
added_modules;
} |
OCaml Interface | hhvm/hphp/hack/src/deps/fileInfo.mli | (*
* Copyright (c) 2015, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the "hack" directory of this source tree.
*
*)
(*****************************************************************************)
(* This module defines the data structures used to describe the content of
* a file.
* The parser constructs FileInfo.t structs, which contain names and positions
* plus some extra info required for the build.
* After the names have been checked (Naming.make_env), we "simplify" the
* struct and only keep the names defined in the files we know about.
*)
(*****************************************************************************)
open Prim_defs
(*****************************************************************************)
(* Parsing modes *)
(*****************************************************************************)
type mode =
| Mhhi (* just declare signatures, don't check anything *)
| Mstrict (* check everything! *)
[@@deriving eq, hash, show, enum, ord, sexp_of]
val is_strict : mode -> bool
val is_hhi : mode -> bool
val string_of_mode : mode -> string
(*****************************************************************************)
(* The record produced by the parsing phase. *)
(*****************************************************************************)
(** This type replicates what's in Naming_types.name_kind, but with less structure.
It'd be nice to unify them. *)
type name_type =
| Fun [@value 3]
| Class [@value 0]
| Typedef [@value 1]
| Const [@value 4]
| Module [@value 5]
[@@deriving eq, show, enum, ord]
type pos =
| Full of Pos.t
| File of name_type * Relative_path.t
[@@deriving eq, show]
type id = pos * string * Int64.t option [@@deriving eq, show]
val id_name : id -> string
val pos_full : Pos.t * string * Int64.t option -> id
val get_pos_filename : pos -> Relative_path.t
(** The hash value of a decl AST.
We use this to see if two versions of a file are "similar", i.e. their
declarations only differ by position information. *)
type hash_type = Int64.t option [@@deriving eq]
(** [FileInfo.t] is (1) what we get out of the parser, with Full positions;
(2) the API for putting stuff into and taking stuff out of saved-state naming table (with File positions)
*)
type t = {
hash: hash_type;
file_mode: mode option;
funs: id list;
classes: id list;
typedefs: id list;
consts: id list;
modules: id list;
comments: (Pos.t * comment) list option;
}
[@@deriving show]
val empty_t : t
(*****************************************************************************)
(* The simplified record used after parsing. *)
(*****************************************************************************)
(** [FileInfo.names] is a cut-down version of [FileInfo.t], one that we use internally
for decl-diffing and other fanout calculations. *)
type names = {
n_funs: SSet.t;
n_classes: SSet.t;
n_types: SSet.t;
n_consts: SSet.t;
n_modules: SSet.t;
}
[@@deriving show]
(*****************************************************************************)
(* The record used in our saved state. *)
(*****************************************************************************)
(** Although [FileInfo.t] is the public API for storing/retrieving entries in the naming-table,
we actually store the naming-table on disk as [FileInfo.saved] - it's basically the same but
has a slightly more compact representation in order to save space. *)
type saved
val empty_names : names
(*****************************************************************************)
(* Functions simplifying the file information. *)
(*****************************************************************************)
val simplify : t -> names
val merge_names : names -> names -> names
val to_saved : t -> saved
val from_saved : Relative_path.t -> saved -> t
val saved_to_names : saved -> names
val to_string : t -> string
type diff = {
removed_funs: SSet.t;
added_funs: SSet.t;
removed_classes: SSet.t;
added_classes: SSet.t;
removed_types: SSet.t;
added_types: SSet.t;
removed_consts: SSet.t;
added_consts: SSet.t;
removed_modules: SSet.t;
added_modules: SSet.t;
}
val diff : t -> t -> diff option |
OCaml | hhvm/hphp/hack/src/deps/hh_fanout_rust_ffi_externs.ml | (*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the "hack" directory of this source tree.
*
*)
type hh_fanout_rust_ffi
(* Keep order of arguments consistent with Rust FFI! *)
external make :
logger:File_scuba_logger_ffi_externs.logger ->
fanout_state_dir:string ->
decl_state_dir:string ->
hh_fanout_rust_ffi = "hh_fanout_ffi_make"
external make_hhdg_builder :
logger:File_scuba_logger_ffi_externs.logger ->
builder_state_dir:string ->
hh_fanout_rust_ffi = "hh_fanout_ffi_make_hhdg_builder" |
OCaml | hhvm/hphp/hack/src/deps/symbol_name.ml | (*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the "hack" directory of this source tree.
*
*)
open Hh_prelude
(*****************************************************************************)
(* Module types defined in the .mli *)
(*****************************************************************************)
module type S = sig
type t [@@deriving show, eq, ord]
val of_string : string -> t
val to_string_TRANSITIONAL : t -> string
val of_string_TRANSITIONAL : string -> t
end
module type I = sig
type t [@@deriving show, ord]
type s
val iequal : t -> t -> bool
val canonical : t -> s
val of_string : string -> t
val to_string_TRANSITIONAL : t -> string
val of_string_TRANSITIONAL : string -> t
end
module type Set = sig
include Caml.Set.S
val pp : Format.formatter -> t -> unit
val show : t -> string
val to_sset_TRANSITIONAL : t -> SSet.t
val of_sset_TRANSITIONAL : SSet.t -> t
end
(*****************************************************************************)
(* All our opaque types are currently implemented just by strings. *)
(* Here are the string-based implementations of each of them. *)
(*****************************************************************************)
(** [S_Impl] implements an opaque type that supports eq+ord, by string *)
module S_Impl = struct
type t = string [@@deriving ord, eq]
(** This show method just prints the string; the default deriving-show doubles up backslashes which I don't prefer *)
let show (t : t) : string = t
let pp (fmt : Format.formatter) (t : t) : unit = String.pp fmt t
let of_string (t : string) : t = t
let to_string_TRANSITIONAL (t : t) : string = t
let of_string_TRANSITIONAL (t : string) : t = t
end
(** [I_Impl] implements an opaque type that supports eq+ord+ieq, by string *)
module I_Impl = struct
type t = string [@@deriving ord]
type s = string
(** This show method just prints the string; the default deriving-show doubles up backslashes which I don't prefer *)
let show (t : t) : string = t
let pp (fmt : Format.formatter) (t : t) : unit = String.pp fmt t
let iequal (t1 : t) (t2 : t) : bool = String.equal t1 t2
let canonical (t : t) : s = t
let of_string (t : string) : t = Caml.String.lowercase_ascii t
let to_string_TRANSITIONAL (t : t) : string = t
let of_string_TRANSITIONAL (t : string) : t = t
end
(** [Set_Impl] implements an opaque set, via a SSet *)
module Set_Impl = struct
include Caml.Set.Make (StringKey)
let pp (fmt : Format.formatter) (t : t) =
Format.fprintf fmt "@[<2>{";
ignore
@@ List.fold_left
~f:(fun sep s ->
if sep then Format.fprintf fmt ";@ ";
String.pp fmt s;
true)
~init:false
(elements t);
Format.fprintf fmt "@,}@]"
let show (x : t) : string = Format.asprintf "%a" pp x
let to_sset_TRANSITIONAL (t : t) : SSet.t = t
let of_sset_TRANSITIONAL (ss : SSet.t) : t = ss
end
(*****************************************************************************)
(* All our opaque types are currently implemented just by strings. *)
(* Here's where we provide their implementations. *)
(*****************************************************************************)
module Fun = S_Impl
module IFun = I_Impl
module Type = S_Impl
module IType = I_Impl
module Const = S_Impl
module IConst = I_Impl
module FunSet = Set_Impl
module TypeSet = Set_Impl
module ConstSet = Set_Impl |
OCaml Interface | hhvm/hphp/hack/src/deps/symbol_name.mli | (*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the "hack" directory of this source tree.
*
*)
(** [Symbol_name.S] is really a string but we're transitioning to make it into an opaque type
which only supports eq+ord. *)
module type S = sig
type t [@@deriving show, eq, ord]
val of_string : string -> t
val to_string_TRANSITIONAL : t -> string
val of_string_TRANSITIONAL : string -> t
end
(** [Symbol_name.I] is really a string but we're transitioning to make it into an opaque type
which only supports deriving ord, plus case-insensitive ieq, plus case-sensitive eq.
Case-insensitive eq is done by [iequal]; it deliberately has an unconventional name because
the need for case-insensitivity is so rare it should not be accidentally stumbled upon.
Case-sensitive eq is done by [canonical] and then eq upon canonical. *)
module type I = sig
type t [@@deriving show, ord]
(** [s] is the case-sensitive version *)
type s
(** [iequal] does a case-insensitive comparison *)
val iequal : t -> t -> bool
(** [canonical] is the correctly-cased ("canonical") version of this name *)
val canonical : t -> s
val of_string : string -> t
val to_string_TRANSITIONAL : t -> string
val of_string_TRANSITIONAL : string -> t
end
(** [Symbol_name.Set] is a set that's currently implemented as SSet but we're
transitioning to make it into a set of an opaque type. *)
module type Set = sig
include Caml.Set.S
val pp : Format.formatter -> t -> unit
val show : t -> string
val to_sset_TRANSITIONAL : t -> SSet.t
val of_sset_TRANSITIONAL : SSet.t -> t
end
(*****************************************************************************)
(* Opaque distinct types for Fun, Type and Const keys *)
(*****************************************************************************)
module Fun : S
module Type : S
module Const : S
module FunSet : Set with type elt := Fun.t
module TypeSet : Set with type elt := Type.t
module ConstSet : Set with type elt := Const.t
(*****************************************************************************)
(* Opaque distinct types for case-insensitive Fun, Type and Const keys *)
(*****************************************************************************)
module IFun : I with type s = Fun.t
module IType : I with type s = Type.t
module IConst : I with type s = Const.t |
OCaml | hhvm/hphp/hack/src/deps/typing_deps.ml | (*
* Copyright (c) 2015, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the "hack" directory of this source tree.
*
*)
open Hh_prelude
module Format = Stdlib.Format
module Hashtbl = Stdlib.Hashtbl
module Mode = Typing_deps_mode
open Typing_deps_mode
open Utils
let worker_id : int option ref = ref None
(******************************************)
(* Handling dependencies and their hashes *)
(******************************************)
module Dep = struct
type dependent
type dependency
(** NOTE: keep in sync with `typing_deps_hash.rs`. *)
type _ variant =
| GConst : string -> 'a variant
| Fun : string -> 'a variant
| Type : string -> 'a variant
| Extends : string -> dependency variant
| Const : string * string -> dependency variant
| Constructor : string -> dependency variant
| Prop : string * string -> dependency variant
| SProp : string * string -> dependency variant
| Method : string * string -> dependency variant
| SMethod : string * string -> dependency variant
| AllMembers : string -> dependency variant
| GConstName : string -> 'a variant
| Module : string -> 'a variant
let dependency_of_variant : type a. a variant -> dependency variant = function
| GConst s -> GConst s
| GConstName s -> GConstName s
| Type s -> Type s
| Fun s -> Fun s
| Module m -> Module m
| Const (cls, s) -> Const (cls, s)
| Prop (cls, s) -> Prop (cls, s)
| SProp (cls, s) -> SProp (cls, s)
| Method (cls, s) -> Method (cls, s)
| SMethod (cls, s) -> SMethod (cls, s)
| Constructor s -> Constructor s
| AllMembers s -> AllMembers s
| Extends s -> Extends s
(** NOTE: keep in sync with `typing_deps_hash.rs`. *)
type dep_kind =
| KGConst [@value 0]
| KFun [@value 1]
| KType [@value 2]
| KExtends [@value 3]
| KConst [@value 5]
| KConstructor [@value 6]
| KProp [@value 7]
| KSProp [@value 8]
| KMethod [@value 9]
| KSMethod [@value 10]
| KAllMembers [@value 11]
| KGConstName [@value 12]
| KModule [@value 13]
[@@deriving enum]
module Member = struct
type t =
| Method of string
| SMethod of string
| Prop of string
| SProp of string
| Constructor
| Const of string
| All
let method_ name = Method name
let smethod name = SMethod name
let prop name = Prop name
let sprop name = SProp name
let constructor = Constructor
let const name = Const name
let all = All
end
external hash1 : int -> string -> int = "hash1_ocaml" [@@noalloc]
external hash2 : int -> int -> string -> int = "hash2_ocaml" [@@noalloc]
type t = int
let to_int64 (t : t) : int64 =
(* The [t] is 63 bits of data followed by a trailing bit 0.
Since we're storing it in Dep.t, an ocaml int stored in two's complement, the first
of those 63 bits is considered a sign bit. The trailing bit 0 is because ocaml reserves
the trailing bit 0 to indicate that it's stored locally, not a reference. *)
let a = Int64.of_int t in
(* [a] is an Int64 version of that int, which unlike the built-in int is always stored
as a reference on the heap and hence does not need to reserve a leading bit and hence
uses all 64bits. The way two's complement works, when promoting from 63bit to 64bit,
is that the extra new leading bit becomes a copy of the previous leading bit.
e.g. decimal=-3, 7bit=111_1101, 8bit=1111_1101
e.g. decimal=+3, 7bit=000_0011, 8bit=0000_0011
If that's confusing, think of counting down from 2^bits, e.g.
-1 is 2^7 - 1 = 111_1111, or equivalently 2^8 - 1 = 1111_1111
-2 is 2^7 - 2 = 111_1110, or equivalently 2^8 - 1 = 1111_1110 *)
let b = Int64.shift_right_logical (Int64.shift_left a 1) 1 in
(* [b] has the top bit reset to 0. Thus, it's still the exact same leading
bit 0 followed by 63 bits of data as the original Dep.t we started with.
But whereas ocaml int and Dep.t might render this bit-pattern as a
negative number, the Int64 will always render it a positive number.
In particular, if we write this Int64 to a Sqlite.INT column then sqlite
will show it as a positive integer. *)
b
let to_int hash = hash
let ordinal_variant (type a) : a variant -> int = function
| GConst _ -> 0
| Fun _ -> 1
| Type _ -> 2
| Extends _ -> 3
| Const _ -> 4
| Constructor _ -> 5
| Prop _ -> 6
| SProp _ -> 7
| Method _ -> 8
| SMethod _ -> 9
| AllMembers _ -> 10
| GConstName _ -> 11
| Module _ -> 12
let compare_variant (type a) (v1 : a variant) (v2 : a variant) : int =
match (v1, v2) with
| (GConst x1, GConst x2)
| (Fun x1, Fun x2)
| (Type x1, Type x2)
| (Extends x1, Extends x2)
| (Constructor x1, Constructor x2)
| (AllMembers x1, AllMembers x2)
| (GConstName x1, GConstName x2) ->
String.compare x1 x2
| (Prop (c1, m1), Prop (c2, m2))
| (SProp (c1, m1), SProp (c2, m2))
| (Method (c1, m1), Method (c2, m2))
| (SMethod (c1, m1), SMethod (c2, m2))
| (Const (c1, m1), Const (c2, m2)) ->
let res = String.compare c1 c2 in
if Int.( <> ) res 0 then
res
else
String.compare m1 m2
| ( _,
( GConst _ | Fun _ | Type _ | Extends _ | Const _ | Constructor _
| Prop _ | SProp _ | Method _ | SMethod _ | AllMembers _ | GConstName _
| Module _ ) ) ->
ordinal_variant v1 - ordinal_variant v2
let dep_kind_of_variant : type a. a variant -> dep_kind = function
| GConst _ -> KGConst
| GConstName _ -> KGConstName
| Const _ -> KConst
| Type _ -> KType
| Fun _ -> KFun
| Prop _ -> KProp
| SProp _ -> KSProp
| Method _ -> KMethod
| SMethod _ -> KSMethod
| Constructor _ -> KConstructor
| AllMembers _ -> KAllMembers
| Extends _ -> KExtends
| Module _ -> KModule
let make_member_dep_from_type_dep : t -> Member.t -> t =
fun type_hash -> function
| Member.Const name -> hash2 (dep_kind_to_enum KConst) type_hash name
| Member.Constructor -> hash2 (dep_kind_to_enum KConstructor) type_hash ""
| Member.Prop name -> hash2 (dep_kind_to_enum KProp) type_hash name
| Member.SProp name -> hash2 (dep_kind_to_enum KSProp) type_hash name
| Member.Method name -> hash2 (dep_kind_to_enum KMethod) type_hash name
| Member.SMethod name -> hash2 (dep_kind_to_enum KSMethod) type_hash name
| Member.All -> hash2 (dep_kind_to_enum KAllMembers) type_hash ""
(* Keep in sync with the tags for `DepType` in `typing_deps_hash.rs`. *)
let rec make : type a. a variant -> t = function
(* Deps on defs *)
| GConst name1 -> hash1 (dep_kind_to_enum KGConst) name1
| Fun name1 -> hash1 (dep_kind_to_enum KFun) name1
| Type name1 -> hash1 (dep_kind_to_enum KType) name1
| Extends name1 -> hash1 (dep_kind_to_enum KExtends) name1
| GConstName name1 -> hash1 (dep_kind_to_enum KGConstName) name1
| Module mname -> hash1 (dep_kind_to_enum KModule) mname
(* Deps on members *)
| Constructor name1 ->
make_member_dep_from_type_dep (make (Type name1)) Member.Constructor
| Const (name1, name2) ->
make_member_dep_from_type_dep (make (Type name1)) (Member.Const name2)
| Prop (name1, name2) ->
make_member_dep_from_type_dep (make (Type name1)) (Member.Prop name2)
| SProp (name1, name2) ->
make_member_dep_from_type_dep (make (Type name1)) (Member.SProp name2)
| Method (name1, name2) ->
make_member_dep_from_type_dep (make (Type name1)) (Member.Method name2)
| SMethod (name1, name2) ->
make_member_dep_from_type_dep (make (Type name1)) (Member.SMethod name2)
| AllMembers name1 ->
make_member_dep_from_type_dep (make (Type name1)) Member.All
let is_class x = x land 1 = 1
let extends_of_class x = x lxor 1
let compare = Int.compare
let extract_name : type a. a variant -> string = function
| GConst s -> Utils.strip_ns s
| GConstName s -> Utils.strip_ns s
| Const (cls, s) -> spf "%s::%s" (Utils.strip_ns cls) s
| Type s -> Utils.strip_ns s
| Fun s -> Utils.strip_ns s
| Prop (cls, s) -> spf "%s::%s" (Utils.strip_ns cls) s
| SProp (cls, s) -> spf "%s::%s" (Utils.strip_ns cls) s
| Method (cls, s) -> spf "%s::%s" (Utils.strip_ns cls) s
| SMethod (cls, s) -> spf "%s::%s" (Utils.strip_ns cls) s
| Constructor s -> Utils.strip_ns s
| AllMembers s -> Utils.strip_ns s
| Extends s -> Utils.strip_ns s
| Module m -> m
let extract_root_name : type a. ?strip_namespace:bool -> a variant -> string =
fun ?(strip_namespace = true) variant ->
match variant with
| GConst s
| GConstName s
| Constructor s
| AllMembers s
| Extends s
| Module s
| Type s
| Fun s
| Prop (s, _)
| SProp (s, _)
| Method (s, _)
| SMethod (s, _)
| Const (s, _) ->
if strip_namespace then
Utils.strip_ns s
else
s
let extract_member_name : type a. a variant -> string option = function
| GConst _
| GConstName _
| Constructor _
| AllMembers _
| Extends _
| Module _
| Type _
| Fun _ ->
None
| Const (_cls, s)
| Prop (_cls, s)
| SProp (_cls, s)
| Method (_cls, s)
| SMethod (_cls, s) ->
Some s
let to_decl_reference : type a. a variant -> Decl_reference.t = function
| Type s -> Decl_reference.Type s
| Const (s, _) -> Decl_reference.Type s
| Extends s -> Decl_reference.Type s
| AllMembers s -> Decl_reference.Type s
| Constructor s -> Decl_reference.Type s
| Prop (s, _) -> Decl_reference.Type s
| SProp (s, _) -> Decl_reference.Type s
| Method (s, _) -> Decl_reference.Type s
| SMethod (s, _) -> Decl_reference.Type s
| GConst s
| GConstName s ->
Decl_reference.GlobalConstant s
| Fun s -> Decl_reference.Function s
| Module m -> Decl_reference.Module m
let to_debug_string = string_of_int
let of_debug_string = int_of_string
let to_hex_string = Printf.sprintf "0x%016x"
let pp fmt dep = Format.fprintf fmt "%s" (to_hex_string dep)
let of_hex_string = int_of_string
let variant_to_string : type a. a variant -> string =
fun dep ->
let prefix =
match dep with
| GConst _ -> "GConst"
| GConstName _ -> "GConstName"
| Const _ -> "Const"
| Type _ -> "Type"
| Fun _ -> "Fun"
| Prop _ -> "Prop"
| SProp _ -> "SProp"
| Method _ -> "Method"
| SMethod _ -> "SMethod"
| Constructor _ -> "Constructor"
| AllMembers _ -> "AllMembers"
| Extends _ -> "Extends"
| Module _ -> "Module"
in
prefix ^ " " ^ extract_name dep
let pp_variant fmt variant =
Format.fprintf fmt "%s" (variant_to_string variant)
end
module DepMap = struct
include WrappedMap.Make (Dep)
let pp pp_data = make_pp Dep.pp pp_data
let show pp_data x = Format.asprintf "%a" (pp pp_data) x
end
(***********************************************)
(* Dependency tracing *)
(***********************************************)
(** Whether or not to trace new dependency edges *)
let trace = ref true
(** List of callbacks, called when discovering dependency edges *)
let dependency_callbacks = Caml.Hashtbl.create 0
let add_dependency_callback ~name cb =
Caml.Hashtbl.replace dependency_callbacks name cb
(** Set of dependencies used for the custom system.
The type `t` is an abstract type managed by `typing_deps.rs`.
It is a pointer to a `HashTrieSet<Dep>`, a persistent Rust set *)
module DepSet = struct
type t (* Abstract type *)
type elt = Dep.t
external make : unit -> t = "hh_dep_set_make"
external singleton : elt -> t = "hh_dep_set_singleton"
external add : t -> elt -> t = "hh_dep_set_add"
external union : t -> t -> t = "hh_dep_set_union"
external inter : t -> t -> t = "hh_dep_set_inter"
external diff : t -> t -> t = "hh_dep_set_diff"
external mem : t -> elt -> bool = "hh_dep_set_mem"
external elements : t -> elt list = "hh_dep_set_elements"
external cardinal : t -> int = "hh_dep_set_cardinal"
external is_empty : t -> bool = "hh_dep_set_is_empty"
external of_list : elt list -> t = "hh_dep_set_of_list"
let iter s ~f = List.iter (elements s) ~f
let fold : 'a. t -> init:'a -> f:(elt -> 'a -> 'a) -> 'a =
fun s ~init ~f ->
let l = elements s in
List.fold l ~init ~f:(fun x acc -> f acc x)
let pp fmt s =
let open Format in
pp_print_string fmt "{ ";
iter s ~f:(fun x ->
let str = Printf.sprintf "%x; " x in
pp_print_string fmt str);
pp_print_string fmt "}"
let show s = Format.asprintf "%a" pp s
end
module DepHashKey = struct
type t = Dep.t
let compare = Int.compare
let to_string t = string_of_int t
end
module VisitedSet = struct
type t (* abstract type managed by Rust, RefCell<BTreeSet<Dep>> *)
external hh_visited_set_make : unit -> t = "hh_visited_set_make"
let make () : t = hh_visited_set_make ()
end
(** Graph management in the new system with custom file format. *)
module CustomGraph = struct
external hh_custom_dep_graph_register_custom_types : unit -> unit
= "hh_custom_dep_graph_register_custom_types"
external assert_master : unit -> unit = "hh_assert_master"
let allow_reads_ref = ref false
let allow_dependency_table_reads flag =
assert_master ();
let prev = !allow_reads_ref in
allow_reads_ref := flag;
prev
external hh_custom_dep_graph_replace : Mode.t -> unit
= "hh_custom_dep_graph_replace"
[@@noalloc]
external hh_custom_dep_graph_has_edge : Mode.t -> Dep.t -> Dep.t -> bool
= "hh_custom_dep_graph_has_edge"
[@@noalloc]
external get_ideps_from_hash : Mode.t -> Dep.t -> DepSet.t
= "hh_custom_dep_graph_get_ideps_from_hash"
external add_typing_deps : Mode.t -> DepSet.t -> DepSet.t
= "hh_custom_dep_graph_add_typing_deps"
external add_extend_deps : Mode.t -> DepSet.t -> DepSet.t
= "hh_custom_dep_graph_add_extend_deps"
external get_extend_deps :
Mode.t -> VisitedSet.t -> Dep.t -> DepSet.t -> DepSet.t
= "hh_custom_dep_graph_get_extend_deps"
external register_discovered_dep_edge : Dep.t -> Dep.t -> unit
= "hh_custom_dep_graph_register_discovered_dep_edge"
[@@noalloc]
external dep_graph_delta_num_edges : unit -> int
= "hh_custom_dep_graph_dep_graph_delta_num_edges"
[@@noalloc]
external save_delta : string -> bool -> int = "hh_custom_dep_graph_save_delta"
external load_delta : Mode.t -> string -> int
= "hh_custom_dep_graph_load_delta"
let add_all_deps mode x = x |> add_extend_deps mode |> add_typing_deps mode
type dep_edge = {
idependent: Dep.t; (** The node depending on the dependency *)
idependency: Dep.t; (** The node the dependent depends upon *)
}
module DepEdgeSet = Caml.Set.Make (struct
type t = dep_edge
let compare x y =
let d1 = Int.compare x.idependent y.idependent in
if d1 = 0 then
Int.compare x.idependency y.idependency
else
d1
end)
(** A batch of discovered dependency edges, of which some might
already be in the dependency graph! *)
let discovered_deps_batch : (dep_edge, unit) Hashtbl.t =
(* There isn't really any reason why I choose Hashtbl over Set here. *)
Hashtbl.create 1000
(** A batch of dependency edges that are not yet in the dependency graph. *)
let filtered_deps_batch : DepEdgeSet.t ref =
(* We use a Set, because a Hashtbl is way too expensive to serialize/
deserialize in OCaml. *)
ref DepEdgeSet.empty
(** Filter out the discovered dep edges which are already in the dep graph.
Get [!filtered_deps_batch] to obtain the result. *)
let filter_discovered_deps_batch mode =
(* Empty discovered_deps_batch by checking for each edge whether it's already
* in the dependency graph. If it is not, add it to the filtered deps batch. *)
let s = !filtered_deps_batch in
let s =
Hashtbl.fold
begin
fun ({ idependent; idependency } as edge) () s ->
if not (hh_custom_dep_graph_has_edge mode idependent idependency)
then
DepEdgeSet.add edge s
else
s
end
discovered_deps_batch
s
in
filtered_deps_batch := s;
Hashtbl.clear discovered_deps_batch
let register_discovered_dep_edges : DepEdgeSet.t -> unit =
fun s ->
assert_master ();
DepEdgeSet.iter
begin
fun { idependent; idependency } ->
register_discovered_dep_edge idependent idependency
end
s
let add_idep mode dependent dependency =
let idependent = Dep.make dependent in
let idependency = Dep.make dependency in
if idependent = idependency then
()
else (
Caml.Hashtbl.iter (fun _ f -> f dependent dependency) dependency_callbacks;
if !trace then begin
Hashtbl.replace discovered_deps_batch { idependent; idependency } ();
if Hashtbl.length discovered_deps_batch >= 1000 then
filter_discovered_deps_batch mode
end
)
let dump_current_edge_buffer ?deps_to_symbol_map () =
let hash_to_string dep =
match deps_to_symbol_map with
| None -> Dep.to_hex_string dep
| Some map ->
(match DepMap.find_opt dep map with
| None -> Dep.to_hex_string dep
| Some symbol -> Dep.variant_to_string symbol)
in
Hashtbl.iter
(fun { idependent; idependency } () ->
Printf.printf
"%s -> %s\n"
(hash_to_string idependency)
(hash_to_string idependent))
discovered_deps_batch
end
module SaveHumanReadableDepMap : sig
(** Add a dep to the current set of human readable deps. *)
val add : Typing_deps_mode.t -> 'a Dep.variant * int -> unit
(** Take the current set of human readable deps and write them to disk.
Reset the set of human readable deps to be empty.
If [flush], flush the channel after the write. *)
val export_to_disk : ?flush:bool -> Typing_deps_mode.t -> unit
end = struct
let should_save mode =
match mode with
| SaveToDiskMode { human_readable_dep_map_dir = Some _; _ }
| HhFanoutRustMode { human_readable_dep_map_dir = Some _; _ } ->
true
| _ -> false
let human_readable_dep_map_channel_ref : Out_channel.t option ref = ref None
let human_readable_dep_map_channel mode =
match !human_readable_dep_map_channel_ref with
| None ->
let directory =
match mode with
| SaveToDiskMode { human_readable_dep_map_dir = Some d; _ }
| HhFanoutRustMode { human_readable_dep_map_dir = Some d; _ } ->
d
| _ -> failwith "programming error: no human_readable_dep_map_dir"
in
let () =
if (not (Sys.file_exists directory)) || not (Sys.is_directory directory)
then
Sys_utils.mkdir_p directory
in
let worker_id = Option.value_exn !worker_id in
    (* To avoid multiple processes interleaving writes to the same file, rely on
     * unique worker ids to have each worker write to a separate file.
     * Use ~append:true to have each new process append to the previously created
     * log if it happens to get the same worker id.
     *)
let filepath =
Filename.concat
directory
(Printf.sprintf "human-readable-dep-map-%d.txt" worker_id)
in
let handle = Out_channel.create ~append:true ~perm:0o600 filepath in
let () = human_readable_dep_map_channel_ref := Some handle in
handle
| Some h -> h
let set_max_size = 20000
  (** The set of (hashcode, dependency name) pairs seen so far, per worker.
      The name is included alongside the hashcode to guard against hashcode
      collisions. *)
let seen_set_ref : (int * string, unit) Hashtbl.t option ref = ref None
let seen_set () =
match !seen_set_ref with
| None ->
let tbl = Hashtbl.create set_max_size in
let () = seen_set_ref := Some tbl in
tbl
| Some tbl -> tbl
  (** Take the current set of human readable deps and write them to disk.
      Reset the set of human readable deps to be empty.
      If [flush], flush the channel after the write. *)
let export_to_disk ?(flush = false) mode =
if should_save mode then
let ss = seen_set () in
let out_channel = human_readable_dep_map_channel mode in
let () =
Hashtbl.iter
(fun (hash, name) () ->
Printf.fprintf out_channel "%u %s\n" hash name)
ss
in
let () = Hashtbl.reset ss in
if flush then Out_channel.flush out_channel
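  (* Output format note (illustrative, not from the original source): each line
   * written by [export_to_disk] above is the decimal dep hash, a space, and the
   * human-readable dependency name as produced by [Dep.variant_to_string]. *)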
(** Add a dep to the current set of human readable deps. *)
let add mode (dep, hash) =
if should_save mode then
let ss = seen_set () in
let name = Dep.variant_to_string dep in
if Hashtbl.mem ss (hash, name) then
()
else
let () = Hashtbl.add ss (hash, name) () in
if Hashtbl.length ss >= set_max_size then export_to_disk mode
end
module SaveCustomGraph : sig
val add_idep :
Mode.t -> Dep.dependent Dep.variant -> Dep.dependency Dep.variant -> unit
(** Write to disk the dep edges which are not already in the depgraph. *)
val filter_discovered_deps_batch : flush:bool -> Mode.t -> unit
(** Move the source file to the worker's depgraph directory. *)
val save_delta : Typing_deps_mode.t -> source:string -> int
end = struct
(** [hh_save_custom_dep_graph_save_delta src dest_dir]
moves the [src] file to the [dest_dir] directory. *)
external hh_save_custom_dep_graph_save_delta : string -> string -> int
= "hh_save_custom_dep_graph_save_delta"
let discovered_deps_batch : (CustomGraph.dep_edge, unit) Hashtbl.t =
Hashtbl.create 1000
let destination_file_handle_ref : Out_channel.t option ref = ref None
let destination_filepath mode =
match mode with
| SaveToDiskMode { new_edges_dir; _ } ->
let worker_id = Base.Option.value_exn !worker_id in
Filename.concat
new_edges_dir
(Printf.sprintf "new-edges-worker-%d.bin" worker_id)
| _ -> failwith "programming error: wrong mode"
let destination_file_handle mode =
match !destination_file_handle_ref with
| Some handle -> handle
| None ->
let filepath = destination_filepath mode in
let handle =
Out_channel.create ~binary:true ~append:true ~perm:0o600 filepath
in
destination_file_handle_ref := Some handle;
handle
let destination_dir mode =
match mode with
| SaveToDiskMode { new_edges_dir; _ } -> new_edges_dir
| _ -> failwith "programming error: wrong mode"
(** Write to disk the dep edges which are not already in the depgraph. *)
let filter_discovered_deps_batch ~flush mode =
let handle = destination_file_handle mode in
Hashtbl.iter
begin
fun CustomGraph.{ idependent; idependency } () ->
if
not
(CustomGraph.hh_custom_dep_graph_has_edge
mode
idependent
idependency)
then begin
(* To be kept in sync with typing_deps.rs::hh_custom_dep_graph_save_delta! *)
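          (* Layout sketch (my reading of the writes below; the authoritative
           * format is the one read back by DepGraphDelta in Rust): each edge
           * is 16 bytes. The first 8 bytes are the dependency hash in
           * little-endian byte order with the top bit set as a tag; the next
           * 8 bytes are the dependent hash with the top bit clear. The Rust
           * reader uses native-endian u64 reads, so the two agree on
           * little-endian targets. *)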
(* Write dependency. *)
for i = 0 to 6 do
Out_channel.output_byte handle (idependency lsr (i * 8))
done;
(* Set a tag bit to indicate this is a dependency *)
Out_channel.output_byte handle ((idependency lsr 56) + 128);
(* Write dependent. *)
for i = 0 to 7 do
Out_channel.output_byte handle (idependent lsr (i * 8))
done
end
end
discovered_deps_batch;
if flush then Out_channel.flush handle;
Hashtbl.clear discovered_deps_batch
let add_idep mode dependent dependency =
let idependent = Dep.make dependent in
let idependency = Dep.make dependency in
if idependent = idependency then
()
else (
Caml.Hashtbl.iter (fun _ f -> f dependent dependency) dependency_callbacks;
if !trace then begin
Hashtbl.replace
discovered_deps_batch
CustomGraph.{ idependent; idependency }
();
if Hashtbl.length discovered_deps_batch >= 1000 then
filter_discovered_deps_batch ~flush:false mode
end;
let () = SaveHumanReadableDepMap.add mode (dependent, idependent) in
SaveHumanReadableDepMap.add mode (dependency, idependency)
)
(** Move the source file to the worker's depgraph directory. *)
let save_delta mode ~source =
let dest = destination_dir mode in
hh_save_custom_dep_graph_save_delta source dest
end
module HhFanout : sig
val add_idep :
Mode.t -> Dep.dependent Dep.variant -> Dep.dependency Dep.variant -> unit
val flush_edges : Hh_fanout_rust_ffi_externs.hh_fanout_rust_ffi -> unit
end = struct
(* The list is of (dependency, dependent). This was moved from `hh_fanout_rust_ffi_externs` so that it could have access to `Dep.t` *)
external commit_edges :
Hh_fanout_rust_ffi_externs.hh_fanout_rust_ffi ->
(Dep.t * Dep.t) list ->
unit = "hh_fanout_ffi_add_idep_batch"
let discovered_deps_batch : (CustomGraph.dep_edge, unit) Hashtbl.t =
Hashtbl.create 1000
let flush_edges hh_fanout_ffi =
let edges =
Hashtbl.fold
begin
fun CustomGraph.{ idependent; idependency } () acc ->
(idependency, idependent) :: acc
end
discovered_deps_batch
[]
in
commit_edges hh_fanout_ffi edges;
Hashtbl.clear discovered_deps_batch
let add_idep mode dependent dependency =
let hh_fanout_ffi =
(* TODO(toyang): ideally, this function would only take hh_fanout_ffi
instead of doing this match. For now, we keep this consistent with the
other `add_idep`s. *)
match mode with
| HhFanoutRustMode { hh_fanout; _ } -> hh_fanout
| _ -> failwith "programming error: wrong mode"
in
let idependent = Dep.make dependent in
let idependency = Dep.make dependency in
if idependent = idependency then
()
else (
Caml.Hashtbl.iter (fun _ f -> f dependent dependency) dependency_callbacks;
if !trace then begin
Hashtbl.replace
discovered_deps_batch
CustomGraph.{ idependent; idependency }
();
if Hashtbl.length discovered_deps_batch >= 1000 then
flush_edges hh_fanout_ffi
end;
SaveHumanReadableDepMap.add mode (dependent, idependent);
SaveHumanReadableDepMap.add mode (dependency, idependency)
)
end
(** Registers Rust custom types with the OCaml runtime, supporting deserialization *)
let () = CustomGraph.hh_custom_dep_graph_register_custom_types ()
let deps_of_file_info (file_info : FileInfo.t) : Dep.t list =
let {
FileInfo.funs;
classes;
typedefs;
consts;
modules;
comments = _;
file_mode = _;
hash = _;
} =
file_info
in
let defs =
List.fold_left
consts
~f:
begin
(fun acc (_, const_id, _) -> Dep.make (Dep.GConst const_id) :: acc)
end
~init:[]
in
let defs =
List.fold_left
funs
~f:
begin
(fun acc (_, fun_id, _) -> Dep.make (Dep.Fun fun_id) :: acc)
end
~init:defs
in
let defs =
List.fold_left
classes
~f:
begin
(fun acc (_, class_id, _) -> Dep.make (Dep.Type class_id) :: acc)
end
~init:defs
in
let defs =
List.fold_left
typedefs
~f:
begin
(fun acc (_, type_id, _) -> Dep.make (Dep.Type type_id) :: acc)
end
~init:defs
in
let defs =
List.fold_left
modules
~f:
begin
(fun acc (_, type_id, _) -> Dep.make (Dep.Module type_id) :: acc)
end
~init:defs
in
defs
module Telemetry = struct
let depgraph_delta_num_edges mode =
match mode with
| InMemoryMode _ -> Some (CustomGraph.dep_graph_delta_num_edges ())
| SaveToDiskMode _ -> None
| HhFanoutRustMode _ -> None
end
type dep_edge = CustomGraph.dep_edge
type dep_edges = CustomGraph.DepEdgeSet.t option
(** As part of a few optimizations (prechecked files, interruptible typechecking), we
    allow the dependency table to get out of date (in order to be able to prioritize
    other work, like reporting errors in the currently open file). This flag is there
    to avoid accidental reads of this stale data - anyone attempting to do so must
    either acknowledge that they don't care about accuracy (by setting this flag
    themselves), or plug into an hh_server mechanism that delays executing such a
    command until the dependency table is back up to date.
*)
let allow_dependency_table_reads mode flag =
match mode with
| InMemoryMode _
| SaveToDiskMode _ ->
CustomGraph.allow_dependency_table_reads flag
(* TODO(toyang): I don't think the re-architecture will have similar staleness issues. *)
| HhFanoutRustMode _ -> true
let add_idep mode dependent dependency =
match mode with
| InMemoryMode _ -> CustomGraph.add_idep mode dependent dependency
| SaveToDiskMode _ -> SaveCustomGraph.add_idep mode dependent dependency
| HhFanoutRustMode _ -> HhFanout.add_idep mode dependent dependency
let replace mode =
match mode with
| InMemoryMode _ -> CustomGraph.hh_custom_dep_graph_replace mode
| _ -> ()
let dep_edges_make () : dep_edges = Some CustomGraph.DepEdgeSet.empty
(** Depending on [mode], either return discovered edges
which are not already in the dep graph
or write those edges to disk. *)
let flush_ideps_batch mode : dep_edges =
match mode with
| InMemoryMode _ ->
(* Make sure we don't miss any dependencies! *)
CustomGraph.filter_discovered_deps_batch mode;
let old_batch = !CustomGraph.filtered_deps_batch in
CustomGraph.filtered_deps_batch := CustomGraph.DepEdgeSet.empty;
Some old_batch
| SaveToDiskMode _ ->
SaveCustomGraph.filter_discovered_deps_batch ~flush:true mode;
SaveHumanReadableDepMap.export_to_disk ~flush:true mode;
None
(* This function is used by *)
| HhFanoutRustMode _ -> failwith "HhFanoutRustMode not supported"
let hh_fanout_flush_ideps mode : unit =
match mode with
| HhFanoutRustMode { hh_fanout; _ } ->
HhFanout.flush_edges hh_fanout;
SaveHumanReadableDepMap.export_to_disk ~flush:true mode
| _ -> failwith "should only be called in HhFanoutRustMode"
let merge_dep_edges (x : dep_edges) (y : dep_edges) : dep_edges =
match (x, y) with
| (Some x, Some y) -> Some (CustomGraph.DepEdgeSet.union x y)
| _ -> None
(** Register the provided dep edges in the dep table delta in [typing_deps.rs] *)
let register_discovered_dep_edges : dep_edges -> unit = function
| None -> ()
| Some batch -> CustomGraph.register_discovered_dep_edges batch
let save_discovered_edges mode ~dest ~reset_state_after_saving =
match mode with
| InMemoryMode _ -> CustomGraph.save_delta dest reset_state_after_saving
| SaveToDiskMode _ ->
failwith "save_discovered_edges not supported for SaveToDiskMode"
| HhFanoutRustMode _ ->
failwith "save_discovered_edges not supported for HhFanoutRustMode"
let load_discovered_edges mode source =
match mode with
| InMemoryMode _ -> CustomGraph.load_delta mode source
| SaveToDiskMode _ -> SaveCustomGraph.save_delta mode ~source
| HhFanoutRustMode _ ->
failwith "load_discovered_edges not supported for HhFanoutRustMode"
let get_ideps_from_hash mode hash =
match mode with
| InMemoryMode _
| SaveToDiskMode _ ->
CustomGraph.get_ideps_from_hash mode hash
| HhFanoutRustMode _ ->
failwith "get_ideps_from_hash not supported for HhFanoutRustMode"
let get_ideps mode dependency = get_ideps_from_hash mode (Dep.make dependency)
let get_extend_deps ~mode ~visited ~source_class ~acc =
CustomGraph.get_extend_deps mode visited source_class acc
let add_extend_deps mode acc = CustomGraph.add_extend_deps mode acc
let add_typing_deps mode acc = CustomGraph.add_typing_deps mode acc
let add_all_deps mode acc = CustomGraph.add_all_deps mode acc
let dump_current_edge_buffer_in_memory_mode =
CustomGraph.dump_current_edge_buffer |
OCaml Interface | hhvm/hphp/hack/src/deps/typing_deps.mli | (*
* Copyright (c) 2015, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the "hack" directory of this source tree.
*
*)
module Mode = Typing_deps_mode
module Dep : sig
(** A node in the dependency graph that must be rechecked when its dependencies change. *)
type dependent
  (** A node in the dependency graph that, when changed, requires all of its dependents to be rechecked. *)
type dependency
(** A type of dependency.
An ['a variant] can be either the dependent or the dependency. For example,
a function body can itself depend on a [Fun] (when it must be rechecked if
the other function changed).
      A [dependency variant] can only appear as a dependency: other symbols can
      depend on it, but it cannot itself depend on other symbols. For example,
      an "extends" is not a symbol in the code, so [Extends] cannot take on
      dependencies on other things. *)
type _ variant =
| GConst : string -> 'a variant
(** Represents a global constant depending on something, or something
depending on a global constant. *)
| Fun : string -> 'a variant
(** Represents either a global function depending on something, or
something depending on a global function. *)
| Type : string -> 'a variant
(** Represents either a class/typedef/recorddef/trait/interface depending on something,
or something depending on one. *)
| Extends : string -> dependency variant
(** Represents another class depending on a class via an
inheritance-like mechanism (`extends`, `implements`, `use`, `require
extends`, `require implements`, etc.) *)
| Const : string * string -> dependency variant
(** Represents something depending on a class constant. *)
| Constructor : string -> dependency variant
(** Represents something depending on a class constructor. *)
| Prop : string * string -> dependency variant
(** Represents something depending on a class's instance property. *)
| SProp : string * string -> dependency variant
(** Represents something depending on a class's static property. *)
| Method : string * string -> dependency variant
(** Represents something depending on a class's instance method. *)
| SMethod : string * string -> dependency variant
(** Represents something depending on a class's static method. *)
| AllMembers : string -> dependency variant
(** Represents something depending on all members of a class.
Particularly useful for switch exhaustiveness-checking. We establish
a dependency on all members of an enum in that case. *)
| GConstName : string -> 'a variant
(** Like [GConst], but used only in conservative redecl. May not be
necessary anymore. *)
| Module : string -> 'a variant
(** Represents a toplevel symbol being defined as a member of
this module *)
val dependency_of_variant : 'a variant -> dependency variant
type dep_kind =
| KGConst
| KFun
| KType
| KExtends
| KConst
| KConstructor
| KProp
| KSProp
| KMethod
| KSMethod
| KAllMembers
| KGConstName
| KModule
[@@deriving enum]
val dep_kind_of_variant : 'a variant -> dep_kind
module Member : sig
type t
val method_ : string -> t
val smethod : string -> t
val prop : string -> t
val sprop : string -> t
val constructor : t
val const : string -> t
val all : t
end
  (** A 63-bit hash *)
type t
val make : 'a variant -> t
val make_member_dep_from_type_dep : t -> Member.t -> t
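  (* Illustrative note (not from the original source): as I read the Rust
     hashing code, [make_member_dep_from_type_dep type_dep member] derives a
     member hash (e.g. for a [Method]) directly from the already-computed hash
     of the enclosing [Type], so the class name does not need to be re-hashed. *)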
val compare_variant : 'a variant -> 'a variant -> int
  (** A 64-bit representation of the 63-bit hash. *)
val to_int64 : t -> int64
val to_int : t -> int
val is_class : t -> bool
val extends_of_class : t -> t
val compare : t -> t -> int
val extract_name : 'a variant -> string
val extract_root_name : ?strip_namespace:bool -> 'a variant -> string
val extract_member_name : 'a variant -> string option
val to_decl_reference : 'a variant -> Decl_reference.t
val to_debug_string : t -> string
val of_debug_string : string -> t
val to_hex_string : t -> string
val of_hex_string : string -> t
val variant_to_string : 'a variant -> string
val pp_variant : Format.formatter -> 'a variant -> unit
end
module DepHashKey : sig
type t = Dep.t
val compare : t -> t -> int
val to_string : t -> string
end
module DepSet : sig
type t [@@deriving show]
type elt = Dep.t
val make : unit -> t
val singleton : elt -> t
val add : t -> elt -> t
val union : t -> t -> t
val inter : t -> t -> t
val diff : t -> t -> t
val iter : t -> f:(elt -> unit) -> unit
val fold : t -> init:'a -> f:(elt -> 'a -> 'a) -> 'a
val mem : t -> elt -> bool
val elements : t -> elt list
val cardinal : t -> int
val is_empty : t -> bool
val of_list : elt list -> t
end
module DepMap : sig
include WrappedMap_sig.S with type key = Dep.t
val pp : (Format.formatter -> 'a -> unit) -> Format.formatter -> 'a t -> unit
val show : (Format.formatter -> 'a -> unit) -> 'a t -> string
end
module VisitedSet : sig
type t
val make : unit -> t
end
val deps_of_file_info : FileInfo.t -> Dep.t list
type dep_edge
type dep_edges
val worker_id : int option ref
val trace : bool ref
val add_dependency_callback :
name:string ->
(Dep.dependent Dep.variant -> Dep.dependency Dep.variant -> unit) ->
unit
(** Return the previous value of the flag *)
val allow_dependency_table_reads : Mode.t -> bool -> bool
val add_idep :
Mode.t -> Dep.dependent Dep.variant -> Dep.dependency Dep.variant -> unit
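(* Illustrative usage (not from the original source): recording that the body
   of the global function [\f] calls the instance method [C::foo]:
     add_idep mode (Dep.Fun "\\f") (Dep.Method ("\\C", "foo"))
   Member-level variants such as [Method] can only appear on the dependency
   side; dependents are always toplevel symbols. *)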
val replace : Mode.t -> unit
val dep_edges_make : unit -> dep_edges
(** Depending on [mode], either return discovered edges
which are not already in the dep graph
or write those edges to disk. *)
val flush_ideps_batch : Mode.t -> dep_edges
(** Re-architecture-specific (should only be used with
`Typing_deps_mode.HhFanoutRustMode`). Flush remaining buffered discovered
edges by committing them via hh_fanout. Also flushes human readable dep maps
to disk, if provided.
*)
val hh_fanout_flush_ideps : Mode.t -> unit
val merge_dep_edges : dep_edges -> dep_edges -> dep_edges
(** Register the provided dep edges in the dep table delta in [typing_deps.rs] *)
val register_discovered_dep_edges : dep_edges -> unit
(** Save discovered edges to a binary file.
- If mode is [InMemoryMode], the dep table delta in [typing_deps.rs] is saved.
- If mode is [SaveToDiskMode], an exception is raised.
Setting [reset_state_after_saving] will empty the dep table delta in
[typing_deps.rs]. *)
val save_discovered_edges :
Mode.t -> dest:string -> reset_state_after_saving:bool -> int
(** Load discovered edges from a binary file.
- If mode is [InMemoryMode], the binary file is assumed to contain 64-bit
hashes and they will be added to the dep table delta in [typing_deps.rs].
If we have an existing table attached, we will first filter out edges
that are already present in the attached table.
- If mode is [SaveToDiskMode], the file is assumed to contain 64-bit
      hashes and they will be added to the current worker's on-disk
dependency edge file. *)
val load_discovered_edges : Mode.t -> string -> int
val get_ideps_from_hash : Mode.t -> Dep.t -> DepSet.t
val get_ideps : Mode.t -> Dep.dependency Dep.variant -> DepSet.t
(** Add to accumulator all extend dependencies of source_class. Visited is used
to avoid processing nodes reachable in multiple ways more than once. In other
    words: find all nodes reachable by "extends" edges starting from the
    source class *)
val get_extend_deps :
mode:Mode.t ->
visited:VisitedSet.t ->
source_class:Dep.t ->
acc:DepSet.t ->
DepSet.t
(** Grow input set by adding all its extend dependencies (including recursive) *)
val add_extend_deps : Mode.t -> DepSet.t -> DepSet.t
(** Grow input set by adding all its typing dependencies (direct only) *)
val add_typing_deps : Mode.t -> DepSet.t -> DepSet.t
(** add_extend_deps and add_typing_deps chained together *)
val add_all_deps : Mode.t -> DepSet.t -> DepSet.t
module Telemetry : sig
val depgraph_delta_num_edges : Mode.t -> int option
end
val dump_current_edge_buffer_in_memory_mode :
?deps_to_symbol_map:'a Dep.variant DepMap.t -> unit -> unit |
Rust | hhvm/hphp/hack/src/deps/typing_deps_hash.rs | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use std::hash::Hasher;
use fnv::FnvHasher;
/// Variant types used in the depgraph table.
///
/// NOTE: Keep in sync with the order of the fields in `Typing_deps.ml`.
#[derive(Copy, Clone, Debug, PartialEq, Eq, strum::EnumString)]
#[repr(u8)]
pub enum DepType {
GConst = 0,
Fun = 1,
Type = 2,
Extends = 3,
Const = 5,
Constructor = 6,
Prop = 7,
SProp = 8,
Method = 9,
SMethod = 10,
AllMembers = 11,
GConstName = 12,
Module = 13,
}
impl DepType {
pub fn as_u8(self) -> u8 {
self as u8
}
pub fn from_u8(tag: u8) -> Option<Self> {
match tag {
0 => Some(DepType::GConst),
1 => Some(DepType::Fun),
2 => Some(DepType::Type),
3 => Some(DepType::Extends),
5 => Some(DepType::Const),
6 => Some(DepType::Constructor),
7 => Some(DepType::Prop),
8 => Some(DepType::SProp),
9 => Some(DepType::Method),
10 => Some(DepType::SMethod),
11 => Some(DepType::AllMembers),
12 => Some(DepType::GConstName),
13 => Some(DepType::Module),
_ => None,
}
}
}
/// Select the hashing algorithm to use for dependency hashes.
///
/// FnvHasher appears to produce better hashes (fewer collisions) than
/// `std::collections::hash_map::DefaultHasher` on our workloads. However, other
/// hashing algorithms may perform better still.
fn make_hasher() -> FnvHasher {
Default::default()
}
fn postprocess_hash(dep_type: DepType, hash: u64) -> u64 {
let hash: u64 = match dep_type {
DepType::Type => {
// For class dependencies, set the lowest bit to 1. For extends
// dependencies, the lowest bit will be 0 (in the case below), so we'll
// be able to convert from a class hash to its extends hash without
// reversing the hash.
(hash << 1) | 1
}
_ => {
// Ensure that only classes have the lowest bit set to 1, so that we
// don't try to transitively traverse the subclasses of non-class
// dependencies.
hash << 1
}
};
hash & !(1 << 63)
}
fn get_dep_type_hash_key(dep_type: DepType) -> u8 {
match dep_type {
DepType::Type | DepType::Extends => {
// Use the same tag for classes and extends dependencies, so that we can
// convert between them without reversing the hash.
DepType::Type as u8
}
_ => dep_type as u8,
}
}
/// Hash a one-argument `Typing_deps.Dep.variant`'s fields.
pub fn hash1(dep_type: DepType, name1: &[u8]) -> u64 {
let mut hasher = make_hasher();
hasher.write_u8(get_dep_type_hash_key(dep_type));
hasher.write(name1);
postprocess_hash(dep_type, hasher.finish())
}
/// Hash a two-argument `Typing_deps.Dep.variant`'s fields.
pub fn hash2(dep_type: DepType, type_hash: u64, name2: &[u8]) -> u64 {
let mut hasher = make_hasher();
hasher.write_u8(get_dep_type_hash_key(dep_type));
hasher.write_u64(type_hash);
hasher.write(name2);
postprocess_hash(dep_type, hasher.finish())
} |
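// A minimal illustrative test (not in the original source) for the bit trick
// documented in `postprocess_hash` and `get_dep_type_hash_key`: a Type hash
// and the Extends hash of the same name differ only in the lowest bit.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn extends_hash_is_type_hash_with_low_bit_cleared() {
        let name = b"\\Foo";
        let type_hash = hash1(DepType::Type, name);
        let extends_hash = hash1(DepType::Extends, name);
        // Class (Type) hashes have the lowest bit set...
        assert_eq!(type_hash & 1, 1);
        // ...and clearing it yields the corresponding Extends hash.
        assert_eq!(extends_hash, type_hash & !1);
        // Bit 63 is always cleared so hashes fit in an OCaml int.
        assert_eq!(type_hash >> 63, 0);
    }
}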
OCaml | hhvm/hphp/hack/src/deps/typing_deps_mode.ml | (*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the "hack" directory of this source tree.
*
*)
(* CAUTION: This type must be kept in sync with typing_deps.rs *)
(** Which dependency graph format are we using? *)
type t =
| InMemoryMode of string option
(** Keep track of newly discovered edges in an in-memory delta.
*
* Optionally, the in-memory delta is backed by a pre-computed
* dependency graph stored using a custom file format.
*)
| SaveToDiskMode of {
graph: string option;
new_edges_dir: string;
human_readable_dep_map_dir: string option;
}
(** Mode that writes newly discovered edges to binary files on disk
 * (one file per worker). Those binary files can then be post-processed
* using a tool of choice.
*
* The first parameter is (optionally) a path to an existing custom 64-bit
 * dependency graph. If it is present, only new edges will be written;
 * if not, all edges will be written. *)
| HhFanoutRustMode of {
hh_fanout: Hh_fanout_rust_ffi_externs.hh_fanout_rust_ffi;
human_readable_dep_map_dir: string option;
} (** Mode that keeps track of edges via hh_fanout's Rust API **)
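(* Example rendering (illustrative, not from the original source; assumes
   [opt_string_to_json] maps [Some s] to a JSON string and [None] to null):
   [to_opaque_json (InMemoryMode (Some "/tmp/graph.hhdg"))] yields
     {"mode":"InMemoryMode","props":{"base":"<opaque>"}}
   while [to_opaque_json (InMemoryMode None)] yields
     {"mode":"InMemoryMode","props":{"base":null}}. *)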
let to_opaque_json (t : t) : Hh_json.json =
let open Hh_json in
let opaque opt = Option.map (fun _ -> "<opaque>") opt in
match t with
| InMemoryMode base ->
JSON_Object
[
("mode", string_ "InMemoryMode");
("props", JSON_Object [("base", opt_string_to_json (opaque base))]);
]
| SaveToDiskMode { graph; new_edges_dir = _; human_readable_dep_map_dir } ->
JSON_Object
[
("mode", string_ "SaveToDiskMode");
( "props",
JSON_Object
[
("graph", opt_string_to_json (opaque graph));
( "human_readable_dep_map_dir",
opt_string_to_json (opaque human_readable_dep_map_dir) );
] );
]
| HhFanoutRustMode _ -> failwith "TODO"
[@@deriving show] |
OCaml | hhvm/hphp/hack/src/deps/typing_pessimisation_deps.ml | (*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the "hack" directory of this source tree.
*
*)
open Hh_prelude
type dependent_member =
| Constructor
| Method of string
| SMethod of string
type dependency = Typing_deps.Dep.dependency Typing_deps.Dep.variant
type coarse_dependent = Typing_deps.Dep.dependent Typing_deps.Dep.variant
(** [Typing_deps.Dep.dependency Typing_deps.Dep.variant] additionally contains
 * constructors that we don't need here.
* Also, the fact that we use it for dependents rather than dependencies may
* be confusing. But this type is not part of the module's interface *)
type fine_dependent = Typing_deps.Dep.dependency Typing_deps.Dep.variant
(** For debugging: When enabled, should record exactly the same dependencies as
 * the original/coarse-grained dependency tracking implemented in
* [Typing_deps] *)
let record_coarse_only = false
type cache_set_entry = {
dependency: dependency;
is_override: bool;
}
type cache = (fine_dependent, cache_set_entry Hash_set.t) Hashtbl.t
module SQLitePersistence : sig
val worker_file_name_glob : string
val persist_cache : cache -> Typing_deps_mode.t -> unit
val close_db : unit -> unit
end = struct
module SU = Sqlite_utils
module S = Sqlite3
(** Contains the database connection once opened and a "statement cache",
* which stores compiled versions of SQL queries for later re-use *)
let db_stmt_cache : SU.StatementCache.t option ref = ref None
let worker_file_name_glob = "finedeps-*.sqlite3"
let target_folder_from_mode = function
| Typing_deps_mode.SaveToDiskMode { new_edges_dir; _ } -> new_edges_dir
| _ -> failwith "Can only record fine dependencies in SaveToDiskMode"
let worker_file_path mode =
let folder = target_folder_from_mode mode in
let worker_id = Option.value ~default:0 !Typing_deps.worker_id in
let file_name =
String.substr_replace_first
worker_file_name_glob
~pattern:"*"
~with_:(Int.to_string worker_id)
in
Path.concat (Path.make folder) file_name |> Path.to_string
let exec_and_check db sql = S.exec db sql |> SU.check_rc db
let bind_and_exec stmt_cache stmt data =
let open SU.StatementCache in
S.bind_values stmt data |> SU.check_rc stmt_cache.db;
S.step stmt |> SU.check_rc stmt_cache.db
let open_db path =
let db = S.db_open path in
exec_and_check db "PRAGMA synchronous = OFF;";
exec_and_check db "PRAGMA journal_mode = MEMORY;";
db
let close_db () =
Option.iter !db_stmt_cache ~f:(fun stmt_cache ->
SU.StatementCache.close stmt_cache;
S.db_close stmt_cache.SU.StatementCache.db |> ignore;
db_stmt_cache := None)
let create_tables db =
let create_nodes_table =
"CREATE TABLE IF NOT EXISTS nodes(
root TEXT NOT NULL,
kind INTEGER NOT NULL,
member TEXT,
hash INTEGER NOT NULL
);"
in
let create_dependencies_table =
"CREATE TABLE IF NOT EXISTS dependencies(
dependent_hash INTEGER NOT NULL,
is_override INTEGER NOT NULL,
dependency_hash INTEGER NOT NULL
);"
in
exec_and_check db create_nodes_table;
exec_and_check db create_dependencies_table
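  (* Illustrative query (not part of the original source): joining the two
   * tables to recover human-readable edges from a worker's output file:
   *   SELECT dt.root, dt.member, dy.root, dy.member, d.is_override
   *   FROM dependencies d
   *   JOIN nodes dt ON dt.hash = d.dependent_hash
   *   JOIN nodes dy ON dy.hash = d.dependency_hash;
   *)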
let get_stmt_cache mode =
let path = worker_file_path mode in
match !db_stmt_cache with
| None ->
let new_db = open_db path in
create_tables new_db;
let new_stmt_cache = SU.StatementCache.make ~db:new_db in
db_stmt_cache := Some new_stmt_cache;
new_stmt_cache
| Some stmt_cache -> stmt_cache
let write_node stmt_cache data hash =
let (root, member_opt, kind_id) = data in
let sql =
"INSERT INTO nodes(root, kind, member, hash) VALUES (?, ?, ?, ?)"
in
let stmt = SU.StatementCache.make_stmt stmt_cache sql in
let data =
let open SU.Data_shorthands in
[text root; int kind_id; opt_text member_opt; int hash]
in
bind_and_exec stmt_cache stmt data
let write_dependency_edge
stmt_cache is_override dependent_hash dependency_hash =
let sql =
"INSERT INTO dependencies(dependent_hash, is_override, dependency_hash) VALUES (?, ?, ?)"
in
let stmt = SU.StatementCache.make_stmt stmt_cache sql in
let data =
let open SU.Data_shorthands in
[int dependent_hash; bool is_override; int dependency_hash]
in
bind_and_exec stmt_cache stmt data
let persist_cache (cache : cache) mode =
let db = get_stmt_cache mode in
let all_nodes = Hash_set.Poly.create () in
let data_of_dep dep =
let open Typing_deps.Dep in
( extract_root_name ~strip_namespace:false dep,
extract_member_name dep,
dep_kind_of_variant dep |> dep_kind_to_enum )
in
let hash_of_dep dep = Typing_deps.Dep.make dep |> Typing_deps.Dep.to_int in
let handle_dependency is_override dependent_hash dependency =
let dependency_hash = hash_of_dep dependency in
write_dependency_edge db is_override dependent_hash dependency_hash
in
let handle_dependent fine_dependent dependency_entries =
let dependent_hash = hash_of_dep fine_dependent in
Hash_set.add all_nodes fine_dependent;
Hash_set.iter dependency_entries ~f:(fun { dependency; is_override } ->
Hash_set.add all_nodes dependency;
handle_dependency is_override dependent_hash dependency)
in
Hashtbl.iteri cache ~f:(fun ~key ~data -> handle_dependent key data);
Hash_set.iter all_nodes ~f:(fun dep ->
write_node db (data_of_dep dep) (hash_of_dep dep))
end
module Backend = struct
let cache = ref None
(* Let's (conservatively) assume each entry is 100 bytes, and we want to
* allow 10MB of cache (per worker) *)
let cache_max_size = 10_000_000 / 100
let cache_used_size = ref 0
let make_cache () = Hashtbl.Poly.create ()
let make_set () = Hash_set.Poly.create ()
let get_cache () =
match !cache with
| None ->
let new_cache = make_cache () in
cache := Some new_cache;
new_cache
| Some cache -> cache
let flush_cache mode =
let cache = get_cache () in
SQLitePersistence.persist_cache cache mode;
Hashtbl.clear cache;
cache_used_size := 0
let add_dependency_edge ~is_override mode fine_dependent dependency =
let cache = get_cache () in
let inc_and_make () =
cache_used_size := !cache_used_size + 1;
make_set ()
in
let set = Hashtbl.find_or_add cache fine_dependent ~default:inc_and_make in
let entry = { dependency; is_override } in
if Result.is_ok @@ Hash_set.strict_add set entry then
cache_used_size := !cache_used_size + 1;
if !cache_used_size >= cache_max_size then flush_cache mode;
()
let add_node mode node =
let cache = get_cache () in
let inc_and_make () =
cache_used_size := !cache_used_size + 1;
make_set ()
in
Hashtbl.find_or_add cache node ~default:inc_and_make |> ignore;
if !cache_used_size >= cache_max_size then flush_cache mode
let finalize mode =
flush_cache mode;
SQLitePersistence.close_db ()
end
let should_ignore_node node =
let root = Typing_deps.Dep.extract_root_name ~strip_namespace:false node in
let kind = Typing_deps.Dep.dep_kind_of_variant node in
  (* This is a sufficient, but not a necessary, condition for being an hhi
   * definition. However, the alternative would be to look up the path for
   * every definition here. *)
let is_hh_def = Option.is_some @@ String.chop_prefix root ~prefix:"HH\\" in
(* Checks that this is the function call created by [Naming.invalid_expr_],
* which doesn't provide a nicer way for checking this. Note the missing
* toplevel \ *)
let is_invalid_expr_sentinel_fun =
match kind with
| Typing_deps.Dep.KFun -> String.(root = "invalid_expr")
| _ -> false
in
let has_useful_kind =
match kind with
| Typing_deps.Dep.KFun
| Typing_deps.Dep.KMethod
| Typing_deps.Dep.KSMethod ->
(* Dependencies that we currently utilize *)
true
| Typing_deps.Dep.KGConst
| Typing_deps.Dep.KConst
| Typing_deps.Dep.KConstructor
| Typing_deps.Dep.KProp
| Typing_deps.Dep.KSProp ->
(* Dependencies that we may utilize at some point, but aren't at the
* moment *)
false
| Typing_deps.Dep.KType
| Typing_deps.Dep.KExtends
| Typing_deps.Dep.KAllMembers
| Typing_deps.Dep.KGConstName
| Typing_deps.Dep.KModule ->
(* Dependencies that we will most likely never utilize *)
false
in
is_hh_def || (not @@ has_useful_kind) || is_invalid_expr_sentinel_fun
let should_ignore_edge fine_dependent dependency =
Poly.(fine_dependent = dependency)
|| should_ignore_node fine_dependent
|| should_ignore_node dependency
let add_fine_dep mode fine_dependent dependency =
if not @@ should_ignore_edge fine_dependent dependency then
Backend.add_dependency_edge
~is_override:false
mode
fine_dependent
dependency
let add_coarse_dep mode coarse_dep =
add_fine_dep mode (Typing_deps.Dep.dependency_of_variant coarse_dep)
let dependency_variant_of_member class_name = function
| Constructor -> Typing_deps.Dep.Constructor class_name
| Method m -> Typing_deps.Dep.Method (class_name, m)
| SMethod m -> Typing_deps.Dep.SMethod (class_name, m)
let fine_dependent_of_coarse_and_member :
coarse_dependent -> dependent_member option -> fine_dependent =
fun coarse member ->
let member =
if record_coarse_only then
None
else
member
in
match (coarse, member) with
| (root, None) -> Typing_deps.Dep.dependency_of_variant root
| (Typing_deps.Dep.Type t, Some member) ->
dependency_variant_of_member t member
| (_, Some _) ->
failwith
"Only types/classes can have members for the purposes of dependency tracking!"
let try_add_fine_dep mode coarse member dependency =
let member =
if record_coarse_only then
None
else
member
in
match (coarse, member) with
| (None, None) -> ()
| (None, Some _) -> failwith "Cannot have member dependent without root"
| (Some root, member_opt) ->
let dependent = fine_dependent_of_coarse_and_member root member_opt in
add_fine_dep mode dependent dependency
let add_override_dep mode ~child_name ~parent_name member =
let dependent = dependency_variant_of_member parent_name member in
let dependency = dependency_variant_of_member child_name member in
if not @@ should_ignore_edge dependent dependency then
Backend.add_dependency_edge ~is_override:true mode dependent dependency
let add_node mode coarse member =
let node = fine_dependent_of_coarse_and_member coarse member in
if not @@ should_ignore_node node then Backend.add_node mode node
let finalize = Backend.finalize |
OCaml Interface | hhvm/hphp/hack/src/deps/typing_pessimisation_deps.mli | (*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the "hack" directory of this source tree.
*
*)
(** A dependency is something (e.g., a method) that some other piece of code
* depends on (e.g., due to calling it) *)
type dependency = Typing_deps.Dep.dependency Typing_deps.Dep.variant
(** A dependent (= something with dependencies) as used by the original,
* "coarse" dependency graph mechanism *)
type coarse_dependent = Typing_deps.Dep.dependent Typing_deps.Dep.variant
(** A [dependent_member] augments a [coarse_dependent]. It denotes a
* child/member of the toplevel definition represented by
* [coarse_dependent] *)
type dependent_member =
| Constructor
| Method of string
| SMethod of string
val add_coarse_dep :
Typing_deps_mode.t -> coarse_dependent -> dependency -> unit
val try_add_fine_dep :
Typing_deps_mode.t ->
coarse_dependent option ->
dependent_member option ->
dependency ->
unit
val add_override_dep :
Typing_deps_mode.t ->
child_name:string ->
parent_name:string ->
dependent_member ->
unit
(** Informs the pessimisation dependency graph about the existence of a node.
    This makes it possible to ensure that certain nodes are added to the graph even if
they may not have any incoming or outgoing edges. *)
val add_node :
Typing_deps_mode.t -> coarse_dependent -> dependent_member option -> unit
(** Persists all currently cached dependencies to disk *)
val finalize : Typing_deps_mode.t -> unit
module SQLitePersistence : sig
(** A glob pattern for the file names used by the per-worker output files.
A star is used in place of the worker id *)
val worker_file_name_glob : string
end |
TOML | hhvm/hphp/hack/src/deps/cargo/deps_rust/Cargo.toml | # @generated by autocargo
[package]
name = "deps_rust"
version = "0.0.0"
edition = "2021"
[lib]
path = "../../deps_rust/typing_deps.rs"
crate-type = ["lib", "staticlib"]
[dependencies]
dep_graph_delta = { version = "0.0.0", path = "../.." }
depgraph_reader = { version = "0.0.0", path = "../../../depgraph/cargo/depgraph_reader" }
hash = { version = "0.0.0", path = "../../../utils/hash" }
ocamlrep = { version = "0.1.0", git = "https://github.com/facebook/ocamlrep/", branch = "main" }
ocamlrep_custom = { version = "0.1.0", git = "https://github.com/facebook/ocamlrep/", branch = "main" }
once_cell = "1.12"
parking_lot = { version = "0.12.1", features = ["send_guard"] }
rpds = "0.11.0" |
TOML | hhvm/hphp/hack/src/deps/cargo/deps_rust_ffi/Cargo.toml | # @generated by autocargo
[package]
name = "deps_rust_ffi"
version = "0.0.0"
edition = "2021"
[lib]
path = "../../deps_rust/deps_rust_ffi.rs"
crate-type = ["lib", "staticlib"]
[dependencies]
dep = { version = "0.0.0", path = "../../../depgraph/cargo/dep" }
deps_rust = { version = "0.0.0", path = "../deps_rust" }
hash = { version = "0.0.0", path = "../../../utils/hash" }
ocamlrep = { version = "0.1.0", git = "https://github.com/facebook/ocamlrep/", branch = "main" }
ocamlrep_custom = { version = "0.1.0", git = "https://github.com/facebook/ocamlrep/", branch = "main" }
ocamlrep_ocamlpool = { version = "0.1.0", git = "https://github.com/facebook/ocamlrep/", branch = "main" }
rpds = "0.11.0"
typing_deps_hash = { version = "0.0.0", path = "../typing_deps_hash" } |
TOML | hhvm/hphp/hack/src/deps/cargo/hh_fanout_rust_ffi/Cargo.toml | # @generated by autocargo
[package]
name = "hh_fanout_rust_ffi"
version = "0.0.0"
edition = "2021"
[lib]
path = "../../hh_fanout_rust/hh_fanout_rust_ffi.rs"
test = false
doctest = false
crate-type = ["lib", "staticlib"]
[dependencies]
dep = { version = "0.0.0", path = "../../../depgraph/cargo/dep" }
hh24_types = { version = "0.0.0", path = "../../../utils/hh24_types" }
ocamlrep_custom = { version = "0.1.0", git = "https://github.com/facebook/ocamlrep/", branch = "main" }
ocamlrep_ocamlpool = { version = "0.1.0", git = "https://github.com/facebook/ocamlrep/", branch = "main" } |
TOML | hhvm/hphp/hack/src/deps/cargo/typing_deps_hash/Cargo.toml | # @generated by autocargo
[package]
name = "typing_deps_hash"
version = "0.0.0"
edition = "2021"
[lib]
path = "../../typing_deps_hash.rs"
[dependencies]
fnv = "1.0"
strum = { version = "0.24", features = ["derive"] } |
Rust | hhvm/hphp/hack/src/deps/deps_rust/deps_rust_ffi.rs | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
#![cfg_attr(use_unstable_features, feature(test))]
use std::cell::RefCell;
use std::collections::VecDeque;
use std::ffi::OsString;
use std::fs::File;
use std::path::Path;
use dep::Dep;
use deps_rust::dep_graph_delta_with;
use deps_rust::dep_graph_delta_with_mut;
use deps_rust::dep_graph_override;
use deps_rust::dep_graph_with_default;
use deps_rust::dep_graph_with_option;
use deps_rust::DepSet;
use deps_rust::RawTypingDepsMode;
use deps_rust::VisitedSet;
use hash::HashSet;
use ocamlrep::Value;
use ocamlrep_custom::CamlSerialize;
use ocamlrep_custom::Custom;
use ocamlrep_ocamlpool::ocaml_ffi;
use rpds::HashTrieSet;
use typing_deps_hash::hash1;
use typing_deps_hash::hash2;
use typing_deps_hash::DepType;
fn tag_to_dep_type(tag: u8) -> DepType {
match DepType::from_u8(tag) {
Some(dep_type) => dep_type,
None => panic!("Invalid dep type: {:?}", tag),
}
}
/// Hashes an `int` and `string`, arising from one of the one-argument cases of
/// `Typing_deps.Dep.variant`.
///
/// # Safety
///
/// `name1` must point to a valid OCaml string. It must not be concurrently
/// modified while this function holds a reference to it.
///
/// This function is only called from OCaml, and it is passed a value allocated
/// entirely by OCaml code, so the argument will be a valid string. The OCaml
/// runtime is interrupted by the FFI call to this function, none of the
/// transitively called functions from here call into the OCaml runtime, and we
/// do not spawn threads in our OCaml code, so the pointed-to value will not be
/// concurrently modified.
#[no_mangle]
unsafe extern "C" fn hash1_ocaml(dep_type_tag: usize, name1: usize) -> usize {
fn do_hash(dep_type_tag: Value<'_>, name1: Value<'_>) -> Value<'static> {
let dep_type_tag = dep_type_tag
.as_int()
.expect("dep_type_tag could not be converted to int");
let dep_type = tag_to_dep_type(dep_type_tag as u8);
let name1 = name1
.as_byte_string()
.expect("name1 could not be converted to byte string");
let result: u64 = hash1(dep_type, name1);
// In Rust, a numeric cast between two integers of the same size
// is a no-op. We require a 64-bit word size.
let result = result as isize;
Value::int(result)
}
let dep_type_tag = Value::from_bits(dep_type_tag);
let name1 = Value::from_bits(name1);
let result = do_hash(dep_type_tag, name1);
Value::to_bits(result)
}
/// Hashes an `int` and two `string`s, arising from one of the two-argument cases of
/// `Typing_deps.Dep.variant`.
///
/// # Safety
///
/// `type_hash` must be a valid OCaml int.
/// `member_name` must point to a valid OCaml string. It must not be concurrently
/// modified while this function holds a reference to it.
///
/// This function is only called from OCaml, and it is passed values allocated
/// entirely by OCaml code, so the argument will be a valid string. The OCaml
/// runtime is interrupted by the FFI call to this function, none of the
/// transitively called functions from here call into the OCaml runtime, and we
/// do not spawn threads in our OCaml code, so the pointed-to value will not be
/// concurrently modified.
#[no_mangle]
unsafe extern "C" fn hash2_ocaml(
dep_type_tag: usize,
type_hash: usize,
member_name: usize,
) -> usize {
fn do_hash(
dep_type_tag: Value<'_>,
type_hash: Value<'_>,
member_name: Value<'_>,
) -> Value<'static> {
let dep_type_tag = dep_type_tag
.as_int()
.expect("dep_type_tag could not be converted to int");
let dep_type = tag_to_dep_type(dep_type_tag as u8);
        // OCaml ints are i63, sign-extended to i64. Clear the MSB while
// converting to u64, to match FromOcamlRep for Dep.
let type_hash = type_hash
.as_int()
.expect("type_hash could not be converted to int");
let type_hash = type_hash as u64 & !(1 << 63);
let member_name = member_name
.as_byte_string()
.expect("member_name could not be converted to byte string");
let result: u64 = hash2(dep_type, type_hash, member_name);
// In Rust, a numeric cast between two integers of the same size
// is a no-op. We require a 64-bit word size.
let result = result as isize;
Value::int(result)
}
let dep_type_tag = Value::from_bits(dep_type_tag);
let type_hash = Value::from_bits(type_hash);
let member_name = Value::from_bits(member_name);
let result = do_hash(dep_type_tag, type_hash, member_name);
Value::to_bits(result)
}
// Functions to register custom Rust types with the OCaml runtime
ocaml_ffi! {
fn hh_custom_dep_graph_register_custom_types() {
// Safety: The OCaml runtime is currently interrupted by a call into
// this function, so it's safe to interact with it.
unsafe {
DepSet::register();
VisitedSet::register();
}
}
}
// Functions to query the dependency graph
ocaml_ffi! {
fn hh_custom_dep_graph_replace(mode: RawTypingDepsMode) {
// Safety: we don't call into OCaml again, so mode will remain valid.
dep_graph_override(mode);
}
fn hh_custom_dep_graph_has_edge(mode: RawTypingDepsMode, dependent: Dep, dependency: Dep) -> bool {
// Safety: we don't call into OCaml again, so mode will remain valid.
dep_graph_with_default(mode, false, move |g| {
g.dependent_dependency_edge_exists(dependent, dependency)
})
}
fn hh_custom_dep_graph_get_ideps_from_hash(mode: RawTypingDepsMode, dep: Dep) -> Custom<DepSet> {
let mut deps = HashTrieSet::new();
dep_graph_delta_with(|delta| {
if let Some(delta_deps) = delta.get(dep) {
for delta_dep in delta_deps {
deps.insert_mut(*delta_dep)
}
}
});
// Safety: we don't call into OCaml again, so mode will remain valid.
dep_graph_with_default(mode, (), |g| {
if let Some(hash_list) = g.hash_list_for(dep) {
for hash in g.hash_list_hashes(hash_list) {
deps.insert_mut(hash);
}
}
});
Custom::from(DepSet::from(deps))
}
fn hh_custom_dep_graph_add_typing_deps(mode: RawTypingDepsMode, query: Custom<DepSet>) -> Custom<DepSet> {
// Safety: we don't call into OCaml again, so mode will remain valid.
let mut s = dep_graph_with_option(mode, |g| match g {
Some(g) => g.query_typing_deps_multi(&query),
None => query.clone(),
});
dep_graph_delta_with(|delta| {
for dep in query.iter() {
if let Some(depies) = delta.get(*dep) {
for depy in depies {
s.insert_mut(*depy);
}
}
}
});
Custom::from(DepSet::from(s))
}
fn hh_custom_dep_graph_add_extend_deps(mode: RawTypingDepsMode, query: Custom<DepSet>) -> Custom<DepSet> {
let mut visited = HashSet::default();
let mut queue = VecDeque::new();
let mut acc = query.clone();
for source_class in query.iter() {
// Safety: we don't call into OCaml again, so mode will remain valid.
unsafe {
get_extend_deps_visit(mode, &mut visited, &mut queue, *source_class, &mut acc);
}
}
while let Some(source_class) = queue.pop_front() {
// Safety: we don't call into OCaml again, so mode will remain valid.
unsafe {
get_extend_deps_visit(mode, &mut visited, &mut queue, source_class, &mut acc);
}
}
Custom::from(acc.into())
}
fn hh_custom_dep_graph_get_extend_deps(
mode: RawTypingDepsMode,
visited: Custom<VisitedSet>,
source_class: Dep,
acc: Custom<DepSet>,
) -> Custom<DepSet> {
let mut visited = visited.borrow_mut();
let mut queue = VecDeque::new();
let mut acc = acc.clone();
// Safety: we don't call into OCaml again, so mode will remain valid.
unsafe {
get_extend_deps_visit(mode, &mut visited, &mut queue, source_class, &mut acc);
while let Some(source_class) = queue.pop_front() {
get_extend_deps_visit(mode, &mut visited, &mut queue, source_class, &mut acc);
}
}
Custom::from(acc.into())
}
fn hh_custom_dep_graph_register_discovered_dep_edge(
dependent: Dep,
dependency: Dep,
) {
dep_graph_delta_with_mut(move |s| {
s.insert(dependent, dependency);
});
}
fn hh_custom_dep_graph_dep_graph_delta_num_edges() -> usize {
dep_graph_delta_with(|s| s.len())
}
fn hh_custom_dep_graph_save_delta(dest: OsString, reset_state_after_saving: bool) -> usize {
let f = std::fs::OpenOptions::new()
.create(true)
.append(true)
.open(dest).unwrap();
let hashes_added = dep_graph_delta_with(move |s| {
let mut w = std::io::BufWriter::new(f);
let hashes_added = s.write_to(&mut w).unwrap();
w.into_inner().unwrap();
hashes_added
});
if reset_state_after_saving {
dep_graph_delta_with_mut(|s| {
s.clear();
});
}
hashes_added
}
fn hh_custom_dep_graph_load_delta(mode: RawTypingDepsMode, source: OsString) -> usize {
let f = File::open(source).unwrap();
let mut r = std::io::BufReader::new(f);
// Safety: we don't call into OCaml again, so mode will remain valid.
dep_graph_with_option(mode, move |g| {
dep_graph_delta_with_mut(|s| {
let result = match g {
Some(g) => {
s.read_from(
&mut r,
|dependent, dependency| {
// Only add when it's not already in
// the graph!
!g.dependent_dependency_edge_exists(
dependent,
dependency,
)
},
)
}
None => s.read_from(&mut r, |_, _| true),
};
result.unwrap()
})
})
}
// Moves the source file to the destination directory.
fn hh_save_custom_dep_graph_save_delta(source: OsString, dest_dir: OsString) -> usize {
let dest_file = Path::new(&dest_dir)
.join(source.to_str().unwrap().replace('/', "-"));
std::fs::rename(&source, dest_file).unwrap();
// Technically we loaded 0 deps into the hh_server dep graph
0
}
}
/// Helper function to recursively get extend deps
///
/// # Safety
///
/// The dependency graph mode must be a pointer to an OCaml value that's
/// still valid.
unsafe fn get_extend_deps_visit(
mode: RawTypingDepsMode,
visited: &mut HashSet<Dep>,
queue: &mut VecDeque<Dep>,
source_class: Dep,
acc: &mut HashTrieSet<Dep>,
) {
if !visited.insert(source_class) {
return;
}
let extends_hash = match source_class.class_to_extends() {
None => return,
Some(hash) => hash,
};
let mut handle_extends_dep = |dep: Dep| {
if dep.is_class() {
if !acc.contains(&dep) {
acc.insert_mut(dep);
queue.push_back(dep);
}
}
};
dep_graph_delta_with(|delta| {
if let Some(delta_deps) = delta.get(extends_hash) {
delta_deps.iter().copied().for_each(&mut handle_extends_dep);
}
});
dep_graph_with_default(mode, (), |g| {
if let Some(hash_list) = g.hash_list_for(extends_hash) {
g.hash_list_hashes(hash_list)
.for_each(&mut handle_extends_dep);
}
})
}
// Auxiliary functions for Typing_deps.DepSet/Typing_deps.VisitedSet
ocaml_ffi! {
fn hh_visited_set_make() -> Custom<VisitedSet> {
Custom::from(RefCell::new(HashSet::default()).into())
}
fn hh_dep_set_make() -> Custom<DepSet> {
Custom::from(HashTrieSet::new().into())
}
fn hh_dep_set_singleton(dep: Dep) -> Custom<DepSet> {
let mut s = HashTrieSet::new();
s.insert_mut(dep);
Custom::from(s.into())
}
fn hh_dep_set_add(s: Custom<DepSet>, dep: Dep) -> Custom<DepSet> {
let mut s = s.clone();
s.insert_mut(dep);
Custom::from(s.into())
}
fn hh_dep_set_union(s1: Custom<DepSet>, s2: Custom<DepSet>) -> Custom<DepSet> {
Custom::from(s1.union(&s2))
}
fn hh_dep_set_inter(s1: Custom<DepSet>, s2: Custom<DepSet>) -> Custom<DepSet> {
Custom::from(s1.intersect(&s2))
}
fn hh_dep_set_diff(s1: Custom<DepSet>, s2: Custom<DepSet>) -> Custom<DepSet> {
Custom::from(s1.difference(&s2))
}
fn hh_dep_set_mem(s: Custom<DepSet>, dep: Dep) -> bool {
s.contains(&dep)
}
fn hh_dep_set_elements(s: Custom<DepSet>) -> Vec<Dep> {
s.iter().copied().map(Dep::from).collect()
}
fn hh_dep_set_cardinal(s: Custom<DepSet>) -> usize {
s.size()
}
fn hh_dep_set_is_empty(s: Custom<DepSet>) -> bool {
s.is_empty()
}
fn hh_dep_set_of_list(xs: Vec<Dep>) -> Custom<DepSet> {
Custom::from(HashTrieSet::from_iter(xs).into())
}
}
#[cfg(all(test, use_unstable_features))]
mod tests {
extern crate test;
use ocamlrep::Arena;
use ocamlrep::ToOcamlRep;
use ocamlrep::Value;
use test::Bencher;
const SHORT_CLASS_NAME: &str = "\\Foo";
const LONG_CLASS_NAME: &str = "\\EntReasonablyLongClassNameSinceSomeClassNamesAreLong";
#[bench]
fn bench_hash1_short(b: &mut Bencher) {
b.iter(|| {
crate::hash1(crate::DepType::Type, SHORT_CLASS_NAME.as_bytes());
});
}
#[bench]
fn bench_hash1_long(b: &mut Bencher) {
b.iter(|| {
crate::hash1(crate::DepType::Type, LONG_CLASS_NAME.as_bytes());
});
}
#[bench]
fn bench_hash2_short(b: &mut Bencher) {
b.iter(|| {
let type_hash = crate::hash1(crate::DepType::Type, SHORT_CLASS_NAME.as_bytes());
crate::hash2(crate::DepType::Const, type_hash, b"\\T");
});
}
#[bench]
fn bench_hash2_long(b: &mut Bencher) {
b.iter(|| {
let type_hash = crate::hash1(crate::DepType::Type, LONG_CLASS_NAME.as_bytes());
crate::hash2(crate::DepType::Const, type_hash, b"\\TSomeTypeConstant");
});
}
#[bench]
fn bench_hash1_ocaml(b: &mut Bencher) {
let arena = Arena::new();
let dep_type = crate::DepType::Type;
let name1 = arena.add(LONG_CLASS_NAME);
b.iter(|| unsafe {
crate::hash1_ocaml(Value::int(dep_type as isize).to_bits(), name1.to_bits())
});
}
#[bench]
fn bench_hash2_ocaml(b: &mut Bencher) {
let arena = Arena::new();
let dep_type = crate::DepType::Const;
let type_hash = crate::hash1(crate::DepType::Type, LONG_CLASS_NAME.as_bytes());
let member_name = arena.add("\\TSomeTypeConstant");
b.iter(|| unsafe {
crate::hash2_ocaml(
Value::int(dep_type as isize).to_bits(),
type_hash.to_ocamlrep(&arena).to_bits(),
member_name.to_bits(),
)
});
}
} |
Rust | hhvm/hphp/hack/src/deps/deps_rust/dep_graph_delta.rs | // Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use std::io;
use std::io::Read;
use std::io::Write;
use dep::Dep;
use hash::HashMap;
use hash::HashSet;
use serde::Deserialize;
use serde::Serialize;
/// Structure to keep track of the dependency graph delta.
#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct DepGraphDelta {
/// Maps each dependency to a set of dependents
rdeps: HashMap<Dep, HashSet<Dep>>,
/// Total number of edges. Tracks the sum:
/// `rdeps.values().map(|set|set.len()).sum()`
num_edges: usize,
}
impl DepGraphDelta {
// The high bit of a value being set distinguishes dependency from dependent.
const DEPENDENCY_TAG: u64 = 1 << 63;
pub fn insert(&mut self, dependent: Dep, dependency: Dep) {
if (self.rdeps.entry(dependency))
.or_default()
.insert(dependent)
{
self.num_edges += 1;
}
}
pub fn extend(&mut self, other: Self) {
use std::collections::hash_map::Entry::*;
for (dependency, dependents) in other.rdeps {
match self.rdeps.entry(dependency) {
Occupied(e) => {
let ds = e.into_mut();
let n = ds.len();
ds.extend(dependents);
self.num_edges += ds.len() - n;
}
Vacant(e) => {
self.num_edges += dependents.len();
e.insert(dependents);
}
}
}
}
pub fn get(&self, dependency: Dep) -> Option<&HashSet<Dep>> {
self.rdeps.get(&dependency)
}
/// Return an iterator over this dependency graph delta.
///
/// Iterates over (dependent, dependency) pairs
pub fn iter(&self) -> impl Iterator<Item = (Dep, Dep)> + '_ {
self.rdeps.iter().flat_map(|(&dependency, dependents_set)| {
dependents_set
.iter()
.map(move |&dependent| (dependent, dependency))
})
}
pub fn into_rdeps(self) -> impl Iterator<Item = (Dep, HashSet<Dep>)> {
self.rdeps.into_iter()
}
/// Return the number of edges in the dep graph delta.
pub fn len(&self) -> usize {
self.num_edges
}
pub fn is_empty(&self) -> bool {
self.num_edges == 0
}
/// Write one (dependency, dependents) list.
///
/// The format is as follows. Each dependency hash can be followed by
/// an arbitrary number of accompanying dependent hashes. To distinguish
/// between dependency and dependent hashes, we make use of the fact that
/// hashes are 63-bit (due to the OCaml limitation). We set the MSB for
/// dependency hashes.
fn write_list<W: Write>(
mut w: W,
dependency: Dep,
dependents: impl Iterator<Item = Dep> + ExactSizeIterator,
) -> io::Result<()> {
if dependents.len() != 0 {
let dependency: u64 = dependency.into();
w.write_all(&(dependency | Self::DEPENDENCY_TAG).to_ne_bytes())?;
for dependent in dependents {
let dependent: u64 = dependent.into();
w.write_all(&dependent.to_ne_bytes())?;
}
}
Ok(())
}
/// Write all edges in the delta to the writer in a custom format.
///
    /// The output is deterministic if the insertion order is deterministic,
    /// but is otherwise arbitrary, since we're iterating a HashMap & HashSet.
pub fn write_to<W: Write>(&self, mut w: W) -> io::Result<usize> {
let mut edges_added = 0;
for (&dependency, dependents) in self.rdeps.iter() {
Self::write_list(&mut w, dependency, dependents.iter().copied())?;
edges_added += dependents.len();
}
Ok(edges_added)
}
/// Write all edges in the delta to the writer in a custom format.
///
    /// The output is in deterministic, sorted order.
pub fn write_sorted<W: Write>(&self, mut w: W) -> io::Result<()> {
let mut dependencies: Vec<_> = self.rdeps.iter().collect();
dependencies.sort_unstable_by_key(|(dep, _)| *dep);
for (&dependency, dependents) in dependencies {
let mut dependents: Vec<Dep> = dependents.iter().copied().collect();
dependents.sort_unstable();
Self::write_list(&mut w, dependency, dependents.into_iter())?;
}
Ok(())
}
/// Load all edges into the delta.
///
/// The predicate determines whether or not to add a loaded edge to the delta.
/// If the predicate returns true for a given dependent-dependency edge
/// (in that order), the edge is added.
///
/// Returns the number of edges actually read.
///
/// See write_to() for details about the file format.
pub fn read_from<R: Read>(
&mut self,
mut r: R,
f: impl Fn(Dep, Dep) -> bool,
) -> io::Result<usize> {
let mut edges_read = 0;
let mut dependency: Option<Dep> = None;
loop {
let mut bytes: [u8; 8] = [0; 8];
match r.read_exact(&mut bytes) {
Err(err) if err.kind() == io::ErrorKind::UnexpectedEof => {
break;
}
r => r?,
};
let hash = u64::from_ne_bytes(bytes);
if (hash & Self::DEPENDENCY_TAG) != 0 {
// This is a dependency hash.
let hash = hash & !Self::DEPENDENCY_TAG;
dependency = Some(Dep::new(hash));
} else {
// This is a dependent hash.
let dependent = Dep::new(hash);
let dependency =
dependency.expect("Expected a dependency hash before a dependent hash");
if f(dependent, dependency) {
self.insert(dependent, dependency);
edges_read += 1;
}
}
}
Ok(edges_read)
}
pub fn clear(&mut self) {
self.rdeps.clear();
self.num_edges = 0;
}
}
/// An iterator that yields a sequence of (dependency, dependents) pairs
/// from a DepGraphDelta file.
///
/// It does so in a zero-copy way, simply pointing into the DepGraphDelta
/// representation (e.g. a memory-mapped file).
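///
/// A hedged usage sketch (`raw` is assumed to be a `&[u64]` view of data
/// produced by `DepGraphDelta::write_to` or `write_sorted`):
///
/// ```ignore
/// for (dependency, dependents) in DepGraphDeltaIterator::new(raw) {
///     for &dependent in dependents {
///         // handle the edge (dependent, dependency)
///     }
/// }
/// ```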
pub struct DepGraphDeltaIterator<'a> {
raw_data: &'a [u64],
}
impl<'a> DepGraphDeltaIterator<'a> {
pub fn new(raw_data: &'a [u64]) -> Self {
Self { raw_data }
}
}
impl<'a> Iterator for DepGraphDeltaIterator<'a> {
type Item = (Dep, &'a [Dep]);
fn next(&mut self) -> Option<Self::Item> {
self.raw_data.split_first().map(|(&first, rest)| {
// Find the next dependency hash, which is indicated by the high
// bit being set. Everything in between is a dependent, and we can
// just point to them directly.
let end = rest
.iter()
.position(|&x| (x & DepGraphDelta::DEPENDENCY_TAG) != 0)
.unwrap_or(rest.len());
// Advance the iterator to the next edge list (if any).
let (dependents, rest) = rest.split_at(end);
self.raw_data = rest;
debug_assert_ne!(first & DepGraphDelta::DEPENDENCY_TAG, 0);
let dependency = Dep::new(first & !DepGraphDelta::DEPENDENCY_TAG);
let dependents = Dep::from_u64_slice(dependents);
(dependency, dependents)
})
}
}
impl<'a> std::iter::FusedIterator for DepGraphDeltaIterator<'a> {}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_dep_graph_delta_serialize_empty() {
let x = DepGraphDelta::default();
let mut bytes = Vec::new();
x.write_to(&mut bytes).unwrap();
let mut y = DepGraphDelta::default();
let mut bytes_read: &[u8] = &bytes;
let num_loaded = y.read_from(&mut bytes_read, |_, _| true).unwrap();
assert_eq!(num_loaded, 0);
assert_eq!(x, y);
}
#[test]
fn test_dep_graph_delta_serialize_non_empty() {
let mut x = DepGraphDelta::default();
x.insert(Dep::new(10), Dep::new(1));
x.insert(Dep::new(10), Dep::new(2));
x.insert(Dep::new(11), Dep::new(2));
x.insert(Dep::new(12), Dep::new(3));
let mut bytes = Vec::new();
x.write_to(&mut bytes).unwrap();
let mut y = DepGraphDelta::default();
let mut bytes_read: &[u8] = &bytes;
let num_loaded = y.read_from(&mut bytes_read, |_, _| true).unwrap();
assert_eq!(num_loaded, 4);
assert_eq!(x, y);
}
#[test]
fn test_dep_graph_delta_iter_empty() {
let x = DepGraphDelta::default();
let v: Vec<_> = x.iter().collect();
assert_eq!(v.len(), 0);
}
#[test]
fn test_dep_graph_delta_iter_non_empty() {
let mut x = DepGraphDelta::default();
let edges = vec![
(Dep::new(10), Dep::new(1)),
(Dep::new(10), Dep::new(2)),
(Dep::new(11), Dep::new(2)),
(Dep::new(12), Dep::new(3)),
];
for (dependency, dependent) in edges.iter() {
x.insert(*dependency, *dependent)
}
let mut v: Vec<_> = x.iter().collect();
v.sort();
assert_eq!(v, edges);
}
} |
Rust | hhvm/hphp/hack/src/deps/deps_rust/typing_deps.rs | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use std::cell::RefCell;
use std::io::Write;
use dep_graph_delta::DepGraphDelta;
pub use depgraph_reader::Dep;
use depgraph_reader::DepGraph;
use hash::HashSet;
use ocamlrep::ptr::UnsafeOcamlPtr;
use ocamlrep::FromError;
use ocamlrep::FromOcamlRep;
use ocamlrep::ToOcamlRep;
use ocamlrep::Value;
use ocamlrep_custom::caml_serialize_default_impls;
use ocamlrep_custom::CamlSerialize;
use once_cell::sync::OnceCell;
use parking_lot::Mutex;
use rpds::HashTrieSet;
/// A structure wrapping the memory-mapped dependency graph.
/// Each worker will itself lazily (or eagerly upon request)
/// open a memory-mapping to the dependency graph.
///
/// It's an option, because custom mode might be enabled without
/// an existing saved-state.
static DEP_GRAPH: Mutex<Option<DepGraph>> = Mutex::new(None);
fn _static_assert() {
// The use of 64-bit (actually 63-bit) dependency hashes requires that we
// are compiling for a 64-bit architecture. Let's assert that at compile time.
//
// OCaml only supports unboxed integers of WORD SIZE - 1 bits. We don't want to
// be boxing dependency hashes, so we require a 64-bit word size.
//
// If this check fails, it would be impossible to correctly convert back and
// forth between OCaml's native integer type and Rust's u64.
let _ = [(); 0 - (!(8 == std::mem::size_of::<usize>()) as usize)];
}
/// Which dependency graph format are we using?
#[derive(FromOcamlRep, ToOcamlRep)]
#[repr(C, u8)]
// CAUTION: This must be kept in sync with typing_deps_mode.ml
pub enum TypingDepsMode {
/// Keep track of newly discovered edges in an in-memory delta.
///
/// Optionally, the in-memory delta is backed by a pre-computed
/// dependency graph stored using a custom file format.
InMemoryMode(Option<String>),
/// Mode that writes newly discovered edges to binary files on disk
/// (one file per worker). Those binary files can then be post-processed
/// using a tool of choice.
///
/// The first parameter is (optionally) a path to an existing custom 64-bit
/// dependency graph. If it is present, only new edges will be written,
/// if not, all edges will be written.
SaveToDiskMode {
graph: Option<String>,
new_edges_dir: String,
// This is unused.
human_readable_dep_map_dir: Option<String>,
},
/// Mode that keeps track of edges via hh_fanout's Rust API. We include this
/// here to match the OCaml version of `TypingDepsMode`, but this is unused.
HhFanoutRustMode { hh_fanout: UnsafeOcamlPtr },
}
/// A raw OCaml pointer to the dependency mode.
///
/// We use this raw pointer because we don't want to constantly
/// convert between the OCaml and Rust value (which involves copying)
/// when it's not needed. Rather, we only convert when we first open the
/// dependency graph.
#[derive(Debug, Clone, Copy)]
pub struct RawTypingDepsMode(usize);
impl FromOcamlRep for RawTypingDepsMode {
fn from_ocamlrep(value: Value<'_>) -> Result<Self, FromError> {
Ok(Self(value.to_bits()))
}
}
impl RawTypingDepsMode {
/// Convert the raw pointer into a Rust value
///
/// # Safety
///
/// Only safe if the OCaml pointer underlying `self` is still valid!
/// You should not use this method if the OCaml runtime has had a chance
/// to run between obtaining `self` and calling this method!
unsafe fn to_rust(self) -> Result<TypingDepsMode, FromError> {
let value: Value<'_> = Value::from_bits(self.0);
TypingDepsMode::from_ocamlrep(value)
}
}
/// Load the graph using the given mode.
///
/// The mode is only used on the first call, to establish some global state, and
/// then ignored for future calls.
///
/// # Safety
///
/// The pointer to the dependency graph mode should still be pointing
/// to a valid OCaml object.
fn load_global_dep_graph(mode: RawTypingDepsMode) -> Result<(), String> {
let mut dep_graph_guard = DEP_GRAPH.lock();
if dep_graph_guard.is_none() {
let mode = unsafe { mode.to_rust().unwrap() };
let dep_graph: Result<Option<DepGraph>, String> = match mode {
TypingDepsMode::InMemoryMode(None)
| TypingDepsMode::SaveToDiskMode {
graph: None,
new_edges_dir: _,
human_readable_dep_map_dir: _,
} => {
// Enabled, but we don't have a saved-state, so we can't open it
Ok(None)
}
TypingDepsMode::InMemoryMode(Some(depgraph_fn))
| TypingDepsMode::SaveToDiskMode {
graph: Some(depgraph_fn),
new_edges_dir: _,
human_readable_dep_map_dir: _,
} => {
// We are opening and initializing the dep graph while holding onto the mutex...
// Which typically isn't great, but since OCaml is single-threaded, it's ok.
let depgraph = DepGraph::from_path(depgraph_fn)
.map_err(|err| format!("could not open dep graph file: {:?}", err))?;
Ok(Some(depgraph))
}
TypingDepsMode::HhFanoutRustMode { hh_fanout: _ } => {
// HhFanoutRustMode doesn't load the dep graph this way.
// This path shouldn't be reached.
unimplemented!()
}
};
*dep_graph_guard = dep_graph?;
}
Ok(())
}
pub fn replace_dep_graph(mode: RawTypingDepsMode) -> Result<(), String> {
let mut dep_graph_guard = DEP_GRAPH.lock();
// # Safety
//
// The pointer to the dependency graph mode should still be pointing
// to a valid OCaml object.
let mode = unsafe { mode.to_rust().unwrap() };
let dep_graph: Result<Option<DepGraph>, String> = match mode {
TypingDepsMode::InMemoryMode(None)
| TypingDepsMode::SaveToDiskMode {
graph: None,
new_edges_dir: _,
human_readable_dep_map_dir: _,
} => {
// Enabled, but we don't have a saved-state, so we can't open it
Ok(None)
}
TypingDepsMode::InMemoryMode(Some(depgraph_fn))
| TypingDepsMode::SaveToDiskMode {
graph: Some(depgraph_fn),
new_edges_dir: _,
human_readable_dep_map_dir: _,
} => {
// We are opening and initializing the dep graph while holding onto the mutex...
// Which typically isn't great, but since OCaml is single-threaded, it's ok.
let depgraph = DepGraph::from_path(depgraph_fn)
.map_err(|err| format!("could not open dep graph file: {:?}", err))?;
Ok(Some(depgraph))
}
TypingDepsMode::HhFanoutRustMode { hh_fanout: _ } => {
// HhFanoutRustMode doesn't load the dep graph this way.
// This path shouldn't be reached.
unimplemented!()
}
};
*dep_graph_guard = dep_graph?;
Ok(())
}
/// Override the loaded dep graph.
///
/// # Panics
///
/// Panics if the graph is not loaded, and custom mode was not enabled.
///
/// Panics if the graph is not yet loaded, and opening
/// the graph results in an error.
///
/// # Safety
///
/// The pointer to the dependency graph mode should still be pointing
/// to a valid OCaml object.
pub fn dep_graph_override(mode: RawTypingDepsMode) {
replace_dep_graph(mode).unwrap();
}
/// Run the closure with the loaded dep graph. If the custom dep graph
/// mode was enabled without a saved-state, return the passed default
/// value.
///
/// # Panics
///
/// Panics if the graph is not loaded, and custom mode was not enabled.
///
/// Panics if the graph is not yet loaded, and opening
/// the graph results in an error.
///
/// # Safety
///
/// The pointer to the dependency graph mode should still be pointing
/// to a valid OCaml object.
pub fn dep_graph_with_default<F, R>(mode: RawTypingDepsMode, default: R, f: F) -> R
where
F: FnOnce(&DepGraph) -> R,
{
load_global_dep_graph(mode).unwrap();
DEP_GRAPH.lock().as_ref().map_or(default, f)
}
/// Run the closure with the loaded dep graph. If the custom dep graph
/// mode was enabled without a saved-state, the closure is run without
/// a dep graph.
///
/// The mode is only used on the first call, to establish some global state, and
/// then ignored for future calls.
///
/// # Panics
///
/// Panics if the graph is not loaded, and custom mode was not enabled.
///
/// Panics if the graph is not yet loaded, and opening
/// the graph results in an error.
///
/// # Safety
///
/// The pointer to the dependency graph mode should still be pointing
/// to a valid OCaml object.
pub fn dep_graph_with_option<F, R>(mode: RawTypingDepsMode, f: F) -> R
where
F: FnOnce(Option<&DepGraph>) -> R,
{
load_global_dep_graph(mode).unwrap();
f(DEP_GRAPH.lock().as_ref())
}
pub fn dep_graph_delta_with_cell<R>(f: impl FnOnce(&Mutex<DepGraphDelta>) -> R) -> R {
/// The dependency graph delta.
///
/// Even though this is only used in a single-threaded context (from OCaml)
/// we wrap it in a `Mutex` to ensure safety.
static DEP_GRAPH_DELTA: OnceCell<Mutex<DepGraphDelta>> = OnceCell::new();
f(DEP_GRAPH_DELTA.get_or_init(Default::default))
}
/// Run the closure with the dep graph delta.
///
/// # Panics
///
/// When another reference to the delta is still active, but that
/// isn't likely, given that we only have one thread, and the
/// `with`/`with_mut` auxiliary functions disallow the reference
/// to escape.
pub fn dep_graph_delta_with<R>(f: impl FnOnce(&DepGraphDelta) -> R) -> R {
dep_graph_delta_with_cell(|cell| f(&cell.lock()))
}
/// Run the closure with the mutable dep graph delta.
///
/// # Panics
///
/// See `with`
pub fn dep_graph_delta_with_mut<R>(f: impl FnOnce(&mut DepGraphDelta) -> R) -> R {
dep_graph_delta_with_cell(|cell| f(&mut cell.lock()))
}
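// For example (an illustrative sketch; nothing in this file calls it exactly
// this way): recording a newly discovered edge would look like
// `dep_graph_delta_with_mut(|delta| delta.insert(dependent, dependency))`,
// and resetting the delta like `dep_graph_delta_with_mut(|delta| delta.clear())`.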
/// Rust set of dependencies that can be transferred from
/// OCaml to Rust memory.
#[derive(Debug, Eq, PartialEq)]
pub struct DepSet(HashTrieSet<Dep>);
impl std::ops::Deref for DepSet {
type Target = HashTrieSet<Dep>;
fn deref(&self) -> &HashTrieSet<Dep> {
&self.0
}
}
impl From<HashTrieSet<Dep>> for DepSet {
fn from(x: HashTrieSet<Dep>) -> Self {
Self(x)
}
}
impl CamlSerialize for DepSet {
caml_serialize_default_impls!();
fn serialize(&self) -> Vec<u8> {
let num_elems = self.size();
let mut buf = Vec::with_capacity(std::mem::size_of::<u64>() * num_elems);
for &x in self.iter() {
let x: u64 = x.into();
buf.write_all(&x.to_le_bytes()).unwrap();
}
buf
}
fn deserialize(data: &[u8]) -> Self {
const U64_SIZE: usize = std::mem::size_of::<u64>();
let num_elems = data.len() / U64_SIZE;
let max_index = num_elems * U64_SIZE;
let mut s: HashTrieSet<Dep> = HashTrieSet::new();
let mut index = 0;
while index < max_index {
let x = u64::from_le_bytes(data[index..index + U64_SIZE].try_into().unwrap());
s.insert_mut(Dep::new(x));
index += U64_SIZE;
}
s.into()
}
}
impl DepSet {
/// Returns the union of two sets.
///
/// The underlying data structure does not implement union. So let's
/// implement it here.
pub fn union(&self, other: &Self) -> Self {
// `HashTrieSet`'s insert is O(1) on average, O(n) worst-case, so let's
// make sure we loop over the smaller set.
//
// Note that the sizes of the arguments are expected to be
// very skewed.
let (bigger, smaller) = if self.size() > other.size() {
(self, other)
} else {
(other, self)
};
let mut bigger = bigger.0.clone();
for dep in smaller.iter() {
bigger.insert_mut(*dep);
}
bigger.into()
}
/// Returns the intersection of two sets.
///
/// The underlying data structure does not implement intersection. So let's
/// implement it here.
pub fn intersect(&self, other: &Self) -> Self {
// Let's make sure we loop over the smaller set.
let (bigger, smaller) = if self.size() > other.size() {
(self, other)
} else {
(other, self)
};
let mut result = HashTrieSet::new();
for dep in smaller.iter() {
if bigger.contains(dep) {
result.insert_mut(*dep);
}
}
result.into()
}
/// Returns the difference of two sets, i.e. all elements in the first
/// set but not in the second set.
///
/// The underlying data structure does not implement difference. So let's
/// implement it here.
pub fn difference(&self, other: &Self) -> Self {
let mut result = self.0.clone();
// Let's make sure we loop over the smaller set.
if self.size() < other.size() {
for dep in self.iter() {
if other.contains(dep) {
result.remove_mut(dep);
}
}
} else {
for dep in other.iter() {
result.remove_mut(dep);
}
}
result.into()
}
}
/// Rust set of visited hashes
#[derive(Debug)]
pub struct VisitedSet(RefCell<HashSet<Dep>>);
impl std::ops::Deref for VisitedSet {
type Target = RefCell<HashSet<Dep>>;
fn deref(&self) -> &RefCell<HashSet<Dep>> {
&self.0
}
}
impl From<RefCell<HashSet<Dep>>> for VisitedSet {
fn from(x: RefCell<HashSet<Dep>>) -> Self {
Self(x)
}
}
impl CamlSerialize for VisitedSet {
caml_serialize_default_impls!();
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_dep_set_serialize() {
let mut x: HashTrieSet<Dep> = HashTrieSet::new();
x.insert_mut(Dep::new(1));
x.insert_mut(Dep::new(2));
let x: DepSet = x.into();
let buf = x.serialize();
let y = DepSet::deserialize(&buf);
assert_eq!(x, y);
}
#[test]
fn test_dep_set_union() {
let s = |x: &[u64]| DepSet::from(HashTrieSet::from_iter(x.iter().copied().map(Dep::new)));
assert_eq!(s(&[4, 7]).union(&s(&[1, 4, 3])), s(&[1, 4, 3, 7]));
assert_eq!(s(&[1, 4, 3]).union(&s(&[4, 7])), s(&[1, 4, 3, 7]));
}
#[test]
fn test_dep_set_inter() {
let s = |x: &[u64]| DepSet::from(HashTrieSet::from_iter(x.iter().copied().map(Dep::new)));
assert_eq!(s(&[4, 7]).intersect(&s(&[1, 4, 3])), s(&[4]));
assert_eq!(s(&[1, 4, 3]).intersect(&s(&[4, 7])), s(&[4]));
}
#[test]
fn test_dep_set_diff() {
let s = |x: &[u64]| DepSet::from(HashTrieSet::from_iter(x.iter().copied().map(Dep::new)));
assert_eq!(s(&[4, 7]).difference(&s(&[1, 4, 3, 9, 8, 10])), s(&[7]));
assert_eq!(
s(&[1, 4, 3, 9, 8, 10]).difference(&s(&[4, 11])),
s(&[1, 3, 9, 8, 10])
);
}
} |
Rust | hhvm/hphp/hack/src/deps/hh_fanout_rust/hh_fanout_rust_ffi.rs | // Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use std::path::PathBuf;
use dep::Dep;
use ocamlrep_custom::Custom;
#[cfg(fbcode_build)]
pub struct HhFanoutRustFfi(std::cell::RefCell<Box<dyn hh_fanout_api::HhFanoutEdgeAccumulator>>);
#[cfg(not(fbcode_build))]
pub struct HhFanoutRustFfi;
impl ocamlrep_custom::CamlSerialize for HhFanoutRustFfi {
ocamlrep_custom::caml_serialize_default_impls!();
}
#[cfg(fbcode_build)]
ocamlrep_ocamlpool::ocaml_ffi! {
fn hh_fanout_ffi_make(logger: Custom<file_scuba_logger_ffi::FileScubaLoggerFfi>, fanout_state_dir: PathBuf, decl_state_dir: PathBuf) -> Custom<HhFanoutRustFfi> {
let hh_decl = Box::new(hh_decl_shmem::DeclShmem::new(logger.0.clone(), decl_state_dir));
let hh_fanout = hh_fanout_lib::HhFanoutImpl::new(logger.0.clone(), fanout_state_dir, hh_decl);
Custom::from(HhFanoutRustFfi(std::cell::RefCell::new(Box::new(hh_fanout))))
}
fn hh_fanout_ffi_make_hhdg_builder(logger: Custom<file_scuba_logger_ffi::FileScubaLoggerFfi>, builder_state_dir: PathBuf) -> Custom<HhFanoutRustFfi> {
let hhdg_builder = hhdg_builder::HhdgBuilder::new(logger.0.clone(), builder_state_dir);
Custom::from(HhFanoutRustFfi(std::cell::RefCell::new(Box::new(hhdg_builder))))
}
// Each edge is a tuple of (dependency, dependent).
fn hh_fanout_ffi_add_idep_batch(hh_fanout: Custom<HhFanoutRustFfi>, edges: Vec<(Dep, Dep)>) {
// TODO: the conversion of Vec<(Dep, Dep)> to DepGraphEdge probably
// isn't too efficient. The construction of the Rust Vec from OCaml list
// might also be inefficient. Left for later optimization.
if let Err(err) = hh_fanout.0.borrow_mut().commit_edges(
edges.into_iter().map(|(dependency, dependent)| hh24_types::DepGraphEdge {
dependency: hh24_types::DependencyHash(dependency.into()),
dependent: hh24_types::ToplevelSymbolHash::from_u64(dependent.into()) }).collect()
) {
eprintln!("Error: {err}");
todo!("deal with hh errors like checksum mismatch");
};
}
}
#[cfg(not(fbcode_build))]
// This FFI only works for fbcode builds at the moment, due to trickiness with
// dune working with cargo and not playing well with some dependencies.
ocamlrep_ocamlpool::ocaml_ffi! {
fn hh_fanout_ffi_make(_fanout_state_dir: PathBuf, _decl_state_dir: PathBuf) -> Custom<HhFanoutRustFfi> {
unimplemented!()
}
fn hh_fanout_ffi_make_hhdg_builder(_builder_state_dir: PathBuf) -> Custom<HhFanoutRustFfi> {
unimplemented!()
}
fn hh_fanout_ffi_add_idep_batch(_hh_fanout: Custom<HhFanoutRustFfi>, _edges: Vec<(Dep, Dep)>) {
unimplemented!()
}
} |
Rust | hhvm/hphp/hack/src/deps/rust/file_info.rs | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
//
// @generated <<SignedSource::*O*zOeWoEQle#+L!plEphiEmie@IsG>>
//
// To regenerate this file, run:
// hphp/hack/src/oxidized_regen.sh
use arena_trait::TrivialDrop;
use eq_modulo_pos::EqModuloPos;
use no_pos_hash::NoPosHash;
use ocamlrep::FromOcamlRep;
use ocamlrep::FromOcamlRepIn;
use ocamlrep::ToOcamlRep;
use ocamlrep_caml_builtins::Int64;
use serde::Deserialize;
use serde::Serialize;
#[allow(unused_imports)]
use crate::*;
pub use prim_defs::*;
#[derive(
Clone,
Copy,
Debug,
Deserialize,
Eq,
EqModuloPos,
FromOcamlRep,
FromOcamlRepIn,
Hash,
NoPosHash,
Ord,
PartialEq,
PartialOrd,
Serialize,
ToOcamlRep,
)]
#[rust_to_ocaml(attr = "deriving (eq, hash, show, enum, ord, sexp_of)")]
#[repr(u8)]
pub enum Mode {
/// just declare signatures, don't check anything
Mhhi,
/// check everything!
Mstrict,
}
impl TrivialDrop for Mode {}
arena_deserializer::impl_deserialize_in_arena!(Mode);
#[derive(
Clone,
Copy,
Debug,
Deserialize,
Eq,
FromOcamlRep,
FromOcamlRepIn,
Hash,
NoPosHash,
Ord,
PartialEq,
PartialOrd,
Serialize,
ToOcamlRep,
)]
#[rust_to_ocaml(attr = "deriving (eq, (show { with_path = false }), enum, ord)")]
#[repr(u8)]
pub enum NameType {
Fun = 3,
Class = 0,
Typedef = 1,
Const = 4,
Module = 5,
}
impl TrivialDrop for NameType {}
arena_deserializer::impl_deserialize_in_arena!(NameType);
/// We define two types of positions establishing the location of a given name:
/// a Full position contains the exact position of a name in a file, and a
/// File position contains just the file and the type of toplevel entity,
/// allowing us to lazily retrieve the name's exact location if necessary.
#[derive(
Clone,
Debug,
Deserialize,
Eq,
FromOcamlRep,
Hash,
NoPosHash,
Ord,
PartialEq,
PartialOrd,
Serialize,
ToOcamlRep,
)]
#[rust_to_ocaml(attr = "deriving (eq, show)")]
#[repr(C, u8)]
pub enum Pos {
Full(pos::Pos),
File(NameType, std::sync::Arc<relative_path::RelativePath>),
}
/// An id contains a pos, name and an optional decl hash. The decl hash is None
/// only in the case when we didn't compute it for performance reasons
#[derive(
Clone,
Debug,
Deserialize,
Eq,
FromOcamlRep,
Hash,
NoPosHash,
Ord,
PartialEq,
PartialOrd,
Serialize,
ToOcamlRep,
)]
#[rust_to_ocaml(attr = "deriving (eq, show)")]
#[repr(C)]
pub struct Id(pub Pos, pub String, pub Option<Int64>);
#[derive(
Clone,
Debug,
Deserialize,
Eq,
EqModuloPos,
FromOcamlRep,
Hash,
NoPosHash,
Ord,
PartialEq,
PartialOrd,
Serialize,
ToOcamlRep,
)]
#[rust_to_ocaml(attr = "deriving eq")]
#[repr(C)]
pub struct HashType(pub Option<Int64>);
/// The record produced by the parsing phase.
#[derive(
Clone,
Debug,
Deserialize,
Eq,
FromOcamlRep,
Hash,
NoPosHash,
Ord,
PartialEq,
PartialOrd,
Serialize,
ToOcamlRep,
)]
#[rust_to_ocaml(attr = "deriving show")]
#[repr(C)]
pub struct FileInfo {
pub hash: HashType,
pub file_mode: Option<Mode>,
pub funs: Vec<Id>,
pub classes: Vec<Id>,
pub typedefs: Vec<Id>,
pub consts: Vec<Id>,
pub modules: Vec<Id>,
/// None if loaded from saved state
pub comments: Option<Vec<(pos::Pos, Comment)>>,
}
/// The simplified record used after parsing.
#[derive(
Clone,
Debug,
Deserialize,
Eq,
FromOcamlRep,
Hash,
NoPosHash,
Ord,
PartialEq,
PartialOrd,
Serialize,
ToOcamlRep,
)]
#[rust_to_ocaml(attr = "deriving show")]
#[rust_to_ocaml(prefix = "n_")]
#[repr(C)]
pub struct Names {
pub funs: s_set::SSet,
pub classes: s_set::SSet,
pub types: s_set::SSet,
pub consts: s_set::SSet,
pub modules: s_set::SSet,
}
/// The simplified record stored in saved-state.
#[derive(
Clone,
Debug,
Deserialize,
Eq,
FromOcamlRep,
Hash,
NoPosHash,
Ord,
PartialEq,
PartialOrd,
Serialize,
ToOcamlRep,
)]
#[rust_to_ocaml(prefix = "sn_")]
#[repr(C)]
pub struct SavedNames {
pub funs: s_set::SSet,
pub classes: s_set::SSet,
pub types: s_set::SSet,
pub consts: s_set::SSet,
pub modules: s_set::SSet,
}
#[derive(
Clone,
Debug,
Deserialize,
Eq,
FromOcamlRep,
Hash,
NoPosHash,
Ord,
PartialEq,
PartialOrd,
Serialize,
ToOcamlRep,
)]
#[repr(C)]
pub struct Diff {
pub removed_funs: s_set::SSet,
pub added_funs: s_set::SSet,
pub removed_classes: s_set::SSet,
pub added_classes: s_set::SSet,
pub removed_types: s_set::SSet,
pub added_types: s_set::SSet,
pub removed_consts: s_set::SSet,
pub added_consts: s_set::SSet,
pub removed_modules: s_set::SSet,
pub added_modules: s_set::SSet,
} |
Rust | hhvm/hphp/hack/src/deps/rust/file_info_lib.rs | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
pub(crate) use rc_pos as pos;
mod file_info;
pub use file_info::*;
pub mod prim_defs;
pub use prim_defs::*;
mod s_set {
pub type SSet = std::collections::BTreeSet<String>;
}
use naming_types::KindOfType;
use relative_path::RelativePath;
use rusqlite::types::FromSql;
use rusqlite::types::FromSqlError;
use rusqlite::types::FromSqlResult;
use rusqlite::types::ValueRef;
impl From<Mode> for parser_core_types::FileMode {
fn from(mode: Mode) -> Self {
match mode {
Mode::Mhhi => Self::Hhi,
Mode::Mstrict => Self::Strict,
}
}
}
impl From<parser_core_types::FileMode> for Mode {
fn from(mode: parser_core_types::FileMode) -> Self {
match mode {
parser_core_types::FileMode::Hhi => Self::Mhhi,
parser_core_types::FileMode::Strict => Self::Mstrict,
}
}
}
impl std::cmp::PartialEq<parser_core_types::FileMode> for Mode {
fn eq(&self, other: &parser_core_types::FileMode) -> bool {
self.eq(&Self::from(*other))
}
}
impl std::cmp::PartialEq<Mode> for parser_core_types::FileMode {
fn eq(&self, other: &Mode) -> bool {
self.eq(&Self::from(*other))
}
}
impl Pos {
pub fn path(&self) -> &RelativePath {
match self {
Pos::Full(pos) => pos.filename(),
Pos::File(_, path) => path,
}
}
}
impl From<KindOfType> for NameType {
fn from(kind: KindOfType) -> Self {
match kind {
KindOfType::TClass => NameType::Class,
KindOfType::TTypedef => NameType::Typedef,
}
}
}
#[derive(Copy, Clone, Debug, thiserror::Error)]
#[error("Expected type kind, but got: {0:?}")]
pub struct FromNameTypeError(NameType);
impl TryFrom<NameType> for KindOfType {
type Error = FromNameTypeError;
fn try_from(name_type: NameType) -> Result<Self, Self::Error> {
match name_type {
NameType::Class => Ok(KindOfType::TClass),
NameType::Typedef => Ok(KindOfType::TTypedef),
_ => Err(FromNameTypeError(name_type)),
}
}
}
impl From<NameType> for typing_deps_hash::DepType {
fn from(name_type: NameType) -> Self {
match name_type {
NameType::Fun => Self::Fun,
NameType::Class => Self::Type,
NameType::Typedef => Self::Type,
NameType::Const => Self::GConst,
NameType::Module => Self::Module,
}
}
}
impl FromSql for NameType {
fn column_result(value: ValueRef<'_>) -> FromSqlResult<Self> {
match value {
ValueRef::Integer(i) => {
if i == NameType::Fun as i64 {
Ok(NameType::Fun)
} else if i == NameType::Const as i64 {
Ok(NameType::Const)
} else if i == NameType::Class as i64 {
Ok(NameType::Class)
} else if i == NameType::Typedef as i64 {
Ok(NameType::Typedef)
} else if i == NameType::Module as i64 {
Ok(NameType::Module)
} else {
Err(FromSqlError::OutOfRange(i))
}
}
_ => Err(FromSqlError::InvalidType),
}
}
}
impl rusqlite::ToSql for NameType {
fn to_sql(&self) -> rusqlite::Result<rusqlite::types::ToSqlOutput<'_>> {
Ok(rusqlite::types::ToSqlOutput::from(*self as i64))
}
}
impl FileInfo {
pub fn get_ids(&self) -> Vec<(NameType, Id)> {
let FileInfo {
hash: _,
file_mode: _,
comments: _,
funs,
classes,
typedefs,
consts,
modules,
} = self;
funs.iter()
.map(|id| (NameType::Fun, id.clone()))
.chain(classes.iter().map(|id| (NameType::Class, id.clone())))
.chain(typedefs.iter().map(|id| (NameType::Typedef, id.clone())))
.chain(consts.iter().map(|id| (NameType::Const, id.clone())))
.chain(modules.iter().map(|id| (NameType::Module, id.clone())))
.collect()
}
} |
Rust | hhvm/hphp/hack/src/deps/rust/prim_defs.rs | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
//
// @generated <<SignedSource::*O*zOeWoEQle#+L!plEphiEmie@IsG>>
//
// To regenerate this file, run:
// hphp/hack/src/oxidized_regen.sh
use eq_modulo_pos::EqModuloPos;
use no_pos_hash::NoPosHash;
use ocamlrep::FromOcamlRep;
use ocamlrep::ToOcamlRep;
use serde::Deserialize;
use serde::Serialize;
#[allow(unused_imports)]
use crate::*;
#[derive(
Clone,
Debug,
Deserialize,
Eq,
EqModuloPos,
FromOcamlRep,
Hash,
NoPosHash,
Ord,
PartialEq,
PartialOrd,
Serialize,
ToOcamlRep,
)]
#[rust_to_ocaml(attr = "deriving (eq, show)")]
#[repr(C, u8)]
pub enum Comment {
CmtLine(String),
CmtBlock(String),
} |
TOML | hhvm/hphp/hack/src/deps/rust/file_info/Cargo.toml | # @generated by autocargo
[package]
name = "file_info"
version = "0.0.0"
edition = "2021"
[lib]
path = "../file_info_lib.rs"
[dependencies]
arena_deserializer = { version = "0.0.0", path = "../../../utils/arena_deserializer" }
arena_trait = { version = "0.0.0", path = "../../../arena_trait" }
eq_modulo_pos = { version = "0.0.0", path = "../../../utils/eq_modulo_pos" }
naming_types = { version = "0.0.0", path = "../../../naming/rust/naming_types" }
no_pos_hash = { version = "0.0.0", path = "../../../utils/no_pos_hash" }
ocamlrep = { version = "0.1.0", git = "https://github.com/facebook/ocamlrep/", branch = "main" }
ocamlrep_caml_builtins = { version = "0.1.0", git = "https://github.com/facebook/ocamlrep/", branch = "main" }
parser_core_types = { version = "0.0.0", path = "../../../parser/cargo/core_types" }
rc_pos = { version = "0.0.0", path = "../../../utils/rust/pos" }
relative_path = { version = "0.0.0", path = "../../../utils/rust/relative_path" }
rusqlite = { version = "0.29.0", features = ["backup", "blob", "column_decltype", "limits"] }
serde = { version = "1.0.176", features = ["derive", "rc"] }
thiserror = "1.0.43"
typing_deps_hash = { version = "0.0.0", path = "../../cargo/typing_deps_hash" } |
OCaml | hhvm/hphp/hack/src/deps/utils/dep_hash_to_symbol.ml | (*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the "hack" directory of this source tree.
*
*)
open Hh_prelude
open Typing_deps
let report_collision hash sym1 sym2 =
failwith
(Printf.sprintf
"Hash collision detected! Hash: %s; Symbols: %s, %s"
(Dep.to_hex_string hash)
(Dep.variant_to_string sym1)
(Dep.variant_to_string sym2))
let from_nast (nast : Nast.program) : _ Dep.variant DepMap.t =
let mapping variant = DepMap.singleton (Dep.make variant) variant in
let mappings variants =
List.fold variants ~init:DepMap.empty ~f:(fun map variant ->
let dep = Dep.make variant in
DepMap.add ~combine:(report_collision dep) dep variant map)
in
let visitor =
object (this)
inherit [_] Aast.reduce as super
method zero = DepMap.empty
method plus = DepMap.union ~combine:report_collision
method! on_fun_def env fd =
this#plus
(mapping (Dep.Fun (snd fd.Aast.fd_name)))
(super#on_fun_def env fd)
method! on_method_ cls x =
this#plus
(mapping
(if x.Aast.m_static then
Dep.SMethod (Option.value_exn cls, snd x.Aast.m_name)
else
Dep.Method (Option.value_exn cls, snd x.Aast.m_name)))
(super#on_method_ cls x)
method! on_class_ _cls x =
this#plus
(mappings
[
Dep.Type (snd x.Aast.c_name);
Dep.Constructor (snd x.Aast.c_name);
Dep.Extends (snd x.Aast.c_name);
Dep.AllMembers (snd x.Aast.c_name);
])
(super#on_class_ (Some (snd x.Aast.c_name)) x)
method! on_class_const cls x =
this#plus
(mapping (Dep.Const (Option.value_exn cls, snd x.Aast.cc_id)))
(super#on_class_const cls x)
method! on_class_typeconst_def cls x =
this#plus
(mapping (Dep.Const (Option.value_exn cls, snd x.Aast.c_tconst_name)))
(super#on_class_typeconst_def cls x)
method! on_class_var cls x =
this#plus
(mapping
(if x.Aast.cv_is_static then
Dep.SProp (Option.value_exn cls, snd x.Aast.cv_id)
else
Dep.Prop (Option.value_exn cls, snd x.Aast.cv_id)))
(super#on_class_var cls x)
method! on_typedef _cls x =
this#plus
(mapping (Dep.Type (snd x.Aast.t_name)))
(super#on_typedef (Some (snd x.Aast.t_name)) x)
method! on_gconst cls x =
this#plus
(mapping (Dep.GConst (snd x.Aast.cst_name)))
(super#on_gconst cls x)
end
in
visitor#on_program None nast
let from_nasts nasts =
List.fold nasts ~init:DepMap.empty ~f:(fun acc nast ->
DepMap.union ~combine:report_collision (from_nast nast) acc)
let dump nast =
let map = from_nast nast in
DepMap.iter
(fun dep variant ->
Printf.printf
"%s %s\n"
(Dep.to_hex_string dep)
(Dep.variant_to_string variant))
map |
OCaml Interface | hhvm/hphp/hack/src/deps/utils/dep_hash_to_symbol.mli | (*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the "hack" directory of this source tree.
*
*)
val from_nasts :
Nast.program list ->
Typing_deps.Dep.dependency Typing_deps.Dep.variant Typing_deps.DepMap.t
val dump : Nast.program -> unit |
hhvm/hphp/hack/src/deps/utils/dune | (library
(name dep_hash_to_symbol)
(modules dep_hash_to_symbol)
(libraries
annotated_ast
core
nast
typing_deps)) |
|
OCaml | hhvm/hphp/hack/src/dfind/dfindAddFile.ml | (*
* Copyright (c) 2015, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the "hack" directory of this source tree.
*
*)
(*****************************************************************************)
(* Adds a new file or directory to the environment *)
(*****************************************************************************)
open Hh_prelude
module Set = Stdlib.Set
open DfindEnv
open DfindMaybe
(*****************************************************************************)
(* helpers *)
(*****************************************************************************)
(* Gives back a set of all the files in the directory.
* A directory handle is of type Unix.dir_handle, it is the result of
* a call to Unix.opendir. Not to be confused with dfind handles.
* The path argument is useful because we want this function to give us
* "full" paths. If I am in the directory "/tmp/bla" and I iterate
* over the elements of the directory, the result I want is:
* /tmp/bla/file1
* /tmp/bla/file2
* As opposed to:
* file1
* file2
*)
let get_files path dir_handle =
let paths = ref SSet.empty in
try
while true do
let file = Unix.readdir dir_handle in
if String.(file = "." || file = "..") then
()
else
let path = Filename.concat path file in
paths := SSet.add path !paths
done;
assert false
with
| _ -> !paths
(* Gets rid of the '/' or '\' at the end of a directory name *)
let normalize path =
let size = String.length path in
if String.equal (Char.escaped path.[size - 1]) Filename.dir_sep then
String.sub path ~pos:0 ~len:(size - 1)
else
path
(*****************************************************************************)
(* The entry point
* 1) We add a watch to the entry point + all the sub elements of the entry
* point when it is a directory
*
* 2) We add all the files conservatively to the TimeTree. That is, files
* are never removed. If you want them to be removed reboot the server.
* It is much more complicated to try to keep an accurate view of the state
* of the world. I leave that to smarter people than me.
*
* 3) All the operations are performed in the maybe monad, so that we never
* fail. Any operation could fail, because files could be removed while
* we are working on them.
*
*)
(*****************************************************************************)
module ISet = Set.Make (struct
type t = int
let compare = compare
end)
(* This used to be an environment variable, but it is too complicated
* for now. Hardcoding! Yay!
*)
let blacklist =
List.map
~f:Str.regexp
[".*/wiki/images/.*"; ".*/\\.git"; ".*/\\.svn"; ".*/\\.hg"]
let is_blacklisted path =
try
List.iter blacklist ~f:(fun re ->
if Str.string_match re path 0 then
raise Exit
else
());
false
with
| Exit -> true
let rec add_file links env path =
let path = normalize path in
match is_blacklisted path with
| true -> return ()
| false when not (SSet.mem path env.new_files) -> add_new_file links env path
| _ -> return ()
and add_watch links env path =
call (add_fsnotify_watch env) path >>= function
| None -> return ()
| Some _watch -> add_file links env path
and add_fsnotify_watch env path = return (Fsnotify.add_watch env.fsnotify path)
and add_new_file links env path =
let time = Time.get () in
env.files <- TimeFiles.add (time, path) env.files;
env.new_files <- SSet.add path env.new_files;
call (wrap Unix.lstat) path >>= fun ({ Unix.st_kind = kind; _ } as st) ->
if ISet.mem st.Unix.st_ino links then
return ()
else
let links = ISet.add st.Unix.st_ino links in
match kind with
| Unix.S_LNK when ISet.mem st.Unix.st_ino links -> return ()
| Unix.S_LNK -> return ()
(* TODO add an option to support symlinks *)
(* call (wrap Unix.readlink) path >>= add_file links env *)
| Unix.S_DIR ->
call (add_watch links env) path >>= fun () ->
call (wrap Unix.opendir) path >>= fun dir_handle ->
let files = get_files path dir_handle in
SSet.iter (fun x -> ignore (add_file links env x)) files;
(try Unix.closedir dir_handle with
| _ -> ());
let prev_files =
match SMap.find_opt path env.dirs with
| Some files -> files
| None -> SSet.empty
in
let prev_files = SSet.union files prev_files in
let files =
SSet.fold
begin
fun file all_files ->
match SMap.find_opt file env.dirs with
| Some sub_dir -> SSet.union sub_dir all_files
| None -> SSet.add file all_files
end
files
prev_files
in
env.dirs <- SMap.add path files env.dirs;
return ()
| _ -> return ()
(* This is the only thing we want to expose *)
let path env x = ignore (add_file ISet.empty env x) |
OCaml Interface | hhvm/hphp/hack/src/dfind/dfindAddFile.mli | (*
* Copyright (c) 2015, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the "hack" directory of this source tree.
*
*)
(*****************************************************************************)
(* Adds a new file or directory to the environment *)
(*****************************************************************************)
val path : DfindEnv.t -> string -> unit
(*****************************************************************************)
(* Find all the files in a directory *)
(*****************************************************************************)
val get_files : string -> Unix.dir_handle -> SSet.t |
OCaml | hhvm/hphp/hack/src/dfind/dfindEnv.ml | (*
* Copyright (c) 2015, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the "hack" directory of this source tree.
*
*)
(*****************************************************************************)
(* The environment shared by everyone *)
(*****************************************************************************)
module Time = struct
type t = int
let counter = ref 0
let get () =
incr counter;
!counter
let compare = Int.compare
(* The beginning of times *)
let bot = 0
let to_string x = string_of_int x
end
module TimeFiles = MonoidAvl.Make (struct
(* Timestamp + filename *)
type elt = Time.t * string
let compare (_, x) (_, y) = String.compare x y
type monoelt = Time.t
let neutral = Time.bot
let make = fst
let compose = max
end)
type t = {
(* The fsnotify environment, we use this for interacting with fsnotify *)
fsnotify: Fsnotify.env;
(* The set of files with their timestamp *)
mutable files: TimeFiles.t;
(* The set of new files (files created during an event) *)
mutable new_files: SSet.t;
(* The directories (and the files they contain) *)
mutable dirs: SSet.t SMap.t;
}
(*****************************************************************************)
(* Building the original environment, this call is called only once
* by the server (cf dfindServer.ml)
*)
(*****************************************************************************)
let make roots =
let fsnotify = Fsnotify.init roots in
{
fsnotify;
files = TimeFiles.empty;
new_files = SSet.empty;
dirs = SMap.empty;
} |
OCaml Interface | hhvm/hphp/hack/src/dfind/dfindEnv.mli | (*
* Copyright (c) 2015, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the "hack" directory of this source tree.
*
*)
(*****************************************************************************)
(* The environment shared by everyone *)
(*****************************************************************************)
(* This is in fact a fake time module, we don't want to use the "real"
* unix timestamps, because we would run into atomicity problems.
* Problems like, what happens if the file was modified between the moment
* where I checked the time stamp and now etc ...
* So we maintain our own clock. It is incremented by one on every event.
*)
module Time : sig
type t
val get : unit -> t
val compare : t -> t -> int
(* The beginning of times *)
val bot : t
val to_string : t -> string
end
(* Our fancy Avl (cf monoidAvl.ml) *)
module TimeFiles :
MonoidAvl.S with type elt = Time.t * string with type monoelt = Time.t
type t = {
(* The fsnotify environment, we use this for interacting with fsnotify *)
fsnotify: Fsnotify.env;
(* The set of files with their timestamp *)
mutable files: TimeFiles.t;
(* The set of new files (files created during an event) *)
mutable new_files: SSet.t;
(* The directories (and the files they contain) *)
mutable dirs: SSet.t SMap.t;
}
(*****************************************************************************)
(* Building the original environment, this call is called only once
* by the server (cf dfindServer.ml)
*)
(*****************************************************************************)
val make : string list -> t |
OCaml | hhvm/hphp/hack/src/dfind/dfindLib.ml | (*
* Copyright (c) 2015, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the "hack" directory of this source tree.
*
*)
module type MARSHAL_TOOLS = sig
type 'a result
type fd
val return : 'a -> 'a result
val ( >>= ) : 'a result -> ('a -> 'b result) -> 'b result
val descr_of_in_channel : 'a Daemon.in_channel -> fd
val descr_of_out_channel : 'a Daemon.out_channel -> fd
val to_fd_with_preamble :
?timeout:Timeout.t ->
?flags:Marshal.extern_flags list ->
fd ->
'a ->
int result
val from_fd_with_preamble : ?timeout:Timeout.t -> fd -> 'a result
end
module DFindLibFunctor (Marshal_tools : MARSHAL_TOOLS) : sig
type t
val init :
Unix.file_descr * Unix.file_descr * Unix.file_descr ->
string * Path.t list ->
t
val wait_until_ready : t -> unit Marshal_tools.result
val pid : t -> int
val get_changes : ?timeout:Timeout.t -> t -> SSet.t Marshal_tools.result
val stop : t -> unit
end = struct
let ( >>= ) = Marshal_tools.( >>= )
type t = {
infd: Marshal_tools.fd;
outfd: Marshal_tools.fd;
daemon_handle: (DfindServer.msg, unit) Daemon.handle;
}
let init log_fds (scuba_table, roots) =
let name =
Printf.sprintf "file watching process for server %d" (Unix.getpid ())
in
let ({ Daemon.channels = (ic, oc); _ } as daemon_handle) =
Daemon.spawn ~name log_fds DfindServer.entry_point (scuba_table, roots)
in
{
infd = Marshal_tools.descr_of_in_channel ic;
outfd = Marshal_tools.descr_of_out_channel oc;
daemon_handle;
}
let pid handle = handle.daemon_handle.Daemon.pid
let wait_until_ready handle =
Marshal_tools.from_fd_with_preamble handle.infd >>= fun msg ->
assert (msg = DfindServer.Ready);
Marshal_tools.return ()
let request_changes ?timeout handle =
Marshal_tools.to_fd_with_preamble handle.outfd () >>= fun _ ->
Marshal_tools.from_fd_with_preamble ?timeout handle.infd
let get_changes ?timeout daemon =
let rec loop acc =
(request_changes ?timeout daemon >>= function
| DfindServer.Updates s -> Marshal_tools.return s
| DfindServer.Ready -> assert false)
>>= fun diff ->
if SSet.is_empty diff then
Marshal_tools.return acc
else
let acc = SSet.union diff acc in
loop acc
in
loop SSet.empty
let stop handle = Daemon.force_quit handle.daemon_handle
end
module RegularMarshalTools :
MARSHAL_TOOLS with type 'a result = 'a and type fd = Unix.file_descr = struct
include Marshal_tools
type 'a result = 'a
type fd = Unix.file_descr
let return x = x
let ( >>= ) x f = f x
let descr_of_in_channel = Daemon.descr_of_in_channel
let descr_of_out_channel = Daemon.descr_of_out_channel
end
include DFindLibFunctor (RegularMarshalTools) |
OCaml Interface | hhvm/hphp/hack/src/dfind/dfindLib.mli | (*
* Copyright (c) 2015, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the "hack" directory of this source tree.
*
*)
type t
val init :
Unix.file_descr * Unix.file_descr * Unix.file_descr ->
string * Path.t list ->
t
val wait_until_ready : t -> unit
val pid : t -> int
val get_changes : ?timeout:Timeout.t -> t -> SSet.t
val stop : t -> unit
module type MARSHAL_TOOLS = sig
type 'a result
type fd
val return : 'a -> 'a result
val ( >>= ) : 'a result -> ('a -> 'b result) -> 'b result
val descr_of_in_channel : 'a Daemon.in_channel -> fd
val descr_of_out_channel : 'a Daemon.out_channel -> fd
val to_fd_with_preamble :
?timeout:Timeout.t ->
?flags:Marshal.extern_flags list ->
fd ->
'a ->
int result
val from_fd_with_preamble : ?timeout:Timeout.t -> fd -> 'a result
end
module DFindLibFunctor (Marshal_tools : MARSHAL_TOOLS) : sig
type t
val init :
Unix.file_descr * Unix.file_descr * Unix.file_descr ->
string * Path.t list ->
t
val wait_until_ready : t -> unit Marshal_tools.result
val pid : t -> int
val get_changes : ?timeout:Timeout.t -> t -> SSet.t Marshal_tools.result
val stop : t -> unit
end |
OCaml | hhvm/hphp/hack/src/dfind/dfindLibLwt.ml | (*
* Copyright (c) 2015, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the "hack" directory of this source tree.
*
*)
module MarshalToolsLwt :
DfindLib.MARSHAL_TOOLS
with type 'a result = 'a Lwt.t
and type fd = Lwt_unix.file_descr = struct
type 'a result = 'a Lwt.t
type fd = Lwt_unix.file_descr
let return = Lwt.return
let ( >>= ) = Lwt.( >>= )
let descr_of_in_channel ic =
Lwt_unix.of_unix_file_descr
~blocking:false
~set_flags:true
(Daemon.descr_of_in_channel ic)
let descr_of_out_channel oc =
Lwt_unix.of_unix_file_descr
~blocking:false
~set_flags:true
(Daemon.descr_of_out_channel oc)
let to_fd_with_preamble ?timeout ?flags fd v =
if timeout <> None then raise (Invalid_argument "Use lwt timeouts directly");
Marshal_tools_lwt.to_fd_with_preamble ?flags fd v
let from_fd_with_preamble ?timeout fd =
if timeout <> None then raise (Invalid_argument "Use lwt timeouts directly");
Marshal_tools_lwt.from_fd_with_preamble fd
end
include DfindLib.DFindLibFunctor (MarshalToolsLwt)
(* The Timeout module probably doesn't work terribly well with Lwt. Luckily, timeouts are super easy
* to write in Lwt, so we don't **really** need them *)
let get_changes handle = get_changes handle |
OCaml Interface | hhvm/hphp/hack/src/dfind/dfindLibLwt.mli | (*
* Copyright (c) 2015, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the "hack" directory of this source tree.
*
*)
type t
val init :
Unix.file_descr * Unix.file_descr * Unix.file_descr ->
string * Path.t list ->
t
val wait_until_ready : t -> unit Lwt.t
val pid : t -> int
val get_changes : t -> SSet.t Lwt.t
val stop : t -> unit |
OCaml | hhvm/hphp/hack/src/dfind/dfindMaybe.ml | (*
* Copyright (c) 2015, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the "hack" directory of this source tree.
*
*)
(*****************************************************************************)
(* A modified maybe monad
* Most of the time, I prefer to use exceptions, I like things to blow up
* if something went wrong.
* However, in the case of dfind, exceptions are painful. We don't want things
* to blow up; we want to carry on whatever happens.
* So this monad never fails, it logs very nasty errors, for example, it will
* log the fact that a watch couldn't be created, when the file still exists.
*)
(*****************************************************************************)
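(* For example (an illustrative sketch, not used elsewhere in this file):
 * [call (wrap Unix.lstat) path >>= fun st -> return st.Unix.st_size]
 * evaluates to [Some size] when [lstat] succeeds and to [None] when it
 * raises, without ever propagating the exception.
 *)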
let log = ref stderr
let set_log oc = log := oc
type 'a t = 'a option
let ( >>= ) x f =
match x with
| None -> None
| Some x -> f x
let return x = Some x
let handle_file_exn path = function
| Fsnotify.Error (_, Unix.ENOENT) ->
() (* The file got deleted in the mean time ... we don't care *)
| Fsnotify.Error (reason, _) ->
(* This is bad ... *)
Printf.fprintf !log "Error: could not add watch to %s [%s]\n" path reason
| _ when Sys.file_exists path ->
(* Logging this makes the system very noisy. There are too many
* cases where a file has been removed etc ...
*)
()
| _ -> ()
(* Calls (f path), never fails, logs the nasty exceptions *)
let call f path =
try f path with
| e ->
handle_file_exn path e;
None
let wrap f x = return (f x) |
OCaml Interface | hhvm/hphp/hack/src/dfind/dfindMaybe.mli | (*
* Copyright (c) 2015, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the "hack" directory of this source tree.
*
*)
(*****************************************************************************)
(* A modified maybe monad
* Most of the time, I prefer to use exceptions, I like things to blow up
* if something went wrong.
* However, in the case of dfind, exceptions are painful. We don't want things
* to blow up; we want to carry on whatever happens.
* So this monad never fails, it logs very nasty errors, for example, it will
* log the fact that a watch couldn't be created, when the file still exists.
*)
(*****************************************************************************)
type 'a t
(* Called at the initialization of the server (cf server.ml) *)
val set_log : out_channel -> unit
val ( >>= ) : 'a t -> ('a -> 'b t) -> 'b t
val return : 'a -> 'a t
(* Calls (f path), never fails, logs the nasty exceptions *)
val call : (string -> 'a t) -> string -> 'a t
val wrap : ('a -> 'b) -> 'a -> 'b t |
OCaml | hhvm/hphp/hack/src/dfind/dfindServer.ml | (*
* Copyright (c) 2015, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the "hack" directory of this source tree.
*
*)
(*****************************************************************************)
(* Code relative to the client/server communication *)
(*****************************************************************************)
open Hh_prelude
open DfindEnv
type msg =
| Ready
| Updates of SSet.t
(*****************************************************************************)
(* Processing an fsnotify event *)
(*****************************************************************************)
let (process_fsnotify_event : DfindEnv.t -> SSet.t -> Fsnotify.event -> SSet.t)
=
fun env dirty event ->
let { Fsnotify.path; wpath } = event in
(* Tell everybody that this file has changed *)
let dirty = SSet.add path dirty in
(* Is it a directory? Be conservative, everything we know about this
* directory is now "dirty"
*)
let dirty =
if SMap.mem path env.dirs then
SSet.union dirty (SMap.find path env.dirs)
else
let dir_content =
match SMap.find_opt wpath env.dirs with
| Some content -> content
| None -> SSet.empty
in
env.dirs <- SMap.add wpath (SSet.add path dir_content) env.dirs;
dirty
in
env.new_files <- SSet.empty;
(* Add the file, plus all of the sub elements if it is a directory *)
DfindAddFile.path env path;
(* Add everything new we found in this directory
* (empty when it's a regular file)
*)
let dirty = SSet.union env.new_files dirty in
dirty
let run_daemon (scuba_table, roots) (ic, oc) =
Printexc.record_backtrace true;
let t = Unix.gettimeofday () in
let infd = Daemon.descr_of_in_channel ic in
let outfd = Daemon.descr_of_out_channel oc in
let roots = List.map roots ~f:Path.to_string in
let env = DfindEnv.make roots in
List.iter roots ~f:(DfindAddFile.path env);
EventLogger.dfind_ready scuba_table t;
Marshal_tools.to_fd_with_preamble outfd Ready |> ignore;
ignore @@ Hh_logger.log_duration "Initialization" t;
let acc = ref SSet.empty in
let descr_in = Daemon.descr_of_in_channel ic in
let fsnotify_callback events =
acc := List.fold_left events ~f:(process_fsnotify_event env) ~init:!acc
in
let message_in_callback () =
let () = Marshal_tools.from_fd_with_preamble infd in
let count = SSet.cardinal !acc in
if count > 0 then Hh_logger.log "Sending %d file updates\n%!" count;
Marshal_tools.to_fd_with_preamble outfd (Updates !acc) |> ignore;
acc := SSet.empty
in
while true do
let read_fdl = [(descr_in, message_in_callback)] in
let timeout = -1.0 in
Fsnotify.select env.fsnotify ~read_fdl ~timeout fsnotify_callback
done
let entry_point = Daemon.register_entry_point "dfind" run_daemon |
OCaml Interface | hhvm/hphp/hack/src/dfind/dfindServer.mli | (*
* Copyright (c) 2015, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the "hack" directory of this source tree.
*
*)
type msg =
| Ready
| Updates of SSet.t
val entry_point : (string * Path.t list, unit, msg) Daemon.entry |
hhvm/hphp/hack/src/dfind/dune | (library
(name dfind)
(wrapped false)
(modules :standard \ dfindLibLwt)
(libraries avl collections fsnotify logging_common marshal_tools sys_utils))
(library
(name dfind_lwt)
(wrapped false)
(modules dfindLibLwt)
(libraries dfind lwt lwt.unix marshal_tools_lwt sys_utils)) |
|
hhvm/hphp/hack/src/dfind/README | dfind is a tool to quickly find what has changed in a directory.
It's a "difference finder".
The way it works
$ dfind your_directory your_handle
On the first call, it will give you all the files in this directory.
If you call dfind again with the same directory and the same handle,
it will only print the files that have changed.
Note that you can do: dfind -f, if you want to get the differences incrementally.
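Example (illustrative session; the directory and handle names are made up):
  $ dfind /home/me/www my_handle      # first call: prints every file under /home/me/www
  $ touch /home/me/www/foo.php
  $ dfind /home/me/www my_handle      # second call: prints only /home/me/www/foo.php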
What happens under the hood:
If you are the first one to call dfind, it forks, and creates a server.
The dfind server is shared across all the users of the machine.
You can find the log in /tmp/dfind.log and the pid in /tmp/dfind.pid
NOTE: dfind is very dumb, and very conservative. It can give you MORE files than what has actually changed. But it will never give you fewer (unless there is a bug). The idea is to use dfind to narrow down the results, not to give an accurate view of the current state of the world.
|
OCaml | hhvm/hphp/hack/src/diff/parse_diff.ml | (*
* Copyright (c) 2017, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the "hack" directory of this source tree.
*
*)
open Hh_prelude
(*****************************************************************************)
(* Section building a list of intervals per file for a given diff.
*
* Typically, the output of git/hg looks like:
* --- a/path/filename
* +++ b/path/filename
* @@ -line,length +line, length @@
*
* The information that we are interested in is the filename after +++
* and the line number after '+' in the header section (the section after @@).
* That's because we don't really care about what has been removed.
* What we want is to format the new content, not the old one.
* ParseDiff builds a list of intervals of modified lines for each file in
* a diff.
*
* For example: ["myfile1", [4, 6; 7, 7]] means that the file named "myfile1"
* has modified lines, from line 4 to 6 (inclusive) and the line 7.
*)
(*****************************************************************************)
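(* For instance (an illustrative sketch): feeding [go] the diff
 *
 * +++ b/myfile1
 * @@ -10,3 +4,4 @@
 * +added line 4
 * +added line 5
 * unchanged context line
 * +added line 7
 *
 * yields [("myfile1", [(4, 5); (7, 7)])].
 *)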
type filename = string
type interval = int * int
type file_diff = filename * interval list
type env = {
(* The file we are currently parsing (None for '/dev/null') *)
mutable file: string option;
(* The list of lines that have been modified *)
mutable modified: int list;
(* The current line *)
mutable line: int;
(* The accumulator (for the result) *)
mutable result: file_diff list;
}
(* The entry point *)
let rec go content =
let env = { file = None; modified = []; line = 0; result = [] } in
let lines = String_utils.split_on_newlines content in
start env lines;
List.rev env.result
(* Skip the text before the first +++ (to make things work with git show) *)
and start env = function
| [] -> ()
| line :: lines when String.is_prefix line ~prefix:"+++" ->
header env line;
modified env 0 lines
| _ :: lines -> start env lines
(* Parses the content of a line starting with +++ (extracts the filename) *)
and header env line =
add_file env;
let filename = String.sub line ~pos:4 ~len:(String.length line - 4) in
(* Getting rid of the prefix b/ *)
let filename =
if String.equal filename Sys_utils.null_path then
None
else if
String.length filename >= 2
&& String.equal (String.sub filename ~pos:0 ~len:2) "b/"
then
Some (String.sub filename ~pos:2 ~len:(String.length filename - 2))
else
Some filename
in
env.file <- filename;
env.modified <- []
(* Parses the lines *)
and modified env nbr = function
| [] -> add_file env
| line :: lines
when String.length line > 4
&& String.equal (String.sub line ~pos:0 ~len:3) "+++" ->
header env line;
modified env 0 lines
| line :: lines
when String.length line > 2
&& String.equal (String.sub line ~pos:0 ~len:2) "@@" ->
(* Find the position right after '+' in '@@ -line,len +line, len@@' *)
let _ = Str.search_forward (Str.regexp "[+][0-9]+") line 0 in
let matched = Str.matched_string line in
let matched = String.sub matched ~pos:1 ~len:(String.length matched - 1) in
let nbr = int_of_string matched in
modified env nbr lines
| line :: lines
when String.length line >= 1
&& String.equal (String.sub line ~pos:0 ~len:1) "+" ->
(* Adds the line to the list of modified lines *)
env.line <- env.line + 1;
env.modified <- nbr :: env.modified;
modified env (nbr + 1) lines
| line :: lines
when String.length line >= 1
&& String.equal (String.sub line ~pos:0 ~len:1) "-" ->
(* Skips the line (we don't care about removed code) *)
modified env nbr lines
| _ :: lines -> modified env (nbr + 1) lines
and add_file env =
(* Given a list of modified lines => returns a list of intervals *)
let lines_modified = List.rev_map env.modified ~f:(fun x -> (x, x)) in
let lines_modified = normalize_intervals [] lines_modified in
(* Adds the file to the list of results *)
match env.file with
| None -> ()
| Some filename -> env.result <- (filename, lines_modified) :: env.result
(* Merges intervals when necessary.
* For example: '[(1, 2), (2, 2), (2, 5); ...]' becomes '[(1, 5); ...]'.
*)
and normalize_intervals acc = function
| [] -> List.rev acc
| (start1, end1) :: (start2, end2) :: rl when end1 + 1 >= start2 ->
normalize_intervals acc ((min start1 start2, max end1 end2) :: rl)
| x :: rl -> normalize_intervals (x :: acc) rl |
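A quick hedged sketch of how the parser above is driven. The module name ParseDiff follows the header comment (the actual exposed name may differ), and the diff text and expected result are illustrative only:
(* Illustrative only: feed a tiny unified diff to the entry point and match
   on the interval representation described in the header comment. *)
let () =
  let diff =
    "--- a/foo.php\n+++ b/foo.php\n@@ -1,2 +3,4 @@\n+new line one\n+new line two\n"
  in
  (* Two consecutive added lines starting at line 3 collapse into the single
     interval (3, 4) via normalize_intervals. *)
  match ParseDiff.go diff with
  | [("foo.php", [(3, 4)])] -> print_endline "foo.php: lines 3-4 modified"
  | _ -> print_endline "unexpected diff shape"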
OCaml | hhvm/hphp/hack/src/dune_config/discover.ml | (** This is a dune configurator:
https://jbuilder.readthedocs.io/en/latest/configurator.html *)
module C = Configurator.V1
let () =
C.main ~name:"hphpdir" (fun (c : C.t) ->
let _split s =
if s = "" then
[]
else
String.split_on_char ' ' s
in
let flags = ["-ccopt"; "-lpthread"] in
let flags =
match C.ocaml_config_var_exn c "system" with
(* ocaml builds with `-no_compact_unwind`, which breaks libunwind on
* MacOS; we need libunwind to work for rust std::panic::catch_unwind.
*
* This fix is included in ocaml 4.08
* (https://github.com/ocaml/ocaml/pull/8673) - but we're still on 4.07 *)
| "macosx" -> flags @ ["-ccopt"; "-Wl,-keep_dwarf_unwind"]
| _ -> flags
in
C.Flags.write_sexp "ld-opts.sexp" flags) |
hhvm/hphp/hack/src/dune_config/dune | (executable
(name discover)
(libraries dune.configurator))
(rule
(targets ld-opts.sexp)
(action
(run ./discover.exe))) |
|
TOML | hhvm/hphp/hack/src/elab/Cargo.toml | # @generated by autocargo
[package]
name = "elab"
version = "0.0.0"
edition = "2021"
[lib]
path = "elab.rs"
[dependencies]
bitflags = "1.3"
bstr = { version = "1.4.0", features = ["serde", "std", "unicode"] }
core_utils_rust = { version = "0.0.0", path = "../utils/core" }
elaborate_namespaces_visitor = { version = "0.0.0", path = "../naming/cargo/elaborate_namespaces" }
file_info = { version = "0.0.0", path = "../deps/rust/file_info" }
hack_macros = { version = "0.0.0", path = "../utils/hack_macros/cargo/hack_macros" }
hash = { version = "0.0.0", path = "../utils/hash" }
itertools = "0.10.3"
naming_special_names_rust = { version = "0.0.0", path = "../naming" }
oxidized = { version = "0.0.0", path = "../oxidized" }
relative_path = { version = "0.0.0", path = "../utils/rust/relative_path" }
vec1 = { version = "1", features = ["serde"] } |
Rust | hhvm/hphp/hack/src/elab/elab.rs | // Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
#![feature(box_patterns)]
#![cfg_attr(not(rust_lib_feature = "let_chains"), feature(let_chains))]
/// Used to combine multiple types implementing `Pass` into nested `Passes` types
/// without requiring the caller to hand-write the nesting, so:
/// `passes![p1, p2, p3]` => `Passes(p1, Passes(p2, p3))`
macro_rules! passes {
( $p:expr $(,$ps:expr)+ $(,)? ) => {
$crate::pass::Passes { fst: $p, snd: passes!($($ps),*) }
};
( $p:expr $(,)? ) => {
$p
};
}
mod elab_utils;
mod env;
mod lambda_captures;
mod pass;
mod passes;
mod transform;
mod typed_local;
/// Private convenience module for simplifying imports in pass implementations.
mod prelude {
pub use std::ops::ControlFlow;
pub use std::ops::ControlFlow::Break;
pub use std::ops::ControlFlow::Continue;
pub use std::rc::Rc;
pub use naming_special_names_rust as sn;
pub use oxidized::naming_error::NamingError;
pub use oxidized::naming_error::UnsupportedFeature;
pub use oxidized::naming_phase_error::ExperimentalFeature;
pub use oxidized::naming_phase_error::NamingPhaseError;
pub use oxidized::nast;
pub use oxidized::nast_check_error::NastCheckError;
pub use oxidized::parsing_error::ParsingError;
pub(crate) use crate::elab_utils;
pub use crate::env::Env;
pub use crate::pass::Pass;
pub use crate::transform::Transform;
}
use std::sync::Arc;
use env::Env;
use env::ProgramSpecificOptions;
use oxidized::namespace_env;
use oxidized::naming_phase_error::NamingPhaseError;
use oxidized::nast;
use oxidized::typechecker_options::TypecheckerOptions;
use pass::Pass;
use relative_path::RelativePath;
use transform::Transform;
use vec1::Vec1;
/// Provided for use in hackc as a simple collection of knobs outside of namespace env.
#[derive(Clone, Debug, Default)]
pub struct CodegenOpts {
pub textual_remove_memoize: bool,
}
/// Provided for use in hackc, where we have an `ns_env` in hand already.
/// Expected to behave the same as `elaborate_program` when `po_codegen` is
/// `true`.
pub fn elaborate_program_for_codegen(
ns_env: Arc<namespace_env::Env>,
path: &RelativePath,
program: &mut nast::Program,
opts: &CodegenOpts,
) -> Result<(), Vec1<NamingPhaseError>> {
assert!(ns_env.is_codegen);
let tco = TypecheckerOptions {
po_codegen: true,
po_disable_xhp_element_mangling: ns_env.disable_xhp_element_mangling,
// Do not copy the auto_ns_map; it's not read in this crate except via
// elaborate_namespaces_visitor, which uses the one in `ns_env` here
..Default::default()
};
elaborate_namespaces_visitor::elaborate_program(ns_env, program);
let mut env = make_env(&tco, path);
elaborate_common(&env, program);
elaborate_package_expr(&env, program);
elaborate_for_codegen(&env, program, opts);
// Passes below here can emit errors
typed_local::elaborate_program(&mut env, program, tco.po_codegen);
let errs = env.into_errors();
match Vec1::try_from_vec(errs) {
Err(_) => Ok(()),
Ok(v) => Err(v),
}
}
pub fn elaborate_program(
tco: &TypecheckerOptions,
path: &RelativePath,
program: &mut nast::Program,
) -> Vec<NamingPhaseError> {
elaborate_namespaces_visitor::elaborate_program(ns_env(tco), program);
let mut env = make_env(tco, path);
elaborate_common(&env, program);
if tco.po_codegen {
return env.into_errors();
}
lambda_captures::elaborate_program(&mut env, program);
typed_local::elaborate_program(&mut env, program, false);
elaborate_for_typechecking(env, program)
}
pub fn elaborate_fun_def(
tco: &TypecheckerOptions,
path: &RelativePath,
f: &mut nast::FunDef,
) -> Vec<NamingPhaseError> {
elaborate_namespaces_visitor::elaborate_fun_def(ns_env(tco), f);
let mut env = make_env(tco, path);
elaborate_common(&env, f);
if tco.po_codegen {
return env.into_errors();
}
lambda_captures::elaborate_fun_def(&mut env, f);
typed_local::elaborate_fun_def(&mut env, f, false);
elaborate_for_typechecking(env, f)
}
pub fn elaborate_class_(
tco: &TypecheckerOptions,
path: &RelativePath,
c: &mut nast::Class_,
) -> Vec<NamingPhaseError> {
elaborate_namespaces_visitor::elaborate_class_(ns_env(tco), c);
let mut env = make_env(tco, path);
elaborate_common(&env, c);
if tco.po_codegen {
return env.into_errors();
}
lambda_captures::elaborate_class_(&mut env, c);
typed_local::elaborate_class_(&mut env, c, false);
elaborate_for_typechecking(env, c)
}
pub fn elaborate_module_def(
tco: &TypecheckerOptions,
path: &RelativePath,
m: &mut nast::ModuleDef,
) -> Vec<NamingPhaseError> {
elaborate_namespaces_visitor::elaborate_module_def(ns_env(tco), m);
let mut env = make_env(tco, path);
elaborate_common(&env, m);
if tco.po_codegen {
return env.into_errors();
}
lambda_captures::elaborate_module_def(&mut env, m);
elaborate_for_typechecking(env, m)
}
pub fn elaborate_gconst(
tco: &TypecheckerOptions,
path: &RelativePath,
c: &mut nast::Gconst,
) -> Vec<NamingPhaseError> {
elaborate_namespaces_visitor::elaborate_gconst(ns_env(tco), c);
let mut env = make_env(tco, path);
elaborate_common(&env, c);
if tco.po_codegen {
return env.into_errors();
}
lambda_captures::elaborate_gconst(&mut env, c);
elaborate_for_typechecking(env, c)
}
pub fn elaborate_typedef(
tco: &TypecheckerOptions,
path: &RelativePath,
t: &mut nast::Typedef,
) -> Vec<NamingPhaseError> {
elaborate_namespaces_visitor::elaborate_typedef(ns_env(tco), t);
let mut env = make_env(tco, path);
elaborate_common(&env, t);
if tco.po_codegen {
return env.into_errors();
}
lambda_captures::elaborate_typedef(&mut env, t);
elaborate_for_typechecking(env, t)
}
fn ns_env(tco: &TypecheckerOptions) -> Arc<namespace_env::Env> {
Arc::new(namespace_env::Env::empty(
tco.po_auto_namespace_map.clone(),
tco.po_codegen,
tco.po_disable_xhp_element_mangling,
))
}
fn make_env(tco: &TypecheckerOptions, rel_path: &RelativePath) -> Env {
let is_hhi = rel_path.is_hhi();
let path = rel_path.path();
let allow_module_declarations = tco.tco_allow_all_files_for_module_declarations
|| tco
.tco_allowed_files_for_module_declarations
.iter()
.any(|spec| {
!spec.is_empty()
&& (spec.ends_with('*') && path.starts_with(&spec[..spec.len() - 1])
|| path == std::path::Path::new(spec))
});
Env::new(
tco,
&ProgramSpecificOptions {
is_hhi,
allow_module_declarations,
},
)
}
/// Run the passes which are common to codegen and typechecking.
/// For now, these passes may not emit errors.
fn elaborate_common<T: Transform>(env: &Env, node: &mut T) {
#[derive(Copy, Clone)]
struct NoopPass;
impl Pass for NoopPass {}
#[rustfmt::skip]
let mut passes = passes![
NoopPass
];
node.transform(env, &mut passes);
env.assert_no_errors();
}
fn elaborate_for_typechecking<T: Transform>(env: Env, node: &mut T) -> Vec<NamingPhaseError> {
#[rustfmt::skip]
let mut passes = passes![
// Stop on `Invalid` expressions
passes::guard_invalid::GuardInvalidPass::default(),
// -- Canonicalization passes -----------------------------------------
// Remove top-level file attributes, noop and markup statements
passes::elab_defs::ElabDefsPass::default(),
// Remove function bodies when in hhi mode
passes::elab_func_body::ElabFuncBodyPass::default(),
// Flatten `Block` statements
passes::elab_block::ElabBlockPass::default(),
// Strip `Hsoft` hints or replace with `Hlike`
passes::elab_hint_hsoft::ElabHintHsoftPass::default(),
// Elaborate `Happly` to canonical representation, if any
passes::elab_hint_happly::ElabHintHapplyPass::default(),
// Elaborate class identifier expressions (`CIexpr`) to canonical
// representation: `CIparent`, `CIself`, `CIstatic`, `CI` _or_
// `CIexpr (_,_, Lvar _ | This )`
passes::elab_class_id::ElabClassIdPass::default(),
// Strip type parameters from type parameters when HKTs are not enabled
passes::elab_hkt::ElabHktPass::default(),
// Elaborate `Collection` to `ValCollection` or `KeyValCollection`
passes::elab_expr_collection::ElabExprCollectionPass::default(),
// Deduplicate user attributes
passes::elab_user_attributes::ElabUserAttributesPass::default(),
// Replace import expressions with invalid expression marker
passes::elab_expr_import::ElabExprImportPass::default(),
// Elaborate local variables to canonical representation
passes::elab_expr_lvar::ElabExprLvarPass::default(),
// Warn of explicit use of builtin enum classes; make subtyping of
// enum classes explicit
passes::elab_enum_class::ElabEnumClassPass::default(),
// Elaborate class members & xhp attributes
passes::elab_class_vars::ElabClassVarsPass::default(),
// Elaborate special function calls to canonical representation, if any
passes::validate_expr_call_echo::ValidateExprCallEchoPass::default(),
passes::elab_expr_call_call_user_func::ElabExprCallCallUserFuncPass::default(),
passes::elab_expr_call_hh_meth_caller::ElabExprCallHhMethCallerPass::default(),
// Elaborate invariant calls to canonical representation
passes::elab_expr_call_hh_invariant::ElabExprCallHhInvariantPass::default(),
// -- Mark invalid hints and expressions & miscellaneous validation ---
// Replace invalid uses of `void` and `noreturn` with `Herr`
passes::elab_hint_retonly::ElabHintRetonlyPass::default(),
// Replace invalid uses of wildcard hints with `Herr`
passes::elab_hint_wildcard::ElabHintWildcardPass::default(),
// Replace uses to `self` in shape field names with referenced class
passes::elab_shape_field_name::ElabShapeFieldNamePass::default(),
// Replace invalid uses of `this` hints with `Herr`
passes::elab_hint_this::ElabHintThisPass::default(),
// Replace invalid `Haccess` root hints with `Herr`
passes::elab_hint_haccess::ElabHintHaccessPass::default(),
// Replace empty `Tuple`s with invalid expression marker
passes::elab_expr_tuple::ElabExprTuplePass::default(),
// Validate / replace invalid uses of dynamic classes in `New` and `Class_get`
// expressions
passes::elab_dynamic_class_name::ElabDynamicClassNamePass::default(),
// Replace non-constant class or global constant with invalid expression marker
passes::elab_const_expr::ElabConstExprPass::default(),
// Replace malformed key / value bindings in as expressions with invalid
// local var markers
passes::elab_as_expr::ElabAsExprPass::default(),
// Validate hints used in `Cast` expressions
passes::validate_expr_cast::ValidateExprCastPass::default(),
// Validate where `dynamic` can be used in a hint
passes::validate_dynamic_hint::ValidateDynamicHintPass::default(),
// Check for duplicate function parameter names
passes::validate_fun_params::ValidateFunParamsPass::default(),
// Validate use of `require implements`, `require extends` and
// `require class` declarations for traits, interfaces and classes
passes::validate_class_req::ValidateClassReqPass::default(),
// Validation dealing with common xhp naming errors
passes::validate_xhp_name::ValidateXhpNamePass::default(),
// -- Elaboration & validation under typechecker options --------------
// Add `supportdyn` and `Like` wrappers everywhere - under `everything-sdt`
// typechecker option
passes::elab_everything_sdt::ElabEverythingSdtPass::default(),
// Validate use of `Hlike` hints - depends on `enable-like-type-hints`
// and `everything_sdt` typechecker options
passes::validate_like_hint::ValidateLikeHintPass::default(),
// Validate constructors under
        // `explicit_consistent_constructors` typechecker option
passes::validate_class_consistent_construct::ValidateClassConsistentConstructPass::default(),
// Validate use of `SupportDyn` class - depends on `enable-supportdyn`
// and `everything_sdt` typechecker options
passes::validate_supportdyn::ValidateSupportDynPass::default(),
// Validate use of module definitions - depends on:
// - `allow_all_files_for_module_declarations`
// - `allowed_files_for_module_declarations`
// typechecker options
passes::validate_module::ValidateModulePass::default(),
        // -- Old 'NAST checks' -----------------------------------------------
// Validate use of the `__Const` attribute on classes - depends on
// `const_attribute` typechecker option
passes::validate_class_user_attribute_const::ValidateClassUserAttributeConstPass::default(),
// Validate use of the `__Const` attribute on static class vars - depends
// on the `const_static_props` typechecker option
passes::validate_class_var_user_attribute_const::ValidateClassVarUserAttributeConstPass::default(),
passes::validate_class_var_user_attribute_lsb::ValidateClassVarUserAttributeLsbPass::default(),
// Validate `inout` `FunParam`s ensuring they are not used in functions with
// special semantics or in memoized functions
passes::validate_fun_param_inout::ValidateFunParamInoutPass::default(),
// Validate use of `Await` in sync functions and return in generators.
passes::validate_coroutine::ValidateCoroutinePass::default(),
// Checks for the presence of a function body in methods, use of traits
// and instance and static member variables in an interface definition
passes::validate_interface::ValidateInterfacePass::default(),
// Checks for use of reserved names in functions, methods, class identifiers
// and class constants
passes::validate_illegal_name::ValidateIllegalNamePass::default(),
passes::validate_control_context::ValidateControlContextPass::default(),
passes::validate_class_tparams::ValidateClassTparamsPass::default(),
passes::validate_user_attribute_dynamically_callable::ValidateUserAttributeDynamicallyCallable::default(),
passes::validate_hint_habstr::ValidateHintHabstrPass::default(),
passes::validate_class_methods::ValidateClassMethodsPass::default(),
passes::validate_global_const::ValidateGlobalConstPass::default(),
passes::validate_class_member::ValidateClassMemberPass::default(),
passes::validate_shape_name::ValidateShapeNamePass::default(),
passes::validate_php_lambda::ValidatePhpLambdaPass::default(),
passes::validate_xhp_attribute::ValidateXhpAttributePass::default(),
passes::validate_user_attribute_arity::ValidateUserAttributeArityPass::default(),
passes::validate_user_attribute_deprecated_static::ValidateUserAttributeDeprecatedStaticPass::default(),
passes::validate_user_attribute_entry_point::ValidateUserAttributeEntryPointPass::default(),
passes::validate_user_attribute_no_auto_dynamic::ValidateUserAttributeNoAutoDynamic::default(),
passes::validate_user_attribute_infer_flows::ValidateUserAttributeInferFlowsPass::default(),
passes::validate_user_attribute_memoize::ValidateUserAttributeMemoizePass::default(),
passes::validate_user_attribute_soft_internal::ValidateUserAttributeSoftInternalPass::default(),
passes::validate_method_private_final::ValidateMethodPrivateFinalPass::default(),
passes::validate_trait_internal::ValidateTraitInternalPass::default(),
passes::validate_hint_hrefinement::ValidateHintHrefinementPass::default(),
passes::validate_expr_function_pointer::ValidateExprFunctionPointerPass::default(),
passes::validate_expr_array_get::ValidateExprArrayGetPass::default(),
passes::validate_expr_list::ValidateExprListPass::default(),
passes::validate_like_hint::ValidateLikeHintPass::default(),
];
node.transform(&env, &mut passes);
env.into_errors()
}
fn elaborate_package_expr<T: Transform>(env: &Env, node: &mut T) {
let mut passes = passes![passes::elab_expr_package::ElabExprPackagePass::default()];
node.transform(env, &mut passes);
env.assert_no_errors();
}
fn elaborate_for_codegen<T: Transform>(env: &Env, node: &mut T, opts: &CodegenOpts) {
if opts.textual_remove_memoize {
node.transform(
env,
&mut passes::remove_memo_attr::RemoveMemoAttr::default(),
);
}
env.assert_no_errors();
} |
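To make the pass machinery above concrete, here is a minimal sketch of what a single pass looks like from inside this crate. `CountLvarsPass` is hypothetical and not part of the real pass list; it assumes the crate-private `prelude` shown earlier is in scope.
// Hypothetical in-crate pass sketch: counts local-variable expressions bottom-up.
use crate::prelude::*;
#[derive(Copy, Clone, Default)]
struct CountLvarsPass {
    seen: usize,
}
impl Pass for CountLvarsPass {
    fn on_ty_expr__bottom_up(&mut self, _env: &Env, elem: &mut nast::Expr_) -> ControlFlow<()> {
        if let nast::Expr_::Lvar(_) = elem {
            self.seen += 1;
        }
        // Returning Continue lets any later pass in a `passes!` chain still visit this node.
        Continue(())
    }
}
A real pass would then be added to the `passes![...]` list in `elaborate_for_typechecking` (or the codegen path), which is how the `Passes` combinator in `pass.rs` chains it with the others.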
Rust | hhvm/hphp/hack/src/elab/elab_utils.rs | // Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
//! Utilities for elaboration passes.
/// Factory functions constructing positions.
pub(crate) mod pos {
use oxidized::nast::Pos;
/// Construct a "null" position.
#[inline(always)]
pub(crate) fn null() -> Pos {
Pos::NONE
}
}
/// Factory functions constructing hints.
pub(crate) mod hint {
use oxidized::aast::Hint;
use oxidized::aast::Hint_;
/// Construct a "null" hint.
#[inline(always)]
pub(crate) fn null() -> Hint {
Hint(super::pos::null(), Box::new(Hint_::Hnothing))
}
}
/// Factory functions constructing expressions.
pub(crate) mod expr {
use oxidized::nast::Expr;
use oxidized::nast::Expr_;
use oxidized::nast::Pos;
/// Construct a "null" expression.
#[inline(always)]
pub(crate) fn null() -> Expr {
from_expr_(Expr_::Null)
}
/// Construct an "invalid" expression.
#[inline(always)]
pub(crate) fn invalid(expr: Expr) -> Expr {
let Expr(_, pos, _) = &expr;
from_expr__with_pos_(pos.clone(), Expr_::Invalid(Box::new(Some(expr))))
}
/// Construct an expression (with a null position) from an `Expr_`.
#[inline(always)]
pub(crate) fn from_expr_(expr_: Expr_) -> Expr {
from_expr__with_pos_(super::pos::null(), expr_)
}
/// Construct an expression from a `Pos` and an `Expr_`.
#[inline(always)]
#[allow(non_snake_case)]
pub(crate) fn from_expr__with_pos_(pos: Pos, expr_: Expr_) -> Expr {
Expr((), pos, expr_)
}
} |
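These helpers are what passes typically reach for when they reject a node. The following is a small hedged sketch of in-crate usage; the `reject` function is illustrative and not part of the module:
// In-crate sketch: swap a rejected expression for the `Invalid` marker while
// keeping its original position for later error reporting.
use crate::prelude::*;
fn reject(e: &mut nast::Expr) {
    let old = std::mem::replace(e, elab_utils::expr::null());
    *e = elab_utils::expr::invalid(old);
}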
Rust | hhvm/hphp/hack/src/elab/env.rs | // Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use std::cell::RefCell;
use bitflags::bitflags;
use oxidized::naming_phase_error::NamingPhaseError;
use oxidized::typechecker_options::TypecheckerOptions;
#[derive(Debug, Clone, Default)]
pub struct ProgramSpecificOptions {
pub is_hhi: bool,
pub allow_module_declarations: bool,
}
bitflags! {
struct Flags: u16 {
const SOFT_AS_LIKE = 1 << 0;
const HKT_ENABLED = 1 << 1;
const IS_HHI = 1 << 2;
const IS_SYSTEMLIB = 1 << 3;
const LIKE_TYPE_HINTS_ENABLED = 1 << 4;
const CONST_ATTRIBUTE = 1 << 5;
const CONST_STATIC_PROPS = 1 << 6;
const ALLOW_MODULE_DECLARATIONS = 1 << 7;
const ERROR_PHP_LAMBDAS = 1 << 9;
const INFER_FLOWS = 1 << 10;
const EVERYTHING_SDT = 1 << 11;
const SUPPORTDYNAMIC_TYPE_HINT_ENABLED = 1 << 12;
const NO_AUTO_DYNAMIC_ENABLED = 1 << 13;
}
}
impl Flags {
pub fn new(tco: &TypecheckerOptions, pso: &ProgramSpecificOptions) -> Self {
let mut flags: Self = Flags::empty();
flags.set(
Self::SOFT_AS_LIKE,
tco.po_interpret_soft_types_as_like_types,
);
flags.set(Self::HKT_ENABLED, tco.tco_higher_kinded_types);
flags.set(Self::IS_SYSTEMLIB, tco.tco_is_systemlib);
flags.set(Self::LIKE_TYPE_HINTS_ENABLED, tco.tco_like_type_hints);
flags.set(
Self::NO_AUTO_DYNAMIC_ENABLED,
tco.tco_enable_no_auto_dynamic,
);
flags.set(
Self::SUPPORTDYNAMIC_TYPE_HINT_ENABLED,
tco.tco_experimental_features
.contains("supportdynamic_type_hint"),
);
flags.set(Self::EVERYTHING_SDT, tco.tco_everything_sdt);
flags.set(Self::CONST_ATTRIBUTE, tco.tco_const_attribute);
flags.set(Self::CONST_STATIC_PROPS, tco.tco_const_static_props);
flags.set(Self::ERROR_PHP_LAMBDAS, tco.tco_error_php_lambdas);
flags.set(Self::IS_HHI, pso.is_hhi);
flags.set(
Self::ALLOW_MODULE_DECLARATIONS,
pso.allow_module_declarations,
);
flags.set(
Self::INFER_FLOWS,
tco.tco_experimental_features
.contains(EXPERIMENTAL_INFER_FLOWS),
);
flags
}
}
#[derive(Debug)]
pub struct Env {
flags: Flags,
errors: RefCell<Vec<NamingPhaseError>>,
pub consistent_ctor_level: isize,
}
impl Default for Env {
fn default() -> Self {
Self::new(
&TypecheckerOptions::default(),
&ProgramSpecificOptions::default(),
)
}
}
impl Env {
pub fn new(tco: &TypecheckerOptions, pso: &ProgramSpecificOptions) -> Self {
Self {
flags: Flags::new(tco, pso),
errors: RefCell::new(vec![]),
consistent_ctor_level: tco.tco_explicit_consistent_constructors,
}
}
pub fn emit_error(&self, err: impl Into<NamingPhaseError>) {
self.errors.borrow_mut().push(err.into())
}
pub fn assert_no_errors(&self) {
assert!(self.errors.borrow().is_empty());
}
pub fn into_errors(self) -> Vec<NamingPhaseError> {
self.errors.into_inner()
}
pub fn soft_as_like(&self) -> bool {
self.flags.contains(Flags::SOFT_AS_LIKE)
}
pub fn error_php_lambdas(&self) -> bool {
self.flags.contains(Flags::ERROR_PHP_LAMBDAS)
}
pub fn allow_module_declarations(&self) -> bool {
self.flags.contains(Flags::ALLOW_MODULE_DECLARATIONS)
}
pub fn hkt_enabled(&self) -> bool {
self.flags.contains(Flags::HKT_ENABLED)
}
pub fn is_systemlib(&self) -> bool {
self.flags.contains(Flags::IS_SYSTEMLIB)
}
pub fn like_type_hints_enabled(&self) -> bool {
self.flags.contains(Flags::LIKE_TYPE_HINTS_ENABLED)
}
pub fn supportdynamic_type_hint_enabled(&self) -> bool {
self.flags.contains(Flags::SUPPORTDYNAMIC_TYPE_HINT_ENABLED)
}
pub fn no_auto_dynamic_enabled(&self) -> bool {
self.flags.contains(Flags::NO_AUTO_DYNAMIC_ENABLED)
}
pub fn everything_sdt(&self) -> bool {
self.flags.contains(Flags::EVERYTHING_SDT)
}
pub fn is_hhi(&self) -> bool {
self.flags.contains(Flags::IS_HHI)
}
pub fn const_attribute(&self) -> bool {
self.flags.contains(Flags::CONST_ATTRIBUTE)
}
pub fn const_static_props(&self) -> bool {
self.flags.contains(Flags::CONST_STATIC_PROPS)
}
pub fn infer_flows(&self) -> bool {
self.flags.contains(Flags::INFER_FLOWS)
}
}
const EXPERIMENTAL_INFER_FLOWS: &str = "ifc_infer_flows"; |
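The `RefCell` above is what lets every pass share a plain `&Env` and still report errors; the list is reclaimed exactly once at the end, as in `elaborate_for_typechecking`. A minimal in-crate sketch (the specific error is arbitrary):
// In-crate sketch: errors accumulate behind a shared reference and are
// drained once when elaboration finishes.
use oxidized::naming_error::NamingError;
use oxidized::nast::Pos;
fn demo() {
    let env = crate::env::Env::default();
    env.emit_error(NamingError::ThisAsLexicalVariable(Pos::NONE));
    let errs = env.into_errors(); // consumes env, yields Vec<NamingPhaseError>
    assert_eq!(errs.len(), 1);
}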
Rust | hhvm/hphp/hack/src/elab/lambda_captures.rs | // Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
//! Walk the NAST, track free variables, and add them to capture lists in
//! lambdas.
//!
//! A free variable is any local that isn't bound as a parameter or directly
//! defined.
//!
//! ```notrust
//! ($a) ==> {
//! $b = $a;
//! $c;
//! }
//! ```
//!
//! In this example, only $c is free.
use std::collections::BTreeMap;
use naming_special_names_rust as sn;
use nast::Binop;
use nast::CaptureLid;
use nast::Expr;
use nast::Expr_;
use nast::Lid;
use nast::LocalId;
use nast::Pos;
use oxidized::aast_visitor::AstParams;
use oxidized::aast_visitor::NodeMut;
use oxidized::aast_visitor::VisitorMut;
use oxidized::local_id;
use oxidized::naming_error::NamingError;
use oxidized::nast;
use crate::env::Env;
#[derive(Clone, Default)]
pub struct Visitor {
bound: BTreeMap<LocalId, Pos>,
free: BTreeMap<LocalId, Pos>,
}
impl Visitor {
fn add_local_def(&mut self, lid: Lid) {
self.bound.insert(lid.1, lid.0);
}
fn add_local_defs(&mut self, lids: impl Iterator<Item = CaptureLid>) {
for capture_lid in lids {
let CaptureLid(_, lid) = capture_lid;
self.add_local_def(lid);
}
}
fn add_param(&mut self, param: &nast::FunParam) {
        self.add_local_def(Lid(param.pos.clone(), local_id::make_unscoped(&param.name)));
}
fn add_params(&mut self, f: &nast::Fun_) {
for param in &f.params {
self.add_param(param);
}
}
fn add_local_defs_from_lvalue(&mut self, Expr(_, _, e): &Expr) {
match e {
Expr_::List(lv) => lv.iter().for_each(|e| self.add_local_defs_from_lvalue(e)),
Expr_::Lvar(box lid) => self.add_local_def(lid.clone()),
_ => {}
}
}
fn add_local_ref(&mut self, lid: &Lid) {
let local_id = lid.as_local_id();
if !self.bound.contains_key(local_id) {
self.free.insert(local_id.clone(), lid.pos().clone());
}
}
}
impl<'ast> VisitorMut<'ast> for Visitor {
type Params = AstParams<Env, ()>;
fn object(&mut self) -> &mut dyn VisitorMut<'ast, Params = Self::Params> {
self
}
fn visit_expr_(&mut self, env: &mut Env, e: &mut Expr_) -> Result<(), ()> {
match e {
Expr_::Lvar(lv) => {
self.add_local_ref(lv);
e.recurse(env, self.object())
}
Expr_::Binop(box Binop { bop, lhs, .. }) => {
if let nast::Bop::Eq(None) = bop {
// Introducing a new local variable.
//
// $x = ...
self.add_local_defs_from_lvalue(lhs);
}
e.recurse(env, self.object())
}
Expr_::Lfun(box (f, idl)) => {
let outer_vars = std::mem::take(self);
// We want to know about free variables inside the lambda, but
// we don't want its bound variables.
self.add_params(f);
f.recurse(env, self.object())?;
let inner_free = std::mem::take(&mut self.free);
*idl = inner_free
.iter()
.rev()
.map(|(lid, pos)| CaptureLid((), Lid(pos.clone(), lid.clone())))
.collect();
*self = outer_vars;
self.free.extend(inner_free);
Ok(())
}
_ => e.recurse(env, self.object()),
}
}
fn visit_as_expr(&mut self, env: &mut Env, ae: &mut nast::AsExpr) -> Result<(), ()> {
// `as` inside a foreach loop introduces a new local variable.
//
// foreach(... as $x) { ... }
match ae {
nast::AsExpr::AsV(e) | nast::AsExpr::AwaitAsV(_, e) => {
self.add_local_defs_from_lvalue(e);
}
nast::AsExpr::AsKv(k, v) | nast::AsExpr::AwaitAsKv(_, k, v) => {
self.add_local_defs_from_lvalue(k);
self.add_local_defs_from_lvalue(v);
}
}
ae.recurse(env, self.object())
}
fn visit_stmt_(&mut self, env: &mut Env, s: &mut nast::Stmt_) -> Result<(), ()> {
// `concurrent` blocks are desugared to a list of expressions,
// which can introduce new locals.
//
// concurrent {
// $x = await foo();
// await bar();
// }
if let nast::Stmt_::Awaitall(box (el, _)) = s {
for (lid_opt, _) in el {
if let Some(lid) = lid_opt {
self.add_local_def(lid.clone())
}
}
}
s.recurse(env, self.object())
}
fn visit_catch(&mut self, env: &mut Env, c: &mut nast::Catch) -> Result<(), ()> {
// `catch` introduces a new local variable.
//
        // try { ... } catch (Foo $x) { ... }
let nast::Catch(_, lv, _) = c;
self.add_local_def(lv.clone());
c.recurse(env, self.object())
}
fn visit_efun(&mut self, env: &mut Env, efun: &mut nast::Efun) -> Result<(), ()> {
let outer_vars = std::mem::take(self);
let idl = efun.use_.clone();
// We want to know about free variables inside the lambda, but
// we don't want its bound variables.
self.add_params(&efun.fun);
self.add_local_defs(idl.iter().cloned());
efun.recurse(env, self.object())?;
let inner_free = std::mem::take(&mut self.free);
*self = outer_vars;
self.free.extend(inner_free);
// Efun syntax requires that the user specifies the captures.
//
// function() use($captured1, $captured2) { ... }
//
// We just check that they haven't tried to explicitly capture $this.
let idl = idl
.into_iter()
.filter(|CaptureLid(_, Lid(p, lid))| {
let is_this = local_id::get_name(lid) == sn::special_idents::THIS;
if is_this {
env.emit_error(NamingError::ThisAsLexicalVariable(p.clone()));
}
!is_this
})
.collect();
efun.use_ = idl;
Ok(())
}
}
pub fn elaborate_program(env: &mut Env, program: &mut nast::Program) {
Visitor::default().visit_program(env, program).unwrap();
}
pub fn elaborate_fun_def(env: &mut Env, fd: &mut nast::FunDef) {
Visitor::default().visit_fun_def(env, fd).unwrap();
}
pub fn elaborate_class_(env: &mut Env, c: &mut nast::Class_) {
Visitor::default().visit_class_(env, c).unwrap();
}
pub fn elaborate_module_def(env: &mut Env, m: &mut nast::ModuleDef) {
Visitor::default().visit_module_def(env, m).unwrap();
}
pub fn elaborate_gconst(env: &mut Env, cst: &mut nast::Gconst) {
Visitor::default().visit_gconst(env, cst).unwrap();
}
pub fn elaborate_typedef(env: &mut Env, td: &mut nast::Typedef) {
Visitor::default().visit_typedef(env, td).unwrap();
} |
Rust | hhvm/hphp/hack/src/elab/pass.rs | // Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
//
// @generated SignedSource<<52f2ea8b732cb52d43c43d39da79a6f7>>
//
// To regenerate this file, run:
// hphp/hack/src/oxidized_regen.sh
#![allow(unused_variables, non_snake_case)]
use std::ops::ControlFlow;
use std::ops::ControlFlow::Continue;
use oxidized::aast_defs::*;
use oxidized::ast_defs::*;
use crate::env::Env;
type Ex = ();
type En = ();
pub trait Pass {
#[inline(always)]
fn on_ty_program_top_down(&mut self, env: &Env, elem: &mut Program<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_program_bottom_up(
&mut self,
env: &Env,
elem: &mut Program<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_stmt_top_down(&mut self, env: &Env, elem: &mut Stmt<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_stmt_bottom_up(&mut self, env: &Env, elem: &mut Stmt<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_stmt__top_down(&mut self, env: &Env, elem: &mut Stmt_<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_stmt__bottom_up(&mut self, env: &Env, elem: &mut Stmt_<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_using_stmt_top_down(
&mut self,
env: &Env,
elem: &mut UsingStmt<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_using_stmt_bottom_up(
&mut self,
env: &Env,
elem: &mut UsingStmt<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_as_expr_top_down(&mut self, env: &Env, elem: &mut AsExpr<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_as_expr_bottom_up(&mut self, env: &Env, elem: &mut AsExpr<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_block_top_down(&mut self, env: &Env, elem: &mut Block<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_block_bottom_up(&mut self, env: &Env, elem: &mut Block<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_finally_block_top_down(
&mut self,
env: &Env,
elem: &mut FinallyBlock<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_finally_block_bottom_up(
&mut self,
env: &Env,
elem: &mut FinallyBlock<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_stmt_match_top_down(
&mut self,
env: &Env,
elem: &mut StmtMatch<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_stmt_match_bottom_up(
&mut self,
env: &Env,
elem: &mut StmtMatch<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_stmt_match_arm_top_down(
&mut self,
env: &Env,
elem: &mut StmtMatchArm<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_stmt_match_arm_bottom_up(
&mut self,
env: &Env,
elem: &mut StmtMatchArm<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_pattern_top_down(&mut self, env: &Env, elem: &mut Pattern) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_pattern_bottom_up(&mut self, env: &Env, elem: &mut Pattern) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_pat_var_top_down(&mut self, env: &Env, elem: &mut PatVar) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_pat_var_bottom_up(&mut self, env: &Env, elem: &mut PatVar) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_pat_refinement_top_down(
&mut self,
env: &Env,
elem: &mut PatRefinement,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_pat_refinement_bottom_up(
&mut self,
env: &Env,
elem: &mut PatRefinement,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_class_id_top_down(
&mut self,
env: &Env,
elem: &mut ClassId<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_class_id_bottom_up(
&mut self,
env: &Env,
elem: &mut ClassId<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_class_id__top_down(
&mut self,
env: &Env,
elem: &mut ClassId_<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_class_id__bottom_up(
&mut self,
env: &Env,
elem: &mut ClassId_<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_expr_top_down(&mut self, env: &Env, elem: &mut Expr<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_expr_bottom_up(&mut self, env: &Env, elem: &mut Expr<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_collection_targ_top_down(
&mut self,
env: &Env,
elem: &mut CollectionTarg<Ex>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_collection_targ_bottom_up(
&mut self,
env: &Env,
elem: &mut CollectionTarg<Ex>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_function_ptr_id_top_down(
&mut self,
env: &Env,
elem: &mut FunctionPtrId<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_function_ptr_id_bottom_up(
&mut self,
env: &Env,
elem: &mut FunctionPtrId<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_expression_tree_top_down(
&mut self,
env: &Env,
elem: &mut ExpressionTree<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_expression_tree_bottom_up(
&mut self,
env: &Env,
elem: &mut ExpressionTree<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_expr__top_down(&mut self, env: &Env, elem: &mut Expr_<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_expr__bottom_up(&mut self, env: &Env, elem: &mut Expr_<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_hole_source_top_down(&mut self, env: &Env, elem: &mut HoleSource) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_hole_source_bottom_up(&mut self, env: &Env, elem: &mut HoleSource) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_binop_top_down(&mut self, env: &Env, elem: &mut Binop<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_binop_bottom_up(&mut self, env: &Env, elem: &mut Binop<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_fld_binop_lhs_top_down(&mut self, env: &Env, elem: &mut Expr<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_fld_binop_lhs_bottom_up(
&mut self,
env: &Env,
elem: &mut Expr<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_fld_binop_rhs_top_down(&mut self, env: &Env, elem: &mut Expr<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_fld_binop_rhs_bottom_up(
&mut self,
env: &Env,
elem: &mut Expr<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_class_get_expr_top_down(
&mut self,
env: &Env,
elem: &mut ClassGetExpr<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_class_get_expr_bottom_up(
&mut self,
env: &Env,
elem: &mut ClassGetExpr<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_case_top_down(&mut self, env: &Env, elem: &mut Case<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_case_bottom_up(&mut self, env: &Env, elem: &mut Case<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_default_case_top_down(
&mut self,
env: &Env,
elem: &mut DefaultCase<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_default_case_bottom_up(
&mut self,
env: &Env,
elem: &mut DefaultCase<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_catch_top_down(&mut self, env: &Env, elem: &mut Catch<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_catch_bottom_up(&mut self, env: &Env, elem: &mut Catch<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_field_top_down(&mut self, env: &Env, elem: &mut Field<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_field_bottom_up(&mut self, env: &Env, elem: &mut Field<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_afield_top_down(&mut self, env: &Env, elem: &mut Afield<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_afield_bottom_up(&mut self, env: &Env, elem: &mut Afield<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_xhp_simple_top_down(
&mut self,
env: &Env,
elem: &mut XhpSimple<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_xhp_simple_bottom_up(
&mut self,
env: &Env,
elem: &mut XhpSimple<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_xhp_attribute_top_down(
&mut self,
env: &Env,
elem: &mut XhpAttribute<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_xhp_attribute_bottom_up(
&mut self,
env: &Env,
elem: &mut XhpAttribute<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_fun_param_top_down(
&mut self,
env: &Env,
elem: &mut FunParam<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_fun_param_bottom_up(
&mut self,
env: &Env,
elem: &mut FunParam<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_fun__top_down(&mut self, env: &Env, elem: &mut Fun_<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_fun__bottom_up(&mut self, env: &Env, elem: &mut Fun_<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_fld_fun__ret_top_down(&mut self, env: &Env, elem: &mut TypeHint<Ex>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_fld_fun__ret_bottom_up(&mut self, env: &Env, elem: &mut TypeHint<Ex>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_capture_lid_top_down(
&mut self,
env: &Env,
elem: &mut CaptureLid<Ex>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_capture_lid_bottom_up(
&mut self,
env: &Env,
elem: &mut CaptureLid<Ex>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_efun_top_down(&mut self, env: &Env, elem: &mut Efun<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_efun_bottom_up(&mut self, env: &Env, elem: &mut Efun<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_func_body_top_down(
&mut self,
env: &Env,
elem: &mut FuncBody<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_func_body_bottom_up(
&mut self,
env: &Env,
elem: &mut FuncBody<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_type_hint_top_down(&mut self, env: &Env, elem: &mut TypeHint<Ex>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_type_hint_bottom_up(&mut self, env: &Env, elem: &mut TypeHint<Ex>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_targ_top_down(&mut self, env: &Env, elem: &mut Targ<Ex>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_targ_bottom_up(&mut self, env: &Env, elem: &mut Targ<Ex>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_call_expr_top_down(
&mut self,
env: &Env,
elem: &mut CallExpr<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_call_expr_bottom_up(
&mut self,
env: &Env,
elem: &mut CallExpr<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_user_attribute_top_down(
&mut self,
env: &Env,
elem: &mut UserAttribute<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_user_attribute_bottom_up(
&mut self,
env: &Env,
elem: &mut UserAttribute<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_file_attribute_top_down(
&mut self,
env: &Env,
elem: &mut FileAttribute<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_file_attribute_bottom_up(
&mut self,
env: &Env,
elem: &mut FileAttribute<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_tparam_top_down(&mut self, env: &Env, elem: &mut Tparam<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_tparam_bottom_up(&mut self, env: &Env, elem: &mut Tparam<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_class__top_down(&mut self, env: &Env, elem: &mut Class_<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_class__bottom_up(&mut self, env: &Env, elem: &mut Class_<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_fld_class__tparams_top_down(
&mut self,
env: &Env,
elem: &mut Vec<Tparam<Ex, En>>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_fld_class__tparams_bottom_up(
&mut self,
env: &Env,
elem: &mut Vec<Tparam<Ex, En>>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_fld_class__extends_top_down(
&mut self,
env: &Env,
elem: &mut Vec<ClassHint>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_fld_class__extends_bottom_up(
&mut self,
env: &Env,
elem: &mut Vec<ClassHint>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_fld_class__uses_top_down(
&mut self,
env: &Env,
elem: &mut Vec<TraitHint>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_fld_class__uses_bottom_up(
&mut self,
env: &Env,
elem: &mut Vec<TraitHint>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_fld_class__xhp_attr_uses_top_down(
&mut self,
env: &Env,
elem: &mut Vec<XhpAttrHint>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_fld_class__xhp_attr_uses_bottom_up(
&mut self,
env: &Env,
elem: &mut Vec<XhpAttrHint>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_fld_class__reqs_top_down(
&mut self,
env: &Env,
elem: &mut Vec<ClassReq>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_fld_class__reqs_bottom_up(
&mut self,
env: &Env,
elem: &mut Vec<ClassReq>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_fld_class__implements_top_down(
&mut self,
env: &Env,
elem: &mut Vec<ClassHint>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_fld_class__implements_bottom_up(
&mut self,
env: &Env,
elem: &mut Vec<ClassHint>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_fld_class__consts_top_down(
&mut self,
env: &Env,
elem: &mut Vec<ClassConst<Ex, En>>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_fld_class__consts_bottom_up(
&mut self,
env: &Env,
elem: &mut Vec<ClassConst<Ex, En>>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_fld_class__xhp_attrs_top_down(
&mut self,
env: &Env,
elem: &mut Vec<XhpAttr<Ex, En>>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_fld_class__xhp_attrs_bottom_up(
&mut self,
env: &Env,
elem: &mut Vec<XhpAttr<Ex, En>>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_fld_class__user_attributes_top_down(
&mut self,
env: &Env,
elem: &mut UserAttributes<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_fld_class__user_attributes_bottom_up(
&mut self,
env: &Env,
elem: &mut UserAttributes<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_class_req_top_down(&mut self, env: &Env, elem: &mut ClassReq) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_class_req_bottom_up(&mut self, env: &Env, elem: &mut ClassReq) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_xhp_attr_top_down(
&mut self,
env: &Env,
elem: &mut XhpAttr<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_xhp_attr_bottom_up(
&mut self,
env: &Env,
elem: &mut XhpAttr<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_class_const_kind_top_down(
&mut self,
env: &Env,
elem: &mut ClassConstKind<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_class_const_kind_bottom_up(
&mut self,
env: &Env,
elem: &mut ClassConstKind<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_class_const_top_down(
&mut self,
env: &Env,
elem: &mut ClassConst<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_class_const_bottom_up(
&mut self,
env: &Env,
elem: &mut ClassConst<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_class_abstract_typeconst_top_down(
&mut self,
env: &Env,
elem: &mut ClassAbstractTypeconst,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_class_abstract_typeconst_bottom_up(
&mut self,
env: &Env,
elem: &mut ClassAbstractTypeconst,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_fld_class_abstract_typeconst_default_top_down(
&mut self,
env: &Env,
elem: &mut Option<Hint>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_fld_class_abstract_typeconst_default_bottom_up(
&mut self,
env: &Env,
elem: &mut Option<Hint>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_class_concrete_typeconst_top_down(
&mut self,
env: &Env,
elem: &mut ClassConcreteTypeconst,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_class_concrete_typeconst_bottom_up(
&mut self,
env: &Env,
elem: &mut ClassConcreteTypeconst,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_class_typeconst_top_down(
&mut self,
env: &Env,
elem: &mut ClassTypeconst,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_class_typeconst_bottom_up(
&mut self,
env: &Env,
elem: &mut ClassTypeconst,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_class_typeconst_def_top_down(
&mut self,
env: &Env,
elem: &mut ClassTypeconstDef<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_class_typeconst_def_bottom_up(
&mut self,
env: &Env,
elem: &mut ClassTypeconstDef<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_xhp_attr_info_top_down(
&mut self,
env: &Env,
elem: &mut XhpAttrInfo,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_xhp_attr_info_bottom_up(
&mut self,
env: &Env,
elem: &mut XhpAttrInfo,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_class_var_top_down(
&mut self,
env: &Env,
elem: &mut ClassVar<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_class_var_bottom_up(
&mut self,
env: &Env,
elem: &mut ClassVar<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_fld_class_var_type__top_down(
&mut self,
env: &Env,
elem: &mut TypeHint<Ex>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_fld_class_var_type__bottom_up(
&mut self,
env: &Env,
elem: &mut TypeHint<Ex>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_method__top_down(&mut self, env: &Env, elem: &mut Method_<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_method__bottom_up(
&mut self,
env: &Env,
elem: &mut Method_<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_fld_method__ret_top_down(
&mut self,
env: &Env,
elem: &mut TypeHint<Ex>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_fld_method__ret_bottom_up(
&mut self,
env: &Env,
elem: &mut TypeHint<Ex>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_typedef_top_down(&mut self, env: &Env, elem: &mut Typedef<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_typedef_bottom_up(
&mut self,
env: &Env,
elem: &mut Typedef<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_fld_typedef_kind_top_down(&mut self, env: &Env, elem: &mut Hint) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_fld_typedef_kind_bottom_up(&mut self, env: &Env, elem: &mut Hint) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_gconst_top_down(&mut self, env: &Env, elem: &mut Gconst<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_gconst_bottom_up(&mut self, env: &Env, elem: &mut Gconst<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_fld_gconst_value_top_down(
&mut self,
env: &Env,
elem: &mut Expr<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_fld_gconst_value_bottom_up(
&mut self,
env: &Env,
elem: &mut Expr<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_fun_def_top_down(&mut self, env: &Env, elem: &mut FunDef<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_fun_def_bottom_up(&mut self, env: &Env, elem: &mut FunDef<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_module_def_top_down(
&mut self,
env: &Env,
elem: &mut ModuleDef<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_module_def_bottom_up(
&mut self,
env: &Env,
elem: &mut ModuleDef<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_def_top_down(&mut self, env: &Env, elem: &mut Def<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_def_bottom_up(&mut self, env: &Env, elem: &mut Def<Ex, En>) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_xhp_child_top_down(&mut self, env: &Env, elem: &mut XhpChild) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_xhp_child_bottom_up(&mut self, env: &Env, elem: &mut XhpChild) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_hint_top_down(&mut self, env: &Env, elem: &mut Hint) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_hint_bottom_up(&mut self, env: &Env, elem: &mut Hint) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_user_attributes_top_down(
&mut self,
env: &Env,
elem: &mut UserAttributes<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_user_attributes_bottom_up(
&mut self,
env: &Env,
elem: &mut UserAttributes<Ex, En>,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_contexts_top_down(&mut self, env: &Env, elem: &mut Contexts) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_contexts_bottom_up(&mut self, env: &Env, elem: &mut Contexts) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_hint_fun_top_down(&mut self, env: &Env, elem: &mut HintFun) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_hint_fun_bottom_up(&mut self, env: &Env, elem: &mut HintFun) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_fld_hint_fun_return_ty_top_down(
&mut self,
env: &Env,
elem: &mut Hint,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_fld_hint_fun_return_ty_bottom_up(
&mut self,
env: &Env,
elem: &mut Hint,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_hint__top_down(&mut self, env: &Env, elem: &mut Hint_) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_hint__bottom_up(&mut self, env: &Env, elem: &mut Hint_) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_refinement_top_down(&mut self, env: &Env, elem: &mut Refinement) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_refinement_bottom_up(&mut self, env: &Env, elem: &mut Refinement) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_type_refinement_top_down(
&mut self,
env: &Env,
elem: &mut TypeRefinement,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_type_refinement_bottom_up(
&mut self,
env: &Env,
elem: &mut TypeRefinement,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_type_refinement_bounds_top_down(
&mut self,
env: &Env,
elem: &mut TypeRefinementBounds,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_type_refinement_bounds_bottom_up(
&mut self,
env: &Env,
elem: &mut TypeRefinementBounds,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_ctx_refinement_top_down(
&mut self,
env: &Env,
elem: &mut CtxRefinement,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_ctx_refinement_bottom_up(
&mut self,
env: &Env,
elem: &mut CtxRefinement,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_ctx_refinement_bounds_top_down(
&mut self,
env: &Env,
elem: &mut CtxRefinementBounds,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_ctx_refinement_bounds_bottom_up(
&mut self,
env: &Env,
elem: &mut CtxRefinementBounds,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_shape_field_info_top_down(
&mut self,
env: &Env,
elem: &mut ShapeFieldInfo,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_shape_field_info_bottom_up(
&mut self,
env: &Env,
elem: &mut ShapeFieldInfo,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_nast_shape_info_top_down(
&mut self,
env: &Env,
elem: &mut NastShapeInfo,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_nast_shape_info_bottom_up(
&mut self,
env: &Env,
elem: &mut NastShapeInfo,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_enum__top_down(&mut self, env: &Env, elem: &mut Enum_) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_enum__bottom_up(&mut self, env: &Env, elem: &mut Enum_) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_where_constraint_hint_top_down(
&mut self,
env: &Env,
elem: &mut WhereConstraintHint,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_where_constraint_hint_bottom_up(
&mut self,
env: &Env,
elem: &mut WhereConstraintHint,
) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_id_top_down(&mut self, env: &Env, elem: &mut Id) -> ControlFlow<()> {
Continue(())
}
#[inline(always)]
fn on_ty_id_bottom_up(&mut self, env: &Env, elem: &mut Id) -> ControlFlow<()> {
Continue(())
}
}
pub struct Passes<P, Q>
where
P: Pass,
Q: Pass,
{
pub fst: P,
pub snd: Q,
}
impl<P, Q> Clone for Passes<P, Q>
where
P: Pass + Clone,
Q: Pass + Clone,
{
fn clone(&self) -> Self {
Passes {
fst: self.fst.clone(),
snd: self.snd.clone(),
}
}
}
impl<P, Q> Pass for Passes<P, Q>
where
P: Pass,
Q: Pass,
{
#[inline(always)]
fn on_ty_program_top_down(&mut self, env: &Env, elem: &mut Program<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_program_top_down(env, elem)?;
self.snd.on_ty_program_top_down(env, elem)
}
#[inline(always)]
fn on_ty_program_bottom_up(
&mut self,
env: &Env,
elem: &mut Program<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_program_bottom_up(env, elem)?;
self.snd.on_ty_program_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_stmt_top_down(&mut self, env: &Env, elem: &mut Stmt<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_stmt_top_down(env, elem)?;
self.snd.on_ty_stmt_top_down(env, elem)
}
#[inline(always)]
fn on_ty_stmt_bottom_up(&mut self, env: &Env, elem: &mut Stmt<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_stmt_bottom_up(env, elem)?;
self.snd.on_ty_stmt_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_stmt__top_down(&mut self, env: &Env, elem: &mut Stmt_<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_stmt__top_down(env, elem)?;
self.snd.on_ty_stmt__top_down(env, elem)
}
#[inline(always)]
fn on_ty_stmt__bottom_up(&mut self, env: &Env, elem: &mut Stmt_<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_stmt__bottom_up(env, elem)?;
self.snd.on_ty_stmt__bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_using_stmt_top_down(
&mut self,
env: &Env,
elem: &mut UsingStmt<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_using_stmt_top_down(env, elem)?;
self.snd.on_ty_using_stmt_top_down(env, elem)
}
#[inline(always)]
fn on_ty_using_stmt_bottom_up(
&mut self,
env: &Env,
elem: &mut UsingStmt<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_using_stmt_bottom_up(env, elem)?;
self.snd.on_ty_using_stmt_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_as_expr_top_down(&mut self, env: &Env, elem: &mut AsExpr<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_as_expr_top_down(env, elem)?;
self.snd.on_ty_as_expr_top_down(env, elem)
}
#[inline(always)]
fn on_ty_as_expr_bottom_up(&mut self, env: &Env, elem: &mut AsExpr<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_as_expr_bottom_up(env, elem)?;
self.snd.on_ty_as_expr_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_block_top_down(&mut self, env: &Env, elem: &mut Block<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_block_top_down(env, elem)?;
self.snd.on_ty_block_top_down(env, elem)
}
#[inline(always)]
fn on_ty_block_bottom_up(&mut self, env: &Env, elem: &mut Block<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_block_bottom_up(env, elem)?;
self.snd.on_ty_block_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_finally_block_top_down(
&mut self,
env: &Env,
elem: &mut FinallyBlock<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_finally_block_top_down(env, elem)?;
self.snd.on_ty_finally_block_top_down(env, elem)
}
#[inline(always)]
fn on_ty_finally_block_bottom_up(
&mut self,
env: &Env,
elem: &mut FinallyBlock<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_finally_block_bottom_up(env, elem)?;
self.snd.on_ty_finally_block_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_stmt_match_top_down(
&mut self,
env: &Env,
elem: &mut StmtMatch<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_stmt_match_top_down(env, elem)?;
self.snd.on_ty_stmt_match_top_down(env, elem)
}
#[inline(always)]
fn on_ty_stmt_match_bottom_up(
&mut self,
env: &Env,
elem: &mut StmtMatch<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_stmt_match_bottom_up(env, elem)?;
self.snd.on_ty_stmt_match_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_stmt_match_arm_top_down(
&mut self,
env: &Env,
elem: &mut StmtMatchArm<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_stmt_match_arm_top_down(env, elem)?;
self.snd.on_ty_stmt_match_arm_top_down(env, elem)
}
#[inline(always)]
fn on_ty_stmt_match_arm_bottom_up(
&mut self,
env: &Env,
elem: &mut StmtMatchArm<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_stmt_match_arm_bottom_up(env, elem)?;
self.snd.on_ty_stmt_match_arm_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_pattern_top_down(&mut self, env: &Env, elem: &mut Pattern) -> ControlFlow<()> {
self.fst.on_ty_pattern_top_down(env, elem)?;
self.snd.on_ty_pattern_top_down(env, elem)
}
#[inline(always)]
fn on_ty_pattern_bottom_up(&mut self, env: &Env, elem: &mut Pattern) -> ControlFlow<()> {
self.fst.on_ty_pattern_bottom_up(env, elem)?;
self.snd.on_ty_pattern_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_pat_var_top_down(&mut self, env: &Env, elem: &mut PatVar) -> ControlFlow<()> {
self.fst.on_ty_pat_var_top_down(env, elem)?;
self.snd.on_ty_pat_var_top_down(env, elem)
}
#[inline(always)]
fn on_ty_pat_var_bottom_up(&mut self, env: &Env, elem: &mut PatVar) -> ControlFlow<()> {
self.fst.on_ty_pat_var_bottom_up(env, elem)?;
self.snd.on_ty_pat_var_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_pat_refinement_top_down(
&mut self,
env: &Env,
elem: &mut PatRefinement,
) -> ControlFlow<()> {
self.fst.on_ty_pat_refinement_top_down(env, elem)?;
self.snd.on_ty_pat_refinement_top_down(env, elem)
}
#[inline(always)]
fn on_ty_pat_refinement_bottom_up(
&mut self,
env: &Env,
elem: &mut PatRefinement,
) -> ControlFlow<()> {
self.fst.on_ty_pat_refinement_bottom_up(env, elem)?;
self.snd.on_ty_pat_refinement_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_class_id_top_down(
&mut self,
env: &Env,
elem: &mut ClassId<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_class_id_top_down(env, elem)?;
self.snd.on_ty_class_id_top_down(env, elem)
}
#[inline(always)]
fn on_ty_class_id_bottom_up(
&mut self,
env: &Env,
elem: &mut ClassId<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_class_id_bottom_up(env, elem)?;
self.snd.on_ty_class_id_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_class_id__top_down(
&mut self,
env: &Env,
elem: &mut ClassId_<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_class_id__top_down(env, elem)?;
self.snd.on_ty_class_id__top_down(env, elem)
}
#[inline(always)]
fn on_ty_class_id__bottom_up(
&mut self,
env: &Env,
elem: &mut ClassId_<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_class_id__bottom_up(env, elem)?;
self.snd.on_ty_class_id__bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_expr_top_down(&mut self, env: &Env, elem: &mut Expr<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_expr_top_down(env, elem)?;
self.snd.on_ty_expr_top_down(env, elem)
}
#[inline(always)]
fn on_ty_expr_bottom_up(&mut self, env: &Env, elem: &mut Expr<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_expr_bottom_up(env, elem)?;
self.snd.on_ty_expr_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_collection_targ_top_down(
&mut self,
env: &Env,
elem: &mut CollectionTarg<Ex>,
) -> ControlFlow<()> {
self.fst.on_ty_collection_targ_top_down(env, elem)?;
self.snd.on_ty_collection_targ_top_down(env, elem)
}
#[inline(always)]
fn on_ty_collection_targ_bottom_up(
&mut self,
env: &Env,
elem: &mut CollectionTarg<Ex>,
) -> ControlFlow<()> {
self.fst.on_ty_collection_targ_bottom_up(env, elem)?;
self.snd.on_ty_collection_targ_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_function_ptr_id_top_down(
&mut self,
env: &Env,
elem: &mut FunctionPtrId<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_function_ptr_id_top_down(env, elem)?;
self.snd.on_ty_function_ptr_id_top_down(env, elem)
}
#[inline(always)]
fn on_ty_function_ptr_id_bottom_up(
&mut self,
env: &Env,
elem: &mut FunctionPtrId<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_function_ptr_id_bottom_up(env, elem)?;
self.snd.on_ty_function_ptr_id_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_expression_tree_top_down(
&mut self,
env: &Env,
elem: &mut ExpressionTree<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_expression_tree_top_down(env, elem)?;
self.snd.on_ty_expression_tree_top_down(env, elem)
}
#[inline(always)]
fn on_ty_expression_tree_bottom_up(
&mut self,
env: &Env,
elem: &mut ExpressionTree<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_expression_tree_bottom_up(env, elem)?;
self.snd.on_ty_expression_tree_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_expr__top_down(&mut self, env: &Env, elem: &mut Expr_<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_expr__top_down(env, elem)?;
self.snd.on_ty_expr__top_down(env, elem)
}
#[inline(always)]
fn on_ty_expr__bottom_up(&mut self, env: &Env, elem: &mut Expr_<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_expr__bottom_up(env, elem)?;
self.snd.on_ty_expr__bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_hole_source_top_down(&mut self, env: &Env, elem: &mut HoleSource) -> ControlFlow<()> {
self.fst.on_ty_hole_source_top_down(env, elem)?;
self.snd.on_ty_hole_source_top_down(env, elem)
}
#[inline(always)]
fn on_ty_hole_source_bottom_up(&mut self, env: &Env, elem: &mut HoleSource) -> ControlFlow<()> {
self.fst.on_ty_hole_source_bottom_up(env, elem)?;
self.snd.on_ty_hole_source_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_binop_top_down(&mut self, env: &Env, elem: &mut Binop<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_binop_top_down(env, elem)?;
self.snd.on_ty_binop_top_down(env, elem)
}
#[inline(always)]
fn on_ty_binop_bottom_up(&mut self, env: &Env, elem: &mut Binop<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_binop_bottom_up(env, elem)?;
self.snd.on_ty_binop_bottom_up(env, elem)
}
#[inline(always)]
fn on_fld_binop_lhs_top_down(&mut self, env: &Env, elem: &mut Expr<Ex, En>) -> ControlFlow<()> {
self.fst.on_fld_binop_lhs_top_down(env, elem)?;
self.snd.on_fld_binop_lhs_top_down(env, elem)
}
#[inline(always)]
fn on_fld_binop_lhs_bottom_up(
&mut self,
env: &Env,
elem: &mut Expr<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_fld_binop_lhs_bottom_up(env, elem)?;
self.snd.on_fld_binop_lhs_bottom_up(env, elem)
}
#[inline(always)]
fn on_fld_binop_rhs_top_down(&mut self, env: &Env, elem: &mut Expr<Ex, En>) -> ControlFlow<()> {
self.fst.on_fld_binop_rhs_top_down(env, elem)?;
self.snd.on_fld_binop_rhs_top_down(env, elem)
}
#[inline(always)]
fn on_fld_binop_rhs_bottom_up(
&mut self,
env: &Env,
elem: &mut Expr<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_fld_binop_rhs_bottom_up(env, elem)?;
self.snd.on_fld_binop_rhs_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_class_get_expr_top_down(
&mut self,
env: &Env,
elem: &mut ClassGetExpr<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_class_get_expr_top_down(env, elem)?;
self.snd.on_ty_class_get_expr_top_down(env, elem)
}
#[inline(always)]
fn on_ty_class_get_expr_bottom_up(
&mut self,
env: &Env,
elem: &mut ClassGetExpr<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_class_get_expr_bottom_up(env, elem)?;
self.snd.on_ty_class_get_expr_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_case_top_down(&mut self, env: &Env, elem: &mut Case<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_case_top_down(env, elem)?;
self.snd.on_ty_case_top_down(env, elem)
}
#[inline(always)]
fn on_ty_case_bottom_up(&mut self, env: &Env, elem: &mut Case<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_case_bottom_up(env, elem)?;
self.snd.on_ty_case_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_default_case_top_down(
&mut self,
env: &Env,
elem: &mut DefaultCase<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_default_case_top_down(env, elem)?;
self.snd.on_ty_default_case_top_down(env, elem)
}
#[inline(always)]
fn on_ty_default_case_bottom_up(
&mut self,
env: &Env,
elem: &mut DefaultCase<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_default_case_bottom_up(env, elem)?;
self.snd.on_ty_default_case_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_catch_top_down(&mut self, env: &Env, elem: &mut Catch<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_catch_top_down(env, elem)?;
self.snd.on_ty_catch_top_down(env, elem)
}
#[inline(always)]
fn on_ty_catch_bottom_up(&mut self, env: &Env, elem: &mut Catch<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_catch_bottom_up(env, elem)?;
self.snd.on_ty_catch_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_field_top_down(&mut self, env: &Env, elem: &mut Field<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_field_top_down(env, elem)?;
self.snd.on_ty_field_top_down(env, elem)
}
#[inline(always)]
fn on_ty_field_bottom_up(&mut self, env: &Env, elem: &mut Field<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_field_bottom_up(env, elem)?;
self.snd.on_ty_field_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_afield_top_down(&mut self, env: &Env, elem: &mut Afield<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_afield_top_down(env, elem)?;
self.snd.on_ty_afield_top_down(env, elem)
}
#[inline(always)]
fn on_ty_afield_bottom_up(&mut self, env: &Env, elem: &mut Afield<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_afield_bottom_up(env, elem)?;
self.snd.on_ty_afield_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_xhp_simple_top_down(
&mut self,
env: &Env,
elem: &mut XhpSimple<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_xhp_simple_top_down(env, elem)?;
self.snd.on_ty_xhp_simple_top_down(env, elem)
}
#[inline(always)]
fn on_ty_xhp_simple_bottom_up(
&mut self,
env: &Env,
elem: &mut XhpSimple<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_xhp_simple_bottom_up(env, elem)?;
self.snd.on_ty_xhp_simple_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_xhp_attribute_top_down(
&mut self,
env: &Env,
elem: &mut XhpAttribute<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_xhp_attribute_top_down(env, elem)?;
self.snd.on_ty_xhp_attribute_top_down(env, elem)
}
#[inline(always)]
fn on_ty_xhp_attribute_bottom_up(
&mut self,
env: &Env,
elem: &mut XhpAttribute<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_xhp_attribute_bottom_up(env, elem)?;
self.snd.on_ty_xhp_attribute_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_fun_param_top_down(
&mut self,
env: &Env,
elem: &mut FunParam<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_fun_param_top_down(env, elem)?;
self.snd.on_ty_fun_param_top_down(env, elem)
}
#[inline(always)]
fn on_ty_fun_param_bottom_up(
&mut self,
env: &Env,
elem: &mut FunParam<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_fun_param_bottom_up(env, elem)?;
self.snd.on_ty_fun_param_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_fun__top_down(&mut self, env: &Env, elem: &mut Fun_<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_fun__top_down(env, elem)?;
self.snd.on_ty_fun__top_down(env, elem)
}
#[inline(always)]
fn on_ty_fun__bottom_up(&mut self, env: &Env, elem: &mut Fun_<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_fun__bottom_up(env, elem)?;
self.snd.on_ty_fun__bottom_up(env, elem)
}
#[inline(always)]
fn on_fld_fun__ret_top_down(&mut self, env: &Env, elem: &mut TypeHint<Ex>) -> ControlFlow<()> {
self.fst.on_fld_fun__ret_top_down(env, elem)?;
self.snd.on_fld_fun__ret_top_down(env, elem)
}
#[inline(always)]
fn on_fld_fun__ret_bottom_up(&mut self, env: &Env, elem: &mut TypeHint<Ex>) -> ControlFlow<()> {
self.fst.on_fld_fun__ret_bottom_up(env, elem)?;
self.snd.on_fld_fun__ret_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_capture_lid_top_down(
&mut self,
env: &Env,
elem: &mut CaptureLid<Ex>,
) -> ControlFlow<()> {
self.fst.on_ty_capture_lid_top_down(env, elem)?;
self.snd.on_ty_capture_lid_top_down(env, elem)
}
#[inline(always)]
fn on_ty_capture_lid_bottom_up(
&mut self,
env: &Env,
elem: &mut CaptureLid<Ex>,
) -> ControlFlow<()> {
self.fst.on_ty_capture_lid_bottom_up(env, elem)?;
self.snd.on_ty_capture_lid_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_efun_top_down(&mut self, env: &Env, elem: &mut Efun<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_efun_top_down(env, elem)?;
self.snd.on_ty_efun_top_down(env, elem)
}
#[inline(always)]
fn on_ty_efun_bottom_up(&mut self, env: &Env, elem: &mut Efun<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_efun_bottom_up(env, elem)?;
self.snd.on_ty_efun_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_func_body_top_down(
&mut self,
env: &Env,
elem: &mut FuncBody<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_func_body_top_down(env, elem)?;
self.snd.on_ty_func_body_top_down(env, elem)
}
#[inline(always)]
fn on_ty_func_body_bottom_up(
&mut self,
env: &Env,
elem: &mut FuncBody<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_func_body_bottom_up(env, elem)?;
self.snd.on_ty_func_body_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_type_hint_top_down(&mut self, env: &Env, elem: &mut TypeHint<Ex>) -> ControlFlow<()> {
self.fst.on_ty_type_hint_top_down(env, elem)?;
self.snd.on_ty_type_hint_top_down(env, elem)
}
#[inline(always)]
fn on_ty_type_hint_bottom_up(&mut self, env: &Env, elem: &mut TypeHint<Ex>) -> ControlFlow<()> {
self.fst.on_ty_type_hint_bottom_up(env, elem)?;
self.snd.on_ty_type_hint_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_targ_top_down(&mut self, env: &Env, elem: &mut Targ<Ex>) -> ControlFlow<()> {
self.fst.on_ty_targ_top_down(env, elem)?;
self.snd.on_ty_targ_top_down(env, elem)
}
#[inline(always)]
fn on_ty_targ_bottom_up(&mut self, env: &Env, elem: &mut Targ<Ex>) -> ControlFlow<()> {
self.fst.on_ty_targ_bottom_up(env, elem)?;
self.snd.on_ty_targ_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_call_expr_top_down(
&mut self,
env: &Env,
elem: &mut CallExpr<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_call_expr_top_down(env, elem)?;
self.snd.on_ty_call_expr_top_down(env, elem)
}
#[inline(always)]
fn on_ty_call_expr_bottom_up(
&mut self,
env: &Env,
elem: &mut CallExpr<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_call_expr_bottom_up(env, elem)?;
self.snd.on_ty_call_expr_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_user_attribute_top_down(
&mut self,
env: &Env,
elem: &mut UserAttribute<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_user_attribute_top_down(env, elem)?;
self.snd.on_ty_user_attribute_top_down(env, elem)
}
#[inline(always)]
fn on_ty_user_attribute_bottom_up(
&mut self,
env: &Env,
elem: &mut UserAttribute<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_user_attribute_bottom_up(env, elem)?;
self.snd.on_ty_user_attribute_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_file_attribute_top_down(
&mut self,
env: &Env,
elem: &mut FileAttribute<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_file_attribute_top_down(env, elem)?;
self.snd.on_ty_file_attribute_top_down(env, elem)
}
#[inline(always)]
fn on_ty_file_attribute_bottom_up(
&mut self,
env: &Env,
elem: &mut FileAttribute<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_file_attribute_bottom_up(env, elem)?;
self.snd.on_ty_file_attribute_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_tparam_top_down(&mut self, env: &Env, elem: &mut Tparam<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_tparam_top_down(env, elem)?;
self.snd.on_ty_tparam_top_down(env, elem)
}
#[inline(always)]
fn on_ty_tparam_bottom_up(&mut self, env: &Env, elem: &mut Tparam<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_tparam_bottom_up(env, elem)?;
self.snd.on_ty_tparam_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_class__top_down(&mut self, env: &Env, elem: &mut Class_<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_class__top_down(env, elem)?;
self.snd.on_ty_class__top_down(env, elem)
}
#[inline(always)]
fn on_ty_class__bottom_up(&mut self, env: &Env, elem: &mut Class_<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_class__bottom_up(env, elem)?;
self.snd.on_ty_class__bottom_up(env, elem)
}
#[inline(always)]
fn on_fld_class__tparams_top_down(
&mut self,
env: &Env,
elem: &mut Vec<Tparam<Ex, En>>,
) -> ControlFlow<()> {
self.fst.on_fld_class__tparams_top_down(env, elem)?;
self.snd.on_fld_class__tparams_top_down(env, elem)
}
#[inline(always)]
fn on_fld_class__tparams_bottom_up(
&mut self,
env: &Env,
elem: &mut Vec<Tparam<Ex, En>>,
) -> ControlFlow<()> {
self.fst.on_fld_class__tparams_bottom_up(env, elem)?;
self.snd.on_fld_class__tparams_bottom_up(env, elem)
}
#[inline(always)]
fn on_fld_class__extends_top_down(
&mut self,
env: &Env,
elem: &mut Vec<ClassHint>,
) -> ControlFlow<()> {
self.fst.on_fld_class__extends_top_down(env, elem)?;
self.snd.on_fld_class__extends_top_down(env, elem)
}
#[inline(always)]
fn on_fld_class__extends_bottom_up(
&mut self,
env: &Env,
elem: &mut Vec<ClassHint>,
) -> ControlFlow<()> {
self.fst.on_fld_class__extends_bottom_up(env, elem)?;
self.snd.on_fld_class__extends_bottom_up(env, elem)
}
#[inline(always)]
fn on_fld_class__uses_top_down(
&mut self,
env: &Env,
elem: &mut Vec<TraitHint>,
) -> ControlFlow<()> {
self.fst.on_fld_class__uses_top_down(env, elem)?;
self.snd.on_fld_class__uses_top_down(env, elem)
}
#[inline(always)]
fn on_fld_class__uses_bottom_up(
&mut self,
env: &Env,
elem: &mut Vec<TraitHint>,
) -> ControlFlow<()> {
self.fst.on_fld_class__uses_bottom_up(env, elem)?;
self.snd.on_fld_class__uses_bottom_up(env, elem)
}
#[inline(always)]
fn on_fld_class__xhp_attr_uses_top_down(
&mut self,
env: &Env,
elem: &mut Vec<XhpAttrHint>,
) -> ControlFlow<()> {
self.fst.on_fld_class__xhp_attr_uses_top_down(env, elem)?;
self.snd.on_fld_class__xhp_attr_uses_top_down(env, elem)
}
#[inline(always)]
fn on_fld_class__xhp_attr_uses_bottom_up(
&mut self,
env: &Env,
elem: &mut Vec<XhpAttrHint>,
) -> ControlFlow<()> {
self.fst.on_fld_class__xhp_attr_uses_bottom_up(env, elem)?;
self.snd.on_fld_class__xhp_attr_uses_bottom_up(env, elem)
}
#[inline(always)]
fn on_fld_class__reqs_top_down(
&mut self,
env: &Env,
elem: &mut Vec<ClassReq>,
) -> ControlFlow<()> {
self.fst.on_fld_class__reqs_top_down(env, elem)?;
self.snd.on_fld_class__reqs_top_down(env, elem)
}
#[inline(always)]
fn on_fld_class__reqs_bottom_up(
&mut self,
env: &Env,
elem: &mut Vec<ClassReq>,
) -> ControlFlow<()> {
self.fst.on_fld_class__reqs_bottom_up(env, elem)?;
self.snd.on_fld_class__reqs_bottom_up(env, elem)
}
#[inline(always)]
fn on_fld_class__implements_top_down(
&mut self,
env: &Env,
elem: &mut Vec<ClassHint>,
) -> ControlFlow<()> {
self.fst.on_fld_class__implements_top_down(env, elem)?;
self.snd.on_fld_class__implements_top_down(env, elem)
}
#[inline(always)]
fn on_fld_class__implements_bottom_up(
&mut self,
env: &Env,
elem: &mut Vec<ClassHint>,
) -> ControlFlow<()> {
self.fst.on_fld_class__implements_bottom_up(env, elem)?;
self.snd.on_fld_class__implements_bottom_up(env, elem)
}
#[inline(always)]
fn on_fld_class__consts_top_down(
&mut self,
env: &Env,
elem: &mut Vec<ClassConst<Ex, En>>,
) -> ControlFlow<()> {
self.fst.on_fld_class__consts_top_down(env, elem)?;
self.snd.on_fld_class__consts_top_down(env, elem)
}
#[inline(always)]
fn on_fld_class__consts_bottom_up(
&mut self,
env: &Env,
elem: &mut Vec<ClassConst<Ex, En>>,
) -> ControlFlow<()> {
self.fst.on_fld_class__consts_bottom_up(env, elem)?;
self.snd.on_fld_class__consts_bottom_up(env, elem)
}
#[inline(always)]
fn on_fld_class__xhp_attrs_top_down(
&mut self,
env: &Env,
elem: &mut Vec<XhpAttr<Ex, En>>,
) -> ControlFlow<()> {
self.fst.on_fld_class__xhp_attrs_top_down(env, elem)?;
self.snd.on_fld_class__xhp_attrs_top_down(env, elem)
}
#[inline(always)]
fn on_fld_class__xhp_attrs_bottom_up(
&mut self,
env: &Env,
elem: &mut Vec<XhpAttr<Ex, En>>,
) -> ControlFlow<()> {
self.fst.on_fld_class__xhp_attrs_bottom_up(env, elem)?;
self.snd.on_fld_class__xhp_attrs_bottom_up(env, elem)
}
#[inline(always)]
fn on_fld_class__user_attributes_top_down(
&mut self,
env: &Env,
elem: &mut UserAttributes<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_fld_class__user_attributes_top_down(env, elem)?;
self.snd.on_fld_class__user_attributes_top_down(env, elem)
}
#[inline(always)]
fn on_fld_class__user_attributes_bottom_up(
&mut self,
env: &Env,
elem: &mut UserAttributes<Ex, En>,
) -> ControlFlow<()> {
self.fst
.on_fld_class__user_attributes_bottom_up(env, elem)?;
self.snd.on_fld_class__user_attributes_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_class_req_top_down(&mut self, env: &Env, elem: &mut ClassReq) -> ControlFlow<()> {
self.fst.on_ty_class_req_top_down(env, elem)?;
self.snd.on_ty_class_req_top_down(env, elem)
}
#[inline(always)]
fn on_ty_class_req_bottom_up(&mut self, env: &Env, elem: &mut ClassReq) -> ControlFlow<()> {
self.fst.on_ty_class_req_bottom_up(env, elem)?;
self.snd.on_ty_class_req_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_xhp_attr_top_down(
&mut self,
env: &Env,
elem: &mut XhpAttr<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_xhp_attr_top_down(env, elem)?;
self.snd.on_ty_xhp_attr_top_down(env, elem)
}
#[inline(always)]
fn on_ty_xhp_attr_bottom_up(
&mut self,
env: &Env,
elem: &mut XhpAttr<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_xhp_attr_bottom_up(env, elem)?;
self.snd.on_ty_xhp_attr_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_class_const_kind_top_down(
&mut self,
env: &Env,
elem: &mut ClassConstKind<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_class_const_kind_top_down(env, elem)?;
self.snd.on_ty_class_const_kind_top_down(env, elem)
}
#[inline(always)]
fn on_ty_class_const_kind_bottom_up(
&mut self,
env: &Env,
elem: &mut ClassConstKind<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_class_const_kind_bottom_up(env, elem)?;
self.snd.on_ty_class_const_kind_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_class_const_top_down(
&mut self,
env: &Env,
elem: &mut ClassConst<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_class_const_top_down(env, elem)?;
self.snd.on_ty_class_const_top_down(env, elem)
}
#[inline(always)]
fn on_ty_class_const_bottom_up(
&mut self,
env: &Env,
elem: &mut ClassConst<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_class_const_bottom_up(env, elem)?;
self.snd.on_ty_class_const_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_class_abstract_typeconst_top_down(
&mut self,
env: &Env,
elem: &mut ClassAbstractTypeconst,
) -> ControlFlow<()> {
self.fst
.on_ty_class_abstract_typeconst_top_down(env, elem)?;
self.snd.on_ty_class_abstract_typeconst_top_down(env, elem)
}
#[inline(always)]
fn on_ty_class_abstract_typeconst_bottom_up(
&mut self,
env: &Env,
elem: &mut ClassAbstractTypeconst,
) -> ControlFlow<()> {
self.fst
.on_ty_class_abstract_typeconst_bottom_up(env, elem)?;
self.snd.on_ty_class_abstract_typeconst_bottom_up(env, elem)
}
#[inline(always)]
fn on_fld_class_abstract_typeconst_default_top_down(
&mut self,
env: &Env,
elem: &mut Option<Hint>,
) -> ControlFlow<()> {
self.fst
.on_fld_class_abstract_typeconst_default_top_down(env, elem)?;
self.snd
.on_fld_class_abstract_typeconst_default_top_down(env, elem)
}
#[inline(always)]
fn on_fld_class_abstract_typeconst_default_bottom_up(
&mut self,
env: &Env,
elem: &mut Option<Hint>,
) -> ControlFlow<()> {
self.fst
.on_fld_class_abstract_typeconst_default_bottom_up(env, elem)?;
self.snd
.on_fld_class_abstract_typeconst_default_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_class_concrete_typeconst_top_down(
&mut self,
env: &Env,
elem: &mut ClassConcreteTypeconst,
) -> ControlFlow<()> {
self.fst
.on_ty_class_concrete_typeconst_top_down(env, elem)?;
self.snd.on_ty_class_concrete_typeconst_top_down(env, elem)
}
#[inline(always)]
fn on_ty_class_concrete_typeconst_bottom_up(
&mut self,
env: &Env,
elem: &mut ClassConcreteTypeconst,
) -> ControlFlow<()> {
self.fst
.on_ty_class_concrete_typeconst_bottom_up(env, elem)?;
self.snd.on_ty_class_concrete_typeconst_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_class_typeconst_top_down(
&mut self,
env: &Env,
elem: &mut ClassTypeconst,
) -> ControlFlow<()> {
self.fst.on_ty_class_typeconst_top_down(env, elem)?;
self.snd.on_ty_class_typeconst_top_down(env, elem)
}
#[inline(always)]
fn on_ty_class_typeconst_bottom_up(
&mut self,
env: &Env,
elem: &mut ClassTypeconst,
) -> ControlFlow<()> {
self.fst.on_ty_class_typeconst_bottom_up(env, elem)?;
self.snd.on_ty_class_typeconst_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_class_typeconst_def_top_down(
&mut self,
env: &Env,
elem: &mut ClassTypeconstDef<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_class_typeconst_def_top_down(env, elem)?;
self.snd.on_ty_class_typeconst_def_top_down(env, elem)
}
#[inline(always)]
fn on_ty_class_typeconst_def_bottom_up(
&mut self,
env: &Env,
elem: &mut ClassTypeconstDef<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_class_typeconst_def_bottom_up(env, elem)?;
self.snd.on_ty_class_typeconst_def_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_xhp_attr_info_top_down(
&mut self,
env: &Env,
elem: &mut XhpAttrInfo,
) -> ControlFlow<()> {
self.fst.on_ty_xhp_attr_info_top_down(env, elem)?;
self.snd.on_ty_xhp_attr_info_top_down(env, elem)
}
#[inline(always)]
fn on_ty_xhp_attr_info_bottom_up(
&mut self,
env: &Env,
elem: &mut XhpAttrInfo,
) -> ControlFlow<()> {
self.fst.on_ty_xhp_attr_info_bottom_up(env, elem)?;
self.snd.on_ty_xhp_attr_info_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_class_var_top_down(
&mut self,
env: &Env,
elem: &mut ClassVar<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_class_var_top_down(env, elem)?;
self.snd.on_ty_class_var_top_down(env, elem)
}
#[inline(always)]
fn on_ty_class_var_bottom_up(
&mut self,
env: &Env,
elem: &mut ClassVar<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_class_var_bottom_up(env, elem)?;
self.snd.on_ty_class_var_bottom_up(env, elem)
}
#[inline(always)]
fn on_fld_class_var_type__top_down(
&mut self,
env: &Env,
elem: &mut TypeHint<Ex>,
) -> ControlFlow<()> {
self.fst.on_fld_class_var_type__top_down(env, elem)?;
self.snd.on_fld_class_var_type__top_down(env, elem)
}
#[inline(always)]
fn on_fld_class_var_type__bottom_up(
&mut self,
env: &Env,
elem: &mut TypeHint<Ex>,
) -> ControlFlow<()> {
self.fst.on_fld_class_var_type__bottom_up(env, elem)?;
self.snd.on_fld_class_var_type__bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_method__top_down(&mut self, env: &Env, elem: &mut Method_<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_method__top_down(env, elem)?;
self.snd.on_ty_method__top_down(env, elem)
}
#[inline(always)]
fn on_ty_method__bottom_up(
&mut self,
env: &Env,
elem: &mut Method_<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_method__bottom_up(env, elem)?;
self.snd.on_ty_method__bottom_up(env, elem)
}
#[inline(always)]
fn on_fld_method__ret_top_down(
&mut self,
env: &Env,
elem: &mut TypeHint<Ex>,
) -> ControlFlow<()> {
self.fst.on_fld_method__ret_top_down(env, elem)?;
self.snd.on_fld_method__ret_top_down(env, elem)
}
#[inline(always)]
fn on_fld_method__ret_bottom_up(
&mut self,
env: &Env,
elem: &mut TypeHint<Ex>,
) -> ControlFlow<()> {
self.fst.on_fld_method__ret_bottom_up(env, elem)?;
self.snd.on_fld_method__ret_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_typedef_top_down(&mut self, env: &Env, elem: &mut Typedef<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_typedef_top_down(env, elem)?;
self.snd.on_ty_typedef_top_down(env, elem)
}
#[inline(always)]
fn on_ty_typedef_bottom_up(
&mut self,
env: &Env,
elem: &mut Typedef<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_typedef_bottom_up(env, elem)?;
self.snd.on_ty_typedef_bottom_up(env, elem)
}
#[inline(always)]
fn on_fld_typedef_kind_top_down(&mut self, env: &Env, elem: &mut Hint) -> ControlFlow<()> {
self.fst.on_fld_typedef_kind_top_down(env, elem)?;
self.snd.on_fld_typedef_kind_top_down(env, elem)
}
#[inline(always)]
fn on_fld_typedef_kind_bottom_up(&mut self, env: &Env, elem: &mut Hint) -> ControlFlow<()> {
self.fst.on_fld_typedef_kind_bottom_up(env, elem)?;
self.snd.on_fld_typedef_kind_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_gconst_top_down(&mut self, env: &Env, elem: &mut Gconst<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_gconst_top_down(env, elem)?;
self.snd.on_ty_gconst_top_down(env, elem)
}
#[inline(always)]
fn on_ty_gconst_bottom_up(&mut self, env: &Env, elem: &mut Gconst<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_gconst_bottom_up(env, elem)?;
self.snd.on_ty_gconst_bottom_up(env, elem)
}
#[inline(always)]
fn on_fld_gconst_value_top_down(
&mut self,
env: &Env,
elem: &mut Expr<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_fld_gconst_value_top_down(env, elem)?;
self.snd.on_fld_gconst_value_top_down(env, elem)
}
#[inline(always)]
fn on_fld_gconst_value_bottom_up(
&mut self,
env: &Env,
elem: &mut Expr<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_fld_gconst_value_bottom_up(env, elem)?;
self.snd.on_fld_gconst_value_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_fun_def_top_down(&mut self, env: &Env, elem: &mut FunDef<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_fun_def_top_down(env, elem)?;
self.snd.on_ty_fun_def_top_down(env, elem)
}
#[inline(always)]
fn on_ty_fun_def_bottom_up(&mut self, env: &Env, elem: &mut FunDef<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_fun_def_bottom_up(env, elem)?;
self.snd.on_ty_fun_def_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_module_def_top_down(
&mut self,
env: &Env,
elem: &mut ModuleDef<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_module_def_top_down(env, elem)?;
self.snd.on_ty_module_def_top_down(env, elem)
}
#[inline(always)]
fn on_ty_module_def_bottom_up(
&mut self,
env: &Env,
elem: &mut ModuleDef<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_module_def_bottom_up(env, elem)?;
self.snd.on_ty_module_def_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_def_top_down(&mut self, env: &Env, elem: &mut Def<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_def_top_down(env, elem)?;
self.snd.on_ty_def_top_down(env, elem)
}
#[inline(always)]
fn on_ty_def_bottom_up(&mut self, env: &Env, elem: &mut Def<Ex, En>) -> ControlFlow<()> {
self.fst.on_ty_def_bottom_up(env, elem)?;
self.snd.on_ty_def_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_xhp_child_top_down(&mut self, env: &Env, elem: &mut XhpChild) -> ControlFlow<()> {
self.fst.on_ty_xhp_child_top_down(env, elem)?;
self.snd.on_ty_xhp_child_top_down(env, elem)
}
#[inline(always)]
fn on_ty_xhp_child_bottom_up(&mut self, env: &Env, elem: &mut XhpChild) -> ControlFlow<()> {
self.fst.on_ty_xhp_child_bottom_up(env, elem)?;
self.snd.on_ty_xhp_child_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_hint_top_down(&mut self, env: &Env, elem: &mut Hint) -> ControlFlow<()> {
self.fst.on_ty_hint_top_down(env, elem)?;
self.snd.on_ty_hint_top_down(env, elem)
}
#[inline(always)]
fn on_ty_hint_bottom_up(&mut self, env: &Env, elem: &mut Hint) -> ControlFlow<()> {
self.fst.on_ty_hint_bottom_up(env, elem)?;
self.snd.on_ty_hint_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_user_attributes_top_down(
&mut self,
env: &Env,
elem: &mut UserAttributes<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_user_attributes_top_down(env, elem)?;
self.snd.on_ty_user_attributes_top_down(env, elem)
}
#[inline(always)]
fn on_ty_user_attributes_bottom_up(
&mut self,
env: &Env,
elem: &mut UserAttributes<Ex, En>,
) -> ControlFlow<()> {
self.fst.on_ty_user_attributes_bottom_up(env, elem)?;
self.snd.on_ty_user_attributes_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_contexts_top_down(&mut self, env: &Env, elem: &mut Contexts) -> ControlFlow<()> {
self.fst.on_ty_contexts_top_down(env, elem)?;
self.snd.on_ty_contexts_top_down(env, elem)
}
#[inline(always)]
fn on_ty_contexts_bottom_up(&mut self, env: &Env, elem: &mut Contexts) -> ControlFlow<()> {
self.fst.on_ty_contexts_bottom_up(env, elem)?;
self.snd.on_ty_contexts_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_hint_fun_top_down(&mut self, env: &Env, elem: &mut HintFun) -> ControlFlow<()> {
self.fst.on_ty_hint_fun_top_down(env, elem)?;
self.snd.on_ty_hint_fun_top_down(env, elem)
}
#[inline(always)]
fn on_ty_hint_fun_bottom_up(&mut self, env: &Env, elem: &mut HintFun) -> ControlFlow<()> {
self.fst.on_ty_hint_fun_bottom_up(env, elem)?;
self.snd.on_ty_hint_fun_bottom_up(env, elem)
}
#[inline(always)]
fn on_fld_hint_fun_return_ty_top_down(
&mut self,
env: &Env,
elem: &mut Hint,
) -> ControlFlow<()> {
self.fst.on_fld_hint_fun_return_ty_top_down(env, elem)?;
self.snd.on_fld_hint_fun_return_ty_top_down(env, elem)
}
#[inline(always)]
fn on_fld_hint_fun_return_ty_bottom_up(
&mut self,
env: &Env,
elem: &mut Hint,
) -> ControlFlow<()> {
self.fst.on_fld_hint_fun_return_ty_bottom_up(env, elem)?;
self.snd.on_fld_hint_fun_return_ty_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_hint__top_down(&mut self, env: &Env, elem: &mut Hint_) -> ControlFlow<()> {
self.fst.on_ty_hint__top_down(env, elem)?;
self.snd.on_ty_hint__top_down(env, elem)
}
#[inline(always)]
fn on_ty_hint__bottom_up(&mut self, env: &Env, elem: &mut Hint_) -> ControlFlow<()> {
self.fst.on_ty_hint__bottom_up(env, elem)?;
self.snd.on_ty_hint__bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_refinement_top_down(&mut self, env: &Env, elem: &mut Refinement) -> ControlFlow<()> {
self.fst.on_ty_refinement_top_down(env, elem)?;
self.snd.on_ty_refinement_top_down(env, elem)
}
#[inline(always)]
fn on_ty_refinement_bottom_up(&mut self, env: &Env, elem: &mut Refinement) -> ControlFlow<()> {
self.fst.on_ty_refinement_bottom_up(env, elem)?;
self.snd.on_ty_refinement_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_type_refinement_top_down(
&mut self,
env: &Env,
elem: &mut TypeRefinement,
) -> ControlFlow<()> {
self.fst.on_ty_type_refinement_top_down(env, elem)?;
self.snd.on_ty_type_refinement_top_down(env, elem)
}
#[inline(always)]
fn on_ty_type_refinement_bottom_up(
&mut self,
env: &Env,
elem: &mut TypeRefinement,
) -> ControlFlow<()> {
self.fst.on_ty_type_refinement_bottom_up(env, elem)?;
self.snd.on_ty_type_refinement_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_type_refinement_bounds_top_down(
&mut self,
env: &Env,
elem: &mut TypeRefinementBounds,
) -> ControlFlow<()> {
self.fst.on_ty_type_refinement_bounds_top_down(env, elem)?;
self.snd.on_ty_type_refinement_bounds_top_down(env, elem)
}
#[inline(always)]
fn on_ty_type_refinement_bounds_bottom_up(
&mut self,
env: &Env,
elem: &mut TypeRefinementBounds,
) -> ControlFlow<()> {
self.fst.on_ty_type_refinement_bounds_bottom_up(env, elem)?;
self.snd.on_ty_type_refinement_bounds_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_ctx_refinement_top_down(
&mut self,
env: &Env,
elem: &mut CtxRefinement,
) -> ControlFlow<()> {
self.fst.on_ty_ctx_refinement_top_down(env, elem)?;
self.snd.on_ty_ctx_refinement_top_down(env, elem)
}
#[inline(always)]
fn on_ty_ctx_refinement_bottom_up(
&mut self,
env: &Env,
elem: &mut CtxRefinement,
) -> ControlFlow<()> {
self.fst.on_ty_ctx_refinement_bottom_up(env, elem)?;
self.snd.on_ty_ctx_refinement_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_ctx_refinement_bounds_top_down(
&mut self,
env: &Env,
elem: &mut CtxRefinementBounds,
) -> ControlFlow<()> {
self.fst.on_ty_ctx_refinement_bounds_top_down(env, elem)?;
self.snd.on_ty_ctx_refinement_bounds_top_down(env, elem)
}
#[inline(always)]
fn on_ty_ctx_refinement_bounds_bottom_up(
&mut self,
env: &Env,
elem: &mut CtxRefinementBounds,
) -> ControlFlow<()> {
self.fst.on_ty_ctx_refinement_bounds_bottom_up(env, elem)?;
self.snd.on_ty_ctx_refinement_bounds_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_shape_field_info_top_down(
&mut self,
env: &Env,
elem: &mut ShapeFieldInfo,
) -> ControlFlow<()> {
self.fst.on_ty_shape_field_info_top_down(env, elem)?;
self.snd.on_ty_shape_field_info_top_down(env, elem)
}
#[inline(always)]
fn on_ty_shape_field_info_bottom_up(
&mut self,
env: &Env,
elem: &mut ShapeFieldInfo,
) -> ControlFlow<()> {
self.fst.on_ty_shape_field_info_bottom_up(env, elem)?;
self.snd.on_ty_shape_field_info_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_nast_shape_info_top_down(
&mut self,
env: &Env,
elem: &mut NastShapeInfo,
) -> ControlFlow<()> {
self.fst.on_ty_nast_shape_info_top_down(env, elem)?;
self.snd.on_ty_nast_shape_info_top_down(env, elem)
}
#[inline(always)]
fn on_ty_nast_shape_info_bottom_up(
&mut self,
env: &Env,
elem: &mut NastShapeInfo,
) -> ControlFlow<()> {
self.fst.on_ty_nast_shape_info_bottom_up(env, elem)?;
self.snd.on_ty_nast_shape_info_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_enum__top_down(&mut self, env: &Env, elem: &mut Enum_) -> ControlFlow<()> {
self.fst.on_ty_enum__top_down(env, elem)?;
self.snd.on_ty_enum__top_down(env, elem)
}
#[inline(always)]
fn on_ty_enum__bottom_up(&mut self, env: &Env, elem: &mut Enum_) -> ControlFlow<()> {
self.fst.on_ty_enum__bottom_up(env, elem)?;
self.snd.on_ty_enum__bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_where_constraint_hint_top_down(
&mut self,
env: &Env,
elem: &mut WhereConstraintHint,
) -> ControlFlow<()> {
self.fst.on_ty_where_constraint_hint_top_down(env, elem)?;
self.snd.on_ty_where_constraint_hint_top_down(env, elem)
}
#[inline(always)]
fn on_ty_where_constraint_hint_bottom_up(
&mut self,
env: &Env,
elem: &mut WhereConstraintHint,
) -> ControlFlow<()> {
self.fst.on_ty_where_constraint_hint_bottom_up(env, elem)?;
self.snd.on_ty_where_constraint_hint_bottom_up(env, elem)
}
#[inline(always)]
fn on_ty_id_top_down(&mut self, env: &Env, elem: &mut Id) -> ControlFlow<()> {
self.fst.on_ty_id_top_down(env, elem)?;
self.snd.on_ty_id_top_down(env, elem)
}
#[inline(always)]
fn on_ty_id_bottom_up(&mut self, env: &Env, elem: &mut Id) -> ControlFlow<()> {
self.fst.on_ty_id_bottom_up(env, elem)?;
self.snd.on_ty_id_bottom_up(env, elem)
}
}
Rust | hhvm/hphp/hack/src/elab/passes.rs | // Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
pub mod elab_as_expr;
pub mod elab_block;
pub mod elab_class_id;
pub mod elab_class_vars;
pub mod elab_const_expr;
pub mod elab_defs;
pub mod elab_dynamic_class_name;
pub mod elab_enum_class;
pub mod elab_everything_sdt;
pub mod elab_expr_call_call_user_func;
pub mod elab_expr_call_hh_invariant;
pub mod elab_expr_call_hh_meth_caller;
pub mod elab_expr_collection;
pub mod elab_expr_import;
pub mod elab_expr_lvar;
pub mod elab_expr_package;
pub mod elab_expr_tuple;
pub mod elab_func_body;
pub mod elab_hint_haccess;
pub mod elab_hint_happly;
pub mod elab_hint_hsoft;
pub mod elab_hint_retonly;
pub mod elab_hint_this;
pub mod elab_hint_wildcard;
pub mod elab_hkt;
pub mod elab_shape_field_name;
pub mod elab_user_attributes;
pub mod guard_invalid;
pub mod remove_memo_attr;
pub mod validate_class_consistent_construct;
pub mod validate_class_member;
pub mod validate_class_methods;
pub mod validate_class_req;
pub mod validate_class_tparams;
pub mod validate_class_user_attribute_const;
pub mod validate_class_var_user_attribute_const;
pub mod validate_class_var_user_attribute_lsb;
pub mod validate_control_context;
pub mod validate_coroutine;
pub mod validate_dynamic_hint;
pub mod validate_expr_array_get;
pub mod validate_expr_call_echo;
pub mod validate_expr_cast;
pub mod validate_expr_function_pointer;
pub mod validate_expr_list;
pub mod validate_fun_param_inout;
pub mod validate_fun_params;
pub mod validate_global_const;
pub mod validate_hint_habstr;
pub mod validate_hint_hrefinement;
pub mod validate_illegal_name;
pub mod validate_interface;
pub mod validate_like_hint;
pub mod validate_method_private_final;
pub mod validate_module;
pub mod validate_php_lambda;
pub mod validate_shape_name;
pub mod validate_supportdyn;
pub mod validate_trait_internal;
pub mod validate_user_attribute_arity;
pub mod validate_user_attribute_deprecated_static;
pub mod validate_user_attribute_dynamically_callable;
pub mod validate_user_attribute_entry_point;
pub mod validate_user_attribute_infer_flows;
pub mod validate_user_attribute_memoize;
pub mod validate_user_attribute_no_auto_dynamic;
pub mod validate_user_attribute_soft_internal;
pub mod validate_xhp_attribute;
pub mod validate_xhp_name;
Rust | hhvm/hphp/hack/src/elab/transform.rs | // Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
//
// @generated SignedSource<<792c74b8f3cef5d315ed84c1ef179b2e>>
//
// To regenerate this file, run:
// hphp/hack/src/oxidized_regen.sh
#![allow(unused_variables)]
#![allow(unused_braces)]
#![allow(clippy::match_single_binding)]
use std::ops::ControlFlow::Break;
use oxidized::nast::*;
use crate::env::Env;
use crate::Pass;
pub trait Transform {
#[inline(always)]
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
self.traverse(env, pass);
}
#[inline(always)]
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {}
}
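// Every generated impl below follows the same protocol: `transform` first
// calls the pass's `*_top_down` hook, then `traverse` recurses into the
// node's children, and finally the `*_bottom_up` hook runs on the (possibly
// rewritten) node. A `Break(..)` from the top-down hook prunes the subtree:
// neither the children nor the bottom-up hook are visited for that node.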
impl Transform for () {}
impl Transform for bool {}
impl Transform for isize {}
impl Transform for String {}
impl Transform for bstr::BString {}
impl Transform for oxidized::pos::Pos {}
impl Transform for oxidized::file_info::Mode {}
impl Transform for oxidized::namespace_env::Env {}
impl Transform for oxidized::LocalIdMap<(Pos, ())> {}
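// The empty impls above rely on the trait's no-op defaults, so traversal
// bottoms out at positions, primitive values, and other atoms that carry no
// transformable children.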
impl<T> Transform for &mut T
where
T: Transform,
{
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
(**self).transform(env, &mut pass.clone())
}
}
impl<T> Transform for Box<T>
where
T: Transform,
{
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
(**self).transform(env, &mut pass.clone())
}
}
impl<L, R> Transform for itertools::Either<L, R>
where
L: Transform,
R: Transform,
{
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
Self::Left(x) => x.transform(env, &mut pass.clone()),
Self::Right(x) => x.transform(env, &mut pass.clone()),
}
}
}
impl<T> Transform for Vec<T>
where
T: Transform,
{
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
for x in self.iter_mut() {
x.transform(env, &mut pass.clone());
}
}
}
impl<T> Transform for Option<T>
where
T: Transform,
{
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
Some(x) => x.transform(env, &mut pass.clone()),
None => {}
}
}
}
impl<T> Transform for oxidized::lazy::Lazy<T>
where
T: Transform,
{
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
self.0.transform(env, &mut pass.clone())
}
}
impl<K, V> Transform for std::collections::BTreeMap<K, V>
where
K: Transform,
V: Transform,
{
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
for x in self.values_mut() {
x.transform(env, &mut pass.clone());
}
}
}
impl<T> Transform for std::sync::Arc<T>
where
T: Transform,
{
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
if let Some(x) = std::sync::Arc::get_mut(self) {
x.transform(env, &mut pass.clone());
}
}
}
impl<T> Transform for std::rc::Rc<T>
where
T: Transform,
{
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
if let Some(x) = std::rc::Rc::get_mut(self) {
x.transform(env, &mut pass.clone());
}
}
}
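// Note on the `Arc`/`Rc` impls above: `get_mut` only yields a mutable
// reference when the pointer is uniquely owned, so nodes behind a shared
// smart pointer are silently skipped rather than cloned and rewritten.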
impl<T1, T2> Transform for (T1, T2)
where
T1: Transform,
T2: Transform,
{
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
self.0.transform(env, &mut pass.clone());
self.1.transform(env, &mut pass.clone());
}
}
impl<T1, T2, T3> Transform for (T1, T2, T3)
where
T1: Transform,
T2: Transform,
T3: Transform,
{
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
self.0.transform(env, &mut pass.clone());
self.1.transform(env, &mut pass.clone());
self.2.transform(env, &mut pass.clone());
}
}
impl<T1, T2, T3, T4> Transform for (T1, T2, T3, T4)
where
T1: Transform,
T2: Transform,
T3: Transform,
T4: Transform,
{
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
self.0.transform(env, &mut pass.clone());
self.1.transform(env, &mut pass.clone());
self.2.transform(env, &mut pass.clone());
self.3.transform(env, &mut pass.clone());
}
}
impl<T1, T2, T3, T4, T5> Transform for (T1, T2, T3, T4, T5)
where
T1: Transform,
T2: Transform,
T3: Transform,
T4: Transform,
T5: Transform,
{
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
self.0.transform(env, &mut pass.clone());
self.1.transform(env, &mut pass.clone());
self.2.transform(env, &mut pass.clone());
self.3.transform(env, &mut pass.clone());
self.4.transform(env, &mut pass.clone());
}
}
const _: () = {
impl Transform for Lid {}
};
impl Transform for Program {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_program_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_program_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
Program(ref mut __binding_0) => __binding_0.transform(env, pass),
}
}
}
impl Transform for Stmt {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_stmt_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_stmt_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
Stmt(ref mut __binding_0, ref mut __binding_1) => {
{
__binding_0.transform(env, pass)
}
{ __binding_1.transform(env, pass) }
}
}
}
}
impl Transform for Stmt_ {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_stmt__top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_stmt__bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
Stmt_::Fallthrough => {}
Stmt_::Expr(ref mut __binding_0) => __binding_0.transform(env, pass),
Stmt_::Break => {}
Stmt_::Continue => {}
Stmt_::Throw(ref mut __binding_0) => __binding_0.transform(env, pass),
Stmt_::Return(ref mut __binding_0) => __binding_0.transform(env, pass),
Stmt_::YieldBreak => {}
Stmt_::Awaitall(ref mut __binding_0) => __binding_0.transform(env, pass),
Stmt_::If(ref mut __binding_0) => __binding_0.transform(env, pass),
Stmt_::Do(ref mut __binding_0) => __binding_0.transform(env, pass),
Stmt_::While(ref mut __binding_0) => __binding_0.transform(env, pass),
Stmt_::Using(ref mut __binding_0) => __binding_0.transform(env, pass),
Stmt_::For(ref mut __binding_0) => __binding_0.transform(env, pass),
Stmt_::Switch(ref mut __binding_0) => __binding_0.transform(env, pass),
Stmt_::Match(ref mut __binding_0) => __binding_0.transform(env, pass),
Stmt_::Foreach(ref mut __binding_0) => __binding_0.transform(env, pass),
Stmt_::Try(ref mut __binding_0) => __binding_0.transform(env, pass),
Stmt_::Noop => {}
Stmt_::DeclareLocal(ref mut __binding_0) => __binding_0.transform(env, pass),
Stmt_::Block(ref mut __binding_0) => __binding_0.transform(env, pass),
_ => {}
}
}
}
const _: () = {
impl Transform for EnvAnnot {}
};
impl Transform for UsingStmt {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_using_stmt_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_using_stmt_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
UsingStmt {
is_block_scoped: ref mut __binding_0,
has_await: ref mut __binding_1,
exprs: ref mut __binding_2,
block: ref mut __binding_3,
} => {
{
__binding_0.transform(env, pass)
}
{
__binding_1.transform(env, pass)
}
{
__binding_2.transform(env, pass)
}
{ __binding_3.transform(env, pass) }
}
}
}
}
impl Transform for AsExpr {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_as_expr_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_as_expr_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
AsExpr::AsV(ref mut __binding_0) => __binding_0.transform(env, pass),
AsExpr::AsKv(ref mut __binding_0, ref mut __binding_1) => {
{
__binding_0.transform(env, pass)
}
{ __binding_1.transform(env, pass) }
}
AsExpr::AwaitAsV(ref mut __binding_0, ref mut __binding_1) => {
{
__binding_0.transform(env, pass)
}
{ __binding_1.transform(env, pass) }
}
AsExpr::AwaitAsKv(ref mut __binding_0, ref mut __binding_1, ref mut __binding_2) => {
{
__binding_0.transform(env, pass)
}
{
__binding_1.transform(env, pass)
}
{ __binding_2.transform(env, pass) }
}
}
}
}
impl Transform for Block {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_block_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_block_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
Block(ref mut __binding_0) => __binding_0.transform(env, pass),
}
}
}
impl Transform for FinallyBlock {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_finally_block_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_finally_block_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
FinallyBlock(ref mut __binding_0) => __binding_0.transform(env, pass),
}
}
}
impl Transform for StmtMatch {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_stmt_match_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_stmt_match_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
StmtMatch {
expr: ref mut __binding_0,
arms: ref mut __binding_1,
} => {
{
__binding_0.transform(env, pass)
}
{ __binding_1.transform(env, pass) }
}
}
}
}
impl Transform for StmtMatchArm {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_stmt_match_arm_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_stmt_match_arm_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
StmtMatchArm {
pat: ref mut __binding_0,
body: ref mut __binding_1,
} => {
{
__binding_0.transform(env, pass)
}
{ __binding_1.transform(env, pass) }
}
}
}
}
impl Transform for Pattern {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_pattern_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_pattern_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
Pattern::PVar(ref mut __binding_0) => __binding_0.transform(env, pass),
Pattern::PRefinement(ref mut __binding_0) => __binding_0.transform(env, pass),
}
}
}
impl Transform for PatVar {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_pat_var_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_pat_var_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
PatVar {
id: ref mut __binding_1,
..
} => __binding_1.transform(env, pass),
}
}
}
impl Transform for PatRefinement {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_pat_refinement_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_pat_refinement_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
PatRefinement {
id: ref mut __binding_1,
hint: ref mut __binding_2,
..
} => {
{
__binding_1.transform(env, pass)
}
{ __binding_2.transform(env, pass) }
}
}
}
}
impl Transform for ClassId {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_class_id_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_class_id_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
ClassId(ref mut __binding_0, ref mut __binding_1, ref mut __binding_2) => {
{
__binding_0.transform(env, pass)
}
{
__binding_1.transform(env, pass)
}
{ __binding_2.transform(env, pass) }
}
}
}
}
impl Transform for ClassId_ {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_class_id__top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_class_id__bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
ClassId_::CIparent => {}
ClassId_::CIself => {}
ClassId_::CIstatic => {}
ClassId_::CIexpr(ref mut __binding_0) => __binding_0.transform(env, pass),
ClassId_::CI(ref mut __binding_0) => __binding_0.transform(env, pass),
}
}
}
impl Transform for Expr {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_expr_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_expr_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
Expr(ref mut __binding_0, ref mut __binding_1, ref mut __binding_2) => {
{
__binding_0.transform(env, pass)
}
{
__binding_1.transform(env, pass)
}
{ __binding_2.transform(env, pass) }
}
}
}
}
impl Transform for CollectionTarg {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_collection_targ_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_collection_targ_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
CollectionTarg::CollectionTV(ref mut __binding_0) => __binding_0.transform(env, pass),
CollectionTarg::CollectionTKV(ref mut __binding_0, ref mut __binding_1) => {
{
__binding_0.transform(env, pass)
}
{ __binding_1.transform(env, pass) }
}
}
}
}
impl Transform for FunctionPtrId {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_function_ptr_id_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_function_ptr_id_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
FunctionPtrId::FPId(ref mut __binding_0) => __binding_0.transform(env, pass),
FunctionPtrId::FPClassConst(ref mut __binding_0, ref mut __binding_1) => {
{
__binding_0.transform(env, pass)
}
{ __binding_1.transform(env, pass) }
}
}
}
}
impl Transform for ExpressionTree {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_expression_tree_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_expression_tree_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
ExpressionTree {
hint: ref mut __binding_0,
splices: ref mut __binding_1,
function_pointers: ref mut __binding_2,
virtualized_expr: ref mut __binding_3,
runtime_expr: ref mut __binding_4,
..
} => {
{
__binding_0.transform(env, pass)
}
{
__binding_1.transform(env, pass)
}
{
__binding_2.transform(env, pass)
}
{
__binding_3.transform(env, pass)
}
{ __binding_4.transform(env, pass) }
}
}
}
}
impl Transform for Expr_ {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_expr__top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_expr__bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
Expr_::Darray(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::Varray(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::Shape(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::ValCollection(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::KeyValCollection(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::Null => {}
Expr_::This => {}
Expr_::True => {}
Expr_::False => {}
Expr_::Omitted => {}
Expr_::Invalid(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::Id(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::Lvar(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::Dollardollar(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::Clone(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::ArrayGet(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::ObjGet(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::ClassGet(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::ClassConst(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::Call(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::FunctionPointer(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::Int(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::Float(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::String2(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::PrefixedString(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::Yield(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::Await(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::ReadonlyExpr(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::Tuple(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::List(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::Cast(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::Unop(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::Binop(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::Pipe(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::Eif(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::Is(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::As(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::Upcast(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::New(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::Efun(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::Lfun(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::Xml(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::Import(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::Collection(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::ExpressionTree(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::MethodCaller(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::Pair(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::ETSplice(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::EnumClassLabel(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::Hole(ref mut __binding_0) => __binding_0.transform(env, pass),
Expr_::Package(ref mut __binding_0) => __binding_0.transform(env, pass),
_ => {}
}
}
}
impl Transform for HoleSource {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_hole_source_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_hole_source_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
HoleSource::Typing => {}
HoleSource::UnsafeCast(ref mut __binding_0) => __binding_0.transform(env, pass),
HoleSource::UnsafeNonnullCast => {}
HoleSource::EnforcedCast(ref mut __binding_0) => __binding_0.transform(env, pass),
}
}
}
impl Transform for Binop {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_binop_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_binop_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
Binop {
lhs: ref mut __binding_1,
rhs: ref mut __binding_2,
..
} => {
{
{
let pass = &mut pass.clone();
let mut in_pass = pass.clone();
if let Break(..) = pass.on_fld_binop_lhs_top_down(env, __binding_1) {
return;
}
__binding_1.transform(env, pass);
in_pass.on_fld_binop_lhs_bottom_up(env, __binding_1);
}
}
{
{
let pass = &mut pass.clone();
let mut in_pass = pass.clone();
if let Break(..) = pass.on_fld_binop_rhs_top_down(env, __binding_2) {
return;
}
__binding_2.transform(env, pass);
in_pass.on_fld_binop_rhs_bottom_up(env, __binding_2);
}
}
}
}
}
}
impl Transform for ClassGetExpr {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_class_get_expr_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_class_get_expr_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
ClassGetExpr::CGstring(ref mut __binding_0) => __binding_0.transform(env, pass),
ClassGetExpr::CGexpr(ref mut __binding_0) => __binding_0.transform(env, pass),
}
}
}
impl Transform for Case {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_case_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_case_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
Case(ref mut __binding_0, ref mut __binding_1) => {
{
__binding_0.transform(env, pass)
}
{ __binding_1.transform(env, pass) }
}
}
}
}
impl Transform for DefaultCase {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_default_case_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_default_case_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
DefaultCase(ref mut __binding_0, ref mut __binding_1) => {
{
__binding_0.transform(env, pass)
}
{ __binding_1.transform(env, pass) }
}
}
}
}
impl Transform for Catch {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_catch_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_catch_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
Catch(ref mut __binding_0, ref mut __binding_1, ref mut __binding_2) => {
{
__binding_0.transform(env, pass)
}
{
__binding_1.transform(env, pass)
}
{ __binding_2.transform(env, pass) }
}
}
}
}
impl Transform for Field {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_field_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_field_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
Field(ref mut __binding_0, ref mut __binding_1) => {
{
__binding_0.transform(env, pass)
}
{ __binding_1.transform(env, pass) }
}
}
}
}
impl Transform for Afield {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_afield_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_afield_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
Afield::AFvalue(ref mut __binding_0) => __binding_0.transform(env, pass),
Afield::AFkvalue(ref mut __binding_0, ref mut __binding_1) => {
{
__binding_0.transform(env, pass)
}
{ __binding_1.transform(env, pass) }
}
}
}
}
impl Transform for XhpSimple {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_xhp_simple_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_xhp_simple_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
XhpSimple {
type_: ref mut __binding_1,
expr: ref mut __binding_2,
..
} => {
{
__binding_1.transform(env, pass)
}
{ __binding_2.transform(env, pass) }
}
}
}
}
impl Transform for XhpAttribute {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_xhp_attribute_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_xhp_attribute_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
XhpAttribute::XhpSimple(ref mut __binding_0) => __binding_0.transform(env, pass),
XhpAttribute::XhpSpread(ref mut __binding_0) => __binding_0.transform(env, pass),
}
}
}
impl Transform for FunParam {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_fun_param_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_fun_param_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
FunParam {
annotation: ref mut __binding_0,
type_hint: ref mut __binding_1,
is_variadic: ref mut __binding_2,
name: ref mut __binding_4,
expr: ref mut __binding_5,
user_attributes: ref mut __binding_8,
..
} => {
{
__binding_0.transform(env, pass)
}
{
__binding_1.transform(env, pass)
}
{
__binding_2.transform(env, pass)
}
{
__binding_4.transform(env, pass)
}
{
__binding_5.transform(env, pass)
}
{ __binding_8.transform(env, pass) }
}
}
}
}
impl Transform for Fun_ {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_fun__top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_fun__bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
Fun_ {
annotation: ref mut __binding_2,
ret: ref mut __binding_4,
params: ref mut __binding_5,
ctxs: ref mut __binding_6,
unsafe_ctxs: ref mut __binding_7,
body: ref mut __binding_8,
user_attributes: ref mut __binding_10,
external: ref mut __binding_11,
doc_comment: ref mut __binding_12,
..
} => {
{
__binding_2.transform(env, pass)
}
{
{
let pass = &mut pass.clone();
let mut in_pass = pass.clone();
if let Break(..) = pass.on_fld_fun__ret_top_down(env, __binding_4) {
return;
}
__binding_4.transform(env, pass);
in_pass.on_fld_fun__ret_bottom_up(env, __binding_4);
}
}
{
__binding_5.transform(env, pass)
}
{
__binding_6.transform(env, pass)
}
{
__binding_7.transform(env, pass)
}
{
__binding_8.transform(env, pass)
}
{
__binding_10.transform(env, pass)
}
{
__binding_11.transform(env, pass)
}
{ __binding_12.transform(env, pass) }
}
}
}
}
impl Transform for CaptureLid {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_capture_lid_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_capture_lid_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
CaptureLid(ref mut __binding_0, ref mut __binding_1) => {
{
__binding_0.transform(env, pass)
}
{ __binding_1.transform(env, pass) }
}
}
}
}
impl Transform for Efun {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_efun_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_efun_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
Efun {
fun: ref mut __binding_0,
use_: ref mut __binding_1,
closure_class_name: ref mut __binding_2,
} => {
{
__binding_0.transform(env, pass)
}
{
__binding_1.transform(env, pass)
}
{ __binding_2.transform(env, pass) }
}
}
}
}
impl Transform for FuncBody {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_func_body_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_func_body_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
FuncBody {
fb_ast: ref mut __binding_0,
} => __binding_0.transform(env, pass),
}
}
}
impl Transform for TypeHint {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_type_hint_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_type_hint_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
TypeHint(ref mut __binding_0, ref mut __binding_1) => {
{
__binding_0.transform(env, pass)
}
{ __binding_1.transform(env, pass) }
}
}
}
}
impl Transform for Targ {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_targ_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_targ_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
Targ(ref mut __binding_0, ref mut __binding_1) => {
{
__binding_0.transform(env, pass)
}
{ __binding_1.transform(env, pass) }
}
}
}
}
impl Transform for CallExpr {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_call_expr_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_call_expr_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
CallExpr {
func: ref mut __binding_0,
targs: ref mut __binding_1,
args: ref mut __binding_2,
unpacked_arg: ref mut __binding_3,
} => {
{
__binding_0.transform(env, pass)
}
{
__binding_1.transform(env, pass)
}
{
__binding_2.transform(env, pass)
}
{ __binding_3.transform(env, pass) }
}
}
}
}
impl Transform for UserAttribute {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_user_attribute_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_user_attribute_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
UserAttribute {
name: ref mut __binding_0,
params: ref mut __binding_1,
} => {
{
__binding_0.transform(env, pass)
}
{ __binding_1.transform(env, pass) }
}
}
}
}
impl Transform for FileAttribute {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_file_attribute_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_file_attribute_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
FileAttribute {
user_attributes: ref mut __binding_0,
namespace: ref mut __binding_1,
} => {
{
__binding_0.transform(env, pass)
}
{ __binding_1.transform(env, pass) }
}
}
}
}
impl Transform for Tparam {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_tparam_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_tparam_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
Tparam {
name: ref mut __binding_1,
parameters: ref mut __binding_2,
constraints: ref mut __binding_3,
user_attributes: ref mut __binding_5,
..
} => {
{
__binding_1.transform(env, pass)
}
{
__binding_2.transform(env, pass)
}
{
__binding_3.transform(env, pass)
}
{ __binding_5.transform(env, pass) }
}
}
}
}
const _: () = {
impl Transform for RequireKind {}
};
const _: () = {
impl Transform for EmitId {}
};
impl Transform for Class_ {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_class__top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_class__bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
Class_ {
annotation: ref mut __binding_1,
final_: ref mut __binding_3,
is_xhp: ref mut __binding_4,
has_xhp_keyword: ref mut __binding_5,
name: ref mut __binding_7,
tparams: ref mut __binding_8,
extends: ref mut __binding_9,
uses: ref mut __binding_10,
xhp_attr_uses: ref mut __binding_11,
reqs: ref mut __binding_13,
implements: ref mut __binding_14,
where_constraints: ref mut __binding_15,
consts: ref mut __binding_16,
typeconsts: ref mut __binding_17,
vars: ref mut __binding_18,
methods: ref mut __binding_19,
xhp_children: ref mut __binding_20,
xhp_attrs: ref mut __binding_21,
namespace: ref mut __binding_22,
user_attributes: ref mut __binding_23,
file_attributes: ref mut __binding_24,
docs_url: ref mut __binding_25,
enum_: ref mut __binding_26,
doc_comment: ref mut __binding_27,
emit_id: ref mut __binding_28,
internal: ref mut __binding_29,
module: ref mut __binding_30,
..
} => {
{
__binding_1.transform(env, pass)
}
{
__binding_3.transform(env, pass)
}
{
__binding_4.transform(env, pass)
}
{
__binding_5.transform(env, pass)
}
{
__binding_7.transform(env, pass)
}
{
{
let pass = &mut pass.clone();
let mut in_pass = pass.clone();
if let Break(..) = pass.on_fld_class__tparams_top_down(env, __binding_8) {
return;
}
__binding_8.transform(env, pass);
in_pass.on_fld_class__tparams_bottom_up(env, __binding_8);
}
}
{
{
let pass = &mut pass.clone();
let mut in_pass = pass.clone();
if let Break(..) = pass.on_fld_class__extends_top_down(env, __binding_9) {
return;
}
__binding_9.transform(env, pass);
in_pass.on_fld_class__extends_bottom_up(env, __binding_9);
}
}
{
{
let pass = &mut pass.clone();
let mut in_pass = pass.clone();
if let Break(..) = pass.on_fld_class__uses_top_down(env, __binding_10) {
return;
}
__binding_10.transform(env, pass);
in_pass.on_fld_class__uses_bottom_up(env, __binding_10);
}
}
{
{
let pass = &mut pass.clone();
let mut in_pass = pass.clone();
if let Break(..) =
pass.on_fld_class__xhp_attr_uses_top_down(env, __binding_11)
{
return;
}
__binding_11.transform(env, pass);
in_pass.on_fld_class__xhp_attr_uses_bottom_up(env, __binding_11);
}
}
{
{
let pass = &mut pass.clone();
let mut in_pass = pass.clone();
if let Break(..) = pass.on_fld_class__reqs_top_down(env, __binding_13) {
return;
}
__binding_13.transform(env, pass);
in_pass.on_fld_class__reqs_bottom_up(env, __binding_13);
}
}
{
{
let pass = &mut pass.clone();
let mut in_pass = pass.clone();
if let Break(..) = pass.on_fld_class__implements_top_down(env, __binding_14)
{
return;
}
__binding_14.transform(env, pass);
in_pass.on_fld_class__implements_bottom_up(env, __binding_14);
}
}
{
__binding_15.transform(env, pass)
}
{
{
let pass = &mut pass.clone();
let mut in_pass = pass.clone();
if let Break(..) = pass.on_fld_class__consts_top_down(env, __binding_16) {
return;
}
__binding_16.transform(env, pass);
in_pass.on_fld_class__consts_bottom_up(env, __binding_16);
}
}
{
__binding_17.transform(env, pass)
}
{
__binding_18.transform(env, pass)
}
{
__binding_19.transform(env, pass)
}
{
__binding_20.transform(env, pass)
}
{
{
let pass = &mut pass.clone();
let mut in_pass = pass.clone();
if let Break(..) = pass.on_fld_class__xhp_attrs_top_down(env, __binding_21)
{
return;
}
__binding_21.transform(env, pass);
in_pass.on_fld_class__xhp_attrs_bottom_up(env, __binding_21);
}
}
{
__binding_22.transform(env, pass)
}
{
{
let pass = &mut pass.clone();
let mut in_pass = pass.clone();
if let Break(..) =
pass.on_fld_class__user_attributes_top_down(env, __binding_23)
{
return;
}
__binding_23.transform(env, pass);
in_pass.on_fld_class__user_attributes_bottom_up(env, __binding_23);
}
}
{
__binding_24.transform(env, pass)
}
{
__binding_25.transform(env, pass)
}
{
__binding_26.transform(env, pass)
}
{
__binding_27.transform(env, pass)
}
{
__binding_28.transform(env, pass)
}
{
__binding_29.transform(env, pass)
}
{ __binding_30.transform(env, pass) }
}
}
}
}
impl Transform for ClassReq {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_class_req_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_class_req_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
ClassReq(ref mut __binding_0, ref mut __binding_1) => {
{
__binding_0.transform(env, pass)
}
{ __binding_1.transform(env, pass) }
}
}
}
}
const _: () = {
impl Transform for XhpAttrTag {}
};
impl Transform for XhpAttr {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_xhp_attr_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_xhp_attr_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
XhpAttr(
ref mut __binding_0,
ref mut __binding_1,
ref mut __binding_2,
ref mut __binding_3,
) => {
{
__binding_0.transform(env, pass)
}
{
__binding_1.transform(env, pass)
}
{
__binding_2.transform(env, pass)
}
{ __binding_3.transform(env, pass) }
}
}
}
}
impl Transform for ClassConstKind {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_class_const_kind_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_class_const_kind_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
ClassConstKind::CCAbstract(ref mut __binding_0) => __binding_0.transform(env, pass),
ClassConstKind::CCConcrete(ref mut __binding_0) => __binding_0.transform(env, pass),
}
}
}
impl Transform for ClassConst {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_class_const_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_class_const_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
ClassConst {
user_attributes: ref mut __binding_0,
type_: ref mut __binding_1,
id: ref mut __binding_2,
kind: ref mut __binding_3,
doc_comment: ref mut __binding_5,
..
} => {
{
__binding_0.transform(env, pass)
}
{
__binding_1.transform(env, pass)
}
{
__binding_2.transform(env, pass)
}
{
__binding_3.transform(env, pass)
}
{ __binding_5.transform(env, pass) }
}
}
}
}
impl Transform for ClassAbstractTypeconst {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_class_abstract_typeconst_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_class_abstract_typeconst_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
ClassAbstractTypeconst {
as_constraint: ref mut __binding_0,
super_constraint: ref mut __binding_1,
default: ref mut __binding_2,
} => {
{
__binding_0.transform(env, pass)
}
{
__binding_1.transform(env, pass)
}
{
{
let pass = &mut pass.clone();
let mut in_pass = pass.clone();
if let Break(..) =
pass.on_fld_class_abstract_typeconst_default_top_down(env, __binding_2)
{
return;
}
__binding_2.transform(env, pass);
in_pass.on_fld_class_abstract_typeconst_default_bottom_up(env, __binding_2);
}
}
}
}
}
}
impl Transform for ClassConcreteTypeconst {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_class_concrete_typeconst_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_class_concrete_typeconst_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
ClassConcreteTypeconst {
c_tc_type: ref mut __binding_0,
} => __binding_0.transform(env, pass),
}
}
}
impl Transform for ClassTypeconst {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_class_typeconst_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_class_typeconst_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
ClassTypeconst::TCAbstract(ref mut __binding_0) => __binding_0.transform(env, pass),
ClassTypeconst::TCConcrete(ref mut __binding_0) => __binding_0.transform(env, pass),
}
}
}
impl Transform for ClassTypeconstDef {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_class_typeconst_def_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_class_typeconst_def_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
ClassTypeconstDef {
user_attributes: ref mut __binding_0,
name: ref mut __binding_1,
kind: ref mut __binding_2,
doc_comment: ref mut __binding_4,
is_ctx: ref mut __binding_5,
..
} => {
{
__binding_0.transform(env, pass)
}
{
__binding_1.transform(env, pass)
}
{
__binding_2.transform(env, pass)
}
{
__binding_4.transform(env, pass)
}
{ __binding_5.transform(env, pass) }
}
}
}
}
impl Transform for XhpAttrInfo {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_xhp_attr_info_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_xhp_attr_info_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
XhpAttrInfo {
tag: ref mut __binding_1,
..
} => __binding_1.transform(env, pass),
}
}
}
impl Transform for ClassVar {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_class_var_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_class_var_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
ClassVar {
final_: ref mut __binding_0,
xhp_attr: ref mut __binding_1,
abstract_: ref mut __binding_2,
readonly: ref mut __binding_3,
type_: ref mut __binding_5,
id: ref mut __binding_6,
expr: ref mut __binding_7,
user_attributes: ref mut __binding_8,
doc_comment: ref mut __binding_9,
is_promoted_variadic: ref mut __binding_10,
is_static: ref mut __binding_11,
..
} => {
{
__binding_0.transform(env, pass)
}
{
__binding_1.transform(env, pass)
}
{
__binding_2.transform(env, pass)
}
{
__binding_3.transform(env, pass)
}
{
{
let pass = &mut pass.clone();
let mut in_pass = pass.clone();
if let Break(..) = pass.on_fld_class_var_type__top_down(env, __binding_5) {
return;
}
__binding_5.transform(env, pass);
in_pass.on_fld_class_var_type__bottom_up(env, __binding_5);
}
}
{
__binding_6.transform(env, pass)
}
{
__binding_7.transform(env, pass)
}
{
__binding_8.transform(env, pass)
}
{
__binding_9.transform(env, pass)
}
{
__binding_10.transform(env, pass)
}
{ __binding_11.transform(env, pass) }
}
}
}
}
impl Transform for Method_ {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_method__top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_method__bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
Method_ {
annotation: ref mut __binding_1,
final_: ref mut __binding_2,
abstract_: ref mut __binding_3,
static_: ref mut __binding_4,
readonly_this: ref mut __binding_5,
name: ref mut __binding_7,
tparams: ref mut __binding_8,
where_constraints: ref mut __binding_9,
params: ref mut __binding_10,
ctxs: ref mut __binding_11,
unsafe_ctxs: ref mut __binding_12,
body: ref mut __binding_13,
user_attributes: ref mut __binding_15,
ret: ref mut __binding_17,
external: ref mut __binding_18,
doc_comment: ref mut __binding_19,
..
} => {
{
__binding_1.transform(env, pass)
}
{
__binding_2.transform(env, pass)
}
{
__binding_3.transform(env, pass)
}
{
__binding_4.transform(env, pass)
}
{
__binding_5.transform(env, pass)
}
{
__binding_7.transform(env, pass)
}
{
__binding_8.transform(env, pass)
}
{
__binding_9.transform(env, pass)
}
{
__binding_10.transform(env, pass)
}
{
__binding_11.transform(env, pass)
}
{
__binding_12.transform(env, pass)
}
{
__binding_13.transform(env, pass)
}
{
__binding_15.transform(env, pass)
}
{
{
let pass = &mut pass.clone();
let mut in_pass = pass.clone();
if let Break(..) = pass.on_fld_method__ret_top_down(env, __binding_17) {
return;
}
__binding_17.transform(env, pass);
in_pass.on_fld_method__ret_bottom_up(env, __binding_17);
}
}
{
__binding_18.transform(env, pass)
}
{ __binding_19.transform(env, pass) }
}
}
}
}
impl Transform for Typedef {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_typedef_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_typedef_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
Typedef {
annotation: ref mut __binding_0,
name: ref mut __binding_1,
tparams: ref mut __binding_2,
as_constraint: ref mut __binding_3,
super_constraint: ref mut __binding_4,
kind: ref mut __binding_5,
user_attributes: ref mut __binding_6,
file_attributes: ref mut __binding_7,
namespace: ref mut __binding_10,
emit_id: ref mut __binding_12,
is_ctx: ref mut __binding_13,
internal: ref mut __binding_14,
module: ref mut __binding_15,
docs_url: ref mut __binding_16,
doc_comment: ref mut __binding_17,
..
} => {
{
__binding_0.transform(env, pass)
}
{
__binding_1.transform(env, pass)
}
{
__binding_2.transform(env, pass)
}
{
__binding_3.transform(env, pass)
}
{
__binding_4.transform(env, pass)
}
{
{
let pass = &mut pass.clone();
let mut in_pass = pass.clone();
if let Break(..) = pass.on_fld_typedef_kind_top_down(env, __binding_5) {
return;
}
__binding_5.transform(env, pass);
in_pass.on_fld_typedef_kind_bottom_up(env, __binding_5);
}
}
{
__binding_6.transform(env, pass)
}
{
__binding_7.transform(env, pass)
}
{
__binding_10.transform(env, pass)
}
{
__binding_12.transform(env, pass)
}
{
__binding_13.transform(env, pass)
}
{
__binding_14.transform(env, pass)
}
{
__binding_15.transform(env, pass)
}
{
__binding_16.transform(env, pass)
}
{ __binding_17.transform(env, pass) }
}
}
}
}
impl Transform for Gconst {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_gconst_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_gconst_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
Gconst {
annotation: ref mut __binding_0,
name: ref mut __binding_2,
type_: ref mut __binding_3,
value: ref mut __binding_4,
namespace: ref mut __binding_5,
emit_id: ref mut __binding_7,
..
} => {
{
__binding_0.transform(env, pass)
}
{
__binding_2.transform(env, pass)
}
{
__binding_3.transform(env, pass)
}
{
{
let pass = &mut pass.clone();
let mut in_pass = pass.clone();
if let Break(..) = pass.on_fld_gconst_value_top_down(env, __binding_4) {
return;
}
__binding_4.transform(env, pass);
in_pass.on_fld_gconst_value_bottom_up(env, __binding_4);
}
}
{
__binding_5.transform(env, pass)
}
{ __binding_7.transform(env, pass) }
}
}
}
}
impl Transform for FunDef {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_fun_def_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_fun_def_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
FunDef {
namespace: ref mut __binding_0,
file_attributes: ref mut __binding_1,
name: ref mut __binding_3,
fun: ref mut __binding_4,
internal: ref mut __binding_5,
module: ref mut __binding_6,
tparams: ref mut __binding_7,
where_constraints: ref mut __binding_8,
..
} => {
{
__binding_0.transform(env, pass)
}
{
__binding_1.transform(env, pass)
}
{
__binding_3.transform(env, pass)
}
{
__binding_4.transform(env, pass)
}
{
__binding_5.transform(env, pass)
}
{
__binding_6.transform(env, pass)
}
{
__binding_7.transform(env, pass)
}
{ __binding_8.transform(env, pass) }
}
}
}
}
impl Transform for ModuleDef {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_module_def_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_module_def_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
ModuleDef {
annotation: ref mut __binding_0,
user_attributes: ref mut __binding_2,
file_attributes: ref mut __binding_3,
doc_comment: ref mut __binding_6,
exports: ref mut __binding_7,
imports: ref mut __binding_8,
..
} => {
{
__binding_0.transform(env, pass)
}
{
__binding_2.transform(env, pass)
}
{
__binding_3.transform(env, pass)
}
{
__binding_6.transform(env, pass)
}
{
__binding_7.transform(env, pass)
}
{ __binding_8.transform(env, pass) }
}
}
}
}
const _: () = {
impl Transform for MdNameKind {}
};
impl Transform for Def {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_def_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_def_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
Def::Fun(ref mut __binding_0) => __binding_0.transform(env, pass),
Def::Class(ref mut __binding_0) => __binding_0.transform(env, pass),
Def::Stmt(ref mut __binding_0) => __binding_0.transform(env, pass),
Def::Typedef(ref mut __binding_0) => __binding_0.transform(env, pass),
Def::Constant(ref mut __binding_0) => __binding_0.transform(env, pass),
Def::Namespace(ref mut __binding_0) => __binding_0.transform(env, pass),
Def::NamespaceUse(ref mut __binding_0) => __binding_0.transform(env, pass),
Def::SetNamespaceEnv(ref mut __binding_0) => __binding_0.transform(env, pass),
Def::FileAttributes(ref mut __binding_0) => __binding_0.transform(env, pass),
Def::Module(ref mut __binding_0) => __binding_0.transform(env, pass),
Def::SetModule(ref mut __binding_0) => __binding_0.transform(env, pass),
}
}
}
const _: () = {
impl Transform for NsKind {}
};
const _: () = {
impl Transform for ImportFlavor {}
};
impl Transform for XhpChild {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_xhp_child_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_xhp_child_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
XhpChild::ChildName(ref mut __binding_0) => __binding_0.transform(env, pass),
XhpChild::ChildList(ref mut __binding_0) => __binding_0.transform(env, pass),
XhpChild::ChildUnary(ref mut __binding_0, ref mut __binding_1) => {
{
__binding_0.transform(env, pass)
}
{ __binding_1.transform(env, pass) }
}
XhpChild::ChildBinary(ref mut __binding_0, ref mut __binding_1) => {
{
__binding_0.transform(env, pass)
}
{ __binding_1.transform(env, pass) }
}
}
}
}
const _: () = {
impl Transform for XhpChildOp {}
};
impl Transform for Hint {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_hint_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_hint_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
Hint(ref mut __binding_0, ref mut __binding_1) => {
{
__binding_0.transform(env, pass)
}
{ __binding_1.transform(env, pass) }
}
}
}
}
impl Transform for UserAttributes {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_user_attributes_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_user_attributes_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
UserAttributes(ref mut __binding_0) => __binding_0.transform(env, pass),
}
}
}
impl Transform for Contexts {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_contexts_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_contexts_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
Contexts(ref mut __binding_0, ref mut __binding_1) => {
{
__binding_0.transform(env, pass)
}
{ __binding_1.transform(env, pass) }
}
}
}
}
const _: () = {
impl Transform for HfParamInfo {}
};
impl Transform for HintFun {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_hint_fun_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_hint_fun_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
HintFun {
param_tys: ref mut __binding_1,
param_info: ref mut __binding_2,
variadic_ty: ref mut __binding_3,
ctxs: ref mut __binding_4,
return_ty: ref mut __binding_5,
..
} => {
{
__binding_1.transform(env, pass)
}
{
__binding_2.transform(env, pass)
}
{
__binding_3.transform(env, pass)
}
{
__binding_4.transform(env, pass)
}
{
{
let pass = &mut pass.clone();
let mut in_pass = pass.clone();
if let Break(..) = pass.on_fld_hint_fun_return_ty_top_down(env, __binding_5)
{
return;
}
__binding_5.transform(env, pass);
in_pass.on_fld_hint_fun_return_ty_bottom_up(env, __binding_5);
}
}
}
}
}
}
impl Transform for Hint_ {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_hint__top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_hint__bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
Hint_::Hoption(ref mut __binding_0) => __binding_0.transform(env, pass),
Hint_::Hlike(ref mut __binding_0) => __binding_0.transform(env, pass),
Hint_::Hfun(ref mut __binding_0) => __binding_0.transform(env, pass),
Hint_::Htuple(ref mut __binding_0) => __binding_0.transform(env, pass),
Hint_::Happly(ref mut __binding_0, ref mut __binding_1) => {
{
__binding_0.transform(env, pass)
}
{ __binding_1.transform(env, pass) }
}
Hint_::Hshape(ref mut __binding_0) => __binding_0.transform(env, pass),
Hint_::Haccess(ref mut __binding_0, ref mut __binding_1) => {
{
__binding_0.transform(env, pass)
}
{ __binding_1.transform(env, pass) }
}
Hint_::Hsoft(ref mut __binding_0) => __binding_0.transform(env, pass),
Hint_::Hrefinement(ref mut __binding_0, ref mut __binding_1) => {
{
__binding_0.transform(env, pass)
}
{ __binding_1.transform(env, pass) }
}
Hint_::Hany => {}
Hint_::Herr => {}
Hint_::Hmixed => {}
Hint_::Hwildcard => {}
Hint_::Hnonnull => {}
Hint_::Habstr(ref mut __binding_0, ref mut __binding_1) => {
{
__binding_0.transform(env, pass)
}
{ __binding_1.transform(env, pass) }
}
Hint_::HvecOrDict(ref mut __binding_0, ref mut __binding_1) => {
{
__binding_0.transform(env, pass)
}
{ __binding_1.transform(env, pass) }
}
Hint_::Hprim(ref mut __binding_0) => __binding_0.transform(env, pass),
Hint_::Hthis => {}
Hint_::Hdynamic => {}
Hint_::Hnothing => {}
Hint_::Hunion(ref mut __binding_0) => __binding_0.transform(env, pass),
Hint_::Hintersection(ref mut __binding_0) => __binding_0.transform(env, pass),
Hint_::HfunContext(ref mut __binding_0) => __binding_0.transform(env, pass),
Hint_::Hvar(ref mut __binding_0) => __binding_0.transform(env, pass),
}
}
}
impl Transform for Refinement {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_refinement_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_refinement_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
Refinement::Rctx(ref mut __binding_0, ref mut __binding_1) => {
{
__binding_0.transform(env, pass)
}
{ __binding_1.transform(env, pass) }
}
Refinement::Rtype(ref mut __binding_0, ref mut __binding_1) => {
{
__binding_0.transform(env, pass)
}
{ __binding_1.transform(env, pass) }
}
}
}
}
impl Transform for TypeRefinement {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_type_refinement_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_type_refinement_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
TypeRefinement::TRexact(ref mut __binding_0) => __binding_0.transform(env, pass),
TypeRefinement::TRloose(ref mut __binding_0) => __binding_0.transform(env, pass),
}
}
}
impl Transform for TypeRefinementBounds {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_type_refinement_bounds_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_type_refinement_bounds_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
TypeRefinementBounds {
lower: ref mut __binding_0,
upper: ref mut __binding_1,
} => {
{
__binding_0.transform(env, pass)
}
{ __binding_1.transform(env, pass) }
}
}
}
}
impl Transform for CtxRefinement {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_ctx_refinement_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_ctx_refinement_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
CtxRefinement::CRexact(ref mut __binding_0) => __binding_0.transform(env, pass),
CtxRefinement::CRloose(ref mut __binding_0) => __binding_0.transform(env, pass),
}
}
}
impl Transform for CtxRefinementBounds {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_ctx_refinement_bounds_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_ctx_refinement_bounds_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
CtxRefinementBounds {
lower: ref mut __binding_0,
upper: ref mut __binding_1,
} => {
{
__binding_0.transform(env, pass)
}
{ __binding_1.transform(env, pass) }
}
}
}
}
impl Transform for ShapeFieldInfo {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_shape_field_info_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_shape_field_info_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
ShapeFieldInfo {
optional: ref mut __binding_0,
hint: ref mut __binding_1,
..
} => {
{
__binding_0.transform(env, pass)
}
{ __binding_1.transform(env, pass) }
}
}
}
}
impl Transform for NastShapeInfo {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_nast_shape_info_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_nast_shape_info_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
NastShapeInfo {
allows_unknown_fields: ref mut __binding_0,
field_map: ref mut __binding_1,
} => {
{
__binding_0.transform(env, pass)
}
{ __binding_1.transform(env, pass) }
}
}
}
}
const _: () = {
impl Transform for KvcKind {}
};
const _: () = {
impl Transform for VcKind {}
};
impl Transform for Enum_ {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_enum__top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_enum__bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
Enum_ {
base: ref mut __binding_0,
constraint: ref mut __binding_1,
includes: ref mut __binding_2,
} => {
{
__binding_0.transform(env, pass)
}
{
__binding_1.transform(env, pass)
}
{ __binding_2.transform(env, pass) }
}
}
}
}
impl Transform for WhereConstraintHint {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_where_constraint_hint_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_where_constraint_hint_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
WhereConstraintHint(ref mut __binding_0, ref mut __binding_1, ref mut __binding_2) => {
{
__binding_0.transform(env, pass)
}
{
__binding_1.transform(env, pass)
}
{ __binding_2.transform(env, pass) }
}
}
}
}
impl Transform for Id {
fn transform(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
let mut in_pass = pass.clone();
if let Break(..) = pass.on_ty_id_top_down(env, self) {
return;
}
self.traverse(env, pass);
in_pass.on_ty_id_bottom_up(env, self);
}
fn traverse(&mut self, env: &Env, pass: &mut (impl Pass + Clone)) {
match self {
Id(ref mut __binding_0, ref mut __binding_1) => {
{
__binding_0.transform(env, pass)
}
{ __binding_1.transform(env, pass) }
}
}
}
}
const _: () = {
impl Transform for ShapeFieldName {}
};
const _: () = {
impl Transform for Variance {}
};
const _: () = {
impl Transform for ConstraintKind {}
};
const _: () = {
impl Transform for Abstraction {}
};
const _: () = {
impl Transform for ClassishKind {}
};
const _: () = {
impl Transform for ParamKind {}
};
const _: () = {
impl Transform for ReadonlyKind {}
};
const _: () = {
impl Transform for OgNullFlavor {}
};
const _: () = {
impl Transform for PropOrMethod {}
};
const _: () = {
impl Transform for FunKind {}
};
const _: () = {
impl Transform for Bop {}
};
const _: () = {
impl Transform for Uop {}
};
const _: () = {
impl Transform for Visibility {}
};
const _: () = {
impl Transform for XhpEnumValue {}
};
const _: () = {
impl Transform for Tprim {}
};
const _: () = {
impl Transform for TypedefVisibility {}
};
const _: () = {
impl Transform for ReifyKind {}
}; |
Rust | hhvm/hphp/hack/src/elab/typed_local.rs | // Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use std::collections::BTreeMap;
use std::collections::HashSet;
#[allow(unused_imports)]
use hack_macros::hack_stmts;
use nast::AsExpr;
use nast::Binop;
use nast::Block;
use nast::CaptureLid;
use nast::Expr;
use nast::Expr_;
use nast::Hint;
use nast::Hint_;
use nast::Lid;
use nast::NastShapeInfo;
use nast::Pos;
use nast::ReifyKind;
use nast::ShapeFieldInfo;
use nast::Stmt;
use nast::Stmt_;
use nast::UsingStmt;
use oxidized::aast_visitor::AstParams;
use oxidized::aast_visitor::NodeMut;
use oxidized::aast_visitor::VisitorMut;
use oxidized::ast_defs::Bop;
use oxidized::local_id;
use oxidized::naming_error::NamingError;
use oxidized::nast;
use crate::Env;
#[derive(Debug, Clone, Default)]
pub struct TypedLocal {
// The locals in scope. The hint is present iff this is a typed local.
// Union at join points to keep the locals that might be in scope.
// The position is where the local was first declared. The bool is true
// if the hint could potentially be a shape or a tuple. If there is an
// assignment like `$x[e1] = e2` then we should enforce after the assignment
// (instead of on the rhs), in case it changed the shape or tuple type at
// runtime. Shapes and tuples are special because their contents are enforced.
locals: BTreeMap<String, (Option<(Hint, bool)>, Pos)>,
// The subset of typed locals declared in the current scope, rather than preceding it.
// declared_ids and assigned_ids should be disjoint
declared_ids: HashSet<String>,
// The subset of untyped locals first assigned in the current scope, rather than preceding it.
assigned_ids: HashSet<String>,
// The in-scope erased generics
erased_generics: HashSet<String>,
should_elab: bool,
}
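// A rough sketch of the elaboration, using Hack's `let` declaration syntax
// for illustration: with `should_elab` set, a typed-local declaration
//
//     let $x: int = f();
//
// is rewritten into a plain assignment whose right-hand side is enforced,
//
//     $x = f() as int;
//
// and later assignments to `$x` get the same `as` wrapper (the hint is first
// run through `simplify_hint` so that unenforceable parts become `_`). With
// `should_elab` unset the AST is left alone and the pass only tracks
// declarations/assignments so it can report IllegalTypedLocal errors.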
impl TypedLocal {
fn add_local(&mut self, id: String, hint: Option<(Hint, bool)>, pos: &Pos) {
self.locals.insert(id, (hint, pos.clone()));
}
fn add_assigned_id(&mut self, id: String) {
self.assigned_ids.insert(id);
}
fn add_declared_id(&mut self, id: &str) {
self.declared_ids.insert(id.to_string());
}
fn get_local(&self, id: &str) -> Option<&(Option<(Hint, bool)>, Pos)> {
self.locals.get(id)
}
fn get_local_pos(&self, id: &str) -> Pos {
if let Some((_, pos)) = self.get_local(id) {
pos.clone()
} else {
Pos::NONE
}
}
fn new_block_env(&self) -> Self {
TypedLocal {
locals: self.locals.clone(),
erased_generics: self.erased_generics.clone(),
should_elab: self.should_elab,
..Default::default()
}
}
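// Build an environment containing only the locals named in `ids`; presumably
// used when entering a closure with an explicit capture list, so the body
// only sees the captured locals (plus whatever it declares itself).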
fn restrict_env(&self, ids: &Vec<CaptureLid>) -> TypedLocal {
let mut new_env = TypedLocal {
erased_generics: self.erased_generics.clone(),
should_elab: self.should_elab,
..Default::default()
};
for cid in ids {
let id = &cid.1.1.1;
if let Some((hint, pos)) = self.get_local(id) {
new_env.add_local(id.to_string(), hint.clone(), pos);
}
if self.declared_ids.contains(id) {
new_env.add_declared_id(id);
}
if self.assigned_ids.contains(id) {
new_env.add_assigned_id(id.clone());
}
}
new_env
}
fn clear(&mut self) {
self.assigned_ids.clear();
self.declared_ids.clear();
self.locals.clear();
}
// Add the new_env into self, updating the error map
fn join2(&mut self, new_env: &mut Self, error_map: &mut BTreeMap<String, (Pos, Pos)>) {
for id in new_env.assigned_ids.intersection(&self.declared_ids) {
let assign_pos = new_env.get_local_pos(id);
let decl_pos = self.get_local_pos(id);
error_map.insert(id.clone(), (decl_pos, assign_pos));
}
for id in new_env.declared_ids.intersection(&self.assigned_ids) {
let assign_pos = self.get_local_pos(id);
let decl_pos = new_env.get_local_pos(id);
error_map.insert(id.clone(), (decl_pos, assign_pos));
}
for id in new_env.declared_ids.intersection(&self.declared_ids) {
let assign_pos = self.get_local_pos(id);
let decl_pos = new_env.get_local_pos(id);
error_map.insert(id.clone(), (decl_pos, assign_pos));
}
// self.locals contains the cumulative bindings down the self branch.
// For the bindings newly defined in the new_env branch, insert them into
// the self locals if they aren't already declared there.
let diff = new_env.assigned_ids.difference(&self.declared_ids);
for id in new_env.declared_ids.iter().chain(diff) {
if let Some(x) = new_env.get_local(id) {
self.locals.insert(id.to_string(), x.clone());
}
}
let declared_ids = std::mem::take(&mut new_env.declared_ids);
let assigned_ids = std::mem::take(&mut new_env.assigned_ids);
self.declared_ids.extend(declared_ids);
self.assigned_ids.extend(assigned_ids);
// If the id was declared then we need to remove it from the assigned
// list to keep them disjoint. This situation could arise when
// self has a declaration and new_env an assignment (or vice versa).
for id in &self.declared_ids {
self.assigned_ids.remove(id);
}
}
// join a list of TypedLocals together. self should be the TypedLocal from
// before the split
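// For example, for an `if` statement, `join` merges the environments of the
// then- and else-blocks back into the pre-split environment: locals from
// either branch stay in scope afterwards, and an id that is declared as a
// typed local in one branch but assigned (or re-declared) in the other is
// reported as an IllegalTypedLocal error with `join: true`.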
fn join(&mut self, envs: &mut [Self], env: &mut Env) {
let mut error_map = BTreeMap::default();
let before_declared_ids = std::mem::take(&mut self.declared_ids);
let before_assigned_ids = std::mem::take(&mut self.assigned_ids);
for env in envs.iter_mut().rev() {
self.join2(env, &mut error_map);
}
for (id, (decl_pos, assign_pos)) in error_map {
env.emit_error(NamingError::IllegalTypedLocal {
join: true,
id_pos: decl_pos,
id_name: id.clone(),
def_pos: assign_pos,
})
}
self.declared_ids.extend(before_declared_ids);
self.assigned_ids.extend(before_assigned_ids);
}
// Find the hint for the id, or if it hasn't been assigned, mark it as assigned with None.
fn get_hint_or_add_assign(&mut self, lid: &Lid, pos: &Pos) -> Option<(Hint, bool)> {
let name = local_id::get_name(&lid.1);
if let Some((hint_opt, _pos)) = self.get_local(name) {
hint_opt.clone()
} else {
self.add_local(name.to_string(), None, pos);
self.add_assigned_id(name.to_string());
None
}
}
// Get the hint from an lvalue expression if it is a variable. Return Err(())
// if the lvalue might need further processing, which is the case for list(..) and
// $x[e] lvalues. Other lvalues won't need enforcement, so just return None.
fn get_lvar_hint(&mut self, expr: &Expr) -> Result<Option<(Hint, bool)>, ()> {
match expr {
Expr(_, pos, Expr_::Lvar(box lid)) => Ok(self.get_hint_or_add_assign(lid, pos)),
Expr(_, _pos, Expr_::List(..)) | Expr(_, _pos, Expr_::ArrayGet(box (_, Some(_)))) => {
Err(())
}
_ => Ok(None),
}
}
fn wrap_rhs_with_as(&self, rhs: &mut Expr, hint: Hint, pos: &Pos) {
if self.should_elab {
let mut init_expr = Expr((), Pos::NONE, Expr_::Null);
std::mem::swap(rhs, &mut init_expr);
let as_expr_ = Expr_::As(Box::new((init_expr, hint, false)));
let as_expr = Expr((), pos.clone(), as_expr_);
*rhs = as_expr;
}
}
// If the expression is an assignment where the type of the lhs can be enforced
// on the rhs, update the expression. Otherwise, if it's an assignment that will
// need enforcement that cannot be done on the rhs, return an error.
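// E.g., assuming `$x` was declared with hint `int`: `$x = e` is rewritten to
// `$x = e as int`, whereas `$x += e`, `list(...) = e` and `$x[e1] = e2` yield
// Err(()) so the caller can instead enforce the affected variables after the
// whole statement.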
fn enforce_assign_expr_rhs(&mut self, expr: &mut Expr) -> Result<(), ()> {
match expr {
Expr(
(),
pos,
Expr_::Binop(box Binop {
bop: Bop::Eq(None),
lhs,
rhs,
}),
) => match self.get_lvar_hint(lhs) {
Ok(Some((hint, _))) => {
self.wrap_rhs_with_as(rhs, hint, pos);
Ok(())
}
Ok(None) => Ok(()),
Err(()) => Err(()),
},
Expr(
(),
_pos,
Expr_::Binop(box Binop {
bop: Bop::Eq(Some(_)),
lhs: _,
rhs: _,
}),
) => Err(()),
_ => Ok(()),
}
}
// Collect all of the variables an lhs might need to enforce and put them in `hints` as 'as' expressions.
// These include list() variables, and $x[e] array indexing where the hint to enforce might be a shape or tuple.
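// E.g., for `list($a, $b[0]) = e` with `$a : int` and `$b : shape(...)`, this
// collects `$a as int` and `$b as shape(...)`; the latter only because shape
// and tuple contents are enforced, so an indexed write may need re-checking
// after the assignment.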
fn get_vars_to_enforce_lhs(&mut self, expr: &Expr, in_array_get: bool, hints: &mut Vec<Expr>) {
match expr {
Expr(_, pos, Expr_::Lvar(box lid)) => {
if let Some((hint, could_be_shape_or_tuple)) = self.get_hint_or_add_assign(lid, pos)
{
if !in_array_get || could_be_shape_or_tuple {
let mut expr = expr.clone();
self.wrap_rhs_with_as(&mut expr, hint, pos);
hints.push(expr);
}
}
}
Expr(_, _pos, Expr_::List(exprs)) => {
for expr in exprs {
self.get_vars_to_enforce_lhs(expr, in_array_get, hints)
}
}
Expr(_, _pos, Expr_::ArrayGet(box (lhs, Some(_)))) => {
self.get_vars_to_enforce_lhs(lhs, true, hints)
}
_ => {}
}
}
fn get_vars_to_enforce(&mut self, expr: &Expr, hints: &mut Vec<Expr>) {
match expr {
Expr(
(),
_pos,
Expr_::Binop(box Binop {
bop: Bop::Eq(_),
lhs,
rhs: _,
}),
) => self.get_vars_to_enforce_lhs(lhs, false, hints),
_ => {}
}
}
fn visit_fun_helper(&mut self, env: &mut Env, elem: &mut nast::Fun_) -> Result<(), ()> {
for param in elem.params.iter() {
self.add_local(param.name.clone(), None, ¶m.pos);
self.add_assigned_id(param.name.clone());
}
elem.body.fb_ast.recurse(env, self.object())
}
fn add_enforcement_exprs_in_list(
&mut self,
env: &mut Env,
init_exprs: &mut Vec<Expr>,
) -> Result<Vec<Expr>, ()> {
let mut new_init_expr = vec![];
for mut init_expr in init_exprs.drain(..) {
init_expr.recurse(env, self.object())?;
match self.enforce_assign_expr_rhs(&mut init_expr) {
Ok(()) => new_init_expr.push(init_expr),
Err(()) => {
let mut exprs = vec![];
self.get_vars_to_enforce(&init_expr, &mut exprs);
new_init_expr.push(init_expr);
if self.should_elab {
for expr in exprs.drain(..) {
new_init_expr.push(expr);
}
}
}
}
}
Ok(new_init_expr)
}
// Replaces definitely unenforceable parts of the hint with _. Returns true if
// the hint could still denote a shape or a tuple, whose contents are enforced.
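// When `should_elab` is set, e.g. `vec<Foo>` becomes `vec<_>` and `dynamic`
// becomes `_` (both returning false), while `shape('a' => Foo)` keeps its
// shape structure with the field hints simplified recursively (returning true).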
fn simplify_hint(&self, hint: &mut Hint) -> bool {
let Hint(_, box hint_) = hint;
match hint_ {
Hint_::Hoption(h) | Hint_::Hlike(h) => self.simplify_hint(h),
Hint_::Htuple(hints) => {
for h in hints {
self.simplify_hint(h);
}
true
}
Hint_::Happly(cn, hints) => match cn.1.as_str() {
"\\HH\\void"
| "\\HH\\int"
| "\\HH\\bool"
| "\\HH\\float"
| "\\HH\\string"
| "\\HH\\resource"
| "\\HH\\num"
| "\\HH\\noreturn"
| "\\HH\\arraykey"
| "\\HH\\mixed"
| "\\HH\\dict"
| "\\HH\\vec"
| "\\HH\\keyset"
| "\\HH\\vec_or_dict"
| "\\HH\\nonnull"
| "\\HH\\darray"
| "\\HH\\varray"
| "\\HH\\varray_or_darray"
| "\\HH\\anyarray"
| "\\HH\\null"
| "\\HH\\nothing" => {
if self.should_elab {
for Hint(_, box hint_) in hints {
let _ = std::mem::replace(hint_, Hint_::Hwildcard);
}
}
false
}
"\\HH\\dynamic" => {
if self.should_elab {
let _ = std::mem::replace(hint_, Hint_::Hwildcard);
}
false
}
s => {
if self.erased_generics.contains(s) {
if self.should_elab {
let _ = std::mem::replace(hint_, Hint_::Hwildcard);
}
false
} else {
if self.should_elab {
for h in hints {
self.simplify_hint(h);
}
}
true
}
}
},
Hint_::Hshape(NastShapeInfo {
allows_unknown_fields: _,
field_map,
}) => {
for ShapeFieldInfo {
optional: _,
hint,
name: _,
} in field_map
{
self.simplify_hint(hint);
}
true
}
Hint_::Haccess(h, _) => {
self.simplify_hint(h);
true
}
Hint_::HvecOrDict(None, Hint(_, box hint_)) => {
if self.should_elab {
let _ = std::mem::replace(hint_, Hint_::Hwildcard);
}
false
}
Hint_::HvecOrDict(Some(Hint(_, box hint_1)), Hint(_, box hint_2)) => {
if self.should_elab {
let _ = std::mem::replace(hint_1, Hint_::Hwildcard);
let _ = std::mem::replace(hint_2, Hint_::Hwildcard);
}
false
}
Hint_::Hnonnull | Hint_::Hprim(_) | Hint_::Hthis | Hint_::Hnothing | Hint_::Hmixed => {
false
}
// The following are unenforced, so we replace with a wildcard
Hint_::Hfun(_)
| Hint_::Hany
| Hint_::Herr
| Hint_::Hwildcard
| Hint_::Hdynamic
| Hint_::Hunion(_)
| Hint_::Hintersection(_)
| Hint_::Hrefinement(_, _)
| Hint_::Hsoft(_)
| Hint_::HfunContext(_)
| Hint_::Hvar(_)
| Hint_::Habstr(_, _) => {
if self.should_elab {
let _ = std::mem::replace(hint_, Hint_::Hwildcard);
}
false
}
}
}
}
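// Turn an optional leading expression plus a list of enforcement expressions
// into expression statements, reusing each expression's position.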
fn exprs_to_stmts(expr: Option<Expr>, mut exprs: Vec<Expr>) -> Vec<Stmt> {
let mut stmts = vec![];
if let Some(expr) = expr {
stmts.push(Stmt(expr.1.clone(), Stmt_::Expr(Box::new(expr))));
}
for expr in exprs.drain(..) {
stmts.push(Stmt(expr.1.clone(), Stmt_::Expr(Box::new(expr))));
}
stmts
}
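// Prepend the given statements to the front of the block, keeping their order.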
fn add_to_block(mut stmts: Vec<Stmt>, block: &mut Block) {
match block {
Block(stmts2) => {
stmts.append(stmts2);
std::mem::swap(&mut stmts, stmts2);
}
}
}
impl<'a> VisitorMut<'a> for TypedLocal {
type Params = AstParams<Env, ()>;
fn object(&mut self) -> &mut dyn VisitorMut<'a, Params = Self::Params> {
self
}
fn visit_stmt(&mut self, env: &mut Env, elem: &mut nast::Stmt) -> Result<(), ()> {
let Stmt(pos, stmt_) = elem;
match stmt_ {
Stmt_::DeclareLocal(box (lid, hint, expr)) => {
expr.recurse(env, self.object())?;
let name = local_id::get_name(&lid.1);
if let Some((_, def_pos)) = self.get_local(name) {
env.emit_error(NamingError::IllegalTypedLocal {
join: false,
id_pos: pos.clone(),
id_name: name.clone(),
def_pos: def_pos.clone(),
});
} else {
let mut as_hint = Hint(Pos::NONE, Box::new(Hint_::Hnothing));
std::mem::swap(hint, &mut as_hint);
let maybe_shape_or_tuple = self.simplify_hint(&mut as_hint);
self.add_local(
name.to_string(),
Some((as_hint.clone(), maybe_shape_or_tuple)),
pos,
);
self.add_declared_id(name);
if self.should_elab && let Some(expr) = expr {
self.wrap_rhs_with_as(expr, as_hint, pos);
let mut init_lid = Lid(Pos::NONE, (0, "".to_string()));
std::mem::swap(&mut init_lid, lid);
let mut init_expr = Expr((), Pos::NONE, Expr_::Null);
std::mem::swap(expr, &mut init_expr);
let assign_expr_ = Expr_::Binop(Box::new(Binop {
bop: Bop::Eq(None),
lhs: Expr((), pos.clone(), Expr_::Lvar(Box::new(init_lid))),
rhs: init_expr,
}));
let assign_expr = Expr((), pos.clone(), assign_expr_);
*stmt_ = Stmt_::Expr(Box::new(assign_expr));
} else if self.should_elab {
*stmt_ = Stmt_::Noop;
} else {
std::mem::swap(hint, &mut as_hint);
}
};
Ok(())
}
Stmt_::Expr(box expr) => {
expr.recurse(env, self.object())?;
if let Err(()) = self.enforce_assign_expr_rhs(expr) {
let mut exprs = vec![];
self.get_vars_to_enforce(expr, &mut exprs);
if !exprs.is_empty() && self.should_elab {
let mut new_expr = Expr((), Pos::NONE, Expr_::Null);
std::mem::swap(expr, &mut new_expr);
let stmts = exprs_to_stmts(Some(new_expr), exprs);
let mut block = Stmt_::Block(Block(stmts));
std::mem::swap(stmt_, &mut block);
}
}
Ok(())
}
Stmt_::If(box (cond, then_block, else_block)) => {
cond.recurse(env, self.object())?;
let mut then_env = self.new_block_env();
let mut else_env = self.new_block_env();
then_block.recurse(env, then_env.object())?;
else_block.recurse(env, else_env.object())?;
self.join(&mut vec![then_env, else_env], env);
Ok(())
}
Stmt_::For(box (init_exprs, cond, update_exprs, body)) => {
let mut new_init_exprs = self.add_enforcement_exprs_in_list(env, init_exprs)?;
std::mem::swap(&mut new_init_exprs, init_exprs);
cond.recurse(env, self.object())?;
body.recurse(env, self.object())?;
let mut new_update_exprs = self.add_enforcement_exprs_in_list(env, update_exprs)?;
std::mem::swap(&mut new_update_exprs, update_exprs);
Ok(())
}
Stmt_::Using(box UsingStmt {
is_block_scoped: _,
has_await: _,
exprs,
block,
}) => {
for expr in exprs.1.iter_mut() {
expr.recurse(env, self.object())?;
if let Err(()) = self.enforce_assign_expr_rhs(expr) {
// This shouldn't happen. If the expression in the using is not
// an assignment to a variable, then the general prohibition on
// assignments in expressions should kick in.
}
}
block.recurse(env, self.object())
}
Stmt_::Switch(box (cond, cases, default)) => {
cond.recurse(env, self.object())?;
let mut envs = cases
.iter_mut()
.map(|nast::Case(ref mut expr, ref mut block)| {
let _ = expr.recurse(env, self.object());
let mut new_env = self.new_block_env();
let _ = block.recurse(env, new_env.object());
new_env
})
.collect::<Vec<_>>();
if let Some(default_block) = default {
let mut default_env = self.new_block_env();
let _ = default_block.recurse(env, default_env.object());
envs.push(default_env);
}
self.join(&mut envs, env);
Ok(())
}
Stmt_::Match(box nast::StmtMatch { expr, arms }) => {
expr.recurse(env, self.object())?;
let mut envs = arms
.iter_mut()
.map(|nast::StmtMatchArm { pat, body }| {
let _ = pat.recurse(env, self.object());
let mut new_env = self.new_block_env();
let _ = body.recurse(env, new_env.object());
new_env
})
.collect::<Vec<_>>();
self.join(&mut envs, env);
Ok(())
}
Stmt_::Foreach(box (expr, as_expr, block)) => {
expr.recurse(env, self.object())?;
let mut hints = vec![];
match as_expr {
AsExpr::AsV(e) | AsExpr::AwaitAsV(_, e) => {
e.recurse(env, self.object())?;
self.get_vars_to_enforce_lhs(e, false, &mut hints)
}
AsExpr::AsKv(e1, e2) | AsExpr::AwaitAsKv(_, e1, e2) => {
e1.recurse(env, self.object())?;
e2.recurse(env, self.object())?;
self.get_vars_to_enforce_lhs(e1, false, &mut hints);
self.get_vars_to_enforce_lhs(e2, false, &mut hints);
}
}
block.recurse(env, self.object())?;
if self.should_elab {
let stmts = exprs_to_stmts(None, hints);
add_to_block(stmts, block);
}
Ok(())
}
Stmt_::Try(box (try_block, catches, finally_block)) => {
try_block.recurse(env, self.object())?;
let mut envs = catches
.iter_mut()
.map(|nast::Catch(_cn, Lid(pos, name), ref mut block)| {
if !self.locals.contains_key(&name.1) {
self.add_local(name.1.to_string(), None, pos);
}
let mut new_env = self.new_block_env();
let _ = block.recurse(env, new_env.object());
new_env
})
.collect::<Vec<_>>();
self.join(&mut envs, env);
finally_block.recurse(env, self.object())?;
Ok(())
}
// Just need to visit these, no additional logic is required
Stmt_::Fallthrough
| Stmt_::Awaitall(_)
| Stmt_::Break
| Stmt_::Continue
| Stmt_::Throw(_)
| Stmt_::Return(_)
| Stmt_::YieldBreak
| Stmt_::Do(_)
| Stmt_::While(_)
| Stmt_::Noop
| Stmt_::Block(_)
| Stmt_::Markup(_)
| Stmt_::AssertEnv(_) => elem.recurse(env, self.object()),
}
}
fn visit_fun_(&mut self, env: &mut Env, elem: &mut nast::Fun_) -> Result<(), ()> {
let old = self.clone();
self.visit_fun_helper(env, elem)?;
// ensure that checking a lambda expression doesn't change self, so
// that we know that recursing on an expr will have no effect
*self = old;
Ok(())
}
fn visit_efun(&mut self, env: &mut Env, elem: &mut nast::Efun) -> Result<(), ()> {
let old = self.clone();
let mut fun_env = self.restrict_env(&elem.use_);
fun_env.visit_fun_helper(env, &mut elem.fun)?;
*self = old;
Ok(())
}
fn visit_fun_def(&mut self, env: &mut Env, elem: &mut nast::FunDef) -> Result<(), ()> {
self.clear();
let mut generics = self.erased_generics.clone();
for tp in &elem.tparams {
if tp.reified != ReifyKind::Reified {
generics.insert(tp.name.1.clone());
}
}
std::mem::swap(&mut self.erased_generics, &mut generics);
self.visit_fun_helper(env, &mut elem.fun)?;
std::mem::swap(&mut self.erased_generics, &mut generics);
Ok(())
}
fn visit_method_(&mut self, env: &mut Env, elem: &mut nast::Method_) -> Result<(), ()> {
self.clear();
for param in elem.params.iter() {
self.add_local(param.name.clone(), None, ¶m.pos);
self.add_assigned_id(param.name.clone());
}
let mut generics = self.erased_generics.clone();
for tp in &elem.tparams {
if tp.reified != ReifyKind::Reified {
generics.insert(tp.name.1.clone());
}
}
std::mem::swap(&mut self.erased_generics, &mut generics);
elem.body.fb_ast.recurse(env, self.object())?;
std::mem::swap(&mut self.erased_generics, &mut generics);
Ok(())
}
fn visit_class_(&mut self, env: &mut Env, elem: &mut nast::Class_) -> Result<(), ()> {
let mut generics = HashSet::<String>::default();
for tp in &elem.tparams {
if tp.reified != ReifyKind::Reified {
generics.insert(tp.name.1.clone());
}
}
let _ = std::mem::replace(&mut self.erased_generics, generics);
elem.methods.recurse(env, self.object())?;
Ok(())
}
}
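/// Run the typed-local pass over a whole program. When `should_elab` is true,
/// `let` statements are rewritten into `as`-enforced assignments; otherwise the
/// pass only checks them and reports errors.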
pub fn elaborate_program(env: &mut Env, program: &mut nast::Program, should_elab: bool) {
let mut tl = TypedLocal {
should_elab,
..Default::default()
};
tl.visit_program(env, program).unwrap();
}
pub fn elaborate_fun_def(env: &mut Env, f: &mut nast::FunDef, should_elab: bool) {
let mut tl = TypedLocal {
should_elab,
..Default::default()
};
tl.visit_fun_def(env, f).unwrap();
}
pub fn elaborate_class_(env: &mut Env, c: &mut nast::Class_, should_elab: bool) {
let mut tl = TypedLocal {
should_elab,
..Default::default()
};
tl.visit_class_(env, c).unwrap();
}
#[cfg(test)]
mod tests {
use nast::Block;
use nast::Def;
use nast::Program;
use super::*;
fn build_program(stmts: Vec<Stmt>) -> Program {
nast::Program(vec![Def::Stmt(Box::new(Stmt(
Pos::NONE,
Stmt_::Block(Block(stmts)),
)))])
}
#[test]
fn test_init() {
let mut env = Env::default();
let mut orig = build_program(hack_stmts!("let $x: int = 1;"));
let res = build_program(hack_stmts!("$x = 1 as int;"));
self::elaborate_program(&mut env, &mut orig, true);
assert_eq!(orig, res);
}
#[test]
fn test_init2() {
let mut env = Env::default();
let mut orig = build_program(hack_stmts!("let $x: int;"));
let res = build_program(hack_stmts!(""));
self::elaborate_program(&mut env, &mut orig, true);
assert_eq!(orig, res);
}
#[test]
fn test_seq1() {
let mut env = Env::default();
let mut orig = build_program(hack_stmts!("let $x: vec<int> = 1; $x = vec[];"));
let res = build_program(hack_stmts!("$x = 1 as vec<int>; $x = vec[] as vec<int>;"));
self::elaborate_program(&mut env, &mut orig, true);
assert_eq!(orig, res);
}
#[test]
fn test_seq2() {
let mut env = Env::default();
let mut orig = build_program(hack_stmts!("let $x: vec<int>; $x = vec[];"));
let noop = Stmt(Pos::NONE, Stmt_::Noop);
let res = build_program(hack_stmts!("#noop; $x = vec[] as vec<int>;"));
self::elaborate_program(&mut env, &mut orig, true);
assert_eq!(orig, res);
}
#[test]
fn test_if1() {
let mut env = Env::default();
let mut orig = build_program(hack_stmts!(
"let $x: vec<string> = 1; if ($b) {$x = vec[];} else {$x = 1;}; $x = 4;"
));
let res = build_program(hack_stmts!(
"$x = 1 as vec<string>; if ($b) {$x = vec[] as vec<string>;} else {$x = 1 as vec<string>;}; $x = 4 as vec<string>;"
));
self::elaborate_program(&mut env, &mut orig, true);
assert_eq!(orig, res);
}
#[test]
fn test_if2() {
let mut env = Env::default();
let mut orig = build_program(hack_stmts!(
"if ($b) {let $x: t = vec[]; $x = 1;} else {let $y: t2 = vec[]; $y = 1;}; $x = 4; $y = 44;"
));
let res = build_program(hack_stmts!(
"if ($b) {$x = vec[] as t; $x = 1 as t;} else {$y = vec[] as t2; $y = 1 as t2;}; $x = 4 as t; $y = 44 as t2;"
));
self::elaborate_program(&mut env, &mut orig, true);
assert_eq!(orig, res);
}
#[test]
fn test_for1() {
let mut env = Env::default();
let mut orig = build_program(hack_stmts!(
"let $x: t = 1; let $y: t2 = 2; for ($x = 1, $y = 2; ; $x = 1, $y = 2) { $x = 1; }; $y = 3;"
));
let res = build_program(hack_stmts!(
"$x = 1 as t; $y = 2 as t2; for ($x = 1 as t, $y = 2 as t2; ; $x = 1 as t, $y = 2 as t2) { $x = 1 as t; }; $y = 3 as t2;"
));
self::elaborate_program(&mut env, &mut orig, true);
assert_eq!(orig, res);
}
#[test]
fn test_for2() {
let mut env = Env::default();
let mut orig = build_program(hack_stmts!(
"for (; ; $x = 1) { let $x: t = 1; $x = 22;} $x = 3;"
));
let res = build_program(hack_stmts!(
"for (; ; $x = 1 as t) { $x = 1 as t; $x = 22 as t;} $x = 3 as t;"
));
self::elaborate_program(&mut env, &mut orig, true);
assert_eq!(orig, res);
}
#[test]
fn test_for3() {
let mut env = Env::default();
let mut orig = build_program(hack_stmts!(
"let $x:t = 1; for (list($x, $y) = vec[1,2]; ; $x[0] = 1, $y = 0) { }"
));
let res = build_program(hack_stmts!(
"$x = 1 as t; for (list($x, $y) = vec[1,2], $x as t; ; $x[0] = 1, $x as t, $y = 0) { }"
));
self::elaborate_program(&mut env, &mut orig, true);
assert_eq!(orig, res);
}
#[test]
fn test_simplify_hint1() {
let mut env = Env::default();
let mut orig = build_program(hack_stmts!(
"let $x:\\HH\\vec<\\HH\\int> = vec[]; $x = vec[];"
));
let res = build_program(hack_stmts!(
"$x = vec[] as \\HH\\vec<_>; $x = vec[] as \\HH\\vec<_>;"
));
self::elaborate_program(&mut env, &mut orig, true);
assert_eq!(orig, res);
}
#[test]
fn test_simplify_hint2() {
let mut env = Env::default();
let mut orig = build_program(hack_stmts!(
"let $x:C<\\HH\\vec<\\HH\\int>> = vec[]; $x = vec[];"
));
let res = build_program(hack_stmts!(
"$x = vec[] as C<\\HH\\vec<_>>; $x = vec[] as C<\\HH\\vec<_>>;"
));
self::elaborate_program(&mut env, &mut orig, true);
assert_eq!(orig, res);
}
#[test]
fn test_simplify_hint3() {
let mut env = Env::default();
let mut orig = build_program(hack_stmts!("let $x:C<T> = vec[]; $x = vec[];"));
let res = build_program(hack_stmts!("$x = vec[] as C<T>; $x = vec[] as C<T>;"));
self::elaborate_program(&mut env, &mut orig, true);
assert_eq!(orig, res);
}
#[test]
fn test_simplify_hint4() {
let mut env = Env::default();
let mut orig = build_program(hack_stmts!("let $x:C<T> = vec[]; $x = vec[];"));
let res = build_program(hack_stmts!("$x = vec[] as C<_>; $x = vec[] as C<_>;"));
let mut tl = TypedLocal {
should_elab: true,
..Default::default()
};
tl.erased_generics.insert("T".to_string());
tl.visit_program(&mut env, &mut orig).unwrap();
assert_eq!(orig, res);
}
#[test]
fn test_list1() {
let mut env = Env::default();
let mut orig = build_program(hack_stmts!("let $x:int = 1; list($x, $y) = vec[1, 2];"));
let res = build_program(hack_stmts!(
"$x = 1 as int; {list($x, $y) = vec[1, 2]; $x as int;}"
));
self::elaborate_program(&mut env, &mut orig, true);
assert_eq!(orig, res);
}
#[test]
fn test_list2() {
let mut env = Env::default();
let mut orig = build_program(hack_stmts!(
"let $x:int = 1; let $y: int = 1; list($x, list ($z, $y)) = vec[1, vec[2, 3]];"
));
let res = build_program(hack_stmts!(
"$x = 1 as int; $y = 1 as int; {list($x, list ($z, $y)) = vec[1, vec[2, 3]]; $x as int; $y as int;}"
));
self::elaborate_program(&mut env, &mut orig, true);
assert_eq!(orig, res);
}
#[test]
fn test_array1() {
let mut env = Env::default();
let mut orig = build_program(hack_stmts!("let $x:\\HH\\vec<int> = vec[1]; $x[0] = 1;"));
let res = build_program(hack_stmts!("$x = vec[1] as \\HH\\vec<_>; $x[0] = 1;"));
self::elaborate_program(&mut env, &mut orig, true);
assert_eq!(orig, res);
}
#[test]
fn test_array2() {
let mut env = Env::default();
let mut orig = build_program(hack_stmts!("let $x:t = vec[1]; $x[0] = 1;"));
let res = build_program(hack_stmts!("$x = vec[1] as t; {$x[0] = 1; $x as t;}"));
self::elaborate_program(&mut env, &mut orig, true);
assert_eq!(orig, res);
}
#[test]
fn test_array3() {
let mut env = Env::default();
let mut orig = build_program(hack_stmts!("let $x:t = vec[1]; $x[] = 1;"));
let res = build_program(hack_stmts!("$x = vec[1] as t; $x[] = 1;"));
self::elaborate_program(&mut env, &mut orig, true);
assert_eq!(orig, res);
}
#[test]
fn test_array4() {
let mut env = Env::default();
let mut orig = build_program(hack_stmts!("let $x:t = vec[1]; $x[0][] = 1;"));
let res = build_program(hack_stmts!("$x = vec[1] as t; $x[0][] = 1;"));
self::elaborate_program(&mut env, &mut orig, true);
assert_eq!(orig, res);
}
#[test]
fn test_foreach1() {
let mut env = Env::default();
let mut orig = build_program(hack_stmts!("let $x:int = 1; foreach (e as $x) { }"));
let res = build_program(hack_stmts!(
"$x = 1 as int; foreach (e as $x) { $x as int; }"
));
self::elaborate_program(&mut env, &mut orig, true);
assert_eq!(orig, res);
}
#[test]
fn test_foreach2() {
let mut env = Env::default();
let mut orig = build_program(hack_stmts!(
"let $x:t = 1; let $y:t = 1; foreach (e as $x[0] => $y[0]) { 1; }"
));
let res = build_program(hack_stmts!(
"$x = 1 as t; $y = 1 as t; foreach (e as $x[0] => $y[0]) { $x as t; $y as t; 1;}"
));
self::elaborate_program(&mut env, &mut orig, true);
assert_eq!(orig, res);
}
#[test]
fn test_eqop() {
let mut env = Env::default();
// hack_stmts! macro is broken on the += operator
let mut orig = build_program(vec![
Stmt(
Pos::NONE,
Stmt_::DeclareLocal(Box::new((
Lid(Pos::NONE, (0, "$x".to_string())),
Hint(
Pos::NONE,
Box::new(Hint_::Happly(nast::Id(Pos::NONE, "t".to_string()), vec![])),
),
Some(Expr((), Pos::NONE, Expr_::Int("1".to_string()))),
))),
),
Stmt(
Pos::NONE,
Stmt_::Expr(Box::new(Expr(
(),
Pos::NONE,
Expr_::Binop(Box::new(Binop {
bop: Bop::Eq(Some(Box::new(Bop::Plus))),
lhs: Expr(
(),
Pos::NONE,
Expr_::Lvar(Box::new(Lid(Pos::NONE, (0, "$x".to_string())))),
),
rhs: Expr((), Pos::NONE, Expr_::Int("1".to_string())),
})),
))),
),
]);
let res = build_program(vec![
Stmt(
Pos::NONE,
Stmt_::Expr(Box::new(Expr(
(),
Pos::NONE,
Expr_::Binop(Box::new(Binop {
bop: Bop::Eq(None),
lhs: Expr(
(),
Pos::NONE,
Expr_::Lvar(Box::new(Lid(Pos::NONE, (0, "$x".to_string())))),
),
rhs: Expr(
(),
Pos::NONE,
Expr_::As(Box::new((
Expr((), Pos::NONE, Expr_::Int("1".to_string())),
Hint(
Pos::NONE,
Box::new(Hint_::Happly(
nast::Id(Pos::NONE, "t".to_string()),
vec![],
)),
),
false,
))),
),
})),
))),
),
Stmt(
Pos::NONE,
Stmt_::Block(Block(vec![
Stmt(
Pos::NONE,
Stmt_::Expr(Box::new(Expr(
(),
Pos::NONE,
Expr_::Binop(Box::new(Binop {
bop: Bop::Eq(Some(Box::new(Bop::Plus))),
lhs: Expr(
(),
Pos::NONE,
Expr_::Lvar(Box::new(Lid(Pos::NONE, (0, "$x".to_string())))),
),
rhs: Expr((), Pos::NONE, Expr_::Int("1".to_string())),
})),
))),
),
Stmt(
Pos::NONE,
Stmt_::Expr(Box::new(Expr(
(),
Pos::NONE,
Expr_::As(Box::new((
Expr(
(),
Pos::NONE,
Expr_::Lvar(Box::new(Lid(Pos::NONE, (0, "$x".to_string())))),
),
Hint(
Pos::NONE,
Box::new(Hint_::Happly(
nast::Id(Pos::NONE, "t".to_string()),
vec![],
)),
),
false,
))),
))),
),
])),
),
]);
self::elaborate_program(&mut env, &mut orig, true);
assert_eq!(orig, res);
}
} |
Rust | hhvm/hphp/hack/src/elab/passes/elab_as_expr.rs | // Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use nast::AsExpr;
use nast::Expr;
use nast::Expr_;
use nast::Lid;
use oxidized::local_id;
use crate::prelude::*;
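/// Validate the key/value bindings of `foreach (... as ...)` expressions,
/// replacing invalid forms with an internal placeholder lvar.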
#[derive(Copy, Clone, Default)]
pub struct ElabAsExprPass;
impl Pass for ElabAsExprPass {
fn on_ty_as_expr_bottom_up(&mut self, env: &Env, elem: &mut AsExpr) -> ControlFlow<()> {
match elem {
AsExpr::AsV(e) | AsExpr::AwaitAsV(_, e) => elab_value(env, e),
AsExpr::AsKv(ek, ev) | AsExpr::AwaitAsKv(_, ek, ev) => {
elab_key(env, ek);
elab_value(env, ev);
}
}
Continue(())
}
}
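// Value position of `foreach`: a bare identifier is reported as
// `ExpectedVariable` and replaced with a placeholder lvar.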
fn elab_value(env: &Env, expr: &mut Expr) {
let Expr(_, pos, expr_) = expr;
if matches!(expr_, Expr_::Id(..)) {
env.emit_error(NamingError::ExpectedVariable(pos.clone()));
*expr_ = Expr_::Lvar(Box::new(Lid(
pos.clone(),
local_id::make_unscoped("__internal_placeholder"),
)))
}
}
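// Key position of `foreach`: anything other than an lvar or `$_` placeholder
// is reported and replaced with a placeholder lvar.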
fn elab_key(env: &Env, expr: &mut Expr) {
let Expr(_, pos, expr_) = expr;
match expr_ {
Expr_::Lvar(..) | Expr_::Lplaceholder(..) => (),
_ => {
env.emit_error(NamingError::ExpectedVariable(pos.clone()));
*expr_ = Expr_::Lvar(Box::new(Lid(
pos.clone(),
local_id::make_unscoped("__internal_placeholder"),
)))
}
}
}
#[cfg(test)]
mod tests {
use nast::Id;
use nast::Pos;
use super::*;
#[test]
fn test_value_invalid() {
let env = Env::default();
let mut pass = ElabAsExprPass;
let mut elem = AsExpr::AsV(Expr(
(),
Pos::default(),
Expr_::Id(Box::new(Id(Pos::default(), String::default()))),
));
elem.transform(&env, &mut pass);
assert!(matches!(
env.into_errors().as_slice(),
[NamingPhaseError::Naming(NamingError::ExpectedVariable(..))]
));
assert!(match elem {
AsExpr::AsV(Expr(_, _, Expr_::Lvar(box Lid(_, lid)))) =>
lid.1 == "__internal_placeholder",
_ => false,
})
}
#[test]
fn test_value_valid() {
let env = Env::default();
let mut pass = ElabAsExprPass;
let mut elem = AsExpr::AsV(elab_utils::expr::null());
elem.transform(&env, &mut pass);
assert!(env.into_errors().is_empty());
assert!(matches!(elem, AsExpr::AsV(Expr(_, _, Expr_::Null))))
}
#[test]
fn test_key_invalid() {
let env = Env::default();
let mut pass = ElabAsExprPass;
let mut elem = AsExpr::AsKv(elab_utils::expr::null(), elab_utils::expr::null());
elem.transform(&env, &mut pass);
assert!(matches!(
env.into_errors().as_slice(),
[NamingPhaseError::Naming(NamingError::ExpectedVariable(..))]
));
assert!(match elem {
AsExpr::AsKv(Expr(_, _, Expr_::Lvar(box Lid(_, lid))), Expr(_, _, Expr_::Null)) =>
lid.1 == "__internal_placeholder",
_ => false,
})
}
#[test]
fn test_key_valid() {
let env = Env::default();
let mut pass = ElabAsExprPass;
let mut elem = AsExpr::AsKv(
Expr((), Pos::default(), Expr_::Lplaceholder(Box::default())),
elab_utils::expr::null(),
);
elem.transform(&env, &mut pass);
assert!(env.into_errors().is_empty());
assert!(matches!(
elem,
AsExpr::AsKv(Expr(_, _, Expr_::Lplaceholder(..)), Expr(_, _, Expr_::Null))
))
}
} |
Rust | hhvm/hphp/hack/src/elab/passes/elab_block.rs | // Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use std::collections::VecDeque;
use nast::Block;
use nast::Stmt;
use nast::Stmt_;
use nast::UsingStmt;
use crate::prelude::*;
#[derive(Clone, Copy, Default)]
pub struct ElabBlockPass;
impl Pass for ElabBlockPass {
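// Splice the statements of any directly nested `Block` into the enclosing
// block, preserving statement order.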
fn on_ty_block_top_down(&mut self, _: &Env, elem: &mut Block) -> ControlFlow<()> {
let mut q: VecDeque<_> = elem.drain(0..).collect();
while let Some(Stmt(pos, stmt_)) = q.pop_front() {
match stmt_ {
Stmt_::Block(xs) => xs.into_iter().rev().for_each(|x| q.push_front(x)),
_ => elem.push(Stmt(pos, stmt_)),
}
}
Continue(())
}
fn on_ty_using_stmt_top_down(&mut self, _: &Env, elem: &mut UsingStmt) -> ControlFlow<()> {
elem.is_block_scoped = false;
Continue(())
}
}
#[cfg(test)]
mod tests {
use nast::Block;
use nast::Pos;
use nast::Stmt;
use nast::Stmt_;
use super::*;
#[test]
fn test() {
let env = Env::default();
let mut pass = ElabBlockPass;
let mut elem: Block = Block(vec![Stmt(
Pos::NONE,
Stmt_::Block(Block(vec![
Stmt(Pos::NONE, Stmt_::Noop),
Stmt(
Pos::NONE,
Stmt_::Block(Block(vec![
Stmt(Pos::NONE, Stmt_::Noop),
Stmt(
Pos::NONE,
Stmt_::Block(Block(vec![
Stmt(Pos::NONE, Stmt_::Noop),
Stmt(
Pos::NONE,
Stmt_::Block(Block(vec![
Stmt(Pos::NONE, Stmt_::Noop),
Stmt(
Pos::NONE,
Stmt_::Block(Block(vec![Stmt(Pos::NONE, Stmt_::Noop)])),
),
Stmt(Pos::NONE, Stmt_::Noop),
])),
),
Stmt(Pos::NONE, Stmt_::Noop),
])),
),
Stmt(Pos::NONE, Stmt_::Noop),
])),
),
Stmt(Pos::NONE, Stmt_::Noop),
])),
)]);
elem.transform(&env, &mut pass);
assert_eq!(elem.len(), 9);
assert!(elem.into_iter().all(|s| matches!(s.1, Stmt_::Noop)));
}
} |
Rust | hhvm/hphp/hack/src/elab/passes/elab_class_id.rs | // Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use nast::ClassId;
use nast::ClassId_;
use nast::Expr;
use nast::Expr_;
use nast::Id;
use nast::Lid;
use nast::Pos;
use nast::Sid;
use oxidized::local_id;
use crate::prelude::*;
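/// Canonicalize class ids produced by lowering: `CIexpr` over `parent`, `self`
/// and `static` becomes `CIparent`, `CIself` and `CIstatic`, other identifiers
/// become `CI`, and `CIexpr` over `$this` becomes `This`.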
#[derive(Clone, Copy, Default)]
pub struct ElabClassIdPass {
in_class: bool,
}
impl Pass for ElabClassIdPass {
/*
The lowerer will give us CIexpr (Id _ | Lvar _ ); here we:
- convert CIexpr(_,_,Id _) to CIparent, CIself, CIstatic and CI.
- convert CIexpr(_,_,Lvar $this) to CIexpr(_,_,This)
If there is a CIexpr with anything other than an Lvar or This after this
elaboration step, it is an error and will be raised in subsequent
validation passes
TODO[mjt] We're defining `on_ty_class_id` rather than `on_ty_class_id_`
since the legacy code mangles positions by using the inner `class_id_`
position in the output `class_id` tuple. This looks to be erroneous.
TODO[mjt] The Lvar(..,this) => This rule is applied during lvar elaboration
so we should probably drop it here
TODO[mjt] Lowering gives us a very specific representation but we don't
enforce this invariant at all here
*/
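// For example (illustrative): `parent::foo()` reaches this pass as
// CIexpr(Id "parent") and is rewritten to CIparent, while `$c::foo()` arrives
// as CIexpr(Lvar $c) and is left for later validation passes.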
fn on_ty_class_id_top_down(&mut self, env: &Env, elem: &mut ClassId) -> ControlFlow<()> {
let ClassId(_annot, pos, class_id_) = elem;
if let ClassId_::CIexpr(Expr(_, expr_pos, expr_)) = class_id_ as &mut ClassId_ {
// [mjt] For some reason the legacy code modifies the position of
// the surrounding [ClassId]. This seems wrong and causes a clone
*pos = expr_pos.clone();
match expr_ {
Expr_::Id(sid) => {
// If the id is a special ref to a class, it is only
// valid if we are in a class
let Id(id_pos, cname) = sid as &mut Sid;
if cname == sn::classes::PARENT {
if !self.in_class {
let err_pos = std::mem::replace(id_pos, Pos::NONE);
env.emit_error(NamingError::ParentOutsideClass(err_pos));
let ci_pos = std::mem::replace(expr_pos, Pos::NONE);
*class_id_ = ClassId_::CI(Id(ci_pos, sn::classes::UNKNOWN.to_string()))
} else {
*class_id_ = ClassId_::CIparent
}
} else if cname == sn::classes::SELF {
if !self.in_class {
let err_pos = std::mem::replace(id_pos, Pos::NONE);
env.emit_error(NamingError::SelfOutsideClass(err_pos));
let ci_pos = std::mem::replace(expr_pos, Pos::NONE);
*class_id_ = ClassId_::CI(Id(ci_pos, sn::classes::UNKNOWN.to_string()))
} else {
*class_id_ = ClassId_::CIself
}
} else if cname == sn::classes::STATIC {
if !self.in_class {
let err_pos = std::mem::replace(id_pos, Pos::NONE);
env.emit_error(NamingError::StaticOutsideClass(err_pos));
let ci_pos = std::mem::replace(expr_pos, Pos::NONE);
*class_id_ = ClassId_::CI(Id(ci_pos, sn::classes::UNKNOWN.to_string()))
} else {
*class_id_ = ClassId_::CIstatic
}
} else {
// Otherwise, replace occurrences of CIexpr(_,_,Id(..))
// with CI(..)
let ci_pos = std::mem::replace(expr_pos, Pos::NONE);
let ci_name = std::mem::take(cname);
*class_id_ = ClassId_::CI(Id(ci_pos, ci_name))
}
Continue(())
}
Expr_::Lvar(lid) => {
// Convert Lvar(this) => this; note that this overlaps
// with lvar elaboration
let Lid(_lid_pos, lcl_id) = &**lid;
if local_id::get_name(lcl_id) == sn::special_idents::THIS {
*expr_ = Expr_::This
}
Continue(())
}
_ => Continue(()),
}
} else {
// We only ever expect a `CIexpr(..)` to come from lowering.
// We should change the lowered AST repr to make this impossible.
Continue(())
}
}
fn on_ty_class__top_down(&mut self, _: &Env, _: &mut nast::Class_) -> ControlFlow<()> {
self.in_class = true;
Continue(())
}
/* The attributes applied to a class exist outside the current class so
references to `self` are invalid */
fn on_fld_class__user_attributes_top_down(
&mut self,
_: &Env,
_: &mut nast::UserAttributes,
) -> ControlFlow<()> {
self.in_class = false;
Continue(())
}
}
#[cfg(test)]
mod tests {
use super::*;
// Elaboration of CIexpr(..,..,Id(..,..)) when the id refers to a class
#[test]
fn test_ciexpr_id_class_ref() {
let env = Env::default();
let mut pass = ElabClassIdPass::default();
let cases = vec![
(sn::classes::SELF, ClassId_::CIself),
(sn::classes::PARENT, ClassId_::CIparent),
(sn::classes::STATIC, ClassId_::CIstatic),
];
for (cname, repr) in cases {
let mut elem_outside = ClassId(
(),
Pos::NONE,
ClassId_::CIexpr(Expr(
(),
Pos::NONE,
Expr_::Id(Box::new(Id(Pos::NONE, cname.to_string()))),
)),
);
let mut elem_inside = elem_outside.clone();
// transforming when outside a class
// expect CI(Id(.., UNKNOWN))
pass.in_class = false;
elem_outside.transform(&env, &mut pass);
let ClassId(_, _, class_id_) = elem_outside;
assert!(match class_id_ {
ClassId_::CI(Id(_, nm)) => nm == sn::classes::UNKNOWN,
_ => false,
});
// transforming when inside a class
// expect the corresponding ClassId_ (CIself, CIparent or CIstatic)
pass.in_class = true;
elem_inside.transform(&env, &mut pass);
let ClassId(_, _, class_id_) = elem_inside;
assert_eq!(class_id_, repr)
}
}
// Elaboration of CIexpr(..,..,Id(..,..)) when the id does not refer
// to a class
#[test]
fn test_ciexpr_id_non_class_ref() {
let env = Env::default();
let mut pass = ElabClassIdPass::default();
let cname = "Classy";
let mut elem_outside = ClassId(
(),
Pos::NONE,
ClassId_::CIexpr(Expr(
(),
Pos::NONE,
Expr_::Id(Box::new(Id(Pos::NONE, cname.to_string()))),
)),
);
let mut elem_inside = elem_outside.clone();
// transforming when outside a class
// expect CI(Id(.., cname))
pass.in_class = false;
elem_outside.transform(&env, &mut pass);
let ClassId(_, _, class_id_) = elem_outside;
assert!(match class_id_ {
ClassId_::CI(Id(_, nm)) => nm == cname,
_ => false,
});
// transforming when inside a class
// expect CI(Id(.., cname))
pass.in_class = true;
elem_inside.transform(&env, &mut pass);
let ClassId(_, _, class_id_) = elem_inside;
assert!(match class_id_ {
ClassId_::CI(Id(_, nm)) => nm == cname,
_ => false,
});
}
// Elaboration of CIexpr(..,..,Lvar(..,this)) => CIexpr(..,..,This)
#[test]
fn test_ciexpr_lvar_this() {
let env = Env::default();
let mut pass = ElabClassIdPass::default();
let mut elem_outside = ClassId(
(),
Pos::NONE,
ClassId_::CIexpr(Expr(
(),
Pos::NONE,
Expr_::Lvar(Box::new(Lid(
Pos::NONE,
local_id::make_unscoped(sn::special_idents::THIS),
))),
)),
);
let mut elem_inside = elem_outside.clone();
// transforming when outside a class
// expect CIexpr(_,_,This)
pass.in_class = false;
elem_outside.transform(&env, &mut pass);
let ClassId(_, _, class_id_) = elem_outside;
assert!(matches!(
class_id_,
ClassId_::CIexpr(Expr(_, _, Expr_::This))
));
// transforming when inside a class
// expect CIexpr(_,_,This), just as outside a class
pass.in_class = true;
elem_inside.transform(&env, &mut pass);
let ClassId(_, _, class_id_) = elem_inside;
assert!(matches!(
class_id_,
ClassId_::CIexpr(Expr(_, _, Expr_::This))
));
}
// Elaboration of CIexpr(..,..,expr_)
// for any expression other than `Id` or `Lvar(_,this)`, we expect the
// elaborated ClassId_ to still have the same Expr_
// Note[mjt]: in practice, I think we only ever see `Id` and `Lvar` expressions
// in this position
#[test]
fn test_ciexpr_fallthrough() {
let env = Env::default();
let mut pass = ElabClassIdPass::default();
let exprs_ = vec![
Expr_::Lvar(Box::new(Lid(
Pos::NONE,
local_id::make_unscoped("wut".to_string()),
))),
Expr_::Null,
];
for expr_ in exprs_ {
let mut elem_outside = ClassId(
(),
Pos::NONE,
ClassId_::CIexpr(Expr((), Pos::NONE, expr_.clone())),
);
let mut elem_inside = elem_outside.clone();
pass.in_class = false;
elem_outside.transform(&env, &mut pass);
let ClassId(_, _, class_id_) = elem_outside;
assert!(match class_id_ {
ClassId_::CIexpr(Expr(_, _, ci_expr_)) => ci_expr_ == expr_,
_ => false,
});
pass.in_class = true;
elem_inside.transform(&env, &mut pass);
let ClassId(_, _, class_id_) = elem_inside;
assert!(match class_id_ {
ClassId_::CIexpr(Expr(_, _, ci_expr_)) => ci_expr_ == expr_,
_ => false,
})
}
}
} |
Rust | hhvm/hphp/hack/src/elab/passes/elab_class_vars.rs | // Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use nast::ClassVar;
use nast::Class_;
use nast::ClassishKind;
use nast::Expr;
use nast::Expr_;
use nast::Hint;
use nast::Hint_;
use nast::Id;
use nast::Tprim;
use nast::TypeHint;
use nast::UserAttribute;
use nast::XhpAttr;
use nast::XhpAttrInfo;
use crate::prelude::*;
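/// Normalize class properties: initialize XHP attribute info on `:`-prefixed
/// props, propagate a class-level `__Const` attribute to instance props,
/// re-express XHP attribute declarations as class vars, and mark interface
/// methods abstract.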
#[derive(Copy, Clone, Default)]
pub struct ElabClassVarsPass;
impl Pass for ElabClassVarsPass {
// TODO[mjt] split out elaboration of `XhpAttr`s?
fn on_ty_class__top_down(&mut self, env: &Env, elem: &mut Class_) -> ControlFlow<()> {
let const_user_attr_opt = elem
.user_attributes
.iter()
.find(|ua| ua.name.1 == sn::user_attributes::CONST);
// Modify static and instance props
elem.vars.iter_mut().for_each(|var| {
// apply to both static and instance props
let Id(_, nm) = &var.id;
if nm.starts_with(':') {
var.xhp_attr = Some(XhpAttrInfo {
like: None,
tag: None,
enum_values: vec![],
})
} else {
var.xhp_attr = None
}
// For instance props only, add the CONST user attribute if the class has it
if !var.is_static {
if let Some(ua) = const_user_attr_opt &&
!var.user_attributes.iter().any(|ua| ua.name.1 == sn::user_attributes::CONST) {
var.user_attributes.push(UserAttribute {
name: ua.name.clone(),
params: vec![],
});
}
}
});
// Represent xhp_attrs as vars
elem.xhp_attrs
.drain(0..)
.for_each(|xhp_attr| elem.vars.push(class_var_of_xhp_attr(xhp_attr, env)));
// If this is an interface mark all methods as abstract
if matches!(elem.kind, ClassishKind::Cinterface) {
elem.methods.iter_mut().for_each(|m| m.abstract_ = true)
}
Continue(())
}
}
// Convert an `XhpAttr` into a `ClassVar`
// The `XhpAttr` is represented by a 4-tuple of
// 1) a `TypeHint` which is a position paired with an optional `Hint`
// 2) a `ClassVar`
// 3) an `XhpAttrTag` which is either `Required` or `LateInit`
// 4) a position paired with a vector of `Expr`, which corresponds to an
// attribute declared with `enum {...}` rather than an explicit hint
// To represent as a `ClassVar`, we use the provided `ClassVar` then
// - clear the user attributes
// - update the `ClassVar` `XhpAttrInfo` `tag` field with the `XhpAttrTag`
// - update the `ClassVar` `type_` field using either the provided `TypeHint`
// (when it is defined) or a hint inferred from the `enum` `Expr`s when it is
// not
//
// TODO[mjt] This elaboration step is quite involved and seemingly has no docs
// The representation of `XhpAttr` doesn't help here:
// i) we can represent an attribute having neither an explicit hint nor an enum declaration
// ii) the list of `Expr` can actually only be int or string literals
// iii) `ClassVar` `XhpAttrInfo` `enum_values` already contains a validated and restricted
// representation of the `Expr`s
fn class_var_of_xhp_attr(xhp_attr: XhpAttr, env: &Env) -> ClassVar {
let XhpAttr(type_hint, mut cv, xhp_attr_tag_opt, enum_opt) = xhp_attr;
let is_required = xhp_attr_tag_opt.is_some();
let has_default = if let Some(Expr(_, _, expr_)) = &cv.expr {
!matches!(expr_, Expr_::Null)
} else {
false
};
if let Some(xhp_attr) = &mut cv.xhp_attr {
xhp_attr.tag = xhp_attr_tag_opt
}
cv.user_attributes.clear();
let TypeHint(type_hint_pos, type_hint_hint_opt) = type_hint;
// If we have `enum_opt`, a list of expressions, try and build a hint based on the
// occurrence of expression literals
// If we don't, use the (optional) hint within `type_hint`
let mut hint_opt: Option<Hint> = enum_opt
.map(|(pos, items)| Hint(pos, Box::new(xhp_attr_hint(items))))
.or(type_hint_hint_opt);
// Now examine the hint, if we have it, removing any `Hlike`
// - if we have an `Hoption` but `is_required` is true, raise an error
// - if we have mixed, do nothing
// - if we have `is_required` or `has_default` do nothing
// - otherwise, wrap the hint in an `Hoption`
if let Some(hint) = &mut hint_opt {
// Swap out the hint_
let hint_ = std::mem::replace(&mut *hint.1, Hint_::Herr);
match strip_like(&hint_) {
// If we have an `Hoption` but `is_required` is true, raise an
// error and put back the `Hint_`
Hint_::Hoption(_) if is_required => {
let Id(_, attr_name) = &cv.id;
env.emit_error(NamingError::XhpOptionalRequiredAttr {
pos: hint.0.clone(),
attr_name: attr_name.clone(),
});
*hint.1 = hint_
}
// If the hint is `Hmixed` or we have either `is_required` or
// `has_default`, just put back the `Hint_`
Hint_::Hmixed => *hint.1 = hint_,
_ if is_required || has_default => *hint.1 = hint_,
// Otherwise, wrap the hint in `Hoption`
_ => *hint.1 = Hint_::Hoption(Hint(hint.0.clone(), Box::new(hint_))),
}
}
// Finally, map our optional hint and the optional position from `cv`s
// `xhp_hint`
// If both are present wrap the hint in an `Hlike` using the position;
// raise an error if like hints aren't enabled
if !env.like_type_hints_enabled() {
if let Some((pos, Hint(_, hint_))) = cv
.xhp_attr
.as_ref()
.and_then(|xai| xai.like.as_ref())
.zip(hint_opt.as_ref())
{
if matches!(hint_ as &Hint_, Hint_::Hlike(_)) {
env.emit_error(ExperimentalFeature::LikeType(pos.clone()))
}
}
}
cv.type_ = TypeHint(type_hint_pos, hint_opt);
cv
}
#[derive(Copy, Clone)]
enum XhpHint {
Neither,
Int,
String,
Both,
}
// If we have a like `Hint_` return a reference to the inner `Hint_` otherwise
// return the reference to the outer hint
fn strip_like(hint_: &Hint_) -> &Hint_ {
if let Hint_::Hlike(Hint(_, inner_hint_)) = hint_ {
inner_hint_
} else {
hint_
}
}
impl XhpHint {
pub fn combine(self, other: Self) -> ControlFlow<Self, Self> {
match (self, other) {
(Self::Both, _) => Break(self),
(_, Self::Both) => Break(other),
(Self::String, Self::Int) | (Self::Int, Self::String) => Break(Self::Both),
(Self::Neither, _) => Continue(other),
(_, Self::Neither) => Continue(self),
(Self::Int, Self::Int) | (Self::String, Self::String) => Continue(self),
}
}
pub fn to_hint_(self) -> Hint_ {
match self {
XhpHint::Int => Hint_::Hprim(Tprim::Tint),
XhpHint::String => Hint_::Hprim(Tprim::Tstring),
_ => Hint_::Hmixed,
}
}
}
// TODO[mjt] This function seems a little odd; we are folding over the
// expressions and short-circuiting when have seen both an `Int` and
// `String` / `String2` expression. At that point we say the hint should
// be `Hmixed`. It isn't clear why (1) we are doing a limited version of
// inference and (2) why we aren't inferring `Hprim Tarraykey` in this case
fn xhp_attr_hint(items: Vec<Expr>) -> Hint_ {
match items
.into_iter()
.try_fold(XhpHint::Neither, |acc, Expr(_, _, expr_)| match expr_ {
Expr_::Int(_) => acc.combine(XhpHint::Int),
Expr_::String(_) | Expr_::String2(_) => acc.combine(XhpHint::String),
_ => Continue(acc),
}) {
Continue(xhp_hint) | Break(xhp_hint) => xhp_hint.to_hint_(),
}
} |
Rust | hhvm/hphp/hack/src/elab/passes/elab_const_expr.rs | // Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use bitflags::bitflags;
use nast::Binop;
use nast::Bop;
use nast::CallExpr;
use nast::ClassConstKind;
use nast::ClassId;
use nast::ClassId_;
use nast::Class_;
use nast::ClassishKind;
use nast::Expr;
use nast::Expr_;
use nast::FunDef;
use nast::Gconst;
use nast::Hint;
use nast::Hint_;
use nast::KvcKind;
use nast::ModuleDef;
use nast::Typedef;
use nast::Uop;
use nast::VcKind;
use crate::prelude::*;
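/// Check that constant initializers are built from valid constant expressions,
/// wrapping anything else in `Expr_::Invalid` and emitting `IllegalConstant`
/// errors.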
#[derive(Copy, Clone)]
pub struct ElabConstExprPass {
mode: file_info::Mode,
flags: Flags,
}
bitflags! {
#[derive(Default)]
struct Flags: u8 {
const IN_ENUM_CLASS = 1 << 0;
const ENFORCE_CONST_EXPR = 1 << 1;
}
}
impl ElabConstExprPass {
pub fn in_enum_class(&self) -> bool {
self.flags.contains(Flags::IN_ENUM_CLASS)
}
pub fn set_in_enum_class(&mut self, value: bool) {
self.flags.set(Flags::IN_ENUM_CLASS, value)
}
pub fn enforce_const_expr(&self) -> bool {
self.flags.contains(Flags::ENFORCE_CONST_EXPR)
}
pub fn set_enforce_const_expr(&mut self, value: bool) {
self.flags.set(Flags::ENFORCE_CONST_EXPR, value)
}
}
impl Default for ElabConstExprPass {
fn default() -> Self {
ElabConstExprPass {
mode: file_info::Mode::Mstrict,
flags: Flags::default(),
}
}
}
impl Pass for ElabConstExprPass {
// We can determine that certain expressions are invalid based on one-level
// pattern matching. We prefer to do this since we can stop the transformation
// early in these cases. For cases where we need to pattern match on the
// expression more deeply, we use the bottom-up pass
fn on_ty_expr_top_down(&mut self, env: &Env, elem: &mut Expr) -> ControlFlow<()> {
if !self.enforce_const_expr() {
Continue(())
} else {
let Expr(_, pos, expr_) = elem;
let invalid = |expr_: &mut Expr_| {
let inner_expr_ = std::mem::replace(expr_, Expr_::Null);
let inner_expr = Expr(Default::default(), pos.clone(), inner_expr_);
*expr_ = Expr_::Invalid(Box::new(Some(inner_expr)));
Break(())
};
match &expr_ {
// -- always valid ---------------------------------------------
Expr_::Id(..)
| Expr_::Null
| Expr_::True
| Expr_::False
| Expr_::Int(_)
| Expr_::Float(_)
| Expr_::String(_)
| Expr_::FunctionPointer(..)
| Expr_::Eif(..)
| Expr_::Darray(..)
| Expr_::Varray(..)
| Expr_::Tuple(..)
| Expr_::Shape(..)
| Expr_::Upcast(..) => Continue(()),
// -- markers --------------------------------------------------
Expr_::Invalid(..) | Expr_::Hole(..) => Continue(()),
// -- handled bottom-up ----------------------------------------
Expr_::ClassConst(_) => Continue(()),
// -- conditionally valid --------------------------------------
Expr_::As(box (_, Hint(_, box hint_), _)) => match hint_ {
// NB we can perform this top-down since all valid hints
// are already in canonical form
// TODO[mjt] another example of inconsistency around error positions
Hint_::Happly(id, _) => {
env.emit_error(NamingError::IllegalConstant(id.0.clone()));
invalid(expr_)
}
_ => {
env.emit_error(NamingError::IllegalConstant(pos.clone()));
invalid(expr_)
}
},
Expr_::Unop(box (uop, _)) => match uop {
Uop::Uplus | Uop::Uminus | Uop::Utild | Uop::Unot => Continue(()),
_ => {
env.emit_error(NamingError::IllegalConstant(pos.clone()));
invalid(expr_)
}
},
Expr_::Binop(box Binop { bop, .. }) => match bop {
Bop::Eq(_) => {
env.emit_error(NamingError::IllegalConstant(pos.clone()));
invalid(expr_)
}
_ => Continue(()),
},
Expr_::ValCollection(box ((_, vc_kind), _, _)) => match vc_kind {
VcKind::Vec | VcKind::Keyset => Continue(()),
_ => {
env.emit_error(NamingError::IllegalConstant(pos.clone()));
invalid(expr_)
}
},
Expr_::KeyValCollection(box ((_, kvc_kind), _, _)) => match kvc_kind {
KvcKind::Dict => Continue(()),
_ => {
env.emit_error(NamingError::IllegalConstant(pos.clone()));
invalid(expr_)
}
},
Expr_::Call(box CallExpr {
func: Expr(_, _, call_expr_),
..
}) => match call_expr_ {
Expr_::Id(box id)
if id.name() == sn::std_lib_functions::ARRAY_MARK_LEGACY
|| id.name() == sn::pseudo_functions::UNSAFE_CAST
|| id.name() == sn::pseudo_functions::UNSAFE_NONNULL_CAST =>
{
Continue(())
}
_ => {
env.emit_error(NamingError::IllegalConstant(pos.clone()));
invalid(expr_)
}
},
Expr_::Omitted => {
if matches!(self.mode, file_info::Mode::Mhhi) {
Continue(())
} else {
env.emit_error(NamingError::IllegalConstant(pos.clone()));
invalid(expr_)
}
}
// -- always invalid -------------------------------------------
Expr_::This
| Expr_::Lvar(..)
| Expr_::Lplaceholder(..)
| Expr_::ArrayGet(..)
| Expr_::Await(..)
| Expr_::Cast(..)
| Expr_::ClassGet(..)
| Expr_::Clone(..)
| Expr_::Dollardollar(..)
| Expr_::ETSplice(..)
| Expr_::Efun(..)
| Expr_::EnumClassLabel(..)
| Expr_::ExpressionTree(..)
| Expr_::Is(..)
| Expr_::Lfun(..)
| Expr_::List(..)
| Expr_::MethodCaller(..)
| Expr_::New(..)
| Expr_::ObjGet(..)
| Expr_::Pair(..)
| Expr_::Pipe(..)
| Expr_::PrefixedString(..)
| Expr_::ReadonlyExpr(..)
| Expr_::String2(..)
| Expr_::Yield(..)
| Expr_::Xml(..)
| Expr_::Package(..) => {
env.emit_error(NamingError::IllegalConstant(pos.clone()));
invalid(expr_)
}
// -- unexpected expressions -----------------------------------
Expr_::Import(..) | Expr_::Collection(..) => panic!("Unexpected Expr"),
}
}
}
// Handle non-constant expressions which require pattern matching on some
// element of the expression which is not yet transformed in the top-down pass
fn on_ty_expr_bottom_up(&mut self, env: &Env, elem: &mut Expr) -> ControlFlow<()> {
if !self.enforce_const_expr() {
Continue(())
} else {
let Expr(_, pos, expr_) = elem;
let invalid = |expr_: &mut Expr_| {
let inner_expr_ = std::mem::replace(expr_, Expr_::Null);
let inner_expr = Expr(Default::default(), pos.clone(), inner_expr_);
*expr_ = Expr_::Invalid(Box::new(Some(inner_expr)));
Break(())
};
match expr_ {
Expr_::ClassConst(box (ClassId(_, _, class_id_), _)) => match class_id_ {
ClassId_::CIstatic => {
env.emit_error(NamingError::IllegalConstant(pos.clone()));
invalid(expr_)
}
ClassId_::CIparent | ClassId_::CIself | ClassId_::CI(..) => Continue(()),
ClassId_::CIexpr(Expr(_, _, expr_)) => match expr_ {
Expr_::This | Expr_::Id(..) => Continue(()),
_ => {
env.emit_error(NamingError::IllegalConstant(pos.clone()));
invalid(expr_)
}
},
},
_ => Continue(()),
}
}
}
fn on_ty_class__top_down(&mut self, _: &Env, elem: &mut Class_) -> ControlFlow<()> {
self.set_in_enum_class(match elem.kind {
ClassishKind::CenumClass(_) => true,
ClassishKind::Cclass(..)
| ClassishKind::Cinterface
| ClassishKind::Cenum
| ClassishKind::Ctrait => false,
});
self.mode = elem.mode;
Continue(())
}
fn on_ty_class_const_kind_top_down(
&mut self,
_: &Env,
elem: &mut ClassConstKind,
) -> ControlFlow<()> {
self.set_enforce_const_expr(
!self.in_enum_class() && matches!(elem, ClassConstKind::CCConcrete(_)),
);
Continue(())
}
fn on_ty_typedef_top_down(&mut self, _: &Env, elem: &mut Typedef) -> ControlFlow<()> {
self.mode = elem.mode;
Continue(())
}
fn on_ty_gconst_top_down(&mut self, _: &Env, elem: &mut Gconst) -> ControlFlow<()> {
self.mode = elem.mode;
self.set_enforce_const_expr(true);
Continue(())
}
fn on_ty_fun_def_top_down(&mut self, _: &Env, elem: &mut FunDef) -> ControlFlow<()> {
self.mode = elem.mode;
Continue(())
}
fn on_ty_module_def_top_down(&mut self, _: &Env, elem: &mut ModuleDef) -> ControlFlow<()> {
self.mode = elem.mode;
Continue(())
}
} |
Rust | hhvm/hphp/hack/src/elab/passes/elab_defs.rs | // Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use std::collections::VecDeque;
use nast::Def;
use nast::Program;
use nast::Stmt;
use nast::Stmt_;
use crate::prelude::*;
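/// Flatten nested namespaces into the top-level definition list and drop file
/// attributes, namespace uses, `SetNamespaceEnv` defs, and top-level
/// `Noop`/`Markup` statements.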
#[derive(Clone, Copy, Default)]
pub struct ElabDefsPass;
impl Pass for ElabDefsPass {
fn on_ty_program_top_down(&mut self, _: &Env, elem: &mut Program) -> ControlFlow<()> {
let Program(defs) = elem;
let mut q: VecDeque<_> = defs.drain(0..).collect();
while let Some(e) = q.pop_front() {
match e {
// Flatten nested namespaces; prepend the elements onto our
// queue and carry on
Def::Namespace(ns) => {
let (_, ns_defs) = *ns;
ns_defs.into_iter().rev().for_each(|x| q.push_front(x))
}
// Remove the following top-level definitions
Def::FileAttributes(_) | Def::NamespaceUse(_) | Def::SetNamespaceEnv(_) => (),
// Retain the following top-level definitions
Def::Fun(_)
| Def::Class(_)
| Def::Typedef(_)
| Def::Constant(_)
| Def::Module(_)
| Def::SetModule(_) => defs.push(e),
// Retain all non [Noop] and [Markup] top-level statements
// note that these statements may still appear in non-top-level
// positions
Def::Stmt(ref stmt) => {
let Stmt(_, stmt_) = &**stmt;
match &stmt_ {
Stmt_::Noop => (),
Stmt_::Markup(_) => (),
_ => defs.push(e),
}
}
}
}
Continue(())
}
}
#[cfg(test)]
mod tests {
use nast::Def;
use nast::Id;
use nast::Pos;
use nast::Program;
use nast::Stmt;
use nast::Stmt_;
use super::*;
#[test]
fn test() {
let env = Env::default();
let mut elem = Program(vec![
Def::Stmt(Box::new(Stmt(Pos::NONE, Stmt_::Break))),
Def::NamespaceUse(Vec::default()),
Def::Stmt(Box::new(Stmt(Pos::NONE, Stmt_::Noop))),
Def::Namespace(Box::new((
Id(Pos::NONE, String::default()),
vec![
Def::NamespaceUse(Vec::default()),
Def::Stmt(Box::new(Stmt(Pos::NONE, Stmt_::Fallthrough))),
Def::Stmt(Box::new(Stmt(Pos::NONE, Stmt_::Noop))),
Def::Namespace(Box::new((
Id(Pos::NONE, String::default()),
vec![
Def::Stmt(Box::new(Stmt(Pos::NONE, Stmt_::Break))),
Def::NamespaceUse(Vec::default()),
Def::Stmt(Box::new(Stmt(Pos::NONE, Stmt_::Noop))),
],
))),
],
))),
Def::Stmt(Box::new(Stmt(Pos::NONE, Stmt_::Fallthrough))),
Def::NamespaceUse(Vec::default()),
Def::Stmt(Box::new(Stmt(Pos::NONE, Stmt_::Noop))),
]);
let mut pass = ElabDefsPass;
elem.transform(&env, &mut pass);
// Given our initial program:
//
// [ Break
// ; NamespaceUse(..)
// ; Noop
// ; Namespace(..,
// [ NamespaceUse(..)
// ; Fallthrough
// ; Noop
// ; Namespace(..,
// [ Break
// ; NamespaceUse(..)
// ; Noop
// ]
// )
// ]
// )
// ; Fallthrough
// ; NamespaceUse(..)
// ; Noop
// ]
//
// We expect the transformed program:
//
// [ Break
// ; Fallthrough
// ; Break
// ; Fallthrough
// ]
assert_eq!(elem.0.len(), 4);
let mut q = VecDeque::from(elem.0);
// First def is Break
assert!(matches!(
q.pop_front(),
Some(Def::Stmt(box Stmt(_, Stmt_::Break)))
));
// Second def is Fallthrough
assert!(matches!(
q.pop_front(),
Some(Def::Stmt(box Stmt(_, Stmt_::Fallthrough)))
));
// Third def is Break
assert!(matches!(
q.pop_front(),
Some(Def::Stmt(box Stmt(_, Stmt_::Break)))
));
// Last def is Fallthrough
assert!(matches!(
q.pop_front(),
Some(Def::Stmt(box Stmt(_, Stmt_::Fallthrough)))
));
}
} |
Rust | hhvm/hphp/hack/src/elab/passes/elab_dynamic_class_name.rs | // Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use nast::ClassGetExpr;
use nast::ClassId;
use nast::ClassId_;
use nast::Expr;
use nast::Expr_;
use nast::FunctionPtrId;
use nast::Id;
use nast::PropOrMethod;
use crate::prelude::*;
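/// Handle dynamic class names: `new` with a dynamic class id is reported and
/// given an unknown class id, while other dynamic uses are rewritten to
/// `Expr_::Invalid`.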
#[derive(Copy, Clone, Default)]
pub struct ElabDynamicClassNamePass;
impl Pass for ElabDynamicClassNamePass {
fn on_ty_expr_bottom_up(&mut self, env: &Env, elem: &mut Expr) -> ControlFlow<()> {
let invalid = |expr_: &mut Expr_| {
let inner_expr_ = std::mem::replace(expr_, Expr_::Null);
let inner_expr = elab_utils::expr::from_expr__with_pos_(elem.1.clone(), inner_expr_);
*expr_ = Expr_::Invalid(Box::new(Some(inner_expr)));
Break(())
};
match &mut elem.2 {
Expr_::New(box (class_id, _, _, _, _)) if is_dynamic(class_id) => {
env.emit_error(NamingError::DynamicNewInStrictMode(class_id.1.clone()));
class_id.2 = ClassId_::CI(Id(class_id.1.clone(), sn::classes::UNKNOWN.to_string()));
Continue(())
}
Expr_::ClassGet(box (class_id, ClassGetExpr::CGstring(..), _))
if !is_dynamic(class_id) =>
{
Continue(())
}
Expr_::ClassGet(box (
class_id,
ClassGetExpr::CGexpr(cg_expr),
PropOrMethod::IsMethod,
)) if !is_dynamic(class_id) => {
env.emit_error(NamingError::DynamicMethodAccess(cg_expr.1.clone()));
Continue(())
}
Expr_::ClassGet(box (
ClassId(_, _, ClassId_::CIexpr(ci_expr)),
ClassGetExpr::CGstring(..)
| ClassGetExpr::CGexpr(Expr(_, _, Expr_::Lvar(..) | Expr_::This)),
_,
)) => {
env.emit_error(NamingError::DynamicClassNameInStrictMode(ci_expr.1.clone()));
invalid(&mut elem.2)
}
Expr_::ClassGet(box (
ClassId(_, _, ClassId_::CIexpr(ci_expr)),
ClassGetExpr::CGexpr(cg_expr),
_,
)) => {
env.emit_error(NamingError::DynamicClassNameInStrictMode(ci_expr.1.clone()));
env.emit_error(NamingError::DynamicClassNameInStrictMode(cg_expr.1.clone()));
invalid(&mut elem.2)
}
Expr_::ClassGet(box (_, ClassGetExpr::CGexpr(cg_expr), _)) => {
env.emit_error(NamingError::DynamicClassNameInStrictMode(cg_expr.1.clone()));
invalid(&mut elem.2)
}
Expr_::FunctionPointer(box (FunctionPtrId::FPClassConst(class_id, _), _))
if is_dynamic(class_id) =>
{
invalid(&mut elem.2)
}
Expr_::ClassConst(box (class_id, _)) if is_dynamic(class_id) => invalid(&mut elem.2),
_ => Continue(()),
}
}
}
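// A class id counts as dynamic when it is a `CIexpr` wrapping anything other
// than an lvar, `$this`, or `$$`.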
fn is_dynamic(class_id: &ClassId) -> bool {
match &class_id.2 {
ClassId_::CIparent
| ClassId_::CIself
| ClassId_::CIstatic
| ClassId_::CI(..)
| ClassId_::CIexpr(Expr(_, _, Expr_::Lvar(..) | Expr_::This | Expr_::Dollardollar(..))) => {
false
}
ClassId_::CIexpr(_) => true,
}
}
#[cfg(test)]
mod tests {
use nast::Pos;
use super::*;
fn mk_dynamic_class_id() -> ClassId {
ClassId(
(),
Pos::default(),
ClassId_::CIexpr(elab_utils::expr::null()),
)
}
fn mk_non_dynamic_class_id() -> ClassId {
ClassId((), Pos::default(), ClassId_::CIself)
}
// -- in `New` expressions -------------------------------------------------
#[test]
fn test_new_valid() {
let env = Env::default();
let mut pass = ElabDynamicClassNamePass;
let mut elem = Expr(
(),
Pos::default(),
Expr_::New(Box::new((
mk_non_dynamic_class_id(),
Default::default(),
Default::default(),
Default::default(),
Default::default(),
))),
);
elem.transform(&env, &mut pass);
assert!(env.into_errors().is_empty());
let Expr(_, _, expr_) = elem;
assert!(matches!(
expr_,
Expr_::New(box (ClassId(_, _, ClassId_::CIself), _, _, _, _))
));
}
#[test]
fn test_new_invalid() {
let env = Env::default();
let mut pass = ElabDynamicClassNamePass;
let mut elem = Expr(
(),
Pos::default(),
Expr_::New(Box::new((
mk_dynamic_class_id(),
Default::default(),
Default::default(),
Default::default(),
Default::default(),
))),
);
elem.transform(&env, &mut pass);
let err_opt = env.into_errors().pop();
assert!(matches!(
err_opt,
Some(NamingPhaseError::Naming(
NamingError::DynamicNewInStrictMode(..)
))
));
let Expr(_, _, expr_) = elem;
assert!(matches!(
expr_,
Expr_::New(box (ClassId(_, _, ClassId_::CI(..)), _, _, _, _))
))
}
// -- in `FunctionPointer` expressions -------------------------------------
#[test]
fn test_function_pointer_valid() {
let env = Env::default();
let mut pass = ElabDynamicClassNamePass;
let mut elem = Expr(
(),
Pos::default(),
Expr_::FunctionPointer(Box::new((
FunctionPtrId::FPClassConst(mk_non_dynamic_class_id(), Default::default()),
Default::default(),
))),
);
elem.transform(&env, &mut pass);
assert!(env.into_errors().is_empty());
let Expr(_, _, expr_) = elem;
assert!(matches!(
expr_,
Expr_::FunctionPointer(box (
FunctionPtrId::FPClassConst(ClassId(_, _, ClassId_::CIself), _),
_,
))
));
}
#[test]
fn test_function_pointer_invalid() {
let env = Env::default();
let mut pass = ElabDynamicClassNamePass;
let mut elem = Expr(
(),
Pos::default(),
Expr_::FunctionPointer(Box::new((
FunctionPtrId::FPClassConst(mk_dynamic_class_id(), Default::default()),
Default::default(),
))),
);
elem.transform(&env, &mut pass);
assert!(env.into_errors().is_empty());
let Expr(_, _, expr_) = elem;
assert!(matches!(expr_, Expr_::Invalid(_)))
}
// -- in `ClassConst` expressions ------------------------------------------
#[test]
fn test_class_const_valid() {
let env = Env::default();
let mut pass = ElabDynamicClassNamePass;
let mut elem = Expr(
(),
Pos::default(),
Expr_::ClassConst(Box::new((mk_non_dynamic_class_id(), Default::default()))),
);
elem.transform(&env, &mut pass);
assert!(env.into_errors().is_empty());
assert!(matches!(
elem,
Expr(_, _, Expr_::ClassConst(box (ClassId(_, _, ClassId_::CIself), _)))
))
}
#[test]
fn test_class_const_invalid() {
let env = Env::default();
let mut pass = ElabDynamicClassNamePass;
let mut elem = Expr(
(),
Pos::default(),
Expr_::ClassConst(Box::new((mk_dynamic_class_id(), Default::default()))),
);
elem.transform(&env, &mut pass);
assert!(env.into_errors().is_empty());
assert!(matches!(
elem,
Expr(
_,
_,
Expr_::Invalid(box Some(Expr(
_,
_,
Expr_::ClassConst(box (ClassId(_, _, ClassId_::CIexpr(..)), _)),
))),
)
))
}
// -- in `ClassGet` expressions --------------------------------------------
#[test]
fn test_class_get_cg_string_valid() {
let env = Env::default();
let mut pass = ElabDynamicClassNamePass;
let mut elem = Expr(
(),
Pos::default(),
Expr_::ClassGet(Box::new((
mk_non_dynamic_class_id(),
ClassGetExpr::CGstring(Default::default()),
PropOrMethod::IsProp,
))),
);
elem.transform(&env, &mut pass);
assert!(env.into_errors().is_empty());
assert!(matches!(
elem,
Expr(_, _, Expr_::ClassGet(box (ClassId(_, _, ClassId_::CIself), _, _)))
))
}
#[test]
fn test_class_get_cg_string_invalid() {
let env = Env::default();
let mut pass = ElabDynamicClassNamePass;
let mut elem = Expr(
(),
Pos::default(),
Expr_::ClassGet(Box::new((
mk_dynamic_class_id(),
ClassGetExpr::CGstring(Default::default()),
PropOrMethod::IsProp,
))),
);
elem.transform(&env, &mut pass);
let err_opt = env.into_errors().pop();
assert!(matches!(
err_opt,
Some(NamingPhaseError::Naming(
NamingError::DynamicClassNameInStrictMode(..)
))
));
assert!(matches!(elem, Expr(_, _, Expr_::Invalid(_))))
}
#[test]
fn test_class_get_cg_expr_invalid() {
let env = Env::default();
let mut pass = ElabDynamicClassNamePass;
let mut elem = Expr(
(),
Pos::default(),
Expr_::ClassGet(Box::new((
mk_dynamic_class_id(),
ClassGetExpr::CGexpr(elab_utils::expr::null()),
PropOrMethod::IsProp,
))),
);
elem.transform(&env, &mut pass);
assert!(matches!(
env.into_errors().as_slice(),
[
NamingPhaseError::Naming(NamingError::DynamicClassNameInStrictMode(..)),
NamingPhaseError::Naming(NamingError::DynamicClassNameInStrictMode(..))
]
));
assert!(matches!(elem, Expr(_, _, Expr_::Invalid(_))))
}
} |