language
stringlengths
0
24
filename
stringlengths
9
214
code
stringlengths
99
9.93M
hhvm/hphp/hack/src/utils/ignore/dune
(rule (target filesToIgnore.fb.ml) (action (copy# facebook/filesToIgnore.ml filesToIgnore.fb.ml))) (library (name ignore) (wrapped false) (libraries core_kernel hh_json str string base core (select filesToIgnore.ml from (facebook -> filesToIgnore.fb.ml) (-> filesToIgnore.stubs.ml))))
OCaml Interface
hhvm/hphp/hack/src/utils/ignore/filesToIgnore.mli
(** This is invoked in serverMain and other entry points, upon reading .hhconfig ignore_paths= directive. *) val ignore_path : Str.regexp -> unit (** FindUtils.file_filter calls [should_ignore], which consults the list of ignore_path regexps, to determine whether hh_server recognizes a given file as part of the project. *) val should_ignore : string -> bool (** [get_paths_to_ignore] retrieves the current global mutable list of ignore paths, in preparation for marshalling it to a different proess. *) val get_paths_to_ignore : unit -> Str.regexp list (** [set_paths_to_ignore] is for when we've unmarshalled a list of ignore paths, and wish to store it in the global mutable list of ingore paths. *) val set_paths_to_ignore : Str.regexp list -> unit val watchman_monitor_expression_terms : Hh_json.json list val watchman_server_expression_terms : Hh_json.json list val watchman_watcher_expression_terms : Hh_json.json list
OCaml
hhvm/hphp/hack/src/utils/ignore/filesToIgnore.stubs.ml
open Core module J = Hh_json_helpers.AdhocJsonHelpers let paths_to_ignore = ref [] let get_paths_to_ignore () = !paths_to_ignore let set_paths_to_ignore x = paths_to_ignore := x let ignore_path regexp = paths_to_ignore := regexp :: !paths_to_ignore let should_ignore path = List.exists ~f:(fun p -> Str.string_match p path 0) !paths_to_ignore (* ignore shell script wrappers generated by Composer (PHP package manager) *) || String.is_prefix path ~prefix:"vendor/bin/" let watchman_monitor_expression_terms = [ J.pred "not" @@ [ J.pred "anyof" @@ [ J.strlist ["name"; ".hg"]; J.strlist ["dirname"; ".hg"]; J.strlist ["name"; ".git"]; J.strlist ["dirname"; ".git"]; J.strlist ["name"; ".svn"]; J.strlist ["dirname"; ".svn"]; ]; ]; ] let hg_dirname = J.strlist ["dirname"; ".hg"] let git_dirname = J.strlist ["dirname"; ".git"] let svn_dirname = J.strlist ["dirname"; ".svn"] let watchman_server_expression_terms = [ J.strlist ["type"; "f"]; J.pred "anyof" @@ [ J.strlist ["name"; ".hhconfig"]; J.pred "anyof" @@ [ J.strlist ["suffix"; "php"]; J.strlist ["suffix"; "phpt"]; J.strlist ["suffix"; "hack"]; J.strlist ["suffix"; "hackpartial"]; J.strlist ["suffix"; "hck"]; J.strlist ["suffix"; "hh"]; J.strlist ["suffix"; "hhi"]; J.strlist ["suffix"; "xhp"]; ]; ]; J.pred "not" @@ [J.pred "anyof" @@ [hg_dirname; git_dirname; svn_dirname]]; ] let watchman_watcher_expression_terms = [J.strlist ["type"; "f"]; J.strlist ["name"; "updatestate"]]
TOML
hhvm/hphp/hack/src/utils/intern/Cargo.toml
# @generated by autocargo [package] name = "intern" version = "0.1.0" edition = "2021" description = "Intern data into a 32-bit id temporarily copied from the relay directory to enable Hack OSS" [dependencies] fnv = "1.0" hashbrown = { version = "0.12.3", features = ["raw", "serde"] } indexmap = { version = "1.9.2", features = ["arbitrary", "rayon", "serde-1"] } once_cell = "1.12" parking_lot = { version = "0.12.1", features = ["send_guard"] } serde = { version = "1.0.176", features = ["derive", "rc"] } serde_bytes = "0.11" serde_derive = "1.0.176" smallvec = { version = "1.6.1", features = ["serde", "union"] } [dev-dependencies] bincode = "1.3.3" rand = { version = "0.8", features = ["small_rng"] } serde_json = { version = "1.0.100", features = ["float_roundtrip", "unbounded_depth"] }
Rust
hhvm/hphp/hack/src/utils/intern/src/atomic_arena.rs
/* * Copyright (c) Meta Platforms, Inc. and affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ use std::cell::UnsafeCell; use std::fmt; use std::fmt::Debug; use std::fmt::Display; use std::marker::PhantomData; use std::mem::ManuallyDrop; use std::mem::MaybeUninit; use std::num::NonZeroU32; use std::ptr; use std::ptr::NonNull; use std::sync::atomic::AtomicPtr; use std::sync::atomic::AtomicU32; use std::sync::atomic::Ordering; use parking_lot::Mutex; const MIN_SHIFT: u32 = 7; const U32_BITS: usize = 32; const MIN_SIZE: u32 = 1 << MIN_SHIFT; const NUM_SIZES: usize = U32_BITS - MIN_SHIFT as usize; const MAX_INDEX: u32 = std::u32::MAX - MIN_SIZE; // Memory consistency assertions provide a lot of checking of the internals, // but have a huge runtime cost. Be warned! These are really for active // debugging of atomic_arena itself. macro_rules! memory_consistency_assert { ($($arg:tt)*) => (if cfg!(memory_consistency_assertions) { assert!($($arg)*); }) } macro_rules! memory_consistency_assert_eq { ($($arg:tt)*) => (if cfg!(memory_consistency_assertions) { assert_eq!($($arg)*); }) } /// `AtomicArena<'a, T>` is a nearly-lock-free arena of `T`. It offers O(1) /// lock-free `get` and O(1) nearly lock-free `add`, and can be accessed /// through the `Ref<'a, T>` type which is internally stored as `NonZeroU32`. /// This means that `Ref<...>` and `Option<Ref<...>>` both take 4 bytes. /// /// It is unsafe to get a `Ref` that was not produced by `add` to the same /// `AtomicArena`; the type arguments on `Ref` and `AtomicArena` are intended /// to enforce that separation. /// /// How it works /// /// Internally, `AtomicArena` uses a telescoping array of buckets, arranged in /// reverse order. The last bucket holds the first `MIN_SIZE` objects /// allocated, and each bucket in use holds twice as many objects as the one /// above it. 
So when `MIN_SIZE` = 128, initially things look like this: /// ```ignore /// next_biased_index: 128 /// buckets: /// +----+ /// | | /// | 0 | null /// +----+ /// | | /// | 1 | null /// +----+ /// : : : /// +----+ /// | | /// | 23 | null /// +----+ /// | | +--+--+....--------------------------+ /// | 24 | --> | | | array of 128 uninitialized T | /// +----+ +--+--+....--------------------------+ /// ``` /// After allocating 537 objects, things look as follows: /// ```ignore /// next_biased_index: 665 = 537 + 128 /// buckets: /// +----+ /// | 0 | null /// +----+ /// | 1 | null /// +----+ /// : : : /// +----+ /// | | Ref 664 /// | 21 | null | /// +----+ v /// | | +--+--+....----------------+--+-------------------------------+ /// | 22 | --> | | | objects 512 -- 663 | | empty entries 665 -- 1023 | /// +----+ +--+--+....----------------+--+-------------------------------+ /// | | +--+--+....-----------------------------------+ /// | 23 | --> | | | objects for Refs 256 -- 511 | /// +----+ +--+--+....-----------------------------------+ /// | | +--+--+....--------------------------+ /// | 24 | --> | | | objects for Refs 128 -- 255 | /// +----+ +--+--+....--------------------------+ /// ^ ^ /// Ref 128 -' `- Ref 129 /// ``` /// /// Note that the first object allocted gets index `MIN_SIZE`, and that we cap /// the extra space allocated for currently empty entries at /// `2 * len() + MIN_SIZE - 1`. /// /// Why are the buckets in reverse order like this? It's for ease of index /// computation in `get()`. In particular, note that the bucket index is equal /// to the number of leading zeros in the `Ref`, and the offset in the bucket /// is the trailing bits of the Ref after clearing the topmost 1 bit. /// We could waste somewhat less storage than this with some more bit /// trickery, but this is currently good enough for our purposes. /// /// In the common case, allocation is simply a matter of atomically /// incrementing `next_biased_index` and initializing the empty entry. 
When a /// bucket fills (we discover the bucket pointer we fetched is null in /// `slice_for_slot`), we fall back to locking in `slice_for_slot_slow` by /// taking the `bucket_alloc_mutex`. This re-checks the entry in `buckets` and /// allocates a fresh bucket if it's still empty. pub struct AtomicArena<'a, T> { phantom_life: PhantomData<&'a ()>, next_biased_index: AtomicU32, /// buckets in reverse order, starting from the back and working /// forwards. Capacity for bucket i is bucket_capacity(i). buckets: [AtomicPtr<MaybeUninit<T>>; NUM_SIZES], bucket_alloc_mutex: Mutex<()>, } /// `Ref<'a, T>` is a 32-bit nonzero reference to an object of type `T` held in /// the `AtomicArena<'a, T>`. It acts similar to `&'a T` in that it is `Copy` /// and `Clone` regardless of `T` and dereferences via `arena.get(...)` to `&T`. #[derive(PartialEq, Eq, PartialOrd, Ord, Hash)] #[repr(transparent)] // Same repr & ABI as NonZeroU32 & u32. pub struct Ref<'a, T> { phantom: PhantomData<T>, phantom_life: PhantomData<&'a ()>, biased_index: NonZeroU32, } /// `Ref<'a, T>` requires manual implementations of `Clone` and `Copy` since `T` is a /// phantom parameter and not required to support `Clone` or `Copy`. impl<'a, T> Clone for Ref<'a, T> { fn clone(&self) -> Self { *self } } impl<'a, T> Copy for Ref<'a, T> {} impl<'a, T> Ref<'a, T> { /// Gets an index suitable for array indexing out of a `Ref<'a, T>`. /// The resulting index *is* 0-based, and < the corresponding /// `arena.len()`. pub fn index(&self) -> u32 { self.biased_index.get() - MIN_SIZE // Unbias to be 0-based. } /// Re-create ref from biased index. pub unsafe fn from_index(index: u32) -> Self { // OK because MIN_SIZE must be > 0 for algorithmic correctness. Self::from_raw(NonZeroU32::new_unchecked(index + MIN_SIZE)) } /// Internal value of ref; only use this if you know what you're /// doing. Actually `>= MIN_SIZE`. pub(crate) fn raw(&self) -> NonZeroU32 { self.biased_index } /// Re-create ref from raw index. 
pub unsafe fn from_raw(biased_index: NonZeroU32) -> Self { Ref { phantom: PhantomData, phantom_life: PhantomData, biased_index, } } /// Very unsafe rebranding of one ref type into another. /// Use for serialization / deserialization of refs. pub(crate) unsafe fn rebrand<'b, U>(&self) -> Ref<'b, U> { Ref { phantom: PhantomData, phantom_life: PhantomData, biased_index: self.biased_index, } } } impl<'a, T> Debug for Ref<'a, T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "atomic_arena::Ref({:?})", self.biased_index) } } impl<'a, T> Display for Ref<'a, T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "#{}", self.index()) } } /// Capacity of bucket at bucket index `a`. fn bucket_capacity(a: usize) -> usize { (1 << 31) >> (a as u32) } /// Transform external biased index `i` into (bucket index, index in bucket) fn index(i: u32) -> (usize, usize) { memory_consistency_assert!(i >= MIN_SIZE); memory_consistency_assert!(i - MIN_SIZE <= MAX_INDEX); memory_consistency_assert!(i as u64 <= std::usize::MAX as u64); let a = i.leading_zeros() as usize; memory_consistency_assert!(a < NUM_SIZES, "{} < {}", a, NUM_SIZES); let b = (i & ((bucket_capacity(0) as u32 - 1) >> a)) as usize; memory_consistency_assert!(b < bucket_capacity(a)); memory_consistency_assert_eq!(i, bucket_capacity(a) as u32 + b as u32); (a, b) } /// A default instance makes it easier to create sharded instances. 
impl<'a, T> Default for AtomicArena<'a, T> { fn default() -> Self { Self::new() } } impl<'a, T> AtomicArena<'a, T> { pub const fn new() -> Self { AtomicArena { phantom_life: PhantomData, next_biased_index: AtomicU32::new(MIN_SIZE), buckets: [ AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), ], bucket_alloc_mutex: parking_lot::const_mutex(()), } } /// Get pointer to bucket for slot `a`. Normally this just fetches /// the pointer from `self.buckets`, but if this is the first `add` /// it calls `slice_for_slot_slow` to allocate it. #[inline] fn slice_for_slot(&self, a: usize) -> NonNull<MaybeUninit<T>> { if let Some(curr) = NonNull::new(self.buckets[a].load(Ordering::Acquire)) { curr } else { self.slice_for_slot_slow(a) } } /// Try to allocate a new bucket. Ordinarily this will be the /// first `add` attempt to that bucket, but there can be a race to /// create a fresh bucket among multiple `add` threads. fn slice_for_slot_slow(&self, a: usize) -> NonNull<MaybeUninit<T>> { // Take the allocation mutex, and double-check that the bucket still // needs to be allocated. 
Double-checked locking is fine because the // buckets are `AtomicPtr` with the unlocked read and locked write // as an `Acquire / Release` pair. let lock = self.bucket_alloc_mutex.lock(); // Relaxed load because we know we're competing with prior lock holders now. if let Some(curr) = NonNull::new(self.buckets[a].load(Ordering::Relaxed)) { return curr; } let cap = bucket_capacity(a); // Allocate bucket as vector, then prise it apart since we // only care about capacity and pointer. We use MaybeUninit // because we are tracking slot validity across all buckets // separately using self.size. // https://doc.rust-lang.org/std/vec/struct.Vec.html#guarantees // notes that "Vec::with_capacity(n), will produce a Vec with // exactly the requested capacity". let mut v: ManuallyDrop<Vec<MaybeUninit<T>>> = ManuallyDrop::new(Vec::with_capacity(cap)); let len = v.len(); let acap = v.capacity(); let ptr = v.as_mut_ptr(); // Ensure with_capacity has obeyed its guarantee. memory_consistency_assert!(acap == cap || std::mem::size_of::<T>() == 0); memory_consistency_assert_eq!(len, 0); if let Some(nn_ptr) = NonNull::new(ptr) { self.buckets[a].store(ptr, Ordering::Release); drop(lock); nn_ptr } else { panic!("Vec with non-0 len and null ptr!") } } #[inline] /// Number of allocated objects in the arena as of the time of call. pub fn len(&self) -> usize { (self.next_biased_index.load(Ordering::Relaxed) - MIN_SIZE) as usize } #[inline] pub fn is_empty(&self) -> bool { self.len() == 0 } /// Returns both `Ref` to the added value and the reference that /// would be returned by `get(id)`. Equivalent to: /// ```ignore /// let id = arena.add(element); /// let r = arena.get(id); /// (id, r) /// ``` pub fn add_get(&self, element: T) -> (Ref<'a, T>, &T) { // Atomically obtain an id, thus resolving conflicts among // concurrent add() operations. let s = self.next_biased_index.fetch_add(1, Ordering::Relaxed); // Linearization point for add(). 
assert!(s >= MIN_SIZE); // Panic on wraparound ( == overflow). let biased_index = NonZeroU32::new(s).unwrap(); // Succeeds after above check. let (a, b) = index(s); let e_ptr = self.slice_for_slot(a).as_ptr(); // This is checked in index_test with monotonicity checks // at boundaries. Note that s and thus (a, b) are unique, // so this is the first and only write to this slot. // // This is also the point where using MaybeUninit is // important. This write will otherwise attempt to Drop // the current (uninitialized) contents of this bucket // entry before writing the new contents. This can yield // a hard-to-debug segfault in the internals of malloc. let e_ptr: *mut MaybeUninit<T> = unsafe { e_ptr.add(b) }; unsafe { *e_ptr = MaybeUninit::new(element); } let e: &T = unsafe { &*(*e_ptr).as_ptr() }; ( Ref { phantom: PhantomData, phantom_life: PhantomData, biased_index, }, e, ) } /// Returns `Ref` to the the added value, which can safely be /// passed to `get`. `self.get(self.add(t)) == &t` pub fn add(&self, element: T) -> Ref<'a, T> { let (biased_index, _) = self.add_get(element); biased_index } /// `get(i)` is safe if, like a pointer we might dereference, `i` is a /// `Ref` returned by `self.add(...)` obtained in a thread-safe way /// (unsafe example: load via Ordering::Relaxed). pub fn get(&self, r: Ref<'a, T>) -> &T { let i = r.biased_index.get(); // In debug mode, bounds check and panic. Note that this bounds check // won't catch all unsafe accesses, since add() increments size *before* // storing to the bucket. if cfg!(debug_assertions) { let l = self.next_biased_index.load(Ordering::Relaxed); debug_assert!(i < l, "{} < {}", i, l); } let (a, b) = index(i); let e_ptr = unsafe { // Get bucket address, but do *not* allocate a bucket. // Ordering::Relaxed is OK because we got a Ref in a // thread-safe way in get(..). self.buckets.get_unchecked(a).load(Ordering::Relaxed) }; // Sanity check bucket. Again, won't catch all unsafe accesses. 
memory_consistency_assert!(!e_ptr.is_null()); unsafe { // Read the added element, and strip the MaybeUnit wrapper to yield a &T. let r: &MaybeUninit<T> = &*e_ptr.add(b); &*r.as_ptr() } } } impl<'a, T> Debug for AtomicArena<'a, T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "AtomicArena[{}]", self.len()) } } impl<'a, T> Drop for AtomicArena<'a, T> { fn drop(&mut self) { // At this point all other outstanding operations on self must // be complete since we have a &mut reference. let l = self.next_biased_index.load(Ordering::Relaxed); if l == MIN_SIZE { // Nothing to do, and otherwise there's a fencepost error // in index(l-1) below. return; } let (last_a, last_b) = index(l - 1); for (a, bucket) in self.buckets.iter_mut().enumerate().rev() { // We turn each allocated bucket into a `Vec<T>` whose `capacity` // matches the bucket capacity and whose `len` matches the number // of entries in use (equal to `capacity` except for `last_a` which // is only using `last_b` entries). We can then `drop` the // resulting `Vec`, freeing any storage held by the allocated // objects in addition to the storage for the bucket itself. if a < last_a { break; } let mut b_ptr = if let Some(nn) = NonNull::new(bucket.load(Ordering::Relaxed)) { nn } else { panic!("Null bucket pointer before length. Shouldn't happen.") }; let cap = bucket_capacity(a); // Every bucket after last_a is full, every bucket before it is empty. // last_b is the last element in last_a that has been initialized. let sz = if a == last_a { last_b + 1 } else { cap }; let iv: Vec<T> = unsafe { // View b_ptr as a *mut T rather than a NonNull<MaybeUninit<T>>. let b_ptr: *mut T = (b_ptr.as_mut()).as_mut_ptr(); // We know that the first sz elements at *b_ptr are // initialized, and c elements were allocated. Build // a well-formed Vec<T> (stripping away MaybeUninit<>) // and deallocate that. 
This should be at least as // efficient as simply calling Drop on individual // elements, and is vastly simpler than calling the // mem apis directly. Vec::from_raw_parts(b_ptr, sz, cap) }; drop(iv); bucket.store(ptr::null_mut(), Ordering::Relaxed) } } } /// Sometimes we want to create a static `AtomicArena` with a distinguished /// `ZERO` element pre-added at compile time. (Example: string interning with /// a distinguished empty string). The `Zero<T>` struct allows us to do that /// using a series of top-level constants. /// /// IMPORTANT: A GIVEN INSTANCE OF `Zero<T>` CAN BE PASSED TO AT MOST ONE CALL /// TO `AtomicArena::with_zero`!!!! This cannot be statically enforced, which /// is one reason there's a macro in `intern` that wraps this all up safely. pub struct Zero<T> { last_bucket: [UnsafeCell<MaybeUninit<T>>; MIN_SIZE as usize], } impl<T> Zero<T> { #[inline(always)] pub const fn zero() -> Ref<'static, T> { Ref { phantom: PhantomData, phantom_life: PhantomData, biased_index: unsafe { NonZeroU32::new_unchecked(MIN_SIZE) }, } } fn zero_value(&self) -> &T { let r: &MaybeUninit<T> = unsafe { &*self.last_bucket[0].get() }; unsafe { &*r.as_ptr() } } pub const fn new(zero: T) -> Self { Zero { last_bucket: [ UnsafeCell::new(MaybeUninit::new(zero)), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), 
UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), 
UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), 
UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), UnsafeCell::new(MaybeUninit::uninit()), ], } } } unsafe impl<T> Sync for Zero<T> {} impl<T: Debug> Debug for Zero<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "Zero({:?})", self.zero_value()) } } impl<T> AtomicArena<'static, T> { pub const fn with_zero(z: &'static Zero<T>) -> Self { let p: *mut MaybeUninit<T> = z.last_bucket[0].get(); AtomicArena { phantom_life: PhantomData, next_biased_index: AtomicU32::new(MIN_SIZE + 1), buckets: [ AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(ptr::null_mut()), AtomicPtr::new(p), ], bucket_alloc_mutex: parking_lot::const_mutex(()), } } } #[cfg(test)] mod tests { use std::sync::Arc; use std::thread; use parking_lot::Condvar; use parking_lot::Mutex; use rand::thread_rng; use rand::Rng; use super::*; static mut ZERO: Zero<&str> = Zero::new("zero"); static STRING_ARENA: AtomicArena<'static, &str> = AtomicArena::with_zero(unsafe { &ZERO }); /// For internal testing purposes we 
permit the unsafe synthesis of Refs. fn mk_ref<'a, T>(index: u32) -> Ref<'a, T> { Ref { phantom: PhantomData, phantom_life: PhantomData, biased_index: NonZeroU32::new(index + MIN_SIZE).unwrap(), } } #[test] fn empty_drop() { let vi: AtomicArena<'_, String> = AtomicArena::new(); drop(vi); } #[test] fn sizing() { // A bunch of checking for off-by-one errors of various sorts // in the vector sizes for the telescoping array. for a in 0..(NUM_SIZES - 1) { assert_eq!(1 << (U32_BITS - 1 - a), bucket_capacity(a)); } assert_eq!(MIN_SIZE as usize, bucket_capacity(NUM_SIZES - 1)); } #[test] fn indexing() { let (a, b) = index(MIN_SIZE); assert_eq!(a, NUM_SIZES - 1); assert_eq!(b, 0); let (a, b) = index(std::u32::MAX); assert_eq!(a, 0); assert_eq!(b, (1 << 31) - 1); assert_eq!(b, bucket_capacity(0) - 1); // Check thresholds for s in (MIN_SHIFT + 1)..(U32_BITS as u32) { let i = 1 << s; let (a0, b0) = index(i - 1); let (a1, b1) = index(i); assert_eq!(a0, U32_BITS - s as usize); assert_eq!( a1, U32_BITS - s as usize - 1, "at {} -> ({}, {})", i, a1, b1 ); assert_eq!(b0, bucket_capacity(a0) - 1); assert_eq!(b1, 0); } // Check monotonicity. for i in MIN_SIZE..9 * MIN_SIZE { let (a0, b0) = index(i); let (a1, b1) = index(i + 1); assert!( (a0 == a1 && b0 < b1) || (a0 == (a1 + 1) && b1 == 0), "{} {} {} >= {} {} {}", i - 1, a0, b0, i, a1, b1 ); } } #[test] fn add_read() { let v: AtomicArena<'_, usize> = AtomicArena::new(); assert_eq!(v.len(), 0); for i in 0..10_000 { let ii = i as u32; let r = v.add(i); assert_eq!(r.index(), ii); assert_eq!(v.len(), i + 1); assert_eq!(*v.get(r), i); for j in 0..=ii { assert_eq!(*v.get(mk_ref(j)), j as usize); } } // For the rest of the range, don't do the O(n^2) full check. 
for i in 10_000..10_000_000 { let r = v.add(i); assert_eq!(r.index(), i as u32); assert_eq!(v.len(), i + 1); assert_eq!(*v.get(r), i); } } #[test] fn add_read_static() { assert_eq!(Zero::<&str>::zero().index(), 0); assert_eq!(STRING_ARENA.len(), 1); assert_eq!(STRING_ARENA.get(Zero::<&str>::zero()), &"zero"); for i in 1..1_000 { let r = STRING_ARENA.add(Box::leak(format!("{}", i).into())); assert_eq!(r.index(), i); assert_eq!(STRING_ARENA.len(), i as usize + 1); assert_eq!(STRING_ARENA.get(r), &format!("{}", i)); assert_eq!(STRING_ARENA.get(Zero::<&str>::zero()), &"zero"); for j in 1..=i { assert_eq!(STRING_ARENA.get(mk_ref(j)), &format!("{}", j)); } } // For the rest of the range, don't do the O(n^2) full check. for i in 1_000..1_000_000 { let r = STRING_ARENA.add(Box::leak(format!("{}", i).into())); assert_eq!(r.index(), i); assert_eq!(STRING_ARENA.len(), i as usize + 1); assert_eq!(STRING_ARENA.get(r), &format!("{}", i)); assert_eq!(STRING_ARENA.get(Zero::<&str>::zero()), &"zero"); } } #[test] fn add_parallel_read() { const N: u32 = 1_000_000; let arena: Arc<AtomicArena<'_, String>> = Arc::new(AtomicArena::new()); // Make sure we don't just run the producer or all the // consumers without interleaving them. let progress = Arc::new((Mutex::new(0u32), Condvar::new())); let mut consumers = Vec::new(); let len = Arc::new(AtomicU32::new(0)); for r in 0..10 { // Consumers let arena = arena.clone(); let progress = progress.clone(); let len = len.clone(); consumers.push(thread::spawn(move || { const I: u32 = N * 3 / 2; let mut rng = thread_rng(); let mut next_poke = 1500; let mut next_seek = 1000; let mut n_seen = 0; for ii in 0..I { let n = len.load(Ordering::Acquire); if n > 0 { // First reader always checks latest completed add. let i = if r == 0 { n - 1 } else { rng.gen_range(0..n) }; let s = arena.get(mk_ref(i)); assert_eq!(s, &format!("{}", i)); if r == 0 { // n should trail arena.len(). 
let l = arena.len(); assert!(n as usize <= l); } n_seen += 1; } if ii == next_poke { let (lock, cvar) = &*progress; let mut l = lock.lock(); while *l < next_seek { cvar.wait(&mut l); } next_poke *= 10; next_seek *= 10; } } assert!(n_seen > 0); })); } { // Producer in main thread. let mut next_poke = 1000; for i in 0..N { let r = arena.add(format!("{}", i)); assert_eq!(r.index(), i); len.store(i + 1, Ordering::Release); if i == next_poke { let (lock, cvar) = &*progress; *lock.lock() = i; cvar.notify_all(); next_poke *= 10; } } } for c in consumers { c.join().unwrap(); } } #[test] fn parallel_add_parallel_read() { use std::sync::atomic::AtomicU32; const N: u32 = 2_000_000; const WRITERS: u32 = 10; let arena: Arc<AtomicArena<'_, usize>> = Arc::new(AtomicArena::new()); let mut avail: Arc<Vec<AtomicU32>> = Arc::new(Vec::with_capacity(N as usize)); Arc::get_mut(&mut avail) .unwrap() .resize_with(N as usize, || AtomicU32::new(10 * N)); // Make sure we don't just run the producer or all the // consumers without interleaving them. 
let progress = Arc::new((Mutex::new(0u32), Condvar::new())); let mut producers = Vec::new(); let mut consumers = Vec::new(); for k in 0..WRITERS { let arena = arena.clone(); let avail = avail.clone(); let progress = progress.clone(); producers.push(thread::spawn(move || { let mut next_poke = 10; for i in 0..N / WRITERS { let n = i * WRITERS + k; let id = arena.add(n as usize); assert!(id.index() < N); assert_eq!(avail[id.index() as usize].load(Ordering::Acquire), 10 * N); avail[id.index() as usize].store(n, Ordering::Release); if k == 0 && i == next_poke { let (lock, cvar) = &*progress; *lock.lock() = i; next_poke *= 10; cvar.notify_all(); } } })); } for _ in 0..10 { const I: u32 = N * 3 / 2; let arena = arena.clone(); let avail = avail.clone(); let progress = progress.clone(); consumers.push(thread::spawn(move || { let mut rng = thread_rng(); let mut next_poke = 150; let mut next_seek = 10; let mut n_seen = 0; for ii in 0..I { let i = rng.gen_range(0..N); let expect = avail[i as usize].load(Ordering::Acquire); if expect < N { let s = arena.get(mk_ref(i)); assert_eq!(*s, expect as usize); n_seen += 1; } if ii == next_poke { let (lock, cvar) = &*progress; let mut l = lock.lock(); while *l < next_seek { cvar.wait(&mut l); } next_poke *= 10; next_seek *= 10; if next_seek >= N { next_seek = 0; } } } assert!(n_seen > 0); })); } for p in producers { p.join().unwrap(); } assert_eq!(arena.len(), N as usize); let mut fail: Vec<(u32, u32)> = Vec::new(); for i in 0..N { let a = avail[i as usize].load(Ordering::Relaxed); if a >= N { fail.push((a, i)); } } assert!(fail.is_empty(), "{:?}", fail); for c in consumers { c.join().unwrap(); } } }
Rust
hhvm/hphp/hack/src/utils/intern/src/idhasher.rs
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

use std::hash::BuildHasherDefault;
use std::hash::Hasher;
use std::marker::PhantomData;

/// `BuildHasher` for [`IdHasher`]; plug this into `HashMap`/`HashSet`
/// keyed by 32-bit intern ids.
pub type BuildIdHasher<T> = BuildHasherDefault<IdHasher<T>>;

/// A simple, fast multiplicative hasher for 32-bit ids.
///
/// A plain identity hash is tempting for ids, but
/// [HashBrown](https://crates.io/crates/hashbrown) and
/// [std::collections::HashMap](https://doc.rust-lang.org/std/collections/struct.HashMap.html)
/// take the upper 7 bits of the hash as a tag and compare 8-16 tags in
/// parallel. Without a multiply, typical low-valued u32 ids would all share
/// tag 0, defeating that optimization — so the id is spread over all 64 bits
/// with a Fibonacci-style multiplication instead.
#[derive(Debug, Default, Clone, Copy)]
pub struct IdHasher<T>(u64, PhantomData<T>);

/// Marker trait restricting which key types may be hashed with [`IdHasher`].
/// Additional primitive types can be supported by adding an `IsEnabled` impl.
pub trait IsEnabled {}
impl IsEnabled for u32 {}

impl<T: IsEnabled> Hasher for IdHasher<T> {
    #[inline]
    fn finish(&self) -> u64 {
        self.0
    }

    fn write(&mut self, _bytes: &[u8]) {
        // Only single keyed u32 writes are supported; arbitrary byte
        // writes indicate misuse of this hasher.
        unimplemented!()
    }

    fn write_u32(&mut self, n: u32) {
        // Each Hasher instance must receive exactly one write.
        debug_assert_eq!(self.0, 0);
        // 0x9e3779b97f4a7c15 ~= 2^64 / phi, the classic Fibonacci-hashing
        // multiplier; wrapping_mul spreads small ids across all 64 bits.
        self.0 = u64::from(n).wrapping_mul(0x9e3779b97f4a7c15);
    }
}
Rust
hhvm/hphp/hack/src/utils/intern/src/intern.rs
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

use std::borrow::Borrow;
use std::cell::RefCell;
use std::collections::HashMap;
use std::fmt;
use std::fmt::Debug;
use std::hash::Hash;
use std::hash::Hasher;
use std::num::NonZeroU32;
use std::sync::atomic::AtomicU32;
use std::sync::atomic::Ordering;
use std::u32;

use once_cell::sync::OnceCell;
use serde::Deserialize;
use serde::Deserializer;
use serde::Serialize;
use serde::Serializer;

use crate::atomic_arena;
use crate::atomic_arena::AtomicArena;
use crate::idhasher::BuildIdHasher;
use crate::sharded_set::ShardedSet;

/// `InternId`s wrap the `Ref<T>` type.
#[doc(hidden)]
pub type Ref<T> = atomic_arena::Ref<'static, T>;

/// The `InternId` trait is applied to the identifiers of interned data, and
/// `InternId::Intern` is the type of the data that was interned. You should
/// not implement `InternId` manually; instead, use the [intern_struct] macro.
pub trait InternId: 'static + Eq + Copy {
    /// Actual type of the interned data.
    type Intern: Hash + Eq + 'static + Borrow<Self::Lookup>;

    /// Type by which interned data can be looked up. This lets us support
    /// (for example) looking up a `StringId` using `&str` in `get_interned`.
    /// In most cases it will be the same as `Intern`.
    type Lookup: Hash + Eq + ?Sized;

    /// Return the static intern table for this type, which encapsulates the
    /// mapping from `InternId`s to their corresponding data.
    fn table() -> &'static InternTable<Self, Self::Intern>;

    /// Wrap and unwrap references.
    #[doc(hidden)]
    fn wrap(r: Ref<Self::Intern>) -> Self;

    #[doc(hidden)]
    fn unwrap(self) -> Ref<Self::Intern>;

    // Methods from here on are implemented for you,
    // and you should not override them.

    /// Intern a value.
    fn intern<U>(t: U) -> Self
    where
        U: Into<Self::Intern> + Borrow<Self::Lookup>,
        AsInterned<Self>: Borrow<Self::Lookup>,
    {
        Self::table().intern(t)
    }

    /// Fetch an existing interned value if it exists. You can use
    /// any type that you would use for a hash table lookup keyed by
    /// `Self::Intern`; by contrast `intern()` requires that you be able
    /// to convert the key `Into<Self::Intern>` if it hasn't been
    /// interned yet.
    fn get_interned<U>(t: &U) -> Option<Self>
    where
        U: Borrow<Self::Lookup>,
        AsInterned<Self>: Borrow<Self::Lookup>,
    {
        Self::table().get_interned(t)
    }

    /// Given an `InternId`, retrieve the corresponding data. Equivalent to
    /// `Deref::deref`, but specifies the resulting reference is `'static`.
    #[inline]
    fn get(self) -> &'static Self::Intern {
        Self::table().get(self)
    }

    /// 0-based index of interned value among all interned values of this type,
    /// suitable for indexing arrays with interned values.
    #[inline]
    fn index(self) -> u32 {
        self.unwrap().index()
    }

    /// Reverse the results of `index()`, and check the index boundary.
    #[inline]
    fn from_index_checked(i: u32) -> Option<Self> {
        if (i as usize) < Self::table().len() {
            // This is safe because `from_index` uses `NonZeroU32::new_unchecked`
            // and the value is always > 0
            let ref_ = unsafe { Ref::from_index(i) };
            Some(Self::wrap(ref_))
        } else {
            None
        }
    }

    /// Unsafely reverse the results of `index()`.
    #[doc(hidden)]
    #[inline]
    unsafe fn from_index(i: u32) -> Self {
        Self::wrap(Ref::from_index(i))
    }

    /// Raw index for internal use only.
    #[doc(hidden)]
    #[inline]
    fn raw(self) -> NonZeroU32 {
        self.unwrap().raw()
    }

    /// Unsafely reverse the results of `raw()`.
    #[doc(hidden)]
    #[inline]
    unsafe fn from_raw(i: NonZeroU32) -> Self {
        Self::wrap(Ref::from_raw(i))
    }
}

/// There are two very different ways you might want to use an `InternId` as a
/// hash table key. The default if you `derive(Hash, PartialEq, Eq)` is to use
/// the representation of the `Id` itself (roughly equivalent to `.index()`) as
/// the hash code. This yields fast equality and hash, but you can only look
/// up interned values. The default `Set` and `Map` types created by the macro
/// use this hash code with the custom fast hasher
/// `idhasher::BuildIdHasher<T>`.
///
/// `AsInterned` is a simple wrapper that supports the opposite view:
/// treat the `InternId` as a reference to `InternId::Intern` and use the
/// hash code of `InternId::Intern`. This allows you to compare to
/// uninterned data, by comparing the results of `InternId::get`.
/// `AsInterned` is public so that it can be used if you need it (for
/// example if you're using a mix of interned and uninterned values to
/// avoid interning intermediate data that you want to throw away).
///
/// It's unsafe to `impl Borrow<InternId::Intern> for InternId`; `Borrow`
/// requires hash codes to be equal. You can't directly use `InternId` as a
/// hash table key and then expect to be able to use `InternId::Intern` to
/// perform lookups without interning.
/// An explicit `impl Borrow<InternId::Intern> for AsInterned<InternId>` is
/// still required for every `InternId`. The [intern_struct] macro provides
/// this implementation as it's used internally for the intern process itself.
#[derive(Debug, Copy, Clone)]
#[repr(transparent)]
pub struct AsInterned<Id>(pub Id);

impl<Id: InternId> Hash for AsInterned<Id> {
    // Hash the *interned data*, not the id, so lookups by uninterned
    // data hash consistently.
    fn hash<H: Hasher>(&self, h: &mut H) {
        self.0.get().hash(h)
    }
}

impl<Id: InternId> PartialEq for AsInterned<Id> {
    fn eq(&self, other: &Self) -> bool {
        // Note: intern yields equal ids *iff* the underlying equality did so,
        // and the underlying hashing is consistent with equality (which is an
        // invariant of Hash). That's because the underlying equality and hash
        // are used when the data is interned in the first place. Thus this is
        // safe.
        self.0 == other.0
    }
}

impl<Id: InternId> Eq for AsInterned<Id> {}

impl<Id> Ord for AsInterned<Id>
where
    Id: InternId,
    Id::Intern: Ord,
{
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.0.get().cmp(other.0.get())
    }
}

impl<Id> PartialOrd for AsInterned<Id>
where
    Id: InternId,
    Id::Intern: PartialOrd,
{
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        self.0.get().partial_cmp(other.0.get())
    }
}

// Dedup set mapping interned data back to its id; FnvHasher hashes the
// underlying data (via AsInterned's Hash impl above).
type Shards<Id> = ShardedSet<AsInterned<Id>, std::hash::BuildHasherDefault<fnv::FnvHasher>>;

/// An `InternTable` manages all the storage associated with the `Id`.
#[derive(Default)]
pub struct InternTable<Id, Type> {
    shards: OnceCell<Shards<Id>>,      // Allocated lazily.
    arena: AtomicArena<'static, Type>, // Static.
    serdes_type_index: AtomicU32,      // Initialized lazily.
}

// Global counter handing out one small index per distinct InternId type;
// starts at 1 so that u32::MAX can serve as the "unassigned" sentinel.
static NEXT_SERDES_TYPE_INDEX: AtomicU32 = AtomicU32::new(1);

impl<Id, Type> InternTable<Id, Type> {
    /// Create new `InternTable`
    #[doc(hidden)]
    pub const fn new() -> Self {
        InternTable {
            shards: OnceCell::new(),
            arena: AtomicArena::new(),
            serdes_type_index: AtomicU32::new(u32::MAX),
        }
    }

    /// Create new `InternTable` with a distinguished constant as the first
    /// interned value.
    #[doc(hidden)]
    pub const fn with_zero(z: &'static atomic_arena::Zero<Type>) -> Self {
        InternTable {
            shards: OnceCell::new(),
            arena: AtomicArena::with_zero(z),
            serdes_type_index: AtomicU32::new(u32::MAX),
        }
    }

    /// Yields the current count of interned values; this can be used
    /// in conjunction with `.index()` on those values to allocate and
    /// index a direct-mapped array.
    pub fn len(&self) -> usize {
        self.arena.len()
    }

    pub fn is_empty(&self) -> bool {
        self.arena.is_empty()
    }
}

impl<Id: InternId> InternTable<Id, Id::Intern> {
    /// The methods from here on are internal and private.
    // Lazily create the dedup shards. If the arena was created via
    // `with_zero`, seed the shards with that zero element so lookups
    // can find it.
    fn shards(&'static self) -> &Shards<Id> {
        self.shards.get_or_init(|| {
            let shards: Shards<Id> = ShardedSet::default();
            if !self.arena.is_empty() {
                let iwz = AsInterned(Id::wrap(atomic_arena::Zero::zero()));
                shards.unchecked_insert(iwz);
            }
            shards
        })
    }

    /// Intern `t`, and return the resulting `Id`.
    fn intern<U>(&'static self, t: U) -> Id
    where
        U: Into<Id::Intern> + Borrow<Id::Lookup>,
        AsInterned<Id>: Borrow<Id::Lookup>,
    {
        let wt = t.borrow();
        // Optimistically try to write lock (because it only requires
        // one lock acquisition check for the new-string case), but if
        // there's write contention take a slower path.
        let shards = self.shards();
        let mut insert_lock = match shards.get_or_insert_lock(wt) {
            Ok(AsInterned(id)) => return id,
            Err(insert_lock) => insert_lock,
        };
        // Not present: allocate in the arena while holding the shard's
        // insert lock, then publish the id in the dedup set.
        let id = Id::wrap(self.arena.add(t.into()));
        insert_lock.insert(AsInterned(id));
        id
    }

    /// If `t` has already been interned, return the corresponding `Id`.
    /// Note that this only borrows `t`, and thus avoids
    /// creating an `Id::Intern`.
    fn get_interned<U>(&'static self, t: &U) -> Option<Id>
    where
        U: Borrow<Id::Lookup>,
        AsInterned<Id>: Borrow<Id::Lookup>,
    {
        if let Some(AsInterned(id)) = self.shards().get(t.borrow()) {
            Some(id)
        } else {
            None
        }
    }

    /// Get a shared reference to the underlying `Id::Intern`.
    /// Usually you can rely on `deref` to do this implicitly.
    #[inline]
    fn get(&'static self, r: Id) -> &Id::Intern {
        self.arena.get(r.unwrap())
    }

    /// Getter that checks for the need to allocate.
    fn serdes_type_index(&'static self) -> u32 {
        let i = self.serdes_type_index.load(Ordering::Acquire);
        if i != u32::MAX {
            i
        } else {
            self.serdes_type_index_slow()
        }
    }

    // Slow path: claim a fresh index from the global counter, then race
    // to install it; the loser adopts the winner's index.
    fn serdes_type_index_slow(&'static self) -> u32 {
        let i = NEXT_SERDES_TYPE_INDEX.fetch_add(1, Ordering::Relaxed);
        assert!(i != u32::MAX); // Or we've overflowed.
        // Now, we might be racing another thread to assign self.type_index.
        // So CAS it in, keeping any entry that was already there (since it's
        // already being used).
        if let Err(winner) = self.serdes_type_index.compare_exchange(
            u32::MAX,
            i,
            Ordering::AcqRel,
            Ordering::Acquire,
        ) {
            winner
        } else {
            i
        }
    }
}

impl<Id, Type> Debug for InternTable<Id, Type> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "InternTable[{} entries]", self.len())
    }
}

// `PerInternIdVec` can be used to allocate a data structure for each
// distinct `Id` type. Note, however, that the underlying type `T` has to
// be the same for all `Id` types.
#[derive(Default, Debug)]
struct PerInternIdVec<T> {
    v: Vec<T>,
}

impl<T: Default> PerInternIdVec<T> {
    /// Ensure there is an element for all currently-extant `Id` types,
    /// extending with default elements if they are missing.
    fn ensure_default(&mut self) {
        let n = NEXT_SERDES_TYPE_INDEX.load(Ordering::Relaxed);
        if n as usize > self.v.len() {
            self.v.resize_with(n as usize, T::default);
        }
    }

    /// Discard all memory.
    fn clear(&mut self) {
        self.v = Default::default();
    }

    // Make sure there's an entry for `Id` and return its index.
    fn ensure_and_index<Id: InternId>(&mut self) -> u32 {
        let i = Id::table().serdes_type_index(); // May increment NEXT_SERDES_TYPE_INDEX, do first.
        self.ensure_default();
        i
    }

    fn for_id<Id: InternId>(&mut self) -> &T {
        let i = self.ensure_and_index::<Id>();
        &self.v[i as usize]
    }

    fn for_id_mut<Id: InternId>(&mut self) -> &mut T {
        let i = self.ensure_and_index::<Id>();
        &mut self.v[i as usize]
    }
}

// Per-type serialization state: backref counter plus a map from arena Ref
// to the backref index it was assigned.
#[derive(Default)]
struct SerState {
    next_index: u32,
    ref_to_index: HashMap<Ref<usize>, u32, BuildIdHasher<u32>>,
}

// Thread-local ser/de state, scoped by SerGuard/DeGuard below; one slot
// per InternId type (indexed by serdes_type_index).
thread_local! {
    static REF_TO_INDEX: RefCell<PerInternIdVec<SerState>> = Default::default();
    static INDEX_TO_REF: RefCell<PerInternIdVec<Vec<Ref<usize>>>> = Default::default();
}

/// Create a `SerGuard::default()` before serializing types that transitively
/// contain `InternId`s, then drop the resulting `SerGuard` when serialization
/// is complete.
#[derive(Debug)]
pub struct SerGuard {
    _v: (), // Prevent construction except via Default
}

impl Default for SerGuard {
    fn default() -> Self {
        REF_TO_INDEX.with(|rti| rti.borrow_mut().ensure_default());
        SerGuard { _v: () }
    }
}

impl Drop for SerGuard {
    fn drop(&mut self) {
        // Dropping the guard resets backref numbering for this thread.
        REF_TO_INDEX.with(|rti| rti.borrow_mut().clear());
    }
}

/// Create a `DeGuard::default()` before deserializing types that transitively
/// contain `InternId`s, then drop the resulting `DeGuard` when deserialization
/// is complete.
#[derive(Debug)]
pub struct DeGuard {
    _v: (), // Prevent construction except via Default
}

impl Default for DeGuard {
    fn default() -> Self {
        INDEX_TO_REF.with(|itr| itr.borrow_mut().ensure_default());
        DeGuard { _v: () }
    }
}

impl Drop for DeGuard {
    fn drop(&mut self) {
        INDEX_TO_REF.with(|itr| itr.borrow_mut().clear());
    }
}

/// Wrap an object in WithIntern to set the necessary context to
/// serialize the interned objects it contains.
#[derive(Debug)]
pub struct WithIntern<T>(pub T);

impl<T> WithIntern<T> {
    pub fn strip<E>(t: Result<Self, E>) -> Result<T, E> {
        t.map(|WithIntern(r)| r)
    }
}

impl<T: Serialize> Serialize for WithIntern<T> {
    fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
        let _guard = SerGuard::default();
        self.0.serialize(s)
    }
}

impl<'de, T: Deserialize<'de>> Deserialize<'de> for WithIntern<T> {
    fn deserialize<D: Deserializer<'de>>(d: D) -> Result<Self, D::Error> {
        let _guard = DeGuard::default();
        Ok(WithIntern(T::deserialize(d)?))
    }
}

/// `InternId`s do serdes via `InternSerdes`.
#[derive(Debug)]
#[repr(transparent)]
pub struct InternSerdes<Id: InternId>(pub Id);

impl<Id: InternId> From<Id> for InternSerdes<Id> {
    fn from(id: Id) -> Self {
        InternSerdes(id)
    }
}

// Wire format: the first occurrence of a value is written as `Value`;
// subsequent occurrences are written as `Id`, a backreference index into
// the order in which `Value`s were emitted.
#[derive(Debug, Serialize, Deserialize)]
enum InternEnum<T> {
    Value(T),
    Id(u32),
}

impl<Id> Serialize for InternSerdes<Id>
where
    Id: InternId,
    Id::Intern: Serialize,
{
    fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
        // strip the actual type off self's internal Ref.
        let r: Ref<usize> = unsafe { self.0.unwrap().rebrand() };
        debug_assert_eq!(r.index(), self.0.unwrap().index());
        REF_TO_INDEX.with(|cell| {
            let mut tls = cell.borrow_mut();
            let state = tls.for_id::<Id>();
            let opt_ser_id = state.ref_to_index.get(&r).copied();
            // Release the RefCell before recursing into serialization,
            // which may serialize nested InternSerdes of the same type.
            drop(tls);
            if let Some(ser_id) = opt_ser_id {
                let ie: InternEnum<&Id::Intern> = InternEnum::Id(ser_id);
                ie.serialize(s)
            } else {
                let ie: InternEnum<&Id::Intern> = InternEnum::Value(self.0.get());
                let res = ie.serialize(s)?;
                let mut tls = cell.borrow_mut();
                let state = tls.for_id_mut::<Id>();
                // Grab the next backref index. This is not necessarily the same as
                // `ref_to_index.len()` in certain recursive cases. Incrementing a simple
                // counter matches the index numbering the deserializer expects.
                let index = state.next_index;
                state.next_index = index + 1;
                // Associate this Ref with the new index, for back references. However, if it
                // already got associated with a back reference when we recursively serialized
                // the value above, just leave it alone. Either index will work in the
                // deserializer, since it just deserialized (and interned) the same value
                // twice, but we might as well consistently use the lowest-numbered ID.
                state.ref_to_index.entry(r).or_insert(index);
                Ok(res)
            }
        })
    }
}

impl<'de, Id> Deserialize<'de> for InternSerdes<Id>
where
    Id: InternId,
    Id::Intern: Deserialize<'de>,
    AsInterned<Id>: Borrow<Id::Lookup>,
{
    fn deserialize<D: Deserializer<'de>>(d: D) -> Result<Self, D::Error> {
        let ie: InternEnum<Id::Intern> = InternEnum::deserialize(d)?;
        match ie {
            InternEnum::Value(w) => {
                // First occurrence: intern it and record the Ref so later
                // `Id(i)` backreferences can resolve to it.
                let id: Id = Id::intern(w);
                let r: Ref<usize> = unsafe { id.unwrap().rebrand() };
                debug_assert_eq!(r.index(), id.unwrap().index());
                INDEX_TO_REF.with(|itr| itr.borrow_mut().for_id_mut::<Id>().push(r));
                Ok(InternSerdes(id))
            }
            InternEnum::Id(i) => {
                // Backreference: resolve against the Values seen so far.
                let r: Ref<usize> =
                    INDEX_TO_REF.with(|itr| itr.borrow_mut().for_id_mut::<Id>()[i as usize]);
                let id: Id = Id::wrap(unsafe { r.rebrand() });
                debug_assert_eq!(r.index(), id.unwrap().index());
                Ok(InternSerdes(id))
            }
        }
    }
}

/// The body of intern_struct! is a sequence of declarations (the ? lines are optional):
/// ```ignore
/// #[attr]*
/// [pub] struct IdTypeName = Intern<InternType> {
///     serdes("intern::InternSerdes<IdTypeName>"); ?   // Enables serdes
///     type Lookup = LookupType; ?                     // Intern lookup at LookupType
///     type Set = IdSetName; ?                         // Name of set type
///     type Map = IdMapName; ?                         // Name of map type
///     const ZERO_NAME = idtype_expr; ?                // Zero element
/// }
/// ```
#[macro_export]
macro_rules! intern_struct {
    () => { };

    // Private-visibility entry point.
    ($(#[$attr:meta])* struct $Name:ident = Intern<$T:ty> {
        $(serdes($l:expr);)?
        $(type Lookup = $L:ty;)?
        $(type Set = $S:ident;)?
        $(type Map = $M:ident;)?
        $(const $Z:ident = $ze:expr;)?
    } $($rest:tt)*) => {
        intern_struct!(@DOIT, ($(#[$attr])*), (), $Name, $T,
                       ( $($L)? ), ( $($Z, $ze)? ), ( $($l)? ), ( $($S)? ), ( $($M)? ) );
        intern_struct!{ $($rest)* }
    };

    // `pub` entry point.
    ($(#[$attr:meta])* pub struct $Name:ident = Intern<$T:ty> {
        $(serdes($l:expr);)?
        $(type Lookup = $L:ty;)?
        $(type Set = $S:ident;)?
        $(type Map = $M:ident;)?
        $(const $Z:ident = $ze:expr;)?
    } $($rest:tt)*) => {
        intern_struct!(@DOIT, ($(#[$attr])*), (pub), $Name, $T,
                       ( $($L)? ), ( $($Z, $ze)? ), ( $($l)? ), ( $($S)? ), ( $($M)? ) );
        intern_struct!{ $($rest)* }
    };

    // `pub(crate)` entry point.
    ($(#[$attr:meta])* pub(crate) struct $Name:ident = Intern<$T:ty> {
        $(serdes($l:expr);)?
        $(type Lookup = $L:ty;)?
        $(type Set = $S:ident;)?
        $(type Map = $M:ident;)?
        $(const $Z:ident = $ze:expr;)?
    } $($rest:tt)*) => {
        intern_struct!(@DOIT, ($(#[$attr])*), (pub(crate)), $Name, $T,
                       ( $($L)? ), ( $($Z, $ze)? ), ( $($l)? ), ( $($S)? ), ( $($M)? ) );
        intern_struct!{ $($rest)* }
    };

    // Internal helper arms below; dispatch on whether the optional clause
    // was present (empty vs non-empty parenthesized group).
    (@SERDESDERIVE(); $($decl:tt)*) => {
        $($decl)*
    };
    (@SERDESDERIVE($l:expr); $($decl:tt)*) => {
        #[derive(serde_derive::Deserialize, serde_derive::Serialize)]
        #[serde(from = $l)]
        #[serde(into = $l)]
        $($decl)*
    };
    (@LOOKUPTYPE($T:ty, ())) => {
        type Lookup = $T;
    };
    (@LOOKUPTYPE($T:ty, ($Lookup:ty))) => {
        type Lookup = $Lookup;
    };
    (@BORROW($T:ty, $Name:ty, ())) => {
        impl std::borrow::Borrow<$T> for $crate::intern::AsInterned<$Name> {
            #[inline]
            fn borrow(&self) -> & $T {
                use $crate::intern::InternId;
                self.0.get()
            }
        }
    };
    (@BORROW($T:ty, $Name:ty, ($Lookup:ty))) => {
        impl std::borrow::Borrow<$Lookup> for $crate::intern::AsInterned<$Name> {
            #[inline]
            fn borrow(&self) -> & $Lookup {
                use $crate::intern::InternId;
                self.0.get().borrow()
            }
        }
    };
    (@MAP(($($vis:tt)*), $Name:ident, ())) => { };
    (@MAP(($($vis:tt)*), $Name:ident, ($M:ident))) => {
        $($vis)* type $M<V> =
            std::collections::HashMap<$Name, V, $crate::idhasher::BuildIdHasher<u32>>;
    };
    (@SET(($($vis:tt)*), $Name:ident, ())) => { };
    (@SET(($($vis:tt)*), $Name:ident, ($S:ident))) => {
        $($vis)* type $S =
            std::collections::HashSet<$Name, $crate::idhasher::BuildIdHasher<u32>>;
    };
    (@TABLE($T:ty, ())) => {
        $crate::intern::InternTable::new()
    };
    (@TABLE($T:ty, ($v:ident, $zero:expr))) => {{
        static ZERO: $crate::Zero<$T> = $crate::Zero::new($zero);
        $crate::intern::InternTable::with_zero(&ZERO)
    }};
    (@ZERO($Name:ident, ())) => { };
    (@ZERO($Name:ident, ($v:ident, $zero:expr))) => {
        impl $Name {
            pub const $v: Self = $Name($crate::Zero::zero());
        }
    };
    (@DOIT, ($(#[$attr:meta])*), ($($vis:tt)*), $Name:ident, $T:ty, ( $($Lookup:ty)? ),
     ( $($Z:ident, $ze:expr)* ), ( $($serdes:tt)? ), ( $($S:ident)? ), ( $($M:ident)? ) ) => {
        intern_struct!{ @SERDESDERIVE($($serdes)?);
            $(#[$attr])*
            #[derive(Copy, Clone, Eq, PartialEq, Hash)]
            #[repr(transparent)]
            $($vis)* struct $Name($crate::intern::Ref<$T>);
        }

        impl $crate::intern::InternId for $Name {
            type Intern = $T;

            intern_struct!(@LOOKUPTYPE($T, ($($Lookup)?)));

            #[inline]
            fn wrap(r: $crate::intern::Ref<$T>) -> Self {
                $Name(r)
            }

            #[inline]
            fn unwrap(self) -> $crate::intern::Ref<$T> {
                self.0
            }

            #[inline]
            fn table() -> &'static $crate::intern::InternTable<$Name, $T> {
                static TABLE: $crate::intern::InternTable<$Name, $T> =
                    intern_struct!(@TABLE($T, ($($Z, $ze)?)));
                &TABLE
            }
        }

        intern_struct!(@ZERO($Name, ($($Z, $ze)?)));
        intern_struct!(@BORROW($T, $Name, ($($Lookup)*)));
        intern_struct!(@SET(($($vis)*), $Name, ($($S)?)));
        intern_struct!(@MAP(($($vis)*), $Name, ($($M)?)));

        impl std::convert::From<$crate::intern::InternSerdes<$Name>> for $Name {
            #[inline]
            fn from(iid: $crate::intern::InternSerdes<$Name>) -> Self {
                iid.0
            }
        }

        impl std::ops::Deref for $Name {
            type Target = $T;

            #[inline]
            fn deref(&self) -> & $T {
                use $crate::intern::InternId;
                self.get()
            }
        }

        impl std::fmt::Debug for $Name {
            fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                use $crate::intern::InternId;
                self.get().fmt(f)
            }
        }
    };
}

#[cfg(test)]
mod tests {
    use serde_derive::Deserialize;
    use serde_derive::Serialize;

    use super::*;

    #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Deserialize, Serialize)]
    struct MyType {
        v: i64,
    }

    intern_struct! {
        struct MyId = Intern<MyType> {
            serdes("InternSerdes<MyId>");
            type Set = _MyIdSet;
            type Map = _MyIdMap;
        }
    }

    impl std::cmp::PartialOrd for MyId {
        fn partial_cmp(&self, other: &Self) -> std::option::Option<std::cmp::Ordering> {
            self.get().partial_cmp(other.get())
        }
    }

    impl std::cmp::Ord for MyId {
        fn cmp(&self, other: &Self) -> std::cmp::Ordering {
            self.get().cmp(other.get())
        }
    }

    #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Deserialize, Serialize)]
    pub struct PubType {
        v: i64,
    }

    intern_struct! {
        pub struct PubId = Intern<PubType> {
            serdes("InternSerdes<PubId>");
            type Set = _PubIdSet;
            type Map = _PubIdMap;
        }
    }

    #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Deserialize, Serialize)]
    pub(crate) struct CrateType {
        v: i64,
    }

    intern_struct! {
        pub(crate) struct CrateId = Intern<CrateType> {
            serdes("InternSerdes<CrateId>");
            type Set = _CrateIdSet;
            type Map = _CrateIdMap;
        }
    }

    #[test]
    fn test() {
        let m1 = MyType { v: 1 };
        let m2 = MyType { v: 1 };
        let m3 = MyType { v: -57 };
        let i1 = MyId::intern(m1);
        let i2 = MyId::intern(m2);
        let i3 = MyId::intern(m3);
        assert_eq!(i1, i2);
        assert_eq!(i1.get().v, 1);
        assert_ne!(i1, i3);
        assert_eq!(i3.v, -57); // Uses Deref
        assert!(i3 < i1);
    }

    #[test]
    fn pub_test() {
        let m1 = PubType { v: 1 };
        let m2 = PubType { v: 1 };
        let m3 = PubType { v: -57 };
        let i1 = PubId::intern(m1);
        let i2 = PubId::intern(m2);
        let i3 = PubId::intern(m3);
        assert_eq!(i1, i2);
        assert_eq!(i1.get().v, 1);
        assert_ne!(i1, i3);
        assert_eq!(i3.v, -57); // Uses Deref
    }

    #[test]
    fn crate_test() {
        let m1 = CrateType { v: 1 };
        let m2 = CrateType { v: 1 };
        let m3 = CrateType { v: -57 };
        let i1 = CrateId::intern(m1);
        let i2 = CrateId::intern(m2);
        let i3 = CrateId::intern(m3);
        assert_eq!(i1, i2);
        assert_eq!(i1.get().v, 1);
        assert_ne!(i1, i3);
        assert_eq!(i3.v, -57); // Uses Deref
    }

    #[test]
    fn test_bincode_works_with_serialize_into() {
        let m1 = CrateType { v: 1 };
        let m2 = CrateType { v: -57 };
        let i1 = CrateId::intern(m1);
        let i2 = CrateId::intern(m2);
        let val = (i1, i2, i1);
        let mut serialized: Vec<u8> = vec![];
        {
            let _guard = SerGuard::default();
            bincode::serialize_into(&mut serialized, &val).unwrap()
        }
        let deserialized: (CrateId, CrateId, CrateId) = {
            let _guard = DeGuard::default();
            bincode::deserialize(&serialized).unwrap()
        };
        assert_eq!(deserialized, val);
    }

    #[test]
    fn round_trip_bincode() {
        let m1 = CrateType { v: 1 };
        let m2 = CrateType { v: -57 };
        let i1 = CrateId::intern(m1);
        let i2 = CrateId::intern(m2);
        let val = (i1, i2, i1);
        let serialized: Vec<u8> = { bincode::serialize(&WithIntern(&val)).unwrap() };
        let deserialized: (CrateId, CrateId, CrateId) =
            { WithIntern::strip(bincode::deserialize(&serialized)).unwrap() };
        assert_eq!(deserialized, val);
    }

    #[test]
    fn round_trip_json() {
        use serde_json;
        let m1 = CrateType { v: 1 };
        let m2 = CrateType { v: -57 };
        let i1 = CrateId::intern(m1);
        let i2 = CrateId::intern(m2);
        let val = (i1, i2, i1);
        let serialized: String = { serde_json::to_string_pretty(&WithIntern(&val)).unwrap() };
        let deserialized: (CrateId, CrateId, CrateId) =
            { WithIntern::strip(serde_json::from_str(&serialized)).unwrap() };
        assert_eq!(deserialized, val);
    }
}
Rust
hhvm/hphp/hack/src/utils/intern/src/lib.rs
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

//! Compact, serializable, mostly-lock-free interning.
//!
//! A library for [interning](https://en.wikipedia.org/wiki/String_interning)
//! strings, byte slices, and other data in Rust. Interned values are
//! deduplicated. A reference to interned data can be represented in 32 bits,
//! so that frequently-used references take up half as much space on a system
//! with 64-bit pointers.
//!
//! # Why another intern crate?
//!
//! Compared to similar crates (such as
//! [internment](https://crates.io/crates/internment)), this crate offers the
//! following advantages:
//! * Compact 32-bit ids. This crate was written to save space on internal
//!   program analysis data structures. (If you don't need this, you can use
//!   `get()` to turn an `Id` into a `&'static T`.)
//! * Sharded locking when interning new objects. We use
//!   [rayon](https://docs.rs/rayon/1.5.1/rayon/) in our code and need to be
//!   able to intern data in parallel.
//! * Lock-free constant-time `deref()` of intern ids. Again, we need to be
//!   able read interned data from numerous threads. Compared to using a
//!   pointer, this requires a single extra load and no extra storage beyond
//!   the fixed-size part of the arena.
//! * Support for deduplicating serialization and deserialization using
//!   [serde](https://serde.rs/). This allows us to preserve sharing when
//!   doing I/O, and to automatically re-intern data when it is read.
//! * Support for fast hashing using the `IdHasher` type.
//!
//! # Data is leaked
//!
//! There is one important downside to this crate that is shared by some, but
//! not all, intern crates: interned data is leaked and will not be reclaimed
//! until the program terminates. If that's a problem you may need to consider
//! a scoped interning crate (but note that efficient threading support may be
//! hard to find).
//!
//! # Using string interning
//!
//! Simply import `intern::string` and go to town:
//! ```
//! use intern::string::{self, StringId};
//!
//! let a: StringId = string::intern("a");
//! let b = string::intern("b");
//! let a_again: Option<StringId> = Some(string::intern("a")); // still 32 bits
//! assert_eq!(Some(a), a_again);
//! assert_eq!(format!("{} {} {}", a, b, a_again.unwrap()), "a b a");
//! ```
//!
//! # How to define an interned type
//!
//! Say we want to intern `MyType`. Simply use the
//! [`intern_struct!`](intern_struct) macro as shown below. Now
//! `MyId::intern(my_type)` will intern an object and `&*my_id` will retrieve a
//! static reference to the interned object referred to by `my_id`.
//! ```
//! # #[macro_use]
//! use intern::{InternId, InternSerdes, intern_struct};
//! use serde_derive::{Deserialize, Serialize};
//!
//! #[derive(Debug, PartialEq, Eq, Hash, Deserialize, Serialize)]
//! struct MyType{ v: i64 }
//!
//! intern_struct! {
//!     struct MyId = Intern<MyType> { serdes("InternSerdes<MyId>"); }
//! }
//!
//! # fn main() {
//! let m1 = MyType{ v: 1 };
//! let m2 = MyType{ v: 1 };
//! let m3 = MyType{ v: -57 };
//! let i1 = MyId::intern(m1);
//! let i2 = MyId::intern(m2);
//! let i3 = MyId::intern(m3);
//! assert_eq!(i1, i2);
//! assert_eq!(i1.get().v, 1);
//! assert!(i1 != i3);
//! assert_eq!(i3.v, -57); // Uses Deref
//! # }
//! ```
//!
//! # Using InternId serde support
//!
//! `InternId`s defined with a `serdes` clause as shown above support
//! de-duplication of common ids during serialization. This can be
//! especially useful for interned strings, but is applicable to any
//! intern type whose target is serializable. Because this
//! deduplication is stateful but the serde api is stateless, you'll
//! need to wrap the entire data structure you serialize in
//! `WithIntern` during serialization and deserialization:
//! ```
//! # use bincode::Result;
//! # use crate::intern::WithIntern;
//! # type MyId = crate::intern::string::StringId;
//! pub fn serialize(v: &[MyId]) -> Result<Vec<u8>> {
//!     bincode::serialize(&WithIntern(v))
//! }
//!
//! pub fn deserialize(encoded: &[u8]) -> Result<Vec<MyId>> {
//!     WithIntern::strip(bincode::deserialize(encoded))
//! }
//! ```
//! Note in particular that dropping a context resets sharing; if you create
//! and drop a context during serialization, you must create and drop it at the
//! same point during deserialization and vice versa.

// Module declarations. `atomic_arena`, `sharded_set`, and `small_bytes` are
// private implementation details; the `pub mod`s form the public API.
mod atomic_arena;
#[doc(hidden)]
pub mod idhasher;
pub mod intern;
pub mod path;
mod sharded_set;
mod small_bytes;
pub mod string;
pub mod string_key;

// Flat re-exports of the most commonly used names at the crate root.
#[doc(hidden)]
pub use crate::atomic_arena::Zero;
#[doc(inline)]
pub use crate::idhasher::BuildIdHasher;
#[doc(inline)]
pub use crate::idhasher::IdHasher;
#[doc(inline)]
pub use crate::intern::AsInterned;
#[doc(inline)]
pub use crate::intern::DeGuard;
#[doc(inline)]
pub use crate::intern::InternId;
#[doc(inline)]
pub use crate::intern::InternSerdes;
#[doc(inline)]
pub use crate::intern::SerGuard;
#[doc(inline)]
pub use crate::intern::WithIntern;
Rust
hhvm/hphp/hack/src/utils/intern/src/path.rs
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

use std::cmp::Ordering;
use std::ffi::OsStr;
#[cfg(unix)]
use std::os::unix::ffi::OsStrExt;
use std::path::Path;
use std::path::PathBuf;

use serde_derive::Deserialize;
use serde_derive::Serialize;

use crate::idhasher::BuildIdHasher;
use crate::intern_struct;
use crate::string;
use crate::InternId;
use crate::InternSerdes;

intern_struct! {
    /// A path whose path components are interned.
    pub struct PathId = Intern<PathNode> {
        serdes("InternSerdes<PathId>");
        type Set = PathIdSet;
        type Map = PathIdMap;
    }
}

pub type PathIdIndexMap<V> = indexmap::IndexMap<PathId, V, BuildIdHasher<u32>>;
pub type PathIdIndexSet = indexmap::IndexSet<PathId, BuildIdHasher<u32>>;

impl PathId {
    /// Intern the given path one component at a time using the `Path::iter()`
    /// component iterator (which, like `components()`, normalizes during
    /// parsing — see `test_eq` below: `a/`, `a//b` collapse). Each component
    /// becomes a `PathNode` interned with a link to its parent. Panics (via
    /// `unwrap`) if the iterator yields no components, i.e. on an empty path.
    #[cfg(unix)]
    pub fn intern<P: AsRef<Path>>(mut parent: Option<PathId>, path: P) -> Self {
        for c in path.as_ref().iter() {
            // On unix, components are interned as raw bytes.
            let p = InternId::intern(PathNode {
                name: string::intern_bytes(c.as_bytes()),
                parent,
            });
            parent = Some(p);
        }
        parent.unwrap()
    }

    /// Intern the given path one component at a time using the `Path::iter()`
    /// component iterator (which, like `components()`, normalizes during
    /// parsing). Panics (via `unwrap`) if the iterator yields no components,
    /// i.e. on an empty path, or if a component is not valid UTF-8.
    #[cfg(not(unix))]
    pub fn intern<P: AsRef<Path>>(mut parent: Option<PathId>, path: P) -> Self {
        for c in path.as_ref().iter() {
            // Off unix, components are interned as strings; non-UTF-8
            // components panic on the `to_str().unwrap()`.
            let p = InternId::intern(PathNode {
                name: string::intern(c.to_str().unwrap()),
                parent,
            });
            parent = Some(p);
        }
        parent.unwrap()
    }

    /// Interned id of the parent directory, or `None` at the root node.
    pub fn parent(&self) -> Option<PathId> {
        self.parent
    }

    /// Returns the final component as an &OsStr.
    pub fn file_name(&self) -> &OsStr {
        self.get().file_name()
    }

    /// Linearize this path as a PathBuf.
    pub fn to_path_buf(&self) -> PathBuf {
        self.get().to_path_buf()
    }

    /// Linearize this path, appending to an existing PathBuf.
    pub fn push_to(&self, buf: &mut PathBuf) {
        self.get().push_to(buf);
    }
}

impl<P: AsRef<Path>> From<P> for PathId {
    // NOTE: inherits `intern`'s panic on empty paths.
    fn from(path: P) -> Self {
        Self::intern(None, path)
    }
}

impl From<PathId> for PathBuf {
    fn from(id: PathId) -> Self {
        id.to_path_buf()
    }
}

impl std::fmt::Display for PathId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.to_path_buf().display().fmt(f)
    }
}

impl std::cmp::Ord for PathId {
    /// Compare the linearized components of self to other in top-down order.
    fn cmp(&self, other: &Self) -> Ordering {
        // Collect components leaf-to-root into a stack-allocated SmallVec
        // (spills to the heap past 20 components), then compare the two
        // sequences reversed, i.e. root-first.
        type LinearPath = smallvec::SmallVec<[PathComponentId; 20]>;
        fn linearize(mut p: PathId) -> LinearPath {
            let mut v: LinearPath = Default::default();
            v.push(p.name);
            while let Some(parent) = p.parent {
                v.push(parent.name);
                p = parent;
            }
            v
        }
        linearize(*self)
            .iter()
            .rev()
            .cmp(linearize(*other).iter().rev())
    }
}

impl std::cmp::PartialOrd for PathId {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

// A path component is interned as bytes on unix (paths need not be UTF-8
// there) and as a string elsewhere.
#[cfg(unix)]
type PathComponentId = string::BytesId;
#[cfg(not(unix))]
type PathComponentId = string::StringId;

/// A `PathNode` represents a `Path` as a list, starting with a leaf component
/// and walking up the directory tree. This is really an internal detail, but
/// is exposed by `PathId::Intern`.
#[doc(hidden)]
#[derive(Hash, PartialEq, Eq, Clone, Copy, Serialize, Deserialize)]
pub struct PathNode {
    name: PathComponentId,         // This (leaf-most) component.
    parent: Option<PathId>,        // Enclosing directory; None at the top.
}

impl PathNode {
    /// Returns the final component as an &OsStr.
    #[cfg(unix)]
    fn file_name(&self) -> &OsStr {
        OsStr::from_bytes(self.name.as_bytes())
    }

    /// Returns the final component as an &OsStr.
    #[cfg(not(unix))]
    fn file_name(&self) -> &OsStr {
        OsStr::new(self.name.as_str())
    }

    /// Linearize this path as a PathBuf.
    fn to_path_buf(&self) -> PathBuf {
        // Recurse to the root first, then push each component on the way
        // back down.
        let mut path = match self.parent {
            Some(parent) => parent.to_path_buf(),
            None => PathBuf::new(),
        };
        path.push(self.file_name());
        path
    }

    /// Linearize this path, appending to an existing PathBuf.
    fn push_to(&self, buf: &mut PathBuf) {
        if let Some(parent) = self.parent {
            parent.push_to(buf);
        }
        buf.push(self.file_name());
    }
}

impl std::fmt::Debug for PathNode {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.to_path_buf().fmt(f)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_eq() {
        assert_eq!(PathId::from("a"), PathId::from("a/"));
        assert_ne!(PathId::from("a"), PathId::from("/a"));
        assert_eq!(PathId::from("a/b"), PathId::from("a//b"));
    }

    #[test]
    fn test_cmp() {
        assert_eq!(PathId::from("a").cmp(&PathId::from("a")), Ordering::Equal);
        assert_eq!(PathId::from("a").cmp(&PathId::from("b")), Ordering::Less);
        assert_eq!(PathId::from("a").cmp(&PathId::from("a/a")), Ordering::Less);
        assert_eq!(PathId::from("a").cmp(&PathId::from("a/_")), Ordering::Less);
        assert_eq!(PathId::from("b").cmp(&PathId::from("a")), Ordering::Greater);
        assert_eq!(
            PathId::from("a/a").cmp(&PathId::from("a")),
            Ordering::Greater
        );
        assert_eq!(
            PathId::from("a/_").cmp(&PathId::from("a")),
            Ordering::Greater
        );
        assert_eq!(
            PathId::from("a/b").cmp(&PathId::from("a.b")),
            Ordering::Less
        );
    }

    #[test]
    fn test_intern() {
        assert_eq!(PathId::from("a/b"), PathId::intern(None, "a/b"));
        assert_eq!(
            PathId::from("a/b/c"),
            PathId::intern(Some(PathId::from("a")), "b/c"),
        );
    }
}
Rust
hhvm/hphp/hack/src/utils/intern/src/sharded_set.rs
/* * Copyright (c) Meta Platforms, Inc. and affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ use std::borrow::Borrow; use std::collections::hash_map::RandomState; use std::fmt; use std::hash::BuildHasher; use std::hash::Hash; use std::hash::Hasher; use hashbrown::raw::RawTable; use parking_lot::RwLock; use parking_lot::RwLockWriteGuard; const SHARD_SHIFT: usize = 6; const SHARDS: usize = 1 << SHARD_SHIFT; pub struct ShardedSet<T, S = RandomState> { build_hasher: S, shards: [RwLock<RawTable<T>>; SHARDS], } impl<T, S> fmt::Debug for ShardedSet<T, S> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "ShardedSet") } } impl<T, S: BuildHasher + Default> Default for ShardedSet<T, S> { fn default() -> Self { Self::with_hasher(Default::default()) } } impl<T, S: BuildHasher> ShardedSet<T, S> { pub fn with_hasher(h: S) -> Self { Self { build_hasher: h, shards: [ Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), 
Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), Default::default(), ], } } } fn hash_one<B: BuildHasher, T: Hash>(build_hasher: &B, x: T) -> u64 { let mut hasher = build_hasher.build_hasher(); x.hash(&mut hasher); hasher.finish() } pub struct InsertLock<'a, T, S = RandomState> { build_hasher: &'a S, hash: u64, shard: RwLockWriteGuard<'a, RawTable<T>>, } impl<'a, T, S> fmt::Debug for InsertLock<'a, T, S> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "InsertLock(hash: {:x})", self.hash) } } impl<T: Eq + Hash, S: BuildHasher> ShardedSet<T, S> { /// Return the hash, the shard index corresponding to the hash. Since /// hashbrown uses the upper 7 bits for disambiguation and the lower bits /// for bucket indexing, we take the bits just above the top 7. #[inline(always)] fn hash_and_shard<Q>(&self, q: &Q) -> (u64, &RwLock<RawTable<T>>) where T: Borrow<Q>, Q: ?Sized + Hash, { let hash = hash_one(&self.build_hasher, q); ( hash, &self.shards[(hash >> (64 - 7 - SHARD_SHIFT)) as usize & (SHARDS - 1)], ) } /// Clone out the entry corresponding to `q` if it exists, otherwise return /// `Err(InsertLock)` so that it can subsequently be inserted. pub fn get_or_insert_lock<'a, Q>(&'a self, q: &Q) -> Result<T, InsertLock<'a, T, S>> where T: Borrow<Q> + Clone, Q: ?Sized + Hash + Eq, { let (hash, shard) = self.hash_and_shard(q); // Assume load is low and try to take lock for writing. // We don't faff around with upgradability right now. let shard = if let Some(write_lock) = shard.try_write() { write_lock } else { // Write contention. Try reading first to see if the entry already exists. if let Some(t) = shard.read().get(hash, |other| q == other.borrow()) { // Already exists. return Ok(t.clone()); } // Unconditionally write lock. shard.write() }; // Now check for the data. 
We need to do this even if we already // checked in the write contention case above. We don't use an // upgradable read lock because those are exclusive from one another // just like write locks. if let Some(t) = shard.get(hash, |other| q == other.borrow()) { return Ok(t.clone()); } Err(InsertLock { build_hasher: &self.build_hasher, hash, shard, }) } /// Clone out the entry corresponding to `q` if it exists. pub fn get<Q>(&self, q: &Q) -> Option<T> where T: Borrow<Q> + Clone, Q: ?Sized + Hash + Eq, { let (hash, shard) = self.hash_and_shard(q); shard .read() .get(hash, |other| q == other.borrow()) .map(Clone::clone) } /// Unconditionally insert `t` without checking if it's in the set. pub fn unchecked_insert(&self, t: T) { let build_hasher = &self.build_hasher; let (hash, shard) = self.hash_and_shard(&t); shard.write().insert(hash, t, |v| hash_one(build_hasher, v)); } } impl<T: Sized + Hash, S: BuildHasher> InsertLock<'_, T, S> { /// Insert the given value into the set. This value must borrow-match the /// original value from get_or_insert_lock. pub fn insert<Q: Into<T>>(&mut self, q: Q) { let build_hasher = self.build_hasher; self.shard .insert(self.hash, q.into(), |v| hash_one(build_hasher, v)); } }
Rust
hhvm/hphp/hack/src/utils/intern/src/small_bytes.rs
/* * Copyright (c) Meta Platforms, Inc. and affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ use std::ascii::escape_default; use std::borrow::Borrow; use std::fmt::Debug; use std::fmt::Formatter; use std::fmt::Result; use std::hash::Hash; use std::hash::Hasher; use std::mem::size_of; use std::ops::Deref; use serde_derive::Deserialize; use serde_derive::Serialize; const SMALL_MAX_LEN: usize = 3 * size_of::<usize>() - 2; /// A SmallBytes is 3 pointer-sized words (the same size as a vec); /// one byte is used for the enum tag and one is used for the length so /// the longest string that can be stored inline is 2 bytes shorter /// than that. #[derive(Eq, Clone, Deserialize, Serialize)] #[serde(from = "serde_bytes::ByteBuf")] #[serde(into = "serde_bytes::ByteBuf")] pub enum SmallBytes { Small { len: u8, bytes: [u8; SMALL_MAX_LEN] }, Large(Box<[u8]>), } use SmallBytes::*; impl SmallBytes { pub const fn empty() -> SmallBytes { Small { len: 0, bytes: [0; SMALL_MAX_LEN], } } pub fn len(&self) -> usize { match self { Small { len, .. } => *len as usize, Large(b) => b.len(), } } pub fn is_empty(&self) -> bool { self.len() == 0 } } impl Debug for SmallBytes { fn fmt(&self, f: &mut Formatter<'_>) -> Result { let s: String = String::from_utf8(self.iter().copied().flat_map(escape_default).collect()).unwrap(); match self { Small { len, .. } => write!(f, "Small{{len:{},bytes:b\"{}\"}}", *len, s), Large(_) => write!(f, "Large(b\"{}\")", s), } } } // If a slice is small enough to make a Small, we need to copy across // the bytes we need. Don't attempt to take ownership of the existing // value. 
fn make_small(b: &[u8]) -> Option<SmallBytes> { let l = b.len(); if l <= SMALL_MAX_LEN { let mut bytes = [0; SMALL_MAX_LEN]; bytes[0..l].copy_from_slice(b); Some(Small { len: l as u8, bytes, }) } else { None } } impl Deref for SmallBytes { type Target = [u8]; fn deref(&self) -> &[u8] { match self { Small { len, bytes } => &bytes[0..*len as usize], Large(b) => b, } } } impl AsRef<[u8]> for SmallBytes { fn as_ref(&self) -> &[u8] { self.deref() } } impl From<&[u8]> for SmallBytes { fn from(u: &[u8]) -> SmallBytes { if let Some(r) = make_small(u) { r } else { Large(u.into()) } } } impl From<Box<[u8]>> for SmallBytes { fn from(u: Box<[u8]>) -> SmallBytes { if let Some(r) = make_small(&u) { r } else { Large(u) } } } impl From<Vec<u8>> for SmallBytes { fn from(u: Vec<u8>) -> SmallBytes { if let Some(r) = make_small(&u) { r } else { Large(u.into()) } } } impl From<&str> for SmallBytes { #[inline] fn from(u: &str) -> SmallBytes { str::as_bytes(u).into() } } impl From<Box<str>> for SmallBytes { #[inline] fn from(u: Box<str>) -> SmallBytes { u.into_boxed_bytes().into() } } impl From<String> for SmallBytes { #[inline] fn from(u: String) -> SmallBytes { u.into_bytes().into() } } impl Borrow<[u8]> for SmallBytes { #[inline] fn borrow(&self) -> &[u8] { self.deref() } } impl From<serde_bytes::ByteBuf> for SmallBytes { fn from(bb: serde_bytes::ByteBuf) -> Self { bb.into_vec().into() // into_vec is just an unwrap. } } impl From<SmallBytes> for serde_bytes::ByteBuf { fn from(sb: SmallBytes) -> Self { let v: Vec<u8> = match sb { Small { len, bytes } => (&bytes[0..len as usize]).into(), Large(b) => b.into(), }; serde_bytes::ByteBuf::from(v) } } impl PartialEq for SmallBytes { fn eq(&self, other: &Self) -> bool { self.deref() == other.deref() } } impl Hash for SmallBytes { fn hash<H: Hasher>(&self, state: &mut H) { self.deref().hash(state) } } #[cfg(test)] mod tests { use super::*; // Make sure we can use empty() in a const context. 
const EMPTY: SmallBytes = SmallBytes::empty(); fn hash<H: Hash>(h: H) -> u64 { use std::collections::hash_map::DefaultHasher; let mut hasher = DefaultHasher::new(); h.hash(&mut hasher); hasher.finish() } #[test] fn empty() { assert_eq!(3 * size_of::<usize>(), size_of::<SmallBytes>()); assert_eq!(EMPTY, SmallBytes::empty()); let e: &'static [u8] = b""; let l = SmallBytes::from(e); assert_eq!(l, EMPTY); assert_eq!(&*l, e); assert_eq!(&*EMPTY, e); assert_eq!(hash(&l), hash(e)); let v: Vec<u8> = Vec::new(); let ll = SmallBytes::from(v); // Consumes v. let ls: &[u8] = &ll; assert_eq!(ll, l); assert_eq!(ls, b""); let vs: &[u8] = &Vec::new(); assert_eq!(ls, vs); assert_eq!(hash(ll), hash(e)); // Check that all zeros is empty, and fail compile if sizes are wrong. let zeros = [0usize; 3]; let lz: SmallBytes = unsafe { std::mem::transmute(zeros) }; assert_eq!(EMPTY, lz); } #[test] fn small() { let h: &'static [u8] = b"hello"; let hi: &'static [u8] = b"hi"; let l = SmallBytes::from(h); let li = SmallBytes::from(hi); assert_eq!(&*l, h); assert_eq!(hi, &*li); assert!(l != EMPTY); assert!(li != EMPTY); assert!(l != li); assert!(&*l != hi); assert!(hi != &*l); assert_eq!(hash(&l), hash(h)); assert_eq!(hash(&li), hash(hi)); let mut v = Vec::new(); v.extend(b"hello"); let ll = SmallBytes::from(v); assert_eq!(l, ll); assert_eq!(h, &*ll); assert_eq!(hash(&ll), hash(&l)); } #[test] fn large() { #![allow(clippy::op_ref)] let abc: &'static [u8] = b"Jackdaws love my big pink sphinx of quartz."; let fox: &'static [u8] = b"The quick brown fox jumps over the lazy dg."; let labc = SmallBytes::from(abc); let lfox = SmallBytes::from(fox); assert_eq!(&*labc, abc); assert_eq!(fox, &*lfox); assert!(labc != lfox); assert!(&labc != &lfox); assert!(&*labc != &*lfox); assert_eq!(hash(abc), hash(&labc)); assert_eq!(hash(fox), hash(&lfox)); let mut v = Vec::new(); v.extend(abc); let lv = SmallBytes::from(v); let mut w: Vec<u8> = Vec::new(); w.extend(abc); assert_eq!(&*lv, &w[..]); assert_eq!(&*lv, 
abc); assert_eq!(lv, labc); assert_eq!(hash(&lv), hash(labc)); } }
Rust
hhvm/hphp/hack/src/utils/intern/src/string.rs
/* * Copyright (c) Meta Platforms, Inc. and affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ use std::borrow::Borrow; use std::borrow::Cow; use std::cmp::Ordering; use std::collections::HashMap; use std::collections::HashSet; use std::fmt; use std::fmt::Formatter; use std::hash::Hash; use std::str::FromStr; use std::str::Utf8Error; #[doc(hidden)] pub use once_cell::sync::Lazy; // For macros use serde_derive::Deserialize; use serde_derive::Serialize; use crate::idhasher::BuildIdHasher; use crate::intern::InternId; use crate::intern::InternSerdes; use crate::intern_struct; use crate::small_bytes::SmallBytes; intern_struct! { /// An opaque token corresponding to an interned &[u8]. pub struct BytesId = Intern<SmallBytes> { serdes("InternSerdes<BytesId>"); type Lookup = [u8]; const EMPTY = SmallBytes::empty(); } } impl BytesId { /// Recover the original interned bytes. #[inline] pub fn as_bytes(self) -> &'static [u8] { // Safe because BytesId can only be generated // by a call to intern, which returns the result // of id_to_bytes.push. self.get() } } impl PartialOrd for BytesId { fn partial_cmp(&self, other: &BytesId) -> Option<Ordering> { Some(self.cmp(other)) } } impl Ord for BytesId { fn cmp(&self, other: &BytesId) -> Ordering { if self == other { Ordering::Equal } else { self.get().cmp(other.get()) } } } pub type BytesIdMap<V> = HashMap<BytesId, V, BuildIdHasher<u32>>; pub type BytesIdSet = HashSet<BytesId, BuildIdHasher<u32>>; /// An opaque token corresponding to an interned &str. /// /// You can recover the str with id.as_str() or using format!. #[derive(Copy, Clone, Hash, Eq, PartialEq)] #[derive(Serialize, Deserialize)] #[repr(transparent)] pub struct StringId(BytesId); impl StringId { /// We always pre-reserve a blank entry. pub const EMPTY: StringId = StringId(BytesId::EMPTY); /// Convert from raw bytes, which can only succeed if the bytes are valid utf-8. 
pub fn from_bytes(bytes: BytesId) -> Result<StringId, Utf8Error> { match std::str::from_utf8(bytes.as_bytes()) { Ok(_) => Ok(StringId(bytes)), Err(e) => Err(e), } } /// Convert to a static string. pub fn as_str(self) -> &'static str { // This is actually safe because the bytes we are converting originally came // from a str when we interned it. So they must be well-formed UTF8. unsafe { std::str::from_utf8_unchecked(self.0.as_bytes()) } } /// Intern index for the underlying bytes. pub fn index(self) -> u32 { (self.0).0.index() } pub fn from_index_checked(index: u32) -> Option<Self> { BytesId::from_index_checked(index).map(Self) } pub unsafe fn from_index(index: u32) -> Self { Self(BytesId::from_index(index)) } /// 0-cost conversion to interned bytes. pub fn as_bytes(self) -> BytesId { self.0 } pub fn is_empty(self) -> bool { self == Self::EMPTY } } impl fmt::Display for StringId { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.write_str(self.as_str()) } } impl fmt::Debug for StringId { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.write_str(self.as_str()) } } impl PartialOrd for StringId { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } impl Ord for StringId { fn cmp(&self, other: &Self) -> Ordering { self.as_str().cmp(other.as_str()) } } // Describes types that can be viewed as a `[u8]`. pub trait IntoUtf8Bytes: Sized { type Bytes: Into<SmallBytes> + Borrow<[u8]>; /// Convert into utf-8 encoded bytes while preserving the /// ownedness of the underlying storage if possible. 
fn into_bytes(self) -> Self::Bytes; } impl<'a> IntoUtf8Bytes for &'a str { type Bytes = &'a [u8]; fn into_bytes(self) -> &'a [u8] { self.as_ref() } } impl IntoUtf8Bytes for Box<str> { type Bytes = Box<[u8]>; fn into_bytes(self) -> Box<[u8]> { From::from(self) } } impl<'a> IntoUtf8Bytes for Cow<'a, str> { type Bytes = Vec<u8>; fn into_bytes(self) -> Self::Bytes { self.into_owned().into_bytes() } } impl IntoUtf8Bytes for String { type Bytes = Vec<u8>; fn into_bytes(self) -> Vec<u8> { From::from(self) } } impl<'a> IntoUtf8Bytes for &'a String { type Bytes = &'a [u8]; fn into_bytes(self) -> &'a [u8] { self.as_ref() } } impl FromStr for StringId { type Err = std::convert::Infallible; fn from_str(s: &str) -> Result<Self, Self::Err> { Ok(intern(s)) } } pub type StringIdMap<V> = HashMap<StringId, V, BuildIdHasher<u32>>; pub type StringIdSet = HashSet<StringId, BuildIdHasher<u32>>; // A copy-on-write object that can be an interned string, a reference to a // string, or an owned but uninterned string. 
#[derive(Debug)] pub enum CowStringId<'a> { Id(StringId), Ref(&'a str), Owned(String), } impl CowStringId<'_> { pub fn as_bytes(&self) -> &[u8] { self.as_str().as_bytes() } pub fn as_str(&self) -> &str { match self { CowStringId::Id(s) => s.as_str(), CowStringId::Ref(s) => s, CowStringId::Owned(s) => s, } } pub fn into_owned(self) -> String { match self { CowStringId::Id(s) => s.as_str().to_owned(), CowStringId::Ref(s) => s.to_owned(), CowStringId::Owned(s) => s, } } pub fn to_string_id(&self) -> StringId { match self { CowStringId::Id(s) => *s, CowStringId::Ref(s) => intern(*s), CowStringId::Owned(s) => intern(s.as_str()), } } pub fn to_bytes_id(&self) -> BytesId { self.to_string_id().as_bytes() } } impl fmt::Display for CowStringId<'_> { fn fmt(&self, fmt: &mut Formatter<'_>) -> std::fmt::Result { self.as_str().fmt(fmt) } } impl From<StringId> for CowStringId<'_> { fn from(id: StringId) -> Self { CowStringId::Id(id) } } impl<'a> From<&'a str> for CowStringId<'a> { fn from(s: &'a str) -> CowStringId<'a> { CowStringId::Ref(s) } } impl<'a> From<String> for CowStringId<'a> { fn from(s: String) -> Self { CowStringId::Owned(s) } } pub fn intern<S: IntoUtf8Bytes>(s: S) -> StringId { StringId(intern_bytes(s.into_bytes())) } #[inline] pub fn intern_bytes<S>(s: S) -> BytesId where S: Into<SmallBytes> + Borrow<[u8]>, { let b: SmallBytes = s.into(); BytesId::intern(b) } /// Statically declare an interned string. #[macro_export] macro_rules! string_id { ($value:literal) => {{ static INSTANCE: $crate::string::Lazy<$crate::string::StringId> = $crate::string::Lazy::new(|| $crate::string::intern($value)); *INSTANCE }}; ($_:expr) => { compile_error!("string_id! macro can only be used with string literals.") }; } /// Statically declare some interned bytes. #[macro_export] macro_rules! 
bytes_id { ($value:literal) => {{ static INSTANCE: $crate::string::Lazy<$crate::string::BytesId> = $crate::string::Lazy::new(|| $crate::string::intern_bytes($value as &[u8])); *INSTANCE }}; ($_:expr) => { compile_error!("bytes_id! macro can only be used with literals.") }; } #[cfg(test)] mod tests { use super::*; #[test] fn simple_bytes() { // Test EMPTY first to catch race with pool init. assert_eq!(BytesId::EMPTY.as_bytes(), b""); let e = intern_bytes(&b""[..]); assert_eq!(BytesId::EMPTY, e); let ek: &'static [u8] = b""; let ee = intern_bytes(ek); assert_eq!(BytesId::EMPTY, ee); let a = intern_bytes(&b"this is interned bytes"[..]); let b = intern("this is interned bytes").as_bytes(); let c = intern_bytes(&b"this is different"[..]); assert_eq!(a, b); assert_ne!(a, c); assert_eq!(a.as_bytes(), b"this is interned bytes"); assert_eq!(c.as_bytes(), b"this is different"); } #[test] fn simple() { let a = intern("this is an interned string"); let b = intern("this is an interned string".to_string()); let c = intern("this is different"); assert_eq!(a, b); assert_ne!(a, c); assert_eq!(a.to_string(), "this is an interned string"); assert_eq!(c.to_string(), "this is different"); assert_eq!(StringId::EMPTY.to_string(), ""); } fn test_interning(strs: Vec<String>) { // Make sure interning produces the same tokens. let ids1: Vec<StringId> = strs.iter().map(|s| intern(s.clone())).collect(); let ids2: Vec<StringId> = strs.iter().map(|s| intern(s.as_ref() as &str)).collect(); assert_eq!(ids1, ids2); // Make sure they map back to the original strings. 
let strs2: Vec<String> = ids1.iter().map(|sid| sid.to_string()).collect(); assert_eq!(strs, strs2); } #[test] fn many() { let strs: Vec<String> = (0..3000).map(|n| format!("some {}", n)).collect(); test_interning(strs) } #[test] fn big() { let long = format!("{:900}", ""); let strs: Vec<String> = (0..20).map(|n| format!("{}{}", long, n)).collect(); test_interning(strs); } #[test] fn serde() { use crate::intern::DeGuard; use crate::intern::SerGuard; let original = intern("hello world"); let mut encoded = Vec::new(); let g = SerGuard::default(); bincode::serialize_into(&mut encoded, &original).unwrap(); drop(g); assert!(encoded.len() > 11); let g = DeGuard::default(); let decoded: StringId = bincode::deserialize(&encoded).unwrap(); drop(g); assert_eq!(original, decoded); } #[test] fn multithreaded() { use std::sync::atomic::AtomicU32; use std::sync::atomic::Ordering; use std::sync::Arc; use std::thread; use std::u32; use rand::thread_rng; use rand::Rng; // Load test lots of threads creating strings, with load // gradually getting heavier on later (popular) strings. const N: usize = 20_000_000; const WRITERS: usize = 100; const MAX: usize = N / WRITERS; // Array to track index issued to each string. 
let mut avail: Arc<Vec<AtomicU32>> = Arc::new(Vec::with_capacity(MAX)); Arc::get_mut(&mut avail) .unwrap() .resize_with(N, || AtomicU32::new(u32::MAX)); let mut workers = Vec::new(); for k in 0..WRITERS { let avail = avail.clone(); workers.push(thread::spawn(move || { let mut rng = thread_rng(); for i in 0..MAX { let r = if k == 0 { i } else { rng.gen_range(i..MAX) }; let id = intern(r.to_string()); let ix = id.0.index(); let av = avail[r].load(Ordering::Relaxed); if av == u32::MAX { avail[r].store(ix, Ordering::Relaxed); } else { assert_eq!(av, ix); } } })); } for w in workers { w.join().unwrap(); } } #[test] fn all_kinds_of_bytes() { let d: &[u8] = b"Hello"; let e: Vec<u8> = d.into(); let f: Box<[u8]> = d.into(); let di = intern_bytes(d); let ei = intern_bytes(e); let fi = intern_bytes(f); assert_eq!(di, ei); assert_eq!(di, fi); } #[test] fn all_kinds_of_strings() { let a: &str = "Hello"; let b: String = a.into(); let c: Box<str> = a.into(); let ai = intern(a); let bi = intern(b); let ci = intern(c); assert_eq!(ai, bi); assert_eq!(ai, ci); } }
Rust
hhvm/hphp/hack/src/utils/intern/src/string_key.rs
/* * Copyright (c) Meta Platforms, Inc. and affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ use std::collections::HashMap; use std::collections::HashSet; use std::fmt; use std::fmt::Formatter; use std::str::FromStr; use indexmap::IndexMap; use serde::Deserialize; use serde::Deserializer; use serde::Serialize; use serde::Serializer; use crate::idhasher::BuildIdHasher; use crate::string; use crate::string::IntoUtf8Bytes; use crate::string::StringId; // StringKey is a small impedence matcher around StringId. // NOTE in particular that it does NOT do de-duplicating serde. #[derive(Copy, Clone, Eq, PartialEq, Hash, Ord, PartialOrd)] #[repr(transparent)] pub struct StringKey(StringId); pub type StringKeyMap<V> = HashMap<StringKey, V, BuildIdHasher<u32>>; pub type StringKeySet = HashSet<StringKey, BuildIdHasher<u32>>; pub type StringKeyIndexMap<V> = IndexMap<StringKey, V, BuildIdHasher<u32>>; pub trait Intern: IntoUtf8Bytes { fn intern(self) -> StringKey { StringKey(string::intern(self)) } } impl<T: IntoUtf8Bytes> Intern for T {} impl StringKey { pub fn lookup(self) -> &'static str { self.0.as_str() } pub fn index(self) -> u32 { self.0.index() } pub fn from_index_checked(index: u32) -> Option<Self> { StringId::from_index_checked(index).map(Self) } pub unsafe fn from_index(index: u32) -> Self { Self(StringId::from_index(index)) } } impl fmt::Display for StringKey { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { write!(f, "{}", self.lookup()) } } impl fmt::Debug for StringKey { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { write!(f, "{:?}", self.lookup()) } } impl Serialize for StringKey { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { serializer.serialize_str(self.lookup()) } } impl<'de> Deserialize<'de> for StringKey { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { 
Deserialize::deserialize(deserializer).map(|s: String| s.intern()) } } impl FromStr for StringKey { type Err = std::convert::Infallible; fn from_str(s: &str) -> Result<Self, Self::Err> { Ok(s.intern()) } } #[macro_export] macro_rules! intern { ($value:literal) => {{ use $crate::string::Lazy; use $crate::string_key::Intern; static INSTANCE: Lazy<$crate::string_key::StringKey> = Lazy::new(|| $value.intern()); *INSTANCE }}; ($_:expr) => { compile_error!("intern! macro can only be used with string literals.") }; }
hhvm/hphp/hack/src/utils/jsonrpc/dune
(library (name jsonrpc) (wrapped false) (libraries buffered_line_reader hh_json http_lite marshal_tools marshal_tools_lwt sys_utils) (preprocess (pps lwt_ppx ppx_deriving.std ppx_deriving.enum)))
OCaml
hhvm/hphp/hack/src/utils/jsonrpc/jsonrpc.ml
(* Wrapper for handling JSON-RPC *) (* Spec: http://www.jsonrpc.org/specification *) (* Practical readbable guide: https://github.com/Microsoft/language-server-protocol/blob/master/protocol.md#base-protocol-json-structures *) open Hh_prelude (************************************************************** HOW THIS ALL WORKS. GOAL 1: is to always be ready to read on stdin so we can timestamp accurately the moment that a request was delivered to us. If our stdin pipe gets so full that a client can't write requests to us, or if we're busy doing something else before we read on stdin, both things would result in incorrect timestamps. GOAL 2: when we process a message, then all subsequent messages that have already been presented to our stdin should already be in our own queue data-structure (rather than an OS pipe buffer) in case any of the messages involved cancellation. GOAL 3: we're in ocaml, so our threading possibilties are limited, and unfortunately our caller has cpu-blocking chunks of code. We kick off single background process called "daemon", running a loop in [internal_run_daemon]. It will take in messages from stdin, queue them up in its queue `let messages_to_send = Queue.create ()`, and write them over a pipe to the main (calling) process. See type [daemon_next_action] how it choses what to do. Callers of this library will invoke an Lwt API [get_message]. This maintains its own queue [queue.messages] of items that it has so far received over the pipe from the daemon. When a caller invokes [get_message] then we block if necessary until at least one message has come over the pipe into the queue, but we also slurp up any further messages that have come over the pipe as well. This way, if the client calls [find_already_queued_message] then they have a better chance of success. CARE!!! Jsonrpc is vulnerable to incomplete requests and malformed Content-length headers... 
The way it works around lack of threading in ocaml is with the assumption that if any data is available on stdin then a complete jsonrpc request can be read from stdin. If this is violated e.g. if the Content-length header is one byte short, then Jsonrpc will read a json string that lacks the final }, and will report this as a recoverable error (malformed json). Next, Jsonrpc will see that there is more data available on stdin, namely that final }, and so will block until a header+body has been read on stdin -- but nothing further will come beyond that }, so it blocks indefinitely. The only solution is to take care that Content-length is exact! ***************************************************************) type writer = Hh_json.json -> unit type timestamped_json = { json: Hh_json.json; timestamp: float; } (** These messages are the ones stored in the daemon's internal queue, that are marshalled between daemon and main process, that are stored in the main process queue, and that are handed to callers. *) type queue_message = | Timestamped_json of timestamped_json | Fatal_exception of Marshal_tools.remote_exception_data | Recoverable_exception of Marshal_tools.remote_exception_data (** This is the abstraction that callers use to get messages. It resides in the caller's process. The fd is where we read from the pipe with the daemon, and the queue holds all messages that we've read from the daemon's pipe so far. *) type t = { daemon_in_fd: Unix.file_descr; (* fd used by main process to read messages from queue *) messages: queue_message Queue.t; } (** The daemon uses a 'select' syscall. It has to deal with stdin pipe of messages from client that it has to read whenever available (but which might be blocked if the client hasn't yet provided any further messages); it has to deal with its queue of messages which it wants to write to the main process (but not if such a write would be blocking). 
This type says which option it will choose based on
    (1) what the 'select' syscall says is available,
    (2) its own further logic. *)
type daemon_next_action =
  | Daemon_end_due_to_stdin_eof_and_empty_queue
      (** We received an EOF on stdin, and our queue is empty, so the daemon
          has nothing left to do. *)
  | Daemon_write_to_main_process_pipe
      (** There is no data to be read from stdin, and there are items in the
          daemon queue, and the pipe to the main process is open enough for
          us to write them without blocking. *)
  | Daemon_read_from_stdin
      (** EITHER there is data to be read from stdin so we prioritize that
          above all else, OR there are no items in the daemon queue so we
          might as well block on stdin until something arrives. *)

(* Try to read a message from the daemon's stdin, which is where all of the
   editor messages can be read from. May throw if the message is malformed. *)
let internal_read_message (reader : Buffered_line_reader.t) : timestamped_json
    =
  let message = reader |> Http_lite.read_message_utf8 in
  let json = Hh_json.json_of_string message in
  (* Timestamp immediately after the read: accurate receive-time stamps are
     the whole reason this work happens in a dedicated daemon (see below). *)
  let timestamp = Unix.gettimeofday () in
  { json; timestamp }

(* Reads messages from the editor on stdin, parses them, and sends them to the
   main process. This runs in a different process because we also timestamp
   the messages, so we need to read them as soon as they come in. That is, we
   can't wait for any server computation to finish if we want to get an
   accurate timestamp. *)
let internal_run_daemon' (oc : queue_message Daemon.out_channel) : unit =
  let out_fd = Daemon.descr_of_out_channel oc in
  let reader = Buffered_line_reader.create Unix.stdin in
  (* Messages parsed from stdin but not yet flushed to the main process. *)
  let messages_to_send = Queue.create () in
  (* [allowed_to_read] goes false once stdin hits EOF (or a fatal read error);
     after that we only drain [messages_to_send]. *)
  let rec loop ~allowed_to_read : unit =
    let daemon_next_action =
      (* Buffered input takes priority: 'select' below would not report it. *)
      if Buffered_line_reader.has_buffered_content reader then
        Daemon_read_from_stdin
      else
        let read_fds =
          if allowed_to_read then
            [Unix.stdin]
          else
            []
        in
        let write_fds =
          if not (Queue.is_empty messages_to_send) then
            [out_fd]
          else
            []
        in
        if List.is_empty read_fds && List.is_empty write_fds then
          Daemon_end_due_to_stdin_eof_and_empty_queue
        else
          (* An indefinite wait until we're able to either read or write.
             Reading will always take priority. *)
          let (readable_fds, _, _) = Unix.select read_fds write_fds [] (-1.0) in
          let ready_for_read = not (List.is_empty readable_fds) in
          if ready_for_read then
            Daemon_read_from_stdin
          else
            Daemon_write_to_main_process_pipe
    in
    let (should_continue, allowed_to_read) =
      match daemon_next_action with
      | Daemon_read_from_stdin ->
        (try
           let timestamped_json = internal_read_message reader in
           Queue.enqueue messages_to_send (Timestamped_json timestamped_json);
           (true, allowed_to_read)
         with
        | exn ->
          let e = Exception.wrap exn in
          let edata = Marshal_tools.of_exception e in
          (* A JSON syntax error is recoverable (we can keep reading further
             messages); EOF or anything else means we must stop reading, but
             we still loop so the queued error report gets written out. *)
          let (allowed_to_read, message) =
            match exn with
            | Hh_json.Syntax_error _ -> (true, Recoverable_exception edata)
            | End_of_file
            | _ ->
              (false, Fatal_exception edata)
          in
          Queue.enqueue messages_to_send message;
          (true, allowed_to_read))
      | Daemon_write_to_main_process_pipe ->
        assert (not (Queue.is_empty messages_to_send));
        let message = Queue.dequeue_exn messages_to_send in
        (* We can assume that the entire write will succeed, since otherwise
           Marshal_tools.to_fd_with_preamble will throw an exception. *)
        Marshal_tools.to_fd_with_preamble out_fd message |> ignore;
        (true, allowed_to_read)
      | Daemon_end_due_to_stdin_eof_and_empty_queue -> (false, false)
    in
    if should_continue then
      loop ~allowed_to_read
    else
      ()
  in
  loop ~allowed_to_read:true;
  ()

(* Main function for the daemon process. *)
let internal_run_daemon
    (_dummy_param : unit) (_ic, (oc : queue_message Daemon.out_channel)) =
  Printexc.record_backtrace true;
  try internal_run_daemon' oc with
  | exn ->
    let e = Exception.wrap exn in
    (* An exception that's gotten here is not simply a parse error, but
       something else, so we should terminate the daemon at this point. *)
    (try
       let out_fd = Daemon.descr_of_out_channel oc in
       Marshal_tools.to_fd_with_preamble
         out_fd
         (Fatal_exception (Marshal_tools.of_exception e))
       |> ignore
     with
    | _ ->
      (* There may be a broken pipe, for example. We should just give up on
         reporting the error. *)
      ())

let internal_entry_point : (unit, unit, queue_message) Daemon.entry =
  Daemon.register_entry_point "Jsonrpc" internal_run_daemon

(************************************************)
(* Queue functions that run in the main process *)
(************************************************)

let make_t () : t =
  let handle =
    Daemon.spawn
      ~channel_mode:`pipe
      (* We don't technically need to inherit stdout or stderr, but this might
         be useful in the event that we throw an unexpected exception in the
         daemon. It's also useful for print-statement debugging of the
         daemon. *)
      (Unix.stdin, Unix.stdout, Unix.stderr)
      internal_entry_point
      ()
  in
  let (ic, _) = handle.Daemon.channels in
  { daemon_in_fd = Daemon.descr_of_in_channel ic; messages = Queue.create () }

(* Read a message into the queue, and return the just-read message. *)
let read_single_message_into_queue_wait (t : t) : queue_message Lwt.t =
  let%lwt message =
    try%lwt
      let%lwt message =
        Marshal_tools_lwt.from_fd_with_preamble
          (Lwt_unix.of_unix_file_descr t.daemon_in_fd)
      in
      Lwt.return message
    with
    | (End_of_file | Unix.Unix_error (Unix.EBADF, _, _)) as exn ->
      let e = Exception.wrap exn in
      (* This is different from when the client hangs up. It handles the case
         that the daemon process exited: for example, if it was killed. *)
      Lwt.return (Fatal_exception (Marshal_tools.of_exception e))
  in
  Queue.enqueue t.messages message;
  Lwt.return message

(* Drain everything currently readable from the daemon pipe into [t.messages]
   without blocking. Stops recursing after a [Fatal_exception] so a dead
   daemon's perpetually-"readable" fd can't cause an infinite loop. *)
let rec read_messages_into_queue_no_wait (t : t) : unit Lwt.t =
  let is_readable =
    Lwt_unix.readable (Lwt_unix.of_unix_file_descr t.daemon_in_fd)
  in
  let%lwt () =
    if is_readable then
      (* We're expecting this not to block because we just checked to make
         sure that there's something there. *)
      let%lwt message = read_single_message_into_queue_wait t in
      (* Now read any more messages that might be queued up. Only try to read
         more messages if the daemon is still available to read from.
         Otherwise, we may infinite loop as a result of `Unix.select`
         returning that a file descriptor is available to read on. *)
      match message with
      | Fatal_exception _ -> Lwt.return_unit
      | _ ->
        let%lwt () = read_messages_into_queue_no_wait t in
        Lwt.return_unit
    else
      Lwt.return_unit
  in
  Lwt.return_unit

(* True if a message is already queued, or if the daemon pipe has pending
   data that would yield one. *)
let has_message (t : t) : bool =
  let is_readable =
    Lwt_unix.readable (Lwt_unix.of_unix_file_descr t.daemon_in_fd)
  in
  is_readable || not (Queue.is_empty t.messages)

let await_until_message (t : t) =
  (* The next message will come either from the queue or (if it's empty) then
     from some data coming in from [daemon_in_fd]. *)
  if Queue.is_empty t.messages then
    `Wait_for_data_here t.daemon_in_fd
  else
    `Already_has_message

(* First already-queued [Timestamped_json] satisfying [f], if any; skips
   exception entries. Does not read from the daemon pipe. *)
let find_already_queued_message ~(f : timestamped_json -> bool) (t : t) :
    timestamped_json option =
  Queue.fold
    ~f:(fun found message ->
      match (found, message) with
      | (Some found, _) -> Some found
      | (None, Timestamped_json message) when f message -> Some message
      | _ -> None)
    ~init:None
    t.messages

let get_message (t : t) =
  (* Read one in a blocking manner to ensure that we have one. *)
  let%lwt () =
    if Queue.is_empty t.messages then
      let%lwt (_message : queue_message) =
        read_single_message_into_queue_wait t
      in
      Lwt.return_unit
    else
      Lwt.return_unit
  in
  (* Then read any others that got queued up so that we can see the maximum
     number of messages at once for invalidation purposes. *)
  let%lwt () = read_messages_into_queue_no_wait t in
  let item = Queue.dequeue_exn t.messages in
  match item with
  | Timestamped_json timestamped_json -> Lwt.return (`Message timestamped_json)
  | Fatal_exception data -> Lwt.return (`Fatal_exception data)
  | Recoverable_exception data -> Lwt.return (`Recoverable_exception data)

(************************************************)
(* Output functions for request *)
(************************************************)

(* Monotonically increasing id for outgoing JSON-RPC requests. *)
let requests_counter : IMap.key ref = ref 0

let get_next_request_id () : int =
  incr requests_counter;
  !requests_counter
OCaml Interface
hhvm/hphp/hack/src/utils/jsonrpc/jsonrpc.mli
(*
 * Copyright (c) 2017, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the "hack" directory of this source tree.
 *
 *)

(** A callback that transmits one JSON message to the peer. *)
type writer = Hh_json.json -> unit

(** A JSON message paired with the [Unix.gettimeofday] timestamp taken when
    it was read off stdin. *)
type timestamped_json = {
  json: Hh_json.json;
  timestamp: float;
}

(* Abstract handle to the reader daemon and its queue of pending messages. *)
type t

(** must call Daemon.entry_point at start of your main *)
val make_t : unit -> t

(** says whether there's already an item on the queue, or stdin is readable
    meaning that there's something pending on it *)
val has_message : t -> bool

(** similar to [has_message], but can be used to power a 'select' syscall
    which will fire when a message is available. *)
val await_until_message :
  t -> [> `Already_has_message | `Wait_for_data_here of Unix.file_descr ]

(** says whether the things we've already enqueued from stdin contain a
    message that matches the predicate *)
val find_already_queued_message :
  f:(timestamped_json -> bool) -> t -> timestamped_json option

(* Blocks until a message (or error) is available, then dequeues it. *)
val get_message :
  t ->
  [> `Message of timestamped_json
  | `Fatal_exception of Marshal_tools.remote_exception_data
  | `Recoverable_exception of Marshal_tools.remote_exception_data
  ]
  Lwt.t

(* Returns a fresh id for an outgoing request; increases on every call. *)
val get_next_request_id : unit -> int
TOML
hhvm/hphp/hack/src/utils/line_break_map/Cargo.toml
# @generated by autocargo [package] name = "line_break_map" version = "0.0.0" edition = "2021" [lib] path = "../line_break_map.rs"
TOML
hhvm/hphp/hack/src/utils/lint/Cargo.toml
# @generated by autocargo [package] name = "lint_rust" version = "0.0.0" edition = "2021" [lib] path = "lint_rust.rs" [dependencies] arena_deserializer = { version = "0.0.0", path = "../arena_deserializer" } arena_trait = { version = "0.0.0", path = "../../arena_trait" } no_pos_hash = { version = "0.0.0", path = "../no_pos_hash" } ocamlrep = { version = "0.1.0", git = "https://github.com/facebook/ocamlrep/", branch = "main" } oxidized = { version = "0.0.0", path = "../../oxidized" } rc_pos = { version = "0.0.0", path = "../rust/pos" } serde = { version = "1.0.176", features = ["derive", "rc"] }
hhvm/hphp/hack/src/utils/lint/dune
(library (name utils_lint) (wrapped false) (preprocess (pps ppx_deriving.std)) (libraries typing_ast lints_core pos))
OCaml
hhvm/hphp/hack/src/utils/lint/lint.ml
(*
 * Copyright (c) 2015, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the "hack" directory of this source tree.
 *
 *)

(* Convenience constructors for the individual lints. Codes come from
   Lint_codes; [add]/[add_lint] come from Lints_core via [open]. *)
open Lints_core
module Codes = Lint_codes

let spf = Printf.sprintf

(* Code 0 is not a Lint_codes value: it marks internal errors of the linter
   itself. *)
let internal_error pos msg = add 0 Lint_error pos ("Internal error: " ^ msg)

(* Builds (but does not register) the lowercase-constant lint, so callers can
   inspect or batch it; [lowercase_constant] below registers it. *)
let mk_lowercase_constant pos cst =
  let lower = String.lowercase_ascii cst in
  Lints_core.
    {
      code = Codes.to_enum Codes.LowercaseConstant;
      severity = Lint_warning;
      pos;
      message =
        spf
          "Please use %s instead of %s"
          (Markdown_lite.md_codify lower)
          (Markdown_lite.md_codify cst);
      bypass_changed_lines = false;
      autofix = None;
      check_status = None;
    }

let lowercase_constant pos cst = add_lint (mk_lowercase_constant pos cst)

let use_collection_literal pos coll =
  (* Strip the leading namespace so the message shows the short name. *)
  let coll = Utils.strip_ns coll in
  add
    (Codes.to_enum Codes.UseCollectionLiteral)
    Lint_warning
    pos
    ("Use "
    ^ Markdown_lite.md_codify (coll ^ " {...}")
    ^ " instead of "
    ^ Markdown_lite.md_codify ("new " ^ coll ^ "(...)"))

(* [no_consts]: when true, the message does not offer string constants as an
   acceptable alternative. *)
let static_string ?(no_consts = false) pos =
  add
    (Codes.to_enum Codes.StaticString)
    Lint_warning
    pos
    begin
      if no_consts then
        "This should be a string literal so that lint can analyze it."
      else
        "This should be a string literal or string constant so that lint can "
        ^ "analyze it."
    end

let shape_idx_access_required_field field_pos name =
  add
    (Codes.to_enum Codes.ShapeIdxRequiredField)
    Lint_warning
    field_pos
    ("The field "
    ^ Markdown_lite.md_codify name
    ^ " is required to exist in the shape. Consider using a subscript-expression instead, such as "
    ^ Markdown_lite.md_codify ("$myshape['" ^ name ^ "']"))

(* [verb] is spliced into the message, e.g. "extend" or "implement". *)
let sealed_not_subtype verb parent_pos parent_name child_name child_kind =
  let parent_name = Utils.strip_ns parent_name in
  let child_name = Utils.strip_ns child_name in
  add
    (Codes.to_enum Codes.SealedNotSubtype)
    Lint_error
    parent_pos
    (child_kind
    ^ " "
    ^ Markdown_lite.md_codify child_name
    ^ " in sealed allowlist for "
    ^ Markdown_lite.md_codify parent_name
    ^ ", but does not "
    ^ verb
    ^ " "
    ^ Markdown_lite.md_codify parent_name)

let option_mixed pos =
  add
    (Codes.to_enum Codes.OptionMixed)
    Lint_warning
    pos
    "`?mixed` is a redundant typehint - just use `mixed`"

let option_null pos =
  add
    (Codes.to_enum Codes.OptionNull)
    Lint_warning
    pos
    "`?null` is a redundant typehint - just use `null`"
OCaml Interface
hhvm/hphp/hack/src/utils/lint/lint.mli
(*
 * Copyright (c) 2015, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the "hack" directory of this source tree.
 *
 *)

(** Registers an internal-linter-error lint (code 0) at the given position. *)
val internal_error : Pos.t -> string -> unit

(** Registers a warning that the named constant should be lowercase. *)
val lowercase_constant : Pos.t -> string -> unit

(** Builds the lowercase-constant lint without registering it. *)
val mk_lowercase_constant : Pos.t -> string -> Pos.t Lints_core.t

(** Suggests `Coll {...}` literal syntax instead of `new Coll(...)`. *)
val use_collection_literal : Pos.t -> string -> unit

(** Asks for a string literal (or constant, unless [no_consts]) so lint can
    analyze the value. *)
val static_string : ?no_consts:bool -> Pos.t -> unit

(** Warns that Shapes::idx is used on a field required to exist. *)
val shape_idx_access_required_field : Pos.t -> string -> unit

(** Error: a type in a sealed allowlist does not actually subtype the parent.
    Arguments: verb, parent pos, parent name, child name, child kind. *)
val sealed_not_subtype : string -> Pos.t -> string -> string -> string -> unit

(** Warns that `?mixed` is redundant. *)
val option_mixed : Pos.t -> unit

(** Warns that `?null` is redundant. *)
val option_null : Pos.t -> unit
Rust
hhvm/hphp/hack/src/utils/lint/lint.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. // // @generated <<SignedSource::*O*zOeWoEQle#+L!plEphiEmie@IsG>> // // To regenerate this file, run: // hphp/hack/src/oxidized_regen.sh use arena_trait::TrivialDrop; use no_pos_hash::NoPosHash; use ocamlrep::FromOcamlRep; use ocamlrep::FromOcamlRepIn; use ocamlrep::ToOcamlRep; use serde::Deserialize; use serde::Serialize; #[allow(unused_imports)] use crate::*; /// These severity levels are based on those provided by Arcanist. "Advice" /// means notify the user of the lint without requiring confirmation if the lint /// is benign; "Warning" will raise a confirmation prompt if the lint applies to /// a line that was changed in the given diff; and "Error" will always raise a /// confirmation prompt, regardless of where the lint occurs in the file. #[derive( Clone, Copy, Debug, Deserialize, Eq, FromOcamlRep, FromOcamlRepIn, Hash, NoPosHash, Ord, PartialEq, PartialOrd, Serialize, ToOcamlRep, )] #[rust_to_ocaml(attr = "deriving show")] #[repr(u8)] pub enum Severity { #[rust_to_ocaml(name = "Lint_error")] LintError, #[rust_to_ocaml(name = "Lint_warning")] LintWarning, #[rust_to_ocaml(name = "Lint_advice")] LintAdvice, } impl TrivialDrop for Severity {} arena_deserializer::impl_deserialize_in_arena!(Severity); #[derive( Clone, Debug, Deserialize, Eq, FromOcamlRep, Hash, NoPosHash, Ord, PartialEq, PartialOrd, Serialize, ToOcamlRep, )] #[rust_to_ocaml(attr = "deriving show")] #[repr(C)] pub struct LintsCore<Pos> { pub code: isize, pub severity: Severity, #[rust_to_ocaml(attr = "opaque")] pub pos: Pos, pub message: String, /// Normally, lint warnings and lint advice only get shown by arcanist if the /// lines they are raised on overlap with lines changed in a diff. 
This /// flag bypasses that behavior pub bypass_changed_lines: bool, pub autofix: Option<(String, pos::Pos)>, pub check_status: Option<tast::CheckStatus>, }
OCaml
hhvm/hphp/hack/src/utils/lint/lint_codes.ml
(*
 * Copyright (c) 2015, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the "hack" directory of this source tree.
 *
 *)

(* Numeric lint codes. These values are externally visible identifiers
   (mirrored on the Rust side in lint_rust.rs), so existing values must never
   be renumbered. NOTE(review): 5004 is skipped — presumably a retired code;
   do not reuse it without confirming. *)
type t =
  | LowercaseConstant [@value 5001]
  | UseCollectionLiteral
  | StaticString
  | ShapeIdxRequiredField [@value 5005]
  | OptClosedShapeIdxMissingField [@value 5006]
  | SealedNotSubtype [@value 5007]
  | OptionMixed [@value 5008]
  | OptionNull [@value 5009]
(* EXTEND HERE WITH NEW VALUES IF NEEDED *)
[@@deriving enum]

(* [to_enum] is generated by [@@deriving enum] from the [@value] attributes. *)
let err_code = to_enum

(* Values 5501 - 5999 are reserved for FB-internal use *)
Rust
hhvm/hphp/hack/src/utils/lint/lint_rust.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. pub(crate) use rc_pos as pos; mod lint; pub use lint::*; use oxidized::tast; #[derive(Clone, Copy, Debug, Eq, PartialEq)] #[derive(ocamlrep::FromOcamlRep, ocamlrep::ToOcamlRep)] #[repr(isize)] pub enum LintCode { LowercaseConstant = 5001, UseCollectionLiteral, StaticString, ShapeIdxRequiredField = 5005, } pub type LintError = LintsCore<pos::Pos>; impl LintError { pub fn lowercase_constant(p: pos::Pos, cst: &str) -> Self { let lower = cst.to_ascii_lowercase(); let message = format!("Please use `{}` instead of `{}`", lower, cst); Self { code: LintCode::LowercaseConstant as isize, severity: Severity::LintWarning, pos: p, message, bypass_changed_lines: false, autofix: None, check_status: None::<tast::CheckStatus>, } } }
hhvm/hphp/hack/src/utils/linting/dune
(* -*- tuareg -*- *)

(* Dynamic dune file: selects the fb or stubs implementation of [linting]
   depending on whether the closed-source src/facebook tree is present. *)

(* Renders a dune library stanza that simply re-exports [name]_[suffix]. *)
let library_entry name suffix =
  Printf.sprintf
    "(library (name %s) (wrapped false) (modules) (libraries %s_%s))"
    name
    name
    suffix

let fb_entry name = library_entry name "fb"

let stubs_entry name = library_entry name "stubs"

let entry is_fb name =
  if is_fb then
    fb_entry name
  else
    stubs_entry name

let () =
  (* test presence of fb subfolder *)
  let current_dir = Sys.getcwd () in
  (* we are in src/utils/linting, locate src/facebook *)
  let src_dir = Filename.dirname @@ Filename.dirname current_dir in
  let fb_dir = Filename.concat src_dir "facebook" in
  (* locate src/facebook/dune *)
  let fb_dune = Filename.concat fb_dir "dune" in
  let is_fb = Sys.file_exists fb_dune in
  let linting = entry is_fb "linting" in
  Jbuild_plugin.V1.send linting
hhvm/hphp/hack/src/utils/load_script/dune
(* -*- tuareg -*- *)

(* Dynamic dune file: selects the fb or stubs implementation of
   [load_script] depending on whether src/facebook exists. *)

(* Renders a dune library stanza that simply re-exports [name]_[suffix]. *)
let library_entry name suffix =
  Printf.sprintf
    "(library (name %s) (wrapped false) (modules) (libraries %s_%s))"
    name
    name
    suffix

let fb_entry name = library_entry name "fb"

let stubs_entry name = library_entry name "stubs"

let entry is_fb name =
  if is_fb then
    fb_entry name
  else
    stubs_entry name

let () =
  (* test presence of fb subfolder *)
  let current_dir = Sys.getcwd () in
  (* we are in src/utils/load_script, locate src/facebook *)
  let src_dir = Filename.dirname @@ Filename.dirname current_dir in
  let fb_dir = Filename.concat src_dir "facebook" in
  (* locate src/facebook/dune *)
  let fb_dune = Filename.concat fb_dir "dune" in
  let is_fb = Sys.file_exists fb_dune in
  let load_script = entry is_fb "load_script" in
  Jbuild_plugin.V1.send load_script
hhvm/hphp/hack/src/utils/logging/dune
(* -*- tuareg -*- *)

(* Dynamic dune file: selects the fb or stubs implementation of [logging]
   depending on whether src/facebook exists. *)

(* Renders a dune library stanza that simply re-exports [name]_[suffix]. *)
let library_entry name suffix =
  Printf.sprintf
    "(library (name %s) (wrapped false) (modules) (libraries %s_%s))"
    name
    name
    suffix

let fb_entry name = library_entry name "fb"

let stubs_entry name = library_entry name "stubs"

let entry is_fb name =
  if is_fb then
    fb_entry name
  else
    stubs_entry name

let () =
  (* test presence of fb subfolder *)
  let current_dir = Sys.getcwd () in
  (* we are in src/utils/logging, locate src/facebook *)
  let src_dir = Filename.dirname @@ Filename.dirname current_dir in
  let fb_dir = Filename.concat src_dir "facebook" in
  (* locate src/facebook/dune *)
  let fb_dune = Filename.concat fb_dir "dune" in
  let is_fb = Sys.file_exists fb_dune in
  let logging = entry is_fb "logging" in
  Jbuild_plugin.V1.send @@ String.concat "\n" [ logging ]
hhvm/hphp/hack/src/utils/logging/common/dune
(* -*- tuareg -*- *)

(* Dynamic dune file: selects the fb or stubs implementation of
   [logging_common] depending on whether src/facebook exists. Note: this file
   lives one level deeper than its siblings, hence three [dirname] hops. *)

(* Renders a dune library stanza that simply re-exports [name]_[suffix]. *)
let library_entry name suffix =
  Printf.sprintf
    "(library (name %s) (wrapped false) (modules) (libraries %s_%s))"
    name
    name
    suffix

let fb_entry name = library_entry name "fb"

let stubs_entry name = library_entry name "stubs"

let entry is_fb name =
  if is_fb then
    fb_entry name
  else
    stubs_entry name

let () =
  (* test presence of fb subfolder *)
  let current_dir = Sys.getcwd () in
  (* we are in src/utils/logging/common, locate src/facebook *)
  let src_dir = Filename.(dirname @@ dirname @@ dirname current_dir) in
  let fb_dir = Filename.concat src_dir "facebook" in
  (* locate src/facebook/dune *)
  let fb_dune = Filename.concat fb_dir "dune" in
  let is_fb = Sys.file_exists fb_dune in
  let logging_common = entry is_fb "logging_common" in
  Jbuild_plugin.V1.send logging_common
hhvm/hphp/hack/src/utils/lsp/dune
(library (name lsp) (wrapped false) (libraries base file_content file_url hh_json jsonrpc logging utils_core symbol pos relative_path) (preprocess (pps lwt_ppx ppx_deriving.std ppx_deriving.enum ppx_deriving.eq ppx_deriving.show)))
OCaml
hhvm/hphp/hack/src/utils/lsp/lsp.ml
(* * Copyright (c) 2016, Facebook, Inc. * All rights reserved. * * This source code is licensed under the MIT license found in the * LICENSE file in the "hack" directory of this source tree. * *) type lsp_id = | NumberId of int | StringId of string type partial_result_token = PartialResultToken of string type documentUri = DocumentUri of string [@@deriving eq] let uri_of_string (s : string) : documentUri = DocumentUri s let string_of_uri (DocumentUri s) : string = s type position = { line: int; character: int; } [@@deriving eq] type range = { start: position; end_: position; } [@@deriving eq] type textDocumentSaveReason = | Manual [@value 1] | AfterDelay [@value 2] | FocusOut [@value 3] [@@deriving enum] module Location = struct type t = { uri: documentUri; range: range; } [@@deriving eq] end module DefinitionLocation = struct type t = { location: Location.t; title: string option; } end type markedString = | MarkedString of string | MarkedCode of string * string module Command = struct type t = { title: string; command: string; arguments: Hh_json.json list; } end module TextEdit = struct type t = { range: range; newText: string; } end module TextDocumentIdentifier = struct type t = { uri: documentUri } end module VersionedTextDocumentIdentifier = struct type t = { uri: documentUri; version: int; } end module TextDocumentEdit = struct type t = { textDocument: VersionedTextDocumentIdentifier.t; edits: TextEdit.t list; } end module WorkspaceEdit = struct type t = { changes: TextEdit.t list SMap.t } end module TextDocumentItem = struct type t = { uri: documentUri; languageId: string; version: int; text: string; } end module CodeLens = struct type t = { range: range; command: Command.t; data: Hh_json.json option; } end module TextDocumentPositionParams = struct type t = { textDocument: TextDocumentIdentifier.t; position: position; } end module DocumentFilter = struct type t = { language: string option; scheme: string option; pattern: string option; } end module 
DocumentSelector = struct type t = DocumentFilter.t list end module SymbolInformation = struct type symbolKind = | File [@value 1] | Module [@value 2] | Namespace [@value 3] | Package [@value 4] | Class [@value 5] | Method [@value 6] | Property [@value 7] | Field [@value 8] | Constructor [@value 9] | Enum [@value 10] | Interface [@value 11] | Function [@value 12] | Variable [@value 13] | Constant [@value 14] | String [@value 15] | Number [@value 16] | Boolean [@value 17] | Array [@value 18] | Object [@value 19] | Key [@value 20] | Null [@value 21] | EnumMember [@value 22] | Struct [@value 23] | Event [@value 24] | Operator [@value 25] | TypeParameter [@value 26] [@@deriving enum] type t = { name: string; kind: symbolKind; location: Location.t; containerName: string option; } end module CallHierarchyItem = struct type t = { name: string; kind: SymbolInformation.symbolKind; detail: string option; uri: documentUri; range: range; selectionRange: range; } end module CallHierarchyCallsRequestParam = struct type t = { item: CallHierarchyItem.t } end module MessageType = struct type t = | ErrorMessage [@value 1] | WarningMessage [@value 2] | InfoMessage [@value 3] | LogMessage [@value 4] [@@deriving eq, enum] end module CodeActionKind = struct (** CodeActionKind.t uses a pair to represent a non-empty list and we provide utility functions for creation, membership, printing.*) type t = string * string list let is_kind : t -> t -> bool = let rec is_prefix_of ks xs = match (ks, xs) with | ([], _) -> true | (k :: ks, x :: xs) when String.equal k x -> is_prefix_of ks xs | (_, _) -> false in (fun (k, ks) (x, xs) -> String.equal k x && is_prefix_of ks xs) let contains_kind k ks = List.exists (is_kind k) ks let contains_kind_opt ~default k ks = match ks with | Some ks -> contains_kind k ks | None -> default let kind_of_string : string -> t = fun s -> match String.split_on_char '.' 
s with | [] -> failwith "split_on_char does not return an empty list" | k :: ks -> (k, ks) let string_of_kind : t -> string = (fun (k, ks) -> String.concat "." (k :: ks)) let sub_kind : t -> string -> t = let cons_to_end (ss : string list) (s : string) = Base.List.(fold_right ss ~f:cons ~init:[s]) in (fun (k, ks) s -> (k, cons_to_end ks s)) let quickfix = kind_of_string "quickfix" let refactor = kind_of_string "refactor" let source = kind_of_string "source" end module CancelRequest = struct type params = cancelParams and cancelParams = { id: lsp_id } end module SetTraceNotification = struct type params = | Verbose | Off end module Initialize = struct type textDocumentSyncKind = | NoSync [@value 0] | FullSync [@value 1] | IncrementalSync [@value 2] [@@deriving enum] module CodeActionOptions = struct type t = { resolveProvider: bool } end module CompletionOptions = struct type t = { resolveProvider: bool; completion_triggerCharacters: string list; } end module ServerExperimentalCapabilities = struct type t = { snippetTextEdit: bool } end module ClientExperimentalCapabilities = struct type t = { snippetTextEdit: bool } end type params = { processId: int option; rootPath: string option; rootUri: documentUri option; initializationOptions: initializationOptions; client_capabilities: client_capabilities; trace: trace; } and result = { server_capabilities: server_capabilities } and errorData = { retry: bool } and trace = | Off | Messages | Verbose and initializationOptions = { namingTableSavedStatePath: string option; namingTableSavedStateTestDelay: float; delayUntilDoneInit: bool; skipLspServerOnTypeFormatting: bool; } and client_capabilities = { workspace: workspaceClientCapabilities; textDocument: textDocumentClientCapabilities; window: windowClientCapabilities; telemetry: telemetryClientCapabilities; client_experimental: ClientExperimentalCapabilities.t; } and workspaceClientCapabilities = { applyEdit: bool; workspaceEdit: workspaceEdit; didChangeWatchedFiles: 
dynamicRegistration; } and dynamicRegistration = { dynamicRegistration: bool } and workspaceEdit = { documentChanges: bool } and textDocumentClientCapabilities = { synchronization: synchronization; completion: completion; codeAction: codeAction; definition: definition; typeDefinition: typeDefinition; declaration: declaration; implementation: implementation; } and synchronization = { can_willSave: bool; can_willSaveWaitUntil: bool; can_didSave: bool; } and completion = { completionItem: completionItem } and completionItem = { snippetSupport: bool } and codeAction = { codeAction_dynamicRegistration: bool; codeActionLiteralSupport: codeActionliteralSupport option; } and definition = { definitionLinkSupport: bool } and typeDefinition = { typeDefinitionLinkSupport: bool } and declaration = { declarationLinkSupport: bool } and implementation = { implementationLinkSupport: bool } and codeActionliteralSupport = { codeAction_valueSet: CodeActionKind.t list } and windowClientCapabilities = { status: bool } and telemetryClientCapabilities = { connectionStatus: bool } and server_capabilities = { textDocumentSync: textDocumentSyncOptions; hoverProvider: bool; completionProvider: CompletionOptions.t option; signatureHelpProvider: signatureHelpOptions option; definitionProvider: bool; typeDefinitionProvider: bool; referencesProvider: bool; callHierarchyProvider: bool; documentHighlightProvider: bool; documentSymbolProvider: bool; workspaceSymbolProvider: bool; codeActionProvider: CodeActionOptions.t option; codeLensProvider: codeLensOptions option; documentFormattingProvider: bool; documentRangeFormattingProvider: bool; documentOnTypeFormattingProvider: documentOnTypeFormattingOptions option; renameProvider: bool; documentLinkProvider: documentLinkOptions option; executeCommandProvider: executeCommandOptions option; implementationProvider: bool; rageProviderFB: bool; (** Nuclide-specific feature *) server_experimental: ServerExperimentalCapabilities.t option; } and 
signatureHelpOptions = { sighelp_triggerCharacters: string list } and codeLensOptions = { codelens_resolveProvider: bool } and documentOnTypeFormattingOptions = { firstTriggerCharacter: string; moreTriggerCharacter: string list; } and documentLinkOptions = { doclink_resolveProvider: bool } and executeCommandOptions = { commands: string list } and textDocumentSyncOptions = { want_openClose: bool; want_change: textDocumentSyncKind; want_willSave: bool; want_willSaveWaitUntil: bool; want_didSave: saveOptions option; } and saveOptions = { includeText: bool } end module Error = struct type code = | ParseError [@value -32700] | InvalidRequest [@value -32600] | MethodNotFound [@value -32601] | InvalidParams [@value -32602] | InternalError [@value -32603] | ServerErrorStart [@value -32099] | ServerErrorEnd [@value -32000] | ServerNotInitialized [@value -32002] | UnknownErrorCode [@value -32001] | RequestCancelled [@value -32800] | ContentModified [@value -32801] [@@deriving show, enum] type t = { code: code; message: string; data: Hh_json.json option; } exception LspException of t end module RageFB = struct type result = rageItem list and rageItem = { title: string option; data: string; } end module CodeLensResolve = struct type params = CodeLens.t and result = CodeLens.t end module Hover = struct type params = TextDocumentPositionParams.t and result = hoverResult option and hoverResult = { contents: markedString list; range: range option; } end module PublishDiagnostics = struct type diagnosticSeverity = | Error [@value 1] | Warning [@value 2] | Information [@value 3] | Hint [@value 4] [@@deriving eq, enum] type params = publishDiagnosticsParams and publishDiagnosticsParams = { uri: documentUri; diagnostics: diagnostic list; isStatusFB: bool; } and diagnostic = { range: range; severity: diagnosticSeverity option; code: diagnosticCode; source: string option; message: string; relatedInformation: diagnosticRelatedInformation list; relatedLocations: relatedLocation list; } 
[@@deriving eq] and diagnosticCode = | IntCode of int | StringCode of string | NoCode [@@deriving eq] and diagnosticRelatedInformation = { relatedLocation: Location.t; relatedMessage: string; } [@@deriving eq] and relatedLocation = diagnosticRelatedInformation end module DidOpen = struct type params = didOpenTextDocumentParams and didOpenTextDocumentParams = { textDocument: TextDocumentItem.t } end module DidClose = struct type params = didCloseTextDocumentParams and didCloseTextDocumentParams = { textDocument: TextDocumentIdentifier.t } end module DidSave = struct type params = didSaveTextDocumentParams and didSaveTextDocumentParams = { textDocument: TextDocumentIdentifier.t; text: string option; } end module DidChange = struct type params = didChangeTextDocumentParams and didChangeTextDocumentParams = { textDocument: VersionedTextDocumentIdentifier.t; contentChanges: textDocumentContentChangeEvent list; } and textDocumentContentChangeEvent = { range: range option; rangeLength: int option; text: string; } end module WillSaveWaitUntil = struct type params = willSaveWaitUntilTextDocumentParams and willSaveWaitUntilTextDocumentParams = { textDocument: TextDocumentIdentifier.t; reason: textDocumentSaveReason; } and result = TextEdit.t list end module DidChangeWatchedFiles = struct type registerOptions = { watchers: fileSystemWatcher list } and fileSystemWatcher = { globPattern: string } type fileChangeType = | Created [@value 1] | Updated [@value 2] | Deleted [@value 3] [@@deriving enum] type params = { changes: fileEvent list } and fileEvent = { uri: documentUri; type_: fileChangeType; } end module Definition = struct type params = TextDocumentPositionParams.t and result = DefinitionLocation.t list end module TypeDefinition = struct type params = TextDocumentPositionParams.t and result = DefinitionLocation.t list end module Implementation = struct type params = TextDocumentPositionParams.t and result = Location.t list end (* textDocument/codeAction and 
codeAction/resolve *)
module CodeAction = struct
  type 'a t = {
    title: string;
    kind: CodeActionKind.t;
    diagnostics: PublishDiagnostics.diagnostic list;
    action: 'a edit_and_or_command;
  }

  and 'a edit_and_or_command =
    | EditOnly of WorkspaceEdit.t
    | CommandOnly of Command.t
    | BothEditThenCommand of (WorkspaceEdit.t * Command.t)
    | UnresolvedEdit of 'a
        (** UnresolvedEdit is for this flow: client --textDocument/codeAction -->
            server --response_with_unresolved_fields--> client
            --codeAction/resolve --> server --response_with_all_fields--> client *)

  type 'a command_or_action_ =
    | Command of Command.t
    | Action of 'a t

  (* [resolved_marker] is an empty (uninhabited) type: instantiating
     ['a command_or_action_] with it makes the [UnresolvedEdit] arm
     impossible to construct, i.e. a resolved action is statically
     guaranteed to carry its edit and/or command. *)
  type resolved_marker = |

  type resolved_command_or_action = resolved_marker command_or_action_

  (* [unit] as the phase parameter: [UnresolvedEdit ()] is constructible,
     so the edit may still be absent and resolved later. *)
  type command_or_action = unit command_or_action_

  type result = command_or_action list
end

module CodeActionRequest = struct
  type params = {
    textDocument: TextDocumentIdentifier.t;
    range: range;
    context: codeActionContext;
  }

  and codeActionContext = {
    diagnostics: PublishDiagnostics.diagnostic list;
    only: CodeActionKind.t list option;
  }
end

module CodeActionResolve = struct
  type result = (CodeAction.resolved_command_or_action, Error.t) Result.t
end

(** method="codeAction/resolve" *)
module CodeActionResolveRequest = struct
  type params = {
    data: CodeActionRequest.params;
    title: string;
  }
end

(* Completion request, method="textDocument/completion" *)
module Completion = struct
  type completionItemKind =
    | Text [@value 1]
    | Method [@value 2]
    | Function [@value 3]
    | Constructor [@value 4]
    | Field [@value 5]
    | Variable [@value 6]
    | Class [@value 7]
    | Interface [@value 8]
    | Module [@value 9]
    | Property [@value 10]
    | Unit [@value 11]
    | Value [@value 12]
    | Enum [@value 13]
    | Keyword [@value 14]
    | Snippet [@value 15]
    | Color [@value 16]
    | File [@value 17]
    | Reference [@value 18]
    | Folder [@value 19]
    | MemberOf [@value 20]
    | Constant [@value 21]
    | Struct [@value 22]
    | Event [@value 23]
    | Operator [@value 24]
    | TypeParameter [@value 25]
  [@@deriving enum]

  type insertTextFormat =
    | PlainText [@value 1]
    | SnippetFormat [@value 2]
  [@@deriving enum]

  type completionTriggerKind =
    | Invoked [@value 1]
    | TriggerCharacter [@value 2]
    | TriggerForIncompleteCompletions [@value 3]
  [@@deriving enum]

  (* true only when the user explicitly requested completion, as opposed to
     it being triggered by a character or a re-request for incomplete lists. *)
  let is_invoked = function
    | Invoked -> true
    | TriggerCharacter
    | TriggerForIncompleteCompletions ->
      false

  type params = completionParams

  and completionParams = {
    loc: TextDocumentPositionParams.t;
    context: completionContext option;
  }

  and completionContext = {
    triggerKind: completionTriggerKind;
    triggerCharacter: string option;
  }

  and result = completionList

  and completionList = {
    isIncomplete: bool;
    items: completionItem list;
  }

  and completionDocumentation =
    | MarkedStringsDocumentation of markedString list
    | UnparsedDocumentation of Hh_json.json

  and completionItem = {
    label: string;
    kind: completionItemKind option;
    detail: string option;
    documentation: completionDocumentation option;
    sortText: string option;
    filterText: string option;
    insertText: string option;
    insertTextFormat: insertTextFormat option;
    textEdit: TextEdit.t option;
    additionalTextEdits: TextEdit.t list;
    command: Command.t option;
    data: Hh_json.json option;
  }
end

(* Resolves a previously-returned completion item; params and result are the
   same completionItem shape. *)
module CompletionItemResolve = struct
  type params = Completion.completionItem

  and result = Completion.completionItem
end

module WorkspaceSymbol = struct
  type params = workspaceSymbolParams

  and result = SymbolInformation.t list

  and workspaceSymbolParams = { query: string  (** a non-empty query string *) }
end

module DocumentSymbol = struct
  type params = documentSymbolParams

  and result = SymbolInformation.t list

  and documentSymbolParams = { textDocument: TextDocumentIdentifier.t }
end

module FindReferences = struct
  type params = referenceParams

  and result = Location.t list

  and referenceParams = {
    loc: TextDocumentPositionParams.t;
    context: referenceContext;
    partialResultToken: partial_result_token option;
  }

  and referenceContext = {
    includeDeclaration: bool;
    includeIndirectReferences: bool;
  }
end

module PrepareCallHierarchy = struct
  type params = TextDocumentPositionParams.t

  type result = CallHierarchyItem.t list option
end

module CallHierarchyIncomingCalls = struct
  type params = CallHierarchyCallsRequestParam.t

  type result = callHierarchyIncomingCall list option

  and callHierarchyIncomingCall = {
    from: CallHierarchyItem.t;
    fromRanges: range list;
  }
end

module CallHierarchyOutgoingCalls = struct
  type params = CallHierarchyCallsRequestParam.t

  type result = callHierarchyOutgoingCall list option

  and callHierarchyOutgoingCall = {
    (* "to" over the wire; renamed because [to] is an OCaml keyword. *)
    call_to: CallHierarchyItem.t;
    fromRanges: range list;
  }
end

module DocumentHighlight = struct
  type params = TextDocumentPositionParams.t

  type documentHighlightKind =
    | Text [@value 1]
    | Read [@value 2]
    | Write [@value 3]
  [@@deriving enum]

  type result = documentHighlight list

  and documentHighlight = {
    range: range;
    kind: documentHighlightKind option;
  }
end

module DocumentFormatting = struct
  type params = documentFormattingParams

  and result = TextEdit.t list

  and documentFormattingParams = {
    textDocument: TextDocumentIdentifier.t;
    options: formattingOptions;
  }

  and formattingOptions = {
    tabSize: int;
    insertSpaces: bool;
  }
end

module DocumentRangeFormatting = struct
  type params = documentRangeFormattingParams

  and result = TextEdit.t list

  and documentRangeFormattingParams = {
    textDocument: TextDocumentIdentifier.t;
    range: range;
    options: DocumentFormatting.formattingOptions;
  }
end

(** Document On Type Formatting req., method="textDocument/onTypeFormatting" *)
module DocumentOnTypeFormatting = struct
  type params = documentOnTypeFormattingParams

  and result = TextEdit.t list

  and documentOnTypeFormattingParams = {
    textDocument: TextDocumentIdentifier.t;
    position: position;
    ch: string;
    options: DocumentFormatting.formattingOptions;
  }
end

module SignatureHelp = struct
  type params = TextDocumentPositionParams.t

  and result = t option

  and t = {
    signatures: signature_information list;
    activeSignature: int;
    activeParameter: int;
  }

  and signature_information = {
    siginfo_label: string;
    siginfo_documentation: string option;
    parameters: parameter_information list;
  }

  and parameter_information = {
    parinfo_label: string;
    parinfo_documentation: string option;
  }
end

(* Document Type Hierarchy request, method="textDocument/typeHierarchy" *)
module TypeHierarchy = struct
  type params = TextDocumentPositionParams.t

  type memberKind =
    | Method [@value 1]
    | SMethod [@value 2]
    | Property [@value 3]
    | SProperty [@value 4]
    | Const [@value 5]
  [@@deriving enum]

  type memberEntry = {
    name: string;
    snippet: string;
    kind: memberKind;
    uri: documentUri;
    range: range;
    origin: string;
  }

  type entryKind =
    | Class [@value 1]
    | Interface [@value 2]
    | Enum [@value 3]
    | Trait [@value 4]
  [@@deriving enum]

  type ancestorEntry =
    | AncestorName of string
    | AncestorDetails of {
        name: string;
        kind: entryKind;
        uri: documentUri;
        range: range;
      }

  type hierarchyEntry = {
    name: string;
    uri: documentUri;
    range: range;
    kind: entryKind;
    ancestors: ancestorEntry list;
    members: memberEntry list;
  }

  type result = hierarchyEntry option
end

(* Workspace Rename request, method="textDocument/rename" *)
module Rename = struct
  type params = renameParams

  and result = WorkspaceEdit.t

  and renameParams = {
    textDocument: TextDocumentIdentifier.t;
    position: position;
    newName: string;
  }
end

(** Code Lens request, method="textDocument/codeLens" *)
module DocumentCodeLens = struct
  type params = codelensParams

  and result = CodeLens.t list

  and codelensParams = { textDocument: TextDocumentIdentifier.t }
end

module LogMessage = struct
  type params = logMessageParams

  and logMessageParams = {
    type_: MessageType.t;
    message: string;
  }
end

module ShowMessage = struct
  type params = showMessageParams

  and showMessageParams = {
    type_: MessageType.t;
    message: string;
  }
end

module ShowMessageRequest = struct
  (* Tracks whether a showMessageRequest dialog is currently outstanding
     (and under which request id) or not. *)
  type t =
    | Present of { id: lsp_id }
    | Absent

  and params = showMessageRequestParams

  and result = messageActionItem option

  and showMessageRequestParams = {
    type_: MessageType.t;
    message: string;
    actions: messageActionItem list;
  }

  and messageActionItem = { title: string }
end

module ShowStatusFB = struct
  type params = showStatusParams

  and result = unit

  (** the showStatus LSP request will be handled by our VSCode extension.
      It's a facebook-specific extension to the LSP spec. How it's rendered
      is currently [shortMessage] in the status-bar, and
      "[progress]/[total] [message]" in the tooltip. The [telemetry] field
      isn't displayed to the user, but might be useful to someone debugging
      an LSP transcript. *)
  and showStatusParams = {
    request: showStatusRequestParams;
    progress: int option;
    total: int option;
    shortMessage: string option;
    telemetry: Hh_json.json option;
  }

  and showStatusRequestParams = {
    type_: MessageType.t;
    message: string;
  }
end

module ConnectionStatusFB = struct
  type params = connectionStatusParams

  and connectionStatusParams = { isConnected: bool }
end

type lsp_registration_options =
  | DidChangeWatchedFilesRegistrationOptions of
      DidChangeWatchedFiles.registerOptions

module RegisterCapability = struct
  type params = { registrations: registration list }

  and registration = {
    id: string;
    method_: string;
    registerOptions: lsp_registration_options;
  }

  (* Derives the registration [id] and wire [method_] from the options'
     constructor, so callers only have to supply the options. *)
  let make_registration (registerOptions : lsp_registration_options) :
      registration =
    let (id, method_) =
      match registerOptions with
      | DidChangeWatchedFilesRegistrationOptions _ ->
        ("did-change-watched-files", "workspace/didChangeWatchedFiles")
    in
    { id; method_; registerOptions }
end

(**
 * Here are gathered-up ADTs for all the messages we handle
 *)

type lsp_request =
  | InitializeRequest of Initialize.params
  | RegisterCapabilityRequest of RegisterCapability.params
  | ShutdownRequest
  | CodeLensResolveRequest of CodeLensResolve.params
  | HoverRequest of Hover.params
  | DefinitionRequest of Definition.params
  | TypeDefinitionRequest of TypeDefinition.params
  | ImplementationRequest of Implementation.params
  | CodeActionRequest of CodeActionRequest.params
  | CodeActionResolveRequest of CodeActionResolveRequest.params
  | CompletionRequest of Completion.params
  | CompletionItemResolveRequest of CompletionItemResolve.params
  | WorkspaceSymbolRequest of WorkspaceSymbol.params
  | DocumentSymbolRequest of DocumentSymbol.params
  | FindReferencesRequest of FindReferences.params
  | PrepareCallHierarchyRequest of PrepareCallHierarchy.params
  | CallHierarchyIncomingCallsRequest of CallHierarchyIncomingCalls.params
  | CallHierarchyOutgoingCallsRequest of CallHierarchyOutgoingCalls.params
  | DocumentHighlightRequest of DocumentHighlight.params
  | DocumentFormattingRequest of DocumentFormatting.params
  | DocumentRangeFormattingRequest of DocumentRangeFormatting.params
  | DocumentOnTypeFormattingRequest of DocumentOnTypeFormatting.params
  | ShowMessageRequestRequest of ShowMessageRequest.params
  | ShowStatusRequestFB of ShowStatusFB.params
  | RageRequestFB
  | RenameRequest of Rename.params
  | DocumentCodeLensRequest of DocumentCodeLens.params
  | SignatureHelpRequest of SignatureHelp.params
  | TypeHierarchyRequest of TypeHierarchy.params
  | HackTestStartServerRequestFB
  | HackTestStopServerRequestFB
  | HackTestShutdownServerlessRequestFB
  | WillSaveWaitUntilRequest of WillSaveWaitUntil.params
  | UnknownRequest of string * Hh_json.json option

type lsp_result =
  | InitializeResult of Initialize.result
  | ShutdownResult
  | CodeLensResolveResult of CodeLensResolve.result
  | HoverResult of Hover.result
  | DefinitionResult of Definition.result
  | TypeDefinitionResult of TypeDefinition.result
  | ImplementationResult of Implementation.result
  (* CodeActionResult additionally carries the originating request params,
     which get serialized into the response's `data` field for later
     codeAction/resolve round-trips. *)
  | CodeActionResult of CodeAction.result * CodeActionRequest.params
  | CodeActionResolveResult of CodeActionResolve.result
  | CompletionResult of Completion.result
  | CompletionItemResolveResult of CompletionItemResolve.result
  | WorkspaceSymbolResult of WorkspaceSymbol.result
  | DocumentSymbolResult of DocumentSymbol.result
  | FindReferencesResult of FindReferences.result
  | PrepareCallHierarchyResult of PrepareCallHierarchy.result
  | CallHierarchyIncomingCallsResult of CallHierarchyIncomingCalls.result
  | CallHierarchyOutgoingCallsResult of CallHierarchyOutgoingCalls.result
  | DocumentHighlightResult of DocumentHighlight.result
  | DocumentFormattingResult of DocumentFormatting.result
  | DocumentRangeFormattingResult of DocumentRangeFormatting.result
  | DocumentOnTypeFormattingResult of DocumentOnTypeFormatting.result
  | ShowMessageRequestResult of ShowMessageRequest.result
  | ShowStatusResultFB of ShowStatusFB.result
  | RageResultFB of RageFB.result
  | RenameResult of Rename.result
  | DocumentCodeLensResult of DocumentCodeLens.result
  | SignatureHelpResult of SignatureHelp.result
  | TypeHierarchyResult of TypeHierarchy.result
  | HackTestStartServerResultFB
  | HackTestStopServerResultFB
  | HackTestShutdownServerlessResultFB
  | RegisterCapabilityRequestResult
  | WillSaveWaitUntilResult of WillSaveWaitUntil.result
  | ErrorResult of Error.t

type lsp_notification =
  | ExitNotification
  | CancelRequestNotification of CancelRequest.params
  | PublishDiagnosticsNotification of PublishDiagnostics.params
  | DidOpenNotification of DidOpen.params
  | DidCloseNotification of DidClose.params
  | DidSaveNotification of DidSave.params
  | DidChangeNotification of DidChange.params
  | DidChangeWatchedFilesNotification of DidChangeWatchedFiles.params
  | LogMessageNotification of LogMessage.params
  | TelemetryNotification of LogMessage.params * (string * Hh_json.json) list
  | ShowMessageNotification of ShowMessage.params
  | ConnectionStatusNotificationFB of ConnectionStatusFB.params
  | InitializedNotification
  | FindReferencesPartialResultNotification of
      partial_result_token * FindReferences.result
  | SetTraceNotification of SetTraceNotification.params
  | LogTraceNotification
  | UnknownNotification of string * Hh_json.json option

type lsp_message =
  | RequestMessage of lsp_id * lsp_request
  | ResponseMessage of lsp_id * lsp_result
  | NotificationMessage of lsp_notification

(* A handler pairs a result callback with an error callback; both thread a
   state value of type ['a] through. *)
type 'a lsp_handler = 'a lsp_result_handler * 'a lsp_error_handler

and 'a lsp_error_handler = Error.t * string -> 'a -> 'a

and 'a lsp_result_handler =
  | ShowMessageHandler of (ShowMessageRequest.result -> 'a -> 'a)
  | ShowStatusHandler of (ShowStatusFB.result -> 'a -> 'a)

(* Total order over [lsp_id] so ids can be used as set/map keys:
   all NumberIds sort before all StringIds. *)
module IdKey = struct
  type t = lsp_id

  let compare (x : t) (y : t) =
    match (x, y) with
    (* NOTE(review): [x - y] can overflow for ids near max_int/min_int;
       fine for realistic JSON-RPC ids, but Int.compare would be safer. *)
    | (NumberId x, NumberId y) -> x - y
    | (NumberId _, StringId _) -> -1
    | (StringId x, StringId y) -> String.compare x y
    | (StringId _, NumberId _) -> 1
end

module IdSet = Set.Make (IdKey)
module IdMap = WrappedMap.Make (IdKey)

(* Total order over [documentUri] (plain string comparison of the URI text). *)
module UriKey = struct
  type t = documentUri

  let compare (DocumentUri x) (DocumentUri y) = String.compare x y
end

module UriSet = Set.Make (UriKey)
module UriMap = WrappedMap.Make (UriKey)

(* Human-readable constructor tag for a result, for logging; payloads are
   deliberately dropped. *)
let lsp_result_to_log_string = function
  | InitializeResult _ -> "InitializeResult"
  | ShutdownResult -> "ShutdownResult"
  | CodeLensResolveResult _ -> "CodeLensResolveResult"
  | HoverResult _ -> "HoverResult"
  | DefinitionResult _ -> "DefinitionResult"
  | TypeDefinitionResult _ -> "TypeDefinitionResult"
  | ImplementationResult _ -> "ImplementationResult"
  | CodeActionResult _ -> "CodeActionResult"
  | CodeActionResolveResult _ -> "CodeActionResolveResult"
  | CompletionResult _ -> "CompletionResult"
  | CompletionItemResolveResult _ -> "CompletionItemResolveResult"
  | WorkspaceSymbolResult _ -> "WorkspaceSymbolResult"
  | DocumentSymbolResult _ -> "DocumentSymbolResult"
  | FindReferencesResult _ -> "FindReferencesResult"
  | PrepareCallHierarchyResult _ -> "PrepareCallHierarchyResult"
  | CallHierarchyIncomingCallsResult _ -> "CallHierarchyIncomingCallsResult"
  | CallHierarchyOutgoingCallsResult _ -> "CallHierarchyOutgoingCallsResult"
  | DocumentHighlightResult _ -> "DocumentHighlightResult"
  | DocumentFormattingResult _ -> "DocumentFormattingResult"
  | DocumentRangeFormattingResult _ -> "DocumentRangeFormattingResult"
  | DocumentOnTypeFormattingResult _ -> "DocumentOnTypeFormattingResult"
  | ShowMessageRequestResult _ -> "ShowMessageRequestResult"
  | ShowStatusResultFB _ -> "ShowStatusResultFB"
  | RageResultFB _ -> "RageResultFB"
  | RenameResult _ -> "RenameResult"
  | DocumentCodeLensResult _ -> "DocumentCodeLensResult"
  | SignatureHelpResult _ -> "SignatureHelpResult"
  | HackTestStartServerResultFB -> "HackTestStartServerResultFB"
  | HackTestStopServerResultFB -> "HackTestStopServerResultFB"
  | HackTestShutdownServerlessResultFB -> "HackTestShutdownServerlessResultFB"
  | RegisterCapabilityRequestResult -> "RegisterCapabilityRequestResult"
  | WillSaveWaitUntilResult _ -> "WillSaveWaitUntilResult"
  | ErrorResult _ -> "ErrorResult"
  | TypeHierarchyResult _ -> "TypeHierarchyResult"
OCaml Interface
hhvm/hphp/hack/src/utils/lsp/lsp.mli
(*
 * Copyright (c) 2019, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the "hack" directory of this source tree.
 *
 *)

(**
 * This file is an OCaml representation of the Language Server Protocol
 * https://github.com/Microsoft/language-server-protocol/blob/master/protocol.md
 * based on the current v3.
 *
 * Changes to make it more natural in OCaml:
 * - We don't represent the common base types of Requests/Errors/Notifications
 *   because base types don't naturally mix with abstract data types, and
 *   because code for these things is done more naturally at the JSON layer
 * - We avoid option types where we can. The idea is to follow the internet
 *   "robustness" rule of being liberal in what we accept, conservative in
 *   what we emit: if we're parsing a message and it lacks a field, and if
 *   the spec tells us how to interpret absence, then we do that interpretation
 *   at the JSON->LSP parsing level (so long as the interpretation is lossless).
 *   On the emitting side, we might as well emit all fields.
 * - For every request, like Initialize or workspace/Symbol, we've invented
 *   "Initialize.response = (Initialize.result, Initialize.error) Result"
 *   or "Symbol.response = (Symbol.result, Error.error) Result" to show
 *   the two possible return types from this request. Note that each different
 *   request can have its own custom error type, although most don't.
 * - Most datatypes go in modules since there are so many name-clashes in
 *   the protocol and OCaml doesn't like name-clashes. Only exceptions are
 *   the really primitive types like location and documentUri.
 *   The few places where we still had to rename fields to avoid OCaml name
 *   clashes I've noted in the comments with the word "wire" to indicate the
 *   over-the-wire form of the name.
 * - Names have been translated from jsonConvention into ocaml convention
 *   only where necessary, e.g. because ocaml uses lowercase for types.
 * - The spec has space for extra fields like "experimental". It obviously
 *   doesn't make sense to encode them in a type system. I've omitted them
 *   entirely. *)

type lsp_id =
  | NumberId of int
  | StringId of string

type partial_result_token = PartialResultToken of string

(** Note: this datatype provides no invariants that the string is well-formed. *)
type documentUri = DocumentUri of string [@@deriving eq]

val uri_of_string : string -> documentUri

val string_of_uri : documentUri -> string

(** A position is between two characters like an 'insert' cursor in an editor *)
type position = {
  line: int;  (** line position in a document [zero-based] *)
  character: int;  (** character offset on a line in a document [zero-based] *)
}
[@@deriving eq]

(** A range is comparable to a selection in an editor *)
type range = {
  start: position;  (** the range's start position *)
  end_: position;  (** the range's end position [exclusive] *)
}
[@@deriving eq]

type textDocumentSaveReason =
  | Manual [@value 1]
  | AfterDelay [@value 2]
  | FocusOut [@value 3]
[@@deriving enum]

(** Represents a location inside a resource, such as a line inside a text file *)
module Location : sig
  type t = {
    uri: documentUri;
    range: range;
  }
  [@@deriving eq]
end

(** Represents a location inside a resource which also wants to display a
    friendly name to the user. *)
module DefinitionLocation : sig
  type t = {
    location: Location.t;
    title: string option;
  }
end

(** markedString can be used to render human readable text. It is either a
 * markdown string or a code-block that provides a language and a code snippet.
 * Note that markdown strings will be sanitized by the client - including
 * escaping html *)
type markedString =
  | MarkedString of string
  | MarkedCode of string * string  (** lang, value *)

(* Represents a reference to a command. Provides a title which will be used to
 * represent a command in the UI. Commands are identified using a string
 * identifier and the protocol currently doesn't specify a set of well known
 * commands. So executing a command requires some tool extension code. *)
module Command : sig
  type t = {
    title: string;  (** title of the command, like `save` *)
    command: string;  (** the identifier of the actual command handler *)
    arguments: Hh_json.json list;  (** wire: it can be omitted *)
  }
end

(** A textual edit applicable to a text document. If n textEdits are applied
    to a text document all text edits describe changes to the initial document
    version. Execution wise text edits should be applied from the bottom to the
    top of the text document. Overlapping text edits are not supported. *)
module TextEdit : sig
  type t = {
    range: range;  (** to insert text, use a range where start = end *)
    newText: string;  (** for delete operations, use an empty string *)
  }
end

(** Text documents are identified using a URI. *)
module TextDocumentIdentifier : sig
  type t = { uri: documentUri }
end

(** An identifier to denote a specific version of a text document. *)
module VersionedTextDocumentIdentifier : sig
  type t = {
    uri: documentUri;
    version: int;
  }
end

(** Describes textual changes on a single text document. The text document is
    referred to as a VersionedTextDocumentIdentifier to allow clients to check
    the text document version before an edit is applied. *)
module TextDocumentEdit : sig
  type t = {
    textDocument: VersionedTextDocumentIdentifier.t;
    edits: TextEdit.t list;
  }
end

(** A workspace edit represents changes to many resources managed in the
    workspace. A workspace edit consists of a mapping from a URI to an array of
    TextEdits to be applied to the document with that URI. *)
module WorkspaceEdit : sig
  type t = {
    changes: TextEdit.t list SMap.t;  (* holds changes to existing docs *)
  }
end

(** An item to transfer a text document from the client to the server. The
    version number strictly increases after each change, including undo/redo. *)
module TextDocumentItem : sig
  type t = {
    uri: documentUri;
    languageId: string;
    version: int;
    text: string;
  }
end

(**
 * A code lens represents a command that should be shown along with
 * source text, like the number of references, a way to run tests, etc.
 *
 * A code lens is _unresolved_ when no command is associated to it. For performance
 * reasons the creation of a code lens and resolving should be done in two stages.
 *)
module CodeLens : sig
  type t = {
    range: range;
    command: Command.t;
    data: Hh_json.json option;
  }
end

(** A parameter literal used in requests to pass a text document and a position
    inside that document. *)
module TextDocumentPositionParams : sig
  type t = {
    textDocument: TextDocumentIdentifier.t;
    position: position;
  }
end

(** A document filter denotes a document through properties like language,
    schema or pattern. E.g. language:"typescript",scheme:"file"
    or language:"json",pattern:"**/package.json" *)
module DocumentFilter : sig
  type t = {
    language: string option;  (** a language id, like "typescript" *)
    scheme: string option;  (** a uri scheme, like "file" or "untitled" *)
    pattern: string option;  (** a glob pattern, like "*.{ts,js}" *)
  }
end

(** A document selector is the combination of one or many document filters. *)
module DocumentSelector : sig
  type t = DocumentFilter.t list
end

(** Represents information about programming constructs like variables etc. *)
module SymbolInformation : sig
  (** These numbers should match
   * https://microsoft.github.io/language-server-protocol/specifications/specification-3-17/#symbolKind *)
  type symbolKind =
    | File [@value 1]
    | Module [@value 2]
    | Namespace [@value 3]
    | Package [@value 4]
    | Class [@value 5]
    | Method [@value 6]
    | Property [@value 7]
    | Field [@value 8]
    | Constructor [@value 9]
    | Enum [@value 10]
    | Interface [@value 11]
    | Function [@value 12]
    | Variable [@value 13]
    | Constant [@value 14]
    | String [@value 15]
    | Number [@value 16]
    | Boolean [@value 17]
    | Array [@value 18]
    | Object [@value 19]
    | Key [@value 20]
    | Null [@value 21]
    | EnumMember [@value 22]
    | Struct [@value 23]
    | Event [@value 24]
    | Operator [@value 25]
    | TypeParameter [@value 26]
  [@@deriving enum]

  type t = {
    name: string;
    kind: symbolKind;
    location: Location.t;  (** the span of the symbol including its contents *)
    containerName: string option;  (** the symbol containing this symbol *)
  }
end

(** Represents an item in the Call Hierarchy *)
module CallHierarchyItem : sig
  type t = {
    name: string;
    kind: SymbolInformation.symbolKind;
    detail: string option;
    uri: documentUri;
    range: range;
    selectionRange: range;
  }
end

(** Represents a parameter for a CallHierarchyIncomingCallsRequest or
    CallHierarchyOutgoingCallsRequest *)
module CallHierarchyCallsRequestParam : sig
  type t = { item: CallHierarchyItem.t }
end

(** For showing messages (not diagnostics) in the user interface. *)
module MessageType : sig
  type t =
    | ErrorMessage [@value 1]
    | WarningMessage [@value 2]
    | InfoMessage [@value 3]
    | LogMessage [@value 4]
  [@@deriving eq, enum]
end

(** Cancellation notification, method="$/cancelRequest" *)
module CancelRequest : sig
  type params = cancelParams

  and cancelParams = { id: lsp_id  (** the request id to cancel *) }
end

(** SetTraceNotification, method="$/setTraceNotification" *)
module SetTraceNotification : sig
  type params =
    | Verbose
    | Off
end

(** The kind of a code action.
 * Kinds are a hierarchical list of identifiers separated by `.`, e.g.
 * `"refactor.extract.function"`.
 * The set of kinds is open and client needs to announce the kinds it supports
 * to the server during initialization.
 * Module CodeAction below also references this module as Kind. *)
module CodeActionKind : sig
  type t = string * string list

  (** is x of kind k? *)
  val is_kind : t -> t -> bool

  (** does `ks` contain kind `k` *)
  val contains_kind : t -> t list -> bool

  (** does an optional list of kinds `ks` contain kind `k` *)
  val contains_kind_opt : default:bool -> t -> t list option -> bool

  (** Create a kind from a string that follows the spec *)
  val kind_of_string : string -> t

  (** Create the equivalent string that the spec would have required *)
  val string_of_kind : t -> string

  (** Create a new sub-kind of an existing kind *)
  val sub_kind : t -> string -> t

  (** A constant defined by the spec *)
  val quickfix : t

  (** A constant defined by the spec *)
  val refactor : t

  (** Document-wide code actions *)
  val source : t
end

(** Initialize request, method="initialize" *)
module Initialize : sig
  type textDocumentSyncKind =
    | NoSync [@value 0]  (** docs should not be synced at all. Wire "None" *)
    | FullSync [@value 1]
        (** synced by always sending full content. Wire "Full" *)
    | IncrementalSync [@value 2]
  [@@deriving enum]

  module CodeActionOptions : sig
    type t = { resolveProvider: bool }
  end

  module CompletionOptions : sig
    type t = {
      resolveProvider: bool;  (** server resolves extra info on demand *)
      completion_triggerCharacters: string list;  (** wire "triggerCharacters" *)
    }
  end

  module ServerExperimentalCapabilities : sig
    type t = {
      snippetTextEdit: bool;
          (** see ClientExperimentalCapabilities.snippetTextEdit *)
    }
  end

  module ClientExperimentalCapabilities : sig
    type t = {
      snippetTextEdit: bool;
          (** A client that supports this capability accepts snippet text edits like `${0:foo}`.
           * https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#snippet_syntax **)
    }
  end

  type params = {
    processId: int option;  (** pid of parent process *)
    rootPath: string option;  (** deprecated *)
    rootUri: documentUri option;  (** the root URI of the workspace *)
    initializationOptions: initializationOptions;
    client_capabilities: client_capabilities;  (** "capabilities" over wire *)
    trace: trace;  (** the initial trace setting, default="off" *)
  }

  and result = {
    server_capabilities: server_capabilities;  (** "capabilities" over wire *)
  }

  and errorData = {
    retry: bool;  (** should client retry the initialize request *)
  }

  and trace =
    | Off
    | Messages
    | Verbose

  (** These are hack-specific options. They're all optional in initialize
      request, and we pick a default if necessary while parsing. *)
  and initializationOptions = {
    namingTableSavedStatePath: string option;
        (** used for test scenarios where we pass a naming-table sqlite file,
            rather than leaving clientIdeDaemon to find and download one
            itself. *)
    namingTableSavedStateTestDelay: float;
        (** used for test scenarios where we've passed a naming-sqlite file
            and so clientIdeDaemon completes instantly, but we still want a
            little bit of a delay before it reports readiness so as to
            exercise race conditions. This is the delay in seconds. *)
    delayUntilDoneInit: bool;
        (** used for test scenarios where we want clientLsp to delay receiving
            any further LSP requests until clientIdeDaemon has done its
            init. *)
    skipLspServerOnTypeFormatting: bool;
        (** `true` iff formatting is to be done on the LSP client,
         * rather than provided by the LSP server.
         * TODO(T155870670): remove this field *)
  }

  and client_capabilities = {
    workspace: workspaceClientCapabilities;
    textDocument: textDocumentClientCapabilities;
    window: windowClientCapabilities;
    telemetry: telemetryClientCapabilities;  (** omitted: experimental *)
    client_experimental: ClientExperimentalCapabilities.t;
  }

  and workspaceClientCapabilities = {
    applyEdit: bool;  (** client supports applying batch edits *)
    workspaceEdit: workspaceEdit;
    didChangeWatchedFiles: dynamicRegistration;
        (** omitted: other dynamic-registration fields *)
  }

  and dynamicRegistration = {
    dynamicRegistration: bool;
        (** client supports dynamic registration for this capability *)
  }

  and workspaceEdit = {
    documentChanges: bool;  (** client supports versioned doc changes *)
  }

  and textDocumentClientCapabilities = {
    synchronization: synchronization;
    completion: completion;  (** textDocument/completion *)
    codeAction: codeAction;
    definition: definition;
    typeDefinition: typeDefinition;
    declaration: declaration;
    implementation: implementation;
  }

  (** synchronization capabilities say what messages the client is capable
   * of sending, should be so asked by the server.
   * We use the "can_" prefix for OCaml naming reasons; it's absent in LSP *)
  and synchronization = {
    can_willSave: bool;  (** client can send textDocument/willSave *)
    can_willSaveWaitUntil: bool;  (** textDoc.../willSaveWaitUntil *)
    can_didSave: bool;  (** textDocument/didSave *)
  }

  and completion = { completionItem: completionItem }

  and completionItem = {
    snippetSupport: bool;  (** client can do snippets as insert text *)
  }

  and codeAction = {
    codeAction_dynamicRegistration: bool;
        (** wire: dynamicRegistraction
            Whether code action supports dynamic registration. *)
    codeActionLiteralSupport: codeActionliteralSupport option;
        (** The client support code action literals as a valid
         * response of the `textDocument/codeAction` request. *)
  }

  (** The code action kind values the client supports. When this
   * property exists the client also guarantees that it will
   * handle values outside its set gracefully and falls back
   * to a default value when unknown. *)
  and codeActionliteralSupport = {
    codeAction_valueSet: CodeActionKind.t list;  (** wire: valueSet *)
  }

  and definition = { definitionLinkSupport: bool }

  and typeDefinition = { typeDefinitionLinkSupport: bool }

  and declaration = { declarationLinkSupport: bool }

  and implementation = { implementationLinkSupport: bool }

  and windowClientCapabilities = {
    status: bool;
        (** Nuclide-specific: client supports window/showStatusRequest *)
  }

  and telemetryClientCapabilities = {
    connectionStatus: bool;
        (** Nuclide-specific: client supports telemetry/connectionStatus *)
  }

  (** What capabilities the server provides *)
  and server_capabilities = {
    textDocumentSync: textDocumentSyncOptions;  (** how to sync *)
    hoverProvider: bool;
    completionProvider: CompletionOptions.t option;
    signatureHelpProvider: signatureHelpOptions option;
    definitionProvider: bool;
    typeDefinitionProvider: bool;
    referencesProvider: bool;
    callHierarchyProvider: bool;
    documentHighlightProvider: bool;
    documentSymbolProvider: bool;  (** ie. document outline *)
    workspaceSymbolProvider: bool;  (** ie. find-symbol-in-project *)
    codeActionProvider: CodeActionOptions.t option;
    codeLensProvider: codeLensOptions option;
    documentFormattingProvider: bool;
    documentRangeFormattingProvider: bool;
    documentOnTypeFormattingProvider: documentOnTypeFormattingOptions option;
    renameProvider: bool;
    documentLinkProvider: documentLinkOptions option;
    executeCommandProvider: executeCommandOptions option;
    implementationProvider: bool;
    rageProviderFB: bool;
    server_experimental: ServerExperimentalCapabilities.t option;
  }

  and signatureHelpOptions = { sighelp_triggerCharacters: string list }

  and codeLensOptions = {
    codelens_resolveProvider: bool;  (** wire "resolveProvider" *)
  }

  and documentOnTypeFormattingOptions = {
    firstTriggerCharacter: string;
    moreTriggerCharacter: string list;  (** e.g. "}" *)
  }

  and documentLinkOptions = {
    doclink_resolveProvider: bool;  (** wire "resolveProvider" *)
  }

  and executeCommandOptions = {
    commands: string list;  (** the commands to be executed on the server *)
  }

  (** text document sync options say what messages the server requests the
   * client to send. We use the "want_" prefix for OCaml naming reasons;
   * this prefix is absent in LSP. *)
  and textDocumentSyncOptions = {
    want_openClose: bool;  (** textDocument/didOpen+didClose *)
    want_change: textDocumentSyncKind;
    want_willSave: bool;  (** textDocument/willSave *)
    want_willSaveWaitUntil: bool;  (** textDoc.../willSaveWaitUntil *)
    want_didSave: saveOptions option;  (** textDocument/didSave *)
  }

  (** full only on open. Wire "Incremental" *)
  and saveOptions = {
    includeText: bool;  (** the client should include content on save *)
  }
end

(** ErrorResponse *)
module Error : sig
  type code =
    | ParseError [@value -32700]
    | InvalidRequest [@value -32600]
    | MethodNotFound [@value -32601]
    | InvalidParams [@value -32602]
    | InternalError [@value -32603]
    | ServerErrorStart [@value -32099]
    | ServerErrorEnd [@value -32000]
    | ServerNotInitialized [@value -32002]
    | UnknownErrorCode [@value -32001]
    | RequestCancelled [@value -32800]
    | ContentModified [@value -32801]
  [@@deriving show, enum]

  type t = {
    code: code;
    message: string;
    data: Hh_json.json option;
  }

  (** For methods which want to return exceptions, and they also want to
      decide how the exception gets serialized over LSP, they should throw
      this one. *)
  exception LspException of t
end

(** Rage request, method="telemetry/rage" *)
module RageFB : sig
  type result = rageItem list

  and rageItem = {
    title: string option;
    data: string;
  }
end

(** Code Lens resolve request, method="codeLens/resolve" *)
module CodeLensResolve : sig
  type params = CodeLens.t

  and result = CodeLens.t
end

(** Hover request, method="textDocument/hover" *)
module Hover : sig
  type params = TextDocumentPositionParams.t

  and result = hoverResult option

  and hoverResult = {
    contents: markedString list;  (** wire: either a single one or an array *)
    range: range option;
  }
end

(** PublishDiagnostics notification, method="textDocument/PublishDiagnostics" *)
module PublishDiagnostics : sig
  type diagnosticCode =
    | IntCode of int
    | StringCode of string
    | NoCode
  [@@deriving eq]

  type diagnosticSeverity =
    | Error
    | Warning
    | Information
    | Hint
  [@@deriving enum, eq]

  type params = publishDiagnosticsParams

  and publishDiagnosticsParams = {
    uri: documentUri;
    diagnostics: diagnostic list;
    isStatusFB: bool;
        (** FB-specific extension, for diagnostics used only to show status *)
  }

  and diagnostic = {
    range: range;  (** the range at which the message applies *)
    severity: diagnosticSeverity option;  (** if omitted, client decides *)
    code: diagnosticCode;
    source: string option;  (** human-readable string, eg. typescript/lint *)
    message: string;  (** the diagnostic's message *)
    relatedInformation: diagnosticRelatedInformation list;
    relatedLocations: relatedLocation list;  (** legacy FB extension *)
  }
  [@@deriving eq]

  and diagnosticRelatedInformation = {
    relatedLocation: Location.t;  (** wire: just "location" *)
    relatedMessage: string;  (** wire: just "message" *)
  }
  [@@deriving eq]

  (** legacy FB extension *)
  and relatedLocation = diagnosticRelatedInformation
end

(** DidOpenTextDocument notification, method="textDocument/didOpen" *)
module DidOpen : sig
  type params = didOpenTextDocumentParams

  and didOpenTextDocumentParams = {
    textDocument: TextDocumentItem.t;  (** the document that was opened *)
  }
end

(** DidCloseTextDocument notification, method="textDocument/didClose" *)
module DidClose : sig
  type params = didCloseTextDocumentParams

  and didCloseTextDocumentParams = {
    textDocument: TextDocumentIdentifier.t;  (** the doc that was closed *)
  }
end

(** DidSaveTextDocument notification, method="textDocument/didSave" *)
module DidSave : sig
  type params = didSaveTextDocumentParams

  and didSaveTextDocumentParams = {
    textDocument: TextDocumentIdentifier.t;  (** the doc that was saved *)
    text: string option;  (** content when saved; depends on includeText *)
  }
end

(** DidChangeTextDocument notification, method="textDocument/didChange" *)
module DidChange : sig
  type params = didChangeTextDocumentParams

  and didChangeTextDocumentParams = {
    textDocument: VersionedTextDocumentIdentifier.t;
    contentChanges: textDocumentContentChangeEvent list;
  }

  and textDocumentContentChangeEvent = {
    range: range option;  (** the range of the document that changed *)
    rangeLength: int option;  (** the length that got replaced *)
    text: string;  (** the new text of the range/document *)
  }
end

(** WillSaveWaitUntilTextDocument request,
    method="textDocument/willSaveWaitUntil" *)
module WillSaveWaitUntil : sig
  type params = willSaveWaitUntilTextDocumentParams

  and willSaveWaitUntilTextDocumentParams = {
    textDocument: TextDocumentIdentifier.t;
    reason: textDocumentSaveReason;
  }

  and result = TextEdit.t list
end

(** Watched files changed notification,
    method="workspace/didChangeWatchedFiles" *)
module DidChangeWatchedFiles : sig
  type registerOptions = { watchers: fileSystemWatcher list }

  and fileSystemWatcher = { globPattern: string }

  type fileChangeType =
    | Created
    | Updated
    | Deleted
  [@@deriving enum]

  type params = { changes: fileEvent list }

  and fileEvent = {
    uri: documentUri;
    type_: fileChangeType;
  }
end

(** Goto Definition request, method="textDocument/definition" *)
module Definition : sig
  type params = TextDocumentPositionParams.t

  (** wire: either a single one or an array *)
  and result = DefinitionLocation.t list
end

(** Goto TypeDefinition request, method="textDocument/typeDefinition" *)
module TypeDefinition : sig
  type params = TextDocumentPositionParams.t

  and result = DefinitionLocation.t list
end

(** Go To Implementation request, method="textDocument/implementation" *)
module Implementation : sig
  type params = TextDocumentPositionParams.t

  and result = Location.t list
end

(** A code action represents a change that can be performed in code, e.g. to
    fix a problem or to refactor code. *)
module CodeAction : sig
  (** Note: For "textDocument/codeAction" requests we return a `data` field
      containing the original request params, then when the client sends
      "codeAction/resolve" we read the `data` param to re-calculate the
      requested code action. This adding of the "data" field is done in our
      serialization step, to avoid passing extra state around and enforce
      that `data` is all+only the original request params.
      See [edit_or_command] for more information on the resolution flow. *)
  type 'resolution_phase t = {
    title: string;  (** A short, human-readable, title for this code action. *)
    kind: CodeActionKind.t;
        (** The kind of the code action. Used to filter code actions. *)
    diagnostics: PublishDiagnostics.diagnostic list;
        (** The diagnostics that this code action resolves. *)
    action: 'resolution_phase edit_and_or_command;
        (* A CodeAction must set either `edit`, a `command` (or neither iff
           only resolved lazily)
           If both are supplied, the `edit` is applied first, then the
           `command` is executed.
           If neither is supplied, the client requests 'edit' be resolved
           using "codeAction/resolve" *)
  }

  (** 'resolution_phase is used to follow the protocol in a type-safe and
      prescribed manner:

      LSP protocol:
      1. The client sends server "textDocument/codeAction"
      2. The server can send back an unresolved code action (neither "edit"
      nor "command" fields)
      3. If the code action is unresolved, the client sends
      "codeAction/resolve"

      Our implementation flow:
      - create a representation of a code action which includes a
      lazily-computed edit
      - if the request is "textDocument/codeAction", we do not compute an edit
      - if the request is "codeAction/resolve", we have access to the original
      request params via the `data` field (see [t] comments above) and
      perform the same calculation as for "textDocument/codeAction" and then
      compute the edit. *)
  and 'resolution_phase edit_and_or_command =
    | EditOnly of WorkspaceEdit.t
    | CommandOnly of Command.t
    | BothEditThenCommand of (WorkspaceEdit.t * Command.t)
    | UnresolvedEdit of 'resolution_phase

  type 'resolution_phase command_or_action_ =
    | Command of Command.t
    | Action of 'resolution_phase t

  (* Empty type: plugging it into ['resolution_phase] rules out
     [UnresolvedEdit], so "resolved" actions always carry their edit/command. *)
  type resolved_marker = |

  type resolved_command_or_action = resolved_marker command_or_action_

  type command_or_action = unit command_or_action_

  type result = command_or_action list
end

(** Code Action Request, method="textDocument/codeAction" *)
module CodeActionRequest : sig
  type params = {
    textDocument: TextDocumentIdentifier.t;
        (** The document in which the command was invoked. *)
    range: range;  (** The range for which the command was invoked. *)
    context: codeActionContext;  (** Context carrying additional information. *)
  }

  (** Contains additional diagnostic information about the context in which a
      code action is run.
*) and codeActionContext = { diagnostics: PublishDiagnostics.diagnostic list; only: CodeActionKind.t list option; } end (** Completion request, method="textDocument/completion" *) module CodeActionResolve : sig type result = (CodeAction.resolved_command_or_action, Error.t) Result.t end (** method="codeAction/resolve" *) module CodeActionResolveRequest : sig (** The client sends a partially-resolved [CodeAction] with an additional [data] field. We don't bother parsing all the fields from the partially-resolved [CodeAction] because [data] and [title] are all we need and so we don't have to duplicate the entire [CodeAction.command_or_action] shape here *) type params = { data: CodeActionRequest.params; title: string; (** From LSP spec: "A data entry field that is preserved on a code action between a `textDocument/codeAction` and a `codeAction/resolve` request" We commit to a single representation for simplicity and type-safety *) } end module Completion : sig (** These numbers should match * https://microsoft.github.io/language-server-protocol/specification#textDocument_completion *) type completionItemKind = | Text (* 1 *) | Method (* 2 *) | Function (* 3 *) | Constructor (* 4 *) | Field (* 5 *) | Variable (* 6 *) | Class (* 7 *) | Interface (* 8 *) | Module (* 9 *) | Property (* 10 *) | Unit (* 11 *) | Value (* 12 *) | Enum (* 13 *) | Keyword (* 14 *) | Snippet (* 15 *) | Color (* 16 *) | File (* 17 *) | Reference (* 18 *) | Folder (* 19 *) | MemberOf (* 20 *) | Constant (* 21 *) | Struct (* 22 *) | Event (* 23 *) | Operator (* 24 *) | TypeParameter (* 25 *) [@@deriving enum] (** These numbers should match * https://microsoft.github.io/language-server-protocol/specification#textDocument_completion *) type insertTextFormat = | PlainText (* 1 *) (** the insertText/textEdits are just plain strings *) | SnippetFormat (* 2 *) (** wire: just "Snippet" *) [@@deriving enum] type completionTriggerKind = | Invoked [@value 1] | TriggerCharacter [@value 2] | 
TriggerForIncompleteCompletions [@value 3] [@@deriving enum] val is_invoked : completionTriggerKind -> bool type params = completionParams and completionParams = { loc: TextDocumentPositionParams.t; context: completionContext option; } and completionContext = { triggerKind: completionTriggerKind; triggerCharacter: string option; } and result = completionList (** wire: can also be 'completionItem list' *) and completionList = { isIncomplete: bool; (** further typing should result in recomputing *) items: completionItem list; } and completionDocumentation = | MarkedStringsDocumentation of markedString list | UnparsedDocumentation of Hh_json.json and completionItem = { label: string; (** the label in the UI *) kind: completionItemKind option; (** tells editor which icon to use *) detail: string option; (** human-readable string like type/symbol info *) documentation: completionDocumentation option; (** human-readable doc-comment *) sortText: string option; (** used for sorting; if absent, uses label *) filterText: string option; (** used for filtering; if absent, uses label *) insertText: string option; (** used for inserting; if absent, uses label *) insertTextFormat: insertTextFormat option; textEdit: TextEdit.t option; additionalTextEdits: TextEdit.t list; (** wire: split into hd and tl *) command: Command.t option; (** if present, is executed after completion *) data: Hh_json.json option; } end (** Completion Item Resolve request, method="completionItem/resolve" *) module CompletionItemResolve : sig type params = Completion.completionItem and result = Completion.completionItem end (** Workspace Symbols request, method="workspace/symbol" *) module WorkspaceSymbol : sig type params = workspaceSymbolParams and result = SymbolInformation.t list and workspaceSymbolParams = { query: string } end (** Document Symbols request, method="textDocument/documentSymbol" *) module DocumentSymbol : sig type params = documentSymbolParams and result = SymbolInformation.t list and 
documentSymbolParams = { textDocument: TextDocumentIdentifier.t } end (** Find References request, method="textDocument/references" *) module FindReferences : sig type params = referenceParams and result = Location.t list and referenceParams = { loc: TextDocumentPositionParams.t; (** wire: loc's members are part of referenceParams *) context: referenceContext; partialResultToken: partial_result_token option; } and referenceContext = { includeDeclaration: bool; (** include declaration of current symbol *) includeIndirectReferences: bool; } end module PrepareCallHierarchy : sig type params = TextDocumentPositionParams.t type result = CallHierarchyItem.t list option end module CallHierarchyIncomingCalls : sig type params = CallHierarchyCallsRequestParam.t type result = callHierarchyIncomingCall list option and callHierarchyIncomingCall = { from: CallHierarchyItem.t; fromRanges: range list; } end module CallHierarchyOutgoingCalls : sig type params = CallHierarchyCallsRequestParam.t type result = callHierarchyOutgoingCall list option and callHierarchyOutgoingCall = { call_to: CallHierarchyItem.t; (** The name should just be "to", but "to" is a reserved keyword in OCaml*) fromRanges: range list; } end (** Document Highlights request, method="textDocument/documentHighlight" *) module DocumentHighlight : sig type params = TextDocumentPositionParams.t type documentHighlightKind = | Text [@value 1] (** a textual occurrence *) | Read [@value 2] (** read-access of a symbol, like reading a variable *) | Write [@value 3] (** write-access of a symbol, like writing a variable *) [@@deriving enum] type result = documentHighlight list and documentHighlight = { range: range; (** the range this highlight applies to *) kind: documentHighlightKind option; } end (** Document Formatting request, method="textDocument/formatting" *) module DocumentFormatting : sig type params = documentFormattingParams and result = TextEdit.t list and documentFormattingParams = { textDocument: 
TextDocumentIdentifier.t; options: formattingOptions; } and formattingOptions = { tabSize: int; (** size of a tab in spaces *) insertSpaces: bool; (** prefer spaces over tabs omitted: signature for further properties *) } end (** Document Range Formatting request, method="textDocument/rangeFormatting" *) module DocumentRangeFormatting : sig type params = documentRangeFormattingParams and result = TextEdit.t list and documentRangeFormattingParams = { textDocument: TextDocumentIdentifier.t; range: range; options: DocumentFormatting.formattingOptions; } end module DocumentOnTypeFormatting : sig type params = documentOnTypeFormattingParams and result = TextEdit.t list and documentOnTypeFormattingParams = { textDocument: TextDocumentIdentifier.t; position: position; (** the position at which this request was sent *) ch: string; (** the character that has been typed *) options: DocumentFormatting.formattingOptions; } end (** Document Signature Help request, method="textDocument/signatureHelp" *) module SignatureHelp : sig type params = TextDocumentPositionParams.t and result = t option and t = { signatures: signature_information list; activeSignature: int; activeParameter: int; } and signature_information = { siginfo_label: string; siginfo_documentation: string option; parameters: parameter_information list; } and parameter_information = { parinfo_label: string; parinfo_documentation: string option; } end (** Workspace TypeHierarchy request, method="textDocument/typeHierarchy" *) module TypeHierarchy : sig type params = TextDocumentPositionParams.t type memberKind = | Method [@value 1] | SMethod [@value 2] | Property [@value 3] | SProperty [@value 4] | Const [@value 5] [@@deriving enum] type memberEntry = { name: string; snippet: string; kind: memberKind; uri: documentUri; range: range; origin: string; } type entryKind = | Class [@value 1] | Interface [@value 2] | Enum [@value 3] | Trait [@value 4] [@@deriving enum] type ancestorEntry = | AncestorName of string | 
AncestorDetails of { name: string; kind: entryKind; uri: documentUri; range: range; } type hierarchyEntry = { name: string; uri: documentUri; range: range; kind: entryKind; ancestors: ancestorEntry list; members: memberEntry list; } type result = hierarchyEntry option end (** Workspace Rename request, method="textDocument/rename" *) module Rename : sig type params = renameParams and result = WorkspaceEdit.t and renameParams = { textDocument: TextDocumentIdentifier.t; position: position; newName: string; } end module DocumentCodeLens : sig type params = codelensParams and result = CodeLens.t list and codelensParams = { textDocument: TextDocumentIdentifier.t } end (** LogMessage notification, method="window/logMessage" *) module LogMessage : sig type params = logMessageParams and logMessageParams = { type_: MessageType.t; message: string; } end (** ShowMessage notification, method="window/showMessage" *) module ShowMessage : sig type params = showMessageParams and showMessageParams = { type_: MessageType.t; message: string; } end (** ShowMessage request, method="window/showMessageRequest" *) module ShowMessageRequest : sig type t = | Present of { id: lsp_id } | Absent and params = showMessageRequestParams and result = messageActionItem option and showMessageRequestParams = { type_: MessageType.t; message: string; actions: messageActionItem list; } and messageActionItem = { title: string } end (** ShowStatus request, method="window/showStatus" *) module ShowStatusFB : sig type params = showStatusParams and result = unit and showStatusParams = { request: showStatusRequestParams; progress: int option; total: int option; shortMessage: string option; telemetry: Hh_json.json option; } and showStatusRequestParams = { type_: MessageType.t; message: string; } end (** ConnectionStatus notification, method="telemetry/connectionStatus" *) module ConnectionStatusFB : sig type params = connectionStatusParams and connectionStatusParams = { isConnected: bool } end type 
lsp_registration_options = | DidChangeWatchedFilesRegistrationOptions of DidChangeWatchedFiles.registerOptions (** Register capability request, method="client/registerCapability" *) module RegisterCapability : sig type params = { registrations: registration list } and registration = { id: string; (** The ID field is arbitrary but unique per type of capability (for future deregistering, which we don't do). *) method_: string; registerOptions: lsp_registration_options; } val make_registration : lsp_registration_options -> registration end type lsp_request = | InitializeRequest of Initialize.params | RegisterCapabilityRequest of RegisterCapability.params | ShutdownRequest | CodeLensResolveRequest of CodeLensResolve.params | HoverRequest of Hover.params | DefinitionRequest of Definition.params | TypeDefinitionRequest of TypeDefinition.params | ImplementationRequest of Implementation.params | CodeActionRequest of CodeActionRequest.params | CodeActionResolveRequest of CodeActionResolveRequest.params | CompletionRequest of Completion.params | CompletionItemResolveRequest of CompletionItemResolve.params | WorkspaceSymbolRequest of WorkspaceSymbol.params | DocumentSymbolRequest of DocumentSymbol.params | FindReferencesRequest of FindReferences.params | PrepareCallHierarchyRequest of PrepareCallHierarchy.params | CallHierarchyIncomingCallsRequest of CallHierarchyIncomingCalls.params | CallHierarchyOutgoingCallsRequest of CallHierarchyOutgoingCalls.params | DocumentHighlightRequest of DocumentHighlight.params | DocumentFormattingRequest of DocumentFormatting.params | DocumentRangeFormattingRequest of DocumentRangeFormatting.params | DocumentOnTypeFormattingRequest of DocumentOnTypeFormatting.params | ShowMessageRequestRequest of ShowMessageRequest.params | ShowStatusRequestFB of ShowStatusFB.params | RageRequestFB | RenameRequest of Rename.params | DocumentCodeLensRequest of DocumentCodeLens.params | SignatureHelpRequest of SignatureHelp.params | TypeHierarchyRequest of 
TypeHierarchy.params | HackTestStartServerRequestFB | HackTestStopServerRequestFB | HackTestShutdownServerlessRequestFB | WillSaveWaitUntilRequest of WillSaveWaitUntil.params | UnknownRequest of string * Hh_json.json option type lsp_result = | InitializeResult of Initialize.result | ShutdownResult | CodeLensResolveResult of CodeLensResolve.result | HoverResult of Hover.result | DefinitionResult of Definition.result | TypeDefinitionResult of TypeDefinition.result | ImplementationResult of Implementation.result | CodeActionResult of CodeAction.result * CodeActionRequest.params | CodeActionResolveResult of CodeActionResolve.result | CompletionResult of Completion.result | CompletionItemResolveResult of CompletionItemResolve.result | WorkspaceSymbolResult of WorkspaceSymbol.result | DocumentSymbolResult of DocumentSymbol.result | FindReferencesResult of FindReferences.result | PrepareCallHierarchyResult of PrepareCallHierarchy.result | CallHierarchyIncomingCallsResult of CallHierarchyIncomingCalls.result | CallHierarchyOutgoingCallsResult of CallHierarchyOutgoingCalls.result | DocumentHighlightResult of DocumentHighlight.result | DocumentFormattingResult of DocumentFormatting.result | DocumentRangeFormattingResult of DocumentRangeFormatting.result | DocumentOnTypeFormattingResult of DocumentOnTypeFormatting.result | ShowMessageRequestResult of ShowMessageRequest.result | ShowStatusResultFB of ShowStatusFB.result | RageResultFB of RageFB.result | RenameResult of Rename.result | DocumentCodeLensResult of DocumentCodeLens.result | SignatureHelpResult of SignatureHelp.result | TypeHierarchyResult of TypeHierarchy.result | HackTestStartServerResultFB | HackTestStopServerResultFB | HackTestShutdownServerlessResultFB | RegisterCapabilityRequestResult | WillSaveWaitUntilResult of WillSaveWaitUntil.result | ErrorResult of Error.t type lsp_notification = | ExitNotification | CancelRequestNotification of CancelRequest.params | PublishDiagnosticsNotification of 
PublishDiagnostics.params | DidOpenNotification of DidOpen.params | DidCloseNotification of DidClose.params | DidSaveNotification of DidSave.params | DidChangeNotification of DidChange.params | DidChangeWatchedFilesNotification of DidChangeWatchedFiles.params | LogMessageNotification of LogMessage.params | TelemetryNotification of LogMessage.params * (string * Hh_json.json) list (** For telemetry, LSP allows 'any', but we're going to send these *) | ShowMessageNotification of ShowMessage.params | ConnectionStatusNotificationFB of ConnectionStatusFB.params | InitializedNotification | FindReferencesPartialResultNotification of partial_result_token * FindReferences.result | SetTraceNotification of SetTraceNotification.params | LogTraceNotification (* $/logTraceNotification *) | UnknownNotification of string * Hh_json.json option type lsp_message = | RequestMessage of lsp_id * lsp_request | ResponseMessage of lsp_id * lsp_result | NotificationMessage of lsp_notification type 'a lsp_handler = 'a lsp_result_handler * 'a lsp_error_handler and 'a lsp_error_handler = Error.t * string -> 'a -> 'a and 'a lsp_result_handler = | ShowMessageHandler of (ShowMessageRequest.result -> 'a -> 'a) | ShowStatusHandler of (ShowStatusFB.result -> 'a -> 'a) module IdKey : sig type t = lsp_id val compare : t -> t -> int end module IdSet : sig include module type of Set.Make (IdKey) end module IdMap : sig include module type of WrappedMap.Make (IdKey) end module UriKey : sig type t = documentUri val compare : t -> t -> int end module UriSet : sig include module type of Set.Make (UriKey) end module UriMap : sig include module type of WrappedMap.Make (UriKey) end val lsp_result_to_log_string : lsp_result -> string
OCaml
hhvm/hphp/hack/src/utils/lsp/lsp_fmt.ml
(* * Copyright (c) 2015, Facebook, Inc. * All rights reserved. * * This source code is licensed under the MIT license found in the * LICENSE file in the "hack" directory of this source tree. * *) open Hh_prelude open Lsp open Hh_json open Hh_json_helpers (************************************************************************) (* Miscellaneous LSP structures *) (************************************************************************) let parse_id (json : json) : lsp_id = match json with | JSON_Number s -> begin try NumberId (int_of_string s) with | Failure _ -> raise (Error.LspException { Error.code = Error.ParseError; message = "float ids not allowed: " ^ s; data = None; }) end | JSON_String s -> StringId s | _ -> raise (Error.LspException { Error.code = Error.ParseError; message = "not an id: " ^ Hh_json.json_to_string json; data = None; }) let parse_id_opt (json : json option) : lsp_id option = Option.map json ~f:parse_id let print_id (id : lsp_id) : json = match id with | NumberId n -> JSON_Number (string_of_int n) | StringId s -> JSON_String s let id_to_string (id : lsp_id) : string = match id with | NumberId n -> string_of_int n | StringId s -> Printf.sprintf "\"%s\"" s let parse_position (json : json option) : position = { line = Jget.int_exn json "line"; character = Jget.int_exn json "character" } let print_position (position : position) : json = JSON_Object [("line", position.line |> int_); ("character", position.character |> int_)] let print_range (range : range) : json = JSON_Object [("start", print_position range.start); ("end", print_position range.end_)] let print_location (location : Location.t) : json = Location.( JSON_Object [ ("uri", JSON_String (string_of_uri location.uri)); ("range", print_range location.range); ]) let print_locations (r : Location.t list) : json = JSON_Array (List.map r ~f:print_location) let print_definition_location (definition_location : DefinitionLocation.t) : json = DefinitionLocation.( let location = 
definition_location.location in Jprint.object_opt [ ("uri", Some (JSON_String (string_of_uri location.Location.uri))); ("range", Some (print_range location.Location.range)); ("title", Option.map definition_location.title ~f:string_); ]) let print_definition_locations (r : DefinitionLocation.t list) : json = JSON_Array (List.map r ~f:print_definition_location) let parse_range_exn (json : json option) : range = { start = Jget.obj_exn json "start" |> parse_position; end_ = Jget.obj_exn json "end" |> parse_position; } let parse_location (j : json option) : Location.t = Location. { uri = Jget.string_exn j "uri" |> uri_of_string; range = Jget.obj_exn j "range" |> parse_range_exn; } let parse_range_opt (json : json option) : range option = if Option.is_none json then None else Some (parse_range_exn json) (************************************************************************) let print_error (e : Error.t) : json = let open Hh_json in let data = match e.Error.data with | None -> [] | Some data -> [("data", data)] in let entries = ("code", int_ (Error.code_to_enum e.Error.code)) :: ("message", string_ e.Error.message) :: data in JSON_Object entries let error_to_log_string (e : Error.t) : string = let data = Option.value_map e.Error.data ~f:Hh_json.json_to_multiline ~default:"" in Printf.sprintf "%s [%s]\n%s" e.Error.message (Error.show_code e.Error.code) data let parse_error (error : json) : Error.t = let json = Some error in let code = Jget.int_exn json "code" |> Error.code_of_enum |> Option.value ~default:Error.UnknownErrorCode in let message = Jget.string_exn json "message" in let data = Jget.val_opt json "data" in { Error.code; message; data } let parse_textDocumentIdentifier (json : json option) : TextDocumentIdentifier.t = TextDocumentIdentifier.{ uri = Jget.string_exn json "uri" |> uri_of_string } let parse_versionedTextDocumentIdentifier (json : json option) : VersionedTextDocumentIdentifier.t = VersionedTextDocumentIdentifier. 
{ uri = Jget.string_exn json "uri" |> uri_of_string; version = Jget.int_d json "version" ~default:0; } let parse_textDocumentItem (json : json option) : TextDocumentItem.t = TextDocumentItem. { uri = Jget.string_exn json "uri" |> uri_of_string; languageId = Jget.string_d json "languageId" ~default:""; version = Jget.int_d json "version" ~default:0; text = Jget.string_exn json "text"; } let print_textDocumentItem (item : TextDocumentItem.t) : json = TextDocumentItem.( JSON_Object [ ("uri", JSON_String (string_of_uri item.uri)); ("languageId", JSON_String item.languageId); ("version", JSON_Number (string_of_int item.version)); ("text", JSON_String item.text); ]) let print_markedItem (item : markedString) : json = match item with | MarkedString s -> JSON_String s | MarkedCode (language, value) -> JSON_Object [("language", JSON_String language); ("value", JSON_String value)] let parse_textDocumentPositionParams (params : json option) : TextDocumentPositionParams.t = TextDocumentPositionParams. 
{ textDocument = Jget.obj_exn params "textDocument" |> parse_textDocumentIdentifier; position = Jget.obj_exn params "position" |> parse_position; } let parse_textEdit (params : json option) : TextEdit.t option = match params with | None -> None | _ -> TextEdit.( Some { range = Jget.obj_exn params "range" |> parse_range_exn; newText = Jget.string_exn params "newText"; }) let print_textEdit (edit : TextEdit.t) : json = TextEdit.( JSON_Object [("range", print_range edit.range); ("newText", JSON_String edit.newText)]) let print_textEdits (r : TextEdit.t list) : json = JSON_Array (List.map r ~f:print_textEdit) let print_workspaceEdit (r : WorkspaceEdit.t) : json = WorkspaceEdit.( let print_workspace_edit_changes (uri, text_edits) = (uri, print_textEdits text_edits) in JSON_Object [ ( "changes", JSON_Object (List.map (SMap.elements r.changes) ~f:print_workspace_edit_changes) ); ]) let print_command (command : Command.t) : json = Command.( JSON_Object [ ("title", JSON_String command.title); ("command", JSON_String command.command); ("arguments", JSON_Array command.arguments); ]) let parse_command (json : json option) : Command.t = Command. { title = Jget.string_d json "title" ~default:""; command = Jget.string_d json "command" ~default:""; arguments = Jget.array_d json "arguments" ~default:[] |> List.filter_opt; } let parse_formattingOptions (json : json option) : DocumentFormatting.formattingOptions = { DocumentFormatting.tabSize = Jget.int_d json "tabSize" ~default:2; insertSpaces = Jget.bool_d json "insertSpaces" ~default:true; } let print_symbolInformation (info : SymbolInformation.t) : json = SymbolInformation.( let print_symbol_kind k = int_ (SymbolInformation.symbolKind_to_enum k) in Jprint.object_opt [ ("name", Some (JSON_String info.name)); ("kind", Some (print_symbol_kind info.kind)); ("location", Some (print_location info.location)); ("containerName", Option.map info.containerName ~f:string_); ]) let parse_codeLens (json : json option) : CodeLens.t = CodeLens. 
{ range = Jget.obj_exn json "range" |> parse_range_exn; command = Jget.obj_exn json "command" |> parse_command; data = Jget.obj_exn json "data"; } let print_codeLens (codeLens : CodeLens.t) : json = CodeLens.( JSON_Object [ ("range", print_range codeLens.range); ("command", print_command codeLens.command); ( "data", match codeLens.data with | None -> JSON_Null | Some json -> json ); ]) (************************************************************************) let print_shutdown () : json = JSON_Null (************************************************************************) let parse_cancelRequest (params : json option) : CancelRequest.params = CancelRequest.{ id = Jget.val_exn params "id" |> parse_id } let print_cancelRequest (p : CancelRequest.params) : json = CancelRequest.(JSON_Object [("id", print_id p.id)]) (************************************************************************) let parse_setTraceNotification (params : json option) : SetTraceNotification.params = match Jget.string_opt params "value" with | Some "verbose" -> SetTraceNotification.Verbose | _ -> SetTraceNotification.Off let print_setTraceNotification (p : SetTraceNotification.params) : json = let s = match p with | SetTraceNotification.Verbose -> "verbose" | SetTraceNotification.Off -> "off" in JSON_Object [("value", JSON_String s)] (************************************************************************) let print_rage (r : RageFB.result) : json = RageFB.( let print_item (item : rageItem) : json = JSON_Object [ ("data", JSON_String item.data); ( "title", match item.title with | None -> JSON_Null | Some s -> JSON_String s ); ] in JSON_Array (List.map r ~f:print_item)) (************************************************************************) let parse_didOpen (params : json option) : DidOpen.params = DidOpen. 
{ textDocument = Jget.obj_exn params "textDocument" |> parse_textDocumentItem; } let print_didOpen (params : DidOpen.params) : json = DidOpen.( JSON_Object [("textDocument", params.textDocument |> print_textDocumentItem)]) (************************************************************************) let parse_didClose (params : json option) : DidClose.params = DidClose. { textDocument = Jget.obj_exn params "textDocument" |> parse_textDocumentIdentifier; } (************************************************************************) let parse_didSave (params : json option) : DidSave.params = DidSave. { textDocument = Jget.obj_exn params "textDocument" |> parse_textDocumentIdentifier; text = Jget.string_opt params "text"; } (************************************************************************) let parse_didChange (params : json option) : DidChange.params = DidChange.( let parse_textDocumentContentChangeEvent json = { range = Jget.obj_opt json "range" |> parse_range_opt; rangeLength = Jget.int_opt json "rangeLength"; text = Jget.string_exn json "text"; } in { textDocument = Jget.obj_exn params "textDocument" |> parse_versionedTextDocumentIdentifier; contentChanges = Jget.array_d params "contentChanges" ~default:[] |> List.map ~f:parse_textDocumentContentChangeEvent; }) (************************************************************************) let parse_signatureHelp (params : json option) : SignatureHelp.params = parse_textDocumentPositionParams params let print_signatureHelp (r : SignatureHelp.result) : json = SignatureHelp.( let print_parInfo parInfo = Jprint.object_opt [ ("label", Some (Hh_json.JSON_String parInfo.parinfo_label)); ( "documentation", Option.map ~f:Hh_json.string_ parInfo.parinfo_documentation ); ] in let print_sigInfo sigInfo = Jprint.object_opt [ ("label", Some (Hh_json.JSON_String sigInfo.siginfo_label)); ( "documentation", Option.map ~f:Hh_json.string_ sigInfo.siginfo_documentation ); ( "parameters", Some (Hh_json.JSON_Array (List.map 
~f:print_parInfo sigInfo.parameters)) ); ] in match r with | None -> Hh_json.JSON_Null | Some r -> Hh_json.JSON_Object [ ( "signatures", Hh_json.JSON_Array (List.map ~f:print_sigInfo r.signatures) ); ("activeSignature", Hh_json.int_ r.activeSignature); ("activeParameter", Hh_json.int_ r.activeParameter); ]) (************************************************************************) let parse_typeHierarchy (params : json option) : TypeHierarchy.params = parse_textDocumentPositionParams params let print_typeHierarchy (r : TypeHierarchy.result) : json = TypeHierarchy.( let print_member_entry (entry : TypeHierarchy.memberEntry) = Hh_json.JSON_Object [ ("name", Hh_json.string_ entry.name); ("snippet", Hh_json.string_ entry.snippet); ("uri", JSON_String (string_of_uri entry.uri)); ("range", print_range entry.range); ("kind", Hh_json.int_ (memberKind_to_enum entry.kind)); ("origin", Hh_json.string_ entry.origin); ] in let print_ancestor_entry (entry : TypeHierarchy.ancestorEntry) = match entry with | AncestorName name -> Hh_json.string_ name | AncestorDetails entry -> Hh_json.JSON_Object [ ("name", Hh_json.string_ entry.name); ("uri", JSON_String (string_of_uri entry.uri)); ("range", print_range entry.range); ("kind", Hh_json.int_ (entryKind_to_enum entry.kind)); ] in let print_hierarchy_entry (entry : TypeHierarchy.hierarchyEntry) = Hh_json.JSON_Object [ ("name", Hh_json.string_ entry.name); ("uri", JSON_String (string_of_uri entry.uri)); ("range", print_range entry.range); ("kind", Hh_json.int_ (entryKind_to_enum entry.kind)); ( "ancestors", Hh_json.JSON_Array (List.map ~f:print_ancestor_entry entry.ancestors) ); ( "members", Hh_json.JSON_Array (List.map ~f:print_member_entry entry.members) ); ] in match r with | None -> Hh_json.JSON_Object [] | Some r -> print_hierarchy_entry r) (************************************************************************) let parse_codeLensResolve (params : json option) : CodeLensResolve.params = parse_codeLens params let 
print_codeLensResolve (r : CodeLensResolve.result) : json = print_codeLens r

(************************************************************************)

(* Parses textDocument/rename params. *)
let parse_documentRename (params : json option) : Rename.params =
  Rename.
    {
      textDocument =
        Jget.obj_exn params "textDocument" |> parse_textDocumentIdentifier;
      position = Jget.obj_exn params "position" |> parse_position;
      newName = Jget.string_exn params "newName";
    }

(* A rename result is simply a WorkspaceEdit. *)
let print_documentRename : Rename.result -> json = print_workspaceEdit

(************************************************************************)

(* Parses textDocument/codeLens params. *)
let parse_documentCodeLens (params : json option) : DocumentCodeLens.params =
  DocumentCodeLens.
    {
      textDocument =
        Jget.obj_exn params "textDocument" |> parse_textDocumentIdentifier;
    }

let print_documentCodeLens (r : DocumentCodeLens.result) : json =
  JSON_Array (List.map r ~f:print_codeLens)

(************************************************************************)

(* Serializes one diagnostic. Note that the related entries are emitted
   under both "relatedInformation" (the LSP-standard key) and
   "relatedLocations". *)
let print_diagnostic (diagnostic : PublishDiagnostics.diagnostic) : json =
  PublishDiagnostics.(
    let print_diagnosticSeverity =
      Fn.compose int_ diagnosticSeverity_to_enum
    in
    let print_diagnosticCode = function
      | IntCode i -> Some (int_ i)
      | StringCode s -> Some (string_ s)
      | NoCode -> None
    in
    let print_related (related : relatedLocation) : json =
      Hh_json.JSON_Object
        [
          ("location", print_location related.relatedLocation);
          ("message", string_ related.relatedMessage);
        ]
    in
    Jprint.object_opt
      [
        ("range", Some (print_range diagnostic.range));
        ( "severity",
          Option.map diagnostic.severity ~f:print_diagnosticSeverity );
        ("code", print_diagnosticCode diagnostic.code);
        ("source", Option.map diagnostic.source ~f:string_);
        ("message", Some (JSON_String diagnostic.message));
        ( "relatedInformation",
          Some
            (JSON_Array
               (List.map diagnostic.relatedInformation ~f:print_related)) );
        ( "relatedLocations",
          Some
            (JSON_Array
               (List.map diagnostic.relatedLocations ~f:print_related)) );
      ])

let print_diagnostic_list (ds : PublishDiagnostics.diagnostic list) : json =
  JSON_Array (List.map ds ~f:print_diagnostic)

(* Serializes textDocument/publishDiagnostics params; the FB-only
   "isStatusFB" key is omitted entirely when false. *)
let print_diagnostics (r : PublishDiagnostics.params) : json =
  PublishDiagnostics.(
    Jprint.object_opt
      [
        ("uri", Some (JSON_String (string_of_uri r.uri)));
        ("diagnostics", Some (print_diagnostic_list r.diagnostics));
        ( "isStatusFB",
          if r.isStatusFB then
            Some (JSON_Bool true)
          else
            None );
      ])

(* Parses one diagnostic. The "code" field may be an int or a string;
   any other shape raises an LSP ParseError. *)
let parse_diagnostic (j : json option) : PublishDiagnostics.diagnostic =
  PublishDiagnostics.(
    let parse_code = function
      | None -> NoCode
      | Some (JSON_String s) -> StringCode s
      | Some (JSON_Number s) -> begin
        try IntCode (int_of_string s) with
        | Failure _ ->
          raise
            (Error.LspException
               {
                 Error.code = Error.ParseError;
                 message = "Diagnostic code expected to be an int: " ^ s;
                 data = None;
               })
      end
      | _ ->
        raise
          (Error.LspException
             {
               Error.code = Error.ParseError;
               message = "Diagnostic code expected to be an int or string";
               data = None;
             })
    in
    let parse_info j =
      {
        relatedLocation = Jget.obj_exn j "location" |> parse_location;
        relatedMessage = Jget.string_exn j "message";
      }
    in
    {
      range = Jget.obj_exn j "range" |> parse_range_exn;
      severity =
        Jget.int_opt j "severity"
        |> Option.map ~f:diagnosticSeverity_of_enum
        |> Option.join;
      code = Jget.val_opt j "code" |> parse_code;
      source = Jget.string_opt j "source";
      message = Jget.string_exn j "message";
      relatedInformation =
        Jget.array_d j "relatedInformation" ~default:[]
        |> List.map ~f:parse_info;
      relatedLocations =
        Jget.array_d j "relatedLocations" ~default:[]
        |> List.map ~f:parse_info;
    })

(* Parses a single code-action kind; non-string JSON yields None. *)
let parse_kind json : CodeActionKind.t option =
  CodeActionKind.(
    match json with
    | Some (JSON_String s) -> Some (kind_of_string s)
    | _ -> None)

(* Parses a list of code-action kinds, dropping unparseable entries. *)
let parse_kinds jsons : CodeActionKind.t list =
  List.map ~f:parse_kind jsons |> List.filter_opt

(* Parses textDocument/codeAction params. *)
let parse_codeActionRequest (j : json option) : CodeActionRequest.params =
  let parse_context c : CodeActionRequest.codeActionContext =
    CodeActionRequest.
{
      diagnostics =
        Jget.array_exn c "diagnostics" |> List.map ~f:parse_diagnostic;
      only = Jget.array_opt c "only" |> Option.map ~f:parse_kinds;
    }
  in
  CodeActionRequest.
    {
      textDocument =
        Jget.obj_exn j "textDocument" |> parse_textDocumentIdentifier;
      range = Jget.obj_exn j "range" |> parse_range_exn;
      context = Jget.obj_exn j "context" |> parse_context;
    }

(* Parses codeAction/resolve params; the "data" field carries the
   original codeAction request params that produced the action. *)
let parse_codeActionResolveRequest (j : json option) :
    CodeActionResolveRequest.params =
  let data = Jget.obj_exn j "data" |> parse_codeActionRequest in
  let title = Jget.string_exn j "title" in
  CodeActionResolveRequest.{ data; title }

(************************************************************************)

(* Serializes a code action. A not-yet-resolved edit is emitted as a
   "data" field (echoed back to us in codeAction/resolve) rather than
   as an "edit". *)
let print_codeAction
    (c : 'a CodeAction.t)
    ~(unresolved_to_code_action_request : 'a -> CodeActionRequest.params) :
    json =
  CodeAction.(
    let (edit, command, data) =
      match c.action with
      | EditOnly e -> (Some e, None, None)
      | CommandOnly c -> (None, Some c, None)
      | BothEditThenCommand (e, c) -> (Some e, Some c, None)
      | UnresolvedEdit e ->
        (None, None, Some (unresolved_to_code_action_request e))
    in
    let print_params CodeActionRequest.{ textDocument; range; context } =
      Hh_json.JSON_Object
        [
          ( "textDocument",
            Hh_json.JSON_Object
              [
                ( "uri",
                  JSON_String
                    (string_of_uri textDocument.TextDocumentIdentifier.uri)
                );
              ] );
          ("range", print_range range);
          ( "context",
            Hh_json.JSON_Object
              [
                ( "diagnostics",
                  print_diagnostic_list context.CodeActionRequest.diagnostics
                );
              ] );
        ]
    in
    Jprint.object_opt
      [
        ("title", Some (JSON_String c.title));
        ("kind", Some (JSON_String (CodeActionKind.string_of_kind c.kind)));
        ("diagnostics", Some (print_diagnostic_list c.diagnostics));
        (* a WorkspaceEdit prints the same way a rename result does *)
        ("edit", Option.map edit ~f:print_documentRename);
        ("command", Option.map command ~f:print_command);
        ("data", Option.map data ~f:print_params);
      ])

(* Serializes a textDocument/codeAction result; unresolved actions embed
   the original request params (p) as their "data". *)
let print_codeActionResult
    (c : CodeAction.result) (p : CodeActionRequest.params) : json =
  CodeAction.(
    let print_command_or_action = function
      | Command c -> print_command c
      | Action c ->
        print_codeAction
          c
          ~unresolved_to_code_action_request:(Fn.const p)
    in
    JSON_Array (List.map c ~f:print_command_or_action))

(* Serializes a codeAction/resolve result. resolved_marker is an
   uninhabited type, so the UnresolvedEdit case cannot occur here —
   hence the refutation case "| _ -> .". *)
let print_codeActionResolveResult (c : CodeActionResolve.result) : json =
  let open CodeAction in
  let print_command_or_action = function
    | Command c -> print_command c
    | Action c ->
      let unresolved_to_code_action_request :
          CodeAction.resolved_marker -> CodeActionRequest.params = function
        | _ -> .
      in
      print_codeAction c ~unresolved_to_code_action_request
  in
  match c with
  | Ok command_or_action -> print_command_or_action command_or_action
  | Error err -> print_error err

(************************************************************************)

(* Serializes a window/logMessage notification. *)
let print_logMessage (type_ : MessageType.t) (message : string) : json =
  JSON_Object
    [
      ("type", int_ (MessageType.to_enum type_));
      ("message", JSON_String message);
    ]

(************************************************************************)

let print_telemetryNotification
    (r : LogMessage.params) (extras : (string * Hh_json.json) list) : json =
  (* LSP allows "any" for the format of telemetry notifications. It's up to us!
  *)
  JSON_Object
    (("type", int_ (MessageType.to_enum r.LogMessage.type_))
    :: ("message", JSON_String r.LogMessage.message)
    :: extras)

(************************************************************************)

(* Serializes a window/showMessage notification. *)
let print_showMessage (type_ : MessageType.t) (message : string) : json =
  ShowMessage.(
    let r = { type_; message } in
    JSON_Object
      [
        ("type", int_ (MessageType.to_enum r.type_));
        ("message", JSON_String r.message);
      ])

(************************************************************************)

(* Serializes a window/showMessageRequest with its action buttons. *)
let print_showMessageRequest
    (r : ShowMessageRequest.showMessageRequestParams) : json =
  let print_action (action : ShowMessageRequest.messageActionItem) : json =
    JSON_Object [("title", JSON_String action.ShowMessageRequest.title)]
  in
  Jprint.object_opt
    [
      ("type", Some (int_ (MessageType.to_enum r.ShowMessageRequest.type_)));
      ("message", Some (JSON_String r.ShowMessageRequest.message));
      ( "actions",
        Some
          (JSON_Array (List.map r.ShowMessageRequest.actions ~f:print_action))
      );
    ]

(* Parses a showMessageRequest response: the chosen action's title, if
   the user picked one. *)
let parse_result_showMessageRequest (result : json option) :
    ShowMessageRequest.result =
  ShowMessageRequest.(
    let title = Jget.string_opt result "title" in
    Option.map title ~f:(fun title -> { title }))

(************************************************************************)

(* Serializes the FB-only window/showStatus request: the underlying
   showMessage payload plus optional short message, telemetry payload
   and numerator/denominator progress. *)
let print_showStatus (r : ShowStatusFB.showStatusParams) : json =
  let rr = r.ShowStatusFB.request in
  Jprint.object_opt
    [
      ("type", Some (int_ (MessageType.to_enum rr.ShowStatusFB.type_)));
      ("message", Some (JSON_String rr.ShowStatusFB.message));
      ("shortMessage", Option.map r.ShowStatusFB.shortMessage ~f:string_);
      ("telemetry", r.ShowStatusFB.telemetry);
      ( "progress",
        Option.map r.ShowStatusFB.progress ~f:(fun progress ->
            Jprint.object_opt
              [
                ("numerator", Some (int_ progress));
                ("denominator", Option.map r.ShowStatusFB.total ~f:int_);
              ]) );
    ]

(************************************************************************)

(* Serializes the FB-only connection-status notification. *)
let print_connectionStatus (p : ConnectionStatusFB.params) : json =
  ConnectionStatusFB.(
    JSON_Object [("isConnected", JSON_Bool p.isConnected)])

(************************************************************************)

(* textDocument/hover uses the standard position params. *)
let parse_hover (params : json option) : Hover.params =
  parse_textDocumentPositionParams params

(* Serializes a hover result; JSON_Null when there is none. *)
let print_hover (r : Hover.result) : json =
  Hover.(
    match r with
    | None -> JSON_Null
    | Some r ->
      Jprint.object_opt
        [
          ( "contents",
            Some (JSON_Array (List.map r.Hover.contents ~f:print_markedItem))
          );
          ("range", Option.map r.range ~f:print_range);
        ])

(************************************************************************)

(* Parses a completionItem/resolve request, i.e. a completion item the
   client is handing back for us to fill in. Documentation is kept
   unparsed so it can be echoed verbatim if we don't enrich it. *)
let parse_completionItem (params : json option) :
    CompletionItemResolve.params =
  Completion.(
    let textEdit = Jget.obj_opt params "textEdit" |> parse_textEdit in
    let additionalTextEdits =
      Jget.array_d params "additionalTextEdits" ~default:[]
      |> List.filter_map ~f:parse_textEdit
    in
    let command =
      match Jget.obj_opt params "command" with
      | None -> None
      | c -> Some (parse_command c)
    in
    let documentation =
      match Jget.obj_opt params "documentation" with
      | None -> None
      | Some json -> Some (UnparsedDocumentation json)
    in
    {
      label = Jget.string_exn params "label";
      kind =
        Option.bind (Jget.int_opt params "kind") ~f:completionItemKind_of_enum;
      detail = Jget.string_opt params "detail";
      documentation;
      sortText = Jget.string_opt params "sortText";
      filterText = Jget.string_opt params "filterText";
      insertText = Jget.string_opt params "insertText";
      insertTextFormat =
        Option.bind
          (Jget.int_opt params "insertTextFormat")
          ~f:insertTextFormat_of_enum;
      textEdit;
      additionalTextEdits;
      command;
      data = Jget.obj_opt params "data";
    })

(* Folds one markedString onto an accumulator as markdown text; code
   entries become fenced code blocks. *)
let string_of_markedString (acc : string) (marked : markedString) : string =
  match marked with
  | MarkedCode (lang, code) ->
    acc ^ "```" ^ lang ^ "\n" ^ code ^ "\n" ^ "```\n"
  | MarkedString str -> acc ^ str ^ "\n"

(* Serializes a completion item; optional fields are omitted entirely
   when absent. *)
let print_completionItem (item : Completion.completionItem) : json =
  Completion.(
    Jprint.object_opt
      [
        ("label", Some (JSON_String item.label));
        ( "kind",
          Option.map item.kind ~f:(fun x ->
              int_ @@ completionItemKind_to_enum x) );
        ("detail",
      Option.map item.detail ~f:string_);
        ( "documentation",
          (* pre-parsed documentation is echoed back verbatim; marked
             strings are folded into one markdown blob *)
          match item.documentation with
          | None -> None
          | Some (UnparsedDocumentation json) -> Some json
          | Some (MarkedStringsDocumentation doc) ->
            Some
              (JSON_Object
                 [
                   ("kind", JSON_String "markdown");
                   ( "value",
                     JSON_String
                       (String.strip
                          (List.fold doc ~init:"" ~f:string_of_markedString))
                   );
                 ]) );
        ("sortText", Option.map item.sortText ~f:string_);
        ("filterText", Option.map item.filterText ~f:string_);
        ("insertText", Option.map item.insertText ~f:string_);
        ( "insertTextFormat",
          Option.map item.insertTextFormat ~f:(fun x ->
              int_ @@ insertTextFormat_to_enum x) );
        ("textEdit", Option.map item.textEdit ~f:print_textEdit);
        ( "additionalTextEdits",
          match item.additionalTextEdits with
          | [] -> None
          | text_edits -> Some (print_textEdits text_edits) );
        ("command", Option.map item.command ~f:print_command);
        ("data", item.data);
      ])

(* Parses textDocument/completion params; an unknown "triggerKind"
   enum value raises (Option.value_exn with an explanatory message). *)
let parse_completion (params : json option) : Completion.params =
  Lsp.Completion.(
    let context = Jget.obj_opt params "context" in
    {
      loc = parse_textDocumentPositionParams params;
      context =
        (match context with
        | Some _ ->
          let tk = Jget.int_exn context "triggerKind" in
          Some
            {
              triggerKind =
                Option.value_exn
                  ~message:(Printf.sprintf "Unsupported trigger kind: %d" tk)
                  (Lsp.Completion.completionTriggerKind_of_enum tk);
              triggerCharacter = Jget.string_opt context "triggerCharacter";
            }
        | None -> None);
    })

(* Serializes a textDocument/completion result. *)
let print_completion (r : Completion.result) : json =
  Completion.(
    JSON_Object
      [
        ("isIncomplete", JSON_Bool r.isIncomplete);
        ("items", JSON_Array (List.map r.items ~f:print_completionItem));
      ])

(************************************************************************)

(* Parses workspace/symbol params. *)
let parse_workspaceSymbol (params : json option) : WorkspaceSymbol.params =
  WorkspaceSymbol.{ query = Jget.string_exn params "query" }

let print_workspaceSymbol (r : WorkspaceSymbol.result) : json =
  JSON_Array (List.map r ~f:print_symbolInformation)

(************************************************************************)

(* Parses textDocument/documentSymbol params. *)
let parse_documentSymbol
    (params : json option) : DocumentSymbol.params =
  DocumentSymbol.
    {
      textDocument =
        Jget.obj_exn params "textDocument" |> parse_textDocumentIdentifier;
    }

let print_documentSymbol (r : DocumentSymbol.result) : json =
  JSON_Array (List.map r ~f:print_symbolInformation)

(************************************************************************)

(* Parses textDocument/references params. "includeDeclaration" defaults
   to true; the non-standard "includeIndirectReferences" defaults to
   false. *)
let parse_findReferences (params : json option) : FindReferences.params =
  let partialResultToken =
    Jget.string_opt params "partialResultToken"
    |> Option.map ~f:(fun t -> PartialResultToken t)
  in
  let context = Jget.obj_opt params "context" in
  {
    FindReferences.loc = parse_textDocumentPositionParams params;
    partialResultToken;
    context =
      {
        FindReferences.includeDeclaration =
          Jget.bool_d context "includeDeclaration" ~default:true;
        includeIndirectReferences =
          Jget.bool_d context "includeIndirectReferences" ~default:false;
      };
  }

(* Serializes a $/progress-style partial batch of reference locations. *)
let print_findReferencesPartialResult (Lsp.PartialResultToken token) refs =
  Hh_json.JSON_Object
    [("token", Hh_json.string_ token); ("value", print_locations refs)]

(************************************************************************)

(* Parses a CallHierarchyItem; an out-of-range symbol kind raises
   Jget.Parse. *)
let parse_callItem (params : json option) : CallHierarchyItem.t =
  let rangeObj = Jget.obj_exn params "range" in
  let selectionRangeObj = Jget.obj_exn params "selectionRange" in
  let open CallHierarchyItem in
  {
    name = Jget.string_exn params "name";
    kind =
      (match
         Jget.int_exn params "kind" |> SymbolInformation.symbolKind_of_enum
       with
      | None -> raise (Jget.Parse "invalid symbol kind")
      | Some s -> s);
    detail = Jget.string_opt params "detail";
    uri = Jget.string_exn params "uri" |> uri_of_string;
    range = parse_range_exn rangeObj;
    selectionRange = parse_range_exn selectionRangeObj;
  }

(************************************************************************)

(* Serializes a CallHierarchyItem. *)
let print_callItem (item : CallHierarchyItem.t) : json =
  let open CallHierarchyItem in
  let kindJSON = SymbolInformation.symbolKind_to_enum item.kind |> int_ in
  Jprint.object_opt
    [
      ("name", Some (JSON_String item.name));
      ("kind", Some
  kindJSON);
      ("detail", Option.map item.detail ~f:string_);
      ("uri", Some (JSON_String (string_of_uri item.uri)));
      ("range", Some (print_range item.range));
      ("selectionRange", Some (print_range item.selectionRange));
    ]

(************************************************************************)

(* Parses callHierarchy/incomingCalls and /outgoingCalls params: a
   single "item" previously returned by prepareCallHierarchy. *)
let parse_callHierarchyCalls (params : json option) :
    CallHierarchyCallsRequestParam.t =
  let json_item = Jget.obj_opt params "item" in
  let parsed_item = parse_callItem json_item in
  let open CallHierarchyCallsRequestParam in
  { item = parsed_item }

(************************************************************************)

(* Serializes a textDocument/prepareCallHierarchy result. *)
let print_PrepareCallHierarchyResult (r : PrepareCallHierarchy.result) : json
    =
  match r with
  | None -> JSON_Null
  | Some list -> array_ print_callItem list

(************************************************************************)

(* Serializes a callHierarchy/incomingCalls result. *)
let print_CallHierarchyIncomingCallsResult
    (r : CallHierarchyIncomingCalls.result) : json =
  let open CallHierarchyIncomingCalls in
  let print_CallHierarchyIncomingCall
      (call : CallHierarchyIncomingCalls.callHierarchyIncomingCall) : json =
    JSON_Object
      [
        ("from", print_callItem call.from);
        ("fromRanges", array_ print_range call.fromRanges);
      ]
  in
  match r with
  | None -> JSON_Null
  | Some list -> array_ print_CallHierarchyIncomingCall list

(************************************************************************)

(* Serializes a callHierarchy/outgoingCalls result; the "to" wire field
   is call_to in OCaml ("to" being a keyword-ish name clash). *)
let print_CallHierarchyOutgoingCallsResult
    (r : CallHierarchyOutgoingCalls.result) : json =
  let open CallHierarchyOutgoingCalls in
  let print_CallHierarchyOutgoingCall
      (call : CallHierarchyOutgoingCalls.callHierarchyOutgoingCall) : json =
    JSON_Object
      [
        ("to", print_callItem call.call_to);
        ("fromRanges", array_ print_range call.fromRanges);
      ]
  in
  match r with
  | None -> JSON_Null
  | Some list -> array_ print_CallHierarchyOutgoingCall list

(************************************************************************)

(* textDocument/documentHighlight uses the standard position params. *)
let parse_documentHighlight (params : json option) : DocumentHighlight.params
    =
  parse_textDocumentPositionParams params

(* Serializes a documentHighlight result; "kind" is optional. *)
let print_documentHighlight (r : DocumentHighlight.result) : json =
  DocumentHighlight.(
    let print_highlightKind kind =
      int_ (documentHighlightKind_to_enum kind)
    in
    let print_highlight highlight =
      Jprint.object_opt
        [
          ("range", Some (print_range highlight.range));
          ("kind", Option.map highlight.kind ~f:print_highlightKind);
        ]
    in
    JSON_Array (List.map r ~f:print_highlight))

(************************************************************************)

(* Parses textDocument/formatting params. *)
let parse_documentFormatting (params : json option) :
    DocumentFormatting.params =
  {
    DocumentFormatting.textDocument =
      Jget.obj_exn params "textDocument" |> parse_textDocumentIdentifier;
    options = Jget.obj_opt params "options" |> parse_formattingOptions;
  }

let print_documentFormatting (r : DocumentFormatting.result) : json =
  print_textEdits r

(* Parses textDocument/rangeFormatting params. *)
let parse_documentRangeFormatting (params : json option) :
    DocumentRangeFormatting.params =
  {
    DocumentRangeFormatting.textDocument =
      Jget.obj_exn params "textDocument" |> parse_textDocumentIdentifier;
    range = Jget.obj_exn params "range" |> parse_range_exn;
    options = Jget.obj_opt params "options" |> parse_formattingOptions;
  }

let print_documentRangeFormatting (r : DocumentRangeFormatting.result) : json
    =
  print_textEdits r

(* Parses textDocument/onTypeFormatting params; "ch" is the character
   that triggered formatting. *)
let parse_documentOnTypeFormatting (params : json option) :
    DocumentOnTypeFormatting.params =
  {
    DocumentOnTypeFormatting.textDocument =
      Jget.obj_exn params "textDocument" |> parse_textDocumentIdentifier;
    position = Jget.obj_exn params "position" |> parse_position;
    ch = Jget.string_exn params "ch";
    options = Jget.obj_opt params "options" |> parse_formattingOptions;
  }

let print_documentOnTypeFormatting (r : DocumentOnTypeFormatting.result) :
    json =
  print_textEdits r

(* Parses textDocument/willSaveWaitUntil params; an out-of-range save
   reason falls back to Manual. *)
let parse_willSaveWaitUntil (params : json option) : WillSaveWaitUntil.params
    =
  let open WillSaveWaitUntil in
  {
    textDocument =
      Jget.obj_exn params "textDocument" |> parse_textDocumentIdentifier;
    reason =
      Jget.int_exn params "reason"
      |> textDocumentSaveReason_of_enum
      |> Option.value ~default:Manual;
  }
(************************************************************************)

(* Parses the "initialize" request: process/root info, the FB-specific
   initializationOptions, the client's capabilities, and the trace
   setting. Absent or unrecognized capability fields fall back to
   conservative defaults (false / None / Off). *)
let parse_initialize (params : json option) : Initialize.params =
  Initialize.(
    let rec parse_initialize json =
      {
        processId = Jget.int_opt json "processId";
        rootPath = Jget.string_opt json "rootPath";
        rootUri =
          Option.map ~f:uri_of_string (Jget.string_opt json "rootUri");
        initializationOptions =
          Jget.obj_opt json "initializationOptions"
          |> parse_initializationOptions;
        client_capabilities =
          Jget.obj_opt json "capabilities" |> parse_capabilities;
        trace = Jget.string_opt json "trace" |> parse_trace;
      }
    and parse_trace (s : string option) : trace =
      match s with
      | Some "messages" -> Messages
      | Some "verbose" -> Verbose
      | _ -> Off
    and parse_initializationOptions json =
      {
        namingTableSavedStatePath =
          Jget.string_opt json "namingTableSavedStatePath";
        namingTableSavedStateTestDelay =
          Jget.float_d json "namingTableSavedStateTestDelay" ~default:0.0;
        delayUntilDoneInit =
          Jget.bool_d json "delayUntilDoneInit" ~default:false;
        skipLspServerOnTypeFormatting =
          Jget.bool_d json "skipLspServerOnTypeFormatting" ~default:false;
      }
    and parse_capabilities json =
      {
        workspace = Jget.obj_opt json "workspace" |> parse_workspace;
        textDocument = Jget.obj_opt json "textDocument" |> parse_textDocument;
        window = Jget.obj_opt json "window" |> parse_window;
        telemetry = Jget.obj_opt json "telemetry" |> parse_telemetry;
        client_experimental =
          Jget.obj_opt json "experimental" |> parse_client_experimental;
      }
    and parse_workspace json =
      {
        applyEdit = Jget.bool_d json "applyEdit" ~default:false;
        workspaceEdit =
          Jget.obj_opt json "workspaceEdit" |> parse_workspaceEdit;
        didChangeWatchedFiles =
          Jget.obj_opt json "didChangeWatchedFiles"
          |> parse_dynamicRegistration;
      }
    and parse_dynamicRegistration json =
      {
        dynamicRegistration =
          Jget.bool_d json "dynamicRegistration" ~default:false;
      }
    and parse_workspaceEdit json =
      { documentChanges = Jget.bool_d json "documentChanges" ~default:false }
    and parse_textDocument json =
      {
        synchronization =
          Jget.obj_opt json "synchronization" |> parse_synchronization;
        completion = Jget.obj_opt json "completion" |> parse_completion;
        codeAction = Jget.obj_opt json "codeAction" |> parse_codeAction;
        definition = Jget.obj_opt json "definition" |> parse_definition;
        typeDefinition =
          Jget.obj_opt json "typeDefinition" |> parse_typeDefinition;
        implementation =
          Jget.obj_opt json "implementation" |> parse_implementation;
        declaration = Jget.obj_opt json "declaration" |> parse_declaration;
      }
    and parse_synchronization json =
      {
        can_willSave = Jget.bool_d json "willSave" ~default:false;
        can_willSaveWaitUntil =
          Jget.bool_d json "willSaveWaitUntil" ~default:false;
        can_didSave = Jget.bool_d json "didSave" ~default:false;
      }
    and parse_completion json =
      {
        completionItem =
          Jget.obj_opt json "completionItem" |> parse_completionItem;
      }
    and parse_completionItem json =
      { snippetSupport = Jget.bool_d json "snippetSupport" ~default:false }
    and parse_codeAction json =
      {
        codeAction_dynamicRegistration =
          Jget.bool_d json "dynamicRegistration" ~default:false;
        codeActionLiteralSupport =
          Jget.obj_opt json "codeActionLiteralSupport"
          |> parse_codeActionLiteralSupport;
      }
    and parse_codeActionLiteralSupport json =
      Option.(
        Jget.array_opt json "valueSet" >>= fun ls ->
        Some { codeAction_valueSet = parse_kinds ls })
    and parse_definition json =
      { definitionLinkSupport = Jget.bool_d json "linkSupport" ~default:false }
    and parse_typeDefinition json =
      {
        typeDefinitionLinkSupport =
          Jget.bool_d json "linkSupport" ~default:false;
      }
    and parse_implementation json =
      {
        implementationLinkSupport =
          Jget.bool_d json "linkSupport" ~default:false;
      }
    and parse_declaration json =
      {
        declarationLinkSupport = Jget.bool_d json "linkSupport" ~default:false
      }
    and parse_window json =
      (* presence of the non-standard "status" object is what matters *)
      { status = Jget.obj_opt json "status" |> Option.is_some }
    and parse_telemetry json =
      {
        connectionStatus =
          Jget.obj_opt json "connectionStatus" |> Option.is_some;
      }
    and parse_client_experimental json =
      ClientExperimentalCapabilities.
        { snippetTextEdit = Jget.bool_d json "snippetTextEdit" ~default:false }
    in
    parse_initialize params)

(* Serializes the "initialize" error payload. *)
let print_initializeError (r : Initialize.errorData) : json =
  Initialize.(JSON_Object [("retry", JSON_Bool r.retry)])

(* Serializes the "initialize" response, advertising every server
   capability; optional providers are omitted when None. *)
let print_initialize (r : Initialize.result) : json =
  Initialize.(
    let print_textDocumentSyncKind kind =
      int_ (textDocumentSyncKind_to_enum kind)
    in
    let cap = r.server_capabilities in
    let sync = cap.textDocumentSync in
    JSON_Object
      [
        ( "capabilities",
          Jprint.object_opt
            [
              ( "textDocumentSync",
                Some
                  (Jprint.object_opt
                     [
                       ("openClose", Some (JSON_Bool sync.want_openClose));
                       ( "change",
                         Some (print_textDocumentSyncKind sync.want_change) );
                       ("willSave", Some (JSON_Bool sync.want_willSave));
                       ( "willSaveWaitUntil",
                         Some (JSON_Bool sync.want_willSaveWaitUntil) );
                       ( "save",
                         Option.map sync.want_didSave ~f:(fun save ->
                             JSON_Object
                               [("includeText", JSON_Bool save.includeText)])
                       );
                     ]) );
              ("hoverProvider", Some (JSON_Bool cap.hoverProvider));
              ( "completionProvider",
                Option.map cap.completionProvider ~f:(fun comp ->
                    JSON_Object
                      [
                        ( "resolveProvider",
                          JSON_Bool comp.CompletionOptions.resolveProvider );
                        ( "triggerCharacters",
                          Jprint.string_array
                            comp.CompletionOptions
                              .completion_triggerCharacters );
                      ]) );
              ( "signatureHelpProvider",
                Option.map cap.signatureHelpProvider ~f:(fun shp ->
                    JSON_Object
                      [
                        ( "triggerCharacters",
                          Jprint.string_array shp.sighelp_triggerCharacters );
                      ]) );
              ("definitionProvider", Some (JSON_Bool cap.definitionProvider));
              ( "typeDefinitionProvider",
                Some (JSON_Bool cap.typeDefinitionProvider) );
              ("referencesProvider", Some (JSON_Bool cap.referencesProvider));
              ( "documentHighlightProvider",
                Some (JSON_Bool cap.documentHighlightProvider) );
              ( "documentSymbolProvider",
                Some (JSON_Bool cap.documentSymbolProvider) );
              ( "workspaceSymbolProvider",
                Some (JSON_Bool cap.workspaceSymbolProvider) );
              ( "codeActionProvider",
                Option.map cap.codeActionProvider ~f:(fun provider ->
                    JSON_Object
                      [
                        ( "resolveProvider",
                          JSON_Bool provider.CodeActionOptions.resolveProvider
                        );
                      ]) );
              ( "codeLensProvider",
                Option.map cap.codeLensProvider ~f:(fun codelens ->
                    JSON_Object
                      [
                        ( "resolveProvider",
                          JSON_Bool codelens.codelens_resolveProvider );
                      ]) );
              ( "documentFormattingProvider",
                Some (JSON_Bool cap.documentFormattingProvider) );
              ( "documentRangeFormattingProvider",
                Some (JSON_Bool cap.documentRangeFormattingProvider) );
              ( "documentOnTypeFormattingProvider",
                Option.map cap.documentOnTypeFormattingProvider ~f:(fun o ->
                    JSON_Object
                      [
                        ( "firstTriggerCharacter",
                          JSON_String o.firstTriggerCharacter );
                        ( "moreTriggerCharacter",
                          Jprint.string_array o.moreTriggerCharacter );
                      ]) );
              ("renameProvider", Some (JSON_Bool cap.renameProvider));
              ( "documentLinkProvider",
                Option.map cap.documentLinkProvider ~f:(fun dlp ->
                    JSON_Object
                      [
                        ( "resolveProvider",
                          JSON_Bool dlp.doclink_resolveProvider );
                      ]) );
              ( "executeCommandProvider",
                Option.map cap.executeCommandProvider ~f:(fun p ->
                    JSON_Object
                      [("commands", Jprint.string_array p.commands)]) );
              ( "implementationProvider",
                Some (JSON_Bool cap.implementationProvider) );
              (* the public "rageProvider" key is backed by the FB-internal
                 rageProviderFB capability *)
              ("rageProvider", Some (JSON_Bool cap.rageProviderFB));
              ( "experimental",
                Option.map
                  cap.server_experimental
                  ~f:(fun experimental_capabilities ->
                    JSON_Object
                      [
                        ( "snippetTextEdit",
                          JSON_Bool
                            experimental_capabilities
                              .ServerExperimentalCapabilities.snippetTextEdit
                        );
                      ]) );
            ] );
      ])

(************************************************************************)

(* Serializes dynamic-registration options. Only didChangeWatchedFiles
   registration is supported; kind 7 subscribes to every event. *)
let print_registrationOptions (registerOptions : Lsp.lsp_registration_options)
    : Hh_json.json =
  match registerOptions with
  | Lsp.DidChangeWatchedFilesRegistrationOptions registerOptions ->
    Lsp.DidChangeWatchedFiles.(
      JSON_Object
        [
          ( "watchers",
            JSON_Array
              (List.map registerOptions.watchers ~f:(fun watcher ->
                   JSON_Object
                     [
                       ("globPattern", JSON_String watcher.globPattern);
                       ("kind", int_ 7);
                       (* all events: create, change, and delete *)
                     ])) );
        ])

(* Serializes a client/registerCapability request. *)
let print_registerCapability (params : Lsp.RegisterCapability.params) :
    Hh_json.json =
  Lsp.RegisterCapability.(
    JSON_Object
      [
        ( "registrations",
          JSON_Array
    (List.map params.registrations ~f:(fun registration ->
                 JSON_Object
                   [
                     ("id", string_ registration.id);
                     ("method", string_ registration.method_);
                     ( "registerOptions",
                       print_registrationOptions registration.registerOptions
                     );
                   ])) );
      ])

(* Parses a workspace/didChangeWatchedFiles notification; an
   out-of-range change type is a hard failure (failwith). *)
let parse_didChangeWatchedFiles (json : Hh_json.json option) :
    DidChangeWatchedFiles.params =
  let changes =
    Jget.array_exn json "changes"
    |> List.map ~f:(fun change ->
           let uri = Jget.string_exn change "uri" |> uri_of_string in
           let type_ = Jget.int_exn change "type" in
           let type_ =
             match DidChangeWatchedFiles.fileChangeType_of_enum type_ with
             | Some type_ -> type_
             | None ->
               failwith (Printf.sprintf "Invalid file change type %d" type_)
           in
           { DidChangeWatchedFiles.uri; type_ })
  in
  { DidChangeWatchedFiles.changes }

(************************************************************************)
(* universal parser+printer *)
(************************************************************************)

(* Extracts the document URI that a message refers to, if it has one;
   None for messages not about any particular document. For
   didChangeWatchedFiles, the first change's URI is used. *)
let get_uri_opt (m : lsp_message) : Lsp.documentUri option =
  let open TextDocumentIdentifier in
  match m with
  | RequestMessage (_, DocumentCodeLensRequest p) ->
    Some p.DocumentCodeLens.textDocument.uri
  | RequestMessage (_, HoverRequest p) ->
    Some p.TextDocumentPositionParams.textDocument.uri
  | RequestMessage (_, DefinitionRequest p) ->
    Some p.TextDocumentPositionParams.textDocument.uri
  | RequestMessage (_, TypeDefinitionRequest p) ->
    Some p.TextDocumentPositionParams.textDocument.uri
  | RequestMessage (_, CodeActionRequest p) ->
    Some p.CodeActionRequest.textDocument.uri
  | RequestMessage (_, CodeActionResolveRequest p) ->
    Some p.CodeActionResolveRequest.data.CodeActionRequest.textDocument.uri
  | RequestMessage (_, CompletionRequest p) ->
    Some p.Completion.loc.TextDocumentPositionParams.textDocument.uri
  | RequestMessage (_, DocumentSymbolRequest p) ->
    Some p.DocumentSymbol.textDocument.uri
  | RequestMessage (_, FindReferencesRequest p) ->
    Some p.FindReferences.loc.TextDocumentPositionParams.textDocument.uri
  | RequestMessage (_, PrepareCallHierarchyRequest p) ->
    Some p.TextDocumentPositionParams.textDocument.uri
  (*Implement for CallHierarchy*)
  | RequestMessage (_, CallHierarchyIncomingCallsRequest p) ->
    Some p.CallHierarchyCallsRequestParam.item.CallHierarchyItem.uri
  | RequestMessage (_, CallHierarchyOutgoingCallsRequest p) ->
    Some p.CallHierarchyCallsRequestParam.item.CallHierarchyItem.uri
  | RequestMessage (_, ImplementationRequest p) ->
    Some p.TextDocumentPositionParams.textDocument.uri
  | RequestMessage (_, DocumentHighlightRequest p) ->
    Some p.TextDocumentPositionParams.textDocument.uri
  | RequestMessage (_, DocumentFormattingRequest p) ->
    Some p.DocumentFormatting.textDocument.uri
  | RequestMessage (_, DocumentRangeFormattingRequest p) ->
    Some p.DocumentRangeFormatting.textDocument.uri
  | RequestMessage (_, DocumentOnTypeFormattingRequest p) ->
    Some p.DocumentOnTypeFormatting.textDocument.uri
  | RequestMessage (_, RenameRequest p) -> Some p.Rename.textDocument.uri
  | RequestMessage (_, SignatureHelpRequest p) ->
    Some p.TextDocumentPositionParams.textDocument.uri
  | RequestMessage (_, TypeHierarchyRequest p) ->
    Some p.TextDocumentPositionParams.textDocument.uri
  | NotificationMessage (PublishDiagnosticsNotification p) ->
    Some p.PublishDiagnostics.uri
  | NotificationMessage (DidOpenNotification p) ->
    Some p.DidOpen.textDocument.TextDocumentItem.uri
  | NotificationMessage (DidCloseNotification p) ->
    Some p.DidClose.textDocument.uri
  | NotificationMessage (DidSaveNotification p) ->
    Some p.DidSave.textDocument.uri
  | NotificationMessage (DidChangeNotification p) ->
    Some p.DidChange.textDocument.VersionedTextDocumentIdentifier.uri
  | NotificationMessage (DidChangeWatchedFilesNotification p) -> begin
    match p.DidChangeWatchedFiles.changes with
    | [] -> None
    | { DidChangeWatchedFiles.uri; _ } :: _ -> Some uri
  end
  (* messages below carry no document URI *)
  | RequestMessage (_, HackTestStartServerRequestFB)
  | RequestMessage (_, HackTestStopServerRequestFB)
  | RequestMessage (_, HackTestShutdownServerlessRequestFB)
  | RequestMessage (_, UnknownRequest _)
  | RequestMessage (_, InitializeRequest _)
  | RequestMessage (_, RegisterCapabilityRequest _)
  | RequestMessage (_, ShutdownRequest)
  | RequestMessage (_, CodeLensResolveRequest _)
  | RequestMessage (_, CompletionItemResolveRequest _)
  | RequestMessage (_, WorkspaceSymbolRequest _)
  | RequestMessage (_, ShowMessageRequestRequest _)
  | RequestMessage (_, ShowStatusRequestFB _)
  | RequestMessage (_, RageRequestFB)
  | RequestMessage (_, WillSaveWaitUntilRequest _)
  | NotificationMessage ExitNotification
  | NotificationMessage (CancelRequestNotification _)
  | NotificationMessage (LogMessageNotification _)
  | NotificationMessage (TelemetryNotification _)
  | NotificationMessage (ShowMessageNotification _)
  | NotificationMessage (ConnectionStatusNotificationFB _)
  | NotificationMessage InitializedNotification
  | NotificationMessage (FindReferencesPartialResultNotification _)
  | NotificationMessage (SetTraceNotification _)
  | NotificationMessage LogTraceNotification
  | NotificationMessage (UnknownNotification _)
  | ResponseMessage _ ->
    None

(* Maps a request to its LSP wire method name. *)
let request_name_to_string (request : lsp_request) : string =
  match request with
  | ShowMessageRequestRequest _ -> "window/showMessageRequest"
  | ShowStatusRequestFB _ -> "window/showStatus"
  | InitializeRequest _ -> "initialize"
  | RegisterCapabilityRequest _ -> "client/registerCapability"
  | ShutdownRequest -> "shutdown"
  | CodeLensResolveRequest _ -> "codeLens/resolve"
  | HoverRequest _ -> "textDocument/hover"
  | CodeActionRequest _ -> "textDocument/codeAction"
  | CodeActionResolveRequest _ -> "codeAction/resolve"
  | CompletionRequest _ -> "textDocument/completion"
  | CompletionItemResolveRequest _ -> "completionItem/resolve"
  | DefinitionRequest _ -> "textDocument/definition"
  | TypeDefinitionRequest _ -> "textDocument/typeDefinition"
  | ImplementationRequest _ -> "textDocument/implementation"
  | WorkspaceSymbolRequest _ -> "workspace/symbol"
  | DocumentSymbolRequest _ -> "textDocument/documentSymbol"
  | FindReferencesRequest _ -> "textDocument/references"
  | PrepareCallHierarchyRequest _ -> "textDocument/prepareCallHierarchy"
  | CallHierarchyIncomingCallsRequest _ -> "callHierarchy/incomingCalls"
  | CallHierarchyOutgoingCallsRequest _ -> "callHierarchy/outgoingCalls"
  | DocumentHighlightRequest _ -> "textDocument/documentHighlight"
  | DocumentFormattingRequest _ -> "textDocument/formatting"
  | DocumentRangeFormattingRequest _ -> "textDocument/rangeFormatting"
  | DocumentOnTypeFormattingRequest _ -> "textDocument/onTypeFormatting"
  | RageRequestFB -> "telemetry/rage"
  | RenameRequest _ -> "textDocument/rename"
  | DocumentCodeLensRequest _ -> "textDocument/codeLens"
  | SignatureHelpRequest _ -> "textDocument/signatureHelp"
  | TypeHierarchyRequest _ -> "textDocument/typeHierarchy"
  | HackTestStartServerRequestFB -> "$test/startHhServer"
  | HackTestStopServerRequestFB -> "$test/stopHhServer"
  | HackTestShutdownServerlessRequestFB -> "$test/shutdownServerlessIde"
  | WillSaveWaitUntilRequest _ -> "textDocument/willSaveWaitUntil"
  | UnknownRequest (method_, _params) -> method_

(* Maps a result to the wire method name of the request it answers. *)
let result_name_to_string (result : lsp_result) : string =
  match result with
  | ShowMessageRequestResult _ -> "window/showMessageRequest"
  | ShowStatusResultFB _ -> "window/showStatus"
  | InitializeResult _ -> "initialize"
  | ShutdownResult -> "shutdown"
  | CodeLensResolveResult _ -> "codeLens/resolve"
  | HoverResult _ -> "textDocument/hover"
  | CodeActionResult _ -> "textDocument/codeAction"
  | CodeActionResolveResult _ -> "codeAction/resolve"
  | CompletionResult _ -> "textDocument/completion"
  | CompletionItemResolveResult _ -> "completionItem/resolve"
  | DefinitionResult _ -> "textDocument/definition"
  | TypeDefinitionResult _ -> "textDocument/typeDefinition"
  | ImplementationResult _ -> "textDocument/implementation"
  | WorkspaceSymbolResult _ -> "workspace/symbol"
  | DocumentSymbolResult _ -> "textDocument/documentSymbol"
  | FindReferencesResult _ -> "textDocument/references"
  | PrepareCallHierarchyResult _ -> "textDocument/prepareCallHierarchy"
  | CallHierarchyIncomingCallsResult _ -> "callHierarchy/incomingCalls"
| CallHierarchyOutgoingCallsResult _ -> "callHierarchy/outgoingCalls" | DocumentHighlightResult _ -> "textDocument/documentHighlight" | DocumentFormattingResult _ -> "textDocument/formatting" | DocumentRangeFormattingResult _ -> "textDocument/rangeFormatting" | DocumentOnTypeFormattingResult _ -> "textDocument/onTypeFormatting" | RageResultFB _ -> "telemetry/rage" | RenameResult _ -> "textDocument/rename" | DocumentCodeLensResult _ -> "textDocument/codeLens" | SignatureHelpResult _ -> "textDocument/signatureHelp" | TypeHierarchyResult _ -> "textDocument/typeHierarchy" | HackTestStartServerResultFB -> "$test/startHhServer" | HackTestStopServerResultFB -> "$test/stopHhServer" | HackTestShutdownServerlessResultFB -> "$test/shutdownServerlessIde" | RegisterCapabilityRequestResult -> "client/registerCapability" | WillSaveWaitUntilResult _ -> "textDocument/willSaveWaitUntil" | ErrorResult e -> "ERROR/" ^ e.Error.message let notification_name_to_string (notification : lsp_notification) : string = match notification with | ExitNotification -> "exit" | CancelRequestNotification _ -> "$/cancelRequest" | PublishDiagnosticsNotification _ -> "textDocument/publishDiagnostics" | DidOpenNotification _ -> "textDocument/didOpen" | DidCloseNotification _ -> "textDocument/didClose" | DidSaveNotification _ -> "textDocument/didSave" | DidChangeNotification _ -> "textDocument/didChange" | DidChangeWatchedFilesNotification _ -> "workspace/didChangeWatchedFiles" | TelemetryNotification _ -> "telemetry/event" | LogMessageNotification _ -> "window/logMessage" | ShowMessageNotification _ -> "window/showMessage" | ConnectionStatusNotificationFB _ -> "telemetry/connectionStatus" | InitializedNotification -> "initialized" | FindReferencesPartialResultNotification _ -> "$/progress" | SetTraceNotification _ -> "$/setTraceNotification" | LogTraceNotification -> "$/logTraceNotification" | UnknownNotification (method_, _params) -> method_ let message_name_to_string (message : lsp_message) : string = 
match message with | RequestMessage (_, r) -> request_name_to_string r | NotificationMessage n -> notification_name_to_string n | ResponseMessage (_, r) -> result_name_to_string r let denorm_message_to_string (message : lsp_message) : string = let uri = match get_uri_opt message with | Some (DocumentUri uri) -> "(" ^ uri ^ ")" | None -> "" in match message with | RequestMessage (id, r) -> Printf.sprintf "request %s %s%s" (id_to_string id) (request_name_to_string r) uri | NotificationMessage n -> Printf.sprintf "notification %s%s" (notification_name_to_string n) uri | ResponseMessage (id, ErrorResult e) -> Printf.sprintf "error %s %s %s" (id_to_string id) e.Error.message uri | ResponseMessage (id, r) -> Printf.sprintf "result %s %s%s" (id_to_string id) (result_name_to_string r) uri let parse_lsp_request (method_ : string) (params : json option) : lsp_request = match method_ with | "initialize" -> InitializeRequest (parse_initialize params) | "shutdown" -> ShutdownRequest | "codeLens/resolve" -> CodeLensResolveRequest (parse_codeLensResolve params) | "textDocument/hover" -> HoverRequest (parse_hover params) | "textDocument/codeAction" -> CodeActionRequest (parse_codeActionRequest params) | "codeAction/resolve" -> CodeActionResolveRequest (parse_codeActionResolveRequest params) | "textDocument/completion" -> CompletionRequest (parse_completion params) | "completionItem/resolve" -> CompletionItemResolveRequest (parse_completionItem params) | "textDocument/definition" -> DefinitionRequest (parse_textDocumentPositionParams params) | "textDocument/typeDefinition" -> TypeDefinitionRequest (parse_textDocumentPositionParams params) | "textDocument/implementation" -> ImplementationRequest (parse_textDocumentPositionParams params) | "workspace/symbol" -> WorkspaceSymbolRequest (parse_workspaceSymbol params) | "textDocument/documentSymbol" -> DocumentSymbolRequest (parse_documentSymbol params) | "textDocument/references" -> FindReferencesRequest (parse_findReferences params) | 
"textDocument/prepareCallHierarchy" -> PrepareCallHierarchyRequest (parse_textDocumentPositionParams params) | "callHierarchy/incomingCalls" -> CallHierarchyIncomingCallsRequest (parse_callHierarchyCalls params) | "callHierarchy/outgoingCalls" -> CallHierarchyOutgoingCallsRequest (parse_callHierarchyCalls params) | "textDocument/rename" -> RenameRequest (parse_documentRename params) | "textDocument/documentHighlight" -> DocumentHighlightRequest (parse_documentHighlight params) | "textDocument/formatting" -> DocumentFormattingRequest (parse_documentFormatting params) | "textDocument/rangeFormatting" -> DocumentRangeFormattingRequest (parse_documentRangeFormatting params) | "textDocument/onTypeFormatting" -> DocumentOnTypeFormattingRequest (parse_documentOnTypeFormatting params) | "textDocument/signatureHelp" -> SignatureHelpRequest (parse_signatureHelp params) | "textDocument/typeHierarchy" -> TypeHierarchyRequest (parse_typeHierarchy params) | "textDocument/codeLens" -> DocumentCodeLensRequest (parse_documentCodeLens params) | "telemetry/rage" -> RageRequestFB | "$test/startHhServer" -> HackTestStartServerRequestFB | "$test/stopHhServer" -> HackTestStopServerRequestFB | "$test/shutdownServerlessIde" -> HackTestShutdownServerlessRequestFB | "textDocument/willSaveWaitUntil" -> WillSaveWaitUntilRequest (parse_willSaveWaitUntil params) | "window/showMessageRequest" | "window/showStatus" | _ -> UnknownRequest (method_, params) let parse_lsp_notification (method_ : string) (params : json option) : lsp_notification = match method_ with | "$/cancelRequest" -> CancelRequestNotification (parse_cancelRequest params) | "$/setTraceNotification" -> SetTraceNotification (parse_setTraceNotification params) | "$/logTraceNotification" -> LogTraceNotification | "initialized" -> InitializedNotification | "exit" -> ExitNotification | "textDocument/didOpen" -> DidOpenNotification (parse_didOpen params) | "textDocument/didClose" -> DidCloseNotification (parse_didClose params) | 
"textDocument/didSave" -> DidSaveNotification (parse_didSave params) | "textDocument/didChange" -> DidChangeNotification (parse_didChange params) | "workspace/didChangeWatchedFiles" -> DidChangeWatchedFilesNotification (parse_didChangeWatchedFiles params) | "textDocument/publishDiagnostics" | "window/logMessage" | "window/showMessage" | "window/progress" | "window/actionRequired" | "telemetry/connectionStatus" | _ -> UnknownNotification (method_, params) let parse_lsp_result (request : lsp_request) (result : json) : lsp_result = let method_ = request_name_to_string request in match request with | ShowMessageRequestRequest _ -> ShowMessageRequestResult (parse_result_showMessageRequest (Some result)) | ShowStatusRequestFB _ -> ShowStatusResultFB () | RegisterCapabilityRequest _ -> RegisterCapabilityRequestResult | InitializeRequest _ | ShutdownRequest | CodeLensResolveRequest _ | HoverRequest _ | CodeActionRequest _ | CodeActionResolveRequest _ | CompletionRequest _ | CompletionItemResolveRequest _ | DefinitionRequest _ | TypeDefinitionRequest _ | ImplementationRequest _ | WorkspaceSymbolRequest _ | DocumentSymbolRequest _ | FindReferencesRequest _ | PrepareCallHierarchyRequest _ | CallHierarchyIncomingCallsRequest _ | CallHierarchyOutgoingCallsRequest _ | DocumentHighlightRequest _ | DocumentFormattingRequest _ | DocumentRangeFormattingRequest _ | DocumentOnTypeFormattingRequest _ | RageRequestFB | RenameRequest _ | DocumentCodeLensRequest _ | SignatureHelpRequest _ | TypeHierarchyRequest _ | HackTestStartServerRequestFB | HackTestStopServerRequestFB | HackTestShutdownServerlessRequestFB | WillSaveWaitUntilRequest _ | UnknownRequest _ -> raise (Error.LspException { Error.code = Error.ParseError; message = "Don't know how to parse LSP response " ^ method_; data = None; }) (* parse_lsp: non-jsonrpc inputs - will raise an exception *) (* requests and notifications - will raise an exception if they're malformed, *) (* otherwise return Some *) (* responses - will raise 
an exception if they're malformed, will return None *) (* if they're absent from the "outstanding" map, otherwise return Some. *) let parse_lsp (json : json) (outstanding : lsp_id -> lsp_request) : lsp_message = let json = Some json in let id = Jget.val_opt json "id" |> parse_id_opt in let method_opt = Jget.string_opt json "method" in let params = Jget.val_opt json "params" in let result = Jget.val_opt json "result" in let error = Jget.val_opt json "error" in match (id, method_opt, result, error) with | (None, Some method_, _, _) -> NotificationMessage (parse_lsp_notification method_ params) | (Some id, Some method_, _, _) -> RequestMessage (id, parse_lsp_request method_ params) | (Some id, _, Some result, _) -> let request = outstanding id in ResponseMessage (id, parse_lsp_result request result) | (Some id, _, _, Some error) -> ResponseMessage (id, ErrorResult (parse_error error)) | (_, _, _, _) -> raise (Error.LspException { Error.code = Error.ParseError; message = "Not JsonRPC"; data = None }) let print_lsp_request (id : lsp_id) (request : lsp_request) : json = let method_ = request_name_to_string request in let params = match request with | ShowMessageRequestRequest r -> print_showMessageRequest r | ShowStatusRequestFB r -> print_showStatus r | RegisterCapabilityRequest r -> print_registerCapability r | InitializeRequest _ | ShutdownRequest | HoverRequest _ | CodeActionRequest _ | CodeActionResolveRequest _ | CodeLensResolveRequest _ | CompletionRequest _ | CompletionItemResolveRequest _ | DefinitionRequest _ | TypeDefinitionRequest _ | ImplementationRequest _ | WorkspaceSymbolRequest _ | DocumentSymbolRequest _ | FindReferencesRequest _ | PrepareCallHierarchyRequest _ | CallHierarchyIncomingCallsRequest _ | CallHierarchyOutgoingCallsRequest _ | DocumentHighlightRequest _ | DocumentFormattingRequest _ | DocumentRangeFormattingRequest _ | DocumentOnTypeFormattingRequest _ | RageRequestFB | RenameRequest _ | DocumentCodeLensRequest _ | SignatureHelpRequest _ | 
TypeHierarchyRequest _ | HackTestStartServerRequestFB | HackTestStopServerRequestFB | HackTestShutdownServerlessRequestFB | WillSaveWaitUntilRequest _ | UnknownRequest _ -> failwith ("Don't know how to print request " ^ method_) in JSON_Object [ ("jsonrpc", JSON_String "2.0"); ("id", print_id id); ("method", JSON_String method_); ("params", params); ] let print_lsp_response (id : lsp_id) (result : lsp_result) : json = let method_ = result_name_to_string result in let json = match result with | InitializeResult r -> print_initialize r | ShutdownResult -> print_shutdown () | CodeLensResolveResult r -> print_codeLensResolve r | HoverResult r -> print_hover r | CodeActionResult (r, p) -> print_codeActionResult r p | CodeActionResolveResult r -> print_codeActionResolveResult r | CompletionResult r -> print_completion r | CompletionItemResolveResult r -> print_completionItem r | DefinitionResult r -> print_definition_locations r | TypeDefinitionResult r -> print_definition_locations r | ImplementationResult r -> print_locations r | WorkspaceSymbolResult r -> print_workspaceSymbol r | DocumentSymbolResult r -> print_documentSymbol r | FindReferencesResult r -> print_locations r | PrepareCallHierarchyResult r -> print_PrepareCallHierarchyResult r | CallHierarchyIncomingCallsResult r -> print_CallHierarchyIncomingCallsResult r | CallHierarchyOutgoingCallsResult r -> print_CallHierarchyOutgoingCallsResult r | DocumentHighlightResult r -> print_documentHighlight r | DocumentFormattingResult r -> print_documentFormatting r | DocumentRangeFormattingResult r -> print_documentRangeFormatting r | DocumentOnTypeFormattingResult r -> print_documentOnTypeFormatting r | RageResultFB r -> print_rage r | RenameResult r -> print_documentRename r | DocumentCodeLensResult r -> print_documentCodeLens r | SignatureHelpResult r -> print_signatureHelp r | TypeHierarchyResult r -> print_typeHierarchy r | HackTestStartServerResultFB -> JSON_Null | HackTestStopServerResultFB -> JSON_Null | 
HackTestShutdownServerlessResultFB -> JSON_Null | ShowMessageRequestResult _ | ShowStatusResultFB _ | RegisterCapabilityRequestResult -> failwith ("Don't know how to print result " ^ method_) | WillSaveWaitUntilResult r -> print_textEdits r | ErrorResult e -> print_error e in match result with | ErrorResult _ -> JSON_Object [("jsonrpc", JSON_String "2.0"); ("id", print_id id); ("error", json)] | _ -> JSON_Object [("jsonrpc", JSON_String "2.0"); ("id", print_id id); ("result", json)] let print_lsp_notification (notification : lsp_notification) : json = let method_ = notification_name_to_string notification in let params = match notification with | CancelRequestNotification r -> print_cancelRequest r | SetTraceNotification r -> print_setTraceNotification r | PublishDiagnosticsNotification r -> print_diagnostics r | FindReferencesPartialResultNotification (token, r) -> print_findReferencesPartialResult token r | TelemetryNotification (r, extras) -> print_telemetryNotification r extras | LogMessageNotification r -> print_logMessage r.LogMessage.type_ r.LogMessage.message | ShowMessageNotification r -> print_showMessage r.ShowMessage.type_ r.ShowMessage.message | ConnectionStatusNotificationFB r -> print_connectionStatus r | ExitNotification | InitializedNotification | LogTraceNotification | DidOpenNotification _ | DidCloseNotification _ | DidSaveNotification _ | DidChangeNotification _ | DidChangeWatchedFilesNotification _ | UnknownNotification _ -> failwith ("Don't know how to print notification " ^ method_) in JSON_Object [ ("jsonrpc", JSON_String "2.0"); ("method", JSON_String method_); ("params", params); ] let print_lsp (message : lsp_message) : json = match message with | RequestMessage (id, request) -> print_lsp_request id request | ResponseMessage (id, result) -> print_lsp_response id result | NotificationMessage notification -> print_lsp_notification notification
OCaml Interface
hhvm/hphp/hack/src/utils/lsp/lsp_fmt.mli
(*
 * Copyright (c) 2019, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the "hack" directory of this source tree.
 *
 *)

(* Serialization/deserialization between Lsp.* values and Hh_json.json.
   Convention throughout: [parse_*] take [Hh_json.json option] (the optional
   "params" of a JSON-RPC message) and may raise on malformed input;
   [print_*] are total. *)

(* ---- JSON-RPC ids ---- *)

val parse_id : Hh_json.json -> Lsp.lsp_id

val parse_id_opt : Hh_json.json option -> Lsp.lsp_id option

val print_id : Lsp.lsp_id -> Hh_json.json

val id_to_string : Lsp.lsp_id -> string

(* ---- Basic protocol structures: positions, ranges, locations ---- *)

val parse_position : Hh_json.json option -> Lsp.position

val print_position : Lsp.position -> Hh_json.json

val print_range : Lsp.range -> Hh_json.json

val print_location : Lsp.Location.t -> Hh_json.json

val print_locations : Lsp.Location.t list -> Hh_json.json

val print_definition_location : Lsp.DefinitionLocation.t -> Hh_json.json

val print_definition_locations : Lsp.DefinitionLocation.t list -> Hh_json.json

val parse_range_exn : Hh_json.json option -> Lsp.range

val parse_range_opt : Hh_json.json option -> Lsp.range option

(* ---- Text documents and edits ---- *)

val parse_textDocumentIdentifier :
  Hh_json.json option -> Lsp.TextDocumentIdentifier.t

val parse_versionedTextDocumentIdentifier :
  Hh_json.json option -> Lsp.VersionedTextDocumentIdentifier.t

val parse_textDocumentItem : Hh_json.json option -> Lsp.TextDocumentItem.t

val print_textDocumentItem : Lsp.TextDocumentItem.t -> Hh_json.json

val print_markedItem : Lsp.markedString -> Hh_json.json

val parse_textDocumentPositionParams :
  Hh_json.json option -> Lsp.TextDocumentPositionParams.t

val parse_textEdit : Hh_json.json option -> Lsp.TextEdit.t option

val print_textEdit : Lsp.TextEdit.t -> Hh_json.json

val print_textEdits : Lsp.TextEdit.t list -> Hh_json.json

val print_command : Lsp.Command.t -> Hh_json.json

val parse_command : Hh_json.json option -> Lsp.Command.t

val parse_formattingOptions :
  Hh_json.json option -> Lsp.DocumentFormatting.formattingOptions

val print_symbolInformation : Lsp.SymbolInformation.t -> Hh_json.json

(* ---- Per-method payloads ---- *)

val print_shutdown : unit -> Hh_json.json

val parse_cancelRequest : Hh_json.json option -> Lsp.CancelRequest.params

val print_cancelRequest : Lsp.CancelRequest.params -> Hh_json.json

val print_rage : Lsp.RageFB.result -> Hh_json.json

val parse_didOpen : Hh_json.json option -> Lsp.DidOpen.params

val print_didOpen : Lsp.DidOpen.params -> Hh_json.json

val parse_didClose : Hh_json.json option -> Lsp.DidClose.params

val parse_didSave : Hh_json.json option -> Lsp.DidSave.params

val parse_didChange : Hh_json.json option -> Lsp.DidChange.params

val parse_signatureHelp : Hh_json.json option -> Lsp.SignatureHelp.params

val print_signatureHelp : Lsp.SignatureHelp.result -> Hh_json.json

val parse_documentRename : Hh_json.json option -> Lsp.Rename.params

val print_documentRename : Lsp.Rename.result -> Hh_json.json

val print_diagnostics : Lsp.PublishDiagnostics.params -> Hh_json.json

val print_codeActionResult :
  Lsp.CodeAction.result -> Lsp.CodeActionRequest.params -> Hh_json.json

val print_codeActionResolveResult : Lsp.CodeActionResolve.result -> Hh_json.json

val print_logMessage : Lsp.MessageType.t -> string -> Hh_json.json

val print_showMessage : Lsp.MessageType.t -> string -> Hh_json.json

val print_showMessageRequest :
  Lsp.ShowMessageRequest.showMessageRequestParams -> Hh_json.json

val parse_result_showMessageRequest :
  Hh_json.json option -> Lsp.ShowMessageRequest.result

val print_showStatus : Lsp.ShowStatusFB.showStatusParams -> Hh_json.json

val print_connectionStatus : Lsp.ConnectionStatusFB.params -> Hh_json.json

val parse_hover : Hh_json.json option -> Lsp.Hover.params

val print_hover : Lsp.Hover.result -> Hh_json.json

val parse_completionItem : Hh_json.json option -> Lsp.CompletionItemResolve.params

val print_completionItem : Lsp.Completion.completionItem -> Hh_json.json

val parse_completion : Hh_json.json option -> Lsp.Completion.params

val print_completion : Lsp.Completion.result -> Hh_json.json

val parse_callItem : Hh_json.json option -> Lsp.CallHierarchyItem.t

val print_callItem : Lsp.CallHierarchyItem.t -> Hh_json.json

val parse_callHierarchyCalls :
  Hh_json.json option -> Lsp.CallHierarchyCallsRequestParam.t

val print_PrepareCallHierarchyResult :
  Lsp.PrepareCallHierarchy.result -> Hh_json.json

val print_CallHierarchyIncomingCallsResult :
  Lsp.CallHierarchyIncomingCalls.result -> Hh_json.json

val print_CallHierarchyOutgoingCallsResult :
  Lsp.CallHierarchyOutgoingCalls.result -> Hh_json.json

val parse_willSaveWaitUntil : Hh_json.json option -> Lsp.WillSaveWaitUntil.params

val parse_workspaceSymbol : Hh_json.json option -> Lsp.WorkspaceSymbol.params

val print_workspaceSymbol : Lsp.WorkspaceSymbol.result -> Hh_json.json

val parse_documentSymbol : Hh_json.json option -> Lsp.DocumentSymbol.params

val print_documentSymbol : Lsp.DocumentSymbol.result -> Hh_json.json

val parse_findReferences : Hh_json.json option -> Lsp.FindReferences.params

val parse_documentHighlight : Hh_json.json option -> Lsp.DocumentHighlight.params

val print_documentHighlight : Lsp.DocumentHighlight.result -> Hh_json.json

val parse_documentFormatting :
  Hh_json.json option -> Lsp.DocumentFormatting.params

val print_documentFormatting : Lsp.DocumentFormatting.result -> Hh_json.json

val parse_documentRangeFormatting :
  Hh_json.json option -> Lsp.DocumentRangeFormatting.params

val print_documentRangeFormatting :
  Lsp.DocumentRangeFormatting.result -> Hh_json.json

val parse_documentOnTypeFormatting :
  Hh_json.json option -> Lsp.DocumentOnTypeFormatting.params

val print_documentOnTypeFormatting :
  Lsp.DocumentOnTypeFormatting.result -> Hh_json.json

val parse_initialize : Hh_json.json option -> Lsp.Initialize.params

val print_initializeError : Lsp.Initialize.errorData -> Hh_json.json

val print_initialize : Lsp.Initialize.result -> Hh_json.json

val print_registerCapability : Lsp.RegisterCapability.params -> Hh_json.json

val parse_didChangeWatchedFiles :
  Hh_json.json option -> Lsp.DidChangeWatchedFiles.params

(* ---- Errors ---- *)

val print_error : Lsp.Error.t -> Hh_json.json

val error_to_log_string : Lsp.Error.t -> string

val parse_error : Hh_json.json -> Lsp.Error.t

(* ---- Method-name tables ---- *)

val request_name_to_string : Lsp.lsp_request -> string

val result_name_to_string : Lsp.lsp_result -> string

val notification_name_to_string : Lsp.lsp_notification -> string

val message_name_to_string : Lsp.lsp_message -> string

(** One-line human-readable summary of a message, for logging. *)
val denorm_message_to_string : Lsp.lsp_message -> string

(* ---- Whole-message (de)serialization ---- *)

val parse_lsp_request : string -> Hh_json.json option -> Lsp.lsp_request

val parse_lsp_notification : string -> Hh_json.json option -> Lsp.lsp_notification

(** Parses a response's "result" payload; the request it answers is needed
    because JSON-RPC responses carry no method name. *)
val parse_lsp_result : Lsp.lsp_request -> Hh_json.json -> Lsp.lsp_result

(** Parses any JSON-RPC message. The callback maps a response id back to the
    outstanding request it answers. Raises on non-JSON-RPC input. *)
val parse_lsp :
  Hh_json.json -> (Lsp.lsp_id -> Lsp.lsp_request) -> Lsp.lsp_message

val print_lsp_request : Lsp.lsp_id -> Lsp.lsp_request -> Hh_json.json

val print_lsp_response : Lsp.lsp_id -> Lsp.lsp_result -> Hh_json.json

val print_lsp_notification : Lsp.lsp_notification -> Hh_json.json

val print_lsp : Lsp.lsp_message -> Hh_json.json

(** The document URI a message refers to, if it carries one. *)
val get_uri_opt : Lsp.lsp_message -> Lsp.documentUri option
OCaml
hhvm/hphp/hack/src/utils/lsp/lsp_helpers.ml
(* A few helpful wrappers around LSP *) module Option = Base.Option open Lsp open Lsp_fmt let progress_and_actionRequired_counter = ref 0 (************************************************************************) (* Conversions *) (************************************************************************) let url_scheme_regex = Str.regexp "^\\([a-zA-Z][a-zA-Z0-9+.-]+\\):" (* this requires schemes with 2+ characters, so "c:\path" isn't considered a scheme *) let lsp_uri_to_path (uri : documentUri) : string = let uri = string_of_uri uri in if Str.string_match url_scheme_regex uri 0 then let scheme = Str.matched_group 1 uri in if scheme = "file" then File_url.parse uri else raise (Error.LspException { Error.code = Error.InvalidParams; message = Printf.sprintf "Not a valid file url '%s'" uri; data = None; }) else uri let path_to_lsp_uri (path : string) ~(default_path : string) : Lsp.documentUri = if path = "" then begin HackEventLogger.invariant_violation_bug "missing path"; Hh_logger.log "missing path %s" (Exception.get_current_callstack_string 99 |> Exception.clean_stack); File_url.create default_path |> uri_of_string end else File_url.create path |> uri_of_string let lsp_textDocumentIdentifier_to_filename (identifier : Lsp.TextDocumentIdentifier.t) : string = Lsp.TextDocumentIdentifier.(lsp_uri_to_path identifier.uri) let lsp_position_to_fc (pos : Lsp.position) : File_content.position = { File_content.line = pos.Lsp.line + 1; (* LSP is 0-based; File_content is 1-based. 
*) column = pos.Lsp.character + 1; } let lsp_range_to_fc (range : Lsp.range) : File_content.range = { File_content.st = lsp_position_to_fc range.Lsp.start; ed = lsp_position_to_fc range.Lsp.end_; } let lsp_range_to_pos ~line_to_offset path (range : Lsp.range) : Pos.t = let triple_of_endpoint Lsp.{ line; character } = let bol = line_to_offset line in (line, bol, bol + character) in Pos.make_from_lnum_bol_offset ~pos_file:path ~pos_start:(triple_of_endpoint range.Lsp.start) ~pos_end:(triple_of_endpoint range.Lsp.end_) let lsp_range_is_selection (range : Lsp.range) = range.start.line < range.end_.line || range.start.line = range.end_.line && range.start.character < range.end_.character let lsp_edit_to_fc (edit : Lsp.DidChange.textDocumentContentChangeEvent) : File_content.text_edit = { File_content.range = Option.map edit.DidChange.range ~f:lsp_range_to_fc; text = edit.DidChange.text; } let apply_changes (text : string) (contentChanges : DidChange.textDocumentContentChangeEvent list) : (string, string * Exception.t) result = let edits = List.map lsp_edit_to_fc contentChanges in File_content.edit_file text edits let get_char_from_lsp_position (content : string) (position : Lsp.position) : char = let fc_position = lsp_position_to_fc position in File_content.(get_char content (get_offset content fc_position)) let apply_changes_unsafe text (contentChanges : DidChange.textDocumentContentChangeEvent list) : string = match apply_changes text contentChanges with | Ok r -> r | Error (e, _stack) -> failwith e let sym_occ_kind_to_lsp_sym_info_kind (sym_occ_kind : SymbolOccurrence.kind) : Lsp.SymbolInformation.symbolKind = let open Lsp.SymbolInformation in let open SymbolOccurrence in match sym_occ_kind with | Class _ -> Class | BuiltInType _ -> Class | Function -> Function | Method _ -> Method | LocalVar -> Variable | TypeVar -> TypeParameter | Property _ -> Property | XhpLiteralAttr _ -> Property | ClassConst _ -> TypeParameter | Typeconst _ -> TypeParameter | GConst -> 
Constant | Attribute _ -> Class | EnumClassLabel _ -> EnumMember | Keyword _ -> Null | PureFunctionContext -> Null | BestEffortArgument _ -> Null | HhFixme -> Null | Module -> Module let hack_pos_to_lsp_range ~(equal : 'a -> 'a -> bool) (pos : 'a Pos.pos) : Lsp.range = (* .hhconfig errors are Positions with a filename, but dummy start/end * positions. Handle that case - and Pos.none - specially, as the LSP * specification requires line and character >= 0, and VSCode silently * drops diagnostics that violate the spec in this way *) if Pos.equal_pos equal pos (Pos.make_from (Pos.filename pos)) then { start = { line = 0; character = 0 }; end_ = { line = 0; character = 0 } } else let (line1, col1, line2, col2) = Pos.destruct_range pos in { start = { line = line1 - 1; character = col1 - 1 }; end_ = { line = line2 - 1; character = col2 - 1 }; } let hack_pos_to_lsp_range_adjusted (p : 'a Pos.pos) : range = let (line_start, line_end, character_start, character_end) = Pos.info_pos_extended p in let (start_position : position) = { line = line_start; character = character_start } in let (end_position : position) = { line = line_end; character = character_end } in { start = start_position; end_ = end_position } let symbol_to_lsp_call_item (sym_occ : Relative_path.t SymbolOccurrence.t) (sym_def_opt : Relative_path.t SymbolDefinition.t option) : CallHierarchyItem.t = let open CallHierarchyItem in let open SymbolOccurrence in let (selectionRange_, range_, file_pos) = match sym_def_opt with | None -> let sym_range = hack_pos_to_lsp_range_adjusted sym_occ.pos in (sym_range, sym_range, sym_occ.pos) | Some sym_def -> ( hack_pos_to_lsp_range_adjusted sym_def.SymbolDefinition.pos, hack_pos_to_lsp_range_adjusted sym_def.SymbolDefinition.span, sym_def.SymbolDefinition.pos ) in let filename_ = Pos.to_absolute file_pos |> Pos.filename in let uri_ = path_to_lsp_uri filename_ ~default_path:"www" in { name = sym_occ.name; kind = sym_occ_kind_to_lsp_sym_info_kind sym_occ.type_; detail = None; 
uri = uri_; range = range_; selectionRange = selectionRange_; } (************************************************************************) (* Range calculations *) (************************************************************************) (* We need to do intersection and other calculations on ranges. * The functions in the following module all assume LSP 0-based ranges, * and assume without testing that a range's start is equal to or before * its end. *) let pos_compare (p1 : position) (p2 : position) : int = if p1.line < p2.line then -1 else if p1.line > p2.line then 1 else p1.character - p2.character (* Given a "selection" range A..B and a "squiggle" range a..b, how do they overlap? * There are 12 ways to order the four letters ABab, of which six * satisfy both A<=B and a<=b. Here they are. *) type range_overlap = | Selection_before_start_of_squiggle (* ABab *) | Selection_overlaps_start_of_squiggle (* AaBb *) | Selection_covers_whole_squiggle (* AabB *) | Selection_in_middle_of_squiggle (* aABb *) | Selection_overlaps_end_of_squiggle (* aAbB *) (* abAB *) | Selection_after_end_of_squiggle (* Computes how two ranges "selection" and "squiggle" overlap *) let get_range_overlap (selection : range) (squiggle : range) : range_overlap = let selStart_leq_squiggleStart = pos_compare selection.start squiggle.start <= 0 in let selStart_leq_squiggleEnd = pos_compare selection.start squiggle.end_ <= 0 in let selEnd_lt_squiggleStart = pos_compare selection.end_ squiggle.start < 0 in let selEnd_lt_squiggleEnd = pos_compare selection.end_ squiggle.end_ < 0 in (* Q. Why does it test "<=" for the first two and "<" for the last two? *) (* Intuitively you can trust that it has something to do with how ranges are *) (* inclusive at their start and exclusive at their end. 
But the real reason *) (* is just that I did an exhaustive case analysis to look at all cases where *) (* A,B,a,b might be equal, and decided which outcome I wanted for each of them *) (* because of how I'm going to treat them in other functions, and retrofitted *) (* those answers into this function. For instance, if squiggleStart==selEnd, *) (* I'll want to handle it in the same way as squiggleStart<selEnd<squiggleEnd. *) (* The choices of "leq" and "lt" in this function embody those answers. *) match ( selStart_leq_squiggleStart, selStart_leq_squiggleEnd, selEnd_lt_squiggleStart, selEnd_lt_squiggleEnd ) with | (true, true, true, true) -> Selection_before_start_of_squiggle | (true, true, false, true) -> Selection_overlaps_start_of_squiggle | (true, true, false, false) -> Selection_covers_whole_squiggle | (false, true, false, true) -> Selection_in_middle_of_squiggle | (false, true, false, false) -> Selection_overlaps_end_of_squiggle | (false, false, false, false) -> Selection_after_end_of_squiggle | (true, false, _, _) -> failwith "sel.start proves squiggle.start > squiggle.end_" | (_, _, true, false) -> failwith "sel.end proves squiggle.start > squiggle.end_" | (false, _, true, _) -> failwith "squiggle.start proves sel.start > sel.end_" | (_, false, _, true) -> failwith "squiggle.end_ proves sel.start > sel.end_" (* this structure models a change where a certain range is replaced with * a block of text. For instance, if you merely insert a single character, * then remove_range.start==remove_range.end_ and insert_lines=0 * and insert_chars_on_final_line=1. *) type range_replace = { remove_range: range; insert_lines: int; insert_chars_on_final_line: int; } (* If you have a position "p", and some range before this point is replaced with * text of a certain number of lines, the last line having a certain number of characters, * then how will the position be shifted? * Note: this function assumes but doesn't verify that the range ends on or before * the position. 
*)
let update_pos_due_to_prior_replace (p : position) (replace : range_replace) :
    position =
  if replace.remove_range.end_.line < p.line then
    (* The replaced range doesn't touch the position, so position merely gets
     * shifted up/down by the net change in line count. *)
    let line =
      p.line
      - (replace.remove_range.end_.line - replace.remove_range.start.line)
      + replace.insert_lines
    in
    { p with line }
  else if replace.insert_lines > 0 then
    (* The position is on the final line and multiple lines were inserted *)
    let line =
      p.line
      - (replace.remove_range.end_.line - replace.remove_range.start.line)
      + replace.insert_lines
    in
    let character =
      replace.insert_chars_on_final_line
      + (p.character - replace.remove_range.end_.character)
    in
    { line; character }
  else
    (* The position is on the line where a few characters were inserted *)
    let line =
      p.line
      - (replace.remove_range.end_.line - replace.remove_range.start.line)
    in
    let character =
      replace.remove_range.start.character
      + replace.insert_chars_on_final_line
      + (p.character - replace.remove_range.end_.character)
    in
    { line; character }

(* If you have a squiggle, and some range in the document is replaced with a
 * block some lines long and with insert_chars on the final line, then what's
 * the new range of the squiggle? Returns None if the replacement wholly
 * covered the squiggle (see Selection_covers_whole_squiggle below). *)
let update_range_due_to_replace (squiggle : range) (replace : range_replace) :
    range option =
  match get_range_overlap replace.remove_range squiggle with
  | Selection_before_start_of_squiggle ->
    (* Whole squiggle shifts by the net effect of the earlier edit. *)
    let start = update_pos_due_to_prior_replace squiggle.start replace in
    let end_ = update_pos_due_to_prior_replace squiggle.end_ replace in
    Some { start; end_ }
  | Selection_overlaps_start_of_squiggle ->
    (* Squiggle's start is truncated to just after the inserted text. *)
    let line = replace.remove_range.start.line + replace.insert_lines in
    let character =
      if replace.insert_lines = 0 then
        replace.remove_range.start.character
        + replace.insert_chars_on_final_line
      else
        replace.insert_chars_on_final_line
    in
    let start = { line; character } in
    let end_ = update_pos_due_to_prior_replace squiggle.end_ replace in
    Some { start; end_ }
  | Selection_covers_whole_squiggle -> None
  | Selection_in_middle_of_squiggle ->
    (* Start is untouched; only the end moves. *)
    let start = squiggle.start in
    let end_ = update_pos_due_to_prior_replace squiggle.end_ replace in
    Some { start; end_ }
  | Selection_overlaps_end_of_squiggle ->
    (* Squiggle is truncated to end where the replacement begins. *)
    let start = squiggle.start in
    let end_ = replace.remove_range.start in
    Some { start; end_ }
  | Selection_after_end_of_squiggle -> Some squiggle

(* Moves all diagnostics in response to an LSP change.
 * The change might insert text before a diagnostic squiggle (so the squiggle
 * has to be moved down or to the right); it might delete text before the
 * squiggle; it might modify text inside the squiggle; it might replace text
 * that overlaps the squiggle in which case the squiggle gets truncated/moved;
 * it might replace the squiggle in its entirety in which case the squiggle
 * gets removed.
 * Note that an LSP change is actually a set of changes, applied in order.
*)
let update_diagnostics_due_to_change
    (diagnostics : PublishDiagnostics.diagnostic list)
    (change : Lsp.DidChange.params) : PublishDiagnostics.diagnostic list =
  PublishDiagnostics.(
    (* Turn one content-change event into a range_replace; None for the
     * "whole document replaced" form (no range given). Note: the inner
     * [change] parameter shadows the outer one deliberately. *)
    let replace_of_change change =
      match change.DidChange.range with
      | None -> None
      | Some remove_range ->
        let offset = String.length change.DidChange.text in
        let pos =
          File_content.offset_to_position change.DidChange.text offset
        in
        (* 1-based *)
        let insert_lines = pos.File_content.line - 1 in
        let insert_chars_on_final_line = pos.File_content.column - 1 in
        Some { remove_range; insert_lines; insert_chars_on_final_line }
    in
    (* None flows through: once a diagnostic is dropped (or a change had no
     * range), it stays dropped for the rest of the fold. *)
    let apply_replace diagnostic_opt replace_opt =
      match (diagnostic_opt, replace_opt) with
      | (Some diagnostic, Some replace) ->
        let range = update_range_due_to_replace diagnostic.range replace in
        Option.map range ~f:(fun range -> { diagnostic with range })
      | _ -> None
    in
    let replaces =
      Base.List.map change.DidChange.contentChanges ~f:replace_of_change
    in
    (* Changes are applied in the order LSP supplied them. *)
    let apply_all_replaces diagnostic =
      Base.List.fold replaces ~init:(Some diagnostic) ~f:apply_replace
    in
    Base.List.filter_map diagnostics ~f:apply_all_replaces)

(************************************************************************)
(* Accessors *)
(************************************************************************)

(* Project root from initialize params: rootUri wins over rootPath; fails if
 * neither was supplied. *)
let get_root (p : Lsp.Initialize.params) : string =
  Lsp.Initialize.(
    match (p.rootUri, p.rootPath) with
    | (Some uri, _) -> lsp_uri_to_path uri
    | (None, Some path) -> path
    | (None, None) -> failwith "Initialize params missing root")

let supports_status (p : Lsp.Initialize.params) : bool =
  Lsp.Initialize.(p.client_capabilities.window.status)

let supports_snippets (p : Lsp.Initialize.params) : bool =
  Lsp.Initialize.(
    p.client_capabilities.textDocument.completion.completionItem.snippetSupport)

let supports_connectionStatus (p : Lsp.Initialize.params) : bool =
  Lsp.Initialize.(p.client_capabilities.telemetry.connectionStatus)

(************************************************************************)
(* Wrappers for some LSP methods *)
(************************************************************************)

(* Sends a telemetry/event notification over the jsonrpc writer. *)
let telemetry
    (writer : Jsonrpc.writer)
    (type_ : MessageType.t)
    (extras : (string * Hh_json.json) list)
    (message : string) : unit =
  let params = { LogMessage.type_; message } in
  let notification = TelemetryNotification (params, extras) in
  notification |> print_lsp_notification |> writer

let telemetry_error
    (writer : Jsonrpc.writer)
    ?(extras : (string * Hh_json.json) list = [])
    (message : string) : unit =
  telemetry writer MessageType.ErrorMessage extras message

let telemetry_log
    (writer : Jsonrpc.writer)
    ?(extras : (string * Hh_json.json) list = [])
    (message : string) : unit =
  telemetry writer MessageType.LogMessage extras message

(* Sends a window/logMessage notification (client log, not shown to user). *)
let log (writer : Jsonrpc.writer) (type_ : MessageType.t) (message : string) :
    unit =
  let params = { LogMessage.type_; message } in
  let notification = LogMessageNotification params in
  notification |> print_lsp_notification |> writer

let log_error (writer : Jsonrpc.writer) = log writer MessageType.ErrorMessage

let log_warning (writer : Jsonrpc.writer) =
  log writer MessageType.WarningMessage

let log_info (writer : Jsonrpc.writer) = log writer MessageType.InfoMessage

(* Sends a window/showMessage notification (message shown to the user). *)
let showMessage
    (writer : Jsonrpc.writer) (type_ : MessageType.t) (message : string) :
    unit =
  let params = { ShowMessage.type_; message } in
  let notification = ShowMessageNotification params in
  notification |> print_lsp_notification |> writer

let showMessage_info (writer : Jsonrpc.writer) =
  showMessage writer MessageType.InfoMessage

let showMessage_warning (writer : Jsonrpc.writer) =
  showMessage writer MessageType.WarningMessage

let showMessage_error (writer : Jsonrpc.writer) =
  showMessage writer MessageType.ErrorMessage

(* Title is present on both arms of the command_or_action variant. *)
let title_of_command_or_action =
  Lsp.CodeAction.(
    function
    | Command Command.{ title; _ } -> title
    | Action { title; _ } -> title)
OCaml Interface
hhvm/hphp/hack/src/utils/lsp/lsp_helpers.mli
(*
 * Copyright (c) 2019, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the "hack" directory of this source tree.
 *
 *)

(* This `.mli` file was generated automatically. It may include extra
   definitions that should not actually be exposed to the caller. If you notice
   that this interface file is a poor interface, please take a few minutes to
   clean it up manually, and then delete this comment once the interface is in
   shape. *)

(** Models a change where [remove_range] is replaced by text spanning
    [insert_lines] newlines with [insert_chars_on_final_line] characters on
    its last line. *)
type range_replace = {
  remove_range: Lsp.range;
  insert_lines: int;
  insert_chars_on_final_line: int;
}

val progress_and_actionRequired_counter : int ref

val url_scheme_regex : Str.regexp

val lsp_uri_to_path : Lsp.documentUri -> string

val path_to_lsp_uri : string -> default_path:string -> Lsp.documentUri

val lsp_textDocumentIdentifier_to_filename :
  Lsp.TextDocumentIdentifier.t -> string

val lsp_position_to_fc : Lsp.position -> File_content.position

val lsp_range_to_fc : Lsp.range -> File_content.range

val lsp_range_to_pos :
  line_to_offset:(int -> int) -> Relative_path.t -> Lsp.range -> Pos.t

(** The range spans more than 0 chars, such as when the user clicks and drags. *)
val lsp_range_is_selection : Lsp.range -> bool

val lsp_edit_to_fc :
  Lsp.DidChange.textDocumentContentChangeEvent -> File_content.text_edit

val apply_changes :
  string ->
  Lsp.DidChange.textDocumentContentChangeEvent list ->
  (string, string * Exception.t) result

val get_char_from_lsp_position : string -> Lsp.position -> char

val apply_changes_unsafe :
  string -> Lsp.DidChange.textDocumentContentChangeEvent list -> string

val sym_occ_kind_to_lsp_sym_info_kind :
  SymbolOccurrence.kind -> Lsp.SymbolInformation.symbolKind

(** Correctly handles our various positions:
 * - real positions
 * - .hhconfig error positions, which have dummy start/ends
 * - and [Pos.none]
 * Special handling is required, as the LSP
 * specification requires line and character >= 0, and VSCode silently
 * drops diagnostics that violate the spec in this way *)
val hack_pos_to_lsp_range : equal:('a -> 'a -> bool) -> 'a Pos.pos -> Lsp.range

(** You probably want [hack_pos_to_lsp_range].
 * Equivalent to `[hack_pos_to_lsp_range] sans handling of special positions and with the following transformation:
 * {r with start = {line = r.start.line + 1; character = r.start.character + 1}; end_ = {r.end_ with line = r.end_.line + 1}}
 * where `r` is a range produced by [hack_pos_to_lsp_range] *)
val hack_pos_to_lsp_range_adjusted : 'a Pos.pos -> Lsp.range

val symbol_to_lsp_call_item :
  Relative_path.t SymbolOccurrence.t ->
  Relative_path.t SymbolDefinition.t option ->
  Lsp.CallHierarchyItem.t

(** Total order on LSP positions: by line, then by character. *)
val pos_compare : Lsp.position -> Lsp.position -> int

(** How a "selection" range overlaps a "squiggle" range; the six orderings of
    selection start/end vs squiggle start/end possible for well-formed ranges. *)
type range_overlap =
  | Selection_before_start_of_squiggle
  | Selection_overlaps_start_of_squiggle
  | Selection_covers_whole_squiggle
  | Selection_in_middle_of_squiggle
  | Selection_overlaps_end_of_squiggle
  | Selection_after_end_of_squiggle

(** [get_range_overlap selection squiggle] computes how the two ranges overlap. *)
val get_range_overlap : Lsp.range -> Lsp.range -> range_overlap

(** Shifts a position to account for an earlier-in-document replacement.
    Assumes (without checking) the replaced range ends at or before the position. *)
val update_pos_due_to_prior_replace :
  Lsp.position -> range_replace -> Lsp.position

(** New range of a squiggle after a replacement; [None] if the replacement
    covered the squiggle entirely. *)
val update_range_due_to_replace : Lsp.range -> range_replace -> Lsp.range option

(** Moves/truncates/drops diagnostics in response to an LSP didChange
    (whose content changes are applied in order). *)
val update_diagnostics_due_to_change :
  Lsp.PublishDiagnostics.diagnostic list ->
  Lsp.DidChange.params ->
  Lsp.PublishDiagnostics.diagnostic list

(** Project root from initialize params: rootUri wins over rootPath; fails if
    neither is present. *)
val get_root : Lsp.Initialize.params -> string

val supports_status : Lsp.Initialize.params -> bool

val supports_snippets : Lsp.Initialize.params -> bool

val supports_connectionStatus : Lsp.Initialize.params -> bool

(** Sends a telemetry/event notification. *)
val telemetry :
  Jsonrpc.writer ->
  Lsp.MessageType.t ->
  (string * Hh_json.json) list ->
  string ->
  unit

val telemetry_error :
  Jsonrpc.writer -> ?extras:(string * Hh_json.json) list -> string -> unit

val telemetry_log :
  Jsonrpc.writer -> ?extras:(string * Hh_json.json) list -> string -> unit

(** Sends a window/logMessage notification (client log, not shown to user). *)
val log : Jsonrpc.writer -> Lsp.MessageType.t -> string -> unit

val log_error : Jsonrpc.writer -> string -> unit

val log_warning : Jsonrpc.writer -> string -> unit

val log_info : Jsonrpc.writer -> string -> unit

(** The showMessage_* functions surface a message to the user. *)
val showMessage_info : Jsonrpc.writer -> string -> unit

val showMessage_warning : Jsonrpc.writer -> string -> unit

val showMessage_error : Jsonrpc.writer -> string -> unit

(** Title of either arm of a command-or-action. *)
val title_of_command_or_action : 'a Lsp.CodeAction.command_or_action_ -> string
hhvm/hphp/hack/src/utils/marshal_tools/dune
(library (name marshal_tools) (wrapped false) (modules marshal_tools) (libraries sys_utils utils_core) (preprocess (pps lwt_ppx ppx_deriving.std))) (library (name marshal_tools_lwt) (wrapped false) (modules marshal_tools_lwt) (libraries marshal_tools lwt lwt.unix lwt_log) (preprocess (pps lwt_ppx ppx_deriving.std)))
OCaml
hhvm/hphp/hack/src/utils/marshal_tools/marshal_tools.ml
(*
 * Copyright (c) 2015, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the "hack" directory of this source tree.
 *
 *)

(**
 * This tool allows for marshaling directly over file descriptors (instead of
 * ocaml "channels") to avoid buffering so that we can safely use marshaling
 * and libancillary together.
 *
 * The problem:
 * Ocaml's marshaling is done over channels, which have their own internal
 * buffer. This means after reading a marshaled object from a channel, the
 * FD's position is not guaranteed to be pointing to the beginning of the
 * next marshaled object (but instead points to the position after the
 * buffered read). So another process cannot receive this FD (over
 * libancillary) to start reading the next object.
 *
 * The solution:
 * Start each message with a fixed-size preamble that describes the
 * size of the payload to read. Read precisely that many bytes directly
 * from the FD avoiding Ocaml channels entirely.
 *)

exception Invalid_Int_Size_Exception

exception Payload_Size_Too_Large_Exception

exception Malformed_Preamble_Exception

exception Writing_Preamble_Exception

exception Writing_Payload_Exception

exception Reading_Preamble_Exception

exception Reading_Payload_Exception

(* We want to marshal exceptions (or at least their message+stacktrace) over
 * the wire. This type ensures that no one will attempt to pattern-match on
 * the thing we marshal: 'Values of extensible variant types, for example
 * exceptions (of extensible type exn), returned by the unmarhsaller should
 * not be pattern-matched over, because unmarshalling does not preserve the
 * information required for matching their constructors.'
 * https://caml.inria.fr/pub/docs/manual-ocaml/libref/Marshal.html *)
type remote_exception_data = {
  message: string;
  stack: string;
}
[@@deriving eq]

(* Captures an exception's constructor string and backtrace for transmission. *)
let of_exception e =
  {
    message = Exception.get_ctor_string e;
    stack = Exception.get_backtrace_string e;
  }

(* Result-style classification of RPC failures; see the .mli for per-variant
 * documentation. *)
type error =
  | Rpc_absent of Exception.t
  | Rpc_disconnected of Exception.t
  | Rpc_malformed of string * Utils.callstack
  | Rpc_remote_panic of remote_exception_data

(* Detailed, human-readable rendering of an [error], including stacks. *)
let error_to_verbose_string (err : error) : string =
  match err with
  | Rpc_absent e -> "Absent: " ^ Exception.to_string e
  | Rpc_disconnected e -> "Disconnected: " ^ Exception.to_string e
  | Rpc_malformed (s, Utils.Callstack stack) ->
    Printf.sprintf "Malformed: %s\n%s" s stack
  | Rpc_remote_panic { message; stack } ->
    Printf.sprintf "Remote panic: %s\n%s" message stack

(* Abstraction over blocking (direct) vs monadic (e.g. Lwt) I/O, so the same
 * preamble+payload logic below can be instantiated for both. *)
module type WRITER_READER = sig
  type 'a result

  type fd

  val return : 'a -> 'a result

  val fail : exn -> 'a result

  val ( >>= ) : 'a result -> ('a -> 'b result) -> 'b result

  val write :
    ?timeout:Timeout.t ->
    fd ->
    buffer:bytes ->
    offset:int ->
    size:int ->
    int result

  val read :
    ?timeout:Timeout.t ->
    fd ->
    buffer:bytes ->
    offset:int ->
    size:int ->
    int result

  val log : string -> unit
end

module type REGULAR_WRITER_READER =
  WRITER_READER with type 'a result = 'a and type fd = Unix.file_descr

(* Direct (synchronous) implementation over Unix file descriptors; the
 * "result monad" is just the identity. *)
module RegularWriterReader : REGULAR_WRITER_READER = struct
  type 'a result = 'a

  type fd = Unix.file_descr

  let return x = x

  let fail exn = raise exn

  let ( >>= ) a f = f a

  let rec write ?timeout fd ~buffer ~offset ~size =
    match Timeout.select ?timeout [] [fd] [] ~-.1.0 with
    | (_, [], _) -> 0
    | _ ->
      (* Timeout.select handles EINTR, but the Unix.write call can also be
       * interrupted. If the write is interrupted before any bytes are
       * written, the call fails with EINTR. Otherwise, the call succeeds and
       * returns the number of bytes written. *)
      (try Unix.write fd buffer offset size with
      | Unix.Unix_error (Unix.EINTR, _, _) ->
        write ?timeout fd ~buffer ~offset ~size)

  (* Marshal_tools reads from file descriptors.
These file descriptors might be for some
   * non-blocking socket. Normally if you try to read from an fd, it will
   * block until some data is ready. But if you try to read from a
   * non-blocking socket and it's not ready, you get an EWOULDBLOCK error.
   *
   * People using Marshal_tools probably are calling Unix.select first.
   * However that only guarantees that the first read won't block.
   * Marshal_tools will always do at least 2 reads (one for the preamble and
   * one or more for the data). Any read after the first might block. *)
  let rec read ?timeout fd ~buffer ~offset ~size =
    match Timeout.select ?timeout [fd] [] [] ~-.1.0 with
    | ([], _, _) -> 0
    | _ ->
      (* Timeout.select handles EINTR, but the Unix.read call can also be
       * interrupted. If the read is interrupted before any bytes are read,
       * the call fails with EINTR. Otherwise, the call succeeds and returns
       * the number of bytes read. *)
      (try Unix.read fd buffer offset size with
      | Unix.Unix_error (Unix.EINTR, _, _) ->
        read ?timeout fd ~buffer ~offset ~size)

  let log str = Printf.eprintf "%s\n%!" str
end

(* First byte of every preamble; guards against desynchronized streams. *)
let preamble_start_sentinel = '\142'

(* Size in bytes. *)
let preamble_core_size = 4

let expected_preamble_size = preamble_core_size + 1

(* Largest value a 4-byte big-endian preamble can encode.
 * NOTE(review): with preamble_core_size = 4 this evaluates to 2^32 - 1 on
 * 64-bit OCaml, though older comments here said 2^31 - 1 — confirm the
 * intended limit. Sizes >= this value are rejected below. *)
let maximum_payload_size = (1 lsl (preamble_core_size * 8)) - 1

(* Encodes [size] big-endian into a fresh 4-byte buffer. *)
let get_preamble_core (size : int) =
  (* Reject payloads too large to encode in the preamble. *)
  if size >= maximum_payload_size then raise Payload_Size_Too_Large_Exception;
  let rec loop i (remainder : int) acc =
    if i < 0 then
      acc
    else
      loop
        (i - 1)
        (remainder / 256)
        (Bytes.set acc i (Char.chr (remainder mod 256));
         acc)
  in
  loop (preamble_core_size - 1) size (Bytes.create preamble_core_size)

(* Full preamble = sentinel byte followed by the 4-byte big-endian size. *)
let make_preamble (size : int) =
  let preamble_core = get_preamble_core size in
  let preamble = Bytes.create (preamble_core_size + 1) in
  Bytes.set preamble 0 preamble_start_sentinel;
  Bytes.blit preamble_core 0 preamble 1 4;
  preamble

(* Inverse of [make_preamble]; validates length and sentinel. *)
let parse_preamble preamble =
  if
    Bytes.length preamble <> expected_preamble_size
    || Bytes.get preamble 0 <> preamble_start_sentinel
  then
    raise Malformed_Preamble_Exception;
  (* Accumulate the 4 size bytes (indices 1..4; 5 = expected_preamble_size). *)
  let rec loop i acc =
    if i >= 5 then
      acc
    else
      loop (i + 1) ((acc * 256) + int_of_char (Bytes.get preamble i))
  in
  loop 1 0

module MarshalToolsFunctor (WriterReader : WRITER_READER) : sig
  val to_fd_with_preamble :
    ?timeout:Timeout.t ->
    ?flags:Marshal.extern_flags list ->
    WriterReader.fd ->
    'a ->
    int WriterReader.result

  val from_fd_with_preamble :
    ?timeout:Timeout.t -> WriterReader.fd -> 'a WriterReader.result
end = struct
  let ( >>= ) = WriterReader.( >>= )

  (* Writes [to_write] bytes of [buffer] starting at [offset], looping over
   * short writes; a 0-byte write ends the loop early (returns the offset
   * reached, which callers check against the expected total). *)
  let rec write_payload ?timeout fd buffer offset to_write =
    if to_write = 0 then
      WriterReader.return offset
    else
      WriterReader.write ?timeout fd ~buffer ~offset ~size:to_write
      >>= fun bytes_written ->
      if bytes_written = 0 then
        WriterReader.return offset
      else
        write_payload
          ?timeout
          fd
          buffer
          (offset + bytes_written)
          (to_write - bytes_written)

  (* Returns the size of the marshaled payload *)
  let to_fd_with_preamble ?timeout ?(flags = []) fd obj =
    let payload = Marshal.to_bytes obj flags in
    let size = Bytes.length payload in
    let preamble = make_preamble size in
    ( ( write_payload ?timeout fd preamble 0 expected_preamble_size
      >>= fun preamble_bytes_written ->
        if preamble_bytes_written <> expected_preamble_size then
          WriterReader.fail Writing_Preamble_Exception
        else
          WriterReader.return () )
    >>= fun () -> write_payload ?timeout fd payload 0 size )
    >>= fun bytes_written ->
    if bytes_written <> size then
      WriterReader.fail Writing_Payload_Exception
    else
      WriterReader.return size

  (* Read-side twin of [write_payload]: loops over short reads. *)
  let rec read_payload ?timeout fd buffer offset to_read =
    if to_read = 0 then
      WriterReader.return offset
    else
      WriterReader.read ?timeout fd ~buffer ~offset ~size:to_read
      >>= fun bytes_read ->
      if bytes_read = 0 then
        WriterReader.return offset
      else
        read_payload ?timeout fd buffer (offset + bytes_read) (to_read - bytes_read)

  let from_fd_with_preamble ?timeout fd =
    let preamble = Bytes.create expected_preamble_size in
    ( WriterReader.read
        ?timeout
        fd
        ~buffer:preamble
        ~offset:0
        ~size:expected_preamble_size
    >>= fun bytes_read ->
      if
        bytes_read = 0
        (* Unix manpage for read says 0 bytes read indicates end of file. *)
      then
        WriterReader.fail End_of_file
      else if bytes_read <> expected_preamble_size then (
        WriterReader.log
          (Printf.sprintf "Error, only read %d bytes for preamble." bytes_read);
        WriterReader.fail Reading_Preamble_Exception
      ) else
        WriterReader.return () )
    >>= fun () ->
    let payload_size = parse_preamble preamble in
    let payload = Bytes.create payload_size in
    read_payload ?timeout fd payload 0 payload_size
    >>= fun payload_size_read ->
    if payload_size_read <> payload_size then
      WriterReader.fail Reading_Payload_Exception
    else
      WriterReader.return (Marshal.from_bytes payload 0)
end

module RegularMarshalTools = MarshalToolsFunctor (RegularWriterReader)

include RegularMarshalTools
OCaml Interface
hhvm/hphp/hack/src/utils/marshal_tools/marshal_tools.mli
(*
 * Copyright (c) 2015, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the "hack" directory of this source tree.
 *
 *)

exception Invalid_Int_Size_Exception

exception Payload_Size_Too_Large_Exception

exception Malformed_Preamble_Exception

exception Writing_Preamble_Exception

exception Writing_Payload_Exception

exception Reading_Preamble_Exception

exception Reading_Payload_Exception

(** Message + stack of a remote exception, safe to marshal over the wire
    (unlike values of the extensible [exn] type). *)
type remote_exception_data = {
  message: string;
  stack: string;
}
[@@deriving eq]

(** Captures an exception's constructor string and backtrace. *)
val of_exception : Exception.t -> remote_exception_data

(** Some say we should represent network communications failures with results,
    not exceptions. Here we go for those who favor results... *)
type error =
  | Rpc_absent of Exception.t  (** socket isn't open to start with *)
  | Rpc_disconnected of Exception.t  (** closed while trying to read/write *)
  | Rpc_malformed of string * Utils.callstack  (** malformed packet *)
  | Rpc_remote_panic of remote_exception_data
      (** other party's unhandled exception *)

(** Turns an rpc_error into a detailed string suitable for debugging,
    maybe including stack trace *)
val error_to_verbose_string : error -> string

(** Writes a payload with preamble to a file descriptor. Returns the size of
    the marshalled payload. [timeout] is the timeout for [Timeout.select]
    which selects ready file descriptors. Unix write operations interrupted
    by signals are automatically restarted. *)
val to_fd_with_preamble :
  ?timeout:Timeout.t ->
  ?flags:Marshal.extern_flags list ->
  Unix.file_descr ->
  'a ->
  int

(** Reads a payload with preamble from a file descriptor. [timeout] is the
    timeout for [Timeout.select] which selects ready file descriptors. Unix
    read operations interrupted by signals are automatically restarted. *)
val from_fd_with_preamble : ?timeout:Timeout.t -> Unix.file_descr -> 'a

(** Abstraction over direct vs monadic (e.g. Lwt) I/O; instantiate
    [MarshalToolsFunctor] with an implementation of this signature. *)
module type WRITER_READER = sig
  type 'a result

  type fd

  val return : 'a -> 'a result

  val fail : exn -> 'a result

  val ( >>= ) : 'a result -> ('a -> 'b result) -> 'b result

  val write :
    ?timeout:Timeout.t ->
    fd ->
    buffer:bytes ->
    offset:int ->
    size:int ->
    int result

  val read :
    ?timeout:Timeout.t ->
    fd ->
    buffer:bytes ->
    offset:int ->
    size:int ->
    int result

  val log : string -> unit
end

(** A "preamble" is a length 5 'bytes' that encodes a single integer payload
    size. Its first byte is a fixed sentinel, to help safeguard against
    corruption. *)
val expected_preamble_size : int

(** "make_preamble n" will return a length 5 'bytes' that encodes the integer n. *)
val make_preamble : int -> bytes

(** "make_preamble n |> parse_preamble" will return the integer n. *)
val parse_preamble : bytes -> int

module MarshalToolsFunctor (WriterReader : WRITER_READER) : sig
  val to_fd_with_preamble :
    ?timeout:Timeout.t ->
    ?flags:Marshal.extern_flags list ->
    WriterReader.fd ->
    'a ->
    int WriterReader.result

  val from_fd_with_preamble :
    ?timeout:Timeout.t -> WriterReader.fd -> 'a WriterReader.result
end
OCaml
hhvm/hphp/hack/src/utils/marshal_tools/marshal_tools_lwt.ml
(*
 * Copyright (c) 2017, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the "hack" directory of this source tree.
 *
 *)

(* Lwt instantiation of [Marshal_tools.MarshalToolsFunctor]: the result monad
 * is ['a Lwt.t] and file descriptors are [Lwt_unix.file_descr]. Each
 * read/write first waits for fd readiness so the underlying non-blocking
 * operation won't fail with EWOULDBLOCK. *)
module Marshal_tools_lwt = Marshal_tools.MarshalToolsFunctor (struct
  type 'a result = 'a Lwt.t

  type fd = Lwt_unix.file_descr

  let return = Lwt.return

  let fail = Lwt.fail

  let ( >>= ) = Lwt.( >>= )

  (* The Timeout.t machinery is not Lwt-aware; passing a timeout here is a
   * programming error, so raise synchronously (message kept consistent
   * between [write] and [read]). *)
  let write ?timeout fd ~buffer ~offset ~size =
    if timeout <> None then raise (Invalid_argument "Use Lwt timeouts directly");
    Lwt_unix.wait_write fd >>= fun () -> Lwt_unix.write fd buffer offset size

  let read ?timeout fd ~buffer ~offset ~size =
    if timeout <> None then raise (Invalid_argument "Use Lwt timeouts directly");
    Lwt_unix.wait_read fd >>= fun () -> Lwt_unix.read fd buffer offset size

  let log str = Lwt_log_core.ign_error str
end)

include Marshal_tools_lwt

(* The Timeout module probably doesn't work terribly well with Lwt. Luckily,
 * timeouts are super easy to write in Lwt, so we don't **really** need them.
 * These eta-expansions shadow the functor-provided functions so the ?timeout
 * parameter is hidden from callers; Lwt users should wrap calls in Lwt
 * timeouts instead. *)
let to_fd_with_preamble ?flags fd obj = to_fd_with_preamble ?flags fd obj

let from_fd_with_preamble fd = from_fd_with_preamble fd
OCaml Interface
hhvm/hphp/hack/src/utils/marshal_tools/marshal_tools_lwt.mli
(*
 * Copyright (c) 2017, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the "hack" directory of this source tree.
 *
 *)

(** Marshals [obj] to the given fd, prefixed by the fixed-size Marshal_tools
    preamble, inside Lwt; resolves to the size in bytes of the marshalled
    payload. Unlike [Marshal_tools.to_fd_with_preamble] there is no ?timeout
    parameter: wrap the call in an Lwt timeout instead. *)
val to_fd_with_preamble :
  ?flags:Marshal.extern_flags list -> Lwt_unix.file_descr -> 'a -> int Lwt.t

(** Reads one preamble-prefixed marshalled value from the given fd, inside
    Lwt. No ?timeout parameter: wrap the call in an Lwt timeout instead. *)
val from_fd_with_preamble : Lwt_unix.file_descr -> 'a Lwt.t
hhvm/hphp/hack/src/utils/memtrace/dune
(executable (name memtrace_merge) (modules memtrace_merge) (modes exe byte_complete) (link_flags (:standard (:include ../../dune_config/ld-opts.sexp))) (libraries default_injector_config core_kernel sys_utils utils_core collections) (preprocess (pps ppx_deriving.std)))
OCaml
hhvm/hphp/hack/src/utils/memtrace/memtrace_merge.ml
(*
 * (c) Meta Platforms, Inc. and affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the "hack" directory of this source tree.
 *)

open Hh_prelude

(* Prints CLI usage to stdout. *)
let usage () =
  Printf.printf
    ("Usage: memtrace_merge *.ctf > merged.ctf -- this will merge memtrace logs.\n"
    ^^ "Note: a bug means that timestamps in the merged log are incorrect.")

(* Merges the memtrace CTF files [srcs] into a single trace written to [dst].
 * Allocation/promote/collect events are re-emitted into one writer, with
 * per-source object ids remapped to the writer's ids. *)
let merge_memtraces srcs dst =
  let open Memtrace.Trace in
  let fd = Unix.openfile dst [Unix.O_RDWR; Unix.O_CREAT; Unix.O_TRUNC] 0o666 in
  (* Memtrace requires us to claim a unique pid for each trace. We have to
     lie; we'll pick this one. *)
  let pid = Int64.of_int (Unix.getpid ()) in
  (* We'll pick out the single earliest memtrace, and all other timestamps
     will be relative to this. *)
  let infos =
    List.map srcs ~f:(fun filename ->
        let reader = Reader.open_ ~filename in
        let info = Reader.info reader in
        Reader.close reader;
        (filename, info))
    |> List.sort ~compare:(fun (_, info1) (_, info2) ->
           Float.compare
             (Timestamp.to_float info1.Info.start_time)
             (Timestamp.to_float info2.Info.start_time))
  in
  begin
    match infos with
    | [] -> ()
    | (_, info) :: _ ->
      (* Header of the merged trace = earliest source's info, with our pid. *)
      let info = { info with Info.pid } in
      let writer = Writer.create fd ~getpid:(fun () -> info.Info.pid) info in
      List.iter infos ~f:(fun (src, _src_info) ->
          (* Maps this source's object ids to ids in the merged trace. *)
          let obj = Obj_id.Tbl.create 100 in
          let reader = Reader.open_ ~filename:src in
          Reader.iter ~parse_backtraces:true reader (fun _time_delta ev ->
              (* TODO: I can't figure out time.
                 See https://github.com/janestreet/memtrace/issues/14 *)
              let time = info.Info.start_time in
              match ev with
              | Event.Alloc
                  {
                    obj_id = relative_id;
                    length;
                    nsamples;
                    source;
                    backtrace_buffer;
                    backtrace_length;
                    common_prefix = _;
                  } ->
                (* I don't know why this should be reversed, but I see that
                   the memtrace source code does it *)
                let btrev =
                  Array.init backtrace_length ~f:(fun i ->
                      backtrace_buffer.(backtrace_length - 1 - i))
                in
                let decode_callstack_entry =
                  Reader.lookup_location_code reader
                in
                let absolute_id =
                  Writer.put_alloc
                    writer
                    time
                    ~length
                    ~nsamples
                    ~source
                    ~callstack:btrev
                    ~decode_callstack_entry
                in
                Obj_id.Tbl.add obj relative_id absolute_id
              | Event.Promote relative_id ->
                (* NOTE(review): Obj_id.Tbl.find raises if the id was never
                   allocated in this source — presumably well-formed traces
                   guarantee that; confirm. *)
                let absolute_id = Obj_id.Tbl.find obj relative_id in
                Writer.put_promote writer time absolute_id
              | Event.Collect relative_id ->
                let absolute_id = Obj_id.Tbl.find obj relative_id in
                Writer.put_collect writer time absolute_id);
          Reader.close reader);
      Writer.flush writer;
      ()
  end;
  Unix.close fd;
  ()

(* Entry point: no args (or lone --help) prints usage; otherwise merges all
 * named traces and streams the result to stdout. *)
let () =
  if
    Array.length Sys.argv = 1
    || (Array.length Sys.argv = 2 && String.equal Sys.argv.(1) "--help")
  then
    usage ()
  else
    let srcs = List.drop (Array.to_list Sys.argv) 1 in
    (* The ctf-writer requires a seekable output. We'll write to a tempfile,
       then cat it, then delete it. *)
    let dst = Caml.Filename.temp_file "memtrace.merged." ".ctf" in
    Utils.try_finally
      ~f:(fun () ->
        merge_memtraces srcs dst;
        Printf.printf "%s" (Sys_utils.cat dst))
      ~finally:(fun () -> Sys_utils.unlink_no_fail dst)
hhvm/hphp/hack/src/utils/messages/dune
(* -*- tuareg -*- *)

(* Dune "tuareg" script for the [messages] library: selects the closed-source
   implementation when the src/facebook tree is present, else the stubs, and
   hands the generated stanza to dune. *)

(* Renders a wrapper-library stanza whose sole dependency is the
   suffix-qualified implementation library. *)
let library_entry name suffix =
  Printf.sprintf
    "(library (name %s) (wrapped false) (modules) (libraries %s_%s))"
    name
    name
    suffix

let fb_entry name = library_entry name "fb"

let stubs_entry name = library_entry name "stubs"

(* Picks the fb or stubs flavor of [name]. *)
let entry is_fb name =
  match is_fb with
  | true -> fb_entry name
  | false -> stubs_entry name

let () =
  (* We run from src/utils/messages; probe for src/facebook/dune two
     directory levels up to detect the internal build. *)
  let cwd = Sys.getcwd () in
  let src_dir = Filename.dirname (Filename.dirname cwd) in
  let fb_dune =
    List.fold_left Filename.concat src_dir ["facebook"; "dune"]
  in
  let is_fb = Sys.file_exists fb_dune in
  Jbuild_plugin.V1.send (entry is_fb "messages")
TOML
hhvm/hphp/hack/src/utils/multifile/Cargo.toml
# @generated by autocargo [package] name = "multifile_rust" version = "0.0.0" edition = "2021" [lib] path = "../multifile.rs" [dependencies] anyhow = "1.0.71" lazy_static = "1.4" regex = "1.9.2" [dev-dependencies] pretty_assertions = { version = "1.2", features = ["alloc"], default-features = false }
TOML
hhvm/hphp/hack/src/utils/newtype/Cargo.toml
# @generated by autocargo [package] name = "newtype" version = "0.0.0" edition = "2021" [lib] path = "lib.rs" [dependencies] serde = { version = "1.0.176", features = ["derive", "rc"] }
Rust
hhvm/hphp/hack/src/utils/newtype/idhasher.rs
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

// This is the same as the definition in relay's intern crate - but duplicated
// here so we don't have to rely on intern simply for this simple definition.
//
// It's a utility type/trait so duplication is fine.

use std::hash::BuildHasherDefault;
use std::hash::Hasher;
use std::marker::PhantomData;

/// Hasher-builder for maps/sets keyed by integer Ids.
pub type BuildIdHasher<T> = BuildHasherDefault<IdHasher<T>>;

/// A simple fast multiplicative hasher for Ids.
///
/// A plain identity hash is tempting for Ids --- but
/// [HashBrown](https://crates.io/crates/hashbrown) and
/// [std::collections::HashMap](https://doc.rust-lang.org/std/collections/struct.HashMap.html)
/// use the upper 7 bits of the hash for a tag, then compare 8-16 tags in
/// parallel. Without the multiply, typical low-valued u32 ids would all have
/// tag 0.
#[derive(Debug, Default, Clone, Copy)]
pub struct IdHasher<T>(u64, PhantomData<T>);

/// Marker interface to allow supported types. Additional primitive types
/// can be supported by adding an `IsEnabled` decl.
pub trait IsEnabled {}
impl IsEnabled for u32 {}

impl<T: IsEnabled> Hasher for IdHasher<T> {
    fn finish(&self) -> u64 {
        self.0
    }

    fn write_u32(&mut self, n: u32) {
        // Each IdHasher instance hashes exactly one u32 key.
        debug_assert_eq!(self.0, 0);
        // Multiplicative hash (constant is 2^64 / golden ratio): spreads
        // low-valued ids across the full 64-bit range so the table's tag
        // bits are well distributed.
        self.0 = u64::from(n).wrapping_mul(0x9e3779b97f4a7c15);
    }

    // Ids are only ever hashed via write_u32; any byte-slice write is a bug.
    fn write(&mut self, _: &[u8]) {
        unimplemented!()
    }
}
Rust
hhvm/hphp/hack/src/utils/newtype/lib.rs
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.

mod idhasher;

use std::hash::Hash;
use std::hash::Hasher;

pub use idhasher::BuildIdHasher;
use serde::Deserialize;
use serde::Serialize;

/// Types that designate one value as a distinguished "none" sentinel.
/// `newtype_int!` implements this with the underlying type's MAX value.
pub trait HasNone: Copy {
    const NONE: Self;
}

/// Conversion from a `usize` index into the implementing type.
/// Used by `IdVec` to mint new ids when pushing.
pub trait FromUsize {
    fn from_usize(u: usize) -> Self;
}

/// Define a macro that makes building integer-based newtypes easy.
///
/// Using a newtype for integer based types can be useful to use the typechecker
/// to make sure that different contextual types of ints aren't accidentally
/// mixed.
///
/// In addition to building the newtype HashMap and HashSet types are created
/// which take the newtype as a key. These use BuildIdHasher to produce good
/// hash values for integers - which is not the case for the default hasher.
///
/// Usage:
///   newtype_int!(NAME, BASIS, MAP-NAME, SET-NAME)
///
///   NAME - The name used to create the newtype.
///   BASIS - The basis type that the newtype wraps (such as i64, u32, etc)
///   MAP-NAME - The name used to create the HashMap.
///   SET-NAME - The name used to create the HashSet.
///
/// The newtype created has this signature:
///
///   struct NAME(pub BASIS);
///
///   impl NAME {
///     pub fn as_usize(&self) -> usize;
///     pub fn from_usize(usize) -> Self;
///   }
///
///   impl Clone for NAME;
///   impl Copy for NAME;
///   impl Debug for NAME;
///   impl Default for NAME;
///   impl Display for NAME;
///   impl Eq for NAME;
///   impl From<BASIS> for NAME;
///   impl FromUsize for NAME;
///   impl HasNone for NAME;
///   impl Hash for NAME;
///   impl Ord for NAME;
///   impl PartialEq for NAME;
///   impl PartialOrd for NAME;
///
///   impl From<NAME> for usize;
///
#[macro_export]
macro_rules! newtype_int {
    // Extra `$derive` idents are appended to the derive list of the newtype.
    ($name:ident, $num:ident, $hashmap:ident, $hashset:ident $(, $derive:ident)*) => {
        #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, PartialOrd, Ord $(, $derive)*)]
        #[repr(transparent)]
        pub struct $name(pub $num);

        impl $name {
            #[inline]
            pub fn as_usize(&self) -> usize {
                self.0 as usize
            }
            #[inline]
            pub fn from_usize(u: usize) -> Self {
                Self(u as $num)
            }
            // Sentinel "none" value: the MAX of the underlying integer type.
            pub const NONE: Self = Self(std::$num::MAX);
        }

        impl $crate::FromUsize for $name {
            fn from_usize(u: usize) -> Self {
                Self::from_usize(u)
            }
        }

        impl std::convert::From<$num> for $name {
            fn from(x: $num) -> $name {
                $name(x)
            }
        }

        impl std::convert::From<$name> for usize {
            fn from(id: $name) -> Self {
                id.0 as usize
            }
        }

        impl $crate::HasNone for $name {
            const NONE: Self = Self(std::$num::MAX);
        }

        impl std::fmt::Display for $name {
            // Renders as `NAME(value)`, e.g. `MyId(42)`.
            fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                write!(f, "{}({})", stringify!($name), self.0)
            }
        }

        impl std::default::Default for $name {
            // Note: the default is the NONE sentinel, not zero.
            fn default() -> Self {
                Self::NONE
            }
        }

        // Unfortunately Rust does not yet let you make associated generic types,
        // so these need to be given top-level names here.
        pub type $hashmap<V> = std::collections::HashMap<$name, V, $crate::BuildIdHasher<$num>>;
        pub type $hashset = std::collections::HashSet<$name, $crate::BuildIdHasher<$num>>;
    };
}

/// A Vec indexable by a newtype_int.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct IdVec<N: Into<usize>, T> {
    // The underlying storage; also reachable via Deref/DerefMut.
    pub vec: Vec<T>,
    // Zero-sized marker tying this Vec to its index newtype `N`.
    phantom: core::marker::PhantomData<N>,
}

impl<N: Into<usize>, T> IdVec<N, T> {
    /// Creates an empty `IdVec`.
    pub fn new() -> IdVec<N, T> {
        IdVec::new_from_vec(Vec::new())
    }

    /// Wraps an existing `Vec`, reinterpreting its positions as `N` ids.
    pub fn new_from_vec(vec: Vec<T>) -> IdVec<N, T> {
        IdVec {
            vec,
            phantom: Default::default(),
        }
    }

    /// Creates an empty `IdVec` with space for `cap` elements.
    pub fn with_capacity(cap: usize) -> IdVec<N, T> {
        IdVec {
            vec: Vec::with_capacity(cap),
            phantom: Default::default(),
        }
    }

    /// Checked lookup by id; `None` when the id is out of range.
    pub fn get(&self, index: N) -> Option<&T> {
        self.vec.get(index.into())
    }

    /// Checked mutable lookup by id.
    pub fn get_mut(&mut self, index: N) -> Option<&mut T> {
        self.vec.get_mut(index.into())
    }

    /// Bytes reserved by the backing buffer (capacity, not length).
    pub fn capacity_bytes(&self) -> usize {
        std::mem::size_of::<T>() * self.vec.capacity()
    }

    /// Swaps the elements at ids `a` and `b`.
    pub fn swap(&mut self, a: N, b: N) {
        self.vec.swap(a.into(), b.into());
    }
}

impl<N: FromUsize + Into<usize>, T> IdVec<N, T> {
    /// Iterates over all valid ids, in increasing order.
    pub fn keys(&self) -> impl DoubleEndedIterator<Item = N> + '_ {
        (0..self.vec.len()).into_iter().map(|i| N::from_usize(i))
    }

    /// Appends `v` and returns the id it was assigned.
    pub fn push(&mut self, v: T) -> N {
        let id = N::from_usize(self.len());
        self.vec.push(v);
        id
    }
}

// Equality/hashing delegate to the underlying Vec; the phantom type
// parameter carries no runtime state.
impl<N: Into<usize>, T: PartialEq> PartialEq for IdVec<N, T> {
    fn eq(&self, rhs: &Self) -> bool {
        self.vec.eq(&rhs.vec)
    }
}

impl<N: Into<usize>, T: Eq> Eq for IdVec<N, T> {}

impl<N: Into<usize>, T: Hash> Hash for IdVec<N, T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.vec.hash(state)
    }
}

impl<N: Into<usize>, T> Default for IdVec<N, T> {
    fn default() -> Self {
        IdVec::new_from_vec(Vec::new())
    }
}

impl<N: Into<usize>, T> IntoIterator for IdVec<N, T> {
    type Item = T;
    type IntoIter = ::std::vec::IntoIter<T>;

    fn into_iter(self) -> Self::IntoIter {
        self.vec.into_iter()
    }
}

impl<N: Into<usize>, T> FromIterator<T> for IdVec<N, T> {
    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
        IdVec {
            vec: Vec::from_iter(iter),
            phantom: Default::default(),
        }
    }
}

// Panicking indexed access by id (like `Vec`'s `[]`); use `get` for the
// non-panicking variant.
impl<N: Into<usize>, T> std::ops::Index<N> for IdVec<N, T> {
    type Output = T;

    fn index(&self, i: N) -> &'_ T {
        self.vec.index(i.into())
    }
}

impl<N: Into<usize>, T> std::ops::IndexMut<N> for IdVec<N, T> {
    fn index_mut(&mut self, i: N) -> &'_ mut T {
        self.vec.index_mut(i.into())
    }
}

// Deref to the underlying Vec exposes len(), iter(), etc. directly.
impl<N: Into<usize>, T> std::ops::Deref for IdVec<N, T> {
    type Target = Vec<T>;

    #[inline]
    fn deref(&self) -> &Vec<T> {
        &self.vec
    }
}

impl<N: Into<usize>, T> std::ops::DerefMut for IdVec<N, T> {
    #[inline]
    fn deref_mut(&mut self) -> &mut Vec<T> {
        &mut self.vec
    }
}

#[cfg(test)]
mod tests {
    newtype_int!(MyId, u32, MyIdMap, MyIdSet);

    #[test]
    fn test_newtype() {
        let id = MyId::from_usize(42);
        assert_eq!(id.as_usize(), 42);
        assert_eq!(format!("{}", id), "MyId(42)");

        // Duplicate ids collapse in the set.
        let id_set: MyIdSet = [
            MyId::from_usize(1),
            MyId::from_usize(2),
            MyId::from_usize(3),
            MyId::from_usize(2),
        ]
        .iter()
        .copied()
        .collect();
        assert_eq!(id_set.len(), 3);

        let id_map: MyIdMap<&str> = [(MyId::from_usize(1), "a"), (MyId::from_usize(2), "b")]
            .iter()
            .copied()
            .collect();
        assert_eq!(id_map.get(&MyId::from_usize(1)).copied(), Some("a"));
        assert_eq!(id_map.get(&MyId::from_usize(2)).copied(), Some("b"));
    }
}
TOML
hhvm/hphp/hack/src/utils/no_pos_hash/Cargo.toml
# @generated by autocargo [package] name = "no_pos_hash" version = "0.0.0" edition = "2021" [lib] path = "no_pos_hash.rs" [dependencies] arena_collections = { version = "0.0.0", path = "../../arena_collections" } bstr = { version = "1.4.0", features = ["serde", "std", "unicode"] } fnv = "1.0" no_pos_hash_derive = { version = "0.0.0", path = "derive" } ocamlrep_caml_builtins = { version = "0.1.0", git = "https://github.com/facebook/ocamlrep/", branch = "main" }
Rust
hhvm/hphp/hack/src/utils/no_pos_hash/no_pos_hash.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. pub use std::hash::Hasher; pub use no_pos_hash_derive::NoPosHash; /// A type for which we can produce a position-insensitive hash. /// /// For incremental typechecking, we are often interested in /// determining whether a declaration or AST has changed in a way that /// requires re-typechecking of dependents. Changes which only affect /// the `Pos` fields in a declaration do not require rechecking of /// dependents, so we want to distinguish "position-only" changes from /// other types of changes. /// /// In OCaml, we do this by reallocating the old and new declarations /// (or ASTs) with `Pos.none` in every position field, then performing /// a polymorphic hash or comparison. In Rust, we could rewrite /// positions and then use the `Hash` trait, but we'd like to avoid /// the reallocation/clone (besides, we don't have an endo-visitor for /// our by-ref types at this time). By comparing the output of hashing /// with `Hash` and `NoPosHash`, we can easily determine when a value /// has only changed in positions. 
pub trait NoPosHash {
    /// Feed this value into `state`, ignoring any position (`Pos`) fields.
    fn hash<H: Hasher>(&self, state: &mut H);

    /// Hash every element of `data` in order. Mirrors
    /// `std::hash::Hash::hash_slice`.
    fn hash_slice<H: Hasher>(data: &[Self], state: &mut H)
    where
        Self: Sized,
    {
        for piece in data {
            piece.hash(state);
        }
    }
}

/// Convenience entry point: the position-insensitive FNV hash of `value`.
pub fn position_insensitive_hash<T: NoPosHash>(value: &T) -> u64 {
    let mut hasher = fnv::FnvHasher::default();
    value.hash(&mut hasher);
    hasher.finish()
}

// Blanket implementations for std, bstr, ocamlrep and arena types. For
// anything that cannot contain a Pos, NoPosHash simply delegates to
// std::hash::Hash.
mod impls {
    use ocamlrep_caml_builtins::Int64;

    use super::*;

    impl<T: NoPosHash> NoPosHash for [T] {
        fn hash<H: Hasher>(&self, state: &mut H) {
            // Length first, like std, so [a] ++ [b] != [a, b] collisions are avoided.
            self.len().hash(state);
            NoPosHash::hash_slice(self, state)
        }
    }

    impl<T: NoPosHash> NoPosHash for Option<T> {
        fn hash<H: Hasher>(&self, state: &mut H) {
            match self {
                None => std::mem::discriminant(self).hash(state),
                Some(value) => {
                    std::mem::discriminant(self).hash(state);
                    value.hash(state);
                }
            }
        }
    }

    impl<T> NoPosHash for std::mem::Discriminant<T> {
        fn hash<H: Hasher>(&self, state: &mut H) {
            std::hash::Hash::hash(self, state);
        }
    }

    // Sized primitives and owned containers of them: defer to std Hash,
    // including the (potentially vectorized) hash_slice fast path.
    macro_rules! impl_with_std_hash {
        ($($ty:ty,)*) => {$(
            impl NoPosHash for $ty {
                #[inline]
                fn hash<H: Hasher>(&self, state: &mut H) {
                    std::hash::Hash::hash(self, state);
                }
                #[inline]
                fn hash_slice<H: Hasher>(data: &[$ty], state: &mut H) {
                    std::hash::Hash::hash_slice(data, state);
                }
            }
        )*}
    }

    impl_with_std_hash! {
        u8, u16, u32, u64, usize,
        i8, i16, i32, i64, isize,
        u128, i128,
        bool, char, String,
        std::path::PathBuf,
        bstr::BString,
        Int64,
    }

    // Unsized types cannot have a hash_slice, so only `hash` is forwarded.
    macro_rules! impl_with_std_hash_unsized {
        ($($ty:ty,)*) => {$(
            impl NoPosHash for $ty {
                #[inline]
                fn hash<H: Hasher>(&self, state: &mut H) {
                    std::hash::Hash::hash(self, state);
                }
            }
        )*}
    }

    impl_with_std_hash_unsized! {
        str,
        std::path::Path,
        bstr::BStr,
    }

    // Tuples up to arity 12, mirroring std's Hash tuple impls.
    macro_rules! impl_hash_tuple {
        () => (
            impl NoPosHash for () {
                fn hash<H: Hasher>(&self, _state: &mut H) {}
            }
        );

        ( $($name:ident)+) => (
            impl<$($name: NoPosHash),+> NoPosHash for ($($name,)+) where last_type!($($name,)+): ?Sized {
                #[allow(non_snake_case)]
                fn hash<S: Hasher>(&self, state: &mut S) {
                    let ($(ref $name,)+) = *self;
                    $($name.hash(state);)+
                }
            }
        );
    }

    // Helper: expands to the last identifier of a list, so only the final
    // tuple element may be unsized (as in std).
    macro_rules! last_type {
        ($a:ident,) => { $a };
        ($a:ident, $($rest_a:ident,)+) => { last_type!($($rest_a,)+) };
    }

    impl_hash_tuple! {}
    impl_hash_tuple! { A }
    impl_hash_tuple! { A B }
    impl_hash_tuple! { A B C }
    impl_hash_tuple! { A B C D }
    impl_hash_tuple! { A B C D E }
    impl_hash_tuple! { A B C D E F }
    impl_hash_tuple! { A B C D E F G }
    impl_hash_tuple! { A B C D E F G H }
    impl_hash_tuple! { A B C D E F G H I }
    impl_hash_tuple! { A B C D E F G H I J }
    impl_hash_tuple! { A B C D E F G H I J K }
    impl_hash_tuple! { A B C D E F G H I J K L }

    // Pointer-like wrappers hash their pointee.
    macro_rules! impl_with_deref {
        ($(<$($gen:ident $(: $bound:tt)?),* $(,)?> $ty:ty,)*) => {$(
            impl<$($gen: NoPosHash $(+ $bound)*,)*> NoPosHash for $ty {
                #[inline]
                fn hash<H: Hasher>(&self, state: &mut H) {
                    (**self).hash(state);
                }
            }
        )*}
    }

    impl_with_deref! {
        <T: (?Sized)> &T,
        <T: (?Sized)> &mut T,
        <T: (?Sized)> Box<T>,
        <T: (?Sized)> std::rc::Rc<T>,
        <T: (?Sized)> std::sync::Arc<T>,
    }

    // Collections hash their elements in iteration order. Note: for
    // HashMap/HashSet that order is unspecified, so equal containers with
    // different internal layouts may hash differently across processes.
    macro_rules! impl_with_iter {
        ($(<$($gen:ident $(: $bound:tt)?),* $(,)?> $ty:ty,)*) => {$(
            impl<$($gen: NoPosHash $(+ $bound)*,)*> NoPosHash for $ty {
                fn hash<H: Hasher>(&self, state: &mut H) {
                    for element in self.iter() {
                        element.hash(state);
                    }
                }
            }
        )*}
    }

    impl_with_iter! {
        <T> Vec<T>,
        <T> std::collections::HashSet<T>,
        <T> std::collections::BTreeSet<T>,
        <K, V> std::collections::HashMap<K, V>,
        <K, V> std::collections::BTreeMap<K, V>,
        <T> arena_collections::list::List<'_, T>,
        <T> arena_collections::set::Set<'_, T>,
        <T> arena_collections::MultiSet<'_, T>,
        <T> arena_collections::MultiSetMut<'_, T>,
        <T> arena_collections::SortedSet<'_, T>,
        <K, V> arena_collections::map::Map<'_, K, V>,
        <K, V> arena_collections::AssocList<'_, K, V>,
        <K, V> arena_collections::AssocListMut<'_, K, V>,
        <K, V> arena_collections::SortedAssocList<'_, K, V>,
    }
}
Rust
hhvm/hphp/hack/src/utils/no_pos_hash/no_pos_hash_derive.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. #![recursion_limit = "128"] use proc_macro2::TokenStream; use quote::quote; use synstructure::decl_derive; decl_derive!([NoPosHash] => derive_no_pos_hash); fn derive_no_pos_hash(mut s: synstructure::Structure<'_>) -> TokenStream { // By default, if you are deriving an impl of trait Foo for generic type // X<T>, synstructure will add Foo as a bound not only for the type // parameter T, but also for every type which appears as a field in X. This // is not necessary for our use case--we can just require that the type // parameters implement our trait. s.add_bounds(synstructure::AddBounds::Generics); let body = no_pos_hash_body(&s); s.gen_impl(quote! { gen impl ::no_pos_hash::NoPosHash for @Self { fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) { match self { #body } } } }) } fn no_pos_hash_body(s: &synstructure::Structure<'_>) -> TokenStream { let hash = quote! { ::no_pos_hash::NoPosHash::hash }; match &s.ast().data { syn::Data::Struct(_) => s.each(|bi| quote! { #hash(#bi, state); }), syn::Data::Enum(_) => s.each_variant(|v| { v.bindings().iter().fold( quote! { #hash(&::std::mem::discriminant(self), state); }, |acc, bi| quote! { #acc #hash(#bi, state); }, ) }), syn::Data::Union(_) => panic!("untagged unions not supported"), } }
TOML
hhvm/hphp/hack/src/utils/no_pos_hash/derive/Cargo.toml
# @generated by autocargo [package] name = "no_pos_hash_derive" version = "0.0.0" edition = "2021" [lib] path = "../no_pos_hash_derive.rs" test = false doctest = false proc-macro = true [dependencies] proc-macro2 = { version = "1.0.64", features = ["span-locations"] } quote = "1.0.29" syn = { version = "1.0.109", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } synstructure = "0.12"
C
hhvm/hphp/hack/src/utils/ocaml_ffi_mock/ocaml.c
/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the "hack" directory of this source tree.
 *
 */

/* Mock (no-op) definitions of OCaml runtime symbols, so code that links
 * against the OCaml C API can be built and tested without a real OCaml
 * runtime present.
 *
 * All definitions now use explicit `(void)` prototypes; the original mix of
 * `()` (an unspecified, K&R-style parameter list in pre-C23 C) and `(void)`
 * was inconsistent and weaker for type checking. */
void assert_master(void) {}
void caml_add_to_heap(void) {}
void caml_alloc(void) {}
void caml_alloc_for_heap(void) {}
void caml_alloc_small(void) {}
void caml_alloc_tuple(void) {}
void caml_allocation_color(void) {}
void caml_array_length(void) {}
void caml_initialize(void) {}
void caml_is_double_array(void) {}
void caml_local_roots(void) {}
void caml_modify(void) {}
void caml_alloc_string(void) {}
void caml_copy_double(void) {}
void caml_copy_int32(void) {}
void caml_copy_int64(void) {}
void caml_copy_nativeint(void) {}
void caml_enter_blocking_section(void) {}
void caml_failwith_value(void) {}
void caml_failwith(void) {}
void caml_invalid_argument_value(void) {}
void caml_leave_blocking_section(void) {}
void caml_raise(void) {}
void caml_raise_constant(void) {}
void caml_register_global_root(void) {}
void caml_remove_global_root(void) {}
void caml_string_length(void) {}
void caml_array_bound_error(void) {}
void caml_raise_end_of_file(void) {}
void caml_raise_not_found(void) {}
void caml_raise_out_of_memory(void) {}
void caml_raise_stack_overflow(void) {}
void caml_raise_sys_blocked_io(void) {}
void caml_raise_sys_error(void) {}
void caml_raise_with_arg(void) {}
void caml_raise_zero_divide(void) {}
void caml_named_value(void) {}
void caml_callbackN(void) {}
void caml_raise_with_string(void) {}
void caml_register_custom_operations(void) {}
void caml_serialize_block_1(void) {}
void caml_serialize_int_8(void) {}
void caml_deserialize_sint_8(void) {}
void caml_deserialize_block_1(void) {}
void caml_input_value_from_block(void) {}
void caml_alloc_initialized_string(void) {}
void caml_output_value_to_malloc(void) {}

/* Global runtime state referenced by generated code; zeroed in the mock. */
void* Caml_state = 0;
unsigned long caml_allocated_words = 0;
TOML
hhvm/hphp/hack/src/utils/ocaml_helper/Cargo.toml
# @generated by autocargo [package] name = "ocaml_helper" version = "0.0.0" edition = "2021" [lib] path = "../ocaml_helper.rs" [dev-dependencies] pretty_assertions = { version = "1.2", features = ["alloc"], default-features = false }
TOML
hhvm/hphp/hack/src/utils/ocaml_runtime/Cargo.toml
# @generated by autocargo [package] name = "ocaml_runtime" version = "0.0.0" edition = "2021" [lib] path = "lib.rs" [dependencies] ocamlrep = { version = "0.1.0", git = "https://github.com/facebook/ocamlrep/", branch = "main" }
Rust
hhvm/hphp/hack/src/utils/ocaml_runtime/lib.rs
// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.

use std::ffi::CString;
use std::os::raw::c_char;

use ocamlrep::CLOSURE_TAG;

extern "C" {
    fn caml_named_value(name: *const c_char) -> *mut usize;
    fn caml_callback_exn(closure: usize, arg1: usize) -> usize;
}

/// A raw OCaml value (the runtime's word-sized tagged representation).
pub type Value = usize;

/// Return a named value registered by OCaml (e.g., via `Callback.register`).
/// If no value was registered for that name, return `None`.
///
/// # Safety
///
/// OCaml runtime doesn't document thread safety for each API, we are conservative
/// to assume that APIs are not thread-safe. If any other thread interacts with
/// the OCaml runtime during the execution of this function, undefined behavior
/// will result.
///
/// # Panics
///
/// Panics if the given `name` contains a nul byte.
pub unsafe fn named_value<S: AsRef<str>>(name: S) -> Option<Value> {
    let name = CString::new(name.as_ref()).expect("string contained nul byte");
    let named = caml_named_value(name.as_ptr());
    if named.is_null() {
        return None;
    }
    // Dereference the registry slot to get the current value bound to `name`.
    Some(*named)
}

/// Failure modes of `callback_exn`.
#[derive(Debug)]
pub enum Error {
    /// The `f` value passed was not an OCaml closure.
    NotInvokable,
    /// The OCaml callback raised; the payload is the exception value.
    Exception(Value),
}

/// Call a closure with a single argument.
///
/// # Safety
///
/// The calling thread must be known to the OCaml runtime system. Threads
/// created from OCaml (via `Thread.create`) and the main thread are
/// automatically known to the runtime system. See the [OCaml manual] for more
/// details.
///
/// [OCaml manual]: (https://caml.inria.fr/pub/docs/manual-ocaml/intfc.html#s:C-multithreading)
pub unsafe fn callback_exn(f: Value, arg: Value) -> Result<Value, Error> {
    // Reject immediate (non-block) values and blocks that aren't closures
    // before handing them to the runtime, which would otherwise crash.
    let f_block = match ocamlrep::Value::from_bits(f).as_block() {
        Some(block) => block,
        None => return Err(Error::NotInvokable),
    };
    if f_block.tag() != CLOSURE_TAG {
        return Err(Error::NotInvokable);
    }
    let res = caml_callback_exn(f, arg);
    if is_exception_result(res) {
        Err(Error::Exception(extract_exception(res)))
    } else {
        Ok(res)
    }
}

/// Exception returned by caml_callback_exn is documented
/// in runtime/caml/mlvalues.h
///
/// "Encoded exceptional return values, when functions are suffixed with
/// _exn. Encoded exceptions are invalid values and must not be seen
/// by the garbage collector."
/// #define Make_exception_result(v) ((v) | 2)
/// #define Is_exception_result(v) (((v) & 3) == 2)
/// #define Extract_exception(v) ((v) & ~3)
fn is_exception_result(v: usize) -> bool {
    v & 3 == 2
}

fn extract_exception(v: usize) -> usize {
    // Clear the low two tag bits to recover the exception value.
    v & !3
}
OCaml
hhvm/hphp/hack/src/utils/opaque_digest/opaqueDigest.ml
include Digest

(* A digest is a raw 16-byte string under the hood, so exposing the raw
   contents is the identity. *)
let to_raw_contents x = x

(* Accept a raw string as a digest only if it could be rendered with
   [to_hex]; [Digest.to_hex] rejects (with [Invalid_argument]) strings of
   the wrong length. *)
let from_raw_contents x =
  match to_hex x with
  | (_ : string) -> Some x
  | exception Invalid_argument _ -> None
OCaml Interface
hhvm/hphp/hack/src/utils/opaque_digest/opaqueDigest.mli
(** An opaque wrapper around [Stdlib.Digest] (MD5). The extra
    [to_raw_contents]/[from_raw_contents] functions expose the underlying
    16-byte representation for serialization. *)

type t

val compare : t -> t -> int

val equal : t -> t -> bool

(** Digest of a whole string. *)
val string : string -> t

(** Digest of a whole byte sequence. *)
val bytes : bytes -> t

(** [substring s pos len] digests the given substring. *)
val substring : string -> int -> int -> t

val subbytes : bytes -> int -> int -> t

(** [channel ic len] digests [len] bytes read from [ic]. *)
val channel : Stdlib.in_channel -> int -> t

(** Digest of a file's contents. *)
val file : string -> t

val output : Stdlib.out_channel -> t -> unit

val input : Stdlib.in_channel -> t

val to_hex : t -> string

val from_hex : string -> t

(** The digest's raw (non-hex) byte representation. *)
val to_raw_contents : t -> string

(** Inverse of [to_raw_contents]; [None] if the string is not a valid raw
    digest (e.g. wrong length). *)
val from_raw_contents : string -> t option
hhvm/hphp/hack/src/utils/parent/dune
(library (name parentimpl) (wrapped false) (modules) (foreign_stubs (language c) (names stubparentimpl))) (library (name parent) (wrapped false) (foreign_stubs (language c) (names libparent)) (libraries parentimpl))
C
hhvm/hphp/hack/src/utils/parent/libparent.c
#define CAML_NAME_SPACE #include <caml/mlvalues.h> #include <caml/memory.h> extern void exit_on_parent_exit_(int interval, int grace); CAMLprim value exit_on_parent_exit( value ml_interval, value ml_grace ) { CAMLparam2(ml_interval, ml_grace); int interval = Int_val(ml_interval); int grace = Int_val(ml_grace); exit_on_parent_exit_(interval, grace); CAMLreturn(Val_unit); }
C++
hhvm/hphp/hack/src/utils/parent/parentimpl.cpp
#include <thread> #include <mutex> #include <cassert> #include <atomic> #include <unistd.h> #include "parentimpl.h" std::mutex watchdog_mut; static int watchdog_count = 0; static void check_and_die(int interval, int grace) noexcept { assert(interval > 0); assert(grace > 0); for (;;) { // when we get reparented // exit immediately if (getppid() == 1) { sleep(static_cast<unsigned>(grace)); exit(20); } sleep(static_cast<unsigned>(interval)); } } extern "C" void exit_on_parent_exit_(int interval, int grace) noexcept { assert(interval > 0); assert(grace > 0); std::lock_guard<std::mutex> guard(watchdog_mut); assert(watchdog_count == 0 || watchdog_count == 1); if (watchdog_count == 0) { ++watchdog_count; std::thread t(check_and_die, interval, grace); t.detach(); } return; } extern "C" int get_watchdog_count_() noexcept { std::lock_guard<std::mutex> guard(watchdog_mut); return watchdog_count; }
C/C++
hhvm/hphp/hack/src/utils/parent/parentimpl.h
extern "C" void exit_on_parent_exit_(int interval, int grace) noexcept; // for tests only extern "C" int get_watchdog_count_() noexcept;
C
hhvm/hphp/hack/src/utils/parent/stubparentimpl.c
/* No-op stand-in for the real watchdog (parentimpl.cpp), used in builds
 * that do not want parent-exit monitoring. */
void exit_on_parent_exit_(int interval, int grace) {
  /* Silence unused-parameter warnings. */
  (void)interval;
  (void)grace;
}
C++
hhvm/hphp/hack/src/utils/parent/test/test_parentimpl.cpp
#include <folly/portability/GTest.h>

#include "../parentimpl.h"

#include <folly/portability/Unistd.h>

// Verifies that exit_on_parent_exit_ starts exactly one watchdog thread,
// no matter how many times it is called.
TEST(ParentImpl, WatchdogCount) {
  ::testing::FLAGS_gtest_death_test_style = "threadsafe"; // paranoia

  ASSERT_EQ(get_watchdog_count_(), 0);

  // 1000 is an arbitrary large number.
  // The thread will be spawned but it won't check for
  // the death of the parent
  exit_on_parent_exit_(1000, 1000);
  ASSERT_EQ(get_watchdog_count_(), 1);

  // try again, make sure that the number of watchdog
  // processes is still 1.
  exit_on_parent_exit_(1000, 1000);
  ASSERT_EQ(get_watchdog_count_(), 1);
}
OCaml
hhvm/hphp/hack/src/utils/perf/perf_stats.ml
(*
 * Copyright (c) 2018, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the "hack" directory of this source tree.
 *
 *)

(** Returns the requested number of quantiles (i.e., the least value
 * such that index/count values are smaller). Assumes the input list is sorted
 * unless compare argument is given to first sort the list according to it. *)
let quantiles ?compare xs count =
  let a = Array.of_list xs in
  begin
    match compare with
    | None -> ()
    | Some cmp -> Array.sort cmp a
  end;
  let n = Array.length a in
  (* Spacing between quantile sample points in (fractional) array indices. *)
  let step = float_of_int (n - 1) /. float_of_int count in
  (* Index of the [count]-th quantile sample (truncating). *)
  let to_idx count = int_of_float (float_of_int count *. step) in
  (* Walk from the last quantile down to the first, consing onto [acc] so the
     result comes out in increasing order. *)
  let rec work count acc =
    if count = 0 then
      acc
    else
      let q = a.(to_idx count) in
      (work [@tailrec]) (count - 1) (q :: acc)
  in
  work count []
Rust
hhvm/hphp/hack/src/utils/perf/perf_stats.rs
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.

/// Returns the requested number of quantiles (i.e., the least value such that index/count values
/// are smaller).
///
/// Each returned element is the largest time falling into its bucket.
/// Panics if `user_times` is empty or contains NaN (no callers pass either).
pub fn calculate_quantiles(user_times: &[f64], nbr_quantiles: usize) -> Vec<f64> {
    let mut sorted = user_times.to_vec();
    sorted.sort_by(|a, b| a.partial_cmp(b).unwrap());
    // Fractional index distance between consecutive quantile sample points.
    let step = (sorted.len() - 1) as f64 / nbr_quantiles as f64;
    let mut quantiles = Vec::with_capacity(nbr_quantiles);
    for bucket in 1..=nbr_quantiles {
        quantiles.push(sorted[(bucket as f64 * step) as usize]);
    }
    quantiles
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_quantiles_equal_step2() {
        let user_times = [1., 3., 5., 7., 9., 11.];
        let nbr_quantiles = 3;
        let quantiles = calculate_quantiles(&user_times, nbr_quantiles);
        assert_eq!(quantiles, [3., 7., 11.]);
    }

    #[test]
    fn test_quantiles_equal_step4() {
        let user_times = [0., 0.5, 1., 1., 7., 8., 8., 9., 64., 64., 64., 81.];
        let nbr_quantiles = 3;
        let quantiles = calculate_quantiles(&user_times, nbr_quantiles);
        assert_eq!(quantiles, [1., 9., 81.]);
    }

    #[test]
    fn test_quantiles_equal_uniform_4() {
        let user_times = [1., 2., 3., 4.];
        let nbr_quantiles = 4;
        let quantiles = calculate_quantiles(&user_times, nbr_quantiles);
        assert_eq!(quantiles, [1., 2., 3., 4.]);
    }

    #[test]
    fn test_quantiles_equal_uniform_3() {
        let user_times = [1., 2., 3., 4.];
        let nbr_quantiles = 3;
        let quantiles = calculate_quantiles(&user_times, nbr_quantiles);
        assert_eq!(quantiles, [2., 3., 4.]);
    }
}
OCaml
hhvm/hphp/hack/src/utils/perf/profile.ml
(*
 * Copyright (c) 2018, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the "hack" directory of this source tree.
 *
 *)

(* GC allocation counters (cumulative, in words). *)
type gc_alloc = {
  minor_words: float;
  major_words: float;  (* words in major heap _including_ promoted ones *)
  promo_words: float;  (* words promoted from minor to major heap *)
}

(* Combine two counter records field-wise with [merge_absolute]. *)
let make_merge_alloc merge_absolute alloc1 alloc2 =
  {
    minor_words = merge_absolute alloc1.minor_words alloc2.minor_words;
    major_words = merge_absolute alloc1.major_words alloc2.major_words;
    promo_words = merge_absolute alloc1.promo_words alloc2.promo_words;
  }

let sum_alloc = make_merge_alloc ( +. )

let sub_alloc = make_merge_alloc ( -. )

(* Identity element for [sum_alloc]. *)
let gc_alloc_neutral : gc_alloc =
  { minor_words = 0.; major_words = 0.; promo_words = 0. }

(** Accepts a closure with a query parameter that can be called to check GC
 * allocation counts (minor & major words) since the closure was called, e.g.:
 *   with_gc_alloc fun delta -> // use delta () to get _relative_ counters )
 *)
let with_gc_alloc func =
  let query () =
    match Gc.quick_stat () with
    | { Gc.minor_words; major_words; promoted_words; _ } ->
      { minor_words; major_words; promo_words = promoted_words }
  in
  let before = query () in
  let delta () = sub_alloc (query ()) before in
  func delta

(* Like [with_gc_alloc], but also returns the final allocation delta. *)
let measure_gc_alloc func = with_gc_alloc (fun delta -> (func delta, delta ()))

(* Same shape as [measure_gc_alloc] but without touching the GC (for tests or
   when measurement is disabled). *)
let fake_gc_alloc func = (func (fun () -> gc_alloc_neutral), gc_alloc_neutral)

(* Non-cumulative statistic (i.e., non-counters) related to heap *)
type mem_stat = {
  heap_major_words: float;  (* see Gc.heap_words *)
  heap_major_chunks: float;  (* see Gc.heap_chunks *)
}

(* Weighted average of two heap snapshots, weighted by the number of samples
   each one represents. *)
let avg_mem ?(size1 = 1) ?(size2 = 1) mem1 mem2 =
  let avg x1 x2 =
    ((float_of_int size1 *. x1) +. (float_of_int size2 *. x2))
    /. (float_of_int @@ (size1 + size2))
  in
  {
    heap_major_words = avg mem1.heap_major_words mem2.heap_major_words;
    heap_major_chunks = avg mem1.heap_major_chunks mem2.heap_major_chunks;
  }

let mem_neutral : mem_stat = { heap_major_words = 0.; heap_major_chunks = 0. }

(* Returns the current memory stats that are not counters *)
let query_mem () =
  match Gc.quick_stat () with
  | { Gc.heap_words; heap_chunks; _ } ->
    {
      heap_major_words = float_of_int heap_words;
      heap_major_chunks = float_of_int heap_chunks;
    }

(* User-mode CPU time of this process, optionally including terminated
   children. BUG FIX: the [else] branch previously added [1.] instead of [0.],
   inflating every [~children:false] query by a full second; the children
   counter's additive identity is 0. *)
let query_user_time ?(children = true) () =
  let tm = Unix.times () in
  tm.Unix.tms_utime
  +.
  if children then
    tm.Unix.tms_cutime
  else
    0.

(* System-mode CPU time; same fix as [query_user_time]. *)
let query_sys_time ?(children = true) () =
  let tm = Unix.times () in
  tm.Unix.tms_stime
  +.
  if children then
    tm.Unix.tms_cstime
  else
    0.

let query_real_time = Unix.gettimeofday

(** Given a function to profile, profiles it multiple times such that it
 * runs at least min_time seconds, returning the average time, non-aggregatable heap stats from
 * the first run and number of runs. It returns 0 for the number of runs if the first run fails
 * so that failures can be unambiguously identified and filtered out if needed. *)
let profile_longer_than run ?(min_runs = 1) ?(retry = true) min_time =
  let rec work ?mem_stat dt_user_tot nbr_runs =
    let t_user0 = query_user_time () in
    let run_incr =
      try
        run ();
        1
      with
      | exn ->
        let e = Exception.wrap exn in
        if retry then
          0 (* distinguish failures by letting run count stay 0 *)
        else
          Exception.reraise e
    in
    let dt_user = query_user_time () -. t_user0 in
    let dt_user_tot = dt_user_tot +. dt_user in
    (* Heap stats are only meaningful from the first iteration. *)
    let mem_stat =
      if nbr_runs = 0 then
        Some (query_mem ())
      else
        mem_stat
    in
    let nbr_runs = nbr_runs + run_incr in
    if (dt_user_tot < min_time || nbr_runs < min_runs) && run_incr > 0 then
      (work [@tailcall]) dt_user_tot nbr_runs ?mem_stat
    else
      (dt_user_tot, nbr_runs, mem_stat)
  in
  let (dt_user_tot, nbr_runs, mem_stat) = work 0. 0 in
  (* Avoid division by zero when the only run failed. *)
  let to_div n =
    if nbr_runs = 0 then
      1.
    else
      float_of_int n
  in
  (dt_user_tot /. to_div nbr_runs, nbr_runs, Base.Option.value_exn mem_stat)
Rust
hhvm/hphp/hack/src/utils/perf/profile.rs
use std::mem;
use std::time::Duration;
use std::time::Instant;

/// Gets CPU times for this and all child processes.
/// Returns
///   (this proc user, this proc sys,
///    this thread user, this thread sys,
///    children user, children sys)
///
/// Adapted from OCaml implementation in
/// ocaml/otherlibs/unix/times.c
pub fn get_cpu_time_seconds() -> (f64, f64, f64, f64, f64, f64) {
    // SAFETY: rusage is plain-old-data; an all-zero bit pattern is valid.
    let mut ru_self: libc::rusage = unsafe { mem::zeroed() };
    let mut ru_thread: libc::rusage = unsafe { mem::zeroed() };
    let mut ru_children: libc::rusage = unsafe { mem::zeroed() };
    // NOTE(review): these mirror the Linux <sys/resource.h> constants;
    // RUSAGE_THREAD in particular is Linux-specific — confirm if ported.
    const RUSAGE_SELF: i32 = 0;
    const RUSAGE_THREAD: i32 = 1;
    const RUSAGE_CHILDREN: i32 = -1;
    // SAFETY: the pointers are valid, properly-aligned out-params.
    unsafe {
        libc::getrusage(RUSAGE_SELF, &mut ru_self);
        libc::getrusage(RUSAGE_THREAD, &mut ru_thread);
        libc::getrusage(RUSAGE_CHILDREN, &mut ru_children);
    }
    // timeval (sec + usec) -> fractional seconds.
    let to_seconds = |t: libc::timeval| t.tv_sec as f64 + t.tv_usec as f64 / 1e6;
    (
        to_seconds(ru_self.ru_utime),
        to_seconds(ru_self.ru_stime),
        to_seconds(ru_thread.ru_utime),
        to_seconds(ru_thread.ru_stime),
        to_seconds(ru_children.ru_utime),
        to_seconds(ru_children.ru_stime),
    )
}

/// User-mode CPU time consumed by the *current thread* only.
pub fn get_user_thread_cpu_time_seconds() -> f64 {
    get_cpu_time_seconds().2
}

/// System-mode CPU time of this process plus its terminated children.
pub fn get_sys_cpu_time_seconds() -> f64 {
    let times = get_cpu_time_seconds();
    times.1 + times.5
}

/// Given a function to profile, profiles it multiple times such that it runs at least min_time
/// seconds, returning the average time and the number of runs. Does not yet handle failures.
pub fn profile_longer_than<F>(mut f: F, min_time: f64, min_runs: u64) -> (f64, u64)
where
    F: FnMut(),
{
    let start = get_user_thread_cpu_time_seconds();
    let mut now = start;
    let mut iteration = 0;
    // Keep running until both the time budget and the run-count floor are met.
    while (now - start) < min_time || iteration < min_runs {
        f();
        now = get_user_thread_cpu_time_seconds();
        iteration += 1;
    }
    // Guard against division by zero when min_time <= 0 and min_runs == 0.
    let avg_time = (now - start) / (if iteration == 0 { 1. } else { iteration as f64 });
    (avg_time, iteration)
}

/// Runs `f` once and returns its result together with the wall-clock time taken.
pub fn time<T>(f: impl FnOnce() -> T) -> (T, Duration) {
    let start = Instant::now();
    let result = f();
    let time_taken = start.elapsed();
    (result, time_taken)
}

#[cfg(test)]
mod test {
    use std::time::Instant;

    use super::*;

    #[test]
    fn test_profile_longer_than() {
        let real_time_before = Instant::now();
        let (avg_user_time, nbr_runs) = profile_longer_than(
            || {
                for _ in 0..1000000 {
                    // Do some work.
                    let values = [1., 2., 3., 4., 5.];
                    if values.iter().sum::<f64>() == 0. {
                        panic!("Never hit, prevent loop from being optimized away");
                    }
                }
            },
            0.01,
            0,
        );
        let duration = Instant::now()
            .duration_since(real_time_before)
            .as_secs_f64();
        assert!(
            avg_user_time >= 1e-8,
            "time run shorter than given duration"
        );
        // Sanity bounds: CPU time should be within a few orders of magnitude
        // of real time.
        assert!(
            avg_user_time * nbr_runs as f64 >= 0.00001 * duration,
            "CPU time much less than real time"
        );
        assert!(
            avg_user_time * nbr_runs as f64 <= 10. * duration,
            "CPU time much more than real time"
        );
        assert!(nbr_runs > 0, "Loop never run");
    }

    #[test]
    fn test_profile_longer_than_min_runs_gt1() {
        let min_runs = 7;
        let mut cnt = 0;
        let (_, nbr_runs) = profile_longer_than(
            || {
                cnt += 1;
            },
            0.0,
            min_runs,
        );
        assert_eq!(nbr_runs, min_runs);
        assert_eq!(cnt, min_runs);
    }
}
TOML
hhvm/hphp/hack/src/utils/perf/cargo/profile/Cargo.toml
# @generated by autocargo [package] name = "profile_rust" version = "0.0.0" edition = "2021" [lib] path = "../../profile.rs" [dependencies] libc = "0.2.139"
TOML
hhvm/hphp/hack/src/utils/perf/cargo/stats/Cargo.toml
# @generated by autocargo [package] name = "stats_rust" version = "0.0.0" edition = "2021" [lib] path = "../../perf_stats.rs"
hhvm/hphp/hack/src/utils/process/dune
(library (name process_types) (wrapped false) (modules process_types) (preprocess (pps ppx_deriving.std)) (libraries core_kernel utils_core sys_utils)) (library (name process) (wrapped false) (modules process) (preprocess (pps ppx_deriving.std)) (libraries exec_command process_types utils_core utils_ocaml_overrides stack_utils sys_utils)) (library (name future) (wrapped false) (modules future future_sig futureProcess) (preprocess (pps ppx_deriving.std)) (libraries process process_types promise))
OCaml
hhvm/hphp/hack/src/utils/process/future.ml
(*
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the "hack" directory of this source tree.
 *
 *)

(* A lazy, cached "future" value: a mutable promise cell paired with its
   creation time. Futures can wrap an external producer (Incomplete), be
   merged in parallel (Merged), or chained serially (Bound). Results and
   failures are cached in the cell once observed. *)

open Hh_prelude

type verbose_error = {
  message: string;
  stack: Utils.callstack;
  environment: string option;
}
[@@deriving show]

(* First class module abstract signature for external errors *)
module type Error = sig
  type config

  type t

  (* Create an error object *)
  val create : config -> t

  (* Callback handler that converts error object to string *)
  val to_string : t -> string

  (* Callback handler that converts error object to verbose_error *)
  val to_string_verbose : t -> verbose_error
end

(* Wrapper module that bundles an Error object with its handler. This uses the
   same technique/pattern as the "A Query-Handling Framework" example in the
   Real World OCaml book *)
module type Error_instance = sig
  module Error : Error

  val this : Error.t
end

(* Future errors can be categorized into internal errors and external
   environment-specific errors. For external errors, the environment-specific
   handlers will be used to convert the error into a string *)
type error =
  | External of (module Error_instance)
  | Internal_Continuation_raised of Exception.t
  | Internal_timeout of Exception.t

type 'value get_value = timeout:int -> ('value, (module Error_instance)) result

type is_value_ready = unit -> bool

type 'value status =
  | Complete_with_result of ('value, error) result
  | In_progress of { age: float }

exception Future_failure of error

type 'value delayed = {
  (* Number of times it has been tapped by "is_ready" or "check_status". *)
  tapped: int;
  (* Number of times remaining to be tapped before it is ready. *)
  remaining: int;
  value: 'value;
}

type 'value incomplete = {
  get_value: 'value get_value;
  (* Absolute wall-clock deadline (Unix epoch seconds); None = no deadline. *)
  deadline: float option;
  is_value_ready: is_value_ready;
}

type 'value t = 'value promise ref * creation_time

and 'value promise =
  | Complete : 'value -> 'value promise
  | Complete_but_failed of error
  (* Delayed is useful for deterministic testing. Must be tapped by "is ready"
   * or "check_status" the remaining number of times before it is ready. *)
  | Delayed : 'value delayed -> 'value promise
  (* A future formed from two underlying futures. Calling "get" blocks until
   * both underlying are ready. *)
  | Merged :
      'a t
      * 'b t
      * (('a, error) result -> ('b, error) result -> ('value, error) result)
      -> 'value promise
  (* The future's success is bound to another future producer; it's a chain
     of futures that have to execute serially in order, as opposed to the
     list of Merged futures above that have to execute in parallel. *)
  | Bound :
      'value t * (('value, error) result -> 'next_value t)
      -> 'next_value promise
  (* The future is not yet fulfilled. *)
  | Incomplete of 'value incomplete

and creation_time = float

(* Structural equality is meaningless for closures, so compare physically. *)
let equal (_f : 'a -> 'a -> bool) (x : 'a t) (y : 'a t) = Poly.equal x y

(* 30 seconds *)
let default_timeout = 30

(* Packages a raw error value together with its handler module into a
   first-class Error_instance. *)
let create_error_instance (type a) (module E : Error with type config = a) err =
  (module struct
    module Error = E

    let this = E.create err
  end : Error_instance)

(* Provide a default error implementation where the error is just a string. *)
module DefaultError = struct
  type config = string

  type t = string

  let create config = config

  let to_string (error : t) : string = error

  let to_string_verbose (error : t) : verbose_error =
    { message = error; environment = None; stack = Utils.Callstack error }
end

(* Helper function to create a default error instance *)
let create_default_error_instance (error : string) =
  create_error_instance (module DefaultError) error

(* Given an optional deadline, constructs a timeout time span, in seconds,
   relative to the current time (deadline - now).
   If the current time is past the deadline, the timeout is 0. If the
   deadline is not specified, the max timeout value is returned. *)
let timeout_of_deadline deadline ~max_timeout =
  match deadline with
  | Some deadline ->
    let time_left = deadline -. Unix.gettimeofday () in
    if Float.(time_left > 0.0) then
      min (int_of_float time_left) max_timeout
    else
      0
  | None -> max_timeout

(* Given an optional timeout, constructs the deadline relative to the
   specified start time. If the timeout is not specified, then the deadline
   is also not specified *)
let deadline_of_timeout ~timeout ~start_time =
  match timeout with
  | Some timeout -> Some (start_time +. float_of_int timeout)
  | None -> None

(* Wrap a producer + readiness probe into an Incomplete future; an optional
   timeout becomes an absolute deadline measured from now. *)
let make ?(timeout : int option) (get_value, is_value_ready) : 'value t =
  let deadline =
    deadline_of_timeout ~timeout ~start_time:(Unix.gettimeofday ())
  in
  (ref (Incomplete { get_value; deadline; is_value_ready }), Unix.gettimeofday ())

(* An already-completed future holding [value]. *)
let of_value (value : 'value) : 'value t = (ref @@ Complete value, Unix.gettimeofday ())

(* An already-failed future. The raise/catch dance is done purely to capture
   a backtrace inside the Exception.t. *)
let of_error (e : string) =
  try failwith e with
  | e ->
    ( ref @@ Complete_but_failed (Internal_Continuation_raised (Exception.wrap e)),
      Unix.gettimeofday () )

(* A future that becomes ready only after [delays] readiness taps (testing). *)
let delayed_value ~(delays : int) (value : 'value) : 'value t =
  (ref @@ Delayed { tapped = 0; remaining = delays; value }, Unix.gettimeofday ())

let error_to_exn e = Future_failure e

let error_to_string err : string =
  match err with
  | Internal_Continuation_raised e ->
    Printf.sprintf "Continuation_raised(%s)" (Exception.get_ctor_string e)
  | Internal_timeout e ->
    Printf.sprintf "Timed_out(%s)" (Exception.get_ctor_string e)
  | External err ->
    (* Unpack the first-class module and dispatch to its own stringifier. *)
    let module I = (val err : Error_instance) in
    I.Error.to_string I.this

let error_to_string_verbose err : verbose_error =
  match err with
  | Internal_Continuation_raised ex ->
    let stack = Exception.get_backtrace_string ex |> Exception.clean_stack in
    {
      message =
        Printf.sprintf "Continuation failure - %s" (Exception.get_ctor_string ex);
      environment = None;
      stack = Utils.Callstack stack;
    }
  | Internal_timeout ex ->
    {
      message = Exception.to_string ex;
      stack = Utils.Callstack (Exception.get_backtrace_string ex);
      environment = None;
    }
  | External err ->
    let module I = (val err : Error_instance) in
    I.Error.to_string_verbose I.this

(* Make Future_failure print something useful instead of "Future_failure _". *)
let () =
  Stdlib.Printexc.register_printer (function
      | Future_failure e -> Some (error_to_string e)
      | _ -> None)

(* Blocking get. Must explicitly make recursive functions polymorphic. *)
let rec get : 'value. ?timeout:int -> 'value t -> ('value, error) result =
 fun ?(timeout = default_timeout) (promise, _) ->
  match !promise with
  | Complete v -> Ok v
  | Complete_but_failed e -> Error e
  | Delayed { value; remaining; _ } when remaining <= 0 -> Ok value
  | Delayed _ ->
    (* Not enough taps yet; surface this as a timeout-flavored failure. *)
    let error =
      Internal_timeout
        (Exception.wrap_unraised (Failure "Delayed value not ready yet"))
    in
    Error error
  | Merged (a_future, b_future, handler) ->
    (* The time spent waiting on the first future is subtracted from the
       budget given to the second one. *)
    let start_t = Unix.gettimeofday () in
    let a_result = get ~timeout a_future in
    let consumed_t = int_of_float @@ (Unix.gettimeofday () -. start_t) in
    let timeout = timeout - consumed_t in
    let b_result = get ~timeout b_future in
    (* NB: We don't need to cache the result of running the handler because
     * underlying Futures a and b have the values cached internally. So
     * subsequent calls to "get" on this Merged Future will just re-run
     * the handler on the cached result. *)
    handler a_result b_result
  | Bound (curr_future, next_producer) ->
    let start_t = Unix.gettimeofday () in
    let curr_result = get ~timeout curr_future in
    let consumed_t = int_of_float @@ (Unix.gettimeofday () -. start_t) in
    let timeout = timeout - consumed_t in
    begin
      try
        let next_future = next_producer curr_result in
        let next_result = get ~timeout next_future in
        (* The `get` call above changes next_promise's internal state/cache,
           so the updating of the promise below should happen AFTER calling
           `get` on it. *)
        let (next_promise, _t) = next_future in
        promise := !next_promise;
        next_result
      with
      | e ->
        (* A raising continuation poisons this future permanently. *)
        let e = Exception.wrap e in
        promise := Complete_but_failed (Internal_Continuation_raised e);
        Error (Internal_Continuation_raised e)
    end
  | Incomplete { get_value; deadline; is_value_ready = _ } ->
    (* The future's own deadline caps the caller-supplied timeout. *)
    let timeout = timeout_of_deadline deadline ~max_timeout:timeout in
    let result = get_value ~timeout in
    (match result with
    | Ok res ->
      (* Cache the value so subsequent gets are free. *)
      promise := Complete res;
      Ok res
    | Error err ->
      let error = External err in
      promise := Complete_but_failed error;
      Error error)

(* Like get, but raises (via Future_failure) on error. *)
let get_exn ?timeout (future : 'value t) =
  get ?timeout future |> Result.map_error ~f:error_to_exn |> Result.ok_exn

(* Non-blocking readiness check. Must explicitly make recursive functions
   polymorphic. *)
let rec is_ready : 'value. 'value t -> bool =
 fun (promise, _) ->
  match !promise with
  | Complete _
  | Complete_but_failed _ ->
    true
  | Delayed { remaining; _ } when remaining <= 0 -> true
  | Delayed { tapped; remaining; value } ->
    (* Each tap decrements the countdown (deterministic-testing aid). *)
    promise := Delayed { tapped = tapped + 1; remaining = remaining - 1; value };
    false
  | Merged (a, b, _) ->
    (* Prevent the && operator from short-circuiting the is-ready check for
       the second future: *)
    let is_a_ready = is_ready a in
    let is_b_ready = is_ready b in
    is_a_ready && is_b_ready
  | Bound (curr_future, next_producer) ->
    if is_ready curr_future then begin
      let curr_result = get curr_future in
      try
        let next_future = next_producer curr_result in
        let is_next_ready = is_ready next_future in
        (* `is_ready` *may* change next_promise's internal state/cache, so
           the updating of the promise below should happen AFTER calling
           `is_ready` on it. *)
        let (next_promise, _t) = next_future in
        promise := !next_promise;
        is_next_ready
      with
      | e ->
        let e = Exception.wrap e in
        promise := Complete_but_failed (Internal_Continuation_raised e);
        (* "Ready" in the sense that get will now return the cached error. *)
        true
    end else
      false
  | Incomplete { get_value = _; deadline; is_value_ready } ->
    (* Note: if the promise's own deadline is not set, we allow the caller
       to call is_ready as long as they wish, without timing out *)
    let timeout = timeout_of_deadline deadline ~max_timeout:1 in
    if timeout > 0 then
      is_value_ready ()
    else
      (* E.g., we timed out *)
      true

(* Combine two statuses: complete+complete runs the handler; otherwise report
   the older in-progress age. *)
let merge_status
    (status_a : 'a status)
    (status_b : 'b status)
    (handler :
      ('a, error) result -> ('b, error) result -> ('value, error) result) :
    'value status =
  match (status_a, status_b) with
  | (Complete_with_result result_a, Complete_with_result result_b) ->
    Complete_with_result (handler result_a result_b)
  | (In_progress { age }, In_progress { age = age_b }) when Float.(age > age_b)
    ->
    In_progress { age }
  | (In_progress { age }, _)
  | (_, In_progress { age }) ->
    In_progress { age }

(* Creation timestamp of the future. *)
let start_t : 'value. 'value t -> float = (fun (_, time) -> time)

(* Parallel composition; the merged future inherits the older creation time. *)
let merge
    (future_a : 'a t)
    (future_b : 'b t)
    (handler :
      ('a, error) result -> ('b, error) result -> ('value, error) result) :
    'value t =
  ( ref @@ Merged (future_a, future_b, handler),
    Float.min (start_t future_a) (start_t future_b) )

(* Serial composition: run [next_producer] on the first future's result. *)
let make_continue
    (first : 'first t) (next_producer : ('first, error) result -> 'next t) :
    'next_value t =
  (ref @@ Bound (first, next_producer), start_t first)

(* Chain a continuation that only fires on success; errors propagate as-is. *)
let continue_with_future
    (future : 'value t) (continuation_handler : 'value -> 'next_value t) :
    'next_value t =
  let continuation_handler (result : ('value, error) result) =
    match result with
    | Ok value -> continuation_handler value
    | Error error -> (ref @@ Complete_but_failed error, start_t future)
  in
  make_continue future continuation_handler

(* Map a pure function over the future's success value. *)
let continue_with (a : 'a t) (f : 'a -> 'b) : 'b t =
  continue_with_future a (fun a -> of_value (f a))

(* Map over the full result (including errors), yielding a result-typed
   future so the caller can use a custom error type. *)
let continue_and_map_err (a : 'a t) (f : ('a, error) result -> ('b, 'c) result)
    : ('b, 'c) result t =
  let f res = of_value (f res) in
  make_continue a f

(* Run [f] as a side effect when the future fails; the error still flows. *)
let on_error (future : 'value t) (f : error -> unit) : 'value t =
  let continuation res =
    match res with
    | Ok res -> of_value res
    | Error error ->
      f error;
      (ref (Complete_but_failed error), start_t future)
  in
  make_continue future continuation

(* Must explicitly make recursive functions polymorphic. *)
let rec with_timeout : 'value. 'value t -> timeout:int -> 'value t =
 fun ((promise, start_time) as future) ~timeout ->
  match !promise with
  | Complete _
  | Complete_but_failed _ ->
    future
  | Delayed _ ->
    (* The delayed state is used for testing, so it doesn't make sense to
       figure out the semantics of what it means to have a timeout on a
       delayed future *)
    failwith "Setting timeout on a delayed future is not supported"
  | Merged (a_future, b_future, handler) ->
    merge (with_timeout a_future ~timeout) (with_timeout b_future ~timeout) handler
  | Bound (curr_future, next_producer) ->
    (* Only the first future gets the timeout; the continuation's future
       cannot be reached from here (it doesn't exist yet). *)
    make_continue (with_timeout curr_future ~timeout) next_producer
  | Incomplete { get_value; is_value_ready; _ } ->
    (* NOTE: the deadline is anchored at the future's creation time, not at
       the time with_timeout is called. *)
    let deadline = deadline_of_timeout ~timeout:(Some timeout) ~start_time in
    (ref (Incomplete { get_value; deadline; is_value_ready }), start_time)

(* Non-blocking poll: result if ready, age otherwise. Must explicitly make
   recursive function polymorphic. *)
let rec check_status : 'a. 'a t -> 'a status =
 fun (promise, start_t) ->
  match !promise with
  | Complete v -> Complete_with_result (Ok v)
  | Complete_but_failed e -> Complete_with_result (Error e)
  | Delayed { value; remaining; _ } when remaining <= 0 ->
    Complete_with_result (Ok value)
  | Delayed { tapped; remaining; value } ->
    promise := Delayed { tapped = tapped + 1; remaining = remaining - 1; value };
    (* For Delayed futures, "age" counts taps rather than wall time. *)
    In_progress { age = float_of_int tapped }
  | Merged (a, b, handler) ->
    merge_status (check_status a) (check_status b) handler
  | Bound _ ->
    if is_ready (promise, start_t) then
      Complete_with_result (get (promise, start_t))
    else
      let age = Unix.gettimeofday () -. start_t in
      In_progress { age }
  | Incomplete { get_value = _; deadline; is_value_ready } ->
    (* Note: if the promise's own deadline is not set, we allow the caller
       to call check_status as long as they wish, without timing out *)
    let timeout = timeout_of_deadline deadline ~max_timeout:1 in
    if is_value_ready () || timeout <= 0 then
      Complete_with_result (get ~timeout (promise, start_t))
    else
      let age = Unix.gettimeofday () -. start_t in
      In_progress { age }

(* Necessary to avoid a cyclic type definition error. *)
type 'value future = 'value t

(* Monad instance so futures work with the generic Promise interface. *)
module Promise = struct
  type 'value t = 'value future

  let return = of_value

  let map = continue_with

  let bind = continue_with_future

  (* Pair up two futures; the first error (if any) wins. *)
  let both a b =
    merge a b @@ fun a b ->
    match (a, b) with
    | (Error e, _)
    | (_, Error e) ->
      Error e
    | (Ok a, Ok b) -> Ok (a, b)
end
OCaml Interface
hhvm/hphp/hack/src/utils/process/future.mli
(*
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the "hack" directory of this source tree.
 *
 *)

(** Public interface of Future: the full signature lives in [Future_sig.S];
    [Promise] additionally exposes futures through the generic promise
    interface so they compose with promise-agnostic code. *)

include Future_sig.S

module Promise : Promise.S with type 'value t = 'value t
OCaml
hhvm/hphp/hack/src/utils/process/futureProcess.ml
(*
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the "hack" directory of this source tree.
 *
 *)

(* Bridges Process and Future: wraps a running Process_types.t into a
   'value Future.t by transforming its stdout, and adapts process failures
   into Future's first-class-module error mechanism. *)

open Hh_prelude
open Future

(* The distinct ways a process-backed future can fail. *)
type error_mode =
  | Process_failure of {
      status: Unix.process_status;
      stderr: string;
    }
  | Timed_out of {
      stdout: string;
      stderr: string;
    }
  | Process_aborted
  | Transformer_raised of Exception.t

(* A failure paired with the invocation (command, args, env, stack) that
   produced it, for diagnostics. *)
type process_error = Process_types.invocation_info * error_mode

(* Future.Error implementation for process errors; plugged into
   Future.create_error_instance below. *)
module ProcessError = struct
  type config = process_error

  type t = process_error

  let create config = config

  (* Compact one-line rendering, e.g. for logs. *)
  let to_string ((info, error_mode) : t) : string =
    let info =
      Printf.sprintf
        "(%s [%s])"
        info.Process_types.name
        (String.concat ~sep:", " info.Process_types.args)
    in
    let status_string s =
      match s with
      | Unix.WEXITED i -> Printf.sprintf "(%s WEXITED %d)" info i
      | Unix.WSIGNALED i -> Printf.sprintf "(%s WSIGNALED %d)" info i
      | Unix.WSTOPPED i -> Printf.sprintf "(%s WSTOPPED %d)" info i
    in
    match error_mode with
    | Process_failure { status; stderr } ->
      Printf.sprintf
        "Process_failure(%s, stderr: %s)"
        (status_string status)
        stderr
    | Timed_out { stdout; stderr } ->
      Printf.sprintf "Timed_out(%s (stdout: %s) (stderr: %s))" info stdout stderr
    | Process_aborted -> Printf.sprintf "Process_aborted(%s)" info
    | Transformer_raised e ->
      Printf.sprintf
        "Transformer_raised(%s %s)"
        info
        (Exception.get_ctor_string e)

  (* Multi-line rendering including environment and callstack. *)
  let to_string_verbose ((info, error_mode) : t) : Future.verbose_error =
    let Process_types.{ name; args; env; stack = Utils.Callstack stack } =
      info
    in
    let env = Process.env_to_string env in
    let stack = stack |> Exception.clean_stack in
    let cmd_and_args =
      Printf.sprintf "`%s %s`" name (String.concat ~sep:" " args)
    in
    match error_mode with
    | Process_failure { status; stderr } ->
      let status =
        (* NOTE(review): "%n" here is a deprecated OCaml Printf conversion;
           "%d" was likely intended — verify rendered output. *)
        match status with
        | Unix.WEXITED i -> Printf.sprintf "exited with code %n" i
        | Unix.WSIGNALED i -> Printf.sprintf "killed with signal %n" i
        | Unix.WSTOPPED i -> Printf.sprintf "stopped with signal %n" i
      in
      {
        message = Printf.sprintf "%s - %s\n%s\n" cmd_and_args status stderr;
        environment = Some env;
        stack = Utils.Callstack stack;
      }
    | Timed_out { stdout; stderr } ->
      {
        message =
          Printf.sprintf
            "%s timed out\nSTDOUT:\n%s\nSTDERR:\n%s\n"
            cmd_and_args
            stdout
            stderr;
        environment = Some env;
        stack = Utils.Callstack stack;
      }
    | Process_aborted ->
      {
        message = Printf.sprintf "%s aborted" cmd_and_args;
        environment = None;
        stack = Utils.Callstack stack;
      }
    | Transformer_raised e ->
      (* Prepend the transformer's backtrace to the invocation's stack. *)
      let stack =
        (Exception.get_backtrace_string e |> Exception.clean_stack)
        ^ "-----\n"
        ^ stack
      in
      {
        message =
          Printf.sprintf
            "%s - unable to process output - %s"
            cmd_and_args
            (Exception.get_ctor_string e);
        environment = None;
        stack = Utils.Callstack stack;
      }
end

(* Turn a running process into a future whose value is [transformer] applied
   to the process's full stdout. An optional [timeout] (seconds) bounds how
   long the future may wait for the process. *)
let make
    ?(timeout : int option)
    (process : Process_types.t)
    (transformer : string -> 'value) : 'value Future.t =
  let get_value ~(timeout : int) :
      ('value, (module Future.Error_instance)) result =
    let info = process.Process_types.info in
    let res =
      match Process.read_and_wait_pid ~timeout process with
      | Ok { Process_types.stdout; _ } -> begin
        (* The transformer is user code: treat its exceptions as a distinct
           failure mode rather than letting them escape. *)
        try Ok (transformer stdout) with
        | e ->
          let e = Exception.wrap e in
          Error (info, Transformer_raised e)
      end
      | Error (Process_types.Abnormal_exit { status; stderr; _ }) ->
        Error (info, Process_failure { status; stderr })
      | Error (Process_types.Timed_out { stdout; stderr }) ->
        Error (info, Timed_out { stdout; stderr })
      | Error Process_types.Overflow_stdin -> Error (info, Process_aborted)
    in
    Result.map_error res ~f:(fun (e : process_error) ->
        Future.create_error_instance (module ProcessError) e)
  in
  let is_value_ready () = Process.is_ready process in
  match timeout with
  | Some v -> Future.make ~timeout:v (get_value, is_value_ready)
  | None -> Future.make (get_value, is_value_ready)
OCaml
hhvm/hphp/hack/src/utils/process/future_sig.ml
(*
 * Copyright (c) 2016, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the "hack" directory of this source tree.
 *
 *)

(* Signature of the Future module (see future.ml for the implementation). *)
module type S = sig
  type verbose_error = {
    message: string;
    stack: Utils.callstack;
    environment: string option;
  }
  [@@deriving show]

  (** First class module abstract signature for external errors *)
  module type Error = sig
    type config

    type t

    (** Create an error object *)
    val create : config -> t

    (** Callback handler that converts error object to string *)
    val to_string : t -> string

    (** Callback handler that converts error object to verbose_error *)
    val to_string_verbose : t -> verbose_error
  end

  (** Wrapper module that bundles an Error object with its handler. This uses
      the same technique/pattern as the "A Query-Handling Framework" example
      in the Real World OCaml book *)
  module type Error_instance = sig
    module Error : Error

    val this : Error.t
  end

  (** Helper function to create an error instance from an error value and its
      handler module *)
  val create_error_instance :
    (module Error with type config = 'a) -> 'a -> (module Error_instance)

  val create_default_error_instance : string -> (module Error_instance)

  (** Future errors can be categorized into internal errors and external
      environment-specific errors. For external errors, the
      environment-specific handlers will be used to convert the error into
      a string *)
  type error =
    | External of (module Error_instance)
    | Internal_Continuation_raised of Exception.t
    | Internal_timeout of Exception.t

  type 'value get_value =
    timeout:int -> ('value, (module Error_instance)) result

  type is_value_ready = unit -> bool

  type 'value status =
    | Complete_with_result of ('value, error) result
    | In_progress of { age: float }

  exception Future_failure of error

  type 'value t [@@deriving eq]

  (** Blocking. Returns the value from the underlying process. *)
  val get : ?timeout:int -> 'value t -> ('value, error) result

  (** Like get, but raises Failure instead of returning when result is
      Error. *)
  val get_exn : ?timeout:int -> 'value t -> 'value

  (** Creates a future out of the process handle. If the timeout is specified,
      then this timeout will take priority over the timeout that's passed
      into the `get` function, or whichever is less. If the timeout is not
      specified, then it's assumed to be infinite for the purposes of how
      `is_ready` and `check_status` work, and for the purpose of figuring
      out `get`'s timeout.

      Example 1:
        - the future is made without a timeout
        - the future is synchronously gotten with a timeout of 15 seconds
        - if the process is not ready after 15 seconds, the result is a
          timeout

      Example 2:
        - the future is made with a timeout of 5 seconds
        - the future is synchronously gotten with a timeout of 10 seconds
        - the timeout that applies is the 5 second `make` timeout because
          its priority is higher than the priority of the `get` timeout

      Example 3:
        - the future is made with a timeout of 10 seconds
        - the future is synchronously gotten with a timeout of 5 seconds
        - the timeout that applies is the 5 second `get` timeout because
          its value is less than the value of the `make` timeout

      NOTE: the timeout affects the behavior of `is_ready` and
      `check_status`: if set here and if it is expired, `is_ready` will
      return true and `check_status` will return a "timed out" result *)
  val make : ?timeout:int -> 'value get_value * is_value_ready -> 'value t

  (** Sets or resets the timeout on an existing future.

      The meaning of setting a timeout on a merged or a bound future is thus:
        - for a merged future, we set the timeout recursively on all the
          futures
        - for a bound future, we set the timeout on the first future, but
          we can't do anything reasonable for the bound continuation - if
          that continuation produces a true future, this timeout will not
          be applied to it.

      This is a convenience function meant to enable setting timeouts on
      futures produced by other modules. Normally, you would create a future
      with a timeout by specifying the timeout when invoking the `make`
      function, but if you've already gotten a future from, e.g., another
      module, then it might be convenient to set the timeout separately,
      instead of threading the timeout into the other module's APIs. *)
  val with_timeout : 'value t -> timeout:int -> 'value t

  (** Analogous to "make" above, but takes in two futures and a function
      that consumes their results, producing a third future that "is_ready"
      when both of the underlying are ready (and will block on "get" until
      both of the underlying are completed).

      NB: The handler is run each time "get" is called on the Future. *)
  val merge :
    'a t ->
    'b t ->
    (('a, error) result -> ('b, error) result -> ('value, error) result) ->
    'value t

  (** Adds a computation that will be applied to the result of the future
      when t is finished. *)
  val continue_with : 'value t -> ('value -> 'next_value) -> 'next_value t

  (** Adds another future to be generated after the given future finishes. *)
  val continue_with_future :
    'value t -> ('value -> 'next_value t) -> 'next_value t

  (** Adds another future to be generated after the given future finishes,
      but allows custom handling of process errors. *)
  val continue_and_map_err :
    'value t ->
    (('value, error) result -> ('next_value, 'next_error) result) ->
    ('next_value, 'next_error) result t

  val on_error : 'value t -> (error -> unit) -> 'value t

  (** Wraps a value inside a future. *)
  val of_value : 'value -> 'value t

  (** Wraps an error with the specified string message inside a future *)
  val of_error : string -> 'value t

  (** Like of_value, except returns false "delays" number of times of
      calling is_ready on it before returning true. *)
  val delayed_value : delays:int -> 'value -> 'value t

  (** Checks whether the future is ready or not by checking the underlying
      implementation (e.g., Process) whether it's ready. The meaning of
      "ready" is that calling the `get` function will not block. The
      `is_ready` check will also return true if getting the future failed
      or timed out. The latter can happen if the future was created with a
      timeout: `is_ready` will return true if the timeout is expired, even
      if the underlying implementation is still working. Calling `get`
      afterwards will result in a timeout failure. *)
  val is_ready : 'value t -> bool

  (** Think of this as a combination of `is_ready` and `get`:
        - if ready, returns the result
        - if not ready, then returns the in-progress status

      Note that, if the future was created with a timeout, it will return
      a timed out result if the timeout is expired *)
  val check_status : 'value t -> 'value status

  (** Return the timestamp the future was constructed. For Merged futures,
      returns the older of the merged futures. *)
  val start_t : 'value t -> float

  val error_to_string : error -> string

  val error_to_string_verbose : error -> verbose_error

  val error_to_exn : error -> exn
end
OCaml
hhvm/hphp/hack/src/utils/process/process.ml
(*
 * Copyright (c) 2016, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the "hack" directory of this source tree.
 *
 *)

(* Process spawning and reaping: create a child with piped stdin/stdout/stderr,
   optionally feed it input, then drain its output while alternating
   non-blocking reads with non-blocking waitpid. *)

open Hh_prelude
open Ocaml_overrides
open Stack_utils

(* Registry wrapper for daemon entry points (see run_entry below). *)
module Entry = struct
  type 'param t = ('param, unit, unit) Daemon.entry

  let register name entry =
    let daemon_entry =
      Daemon.register_entry_point name (fun params _channels -> entry params)
    in
    daemon_entry
end

(** In the blocking read_and_wait_pid call, we alternate between
 * non-blocking consuming of output and a nonblocking waitpid.
 * To avoid pegging the CPU at 100%, sleep for a short time between
 * those. *)
let sleep_seconds_per_retry = 0.04

let chunk_size = 65536

(** Reuse the buffer for reading. Just an allocation optimization. *)
let buffer = Bytes.create chunk_size

(* Convert our environment description into the array form that
   Unix.create_process_env expects; None means "inherit as-is". *)
let env_to_array (env : Process_types.environment) : string array option =
  match env with
  | Process_types.Default -> None
  | Process_types.Empty -> Some [||]
  | Process_types.Augment augments_to_env ->
    (* deduping the env is not necessary. glibc putenv/getenv will grab the
     * first one *)
    let fullenv =
      Array.append (Array.of_list augments_to_env) (Unix.environment ())
    in
    Some fullenv
  | Process_types.Replace fullenv -> Some (Array.of_list fullenv)

(* Human-readable dump of the environment, for error messages. *)
let env_to_string (env : Process_types.environment) : string =
  match env with
  | Process_types.Default ->
    Printf.sprintf
      "=====Process environment inherited from parent process:\n%s\n"
      (String.concat ~sep:"\n" (Array.to_list (Unix.environment ())))
  | Process_types.Empty -> "Process environment is explicitly made empty"
  | Process_types.Augment additions ->
    Printf.sprintf
      "=====Process environment is augmented with:\n%s\n\n=====Parent process environment:\n%s\n"
      (String.concat ~sep:"\n" additions)
      (String.concat ~sep:"\n" (Array.to_list (Unix.environment ())))
  | Process_types.Replace fullenv ->
    Printf.sprintf
      "=====Process environment explicitly set to:\n%s\n"
      (String.concat ~sep:"\n" fullenv)

let status_to_string (status : Unix.process_status) : string =
  match status with
  | Unix.WEXITED i -> Printf.sprintf "Unix.WEXITED %d" i
  | Unix.WSIGNALED i -> Printf.sprintf "Unix.WSIGNALED %d" i
  | Unix.WSTOPPED i -> Printf.sprintf "Unix.WSTOPPED %d" i

(* make_result returns either (stdout,stderr) or a failure: only a clean
   exit (WEXITED 0) counts as success. *)
let make_result
    (status : Unix.process_status) (stdout : string) (stderr : string) :
    Process_types.process_result =
  Process_types.(
    match status with
    | Unix.WEXITED 0 -> Ok { stdout; stderr }
    | Unix.WEXITED _
    | Unix.WSIGNALED _
    | Unix.WSTOPPED _ ->
      Error (Abnormal_exit { status; stdout; stderr }))

(** Read from the FD if there is something to be read. FD is a reference
 * so when EOF is read from it, it is set to None. Reads for at most
 * [max_time] seconds, pushing each chunk onto [acc]. *)
let rec maybe_consume
    ?(max_time : float = 0.0)
    (fd_ref : Unix.file_descr option ref)
    (acc : string Stack_utils.Stack.t) : unit =
  if Float.(max_time < 0.0) then
    ()
  else
    let start_t = Unix.time () in
    Option.iter !fd_ref ~f:(fun fd ->
        match Sys_utils.select_non_intr [fd] [] [] max_time with
        | ([], _, _) -> ()
        | _ ->
          let bytes_read = Unix.read fd buffer 0 chunk_size in
          if bytes_read = 0 then (
            (* EOF reached. *)
            Unix.close fd;
            fd_ref := None
          ) else
            let chunk =
              String.sub (Bytes.to_string buffer) ~pos:0 ~len:bytes_read
            in
            Stack.push chunk acc;
            (* Keep reading with whatever time budget remains. *)
            let consumed_t = Unix.time () -. start_t in
            let max_time = max_time -. consumed_t in
            maybe_consume ~max_time fd_ref acc)

(** Non-blockingly drains stdout/stderr into the accumulators and does a
    WNOHANG waitpid; records the exit status in [lifecycle] if the child has
    exited. Idempotent: does nothing once the process has been reaped. *)
let read_and_wait_pid_nonblocking (process : Process_types.t) : unit =
  Process_types.(
    let {
      stdin_fd = _stdin_fd;
      stdout_fd;
      stderr_fd;
      lifecycle;
      acc;
      acc_err;
      _;
    } =
      process
    in
    match !lifecycle with
    | Lifecycle_killed_due_to_overflow_stdin
    | Lifecycle_exited _ ->
      ()
    | Lifecycle_running { pid } ->
      maybe_consume stdout_fd acc;
      maybe_consume stderr_fd acc_err;
      (match Unix.waitpid [Unix.WNOHANG] pid with
      | (0, _) -> ()
      | (_, status) ->
        let () = lifecycle := Lifecycle_exited status in
        (* Process has exited. Non-blockingly consume residual output. *)
        let () = maybe_consume stdout_fd acc in
        let () = maybe_consume stderr_fd acc_err in
        ()))

(** Returns true if read_and_close_pid would be nonblocking. *)
let is_ready (process : Process_types.t) : bool =
  read_and_wait_pid_nonblocking process;
  Process_types.(
    match !(process.lifecycle) with
    | Lifecycle_running _ -> false
    | Lifecycle_killed_due_to_overflow_stdin
    | Lifecycle_exited _ ->
      true)

(* SIGKILL the child and close any of its still-open output FDs. *)
let kill_and_cleanup_fds (pid : int) (fds : Unix.file_descr option ref list) :
    unit =
  Unix.kill pid Sys.sigkill;
  let maybe_close fd_ref =
    Option.iter !fd_ref ~f:(fun fd ->
        Unix.close fd;
        fd_ref := None)
  in
  List.iter fds ~f:maybe_close

(**
 * Consumes from stdout and stderr pipes and waitpids on the process.
 * Returns immediately if process has already been waited on (so this
 * function is idempotent).
 *
 * The implementation is a little complicated because:
 *   (1) The pipe can get filled up and the child process will pause
 *       until it's emptied out.
 *   (2) If the child process itself forks a grandchild, the
 *       grandchild will unknowingly inherit the pipe's file descriptors;
 *       in this case, the pipe will not provide an EOF as you'd expect.
 *
 * Due to (1), we can't just blockingly waitpid followed by reading the
 * data from the pipe.
 *
 * Due to (2), we can't just read data from the pipes until an EOF is
 * reached and then do a waitpid.
 *
 * We must do some weird alternating between them.
 *)
let rec read_and_wait_pid ~(retries : int) (process : Process_types.t) :
    Process_types.process_result =
  Process_types.(
    let {
      stdin_fd = _stdin_fd;
      stdout_fd;
      stderr_fd;
      lifecycle;
      acc;
      acc_err;
      _;
    } =
      process
    in
    read_and_wait_pid_nonblocking process;
    match !lifecycle with
    | Lifecycle_exited status ->
      make_result status (Stack.merge_bytes acc) (Stack.merge_bytes acc_err)
    | Lifecycle_killed_due_to_overflow_stdin -> Error Overflow_stdin
    | Lifecycle_running { pid } ->
      (* Collect the FDs that are still open (not yet at EOF). *)
      let fds = List.rev_filter_map ~f:( ! ) [stdout_fd; stderr_fd] in
      if List.is_empty fds then
        (* EOF reached for all FDs. Blocking wait. *)
        let (_, status) = Unix.waitpid [] pid in
        let () = lifecycle := Lifecycle_exited status in
        make_result status (Stack.merge_bytes acc) (Stack.merge_bytes acc_err)
      else
        (* Consume output to clear the buffers which might
         * be blocking the process from continuing. *)
        let () =
          maybe_consume ~max_time:(sleep_seconds_per_retry /. 2.0) stdout_fd acc
        in
        let () =
          maybe_consume
            ~max_time:(sleep_seconds_per_retry /. 2.0)
            stderr_fd
            acc_err
        in
        (* EOF hasn't been reached for all FDs. Here's where we switch from
         * reading the pipes to attempting a non-blocking waitpid. *)
        (match Unix.waitpid [Unix.WNOHANG] pid with
        | (0, _) ->
          if retries <= 0 then
            (* Out of retries: kill the child and report what we collected. *)
            let () = kill_and_cleanup_fds pid [stdout_fd; stderr_fd] in
            let stdout = Stack.merge_bytes acc in
            let stderr = Stack.merge_bytes acc_err in
            Error (Timed_out { stdout; stderr })
          else
            (* And here we switch from waitpid back to reading. *)
            read_and_wait_pid ~retries:(retries - 1) process
        | (_, status) ->
          (* Process has exited. Non-blockingly consume residual output. *)
          let () = maybe_consume stdout_fd acc in
          let () = maybe_consume stderr_fd acc_err in
          let () = lifecycle := Lifecycle_exited status in
          make_result status (Stack.merge_bytes acc) (Stack.merge_bytes acc_err)))

(* Public entry point: [timeout] in seconds is converted into a retry count,
   where each retry budgets sleep_seconds_per_retry of wall time. *)
let read_and_wait_pid ~(timeout : int) (process : Process_types.t) :
    Process_types.process_result =
  let retries =
    float_of_int timeout /. sleep_seconds_per_retry |> int_of_float
  in
  read_and_wait_pid ~retries process

(* Render a failure for human consumption. *)
let failure_msg (failure : Process_types.failure) : string =
  Process_types.(
    match failure with
    | Timed_out { stdout; stderr } ->
      Printf.sprintf
        "Process timed out. stdout:\n%s\nstderr:\n%s\n"
        stdout
        stderr
    | Abnormal_exit { stdout; stderr; _ } ->
      Printf.sprintf
        "Process exited abnormally. stdout:\n%s\nstderr:\n%s\n"
        stdout
        stderr
    | Overflow_stdin -> Printf.sprintf "Process_aborted_input_too_large")

(* Write [input] (if any) to the child's stdin, then assemble the process
   handle. If the single write can't take the whole input, the child is
   killed and marked Lifecycle_killed_due_to_overflow_stdin. The parent's
   stdin end is always closed so the child sees EOF. *)
let send_input_and_form_result
    ?(input : string option)
    ~(info : Process_types.invocation_info)
    pid
    ~(stdin_parent : Unix.file_descr)
    ~(stdout_parent : Unix.file_descr)
    ~(stderr_parent : Unix.file_descr) : Process_types.t =
  Process_types.(
    let input_succeeded =
      match input with
      | None -> true
      | Some input ->
        let input = Bytes.of_string input in
        let written = Unix.write stdin_parent input 0 (Bytes.length input) in
        written = Bytes.length input
    in
    let lifecycle =
      if input_succeeded then
        Lifecycle_running { pid }
      else
        let () = Unix.kill pid Sys.sigkill in
        Lifecycle_killed_due_to_overflow_stdin
    in
    Unix.close stdin_parent;
    {
      info;
      stdin_fd = ref @@ None;
      stdout_fd = ref @@ Some stdout_parent;
      stderr_fd = ref @@ Some stderr_parent;
      acc = Stack.create ();
      acc_err = Stack.create ();
      lifecycle = ref @@ lifecycle;
    })

(**
 * Launches a process, optionally modifying the environment variables with ~env
 *)
let exec_no_chdir
    ~(prog : Exec_command.t)
    ?(input : string option)
    ~(env : Process_types.environment option)
    (args : string list) : Process_types.t =
  let prog = Exec_command.to_string prog in
  let env = Option.value env ~default:Process_types.Default in
  let info =
    {
      Process_types.name = prog;
      args;
      env;
      (* Capture the spawning callstack for later diagnostics. *)
      stack =
        Utils.Callstack
          (Caml.Printexc.get_callstack 100
          |> Caml.Printexc.raw_backtrace_to_string);
    }
  in
  let args = Array.of_list (prog :: args) in
  let (stdin_child, stdin_parent) = Unix.pipe () in
  let (stdout_parent, stdout_child) = Unix.pipe () in
  let (stderr_parent, stderr_child) = Unix.pipe () in
  (* Don't leak the parent's pipe ends into future exec'd children. *)
  Unix.set_close_on_exec stdin_parent;
  Unix.set_close_on_exec stdout_parent;
  Unix.set_close_on_exec stderr_parent;
  let pid =
    match env_to_array env with
    | None -> Unix.create_process prog args stdin_child stdout_child stderr_child
    | Some env ->
      Unix.create_process_env prog args env stdin_child stdout_child stderr_child
  in
  (* The child owns its ends now; close ours. *)
  Unix.close stdin_child;
  Unix.close stdout_child;
  Unix.close stderr_child;
  send_input_and_form_result
    ?input
    ~info
    pid
    ~stdin_parent
    ~stdout_parent
    ~stderr_parent

let register_entry_point = Entry.register

(* Parameters for the chdir_entry daemon: exec [prog] with [args] and [env]
   after chdir'ing to [cwd]. *)
type chdir_params = {
  cwd: string;
  prog: string;
  env: Process_types.environment;
  args: string list;
}

(** Wraps an entry point inside a Process, so we get Process's
 * goodness for free (read_and_wait_pid and is_ready). The entry will be
 * spawned into a separate process. *)
let run_entry
    ?(input : string option)
    (env : Process_types.environment)
    (entry : 'a Entry.t)
    (params : 'a) : Process_types.t =
  let (stdin_child, stdin_parent) = Unix.pipe () in
  let (stdout_parent, stdout_child) = Unix.pipe () in
  let (stderr_parent, stderr_child) = Unix.pipe () in
  let info =
    {
      Process_types.name = Daemon.name_of_entry entry;
      args = [];
      env;
      stack =
        Utils.Callstack
          (Caml.Printexc.get_callstack 100
          |> Caml.Printexc.raw_backtrace_to_string);
    }
  in
  let ({ Daemon.pid; _ } as daemon) =
    Daemon.spawn (stdin_child, stdout_child, stderr_child) entry params
  in
  Daemon.close daemon;
  send_input_and_form_result
    ?input
    ~info
    pid
    ~stdin_parent
    ~stdout_parent
    ~stderr_parent

(* Daemon body: change directory, then replace this process image with
   [prog]. Never returns on success. *)
let chdir_main (p : chdir_params) : 'a =
  Unix.chdir p.cwd;
  let args = Array.of_list (p.prog :: p.args) in
  let env = env_to_array p.env in
  match env with
  | None -> Unix.execvp p.prog args
  | Some env -> Unix.execvpe p.prog args env

let chdir_entry : (chdir_params, 'a, 'b) Daemon.entry =
  Entry.register "chdir_main" chdir_main

(* Spawn [prog] in the current working directory. *)
let exec
    (prog : Exec_command.t)
    ?(input : string option)
    ?(env : Process_types.environment option)
    (args : string list) : Process_types.t =
  exec_no_chdir ~prog ?input ~env args

(* Spawn [prog] with [dir] as its working directory, via the chdir daemon. *)
let exec_with_working_directory
    ~(dir : string)
    (prog : Exec_command.t)
    ?(input : string option)
    ?(env = Process_types.Default)
    (args : string list) : Process_types.t =
  run_entry
    ?input
    env
    chdir_entry
    { cwd = dir; prog = Exec_command.to_string prog; env; args }
OCaml Interface
hhvm/hphp/hack/src/utils/process/process.mli
(*
 * Copyright (c) 2016, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the "hack" directory of this source tree.
 *
 *)

(** Utilities to deal with subprocesses. *)

open Process_types

module Entry : sig
  type 'param t

  val register : string -> ('param -> unit) -> 'param t
end

(** Shells out the program with the given args.
    Sends input to stdin of the spawned process if given. *)
val exec :
  Exec_command.t ->
  ?input:string ->
  ?env:Process_types.environment ->
  string list ->
  Process_types.t

(** Shells out the program with the given args. Sets the working directory
    to the one specified before executing.

    NOTE: make sure to call Daemon.check_entry_point in your main entry point
    if passing this argument! We actually spawn our own process with
    chdir_main as the entry point, which changes the current working
    directory to the desired directory, executes the program, and redirects
    the output back to the original process. Therefore, if you don't check
    the entry point, the process will use the regular main entry point
    instead, and the results will be unpredictable and difficult to
    understand.

    NOTE: the default environment for the execution is the current program's
    environment. Specify the desired environment if you want a different
    behavior.

    Sends input to stdin of the spawned process if given. *)
val exec_with_working_directory :
  dir:string ->
  Exec_command.t ->
  ?input:string ->
  ?env:Process_types.environment ->
  string list ->
  Process_types.t

val register_entry_point : string -> ('param -> unit) -> 'param Entry.t

(** Wraps an entry point inside a Process, so we get Process's goodness for
    free (read_and_wait_pid and is_ready). The entry will be spawned into a
    separate process. *)
val run_entry : ?input:string -> environment -> 'a Entry.t -> 'a -> Process_types.t

(** Reads data from stdout and stderr until EOF is reached; waits for the
    process to terminate. Returns the process's stdout and stderr.
    Idempotent.

    If the process exits with something other than (Unix.WEXITED 0), returns
    an Error. *)
val read_and_wait_pid : timeout:int -> Process_types.t -> process_result

(** Renders a [failure] as a human-readable message, including captured
    stdout/stderr. *)
val failure_msg : failure -> string

(** Human-readable rendering of a [Unix.process_status]. *)
val status_to_string : Unix.process_status -> string

(** Human-readable rendering of an [environment] specification. *)
val env_to_string : Process_types.environment -> string

(** Returns true if [read_and_wait_pid] would be nonblocking. *)
val is_ready : Process_types.t -> bool

(** Converts an [environment] into the "KEY=value" array form expected by
    [Unix.create_process_env]; [None] means "inherit the current process's
    environment unchanged". *)
val env_to_array : Process_types.environment -> string array option
OCaml
hhvm/hphp/hack/src/utils/process/process_types.ml
(*
 * Copyright (c) 2016, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the "hack" directory of this source tree.
 *
 *)

(* A Process.t represents a unix process which we execute
 * It is a mutable structure which accumulates stdout/stderr.
 * The mutation happens during calls to read_and_wait_pid.
 * As for stdin, that's sent all in one chunk at the beginning. *)

(* lifecycle is an internal book-keeping thing. The lifecycle is:
 * 1. We launch the process, and try to send all the stdin in one go.
 * 1a. If all goes well, we start in the state "Process_running"
 * 1b. If stdin exceeds the OS buffer then we'll kill it immediately and
 * start in the state "Process_killed_due_to_overflow_stdin"
 * 2. The caller calls read_and_wait_pid. If it was in Process_running,
 * go on to 2a/2b.
 * 2a. If eventually the process terminates before the timeout then we
 * transition to the state "Process_exited".
 * 2b. If the timeout happens first, then we actually bypass updating
 * current_state_of_process, and we just construct a process_result directly.
 *)
type lifecycle =
  | Lifecycle_running of { pid: int } (* the process is still running *)
  | Lifecycle_exited of Unix.process_status (* the process exited *)
  | Lifecycle_killed_due_to_overflow_stdin
      (* we killed it because its stdin payload didn't fit in one pipe write *)

(* Specification of the environment to give a child process; interpreted by
 * env_to_array in process.ml before the call to Unix.create_process_env. *)
type environment =
  | Default
  | Empty
  | Augment of string list
  | Replace of string list
[@@deriving show]

(* Invocation info is internal book-keeping to record information about
 * the process's original invocation. *)
type invocation_info = {
  name: string; (* the program that was executed *)
  args: string list; (* its command-line arguments *)
  env: environment; (* the requested environment *)
  stack: Utils.callstack; (* caller's stack at invocation time, for diagnostics *)
}
[@@deriving show]

(* type 't' represents a process, be it completed or still underway.
 * From the information in 't' we can figure out if it has completed,
 * and if so then we can synthesize the process_result. *)
type t = {
  info: invocation_info;
  stdin_fd: Unix.file_descr option ref; (* None once closed *)
  stdout_fd: Unix.file_descr option ref; (* None once closed *)
  stderr_fd: Unix.file_descr option ref; (* None once closed *)
  acc: string Stack.t; (* stdout chunks, merged into one string on completion *)
  acc_err: string Stack.t; (* stderr chunks, merged into one string on completion *)
  lifecycle: lifecycle ref;
}

(* type 'process_results' represents the end-state of waiting for a process.
   It's obtained by invoking read_and_wait_pid, which runs until either the
   lifecycle isn't Process_running, or until the timeout parameter expires. *)
type process_result = (success, failure) result

and success = {
  stdout: string;
  stderr: string;
}

and failure =
  (* process terminated with an error code *)
  | Abnormal_exit of {
      status: Unix.process_status;
      stdout: string;
      stderr: string;
    }
  (* process didn't terminate within specified timeout *)
  | Timed_out of {
      stdout: string;
      stderr: string;
    }
  (* we initially tried to send a bigger stdin than the current
     implementation allows for *)
  | Overflow_stdin

(* A placeholder [t]: a process that has already exited with status 0,
   produced no output, and holds no open file descriptors. *)
let dummy =
  {
    info =
      { name = "dummy"; env = Default; args = []; stack = Utils.Callstack "" };
    stdin_fd = ref None;
    stdout_fd = ref None;
    stderr_fd = ref None;
    acc = Stack.create ();
    acc_err = Stack.create ();
    lifecycle = ref @@ Lifecycle_exited (Unix.WEXITED 0);
  }
OCaml
hhvm/hphp/hack/src/utils/procfs/procFS.ml
(*
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 *)

open Core
module Sys = Stdlib.Sys
open Result.Monad_infix

let spf = Printf.sprintf

(** Reads the whole of /proc/<pid>/<filename>. On any exception, returns
    [Error msg] where msg is a printable rendering of the exception. *)
let read_proc_file filename pid =
  let path = spf "/proc/%d/%s" pid filename in
  (try Ok (Sys_utils.cat path) with
  | e -> Error (Exception.to_string (Exception.wrap e)))

(** Memory statistics from /proc/<PID>/status; every field is a byte count. *)
type status = {
  rss_anon: int;  (** bytes currently resident for anonymous memory *)
  rss_file: int;  (** bytes currently resident for file mappings *)
  rss_shmem: int;  (** bytes currently resident for shared memory *)
  rss_total: int;  (** total resident bytes: anon + file + shmem *)
  rss_hwm: int;  (** high-water mark of resident bytes *)
}

(* The stats we're reading always end in "kB"; anything else parses as 0.
 * NOTE(review): this multiplies by 1000, but the kernel's "kB" is 1024
 * bytes, so values are ~2.4% low — confirm whether that's intentional. *)
let humanReadableToBytes str =
  match Scanf.sscanf str "%d kB" (fun kb -> 1000 * kb) with
  | bytes -> bytes
  | exception _ -> 0

(** Parses the key/value lines of /proc/<PID>/status into a [status]. Lines
    that are not exactly "Key: Value" are skipped; missing keys read as 0. *)
let parse_status raw_status_contents =
  let add_line stats line =
    match String.split line ~on:':' with
    | [raw_key; raw_stat] ->
      SMap.add (String.strip raw_key) (String.strip raw_stat) stats
    | _ -> stats
  in
  let stats =
    List.fold_left
      (String.split raw_status_contents ~on:'\n')
      ~init:SMap.empty
      ~f:add_line
  in
  let bytes_of key =
    SMap.find_opt key stats
    |> Option.value_map ~default:0 ~f:humanReadableToBytes
  in
  {
    rss_anon = bytes_of "RssAnon";
    rss_file = bytes_of "RssFile";
    rss_shmem = bytes_of "RssShmem";
    rss_total = bytes_of "VmRSS";
    rss_hwm = bytes_of "VmHWM";
  }

(** Extracts the cgroup name from the first "<id>:<controllers>:<cgroup>"
    line of a /proc/<PID>/cgroup file. *)
let parse_cgroup raw_cgroup_contents =
  match String.split raw_cgroup_contents ~on:'\n' with
  | [] -> Error "Expected at least one cgroup in /proc/<PID>/cgroup file"
  | first_line :: _ ->
    (match String.split first_line ~on:':' with
    | [_id; _controllers; cgroup] -> Ok cgroup
    | _ ->
      Error "First line of /proc/<PID>/cgroup file was not correctly formatted")

(* Memoized check that we're on unix with /proc mounted.
 * NOTE(review): "asset" looks like a typo for "assert"; renaming would be
 * file-internal (not exported in the .mli) but is left to a separate diff. *)
let asset_procfs_supported =
  let cached = ref None in
  fun () ->
    match !cached with
    | Some answer -> answer
    | None ->
      let answer =
        if Sys.unix && Sys.file_exists "/proc" then
          Ok ()
        else
          Error "Proc filesystem not supported"
      in
      cached := Some answer;
      answer

(** Reads and parses /proc/<pid>/status. *)
let status_for_pid pid =
  asset_procfs_supported () >>= fun () ->
  read_proc_file "status" pid >>| parse_status

(** Same data as [status_for_pid], packaged as telemetry; an error is
    recorded in the telemetry rather than returned. *)
let telemetry_for_pid pid =
  match status_for_pid pid with
  | Error e -> Telemetry.create () |> Telemetry.error ~e
  | Ok { rss_anon; rss_file; rss_shmem; rss_total; rss_hwm } ->
    List.fold_left
      [
        ("rss_anon", rss_anon);
        ("rss_file", rss_file);
        ("rss_shmem", rss_shmem);
        ("rss_total", rss_total);
        ("rss_hwm", rss_hwm);
      ]
      ~init:(Telemetry.create ())
      ~f:(fun telemetry (key, value) -> telemetry |> Telemetry.int_ ~key ~value)

(* In cgroup v1 a pid can be in multiple cgroups. In cgroup v2 it will only
   be in a single cgroup. *)
let first_cgroup_for_pid pid =
  asset_procfs_supported () >>= fun () ->
  read_proc_file "cgroup" pid >>= parse_cgroup
OCaml Interface
hhvm/hphp/hack/src/utils/procfs/procFS.mli
(*
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 *)

(** Memory statistics parsed from /proc/<PID>/status.
    All fields are byte counts. *)
type status = {
  rss_anon: int;  (** anonymous memory currently resident *)
  rss_file: int;  (** file-backed mappings currently resident *)
  rss_shmem: int;  (** shared memory currently resident *)
  rss_total: int;  (** total resident memory (VmRSS): anon + file + shmem *)
  rss_hwm: int;  (** high-water mark of resident memory (VmHWM) *)
}

(** Reads and parses /proc/<pid>/status. Errors (as a printable string) if
    the proc filesystem is unavailable or the file can't be read. *)
val status_for_pid : int -> (status, string) result

(** Same data as [status_for_pid], packaged as telemetry; errors are
    recorded inside the telemetry object rather than returned. *)
val telemetry_for_pid : int -> Telemetry.t

(** Returns the cgroup named on the first line of /proc/<pid>/cgroup.
    (Under cgroup v1 a pid can be in several cgroups; under v2 just one.) *)
val first_cgroup_for_pid : int -> (string, string) result
Rust
hhvm/hphp/hack/src/utils/rust/delta_log.rs
use std::time::Duration; use std::time::Instant; use once_cell::sync::Lazy; static LOG_START: Lazy<Instant> = Lazy::new(Instant::now); static LOG_LAST: Lazy<std::sync::RwLock<Instant>> = Lazy::new(|| std::sync::RwLock::new(Instant::now())); fn log_durations() -> (Duration, Duration) { let mut last = LOG_LAST.write().unwrap(); let now = Instant::now(); let d1 = now.duration_since(*last); *last = now; drop(last); (d1, now.duration_since(*LOG_START)) } /// Initialize a logger that shows the time since start and since the /// previous log message, rather than absolute time. pub fn init_delta_logger() { use std::io::Write; let mut builder = env_logger::Builder::from_default_env(); // Be defensive against double-init by just throwing away the result. let _ = builder .format(|buf, record| { let (since_last, since_start) = log_durations(); writeln!( buf, "[{:>6.2} {:>6.1} {}] {}", since_last.as_secs_f64(), since_start.as_secs_f64(), record.level(), record.args() ) }) .try_init(); // Trigger lazy initialization. let _ = LOG_START.elapsed(); }
Rust
hhvm/hphp/hack/src/utils/rust/file_rwlock.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. #![feature(assert_matches)] use std::os::unix::prelude::FileExt; use std::path::Path; use std::path::PathBuf; /// This class provides a RwLock backed by file and flock(LOCK_EX/LOCK_SH), which /// is therefore usable as a synchronization primitive by multiple processes. /// * The file on disk may be in state "absent / present / poisoned / stopped". /// * Poisoning means that unless a process released the lock cleanly, /// then no other process can acquire it. Poisoning is important for multi-process /// since having one process die via Ctrl+C is a routine event and has to be handled. /// (As for whether poisoning is important for multi-thread, it only arises /// when a thread panics, and some people think that behavior after a panic is unimportant). /// * Unix flock is "advisory". This means that it is respected by anyone who /// uses flock, but ignored by anyone who doesn't. Thus, someone can read+write /// the the lock's file even while we consider it locked, just by bypassing RwLock. /// You might consider that a feature or a bug! but there's nothing better available. /// As a safeguard, though, we do mark the file with a "poisoned" byte while anyone /// holds a lock, so that people won't inadvertently deserialize it. /// * With an in-memory lock you know that RwLock::new() will necessarily succeed, /// but for a shared-file lock then FileRwLock::create() might fail in the case /// that the lock already exists on disk. This is indicated by LockError::Present /// Also, as a convenience, the create() method will create any directories necessary. /// * There's no 'delete' method, no way provided by this class to remove the file /// from disk. This reflects underlying OS race limitations. 
You will have to do /// std::fs::remove_file() yourself at a time when you know there won't be races, /// e.g. you know there won't be a concurrent "create". /// * As implementation, the "T" is stored on disk as pretty json. That's to help /// humans read it for debugging. It is deserialized when you lock, and serialized /// again when you release a write-lock. Also as implementation, the json is /// stored on disk with a leading space, and that leading space is replaced /// with "@" to indicate poison. This is how FileRwLock knows to fail a lock attempt /// it's poisoned, and it also prevents any non-FileRwLock consumers from inadvertently /// json-parsing the file when it's locked or poisoned. /// /// IMPLEMENTATION /// I'd like to say I copied this all from stackoverflow, but I couldn't find any file /// locks on the internet that seemed rigorous enough. A bunch of internet places suggest /// a two-file approach (file, file.lock). I struggled to see how that would provide /// serializability in the presence of crashes, and also it felt unnecessarily clunky. /// /// I had wanted to have a DELETE method instead of STOP, one that would unlink the file /// off disk. But there are two problems with that. /// First, consider "absent -> CREATE || DELETE;CREATE". The first CREATE might get in first /// and grab a file descriptor, then DELETE;CREATE runs and unlinks the file and creates a /// new file in its place and locks it, then the first CREATE continues with its own /// private (now deleted) file descriptor. We can't have two concurrent CREATEs both holding /// exclusive locks at the same time and both thinking they're the only ones. Therefore, /// the first CREATE in this situation must fail, maybe by having the DELETE mark the /// file as poisoned or deleted. But now we've invented a failure mode that's not serializable, /// that arises solely when CREATE happens concurrently with DELETE. /// Second, consider "present -> A.DELETE || B.DELETE;CREATE". 
Both A and B might open file- /// descriptors to the file. B gets the lock first, poisons the file, and unlinks it. /// B then creates a new file at the same directory location and returns an exclusive lock on it. /// A then acquires a lock on the old (now unlinked) file, poisons it a second time, /// and unlinks. The problem is that it poisoned the old version of the file but unlinked /// the new file. The end result is that CREATE believes it has an exclusive lock, /// but there exists no file on disk. This is an unavoidable race with the way unlink works: /// https://bugzilla.kernel.org/show_bug.cgi?id=93441 /// /// I picked "empty file" to represent STOPPED. That just made some code neater in "stop". /// /// The way I reasoned about this was a gigantic state diagram for two concurrent processes. /// Every state has (FILE, STATE1, STATE2): /// FILE ::= absent | empty | poisoned | value /// STATE ::= c0 -create,flock,writep-> c1 -mv[RDWR|EX]-> c2[RDWR|EX] -writev-> c3 -close-> /// w0 -open-> w1[RDWR] -flock-> w2[RDWR|EX] -read-> c3[RDWR|EX] -writep -> w4[RDWR|EX] -writev-> w5 -close-> /// r0 -open-> r1[RD] -flock-> r2[RD|SH] -read-> r3[RD|SH] -close-> /// s0 -open-> s1[RDWR] -flock-> s2[RDWR|EX] -writes-> s3[RDWR|EX] -close-> /// The correctness statement for mutual exclusion is that there should be no reachable state with /// (_, c2/w4/r3, c2/w4/r3) other than (_, r3, r3). The correctness statement for serializability is that for /// every path starting at (_,,c0/w0/r0/s0,c0/w0/r0/d0) which leads to (_,c3/w5/r3/s3,c3/w5/r3/s3), there /// exists another path which does all of process 1 followed by all of process 2 or vice versa. /// Well, I managed to draw by the full state diagram for each of the four process kinds in isolation, and for all /// pairs of processes kinds, but the diagrams grew large enough that I lost confidence that I was exhaustive. /// Also, I didn't get on to more complex combinations like "WRITE || DELETE;CREATE". 
#[derive(Debug, Clone)] pub struct FileRwLock<T> { path: PathBuf, value_type: std::marker::PhantomData<T>, } #[repr(u8)] enum State { /// This byte at the start of a file signifies that the lock is poisoned. Poisoned = b'@', /// This byte at the start of the file signifies that the lock is present /// and has a value. We use a symbol that won't interfere with json parsing, /// for human debugging convenience. Present = b' ', } impl<T: serde::Serialize + for<'de> serde::Deserialize<'de>> FileRwLock<T> { /// This method doesn't do anything on disk to the lockfile. /// It merely initializes the FileRwLock<T> structure in memory, so you /// can do subsequent create/read/write/stop operations that modify disk. pub fn new(path: PathBuf) -> Self { Self { path, value_type: std::marker::PhantomData, } } /// Creates a new lockfile on disk, resulting in the file being in 'state' /// (either present or poisoned). Returns Some(file, value). /// Will fail if the file was previously present/stopped/poisoned; all three /// cases are represented by None. fn create_impl( &mut self, value: T, state: State, ) -> Result<Option<(std::fs::File, T)>, UnexpectedError> { if let Some(parent) = self.path.parent() { std::fs::create_dir_all(parent).path_context(&self.path, "create_dir_all")?; } // We will atomically create+initialize+lock the file. This will either succeed with us // having ownership, or will fail because the file already exists. How to achieve this? // (1) create+initialize+lock the file in some other location, then "mv" it into place // For this to work we'd need renameat2(RENAME_NOREPLACE) but this isn't portable // and indeed doesn't exist on some of hack's targets. // (2) create+initialize+lock the file in some temporary location on the same device, then // do link(...) to atomically link it from the desired path, then remove the temporary. // This works portably but it's a bit of a pain to have to remove the temporary, and might // leave dangling temporaries if we crash. 
// (3) create+initialize+lock the file using O_TMPFILE, then do linkat(fd,"",AT_FDCWD,path,AT_EMPTY_PATH) // to atomically place, https://stackoverflow.com/questions/17127522/create-a-hard-link-from-a-file-handle-on-unix/18644492#18644492 // This would be nice but AT_EMPTY_PATH is gated by CAP_DAC_READ_SEARCH, which isn't in force // in some places we run. Also, O_TMPFILE is only on linux; not on bsd (mac) nor windows. // (4) as above using O_TMPFILE, but linkat(AT_FDCWD,"/proc/self/fd/{fd}",AT_FDCWD,path,AT_SYMLINK_FOLLOW) // This doesn't need the capability -- for why, see https://www.spinics.net/lists/linux-fsdevel/msg101911.html. // It still isn't portable because of O_TMPFILE. But this is the option I'll pick. // Because we're using link, we need to create the temp file on the same device. // open(O_TMPFILE) takes a directory argument for this reason, to know which device // to create on, and it's normal to use the desired containing directory. let root = PathBuf::from("/"); let parent = self.path.parent().unwrap_or(&root); // Create and lock the file. // On linux, the tempfile crate uses O_TMPFILE for a guaranteed non-leaking FD and // we can later do an atomic linkat("/proc/self/fd/{fd}"). // On other platforms, we fall back to NamedTempFile::persist_no_clobber which // might leak the temp file if we crash. #[cfg(target_os = "linux")] let mut file = tempfile::tempfile_in(parent).path_context(&self.path, "open(O_TMPFILE)")?; #[cfg(not(target_os = "linux"))] let mut nfile = tempfile::NamedTempFile::new_in(parent).path_context(&self.path, "open(O_TMPFILE)")?; #[cfg(not(target_os = "linux"))] let file = nfile.as_file_mut(); use fs2::FileExt; file.lock_exclusive() .path_context(&self.path, "flock(LOCK_EX)")?; // Initialize the file. The file will initially be in "state" provided as a parameter, // either present or poisoned, with different behaviors should we crash. 
use std::io::Write; let json = serde_json::to_string_pretty(&value) .path_context(&self.path, "json::to_string_pretty")?; let content = format!("{}{json}", state as u8 as char); file.write_all(content.as_bytes()) .path_context(&self.path, "write_initial")?; // Link the locked+initialized file into the desired location. // Our sequence point is the moment we link. #[cfg(not(target_os = "linux"))] { match nfile.persist_noclobber(&self.path) { Ok(file) => Ok(Some((file, value))), Err(e) if e.error.kind() == std::io::ErrorKind::AlreadyExists => Ok(None), Err(e) => Err(e).path_context(&self.path, "linkat"), } } #[cfg(target_os = "linux")] { use std::os::unix::io::AsRawFd; let fd = file.as_raw_fd(); use std::os::unix::ffi::OsStrExt; let c_path = std::ffi::CString::new(self.path.as_os_str().as_bytes()) .path_context(&self.path, "cpath")?; let c_proc_fd = std::ffi::CString::new(format!("/proc/self/fd/{fd}")).unwrap(); let ret = unsafe { libc::linkat( libc::AT_FDCWD, c_proc_fd.as_ptr(), libc::AT_FDCWD, c_path.as_ptr(), libc::AT_SYMLINK_FOLLOW, ) }; if ret >= 0 { Ok(Some((file, value))) } else { let e = std::io::Error::last_os_error(); if e.kind() == std::io::ErrorKind::AlreadyExists { // Either it already existed prior to this 'create' even started, // or a concurrent 'create' raced with us and won. Ok(None) } else { Err(e).path_context(&self.path, "linkat") } } } } /// Creates a new lockfile on disk. Upon success leaves the lockfile in state /// 'present' and returns an ExclusiveGuard; if we crash then other parties /// will be able to acquire the lock. If the lockfile already existed /// (i.e. it was present/stopped/poisoned) then return None. pub fn create( &mut self, value: T, ) -> Result<Option<FileRwLockExclusiveGuard<'_, T>>, UnexpectedError> { Ok(self .create_impl(value, State::Present)? .map(|(file, value)| FileRwLockExclusiveGuard { lock: MutDropRef(self), file, value, })) } /// Creates a new lockfile on disk. 
Upon success leaves the lockfile in state /// 'poisoned' and returns a WriteGuard; if we crash then other parties /// will not be able to acquire the lock. If the lockfile already existed /// (i.e. it was present/stopped/poisoned) then return None. pub fn create_poisoned( &mut self, value: T, ) -> Result<Option<FileRwLockWriteGuard<'_, T>>, UnexpectedError> { Ok(self .create_impl(value, State::Poisoned)? .map(|(file, value)| FileRwLockWriteGuard { lock: Some(MutDropRef(self)), file: Some(file), value: Some(value), })) } // Internal helper function used by read/ write... // Opens the file in RDWR or RD mode depending on the flag, // locks it with LOCK_EX or LOCK_SH depending on the flag, // reads the content, // validates that it's present (i.e. not absent/stopped/poisoned), // deserializes it from json into T, // and returns the file and T. fn open_lock_read_validate(&self, write: bool) -> Result<(std::fs::File, T), LockError> { let context = if write { "open(RDWR)" } else { "open(RD)" }; let mut file = match std::fs::OpenOptions::new() .read(true) .write(write) .open(&self.path) { Ok(file) => file, Err(e) if e.kind() == std::io::ErrorKind::NotFound => { return Err(LockError::Absent(self.path.clone())); } Err(e) => { return Err(e) .path_context(&self.path, context) .map_err(LockError::Unexpected); } }; // Our sequence point will be the moment we acquire the lock. use fs2::FileExt; if write { file.lock_exclusive() .path_context(&self.path, "flock(LOCK_EX)")?; } else { file.lock_shared() .path_context(&self.path, "flock(LOCKS_SH)")?; } // Now we have the lock, we can examine what we have in hand... 
use std::io::Read;
        let mut content = String::new();
        file.read_to_string(&mut content)
            .path_context(&self.path, "read")?;
        // The first byte is the state marker; the tests show State::Present's
        // marker is a space, so the full content (marker included) is still
        // parseable JSON and can be fed to serde_json directly.
        match content.bytes().next() {
            Some(b) if b == State::Present as u8 => {
                let value: T =
                    serde_json::from_str(&content).path_context(&self.path, "json::from_str")?;
                Ok((file, value))
            }
            Some(b) if b == State::Poisoned as u8 => Err(LockError::Poisoned(self.path.clone())),
            Some(_) => Err(anyhow::anyhow!("corrupt: {}", content))
                .path_context(&self.path, "validate")
                .map_err(LockError::Unexpected),
            None => Err(LockError::Stopped(self.path.clone())),
        }
    }

    /// Locks this lockfile with shared read access, blocking the current thread until it
    /// can be acquired. The lockfile must be present -- if absent/poisoned/stopped, error.
    pub fn read(&self) -> Result<FileRwLockSharedGuard<'_, T>, LockError> {
        // Our sequence point is the moment the lock is acquired.
        // Bug fix: a shared (read) guard must take LOCK_SH -- the previous
        // `true` here opened the file RDWR and took LOCK_EX, which would make
        // concurrent readers serialize against each other.
        let (file, value) = self.open_lock_read_validate(false)?;
        // Over to the SharedGuard, who will be responsible for
        // closing the file-descriptor and hence releasing the lock, upon drop.
        Ok(FileRwLockSharedGuard {
            lock: DropRef(self),
            file,
            value,
        })
    }

    /// Locks this lockfile with exclusive write access, blocking the current thread until it
    /// can be acquired. The lockfile must be present -- if absent/poisoned/stopped, error.
    pub fn exclusive(&mut self) -> Result<FileRwLockExclusiveGuard<'_, T>, LockError> {
        // Our sequence point is the moment the lock is acquired
        let (file, value) = self.open_lock_read_validate(true)?;
        // Over to the ExclusiveGuard, who will be responsible for closing
        // the file-descriptor and hence releasing the lock, upon drop.
        Ok(FileRwLockExclusiveGuard {
            lock: MutDropRef(self),
            file,
            value,
        })
    }

    /// Convenience: acquire exclusive access and immediately upgrade to a
    /// (poisoning) write guard.
    pub fn write(&mut self) -> Result<FileRwLockWriteGuard<'_, T>, LockError> {
        Ok(self.exclusive()?.write()?)
    }

    /// Puts the lockfile into a permanently stopped state. The only way out of this
    /// is to delete the path from disk and start over.
/// This function works fine
/// regardless of the state - present, absent, stopped, poisoned.
    pub fn stop(&mut self) -> Result<(), UnexpectedError> {
        // If the file was initially absent, the stop method still has to mark the lockfile
        // as permanently stopped. We represent permanently-stopped with an empty file.
        // The following "open" will create an empty file if none existed.
        match std::fs::OpenOptions::new()
            .write(true)
            .create(true)
            .open(&self.path)
        {
            Err(e) => Err(e).path_context(&self.path, "open(WR)"),
            Ok(file) => {
                // Our sequence point will be the moment we acquire the lock.
                use fs2::FileExt;
                file.lock_exclusive()
                    .path_context(&self.path, "flock(LOCK_EX)")?;
                // A stopped lockfile is represented by an empty file -- either
                // empty from the create(true).open() statement above at the start
                // to "stop", or we empty it right here and now.
                file.set_len(0).path_context(&self.path, "trunc")?;
                Ok(())
            }
        }
    }
}

/// This structure is a Deref around 'value', but with the additional property
/// that upon drop then it closes the file (hence releasing all flocks)
#[derive(Debug)]
pub struct FileRwLockSharedGuard<'a, T: serde::Serialize> {
    /// This lock parameter isn't actually used, but it expresses that the lock
    /// has a 'borrow' for the duration of the shared lock. This is just a convenience
    /// so that some detection of "tried to acquire exclusive lock while shared
    /// lock is held" can be done at compile-time, rather than solely at run-time.
    #[allow(unused)]
    lock: DropRef<'a, FileRwLock<T>>,
    /// Note: members are dropped in order of declaration. Thus, value will be
    /// dropped before file has been dropped, hence before the shared lock has
    /// been released. This provides some determinism guarantees should there
    /// be a destructor for value.
    value: T,
    /// This file parameter isn't actually used directly; it's only used because
    /// when the struct is dropped then the file will be dropped too, and with it
    /// the OS will drop all locks.
/// If we didn't keep this parameter then the file
    /// and its locks would be dropped too soon.
    #[allow(unused)]
    file: std::fs::File,
}

impl<T: serde::Serialize> std::ops::Deref for FileRwLockSharedGuard<'_, T> {
    type Target = T;
    fn deref(&self) -> &Self::Target {
        &self.value
    }
}

/// This structure is a Deref around 'value' with the additional property
/// that upon drop then it closes the file (hence releasing all flocks).
/// Additionally, instead of dropping, you can call write() to obtain FileRwLockWriteGuard.
/// (Unlike FileRwLockWriteGuard, the fields here are not Options: write() consumes
/// `self` by value, so there is no partially-moved state to represent.)
#[derive(Debug)]
pub struct FileRwLockExclusiveGuard<'a, T: serde::Serialize> {
    lock: MutDropRef<'a, FileRwLock<T>>,
    /// Note: members are dropped in order of declaration. Thus, value will be
    /// dropped before file has been dropped, hence before the exclusive lock
    /// has been released. This will provide some determinism for any value destructor.
    value: T,
    file: std::fs::File,
}

impl<T: serde::Serialize> std::ops::Deref for FileRwLockExclusiveGuard<'_, T> {
    type Target = T;
    fn deref(&self) -> &Self::Target {
        &self.value
    }
}

impl<'a, T: serde::Serialize> FileRwLockExclusiveGuard<'a, T> {
    /// Upgrades this exclusive guard into a write guard, poisoning the on-disk
    /// file first so a crash mid-write is visible to all other parties.
    pub fn write(self) -> Result<FileRwLockWriteGuard<'a, T>, UnexpectedError> {
        // First we'll poison the file. Prior to this point, if we crashed, then
        // everyone else would find the lockfile present and unclaimed, which is fine
        // since we've not yet returned the WriteGuard to our caller. After this point
        // though, if we crash, everyone else will find it poisoned.
        self.file
            .write_all_at(&[State::Poisoned as u8], 0)
            .path_context(&self.lock.0.path, "write(poison)")?;
        Ok(FileRwLockWriteGuard {
            lock: Some(self.lock),
            file: Some(self.file),
            value: Some(self.value),
        })
    }
}

/// This structure is a Deref+DerefMut around 'value' with the additional property
/// that upon drop/close then writes the value to disk and closes the file
/// (hence releasing all flocks).
/// You can also call commit() which writes
/// the value to disk but retains an ExclusiveGuard.
/// Invariant: fields are Some until after close/commit/drop, when they become None.
#[derive(Debug)]
pub struct FileRwLockWriteGuard<'a, T: serde::Serialize> {
    lock: Option<MutDropRef<'a, FileRwLock<T>>>,
    /// Note: members are dropped in order of declaration. Thus, value will be
    /// dropped before file has been dropped, hence before the exclusive lock
    /// has been released. This will provide some determinism for any value destructor.
    value: Option<T>,
    file: Option<std::fs::File>,
}

impl<T: serde::Serialize> std::ops::Deref for FileRwLockWriteGuard<'_, T> {
    type Target = T;
    fn deref(&self) -> &Self::Target {
        // unwrap is safe per the struct invariant: value is Some until close/commit/drop.
        self.value.as_ref().unwrap()
    }
}

impl<T: serde::Serialize> std::ops::DerefMut for FileRwLockWriteGuard<'_, T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.value.as_mut().unwrap()
    }
}

impl<'a, T: serde::Serialize> FileRwLockWriteGuard<'a, T> {
    /// Writes the value to disk (unpoisoning the file) but keeps hold of the
    /// exclusive lock by downgrading into a FileRwLockExclusiveGuard.
    pub fn commit(mut self) -> Result<FileRwLockExclusiveGuard<'a, T>, UnexpectedError> {
        let lock = self.lock.take().unwrap();
        let file = self.file.take().unwrap();
        let value = self.value.take().unwrap();
        self.commit_impl(&lock.0.path, &file, &value)?;
        Ok(FileRwLockExclusiveGuard { lock, file, value })
    }

    /// Writes the value to disk and releases the lock, reporting any error
    /// (unlike drop, which must swallow it).
    pub fn close(mut self) -> Result<(), UnexpectedError> {
        self.close_impl()
    }

    // Writes "<Present marker><json>" such that a crash at any point leaves
    // either the old content or a still-poisoned file -- never a torn value.
    fn commit_impl(
        &self,
        path: &Path,
        file: &std::fs::File,
        value: &T,
    ) -> Result<(), UnexpectedError> {
        let json =
            serde_json::to_string_pretty(value).path_context(path, "json::to_string_pretty")?;
        // We must be sure upon crash we won't leave the file either
        // empty or with partially written content but no poison marker. So: we'll write
        // the value starting at byte offset 1, trim any off the end in case the lockfile
        // has shrunk, and only after that is finished will we remove the poison pill at
        // byte offset 0.
        file.write_all_at(json.as_bytes(), 1)
            .path_context(path, "write(value@1)")?;
        // do the truncate here, after writing json, so as to minimize how much we
        // jiggle the file's size.
        file.set_len(1 + json.as_bytes().len() as u64)
            .path_context(path, "trunc")?;
        file.write_all_at(&[State::Present as u8], 0)
            .path_context(path, "write(value@0)")?;
        Ok(())
    }

    // Idempotent teardown shared by close() and Drop.
    fn close_impl(&mut self) -> Result<(), UnexpectedError> {
        let (lock, file, value) = match (self.lock.take(), self.file.take(), self.value.take()) {
            (None, None, None) => return Ok(()),
            (Some(lock), Some(file), Some(value)) => (lock, file, value),
            _ => panic!("lock, file, value should be Some/None at the same time"),
        };
        // This path arises when a user calls write_lock.close(),
        // or when a user drops us without first having called close().
        // First we commit the changes and unpoison the file contents
        self.commit_impl(&lock.0.path, &file, &value)?;
        // Now the ordinary Rust machinery will drop file, hence
        // closing the FD and releasing flocks.
        Ok(())
    }
}

impl<T: serde::Serialize> Drop for FileRwLockWriteGuard<'_, T> {
    fn drop(&mut self) {
        // Best-effort: Drop cannot propagate the error.
        let _ = self.close_impl();
    }
}

/// Errors that might arise from FileRwLock::read/write
#[derive(thiserror::Error, Debug)]
pub enum LockError {
    #[error("{} absent", .0.display())]
    Absent(PathBuf),
    #[error("{} poisoned", .0.display())]
    Poisoned(PathBuf),
    #[error("{} stopped", .0.display())]
    Stopped(PathBuf),
    #[error(transparent)]
    Unexpected(#[from] UnexpectedError),
}

/// Errors that might arise from FileRwLock::create/stop/close
#[derive(thiserror::Error, Debug)]
pub enum UnexpectedError {
    #[error("{} - {} - {}", .0.display(), .1, .2)]
    Unexpected(PathBuf, &'static str, #[source] anyhow::Error),
}

/// Internal convenience for tagging errors with the lockfile path and a short
/// operation label.
trait UnexpectedContext<T> {
    fn path_context(self, path: &Path, context: &'static str) -> Result<T, UnexpectedError>;
}

impl<T, E: Into<anyhow::Error>> UnexpectedContext<T> for Result<T, E> {
    fn path_context(self, path: &Path, context: &'static str) -> Result<T, UnexpectedError> {
        self.map_err(|e| UnexpectedError::Unexpected(path.to_owned(), context, e.into()))
    }
}

/// This struct solely provides an empty Drop method. Because of this, the lifetime 'a
/// of DropRef<'a, S> lasts until it has been dropped -- by contrast, the lifetime
/// of &'a S (without a Drop) ends earlier than the drop.
/// https://doc.rust-lang.org/nomicon/lifetimes.html#the-area-covered-by-a-lifetime
/// "[A borrow] is alive from the place it is created to its last use... if the value
/// has a destructor, the destructor is run at the end of the scope. And running
/// the destructor is considered a use."
/// Here's a practical motivation. This code would deadlock, since it tries to acquire
/// an exclusive flock on the file even while a shared lock is still being held. Without
/// DropRef, the borrow checker would say that guard1 is only used at the read() call,
/// and hence its lifetime doesn't conflict with the exclusive() call.
/// With DropRef,
/// the borrow checker will say that guard1 is (implicitly) used at the end of the block,
/// and hence its lifetime does conflict, and hence the code is disallowed.
/// {
///     let lock = FileRwLock::new(...)
///     let guard1 = lock.read()?;
///     let guard2 = lock.exclusive()?;
/// }
#[derive(Debug)]
struct DropRef<'a, S>(&'a S);

impl<'a, S> Drop for DropRef<'a, S> {
    fn drop(&mut self) {}
}

/// This struct solely provides an empty Drop method, to influence lifetime analysis
/// (which is sensitive to the presence or absence of Drop implementations). Please
/// read the docs for DropRef which explain in greater length.
#[derive(Debug)]
struct MutDropRef<'a, S>(&'a mut S);

impl<'a, S> Drop for MutDropRef<'a, S> {
    fn drop(&mut self) {}
}

#[cfg(test)]
mod tests {
    use std::assert_matches::assert_matches;

    use super::*;

    #[test]
    fn test_create_then_lock() -> anyhow::Result<()> {
        let tmpdir = tempfile::tempdir()?;
        // with an integer
        let path = tmpdir.path().join("lock1");
        let mut lock = FileRwLock::new(path.clone());
        let mut guard = lock.create(15)?.expect("no create conflicts").write()?;
        assert_eq!(*guard, 15);
        // '@' is the poison marker; write() poisons until close/commit.
        assert_eq!(std::fs::read_to_string(&path)?, "@15");
        *guard = 12;
        guard.close()?;
        // ' ' (space) is the present marker.
        assert_eq!(std::fs::read_to_string(&path)?, " 12");
        let guard = lock.write()?;
        assert_eq!(std::fs::read_to_string(&path)?, "@12");
        assert_eq!(*guard, 12);
        guard.close()?;
        assert_eq!(std::fs::read_to_string(&path)?, " 12");
        // with unit, to check that our "poison" byte is ok
        let path = tmpdir.path().join("lock2");
        let mut lock = FileRwLock::new(path.clone());
        let guard = lock.create(())?.expect("no create conflicts").write()?;
        assert_eq!(std::fs::read_to_string(&path)?, "@null");
        guard.close()?;
        // with a string, to check that it looks right with our poison byte
        let path = tmpdir.path().join("lock3");
        let mut lock = FileRwLock::new(path.clone());
        let guard = lock
            .create("!".to_owned())?
            .expect("no create conflicts")
            .write()?;
        assert_eq!(*guard, "!");
        assert_eq!(std::fs::read_to_string(&path)?, "@\"!\"");
        guard.close()?;
        Ok(())
    }

    #[test]
    fn test_cannot_create() -> anyhow::Result<()> {
        let tmpdir = tempfile::tempdir()?;
        // can't create if created
        let path = tmpdir.path().join("lock1");
        let mut lock = FileRwLock::new(path.clone());
        let guard = lock.create(16)?.expect("no create conflicts").write()?;
        guard.close()?;
        assert!(lock.create(16)?.is_none());
        // can't create if stopped
        lock.stop()?;
        let mut lock = FileRwLock::new(path);
        assert!(lock.create(17)?.is_none());
        // can't create if poisoned
        let path = tmpdir.path().join("lock2");
        std::fs::write(&path, "@12")?;
        let mut lock = FileRwLock::new(path);
        assert!(lock.create(18)?.is_none());
        Ok(())
    }

    #[test]
    fn test_cannot_lock() -> anyhow::Result<()> {
        let tmpdir = tempfile::tempdir()?;
        // can't lock if doesn't exist
        let path = tmpdir.path().join("lock1");
        let mut lock = FileRwLock::<i32>::new(path);
        assert_matches!(lock.write(), Err(LockError::Absent(_)));
        // can't lock if it's stopped
        let path = tmpdir.path().join("lock2");
        let mut lock = FileRwLock::<i32>::new(path.clone());
        lock.stop()?;
        let mut lock = FileRwLock::<i32>::new(path);
        assert_matches!(lock.write(), Err(LockError::Stopped(_)));
        // can't lock if it's poisoned
        let path = tmpdir.path().join("lock3");
        std::fs::write(&path, "@123\n")?;
        let mut lock = FileRwLock::<i32>::new(path);
        assert_matches!(lock.write(), Err(LockError::Poisoned(_)));
        Ok(())
    }

    #[test]
    fn test_lock_will_wait() -> anyhow::Result<()> {
        let tmpdir = tempfile::tempdir()?;
        let path = tmpdir.path().join("lock");
        let mut lock = FileRwLock::new(path.clone());
        let mut guard = lock.create(15)?.expect("no create conflict").write()?;
        let path2 = path;
        // Second locker blocks until the first guard is closed below.
        let thread = std::thread::spawn(move || -> anyhow::Result<_> {
            let mut lock2 = FileRwLock::<i32>::new(path2);
            let guard2 = lock2.write()?;
            Ok(*guard2)
        });
        std::thread::sleep(std::time::Duration::from_millis(100));
        *guard = 16;
        guard.close()?;
        assert_eq!(thread.join().unwrap()?, 16);
        Ok(())
    }
}
Rust
hhvm/hphp/hack/src/utils/rust/random_id.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. /// Generates a 10-digit random alphanumeric string using rand::thread_rng pub fn short_string() -> String { generate_alphanumeric(10, rand::thread_rng()) } fn generate_alphanumeric(len: usize, mut rng: rand::rngs::ThreadRng) -> String { use rand::Rng; const ALPHANUMERIC: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"; (0..len) .map(|_| ALPHANUMERIC[rng.gen_range(0..ALPHANUMERIC.len())] as char) .collect() }
Rust
hhvm/hphp/hack/src/utils/rust/relative_path.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use std::fmt; use std::fmt::Display; use std::path::Path; use std::path::PathBuf; use eq_modulo_pos::EqModuloPos; use no_pos_hash::NoPosHash; use ocamlrep::FromOcamlRep; use ocamlrep::FromOcamlRepIn; use ocamlrep::ToOcamlRep; use serde::Deserialize; use serde::Serialize; #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] #[derive(Deserialize, Serialize)] #[derive(EqModuloPos, FromOcamlRep, FromOcamlRepIn, ToOcamlRep, NoPosHash)] #[repr(u8)] pub enum Prefix { Root, Hhi, Dummy, Tmp, } impl arena_trait::TrivialDrop for Prefix {} impl Prefix { pub fn is_hhi(self) -> bool { matches!(self, Prefix::Hhi) } } impl TryFrom<usize> for Prefix { type Error = String; fn try_from(prefix_raw: usize) -> Result<Self, String> { match prefix_raw { 0 => Ok(Prefix::Root), 1 => Ok(Prefix::Hhi), 2 => Ok(Prefix::Dummy), 3 => Ok(Prefix::Tmp), _ => Err(format!("prefix {} is not defined", prefix_raw)), } } } impl Display for Prefix { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { // NB: This encoding is used in the impl of Serialize and Deserialize // for RelativePath below. match self { Self::Root => write!(f, "root"), Self::Hhi => write!(f, "hhi"), Self::Tmp => write!(f, "tmp"), Self::Dummy => write!(f, ""), } } } #[derive(Clone, Eq, Hash, PartialEq)] #[derive(EqModuloPos, NoPosHash)] pub struct RelativePath { prefix: Prefix, /// Representation invariant: the empty path is always encoded as `None`. /// This allows us to construct `RelativePath` in `const` contexts /// (because `Path::new` is not a `const fn`). 
path: Option<PathBuf>,
}

impl RelativePath {
    pub fn make(prefix: Prefix, path: PathBuf) -> Self {
        Self {
            prefix,
            // Maintain the representation invariant: empty paths are stored as None.
            path: if path.as_os_str().is_empty() {
                None
            } else {
                Some(path)
            },
        }
    }

    pub const EMPTY: Self = Self {
        prefix: Prefix::Dummy,
        path: None,
    };

    pub fn is_empty(&self) -> bool {
        self == &Self::EMPTY
    }

    pub fn has_extension(&self, s: impl AsRef<Path>) -> bool {
        self.path().extension() == Some(s.as_ref().as_os_str())
    }

    /// The suffix; the empty-path case (stored as None) is surfaced as "".
    pub fn path(&self) -> &Path {
        self.path.as_deref().unwrap_or(Path::new(""))
    }

    /// Panics if the suffix is not valid UTF-8.
    pub fn path_str(&self) -> &str {
        self.path().to_str().unwrap()
    }

    pub fn prefix(&self) -> Prefix {
        self.prefix
    }

    pub fn is_hhi(&self) -> bool {
        self.prefix.is_hhi()
    }

    pub fn to_absolute(&self, ctx: &RelativePathCtx) -> PathBuf {
        ctx.prefix_path(self.prefix).join(self.path())
    }
}

impl Display for RelativePath {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}|{}", self.prefix, self.path().display())
    }
}

impl std::fmt::Debug for RelativePath {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{self}")
    }
}

// This custom impl of Ord treats the path suffix as raw bytes instead of a
// Path, so that they are ordered the same as in OCaml (i.e., `foo.bar` comes
// before `foo/bar` lexicographically, but Rust Paths consider `foo/bar` to come
// first because the `foo` component is shorter than `foo.bar`)
impl Ord for RelativePath {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.prefix
            .cmp(&other.prefix)
            .then(self.path().as_os_str().cmp(other.path().as_os_str()))
    }
}

impl PartialOrd for RelativePath {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

// This custom implementation of Serialize/Deserialize encodes the RelativePath
// as a string. This allows using it as a map key in serde_json.
impl Serialize for RelativePath {
    // Encodes as "prefix|suffix", matching the Display impl above.
    fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        let path_str = (self.path().to_str())
            .ok_or_else(|| serde::ser::Error::custom("path contains invalid UTF-8 characters"))?;
        serializer.serialize_str(&format!("{}|{}", self.prefix, path_str))
    }
}

// See comment on impl of Serialize above.
impl<'de> Deserialize<'de> for RelativePath {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        struct Visitor;

        impl<'de> serde::de::Visitor<'de> for Visitor {
            type Value = RelativePath;

            fn expecting(&self, formatter: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
                write!(formatter, "a string for RelativePath")
            }

            // Parses "prefix|suffix" as produced by Serialize/Display.
            fn visit_str<E>(self, value: &str) -> Result<RelativePath, E>
            where
                E: serde::de::Error,
            {
                let mut split = value.splitn(2, '|');
                let prefix_str = split.next();
                let path_str = split.next();
                assert!(split.next().is_none(), "splitn(2) should yield <=2 results");
                let prefix = match prefix_str {
                    Some("root") => Prefix::Root,
                    Some("hhi") => Prefix::Hhi,
                    Some("tmp") => Prefix::Tmp,
                    Some("") => Prefix::Dummy,
                    _ => {
                        return Err(E::invalid_value(
                            serde::de::Unexpected::Other(&format!(
                                "unknown relative_path::Prefix: {:?}",
                                value
                            )),
                            &self,
                        ));
                    }
                };
                let path = match path_str {
                    // Preserve the representation invariant: empty suffix is None.
                    Some(path_str) if path_str.is_empty() => None,
                    Some(path_str) => Some(PathBuf::from(path_str)),
                    None => {
                        return Err(E::invalid_value(
                            serde::de::Unexpected::Other(
                                "missing pipe or got empty string \
                                 when deserializing RelativePath",
                            ),
                            &self,
                        ));
                    }
                };
                Ok(RelativePath { prefix, path })
            }
        }

        deserializer.deserialize_str(Visitor)
    }
}

impl ToOcamlRep for RelativePath {
    // OCaml representation is a 2-tuple of (prefix, path).
    fn to_ocamlrep<'a, A: ocamlrep::Allocator>(&'a self, alloc: &'a A) -> ocamlrep::Value<'a> {
        let mut block = alloc.block_with_size(2);
        alloc.set_field(&mut block, 0, alloc.add(&self.prefix));
        alloc.set_field(&mut block, 1, alloc.add(self.path()));
        block.build()
    }
}

impl FromOcamlRep for RelativePath {
    fn from_ocamlrep(value:
ocamlrep::Value<'_>) -> Result<Self, ocamlrep::FromError> {
        let block = ocamlrep::from::expect_tuple(value, 2)?;
        let prefix = ocamlrep::from::field(block, 0)?;
        let path: PathBuf = ocamlrep::from::field(block, 1)?;
        // Go through `make` so the empty-path invariant is re-established.
        Ok(Self::make(prefix, path))
    }
}

pub type Map<T> = std::collections::BTreeMap<RelativePath, T>;

pub mod map {
    pub use super::Map;
}

/// The absolute base directory for each Prefix; used by RelativePath::to_absolute.
#[derive(Debug, Default, Clone)]
#[derive(serde::Serialize, serde::Deserialize)]
pub struct RelativePathCtx {
    pub root: PathBuf,
    pub hhi: PathBuf,
    pub tmp: PathBuf,
    pub dummy: PathBuf,
}

impl RelativePathCtx {
    pub fn prefix_path(&self, prefix: Prefix) -> &Path {
        match prefix {
            Prefix::Root => &self.root,
            Prefix::Hhi => &self.hhi,
            Prefix::Tmp => &self.tmp,
            Prefix::Dummy => &self.dummy,
        }
    }
}

#[cfg(test)]
mod tests {
    use pretty_assertions::assert_eq;

    use super::*;

    #[test]
    fn test_valid_usize_prefix() {
        let valid_prefix: usize = 2;
        assert_eq!(Ok(Prefix::Dummy), Prefix::try_from(valid_prefix));
    }

    #[test]
    fn test_invalid_usize_prefix() {
        let invalid_prefix: usize = 22;
        assert_eq!(
            Err("prefix 22 is not defined".to_string()),
            Prefix::try_from(invalid_prefix)
        )
    }
}
Rust
hhvm/hphp/hack/src/utils/rust/relative_path_utils.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use std::path::Path; use std::path::PathBuf; /// This function is like std::path::Path::strip_prefix, /// namely it returns a result such that base.join(result) is the same file as 'path', /// and returns an error if there's no such result. /// * However, this function operates on files on disk: base.join(result) *identifies the same file* as path. /// Meanwhile, strip_prefix operates on strings: it's spec is that its base.join(result) is the same *string* as path. /// * The first way it operates on files is that it works relative to the current working directory (CWD). /// e.g. if cwd is "/home/ljw/www/flib" and root is "/home/ljw/www" /// then given "a.php" this function will return "flib/a.php" /// and given "../a.php" this function will return "a.php" /// * The second way it operates on files is that it works up to std::fs::canonicalize of base. /// e.g. if root is "/data/users/ljw/www-hg" and "/home/ljw/www" is a symlink to root, then /// then given "/home/ljw/www/flib/a.php" this function will return "flib/a.php" /// It does this by using std::fs::canonicalize. /// For efficiency, it assumes that base has already been canonicalized (so as to avoid doing a file-system lookup on it). /// * This function works for files that don't exist. /// You wouldn't be surprised that std::path::Path::strip_prefix does too, since it operates only on strings. /// But this function uses std::fs::canonicalize, which normally works only on files that exist. /// This function nevertheless succeeds, by merely requiring that base exists, and not requiring /// that the result exist too. 
pub fn canonicalized_strip_prefix_relative_to_cwd(
    path: &Path,
    canonicalized_base: &Path,
) -> anyhow::Result<PathBuf> {
    // This function solves for canonicalization of non-existent files
    // by walking up the hierarchy until it finds one that does exist. For example,
    // 1. canonicalize_candidate "/home/users/ljw/www/d/e/a.php", trailing None
    // 2. canonicalize_candidate "/home/users/ljw/www/d/e", trailing Some("a.php")
    // 3. canonicalize_candidate "/home/users/ljw/www/d", trailing Some("e/a.php")
    // It walks up the hierarchy of canonicalize_candidate until it finds one that exists.
    // For instance it might find that step 3 does exist, and resolves to "/data/users/ljw/www-hg/d"
    // which matches root "/data/users/ljw/www-hg". Therefore, the result from this function
    // will be "d/e/a.php" where "d" came from canonicalize and strip_prefix, and "e/a.php" came
    // from walking up the hierarchy until we found something that could canonicalize.
    // Therefore the final answer is "d/e/a.php", i.e. the suffix after stripping plus whatever was trailing.
    let mut trailing = None;
    let mut canonicalize_candidate = PathBuf::from(path);
    // If path is relative of the form "ax.php" then we can't canonicalize it as is, and it has no parent.
    // Let's normalize it to "./ax.php" so we can at least canonicalize the parent.
    if let Some(std::path::Component::Normal(_)) = path.components().next() {
        canonicalize_candidate = PathBuf::from(".").join(path);
    }
    loop {
        match std::fs::canonicalize(&canonicalize_candidate) {
            Ok(canonicalized) => match canonicalized.strip_prefix(canonicalized_base) {
                // Found an existing ancestor under base: re-attach the
                // components we peeled off on the way up.
                Ok(suffix) => return Ok(path_join_opt(suffix, trailing)),
                Err(_) => anyhow::bail!(
                    "Path \"{}\" (realpath {}) doesn't start with base {}",
                    path.display(),
                    canonicalized.display(),
                    canonicalized_base.display()
                ),
            },
            Err(_) => match (
                canonicalize_candidate.parent(),
                canonicalize_candidate.file_name(),
            ) {
                (Some(parent), Some(file)) => {
                    // Doesn't exist: peel off the last component and retry the parent.
                    trailing = Some(path_join_opt(file.as_ref(), trailing));
                    canonicalize_candidate = PathBuf::from(parent);
                    continue;
                }
                _ => anyhow::bail!("Can't realpath \"{}\"", path.display(),),
            },
        }
    }
}

/// This helper is for the common situation that you have a Vec<PathBuf>
/// and want to turn it into RelativePath. Use it like v.filter_map(some_if_root(&root)).
/// It is called some_if_root because it only returns Some(RelativePath) for the
/// paths given to it that were indeed root-relative; it returns None for everything else.
/// Note: it requires root to be canonicalized, and it treats any non-absolute
/// input paths as CWD-relative.
pub fn some_if_root(root: &Path) -> Box<dyn Fn(&PathBuf) -> Option<relative_path::RelativePath>> {
    let root = root.to_owned();
    Box::new(
        move |path| match canonicalized_strip_prefix_relative_to_cwd(path, &root) {
            Err(_) => None,
            Ok(suffix) => Some(relative_path::RelativePath::make(
                relative_path::Prefix::Root,
                suffix,
            )),
        },
    )
}

/// Like path.join, except is a no-op if suffix is None.
fn path_join_opt(path: &Path, suffix: Option<PathBuf>) -> PathBuf {
    match suffix {
        None => path.to_owned(),
        Some(suffix) => path.join(suffix),
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_strip_prefix() -> anyhow::Result<()> {
        let tmpdir = tempfile::TempDir::new()?;
        let tmp = PathBuf::from(tmpdir.path());
        let empty = PathBuf::new();
        // within $tmp, we will create the following directory structure
        // $tmp/www-hg <- this will be the project root
        // $tmp/www-hg/.hhconfig
        // $tmp/www-hg/a.php
        // $tmp/www-hg/d/b.php
        // $tmp/www <- a symlink to www-hg
        // $tmp/t <- a symlink to $tmp
        // $tmp/c.php <- a file not part of any root
        // $tmp/hhi
        let root = tmp.join("www-hg");
        let link_root = tmp.join("www");
        std::fs::create_dir(&root)?;
        let root = std::fs::canonicalize(&root)?;
        std::os::unix::fs::symlink(&root, &link_root)?;
        std::fs::write(root.join(".hhconfig"), "")?;
        std::fs::write(root.join("a.php"), "<?hh")?;
        std::fs::create_dir(root.join("d"))?;
        std::fs::write(root.join("d").join("b.php"), "<?hh")?;
        std::os::unix::fs::symlink(&tmp, tmp.join("t"))?;
        std::fs::create_dir(root.join("hhi"))?;
        // a helper - given specified "base/suffix", extracts the root-relative path as a string
        let relpath = |base: &Path, suffix: &str| -> anyhow::Result<String> {
            Ok(
                canonicalized_strip_prefix_relative_to_cwd(&base.join(suffix), &root)?
                    .to_str()
                    .unwrap()
                    .to_owned(),
            )
        };
        // files in the repo that exist
        assert_eq!(&relpath(&root, "a.php")?, "a.php");
        assert_eq!(&relpath(&root, "d/b.php")?, "d/b.php");
        assert_eq!(&relpath(&link_root, "a.php")?, "a.php");
        assert_eq!(&relpath(&link_root, "d/b.php")?, "d/b.php");
        // files in the repo that don't exist
        assert_eq!(&relpath(&root, "ax.php")?, "ax.php");
        assert_eq!(&relpath(&root, "d/bx.php")?, "d/bx.php");
        assert_eq!(&relpath(&link_root, "ax.php")?, "ax.php");
        assert_eq!(&relpath(&link_root, "d/bx.php")?, "d/bx.php");
        // files outside the repo
        assert!(relpath(&tmp, "c.php").is_err());
        assert!(relpath(&tmp, "cx.php").is_err());
        assert!(relpath(&tmp, "t/c.php").is_err());
        assert!(relpath(&tmp, "t/cx.php").is_err());
        // relative to CWD
        std::env::set_current_dir(&root)?;
        assert_eq!(&relpath(&empty, "a.php")?, "a.php");
        assert_eq!(&relpath(&empty, "d/b.php")?, "d/b.php");
        assert_eq!(&relpath(&empty, "ax.php")?, "ax.php");
        assert_eq!(&relpath(&empty, "d/bx.php")?, "d/bx.php");
        assert_eq!(&relpath(&empty, "./a.php")?, "a.php");
        assert_eq!(&relpath(&empty, "./d/b.php")?, "d/b.php");
        assert_eq!(&relpath(&empty, "./ax.php")?, "ax.php");
        assert_eq!(&relpath(&empty, "./d/bx.php")?, "d/bx.php");
        std::env::set_current_dir(root.join("d"))?;
        assert_eq!(&relpath(&empty, "b.php")?, "d/b.php");
        assert_eq!(&relpath(&empty, "bx.php")?, "d/bx.php");
        assert_eq!(&relpath(&empty, "./b.php")?, "d/b.php");
        assert_eq!(&relpath(&empty, "./bx.php")?, "d/bx.php");
        assert_eq!(&relpath(&empty, "../a.php")?, "a.php");
        assert_eq!(&relpath(&empty, "../ax.php")?, "ax.php");
        assert!(&relpath(&empty, "../../x.php").is_err());
        // relative to CWD, a symlink
        std::env::set_current_dir(&link_root)?;
        assert_eq!(&relpath(&empty, "a.php")?, "a.php");
        assert_eq!(&relpath(&empty, "d/b.php")?, "d/b.php");
        assert_eq!(&relpath(&empty, "ax.php")?, "ax.php");
        assert_eq!(&relpath(&empty, "d/bx.php")?, "d/bx.php");
        assert_eq!(&relpath(&empty, "./a.php")?, "a.php");
        assert_eq!(&relpath(&empty, "./d/b.php")?, "d/b.php");
        assert_eq!(&relpath(&empty, "./ax.php")?, "ax.php");
        assert_eq!(&relpath(&empty, "./d/bx.php")?, "d/bx.php");
        std::env::set_current_dir(link_root.join("d"))?;
        assert_eq!(&relpath(&empty, "b.php")?, "d/b.php");
        assert_eq!(&relpath(&empty, "bx.php")?, "d/bx.php");
        assert_eq!(&relpath(&empty, "./b.php")?, "d/b.php");
        assert_eq!(&relpath(&empty, "./bx.php")?, "d/bx.php");
        assert_eq!(&relpath(&empty, "../a.php")?, "a.php");
        assert_eq!(&relpath(&empty, "../ax.php")?, "ax.php");
        assert!(&relpath(&empty, "../../x.php").is_err());
        // filter_map
        std::env::set_current_dir(&root)?;
        let v = vec![
            PathBuf::from("a.php"),
            PathBuf::from("../ao.php"), // outside root
            PathBuf::from("d/b.php"),
            PathBuf::from("ax.php"),
        ]
        .iter()
        .filter_map(some_if_root(&root))
        .collect::<Vec<_>>();
        use relative_path::Prefix;
        use relative_path::RelativePath;
        assert_eq!(
            v,
            vec![
                RelativePath::make(Prefix::Root, PathBuf::from("a.php")),
                RelativePath::make(Prefix::Root, PathBuf::from("d/b.php")),
                RelativePath::make(Prefix::Root, PathBuf::from("ax.php"))
            ]
        );
        Ok(())
    }
}
Rust
hhvm/hphp/hack/src/utils/rust/signed_source.rs
// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.

use bstr::ByteSlice;
use once_cell::sync::Lazy;
use regex::bytes::Regex;

/// This crate is a port of hphp/hack/src/utils/signed_source.ml, which was
/// based on a historical version of fbsource/tools/signedsource.py.
/// The signing token, which you must embed in the file you wish to sign.
/// Generally, you should put this in a header comment.
pub static SIGNING_TOKEN: &str = concat!(
    "@",
    "generated",
    " ",
    "<<SignedSource::*O*zOeWoEQle#+L!plEphiEmie@IsG>>"
);

/// Sign a source file into which you have previously embedded a signing token.
/// Signing modifies only the signing token, so the semantics of the file will
/// not change if the token is put in a comment.
///
/// Returns `TokenNotFoundError` if no signing token is present.
pub fn sign_file(data: &[u8]) -> Result<Vec<u8>, TokenNotFoundError> {
    // Normalize first: an existing signature or the old-style token is
    // rewritten back to the canonical unsigned TOKEN, so re-signing an
    // already-signed file works.
    let data = SIGN_OR_OLD_TOKEN.replace_all(data, TOKEN.as_bytes());
    if !data.contains_str(TOKEN) {
        return Err(TokenNotFoundError);
    }
    // The digest is computed over the normalized (token-bearing) content,
    // then spliced in place of the token.
    let signature = format!("SignedSource<<{}>>", hash(&data));
    Ok(TOKEN_REGEX
        .replace_all(&data, signature.as_bytes())
        .into_owned())
}

/// Sign a UTF-8 source file into which you have previously embedded a signing
/// token. Signing modifies only the signing token, so the semantics of the file
/// will not change if the token is put in a comment.
///
/// Returns `TokenNotFoundError` if no signing token is present.
pub fn sign_utf8_file(data: &str) -> Result<String, TokenNotFoundError> {
    let data = sign_file(data.as_bytes())?;
    // SAFETY: `data` was a valid `&str` before signing, and signing only
    // replaces ASCII characters with other ASCII characters.
    unsafe { Ok(String::from_utf8_unchecked(data)) }
}

/// Determine whether a file is signed. This does NOT verify the signature.
pub fn is_signed(data: &[u8]) -> bool {
    SIGNING_REGEX.is_match(data)
}

/// Outcome of `verify_signature`.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum SignCheckResponse {
    Ok,
    Unsigned,
    Invalid,
}

/// Verify a file's signature.
pub fn verify_signature(data: &[u8]) -> SignCheckResponse {
    // Extract the hex digest embedded in the signature, if any.
    let expected_md5 = match SIGNING_REGEX.captures(data) {
        None => return SignCheckResponse::Unsigned,
        Some(caps) => match caps.get(1) {
            None => return SignCheckResponse::Unsigned,
            Some(cap) => cap.as_bytes(),
        },
    };
    // A file may have been signed with either the current or the old token;
    // reconstruct the unsigned form for each and compare digests.
    for tok in [TOKEN, OLD_TOKEN] {
        let replacement = make_signing_token(tok);
        let unsigned_data = SIGNING_REGEX.replace_all(data, replacement.as_bytes());
        let actual_md5 = hash(&unsigned_data);
        if expected_md5 == actual_md5.as_bytes() {
            return SignCheckResponse::Ok;
        }
    }
    SignCheckResponse::Invalid
}

/// The raw token (without the `@generated` prefix) that gets replaced by the
/// signature when a file is signed.
static TOKEN: &str = "<<SignedSource::*O*zOeWoEQle#+L!plEphiEmie@IsG>>";

/// This old token was historically used as the signing token. It was replaced
/// because it is 2 characters shorter than the final signature, and as a result,
/// signing data with the old token forced the entire string to be rewritten
/// (everything after the token needs to be shifted forwards 2 bytes).
/// In this implementation, we rewrite the entire string anyway.
static OLD_TOKEN: &str = "<<SignedSource::*O*zOeWoEQle#+L!plEphiEmie@I>>";

fn make_signing_token(token: &str) -> String {
    format!("@{} {}", "generated", token)
}

// Matches an embedded signature, capturing the hex MD5 digest.
static SIGNATURE_RE: &str = r"SignedSource<<([a-f0-9]+)>>";

// Matches either an existing signature or the old-style token, so that both
// are normalized back to `TOKEN` before signing.
static SIGN_OR_OLD_TOKEN: Lazy<Regex> =
    Lazy::new(|| Regex::new(&format!("{}|{}", SIGNATURE_RE, regex::escape(OLD_TOKEN))).unwrap());

// Matches the full `@generated SignedSource<<...>>` form in signed files.
static SIGNING_REGEX: Lazy<Regex> =
    Lazy::new(|| Regex::new(&make_signing_token(SIGNATURE_RE)).unwrap());

// Matches the literal (unsigned) token.
static TOKEN_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(&regex::escape(TOKEN)).unwrap());

/// Hex-encoded MD5 digest of `data`.
fn hash(data: &[u8]) -> String {
    use md5::Digest;
    let mut digest = md5::Md5::new();
    digest.update(data);
    hex::encode(digest.finalize())
}

#[derive(Debug, thiserror::Error, PartialEq, Eq)]
#[error("Failed to sign file: input does not contain signing token")]
pub struct TokenNotFoundError;

#[cfg(test)]
mod test {
    use super::is_signed;
    use super::make_signing_token;
    use super::sign_utf8_file;
    use super::verify_signature;
    use super::SignCheckResponse;
    use super::TokenNotFoundError;
    use super::SIGNING_TOKEN;
    use super::TOKEN;

    static NO_TOKEN: &str = concat!("// @", "generated\nfn foo() {}");
    static INVALID: &str = concat!(
        "// @",
        "generated SignedSource<<48ab1081d9394843f184debf0b251a18>>\nfn foo() {}"
    );
    static UNSIGNED: &str = concat!(
        "// @",
        "generated <<SignedSource::*O*zOeWoEQle#+L!plEphiEmie@IsG>>\nfn foo() {}"
    );
    // Below signature was manually verified to be equal to the OCaml
    // Signed_source output for `UNSIGNED`.
    static SIGNED: &str = concat!(
        "// @",
        "generated SignedSource<<38ab1081d9394843f184debf0b251a18>>\nfn foo() {}"
    );

    #[test]
    fn test_signing_token() {
        // We use `concat!` so that `SIGNING_TOKEN` can be a `&str` rather than
        // a `Lazy`, since `make_signing_token` can't be a `const fn` yet.
        // Verify that we're producing the same result.
        assert_eq!(SIGNING_TOKEN, make_signing_token(TOKEN))
    }

    #[test]
    fn test_sign_utf8_file() {
        assert_eq!(sign_utf8_file(UNSIGNED), Ok(SIGNED.to_owned()));
        // Signing is idempotent.
        assert_eq!(sign_utf8_file(SIGNED), Ok(SIGNED.to_owned()));
        assert_eq!(sign_utf8_file(NO_TOKEN), Err(TokenNotFoundError));
    }

    #[test]
    fn test_is_signed() {
        assert!(is_signed(SIGNED.as_bytes()));
        assert!(is_signed(INVALID.as_bytes())); // `is_signed` doesn't validate
        assert!(!is_signed(NO_TOKEN.as_bytes()));
        assert!(!is_signed(UNSIGNED.as_bytes()));
    }

    #[test]
    fn test_verify_signature() {
        assert_eq!(verify_signature(SIGNED.as_bytes()), SignCheckResponse::Ok);
        assert_eq!(
            verify_signature(INVALID.as_bytes()),
            SignCheckResponse::Invalid
        );
        assert_eq!(
            verify_signature(NO_TOKEN.as_bytes()),
            SignCheckResponse::Unsigned
        );
        assert_eq!(
            verify_signature(UNSIGNED.as_bytes()),
            SignCheckResponse::Unsigned
        );
    }
}
TOML
hhvm/hphp/hack/src/utils/rust/delta_log/Cargo.toml
# @generated by autocargo [package] name = "delta_log" version = "0.0.0" edition = "2021" [lib] path = "../delta_log.rs" [dependencies] env_logger = "0.10" once_cell = "1.12"
TOML
hhvm/hphp/hack/src/utils/rust/file_rwlock/Cargo.toml
# @generated by autocargo [package] name = "file_rwlock" version = "0.0.0" edition = "2021" [lib] path = "../file_rwlock.rs" [dependencies] anyhow = "1.0.71" fs2 = "0.4" libc = "0.2.139" serde = { version = "1.0.176", features = ["derive", "rc"] } serde_json = { version = "1.0.100", features = ["float_roundtrip", "unbounded_depth"] } tempfile = "3.5" thiserror = "1.0.43"
TOML
hhvm/hphp/hack/src/utils/rust/measure/Cargo.toml
# @generated by autocargo [package] name = "measure" version = "0.0.0" edition = "2021" [lib] path = "measure.rs" [dependencies] hash = { version = "0.0.0", path = "../../hash" } ocamlrep = { version = "0.1.0", git = "https://github.com/facebook/ocamlrep/", branch = "main" } once_cell = "1.12" parking_lot = { version = "0.12.1", features = ["send_guard"] }
Rust
hhvm/hphp/hack/src/utils/rust/measure/measure.rs
// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.

//! A simple telemetry crate ported from src/utils/core/measure.ml.
//!
//! The OCaml implementation will call into this one upon invocations of
//! `Measure.push_global` and `Measure.pop_global`, unioning the Rust
//! measurements into the returned OCaml record.
//!
//! The `measure` crate is primarily useful for debugging. It's particularly
//! useful for gathering stats about something that happens a lot. Let's say you
//! have some code like this
//!
//!     let number_bunnies = count_bunnies();
//!
//! If you want to debug how many bunnies are being counted, you could do
//! something like
//!
//!     let number_bunnies = count_bunnies();
//!     eprintln!("Num bunnies: {number_bunnies}");
//!
//! but what if this code is called 1000 times? Then you end up with log spew.
//! Using the `measure` crate helps with this. You can now do
//!
//!     let number_bunnies = count_bunnies();
//!     measure::sample("num_bunnies", number_bunnies);
//!
//! and then later you do
//!
//!     measure::print_stats();
//!
//! which will print the number of samples, the total, the average, the
//! variance, the max and the min.
//!
//! Measurements are stored in a stateful way in a record. You can either use a
//! global record or a local record.
//!
//! Using a global record:
//!
//!     measure::sample("num_bunnies", number_bunnies);
//!     measure::print_stats();
//!
//! You can push and pop the global record. This is useful if you want to reset
//! some counters without throwing away that data.
//!
//!     measure::push_global();
//!     // ...measure stuff
//!     let record = measure::pop_global();
//!     record.print_stats();
//!
//! Using a local record:
//!
//!     let record = measure::Record::default();
//!     record.sample("num_bunnies", number_bunnies);
//!     record.print_stats();
//!
//! A record does not store the individual measurements, just the aggregate
//! stats, which are updated online.

use ocamlrep::ToOcamlRep;
use once_cell::sync::Lazy;
use parking_lot::RwLock;

/// Aggregate statistics keyed by record name. Concurrent samples are supported
/// via the inner `DashMap`.
#[derive(Debug, Default)]
pub struct Record {
    entries: hash::DashMap<RecordName, RecordEntry>,
}

/// A `RecordName` can be constructed from a single static string which does not
/// contain the `(` character, or a pair of static strings where the first does
/// not contain the `(` character. When constructed from a pair, e.g.,
/// `RecordName::from(("foo", "bar"))`, the record name will be rendered as the
/// first string followed by the second, parenthesized, e.g., `"foo (bar)"`.
/// This is in order to support some existing patterns for constructing record
/// names in our OCaml without requiring the caller to concatenate strings at
/// sample time.
#[derive(Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub struct RecordName(&'static str, Option<&'static str>);

impl From<&'static str> for RecordName {
    fn from(name: &'static str) -> Self {
        // `(` is reserved for rendering the optional suffix.
        debug_assert!(!name.contains('('));
        Self(name, None)
    }
}

impl From<(&'static str, &'static str)> for RecordName {
    fn from(names: (&'static str, &'static str)) -> Self {
        debug_assert!(!names.0.contains('('));
        Self(names.0, Some(names.1))
    }
}

// The global record stack; starts with one record so sampling works without an
// explicit `push_global`.
static GLOBAL: Lazy<RwLock<Vec<Record>>> = Lazy::new(|| RwLock::new(vec![Default::default()]));

/// Push a fresh record onto the global stack; subsequent samples accumulate
/// into it until the matching `pop_global`.
pub fn push_global() {
    GLOBAL.write().push(Default::default())
}

/// # Panics
///
/// Panics if invoked when the global record stack is empty.
pub fn pop_global() -> Record {
    match GLOBAL.write().pop() {
        Some(record) => record,
        None => panic!("measure::pop_global called with empty stack"),
    }
}

/// Print the stats of the current (topmost) global record to stderr.
pub fn print_stats() {
    let stack = GLOBAL.read();
    let record = stack
        .last()
        .expect("No global record available! Did you forget to call measure::push_global?");
    record.print_stats()
}

/// # Panics
///
/// Panics if invoked when the global record stack is empty (i.e.,
/// `measure::pop_global` was called without a corresponding
/// `measure::push_global`).
#[inline]
pub fn sample(name: impl Into<RecordName>, value: f64) {
    let name: RecordName = name.into();
    sample_impl(name, value)
}

fn sample_impl(name: RecordName, value: f64) {
    let stack = GLOBAL.read();
    let record = stack
        .last()
        .expect("No global record available! Did you forget to call measure::push_global?");
    record.sample_impl(name, value)
}

impl Record {
    pub fn new() -> Self {
        Default::default()
    }

    /// Fold `value` into the aggregate stats stored under `name`.
    #[inline]
    pub fn sample(&self, name: impl Into<RecordName>, value: f64) {
        let name: RecordName = name.into();
        self.sample_impl(name, value)
    }

    fn sample_impl(&self, name: RecordName, value: f64) {
        let mut entry = self.entries.entry(name).or_default();
        let RecordEntry {
            count: old_count,
            mean: old_mean,
            variance_sum,
            max,
            min,
            distribution: _,
        } = *entry;
        // The OCaml version allows different weights, but that feature seems to
        // be unused.
        let weight = 1.0;
        // Add `1 * weight` to the count
        let count = old_count + weight;
        let mean = old_mean + (weight * (value - old_mean) / count);
        // Knuth's online variance approximation algorithm, updated for weights.
        // Weighted version from http://people.ds.cam.ac.uk/fanf2/hermes/doc/antiforgery/stats.pdf
        let variance_sum = variance_sum + (weight * (value - old_mean) * (value - mean));
        let max = f64::max(max, value);
        let min = f64::min(min, value);
        *entry = RecordEntry {
            count,
            mean,
            variance_sum,
            max,
            min,
            distribution: (),
        };
    }

    /// Print one line per entry to stderr, sorted by record name.
    pub fn print_stats(&self) {
        let mut entries: Vec<_> = (self.entries.iter())
            .map(|kv| (*kv.key(), *kv.value()))
            .collect();
        entries.sort_unstable_by_key(|&(name, _)| name);
        for (name, entry) in &entries {
            let prefix = format!("{name} stats --");
            if entry.count == 0.0 {
                eprintln!("{prefix} NO DATA");
            } else {
                let total = entry.count * entry.mean;
                let std_dev = (entry.variance_sum / entry.count).sqrt();
                eprintln!(
                    "{prefix} samples: {}, total: {}, avg: {}, stddev: {}, max: {}, min: {}",
                    pretty_num(entry.count),
                    pretty_num(total),
                    pretty_num(entry.mean),
                    pretty_num(std_dev),
                    pretty_num(entry.max),
                    pretty_num(entry.min),
                )
            }
        }
    }
}

/// Human-readable rendering: values above 1e3/1e6/1e9 are shown with a K/M/G
/// suffix and 3 decimal places; integral values below 1000 are printed without
/// a decimal point.
// NOTE(review): for negative integral values the `f as u64` cast saturates to
// 0 — presumably samples are non-negative; confirm with callers.
fn pretty_num(f: f64) -> String {
    if f > 1000000000.0 {
        format!("{:.3}G", f / 1000000000.0)
    } else if f > 1000000.0 {
        format!("{:.3}M", f / 1000000.0)
    } else if f > 1000.0 {
        format!("{:.3}K", f / 1000.0)
    } else if f == f.floor() {
        format!("{}", f as u64)
    } else {
        format!("{}", f)
    }
}

#[derive(Copy, Clone, Debug, ToOcamlRep)]
struct RecordEntry {
    count: f64,
    mean: f64,
    variance_sum: f64,
    max: f64,
    min: f64,
    // Included only for the sake of the derived ToOcamlRep impl.
    // We're taking advantage here of the fact that `None` has the same
    // representation in OCaml as `unit` (since in OCaml this field has type
    // `distribution option`).
    distribution: (),
}

impl Default for RecordEntry {
    fn default() -> Self {
        Self {
            count: 0.0,
            mean: 0.0,
            variance_sum: 0.0,
            // max/min start at the extremes so the first sample always wins.
            max: f64::MIN,
            min: f64::MAX,
            distribution: (),
        }
    }
}

impl std::fmt::Display for RecordName {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        if let Some(suffix) = self.1 {
            write!(f, "{} ({})", self.0, suffix)
        } else {
            write!(f, "{}", self.0)
        }
    }
}

impl std::fmt::Debug for RecordName {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        format!("{self}").fmt(f)
    }
}

// Implemented manually instead of derived because in OCaml, records are keyed
// by a single string, and invokers of `Measure.sample` concatenate categories
// with parenthesized subcategories (since string concatenation can be done
// inexpensively in the minor heap). `RecordName` supports the same
// concatenation pattern without needing to do any allocation or memcpys at
// sample-time; we just have to do the concatenation when converting to OCaml
// instead.
impl ToOcamlRep for RecordName {
    fn to_ocamlrep<'a, A: ocamlrep::Allocator>(&'a self, alloc: &'a A) -> ocamlrep::Value<'a> {
        use std::io::Write;
        // `+ 3` accounts for the " (" and ")" around the suffix.
        let mut str = alloc.byte_string_with_len(self.0.len() + self.1.map_or(0, |s| s.len() + 3));
        write!(&mut str, "{self}").unwrap();
        str.build()
    }
}

// Implemented manually because the OCaml version contains a sorted map rather
// than a hash map.
impl ToOcamlRep for Record {
    fn to_ocamlrep<'a, A: ocamlrep::Allocator>(&'a self, alloc: &'a A) -> ocamlrep::Value<'a> {
        let mut entries: Vec<_> = (self.entries.iter())
            .map(|kv| (*kv.key(), *kv.value()))
            .collect();
        entries.sort_unstable_by_key(|&(name, _)| name);
        let len = entries.len();
        let mut iter = entries
            .into_iter()
            .map(|(name, entry)| (alloc.add_copy(name), alloc.add_copy(entry)));
        let (res, _) = ocamlrep::sorted_iter_to_ocaml_map(&mut iter, alloc, len);
        res
    }
}
Rust
hhvm/hphp/hack/src/utils/rust/measure/measure_ffi.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. ocamlrep_ocamlpool::ocaml_ffi! { fn hh_measure_push_global() { measure::push_global() } fn hh_measure_pop_global() -> measure::Record { measure::pop_global() } }
TOML
hhvm/hphp/hack/src/utils/rust/measure/ffi/Cargo.toml
# @generated by autocargo [package] name = "measure_ffi" version = "0.0.0" edition = "2021" [lib] path = "../measure_ffi.rs" test = false doctest = false crate-type = ["lib", "staticlib"] [dependencies] measure = { version = "0.0.0", path = ".." } ocamlrep_ocamlpool = { version = "0.1.0", git = "https://github.com/facebook/ocamlrep/", branch = "main" }
TOML
hhvm/hphp/hack/src/utils/rust/pos/Cargo.toml
# @generated by autocargo [package] name = "rc_pos" version = "0.0.0" edition = "2021" [lib] path = "rc_pos.rs" [dependencies] arena_deserializer = { version = "0.0.0", path = "../../arena_deserializer" } arena_trait = { version = "0.0.0", path = "../../../arena_trait" } eq_modulo_pos = { version = "0.0.0", path = "../../eq_modulo_pos" } no_pos_hash = { version = "0.0.0", path = "../../no_pos_hash" } ocamlrep = { version = "0.1.0", git = "https://github.com/facebook/ocamlrep/", branch = "main" } parser_core_types = { version = "0.0.0", path = "../../../parser/cargo/core_types" } relative_path = { version = "0.0.0", path = "../relative_path" } serde = { version = "1.0.176", features = ["derive", "rc"] } static_assertions = "1.1.0" [dev-dependencies] pretty_assertions = { version = "1.2", features = ["alloc"], default-features = false }
Rust
hhvm/hphp/hack/src/utils/rust/pos/file_pos.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. pub trait FilePos { fn offset(&self) -> usize; fn line_column_beg(&self) -> (usize, usize, usize); }
Rust
hhvm/hphp/hack/src/utils/rust/pos/file_pos_large.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use ocamlrep::FromOcamlRep; use ocamlrep::FromOcamlRepIn; use ocamlrep::ToOcamlRep; use serde::Deserialize; use serde::Serialize; use crate::file_pos::FilePos; use crate::file_pos_small::FilePosSmall; #[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] pub struct FilePosLarge { /// line number. Starts at 1. lnum: usize, /// character number of the beginning of line of this position. /// The column number is therefore offset - bol /// Starts at 0 bol: usize, /// character offset from the beginning of the file. Starts at 0. offset: usize, } arena_deserializer::impl_deserialize_in_arena!(FilePosLarge); impl arena_trait::TrivialDrop for FilePosLarge {} const DUMMY: FilePosLarge = FilePosLarge { lnum: 0, bol: 0, offset: usize::max_value(), }; impl FilePosLarge { #[inline] pub const fn make_dummy() -> Self { DUMMY } #[inline] pub fn is_dummy(self) -> bool { self == DUMMY } #[inline] pub const fn beg_of_file(self) -> Self { FilePosLarge { lnum: 1, bol: 0, offset: 0, } } // constructors #[inline] pub const fn from_line_column_offset(line: usize, column: usize, offset: usize) -> Self { FilePosLarge { lnum: line, bol: offset - column, offset, } } #[inline] pub const fn from_lnum_bol_offset(lnum: usize, bol: usize, offset: usize) -> Self { FilePosLarge { lnum, bol, offset } } // accessors #[inline] pub const fn line(self) -> usize { self.lnum } #[inline] pub const fn column(self) -> usize { self.offset - self.bol } #[inline] pub const fn beg_of_line(self) -> usize { self.bol } #[inline] pub const fn with_column(self, col: usize) -> Self { FilePosLarge { lnum: self.lnum, bol: self.bol, offset: self.bol + col, } } #[inline] pub const fn line_beg(self) -> (usize, usize) { (self.lnum, self.bol) } #[inline] pub const fn line_column(self) -> (usize, usize) { (self.lnum, 
self.offset - self.bol) } #[inline] pub const fn line_column_offset(self) -> (usize, usize, usize) { (self.lnum, self.offset - self.bol, self.offset) } #[inline] pub const fn line_beg_offset(self) -> (usize, usize, usize) { (self.lnum, self.bol, self.offset) } } impl FilePos for FilePosLarge { #[inline] fn offset(&self) -> usize { self.offset } #[inline] fn line_column_beg(&self) -> (usize, usize, usize) { (self.lnum, self.offset - self.bol, self.bol) } } impl Ord for FilePosLarge { fn cmp(&self, other: &FilePosLarge) -> std::cmp::Ordering { self.offset().cmp(&other.offset()) } } impl PartialOrd for FilePosLarge { fn partial_cmp(&self, other: &FilePosLarge) -> Option<std::cmp::Ordering> { Some(self.cmp(other)) } } impl From<FilePosSmall> for FilePosLarge { fn from(pos: FilePosSmall) -> Self { let (lnum, bol, offset) = pos.line_beg_offset(); Self::from_lnum_bol_offset(lnum, bol, offset) } } impl ToOcamlRep for FilePosLarge { fn to_ocamlrep<'a, A: ocamlrep::Allocator>(&'a self, alloc: &'a A) -> ocamlrep::Value<'a> { let mut block = alloc.block_with_size(3); alloc.set_field(&mut block, 0, alloc.add(&self.lnum)); alloc.set_field(&mut block, 1, alloc.add(&self.bol)); alloc.set_field(&mut block, 2, alloc.add_copy(self.offset as isize)); block.build() } } impl FromOcamlRep for FilePosLarge { fn from_ocamlrep(value: ocamlrep::Value<'_>) -> Result<Self, ocamlrep::FromError> { let block = ocamlrep::from::expect_tuple(value, 3)?; let lnum = ocamlrep::from::field(block, 0)?; let bol = ocamlrep::from::field(block, 1)?; let offset: isize = ocamlrep::from::field(block, 2)?; Ok(Self { lnum, bol, offset: offset as usize, }) } } impl<'a> FromOcamlRepIn<'a> for FilePosLarge { fn from_ocamlrep_in( value: ocamlrep::Value<'_>, _alloc: &'a ocamlrep::Bump, ) -> Result<Self, ocamlrep::FromError> { Self::from_ocamlrep(value) } }
Rust
hhvm/hphp/hack/src/utils/rust/pos/file_pos_small.rs
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.

use std::fmt;

use ocamlrep::FromOcamlRep;
use ocamlrep::FromOcamlRepIn;
use ocamlrep::ToOcamlRep;
use serde::Deserialize;
use serde::Serialize;

use crate::file_pos::FilePos;

// Three values packed into one 64-bit integer:
//
//    6         5         4         3         2         1         0
// 3210987654321098765432109876543210987654321098765432109876543210
// X<----------------------------><----------------------><------->
//   beginning of line              line                    column
//
// - (X) This bit left empty for OCaml's tagged integer representation
//       (1 if int, 0 if pointer). OCaml uses the least significant bit
//       for this rather than the most significant, but conversion code
//       (e.g., code generated by deriving OcamlRep) will shift the value
//       left and set the tag bit for us.
// - (bol)  beginning of line (byte offset from start of file) starts at 0,
//          maximum is 2^30-1 = 1,073,741,823
// - (line) line number starts at 1, maximum is 2^24-1 = 16,777,215
// - (col)  column number starts at 0, maximum is 2^9-1 = 511
//          This is saturating, i.e. every column past 511 has column
//          number 511 (so as not to raise exceptions).
#[derive(Copy, Clone, Deserialize, Hash, Eq, PartialEq, Serialize)]
pub struct FilePosSmall(u64);

arena_deserializer::impl_deserialize_in_arena!(FilePosSmall);

impl arena_trait::TrivialDrop for FilePosSmall {}

// Field widths for the packed layout described above.
const COLUMN_BITS: usize = 9;
const LINE_BITS: usize = 24;
const BOL_BITS: usize = 30;

/// A mask of `bits` low one-bits.
#[inline]
const fn mask(bits: usize) -> usize {
    (1 << bits) - 1
}

/// Extract the low `bits` bits of `x`.
#[inline]
const fn mask_by(bits: usize, x: u64) -> usize {
    (x & (mask(bits) as u64)) as usize
}

const MAX_COLUMN: usize = mask(COLUMN_BITS);
const MAX_LINE: usize = mask(LINE_BITS);
const MAX_BOL: usize = mask(BOL_BITS);

// All bits set is the "no position" sentinel.
const DUMMY: u64 = u64::max_value();

impl FilePosSmall {
    /// Produce the sentinel dummy position.
    #[inline]
    pub const fn make_dummy() -> Self {
        FilePosSmall(DUMMY)
    }

    /// True iff this is the sentinel dummy position.
    #[inline]
    pub const fn is_dummy(self) -> bool {
        self.0 == DUMMY
    }

    #[inline]
    pub fn beg_of_line(self) -> usize {
        if self.is_dummy() {
            0
        } else {
            mask_by(BOL_BITS, self.0 >> (LINE_BITS + COLUMN_BITS))
        }
    }

    #[inline]
    pub fn line(self) -> usize {
        if self.is_dummy() {
            0
        } else {
            mask_by(LINE_BITS, self.0 >> COLUMN_BITS)
        }
    }

    #[inline]
    pub fn column(self) -> usize {
        if self.is_dummy() {
            // NOTE: the dummy column is `DUMMY as usize` (all bits set), not 0
            // like `line`/`beg_of_line`.
            DUMMY as usize
        } else {
            mask_by(COLUMN_BITS, self.0)
        }
    }

    /// Pack the three fields without range checks; caller must ensure each
    /// value fits its bit width.
    #[inline]
    const fn bol_line_col_unchecked(bol: usize, line: usize, col: usize) -> Self {
        FilePosSmall(
            ((bol as u64) << (COLUMN_BITS + LINE_BITS))
                + ((line as u64) << COLUMN_BITS)
                + (col as u64),
        )
    }

    /// Pack the three fields, returning `None` if any value exceeds its
    /// bit-width limit.
    #[inline]
    fn bol_line_col(bol: usize, line: usize, col: usize) -> Option<Self> {
        if col > MAX_COLUMN || line > MAX_LINE || bol > MAX_BOL {
            None
        } else {
            Some(Self::bol_line_col_unchecked(bol, line, col))
        }
    }

    /// The position of the start of a file: line 1, column 0.
    #[inline]
    pub const fn beg_of_file() -> Self {
        Self::bol_line_col_unchecked(0, 1, 0)
    }

    // constructors

    #[inline]
    pub fn from_line_column_offset(line: usize, column: usize, offset: usize) -> Option<Self> {
        Self::bol_line_col(offset - column, line, column)
    }

    #[inline]
    pub fn from_lnum_bol_offset(lnum: usize, bol: usize, offset: usize) -> Option<Self> {
        Self::bol_line_col(bol, lnum, offset - bol)
    }

    // accessors

    #[inline]
    pub fn line_beg(self) -> (usize, usize) {
        (self.line(), self.beg_of_line())
    }

    #[inline]
    pub fn line_column(self) -> (usize, usize) {
        (self.line(), self.column())
    }

    #[inline]
    pub fn line_column_offset(self) -> (usize, usize, usize) {
        (self.line(), self.column(), self.offset())
    }

    #[inline]
    pub fn line_beg_offset(self) -> (usize, usize, usize) {
        (self.line(), self.beg_of_line(), self.offset())
    }

    /// Copy of this position at column `col`; degrades to the dummy position
    /// if the result no longer fits the packed layout.
    #[inline]
    pub fn with_column(self, col: usize) -> Self {
        match Self::bol_line_col(self.beg_of_line(), self.line(), col) {
            None => FilePosSmall(DUMMY),
            Some(pos) => pos,
        }
    }
}

impl FilePos for FilePosSmall {
    #[inline]
    fn offset(&self) -> usize {
        // The absolute offset is reconstructed from bol + column.
        self.beg_of_line() + self.column()
    }

    #[inline]
    fn line_column_beg(&self) -> (usize, usize, usize) {
        (self.line(), self.column(), self.beg_of_line())
    }
}

impl Ord for FilePosSmall {
    // Positions compare by absolute offset alone.
    fn cmp(&self, other: &FilePosSmall) -> std::cmp::Ordering {
        self.offset().cmp(&other.offset())
    }
}

impl PartialOrd for FilePosSmall {
    fn partial_cmp(&self, other: &FilePosSmall) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl fmt::Debug for FilePosSmall {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.debug_struct("FilePosSmall")
            .field("bol", &self.beg_of_line())
            .field("line", &self.line())
            .field("column", &self.column())
            .finish()
    }
}

impl ToOcamlRep for FilePosSmall {
    fn to_ocamlrep<'a, A: ocamlrep::Allocator>(&'a self, _alloc: &'a A) -> ocamlrep::Value<'a> {
        // The packed u64 fits directly in an OCaml immediate int (top bit is
        // reserved for the tag; see the layout comment above).
        ocamlrep::Value::int(self.0 as isize)
    }
}

impl FromOcamlRep for FilePosSmall {
    fn from_ocamlrep(value: ocamlrep::Value<'_>) -> Result<Self, ocamlrep::FromError> {
        Ok(Self(ocamlrep::from::expect_int(value)? as u64))
    }
}

impl<'a> FromOcamlRepIn<'a> for FilePosSmall {
    fn from_ocamlrep_in(
        value: ocamlrep::Value<'_>,
        _alloc: &'a ocamlrep::Bump,
    ) -> Result<Self, ocamlrep::FromError> {
        Self::from_ocamlrep(value)
    }
}
Rust
hhvm/hphp/hack/src/utils/rust/pos/pos_impl.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use std::borrow::Cow; use std::cmp::Ordering; use std::ops::Range; use std::sync::Arc; use eq_modulo_pos::EqModuloPos; use ocamlrep::FromOcamlRep; use ocamlrep::ToOcamlRep; use relative_path::RelativePath; use relative_path::RelativePathCtx; use serde::Deserialize; use serde::Serialize; use crate::file_pos::FilePos; use crate::file_pos_large::FilePosLarge; use crate::file_pos_small::FilePosSmall; use crate::pos_span_raw::PosSpanRaw; use crate::pos_span_tiny::PosSpanTiny; #[derive(Clone, Deserialize, Hash, Serialize)] enum PosImpl { Small { file: Arc<RelativePath>, start: FilePosSmall, end: FilePosSmall, }, Large { file: Arc<RelativePath>, start: Box<FilePosLarge>, end: Box<FilePosLarge>, }, Tiny { /// Representation invariant: `RelativePath::EMPTY` is always encoded as /// `None`. This allows us to construct `Pos` in `const` contexts. 
file: Option<Arc<RelativePath>>, span: PosSpanTiny, }, FromReason(Box<PosImpl>), } #[derive(Clone, Deserialize, FromOcamlRep, ToOcamlRep, Serialize)] pub struct Pos(PosImpl); pub type PosR<'a> = &'a Pos; impl Pos { pub const NONE: Self = Self(PosImpl::Tiny { file: None, span: PosSpanTiny::make_dummy(), }); pub fn is_none(&self) -> bool { match self { Pos(PosImpl::Tiny { file, span }) => span.is_dummy() && file.is_none(), _ => false, } } // Validness based on HHVM's definition pub fn is_valid(&self) -> bool { let (line0, line1, char0, char1) = self.info_pos_extended(); line0 != 1 || char0 != 1 || line1 != 1 || char1 != 1 } pub fn from_raw_span(file: Arc<RelativePath>, span: PosSpanRaw) -> Self { if let Some(span) = PosSpanTiny::make(&span.start, &span.end) { return Pos(PosImpl::Tiny { file: if file.is_empty() { None } else { Some(file) }, span, }); } let (lnum, bol, offset) = span.start.line_beg_offset(); if let Some(start) = FilePosSmall::from_lnum_bol_offset(lnum, bol, offset) { let (lnum, bol, offset) = span.end.line_beg_offset(); if let Some(end) = FilePosSmall::from_lnum_bol_offset(lnum, bol, offset) { return Pos(PosImpl::Small { file, start, end }); } } Pos(PosImpl::Large { file, start: Box::new(span.start), end: Box::new(span.end), }) } pub fn to_raw_span(&self) -> PosSpanRaw { match &self.0 { PosImpl::Tiny { span, .. } => span.to_raw_span(), PosImpl::Small { start, end, .. } => PosSpanRaw { start: (*start).into(), end: (*end).into(), }, PosImpl::Large { start, end, .. } => PosSpanRaw { start: **start, end: **end, }, PosImpl::FromReason(_p) => unimplemented!(), } } pub fn filename(&self) -> &RelativePath { match &self.0 { PosImpl::Small { file, .. } | PosImpl::Large { file, .. } | PosImpl::Tiny { file: Some(file), .. } => file, PosImpl::Tiny { file: None, .. } => &RelativePath::EMPTY, PosImpl::FromReason(_p) => unimplemented!(), } } pub fn filename_rc(&self) -> Arc<RelativePath> { match &self.0 { PosImpl::Small { file, .. } | PosImpl::Large { file, .. 
} | PosImpl::Tiny { file: Some(file), .. } => Arc::clone(file), PosImpl::Tiny { file: None, .. } => Arc::new(RelativePath::EMPTY), PosImpl::FromReason(_p) => unimplemented!(), } } fn into_filename(self) -> Arc<RelativePath> { match self.0 { PosImpl::Small { file, .. } | PosImpl::Large { file, .. } | PosImpl::Tiny { file: Some(file), .. } => file, PosImpl::Tiny { file: None, .. } => Arc::new(RelativePath::EMPTY), PosImpl::FromReason(_p) => unimplemented!(), } } /// Returns a closed interval that's incorrect for multi-line spans. pub fn info_pos(&self) -> (usize, usize, usize) { fn compute<P: FilePos>(pos_start: &P, pos_end: &P) -> (usize, usize, usize) { let (line, start_minus1, bol) = pos_start.line_column_beg(); let start = start_minus1.wrapping_add(1); let end_offset = pos_end.offset(); let mut end = end_offset - bol; // To represent the empty interval, pos_start and pos_end are equal because // end_offset is exclusive. Here, it's best for error messages to the user if // we print characters N to N (highlighting a single character) rather than characters // N to (N-1), which is very unintuitive. if end == start_minus1 { end = start } (line, start, end) } match &self.0 { PosImpl::Small { start, end, .. } => compute(start, end), PosImpl::Large { start, end, .. } => compute(start.as_ref(), end.as_ref()), PosImpl::Tiny { span, .. } => { let PosSpanRaw { start, end } = span.to_raw_span(); compute(&start, &end) } PosImpl::FromReason(_p) => unimplemented!(), } } pub fn info_pos_extended(&self) -> (usize, usize, usize, usize) { let (line_begin, start, end) = self.info_pos(); let line_end = match &self.0 { PosImpl::Small { end, .. } => end.line_column_beg(), PosImpl::Large { end, .. } => (*end).line_column_beg(), PosImpl::Tiny { span, .. 
} => span.to_raw_span().end.line_column_beg(), PosImpl::FromReason(_p) => unimplemented!(), } .0; (line_begin, line_end, start, end) } pub fn info_raw(&self) -> (usize, usize) { (self.start_offset(), self.end_offset()) } pub fn line(&self) -> usize { match &self.0 { PosImpl::Small { start, .. } => start.line(), PosImpl::Large { start, .. } => start.line(), PosImpl::Tiny { span, .. } => span.start_line_number(), PosImpl::FromReason(_p) => unimplemented!(), } } pub fn from_lnum_bol_offset( file: Arc<RelativePath>, start: (usize, usize, usize), end: (usize, usize, usize), ) -> Self { let (start_line, start_bol, start_offset) = start; let (end_line, end_bol, end_offset) = end; let start = FilePosLarge::from_lnum_bol_offset(start_line, start_bol, start_offset); let end = FilePosLarge::from_lnum_bol_offset(end_line, end_bol, end_offset); Self::from_raw_span(file, PosSpanRaw { start, end }) } pub fn to_start_and_end_lnum_bol_offset( &self, ) -> ((usize, usize, usize), (usize, usize, usize)) { match &self.0 { PosImpl::Small { start, end, .. } => (start.line_beg_offset(), end.line_beg_offset()), PosImpl::Large { start, end, .. } => (start.line_beg_offset(), end.line_beg_offset()), PosImpl::Tiny { span, .. } => { let PosSpanRaw { start, end } = span.to_raw_span(); (start.line_beg_offset(), end.line_beg_offset()) } PosImpl::FromReason(_p) => unimplemented!(), } } /// For single-line spans only. 
pub fn from_line_cols_offset(
        file: Arc<RelativePath>,
        line: usize,
        cols: Range<usize>,
        start_offset: usize,
    ) -> Self {
        let start = FilePosLarge::from_line_column_offset(line, cols.start, start_offset);
        // End offset is derived from the column range, so both endpoints share `line`.
        let end = FilePosLarge::from_line_column_offset(
            line,
            cols.end,
            start_offset + (cols.end - cols.start),
        );
        Self::from_raw_span(file, PosSpanRaw { start, end })
    }

    // Span from x1's start to x2's end, taking x1's file; no validation.
    pub fn btw_nocheck(x1: Self, x2: Self) -> Self {
        let start = x1.to_raw_span().start;
        let end = x2.to_raw_span().end;
        Self::from_raw_span(x1.into_filename(), PosSpanRaw { start, end })
    }

    // Validated variant of `btw_nocheck`: both positions must be in the same
    // file and in order.
    pub fn btw(x1: &Self, x2: &Self) -> Result<Self, String> {
        if x1.filename() != x2.filename() {
            // using string concatenation instead of format!,
            // it is not stable see T52404885
            Err(String::from("Position in separate files ")
                + &x1.filename().to_string()
                + " and "
                + &x2.filename().to_string())
        } else if x1.end_offset() > x2.end_offset() {
            // NOTE(review): this message renders as "...positions<N>and<M>" — the
            // spaces around "and" look missing; confirm against OCaml's Pos.btw
            // before changing, since messages may need to match byte-for-byte.
            Err(String::from("btw: invalid positions")
                + &x1.end_offset().to_string()
                + "and"
                + &x2.end_offset().to_string())
        } else {
            Ok(Self::btw_nocheck(x1.clone(), x2.clone()))
        }
    }

    // Smallest span covering both inputs; dummy endpoints defer to the other
    // position's endpoint. Both inputs must be in the same file.
    pub fn merge(x1: &Self, x2: &Self) -> Result<Self, String> {
        if x1.filename() != x2.filename() {
            // see comment above (T52404885)
            return Err(String::from("Position in separate files ")
                + &x1.filename().to_string()
                + " and "
                + &x2.filename().to_string());
        }
        let span1 = x1.to_raw_span();
        let span2 = x2.to_raw_span();
        // Earliest non-dummy start wins.
        let start = if span1.start.is_dummy() {
            span2.start
        } else if span2.start.is_dummy() || span1.start.offset() < span2.start.offset() {
            span1.start
        } else {
            span2.start
        };
        // Latest non-dummy end wins (ties go to span1).
        let end = if span1.end.is_dummy() {
            span2.end
        } else if span2.end.is_dummy() || span1.end.offset() >= span2.end.offset() {
            span1.end
        } else {
            span2.end
        };
        Ok(Self::from_raw_span(
            x1.filename_rc(),
            PosSpanRaw { start, end },
        ))
    }

    // Zero-width span at this position's end; borrows when already none.
    pub fn last_char(&self) -> Cow<'_, Self> {
        if self.is_none() {
            Cow::Borrowed(self)
        } else {
            let end = self.to_raw_span().end;
            Cow::Owned(Self::from_raw_span(
                self.filename_rc(),
                PosSpanRaw { start: end, end },
            ))
        }
    }

    // Zero-width span at column 0 of this position's start line.
    pub fn first_char_of_line(&self) -> Cow<'_, Self> {
        if self.is_none() {
            Cow::Borrowed(self)
        } else {
            let start = self.to_raw_span().start.with_column(0);
            Cow::Owned(Self::from_raw_span(
                self.filename_rc(),
                PosSpanRaw { start, end: start },
            ))
        }
    }

    // Exclusive end byte offset.
    pub fn end_offset(&self) -> usize {
        match &self.0 {
            PosImpl::Small { end, .. } => end.offset(),
            PosImpl::Large { end, .. } => end.offset(),
            PosImpl::Tiny { span, .. } => span.end_offset(),
            PosImpl::FromReason(_p) => unimplemented!(),
        }
    }

    // Start byte offset.
    pub fn start_offset(&self) -> usize {
        match &self.0 {
            PosImpl::Small { start, .. } => start.offset(),
            PosImpl::Large { start, .. } => start.offset(),
            PosImpl::Tiny { span, .. } => span.start_offset(),
            PosImpl::FromReason(_p) => unimplemented!(),
        }
    }
}

impl std::fmt::Display for Pos {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Formats as "file(line:col-col)" for one-line spans, or
        // "file(line:col-line:col)" across lines.
        fn do_fmt<P: FilePos>(
            f: &mut std::fmt::Formatter<'_>,
            file: &RelativePath,
            start: &P,
            end: &P,
        ) -> std::fmt::Result {
            write!(f, "{}", file)?;
            let (start_line, start_col, _) = start.line_column_beg();
            let (end_line, end_col, _) = end.line_column_beg();
            if start_line == end_line {
                write!(f, "({}:{}-{})", start_line, start_col, end_col)
            } else {
                write!(f, "({}:{}-{}:{})", start_line, start_col, end_line, end_col)
            }
        }
        match &self.0 {
            PosImpl::Small { file, start, end, .. } => do_fmt(f, file, start, end),
            PosImpl::Large { file, start, end, .. } => do_fmt(f, file, &**start, &**end),
            PosImpl::Tiny { span, .. } => {
                if self.is_none() {
                    return write!(f, "Pos::NONE");
                }
                let PosSpanRaw { start, end } = span.to_raw_span();
                do_fmt(f, self.filename(), &start, &end)
            }
            PosImpl::FromReason(_p) => unimplemented!(),
        }
    }
}

impl std::fmt::Debug for Pos {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Debug output intentionally matches Display.
        write!(f, "{self}")
    }
}

impl Default for Pos {
    fn default() -> Self {
        Self::NONE
    }
}

impl Ord for Pos {
    // Intended to match the implementation of `Pos.compare` in OCaml.
fn cmp(&self, other: &Pos) -> Ordering {
        // Order by file first, then by start offset, then by end offset.
        self.filename()
            .cmp(other.filename())
            .then(self.start_offset().cmp(&other.start_offset()))
            .then(self.end_offset().cmp(&other.end_offset()))
    }
}

impl PartialOrd for Pos {
    fn partial_cmp(&self, other: &Pos) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl PartialEq for Pos {
    // Equality is derived from the ordering above, so two positions in
    // different representations but with the same file/offsets compare equal.
    fn eq(&self, other: &Self) -> bool {
        self.cmp(other) == Ordering::Equal
    }
}

impl Eq for Pos {}

// non-derived impl Hash because PartialEq and Eq are non-derived
impl std::hash::Hash for Pos {
    fn hash<H: std::hash::Hasher>(&self, hasher: &mut H) {
        self.0.hash(hasher)
    }
}

impl EqModuloPos for Pos {
    // By definition, any two positions are equal when positions are ignored.
    fn eq_modulo_pos(&self, _rhs: &Self) -> bool {
        true
    }
    fn eq_modulo_pos_and_reason(&self, _rhs: &Self) -> bool {
        true
    }
}

impl ToOcamlRep for PosImpl {
    // Block tags 0..=3 mirror the variant order of the OCaml `Pos.t` type;
    // keep them in sync with `from_ocamlrep` below.
    fn to_ocamlrep<'a, A: ocamlrep::Allocator>(&'a self, alloc: &'a A) -> ocamlrep::Value<'a> {
        match self {
            PosImpl::Small { file, start, end } => {
                let mut block = alloc.block_with_size_and_tag(3, 0);
                alloc.set_field(&mut block, 0, alloc.add(file));
                alloc.set_field(&mut block, 1, alloc.add(start));
                alloc.set_field(&mut block, 2, alloc.add(end));
                block.build()
            }
            PosImpl::Large { file, start, end } => {
                let mut block = alloc.block_with_size_and_tag(3, 1);
                alloc.set_field(&mut block, 0, alloc.add(file));
                alloc.set_field(&mut block, 1, alloc.add(start));
                alloc.set_field(&mut block, 2, alloc.add(end));
                block.build()
            }
            PosImpl::Tiny { file, span } => {
                // A missing file is encoded as the empty relative path.
                let file = file.as_deref().unwrap_or(&RelativePath::EMPTY);
                let mut block = alloc.block_with_size_and_tag(2, 2);
                alloc.set_field(&mut block, 0, alloc.add(file));
                alloc.set_field(&mut block, 1, alloc.add(span));
                block.build()
            }
            PosImpl::FromReason(pos) => {
                let mut block = alloc.block_with_size_and_tag(1, 3);
                alloc.set_field(&mut block, 0, alloc.add(pos));
                block.build()
            }
        }
    }
}

impl FromOcamlRep for PosImpl {
    // Inverse of `to_ocamlrep`: dispatch on the OCaml block tag.
    fn from_ocamlrep(value: ocamlrep::Value<'_>) -> Result<Self, ocamlrep::FromError> {
        use ocamlrep::from;
        let block = from::expect_block(value)?;
        match block.tag() {
            0 => {
                from::expect_block_size(block, 3)?;
                Ok(PosImpl::Small {
                    file: from::field(block, 0)?,
                    start: from::field(block, 1)?,
                    end: from::field(block, 2)?,
                })
            }
            1 => {
                from::expect_block_size(block, 3)?;
                Ok(PosImpl::Large {
                    file: from::field(block, 0)?,
                    start: from::field(block, 1)?,
                    end: from::field(block, 2)?,
                })
            }
            2 => {
                from::expect_block_size(block, 2)?;
                // Empty path round-trips back to `file: None` (see to_ocamlrep).
                let file: RelativePath = from::field(block, 0)?;
                Ok(PosImpl::Tiny {
                    file: if file.is_empty() {
                        None
                    } else {
                        Some(Arc::new(file))
                    },
                    span: from::field(block, 1)?,
                })
            }
            3 => {
                from::expect_block_size(block, 1)?;
                Ok(PosImpl::FromReason(from::field(block, 0)?))
            }
            tag => Err(ocamlrep::FromError::BlockTagOutOfRange {
                max: 3,
                actual: tag,
            }),
        }
    }
}

impl Pos {
    /// Returns a struct implementing Display which produces the same format as
    /// `Pos.string` in OCaml.
    pub fn string(&self) -> PosString<'_> {
        PosString(self, None)
    }

    // Same as `string`, but with the path made absolute via `ctx`.
    pub fn absolute<'a>(&'a self, ctx: &'a RelativePathCtx) -> PosString<'a> {
        PosString(self, Some(ctx))
    }
}

/// This struct has an impl of Display which produces the same format as
/// `Pos.string` in OCaml.
pub struct PosString<'a>(&'a Pos, Option<&'a RelativePathCtx>);

impl std::fmt::Display for PosString<'_> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let Self(pos, ctx) = self;
        let path = match ctx {
            Some(ctx) => pos.filename().to_absolute(ctx),
            None => pos.filename().path().to_owned(),
        };
        let (line, start, end) = pos.info_pos();
        write!(
            f,
            "File {:?}, line {}, characters {}-{}:",
            path.display(),
            line,
            start,
            end
        )
    }
}

// NoPosHash is meant to be position-insensitive, so don't do anything!
impl no_pos_hash::NoPosHash for Pos {
    // Intentionally a no-op: positions must not affect position-insensitive hashes.
    fn hash<H: std::hash::Hasher>(&self, _state: &mut H) {}
}

pub mod map {
    // Ordered map keyed by position (uses the Ord impl above).
    pub type Map<T> = std::collections::BTreeMap<super::Pos, T>;
}

impl From<parser_core_types::indexed_source_text::Pos> for Pos {
    fn from(pos: parser_core_types::indexed_source_text::Pos) -> Self {
        Self::from_lnum_bol_offset(pos.path, pos.start, pos.end)
    }
}

#[cfg(test)]
mod tests {
    use std::path::PathBuf;

    use pretty_assertions::assert_eq;
    use relative_path::Prefix;

    use super::*;

    // Test helper: a Pos in a Dummy-prefixed file with the given endpoints.
    fn make_pos(name: &str, start: (usize, usize, usize), end: (usize, usize, usize)) -> Pos {
        Pos::from_lnum_bol_offset(
            Arc::new(RelativePath::make(Prefix::Dummy, PathBuf::from(name))),
            start,
            end,
        )
    }

    #[test]
    fn test_pos() {
        assert!(Pos::NONE.is_none());
        // A named file is never "none", even with all-zero coordinates.
        assert!(
            !Pos::from_lnum_bol_offset(
                Arc::new(RelativePath::make(Prefix::Dummy, PathBuf::from("a"))),
                (0, 0, 0),
                (0, 0, 0)
            )
            .is_none(),
        );
        // A nonzero line is never "none", even with an empty path.
        assert!(
            !Pos::from_lnum_bol_offset(
                Arc::new(RelativePath::make(Prefix::Dummy, PathBuf::from(""))),
                (1, 0, 0),
                (0, 0, 0)
            )
            .is_none(),
        );
    }

    #[test]
    fn test_pos_absolute() {
        let ctx = RelativePathCtx {
            dummy: PathBuf::from("dummy"),
            ..Default::default()
        };
        assert_eq!(
            Pos::NONE.absolute(&ctx).to_string(),
            r#"File "dummy/", line 0, characters 0-0:"#
        );
        let path = Arc::new(RelativePath::make(Prefix::Dummy, PathBuf::from("a.php")));
        assert_eq!(
            Pos::from_lnum_bol_offset(path, (5, 100, 117), (5, 100, 142))
                .absolute(&ctx)
                .to_string(),
            r#"File "dummy/a.php", line 5, characters 18-42:"#
        );
    }

    #[test]
    fn test_pos_string() {
        assert_eq!(
            Pos::NONE.string().to_string(),
            r#"File "", line 0, characters 0-0:"#
        );
        let path = Arc::new(RelativePath::make(Prefix::Dummy, PathBuf::from("a.php")));
        assert_eq!(
            Pos::from_lnum_bol_offset(path, (5, 100, 117), (5, 100, 142))
                .string()
                .to_string(),
            r#"File "a.php", line 5, characters 18-42:"#
        );
    }

    #[test]
    fn test_pos_merge() {
        // Checks both argument orders: merge must be symmetric.
        let test = |name, (exp_start, exp_end), ((fst_start, fst_end), (snd_start, snd_end))| {
            assert_eq!(
                Ok(make_pos("a", exp_start, exp_end)),
                Pos::merge(
                    &make_pos("a", fst_start, fst_end),
                    &make_pos("a", snd_start, snd_end)
                ),
                "{}",
                name
            );
            // Run this again because we want to test that we get the same
            // result regardless of order.
            assert_eq!(
                Ok(make_pos("a", exp_start, exp_end)),
                Pos::merge(
                    &make_pos("a", snd_start, snd_end),
                    &make_pos("a", fst_start, fst_end),
                ),
                "{} (reversed)",
                name
            );
        };
        test(
            "basic test",
            ((0, 0, 0), (0, 0, 5)),
            (((0, 0, 0), (0, 0, 2)), ((0, 0, 2), (0, 0, 5))),
        );
        test(
            "merge should work with gaps",
            ((0, 0, 0), (0, 0, 15)),
            (((0, 0, 0), (0, 0, 5)), ((0, 0, 10), (0, 0, 15))),
        );
        test(
            "merge should work with overlaps",
            ((0, 0, 0), (0, 0, 15)),
            (((0, 0, 0), (0, 0, 12)), ((0, 0, 7), (0, 0, 15))),
        );
        test(
            "merge should work between lines",
            ((0, 0, 0), (2, 20, 25)),
            (((0, 0, 0), (1, 10, 15)), ((1, 10, 20), (2, 20, 25))),
        );
        assert_eq!(
            Err("Position in separate files |a and |b".to_string()),
            Pos::merge(
                &make_pos("a", (0, 0, 0), (0, 0, 0)),
                &make_pos("b", (0, 0, 0), (0, 0, 0))
            ),
            "should reject merges with different filenames"
        );
    }
}
Rust
hhvm/hphp/hack/src/utils/rust/pos/pos_span_raw.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use crate::file_pos_large::FilePosLarge; #[derive(Copy, Clone, Eq, PartialEq)] pub struct PosSpanRaw { pub start: FilePosLarge, pub end: FilePosLarge, } const DUMMY: PosSpanRaw = PosSpanRaw { start: FilePosLarge::make_dummy(), end: FilePosLarge::make_dummy(), }; impl PosSpanRaw { #[inline] pub const fn make_dummy() -> Self { DUMMY } #[inline] pub fn is_dummy(self) -> bool { self == DUMMY } }
Rust
hhvm/hphp/hack/src/utils/rust/pos/pos_span_tiny.rs
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.

use std::cmp::Ordering;
use std::fmt;

use ocamlrep::FromOcamlRep;
use ocamlrep::FromOcamlRepIn;
use ocamlrep::ToOcamlRep;
use serde::Deserialize;
use serde::Serialize;
use static_assertions::const_assert_eq;

use crate::file_pos::FilePos;
use crate::file_pos_large::FilePosLarge;
use crate::pos_span_raw::PosSpanRaw;

// PosSpanTiny packs multiple fields into one 63-bit integer:
//
//    6         5         4         3         2         1         0
// 3210987654321098765432109876543210987654321098765432109876543210
// X<-------------------><--------------><------------------><---->
//  byte offset of line     line number      column number    width

/// A compressed representation of a position span, i.e. a start and an end position.
#[derive(Copy, Clone, Eq, PartialEq, Deserialize, Hash, Serialize)]
pub struct PosSpanTiny(u64);

arena_deserializer::impl_deserialize_in_arena!(PosSpanTiny);

/// These numbers were obtained by gathering statistics on the positions in
/// the decl heap for a large code base run as of December 2020. They should
/// allow us to encode about 99% of positions.
// Note: the two *_INCREMENT widths are zero, so only spans whose start and
// end share the same line/bol (and whose width fits in 6 bits) are encodable.
const START_BEGINNING_OF_LINE_BITS: u64 = 21;
const START_LINE_NUMBER_BITS: u64 = 16;
const START_COLUMN_NUMBER_BITS: u64 = 20;
const BEGINNING_OF_LINE_INCREMENT_BITS: u64 = 0;
const LINE_NUMBER_INCREMENT_BITS: u64 = 0;
const WIDTH_BITS: u64 = 6;

// The offset of each field (i.e., the number of bits to the right of it) is
// the offset of the field to the right plus that field's bit width.
const WIDTH_OFFSET: u64 = 0;
const LINE_NUMBER_INCREMENT_OFFSET: u64 = WIDTH_OFFSET + WIDTH_BITS;
const BEGINNING_OF_LINE_INCREMENT_OFFSET: u64 =
    LINE_NUMBER_INCREMENT_OFFSET + LINE_NUMBER_INCREMENT_BITS;
const START_COLUMN_NUMBER_OFFSET: u64 =
    BEGINNING_OF_LINE_INCREMENT_OFFSET + BEGINNING_OF_LINE_INCREMENT_BITS;
const START_LINE_NUMBER_OFFSET: u64 = START_COLUMN_NUMBER_OFFSET + START_COLUMN_NUMBER_BITS;
const START_BEGINNING_OF_LINE_OFFSET: u64 = START_LINE_NUMBER_OFFSET + START_LINE_NUMBER_BITS;

// The total number of bits used must be 63 (OCaml reserves one bit).
const_assert_eq!(
    63,
    START_BEGINNING_OF_LINE_BITS + START_BEGINNING_OF_LINE_OFFSET
);

// A mask of `bits` low ones.
#[inline]
const fn mask(bits: u64) -> u64 {
    (1 << bits) - 1
}

// Keep only the low `bits` bits of `x`.
#[inline]
const fn mask_by(bits: u64, x: u64) -> u64 {
    x & mask(bits)
}

// Largest encodable value for each field.
const MAX_START_BEGINNING_OF_LINE: u64 = mask(START_BEGINNING_OF_LINE_BITS);
const MAX_START_LINE_NUMBER: u64 = mask(START_LINE_NUMBER_BITS);
const MAX_START_COLUMN_NUMBER: u64 = mask(START_COLUMN_NUMBER_BITS);
const MAX_BEGINNING_OF_LINE_INCREMENT: u64 = mask(BEGINNING_OF_LINE_INCREMENT_BITS);
const MAX_LINE_NUMBER_INCREMENT: u64 = mask(LINE_NUMBER_INCREMENT_BITS);
const MAX_WIDTH: u64 = mask(WIDTH_BITS);

/// We only have 63 bits to work with, since OCaml reserves one bit for the
/// integer tag. On the OCaml side, the integer tag is the low bit, but when
/// converting ints from OCaml to Rust, we shift them right one bit, making the
/// spare bit the high bit. Since OCaml integers are signed, when we convert a
/// PosSpanTiny value from OCaml, the high bit is filled with a sign bit via the
/// arithmetic shift in `ocamlrep::ocaml_int_to_isize`. Since this high bit
/// isn't meaningful, we want to mask it off (to prevent construction of
/// equivalent values with differing high bits).
const MASK: u64 = !(1 << 63);

// The all-ones (masked) pattern is reserved for the dummy span.
const DUMMY: u64 = MASK;

impl PosSpanTiny {
    #[inline]
    pub const fn make_dummy() -> Self {
        Self(DUMMY)
    }

    #[inline]
    pub const fn is_dummy(&self) -> bool {
        self.0 == DUMMY
    }

    // Tries to pack the span; returns None when any field exceeds its bit
    // width or the span is negative (end before start). Dummy endpoints
    // always succeed as the dummy span.
    pub fn make(start: &FilePosLarge, end: &FilePosLarge) -> Option<Self> {
        if start.is_dummy() || end.is_dummy() {
            return Some(Self::make_dummy());
        }
        let start_bol = start.beg_of_line() as u64;
        let start_line = start.line() as u64;
        let start_col = start.column() as u64;
        let start_offset = start.offset() as u64;
        let end_bol = end.beg_of_line() as u64;
        let end_line = end.line() as u64;
        let end_offset = end.offset() as u64;
        // checked_sub returns None for negative spans via the `?` operator.
        let bol_increment = end_bol.checked_sub(start_bol)?;
        let line_increment = end_line.checked_sub(start_line)?;
        let width = end_offset.checked_sub(start_offset)?;
        if start_bol > MAX_START_BEGINNING_OF_LINE
            || start_line > MAX_START_LINE_NUMBER
            || start_col > MAX_START_COLUMN_NUMBER
            || bol_increment > MAX_BEGINNING_OF_LINE_INCREMENT
            || line_increment > MAX_LINE_NUMBER_INCREMENT
            || width > MAX_WIDTH
        {
            return None;
        }
        Some(Self(
            start_bol << START_BEGINNING_OF_LINE_OFFSET
                | start_line << START_LINE_NUMBER_OFFSET
                | start_col << START_COLUMN_NUMBER_OFFSET
                | bol_increment << BEGINNING_OF_LINE_INCREMENT_OFFSET
                | line_increment << LINE_NUMBER_INCREMENT_OFFSET
                | width << WIDTH_OFFSET,
        ))
    }

    pub fn start_beginning_of_line(self) -> usize {
        if self.is_dummy() {
            0
        } else {
            mask_by(
                START_BEGINNING_OF_LINE_BITS,
                self.0 >> START_BEGINNING_OF_LINE_OFFSET,
            ) as usize
        }
    }

    pub fn start_line_number(self) -> usize {
        if self.is_dummy() {
            0
        } else {
            mask_by(START_LINE_NUMBER_BITS, self.0 >> START_LINE_NUMBER_OFFSET) as usize
        }
    }

    // Dummy spans report usize::MAX here (so start_offset wraps to bol - 1,
    // matching the dummy FilePosLarge convention).
    pub fn start_column(self) -> usize {
        if self.is_dummy() {
            usize::MAX
        } else {
            mask_by(
                START_COLUMN_NUMBER_BITS,
                self.0 >> START_COLUMN_NUMBER_OFFSET,
            ) as usize
        }
    }

    fn beginning_of_line_increment(self) -> usize {
        if self.is_dummy() {
            0
        } else {
            mask_by(
                BEGINNING_OF_LINE_INCREMENT_BITS,
                self.0 >> BEGINNING_OF_LINE_INCREMENT_OFFSET,
            ) as usize
        }
    }

    fn line_number_increment(self) -> usize {
        if self.is_dummy() {
            0
        } else {
            mask_by(
                LINE_NUMBER_INCREMENT_BITS,
                self.0 >> LINE_NUMBER_INCREMENT_OFFSET,
            ) as usize
        }
    }

    fn width(self) -> usize {
        if self.is_dummy() {
            0
        } else {
            mask_by(WIDTH_BITS, self.0 >> WIDTH_OFFSET) as usize
        }
    }

    // Derived end-point accessors: end = start + stored increments.
    pub fn start_offset(self) -> usize {
        self.start_beginning_of_line() + self.start_column()
    }

    pub fn end_line_number(self) -> usize {
        self.start_line_number() + self.line_number_increment()
    }

    pub fn end_beginning_of_line(self) -> usize {
        self.start_beginning_of_line() + self.beginning_of_line_increment()
    }

    pub fn end_offset(self) -> usize {
        self.start_offset() + self.width()
    }

    pub fn end_column(self) -> usize {
        self.end_offset() - self.end_beginning_of_line()
    }

    // Expands back into the uncompressed representation.
    pub fn to_raw_span(self) -> PosSpanRaw {
        if self.is_dummy() {
            PosSpanRaw::make_dummy()
        } else {
            let start_lnum = self.start_line_number();
            let start_bol = self.start_beginning_of_line();
            let start_offset = self.start_offset();
            let end_lnum = self.end_line_number();
            let end_bol = self.end_beginning_of_line();
            let end_offset = self.end_offset();
            PosSpanRaw {
                start: FilePosLarge::from_lnum_bol_offset(start_lnum, start_bol, start_offset),
                end: FilePosLarge::from_lnum_bol_offset(end_lnum, end_bol, end_offset),
            }
        }
    }
}

impl Ord for PosSpanTiny {
    // Intended to match the implementation of `Pos.compare` in OCaml.
    fn cmp(&self, other: &Self) -> Ordering {
        self.start_offset()
            .cmp(&other.start_offset())
            .then(self.end_offset().cmp(&other.end_offset()))
    }
}

impl PartialOrd for PosSpanTiny {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl ToOcamlRep for PosSpanTiny {
    // Encoded directly as an OCaml immediate int (no allocation).
    fn to_ocamlrep<'a, A: ocamlrep::Allocator>(&'a self, _alloc: &'a A) -> ocamlrep::Value<'a> {
        ocamlrep::Value::int(self.0 as isize)
    }
}

impl FromOcamlRep for PosSpanTiny {
    fn from_ocamlrep(value: ocamlrep::Value<'_>) -> Result<Self, ocamlrep::FromError> {
        // MASK drops the sign-extended high bit (see doc comment on MASK).
        Ok(Self(MASK & ocamlrep::from::expect_int(value)?
as u64))
    }
}

impl<'a> FromOcamlRepIn<'a> for PosSpanTiny {
    // PosSpanTiny is Copy and allocation-free, so the arena-based conversion
    // simply delegates to the plain `from_ocamlrep`.
    fn from_ocamlrep_in(
        value: ocamlrep::Value<'_>,
        _alloc: &'a ocamlrep::Bump,
    ) -> Result<Self, ocamlrep::FromError> {
        Self::from_ocamlrep(value)
    }
}

#[cfg(test)]
mod test {
    use pretty_assertions::assert_eq;

    use super::*;

    #[test]
    fn test_tiny_small() {
        // A single-line, narrow span: must be representable.
        let line = 30;
        let bol = 2349;
        let start_offset = 2398;
        let end_offset = 2450;
        let start = FilePosLarge::from_lnum_bol_offset(line, bol, start_offset);
        let end = FilePosLarge::from_lnum_bol_offset(line, bol, end_offset);
        match PosSpanTiny::make(&start, &end) {
            // Was `assert_eq!(true, false)`; a panic with a message is clearer.
            None => panic!("expected this span to fit in a PosSpanTiny"),
            Some(span) => {
                assert_eq!(line, span.start_line_number());
                assert_eq!(line, span.end_line_number());
                assert_eq!(bol, span.start_beginning_of_line());
                assert_eq!(bol, span.end_beginning_of_line());
                assert_eq!(start_offset, span.start_offset());
                assert_eq!(end_offset, span.end_offset());
                assert_eq!((start_offset - bol), span.start_column());
                assert_eq!((end_offset - bol), span.end_column());
                let PosSpanRaw {
                    start: start_,
                    end: end_,
                } = span.to_raw_span();
                assert_eq!(start, start_);
                assert_eq!(end, end_);
            }
        }
    }

    #[test]
    fn test_tiny_multiline_span() {
        // Multi-line spans need nonzero increments, which the current layout
        // cannot store (increment widths are 0), so `make` may return None;
        // only validate the round trip when it happens to succeed.
        let start_line = 30;
        let start_bol = 2349;
        let end_line = 33;
        let end_bol = 2500;
        let start_offset = 2398;
        let end_offset = 2600;
        let start = FilePosLarge::from_lnum_bol_offset(start_line, start_bol, start_offset);
        let end = FilePosLarge::from_lnum_bol_offset(end_line, end_bol, end_offset);
        match PosSpanTiny::make(&start, &end) {
            None => {}
            Some(span) => {
                assert_eq!(start_line, span.start_line_number());
                assert_eq!(end_line, span.end_line_number());
                assert_eq!(start_bol, span.start_beginning_of_line());
                assert_eq!(end_bol, span.end_beginning_of_line());
                assert_eq!(start_offset, span.start_offset());
                assert_eq!(end_offset, span.end_offset());
                assert_eq!((start_offset - start_bol), span.start_column());
                assert_eq!((end_offset - end_bol), span.end_column());
                let PosSpanRaw {
                    start: start_,
                    end: end_,
                } = span.to_raw_span();
                assert_eq!(start, start_);
                assert_eq!(end, end_);
            }
        }
    }

    #[test]
    fn test_tiny_negative_span() {
        // End before start: `make` is expected to reject (checked_sub fails).
        let start_line = 30;
        let start_bol = 2349;
        let end_line = 23;
        let end_bol = 2000;
        let start_offset = 2398;
        let end_offset = 2003;
        let start = FilePosLarge::from_lnum_bol_offset(start_line, start_bol, start_offset);
        let end = FilePosLarge::from_lnum_bol_offset(end_line, end_bol, end_offset);
        match PosSpanTiny::make(&start, &end) {
            None => {}
            Some(span) => {
                assert_eq!(start_line, span.start_line_number());
                assert_eq!(end_line, span.end_line_number());
                assert_eq!(start_bol, span.start_beginning_of_line());
                assert_eq!(end_bol, span.end_beginning_of_line());
                assert_eq!(start_offset, span.start_offset());
                assert_eq!(end_offset, span.end_offset());
                assert_eq!((start_offset - start_bol), span.start_column());
                assert_eq!((end_offset - end_bol), span.end_column());
                let PosSpanRaw {
                    start: start_,
                    end: end_,
                } = span.to_raw_span();
                assert_eq!(start, start_);
                assert_eq!(end, end_);
            }
        }
    }

    #[test]
    fn test_tiny_dummy() {
        // The dummy span decodes to line/bol 0 and usize::MAX offsets.
        let line: u64 = 0;
        let bol: u64 = 0;
        // `u64::MAX` replaces the legacy `u64::max_value()` call.
        let start_offset = u64::MAX;
        let end_offset = u64::MAX;
        let start =
            FilePosLarge::from_lnum_bol_offset(line as usize, bol as usize, start_offset as usize);
        let end =
            FilePosLarge::from_lnum_bol_offset(line as usize, bol as usize, end_offset as usize);
        let span = PosSpanTiny::make_dummy();
        assert_eq!(line, span.start_line_number() as u64);
        assert_eq!(line, span.end_line_number() as u64);
        assert_eq!(bol, span.start_beginning_of_line() as u64);
        assert_eq!(bol, span.end_beginning_of_line() as u64);
        assert_eq!(start_offset, span.start_offset() as u64);
        assert_eq!(end_offset, span.end_offset() as u64);
        assert_eq!((start_offset - bol), span.start_column() as u64);
        assert_eq!((end_offset - bol), span.end_column() as u64);
        let PosSpanRaw {
            start: start_,
            end: end_,
        } = span.to_raw_span();
        assert_eq!(start, start_);
        assert_eq!(end, end_);
    }

    #[test]
    fn test_tiny_large() {
        // Fields far beyond their bit widths: `make` should refuse to encode.
        let max_int = u64::MAX;
        let line = max_int;
        let bol = max_int;
        let start_offset = max_int;
        let end_offset = max_int;
        let start =
            FilePosLarge::from_lnum_bol_offset(line as usize, bol as usize, start_offset as usize);
        let end =
            FilePosLarge::from_lnum_bol_offset(line as usize, bol as usize, end_offset as usize);
        match PosSpanTiny::make(&start, &end) {
            None => {
                // expected
            }
            Some(span) => {
                // will likely fail here
                assert_eq!(line, span.start_line_number() as u64);
                assert_eq!(line, span.end_line_number() as u64);
                assert_eq!(bol, span.start_beginning_of_line() as u64);
                assert_eq!(bol, span.end_beginning_of_line() as u64);
                assert_eq!(start_offset, span.start_offset() as u64);
                assert_eq!(end_offset, span.end_offset() as u64);
                assert_eq!((start_offset - bol), span.start_column() as u64);
                assert_eq!((end_offset - bol), span.end_column() as u64);
                let PosSpanRaw {
                    start: start_,
                    end: end_,
                } = span.to_raw_span();
                assert_eq!(start, start_);
                assert_eq!(end, end_);
            }
        }
    }

    #[test]
    fn test_round_trip_through_ocaml_value_large_tiny_span() {
        // This marks a span between columns 6 and 62 of line 49110 (where the
        // beginning of the line is character 1667611 from the beginning of the
        // file).
        let line = 49110usize;
        let bol = 1667611usize;
        let start_offset = 1667617usize;
        let end_offset = 1667673usize;

        let start = FilePosLarge::from_lnum_bol_offset(line, bol, start_offset);
        let end = FilePosLarge::from_lnum_bol_offset(line, bol, end_offset);
        let span = PosSpanTiny::make(&start, &end).unwrap();

        // Though the span fits nicely into 63-bits, the resulting value is > 2^62 -
        // 1, OCaml's max (signed) int.
        let alloc = ocamlrep::Arena::new();
        let value = span.to_ocamlrep(&alloc);
        let span_read_back = PosSpanTiny::from_ocamlrep(value).ok().unwrap();

        assert_eq!(span, span_read_back);
    }
}

impl fmt::Debug for PosSpanTiny {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "PosSpanTiny {{ from {}:{} to {}:{} }}",
            self.start_line_number(),
            self.start_column(),
            self.end_line_number(),
            self.end_column()
        )
    }
}
Rust
hhvm/hphp/hack/src/utils/rust/pos/rc_pos.rs
// Copyright (c) Meta Platforms, Inc. and affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. pub mod file_pos; pub mod file_pos_large; pub mod file_pos_small; pub mod pos_span_raw; pub mod pos_span_tiny; mod pos_impl; pub use pos_impl::map; pub use pos_impl::Pos; pub use pos_impl::PosR; pub use pos_impl::PosString;
TOML
hhvm/hphp/hack/src/utils/rust/random_id/Cargo.toml
# @generated by autocargo [package] name = "random_id" version = "0.0.0" edition = "2021" [lib] path = "../random_id.rs" test = false doctest = false [dependencies] rand = { version = "0.8", features = ["small_rng"] }