language | filename | code
---|---|---|
Text | hhvm/hphp/CMakeLists.txt | #
# +----------------------------------------------------------------------+
# | HipHop for PHP |
# +----------------------------------------------------------------------+
# | Copyright (c) 2010 Facebook, Inc. (http://www.facebook.com) |
# | Copyright (c) 1997-2010 The PHP Group |
# +----------------------------------------------------------------------+
# | This source file is subject to version 3.01 of the PHP license, |
# | that is bundled with this package in the file LICENSE, and is |
# | available through the world-wide-web at the following url: |
# | http://www.php.net/license/3_01.txt |
# | If you did not receive a copy of the PHP license and are unable to |
# | obtain it through the world-wide-web, please send a note to |
# | [email protected] so we can mail you a copy immediately. |
# +----------------------------------------------------------------------+
#
add_definitions("-DHHVM")
if (EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/facebook")
# I'm sorry, but facebook's internal repo has the top level dir stored inside
# of hphp/, so we need to pull that one in first if it exists
CMAKE_MINIMUM_REQUIRED(VERSION 2.8.7 FATAL_ERROR)
get_filename_component(HPHP_HOME "${CMAKE_CURRENT_SOURCE_DIR}/.." ABSOLUTE)
set(ENABLE_EXTENSION_PGSQL OFF CACHE INTERNAL "" FORCE)
set(TP_DIR "${HPHP_HOME}/hphp/public_tld/third-party/")
set(TP_BUILD_DIR "${TP_DIR}")
set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/facebook/CMake"
"${CMAKE_CURRENT_SOURCE_DIR}/public_tld/CMake"
${CMAKE_MODULE_PATH})
include(HPHPCompiler)
endif()
include(HHVMProject)
include(HHVMExtensionConfig)
add_custom_target(generated_systemlib)
# Before we do anything else, we'll configure the extensions,
# so that the defines take effect globally.
file(GLOB subdirs ${CMAKE_CURRENT_SOURCE_DIR}/runtime/ext/*)
foreach (dir ${subdirs})
if (IS_DIRECTORY ${dir})
if (EXISTS "${dir}/config.cmake")
set(HRE_CURRENT_EXT_PATH "${dir}")
include("${dir}/config.cmake")
endif()
endif()
endforeach()
HHVM_EXTENSION_RESOLVE_DEPENDENCIES()
if (EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/facebook")
include(FBTLD)
endif()
include(HPHPSetup)
include(HPHPFindLibs)
include(HHVMGenerateConfig)
HHVM_GENERATE_CONFIG("${CMAKE_CURRENT_BINARY_DIR}/util/hphp-config.h")
# Link against this target if you `#include "hphp/runtime/base/foo.h"`; it
# brings in all the transitive dependencies (e.g. rust FFI, folly, boost)
#
# It's currently really the wrong way around: we should declare all the
# dependencies directly against this target without using `hphp_link()`, then
# `hphp_link()` should be changed to link against this target. Done like this
# for now to minimize risk while unblocking adding new dependencies.
#
# Even that's wrong: given improvements in CMake over the years, we don't need
# and should remove many of the HPHP/hphp/HHVM CMake macros, including
# hphp_link, and just use target_link_libraries throughout.
add_library(hhvm_base_headers INTERFACE)
hphp_link(hhvm_base_headers INTERFACE)
target_include_directories(hhvm_base_headers INTERFACE ${CMAKE_SOURCE_DIR})
if (ENABLE_COTIRE)
include(cotire)
FIND_PATH(LIBC_INCLUDE_PATH stdlib.h)
# Detect the architecture-specific include directory
IF("${CMAKE_LIBRARY_ARCHITECTURE}" STREQUAL "")
# For CentOS/Red Hat where they store it directly in /usr/include
SET(ARCH_INCLUDE_PATH "${LIBC_INCLUDE_PATH}/bits")
ELSE()
FIND_PATH(ARCH_INCLUDE_PATH ${CMAKE_LIBRARY_ARCHITECTURE})
SET(ARCH_INCLUDE_PATH "${ARCH_INCLUDE_PATH}/${CMAKE_LIBRARY_ARCHITECTURE}")
ENDIF()
set_property(DIRECTORY
PROPERTY COTIRE_PREFIX_HEADER_IGNORE_PATH
"${LIBC_INCLUDE_PATH}/stdlib.h"
"${LIBC_INCLUDE_PATH}/string.h"
"${LIBC_INCLUDE_PATH}/ansidecl.h"
"${LIBC_INCLUDE_PATH}/bfd.h"
"${LIBC_INCLUDE_PATH}/libelf.h"
"${LIBC_INCLUDE_PATH}/elf.h"
"${LIBC_INCLUDE_PATH}/gelf.h"
"${LIBC_INCLUDE_PATH}/resolv.h"
"${ARCH_INCLUDE_PATH}"
"${CCLIENT_INCLUDE_PATH}"
"${LIBPNG_INCLUDE_DIRS}/png.h"
"${LDAP_INCLUDE_DIR}/ldap.h"
"${LIBSQLITE3_INCLUDE_DIR}/sqlite3ext.h"
"${CMAKE_SOURCE_DIR}"
"${CMAKE_BINARY_DIR}")
# XED headers need to be wrapped in extern "C"
if (ENABLE_XED)
if (LibXed_INCLUDE_DIR)
set_property(DIRECTORY
APPEND PROPERTY COTIRE_PREFIX_HEADER_IGNORE_PATH
"${LibXed_INCLUDE_DIR}")
else()
set_property(DIRECTORY
APPEND PROPERTY COTIRE_PREFIX_HEADER_IGNORE_PATH
"${TP_DIR}/xed/xed/build/include/xed")
endif()
endif()
endif()
# The only thing to do directly in tools is install this one script. Tools also has
# its own Makefile, so just do it here instead of dealing with moving that all
# around.
install(PROGRAMS ${CMAKE_CURRENT_SOURCE_DIR}/tools/oss-repo-mode
RENAME hhvm-repo-mode
DESTINATION bin
COMPONENT dev)
add_definitions("-DUSE_CMAKE")
enable_language(ASM)
if (NOT PCRE_LIBRARY)
link_libraries(pcre)
endif()
add_subdirectory(tools/hfsort)
add_subdirectory(tools/version)
add_subdirectory(tools/tc-print)
add_subdirectory(compiler)
add_subdirectory(hack)
add_subdirectory(hhbbc)
add_subdirectory(neo)
add_subdirectory(runtime)
add_subdirectory(runtime/ext)
# The runtime/test binary requires GTest and GMock to be installed globally
option(RUNTIME_TEST_BIN "Create the HHVM runtime/test binary" OFF)
if (RUNTIME_TEST_BIN)
add_subdirectory(runtime/test)
endif ()
add_subdirectory(system)
add_subdirectory(util)
add_subdirectory(vixl)
add_subdirectory(zend)
add_subdirectory(hhvm)
option(TEST_BIN "Create the HHVM test binary" OFF)
if (TEST_BIN)
add_subdirectory(test)
endif ()
add_subdirectory(tools/gdb)
include(test/dso_test/dso_test.cmake)
# Keep this last
add_subdirectory(tools/hphpize) |
hhvm/hphp/dune | ;; Only hack (with actual ocaml code) and hsl (with some php files Hack needs)
;; are relevant to the OSS ocaml build of Hack. Ban all other directories
(data_only_dirs
bin
compiler
doc
facebook
hhbbc
hhvm
neo
public_autocargo
public_tld
runtime
system
test
tools
util
vixl
zend
) |
|
hhvm/hphp/hack.typechecker.code-workspace | {
/*
* TS 2019-12-16 - This file is for UX testing of VS Code workspace functionality.
* For questions please contact Ted Spence or Omar Tawfik
*/
"folders": [
{
"name": "hack",
"uri": "./hack"
}
],
"tasks": {
"version": "2.0.0",
"tasks": [
{
"label": "Build hack",
"command": "./hack/scripts/facebook/build-hh.sh",
"args": [],
"group": "build",
"presentation": {
"reveal": "always",
"panel": "new"
},
"problemMatcher": "$tslint5"
},
{
"label": "Switch hack to buck-out",
"command": "hh",
"args": ["--switch", "buck-out"],
"group": "test",
"presentation": {
"reveal": "always",
"panel": "new"
},
"problemMatcher": "$tslint5"
},
{
"label": "Switch hack to dev",
"command": "hh",
"args": ["--switch", "dev"],
"group": "test",
"presentation": {
"reveal": "always",
"panel": "new"
},
"problemMatcher": "$tslint5"
},
{
"label": "Switch hack to prod",
"command": "hh",
"args": ["--switch", "prod"],
"group": "test",
"presentation": {
"reveal": "always",
"panel": "new"
},
"problemMatcher": "$tslint5"
},
{
"label": "Kill all running HH instances",
"command": "./hack/scripts/facebook/kill-hh.sh",
"args": [],
"group": "test",
"presentation": {
"reveal": "always",
"panel": "new"
},
"problemMatcher": "$tslint5"
},
{
"label": "Run hack integration tests",
"command": "buck",
"args": ["test", "//hphp/hack/test/integration/..."],
"group": "test",
"presentation": {
"reveal": "always",
"panel": "new"
},
"problemMatcher": "$tslint5"
}
]
},
"settings": {
"facebook.repositories": ["fbcode"],
"facebook.server": "reserved"
}
} |
|
Text | hhvm/hphp/compiler/CMakeLists.txt | set(SOURCE_SUBDIRS expression statement analysis system util parser)
set(CXX_SOURCES)
auto_sources(files "*.cpp" "${SOURCE_SUBDIRS}")
list(APPEND CXX_SOURCES ${files})
set(C_SOURCES)
auto_sources(files "*.c" "${SOURCE_SUBDIRS}")
list(APPEND C_SOURCES ${files})
set(HEADER_SOURCES)
auto_sources(files "*.h" "{$SOURCE_SUBDIRS}")
list(APPEND HEADER_SOURCES ${files})
HHVM_PUBLIC_HEADERS(compiler ${files})
include_directories("${CMAKE_CURRENT_SOURCE_DIR}/../")
set(CMAKE_CURRENT_BINARY_DIR "${CMAKE_SOURCE_DIR}/bin")
foreach (CXX_FILE ${CXX_SOURCES})
if(${CXX_FILE} MATCHES ".no.cpp$")
SET_SOURCE_FILES_PROPERTIES(
${CXX_FILE}
PROPERTIES
COMPILE_FLAGS -O0
)
endif()
endforeach()
add_definitions(-DALWAYS_ASSERT=1)
add_library(hphp_analysis STATIC ${CXX_SOURCES} ${C_SOURCES} ${HEADER_SOURCES})
auto_source_group("hphp_analysis" "${CMAKE_CURRENT_SOURCE_DIR}"
${CXX_SOURCES} ${C_SOURCES} ${HEADER_SOURCES})
target_link_libraries(hphp_analysis boost hphp_util hphp_system)
if (ENABLE_COTIRE)
cotire(hphp_analysis)
endif() |
C++ | hhvm/hphp/compiler/compiler.cpp | /*
+----------------------------------------------------------------------+
| HipHop for PHP |
+----------------------------------------------------------------------+
| Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com) |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| [email protected] so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
*/
#include "hphp/compiler/compiler.h"
#include "hphp/compiler/option.h"
#include "hphp/compiler/package.h"
#include "hphp/hack/src/hackc/ffi_bridge/compiler_ffi.rs.h"
#include "hphp/hhbbc/hhbbc.h"
#include "hphp/hhbbc/misc.h"
#include "hphp/hhbbc/options.h"
#include "hphp/runtime/base/config.h"
#include "hphp/runtime/base/file-util.h"
#include "hphp/runtime/base/ini-setting.h"
#include "hphp/runtime/base/preg.h"
#include "hphp/runtime/base/program-functions.h"
#include "hphp/runtime/base/variable-serializer.h"
#include "hphp/runtime/version.h"
#include "hphp/runtime/vm/builtin-symbol-map.h"
#include "hphp/runtime/vm/disas.h"
#include "hphp/runtime/vm/preclass-emitter.h"
#include "hphp/runtime/vm/repo-autoload-map-builder.h"
#include "hphp/runtime/vm/repo-global-data.h"
#include "hphp/runtime/vm/type-alias-emitter.h"
#include "hphp/runtime/vm/unit-emitter.h"
#include "hphp/util/async-func.h"
#include "hphp/util/build-info.h"
#include "hphp/util/current-executable.h"
#include "hphp/util/exception.h"
#include "hphp/util/hdf.h"
#include "hphp/util/job-queue.h"
#include "hphp/util/logger.h"
#include "hphp/util/process.h"
#include "hphp/util/process-exec.h"
#include "hphp/util/rds-local.h"
#include "hphp/util/text-util.h"
#include "hphp/util/timer.h"
#ifndef _MSC_VER
#include "hphp/util/light-process.h"
#endif
#include "hphp/hhvm/process-init.h"
#include <sys/types.h>
#ifndef _MSC_VER
#include <sys/wait.h>
#include <dlfcn.h>
#endif
#include <boost/algorithm/string/replace.hpp>
#include <boost/program_options/options_description.hpp>
#include <boost/program_options/positional_options.hpp>
#include <boost/program_options/variables_map.hpp>
#include <boost/program_options/parsers.hpp>
#include <cerrno>
#include <exception>
#include <filesystem>
#include <fstream>
#include <folly/portability/SysStat.h>
using namespace boost::program_options;
namespace HPHP {
using namespace extern_worker;
///////////////////////////////////////////////////////////////////////////////
namespace {
///////////////////////////////////////////////////////////////////////////////
struct CompilerOptions {
std::string outputDir;
std::vector<std::string> config;
std::vector<std::string> confStrings;
std::vector<std::string> iniStrings;
std::string repoOptionsDir;
std::string inputDir;
std::vector<std::string> inputs;
std::string inputList;
std::vector<std::string> dirs;
std::vector<std::string> excludeDirs;
std::vector<std::string> excludeFiles;
std::vector<std::string> excludePatterns;
std::vector<std::string> excludeStaticDirs;
std::vector<std::string> excludeStaticFiles;
std::vector<std::string> excludeStaticPatterns;
std::vector<std::string> cfiles;
std::vector<std::string> cdirs;
std::string push_phases;
std::string matched_overrides;
int logLevel;
std::string filecache;
bool coredump;
std::string ondemandEdgesPath;
};
///////////////////////////////////////////////////////////////////////////////
void applyBuildOverrides(IniSetting::Map& ini,
Hdf& config,
CompilerOptions& po) {
std::string push_phases = Config::GetString(ini, config, "Build.PushPhases");
po.push_phases = push_phases;
// convert push phases to newline-separated, to make matching them less
// error-prone.
replaceAll(push_phases, ",", "\n");
bool loggedOnce = false;
for (Hdf hdf = config["Overrides"].firstChild();
hdf.exists();
hdf = hdf.next()) {
if (!loggedOnce) {
Logger::Info(folly::sformat(
"Matching build overrides using: push_phases='{}'",
po.push_phases));
loggedOnce = true;
}
if (Config::matchHdfPattern(push_phases, ini, hdf, "push_phase" , "m")) {
Logger::Info(folly::sformat("Matched override: {}", hdf.getName()));
folly::format(
&po.matched_overrides,
"{}{}",
po.matched_overrides.empty() ? "" : ",",
hdf.getName()
);
if (hdf.exists("clear")) {
std::vector<std::string> list;
hdf["clear"].configGet(list);
for (auto const& s : list) {
config.remove(s);
}
}
config.copy(hdf["overwrite"]);
// no break here, so we can continue to match more overrides
}
hdf["overwrite"].setVisited(); // avoid lint complaining
if (hdf.exists("clear")) {
// when the tier does not match, "clear" is not accessed
// mark it visited, so the linter does not complain
hdf["clear"].setVisited();
}
}
}
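// For illustration only: a hypothetical HDF fragment of the shape this
// function consumes (the override and setting names below are invented, and
// the exact HDF syntax is a sketch rather than a verbatim config):
//
//   Build.PushPhases = phase1,phase2
//   Overrides {
//     my_override {
//       push_phase = phase2
//       clear {
//         * = Some.Setting.To.Remove
//       }
//       overwrite {
//         Some.Setting.To.Add = true
//       }
//     }
//   }
//
// With PushPhases rewritten as "phase1\nphase2", the push_phase pattern for
// my_override matches, so the listed settings are removed and the overwrite
// subtree is copied into the top-level config.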
// Parse queryStr as a JSON-encoded watchman query expression, adding the
// directories specified in the query to package. Only supports 'expression'
// queries and the 'dirname' term.
bool addAutoloadQueryToPackage(Package& package, const std::string& queryStr) {
try {
auto query = folly::parseJson(queryStr);
if (!query.isObject()) {
Logger::FError("Autoload.Query is not a JSON Object");
return false;
}
auto expr = query["expression"];
for (auto& term : expr) {
if (term.isArray() && term[0] == "dirname") {
Logger::FInfo("adding autoload dir {}", term[1].asString());
package.addDirectory(term[1].asString());
}
}
return true;
} catch (const folly::json::parse_error& e) {
Logger::FError("Error JSON-parsing Autoload.Query = \"{}\": {}",
queryStr, e.what());
return false;
}
}
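// Example (hypothetical directories, not from the source): a value of
// Autoload.Query that this parser accepts. Only the "expression" field is
// inspected, and only "dirname" terms contribute directories:
//
//   {"expression": [["dirname", "www/core"], ["dirname", "www/lib"]]}
//
// Any other term shape is silently ignored rather than rejected.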
void addListToPackage(Package& package, const std::vector<std::string>& dirs,
const CompilerOptions& po) {
namespace fs = std::filesystem;
std::string prefix{""};
if (po.repoOptionsDir != po.inputDir) {
auto const input = fs::path(po.inputDir);
auto const rdr = fs::path(po.repoOptionsDir);
prefix = fs::relative(po.repoOptionsDir, po.inputDir).native();
if (!prefix.empty() && prefix.back() != '/') prefix += '/';
}
for (auto const& dir : dirs) {
Logger::FInfo("adding autoload dir {}", dir);
package.addDirectory(prefix + dir);
}
}
void addInputsToPackage(Package& package, const CompilerOptions& po) {
if (po.dirs.empty() && po.inputs.empty() && po.inputList.empty()) {
package.addDirectory("/");
} else {
for (auto const& dir : po.dirs) {
package.addDirectory(dir);
}
for (auto const& cdir : po.cdirs) {
package.addStaticDirectory(cdir);
}
for (auto const& cfile : po.cfiles) {
package.addStaticFile(cfile);
}
for (auto const& input : po.inputs) {
package.addSourceFile(input);
}
if (!po.inputList.empty()) {
package.addInputList(po.inputList);
}
}
}
void genText(const UnitEmitter& ue, const std::string& outputPath) {
assertx(Option::GenerateTextHHBC || Option::GenerateHhasHHBC);
auto const unit = ue.create();
auto const basePath = [&] {
auto fullPath = outputPath;
if (!fullPath.empty() &&
!FileUtil::isDirSeparator(fullPath[fullPath.size() - 1])) {
fullPath += FileUtil::getDirSeparator();
}
auto const fileName = "php/" + unit->filepath()->toCppString();
if (fileName.size() > 4 &&
fileName.substr(fileName.length() - 4) == ".php") {
fullPath += fileName.substr(0, fileName.length() - 4);
} else {
fullPath += fileName;
}
for (auto pos = outputPath.size(); pos < fullPath.size(); pos++) {
if (FileUtil::isDirSeparator(fullPath[pos])) {
mkdir(fullPath.substr(0, pos).c_str(), 0777);
}
}
return fullPath;
}();
if (Option::GenerateTextHHBC) {
auto const fullPath = basePath + ".hhbc.txt";
std::ofstream f(fullPath.c_str());
if (!f) {
Logger::Error("Unable to open %s for write", fullPath.c_str());
} else {
f << "Hash: " << ue.sha1().toString() << std::endl;
f << unit->toString();
f.close();
}
}
if (Option::GenerateHhasHHBC) {
auto const fullPath = basePath + ".hhas";
std::ofstream f(fullPath.c_str());
if (!f) {
Logger::Error("Unable to open %s for write", fullPath.c_str());
} else {
f << disassemble(unit.get());
f.close();
}
}
}
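// Worked example with hypothetical paths: for outputPath "out" and a unit
// whose filepath is "foo/bar.php", basePath becomes "out/php/foo/bar", so
// GenerateTextHHBC writes "out/php/foo/bar.hhbc.txt" and GenerateHhasHHBC
// writes "out/php/foo/bar.hhas", with intermediate directories created by
// the mkdir loop above.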
/*
* It's an invariant that symbols in the repo must be Unique and
* Persistent. Verify all relevant symbols are unique and set the
* appropriate Attrs.
*/
struct SymbolSets {
SymbolSets() {
// These aren't stored in the repo, but we still need to check for
// collisions against them, so put them in the maps.
for (auto const& kv : Native::getConstants()) {
assertx(kv.second.m_type != KindOfUninit);
add(constants, kv.first, nullptr, "constant");
}
}
// For local parses, where we have a UnitEmitter
void add(UnitEmitter& ue) {
// Verify uniqueness of symbols and set Attrs appropriately.
auto const path = ue.m_filepath;
add(units, path, path, "unit");
for (auto const pce : ue.preclasses()) {
pce->setAttrs(pce->attrs() | AttrPersistent);
if (pce->attrs() & AttrEnum) add(enums, pce->name(), path, "enum");
add(classes, pce->name(), path, "class", typeAliases);
}
for (auto& fe : ue.fevec()) {
if (fe->attrs & AttrIsMethCaller) {
if (addNoFail(funcs, fe->name, path, "function")) {
fe->attrs |= AttrPersistent;
}
} else {
fe->attrs |= AttrPersistent;
add(funcs, fe->name, path, "function");
}
}
for (auto& te : ue.typeAliases()) {
te->setAttrs(te->attrs() | AttrPersistent);
add(typeAliases, te->name(), path, "type alias", classes);
}
for (auto& c : ue.constants()) {
c.attrs |= AttrPersistent;
add(constants, c.name, path, "constant");
}
for (auto& m : ue.modules()) {
m.attrs |= AttrPersistent;
add(modules, m.name, path, "module");
}
}
// For remote parses, where we don't have a UnitEmitter
void add(const Package::ParseMeta::Definitions& d, const StringData* path) {
add(units, path, path, "unit");
for (auto const& c : d.m_classes) {
add(classes, c, path, "class", typeAliases);
}
for (auto const& e : d.m_enums) {
add(enums, e, path, "enum");
add(classes, e, path, "class", typeAliases);
}
for (auto const& f : d.m_funcs) {
add(funcs, f, path, "function");
}
for (auto const& m : d.m_methCallers) {
addNoFail(funcs, m, path, "function");
}
for (auto const& a : d.m_typeAliases) {
add(typeAliases, a, path, "type alias", classes);
}
for (auto const& c : d.m_constants) {
add(constants, c, path, "constant");
}
for (auto const& m : d.m_modules) {
add(modules, m, path, "module");
}
}
struct NonUnique : std::runtime_error {
using std::runtime_error::runtime_error;
};
private:
template <typename T>
void add(T& map,
const StringData* name,
const StringData* unit,
const char* type) {
assertx(name->isStatic());
assertx(!unit || unit->isStatic());
auto const ret = map.emplace(name, unit);
if (!ret.second) return fail(name, unit, ret.first->second, type);
}
template <typename T>
bool addNoFail(T& map,
const StringData* name,
const StringData* unit,
const char* type) {
assertx(name->isStatic());
assertx(!unit || unit->isStatic());
return map.emplace(name, unit).second;
}
template <typename T, typename E>
void add(T& map,
const StringData* name,
const StringData* unit,
const char* type,
const E& other) {
assertx(name->isStatic());
assertx(!unit || unit->isStatic());
auto const it = other.find(name);
if (it != other.end()) return fail(name, unit, it->second, "symbol");
add(map, name, unit, type);
}
[[noreturn]]
void fail(const StringData* name,
const StringData* unit1,
const StringData* unit2,
const char* type) {
auto const filename = [] (const StringData* u) {
if (!u) return "BUILTIN";
return u->data();
};
throw NonUnique{
folly::sformat(
"More than one {} with the name {}. In {} and {}",
type,
name,
filename(unit1),
filename(unit2)
)
};
}
using IMap = folly_concurrent_hash_map_simd<
const StringData*,
const StringData*,
string_data_hash,
string_data_isame
>;
using Map = folly_concurrent_hash_map_simd<
const StringData*,
const StringData*,
string_data_hash,
string_data_same
>;
IMap enums;
IMap classes;
IMap funcs;
IMap typeAliases;
Map constants;
Map modules;
Map units;
};
RepoGlobalData getGlobalData() {
auto const now = std::chrono::high_resolution_clock::now();
auto const nanos =
std::chrono::duration_cast<std::chrono::nanoseconds>(
now.time_since_epoch()
);
auto gd = RepoGlobalData{};
gd.Signature = nanos.count();
gd.CheckPropTypeHints = RuntimeOption::EvalCheckPropTypeHints;
gd.PHP7_NoHexNumerics = RuntimeOption::PHP7_NoHexNumerics;
gd.PHP7_Substr = RuntimeOption::PHP7_Substr;
gd.PHP7_Builtins = RuntimeOption::PHP7_Builtins;
gd.HardGenericsUB = RuntimeOption::EvalEnforceGenericsUB >= 2;
gd.EnableIntrinsicsExtension = RuntimeOption::EnableIntrinsicsExtension;
gd.ForbidDynamicCallsToFunc = RuntimeOption::EvalForbidDynamicCallsToFunc;
gd.ForbidDynamicCallsWithAttr =
RuntimeOption::EvalForbidDynamicCallsWithAttr;
gd.ForbidDynamicCallsToClsMeth =
RuntimeOption::EvalForbidDynamicCallsToClsMeth;
gd.ForbidDynamicCallsToInstMeth =
RuntimeOption::EvalForbidDynamicCallsToInstMeth;
gd.ForbidDynamicConstructs = RuntimeOption::EvalForbidDynamicConstructs;
gd.LogKnownMethodsAsDynamicCalls =
RuntimeOption::EvalLogKnownMethodsAsDynamicCalls;
gd.EnableArgsInBacktraces = RuntimeOption::EnableArgsInBacktraces;
gd.NoticeOnBuiltinDynamicCalls =
RuntimeOption::EvalNoticeOnBuiltinDynamicCalls;
gd.InitialTypeTableSize =
RuntimeOption::EvalInitialTypeTableSize;
gd.InitialFuncTableSize =
RuntimeOption::EvalInitialFuncTableSize;
gd.InitialStaticStringTableSize =
RuntimeOption::EvalInitialStaticStringTableSize;
gd.HackArrCompatSerializeNotices =
RuntimeOption::EvalHackArrCompatSerializeNotices;
gd.AbortBuildOnVerifyError = RuntimeOption::EvalAbortBuildOnVerifyError;
gd.EmitClsMethPointers = RuntimeOption::EvalEmitClsMethPointers;
gd.IsVecNotices = RuntimeOption::EvalIsVecNotices;
gd.RaiseClassConversionWarning =
RuntimeOption::EvalRaiseClassConversionWarning;
gd.ClassPassesClassname = RuntimeOption::EvalClassPassesClassname;
gd.ClassnameNotices = RuntimeOption::EvalClassnameNotices;
gd.ClassStringHintNotices = RO::EvalClassStringHintNotices;
gd.ClassIsStringNotices = RuntimeOption::EvalClassIsStringNotices;
gd.StrictArrayFillKeys = RuntimeOption::StrictArrayFillKeys;
gd.TraitConstantInterfaceBehavior =
RuntimeOption::EvalTraitConstantInterfaceBehavior;
gd.BuildMayNoticeOnMethCallerHelperIsObject =
RO::EvalBuildMayNoticeOnMethCallerHelperIsObject;
gd.DiamondTraitMethods = RuntimeOption::EvalDiamondTraitMethods;
gd.EvalCoeffectEnforcementLevels = RO::EvalCoeffectEnforcementLevels;
gd.EmitBespokeTypeStructures = RO::EvalEmitBespokeTypeStructures;
gd.ActiveDeployment = RO::EvalActiveDeployment;
gd.ModuleLevelTraits = RO::EvalModuleLevelTraits;
gd.TreatCaseTypesAsMixed = RO::EvalTreatCaseTypesAsMixed;
gd.JitEnableRenameFunction = RO::EvalJitEnableRenameFunction;
gd.RenamableFunctions = RO::RenamableFunctions;
gd.NonInterceptableFunctions = RO::NonInterceptableFunctions;
if (Option::ConstFoldFileBC) {
gd.SourceRootForFileBC.emplace(RO::SourceRoot);
}
for (auto const& elm : RuntimeOption::ConstantFunctions) {
auto const s = internal_serialize(tvAsCVarRef(elm.second));
gd.ConstantFunctions.emplace_back(elm.first, s.toCppString());
}
std::sort(gd.ConstantFunctions.begin(), gd.ConstantFunctions.end());
return gd;
}
void setCoredumps(CompilerOptions& po) {
#ifdef _MSC_VER
/**
 * Windows controls core dump size and behavior at the system level, not per
 * app, so we do nothing here and are at the mercy of Dr. Watson.
*/
#elif defined(__APPLE__) || defined(__FreeBSD__)
struct rlimit rl;
getrlimit(RLIMIT_CORE, &rl);
if (!po.coredump) {
po.coredump = rl.rlim_cur > 0;
return;
}
rl.rlim_cur = 80000000LL;
if (rl.rlim_max < rl.rlim_cur) {
rl.rlim_max = rl.rlim_cur;
}
setrlimit(RLIMIT_CORE, &rl);
#else
struct rlimit64 rl;
getrlimit64(RLIMIT_CORE, &rl);
if (!po.coredump) {
po.coredump = rl.rlim_cur > 0;
return;
}
rl.rlim_cur = 8000000000LL;
if (rl.rlim_max < rl.rlim_cur) {
rl.rlim_max = rl.rlim_cur;
}
setrlimit64(RLIMIT_CORE, &rl);
#endif
}
int prepareOptions(CompilerOptions &po, int argc, char **argv) {
options_description desc("HipHop Compiler for PHP Usage:\n\n"
"\thphp <options> <inputs>\n\n"
"Options");
std::vector<std::string> formats;
desc.add_options()
("help", "display this message")
("version", "display version number")
("format,f", value<std::vector<std::string>>(&formats)->composing(),
"HHBC Output format: binary (default) | hhas | text")
("repo-options-dir", value<std::string>(&po.repoOptionsDir),
"repo options directory")
("input-dir", value<std::string>(&po.inputDir), "input directory")
("inputs,i", value<std::vector<std::string>>(&po.inputs)->composing(),
"input file names")
("input-list", value<std::string>(&po.inputList),
"file containing list of file names, one per line")
("dir", value<std::vector<std::string>>(&po.dirs)->composing(),
"directories containing all input files")
("exclude-dir",
value<std::vector<std::string>>(&po.excludeDirs)->composing(),
"directories to exclude from the input")
("exclude-file",
value<std::vector<std::string>>(&po.excludeFiles)->composing(),
"files to exclude from the input, even if referenced by included files")
("exclude-pattern",
value<std::vector<std::string>>(&po.excludePatterns)->composing(),
"regex (in 'find' command's regex command line option format) of files "
"or directories to exclude from the input, even if referenced by "
"included files")
("exclude-static-pattern",
value<std::vector<std::string>>(&po.excludeStaticPatterns)->composing(),
"regex (in 'find' command's regex command line option format) of files "
"or directories to exclude from static content cache")
("exclude-static-dir",
value<std::vector<std::string>>(&po.excludeStaticDirs)->composing(),
"directories to exclude from static content cache")
("exclude-static-file",
value<std::vector<std::string>>(&po.excludeStaticFiles)->composing(),
"files to exclude from static content cache")
("cfile", value<std::vector<std::string>>(&po.cfiles)->composing(),
"extra static files forced to include without exclusion checking")
("cdir", value<std::vector<std::string>>(&po.cdirs)->composing(),
"extra directories for static files without exclusion checking")
("output-dir,o", value<std::string>(&po.outputDir), "output directory")
("config,c", value<std::vector<std::string>>(&po.config)->composing(),
"config file name")
("config-value,v",
value<std::vector<std::string>>(&po.confStrings)->composing(),
"individual configuration string in a format of name=value, where "
"name can be any valid configuration for a config file")
("define,d", value<std::vector<std::string>>(&po.iniStrings)->composing(),
"define an ini setting in the same format ( foo[=bar] ) as provided in a "
".ini file")
("log,l",
value<int>(&po.logLevel)->default_value(-1),
"-1: (default); 0: no logging; 1: errors only; 2: warnings and errors; "
"3: informational as well; 4: really verbose.")
("file-cache",
value<std::string>(&po.filecache),
"if specified, generate a static file cache with this file name")
("coredump",
value<bool>(&po.coredump)->default_value(false),
"turn on coredump")
("compiler-id", "display the git hash for the compiler id")
("repo-schema", "display the repo schema id used by this app")
("report-ondemand-edges",
value<std::string>(&po.ondemandEdgesPath),
"Write parse-on-demand dependency edges to the specified file")
;
positional_options_description p;
p.add("inputs", -1);
variables_map vm;
try {
auto opts = command_line_parser(argc, argv).options(desc)
.positional(p).run();
try {
store(opts, vm);
notify(vm);
#if defined(BOOST_VERSION) && BOOST_VERSION >= 105000 && BOOST_VERSION <= 105400
} catch (const error_with_option_name &e) {
std::string wrong_name = e.get_option_name();
std::string right_name = get_right_option_name(opts, wrong_name);
std::string message = e.what();
if (right_name != "") {
boost::replace_all(message, wrong_name, right_name);
}
Logger::Error("Error in command line: %s", message.c_str());
std::cout << desc << "\n";
return -1;
#endif
} catch (const error& e) {
Logger::Error("Error in command line: %s", e.what());
std::cout << desc << "\n";
return -1;
}
} catch (const unknown_option& e) {
Logger::Error("Error in command line: %s", e.what());
std::cout << desc << "\n";
return -1;
} catch (const error& e) {
Logger::Error("Error in command line: %s", e.what());
std::cout << desc << "\n";
return -1;
} catch (...) {
Logger::Error("Error in command line parsing.");
std::cout << desc << "\n";
return -1;
}
if (argc <= 1 || vm.count("help")) {
std::cout << desc << "\n";
return 1;
}
if (vm.count("version")) {
std::cout << "HipHop Repo Compiler";
std::cout << " " << HHVM_VERSION;
std::cout << " (" << (debug ? "dbg" : "rel") << ")\n";
std::cout << "Compiler: " << compilerId() << "\n";
std::cout << "Repo schema: " << repoSchemaId() << "\n";
return 1;
}
if (vm.count("compiler-id")) {
std::cout << compilerId() << "\n";
return 1;
}
if (vm.count("repo-schema")) {
std::cout << repoSchemaId() << "\n";
return 1;
}
if (po.outputDir.empty()) {
Logger::Error("Error in command line: output-dir must be provided.");
std::cout << desc << "\n";
return -1;
}
// log level
if (po.logLevel != -1) {
Logger::LogLevel = (Logger::LogLevelType)po.logLevel;
} else {
Logger::LogLevel = Logger::LogInfo;
}
Logger::Escape = false;
Logger::AlwaysEscapeLog = false;
if (!formats.empty()) {
for (auto const& format : formats) {
if (format == "text") {
Option::GenerateTextHHBC = true;
} else if (format == "hhas") {
Option::GenerateHhasHHBC = true;
} else if (format == "binary") {
Option::GenerateBinaryHHBC = true;
} else {
Logger::Error("Unknown format for HHBC target: %s", format.c_str());
std::cout << desc << "\n";
return -1;
}
}
} else {
Option::GenerateBinaryHHBC = true;
}
tl_heap.getCheck();
IniSetting::Map ini = IniSetting::Map::object;
Hdf config;
for (auto const& file : po.config) {
Config::ParseConfigFile(file, ini, config);
}
for (auto const& iniString : po.iniStrings) {
Config::ParseIniString(iniString, ini);
}
for (auto const& confString : po.confStrings) {
Config::ParseHdfString(confString, config);
}
applyBuildOverrides(ini, config, po);
Hdf runtime = config["Runtime"];
// The configuration command line strings were already processed above
// Don't process them again.
//
// Note that some options depend on RepoAuthoritative; we thus
// set/unset them here. We restore it to false since we need
// compile_systemlib_string to actually parse the file instead of
// trying to load it from repo (which is the case when
// RepoAuthoritative is true).
RuntimeOption::RepoAuthoritative = true;
// Set RepoPath to satisfy assertions (we need a path set in
// RepoAuthoritative). It will never actually be used.
RuntimeOption::RepoPath = "/tmp/dummy.hhbc";
// We don't want debug info in repo builds, since we don't support attaching
// a debugger in repo authoritative mode, but we want the default for debug
// info to be true so that it's present in sandboxes. Override that default
// here, since we only get here when building for repo authoritative mode.
RuntimeOption::RepoDebugInfo = false;
RuntimeOption::Load(ini, runtime);
Option::Load(ini, config);
RuntimeOption::RepoAuthoritative = false;
RuntimeOption::RepoPath = "";
RuntimeOption::EvalJit = false;
RuntimeOption::EvalLowStaticArrays = false;
std::vector<std::string> badnodes;
config.lint(badnodes);
for (auto const& badnode : badnodes) {
Logger::Error("Possible bad config node: %s", badnode.c_str());
}
// we need to initialize pcre cache table very early
pcre_init();
if (po.inputDir.empty()) po.inputDir = '.';
po.inputDir = FileUtil::normalizeDir(po.inputDir);
if (po.repoOptionsDir.empty()) {
po.repoOptionsDir = po.inputDir;
} else {
po.repoOptionsDir = FileUtil::normalizeDir(po.repoOptionsDir);
}
for (auto const& dir : po.excludeDirs) {
Option::PackageExcludeDirs.insert(FileUtil::normalizeDir(dir));
}
for (auto const& file : po.excludeFiles) {
Option::PackageExcludeFiles.insert(file);
}
for (auto const& pattern : po.excludePatterns) {
Option::PackageExcludePatterns.insert(
format_pattern(pattern, true /* prefixSlash */));
}
for (auto const& dir : po.excludeStaticDirs) {
Option::PackageExcludeStaticDirs.insert(FileUtil::normalizeDir(dir));
}
for (auto const& file : po.excludeStaticFiles) {
Option::PackageExcludeStaticFiles.insert(file);
}
for (auto const& pattern : po.excludeStaticPatterns) {
Option::PackageExcludeStaticPatterns.insert(
format_pattern(pattern, true /* prefixSlash */));
}
return 0;
}
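// A hedged usage sketch (file and directory names are hypothetical),
// combining the options defined above; bare positional arguments are
// collected into --inputs:
//
//   hphp --input-dir src --output-dir /tmp/hhvm-repo \
//        --format binary -v Runtime.SomeSetting=1 -l 3 a.php b.php
//
// Return values: 0 to continue compilation, 1 for informational exits such
// as --help or --version, and -1 for command-line errors; compiler_main()
// maps 1 to a successful process exit.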
///////////////////////////////////////////////////////////////////////////////
Options makeExternWorkerOptions(const CompilerOptions& po) {
Options options;
options
.setUseCase(Option::ExternWorkerUseCase)
.setFeaturesFile(Option::ExternWorkerFeaturesFile)
.setUseSubprocess(Option::ExternWorkerForceSubprocess
? Options::UseSubprocess::Always
: Options::UseSubprocess::Fallback)
.setCacheExecs(Option::ExternWorkerUseExecCache)
.setCleanup(Option::ExternWorkerCleanup)
.setUseEdenFS(RO::EvalUseEdenFS)
.setUseRichClient(Option::ExternWorkerUseRichClient)
.setUseZippyRichClient(Option::ExternWorkerUseZippyRichClient)
.setUseP2P(Option::ExternWorkerUseP2P)
.setCasConnectionCount(Option::ExternWorkerCasConnectionCount)
.setEngineConnectionCount(Option::ExternWorkerEngineConnectionCount)
.setAcConnectionCount(Option::ExternWorkerAcConnectionCount)
.setVerboseLogging(Option::ExternWorkerVerboseLogging);
if (Option::ExternWorkerTimeoutSecs > 0) {
options.setTimeout(std::chrono::seconds{Option::ExternWorkerTimeoutSecs});
}
if (!Option::ExternWorkerWorkingDir.empty()) {
options.setWorkingDir(Option::ExternWorkerWorkingDir);
} else {
options.setWorkingDir(po.outputDir);
}
if (Option::ExternWorkerThrottleRetries >= 0) {
options.setThrottleRetries(Option::ExternWorkerThrottleRetries);
}
if (Option::ExternWorkerThrottleBaseWaitMSecs >= 0) {
options.setThrottleBaseWait(
std::chrono::milliseconds{Option::ExternWorkerThrottleBaseWaitMSecs}
);
}
return options;
}
void logPhaseStats(const std::string& phase, const Package& package,
extern_worker::Client& client, StructuredLogEntry& sample, int64_t micros)
{
auto const& stats = client.getStats();
Logger::FInfo(
"{}",
stats.toString(
phase,
folly::sformat("total package files {:,}", package.getTotalFiles())
)
);
sample.setInt(phase + "_total_files", package.getTotalFiles());
sample.setInt(phase + "_micros", micros);
if (auto const t = package.inputsTime()) {
sample.setInt(
phase + "_input_micros",
std::chrono::duration_cast<std::chrono::microseconds>(*t).count()
);
}
if (auto const t = package.ondemandTime()) {
sample.setInt(
phase + "_ondemand_micros",
std::chrono::duration_cast<std::chrono::microseconds>(*t).count()
);
}
stats.logSample(phase, sample);
sample.setStr(phase + "_fellback", client.fellback() ? "true" : "false");
}
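// For a phase named "parse", for example, the entry gains integer fields
// parse_total_files and parse_micros, optional parse_input_micros and
// parse_ondemand_micros, and the string field parse_fellback; the sample is
// later logged under "hhvm_whole_program" for sufficiently large builds.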
namespace {
// Upload all builtin decls, and pass their IndexMeta summary and
// Ref<UnitDecls> to callback() to include in the overall UnitIndex. This
// makes systemlib decls visible to files being compiled as part of the
// full repo build, but does not make repo decls available to systemlib.
coro::Task<bool> indexBuiltinSymbolDecls(
const Package::IndexCallback& callback,
coro::TicketExecutor& executor,
extern_worker::Client& client
) {
std::vector<coro::TaskWithExecutor<void>> tasks;
auto const declCallback = [&](auto const* d) -> coro::Task<void> {
auto const facts = hackc::decls_to_facts(*d->decls, "");
auto summary = summary_of_facts(facts);
callback(
"",
summary,
HPHP_CORO_AWAIT(client.store(Package::UnitDecls{
summary,
std::string{d->serialized.begin(), d->serialized.end()}
}))
);
HPHP_CORO_RETURN_VOID;
};
for (auto const& d: Native::getAllBuiltinDecls()) {
tasks.emplace_back(declCallback(d).scheduleOn(executor.sticky()));
}
HPHP_CORO_AWAIT(coro::collectRange(std::move(tasks)));
HPHP_CORO_RETURN(true);
}
}
// Compute a UnitIndex by parsing decls for all autoload-eligible files.
// If no Autoload.Query is specified by RepoOptions, this just indexes
// the input files.
std::unique_ptr<UnitIndex> computeIndex(
const CompilerOptions& po,
StructuredLogEntry& sample,
coro::TicketExecutor& executor,
extern_worker::Client& client
) {
auto index = std::make_unique<UnitIndex>();
auto const indexUnit = [&] (
std::string&& rpath,
Package::IndexMeta&& meta,
Ref<Package::UnitDecls>&& declsRef
) {
auto locations = std::make_shared<UnitIndex::Locations>(
std::move(rpath), std::move(declsRef)
);
auto insert = [&](auto const& names, auto& map, const char* kind) {
for (auto name : names) {
auto const ret = map.emplace(name, locations);
if (!ret.second) {
Logger::FWarning("Duplicate {} {} in {} and {}",
kind, name, ret.first->first, locations->rpath
);
}
}
};
insert(meta.types, index->types, "type");
insert(meta.funcs, index->funcs, "function");
insert(meta.constants, index->constants, "constant");
insert(meta.modules, index->modules, "module");
};
Package indexPackage{po.inputDir, executor, client, po.coredump};
Timer indexTimer(Timer::WallTime, "indexing");
auto const& repoFlags = RepoOptions::forFile(po.repoOptionsDir).flags();
auto const& dirs = repoFlags.autoloadRepoBuildSearchDirs();
auto const queryStr = repoFlags.autoloadQuery();
if (!dirs.empty()) {
addListToPackage(indexPackage, dirs, po);
} else if (!queryStr.empty()) {
// Index the files specified by Autoload.Query
if (!addAutoloadQueryToPackage(indexPackage, queryStr)) return nullptr;
} else {
// index just the input files
addInputsToPackage(indexPackage, po);
}
// Here, we are doing the following in parallel:
// * Indexing the build package
// * Indexing builtin decls to be used by decl driven bytecode compilation
// If DDB is not enabled, we will return early from the second task.
auto const [indexingRepoOK, indexingSystemlibDeclsOK] = coro::wait(
coro::collect(
indexPackage.index(indexUnit),
coro::invoke([&]() -> coro::Task<bool> {
if (RO::EvalEnableDecl) {
HPHP_CORO_RETURN(HPHP_CORO_AWAIT(
indexBuiltinSymbolDecls(indexUnit, executor, client)
));
}
HPHP_CORO_RETURN(true);
})
)
);
if (!indexingRepoOK || !indexingSystemlibDeclsOK) return nullptr;
logPhaseStats("index", indexPackage, client, sample,
indexTimer.getMicroSeconds());
Logger::FInfo("index size: types={:,} funcs={:,} constants={:,} modules={:,}",
index->types.size(),
index->funcs.size(),
index->constants.size(),
index->modules.size()
);
client.resetStats();
return index;
}
///////////////////////////////////////////////////////////////////////////////
// Parses a file and produces a UnitEmitter. Used when we're not
// going to run HHBBC.
struct ParseJob {
static std::string name() { return "hphpc-parse"; }
static void init(const Package::Config& config,
Package::FileMetaVec meta) {
Package::parseInit(config, std::move(meta));
}
static Package::ParseMetaVec fini() {
return Package::parseFini();
}
static UnitEmitterSerdeWrapper run(const std::string& contents,
const RepoOptionsFlags& flags,
Variadic<Package::UnitDecls> decls) {
return Package::parseRun(contents, flags, std::move(decls.vals));
}
};
using WPI = HHBBC::WholeProgramInput;
// Parses a file (as ParseJob does), but then hands the UnitEmitter
// off to HHBBC to produce a WholeProgramInput key and value. This is
// for when we are going to run HHBBC.
struct ParseForHHBBCJob {
static std::string name() { return "hphpc-parse-for-hhbbc"; }
static void init(const Package::Config& config,
const HHBBC::Config& hhbbcConfig,
Package::FileMetaVec meta) {
Package::parseInit(config, std::move(meta));
HHBBC::options = hhbbcConfig.o;
hhbbcConfig.gd.load(true);
}
static std::tuple<Package::ParseMetaVec, std::vector<WPI::Key>> fini() {
return std::make_tuple(Package::parseFini(), std::move(s_inputKeys));
}
static Variadic<WPI::Value> run(const std::string& contents,
const RepoOptionsFlags& flags,
Variadic<Package::UnitDecls> decls) {
auto wrapper = Package::parseRun(contents, flags, std::move(decls.vals));
if (!wrapper.m_ue) return {};
std::vector<WPI::Value> values;
for (auto& [key, value] : WPI::make(std::move(wrapper.m_ue))) {
s_inputKeys.emplace_back(std::move(key));
values.emplace_back(std::move(value));
}
return Variadic<WPI::Value>{std::move(values)};
}
static std::vector<WPI::Key> s_inputKeys;
};
std::vector<WPI::Key> ParseForHHBBCJob::s_inputKeys;
Job<ParseJob> s_parseJob;
Job<ParseForHHBBCJob> s_parseForHHBBCJob;
// A ParsedFile owns all the HHBC state associated with a parsed source
// file before we have decided to add it to the Program.
struct ParsedFile {
explicit ParsedFile(Package::ParseMeta m)
: parseMeta(std::move(m))
{}
ParsedFile(Package::ParseMeta m, Ref<UnitEmitterSerdeWrapper> w)
: parseMeta(std::move(m)), ueRef(std::move(w))
{}
Package::ParseMeta parseMeta;
Optional<Ref<UnitEmitterSerdeWrapper>> ueRef;
std::vector<std::pair<WPI::Key, Ref<WPI::Value>>> hhbbcInputs;
};
using ParsedFiles = folly_concurrent_hash_map_simd<
std::string,
std::unique_ptr<ParsedFile>
>;
///////////////////////////////////////////////////////////////////////////////
bool process(CompilerOptions &po) {
#ifndef _MSC_VER
LightProcess::Initialize(RuntimeOption::LightProcessFilePrefix,
RuntimeOption::LightProcessCount,
RuntimeOption::EvalRecordSubprocessTimes,
{});
#endif
setCoredumps(po);
register_process_init();
StructuredLogEntry sample;
sample.setStr("debug", debug ? "true" : "false");
sample.setStr("use_case", Option::ExternWorkerUseCase);
sample.setStr("features_file", Option::ExternWorkerFeaturesFile);
sample.setInt("use_rich_client", Option::ExternWorkerUseRichClient);
sample.setInt("use_zippy_rich_client",
Option::ExternWorkerUseZippyRichClient);
sample.setInt("use_p2p", Option::ExternWorkerUseP2P);
sample.setInt("cas_connection_count", Option::ExternWorkerCasConnectionCount);
sample.setInt("engine_connection_count", Option::ExternWorkerEngineConnectionCount);
sample.setInt("ac_connection_count", Option::ExternWorkerAcConnectionCount);
sample.setInt("force_subprocess", Option::ExternWorkerForceSubprocess);
sample.setInt("use_exec_cache", Option::ExternWorkerUseExecCache);
sample.setInt("timeout_secs", Option::ExternWorkerTimeoutSecs);
sample.setInt("cleanup", Option::ExternWorkerCleanup);
sample.setInt("throttle_retries", Option::ExternWorkerThrottleRetries);
sample.setInt("throttle_base_wait_ms",
Option::ExternWorkerThrottleBaseWaitMSecs);
sample.setStr("working_dir", Option::ExternWorkerWorkingDir);
sample.setInt("parser_group_size", Option::ParserGroupSize);
sample.setInt("parser_dir_group_size_limit", Option::ParserDirGroupSizeLimit);
sample.setInt("parser_thread_count", Option::ParserThreadCount);
sample.setInt("parser_optimistic_store", Option::ParserOptimisticStore);
sample.setInt("parser_async_cleanup", Option::ParserAsyncCleanup);
sample.setStr("push_phases", po.push_phases);
sample.setStr("matched_overrides", po.matched_overrides);
sample.setStr("use_hphpc", "true");
sample.setStr("use_hhbbc", RO::EvalUseHHBBC ? "true" : "false");
// Track the unit-emitters created for system during
// hphp_process_init().
SystemLib::keepRegisteredUnitEmitters(true);
hphp_process_init();
SCOPE_EXIT { hphp_process_exit(); };
SystemLib::keepRegisteredUnitEmitters(false);
auto const outputFile = po.outputDir + "/hhvm.hhbc";
unlink(outputFile.c_str());
auto executor = std::make_unique<coro::TicketExecutor>(
"HPHPcWorker",
0,
size_t(Option::ParserThreadCount <= 0 ? 1 : Option::ParserThreadCount),
[] {
hphp_thread_init();
hphp_session_init(Treadmill::SessionKind::CompilerEmit);
},
[] {
hphp_context_exit();
hphp_session_exit();
hphp_thread_exit();
},
std::chrono::minutes{15}
);
auto client =
std::make_unique<Client>(executor->sticky(), makeExternWorkerOptions(po));
sample.setStr("extern_worker_impl", client->implName());
sample.setStr("extern_worker_session", client->session());
auto index = computeIndex(po, sample, *executor, *client);
if (!index) return false;
// Always used, but we can clear it early to save memory.
Optional<SymbolSets> unique;
unique.emplace();
// HHBBC specific state (if we're going to run it).
Optional<WPI> hhbbcInputs;
Optional<coro::AsyncValue<Ref<HHBBC::Config>>> hhbbcConfig;
if (RO::EvalUseHHBBC) {
hhbbcInputs.emplace();
// We want to do this as early as possible
hhbbcConfig.emplace(
[&client] () {
return client->store(HHBBC::Config::get(getGlobalData()));
},
executor->sticky()
);
}
hphp_fast_set<const StringData*> moduleInDeployment;
if (!RO::EvalActiveDeployment.empty()) {
// Many files will be in the same module, so it is better to precompute
// a mapping of whether a given module is in the current deployment
auto const& packageInfo =
RepoOptions::forFile(po.repoOptionsDir).packageInfo();
auto const it = packageInfo.deployments().find(RO::EvalActiveDeployment);
if (it == end(packageInfo.deployments())) {
Logger::FError("The active deployment is set to {}; "
"however, it is not defined in the {}/{} file",
RO::EvalActiveDeployment,
po.repoOptionsDir,
kPackagesToml);
return false;
}
moduleInDeployment.reserve(index->modules.size());
for (auto const& [module, _] : index->modules) {
assertx(!moduleInDeployment.contains(module));
if (packageInfo.moduleInDeployment(module,
it->second,
DeployKind::Hard)) {
moduleInDeployment.insert(module);
}
}
}
Optional<RepoAutoloadMapBuilder> autoload;
Optional<RepoFileBuilder> repo;
std::atomic<uint32_t> nextSn{0};
std::atomic<size_t> numUnits{0};
std::mutex repoLock;
// Emit a fully processed unit (either processed by HHBBC or not).
auto const emitUnit = [&] (std::unique_ptr<UnitEmitter> ue) {
assertx(ue);
assertx(Option::GenerateBinaryHHBC ||
Option::GenerateTextHHBC ||
Option::GenerateHhasHHBC);
if (Option::GenerateTextHHBC || Option::GenerateHhasHHBC) {
genText(*ue, po.outputDir);
}
if (!Option::GenerateBinaryHHBC) return;
++numUnits;
if (!RO::EvalUseHHBBC) {
// HHBBC assigns m_sn and the SHA1, but we have to do it ourselves
// if we're not running it.
auto const sn = nextSn++;
ue->m_symbol_refs.clear();
ue->m_sn = sn;
ue->setSha1(SHA1 { sn });
unique->add(*ue);
}
autoload->addUnit(*ue);
RepoFileBuilder::EncodedUE encoded{*ue};
std::scoped_lock<std::mutex> _{repoLock};
repo->add(encoded);
};
// This will contain all files eligible to be in the program: input files
// and all ondemand-eligible files, except files excluded by CLI options.
auto parsedFiles = std::make_unique<ParsedFiles>();
// Process unit-emitters produced locally (usually systemlib stuff).
auto const emitLocalUnit = [&] (Package::UEVec ues) -> coro::Task<void> {
if (RO::EvalUseHHBBC) {
// If we're using HHBBC, turn them into WholeProgramInput
// key/values (after checking uniqueness), upload the values,
// and store them in the WholeProgramInput.
std::vector<WPI::Key> keys;
std::vector<WPI::Value> values;
for (auto& ue : ues) {
unique->add(*ue);
for (auto& [key, value] : WPI::make(std::move(ue))) {
keys.emplace_back(std::move(key));
values.emplace_back(std::move(value));
}
}
if (keys.empty()) HPHP_CORO_RETURN_VOID;
auto valueRefs = HPHP_CORO_AWAIT(client->storeMulti(std::move(values)));
auto const numKeys = keys.size();
assertx(valueRefs.size() == numKeys);
for (size_t i = 0; i < numKeys; ++i) {
hhbbcInputs->add(std::move(keys[i]), std::move(valueRefs[i]));
}
HPHP_CORO_RETURN_VOID;
}
// Otherwise just emit it
for (auto& ue : ues) emitUnit(std::move(ue));
HPHP_CORO_RETURN_VOID;
};
// Parse a group of files remotely
auto const parseRemoteUnit = [&] (const Ref<Package::Config>& config,
Ref<Package::FileMetaVec> fileMetas,
std::vector<Package::FileData> files,
Client::ExecMetadata metadata)
-> coro::Task<Package::ParseMetaVec> {
if (RO::EvalUseHHBBC) {
// Run the HHBBC parse job, which produces WholeProgramInput
// key/values.
auto hhbbcConfigRef = HPHP_CORO_AWAIT(hhbbcConfig->getCopy());
auto [inputValueRefs, metaRefs] = HPHP_CORO_AWAIT(
client->exec(
s_parseForHHBBCJob,
std::make_tuple(
config,
std::move(hhbbcConfigRef),
std::move(fileMetas)
),
std::move(files),
std::move(metadata)
)
);
// The parse metadata and the keys are loaded, but the values
// are kept as Refs.
auto [parseMetas, inputKeys] =
HPHP_CORO_AWAIT(client->load(std::move(metaRefs)));
// Stop now if the index contains any missing decls.
// parseRun() will retry this job with additional inputs.
if (index->containsAnyMissing(parseMetas)) {
HPHP_CORO_MOVE_RETURN(parseMetas);
}
always_assert(parseMetas.size() == inputValueRefs.size());
auto const numKeys = inputKeys.size();
size_t keyIdx = 0;
for (size_t i = 0, n = parseMetas.size(); i < n; i++) {
auto& p = parseMetas[i];
p.m_missing = Package::DeclNames{}; // done with this list now.
if (!p.m_filepath) continue;
auto& valueRefs = inputValueRefs[i];
auto filename = p.m_filepath->toCppString();
auto pf = std::make_unique<ParsedFile>(std::move(p));
pf->hhbbcInputs.reserve(valueRefs.size());
for (auto& r : valueRefs) {
always_assert(keyIdx < numKeys);
pf->hhbbcInputs.emplace_back(
std::move(inputKeys[keyIdx]), std::move(r)
);
++keyIdx;
}
parsedFiles->emplace(filename, std::move(pf));
}
// Indicate we're done by returning an empty vec.
HPHP_CORO_RETURN(Package::ParseMetaVec{});
}
// Otherwise, do a "normal" (non-HHBBC parse job), load the
// unit-emitters and parse metadata, and emit the unit-emitters.
auto [ueRefs, metaRefs] = HPHP_CORO_AWAIT(
client->exec(
s_parseJob,
std::make_tuple(config, std::move(fileMetas)),
std::move(files),
std::move(metadata)
)
);
auto parseMetas = HPHP_CORO_AWAIT(client->load(std::move(metaRefs)));
// Stop now if the index contains any missing decls.
// parseRun() will retry this job with additional inputs.
if (index->containsAnyMissing(parseMetas)) {
HPHP_CORO_MOVE_RETURN(parseMetas);
}
always_assert(parseMetas.size() == ueRefs.size());
for (size_t i = 0, n = parseMetas.size(); i < n; i++) {
auto& p = parseMetas[i];
p.m_missing = Package::DeclNames{}; // done with this list now.
if (!p.m_filepath) continue;
auto filename = p.m_filepath->toCppString();
auto pf = std::make_unique<ParsedFile>(
std::move(p), std::move(ueRefs[i])
);
parsedFiles->emplace(filename, std::move(pf));
}
// Indicate we're done by returning an empty vec.
HPHP_CORO_RETURN(Package::ParseMetaVec{});
};
// Emit a group of files that were parsed remotely
auto const emitRemoteUnit = [&] (
const std::vector<std::filesystem::path>& rpaths
) -> coro::Task<Package::EmitCallBackResult> {
Package::ParseMetaVec parseMetas;
Package::ParseMetaItemsToSkipSet itemsToSkip;
auto const shouldIncludeInBuild = [&] (const Package::ParseMeta& p) {
if (RO::EvalActiveDeployment.empty()) return true;
// If the unit defines any modules, then it is always included
if (!p.m_definitions.m_modules.empty()) return true;
return p.m_module_use && moduleInDeployment.contains(p.m_module_use);
};
if (RO::EvalUseHHBBC) {
// Retrieve HHBBC WPI (Key, Ref<Value>) pairs that were already parsed.
// No Async I/O is necessary in this case.
for (size_t i = 0, n = rpaths.size(); i < n; ++i) {
auto& rpath = rpaths[i];
auto it = parsedFiles->find(rpath.native());
if (it == parsedFiles->end()) {
// If you see this error in a test case, add a line to test.php.hphp_opts:
// --inputs=hphp/path/to/file.inc
Package::ParseMeta bad;
bad.m_abort = folly::sformat("Unknown include file: {}\n", rpath.native());
parseMetas.emplace_back(std::move(bad));
continue;
}
auto& pf = it->second;
parseMetas.emplace_back(std::move(pf->parseMeta));
auto& p = parseMetas.back();
if (!p.m_filepath) continue;
if (!shouldIncludeInBuild(p)) {
Logger::FVerbose("Dropping {} from the repo build because module {} is "
"not part of {} deployment",
p.m_filepath,
p.m_module_use ? p.m_module_use->data() : "top-level",
RO::EvalActiveDeployment);
itemsToSkip.insert(i);
continue;
}
// We don't have unit-emitters to do uniqueness checking, but
// the parse metadata has the definitions we can use instead.
unique->add(p.m_definitions, p.m_filepath);
auto inputs = std::move(pf->hhbbcInputs);
for (auto& e : inputs) {
hhbbcInputs->add(std::move(e.first), std::move(e.second));
}
}
HPHP_CORO_RETURN(std::make_pair(std::move(parseMetas),
std::move(itemsToSkip)));
}
// Otherwise, retrieve ParseMeta and load unit-emitters from a normal
// ParseJob, then emit the unit-emitters.
std::vector<Ref<UnitEmitterSerdeWrapper>> ueRefs;
ueRefs.reserve(rpaths.size());
for (size_t i = 0, n = rpaths.size(); i < n; ++i) {
auto& rpath = rpaths[i];
auto it = parsedFiles->find(rpath);
if (it == parsedFiles->end()) {
// If you see this error in a test case, add a line to test.php.hphp_opts:
// --inputs=hphp/path/to/file.inc
Package::ParseMeta bad;
bad.m_abort = folly::sformat("Unknown include file: {}", rpath.native());
parseMetas.emplace_back(std::move(bad));
continue;
}
auto& pf = it->second;
auto& p = pf->parseMeta;
if (!shouldIncludeInBuild(p)) {
Logger::FVerbose("Dropping {} from the repo build because module {} is "
"not part of {} deployment",
p.m_filepath,
p.m_module_use ? p.m_module_use->data() : "top-level",
RO::EvalActiveDeployment);
itemsToSkip.insert(i);
continue;
}
parseMetas.emplace_back(std::move(pf->parseMeta));
ueRefs.emplace_back(std::move(*pf->ueRef));
}
always_assert(parseMetas.size() == ueRefs.size());
auto ueWrappers = HPHP_CORO_AWAIT(
client->load(std::move(ueRefs))
);
for (auto& wrapper : ueWrappers) {
if (!wrapper.m_ue) continue;
emitUnit(std::move(wrapper.m_ue));
}
HPHP_CORO_RETURN(std::make_pair(std::move(parseMetas),
std::move(itemsToSkip)));
};
{
// Parsing phase: compile all input files and autoload files to bytecode.
// Deferring emit reduces wall time by parsing all files in parallel in
// one pass, then computing the full transitive closure of ondemand files
// in one go while emitting. Unreferenced ondemand files are discarded.
auto parsePackage = std::make_unique<Package>(
po.inputDir,
*executor,
*client,
po.coredump
);
Timer parseTimer(Timer::WallTime, "parsing");
// Parse the input files specified on the command line
addInputsToPackage(*parsePackage, po);
auto const& repoFlags = RepoOptions::forFile(po.repoOptionsDir).flags();
auto const& dirs = repoFlags.autoloadRepoBuildSearchDirs();
auto const queryStr = repoFlags.autoloadQuery();
if (!dirs.empty()) {
addListToPackage(*parsePackage, dirs, po);
} else if (!queryStr.empty()) {
// Parse all the files specified by Autoload.Query
if (!addAutoloadQueryToPackage(*parsePackage, queryStr)) return false;
}
if (!coro::wait(parsePackage->parse(*index, parseRemoteUnit))) return false;
logPhaseStats("parse", *parsePackage, *client, sample,
parseTimer.getMicroSeconds());
client->resetStats();
}
auto package = std::make_unique<Package>(
po.inputDir,
*executor,
*client,
po.coredump
);
{
// Emit phase: emit systemlib units, all input files, and the transitive
// closure of files referenced by symbolRefs.
Timer emitTimer(Timer::WallTime, "emit");
addInputsToPackage(*package, po);
if (!RO::EvalUseHHBBC && Option::GenerateBinaryHHBC) {
// Initialize autoload and repo for emitUnit() to populate
autoload.emplace();
repo.emplace(outputFile);
}
if (!coro::wait(package->emit(*index, emitRemoteUnit, emitLocalUnit,
po.ondemandEdgesPath))) {
return false;
}
// We didn't run any extern worker jobs, and in HHBBC mode we
// also didn't load anything. Most of these stats are zero but a
// few are still interesting.
logPhaseStats("emit", *package, *client, sample,
emitTimer.getMicroSeconds());
}
std::thread fileCache{
[&, package = std::move(package), parsedFiles = std::move(parsedFiles),
index = std::move(index)] () mutable {
{
Timer t{Timer::WallTime, "dropping unused files"};
parsedFiles.reset();
}
{
Timer t{Timer::WallTime, "dropping index"};
index.reset();
}
SCOPE_EXIT { package.reset(); };
if (po.filecache.empty()) return;
Timer _{Timer::WallTime, "saving file cache..."};
HphpSessionAndThread session{Treadmill::SessionKind::CompilerEmit};
package->writeVirtualFileSystem(po.filecache.c_str());
struct stat sb;
stat(po.filecache.c_str(), &sb);
Logger::Info("%" PRId64" MB %s saved",
(int64_t)sb.st_size/(1024*1024), po.filecache.c_str());
}
};
SCOPE_EXIT { fileCache.join(); };
std::thread asyncDispose;
SCOPE_EXIT { if (asyncDispose.joinable()) asyncDispose.join(); };
auto const dispose = [&] (std::unique_ptr<coro::TicketExecutor> e,
std::unique_ptr<Client> c) {
if (!Option::ParserAsyncCleanup) {
// If we don't want to cleanup asynchronously, do so now.
c.reset();
e.reset();
return;
}
// All the thread does is reset the unique_ptr to run the dtor.
asyncDispose = std::thread{
[e = std::move(e), c = std::move(c)] () mutable {
c.reset();
e.reset();
}
};
};
auto const logSample = [&] {
// Only log big builds.
if (numUnits >= RO::EvalHHBBCMinUnitsToLog) {
sample.force_init = true;
StructuredLog::log("hhvm_whole_program", sample);
}
return true;
};
auto const finish = [&] {
if (!Option::GenerateBinaryHHBC) return true;
Timer _{Timer::WallTime, "finalizing repo"};
auto const& packageInfo =
RepoOptions::forFile(po.repoOptionsDir).packageInfo();
repo->finish(getGlobalData(), *autoload, packageInfo);
return true;
};
if (!RO::EvalUseHHBBC) {
logSample();
dispose(std::move(executor), std::move(client));
return finish();
}
// We don't need these anymore, and since they can consume a lot of
// memory, free them before doing anything else.
unique.reset();
hhbbcConfig.reset();
assertx(!autoload.has_value());
assertx(!repo.has_value());
if (Option::GenerateBinaryHHBC) {
autoload.emplace();
repo.emplace(outputFile);
}
if (Option::ConstFoldFileBC) {
HHBBC::options.SourceRootForFileBC = RO::SourceRoot;
}
HHBBC::options.CoreDump = po.coredump;
Timer timer{Timer::WallTime, "running HHBBC"};
HphpSession session{Treadmill::SessionKind::HHBBC};
client->resetStats();
HHBBC::trace_time::register_client_stats(client->getStatsPtr());
HHBBC::whole_program(
std::move(*hhbbcInputs),
HHBBC::Config::get(getGlobalData()),
std::move(executor),
std::move(client),
emitUnit,
dispose,
&sample,
Option::ParserThreadCount > 0 ? Option::ParserThreadCount : 0
);
finish();
sample.setInt("hhbbc_micros", timer.getMicroSeconds());
logSample();
return true;
}
///////////////////////////////////////////////////////////////////////////////
}
///////////////////////////////////////////////////////////////////////////////
int compiler_main(int argc, char **argv) {
try {
rds::local::init();
SCOPE_EXIT { rds::local::fini(); };
CompilerOptions po;
auto const ret = prepareOptions(po, argc, argv);
if (ret == 1) return 0; // --help
if (ret != 0) return ret; // command line error
Timer totalTimer(Timer::WallTime, "running hphp");
always_assert_flog(
mkdir(po.outputDir.c_str(), 0777) == 0 || errno == EEXIST,
"Unable to mkdir({}): {}",
po.outputDir.c_str(),
folly::errnoStr(errno)
);
if (!process(po)) {
Logger::Error("hphp failed");
return -1;
} else {
Logger::Info("all files saved in %s ...", po.outputDir.c_str());
return 0;
}
} catch (const Exception& e) {
Logger::Error("Exception: %s", e.getMessage().c_str());
} catch (const std::exception& e) {
Logger::Error("std::exception: %s", e.what());
} catch (...) {
Logger::Error("(non-standard exception \"%s\" was thrown)",
current_exception_name().c_str());
}
return -1;
}
///////////////////////////////////////////////////////////////////////////////
} |
C/C++ | hhvm/hphp/compiler/compiler.h | /*
+----------------------------------------------------------------------+
| HipHop for PHP |
+----------------------------------------------------------------------+
| Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com) |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| [email protected] so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
*/
#pragma once
namespace HPHP {
int compiler_main(int argc, char** argv);
} |
C++ | hhvm/hphp/compiler/decl-provider.cpp | /*
+----------------------------------------------------------------------+
| HipHop for PHP |
+----------------------------------------------------------------------+
| Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com) |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| [email protected] so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
*/
#include "hphp/compiler/decl-provider.h"
namespace HPHP {
BatchDeclProvider::BatchDeclProvider(
const std::vector<Package::UnitDecls>& decls
) {
for (auto const& unit_decls : decls) {
assertx(!unit_decls.symbols.empty());
auto const& symbols = unit_decls.symbols;
auto const& data = unit_decls.decls;
for (auto name : symbols.types) m_types.emplace(name, data);
for (auto name : symbols.funcs) m_funcs.emplace(name, data);
for (auto name : symbols.constants) m_constants.emplace(name, data);
for (auto name : symbols.modules) m_modules.emplace(name, data);
}
}
namespace {
template<typename T, typename V> hackc::ExternalDeclProviderResult find(
std::string_view symbol, const T& map, V& list
) {
// TODO(T110866581): symbol should be normalized by hackc
std::string_view normalized(normalizeNS(symbol));
auto interned = makeStaticString(normalized);
auto const it = map.find(interned);
if (it != map.end()) {
return hackc::ExternalDeclProviderResult::from_string(it->second);
}
list.emplace_back(interned);
return hackc::ExternalDeclProviderResult::missing();
}
}
hackc::ExternalDeclProviderResult
BatchDeclProvider::getType(std::string_view symbol, uint64_t) noexcept {
return find(symbol, m_types, m_missing.types);
}
hackc::ExternalDeclProviderResult
BatchDeclProvider::getFunc(std::string_view symbol) noexcept {
return find(symbol, m_funcs, m_missing.funcs);
}
hackc::ExternalDeclProviderResult
BatchDeclProvider::getConst(std::string_view symbol) noexcept {
return find(symbol, m_constants, m_missing.constants);
}
hackc::ExternalDeclProviderResult
BatchDeclProvider::getModule(std::string_view symbol) noexcept {
return find(symbol, m_modules, m_missing.modules);
}
void BatchDeclProvider::finish() {
// Dedup but preserve case so we can see all case mismatches.
auto dedup = [&](auto& names) {
std::sort(names.begin(), names.end());
names.erase(std::unique(names.begin(), names.end()), names.end());
};
dedup(m_missing.types);
dedup(m_missing.funcs);
dedup(m_missing.constants);
dedup(m_missing.modules);
}
} |
C/C++ | hhvm/hphp/compiler/decl-provider.h | /*
+----------------------------------------------------------------------+
| HipHop for PHP |
+----------------------------------------------------------------------+
| Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com) |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| [email protected] so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
*/
#pragma once
#include "hphp/hack/src/hackc/ffi_bridge/decl_provider.h"
#include "hphp/compiler/package.h"
namespace HPHP {
// A BatchDeclProvider is populated with a list of UnitDecls to make
// available to hackc. Each UnitDecl contains a list of symbols from
// that source unit and the serialized decls. These names are used
// to construct a unified local autoload map of all the available decls.
//
// When hackc requests a symbol we don't have in the local map,
// remember it in m_missing so hphpc can look it up in its UnitIndex,
// then retry hackc with additional UnitDecls.
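//
// A minimal usage sketch (illustrative only; Package::parseRun() in
// package.cpp is the real call site, and compile_unit() is assumed to be
// invoked as it is there):
//
//   BatchDeclProvider provider(decls); // decls: std::vector<Package::UnitDecls>
//   auto ue = compile_unit(content, path, sha1, nullptr,
//                          /* isSystemLib */ false, /* forDebuggerEval */ false,
//                          repoOptions, mode,
//                          RO::EvalEnableDecl ? &provider : nullptr);
//   provider.finish();
//   // provider.m_missing now holds any symbols hackc requested that were not
//   // in the local map; the caller can resolve them against its UnitIndex and
//   // retry with additional UnitDecls.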
struct BatchDeclProvider final: hackc::DeclProvider {
// Initialize provider from a list of UnitDecls. The given decls must
// be live and unchanged for the lifetime of this BatchDeclProvider.
explicit BatchDeclProvider(const std::vector<Package::UnitDecls>&);
hackc::ExternalDeclProviderResult
getType(std::string_view symbol, uint64_t) noexcept override;
hackc::ExternalDeclProviderResult
getFunc(std::string_view symbol) noexcept override;
hackc::ExternalDeclProviderResult
getConst(std::string_view symbol) noexcept override;
hackc::ExternalDeclProviderResult
getModule(std::string_view symbol) noexcept override;
void finish();
// Maps from symbol name to the serialized decls inside the UnitDecls given
// in the constructor.
using Map = hphp_fast_map<const StringData*, const std::string&>;
using IMap = hphp_fast_map<
const StringData*, const std::string&, string_data_hash, string_data_isame
>;
// Symbols requested but not found
Package::DeclNames m_missing;
IMap m_types;
IMap m_funcs;
Map m_constants;
Map m_modules;
};
} |
C++ | hhvm/hphp/compiler/option.cpp | /*
+----------------------------------------------------------------------+
| HipHop for PHP |
+----------------------------------------------------------------------+
| Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com) |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| [email protected] so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
*/
#include "hphp/compiler/option.h"
#include <algorithm>
#include <map>
#include <set>
#include <string>
#include <vector>
#include "hphp/runtime/base/config.h"
#include "hphp/runtime/base/ini-setting.h"
#include "hphp/runtime/base/preg.h"
#include "hphp/runtime/base/variable-unserializer.h"
#include "hphp/util/hdf.h"
#include "hphp/util/logger.h"
#include "hphp/util/process.h"
namespace HPHP {
///////////////////////////////////////////////////////////////////////////////
hphp_fast_string_set Option::PackageExcludeDirs;
hphp_fast_string_set Option::PackageExcludeFiles;
hphp_fast_string_set Option::PackageExcludePatterns;
hphp_fast_string_set Option::PackageExcludeStaticDirs;
hphp_fast_string_set Option::PackageExcludeStaticFiles;
hphp_fast_string_set Option::PackageExcludeStaticPatterns;
bool Option::CachePHPFile = false;
bool Option::ConstFoldFileBC = false;
bool Option::GenerateTextHHBC = false;
bool Option::GenerateHhasHHBC = false;
bool Option::GenerateBinaryHHBC = false;
int Option::ParserThreadCount = 0;
// These default sizes were selected by experimentation
const int Option::kDefaultParserGroupSize = 500;
const int Option::kDefaultParserDirGroupSizeLimit = 50000;
int Option::ParserGroupSize = kDefaultParserGroupSize;
int Option::ParserDirGroupSizeLimit = kDefaultParserDirGroupSizeLimit;
bool Option::ParserAsyncCleanup = true;
bool Option::ParserOptimisticStore = true;
bool Option::ForceEnableSymbolRefs = false;
std::string Option::ExternWorkerUseCase;
std::string Option::ExternWorkerFeaturesFile;
bool Option::ExternWorkerForceSubprocess = false;
int Option::ExternWorkerTimeoutSecs = 0;
bool Option::ExternWorkerUseExecCache = true;
bool Option::ExternWorkerCleanup = true;
bool Option::ExternWorkerUseRichClient = true;
bool Option::ExternWorkerUseZippyRichClient = true;
bool Option::ExternWorkerUseP2P = false;
int Option::ExternWorkerCasConnectionCount = 16;
int Option::ExternWorkerEngineConnectionCount = 6;
int Option::ExternWorkerAcConnectionCount = 16;
bool Option::ExternWorkerVerboseLogging = false;
std::string Option::ExternWorkerWorkingDir;
int Option::ExternWorkerThrottleRetries = -1;
int Option::ExternWorkerThrottleBaseWaitMSecs = -1;
///////////////////////////////////////////////////////////////////////////////
// load from HDF file
void Option::LoadRootHdf(const IniSetting::Map& ini,
const Hdf &roots,
const std::string& name,
std::map<std::string, std::string> &map) {
auto root_map_callback = [&](const IniSetting::Map& ini_rm, const Hdf& hdf_rm,
const std::string& /*ini_rm_key*/) {
map[Config::GetString(ini_rm, hdf_rm, "root", "", false)] =
Config::GetString(ini_rm, hdf_rm, "path", "", false);
};
Config::Iterate(root_map_callback, ini, roots, name);
}
void Option::Load(const IniSetting::Map& ini, Hdf &config) {
LoadRootHdf(ini, config, "IncludeRoots", RuntimeOption::IncludeRoots);
Config::Bind(PackageExcludeDirs, ini, config, "PackageExcludeDirs");
Config::Bind(PackageExcludeFiles, ini, config, "PackageExcludeFiles");
Config::Bind(PackageExcludePatterns, ini, config, "PackageExcludePatterns");
Config::Bind(PackageExcludeStaticDirs, ini,
config, "PackageExcludeStaticDirs");
Config::Bind(PackageExcludeStaticFiles, ini,
config, "PackageExcludeStaticFiles");
Config::Bind(PackageExcludeStaticPatterns, ini,
config, "PackageExcludeStaticPatterns");
Config::Bind(CachePHPFile, ini, config, "CachePHPFile");
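// Each ConstantFunctions entry has the form "<function>|<serialized value>",
// where the value uses HHVM's internal serialization format. For example, a
// hypothetical entry "is_debug_build|b:0;" would register the constant value
// false for is_debug_build().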
for (auto& str : Config::GetStrVector(ini, config, "ConstantFunctions")) {
std::string func;
std::string value;
if (folly::split('|', str, func, value)) {
VariableUnserializer uns{
value.data(), value.size(),
VariableUnserializer::Type::Internal,
/* allowUnknownSerializableClass = */ false,
empty_dict_array()
};
try {
auto v = uns.unserialize();
v.setEvalScalar();
RuntimeOption::ConstantFunctions[func] = *v.asTypedValue();
continue;
} catch (const Exception& e) {
// fall through and log
}
}
Logger::FError("Invalid ConstantFunction: '{}'\n", str);
}
{
// Repo
Config::Bind(RuntimeOption::RepoDebugInfo,
ini, config, "Repo.DebugInfo",
RuntimeOption::RepoDebugInfo);
}
Config::Bind(RuntimeOption::EvalCheckPropTypeHints, ini, config,
"CheckPropTypeHints", RuntimeOption::EvalCheckPropTypeHints);
Config::Bind(RuntimeOption::EvalJitEnableRenameFunction,
ini, config, "JitEnableRenameFunction",
RuntimeOption::EvalJitEnableRenameFunction);
Config::Bind(RuntimeOption::EvalHackArrCompatSerializeNotices,
ini, config, "HackArrCompatSerializeNotices",
RuntimeOption::EvalHackArrCompatSerializeNotices);
Config::Bind(RuntimeOption::EvalForbidDynamicCallsToFunc,
ini, config, "ForbidDynamicCallsToFunc",
RuntimeOption::EvalForbidDynamicCallsToFunc);
Config::Bind(RuntimeOption::EvalForbidDynamicCallsToClsMeth,
ini, config, "ForbidDynamicCallsToClsMeth",
RuntimeOption::EvalForbidDynamicCallsToClsMeth);
Config::Bind(RuntimeOption::EvalForbidDynamicCallsToInstMeth,
ini, config, "ForbidDynamicCallsToInstMeth",
RuntimeOption::EvalForbidDynamicCallsToInstMeth);
Config::Bind(RuntimeOption::EvalForbidDynamicConstructs,
ini, config, "ForbidDynamicConstructs",
RuntimeOption::EvalForbidDynamicConstructs);
Config::Bind(RuntimeOption::EvalForbidDynamicCallsWithAttr,
ini, config, "ForbidDynamicCallsWithAttr",
RuntimeOption::EvalForbidDynamicCallsWithAttr);
Config::Bind(RuntimeOption::EvalLogKnownMethodsAsDynamicCalls,
ini, config, "LogKnownMethodsAsDynamicCalls",
RuntimeOption::EvalLogKnownMethodsAsDynamicCalls);
Config::Bind(RuntimeOption::EvalNoticeOnBuiltinDynamicCalls,
ini, config, "NoticeOnBuiltinDynamicCalls",
RuntimeOption::EvalNoticeOnBuiltinDynamicCalls);
Config::Bind(RuntimeOption::EvalAbortBuildOnVerifyError,
ini, config, "AbortBuildOnVerifyError",
RuntimeOption::EvalAbortBuildOnVerifyError);
{
// Hack
Config::Bind(RuntimeOption::StrictArrayFillKeys, ini, config,
"Hack.Lang.StrictArrayFillKeys",
RuntimeOption::StrictArrayFillKeys);
}
Config::Bind(RuntimeOption::EnableXHP, ini, config, "EnableXHP",
RuntimeOption::EnableXHP);
Config::Bind(ParserThreadCount, ini, config, "ParserThreadCount", 0);
if (ParserThreadCount <= 0) {
ParserThreadCount = Process::GetCPUCount();
}
Config::Bind(ForceEnableSymbolRefs, ini, config,
"ForceEnableSymbolRefs", false);
Config::Bind(RuntimeOption::EvalGenerateDocComments, ini, config,
"GenerateDocComments", RuntimeOption::EvalGenerateDocComments);
Config::Bind(RuntimeOption::EvalUseHHBBC, ini, config, "UseHHBBC",
RuntimeOption::EvalUseHHBBC);
Config::Bind(ParserGroupSize, ini, config,
"ParserGroupSize", kDefaultParserGroupSize);
Config::Bind(ParserDirGroupSizeLimit, ini, config,
"ParserDirGroupSizeLimit", kDefaultParserDirGroupSizeLimit);
if (ParserGroupSize <= 0) ParserGroupSize = kDefaultParserGroupSize;
if (ParserDirGroupSizeLimit <= 0) {
ParserDirGroupSizeLimit = kDefaultParserDirGroupSizeLimit;
}
Config::Bind(ConstFoldFileBC, ini, config,
"ConstFoldFileBC", ConstFoldFileBC);
Config::Bind(ParserAsyncCleanup, ini, config,
"ParserAsyncCleanup", ParserAsyncCleanup);
Config::Bind(ParserOptimisticStore, ini, config,
"ParserOptimisticStore", ParserOptimisticStore);
Config::Bind(ExternWorkerUseCase, ini, config, "ExternWorker.UseCase",
ExternWorkerUseCase);
Config::Bind(ExternWorkerFeaturesFile, ini, config,
"ExternWorker.FeaturesFile", ExternWorkerFeaturesFile);
// Kill switch for extern-worker. Disable all implementations except
// the builtin one.
Config::Bind(ExternWorkerForceSubprocess, ini, config,
"ExternWorker.ForceSubprocess", ExternWorkerForceSubprocess);
Config::Bind(ExternWorkerTimeoutSecs, ini, config, "ExternWorker.TimeoutSecs",
ExternWorkerTimeoutSecs);
Config::Bind(ExternWorkerUseExecCache, ini, config,
"ExternWorker.UseExecCache", ExternWorkerUseExecCache);
Config::Bind(ExternWorkerCleanup, ini, config, "ExternWorker.Cleanup",
ExternWorkerCleanup);
Config::Bind(ExternWorkerWorkingDir, ini, config, "ExternWorker.WorkingDir",
ExternWorkerWorkingDir);
Config::Bind(ExternWorkerUseRichClient, ini, config,
"ExternWorker.UseRichClient", ExternWorkerUseRichClient);
Config::Bind(ExternWorkerUseZippyRichClient, ini, config,
"ExternWorker.UseZippyRichClient",
ExternWorkerUseZippyRichClient);
Config::Bind(ExternWorkerUseP2P, ini, config, "ExternWorker.UseP2P",
ExternWorkerUseP2P);
Config::Bind(ExternWorkerCasConnectionCount, ini, config,
"ExternWorker.CasConnectionCount",
ExternWorkerCasConnectionCount);
Config::Bind(ExternWorkerEngineConnectionCount, ini, config,
"ExternWorker.EngineConnectionCount",
ExternWorkerEngineConnectionCount);
Config::Bind(ExternWorkerAcConnectionCount, ini, config,
"ExternWorker.AcConnectionCount",
ExternWorkerAcConnectionCount);
Config::Bind(ExternWorkerVerboseLogging, ini, config,
"ExternWorker.VerboseLogging",
ExternWorkerVerboseLogging);
Config::Bind(ExternWorkerThrottleRetries, ini, config,
"ExternWorker.ThrottleRetries",
ExternWorkerThrottleRetries);
Config::Bind(ExternWorkerThrottleBaseWaitMSecs, ini, config,
"ExternWorker.ThrottleBaseWaitMSecs",
ExternWorkerThrottleBaseWaitMSecs);
}
///////////////////////////////////////////////////////////////////////////////
bool Option::IsFileExcluded(const std::string& file,
const hphp_fast_string_set& patterns) {
String sfile(file.c_str(), file.size(), CopyString);
for (auto const& pattern : patterns) {
Variant matches;
Variant ret = preg_match(String(pattern.c_str(), pattern.size(),
CopyString), sfile, &matches);
if (ret.toInt64() > 0) {
return true;
}
}
return false;
}
void Option::FilterFiles(std::vector<std::string>& files,
const hphp_fast_string_set& patterns) {
auto const it = std::remove_if(
files.begin(),
files.end(),
[&](const std::string& file) { return IsFileExcluded(file, patterns); });
files.erase(it, files.end());
}
//////////////////////////////////////////////////////////////////////
} |
C/C++ | hhvm/hphp/compiler/option.h | /*
+----------------------------------------------------------------------+
| HipHop for PHP |
+----------------------------------------------------------------------+
| Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com) |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| [email protected] so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
*/
#pragma once
#include <map>
#include <string>
#include <vector>
#include "hphp/util/hash-map.h"
#include "hphp/util/hash-set.h"
namespace HPHP {
///////////////////////////////////////////////////////////////////////////////
struct Hdf;
struct IniSettingMap;
struct Option {
/**
* Load options from different sources.
*/
static void Load(const IniSettingMap& ini, Hdf &config);
/**
* File path patterns for excluding files from a package scan of programs.
*/
static hphp_fast_string_set PackageExcludeDirs;
static hphp_fast_string_set PackageExcludeFiles;
static hphp_fast_string_set PackageExcludePatterns;
static hphp_fast_string_set PackageExcludeStaticFiles;
static hphp_fast_string_set PackageExcludeStaticDirs;
static hphp_fast_string_set PackageExcludeStaticPatterns;
static bool IsFileExcluded(const std::string& file,
const hphp_fast_string_set& patterns);
static void FilterFiles(std::vector<std::string>& files,
const hphp_fast_string_set& patterns);
/**
* Whether to store PHP source files in static file cache.
*/
static bool CachePHPFile;
/*
* If true, HHBBC will const fold File and Dir bytecodes to static
* strings (using SourceRoot).
*/
static bool ConstFoldFileBC;
/*
* Whether to generate HHBC, HHAS, or a textual dump of HHBC
*/
static bool GenerateTextHHBC;
static bool GenerateHhasHHBC;
static bool GenerateBinaryHHBC;
/*
* Number of threads to use for parsing
*/
static int ParserThreadCount;
/*
* The number of files (on average) we'll group together for a
* worker during parsing. Files in directories (including sub-dirs)
* with more than ParserDirGroupSizeLimit files won't be grouped
* with files outside of those directories.
*/
static int ParserGroupSize;
static int ParserDirGroupSizeLimit;
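/*
 * Illustrative example: with the defaults (ParserGroupSize = 500,
 * ParserDirGroupSizeLimit = 50000), a sub-tree is only carved into its own
 * groups once at least 50,000 ungrouped files have accumulated under it;
 * those files are then hashed into roughly 50000 / 500 = 100 buckets, so
 * they never share a group with files from other sub-trees.
 */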
/*
* If true, we'll free async state (which can take a while) in
* another thread asynchronously. If false, it will be done
* synchronously.
*/
static bool ParserAsyncCleanup;
/*
* If true, as an optimization, we'll assume the files have already
* been stored with extern_worker previously and proceed as if they
* had. If not (so execution fails), we'll then store them and try
* again. This avoids doing a lot of redundant stores in the common
* case.
*/
static bool ParserOptimisticStore;
/*
* When an ActiveDeployment is specified, we disable SymbolRefs
* logic and only compile the requested files. This option will
* be used to force include SymbolRefs even when ActiveDeployment
* is specified.
*/
static bool ForceEnableSymbolRefs;
/* Config passed to extern_worker::Client */
static std::string ExternWorkerUseCase;
static std::string ExternWorkerFeaturesFile;
static bool ExternWorkerForceSubprocess;
static int ExternWorkerTimeoutSecs;
static bool ExternWorkerUseExecCache;
static bool ExternWorkerCleanup;
static bool ExternWorkerUseRichClient;
static bool ExternWorkerUseZippyRichClient;
static bool ExternWorkerUseP2P;
static int ExternWorkerCasConnectionCount;
static int ExternWorkerEngineConnectionCount;
static int ExternWorkerAcConnectionCount;
static bool ExternWorkerVerboseLogging;
static int ExternWorkerThrottleRetries;
static int ExternWorkerThrottleBaseWaitMSecs;
static std::string ExternWorkerWorkingDir;
private:
static const int kDefaultParserGroupSize;
static const int kDefaultParserDirGroupSizeLimit;
static void LoadRootHdf(const IniSettingMap& ini, const Hdf &roots,
const std::string& name,
std::map<std::string, std::string> &map);
};
///////////////////////////////////////////////////////////////////////////////
} |
C++ | hhvm/hphp/compiler/package.cpp | /*
+----------------------------------------------------------------------+
| HipHop for PHP |
+----------------------------------------------------------------------+
| Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com) |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| [email protected] so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
*/
#include "hphp/compiler/package.h"
#include <exception>
#include <filesystem>
#include <fstream>
#include <map>
#include <memory>
#include <set>
#include <sys/stat.h>
#include <sys/types.h>
#include <utility>
#include <vector>
#include <folly/String.h>
#include <folly/portability/Dirent.h>
#include <folly/portability/Unistd.h>
#include "hphp/compiler/decl-provider.h"
#include "hphp/compiler/option.h"
#include "hphp/hack/src/hackc/ffi_bridge/compiler_ffi.rs.h"
#include "hphp/hhvm/process-init.h"
#include "hphp/runtime/base/execution-context.h"
#include "hphp/runtime/base/file-util-defs.h"
#include "hphp/runtime/base/file-util.h"
#include "hphp/runtime/base/program-functions.h"
#include "hphp/runtime/base/static-string-table.h"
#include "hphp/runtime/vm/as.h"
#include "hphp/runtime/vm/func-emitter.h"
#include "hphp/runtime/vm/preclass-emitter.h"
#include "hphp/runtime/vm/type-alias-emitter.h"
#include "hphp/runtime/vm/unit-emitter.h"
#include "hphp/runtime/vm/unit-parser.h"
#include "hphp/util/exception.h"
#include "hphp/util/extern-worker.h"
#include "hphp/util/hash.h"
#include "hphp/util/logger.h"
#include "hphp/util/match.h"
#include "hphp/util/process.h"
#include "hphp/util/timer.h"
#include "hphp/util/virtual-file-system.h"
#include "hphp/zend/zend-string.h"
using namespace HPHP;
using namespace extern_worker;
///////////////////////////////////////////////////////////////////////////////
const StaticString s_EntryPoint("__EntryPoint");
///////////////////////////////////////////////////////////////////////////////
Package::Package(const std::string& root,
coro::TicketExecutor& executor,
extern_worker::Client& client,
bool coredump)
: m_root{root}
, m_failed{false}
, m_total{0}
, m_executor{executor}
, m_client{client}
, m_config{
[this, coredump] { return m_client.store(Config::make(coredump)); },
m_executor.sticky()
}
, m_repoOptions{client}
{
}
void Package::addInputList(const std::string& listFileName) {
assert(!listFileName.empty());
auto const f = fopen(listFileName.c_str(), "r");
if (f == nullptr) {
throw Exception("Unable to open %s: %s", listFileName.c_str(),
folly::errnoStr(errno).c_str());
}
char fileName[PATH_MAX];
while (fgets(fileName, sizeof(fileName), f)) {
int len = strlen(fileName);
if (fileName[len - 1] == '\n') fileName[len - 1] = '\0';
len = strlen(fileName);
if (len) {
if (FileUtil::isDirSeparator(fileName[len - 1])) {
addDirectory(fileName);
} else {
addSourceFile(fileName);
}
}
}
fclose(f);
}
void Package::addStaticFile(const std::string& fileName) {
assert(!fileName.empty());
m_extraStaticFiles.insert(fileName);
}
void Package::addStaticDirectory(const std::string& path) {
m_staticDirectories.insert(path);
}
void Package::addDirectory(const std::string& path) {
m_directories.emplace(path);
}
void Package::addSourceFile(const std::string& fileName) {
if (fileName.empty()) return;
auto canonFileName =
FileUtil::canonicalize(String(fileName)).toCppString();
m_filesToParse.emplace(std::move(canonFileName), true);
}
void Package::writeVirtualFileSystem(const std::string& path) {
auto writer = VirtualFileSystemWriter(path);
for (auto const& dir : m_directories) {
std::vector<std::string> files;
FileUtil::find(files, m_root, dir, /* php */ false,
&Option::PackageExcludeStaticDirs,
&Option::PackageExcludeStaticFiles);
Option::FilterFiles(files, Option::PackageExcludeStaticPatterns);
for (auto& file : files) {
auto const rpath = file.substr(m_root.size());
if (writer.addFile(rpath.c_str(), file.c_str())) {
Logger::Verbose("saving %s", file.c_str());
}
}
}
for (auto const& dir : m_staticDirectories) {
std::vector<std::string> files;
FileUtil::find(files, m_root, dir, /* php */ false);
for (auto& file : files) {
auto const rpath = file.substr(m_root.size());
if (writer.addFile(rpath.c_str(), file.c_str())) {
Logger::Verbose("saving %s", file.c_str());
}
}
}
for (auto const& file : m_extraStaticFiles) {
auto const fullpath = m_root + file;
if (writer.addFile(file.c_str(), fullpath.c_str())) {
Logger::Verbose("saving %s", fullpath.c_str());
}
}
for (auto const& pair : m_discoveredStaticFiles) {
auto const file = pair.first.c_str();
const char *fullpath = pair.second.c_str();
if (fullpath[0]) {
if (writer.addFile(file, fullpath)) {
Logger::Verbose("saving %s", fullpath);
}
} else {
if (writer.addFileWithoutContent(file)) {
Logger::Verbose("saving %s", file);
}
}
}
writer.finish();
}
///////////////////////////////////////////////////////////////////////////////
namespace {
///////////////////////////////////////////////////////////////////////////////
std::unique_ptr<UnitEmitter>
createSymlinkWrapper(const std::string& fileName,
const std::string& targetPath,
std::unique_ptr<UnitEmitter> origUE) {
auto found = false;
std::ostringstream ss;
for (auto const& fe : origUE->fevec()) {
auto const& attrs = fe->userAttributes;
if (attrs.find(s_EntryPoint.get()) != attrs.end()) {
found = true;
std::string escapedName;
folly::cEscape(fe->name->toCppString(), escapedName);
ss << ".function{} [persistent "
"\"__EntryPoint\"(\"\"\"y:0:{}\"\"\")] (4,7) <\"\" N > "
"entrypoint$symlink$" << string_sha1(fileName) << "() {\n"
<< " String \"" << targetPath << "\"\n"
<< " ReqOnce\n"
<< " PopC\n"
<< " NullUninit\n"
<< " NullUninit\n"
<< " FCallFuncD <> 0 1 \"\" \"\" - \"\" \"" << escapedName << "\"\n"
<< " PopC\n"
<< " Null\n"
<< " RetC\n"
<< "}\n\n";
break;
}
}
if (!found) return nullptr;
auto const content = ss.str();
return assemble_string(
content.data(),
content.size(),
fileName.c_str(),
SHA1{string_sha1(content)},
nullptr,
origUE->m_packageInfo,
false
);
}
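// For illustration (hypothetical paths and names): given a symlink
// "bin/foo.hack" whose target "src/foo.hack" defines an __EntryPoint
// function main(), the wrapper assembled above is roughly:
//
//   .function{} [persistent "__EntryPoint"("""y:0:{}""")] (4,7) <"" N >
//       entrypoint$symlink$<sha1("bin/foo.hack")>() {
//     String "src/foo.hack"
//     ReqOnce
//     PopC
//     NullUninit
//     NullUninit
//     FCallFuncD <> 0 1 "" "" - "" "main"
//     PopC
//     Null
//     RetC
//   }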
///////////////////////////////////////////////////////////////////////////////
Package::FileMetaVec s_fileMetas;
Package::ParseMetaVec s_parseMetas;
Package::IndexMetaVec s_indexMetas;
size_t s_fileMetasIdx{0};
// Construct parse metadata for the given unit-emitter
UnitEmitterSerdeWrapper output(
std::unique_ptr<UnitEmitter> ue,
std::unique_ptr<Package::DeclNames> missing,
const StringData* filepath
) {
Package::ParseMeta meta;
if (missing) meta.m_missing = std::move(*missing);
if (!ue) {
meta.m_filepath = filepath;
s_parseMetas.emplace_back(std::move(meta));
return UnitEmitterSerdeWrapper{};
}
meta.m_symbol_refs = std::move(ue->m_symbol_refs);
meta.m_filepath = ue->m_filepath;
meta.m_module_use = ue->m_moduleName;
for (auto const pce : ue->preclasses()) {
if (pce->attrs() & AttrEnum) {
meta.m_definitions.m_enums.emplace_back(pce->name());
} else {
meta.m_definitions.m_classes.emplace_back(pce->name());
}
}
for (auto const& fe : ue->fevec()) {
if (fe->attrs & AttrIsMethCaller) {
meta.m_definitions.m_methCallers.emplace_back(fe->name);
} else {
meta.m_definitions.m_funcs.emplace_back(fe->name);
}
}
for (auto const& t : ue->typeAliases()) {
meta.m_definitions.m_typeAliases.emplace_back(t->name());
}
for (auto const& c : ue->constants()) {
meta.m_definitions.m_constants.emplace_back(c.name);
}
for (auto const& m : ue->modules()) {
meta.m_definitions.m_modules.emplace_back(m.name);
}
s_parseMetas.emplace_back(std::move(meta));
return std::move(ue);
}
// HHVM shutdown code shared by different Job types.
void finishJob() {
hphp_process_exit();
rds::local::fini();
}
///////////////////////////////////////////////////////////////////////////////
}
///////////////////////////////////////////////////////////////////////////////
void Package::parseInit(const Config& config, FileMetaVec meta) {
if (!config.CoreDump) {
struct rlimit rl{};
rl.rlim_cur = 0;
rl.rlim_max = 0;
setrlimit(RLIMIT_CORE, &rl);
}
rds::local::init();
Hdf hdf;
IniSetting::Map ini = IniSetting::Map::object;
RO::Load(ini, hdf);
config.apply();
Logger::LogLevel = Logger::LogError;
// Inhibit extensions and systemlib from being initialized. It
// takes a while and we don't need it.
register_process_init();
hphp_process_init(true);
// Don't use the unit emitter's caching here; we're relying on
// extern-worker to do that for us.
g_unit_emitter_cache_hook = nullptr;
s_fileMetas = std::move(meta);
s_fileMetasIdx = 0;
}
Package::ParseMetaVec Package::parseFini() {
assertx(s_fileMetasIdx == s_fileMetas.size());
assertx(s_parseMetas.size() == s_fileMetas.size());
finishJob();
return std::move(s_parseMetas);
}
UnitEmitterSerdeWrapper
Package::parseRun(const std::string& content,
const RepoOptionsFlags& repoOptions,
const std::vector<UnitDecls>& decls) {
if (s_fileMetasIdx >= s_fileMetas.size()) {
throw Error{
folly::sformat("Encountered {} inputs, but only {} file metas",
s_fileMetasIdx+1, s_fileMetas.size())
};
}
auto const& meta = s_fileMetas[s_fileMetasIdx++];
auto const& fileName = meta.m_filename;
try {
if (RO::EvalAllowHhas && folly::StringPiece(fileName).endsWith(".hhas")) {
auto ue = assemble_string(
content.data(),
content.size(),
fileName.c_str(),
SHA1{string_sha1(content)},
nullptr,
repoOptions.packageInfo()
);
if (meta.m_targetPath) {
ue = createSymlinkWrapper(
fileName, *meta.m_targetPath, std::move(ue)
);
if (!ue) {
// If the symlink contains no EntryPoint we don't do
// anything, but it is still a success.
return output(nullptr, nullptr, makeStaticString(fileName));
}
}
// Assembling hhas never emits DeclNames.
return output(std::move(ue), nullptr, nullptr);
}
SHA1 mangled_sha1{
mangleUnitSha1(string_sha1(content), fileName, repoOptions)
};
auto const mode =
RO::EvalAbortBuildOnCompilerError ? CompileAbortMode::AllErrors :
RO::EvalAbortBuildOnVerifyError ? CompileAbortMode::VerifyErrors :
CompileAbortMode::OnlyICE;
std::unique_ptr<UnitEmitter> ue;
BatchDeclProvider provider(decls);
try {
ue = compile_unit(
content,
fileName.c_str(),
mangled_sha1,
nullptr,
/* isSystemLib */ false,
/* forDebuggerEval */ false,
repoOptions,
mode,
RO::EvalEnableDecl ? &provider : nullptr
);
} catch (const CompilerAbort& exn) {
ParseMeta meta;
meta.m_abort = exn.what();
meta.m_filepath = makeStaticString(fileName);
s_parseMetas.emplace_back(std::move(meta));
return UnitEmitterSerdeWrapper{};
}
if (ue) {
if (!ue->m_ICE && meta.m_targetPath) {
ue =
createSymlinkWrapper(fileName, *meta.m_targetPath, std::move(ue));
if (!ue) {
// If the symlink contains no EntryPoint we don't do anything, but it
// is still a success.
return output(nullptr, nullptr, makeStaticString(fileName));
}
}
provider.finish();
auto missing = std::make_unique<DeclNames>(std::move(provider.m_missing));
return output(std::move(ue), std::move(missing), nullptr);
} else {
throw Error{
folly::sformat("Unable to compile: {}", fileName)
};
}
} catch (const std::exception& exn) {
throw Error{
folly::sformat("While parsing `{}`: {}", fileName, exn.what())
};
}
}
///////////////////////////////////////////////////////////////////////////////
/*
* File grouping:
*
* Since every invocation of an extern-worker worker has some fixed
* overhead, we want to parse multiple files per invocation. We also
* want to leverage any caching that extern-worker has for job
* execution. Since we assume that source files will change over time,
* we don't want to group too many files together (if one file
* changes, we'll have to reparse all of them in that
* group). Furthermore, to maximize cache effectiveness, we want to
* group files together in a deterministic way. Finally, there may be
* different "subsections" of the source tree, which are only parsed
 * depending on the input files configuration (for example, some
 * builds may discard test directories and some might not). Again, we
 * want to maximize caching across these different "flavors" of builds
* and try to avoid grouping together files from these different
* subsets.
*
* We utilize the following scheme to try to accomplish all
* this. First we define a group size (Option::ParserGroupSize). This
 * is the number of files (on average) we'll group together in one
* job. Input files are discovered by walking directories
* recursively. We proceed bottom up. For every directory, we first
* process its sub-directories. Each sub-directory processed returns
* the groups it has already created (each roughly containing
* Option::ParserGroupSize) files, along with any "left over" files
* which have not been grouped. These results are all aggregated
* together, and any files in the current directory are added to the
* ungrouped set. If the number of files in the ungrouped set exceeds
* Option::ParserDirGroupSizeLimit, then we attempt to group all of
* those files.
*
* Grouping is done by hashing the files' names, and then using
* consistent_hash to assign them to one of N buckets (where N is the
* number of files divided by Option::ParserGroupSize rounded up). The
* consistent hashing ensures that the minimal amount of disruption
* happens when we add/remove files. (Adding one file will change
* exactly one bucket, etc).
*
* If we grouped the files, they are returned to the parent directory
* as groups (along with any groups from sub-directories). Otherwise
* the files are returned as ungrouped and the process repeats in the
* parent.
*
* The idea behind Option::ParserDirGroupSizeLimit is to try to
* partition the source tree into distinct chunks and only grouping
* files within those chunks. So, if you don't compile one of those
* chunks (say because you're not compiling tests, for example), it
* won't affect the files in other chunks. Otherwise if that test code
* was mixed in with the rest of the groups, they'd all miss the cache
* and have to be rerun. This is a heuristic, but in practice it seems
* to work well.
*
* Once you reach the top level, any remaining ungrouped files (along
* with any top level files added in by config) are grouped together.
*
* Before parsing, we sort all of the groups by their summed file
* size. We want to start parsing larger groups first because they'll
* probably take the longest.
*/
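//
// A small worked example (illustrative numbers): with
// Option::ParserGroupSize = 500, an ungrouped set of 1,800 files is split
// into ceil(1800 / 500) = 4 new buckets, and each file lands in bucket
//
//   consistent_hash(hash_string_cs(name, strlen(name)), 4)
//
// as in groupFiles() below. Adding or removing one file therefore only
// perturbs the single bucket that file hashes to, keeping the remaining
// groups (and their extern-worker cache entries) stable.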
// Given the path of a directory, find all (relevant) files in that
// directory (and sub-directories), and attempt to group them.
coro::Task<Package::GroupResult> Package::groupDirectories(
std::string path, bool filterFiles, bool filterDirs
) {
// We're not going to be blocking on I/O here, so make sure we're
// running on the thread pool.
HPHP_CORO_RESCHEDULE_ON_CURRENT_EXECUTOR;
GroupResult result;
std::vector<coro::Task<GroupResult>> dirs;
FileUtil::find(
m_root, path, /* php */ true,
[&] (const std::string& name, bool dir, size_t size) {
if (!dir) {
if (filterFiles) {
if (Option::PackageExcludeFiles.count(name) ||
Option::IsFileExcluded(name, Option::PackageExcludePatterns)) {
return false;
}
}
if (!name.empty()) {
auto canonFileName =
FileUtil::canonicalize(String(name)).toCppString();
if (m_seenFiles.emplace(std::move(canonFileName), true).second) {
result.m_ungrouped.emplace_back(FileAndSize{name, size});
}
}
return true;
}
if (filterDirs && Option::PackageExcludeDirs.count(name)) {
// Only skip excluded dirs when requested.
return false;
}
if (path == name ||
(name.size() == path.size() + 1 &&
name.back() == FileUtil::getDirSeparator() &&
name.compare(0, path.size(), path) == 0)) {
// find immediately calls us back with a canonicalized version
// of path; we want to ignore that, and let it proceed to
// iterate the directory.
return true;
}
// Process the directory as a new job
dirs.emplace_back(groupDirectories(name, filterFiles, filterDirs));
// Don't iterate the directory in this job.
return false;
}
);
// Coalesce the sub results
for (auto& sub : HPHP_CORO_AWAIT(coro::collectRange(std::move(dirs)))) {
result.m_grouped.insert(
result.m_grouped.end(),
std::make_move_iterator(sub.m_grouped.begin()),
std::make_move_iterator(sub.m_grouped.end())
);
result.m_ungrouped.insert(
result.m_ungrouped.end(),
std::make_move_iterator(sub.m_ungrouped.begin()),
std::make_move_iterator(sub.m_ungrouped.end())
);
}
// Have we gathered enough files to assign them to groups?
if (result.m_ungrouped.size() >= Option::ParserDirGroupSizeLimit) {
groupFiles(result.m_grouped, std::move(result.m_ungrouped));
assertx(result.m_ungrouped.empty());
}
HPHP_CORO_MOVE_RETURN(result);
}
// Group sets of files together using consistent hashing
void Package::groupFiles(Groups& groups, FileAndSizeVec files) {
if (files.empty()) return;
assertx(Option::ParserGroupSize > 0);
// Number of buckets
auto const numNew =
(files.size() + (Option::ParserGroupSize - 1)) / Option::ParserGroupSize;
auto const origSize = groups.size();
groups.resize(origSize + numNew);
// Assign to buckets by hash(filename)
for (auto& [file, size] : files) {
auto const idx = consistent_hash(
hash_string_cs(file.c_str(), file.native().size()),
numNew
);
assertx(idx < numNew);
groups[origSize + idx].m_files.emplace_back(std::move(file));
groups[origSize + idx].m_size += size;
}
// We could (though unlikely) have empty buckets. Remove them so we
// don't have to deal with this when parsing.
groups.erase(
std::remove_if(
groups.begin() + origSize,
groups.end(),
[] (const Group& g) { return g.m_files.empty(); }
),
groups.end()
);
// Keep the order of the files within the bucket deterministic
for (size_t i = origSize, n = groups.size(); i < n; ++i) {
std::sort(groups[i].m_files.begin(), groups[i].m_files.end());
}
}
// Parse all of the files in the given group
coro::Task<void> Package::parseGroups(
Groups groups,
const ParseCallback& callback,
const UnitIndex& index
) {
if (groups.empty()) HPHP_CORO_RETURN_VOID;
// Kick off the parsing. Each group gets its own sticky ticket (so
// earlier groups will get scheduling priority over later ones).
std::vector<coro::TaskWithExecutor<void>> tasks;
for (auto& group : groups) {
tasks.emplace_back(
parseGroup(std::move(group), callback, index)
.scheduleOn(m_executor.sticky())
);
}
HPHP_CORO_AWAIT(coro::collectRange(std::move(tasks)));
HPHP_CORO_RETURN_VOID;
}
coro::Task<Package::Groups>
Package::groupAll(bool filterFiles, bool filterDirs) {
Timer timer{Timer::WallTime, "finding inputs"};
std::vector<coro::Task<GroupResult>> tasks;
for (auto& dir : m_directories) {
tasks.emplace_back(
groupDirectories(std::move(dir), filterFiles, filterDirs)
);
}
// Gather together all top level files
GroupResult top;
for (auto& result :
HPHP_CORO_AWAIT(coro::collectRange(std::move(tasks)))) {
top.m_grouped.insert(
top.m_grouped.end(),
std::make_move_iterator(result.m_grouped.begin()),
std::make_move_iterator(result.m_grouped.end())
);
top.m_ungrouped.insert(
top.m_ungrouped.end(),
std::make_move_iterator(result.m_ungrouped.begin()),
std::make_move_iterator(result.m_ungrouped.end())
);
}
// If there's any ungrouped files left over, group those now
groupFiles(top.m_grouped, std::move(top.m_ungrouped));
assertx(top.m_ungrouped.empty());
// Finally add in any files explicitly added via configuration
// and group them.
FileAndSizeVec extraFiles;
for (auto& file : m_filesToParse) {
if (!m_seenFiles.insert(file).second) continue;
extraFiles.emplace_back(FileAndSize{std::move(file.first), 0});
}
groupFiles(top.m_grouped, std::move(extraFiles));
// Sort the groups from highest combined file size to lowest.
// Larger groups will probably take longer to process, so we want to
// start those earliest.
std::sort(
top.m_grouped.begin(),
top.m_grouped.end(),
[] (const Group& a, const Group& b) {
if (a.m_size != b.m_size) return b.m_size < a.m_size;
if (a.m_files.size() != b.m_files.size()) {
return b.m_files.size() < a.m_files.size();
}
return a.m_files < b.m_files;
}
);
HPHP_CORO_RETURN(std::move(top.m_grouped));
}
coro::Task<void>
Package::parseAll(const ParseCallback& callback, const UnitIndex& index) {
// Find the initial set of groups.
auto groups = HPHP_CORO_AWAIT(groupAll(true, false));
// Parse all input files and autoload-eligible files
Timer timer{Timer::WallTime, "parsing files"};
HPHP_CORO_AWAIT(parseGroups(std::move(groups), callback, index));
m_inputMicros = std::chrono::microseconds{timer.getMicroSeconds()};
HPHP_CORO_RETURN_VOID;
}
coro::Task<bool> Package::parse(const UnitIndex& index,
const ParseCallback& callback) {
assertx(callback);
Logger::FInfo(
"parsing using {} threads using {}{}",
m_executor.numThreads(),
m_client.implName(),
coro::using_coros ? "" : " (coros disabled!)"
);
HPHP_CORO_AWAIT(
parseAll(callback, index).scheduleOn(m_executor.sticky())
);
HPHP_CORO_RETURN(!m_failed.load());
}
namespace {
std::string createFullPath(const std::filesystem::path& fileName,
const std::string& root) {
if (FileUtil::isDirSeparator(fileName.native().front())) {
return fileName.native();
}
return root + fileName.native();
}
bool statFile(const std::filesystem::path& fileName, const std::string& root,
std::string& fullPath, Optional<std::string>& targetPath) {
fullPath = createFullPath(fileName, root);
struct stat sb;
if (lstat(fullPath.c_str(), &sb)) {
if (fullPath.find(' ') == std::string::npos) {
Logger::Error("Unable to stat file %s", fullPath.c_str());
}
return false;
}
if ((sb.st_mode & S_IFMT) == S_IFDIR) {
Logger::Error("Unable to parse directory: %s", fullPath.c_str());
return false;
}
if (S_ISLNK(sb.st_mode)) {
auto const target = std::filesystem::canonical(fullPath);
targetPath.emplace(std::filesystem::relative(target, root).native());
}
return true;
}
coro::Task<std::tuple<
const Ref<Package::Config>*,
Ref<std::vector<Package::FileMeta>>,
std::vector<Ref<std::string>>,
std::vector<Ref<RepoOptionsFlags>>
>>
storeInputs(
bool optimistic,
std::vector<std::filesystem::path>& paths,
std::vector<Package::FileMeta>& metas,
std::vector<coro::Task<Ref<RepoOptionsFlags>>>& options,
Optional<std::vector<Ref<RepoOptionsFlags>>>& storedOptions,
extern_worker::Client& client,
const coro::AsyncValue<extern_worker::Ref<Package::Config>>& config
) {
// Store the inputs and get their refs
auto [fileRefs, metasRef, optionRefs, configRef] =
HPHP_CORO_AWAIT(
coro::collect(
client.storeFile(paths, optimistic),
optimistic
? client.storeOptimistically(metas)
: client.store(metas),
// If we already called storeInputs, then options will be
// empty here (but storedOptions will be set).
coro::collectRange(std::move(options)),
*config
)
);
assertx(fileRefs.size() == paths.size());
// Options are never stored optimistically. The first time we call
// storeInputs, we'll store them above and store the results in
// storedOptions. If we call this again, the above store for options
// will do nothing, and we'll just reload the storedOptions.
if (storedOptions) {
assertx(!optimistic);
assertx(optionRefs.empty());
assertx(storedOptions->size() == paths.size());
optionRefs = *std::move(storedOptions);
} else {
assertx(optionRefs.size() == paths.size());
if (optimistic) storedOptions = optionRefs;
}
HPHP_CORO_RETURN(std::make_tuple(
configRef, metasRef, fileRefs, optionRefs
));
}
}
coro::Task<void> Package::prepareInputs(
Group group,
std::vector<std::filesystem::path>& paths,
std::vector<Package::FileMeta>& metas,
std::vector<coro::Task<Ref<RepoOptionsFlags>>>& options
) {
paths.reserve(group.m_files.size());
metas.reserve(group.m_files.size());
options.reserve(group.m_files.size());
for (auto& fileName : group.m_files) {
assertx(!fileName.empty());
std::string fullPath;
Optional<std::string> targetPath;
if (!statFile(fileName, m_root, fullPath, targetPath)) {
Logger::FError("Fatal: Unable to stat/parse {}", fileName.native());
m_failed.store(true);
continue;
}
// Most files will have the same RepoOptions, so we cache them
auto const& repoOptions = RepoOptions::forFile(fullPath.data()).flags();
options.emplace_back(
m_repoOptions.get(
repoOptions.cacheKeySha1(),
repoOptions,
HPHP_CORO_CURRENT_EXECUTOR
)
);
paths.emplace_back(std::move(fullPath));
metas.emplace_back(std::move(fileName), std::move(targetPath));
}
HPHP_CORO_RETURN_VOID;
}
// Parse a group using extern-worker and hand off the UnitEmitter or
// WPI::Key/Values obtained.
coro::Task<void> Package::parseGroup(
Group group,
const ParseCallback& callback,
const UnitIndex& index
) {
using namespace folly::gen;
// Make sure we're running on the thread we should be
HPHP_CORO_RESCHEDULE_ON_CURRENT_EXECUTOR;
try {
// First build the inputs for the job
std::vector<std::filesystem::path> paths;
std::vector<FileMeta> metas;
std::vector<coro::Task<Ref<RepoOptionsFlags>>> options;
HPHP_CORO_AWAIT(prepareInputs(std::move(group), paths, metas, options));
if (paths.empty()) {
assertx(metas.empty());
assertx(options.empty());
HPHP_CORO_RETURN_VOID;
}
auto const workItems = paths.size();
for (size_t i = 0; i < workItems; i++) {
auto const& fileName = metas[i].m_filename;
if (!m_extraStaticFiles.count(fileName)) {
m_discoveredStaticFiles.emplace(
fileName,
Option::CachePHPFile ? paths[i] : ""
);
}
}
Optional<std::vector<Ref<RepoOptionsFlags>>> storedOptions;
// Try optimistic mode first. We won't actually store
// anything, just generate the Refs. If something isn't
// actually present in the workers, the execution will throw
// an exception. If everything is present, we've skipped a
// lot of work.
bool optimistic = Option::ParserOptimisticStore &&
m_client.supportsOptimistic();
for (;;) {
auto [configRef, metasRef, fileRefs, optionsRefs] = HPHP_CORO_AWAIT(
storeInputs(optimistic, paths, metas, options, storedOptions,
m_client, m_config)
);
std::vector<FileData> fds;
fds.reserve(workItems);
for (size_t i = 0; i < workItems; i++) {
fds.emplace_back(FileData {
fileRefs[i],
optionsRefs[i],
{} // Start with no decls
});
}
try {
// When Eval.EnableDecl==true, parse jobs may return a list of
// missing decl symbols. If any exist in UnitIndex, callback()
// will return a nonempty ParseMetaVec, expecting a retry with
// additional decls available from other files.
// Retry until we successfully parse bytecode without requesting
// any new decls.
for (size_t attempts = 1;; ++attempts) {
assertx(!metas.empty());
Client::ExecMetadata metadata{
.optimistic = optimistic,
.job_key = folly::sformat("parse-{}-{}", attempts,
metas[0].m_filename)
};
auto parseMetas = HPHP_CORO_AWAIT(callback(
*configRef, metasRef, fds, metadata
));
if (parseMetas.empty()) {
m_total += workItems;
HPHP_CORO_RETURN_VOID;
}
always_assert(parseMetas.size() == workItems);
// At least one item in the group needed additional decls.
// Update its FileData with additional Ref<UnitDecls>.
resolveDecls(index, metas, parseMetas, fds, attempts);
}
} catch (const extern_worker::WorkerError&) {
throw;
} catch (const extern_worker::Error&) {
if (!optimistic) throw;
optimistic = false;
}
}
} catch (const Exception& e) {
Logger::FError(
"Fatal: An unexpected exception was thrown while parsing: {}",
e.getMessage()
);
m_failed.store(true);
} catch (const extern_worker::Error& e) {
Logger::FError("Extern worker error while parsing: {}", e.what());
m_failed.store(true);
} catch (const std::exception& e) {
Logger::FError(
"Fatal: An unexpected exception was thrown while parsing: {}",
e.what()
);
m_failed.store(true);
} catch (...) {
Logger::Error("Fatal: An unexpected exception was thrown while parsing");
m_failed.store(true);
}
HPHP_CORO_RETURN_VOID;
}
///////////////////////////////////////////////////////////////////////////////
void Package::resolveDecls(
const UnitIndex& index,
const FileMetaVec& metas,
const std::vector<ParseMeta>& parseMetas,
std::vector<FileData>& fileDatas,
size_t attempts
) {
bool discovered = false;
for (size_t i = 0, n = parseMetas.size(); i < n; i++) {
auto& decls = std::get<2>(fileDatas[i]);
auto origSize = decls.size();
auto resolve = [&](auto const& names, auto const& table) {
for (auto& name : names) {
auto it = table.find(name);
if (it != table.end()) {
decls.emplace_back(it->second->declsRef);
}
}
};
auto const& missing = parseMetas[i].m_missing;
resolve(missing.types, index.types);
resolve(missing.funcs, index.funcs);
resolve(missing.constants, index.constants);
resolve(missing.modules, index.modules);
// Remove dups
std::sort(decls.begin(), decls.end());
auto it = std::unique(decls.begin(), decls.end());
decls.erase(it, decls.end());
discovered |= decls.size() > origSize;
if ((decls.size() > origSize && attempts > 1) || decls.size() > 100) {
Logger::FVerbose("retry after attempts={} decls={} {}",
attempts, decls.size(), metas[i].m_filename);
}
}
// If no new decls were discovered in any group item,
// we would retry forever. Abort instead.
assertx(discovered);
}
void Package::resolveOnDemand(OndemandInfo& out,
const StringData* fromFile,
const SymbolRefs& symbolRefs,
const UnitIndex& index,
bool report) {
auto const& onPath = [&] (const std::string& rpath,
const StringData* sym) {
if (rpath.empty()) return;
if (Option::PackageExcludeFiles.count(rpath) > 0 ||
Option::IsFileExcluded(rpath, Option::PackageExcludePatterns)) {
// Found symbol in UnitIndex, but the corresponding file was excluded.
Logger::FVerbose("excluding ondemand file {}", rpath);
return;
}
auto const toFile = makeStaticString(rpath);
if (fromFile != toFile) {
out.m_edges.emplace_back(SymbolRefEdge{sym, fromFile, toFile});
}
auto canon = FileUtil::canonicalize(String(rpath)).toCppString();
assertx(!canon.empty());
// Only emit a file once. This ensures we eventually run out
// of things to emit.
if (report || m_seenFiles.emplace(canon, true).second) {
auto const absolute = [&] {
if (FileUtil::isDirSeparator(canon.front())) {
return canon;
} else {
return m_root + canon;
}
}();
struct stat sb;
if (stat(absolute.c_str(), &sb)) {
Logger::FError("Unable to stat {}", absolute);
m_failed.store(true);
return;
}
out.m_files.emplace_back(FileAndSize{std::move(canon), (size_t)sb.st_size});
}
};
auto const onMap = [&] (auto const& syms, auto const& sym_to_file) {
for (auto const& sym : syms) {
auto const s = makeStaticString(sym);
auto const it = sym_to_file.find(s);
if (it == sym_to_file.end()) continue;
onPath(it->second->rpath, s);
}
};
for (auto const& [kind, syms] : symbolRefs) {
switch (kind) {
case SymbolRef::Include:
for (auto const& path : syms) {
auto const rpath = path.compare(0, m_root.length(), m_root) == 0
? path.substr(m_root.length())
: path;
onPath(rpath, nullptr);
}
break;
case SymbolRef::Class:
onMap(syms, index.types);
break;
case SymbolRef::Function:
onMap(syms, index.funcs);
break;
case SymbolRef::Constant:
onMap(syms, index.constants);
break;
}
}
}
///////////////////////////////////////////////////////////////////////////////
namespace {
// Extern-worker job for computing decls and autoload-index from source files.
struct IndexJob {
static std::string name() { return "hphpc-index"; }
static void init(const Package::Config& config,
Package::FileMetaVec meta) {
Package::parseInit(config, std::move(meta));
}
static Package::IndexMetaVec fini() {
return Package::indexFini();
}
static Package::UnitDecls run(
const std::string& content,
const RepoOptionsFlags& repoOptions
);
};
Job<IndexJob> g_indexJob;
}
Package::IndexMetaVec Package::indexFini() {
assertx(s_fileMetasIdx == s_fileMetas.size());
assertx(s_indexMetas.size() == s_fileMetas.size());
finishJob();
return std::move(s_indexMetas);
}
// Index one source file:
// 1. compute decls
// 2. use facts from decls to compute IndexMeta (decl names in each file)
// 3. save serialized decls in UnitDecls as job output
Package::UnitDecls IndexJob::run(
const std::string& content,
const RepoOptionsFlags& repoOptions
) {
if (s_fileMetasIdx >= s_fileMetas.size()) {
throw Error{
folly::sformat("Encountered {} inputs, but only {} file metas",
s_fileMetasIdx+1, s_fileMetas.size())
};
}
auto const& meta = s_fileMetas[s_fileMetasIdx++];
auto const& fileName = meta.m_filename;
auto const bail = [&](const char* message) {
Package::IndexMeta summary;
summary.error = message;
s_indexMetas.emplace_back(std::move(summary));
return Package::UnitDecls{};
};
if (meta.m_targetPath) {
// When/if this symlink is parsed, its UnitEmitter will be a generated
// stub function that calls the entrypoint of the target file (if any).
// Don't index it since we don't expect any external references to the
// generated stub entry point function.
return bail("not indexing symlink");
}
if (RO::EvalAllowHhas && folly::StringPiece(fileName).endsWith(".hhas")) {
return bail("cannot index hhas");
}
hackc::DeclParserConfig decl_config;
repoOptions.initDeclConfig(decl_config);
auto decls = hackc::direct_decl_parse_and_serialize(
decl_config,
fileName,
{(const uint8_t*)content.data(), content.size()}
);
if (decls.has_errors) {
return bail("decl parser error");
}
// Get Facts from Decls, then populate IndexMeta.
auto facts = hackc::decls_to_facts(*decls.decls, "");
auto summary = summary_of_facts(facts);
s_indexMetas.emplace_back(summary);
if (!RO::EvalEnableDecl) {
// If decl-directed bytecode is disabled, parseRun() will not need
// these decls, so don't bother storing them.
return Package::UnitDecls{};
}
return Package::UnitDecls{
std::move(summary),
std::string{decls.serialized.begin(), decls.serialized.end()}
};
}
coro::Task<bool> Package::index(const IndexCallback& callback) {
Logger::FInfo(
"indexing using {} threads using {}{}",
m_executor.numThreads(),
m_client.implName(),
coro::using_coros ? "" : " (coros disabled!)"
);
// TODO: index systemlib. It is too late to do that here: systemlib units have
// already been parsed into UEs at startup, but not yet claimed.
HPHP_CORO_AWAIT(indexAll(callback).scheduleOn(m_executor.sticky()));
HPHP_CORO_RETURN(!m_failed.load());
}
coro::Task<void> Package::indexAll(const IndexCallback& callback) {
// If EnableDecl==true, all source files should be included in the
// index, not just ondemand-eligible files.
auto const filterFiles = !RO::EvalEnableDecl;
auto const filterDirs = false;
// Compute the groups to index
auto groups = HPHP_CORO_AWAIT(groupAll(filterFiles, filterDirs));
Logger::FInfo("indexing {:,} groups", groups.size());
// Index all files
Timer timer{Timer::WallTime, "indexing files"};
HPHP_CORO_AWAIT(indexGroups(callback, std::move(groups)));
HPHP_CORO_RETURN_VOID;
}
// Index all of the files in the given groups
coro::Task<void> Package::indexGroups(const IndexCallback& callback,
Groups groups) {
if (groups.empty()) HPHP_CORO_RETURN_VOID;
// Kick off indexing. Each group gets its own sticky ticket (so
// earlier groups will get scheduling priority over later ones).
std::vector<coro::TaskWithExecutor<void>> tasks;
for (auto& group : groups) {
tasks.emplace_back(
indexGroup(callback, std::move(group)).scheduleOn(m_executor.sticky())
);
}
HPHP_CORO_AWAIT(coro::collectRange(std::move(tasks)));
HPHP_CORO_RETURN_VOID;
}
// Index a group using extern-worker, invoke callback with each IndexMeta.
coro::Task<void> Package::indexGroup(const IndexCallback& callback,
Group group) {
using namespace folly::gen;
// Make sure we're running on the thread we should be
HPHP_CORO_RESCHEDULE_ON_CURRENT_EXECUTOR;
try {
// First build the inputs for the job
std::vector<std::filesystem::path> paths;
std::vector<FileMeta> metas;
std::vector<coro::Task<Ref<RepoOptionsFlags>>> options;
HPHP_CORO_AWAIT(prepareInputs(std::move(group), paths, metas, options));
if (paths.empty()) {
assertx(metas.empty());
assertx(options.empty());
HPHP_CORO_RETURN_VOID;
}
auto const workItems = paths.size();
Optional<std::vector<Ref<RepoOptionsFlags>>> storedOptions;
using ExecT = decltype(g_indexJob)::ExecT;
auto const doExec = [&] (
auto configRef, auto metasRef, auto fileRefs, auto metadata
) -> coro::Task<ExecT> {
auto out = HPHP_CORO_AWAIT(
m_client.exec(
g_indexJob,
std::make_tuple(*configRef, std::move(metasRef)),
std::move(fileRefs),
std::move(metadata)
)
);
HPHP_CORO_MOVE_RETURN(out);
};
using IndexInputs = std::tuple<
Ref<std::string>,
Ref<RepoOptionsFlags>
>;
auto [declsRefs, summariesRef] = HPHP_CORO_AWAIT(coro::invoke(
[&] () -> coro::Task<ExecT> {
// Try optimistic mode first. We won't actually store
// anything, just generate the Refs. If something isn't
// actually present in the workers, the execution will throw
// an exception. If everything is present, we've skipped a
// lot of work.
bool optimistic = Option::ParserOptimisticStore &&
m_client.supportsOptimistic();
for (;;) {
assertx(!metas.empty());
Client::ExecMetadata metadata{
.optimistic = optimistic,
.job_key = folly::sformat("index-{}", metas[0].m_filename)
};
auto [configRef, metasRef, fileRefs, optionRefs] =
HPHP_CORO_AWAIT(storeInputs(
optimistic, paths, metas, options, storedOptions, m_client,
m_config));
// "Tuplize" the input refs according to signature of IndexJob::run()
std::vector<IndexInputs> inputs;
inputs.reserve(workItems);
for (size_t i = 0; i < workItems; ++i) {
inputs.emplace_back(
std::move(fileRefs[i]),
std::move(optionRefs[i])
);
}
try {
HPHP_CORO_RETURN(HPHP_CORO_AWAIT(doExec(
configRef, std::move(metasRef), std::move(inputs), std::move(metadata)
)));
} catch (const extern_worker::WorkerError&) {
throw;
} catch (const extern_worker::Error&) {
if (!optimistic) throw;
optimistic = false;
}
}
}
));
// Load the summaries but leave decls in external storage
auto summaries = HPHP_CORO_AWAIT(m_client.load(summariesRef));
assertx(metas.size() == workItems);
assertx(declsRefs.size() == workItems);
always_assert(summaries.size() == workItems);
m_total += workItems;
// Process the summaries
for (size_t i = 0; i < workItems; ++i) {
auto& meta = metas[i];
auto& summary = summaries[i];
auto& declsRef = declsRefs[i];
if (summary.error.empty()) {
callback(
std::move(meta.m_filename),
std::move(summary),
std::move(declsRef)
);
} else {
// Could not parse decls in this file. Compiler may fail, or produce
// a unit that fatals when run.
Logger::FWarning("Warning: decl-parser error in {}: {}",
meta.m_filename, summary.error
);
}
}
HPHP_CORO_RETURN_VOID;
} catch (const Exception& e) {
Logger::FError(
"Fatal: An unexpected exception was thrown while indexing: {}",
e.getMessage()
);
m_failed.store(true);
} catch (const Error& e) {
Logger::FError("Extern worker error while indexing: {}",
e.what());
m_failed.store(true);
} catch (const std::exception& e) {
Logger::FError(
"Fatal: An unexpected exception was thrown while indexing: {}",
e.what()
);
m_failed.store(true);
} catch (...) {
Logger::Error("Fatal: An unexpected exception was thrown while indexing");
m_failed.store(true);
}
HPHP_CORO_RETURN_VOID;
}
///////////////////////////////////////////////////////////////////////////////
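// Returns true if any ParseMeta in `parseMetas` names a missing symbol
// (type, func, constant, or module) that is present in this index.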
bool UnitIndex::containsAnyMissing(
const Package::ParseMetaVec& parseMetas
) const {
auto any = [&](auto const& names, auto const& map) -> bool {
for (auto name : names) {
if (map.find(name) != map.end()) return true;
}
return false;
};
for (auto const& p : parseMetas) {
if (any(p.m_missing.types, types) ||
any(p.m_missing.funcs, funcs) ||
any(p.m_missing.constants, constants) ||
any(p.m_missing.modules, modules)) {
return true;
}
}
return false;
}
///////////////////////////////////////////////////////////////////////////////
coro::Task<bool> Package::emit(const UnitIndex& index,
const EmitCallback& callback,
const LocalCallback& localCallback,
const std::filesystem::path& edgesPath) {
assertx(callback);
assertx(localCallback);
Logger::FInfo(
"emitting using {} threads using {}{}",
m_executor.numThreads(),
m_client.implName(),
coro::using_coros ? "" : " (coros disabled!)"
);
HphpSession _{Treadmill::SessionKind::CompilerEmit};
// Treat any symbol refs from systemlib as if they were part of the
// original Package. This allows systemlib to depend on input files
// and parse-on-demand files.
UEVec localUEs;
for (auto& ue : SystemLib::claimRegisteredUnitEmitters()) {
OndemandInfo ondemand;
resolveOnDemand(ondemand, ue->m_filepath, ue->m_symbol_refs, index, true);
for (auto const& p : ondemand.m_files) {
addSourceFile(p.m_path);
}
for (auto const& e : ondemand.m_edges) {
auto const sym = e.sym ? e.sym : makeStaticString("<include>");
Logger::FVerbose("systemlib unit {} -> {} -> {}", e.from, sym, e.to);
}
localUEs.emplace_back(std::move(ue));
}
std::vector<coro::TaskWithExecutor<void>> tasks;
if (!localUEs.empty()) {
auto task = localCallback(std::move(localUEs));
tasks.emplace_back(std::move(task).scheduleOn(m_executor.sticky()));
}
tasks.emplace_back(
emitAll(callback, index, edgesPath).scheduleOn(m_executor.sticky())
);
HPHP_CORO_AWAIT(coro::collectRange(std::move(tasks)));
HPHP_CORO_RETURN(!m_failed.load());
}
namespace {
// Write every ondemand edge to a text file. The file contains
// enough information to reconstruct a file dependency graph with
// edges labeled with symbol names.
//
// Format:
// f <filename> Defines a file (node)
// s <symbol> Defines a symbol (edge label)
// e <from> <to> <sym> Dependency edge
//
// Edges reference files and symbols by index, where indexes are
// assigned in the order of `f` and `s` lines in the file.
// For example:
//
// a.php
// function foo() {}
// b.php
// function test() { foo(); }
//
// reports one edge:
// f a.php
// f b.php
// s foo
// e 0 1 0
//
void saveSymbolRefEdges(std::vector<SymbolRefEdge> edges,
const std::filesystem::path& edgesPath) {
auto f = fopen(edgesPath.native().c_str(), "w");
if (!f) {
Logger::FError("Could not open {} for writing", edgesPath.native());
return;
}
SCOPE_EXIT { fclose(f); };
hphp_fast_map<const StringData*, size_t> files;
hphp_fast_map<const StringData*, size_t> symbols;
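// Assign indexes to files and symbols in order of first appearance; the
// indexes used on 'e' lines match the order of the 'f' and 's' lines emitted.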
for (auto const& e : edges) {
auto it = files.find(e.from);
if (it == files.end()) {
files.insert(std::make_pair(e.from, files.size()));
fprintf(f, "f %s\n", e.from->data());
}
it = files.find(e.to);
if (it == files.end()) {
files.insert(std::make_pair(e.to, files.size()));
fprintf(f, "f %s\n", e.to->data());
}
it = symbols.find(e.sym);
if (it == symbols.end()) {
symbols.insert(std::make_pair(e.sym, symbols.size()));
auto const sym = e.sym ? e.sym->data() : "<include>";
fprintf(f, "s %s\n", sym);
}
fprintf(f, "e %lu %lu %lu\n", files[e.from], files[e.to], symbols[e.sym]);
}
Logger::FInfo("Saved ondemand edges to {}", edgesPath.native());
}
}
// The actual emit loop. Find the initial set of inputs (from
// configuration), emit them, enumerate on-demand files from symbol refs,
// then repeat the process until we have no new files to emit.
coro::Task<void>
Package::emitAll(const EmitCallback& callback, const UnitIndex& index,
const std::filesystem::path& edgesPath) {
auto const logEdges = !edgesPath.native().empty();
// Find the initial set of groups
auto input_groups = HPHP_CORO_AWAIT(groupAll(true, true));
// Select the files specified as inputs, collect ondemand file names.
std::vector<SymbolRefEdge> edges;
OndemandInfo ondemand;
{
Timer timer{Timer::WallTime, "emitting inputs"};
ondemand = HPHP_CORO_AWAIT(
emitGroups(std::move(input_groups), callback, index)
);
if (logEdges) {
edges.insert(
edges.end(), ondemand.m_edges.begin(), ondemand.m_edges.end()
);
}
m_inputMicros = std::chrono::microseconds{timer.getMicroSeconds()};
}
if (ondemand.m_files.empty()) {
HPHP_CORO_RETURN_VOID;
}
Timer timer{Timer::WallTime, "emitting on-demand"};
// We have ondemand files, so keep emitting until we reach a fixed point.
do {
Groups ondemand_groups;
groupFiles(ondemand_groups, std::move(ondemand.m_files));
ondemand = HPHP_CORO_AWAIT(
emitGroups(std::move(ondemand_groups), callback, index)
);
if (logEdges) {
edges.insert(
edges.end(), ondemand.m_edges.begin(), ondemand.m_edges.end()
);
}
} while (!ondemand.m_files.empty());
m_ondemandMicros = std::chrono::microseconds{timer.getMicroSeconds()};
// Save edge list to a text file, if requested
if (logEdges) {
saveSymbolRefEdges(std::move(edges), edgesPath);
}
HPHP_CORO_RETURN_VOID;
}
// Emit all of the files in the given groups, returning a vector of
// ondemand files obtained from that group.
coro::Task<Package::OndemandInfo> Package::emitGroups(
Groups groups,
const EmitCallback& callback,
const UnitIndex& index
) {
if (groups.empty()) HPHP_CORO_RETURN(OndemandInfo{});
// Kick off emitting. Each group gets its own sticky ticket (so
// earlier groups will get scheduling priority over later ones).
std::vector<coro::TaskWithExecutor<OndemandInfo>> tasks;
for (auto& group : groups) {
tasks.emplace_back(
emitGroup(std::move(group), callback, index)
.scheduleOn(m_executor.sticky())
);
}
// Gather the on-demand files and return them
OndemandInfo ondemand;
for (auto& info : HPHP_CORO_AWAIT(coro::collectRange(std::move(tasks)))) {
ondemand.m_files.insert(
ondemand.m_files.end(),
std::make_move_iterator(info.m_files.begin()),
std::make_move_iterator(info.m_files.end())
);
ondemand.m_edges.insert(
ondemand.m_edges.end(),
std::make_move_iterator(info.m_edges.begin()),
std::make_move_iterator(info.m_edges.end())
);
}
HPHP_CORO_MOVE_RETURN(ondemand);
}
// Emit a group, hand off the UnitEmitter or WPI::Key/Value obtained,
// and return any on-demand files found.
coro::Task<Package::OndemandInfo> Package::emitGroup(
Group group,
const EmitCallback& callback,
const UnitIndex& index
) {
using namespace folly::gen;
// Make sure we're running on the thread we should be
HPHP_CORO_RESCHEDULE_ON_CURRENT_EXECUTOR;
try {
if (group.m_files.empty()) {
HPHP_CORO_RETURN(OndemandInfo{});
}
auto const workItems = group.m_files.size();
for (size_t i = 0; i < workItems; i++) {
auto const& fileName = group.m_files[i];
if (!m_extraStaticFiles.count(fileName)) {
m_discoveredStaticFiles.emplace(
fileName,
Option::CachePHPFile ? createFullPath(fileName, m_root) : ""
);
}
}
// The callback takes a group of files and returns the parse metas
// associated with these files. During the emit process, the units that
// do not belong to the active deployment will not produce parse metas
// as they do not need to be part of the final repo product.
// This means that the number of parse metas need not equal the number
// of files sent to the callback.
// One problem this creates is that we need to be able to associate
// parse metas with the original group order. To do this, we also
// return a fixup list consisting of the original indices of the
// omitted units. We later use this fixup list to compute the original
// index of each unit.
auto parseMetasAndItemsToSkip = HPHP_CORO_AWAIT(callback(group.m_files));
auto& [parseMetas, itemsToSkip] = parseMetasAndItemsToSkip;
if (RO::EvalActiveDeployment.empty()) {
// If a deployment is not set, then we should have gotten results for
// all files
always_assert(parseMetas.size() == workItems);
}
m_total += parseMetas.size();
// Process the outputs
OndemandInfo ondemand;
size_t numSkipped = 0;
for (size_t i = 0; i < workItems; i++) {
if (itemsToSkip.contains(i)) {
numSkipped++;
continue;
}
auto const& meta = parseMetas[i - numSkipped];
if (!meta.m_abort.empty()) {
// The unit had an ICE and we're configured to treat that as a
// fatal error. Here is where we die on it.
fprintf(stderr, "%s", meta.m_abort.c_str());
_Exit(HPHP_EXIT_FAILURE);
}
if (Option::ForceEnableSymbolRefs || RO::EvalActiveDeployment.empty()) {
auto const filename = makeStaticString(group.m_files[i].native());
// Resolve any symbol refs into files to parse ondemand
resolveOnDemand(ondemand, filename, meta.m_symbol_refs, index);
}
}
HPHP_CORO_MOVE_RETURN(ondemand);
} catch (const Exception& e) {
Logger::FError(
"Fatal: An unexpected exception was thrown while emitting: {}",
e.getMessage()
);
m_failed.store(true);
} catch (const std::exception& e) {
Logger::FError(
"Fatal: An unexpected exception was thrown while emitting: {}",
e.what()
);
m_failed.store(true);
} catch (...) {
Logger::Error("Fatal: An unexpected exception was thrown while emitting");
m_failed.store(true);
}
HPHP_CORO_RETURN(OndemandInfo{});
}
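// Build an IndexMeta listing the name of every decl in `facts`, the result of
// running hackc::decls_to_facts() over a file's decls.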
Package::IndexMeta HPHP::summary_of_facts(const hackc::FileFacts& facts) {
Package::IndexMeta summary;
for (auto& e : facts.types) {
summary.types.emplace_back(makeStaticString(std::string(e.name)));
}
for (auto& e : facts.functions) {
summary.funcs.emplace_back(makeStaticString(std::string(e)));
}
for (auto& e : facts.constants) {
summary.constants.emplace_back(makeStaticString(std::string(e)));
}
for (auto& e : facts.modules) {
summary.modules.emplace_back(makeStaticString(std::string(e.name)));
}
return summary;
}
C/C++ | hhvm/hphp/compiler/package.h
/*
+----------------------------------------------------------------------+
| HipHop for PHP |
+----------------------------------------------------------------------+
| Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com) |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| [email protected] so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
*/
#pragma once
#include <filesystem>
#include <map>
#include <memory>
#include <set>
#include <thread>
#include <vector>
#include "hphp/runtime/base/coeffects-config.h"
#include "hphp/runtime/base/unit-cache.h"
#include "hphp/util/coro.h"
#include "hphp/util/extern-worker.h"
#include "hphp/util/hash-map.h"
#include "hphp/util/mutex.h"
#include "hphp/util/optional.h"
#include "hphp/hhbbc/hhbbc.h"
namespace HPHP {
///////////////////////////////////////////////////////////////////////////////
namespace hackc {
struct FileFacts;
}
struct UnitIndex;
struct VirtualFileSystemWriter;
struct SymbolRefEdge {
const StringData* sym;
const StringData* from;
const StringData* to;
};
struct Package {
Package(const std::string& root,
coro::TicketExecutor& executor,
extern_worker::Client& client,
bool coredump);
size_t getTotalFiles() const { return m_total.load(); }
Optional<std::chrono::microseconds> inputsTime() const {
return m_inputMicros;
}
Optional<std::chrono::microseconds> ondemandTime() const {
return m_ondemandMicros;
}
void addSourceFile(const std::string& fileName);
void addInputList(const std::string& listFileName);
void addStaticFile(const std::string& fileName);
void addDirectory(const std::string& path);
void addStaticDirectory(const std::string& path);
void writeVirtualFileSystem(const std::string& path);
// Configuration for index & parse workers. This should contain any runtime
// options which can affect HackC (or the interface to it).
struct Config {
Config() = default;
static Config make(bool coredump) {
Config c;
#define R(Opt) c.Opt = RO::Opt;
UNITCACHEFLAGS()
#undef R
c.EvalAbortBuildOnCompilerError = RO::EvalAbortBuildOnCompilerError;
c.EvalAbortBuildOnVerifyError = RO::EvalAbortBuildOnVerifyError;
c.IncludeRoots = RO::IncludeRoots;
c.coeffects = CoeffectsConfig::exportForParse();
c.CoreDump = coredump;
return c;
}
void apply() const {
#define R(Opt) RO::Opt = Opt;
UNITCACHEFLAGS()
#undef R
RO::EvalAbortBuildOnCompilerError = EvalAbortBuildOnCompilerError;
RO::EvalAbortBuildOnVerifyError = EvalAbortBuildOnVerifyError;
RO::IncludeRoots = IncludeRoots;
CoeffectsConfig::importForParse(coeffects);
}
template <typename SerDe> void serde(SerDe& sd) {
#define R(Opt) sd(Opt);
UNITCACHEFLAGS()
#undef R
sd(EvalAbortBuildOnCompilerError)
(EvalAbortBuildOnVerifyError)
(IncludeRoots)
(coeffects)
(CoreDump)
;
}
bool CoreDump;
private:
#define R(Opt) decltype(RuntimeOption::Opt) Opt;
UNITCACHEFLAGS()
#undef R
bool EvalAbortBuildOnCompilerError;
bool EvalAbortBuildOnVerifyError;
decltype(RO::IncludeRoots) IncludeRoots;
CoeffectsConfig coeffects;
};
// Metadata for a parse or index job. Just filenames that we need to
// resolve when we have the whole source tree available.
struct FileMeta {
FileMeta() = default;
FileMeta(std::string f, Optional<std::string> o)
: m_filename{std::move(f)}, m_targetPath{std::move(o)} {}
// The (relative) filename of the file
std::string m_filename;
// If the file is a symlink, what its target is
Optional<std::string> m_targetPath;
template <typename SerDe> void serde(SerDe& sd) {
sd(m_filename)
(m_targetPath);
}
};
struct DeclNames {
std::vector<const StringData*> types;
std::vector<const StringData*> funcs;
std::vector<const StringData*> constants;
std::vector<const StringData*> modules;
bool empty() const {
return types.empty() && funcs.empty() && constants.empty() &&
modules.empty();
}
template <typename SerDe> void serde(SerDe& sd) {
sd(types)
(funcs)
(constants)
(modules);
}
};
// Index information collected during parsing, used to construct
// an autoload index for parse-on-demand.
struct IndexMeta: DeclNames {
// If not empty, indexing resulted in an ICE or had parse errors.
std::string error;
template<typename SerDe> void serde(SerDe& sd) {
DeclNames::serde(sd);
sd(error);
}
};
// Metadata obtained during parsing. Includes SymbolRefs used to
// drive on-demand parsing as well as toplevel decl names (from UEs)
// for use later by HHBBC's whole-program Index.
struct ParseMeta {
// Unresolved symbols required by hackc before bytecode generation.
DeclNames m_missing;
// Unresolved symbols needed by the Unit at runtime but not before
// bytecode generation. Used to find new files for parse on-demand.
SymbolRefs m_symbol_refs;
// If not empty, parsing resulted in an ICE and configuration
// indicated that this should be fatal.
std::string m_abort;
// List of symbol names extracted from UnitEmitters.
struct Definitions {
std::vector<const StringData*> m_classes;
std::vector<const StringData*> m_enums;
std::vector<const StringData*> m_funcs;
std::vector<const StringData*> m_methCallers;
std::vector<const StringData*> m_typeAliases;
std::vector<const StringData*> m_constants;
std::vector<const StringData*> m_modules;
template <typename SerDe> void serde(SerDe& sd) {
sd(m_classes)
(m_enums)
(m_funcs)
(m_methCallers)
(m_typeAliases)
(m_constants)
(m_modules);
}
};
Definitions m_definitions;
// File path copied from UnitEmitter::m_filepath
const StringData* m_filepath{nullptr};
// Name of the module that this unit belongs to
const StringData* m_module_use{nullptr};
template <typename SerDe> void serde(SerDe& sd) {
sd(m_missing)
(m_symbol_refs)
(m_abort)
(m_definitions)
(m_filepath)
(m_module_use)
;
}
};
// Serialized decls for a single file, along with the IndexMeta
// enumerating the symbols defined in the file.
struct UnitDecls {
DeclNames symbols;
std::string decls;
template <typename SerDe> void serde(SerDe& sd) {
sd(symbols)
(decls);
}
};
using IndexCallback = std::function<
void(std::string, IndexMeta, extern_worker::Ref<UnitDecls>)
>;
using IndexMetaVec = std::vector<IndexMeta>;
coro::Task<bool> index(const IndexCallback&);
using UEVec = std::vector<std::unique_ptr<UnitEmitter>>;
using FileMetaVec = std::vector<FileMeta>;
using ParseMetaVec = std::vector<ParseMeta>;
using FileData = std::tuple<extern_worker::Ref<std::string>,
extern_worker::Ref<RepoOptionsFlags>,
std::vector<extern_worker::Ref<UnitDecls>>>;
using ParseCallback = std::function<coro::Task<ParseMetaVec>(
const extern_worker::Ref<Config>&,
extern_worker::Ref<FileMetaVec>,
std::vector<FileData>,
extern_worker::Client::ExecMetadata
)>;
coro::Task<bool> parse(const UnitIndex&, const ParseCallback&);
// These are meant to be called from extern-worker Jobs to perform
// the actual parsing.
static void parseInit(const Config&, FileMetaVec);
static IndexMetaVec indexFini();
static ParseMetaVec parseFini();
static UnitEmitterSerdeWrapper parseRun(const std::string&,
const RepoOptionsFlags&,
const std::vector<UnitDecls>&);
using LocalCallback = std::function<coro::Task<void>(UEVec)>;
using ParseMetaItemsToSkipSet = hphp_fast_set<size_t>;
using EmitCallBackResult = std::pair<ParseMetaVec, ParseMetaItemsToSkipSet>;
using EmitCallback = std::function<
coro::Task<EmitCallBackResult>(const std::vector<std::filesystem::path>&)
>;
coro::Task<bool> emit(const UnitIndex&, const EmitCallback&,
const LocalCallback&, const std::filesystem::path&);
private:
struct FileAndSize {
std::filesystem::path m_path;
size_t size;
};
using FileAndSizeVec = std::vector<FileAndSize>;
struct Group {
std::vector<std::filesystem::path> m_files;
size_t m_size{0};
};
using Groups = std::vector<Group>;
struct GroupResult {
Groups m_grouped;
FileAndSizeVec m_ungrouped;
};
struct OndemandInfo {
FileAndSizeVec m_files;
std::vector<SymbolRefEdge> m_edges;
};
// Partition all files specified for this package into groups.
// If filterFiles/Dirs==true, ignore excluded files and/or directories
// according to options.
coro::Task<Groups> groupAll(bool filterFiles, bool filterDirs);
coro::Task<GroupResult>
groupDirectories(std::string, bool filterFiles, bool filterDirs);
void groupFiles(Groups&, FileAndSizeVec);
coro::Task<void> prepareInputs(Group,
std::vector<std::filesystem::path>& paths,
std::vector<FileMeta>& metas,
std::vector<coro::Task<extern_worker::Ref<RepoOptionsFlags>>>& options);
coro::Task<void> indexAll(const IndexCallback&);
coro::Task<void> indexGroups(const IndexCallback&, Groups);
coro::Task<void> indexGroup(const IndexCallback&, Group);
coro::Task<void> parseAll(const ParseCallback&, const UnitIndex&);
coro::Task<void> parseGroups(Groups, const ParseCallback&, const UnitIndex&);
coro::Task<void> parseGroup(Group, const ParseCallback&, const UnitIndex&);
void resolveDecls(const UnitIndex&, const FileMetaVec&,
const std::vector<ParseMeta>&, std::vector<FileData>&, size_t attempts);
coro::Task<void> emitAll(const EmitCallback&, const UnitIndex&,
const std::filesystem::path&);
coro::Task<OndemandInfo>
emitGroups(Groups, const EmitCallback&, const UnitIndex&);
coro::Task<OndemandInfo>
emitGroup(Group, const EmitCallback&, const UnitIndex&);
void resolveOnDemand(OndemandInfo&, const StringData* fromFile,
const SymbolRefs&, const UnitIndex&, bool report = false);
std::string m_root;
folly_concurrent_hash_map_simd<std::string, bool> m_seenFiles;
std::atomic<bool> m_failed;
std::atomic<size_t> m_total;
Optional<std::chrono::microseconds> m_inputMicros;
Optional<std::chrono::microseconds> m_ondemandMicros;
folly_concurrent_hash_map_simd<std::string, bool> m_filesToParse;
std::set<std::string> m_directories;
std::set<std::string> m_staticDirectories;
hphp_fast_set<std::string> m_extraStaticFiles;
folly_concurrent_hash_map_simd<
std::string, std::string
> m_discoveredStaticFiles;
coro::TicketExecutor& m_executor;
extern_worker::Client& m_client;
coro::AsyncValue<extern_worker::Ref<Config>> m_config;
// Content-store for options: Map<hash(options), options>
extern_worker::RefCache<SHA1, RepoOptionsFlags> m_repoOptions;
};
struct UnitIndex final {
struct Locations {
Locations(std::string rpath,
extern_worker::Ref<Package::UnitDecls> ref)
: rpath(std::move(rpath)), declsRef(std::move(ref))
{}
// Relative path to source file in local filesystem
std::string rpath;
// handle to serialized decls in extern blob store
extern_worker::Ref<Package::UnitDecls> declsRef;
};
using IMap = folly_concurrent_hash_map_simd<
const StringData*, std::shared_ptr<Locations>,
string_data_hash,
string_data_isame
>;
using Map = folly_concurrent_hash_map_simd<
const StringData*, std::shared_ptr<Locations>
>;
// Returns true if any ParseMeta in parseMetas references a missing
// symbol that is present in this index.
bool containsAnyMissing(const Package::ParseMetaVec& parseMetas) const;
IMap types;
IMap funcs;
Map constants;
Map modules;
};
// Given the result of running `hackc::decls_to_facts()`, create a
// `Package::IndexMeta` containing the names of all decls in `facts`.
Package::IndexMeta summary_of_facts(const hackc::FileFacts& facts);
///////////////////////////////////////////////////////////////////////////////
}
hhvm/hphp/doc/bytecode.specification
**********************************
* HipHop Bytecode v1 revision 18 *
**********************************
Introduction
------------
HipHop bytecode (HHBC) v1 is intended to serve as the conceptual basis for
encoding the semantic meaning of HipHop source code into a format that is
appropriate for consumption by interpreters and just-in-time compilers. By
using simpler constructs to encode more complex expressions and statements,
HHBC makes it straightforward for an interpreter or a compiler to determine
the order of execution for a program.
HHBC was designed with several competing goals in mind:
1) Run-time efficiency. The design of HHBC should be congruous to implementing
an efficient execution engine, whether it be an interpreter or a just-in-time
compiler.
2) PHP 5.5 compatibility. It should be possible to compile valid PHP 5.5 source
code into HipHop bytecode in a way that preserves the semantic meaning of the
source.
3) Simplicity. The design of HHBC should avoid features that could be removed
or simplified without compromising PHP 5.5 compatibility, run-time efficiency,
or design cleanliness.
Compilation units
-----------------
Each HipHop source file is compiled into a separate "compilation unit", or
"unit" for short. Units are composed of bytecode and metadata.
A unit's bytecode is an array of bytes encoding a sequence of HHBC
instructions, where each instruction is encoded using one or more bytes. This
specification defines an instruction set and defines the behavior of each HHBC
instruction, but the exact byte values used to encode HHBC instructions are
currently unspecified.
A unit's metadata is a set of structures that provide essential information
that is needed at run time by the execution engine. This specification will
describe a unit's metadata as a set of named tables with ordered rows, but the
exact format of the metadata is currently unspecified.
Each instruction in a unit's bytecode can be referred to using a "bytecode
offset", which is the distance in bytes from the first byte of a unit's
bytecode to the first byte of the instruction.
A unit's bytecode is partitioned into sections called "functions". The unit's
metadata uses bytecode offsets to specify which instructions belong to which
functions.
When a unit is loaded at run time, the execution engine assigns the unit's
bytecode a logical range of addresses called "bytecode addresses". An
instruction is referred to at run time using its bytecode address.
Flow of execution
-----------------
HipHop bytecode models the flow of execution using a stack of frames referred
to as the "call stack". A "frame" is a structure that logically consists of a
header, a program counter (PC), a local variable store, an iterator variable
store, and an evaluation stack.
The frame at the top of the call stack is referred to as the "current frame".
The current frame represents the function that is currently executing. The
program counter (PC) of the current frame is referred to as the "current PC".
At any given time, the current PC holds the bytecode address of the current
instruction to execute. When the execution engine executes an instruction, the
current PC is updated to point to the next instruction. By default, the current
PC is updated to point to the byte that sequentially follows the last byte of
the current instruction in the bytecode. Some instructions override the default
behavior and explicitly update the current PC in a specific way.
HHBC provides special instructions to allow for calling a function and
returning from a function. When a function is called, a new frame is pushed
onto the call stack, and the PC of the new frame is initialized to the
appropriate entry point (typically the instruction of the function that is
sequentially first in the bytecode). The new frame becomes the current frame,
and the PC of the new frame becomes the current PC. When a function returns,
the current frame is popped off the call stack. The previous frame becomes the
current frame, and its PC becomes the current PC. The facility provided by the
execution engine that is responsible for handling function calls and returns is
called the "dispatcher".
Typically, a frame is removed from the call stack when its corresponding
function returns. However, a frame may be removed from the call stack before
its corresponding function returns in the course of processing an exception.
The facility provided by the execution engine that is responsible for
processing exceptions is called the "unwinder".
Values
------
HHBC instructions may push and pop values on the current frame's evaluation
stack and they may read and write values to the current frame's local
variables. A value (or "cell") is a structure that contains a type identifier
and either data (for non-refcounted types) or a pointer to data (for refcounted
types). When a cell containing a pointer is duplicated, the new cell will point
to the same data as the original cell. When a cell containing a pointer is
duplicated or discarded, the execution engine is responsible for honoring the
data's refcount logic. Throughout this document, we use "cell" and "value"
interchangeably.
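To make this concrete, a cell could be modelled roughly as follows. This is a
minimal illustrative C++ sketch; the names and layout are assumptions for
exposition, not HHVM's actual value representation.

  #include <cstdint>

  // A cell: a type tag plus either inline data or a pointer to refcounted
  // data. Copying a cell of a refcounted type copies the pointer only; the
  // execution engine must then honor the refcount logic of the shared data.
  struct Cell {
    enum class Type { Null, Bool, Int, Dbl, Str, Arr, Obj };
    Type type;
    union {
      bool     bval;  // Bool
      int64_t  ival;  // Int
      double   dval;  // Dbl
      void*    pval;  // pointer to refcounted data (Str, Arr, Obj)
    };
  };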
Functions
---------
A unit's bytecode is organized into functions. Each function has its own
metadata that provides essential information about the function, such as the
name of the function, how many local variables it has, how many iterator
variables it has, how many formal parameters it has, the names of the local
variables, the names of the formal parameters, how each parameter should be
passed (pass by value vs. pass by reference), whether each parameter has a
default value, and an upper bound for the maximum depth the evaluation stack
can reach at run time.
Each local variable and iterator variable has an id, and HHBC instructions can
reference these variables using these ids. The id space for local variables and
iterator variables are all distinct from each other. Thus local id 1 refers to
a different variable than iterator id 1. Local variable ids and iterator ids
are signed 32-bit integer values. No function may have more than 2^31 - 1 each
of local variables or iterator variables.
Some local variables have names associated with them (called "named local
variables"), while other local variables do not have names associated with them
(called "unnamed local variables"). All local variables that reference formally
declared parameters have names associated with them. Iterator variables do not
have names associated with them. Variables that have a name associated with
them will appear in the current variable environment (if they are defined),
while variables that do not have a name associated with them will never appear
in the current variable environment.
Formally declared parameters are considered to be local variables. Given a
function with n formally declared parameters, local ids 0 through n-1 will be
used to reference the formally declared parameters. Formal parameters without
default values are called "required parameters", while formal parameters with
default values are called "optional parameters".
The metadata of each function specifies a set of non-overlapping ranges of
bytecode that compose the function body, and it specifies the main entry point
and 0 or more default value ("DV") entry points (entry points are discussed in
more detail in the "Entry points" section).
The total size of the bytecode for the function body must not exceed 2^31 - 1
bytes. The bytecode for a function must be one contiguous range of bytecode.
Each function's metadata provides a "line number table" to allow mapping
bytecode offsets back to source line numbers. Each row in the line number table
consists of a source line number and a range of bytecode. The table is sorted
by starting bytecode offset, lowest offset first. The bytecode offset of the
beginning of each instruction in the function must belong to exactly one of the
ranges of bytecode in the line number table.
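Purely as an illustration, one row of the line number table could be modelled
like this (hypothetical C++; not HHVM's actual metadata types):

  #include <cstdint>

  // One line number table row: a source line and the bytecode range it covers.
  // Rows are sorted by starting offset, lowest first.
  struct LineTableEntry {
    int32_t line;         // source line number
    int32_t startOffset;  // bytecode offset where the range begins
    int32_t endOffset;    // bytecode offset where the range ends
  };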
Classes
-------
Functions may be grouped into metadata for classes. Class metadata objects are
used to describe several PHP-level language features including traits,
interfaces, closures, and (of course) classes.
Class metadata includes information about the properties on the class, special
functions on the class such as constructors or internal property initialization
routines (86sinit, 86pinit), class constants, list of used traits, list of
extended classes, list of implemented interfaces, etc.
Classes also include a flag indicating their "hoistability". For now this isn't
documented much here. See class.h.
Closures
--------
Closures are implemented in HHBC as subclasses of Closure, in conjunction with
the CreateCl opcode. It is legal HHBC to create other subclasses of Closure (to
represent user code that attempts to do the same), but attempting to
instantiate one will result in a fatal error. The documentation of the CreateCl
opcode below lists the requirements for a closure subclass to be usable with
it.
Generators
----------
The basic compilation strategy for generators is to create bytecode functions
consisting of two parts.
The first part, executed when the generator function is called, must consist
of a CreateCont, which is responsible for suspending the execution state into a
new Generator object (which includes a resume offset pointing to the start of
the second part of the function) and returning that object to the caller.
The second part is where the real user-level code of the generator should be
placed. ContEnter and ContRaise opcodes used in Generator's next(), send()
and raise() methods resume execution and transfer control to the resume offset
stored in the Generator object. The user-level code yields values using
Yield and YieldK opcodes and returns using RetC opcode.
Async functions
---------------
Async functions are a special type of function representing asynchronous
execution. They can suspend while waiting for other asynchronous operations to
finish. This is achieved using the Await opcode, which suspends execution into
an AsyncFunctionWaitHandle object. Once the given dependency is finished,
the scheduler resumes the async function at the next opcode.
The async function body can be executed in 2 different modes. If the execution
was never suspended, we are in "eager execution" mode. The code executed after
the resume is executed in "resumed execution" mode.
The "eager execution" can end in 3 different ways. If a RetC opcode is reached,
the result is wrapped into a succeeded StaticWaitHandle and returned to the
caller. If an exception is thrown, it is wrapped into a failed StaticWaitHandle
and returned to the caller. Otherwise, if an Await opcode was reached and the
provided child WaitHandle has not finished, the current execution state is
suspended into an AsyncFunctionWaitHandle object and returned to the caller.
This mechanism allows fast execution if no blocking asynchronous operation was
reached.
The "resumed execution" mode is always entered from the scheduler. In this mode,
the async function either gets blocked on another dependency, or gets finished.
The scheduler is notified of these events using Await and RetC opcodes (or via
the unwinder if an exception was thrown) and the control is given back.
The async function implementation is still changing and the implementation may
change significantly, so this spec is staying light on details for now.
Entry points
------------
Entry points come in three varieties: the main entry point, default value ("DV")
entry points, and catch entry points.
Every function has exactly one main entry point. When a function is called, the
dispatcher will set the PC of the new frame to point to the main entry point if
either (1) the function does not have any optional parameters or (2) the caller
provides values for all of the optional parameters.
DV entry points are normally used to handle initializing optional parameters
that the caller did not provide. Generally the DV entries contain blocks that
initialize parameters, and then fall through directly into one another, with
the last block ending with a jump to the main entry point. This is not a
requirement, however. The dispatcher selects the appropriate DV entry point
based on the number of arguments passed into the function.
The main entry point and DV entry points are used by the dispatcher when
handling a function call. Each function's metadata provides an "entry point
table". Each row in the entry point table consists of a number of arguments and
the bytecode offset of the entry point that should be used by the dispatcher
(either the main entry point or a DV entry point).
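As a sketch, one row of the entry point table might look like this
(illustrative C++ only; not HHVM's actual representation):

  #include <cstdint>

  // Maps a call-time argument count to the entry point the dispatcher uses
  // (either the main entry point or a DV entry point).
  struct EntryPointTableRow {
    int32_t numArgs;           // number of arguments passed by the caller
    int32_t entryPointOffset;  // bytecode offset of the selected entry point
  };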
Catch entry points are used by the unwinder to resume normal execution once a
matching "catch" block has been found and all the necessary cleanup has been
performed. When catch entry points are entered, the stack contains a single
Throwable object.
More details about the unwinder and catch entry points can be found in the
"Exception handler (EH) table" and "Processing exceptions" sections.
Unit metadata
-------------
Every compilation unit has a litstr table, a scalar array table, a function
table, and a class table.
The litstr table maps litstr ids to literal strings. Bytecodes that refer to
literal strings do so by litstr id. Litstr ids are signed 32-bit integer
values, which must be between 0 and 2^31 - 2 inclusive. In addition to the
per-unit litstr tables, a global table is built when generating an
"authoritative" repo (one in which all the PHP code is known at bytecode
generation time, and is guaranteed not to change). Global litstr ids can be
used in any unit, and are encoded in the range [2^30..2^31-2].
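For example, under the encoding described above, distinguishing a global
litstr id from a per-unit one could be written as follows (hypothetical helper,
not an HHVM API):

  #include <cstdint>

  // Global litstr ids occupy [2^30, 2^31 - 2]; per-unit ids start from 0.
  bool isGlobalLitstrId(int32_t id) {
    return id >= (int32_t{1} << 30) && id <= INT32_MAX - 1;
  }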
The scalar array table maps scalar array ids to a description of the contents
of a scalar array. An array is a scalar array if and only if each element of
the array is a null, boolean, integer, double, string, or a scalar array.
Furthermore, each element of a scalar array must be a cell. Finally, scalar
arrays may not recurse infinitely. Each scalar array id must be between 0 and
2^31 - 2 inclusive.
Each row in the function table contains a unique function id, a function name
specified by a litstr id, the bytecode offset for the corresponding function, a
flag that indicates if the function is unconditionally declared in the
outermost scope, and the function metadata. Note that there may be multiple
rows in the function table with the same function name. However, there may not be
multiple rows that are marked as being unconditionally declared in the
outermost scope with the same function name. Each function id must be between 0
and 2^31 - 2 inclusive.
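One function table row could be pictured roughly as follows (hypothetical C++
sketch; the field names are assumptions, not HHVM's actual unit metadata):

  #include <cstdint>

  struct FunctionTableRow {
    int32_t funcId;          // unique function id, between 0 and 2^31 - 2
    int32_t nameLitstrId;    // function name, given as a litstr id
    int32_t bytecodeOffset;  // offset of the function's bytecode
    bool    topLevel;        // unconditionally declared in the outermost scope
    // ... followed by the function metadata described above
  };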
Each row in the class table contains a class name specified by a litstr id and
the class metadata.
Calling convention
------------------
The caller may pass any number of parameters to the callee by pushing zero or
more cells or refs on the stack prior to executing a FCall* instruction. The
caller must pass the parameters in forward order, i.e. the first pushed value
corresponds to the first parameter, and so forth.
The FCall* instructions can be used to call a global function, a method on
an object, or a method from a class. The caller is responsible for evaluating
all of the parameters in forward order. When the caller executes the FCall*
instruction, the dispatcher creates a new frame and moves the parameters
prepared by the caller into the callee's variable environment. The dispatcher
then transfers control to the appropriate entry point of the callee (either the
main entry point or a DV entry point) based on the number of parameters passed.
When the callee executes the Ret* instruction, the dispatcher pushes the return
value onto the caller's evaluation stack. Then the dispatcher destroys the
callee's frame and transfers control back to the caller.
Exception handler (EH) table
----------------------------
The metadata for each function provides an "exception handler (EH) table".
Each row in the EH table (called an "EH entry") consists of a non-negative
integer "region depth", a set of non-overlapping ranges of bytecode that
compose the "protected region", and an offset of a catch entry point.
Each range of bytecode is given by a starting offset and an ending offset,
where the starting offset is the bytecode offset of the first byte of the first
instruction in the range and the ending offset is the bytecode offset after the
last byte of the last instruction in the range.
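A rough sketch of an EH entry and its bytecode ranges, in illustrative C++
(names and types are assumptions, not HHVM's actual metadata):

  #include <cstdint>
  #include <vector>

  // A range of bytecode: startOffset is the offset of the first byte of the
  // first instruction, endOffset is the offset just past the last byte of the
  // last instruction in the range.
  struct BytecodeRange {
    int32_t startOffset;
    int32_t endOffset;
  };

  // One EH entry: its nesting depth, the non-overlapping ranges that compose
  // the protected region, and the catch entry point used by the unwinder.
  struct EHEntry {
    int32_t regionDepth;
    std::vector<BytecodeRange> protectedRegion;
    int32_t catchEntryPoint;  // bytecode offset of the catch entry point
  };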
Note that two or more EH entries may refer to the same catch entry point.
Regardless of whether multiple EH entries share the same catch entry point,
each EH entry in the EH table will be considered to declare a distinct
"protected region".
The EH entries in each EH table must honor the following rules:
1) For each EH entry with a region depth of D and a protected region P, for all
other protected regions Q that overlap with P, one of the following must be
true: (i) Q has a region depth that is greater than D and P is a superset of
(or equal to) Q; or (ii) Q has a region depth that is less than D and P is a
subset of (or equal to) Q.
2) For each EH entry with a region depth of D and a protected region P, for
each integer I where 0 <= I < D there must be exactly one protected region Q in
the EH table where Q's region depth equals I and P overlaps with Q.
Processing exceptions
---------------------
HHBC allows programs to throw exceptions via the Throw instruction. When a
Throw instruction executes it transfers control to the unwinder, which follows
the steps below starting with step 1 until control is transferred elsewhere.
Step 1) Discard all temporary values on the evaluation stack.
Step 2) Consult the EH table of the current function. If there are any EH
entries that cover the current PC, choose the EH entry with the
greatest region depth and continue on to step 3. If no matching EH
entries are found go to step 4.
Step 3) Push the exception object implementing the Throwable interface on the
evaluation stack, then transfer control to the catch entry point. If
this catch entry point corresponds to a PHP try/catch statement, it is
responsible for finding the matching PHP catch clause (e.g. by using
the InstanceOfD opcode) and rethrowing the exception if no matching
clause was found.
Step 4) Check if we are handling a user exception in an eagerly executed async
function. If so, pop the current frame, wrap the exception into a
failed StaticWaitHandle object, leave it on the stack as the return
value of the async function, and resume execution.
Step 5) Pop the current frame off of the call stack and then check if the call
stack is empty. If the call stack is empty transfer control to the
unhandled exception facility passing along the exception. If the call
stack is not empty, then set the PC to point to the FCall* instruction
which invoked the frame we just discarded and go to step 1.
Property access
---------------
As object properties are accessed during execution, the execution engine is
responsible for following certain rules to honor each property's accessibility
and visibility.
The accessibility and visibility of a property in a given class is determined
by that class's definition and the definitions of all of that class's
ancestors. When a property is declared in a class definition (a "declared
property") it may be specified as being "public", "protected", or "private".
Accessibility and visibility are two related but distinct concepts. Depending
on the current context, a property may be visible and accessible, visible but
inaccessible, or invisible and inaccessible.
If a property P is declared with the "public" qualifier in the definition of
class C, for instances of class C and descendent classes the property P will be
visible and accessible in all contexts. If C has an ancestor that declares a
public property with the same name as P, C is said to "redeclare" property P,
and the declaration of P in class C is considered to refer to the same property
as the declaration in the ancestor class.
If a property P is declared as "protected" in the definition of class C, for
instances of class C the property P will be visible in all contexts, but only
accessible in the context of class C, an ancestor class, or descendent class.
When class C is loaded at run time, a semantic check must be performed to
ensure that all ancestor classes of C do not declare a property as "public"
with the same name as P. If C has an ancestor that declares a public property
with the same name as P, the execution engine must throw a fatal error when
class C is loaded. If C has an ancestor that declares a protected property with
the same name as P, C is said to "redeclare" property P, and the declaration of
P in class C is considered to refer to the same property as the declaration in
the ancestor class. Note that there may exist a class D that is a descendent of
C and declares a property as "public" with the same name as P. In such cases
the new "public" declaration in D is considered to refer to the same property
as the original "protected" declaration in C, and the "protected" qualifier
from the original declaration is effectively overridden by the "public"
qualifier from the new declaration. Class D is said to "redeclare" property P
with the "public" qualifier. Thus, for instances of class D and descendent
classes of D, property P will be visible and accessible in all contexts.
Finally, if a class E that is descendent of C does not redeclare P as public
and does not have an ancestor class that redeclares P as public, for instances
of class E the property P will be visible in all contexts, but only accessible
in the context of class E, an ancestor class of E, or a descendent class of E.
If a property P is declared with the "private" qualifier in the definition of
class C, for instances of class C the property P will be visible in all
contexts, but only accessible in the context of class C. For instances of
descendent classes of C, the property P will be visible and accessible in the
context of the class C, and in all other contexts property P will be invisible
and inaccessible. When class C is loaded at run time, a semantic check must be
performed to ensure that all ancestor classes of C do not declare a property as
"public" or "protected" with the same as P. If C has an ancestor that declares
a public or protected property with the same name as P, the execution engine
must throw a fatal error when class C is loaded. Note that descendent classes
of C may declare another property with the same name as P. The declaration of
property P as "private" in class C is considered to define a separate property
that is distinct from all other properties of the same name declared in
ancestor classes and descendent classes of C.
An instruction that accesses a property specifies the property by a name N via
a litstr id, a local variable id, or a cell consumed from the evaluation stack.
As noted above, it is possible for a class to have multiple distinct properties
named N. In cases where there are multiple distinct properties named N, the
visibility rules are used to determine which property is retrieved. If there is
a visible private property P named N, then property P is retrieved. Otherwise,
if there is a visible non-private property Q named N, then property Q is
retrieved. If there is no visible property named N, the behavior is determined
by the specific instruction. The semantic checks and the visibility rules
ensure that for any context there cannot be more than one visible private
property, and there cannot be more than one visible non-private property.
Some instructions can create a new property at run time with a name that is
different than the names of all declared properties that are visible in the
current context. Such properties are called "non-declared properties" or
"dynamic properties". Dynamic properties are considered to be visible and
accessible in all contexts.
If a declared property is unset, and then re-accessed/re-created, then it is
treated the same way as an invisible property with the same attributes as the
original declared property. Specifically, if the property gets created again,
it must have the same access attributes as the original declared property.
Magic property access methods
-----------------------------
Instructions that access properties may in some cases invoke a magic property
access method (__get, __set, __isset, or __unset) if an object implements the
method and the method is considered eligible for invocation. A magic property
access method is considered "eligible" for a given object if there is not a
frame on the call stack that corresponds to an invocation of the same method on
the same object.
Static property access
----------------------
As a class's static properties are accessed during execution, the execution
engine is responsible for following certain rules to honor each static
property's accessibility and visibility.
The accessibility and visibility of a static property in a given class is
determined by that class's definition and the definitions of all of that
class's ancestors. When a static property is declared in a class definition it
may be specified as being "public", "protected", or "private". Depending on the
current context, a static property may be visible and accessible, visible but
inaccessible, or invisible and inaccessible.
Conceptually, each class has a "static store" associated with it at run time
that provides storage for the static properties declared in the class's
definition. Static properties are accessed at run time by name through the
scope of a class. When an instruction accesses a static property through the
scope of class C, it will search the static store of C and then the static
stores of C's ancestors (starting with C's base class and moving up the
inheritance chain) for the first static property with the given name that is
visible in the current context.
If a static property S is declared with the "public" qualifier in the
definition of class C, the static property S when accessed through the scope of
class C or a descendent of C will be visible and accessible in all contexts.
Note that descendent classes of C may declare another static property with the
same name as S. The declaration in class C is considered to define a separate
static property that is distinct from all other static properties declared in
descendent classes of C.
If a static property S is declared with the "protected" qualifier in the
definition of class C, the static property S when accessed through the scope of
class C or a descendent of C will be visible in all contexts, but only
accessible in the context of class C, an ancestor class of C, or descendent
class of C. When class C is loaded at run time, a semantic check must be
performed to ensure that all ancestor classes of C do not declare a static
property as "public" with the same name as S. If C has an ancestor that
declares a public static property with the same name as S, the execution engine
must throw a fatal error when class C is loaded. Note that descendent classes
of C may declare another static property with the same name as S. The
declaration in class C is considered to define a separate static property that
is distinct from all other static properties declared in descendent classes of
C.
If a static property S is declared with the "private" qualifier in the
definition of class C, the static property S when accessed through the scope of
class C will be visible in all contexts, but only accessible in the context of
class C. The static property S when accessed through the scope of a descendent
of C will only be visible and accessible in the context of class C. When class
C is loaded at run time, a semantic check must be performed to ensure that all
ancestor classes of C do not declare a static property as "public" or
"protected" with the same name as S. If C has an ancestor that declares a
public or protected static property with the same name as S, the execution
engine must throw a fatal error when class C is loaded. Note that descendent
classes of C may declare another static property with the same name as S. The
declaration in class C is considered to define a separate static property that
is distinct from all other static properties declared in descendent classes of
C.
Note that instructions cannot create new static properties in a class that were
not declared in the class definition.
Flavor descriptors
------------------
Any given value on the stack must either be a cell or ref at run time. However,
at bytecode generation time the specific flavor of a value on the stack is not
always known. HipHop bytecode uses symbols called "flavor descriptors" to
precisely describe what is known at bytecode generation about the state of the
evaluation stack at each instruction boundary.
Each instruction description specifies the flavor descriptor produced for each
of its outputs. Each description also specifies the flavor descriptor consumed
for each of the instruction's inputs.
Here is a description of each flavor descriptor:
C - cell; specifies that the value must be a typed value at run time
U - uninit; specifies that the value must be an uninitialized null at run
time; this is only used for FCallBuiltin, CreateCl, and CUGetL.
Verifiability
-------------
Because instructions specify constraints on the flavor descriptor of each
input, it is important to be able to determine if a given HHBC program
satisfies these constraints. A program that satisfies the constraints on the
inputs to each instruction is said to be "flavor-safe".
HHBC provides a set of verification rules that can be mechanically applied to
verify that an HHBC program is flavor-safe. All valid HHBC programs must be
verifiably flavor-safe, and the execution engine may refuse to execute HHBC
programs that cannot be verified.
At bytecode generation time, what is known about the state of the evaluation
stack at a given instruction boundary can be precisely described using flavor
descriptors.
In addition to being flavor-safe, there are other invariants that valid HHBC
programs must uphold with respect to metadata and how certain instructions are
used.
Below is the complete list of verifiability rules. If the bytecode to be
executed does not come from a trusted source, it is the responsibility of the
bytecode execution engine to verify that these invariants hold.
1) The depth of the evaluation stack at any given point in the bytecode must be
the same for all possible control flow paths. The flavor descriptor of any
given slot on the evaluation stack at any given point in the bytecode must be
the same for all possible control flow paths.
2) No instruction may consume more values from the evaluation stack than are
available at that given point in the bytecode. Likewise, the flavor descriptor
of each slot on the evaluation stack must be compatible with the instruction's
inputs' flavor descriptors.
3) The evaluation stack must be empty at any offset listed as a catch entry
point.
4) If a given instruction is not the target of a forward branch and it follows
a Jmp, Switch, SSwitch, RetC, Fatal, Throw, or NativeImpl instruction, the
evaluation stack before executing the given instruction must be empty.
5) Before executing the RetC instruction, the evaluation stack must contain
exactly one value and the flavor descriptor of the value must be cell.
Finally, before executing the NativeImpl instruction, the evaluation stack must
be empty.
6) The code for the function body must be laid out in one contiguous block.
7) The last instruction of the function body must be either a control-flow
instruction without fallthrough or a terminal instruction.
8) The initialization state of each iterator variable must be known at every
point in the code and must be the same for all control paths. There are two
possible states: (1) uninitialized, and (2) "iter-initialized" (initialized
via IterInit*). Every range of bytecode for which an iterator variable i is
initialized must be protected by an EH entry with a catch handler that unsets
i by calling IterFree.
9) The iterator variable referenced by IterInit* must be in the uninitialized
state when the instruction executes. An iterator variable referenced by
IterNext* and IterFree must be in the "iter-initialized" state. Note that
IterInit* conditionally initialize the iterator variable, and IterNext*
conditionally free the iterator variable.
10) Each EH table must follow all of the rules specified in the "Exception
handler (EH) table" section.
11) Assertion (AssertRATL and AssertRATStk) instructions cannot be separated
from the following instruction by control flow. Practically speaking, this means
that the instruction immediately following an assertion cannot be a jump target.
12) Sequences of member instructions should be consistent and continuous. That
is, only member instructions and asserts may appear in the sequence, control
flow cannot interrupt the sequence, and the member op mode should be consistent
across all instructions in the sequence. This is because in the case of
exceptions the unwinder decides whether the member state is live by looking at
the instruction that threw.
Instruction set
---------------
Each instruction description below consists of a mnemonic, followed by 0 or
more immediate operands, followed by a stack transition description of the form
"[xn,...,x2,x1] -> [ym,...,y2,y1]", where "[xn,...,x2,x1]" is a list of flavor
descriptors describing what the instruction consumes from the evaluation stack
and "[ym,...,y2,y1]" is the list of flavor descriptors describing what the
instruction pushes onto the stack. x1 and y1 represent the topmost stack
elements before and after execution, respectively.
Each element of a stack transition may also contain an optional type
annotation. Here is the list of the type annotations used in instruction
descriptions:
Null - denotes the null type
Bool - denotes the boolean type
Int - denotes the integer type
Dbl - denotes the double-precision floating-point type
Str - denotes the string type
Vec - denotes the vec type
Dict - denotes the dict type
Keyset - denotes the keyset type
Obj - denotes the object type
Rec - denotes the record type
ArrLike - denotes array, vec, dict, or keyset
Class - denotes class pointer type
LazyClass - denotes lazy class type
Multiple type annotations may be combined together using the "|" symbol. For
example, the type annotation "Int|Dbl" means that a value is either integer or
a double.
Some instructions may contain multiple stack transition descriptions to express
the relationship between the types of the values consumed from the stack and
types of the values pushed onto the stack. Also, in some stack transition
descriptions, "<T>" is used as shorthand to represent any one specific type.
For example, a transition such as "[C:<T>] -> [C:<T>]" indicates that the type
of value that the instruction pushes onto the stack will match the type of
value that it consumed from the stack. Likewise, "<F>" is used as shorthand to
represent any one specific flavor descriptor.
$1 is used to refer to the value at the top of the evaluation stack, $2 is used
to refer to the value directly below $1 on the evaluation stack, $3 is used to
refer to the value directly below $2, and so forth. Also, %1 is used to refer
to the first immediate argument, and %2 is used to refer to the second
immediate argument. Thus, the indices used to refer to values on both
the evaluation stack and in the immediate arguments list are 1-indexed.
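For example, the following short sequence (illustrative only) computes 7 + 35;
the informal stack contents after each instruction are shown on the right,
with the topmost element rightmost:
  Int 7    ->  [ 7 ]
  Int 35   ->  [ 7 35 ]
  Add      ->  [ 42 ]
Here the Add instruction consumes $1 (35) and $2 (7) and pushes $2 + $1.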
Note that the relative offset immediate used by a Jmp*, Iter*, Switch,
or SSwitch instruction is relative to the beginning of the instruction.
There are numerous instructions that operate on different kinds of locations.
Locations are specified using "location descriptors". The complete list of
location descriptors is given below:
L - local id; location is the local variable whose id is given by an
immediate.
N - local name; location is the local variable whose name is given by the
value of a cell.
G - global name; location is the global variable whose name is given by the
value of a cell.
S - static property; location is the static property whose class is given by
value of a cell, and whose name is given by value of a cell.
C - cell; location is a temporary value given by a cell.
H - $this; location is the $this pointer in the current frame. Must only be
used in a frame that is known to have a non-null $this pointer; CheckThis
is most commonly used to ensure this.
There are several groups of similarly named instructions where the name of each
instruction ends with a different location descriptor (for example, Set*). Each
instruction in the group performs similar actions but takes different kinds of
inputs to specify the location to access.
There are numerous instructions which incorporate a readonly immediate.
This immediate can be Mutable, Any, ReadOnly, CheckROCOW, or CheckMutROCOW and
specifies the readonly constraint on the property read or written by the
instruction. The Any immediate is equivalent to no runtime check.
The ReadOnly immediate specifies this property must be readonly.
The Mutable immediate specifies this property must be mutable. The CheckROCOW
immediate specifies this property must be readonly and COW. The CheckMutROCOW
immediate specifies this property must be mutable unless it is readonly
and COW.
The member instructions provide functionality to operate on elements and
properties. Many of these instructions incorporate a readonly immediate
argument, as well as an immediate argument which specifies one of the
following member descriptors.
EC - consume a cell from the evaluation stack as an element
EL:<id> - consume a local given by an immediate id as an element
ET:<id> - consume a litstr given by an immediate id as an element
EI:<int> - consume an immediate integer as an element
PC - consume a cell from the evaluation stack as a property
PL:<id> - consume a local given by an immediate id as a property
PT:<id> - consume a litstr given by an immediate id as a property
QT:<id> - a nullsafe version of PT:<id>. The null-base doesn't issue
a warning, and no stdClass promotion in write context for the
base happens. Consume a litstr given by an immediate id
as a property
W - synthesize a new element (no corresponding local variable or
evaluation stack slot)
The instruction set is organized into the following sections:
1. Basic instructions
2. Literal and constant instructions
3. Operator instructions
4. Control flow instructions
5. Get instructions
6. Isset and type querying instructions
7. Mutator instructions
8. Call instructions
9. Member operations
10. Member instructions
11. Iterator instructions
12. Include, eval, and define instructions
13. Miscellaneous instructions
14. Generator creation and execution
15. Async functions
1. Basic instructions
---------------------
Nop [] -> []
No operation. This instruction does nothing.
PopC [C] -> []
PopU [U] -> []
Pop. Discards the value on the top of the stack.
PopU2 [U C:<T>] -> [C:<T>]
Pop two. Discards the uninit underneath the cell on top of the stack.
PopL <local variable id> [C] -> []
Teleport value from the stack into a local. This instruction marks the local
variable given by %1 as defined and pops and stores the value $1 into the
local variable. This instruction behaves as if it was a SetL PopC pair, but
might be implemented more efficiently.
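For example, the following sketch stores 42 into a local (the local name $x is
illustrative only):
  Int 42
  PopL $x      (same observable effect as: SetL $x; PopC)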
Dup [C:<T>] -> [C:<T> C:<T>]
Duplicate. Duplicates the cell $1 and pushes it onto the stack.
CGetCUNop [C|U:<T>] -> [C:<T>]
Convert a cell or uninit value to a cell, no op. This is a flavor-safety only
opcode and should only be used when $1 is statically known to be a cell.
UGetCUNop [C|U:<T>] -> [U:<T>]
Convert a cell or uninit value to an uninit, no op. This is a flavor-safety
only opcode and should only be used when $1 is statically known to be an
uninit.
2. Literal and constant instructions
------------------------------------
Null [] -> [C:Null]
True [] -> [C:Bool]
False [] -> [C:Bool]
Push constant. Null pushes null onto the stack, True pushes true onto the
stack, and False pushes false onto the stack.
NullUninit [] -> [U]
Push an uninitialized null on the stack.
Int <signed 64-bit integer value> [] -> [C:Int]
Double <double value> [] -> [C:Dbl]
String <litstr id> [] -> [C:Str]
Vec <scalar vec id> [] -> [C:Vec]
Dict <scalar dict id> [] -> [C:Dict]
Keyset <scalar keyset id> [] -> [C:Keyset]
LazyClass <litstr id> [] -> [C:LazyClass]
Push immediate. Pushes %1 onto the stack.
NewDictArray <capacity hint> [] -> [C:Dict]
New dict, with a capacity hint. Creates a new dict and pushes
it onto the stack. The implementation may make use of the hint in %1 to
pre-size the array. The hint %1 must be greater than or equal to 0.
NewStructDict <litstr id vector> [C..C] -> [C:Dict]
New dict array. Creates a new dict array from the names given in %1 and
values from the stack. The vector of litstr ids gives the element names, one
value for each name is popped from the stack. Names are in array insertion
order, and values were pushed onto the stack in insertion order, so are added
to the array in reverse order (the topmost value will become the last element
in the array). For example:
NewStructDict < "a" "b" > [ 1 2 ] -> [ dict("a"=>1, "b"=>2) ]
NewVec <num elems> [C..C] -> [C:Vec]
New vec. Creates a new vec from the top %1 cells on the stack, pops those
cells, then pushes the new vec onto the stack. Elements are pushed on the
stack in vec insertion order.
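For example (using the same notation as the NewStructDict example above):
  NewVec 3 [ 1 2 3 ] -> [ vec(1, 2, 3) ]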
NewKeysetArray <num elems> [C..C] -> [C:Keyset]
New keyset. Creates a new keyset from the top %1 cells on the stack, pops
those cells, then pushes the new keyset onto the stack. Elements are pushed
on the stack in keyset insertion order.
AddElemC [C C C] -> [C:Arr|Dict]
Add element. If $3 is an array or dict, this instruction executes $3[$2] = $1
and then pushes $3 onto the stack.
If $3 is not an array or dict, this instruction throws a fatal error.
AddNewElemC [C C] -> [C:Arr|Vec|Keyset]
Add new element. If $2 is an array, vec, or keyset this instruction executes
$2[] = $1 and then pushes $2 onto the stack.
If $2 is not an array, vec, or keyset, this instruction throws a fatal error.
NewCol <coll type> [] -> [C:Obj]
New collection. Creates a new empty collection of type %1, and pushes it
onto the stack. %1 must be one of the values of the CollectionType enum other
than Pair.
NewPair [C C] -> [C:Obj]
New Pair collection. Creates a Pair from the top 2 cells on the stack, and
pushes it onto the stack. Values were pushed onto the stack in the order
they exist in the pair, so are added to it in reverse order (the top value
on the stack will become the second element of the pair).
ColFromArray <coll type> [C:Arr] -> [C:Obj]
Create a collection of type %1 from array $1, and pushes the collection onto
the stack. %1 must be one of the values of the CollectionType enum other
than Pair. The array will be used to implement the collection without
conversion or duplication, thus it should not contain references. $1 must be
in packed mode if %1 is Vector or ImmVector, and must be in mixed mode
otherwise.
Note that integer-like string keys are converted to integers in arrays, but
not in collections; thus not all collections can be created using this
instruction.
CnsE <litstr id> [] -> [C:Null|Bool|Int|Dbl|Str|Arr|Vec|Dict|Keyset|Resource]
Get constant. Pushes the value of the global constant named %1 onto the stack
as a cell. If there is no constant named %1, throws a fatal error.
ClsCns <litstr id> [C:Class] -> [C:Null|Bool|Int|Dbl|Str|Arr|Vec|Dict|Keyset|Resource]
Get class constant. This instruction pushes the value of the class constant
named %1 from the class $1 onto the stack. If there is no class
constant named %1 in class $1, this instruction throws a fatal error.
ClsCnsL <local variable id> [C:Class] -> [C:Null|Bool|Int|Dbl|Str|Arr|Vec|Dict|Keyset|Resource]
Get class constant (local). This instruction pushes the value of the class
constant whose name is given by the value of the local variable %1 from the
class $1 onto the stack. If there is no class constant with that name in
class $1, this instruction throws a fatal error.
ClsCnsD <litstr id> <litstr id> [] -> [C:Null|Bool|Int|Dbl|Str|Arr|Vec|Dict|Keyset|Resource]
Get class constant (direct). This instruction first checks if %2 matches the
name of a defined class. If %2 does not match the name of a defined class,
this instruction will invoke the autoload facility passing in the class name
%2, and then it will again check if %2 matches the name of a defined class.
If %2 still does not match the name of a defined class this instruction
throws a fatal error.
Next, this instruction pushes the value of the class constant named %1 from
class %2 onto the stack. If there is no class constant named %1 in class %2,
this instruction throws a fatal error.
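For example, assuming a hypothetical class Foo declaring a constant BAR:
  ClsCnsD "BAR" "Foo" [] -> [ value of Foo::BAR ]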
File [] -> [C:Static Str]
Dir [] -> [C:Static Str]
Method [] -> [C:Static Str]
Push string. File pushes __FILE__ onto the stack, Dir pushes __DIR__ onto
the stack, and Method pushes __METHOD__.
FuncCred [] -> [C:Obj]
Push an object holding information about the currently executing function
onto the stack.
ClassName [C:Class] -> [C:Static Str]
Push the name of the class in $1 as a string.
LazyClassFromClass [C:Class] -> [C:LazyClass]
Push the lazy class corresponding to the class in $1.
3. Operator instructions
------------------------
Concat [C C] -> [C:Str]
Concatenation (.). Pushes ((string)$2 . (string)$1) on the stack.
ConcatN <n> [C..C] -> [C:Str]
Concatenation (.). Pushes ((string)$n . ... . (string)$1) on the stack.
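For example:
  ConcatN 3 [ "a" "b" "c" ] -> [ "abc" ]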
Add [C:<T2> C:<T1>] -> [C:Dbl] (where T1 == Dbl || T2 == Dbl)
[C:<T2> C:<T1>] -> [C:Int] (where T1 != Dbl && T2 != Dbl)
Addition (+). Pushes ($2 + $1) onto the stack. This instruction throws a
fatal error if either $1 or $2 is not numeric.
Sub [C:<T2> C:<T1>] -> [C:Dbl] (where T1 == Dbl || T2 == Dbl)
[C:<T2> C:<T1>] -> [C:Int] (where T1 != Dbl && T2 != Dbl)
Subtraction (-). Pushes ($2 - $1) onto the stack. This instruction throws a
fatal error if either $1 or $2 is not numeric.
Mul [C:<T2> C:<T1>] -> [C:Dbl] (where T1 == Dbl || T2 == Dbl)
[C:<T2> C:<T1>] -> [C:Int] (where T1 != Dbl && T2 != Dbl)
Multiplication (*). Pushes ($2 * $1) onto the stack. This instruction throws a
fatal error if either $1 or $2 is not numeric.
Div [C C] -> [C:Bool|Int|Dbl]
[C:Dbl C:Int] -> [C:Bool|Dbl]
[C:Int C:Dbl] -> [C:Bool|Dbl]
[C:Dbl C:Dbl] -> [C:Bool|Dbl]
Division (/). Pushes ($2 / $1) onto the stack. This instruction throws a
fatal error if either $1 or $2 is not numeric, or if $1 is zero.
Mod [C C] -> [C:Bool|Int]
Modulus (%). Pushes ((int)$2 % (int)$1) onto the stack. This instruction throws a
fatal error if either $1 or $2 is not numeric, or if $1 is zero.
Pow [C C] -> [C:Int|Dbl]
Power. Pushes $2 raised to the power of $1 onto the stack. This instruction throws a
fatal error if either $1 or $2 is not numeric.
Not [C] -> [C:Bool]
Logical not (!). Pushes (!(bool)$1) onto the stack.
Same [C C] -> [C:Bool]
Same (===). Pushes ($2 === $1) onto the stack.
NSame [C C] -> [C:Bool]
Not same (!==). Pushes ($2 !== $1) onto the stack.
Eq [C C] -> [C:Bool]
Equals (==). Pushes ($2 == $1) onto the stack.
Neq [C C] -> [C:Bool]
Not equal (!=). Pushes ($2 != $1) onto the stack.
Lt [C C] -> [C:Bool]
Less than (<). Pushes ($2 < $1) onto the stack.
Lte [C C] -> [C:Bool]
Less than or equal to (<=). Pushes ($2 <= $1) onto the stack.
Gt [C C] -> [C:Bool]
Greater than (>). Pushes ($2 > $1) onto the stack.
Gte [C C] -> [C:Bool]
Greater than or equal to (>=). Pushes ($2 >= $1) onto the stack.
Cmp [C C] -> [C:Int]
Comparison (<=>). Pushes either -1, 0, or 1 onto the stack if ($2 < $1), ($2 ==
$1), or ($2 > $1), respectively.
BitAnd [C:<T2> C:<T1>] -> [C:Int] (where T1 != Str || T2 != Str)
[C:Str C:Str] -> [C:Str]
Bitwise and (&). Pushes ($2 & $1) onto the stack. If either $1 or $2 is an
object, this instruction throws a fatal error.
BitOr [C:<T2> C:<T1>] -> [C:Int] (where T1 != Str || T2 != Str)
[C:Str C:Str] -> [C:Str]
Bitwise or (|). Pushes ($2 | $1) onto the stack. If either $1 or $2 is an
object, this instruction throws a fatal error.
BitXor [C:<T2> C:<T1>] -> [C:Int] (where T1 != Str || T2 != Str)
[C:Str C:Str] -> [C:Str]
Bitwise xor (^). Pushes ($2 ^ $1) onto the stack. If either $1 or $2 is an
object, this instruction throws a fatal error.
BitNot [C:<T>] -> [C:Int] (where T != Str)
[C:Str] -> [C:Str]
Bitwise not (~). Pushes (~$1) onto the stack. If $1 is null, a boolean, an
array, or an object, this instruction throws a fatal error.
Shl [C C] -> [C:Int]
Shift left (<<). Pushes ((int)$2 << (int)$1) onto the stack. This instruction
never throws a fatal error.
Shr [C C] -> [C:Int]
Shift right (>>). Pushes ((int)$2 >> (int)$1) onto the stack. This
instruction never throws a fatal error.
CastBool [C] -> [C:Bool]
Cast to boolean ((bool),(boolean)). Pushes (bool)$1 onto the stack.
CastInt [C] -> [C:Int]
Cast to integer ((int),(integer)). Pushes (int)$1 onto the stack.
CastDouble [C] -> [C:Dbl]
Cast to double ((float),(double),(real)). Pushes (double)$1 onto the stack.
CastString [C] -> [C:Str]
Cast to string ((string),(binary)). Pushes (string)$1 onto the stack. If $1
is an object that implements the __toString method, the string cast returns
$1->__toString(). If $1 is an object that does not implement __toString
method, the string cast throws a fatal error.
CastVec [C] -> [C:Vec]
Cast to vec array. Pushes vec($1) onto the stack.
CastDict [C] -> [C:Dict]
Cast to dict. Pushes dict($1) onto the stack.
CastKeyset [C] -> [C:Keyset]
Cast to keyset. Pushes keyset($1) onto the stack.
InstanceOf [C C] -> [C:Bool]
Instance of (instanceof). If $1 is a string and it matches the name of a
defined class and $2 is an object that is an instance of $1, this instruction
pushes true onto the stack. If $1 is an object and get_class($1) matches the
name of a defined class and $2 is an object that is an instance of
get_class($1), this instruction pushes true onto the stack. If $1 is not a
string or an object, this instruction throws a fatal error.
InstanceOfD <litstr id> [C] -> [C:Bool]
Instance of direct (instanceof). If %1 matches the name of a defined class
and $1 is an instance of the %1, this instruction pushes true onto the stack,
otherwise it pushes false onto the stack.
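For example, assuming a local $obj and a hypothetical class Foo:
  CGetL $obj
  InstanceOfD "Foo"    (pushes true iff the value of $obj is an instance of Foo)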
Select [C C C] -> [C]
Pushes (bool)$1 ? $2 : $3 onto the stack.
DblAsBits [C] -> [C]
If $1 is a double, reinterpret it as an integer (with the same bit-pattern)
and push it onto the stack. Otherwise, push 0.
IsLateBoundCls [C] -> [C:Bool]
If $1 is a subtype of the current late-bound class, this instruction pushes
true onto the stack, otherwise it pushes false onto the stack.
IsTypeStructC <type struct resolve op> [C C] -> [C:Bool]
If $1 matches the type structure of a defined type and $2 is a subtype of $1,
this instruction pushes true onto the stack, otherwise it pushes false onto
the stack.
If the type struct resolve op is Resolve, then resolves the type structure in
$1 before performing the subtype check.
If the type struct resolve op is DontResolve and the given type structure is
unresolved, then this instruction raises an error.
ThrowAsTypeStructException [C C] -> []
Throws a user catchable type assertion exception that indicates what the
given type of $2 is, what the expected type is (given on $1) and which key
it failed at, if applicable.
CombineAndResolveTypeStruct <num type structures> [C..C] -> [C]
Consumes a type structure from the stack that potentially has holes in it,
along with (%1 - 1) additional type structures from the stack, and merges these type
structures into the first type structure. Merging means that the hole on the
first type structure denoted by the reified type kind will be replaced by the
type structure whose id matches the id provided at this field. If the id at
this field does not match that of any given type structures or the provided
inputs are not valid type structures, this instruction throws a fatal error.
After merging, this instruction resolves the final type structure and pushes
it onto the stack.
Print [C] -> [C:Int]
Print (print). Outputs (string)$1 to STDOUT and pushes the integer value 1
onto the stack.
Clone [C] -> [C:Obj]
Clone (clone). Clones $1 and pushes it onto the stack. If $1 is not an
object, this instruction throws a fatal error.
Exit [C] -> [C:Null]
Exit (exit). Terminates execution of the program.
If $1 is an integer, this instruction will set the exit status to $1, push
null onto the stack, and then it will terminate execution.
If $1 is not an integer, this instruction will output (string)$1 to STDOUT,
set the exit status to 0, push null onto the stack, and then it will
terminate execution.
Fatal <fatal subop> [C] -> []
Fatal. This instruction throws a fatal error using $1 as the error message.
If $1 is not a string, this instruction throws a fatal error with an error
message that indicates that the error message was not a string. Setting %1 to
0 will throw a runtime fatal error with a full backtrace. Setting %1 to 1
will throw a parse fatal error with a full backtrace. Setting %1 to 2 will
throw a runtime fatal error with the backtrace omitting the top frame.
4. Control flow instructions
----------------------------
Enter <rel offset> [] -> []
Enter the function body. This instruction is used at the end of default value
initializers to transfer control to the function body.
Jmp <rel offset> [] -> []
Jump. Transfers control to the location specified by %1.
JmpZ <rel offset> [C] -> []
Jump if zero. Conditionally transfers control to the location specified by %1
if (bool)$1 == (bool)0.
JmpNZ <rel offset> [C] -> []
Jump if not zero. Conditionally transfers control to the location specified
by %1 if (bool)$1 != (bool)0.
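As an illustrative sketch, a source fragment such as "if ($cond) { print
"yes"; }" could be lowered as follows; the label name and the local name $cond
are informal (actual bytecode encodes the target as a relative offset):
  CGetL $cond
  JmpZ after_then
  String "yes"
  Print
  PopC
after_then:
  ...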
Switch <bounded> <base> <offset vector> [C] -> []
Switch over integer case values. If bounded == SwitchKind::Unbounded, the
implementation will assume that $1 is an integer in the range [0,
length(vector)) and unconditionally transfer control to the location
specified by vector[$1]. Undefined behavior will result if $1 is not an
integer inside this range. If bounded == SwitchKind::Bounded, the following
rules take over:
For a bounded Switch, the last two elements of the offset vector are special:
they represent the first non-zero case and the default case, respectively.
base + length(vector) - 2 must not be greater than 2^63-1. If $1 === true,
control will be transferred to the location specified by
vector[length(vector) - 2]. If $1 is equal (as defined by Eq) to any integer
$n in the range [base, base + length(vector) - 2), control will be
transferred to the location specified by vector[$n - base]. Otherwise,
control will be transferred to the location specified by
vector[length(vector) - 1].
SSwitch <litstr id/offset vector> [C] -> []
Switch over string case values. This instruction will search the
string/offset vector from the beginning until it finds a string that is equal
to $1. If one is found, control will be transferred to the location specified
by the offset corresponding to that string. If a matching string is not
found, control is transferred to the location specified by the final element
in the vector, which must have a litstr id of -1.
RetC [C] -> []
Return a cell. Returns $1 to the caller.
If this instruction is used inside an async function executed in an "eager
execution" mode, $1 is wrapped into a StaticResultWaitHandle prior to
return. In a "resumed execution" mode, control is given back to the
scheduler and it is informed that the async function has finished.
If used in a generator, the Generator object is marked as finished and
control is given back to the instruction following the ContEnter or
ContRaise instruction in the previous frame. In this case $1 must be null.
RetCSuspended [C] -> []
Return a cell. Returns $1, which is an already suspended wait-handle, to the
caller. This instruction can only be used within async functions. This is
meant to be used within memoized async functions where the memoized value to
be returned is already wrapped in a wait-handle.
RetM <num returns> [C..C] -> []
RetM is a variant of RetC that allows multiple cells to be returned. The RetM
bytecode must be the only form of return used in a single function, and all
callers must use FCall* with the matching number of returned values to invoke
the function.
Throw [C] -> []
Throw. Throws the object $1. If $1 is not an object that extends the
Exception class, this instruction throws a fatal error.
5. Get instructions
-------------------
CGetL <local variable id> [] -> [C]
Get local as cell. If the local variable given by %1 is defined, this
instruction gets the value of the local variable and pushes it onto the stack
as a cell. If the local variable is not defined, this instruction raises a
warning and pushes null onto the stack.
CGetQuietL <local variable id> [] -> [C]
Get local as cell. If the local variable given by %1 is defined, this
instruction gets the value of the local variable and pushes it onto the stack
as a cell. If the local variable is not defined, this instruction pushes null
onto the stack.
CGetL2 <local variable id> [<C>:<T>] -> [C <C>:<T>]
Get local as cell. First, $1 is popped off the stack. If the local variable
given by %1 is defined, this instruction then gets the value of the local variable,
pushes it onto the stack as a cell, and then pushes $1 back onto the stack.
If the local variable is not defined, this instruction raises a warning,
pushes null onto the stack, and then pushes $1 back onto the stack.
CUGetL <local variable id> [] -> [C|U]
Get local as cell or uninit. If the local variable given by %1 is defined,
this instruction gets the value of the local variable and pushes it onto the
stack as a cell. If the local variable is not defined, this instruction pushes
uninit onto the stack.
PushL <local variable id> [] -> [C]
Teleport local value to eval stack. The local variable given by %1 must be
defined and must not contain a reference. This instruction pushes the local's
value on the stack, then unsets it, equivalent to the behavior of UnsetL.
CGetG [C] -> [C]
Get global as cell. This instruction first computes x = (string)$1. Next,
this instruction reads the global variable named x and pushes its value onto
the stack as a cell.
If there is not a global variable defined named x, this instruction pushes
null onto the stack.
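For example:
  String "counter"
  CGetG      (pushes the value of the global named "counter", or null if it is
              not defined)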
CGetS <readonly op> [C C:Class] -> [C]
Get static property as cell. This instruction first checks if class $1 has a
visible and accessible static property named (string)$2. If it doesn't, this
instruction throws a fatal error. Otherwise, this instruction pushes the
static property onto the stack as a cell.
ClassGetC [C] -> [C:Class]
Fetch class. This instruction checks if $1 is a string, object, or class. If
a class, it pushes the input unchanged. If a string, it checks if $1 is the
name of a defined class. If so, the class is pushed. If not, this instruction
will invoke the autoload facility passing in $1, and then it will again check
if $1 matches the name of a defined class. If still not defined, this
instruction throws a fatal error. If $1 is an object, it pushes the runtime
class of the object. If $1 is not any of the above cases, this instruction
throws a fatal error.
ClassGetTS [C:Dict] -> [C:Class, C:StaticVec|Null]
Fetch class from type-structure. This instruction checks if $1 is a valid
type-structure (darray or dict). If not, this instruction throws a fatal
error. Otherwise, $1['classname'] is loaded, and mangled as dictated by the
type-structure's generics information (if present). The possibly mangled
classname is then processed like ClassGetC and the resulting class is
pushed. If present, the type-structure's generics information is processed as
in RecordReifiedGeneric and pushed. If not present, null is pushed.
6. Isset and type querying instructions
---------------------------------------
IssetL <local variable id> [] -> [C:Bool]
Isset local. This instruction reads the local variable given by %1. If the
local variable is undefined or null, this instruction pushes false onto the
stack, otherwise it pushes true.
IsUnsetL <local variable id> [] -> [C:Bool]
IsUnset local. This instruction reads the local variable given by %1. If the
local variable is undefined, this instruction pushes true onto the
stack, otherwise it pushes false.
IssetG [C] -> [C:Bool]
Isset global. This instruction reads the global variable named (string)$1. If
the global variable is undefined or null, this instruction pushes false onto
the stack, otherwise it pushes true.
IssetS [C C:Class] -> [C:Bool]
Isset static property. This instruction first computes x = (string)$2. Next
it checks if class $1 has an accessible static property named x. If it
doesn't, this instruction pushes false.
If class $1 does have an accessible property named x, this instruction reads
the static property named x. If the static property is null, this instruction
pushes false onto the stack, otherwise it pushes true.
IsTypeC <op> [C] -> [C:Bool]
Is type. This instruction checks the type of a value on the stack, according
to the following table:
operand t
-----------+------
Null | Null
Bool | Bool
Int | Int
Dbl | Dbl
Str | Str
Vec | Vec
Dict | Dict
Keyset | Keyset
Obj | Obj
ArrLike | Vec or Dict or Keyset
Scalar | Int or Dbl or Str or Bool
Res | Res
Class | Class or LazyClass
If t is Obj, this instruction checks if the operand is an object.
Instances of the special class __PHP_Incomplete_Class are not considered
objects.
Otherwise, the result is true if $1 is of type t and false otherwise.
The result is pushed on the stack.
IsTypeL <local variable id> <op> [] -> [C:Bool]
Is type. This instruction checks the type of a local, according to the
following table:
operand t
-----------+------
Null | Null
Bool | Bool
Int | Int
Dbl | Dbl
Str | Str
Vec | Vec
Dict | Dict
Keyset | Keyset
Obj | Obj
ArrLike | Vec or Dict or Keyset
Scalar | Int or Dbl or Str or Bool
Res | Res
Class | Class or LazyClass
If the local variable given by %1 is defined, the logic is the same as for
IsTypeC (see above).
If the local is of kind reference, then the inner value is used to determine
the type.
If the local variable given by %1 is not defined, this instruction raises a
warning and pushes false onto the stack, unless the operand is Null, in
which case it pushes true.
7. Mutator instructions
-----------------------
SetL <local variable id> [C] -> [C]
Set local. This instruction marks the local variable given by %1 as defined,
stores the value $1 into the local variable, and then pushes $1 onto the
stack.
SetG [C C] -> [C]
Set global. This instruction marks the global variable named (string)$2 as
defined, assigns the value $1 to the global variable, and then pushes $1 onto
the stack.
SetS <readonly op> [C C:Class C] -> [C]
Set static property. First this instruction checks if the class $2 has an
accessible static property named (string)$3. If it doesn't, this instruction
throws a fatal error. Otherwise, this instruction assigns the value $1 to the
static property, and then it pushes $1 onto the stack.
SetOpL <local variable id> <op> [C] -> [C]
Set op local. If the local variable given by %1 is not defined, this instruction
marks it as defined, sets it to null, and raises a warning.
Next, this instruction reads the local variable into x, then executes y = x
<op> $1, assigns y into local variable %1, and then pushes y onto the stack.
The immediate value must be one of the following opcodes:
Add, Sub, Mul, Div, Mod, Shl, Shr, Concat, BitAnd,
BitOr, BitXor.
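For example, a compound assignment such as "$x += 5" could be expressed as the
following sketch; the local name $x is illustrative, and the op immediate is
written using the opcode names listed above (the concrete assembler syntax may
differ):
  Int 5
  SetOpL $x Add
  PopC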
SetOpG <op> [C C] -> [C]
Set op global. This instruction first computes x = (string)$2. If the global
variable named x is not defined, this instruction marks it as defined, sets
it to null, and raises a warning.
Next, this instruction reads the global variable named x into y, executes z =
y <op> $1, assigns z into the global variable named x, and then pushes z onto
the stack as a cell. The immediate value must be one of the following
opcodes:
Add, Sub, Mul, Div, Mod, Shl, Shr, Concat, BitAnd, BitOr, BitXor.
SetOpS <op> [C C:Class C] -> [C]
Set op static property. This instruction first computes x = (string)$3. Next
it checks if class $2 has an accessible static property named x. If it
doesn't, this instruction throws a fatal error. Otherwise, this instruction
reads the static property named x into y, executes z = y <op> $1, assigns z
into the static property, and then pushes z onto the stack. The immediate
value must be one of the following opcodes:
Add, Sub, Mul, Div, Mod, Shl, Shr, Concat, BitAnd, BitOr, BitXor.
IncDecL <local variable id> <op> [] -> [C]
Increment/decrement local. If the local variable given by %1 is not defined,
this instruction marks it as defined, sets it to null, and raises a warning.
Where x is the local given by %1, this instruction then does the following:
If op is PreInc, this instruction executes ++x and then pushes x onto the
stack as a cell.
If op is PostInc, this instruction pushes x onto the stack and then it
executes ++x.
If op is PreDec, this instruction executes --x and then pushes x onto the
stack.
If op is PostDec, this instruction pushes x onto the stack and then it
executes --x.
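For example, a post-increment used as a statement ("$i++;") could be expressed
as the following sketch (the local name $i is illustrative):
  IncDecL $i PostInc    (pushes the old value of $i, then increments the local)
  PopC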
IncDecG <op> [C] -> [C]
Increment/decrement global. This instruction first computes x = (string)$1.
Next, if the global variable named x is not defined, this instruction defines
it, sets it to null, and raises a warning.
Where v is the global variable named x, this instruction
performs the following:
If op is PreInc, this instruction executes ++v and then pushes v onto the
stack as a cell.
If op is PostInc, this instruction pushes v onto the stack and then it
executes ++v.
If op is PreDec, this instruction executes --v and then pushes v onto the
stack.
If op is PostDec, this instruction pushes v onto the stack and then it
executes --v.
IncDecS <op> [C C:Class] -> [C]
Increment/decrement static property. This instruction first computes x =
(string)$2. Next it checks if class $1 has an accessible static property
named x. If it doesn't, this instruction throws a fatal error.
Where s is the static property named x, this instruction performs the
following:
If op is PreInc, this instruction executes ++s and then pushes s onto
the stack.
If op is PostInc, this instruction pushes s onto the stack and then it
executes ++s.
If op is PreDec, this instruction executes --s and then pushes s onto the
stack.
If op is PostDec, this instruction pushes s onto the stack and then it
executes --s.
UnsetL <local variable id> [] -> []
Unset local. Breaks any bindings the local variable given by %1 may have and
marks the local variable as undefined.
UnsetG [C] -> []
Unset global. This instruction breaks any bindings the global variable named
(string)$1 may have and marks the global variable as undefined.
CheckProp <propName> [] -> [C:Bool]
Check non-scalar property initializer. This instruction checks the
initializer for property named %1 in the context class, and pushes
true on the stack if it is initialized, and false otherwise.
InitProp <propName> <op> [C] -> []
Initialize non-scalar property. If %2 is 'NonStatic', this instruction sets
the initializer for the property named %1 in the context class to $1. If %2
is 'Static', this instruction sets the initializer for the static property
named %1 in the context class to $1.
The CheckProp and InitProp opcodes should only be used in 86pinit methods.
86pinit methods are HHVM-internal property initialization methods that
cannot be called from user-land. After 86pinit runs, no declared properties
of the class can be of type NullUninit.
8. Call instructions
--------------------
NewObj [C:Class] -> [C:Obj]
NewObjD <litstr id> [] -> [C:Obj]
NewObjS <mode> [] -> [C:Obj]
New object. First, these instructions load a class into x as given
by the following table:
instruction x
------------+----
NewObj | $1
NewObjD | %1
NewObjS | %1
When loading %1 into x, NewObjD will perform the work performed
by the ClassGetC instruction to convert the name given by %1 into a class.
NewObjS will perform the same work as LateBoundCls/SelfCls/ParentCls depending
on the specified mode.
These instructions push a default-initialized object onto the stack. The
initialization will complete by running a constructor with FCallCtor, and by
clearing the IsBeingConstructed flag using LockObj.
LockObj [C:Obj] -> [C:Obj]
Clears the IsBeingConstructed flag on the object, leaving it on the stack.
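As an illustrative sketch, constructing an instance of a hypothetical class Foo
with one constructor argument typically combines these instructions with
FCallCtor (described below); the <fca> immediate is shown schematically:
  NewObjD "Foo"
  Dup
  NullUninit
  Int 1
  FCallCtor <fca: 1 arg, 1 return> ""
  PopC
  LockObj
The Dup keeps a reference to the new object on the stack while FCallCtor
consumes the other copy; the constructor's return value is discarded with
PopC, and LockObj clears the IsBeingConstructed flag.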
FCall* opcodes
--------------
FCall* opcodes are responsible for invoking the callee determined by the
specific opcode and performing operations related to the function call as
specified by the FCA (FCall arguments) immediate consisting of the following
data:
<flags> <num args> <num returns> <inout bool vector> <async eager offset>
[C|V..C|V] -> [C..C]
FCall* first looks up the callee function according to the specific opcode.
The vector %4 must be either empty or it must contain exactly %2 booleans.
If it is non-empty, FCall* checks whether inout-ness of parameters 1..%2
of the callee matches the corresponding inout-ness values specified by the
vector %4. Throws an exception if there is a mismatch.
Finally, FCall* transfers the top %2 values from the stack to the callee as
parameters and invokes the callee. When the callee returns, it will transfer
%3 return values onto the caller's evaluation stack using the C flavor. The
callee must return the matching number of values using either RetC opcode
(if %3 was one) or RetM opcode (otherwise).
If the optional offset %5 was specified, the callee supports async eager
return, and the callee would return a finished Awaitable, it may instead
return the unpacked result of the Awaitable and continue execution of the
caller at offset %5.
If %5 was specified and the callee raised an exception, the exception will
continue to propagate through the caller instead of being wrapped into an
Awaitable.
Note that for the purposes of exception handling inside the caller, the PC
will point after the FCall* rather than %5 so it is not advised to have
different EH entries for these two locations.
The async eager offset feature is used to avoid the cost of constructing
short-lived Awaitables that are produced by eagerly finishing asynchronous
code and then immediately awaited by the caller.
%1 contains a list of boolean flags:
Unpack: if enabled, %2 arguments on the stack are followed by an additional
value, which must be an array. Its elements are transferred to the callee as
parameters, following the regular %2 parameters.
Generics: if enabled, %2 arguments and an optional unpack array on the stack
are followed by an additional value containing the list of reified generics.
Only FCall*D opcodes are allowed to pass generics.
LockWhileUnwinding: whether to lock newly constructed objects if unwinding the
constructor call.
FCallFunc <fca> [U U C|V..C|V C] -> [C..C]
FCallFuncD <fca> <litstr id> [U U C|V..C|V] -> [C..C]
Call a callable. First, these instructions load a value into x as given by
the following table:
instruction x
--------------+----
FCallFunc | $1
FCallFuncD | %2
If x is a string, this instruction attempts to lookup a function named x. If
a function named x is defined, this instruction calls it. Otherwise it throws
a fatal error. With FCallFuncD, the litstr in %2 must not start with a '\'
character, or be of the form "Class::Method". Function names should be
normalized with respect to namespace and never start with a '\'.
If x is an object, this instruction checks if the object has an __invoke
method. If the object does have an __invoke method, this instruction calls
it. Otherwise it throws a fatal error.
If x is an array, this instruction will check that the first array element is
either the name of a class, or an instance of one, and that the second array
element is the name of a method implemented by the class. If a method exists,
this instruction calls it. Otherwise it throws a fatal error.
If x is a func or a clsmeth, this instruction calls it.
If x is not a string, object, array, func, or clsmeth, this instruction
throws a fatal error.
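As an illustrative sketch, calling a hypothetical free function my_add with
two arguments and one return value (the <fca> immediate is shown
schematically; see the FCall* description above for its contents):
  NullUninit
  NullUninit
  Int 1
  Int 2
  FCallFuncD <fca: 2 args, 1 return> "my_add"
  PopC
The two NullUninit instructions fill the U slots in the instruction's stack
signature, and PopC discards the single return value.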
FCallObjMethod <fca> <class hint> <nullsafe>
[C U C|V..C|V C] -> [C..C]
FCallObjMethodD <fca> <class hint> <nullsafe> <litstr id>
[C U C|V..C|V] -> [C..C]
Call an instance method. First, these instructions load values into x
and y as given by the following table:
instruction x y
-----------------+---------------------------------------------+-----
FCallObjMethod | $(num args + has unpack + 4) | $1
FCallObjMethodD | $(num args + has unpack + has generics + 3) | %3
If x is not an object and nullsafe != ObjMethodOp::NullThrows, or if y is not
a string, this instruction throws a fatal error. Next, this instruction checks
if object x has an accessible method named y. If it does, this instruction
calls that method.
If object x does not have an accessible method named y, this instruction
throws a fatal error.
The string in %2 provides a static analysis hint. If it is non-empty, it is
the name of the class whose implementation of method y will be called.
FCallClsMethod <fca> <class hint> <op>
[U U C|V..C|V C C:Class] -> [C..C]
FCallClsMethodM <fca> <class hint> <op> <litstr id>
[U U C|V..C|V C] -> [C..C]
FCallClsMethodD <fca> <litstr id> <litstr id>
[U U C|V..C|V] -> [C..C]
FCallClsMethodS <fca> <class hint> <mode>
[U U C|V..C|V C] -> [C..C]
FCallClsMethodSD <fca> <class hint> <mode> <litstr id>
[U U C|V..C|V] -> [C..C]
Call a static method. First, these instructions load values into x and
y as given by the following table:
instruction x y
--------------------+----+-----
FCallClsMethod | $1 | $2
FCallClsMethodM | $1 | %4
FCallClsMethodD | %2 | %3
FCallClsMethodS | %3 | $1
FCallClsMethodSD | %3 | %4
When loading litstr id %2 into x, FCallClsMethodD will perform the work
performed by the ClassGetC instruction to convert the name given by %2 into
a class. Similarly, FCallClsMethodM will convert $1 into a class.
When loading mode %3 into x, FCallClsMethodS and FCallClsMethodSD will
perform the work performed by LateBoundCls/SelfCls/ParentCls depending
on the specified mode.
If y is not a string, this instruction throws a fatal error. Next, this
instruction checks if class x has an accessible method named y. If it does,
this instruction calls that method.
If class x does not have an accessible method named y, this instruction
throws a fatal error.
The string in %2 provides a static analysis hint. If it is non-empty, it is
the name of the class whose implementation of method y will be called.
If op is DontLogAsDynamicCall all logging set up for dynamic calls will be
skipped.
FCallCtor <fca> <class hint> [C:Obj U C|V..C|V] -> [C]
This instruction calls a constructor for class of the object given by
$(num args + has unpack + 3).
The string in %2 provides a static analysis hint. If it is non-empty, it is
the name of the class whose implementation of __construct() will be called.
Constructors do not support inout, so the "num returns" in %1 must be 1.
9. Member operations
--------------------
Member operations represent one part of a member expression such as
"$a[0]['name'] = $foo". Each operation corresponds to one bytecode instruction,
but the operations are described separately from their instruction mapping to
separate them from any concerns about instruction encoding.
Operations can produce and consume intermediate values called "bases". A "base"
is a pointer to a memory location that is occupied by a value, typically a
local variable, array element, or object property. The current base is stored
in a VM register called the member base register, or MBR for short. Bases are
never stored on the evaluation stack or in any VM location other than the MBR.
A base never owns a reference to the value it points to. It may point to a
temporary value in a scratch register, but the lifetime of the value is always
managed elsewhere.
There are three categories of member operations: base, intermediate, and
final. Base operations produce a base, intermediate operations consume the
current base and produce a new base, and final operations consume the current
base without producing a new one.
Operations are specified as if they directly operate on the top of the
evaluation stack in the name of consistency and clarity, but in fact their
inputs and outputs may reside elsewhere. The symbol 'B' is used in the input
descriptions and output descriptions of operations to indicate that a given
operation consumes a base as input or produces a base as output.
9.1 Member base operations
--------------------------
BaseC [C] -> [B]
Get base from value. This operation outputs a base that points to the value
given by $1.
BaseL <local variable id> [] -> [B]
Get base from local. This operation outputs a base that points to the local
given by %1. If the local is not defined, this operation outputs a base that
points to null.
BaseLW <local variable id> [] -> [B]
Get base from local. This operation outputs a base that points to the local
given by %1. If the local is not defined, this operation raises a warning and
outputs a base that points to null.
BaseLD <local variable id> [] -> [B]
Get base from local. This operation outputs a base that points to the local
given by %1, whether or not it is defined.
BaseGC [C] -> [B]
BaseGL <local variable id> [] -> [B]
Get base from global name. This operation outputs a base that points to the
global variable whose name is given by (string)%1 or (string)$1. If the
global is not defined, this operation produces a base that points to null.
BaseGCW [C] -> [B]
BaseGLW <local variable id> [] -> [B]
Get base from global name. This operation outputs a base that points to the
global variable whose name is given by (string)%1 or (string)$1. If the
global is not defined, this operation raises a warning and outputs a base
that points to null.
BaseGCD [C] -> [B]
BaseGLD <local variable id> [] -> [B]
Get base from global name. This operation outputs a base that points to the
global variable whose name is given by (string)%1 or (string)$1, defining it
first if necessary.
BaseSC [C C:Class] -> [B]
Get base from static property. First, this operation computes x = (string)$2.
Then this instruction checks if class $1 has an accessible property named
x. If it does, this operation outputs a base that points to the static
property. Otherwise, this operation throws a fatal error.
BaseH [] -> [B]
Get base from $this. This operation assumes that the current frame contains a
valid $this pointer and outputs a base pointing to the object in $this.
9.2 Intermediate member operations
----------------------------------
ElemC [C B] -> [B]
ElemL <local variable id> [B] -> [B]
Fetch element if it exists. First, these operations load a value into x and a
base into y, as given by the following table:
operation x y
----------+----+-----
ElemC | $2 | $1
ElemL | %1 | $1
Then, if y is an array, hack array, or hack collection this operation outputs
a base that points to the element at index x in y. If there is no element at
index x, this operation outputs a base that points to null.
If y is an object that is not a hack collection, this operation throws a
fatal error.
If y is a string, this operation computes z = (int)x. If z >= 0 and z <
strlen(y), this operation builds a new string consisting of the character at
offset z from y and outputs a base that contains the new string. Otherwise,
this operation outputs a base that points to the empty string.
If y is not a string, array, or object, this operation will output a base
pointing to null.
ElemCW [C B] -> [B]
ElemLW <local variable id> [B] -> [B]
Fetch element; warn if it doesn't exist.
First, these operations load a value into x and a base into y, as given by
the following table:
operation x y
----------+----+-----
ElemCW | $2 | $1
ElemLW | %1 | $1
If y is an array, hack array, or hack collection this operation outputs a
base that points to the element at index x in y. If there is no element at
index x, this operation outputs a base that points to null and raises a
warning.
If y is an object that is not a hack collection, this operation throws a
fatal error.
If y is a string, this operation continues to compute z = (int)x. If z >= 0
and z < strlen(y), this operation builds a new string consisting of the
character at offset z from y and outputs a base that points to the new string.
Otherwise, this operation raises a warning and outputs a base that points to
the empty string.
If y is not a string, array, or object, this operation will output a base
pointing to null.
ElemCD [C B] -> [B]
ElemLD <local variable id> [B] -> [B]
Fetch element; define it if it doesn't exist.
First, these operations load a value into x and a base into y, as given by
the following table:
operation x y
----------+----+-----
ElemCD | $2 | $1
ElemLD | %1 | $1
If y is an array, hack array, or hack collection this operation outputs a
base that references the element at index x. If there is no element at index
x, this operation creates an element at index x, and outputs a base that
references the element.
If y is non-empty string or an object that is not a hack collection, this
operation throws a fatal error.
If y is null, the empty string, or false, this operation will set y to a new
empty array, create an element at index x, and output a base that points to
the element.
If y is true, integer, or double, this operation raises a warning and outputs a
base that points to null.
ElemCU [C B] -> [B]
ElemLU <local variable id> [B] -> [B]
Fetch element for unset.
First, these operations load a value into x and a base into y, as given by
the following table:
operation x y
----------+----+-----
ElemCU | $2 | $1
ElemLU | %1 | $1
If y is an array, hack array, or hack collection this operation outputs a
base that points to the element at index x in y. If there is no element at
index x, this operation outputs a base that points to null.
If y is an object that is not a hack collection, this operation throws a
fatal error.
If y is a string, this operation throws a fatal error.
If y is not a string, array, or object, this operation will output a base
pointing to null.
NewElem [B] -> [B]
Fetch new element. If $1 is an array, hack array, or hack collection this
operation creates a new element with the next available numeric key in $1
and outputs a base that points to the new element.
If $1 is a non-empty string or an object that is not a hack collection, this
operation throws a fatal error.
If $1 is null, false, or the empty string, this operation sets $1 to a new
empty array, creates a new element with the next available numeric key in
array $1, and then outputs a base that points to the new element.
If $1 is true, integer, or double, this operation raises a warning and
outputs a base that points to null.
PropC [C B] -> [B]
PropL <local variable id> [B] -> [B]
Fetch property if it exists.
First, these operations load a value into x and a base into y, as given by
the following table:
operation x y
----------+----+-----
PropC | $2 | $1
PropL | %1 | $1
Next, produce a base pointing to:
y is an object
y->x is visible
y->x is accessible
y has eligible __get method
y->x has been unset previously
------+---------------------------------------------------------------------
0XXXX | null
10X0X | null
10X1X | y->__get(x)
1100X | throw fatal error
1101X | y->__get(x)
111X0 | y->x
11101 | null
11111 | y->__get(x)
PropCW [C B] -> [B]
PropLW <local variable id> [B] -> [B]
Fetch property; warn if it doesn't exist.
First, these operations load a value into x and a base into y, as given by
the following table:
operation x y
----------+----+-----
PropCW | $2 | $1
PropLW | %1 | $1
Next, produce a base pointing to:
y is an object
y->x is visible
y->x is accessible
y has eligible __get method
y->x has been unset previously
------+---------------------------------------------------------------------
0XXXX | raise warning; null
10X0X | raise warning; null
10X1X | y->__get(x)
1100X | throw fatal error
1101X | y->__get(x)
111X0 | y->x
11101 | raise warning; null
11111 | y->__get(x)
PropCD [C B] -> [B]
PropLD <local variable id> [B] -> [B]
Fetch property; define it if it doesn't exist.
First, these operations load a value into x and a base into y, as given by
the following table:
operation x y
----------+----+-----
PropCD | $2 | $1
PropLD | %1 | $1
Next, produce a base pointing to:
y is an object
y is null/false/""
y->x is visible
y->x is accessible
y has eligible __get method
y->x has been unset previously
-------+--------------------------------------------------------------------
00XXXX | null
01XXXX | y = new stdClass; create property y->x; y->x
1X0X0X | create property y->x; y->x
1X0X1X | y->__get(x)
1X100X | throw fatal error
1X101X | y->__get(x)
1X11X0 | y->x
1X1101 | re-create property y->x, y->x
1X1111 | y->__get(x)
PropCU [C B] -> [B]
PropLU <local variable id> [B] -> [B]
Fetch property for unset.
First, these operations load a value into x and a base into y, as given by
the following table:
operation x y
----------+----+-----
PropCU | $2 | $1
PropLU | %1 | $1
Next, produce a base pointing to:
y is an object
y->x is visible
y->x is accessible
y->x has been unset previously
-----+----------------------------------------------------------------------
0XXX | null
10XX | create property y->x; y->x
110X | throw fatal error
1110 | y->x
1111 | re-create property y->x; y->x
9.3 Final member operations
---------------------------
CGetElemC [C B] -> [C]
CGetElemL <local variable id> [B] -> [C]
Get element as cell.
These instructions first load a value into x and a base into y, as given by
the following table:
operation x y
------------+----+-----
CGetElemC | $2 | $1
CGetElemL | %1 | $1
If y is an array, hack array, or hack collection this operation retrieves
the element at index x from y and pushes it onto the stack as a cell. If
there is no element at index x, this operation raises a warning and pushes
null onto the stack.
If y is an object that is not a hack collection, this operation throws a
fatal error.
If y is a string, this operation continues to compute z = (int)x. If z >= 0
and z < strlen(y), this operation builds a new string consisting of the
character at offset z from y and pushes it onto the stack. Otherwise, this
operation raises a warning and pushes the empty string onto the stack.
If y is not a string, array, or object, this operation will push null onto
the stack.
IssetElemC [C B] -> [C:Bool]
IssetElemL <local variable id> [B] -> [C:Bool]
Isset element.
These instructions first load a value into x and a base into y, as given by
the following table:
operation x y
------------+----+-----
IssetElemC | $2 | $1
IssetElemL | %1 | $1
If y is an array, hack array, or hack collection this operation pushes
!is_null(y[x]) onto the stack.
If y is an object that is not a hack collection, this operation throws a
fatal error.
If y is a string, this operation computes z = (int)x and then it pushes (z >=
0 && z < strlen(y)) onto the stack.
If y is not a string, array, or object, this operation pushes false onto
the stack.
SetElemC [C C B] -> [C]
Set element. If $1 is an array, hack array, or hack collection this operation
executes $1[$3] = $2 and then pushes $2 onto the stack.
If $1 is an object that is not a hack collection, this operation throws a
fatal error.
If $1 is null, the empty string, or false, this operation sets $1 to a new
empty array, executes $1[$3] = $2, and then pushes $2 onto the stack.
If $1 is a non-empty string, this operation first computes x = (int)$3. If x
is negative, this operation raises a warning and does nothing else. If x is
non-negative, this operation appends spaces to the end of $1 as needed to
ensure that x is in bounds, then it computes y = substr((string)$2,0,1), and
then it sets the character at index x in $1 equal to y (if y is not empty) or
it sets the character at index x in $1 to "\0" (if y is empty). Then this
operation pushes y onto the stack.
If $1 is true, integer, or double, this operation raises a warning and pushes
null onto the stack as a cell.
SetElemL <local variable id> [C B] -> [C]
Set element. If $1 is an array, hack array, or hack collection this operation
executes $1[%1] = $2 and then pushes $2 onto the stack.
If $1 is an object that is not a hack collection, this operation throws a
fatal error.
If $1 is null, the empty string, or false, this operation sets $1 to a new
empty array, executes $1[%1] = $2, and then pushes $2 onto the stack.
If $1 is a non-empty string, this operation first computes x = (int)%1. If x
is negative, this operation raises a warning and does nothing else. If x is
non-negative, this operation appends spaces to the end of $1 as needed to
ensure that x is in bounds, then it computes y = substr((string)$2,0,1), and
then it sets the character at index x in $1 equal to y (if y is not empty) or
it sets the character at index x in $1 to "\0" (if y is empty). Then this
operation pushes y onto the stack.
If $1 is true, integer, or double, this operation raises a warning and pushes
null onto the stack as a cell.
SetOpElemC <op> [C C B] -> [C]
Set element op. If $1 is an array, hack array, or hack collection this
operation first checks if $1 contains an element at offset $3. If it does
not, this operation creates an element at offset $3, sets it to null, and
raises a warning. Next, this operation executes x = $1[$3], y = x <op> $2,
and $1[$3] = y, and then it pushes y onto the stack as a cell.
If $1 is null, false, or the empty string, this operation first sets $1 to a
new empty array. Then it follows the rules described in the case above.
If $1 is a non-empty string or an object that is not a hack collection, this
operation throws a fatal error.
If $1 is true, integer, or double, this operation raises a warning and pushes
null onto the stack.
SetOpElemL <op> <local variable id> [C B] -> [C]
Set element op. If $1 is an array, hack array, or hack collection this
operation first checks if $1 contains an element at offset %1. If it does
not, this operation creates an element at offset %1, sets it to null, and
raises a warning. Next, this operation executes x = $1[%1], y = x <op> $2,
and $1[%1] = y, and then it pushes y onto the stack as a cell.
If $1 is null, false, or the empty string, this operation first sets $1 to a
new empty array. Then it follows the rules described in the case above.
If $1 is a non-empty string or an object that is not a hack collection, this
operation throws a fatal error.
If $1 is true, integer, or double, this operation raises a warning and pushes
null onto the stack.
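As an illustrative, source-level sketch of the rules above (the array and the
key are hypothetical; Hack array types may differ in how they treat missing
keys):
  $a['n'] += 2;
  // If $a already has an element at 'n', this computes $a['n'] = $a['n'] + 2
  // and the new value is the value of the expression.
  // If $a has no element at 'n', a null element is created, a warning is
  // raised, and null + 2 is computed, so $a['n'] becomes 2.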
IncDecElemC <op> [C B] -> [C]
Increment/decrement element. If $1 is an array, hack array, or hack
collection this operation checks if $1 contains an element at offset $2. If
it does not, this operation creates an element at offset $2, sets it to null,
and raises a warning. Next, this operation executes x = $1[$2], y = x, and
either ++y (if op is PreInc or PostInc) or --y (if op is PreDec or PostDec).
Then it assigns y to $1[$2] and pushes either y (if op is PreInc or PreDec)
or x (if op is PostInc or PostDec) onto the stack.
If $1 is null, false, or the empty string, this operation first sets $1 to an
empty array. Then it follows the rules described in the case above.
If $1 is a non-empty string or an object that is not a hack collection, this
operation throws a fatal error.
If $1 is true, integer, or double, this operation raises a warning and pushes
null onto the stack.
IncDecElemL <op> <local variable id> [B] -> [C]
Increment/decrement element. If $1 is an array, hack array or hack collection
this operation checks if $1 contains an element at offset %1. If it does not,
this operation creates an element at offset %1, sets it to null, and raises
a warning. Next, this operation executes x = $1[%1], y = x, and either ++y
(if op is PreInc or PostInc) or --y (if op is PreDec or PostDec). Then it
assigns y to $1[%1] and pushes either y (if op is PreInc or PreDec) or x (if
op is PostInc or PostDec) onto the stack.
If $1 is null, false, or the empty string, this operation first sets $1 to an
empty array. Then it follows the rules described in the case above.
If $1 is a non-empty string or an object that is not a hack collection, this
operation throws a fatal error.
If $1 is true, integer, or double, this operation raises a warning and pushes
null onto the stack.
UnsetElemC [C B] -> []
UnsetElemL <local variable id> [B] -> []
Unset element.
These instructions first load a value into x and a base into y, as given by
the following table:
operation x y
------------+----+-----
UnsetElemL | %1 | $1
UnsetElemC | $2 | $1
If y is an array, hack array, or hack collection this operation removes the
element at index x from y.
If y is an object that is not a hack collection, this operation throws a
fatal error.
If y is a string, this operation throws a fatal error.
If y is not a string, array, or object, this operation does nothing.
SetNewElem [C B] -> [C]
Set new element. If $1 is an array, hack array, or hack collection this
operation executes $1[] = $2 and then pushes $2 onto the stack.
If $1 is null, false, or the empty string, this operation sets $1 to a new
empty array, and then it executes $1[] = $2 and pushes $2 onto the stack.
If $1 is a non-empty string or an object that is not a hack collection, this
operation throws a fatal error.
If $1 is true, integer, or double, this operation raises a warning and pushes
null onto the stack.
SetOpNewElem <op> [C B] -> [C]
Set op new element. If $1 is an array, hack array, or hack collection this
operation first determines the next available integer offset k in $1. Next,
this operation executes $1[k] = null, x = $1[k], and y = x <op> $2. Then it
assigns y to $1[k] and pushes y onto the stack.
If $1 is null, false, or the empty string, this operation first sets $1 to an
empty array. Then it follows the rules described in the case above.
If $1 is a non-empty string or an object that is not a hack collection, this
operation throws a fatal error.
If $1 is true, integer, or double, this operation raises a warning and pushes
null onto the stack.
IncDecNewElem <op> [B] -> [C]
Increment/decrement new element. If $1 is an array, hack array, or hack
collection this operation first determines the next available integer offset
k in $1. Next, this operation executes $1[k] = null, x = $1[k], y = x, and
either ++y (if op is PreInc or PostInc) or --y (if op is PreDec or PostDec).
Then it assigns y to $1[k] and pushes either y (if op is PreInc or PreDec) or
x (if op is PostInc or PostDec) onto the stack.
If $1 is null, false, or the empty string, this operation first sets $1 to an
empty array. Then it follows the rules described in the case above.
If $1 is a non-empty string or an object that is not a hack collection, this
operation throws a fatal error.
If $1 is true, integer, or double, this operation raises a warning and pushes
null onto the stack.
CGetPropC [C B] -> [C]
CGetPropL <local variable id> [B] -> [C]
Get property as cell.
These instructions first load a value into x and a base into y, as given by
the following table:
operation x y
------------+----+-----
CGetPropC | $2 | $1
CGetPropL | %1 | $1
If y is an object that does not have an eligible __get method, this operation
first checks if y has a visible property named x. If it does not, this
operation raises a warning and pushes null. Otherwise, this operation
continues to check if the property named x is accessible. If the property
named x is accessible this operation pushes it onto the stack as a cell,
otherwise this operation throws a fatal error.
If y is an object that has an eligible __get method, this operation checks if
y has a visible and accessible property named x. If it does, this operation
pushes the property onto the stack. Otherwise, this operation pushes
y->__get(x) onto the stack.
If y is not an object, this operation will raise a warning and push null onto
the stack.
IssetPropC [C B] -> [C:Bool]
IssetPropL <local variable id> [B] -> [C:Bool]
Isset property.
These instructions first load a value into x and a base into y, as given by
the following table:
operation x y
-------------+----+-----
IssetPropC | $2 | $1
IssetPropL | %1 | $1
If y is an object that does not have an eligible __isset method, this
operation checks if y has a visible and accessible property named x. If it does,
this operation pushes !is_null(y->x) onto the stack. Otherwise this operation
pushes false onto the stack.
If y is an object that has an eligible __isset method, this operation checks
if y has a visible and accessible property named x. If it does, this
operation pushes !is_null(y->x) onto the stack. Otherwise this operation
pushes y->__isset(x) onto the stack.
If y is an array, this operation pushes !is_null(y[x]) onto the stack.
If y is not an object or array, this operation pushes false.
SetPropC [C C B] -> [C]
SetPropL <local variable id> [C B] -> [C]
Set property.
First, these operations load values into k and x, and a base into y, as given
by the following table:
operation k x y
----------+----+----+----
SetPropC | $3 | $2 | $1
SetPropL | %1 | $2 | $1
Next, perform one of the following actions:
y is an object
y is null/false/""
y->k is visible
y->k is accessible
y has eligible __set method
y->k has been unset previously
-------+--------------------------------------------------------------------
00XXXX | raise warning; push null
01XXXX | y = new stdClass; y->k = x; push x
1X0X0X | create property y->k; y->k = x; push x
1X0X1X | y->__set(k, x); push x
1X100X | throw fatal error
1X101X | y->__set(k, x); push x
1X11X0 | y->k = x; push x
1X1101 | re-create property y->k; y->k = x; push x
1X1111 | y->__set(k, x); push x
SetOpPropC <op> [C C B] -> [C]
SetOpPropL <op> <local variable id> [C B] -> [C]
Set op property.
First, these operations load values into k and x, and a base into y, as given
by the following table:
operation k x y
------------+----+----+----
SetOpPropC | $3 | $2 | $1
SetOpPropL | %1 | $2 | $1
Next, perform one of the following actions:
y is an object
y is null/false/""
y->k is visible
y->k is accessible
y has eligible __get method
y has eligible __set method
y->k has been unset previously
--------+-------------------------------------------------------------------
00XXXXX | raise warning; push null
01XXXXX | y = new stdClass; z = null <op> x; y->k = z; push z
100X0XX | z = null <op> x; y->k = z; push z
100X10X | w = y->__get(k); z = w <op> x; y->k = z; push z
100X11X | w = y->__get(k); z = w <op> x; y->__set(k, z), push z
10100XX | throw fatal error
101010X | throw fatal error
101011X | w = y->__get(k); z = w <op> x; y->__set(k, z), push z
1011XX0 | w = y->k; z = w <op> x; y->k = z; push z
10110X1 | z = null <op> x; re-create y->k; y->k = z; push z
1011101 | w = y->__get(k); z = w <op> x; re-create y->k; y->k = z; push z
1011111 | w = y->__get(k); z = w <op> x; y->__set(k, z); push z
IncDecPropC <op> [C B] -> [C]
IncDecPropL <op> <local variable id> [B] -> [C]
Increment/decrement property.
First, these operations load a value into x and a base into y, as given by
the following table:
operation x y
-------------+----+----
IncDecPropC | $2 | $1
IncDecPropL | %1 | $1
Next, perform one of the following actions:
y is an object
y is null/false/""
y->x is visible
y->x is accessible
y has eligible __get method
y has eligible __set method
y->x has been unset previously
--------+-------------------------------------------------------------------
00XXXXX | raise warning; push null
01XXXXX | y = new stdClass; b = null; a = b; <op>a; y->x = a;
| push a (Pre*) or b (Post*)
100X0XX | b = null; a = b; <op>a; y->x = a; push a (Pre*) or b (Post*)
100X10X | b = y->__get(x); a = b; <op>a; y->x = a;
| push a (Pre*) or b (Post*)
100X11X | b = y->__get(x); a = b, <op>a; y->__set(x, a);
| push a (Pre*) or b (Post*)
10100XX | throw fatal error
101010X | throw fatal error
101011X | b = y->__get(x); a = b, <op>a; y->__set(x, a);
| push a (Pre*) or b (Post*)
1011XX0 | b = y->x; a = b; <op>a; y->x = a; push a (Pre*) or b (Post*)
10110X1 | b = null; a = b; <op>a; re-create y->x; y->x = a;
| push a (Pre*) or b (Post*)
1011101 | b = y->__get(x); a = b; <op>a; re-create y->x; y->x = a;
| push a (Pre*) or b (Post*)
1011111 | b = y->__get(x); a = b; <op>a; y->__set(x, a);
| push a (Pre*) or b (Post*)
UnsetPropC [C B] -> []
UnsetPropL <local variable id> [B] -> []
Unset property.
These instructions first load a value into x and a base into y, as given by
the following table:
operation x y
-------------+----+-----
UnsetPropC | $2 | $1
UnsetPropL | %1 | $1
Next, perform one of the following actions:
y is an object
y->x is visible
y->x is accessible
y has eligible __unset method
-----+----------------------------------------------------------------------
0XXX | do nothing
10X0 | do nothing
10X1 | y->__unset(x)
1100 | throw fatal error
1101 | y->__unset(x)
111X | unset(y->x)
10. Member instructions
-----------------------
Each instruction in this section corresponds to one member operation from the
previous section. The same bytecode may represent multiple different member
operations, differentiating between the options using MOpMode immediates.
Since they represent member operations, these instructions produce and/or
consume a base in the member base register. The MBR is live starting after a
Base* bytecode, modified by zero or more Dim* bytecodes, then finally consumed
by a final operation:
bytecode | MBR in-state | MBR out-state
----------+--------------+--------------
Base* | dead | live
Dim* | live | live
Final Ops | live | dead
Finally, many of these instructions have a <member key> immediate. This is
described in the "Instruction set" introduction section.
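As an illustrative sketch, an expression such as
  $x = $obj->prop[3];
can be compiled into a member-instruction sequence along these lines (the
exact modes, member keys, and immediates are chosen by the compiler; the
spellings below are only indicative):
  BaseL <$obj> <Warn> ...        (MBR: dead -> live, pointing at local $obj)
  Dim <Warn> <PT:"prop">         (MBR: moved to $obj->prop)
  QueryM <0> <CGet> <EI:3>       (MBR consumed; pushes $obj->prop[3])
  SetL <$x>
  PopC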
10.1 Base Operations
---------------------
BaseGC <stack index> <member op mode> [] -> []
BaseGL <local id> <member op mode> [] -> []
BaseG{C,L}{,W,D} member operation.
BaseSC <stack index> <stack index> <member op mode> <readonly op> [] -> []
BaseSC member operation. %1 gives the location of the static property name,
and %2 gives the location of the class.
BaseL <local id> <member op mode> <readonly op> [] -> []
BaseL{,W,D} member operation.
BaseC <stack index> <member op mode> [] -> []
BaseC member operation.
BaseH [] -> []
BaseH member operation.
10.2 Intermediate operations
-----------------------------
Dim <member op mode> <member key> [] -> []
{Prop,Elem}{L,C,I,T}{W,D,U} member operation.
NewElem operation.
10.3 Final operations
----------------------
All final operations take a <stack count> immediate, which indicates the number
of elements on the eval stack that must be consumed before pushing the final
result. These are elements read by Base*C instructions, and member keys.
QueryM <stack count> <query op> <member key> [...] -> [C]
{CGet,Isset}{Prop,Elem} member operation.
SetM <stack count> <member key> [... C] -> [C]
Set{Prop,Elem} or SetNewElem member operation.
SetRangeM <stack count> <op> <elem size> [... C C C] -> []
Store raw data into a string, optionally reversing the order of elements
based on op, which may be Forward or Reverse.
The current member base must be a string (if this or any other required
conditions are violated, an exception will be thrown). $3 and $1 are cast to
Int before inspecting their values. $3 gives the offset within the base
string to begin copying data into. The data comes from a source value in $2;
supported types are described below. $1 is the count of items to copy from
$2, and it may be -1 to request that an appropriate value is inferred from
$2. The range [$3, $3 + count * size) must fit within [0, length of base).
The following types are supported as data sources (the value in $2):
- Bool: op must be Forward, count is ignored, and size must be 1. Stored as a
1-byte value, either 0 or 1.
- Int: op must be Forward, count is ignored, and size must be 1, 2, 4, or
8. The value is truncated to the requested size and stored using the
current machine's byte ordering.
- Dbl: op must be Forward, count is ignored, and size must be 4 or 8. The
value is converted to the requested size and stored using the current
machine's byte ordering.
- Str: count indicates the number of characters to copy, starting at the
beginning of $2, and size must be 1. If op is Reverse, the characters are
copied in reverse order. Note that characters are still copied starting at
the beginning of $2, so Forward vs. Reverse never affects which characters
are copied, just their order as they're written to the base string.
- Vec: count indicates the number of elements to copy, and size indicates the
size of each element. All elements of the vec must have the same type,
which must be Bool, Int, or Dbl. The operation may modify the base string
before failing if there are elements with mismatched types. Size must be
one of the allowed values for the contained type, described above. Count
must not be greater than the size of the vec. If op is Reverse, the
elements will be copied in reverse order (always starting from offset 0 of
the vec, as with string sources).
IncDecM <stack count> <op> <member key> [...] -> [C]
IncDec{Prop,Elem} or IncDecNewElem member operation.
SetOpM <stack count> <op> <member key> [... C] -> [C]
SetOp{Prop,Elem} or SetOpNewElem member operation.
UnsetM <stack count> <member key> [...] -> []
Unset{Prop,Elem} member operation.
11. Iterator instructions
-------------------------
Several iterator instructions take an IterArgs struct. This struct contains an
iterator ID, a value output local ID, and optionally a key output local ID.
Below, when we refer to "the iterator ID in %1", "the value local given in %1",
etc., we're referring to these IDs.
IterInit <IterArgs> <rel offset> [C] -> []
Initialize an iterator. This instruction takes a "base" in $1. It creates an
iterator with ID given in %1 pointing to the beginning of $1 and, if $1 has
base-internal iteration state (see below), rewinds $1. It then checks if the
base is empty. If so, it frees the iterator (with an implicit IterFree) and
transfers control to the target %2.
If the base is non-empty, this instruction writes the value of the base's
first element to the value local given in %1. If the iterator is a key-value
iterator, then this instruction also writes the key of the base's first
element to the key local given in %1.
This instruction stores to its key and value output locals with the same
semantics as SetL (non-binding assignment).
The precise semantics of "rewind", "is empty", "get key", and "get value"
depend on the type of the base:
- If $1 is array-like, we will create a new array iterator. "rewind" does
nothing in this case - array iterators don't have base-internal state.
The "is empty" check is a check on the length of $1, and "get key" and
"get value" load $1's first element.
- If $1 is a collection object, then we'll unpack or create the underlying
vec or dict and then create a new array iterator, as above.
- If $1 is an object that implements Iterator, or if it is an instance of
an extension class that implements Traversable, then we create a new
object iterator. We call $1->rewind() to reset the base's internal state,
then call $1->valid() to check if $1 is non-empty. If $1 is valid, then we
call $1->current() (and $1->key()) to get $1's first value (and key).
- If $1 is an object that implements the IteratorAggregate interface, then
we repeatedly execute "x = x->getIterator()" until x is no longer an
object that implements the IteratorAggregate interface. If x is now an
object that implements the Iterator interface, we create a new object
iterator as above. Otherwise, we throw an object of type Exception.
- If $1 is an object that does not match any of the cases above, then we
create a new default class iterator (with no base-internal state) that
iterates over accessible properties of the base's class, in the order
that they were defined. "get key" returns the name of the first property,
and "get value" returns the $1's value for that property.
- If $1 is not an array-like or object, this method raises a warning
and transfers control to the target %2.
LIterInit <IterArgs> <local id> <rel offset> [] -> []
Initialize iterator with local. If the local specified by %2 is array-like,
this instruction creates an array iterator pointing to the beginning of %2,
but leaves %2 in its local rather than copying it into the iterator and
inc-ref-ing it. Otherwise, it behaves as IterInit, except that the base comes
from the local %2 rather than from the stack.
If the base is non-empty, this instruction sets the output value (and, for
key-value iterators, the output key) to the first element of the base.
Otherwise, it frees the iterator and transfers control to the target %3.
Since we don't store array-like bases in the iterator, all other operations
on this iterator must use the LIter variants (LIterNext, LIterFree) and must
provide the same local (containing the same array-like) as immediates.
IterNext <IterArgs> <rel offset> [] -> []
Iterator next. This instruction first advances the iterator with the ID given
in %1. If the iterator has more elements, then it writes the next element's
value (and, for key-value iters, its key) to the output locals given in %1
and transfers control to the location specified by %2. Otherwise, it frees
the iterator (with an implicit IterFree).
As with IterInit, the precise semantics of "advance" and "has more elements"
depend on the iterator type. (We could also say it depends on the type of
the base, but we use some iterator types (e.g. array iterator) for multiple
base types (e.g. array-likes and collection objects).)
- For array iterators: "advance" increments the iterator's position, and
the base "has more elements" if this position is not the final position
for the stored array. If the array has more elements, we take its key
and value at the new position.
- For object iterators: we call $base->next() to update the base's internal
state, then call $base->valid() to check whether it has more elements. If
so, we use $base->key() and $base->current() to get the new key and value.
- For default class iterators, we advance to the base's class's next
accessible property (if there are more) and we use its name and value as
the new key and value.
LIterNext <IterArgs> <local id> <rel offset> [] -> []
Iterator next with local. The iterator with ID given in %1 must have been
initialized with LIterInit. This instruction behaves similarly to IterNext,
except that if %2 is an array-like, it will use this local as the iterator's
base rather than the one stored in the iterator.
IterFree <iterator id> [] -> []
Iterator free. This instruction frees the iterator with ID %1. An iterator is
typically freed by IterInit or IterNext when its base has no more elements,
so IterFree is only needed for guarding against exceptions.
LIterFree <iterator id> <local id> [] -> []
Iterator free with local. This instruction frees the iterator with ID %1,
which must have been initialized by LIterInit with base %2.
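As an illustrative sketch, a source-level loop such as
  foreach ($arr as $k => $v) { /* body */ }
uses these instructions roughly as follows (the labels and the IterArgs
notation are hypothetical):
          ...push $arr...
          IterInit <iter 0, key $k, val $v> DONE   (free the iterator and skip
                                                    the loop if $arr is empty)
  BODY:   ...loop body...
          IterNext <iter 0, key $k, val $v> BODY   (advance; jump back while
                                                    elements remain, otherwise
                                                    free the iterator)
  DONE:   ...
An EH catch entry covering the loop additionally runs IterFree, so the
iterator is released if the body exits via an exception.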
12. Include, eval, and define instructions
------------------------------------------
Incl [C] -> [C]
Include. Includes the compilation unit containing the file (string)$1. The
instruction eagerly marks all functions and classes that are unconditionally
declared in the outermost scope as defined. Next this instruction calls the
pseudo-main function from the file (string)$1. The pseudo-main function
inherits the caller's variable environment. If the execution engine cannot
find a compilation unit containing the file (string)$1, this instruction
raises a warning.
InclOnce [C] -> [C]
Include once. Include the compilation unit containing the file (string)$1 if
it hasn't been included already. This instruction eagerly marks all functions
and classes that are unconditionally declared in the outermost scope as
defined, and then calls the pseudo-main function from (string)$1 if it hasn't
run already. The pseudo-main function inherits the caller's variable
environment. If the execution engine cannot find a compilation unit
containing the file (string)$1, this instruction raises a warning.
Req [C] -> [C]
Require. Includes the compilation unit containing the file (string)$1. The
instruction eagerly marks all functions and classes that are unconditionally
declared in the outermost scope as defined. Next this instruction calls the
pseudo-main function from the file (string)$1. The pseudo-main function
inherits the caller's variable environment. If the execution engine cannot
find a compilation unit containing the file (string)$1, this instruction
throws a fatal error.
ReqOnce [C] -> [C]
Require once. Include the compilation unit containing the file (string)$1 if
it hasn't been included already. This instruction eagerly marks all functions
and classes that are unconditionally declared in the outermost scope as
defined, and then calls the pseudo-main function from (string)$1 if it hasn't
run already. The pseudo-main function inherits the caller's variable
environment. If the execution engine cannot find a compilation unit
containing the file (string)$1, this instruction throws a fatal error.
ReqDoc [C] -> [C]
As ReqOnce, except the string is always taken to be relative to the document
root (i.e., SourceRoot).
Eval [C] -> [C]
Eval. Executes the source code in (string)$1. This instruction eagerly marks
all functions and classes that are unconditionally declared in the outermost
scope as defined, and then calls the pseudo-main function from (string)$1.
The pseudo-main function from (string)$1 inherits the caller's variable
environment.
13. Miscellaneous instructions
------------------------------
This [] -> [C:Obj]
This. This instruction checks the current instance, and if it is null, this
instruction throws a fatal error. Next, this instruction pushes the current
instance onto the stack.
BareThis <notice> [] -> [C:Obj|Null]
This. This instruction pushes the current instance onto the stack. If %1 is
BareThisOp::Notice, and the current instance is null, emits a notice. If %1
is BareThisOp::NeverNull the current value of $this is guaranteed to be
available and can be loaded with no null check.
CheckThis [] -> []
Check existence of this. This instruction checks the current instance, and if
it is null, throws a fatal error.
ChainFaults [C C] -> [C]
Chain exception objects. If either $1 or $2 is not an object that implements
Throwable, raise a fatal error. Otherwise, start at $1 and walk the chain of
"previous" properties until an unset one is found. Set that property to $2,
unless the previous chain of $1 or $2 forms a cycle. In either case, $1 is
left on the top of the stack.
OODeclExists <Class|Interface|Trait> [C C] -> [C:Bool]
Check for class/interface/trait existence. If $1 cannot be cast to a bool or
$2 cannot be cast to a string, this instruction will throw a fatal error.
Otherwise, it will check for existence of the entity named by $2, invoking
the autoloader if needed and if $1 is true. The result of the existence check
will be pushed on the stack.
VerifyOutType <parameter id> [C] -> [C]
Verify out param type. Check that $1 is a value compatible with the declared
parameter type specified by the given parameter. In case of a mismatch
a warning or recoverable error is raised.
VerifyParamType <parameter id> [C] -> [C]
Verify parameter type. Functions and methods can optionally specify the types
of arguments they will accept.
VerifyParamType checks the type of the parameter in $1 against the enclosing
function's corresponding parameter constraints for `parameter id`. In case
of a mismatch, a recoverable error is raised.
VerifyRetTypeC [C] -> [C]
Verify return type. This instruction pops $1 off of the stack, checks if $1
is compatible with the current function's return type annotation and raises
a warning if there is a mismatch, and then it pushes $1 back onto the stack.
VerifyRetNonNullC [C] -> [C]
This is intended to provide the same behavior as VerifyRetTypeC, except it
only checks that $1 is non-null. This should only be emitted by HHBC if it
can statically verify that the return value will pass the function's type
annotation if it is non-null.
VerifyParamTypeTS <parameter id> [C] -> []
VerifyParamTypeTS pops a type structure from the stack and checks the
specified parameter against this type structure. In case of a mismatch, a
recoverable error is raised. If the popped cell is not a type structure,
an error is raised. This instruction also verifies the reified generic type
parameters of the specified parameter.
VerifyRetTypeTS [C C] -> [C]
VerifyRetTypeTS pops a type structure from the stack and checks whether
$2 is compatible with this type structure. In case of a mismatch, a
recoverable error is raised. If the popped cell is not a type structure,
an error is raised. This instruction also verifies the reified generic type
parameters of $2.
SelfCls [] -> [C:Class]
Push a class that refers to the class in which the current function is
defined. This instruction throws a fatal error if the current method is
defined outside of a class.
ParentCls [] -> [C:Class]
Push a class that refers to the parent of the class in which the
current method is defined. This instruction throws a fatal error if the
current method is defined outside of a class or if the class in which the
current method is defined has no parent.
LateBoundCls [] -> [C:Class]
Late-bound class. Push a class that refers to the current late-bound
class.
RecordReifiedGeneric [C:Vec] -> [C:Vec]
Takes a varray or vec (depending on a runtime flag) of type structures from
$1 and, unless the entry already exists, adds a mapping to the global reified
generics table from the grouped name of these type structures to a static
array containing their runtime representation. Pushes the resulting static
list of type structures.
CheckClsReifiedGenericMismatch [C:Vec] -> []
Throws a fatal error unless whether each generic in $1 is reified or erased
exactly matches the expectations of the current class. If there is
no class in the current context, throws a fatal error as well.
ClassHasReifiedGenerics [C:Class|LazyClass] -> [C:Bool]
Checks if the class in $1 has reified generics and pushes the
resulting boolean onto the stack. Throws a fatal error if $1 is not a class
or lazy class.
GetClsRGProp [C:Class|LazyClass] -> [C:Vec|Null]
Gets the reified generics property for the current instance, using the index of this
property as stored in class $1. Raises a fatal error if the current instance
is null or $1 is not a class or lazy class. Returns null if the current
instance or $1 doesn't have reified generics.
HasReifiedParent [C:Class|LazyClass] -> [C:Bool]
Checks if the parent of the class in $1 has reified generics and pushes the
resulting boolean onto the stack. Throws a fatal error if $1 is not a class
or lazy class.
CheckClsRGSoft [C:Class|LazyClass] -> []
Raises a warning if every reified generic in class $1 is soft.
Otherwise, raises a fatal error. This bytecode should only be emitted
when a class which expects reified generics is being instantiated without
any reified generics given (thus guarded by a ClassHasReifiedGenerics bytecode).
Throws a fatal error if $1 is not a class or lazy class.
NativeImpl [] -> []
Native implementation. This instruction invokes the native implementation
associated with the current function and returns the return value to the caller
of the current function.
AKExists [C C] -> [C:Bool]
Checks if array (object) in $1 contains key (property) in $2 and pushes the
resulting boolean onto the stack. If $2 is null, uses the empty string as
key. Throws a fatal error if $1 is not an array or object, and raises a
warning if $2 is not a string, integer, or null.
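As an illustrative, source-level sketch (array_key_exists is the construct
that typically compiles to this instruction):
  array_key_exists('k', dict['k' => null]);   // true: the key is present even
                                              // though its value is null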
CreateCl <num args> <class name> [C|U..C|U] -> [C]
Creates an instance of the class specified by <class name> and pushes it on the
stack.
The specified class must be a subclass of "Closure", must have a single
public method named __invoke, and must be defined in the same unit as the
CreateCl opcode.
If there is more than one CreateCl opcode in the unit for the Closure
subclass named by %2, all of the opcodes must be possible to associate with
the same class (or trait), or none if the closure will not inherit a class
context at runtime. This is intended to mean that CreateCl opcodes for a
given closure may only occur in bytecode bodies of functions that are
generated to represent a single user-visible PHP function, async function,
async closure, generator, or generator closure.
Moreover, for normal (non-async, non-generator) functions and methods, there
must be at most a single CreateCl opcode in the unit for a given Closure
subclass contained in the unit.
Idx [C C C] -> [C]
Checks if the container in $3 contains the key in $2 and, if found, pushes
the corresponding value onto the stack. Otherwise, $1 is pushed onto the
stack. $3 must be an array, hack array, or hack collection.
ArrayIdx [C C C] -> [C]
Checks if the array in $3 contains the key in $2 and, if found, pushes the
corresponding value onto the stack. Otherwise, $1 is pushed onto the stack. A
fatal error will be thrown if $3 is not an array.
ArrayMarkLegacy [C C] -> [C]
Marks the array in $2 as a legacy array and pushes it onto the stack. If $1
is true, then the marking is done recursively. If $1 isn't a bool, then an
exception is thrown.
ArrayUnmarkLegacy [C C] -> [C]
Marks the array in $2 as a non-legacy array and pushes it onto the stack. If
$1 is true, then the unmarking is done recursively. If $1 isn't a bool, then
an exception is thrown.
AssertRATL <local id> <repo auth type> [] -> []
AssertRATStk <stack offset> <repo auth type> [] -> []
Assert known "repo authoritative type", for locals or stack offsets.
These opcodes may be used to communicate the results of ahead of time static
analysis (hhbbc) to the runtime. They indicate that the value in the
specified local or stack offset is statically known to have a particular
type. The "repo auth type" immediate is an encoded RepoAuthType struct (for
details see runtime/base/repo-auth-type.h).
As suggested by the name, these opcodes are generally for use with
RepoAuthoritative mode. They may appear in non-RepoAuthoritative mode with
one restriction: "specialized" array type information may not be asserted,
because the global array type table may only be present in RepoAuthoritative
mode.
BreakTraceHint [] -> []
This opcode has no effects, but is a hint that code immediately following it
is probably not worth including in the same compilation unit as the code in
front of it. In HHVM, this is used to tell the JIT to break a Tracelet when
it sees this opcode.
Silence <local id> <Start|End> [] -> []
With %2 = Start, sets the error reporting level to 0 and stores the previous
one in the local variable %1. The local variable will be overwritten without
reference counting.
With %2 = End, if the error reporting level is 0, restores the error
reporting level to the previous value (stored in local variable %1); if the
error reporting level is not 0, does nothing.
The verifier requires that all code paths to an End on local variable %1
contain a Start on %1, and that all code paths with a Start lead to an End on
the same variable. It additionally requires that none of these paths store
any value in %1 between the Start and End operations. Lastly, the set of
variables storing the error reporting state must be consistent across block
boundaries.
In either case, the local variable %1 must be an unnamed local.
GetMemoKeyL <local id> [] -> [C:<Int/String>]
Push an int or string which is an appropriate memoize cache key for the
specified local. The local should be one of the function's parameters. The
exact scheme for the cache key generation depends on whether the parameter is
constrained by an appropriate type constraint. This op may throw if the input
value is one that cannot be converted to a cache key (i.e., an object that does
not implement IMemoizeParam). This op can only be used within a function
marked as being a memoize wrapper.
MemoGet <rel offset> <local range> [] -> [C]
Retrieve a memoization value associated with the current function and push it
onto the stack. The values of the specified range of locals are used as the
keys to perform the lookup (if any). If any of the locals are not ints or
strings, fatal. The number of locals must match the number of formal
parameters to the function. If no value is present, branch to the specified
offset (without pushing anything). This op can only be used within a function
marked as being a memoize wrapper.
MemoGetEager <rel offset> <rel offset> <local range> [] -> [C]
Retrieve a memoization value associated with the current function and push it
onto the stack. This instruction behaves similarly to MemoGet, but is meant
to be used within an async memoize wrapper. If no value is present, branch to
the first specified offset (without pushing anything). If a value is present,
but it is a suspended wait-handle, push it onto the stack and branch to the
second specified offset. If a value is present, and it represents an eagerly
returned value (not a suspended wait-handle), push it without branching.
MemoSet <local range> [C] -> [C]
Store $1 as a memoization value associated with the current function and
leave it on the stack. The values of the specified range of locals are used
as keys to perform the lookup (if any). If any of the locals are not ints or
strings, fatal. The number of locals must match the number of formal
parameters to the function. If there is already a value stored with that
particular set of keys, it is overwritten. This op can only be used within a
function marked as being a memoize wrapper. If the function is an async
memoize wrapper, this marks the value as representing a suspended return
value from the wrapped async function (and therefore must be a wait-handle).
MemoSetEager <local range> [C] -> [C]
Store $1 as a memoization value associated with the current function and
leave it on the stack. This instruction behaves similarly to MemoSet, but is
meant to be used within async memoize wrappers. It indicates that the value
being stored represents an eager return from the wrapped async function (and
is not a suspended wait-handle).
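As an illustrative sketch, a (non-async) memoize wrapper over a single
int/string parameter behaves roughly like the following pseudo-bytecode
(labels are hypothetical; real wrappers may first use GetMemoKeyL to
normalize parameters that are not already ints or strings):
         MemoGet MISS <range covering the key local(s)>
         RetC                 (cache hit: return the memoized value)
  MISS:  ...invoke the wrapped implementation, leaving its result on the
         stack...
         MemoSet <range covering the key local(s)>
         RetC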
ResolveFunc <litstr id> [] -> [C]
Resolve %1 as a function name to a function pointer value, then push the
pointer onto the top of the stack. When resolution fails, raise an error.
ResolveMethCaller <litstr id> [] -> [C]
Resolve %1 as a function name to a function pointer value corresponding to a
MethCaller. If the method called is not available in the current context then
an exception is thrown. Otherwise, the function pointer is pushed to the top
of the stack. The meth caller must exist in the same unit as the resolving
function.
ResolveRFunc <litstr id> [C:Vec] -> [C]
Similar to ResolveFunc, resolve %1 as a function name to a function pointer
and raises an error if the resolution fails. $1 contains a list of reified
generics. If the function pointer takes reified generics, pushes a value
capturing the function pointer and reified generics onto the top of the
stack. If the function pointer does not take reified generics, pushes just
the function pointer onto the top of the stack.
ResolveClsMethod <litstr id> [C:Class] -> [C]
ResolveClsMethodD <litstr id> <litstr id> [] -> [C]
ResolveClsMethodS <mode> <litstr id> [] -> [C]
Push a class method pointer value. First, these instructions load values
into x and y as given by the following table:
instruction x y
---------------------+----+-----
ResolveClsMethod | $1 | %1
ResolveClsMethodD | %1 | %2
ResolveClsMethodS | %1 | %2
When loading litstr id %1 into x, ResolveClsMethodD will perform the work
done by the ClassGetC instruction to convert the name given by %1 into a
class.
When loading mode %1 into x, ResolveClsMethodS will perform the same work
as LateBoundCls/SelfCls/ParentCls depending on the specified mode.
This instruction checks if class x has an accessible static method named y.
If not, it raises a fatal error. Otherwise, it creates a value that can be
used to call that method and pushes the resulting class method pointer onto
the stack.
ResolveRClsMethod <litstr id> [C:Vec C:Class] -> [C]
ResolveRClsMethodD <litstr id> <litstr id> [C:Vec] -> [C]
ResolveRClsMethodS <mode> <litstr id> [C:Vec] -> [C]
Similar to their non-reified counterparts (ResolveClsMethod*), these
instructions load values into x and y based on the same table, performing
the same work and the same checks to resolve the class method pointer.
$1 contains a list of reified generics. If the class x has accessible
static method y and takes the given reified generics, pushes a value
capturing the class method pointer and the reified generics. If the
class method does not accept the reified generics, pushes the class
method pointer onto the stack.
ThrowNonExhaustiveSwitch [] -> []
Throws an exception indicating that the switch statement is non-exhaustive.
This exception can be downgraded to a warning or a noop through a runtime
option.
This bytecode instruction does not do any checks; it assumes that it was
emitted correctly.
ResolveClass <litstr id> [] -> [C:Class] (where %1 is a class name)
If %1 is a valid class name, resolve it to a class pointer value, then push
the pointer onto the top of stack. When resolution fails, raise an error.
RaiseClassStringConversionWarning [] -> []
Raises a warning indicating an implicit class to string conversion.
This warning can be downgraded to a noop through a runtime option.
SetImplicitContextByValue [C:?Obj] -> [C:?Obj]
Sets the implicit context to $1 and returns the previous implicit context.
VerifyImplicitContextState [] -> []
Verifies whether the current state of the implicit context is valid with
respect to the current calling context. Raises a warning or throws an
exception if the state is invalid, set to soft implicit context or cleared.
Can only be used in memoized wrapper functions.
CreateSpecialImplicitContext [C:Int C:?Str] -> [C:?Obj]
Creates a special implicit context as if by calling the
create_special_implicit_context builtin. $1 is one of the
ImplicitContext::State enum values, and $2 is a nullable string optionally
providing a memo-key. If $1 is not an int, or if $2 is not a nullable string,
a fatal is raised.
14. Generator creation and execution
---------------------------------------
CreateCont [] -> [C:Null]
This instruction may only appear in bodies of generators. Creates a new
Generator object, moves all local variables from the current frame into
the object, sets resume offset at the next opcode and suspends execution by
transferring control flow back to the caller, returning the Generator
object. Once the execution is resumed, the Null value sent by ContEnter
becomes available on the stack. It is illegal to resume a newly constructed
Generator using ContEnter with a non-null value or using the ContRaise opcode.
ContEnter [C] -> [C]
This instruction may only appear in non-static methods of the Generator
class. It transfers control flow to the saved resume offset of a function
associated with $this Generator object. The $1 will remain available
on the stack after the control is transferred. Once the control is
transferred back, a value determined by suspending opcode (Await, Yield,
YieldK or RetC) will be pushed on the stack. This value corresponds to
the next()/send() return value -- null for non-async generators, and
WaitHandle or null for async generators.
ContRaise [C:Obj] -> [C]
This instruction may only appear in non-static methods of the Generator
class. It transfers control flow to the saved resume offset of a function
associated with $this Generator object. The Exception stored at $1 is
thrown instead of invoking code at the resume offset. Once the control is
transferred back, a value determined by suspending opcode (Await, Yield,
YieldK or RetC) will be pushed on the stack. This value corresponds to
the raise() return value -- null for non-async generators, and WaitHandle
or null for async generators.
Yield [C] -> [C]
This instruction may only appear in bodies of generators. Stores $1
in the generator as the result of the current iteration, sets resume
offset at the next opcode and suspends execution by transferring control
flow back to the ContEnter or ContRaise. Once the execution is resumed,
the value sent by ContEnter becomes available on the stack, or
an exception sent by ContRaise is thrown.
YieldK [C C] -> [C]
This instruction may only appear in bodies of generators. Stores $1
in the generator as the result and $2 as the key of the current
iteration, sets resume offset at the next opcode and suspends execution
by transferring control flow back to the ContEnter or ContRaise. Once
the execution is resumed, the value sent by ContEnter becomes available
on the stack, or an exception sent by ContRaise is thrown.
ContCheck <check started> [] -> []
Check whether the generator can be iterated. $this must be a Generator
object. If the generator is finished, already running, or not yet started
and <check started> is enabled, an exception will be thrown.
ContValid [] -> [C:Bool]
Check generator validity. $this must be a Generator object. Pushes true
onto the stack if the generator can be iterated further, false otherwise.
ContKey [] -> [C]
Get generator key. $this must be a Generator object. Pushes the most
recently yielded key from the generator onto the stack.
ContCurrent [] -> [C]
Get generator value. $this must be a Generator object. Pushes the most
recently yielded value from the generator onto the stack.
ContGetReturn [] -> [C]
Get generator's return value. $this must be a Generator object. Pushes the
return value of the generator onto the stack.
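As an illustrative sketch, a simple generator such as
  function gen(): Generator<int, int, void> {
    yield 1;
    yield 2;
  }
compiles to a body that begins with CreateCont and emits a Yield per yielded
value, roughly:
  CreateCont     (create the Generator, suspend, and return it to the caller)
  PopC           (discard the null made available on the first resume)
  Int 1
  Yield          (suspend with value 1)
  PopC           (discard the value sent by the resuming ContEnter)
  Int 2
  Yield
  PopC
  Null
  RetC           (the generator finishes; ContGetReturn would observe null)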
15. Async functions
-------------------
WHResult [C:Obj] -> [C]
If $1 is not a subclass of WaitHandle, throws a fatal error. If $1 succeeded,
this instruction pushes the result value from the WaitHandle. If $1 failed,
this instruction throws the exception stored in the WaitHandle. If $1 is not
finished, throws an Exception.
Await [C] -> [C]
This instruction may only appear in bodies of async functions. Awaits
a WaitHandle provided by $1, suspending the execution if the WaitHandle
was not yet ready.
If $1 is not a subclass of WaitHandle, throws a fatal error. If $1 succeeded,
this instruction pushes the result value from the WaitHandle. If $1 failed,
this instruction throws the exception from the WaitHandle. Otherwise the
execution needs to be suspended:
If the async function is executed eagerly, creates an AsyncFunctionWaitHandle
object, moves all local variables and iterators from the current frame into
the object, sets resume offset at the next opcode, marks the
AsyncFunctionWaitHandle as blocked on the WaitHandle provided by $1 and
suspends execution by transferring control flow back to the caller, returning
the AsyncFunctionWaitHandle object.
If the async function is executed in resumed mode, sets resume offset at
the next opcode, marks the AsyncFunctionWaitHandle as blocked on the
WaitHandle provided by $1 and suspends execution by transferring control
flow back to the scheduler.
Once the execution is resumed, the result of the WaitHandle provided by $1
becomes available on the stack.
AwaitAll <local range> [] -> [C:Null]
Fetches instances of Awaitables from the locals in range %1, and suspends
until all of them have completed, at which point execution is resumed with a
single null on the stack.
Nulls in %1 are ignored; a fatal error is thrown if other non-Awaitables are
encountered. The stack must be empty. Should all of the Awaitables in %1
already be complete a null will be pushed to the stack without suspending
the current function.
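As an illustrative, source-level sketch (the function names are
hypothetical):
  async function fetch_sum(): Awaitable<int> {
    $x = await fetch_one();   // fetch_one(): Awaitable<int>
    return $x + 1;
  }
The "await" compiles to an Await opcode. If the Awaitable produced by
fetch_one() has already finished, Await pushes its result and execution
continues eagerly; otherwise the frame is suspended into an
AsyncFunctionWaitHandle (or, in resumed mode, control returns to the
scheduler), and execution resumes at the opcode after the Await once the
dependency finishes.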
Basic statement transformations
-------------------------------
To achieve HHBC's goal of making it straightforward for an interpreter or a
compiler to determine order of execution, control flow statements are
transformed into simpler constructs. Most control flow statements, such as
"if", "while", and "for", are implemented in a straightforward manner using
the Jmp* instructions.
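For example, a loop such as
  while ($n > 0) { $n--; }
can be lowered using only conditional and unconditional jumps, roughly as
follows (labels are hypothetical, and the compiler may arrange the test
differently):
  TOP:   CGetL $n
         Int 0
         Gt
         JmpZ DONE
         ...loop body...
         Jmp TOP
  DONE:  ...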
HHBC provides the Switch instruction for implementing very simple switch
statements; most real switch statements are implemented naively using the Eq
and JmpNZ instructions. Also, the functionality of both the echo statement and
the print statement is implemented with the Print instruction.
Foreach statements are implemented using iterator variables and the Iter*
instructions. Each foreach loop must be protected by an EH catch entry to
ensure that the iterator variable is freed when a foreach loop exits abnormally
through an exception.
Simple break statements and continue statements are implemented using the Jmp*
and IterFree instructions. Dynamic break is implemented using an unnamed local
(to store the 'break count') and a chain of basic blocks, where each block
decrements the unnamed local variable and compares it with 0, and then decides
where to jump next.
Basic expression transformations
--------------------------------
To reduce the size of the instruction set, certain types of expressions are
transformed:
1) Unary plus and negation
Unary plus and negation "+(<expression>)" gets converted to "(0 +
(<expression>))", and "-(<expression>)" gets converted to "(0 -
(<expression>))".
2) Assignment-by operators (+=, -=, etc)
Assignment-by operators are converted to use the SetOp* instructions.
3) List assignment (list)
List assignments are converted to use an unnamed local variable and the QueryM
and SetL instructions. In case of an exception, the unnamed local variable is
freed using an EH entry.
4) Logical and and logical or operators (and/&&, or/||)
If any of the operands have side effects, these operators are implemented using Jmp*
instructions instead of using the "and" and "or" instructions to implement
short-circuit semantics correctly. All Jmp* instructions used to implement
"and" and "or" operators will be forward jumps.
5) The new expression
The new expression is implemented by using the NewObj*, FCallCtor, and LockObj
instructions.
6) The ternary operator (?:)
The functionality of the ternary operator is implemented using Jmp*
instructions. All Jmp* instructions used to implement the ternary operator will
be forward jumps.
7) Silence operator (@)
The silence operator is implemented by using various instructions (including
the Jmp* instructions), unnamed local variables, and an EH catch entry. All Jmp*
instructions used to implement the silence operator will be forward jumps.
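For example, "@foo()" (where foo is a hypothetical function) expands roughly
to:
  Silence $tmp Start      (zero the error reporting level, saving the previous
                           level in the unnamed local $tmp)
  ...evaluate foo()...
  Silence $tmp End        (restore the saved error reporting level)
together with an EH catch entry around the protected region that also runs
Silence $tmp End before rethrowing, so the level is restored if foo() throws.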
8) The $this expression
The $this expression has different effects depending on whether or not $this is
the direct base of a property expression (such as "$this->x") or a method call
expression (such as "$this->foo()"). When the $this expression is the direct
base of a property expression or a method call expression, the This instruction
is used.
A bare $this expression within an instance method is handled one of two ways:
general or BareThis-optimized (optional). The general solution accesses a local
variable named "this", which is initialized at the beginning of the method
using the InitThisLoc instruction. The BareThis optimization applies to bare
$this access as long as $this is not passed by reference and there are no
dynamic method variables. In such cases, the BareThis instruction can be used
to directly access $this, and the InitThisLoc instruction is not needed.
Warning and errors at parse time
--------------------------------
Certain syntactically correct source code may cause warnings or errors to be
raised when the source file is parsed. Examples of this include using "$this"
on the left hand side of the assignment, using "$this" with binding assignment,
using "$a[]" in an r-value context, and doing "unset($a[])". HHBC handles these
cases by generating Throw or Fatal instructions at the beginning of the body
for the pseudo-main function.
Not yet implemented
-------------------
At the time of this writing, the HipHop bytecode specification is missing the
following details:
1) Description of traits
2) Description of metadata for class statements, trait statements, and method
statements
3) Description and examples for the yield generator feature
4) Description of the late static binding feature
5) Description of the resource type
6) Definitions of operators (ex. +, -, !) and other helper functions (ex.
is_null, get_class, strlen)
7) High level description of how namespaces are dealt with and any relevant
details
8) Description of async function implementation
/* Local Variables: */
/* fill-column: 79 */
/* End: */
vim:textwidth=80
Markdown | hhvm/hphp/doc/coding-conventions.md |
HHVM Coding Conventions
=======================
This document is meant to serve as a guide to writing C++ in the HHVM codebase,
covering when and how to use various language features as well as how code
should be formatted. Our goal is to ensure a consistently high-quality codebase
that is easy to read and contribute to, especially for newcomers.
The HHVM codebase contains a wide variety of code from many different authors.
It's been through a few different major stages in its life, including stints in
multiple different repositories. As a result, large (primarily older) parts of
the codebase do not fit this guide. When in doubt about how to write or format
something, always prefer the advice here over existing conventions in the
code. If you're already touching some older code as part of your work, please
do clean it up as you go along. But please do not spend hours applying the
formatting guidelines here to code you aren't otherwise modifying. While we'd
love for the entire codebase to follow this guide, we'd rather get there
gradually than lose lots of git history and developer time to purely cosmetic
changes. That said, if cosmetic changes that you're making as part of a larger
diff keep growing in scope, it may be worth pulling them out into a separate
diff.
There's no well-defined cutoff here - just try to minimize effort for your
reviewers. A good rule of thumb is that if your cosmetic changes require adding
significant new sections to the diff (such as a function rename that touches
all callsites), it should probably be pulled out into its own diff.
## Headers ##
Every .cpp file in the HHVM repository should have a corresponding .h file with
the same name, and which declares its public interfaces. We tend to value API
documentation more heavily than inline implementation comments, so *all*
declarations in headers (classes, enums, functions, constants, etc.) should be
documented. See Comments and Documentation for more details.
Build times are a frequent source of pain in many large C++ projects. Try not
to make large header files that mostly serve to include groups of other large
header files. This can discourage "include what you use," discussed in the
"What to include section".
### Include guards ###
To prevent multiple inclusion, all headers should have the following directive
after their license header comment:
```cpp
/*
* ...see the 'File copyright' section for details on what goes here...
*/
#pragma once
// File contents
```
### What to include ###
The golden rule for what to include is "include what you use" (IWYU). In brief,
this means you should not rely on any headers you include to transitively
include other headers which have definitions you require. You should also
prefer to forward declare structs and classes when the definition is not needed
(so, "don't include what you don't use"), which helps reduce HHVM's nontrivial
build time.
To make it easier to achieve IWYU, we have the following guidelines for
includes:
- Always include the corresponding .h for a .cpp first, before even system
headers.
- Separate includes into groups: C++ standard library headers, external projects
(such as Boost and Intel TBB), and finally headers within HHVM. Each group
should be separated by a newline, for readability. (Whether to separate HHVM
includes by subsystem (e.g., `jit`) is left up to the author.)
- Keep headers alphabetized within each group. This makes it easier to ensure
that all necessary includes are made, and no extraneous ones are left behind.
- Use double quotes for Folly and HHVM headers and angle brackets for all
others.
As an example, here is what the include section might look like for a file
named `bytecode.cpp`:
```cpp
#include "hphp/runtime/vm/bytecode.h"
#include <cstdio>
#include <string>
#include <boost/program_options/options_description.hpp>
#include "hphp/runtime/vm/class.h"
#include "hphp/runtime/vm/func.h"
#include "hphp/runtime/vm/hhbc.h"
#include "hphp/util/string.h"
```
### Inline functions ###
Defining functions inline is encouraged for very short functions.
When defining inline member functions on structs or classes which have tight,
compact interfaces (e.g., a smart pointer class, or any wrapper class), prefer
to define the functions in the class definition, for concision.
However, for classes with more complex, malleable APIs where inline helpers
proliferate (e.g., Func, Class, IRInstruction, etc.), restrict the class
definition to member function prototypes *only*. This makes the API much
cleaner. For these classes, define all inline functions in a corresponding
`-inl.h` file.
```cpp
// At the bottom of func.h.
#include "hphp/runtime/vm/func-inl.h"
```
```cpp
// After the copyright in func-inl.h.
namespace HPHP {
// Definitions go here.
}
```
For APIs large enough to warrant -inl.h files, move *all* definitions into the
-inl.h, even one-line accessors. This serves both to keep the API cleaner and
to avoid splitting implementations among three files (the header, the inline,
and the source).
Some files, with or without a corresponding -inl.h file, may need a -defs.h
file. This file also contains definitions of inline functions, but it is *not*
included by the main header. It is intended to be used when only a few callers
need access to the definitions, or when the definitions can't be in the main
header because it would create circular dependencies. It should be included
directly by the callers that do need access to the definitions it contains.
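As a sketch of this layout (the file and member names here are hypothetical),
a -defs.h file might look like:
```cpp
// thing-defs.h: *not* included by thing.h. The few callers that need these
// definitions include this file directly.
#pragma once

#include <cstddef>

#include "hphp/runtime/vm/thing.h"

namespace HPHP {

// Defined here rather than in thing.h to keep the main header light and to
// avoid a circular include.
inline size_t Thing::slotOffset(int idx) {
  return sizeof(Thing) + idx * sizeof(void*);
}

}
```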
## Structs and Classes ##
Classes are used extensively throughout the HHVM codebase, with a number of
coding conventions. See also Naming for conventions around class naming.
### Using struct vs. class ###
In C++, `struct` and `class` have nearly identical meanings; the only
difference lies in the default accessibility (`struct` defaults to public, and
`class`, to private).
We do not assign further meaning to these keywords, so we use `struct`
everywhere. Efforts to compile under MSVC also require that we use the same
keyword between a struct/class definition and its forward declarations due to
MSVC's failure to adhere to the C++ spec, and sticking to `struct` everywhere
makes this easier.
### Access control ###
Try to avoid the `protected` keyword. It tends to give a false sense of
security about encapsulation: since anyone can inherit from your class, anyone
can access the `protected` member with a little extra effort.
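For example, with a hypothetical `Widget`:
```cpp
struct Widget {
protected:
  int m_secret;
};

// Any client can defeat the "protection" with a one-line subclass.
struct WidgetLeak : Widget {
  using Widget::m_secret; // m_secret is now publicly accessible
};
```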
### Implicit and explicit constructors ###
By default, always use `explicit` for single-argument, non-initializer list
constructors.
```cpp
struct MyStruct {
// We don't want to implicitly convert ints to MyStructs
explicit MyStruct(int foo);
// Two-argument constructor; no need for explicit
MyStruct(const std::string& name, int age);
};
```
### Public data members vs. getters/setters ###
Prefer declaring public member variables to using getters and setters. Getters
and setters that don't manage object state in a nontrivial way serve to bloat
the API and introduce unnecessary boilerplate.
Getters are, of course, encouraged for private members. Avoid prefixing getters
with `get`:
```cpp
struct Func {
const SVInfoVec& staticVars() const;
void setStaticVars(const SVInfoVec&);
ArFunction arFuncPtr() const;
static constexpr ptrdiff_t sharedBaseOff();
};
```
### Declaration order ###
Adhere to the following order for declarations in a struct or class definition:
1. Friend classes.
2. Nested classes, enums, typedefs. (If possible, just declare the nested
class and define it following the enclosing class definition.)
3. Constructors, destructor.
4. Member functions, including static functions, documented and grouped
coherently.
5. Constants and static data members.
6. *All* instance data members, regardless of accessibility.
Private member functions can be interspersed with public functions, or
relegated to a single section before the data members. However, all instance
properties *must* occur contiguously at the end of the class definition.
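As an illustrative sketch (all names here are hypothetical), a definition
following this order looks like:
```cpp
struct Example {
  // 1. Friend classes.
  friend struct ExampleInspector;

  // 2. Nested classes, enums, typedefs.
  enum class Kind { Simple, Complex };

  // 3. Constructors, destructor.
  explicit Example(Kind kind);
  ~Example();

  // 4. Member functions, documented and grouped coherently.
  Kind kind() const;
  void reset();
  static Example makeDefault();

  // 5. Constants and static data members.
  static constexpr int kMaxDepth = 8;
  static Example* s_instance;

  // 6. All instance data members, contiguously at the end.
private:
  Kind m_kind;
  int m_depth;
};
```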
## Other C++ Language Features ##
Very few language features are unconditionally banned. However, if you want to
use one of the more controversial constructs such as `goto` or `operator,()`,
you'd better have a convincing argument as to why it's better than the
alternatives. C++ is a very large and complex language and we don't want to
artificially limit what developers can do, but that puts a lot of
responsibility on your shoulders.
Avoiding restrictions on useful language features (e.g., exceptions, templates,
C++11 lambdas) is a major motivating factor for maintaining our own style guide
rather than adopting an existing one.
### Namespaces ###
All HHVM code should be scoped in `namespace HPHP { /* everything */ }`. Large
submodules such as `HPHP::jit` and `HPHP::rds` may be contained in their own
namespace within `HPHP`. We often use anonymous namespaces instead of the
`static` keyword to keep symbols internal to their translation unit. This is
mostly left up to the author; just keep in mind that classes and structs,
unlike functions and variables, *must* be in an anonymous namespace in order to
be properly hidden.
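For example, in a `.cpp` file:
```cpp
namespace HPHP {
namespace {

// A struct must live in an anonymous namespace to be hidden from other
// translation units; the `static` keyword would not apply to it.
struct LocalState { int count; };

// Functions and variables may use either form; this file uses the anonymous
// namespace for both.
LocalState s_state;
int bumpCount() { return ++s_state.count; }

}
}
```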
Avoid `using namespace` whenever possible, especially in headers. It is
acceptable in `.cpp` files in very limited scopes (function-level or deeper) if
it will significantly aid in readability of the code that follows. `using
namespace std;` at the top of a `.cpp` is explicitly disallowed.
### Enums ###
Prefer `enum class` whenever possible. Old-style enums are generally only
acceptable if you expect that your type will be frequently used in an integer
context, such as array indexing.
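For example:
```cpp
// Preferred: scoped and strongly typed.
enum class Color { Red, Green, Blue };

// An old-style enum may be acceptable when the values are routinely used as
// integers, e.g., for array indexing.
enum Slot { kSlotBase, kSlotAux, kNumSlots };
int slotCounts[kNumSlots];
```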
## Naming ##
HHVM code adheres to some broad naming conventions.
When the convention is left open, in general, prefer the local conventions
used in the file you are working on---e.g., in a struct whose data members all
have `m_namesLikeThis`, prefer `m_anotherNameLikeThis` to `m_this_style`, even
though the latter is found in other parts of the codebase.
### Variables ###
Use `lowerCamelCase` or `lower_case_with_underscores` for all local variables,
adhering to whichever is the discernable local convention if possible. Static
variables (whether declared in an anonymous namespace or with the `static`
keyword) should additionally be prefixed by `s` (e.g., `s_funcVec`). Globals,
likewise, should be prefixed by `g_` (e.g., `g_context`).
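For example (with hypothetical names):
```cpp
int g_requestCount; // global: g_ prefix

namespace {
int s_cachedLimit; // static (internal linkage): s_ prefix
}

void example() {
  int localCount = 0; // lowerCamelCase...
  int other_count = 0; // ...or lower_case_with_underscores
}
```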
### Constants ###
All constants should be prefixed with `k` and use `CamelCase`, e.g.,
`kInvalidHandle`. Prefer `constexpr` to `const` whenever possible.
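For example:
```cpp
constexpr int kInvalidIdx = -1;
constexpr int kMaxNameLen = 128;
```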
### Class data members ###
As with variables, use `lowerCamelCase` or `lower_case_with_underscores` for
all data members. Additionally, private instance members should be prefixed
with `m_` (e.g., `m_cls`, `m_baseCls`, `m_base_cls`), and all static members
should be prefixed with `s_` (e.g., `s_instance`). Prefer to leave public
members unprefixed.
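For example (with hypothetical names):
```cpp
struct Thing {
  int size; // public members are left unprefixed

  static Thing* s_instance; // static member: s_ prefix

private:
  int m_refCount;  // private instance member: m_ prefix
  int m_flag_bits; // underscores are fine too; just stay consistent
};
```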
### Functions ###
We generally prefer `lowerCamelCase` for header-exposed functions, including
member functions, although we use `lower_case_with_underscores` as well (e.g.,
`hphp_session_init`), more commonly in file-local scopes. As usual, follow the
local naming conventions of the file you are working in.
If you are modeling a class after an existing pattern, such as an STL
container, prefer to follow the appropriate conventions (e.g.,
`my_list::push_back` is preferred over `my_list::pushBack`).
### Classes ###
Classes use `UpperCamelCase`, except when modeling existing patterns like STL
containers or smart pointers.
### Namespaces ###
New namespaces should use `lowercase`---and single-word namespaces are greatly
preferred for common usage. For longer namespaces (e.g., `vasm_detail`), use
`lower_case_with_underscores`.
### Other conventions ###
Prefer correctly capitalizing acronyms in new code (e.g., prefer `IRTranslator`
to `HhbcTranslator`). In this vein, prefer `ID` (e.g., `TransID`) to `Id`
(e.g., `FuncId`) in new code.
## Formatting ##
While consistent code formatting doesn't directly affect correctness, it makes
it easier to read and maintain. For this reason, we've come up with a set of
guidelines about how code should be formatted. There's a good chance that some
of these will conflict with your own personal preferred style, but we believe
that having a consistently easy to read codebase is more important than letting
each developer write code that he or she thinks is uncompromisingly beautiful.
Anything not specified here is left up to the judgment of the developer.
However, this document is not set in stone, so if a particular formatting issue
keeps coming up in code review it probably deserves a few lines in here.
### General rules ###
- All indentation is to be done using spaces.
- Each indentation level is 2 spaces wide.
- Lines may be no longer than 80 characters, unless absolutely required for
some syntactic reason.
- Lines should not have any trailing whitespace. This includes blank lines at
non-zero indentation levels; the only character on those lines should be a
newline.
### Types and variables ###
- When declaring a variable or typedef, the `*` and `&` characters for pointer
and reference types should be adjacent to the type, not the name (e.g.,
`const Func*& func`).
- Limit variable declarations to one per line.
### Function signatures ###
The following function signatures are formatted properly:
```cpp
// If arguments would fit on 1 line:
inline void Func::appendParam(bool ref, const Func::ParamInfo& info) {
}
// If the arguments need to wrap, we have two accepted styles, both of which
// are OK even if the wrapping wasn't necessary:
SSATmp* HhbcTranslator::ldClsPropAddr(Block* catchBlock,
SSATmp* ssaCls,
SSATmp* ssaName,
bool raise) {
doSomeStuff();
}
// This style is helpful if any of the function, argument, or type names
// involved are particularly long.
SSATmp* HhbcTranslator::ldClsPropAddr(
Block* catchBlock,
SSATmp* ssaCls,
SSATmp* ssaName,
bool raise
) {
doSomeStuff();
}
```
Always keep the type on the same line as the function name, unless it would
leave insufficient room for arguments. Do likewise with other modifying
keywords (`inline`, `static`, any attributes).
Wrapped arguments should always be aligned with the argument on the previous
line. The opening curly brace should be on the same line as the last argument,
with the exception of class constructors (see the Constructor initializer list
section). When writing function declarations in headers, include argument names
unless they add no value:
```cpp
struct Person {
// The single string argument here is obviously the name.
void setName(const std::string&);
// Two string arguments, so it's not obvious what each one is without names.
void setFavorites(const std::string& color, const std::string& animal);
};
```
### Statements ###
Conditional and loop statements should be formatted like so:
```cpp
if (vmpc() == nullptr) {
fprintf(stderr, "whoops!\n");
std::abort();
}
```
Note that there is a single space after the `if` keyword, no spaces between
`condition` and the surrounding parentheses, and a single space between the `)`
and the `{`. As with all blocks, the body should be one indentation level
deeper than the `if`. If the *entire* statement (condition and body) fits on
one line, you may leave it on one line, omitting the curly braces. In all
other cases, the braces are required. For example, the following are OK:
```cpp
if (obj->_count == 0) deleteObject(obj);
for (auto block : blocks) block->setParent(nullptr);
```
But these are not acceptable:
```cpp
if (veryLongVariableName.hasVeryLongFieldName() &&
(rand() % 5) == 0) launchRocket();
if ((err = SSLHashSHA1.update(&hashCtx, &signedParams)) != 0)
goto fail;
```
Avoid assignments in conditional expressions, unless the variable is declared
within the condition, e.g.,
```cpp
if (auto const unit = getMyUnit(from, these, args)) {
// Do stuff with unit.
}
```
Prefer C++11 foreach syntax to explicit iterators:
```cpp
for (auto const& thing : thingVec) {
// Do stuff with thing.
}
```
### Expressions ###
- All binary operators should have one space on each side, except for `.`,
`->`, `.*`, and `->*` which should have zero.
- Do not include redundant parentheses unless you think the expression would be
confusing to read otherwise. A good rule of thumb is that if you and/or your
reviewers have to look at a chart of operator precedence to decide if the
expression parses as expected, you probably need some extra parentheses. GCC
or clang may suggest extra parens in certain situations; we compile with
`-Werror`, so you must always heed those suggestions.
- If an expression does not fit on one line, attempt to wrap it after an
operator (rather than an identifier or keyword) and indent subsequent lines
with the beginning of the current parenthesis/brace nesting level. For
example, here are some long expressions, formatted appropriately:
```cpp
if (RuntimeOption::EvalJitRegionSelector != "" &&
(RuntimeOption::EvalHHIRRefcountOpts ||
RuntimeOption::EvalHHIRExtraOptPass) &&
Func::numLoadedFuncs() < 600) {
// ...
}
longFunctionName(argumentTheFirst,
argumentTheSecond,
argumentTheThird,
argumentTheFourth);
```
- Function calls should be formatted primarily using the previous rule. If one
or more of the arguments to the function is very wide, it may be necessary to
shift all the arguments down one line and align them one level deeper than
the current scope. This is always acceptable, but is especially common when
passing lambdas:
```cpp
m_irb->ifThen(
[&](Block* taken) {
gen(CheckType, Type::Int, taken, src);
},
[&] {
doSomeStuff();
lotsOfNonTrivialCode();
// etc...
}
);
```
### Constructor initializer lists ###
If an initializer list can be kept on a single line, it is fine to do so:
```cpp
MyClass::MyClass(uint64_t idx) : m_idx(idx) {}
MyClass::MyClass(const Func* func) : m_idx(-1) {
// Do stuff.
}
```
Otherwise, it is always correct to format lists thusly:
```cpp
MyClass::MyClass(const Class* cls, const Func* func, const Class* ctx)
: m_cls(cls)
, m_func(func)
, m_ctx(ctx)
, m_isMyConditionMet(false)
{}
MyClass::MyClass(const Class* cls, const Func* func)
: m_cls(cls)
, m_func(func)
, m_ctx(nullptr)
, m_isMyConditionMet(false)
{
// Do stuff.
}
```
### Namespaces ###
We don't nest namespaces very deeply, so prefer to keep the scoping to a single
line:
```cpp
namespace HPHP::jit::x64 {
///////////////////////////////////////////////////////////////////////////////
/*
* Some nice documentation.
*/
struct SomeNiceThing {
// some nice properties
};
///////////////////////////////////////////////////////////////////////////////
}
```
Do not increase the indentation level when entering namespace scope. Instead,
consider adding a line of forward slashes as a separator, to more clearly
delineate the namespace (this is especially useful for anonymous namespaces in
source files). This form of delineation is encouraged, but we have no strict
convention for its formatting (you'll see 70- or 79- or 80-character
separators, with or without an extra newline between it and the braces, etc.).
## Comments ##
All public and private APIs in headers should be documented in detail. Names
and notions which are not obvious (e.g., "persistent" or "simple") should be
explained. Preconditions and postconditions should be noted.
Inline code comments are encouraged for complex logic, but their density is
left up to the author. Rather than summarizing/paraphrasing what your code is
doing, focus on explaining what overarching goal the code is achieving and/or
why that goal is necessary or desirable.
### Comment style ###
Here are some comment styles we use or avoid:
```cpp
// This style of comment is the most common for relatively short inline
// comments. It's fine if it's multi-line.
//
// It's also fine if it has line breaks. The extra newline aids readability in
// this case.
/*
* This style of comment is the right one to use for struct/function
* documentation. Prefer one star on the opening line, as opposed to the
* doxygen standard of two.
*
* This is also sometimes used for inline code comments, although the // style
* makes it easier to comment out blocks of code.
*/
struct ClassLikeThing {
std::vector<const Func*> methods; // This is fine for short annotations.
/* This is also ok, though try not to mix and match too much in a file. */
std::vector<const ClassLikeThing*> parents;
};
/* Don't write multiline comments where some lines are missing their prefix.
This is pretty weird. */
```
Try to use complete sentences in all but the shortest of comments. All comments
should be flowed to 79 characters in width.
### Separators ###
Delineate sections of code with a line of forward slashes. There is no strict
convention, but prefer lines of slashes to other delineators (e.g., `/*****/`,
five newlines, ASCII-art cartoon characters).
### File copyright ###
All files must begin with a copyright/license notice. For files created by
Facebook employees, the following should be used:
```cpp
/*
+----------------------------------------------------------------------+
| HipHop for PHP |
+----------------------------------------------------------------------+
| Copyright (c) 2010-201x Facebook, Inc. (http://www.facebook.com) |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| [email protected] so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
*/
// File contents start here.
```
We do not require copyright assignment for external contributions, so
non-Facebook contributors should include their own header. The exact contents
are up to you, as long as the license is compatible with the PHP license. |
hhvm/hphp/doc/command.admin_server | <h2>Admin Server URL Commands</h2>
When running a compiled program as an HTTP server, by default it runs an
admin server on a specified port. One can send an HTTP request to this port
to perform certain actions. To list all possible commands:
GET http://localhost:8088
A particular command is like:
GET http://localhost:8088/<command-URL>?auth=<password>
This is a list of available URLs:
/stop: stop the web server
/translate: translate hex encoded stacktrace in 'stack' param
stack required, stack trace to translate
build-id optional, if specified, build ID has to match
bare optional, whether to display frame ordinates
/build-id: returns build id that's passed in from command line
/check-load: how many threads are actively handling requests
/check-mem: report memory quick statistics in log file
/check-apc: report APC quick statistics
/status.xml: show server status in XML
/status.json: show server status in JSON
/status.html: show server status in HTML
/stats-on: main switch: enable server stats
/stats-off: main switch: disable server stats
/stats-clear: clear all server stats
/stats-web: turn on/off server page stats (CPU and gen time)
/stats-mem: turn on/off memory statistics
/stats-apc: turn on/off APC statistics
/stats-apc-key: turn on/off APC key statistics
/stats-sql: turn on/off SQL statistics
/stats-mutex: turn on/off mutex statistics
sampling optional, default 1000
/stats.keys: list all available keys
/stats.kvp: show server stats in key-value pairs
keys optional, <key>,<key/hit>,<key/sec>,<:regex:>
prefix optional, a prefix to add to key names
/pcre-cache-size: get pcre cache map size
/dump-pcre-cache: dump cached pcre's to /tmp/pcre_cache
If program was compiled with GOOGLE_CPU_PROFILER, these commands will become
available:
/prof-cpu-on: turn on CPU profiler
/prof-cpu-off: turn off CPU profiler
If program was compiled with GOOGLE_TCMALLOC, these commands will become
available:
/free-mem: ask tcmalloc to release memory to system
/tcmalloc-stats: get internal tcmalloc stats
If program was compiled with USE_JEMALLOC, these commands will become available:
/free-mem: ask jemalloc to release memory to system
/jemalloc-stats: get internal jemalloc stats
/jemalloc-stats-print: get comprehensive jemalloc stats in human-readable form
/jemalloc-prof-activate: activate heap profiling
/jemalloc-prof-deactivate: deactivate heap profiling
/jemalloc-prof-dump: dump heap profile
file optional, filesystem path
If program was compiled with USE_JEMALLOC and ENABLE_HHPROF, these commands will
become available:
/hhprof/start: start profiling
requestType "all" or "next"*
url profile next matching url for "next"
lgSample lg sample rate
profileType "current"* or "cumulative"
/hhprof/status: configuration and current dump status
/hhprof/stop: stop profiling
/pprof/cmdline: program command line
/pprof/heap: heap dump
/pprof/symbol: symbol lookup |
|
hhvm/hphp/doc/command.compiled | <h2>Compiled Program's Command Line Options</h2>
= --help
Displays the list of command line options with short descriptions.
= -m, --mode
Specifies one of these execution modes,
run: (default) directly executes the program from command line.
debug: starts debugger.
server: starts an HTTP server from command line.
daemon: starts an HTTP server and runs it as a daemon.
replay: replays a previously recorded HTTP request file.
translate: translates a hex-encoded stacktrace.
= -a, --interactive
Starts the hhvm Read-Eval-Print-Loop (REPL). Works as an alias for `hhvm --mode debug`.
= -c, --config=FILE
This reads in a configuration file to set options. FILE should be in HDF
format. If no file is specified, the one in /etc/hhvm/config.hdf will be used
if it exists.
= -v, --config-value=STRING
This sets a configuration option on the command line. STRING should be
a line of HDF, e.g. Section.Name=Value. The options settable are the same
as those set by --config.
= -d, --define=STRING
This sets an ini option on the command line. STRING should be a line in the
format expected by ini, e.g. hhvm.option=value. The options settable are the
same as those set by --config, when providing an .ini file.
= -p, --port
Which port to start HTTP server on.
= --admin-port
Which port to start admin server that can take URL commands to perform
administration, status queries, stats queries or debugging commands.
= --debug-host
When running "debug" mode, specifies which HPHPi server to attach to.
= --debug-port
When running "debug" mode, specifies which HPHPi server port to connect.
= --debug-extension
When running "debug" mode, specifies which debugger extension PHP file to load.
= --debug-cmd
When running "debug" mode, specifies a command to execute at startup time.
Multiple --debug-cmd can be specified to execute more than one command.
= --debug-sandbox
When running "debug" mode, specifies which sandbox to attach to for remote
debugging. Default is "default".
= -f, --file
When mode is <b>run</b>, specifies which PHP to execute.
= --count
How many times to repeat execution of a PHP file.
= --no-safe-access-check
Whether to allow any file or directory access without security checking.
= --extra-header
Extra texts to add to front of each line of logging.
= --build-id
Specifies a version number or an arbitrary string that admin server's build-id
can return. Used for version tracking. |
|
hhvm/hphp/doc/command.compiler | <h2>Compiler Command Line Options</h2>
= --help
Displays the list of command line options with short descriptions.
= -t, --target=TARGET
This sets the mode of operation for hphp. TARGET is one of <b>hhbc</b>,
<b>filecache</b>, and <b>run</b> (default).
hhbc: runs analysis on the input files and generates a repo as output.
run: is the same as hhbc but also runs the program.
filecache: only builds the static file cache.
= -f, --format=FORMAT
The possible values of FORMAT depends on the chosen target.
If the target is <b>lint</b> or <b>filecache</b>, the format option is unused.
If the target is <b>hhbc</b> or <b>run</b>, FORMAT is one of the following:
binary: (default) A sqlite3 repo is produced.
hhas: This is the same as binary, but also hhas is output for each input PHP file.
text: This is the same as binary, but also bytecode is output as text for each input PHP file.
exe: This is the same as binary, but also creates an executable with the repo embedded in it.
= --input-dir=PATH
PATH is the path to the root directory of the PHP sources.
= --program=NAME
When using the exe format or run target, NAME will be the name of the compiled
executable.
= --args=ARGUMENTS
When using the run target, the executable will be run with arguments ARGUMENTS.
= -i, --inputs=FILE
FILE is added to the list of input PHP files.
= --input-list=FILE
FILE is the path to a file with a list of PHP sources to add to the input
list. The format is one path per line.
= --dir=DIR
DIR is a path to a directory. All PHP sources in that directory, including
subdirectories, are added to the input list.
= --exclude-dir=DIR
DIR is the path to a directory. All PHP sources in that directory, including
subdirectories, are excluded from the input list with the exception of
forced inputs.
= --ffile=FILE
FILE is included, overriding exclusion commands.
= --exclude-file=FILE
FILE is excluded from the input list, except where overridden by --ffile.
= --cfile=FILE
FILE is included in the file cache. It is not affected by exclusion commands.
= --cdir=DIR
All static resources in DIR, including subdirectories, are added to the file
cache. It is not affected by exclusion commands.
= --parse-on-demand=BOOL (default: true)
If BOOL is true, then include statements in PHP sources will add files to
the input list as they are encountered when the filename is statically
knowable.
= --branch=STRING
This specifies the SVN branch for logging purposes.
= --revision=NUMBER
This specifies the SVN revision for logging purposes.
= -o, --output-dir=DIR
The compiler will place the generated sources in DIR. If this parameter is
not specified, the compiler will use a new directory in /tmp.
= --sync-dir=DIR
If this parameter is set, the compiler will first output to DIR, and then
only copy over files that have changed to the output directory. This is to
preserve their timestamps so that a make will not recompile unchanged files.
= --gen-stats=BOOL (default: false)
If BOOL is true, then detected errors in the code and code statistics will
be output to CodeError.js and Stats.js respectively in the output directory.
= --keep-tempdir=BOOL (default: false)
If no output directory is specified, the compiler will place generated sources
into a directory in /tmp. If BOOL is true, then this directory will not be
deleted after the program is compiled and, optionally, run.
= --config=FILE
This reads in a configuration file to set options. FILE should be in HDF
format.
= --config-dir=DIR (default: --input-dir)
This sets the root directory for configuration purposes to be DIR. All
relative paths passed as options will be treated as relative from this
directory.
= -v, --config-value=STRING
This sets a configuration option on the command line. STRING should be
a line of HDF, e.g. Section.Name=Value. The options settable are the same
as those set by --config.
= -d, --define=STRING
This sets an ini option on the command line. STRING should be a line in the
format expected by ini, e.g. hhvm.option=value. The options settable are the
same as those set by --config, when providing an .ini file.
= -l, --log=INT (default: -1)
This sets the level of logging output to standard out. The Levels are:
-1: If the target is run, then none, else the same as 3.
0: No logging
1: Only errors
2: The same as 1 plus warnings.
3: The same as 2 plus extra information.
4: All log messages.
= --force=BOOL (default: true)
Forces the compiler to generate output even if there are code errors
encountered.
= --file-cache=FILE
If this argument is given, a static file cache will be created with path FILE. |
|
hhvm/hphp/doc/debug.gdb | <h2>Useful gdb Commands</h2>
info threads
thread apply [threadno] [all] args
set follow-fork-mode parent
set detach-on-fork on
set print pretty
handle SIGPIPE nostop noprint pass
dump binary memory [filename] [start_addr] [end_addr]
symbol-file [un-stripped binary OR symbol file from strip]
= b main.no.cpp:55
If you get "Couldn't get registers" error when starting a program with gdb,
instead of attaching to a running process, this is because of a gdb bug: it is
unable to switch to the main thread/process when the program forks:
[Thread debugging using libthread_db enabled]
[New Thread 46912496246512 (LWP 30324)]
[New Thread 1084229952 (LWP 30327)]
[Thread 1084229952 (LWP 30327) exited]
Couldn't get registers: No such process.
Set a break point at line 55 of main.no.cpp, then "r", you will get this,
[Thread debugging using libthread_db enabled]
[New Thread 46912496246512 (LWP 30632)]
[New Thread 1084229952 (LWP 30636)]
[Thread 1084229952 (LWP 30636) exited]
<b>[Switching to Thread 46912496246512 (LWP 30632)]</b>
Breakpoint 1, main (argc=3, argv=0x7fff41b5b138) at sys/main.no.cpp:55
55 return HPHP::execute_program(argc, argv);
(gdb) c
Magically, gdb is able to switch to main thread, attaching to it, then it will
be able to debug it, even if it forks afterwards.
<h2> Getting PHP symbols in the JIT under gdb </h2>
The VM periodically emits DWARF files containing function address
information for the JIT code it generates. These DWARF files are synced with gdb
asynchronously (by default every ~128 tracelets). This means that the backtrace
you see under gdb may contain some unresolved PHP symbols that show up as ??s,
for symbols that have not been synced.
There are three ways to resolve this:
1. pass -v Eval.GdbSyncChunks=1 in the command line. This forces the VM to sync
debug info synchronously with gdb.
2. call HPHP::g_context.m_node.m_p->syncGdbState() from the gdb CLI. This
forces a manual sync of all outstanding symbols to gdb.
3. if the program has hit a seg fault (or another signal), press continue on
the CLI. The HHVM signal handler will sync outstanding DWARF symbols to gdb,
and a subsequent 'bt' should show all symbols. |
|
hhvm/hphp/doc/debug.linux | <h2>Useful Linux Commands</h2>
1. Who's listening on port 80?
sudo lsof -t -i :80
2. Who's talking to a remote port 11300?
netstat -pal | grep 11300
3. Incoming web requests
ngrep -q "POST|GET"
ngrep -q "POST|GET" | egrep "(POST|GET) /"
4. Incoming and outgoing web traffic
tcpdump "port 80" -X -s 1024
5. Who's blocking port access?
/sbin/iptables -L
/sbin/iptables -F # flush iptables
6. Who's the main www process?
ps -e | grep www
7. Start gdb by process name
gdb --pid=`ps -eo pid,comm | grep www | cut -d' ' -f1` |
|
hhvm/hphp/doc/debug.mutex | <h2>Debugging Excessive Mutex</h2>
1. Turn on mutex stats
Hit admin port with /stats-mutex to turn on mutex stats:
GET http://localhost:8088/stats-mutex
2. Query mutex stats
Get mutex stats like this,
GET "http://localhost:8088/stats.kvp?keys=:mutex.*:"
3. Pre-written script
Or, run the pre-written script in bin/ to report the stats,
php ../bin/report_mutex.php localhost 10 1
4. Turn off mutex stats
Hit admin port with /stats-mutex to turn off mutex stats:
GET http://localhost:8088/stats-mutex |
|
hhvm/hphp/doc/debug.profile | <h2>Using Google CPU Profiler</h2>
Building with GOOGLE_CPU_PROFILER set lets you collect profiles from
the server or from the command line. However, our testing has found
that the server can get stalled while profiling for reasons
unknown. Still, it's possible to get enough requests in that the
profile can be significant.
There are two stages in profiling, collecting the profile and
processing it into a readable format. Collection is different on the
command line and server.
= Profiling from the command line
For building stand alone programs you need to link with libprofiler and
libunwind:
export LIBRARY_PATH=[path]/hphp/external/google-perftools/lib:\
[path]/hphp/external/libunwind/lib
g++ <my program>.cpp -lprofiler
With a compiled program, p, execute:
CPUPROFILE=p.prof CPUPROFILE_FREQUENCY=1000 p args
This will create a file p.prof when p finishes while taking samples 1000
times per second. The frequency can be changed: higher frequencies
will impact performance but lower frequencies will require a longer
run to collect a significant number of samples.
= Profiling from the server
Run
GET http://[server]:8088/prof-cpu-on
Then hit the server some number of times. When satisfied,
GET http://[server]:8088/prof-cpu-off
A file /hphp/pprof/[host]/hphp.prof should be created. The exact path is
configurable with the runtime option Debug.ProfilerOutputDir
(defaults to /tmp on production).
= Processing the profile
Use the tool pprof to process the profile. For example:
pprof --gif p p.prof > p.gif
This generates a gif with the callgraph of p.
Note that if you needed to strip the program, it's still possible
to use pprof if you call it on the unstripped version. |
|
hhvm/hphp/doc/debug.server | <h2>How to Debug Server Problems</h2>
1. Crashing and other memory problems
Normally a crash generates /tmp/stacktrace.<pid>.log file that has stacktrace
where crash happens. Sometimes, a crash can happen badly without giving crash
log a chance to generate a readable stack. Then use ResourceLimit.CoreFileSize
to turn on coredumps to capture a crash.
Another way is to use Debug.RecordInput option to capture bad HTTP requests.
Debug.RecordInput = true
Debug.ClearInputOnSuccess = false
Then replay with "-m replay" option from the compiled program at command line.
This way, one can run it under valgrind to detect memory problems.
./program -m replay -c config.hdf --count=3 /tmp/hphp_request_captured
./program -m replay -c config.hdf captured_request1 captured_request2
./program -m replay -c config.hdf --count=2 req1 req2
2. Server hanging and other status problems
Admin server commands provide status information that may be useful for
debugging, esp. /status.json command that will tell network I/O status of each
thread. |
|
hhvm/hphp/doc/debugger.cmds | -------------------------- Session Commands --------------------------
[m]achine connects to an HPHPi server
[t]hread switches between different threads
[q]uit quits debugger
------------------------ Program Flow Control ------------------------
[b]reak sets/clears/displays breakpoints
[e]xception catches/clears exceptions
[r]un starts over a program
<Ctrl-C> breaks program execution
[c]ontinue * continues program execution
[s]tep * steps into a function call or an expression
[n]ext * steps over a function call or a line
[o]ut * steps out a function call
-------------------------- Display Commands --------------------------
[p]rint prints a variable's value
[w]here displays stacktrace
[u]p goes up by frame(s)
[d]own goes down by frame(s)
[f]rame goes to a frame
[v]ariable lists all local variables
[g]lobal lists all global variables
[k]onstant lists all constants
------------------------ Evaluation Commands ------------------------
@ evaluates one line of PHP code
= prints right-hand-side's truncated value, assigns to $_
${name}= assigns a value to left-hand-side
[<?]php starts input of a block of PHP code
?> ends and evaluates a block of PHP code
[a]bort aborts input of a block of PHP code
[z]end evaluates the last snippet in PHP5
------------------- Documentation and Source Code -------------------
[i]nfo displays documentations and other information
[l]ist * displays source codes
[h]elp displays this help
? displays this help
-------------------- Shell and Extended Commands --------------------
! {cmd} executes a shell command
& {cmd} records and replays macros
x {cmd} extended commands
* These commands are replayable by just hitting return. |
|
hhvm/hphp/doc/debugger.devdocs | This document is intended to help developers understand how HHVM
debugging is implemented. For user documentation, see
docs/debugger.start.
1. Overview
-----------
HHVM provides a rich set of debugging services as well as a
command-line debugger client. The client and server (VM) can be on the
same or different machines. The client and server may also be in the
same process when debugging a script instead of a web server.
For simplicity, much of this document will assume the client and
server are in different processes. The operation of the various
components below is mostly unchanged when they are in the same
process, though.
A HHVM server can be configured to allow remote debugging with option
Eval.Debugger.EnableDebuggerServer. This creates a new debug-only
endpoint to which debugger clients may connect on the port specified
by Eval.Debugger.Port (default 8089). The class DebuggerServer is
responsible for setting up and listening for connections on this endpoint.
1.1 The Proxy
-------------
When a debugger client connects to a VM, whether that VM is a remote
server or a local instance running a script, a "debugger proxy" is
created by the server. This proxy owns a connection, via a socket
wrapped within a Thrift buffer, to the client which is doing the
debugging. All interaction with the client is performed through this
proxy, and any time the VM does something the debugger might need to
know about it informs the proxy. The proxy is implemented in the
DebuggerProxy class.
The proxy has two important states: interrupted, or not
interrupted. When a proxy is interrupted it listens for commands from
the client and responds to them. When it is not interrupted, the proxy
does nothing and simply sits around waiting to be interrupted. A proxy
gets interrupted in one of two ways: interesting events from the VM,
or by a dedicated signal polling thread in response to a signal from
the client.
The proxy will listen for commands from the client, and create
instances of subclasses of DebuggerCommand to execute those
commands. A command may respond to the client with results, or it may
cause the proxy to allow the interrupted thread to run again.
1.2 The Client
--------------
Anyone can build a client so long as they speak the protocol described
below. HHVM provides a command line client, usually called
"hphpd". The client is invoked by passing "--mode debug" on the
command line. This causes HHVM to create a DebuggerClient object,
which creates a new thread which will run the command processing
loop. The client may attach to a server, or it may attach to the VM in
its own process and debug a script running on the main thread. If
there is no script to run, the main thread simply waits for the client
to finish.
Somewhat confusingly, the client also creates a proxy to represent the
VM in its own process. Thus, the proxy is not only a server-side
object. This proxy works just like the proxy on a server, and is
connected to in the same way. This proxy is created even if the client
is connecting to a server, though in that case it will not really be
used. If the user disconnects from their server this proxy will be
used to debug local scripts.
2.0 Communication Protocol
--------------------------
The communication protocol between the client and server is fairly
simple. It is based on Thrift, and the bulk of the implementation is
held in DebuggerCommand and its subclasses. All communication is based
on sending a Command, and in most cases receiving a response of the
same Command back, sometimes updated with more information. User
actions like print, where, and breakpoint translate into CmdPrint,
CmdWhere, and CmdBreak being sent to the server, and received back
with data like the result of the print or where operation, or status
about the breakpoints being set. Some commands cause the server to
resume execution of the program. User actions like continue, next, and
step translate into CmdContinue, CmdNext, and CmdStep being sent to
the server, which does not respond immediately but continues execution
until the program reaches, say, a breakpoint. The server then responds
with CmdInterrupt(BreakPointReached) to signal that it is now in the
"interrupted state" and is ready to receive more commands.
2.1 Initialization
------------------
When a new connection is made to the debugger port a proxy is created
to own that connection. This proxy is held in a global map keyed by a
sandbox ID. The proxy starts a "dummy sandbox" so it can accept
commands when there is no active request, and it starts up a signal
thread to poll the client for "signals", i.e., Ctrl-C. The dummy
sandbox is always started, and should not be confused with a real
sandbox. It will never serve a request and is really just there to
provide a place to execute code and interact with the server when
there are no requests.
The proxy is now ready to use, and is not interrupted. The client,
after establishing the connection on the debugger port, now waits for a
command from the proxy. Note that the proxy really doesn't have its
own thread. From now on, it runs on whatever thread interrupts
it. That may be the dummy sandbox thread, or it may be a normal
request thread.
So long as the proxy is not interrupted, the signal thread will poll
the client once per second with CmdSignal. The client responds with
CmdSignal, updated with whether or not Ctrl-C was pressed. If it was,
the signal thread asks each thread registered with the proxy to
interrupt, then goes back to polling. If the proxy remains
un-interrupted on the next poll, the signal thread will ask the dummy
sandbox thread to interrupt.
The dummy sandbox creates a thread which first interrupts the proxy
with "session started", and then waits to see if it needs to respond
to a signal from the client. If there is a signal from the client, the
dummy sandbox thread simply loops and interrupts the proxy with
"session started" again, and waits again.
The proxy, having been interrupted with "session started" from the
dummy sandbox, sends a CmdInterrupt(SessionStarted) to the client. The
proxy is now interrupted, so it enters a loop listening for commands
from the client. It also blocks the signal thread from sending
CmdSignal to the client. The proxy will remain interrupted, processing
commands requested by the client, until one of those commands causes
the proxy to leave the interrupted state and let the thread which
interrupted it continue. In the case of SessionStarted, that lets the
dummy sandbox thread continue. In the case of more interesting
interrupts from the VM, on threads executing requests or other user
code, it lets those threads run.
When the client receives the SessionStarted interrupt after making the
initial connection, it sends CmdMachine to attach to the user's
sandbox. The proxy "attaches" to the sandbox by registering itself as
the proxy for that sandbox id in the global proxy map. It then signals
the dummy sandbox thread, responds with CmdMachine, and returns to the
un-interrupted state. The client again waits for a command from the
proxy. The dummy sandbox receives the signal, loops, and interrupts
the proxy again with "session started", which sends a second
CmdInterrupt with type SessionStarted to the client. At this point the
client has received CmdInterrupt(SessionStarted) and the proxy is
interrupted in the dummy sandbox. The initial connection is complete,
and the client can issue whatever commands it wishes.
Graphically, the initial connection protocol is:
Server threads:
DL -- Debugger Server Listening Thread
SP -- Signal Polling Thread
DS -- Dummy Sandbox Thread
RTx -- Request Threads
Client Server
------------- --------------------------------------------------
DL SP DS RTx
| Listen for
| connections
| |
Connect on ------> |
debugger port Create Proxy
| Create SP ----> |
| Create DS ------------------> |
| | | |
| | | |
| <----------------------- CmdSignal |
CmdSignal ----------------------> | |
| | | |
| <------------------------------------ CmdInterrupt(SS)
CmdMachine(attach) ---------------------------> |
| | | Switch sandbox
| | | Notify DS
| <------------------------------------ CmdMachine
| | | Loop due to notify
| <------------------------------------ CmdInterrupt(SS)
Ready to go | | |
| | | |
v v v v
2.2 Steady State
----------------
Once the client and server are connected, the most common flow is that
the client waits for a CmdInterrupt from the proxy while the
application runs. When a request thread hits a breakpoint, throws an
exception, etc. it will interrupt the proxy. The proxy may decide to
ignore the interrupt (perhaps it is not configured to care about
thrown exceptions, for instance), in which case the request thread
will keep running and the client will never know about the event. If
the proxy does decide to take the interrupt it will send CmdInterrupt
to the client, then wait for commands from the client. The client will
send commands and get responses from the proxy until it sends a
command that causes the proxy to let the interrupted thread continue
execution.
Signal polling continues so long as the proxy is not interrupted.
Client Server
------------- --------------------------------------------------
DL SP DS RTx
Listen for | | | |
commands | | | IP is at a
| | | | breakpoint.
| <----------------------------------------------- CmdInterrupt(BPR)
CmdWhere --------------------------------------------------> |
| <-------------------------------------------------- CmdWhere
| | | | |
CmdPrint --------------------------------------------------> |
| <-------------------------------------------------- CmdPrint
| | | | |
CmdContinue -----------------------------------------------> |
| | | | Continue request
| <----------------------- CmdSignal | |
CmdSignal ----------------------> | | |
| | | | |
v v v v v
2.3 Ctrl-C
----------
When the client wants to interrupt the server while it is executing
code, it responds to CmdSignal with a flag indicating it wants to
stop. In the command line client, pressing Ctrl-C will cause the next
response to CmdSignal to set the flag. The proxy's signal polling
thread will then ask all current request threads to interrupt. When
one does, it will send a CmdInterrupt to the client.
Client Server
------------- --------------------------------------------------
DL SP DS RTx
| <----------------------- CmdSignal | |
CmdSignal(stop) ----------------> | | |
| | Set flag on each | |
| | RTx thread to cause | |
| | it to interrupt | |
| | | | Interrupt flag seen.
| <----------------------------------------------- CmdInterrupt(BPR)
| | | | |
v v v v v
2.4 Quitting
------------
CmdQuit is just like any other command, except that after the proxy
responds it will remove itself from the global proxy map, close the
connection, turn off the signal polling and dummy sandbox threads, and
destroy itself. The same actions will occur if the connection with the
client is lost for any other reason, even if no CmdQuit was
received. An error reading from the socket, a closed socket, etc.
2.4.1 Cleaning the proxy
------------------------
There are many cases where the proxy will notice that a client has
terminated the connection. The easiest one is when a quit command is
received, but the client may exit for any number of reasons, and in
any state. At a minimum, the proxy's signal polling thread will, after
one second, notice that the connection has been dropped and initiate
cleanup. However, neither the signal polling thread nor the dummy
sandbox thread can completely perform the cleanup because they are
owned by the proxy, and destroying the proxy would destroy those
threads before they have completed.
Thus, proxy cleanup may be initiated by any thread with Proxy::stop(),
but the final cleanup is performed by another thread doing
housekeeping work. The cleanup work waits for both the signal polling
and dummy sandbox threads to exit before completing. Server-side this
housekeeping work is done by the server thread, which is also
listening for new debugger connections. Note that this cleanup work
may complete while a request thread is still using the proxy. The last
reference to the proxy will finally destroy it, and the cleanup work
ensures that the proxy is still usable (and communicates that it is
stopped) by any outstanding request threads.
3.0 Client Implementation
-------------------------
The debugger client provided by HHVM is not a separate program, but a
special mode passed when executing HHVM. When "--mode debug" is
passed, a DebuggerClient object is created and a new thread is started
to execute DebuggerClient::run(). This "client thread" will execute
the main loop of the client, presenting a command prompt at times, and
running a communication loop with the server at other times.
A "local proxy" is also created, which is a normal DebuggerProxy for
the VM within the process. The client connects to this proxy normally,
with a socket and a thrift buffer. The proxy will create a signal
polling thread as usual, but it will not setup a dummy sandbox. The
lack of a dummy sandbox is really the only difference between a normal
proxy and this local proxy.
The main thread of the process will run a script specified on the
command line, just like HHVM normally would. The client will, by
default, attempt to debug that script. The main thread's execution is
slightly modified to allow the client to restart the script in
response to the 'run' command, and to give control back to the client
when the script is complete instead of exiting the process.
If the client is asked to connect to a remote server (either via "-h"
on the command line or via the 'machine connect' command) then it does
so as described above, and the main thread of the process will simply
idle and wait for the client to exit, at which time the process will
exit.
3.1 Console and communication loops
-----------------------------------
The debugger client has a top-level "event loop" which waits to
receive commands from the proxy to which it is attached. It responds
to CmdSignal, and when it receives a CmdInterrupt it enters a "console
loop" which presents a prompt to the user and processes user
commands. Each user command is recognized and an instance of a
subclass of DebuggerCommand is created, then executed.
The client will remain in the top-level event loop until an interrupt
is received, and it will remain in the console loop until a command
causes execution on the server to continue. When such a command is
executed (e.g. 'continue'), it sends the request to the server and
then throws DebuggerConsoleExitException. This is the notification to
exit the console loop and return to the event loop. The use of an
exception for this is a bit odd, as it is typically simply the last
thing done from the command's onClientImpl() method, which is called
directly from the console loop. The use of an exception here is
similar to the use of DebuggerClientExitException, discussed below,
but is now a vestige that will likely be removed soon.
Some commands can cause the client to exit, like 'quit'. The client
may also exit due to various error conditions, like loss of
communication with the server. In these cases a
DebuggerClientExitException is thrown. This causes execution to unwind
out of both the console and event loops, back to
DebuggerClient::run(), which eventually causes the client to exit. The
use of an exception here is more interesting as we will see below when
discussing nested event loops, as it allows the client to exit out of
multiple nested event loops with ease.
Somewhat confusingly, DebuggerClientExitException is also thrown by
the proxy when it detects the client is exiting. This signals the
termination of the request which was being debugged, which is
reasonable. But you'd imagine that a different exception could serve
that purpose. This is a subtle cheat in the system, and is more
meaningful when the proxy is local: it is a signal back to the
modified main thread that the debugger is quitting and the main thread
should now quit. In the local case, the main thread is much like a web
server request thread in that it calls into the proxy to interrupt it.
4.0 Nested execution
--------------------
Some commands allow a user to recursively execute more PHP code while
stopped at, say, a breakpoint. More breakpoints may be hit in the
newly executed function, and more code may be executed while stopped
at those breakpoints. The best example of this is CmdEval, which is
used to evaluate arbitrary functions and code (e.g., '@foo(42)').
Both the client and proxy are designed to handle this.
On the proxy, execution is paused waiting for a command from the
client. When an eval command is received, the proxy is put back into
the running state and the code is executed directly from the command
processing loop. If another interrupt is encountered, a new command
processing loop is entered and the interrupt is communicated to the
client just like normal. When the code completes, the response to the
eval command is sent and control is returned to the command processing
loop still on the stack. Thus we may recurse arbitrarily deep on the
server down multiple levels of proxy command loops, depending on how
deeply the user wishes to go. In practice this depth is quite shallow,
and most often involves no recursion.
On the client the story is much the same. When an eval command is
entered by the user, the client sends CmdEval to the proxy then enters
a nested event loop to wait for either the eval command to be sent
back, indicating completion of the command, or for new CmdInterrupts
to be received, indicating breakpoints and other interesting events
while the code was being executed. Interrupts are handled normally,
and a new console loop is entered. Again, like the proxy these loops
may nest to arbitrary depths.
5.0 Control Flow
----------------
This section will discuss how the control flow commands 'next',
'step', 'out', and 'continue' work in the proxy and the VM. Operation
of these commands on the client isn't very interesting and is covered
well enough elsewhere.
Flow control commands are treated specially by the proxy. A single
instance of a subclass of CmdFlowControl is held in the m_flow member
variable of DebuggerProxy so long as it remains active, and having an
active flow command typically means that the proxy will deliver all
interrupts to the flow command for processing first. The flow command
will have the opportunity to examine the interrupt and determine if
the proxy should really stop at the interrupt, or continue
execution. A flow command will mark itself as completed when it
decides it's time to stop execution, and the proxy will remove it.
The only thing that can get the proxy to stop at an interrupt when a
flow command has said not to is an active breakpoint.
When the proxy does finally have an interrupt to stop at and send to
the client it removes and deletes the flow command. The flow command
is either complete, in which case it doesn't need to remain active
anyway, or it has been trumped by a breakpoint, in which case the flow
command is essentially forced to be complete.
A flow command is set as active on the proxy when it is received from
the client. Execution continues at that time.
5.1 Continue
------------
'continue' is the simplest control flow command. It simply marks
itself completed and returns. The proxy will remove the CmdContinue
and continue execution.
5.2 Step
--------
'step' is the next simplest flow command. It operates on the very
simple theory that to step to the next line executed you simply need
to interpret the program until the current source location
changes. This is, by definition, the next source line to be executed
no matter how execution flows in the program: function call,
exception, a simple add, etc.
First, CmdStep sets a "VM interrupt" flag which is eventually
installed on the VM's execution context for the current thread. The
interpreter's execution loop is instrumented, only when a debugger
client is actually attached to the VM, with a "hook" to the debugger
infrastructure called phpDebuggerOpcodeHook(). This hook is given the
PC of the opcode which is about to be executed. Setting the VM
interrupt flag ensures the VM will only interpret code, and thus call
the debugger opcode hook. This flag remains set only while the step
command is active; the VM will go back to executing translated code
once the command completes.
Next, CmdStep sets up a "location filter" for the current source
line. This is a very simple set which contains all PCs for bytecodes
implementing the current source line. The opcode hook first consults the
location filter to determine if the current location might be interesting
to a debugger. If it gets a hit in the location filter, it simply
returns to the interpreter and executes the opcode as usual. The
location filter, then, is a simple mechanism which flow commands can
use to avoid being interrupted when a set of bytecodes is executed.
By setting up a location filter for the current source line and
turning on interrupts the step command ensures it will be interrupted
as soon as a bytecode not belonging to the current source line is
encountered. When that occurs it marks itself completed, and the proxy
will remove the CmdStep and destroy it. When any flow command is
destroyed the location filter is cleared, and the VM interrupt flag is
turned off.
5.3 Out
-------
'out' works very differently from 'step'. It predicts the next
execution location and sets up an "internal breakpoint" at that
location. This internal breakpoint works just like a normal breakpoint
set by a user, but it is not visible to the user. The breakpoint will
be automatically removed when CmdOut is destroyed. CmdOut sets this
breakpoint, then continues execution like normal (no location filter,
no VM interrupt flag).
The breakpoint is placed at the return address of the current
function. When it is reached, there are two possibilities: we have
returned from the function in question, in which case the command is
marked as complete, or we have hit the breakpoint during a recursive
call before exiting the original function.
To determine which case it is, CmdOut remembers the original stack
depth when the command was activated, and checks it when the
breakpoint is hit. If the stack depth is the same or lower, execution
is continued. Otherwise, the command is complete.
5.3.1 Determining the location to step out to
---------------------------------------------
Finding the location a function will return to is not straightforward
in all cases. For a function called from FCALL, the location is
obvious and unambiguous. But for functions called from, say, ITERNEXT
or a destructor called from a simple SETL the return offset stored in
the VM's activation record is not what we would expect. A complex
instruction may have multiple points at which execution could
continue. ITERNEXT, for instance, will branch to the top of the loop
if the iterator is not done, or fall through if the iterator is
done. Thus the step out location could be multiple places.
Also, functions with no source information are ignored for a step out
operation, so the location could be multiple frames up, not just one.
All of this is accounted for in CmdFlowControl::setupStepOuts(). See
that function for the details.
5.4 Next
--------
'next' is the most complex of the flow control commands, and builds on
the primitives of the others. It starts just like 'step' by trying to
use the interpreter to get off the current source line. But the goal
is not to just get off the line, it is to get to the next line. If
execution ends up deeper on the stack, a call has been made. CmdNext
will re-use the step out facility needed for 'out' and, in essence,
internally execute a step out to get back to the original source line,
then continue trying to interpret off of it. If execution ends up
shallower on the stack, then we have stepped over a return and are
done as well.
There is extra logic to support stepping within generators, so that
a 'next' executed on a source line with a 'yield' on it will step over
the yield. Thus CmdNext looks at the opcodes it is stepping over, and
upon recognizing CONTEXIT or CONTRETC will setup its own internal
breakpoints at destinations indicated by those opcodes.
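As a rough illustration (a hypothetical snippet, not taken from the sources),
consider a generator such as:
  function gen() {
    $a = yield 1;   // 'next' issued while stopped on this line
    yield $a + 1;
  }
A 'next' on the first yield line steps over the yield, so the debugger stops
again inside the generator after it is resumed, instead of surfacing in the
caller's loop.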
For the details, see CmdNext.
5.5 Exceptions
--------------
Exceptions are non-local control flow that throw a monkey wrench into
most flow control commands. The VM is further instrumented, again only
while a debugger is attached, to call a hook whenever it is about to
transfer control to a catch block. This gives all of the flow commands
a chance to determine if the new location satisfies the completion
criteria for their respective operations. In most cases, the flow
command will force the first opcode of the catch clause to be
interpreted and let its normal logic run its course.
6. Breakpoints
--------------
Breakpoints are set via commands processed by the client, which keeps a list
of all breakpoints. This list is sent to the proxy whenever an element is added,
modified or deleted. The proxy keeps a copy of the list and updates each
breakpoint with a flag that indicates if the breakpoint is bound. An unbound
breakpoint is either invalid because it refers to a non-existent source line
or function, or it refers to a location that has not yet been loaded into the
VM. The flag distinguishes these cases.
6.1 Checking if breakpoints are hit.
When a breakpoint becomes bound, it is mapped to a range of program counters
(PCs) in an execution unit. Each of these PCs is then added to a set of
breakable PCs (called the Breakpoint Filter) kept in the VM's execution context
object. If a debugger is attached to the VM, the interpreter calls
phpDebuggerOpcodeHook before executing each operation. This routine checks if
the PC of the operation is a member of the Breakpoint Filter. If so, it calls
the Debugger::InterruptVMHook method, which among other things, checks the
proxy's list of breakpoints to see if any of them apply to the source location
of the current PC. This involves a check if the breakpoint is enabled by the
user, a check if the source location is the same, a check if the breakpoint is
not already active and a check if the breakpoint is conditional on the value
of an expression.
6.2 Active breakpoints
A breakpoint can be already active when InterruptVMHook is called because it
can be set on a source line that maps to several interpreter operations.
(InterruptVMHook will be called for each such operation). It is not safe to
simply disable the breakpoint until the last of these operations has completed,
since one or more of those operations may be function calls that recurse back
to the active breakpoint.
In order to deal with this, each breakpoint keeps track of the depth of the
execution stack where it was activated. When InterruptVMHook finds that a breakpoint
will be hit, it checks that the height of the execution stack is greater than
the height recorded in the breakpoint before it proceeds with the steps that
are taken when a breakpoint is first hit. When control leaves the site of a
breakpoint at the right stack depth, the breakpoint is updated to its previous
active stack depth (it keeps a stack of stack depths and pops the top entry).
This pop operation cannot happen before the last operation corresponding to a
breakpoint has completed. However, it is not convenient for this to happen on
the very next operation. Instead, InterruptVMHook will update any breakpoints
that do not correspond to the current operation to ensure they are active if a
subsequent call to InterruptVMHook matches their location at the current stack
level. |
|
hhvm/hphp/doc/debugger.refs | --------------------------- Abort Command ---------------------------
[a]bort aborts current PHP code input
You will have to type this command on a new line, while you're typing
ad-hoc PHP code to evaluate. In other words, it only works when you see
a continuation prompt like ">>>>".
--------------------------- Break Command ---------------------------
[b]reak breaks at current line of code
[b]reak {exp} breaks at matching location
[b]reak [o]nce {above} breaks just once then disables it
[b]reak {above} if {php} breaks if condition meets
[b]reak {above} && {php} breaks and evaluates an expression
[b]reak [l]ist lists all breakpoints
[b]reak [c]lear {index} clears the n-th breakpoint on list
[b]reak [c]lear [a]ll clears all breakpoints
[b]reak [c]lear clears current breakpoint
[b]reak [t]oggle {index} toggles the n-th breakpoint on list
[b]reak [t]oggle [a]ll toggles all breakpoints
[b]reak [t]oggle toggles current breakpoint
[b]reak [e]nable {index} enables the n-th breakpoint on list
[b]reak [e]nable [a]ll enables all breakpoints
[b]reak [e]nable enables current breakpoint
[b]reak [d]isable {index} disables the n-th breakpoint on list
[b]reak [d]isable [a]ll disables all breakpoints
[b]reak [d]isable disables current breakpoint
-------------------------- Where to break? --------------------------
There are many ways to specify a source file location to set a breakpoint,
but it's ONE single string without whitespace. The format looks like this,
file location: {file}:{line}
function call: {func}()
method invoke: {cls}::{method}()
For examples,
b mypage.php:123
b foo()
b MyClass::foo()
------------------------ Special Breakpoints ------------------------
There are special breakpoints that can only be set by names:
start
end
psp
They represent different time points of a web request. 'start' is at the
beginning of a web request, when no PHP file is invoked yet, but query
string and server variables are already prepared. 'end' is at the end of
a web request, but BEFORE post-send processing (psp). 'psp' is at END of
psp, not beginning. To set a breakpoint at the beginning of psp, use
'end', because end of a request is the same as beginning of psp.
-------------- Conditional Breakpoints and Watchpoints --------------
Every breakpoint can specify a condition, which is an arbitrary PHP expression
that will be evaluated to TRUE or FALSE. When TRUE, it will break. When FALSE,
it will continue without break. "&&" is similar to "if", except it will always
break, regardless of what the expression returns. This is useful for watching
variables at breakpoints. For example,
b mypage.php:123 && print $a
So every time it breaks at mypage.php line 123, it will print out $a.
-------------------------- Call chains -------------------------------
Function/method call breakpoints can be qualified with the names of
functions or methods that must be calling the right most function/method
name for execution to stop at the breakpoint. These calls need not be
direct calls. The syntax looks like this:
{call}=>{call}()
where call is either a {func} or {cls}::{method} and zero or more
"{call}=>" clauses can be specified.
--------------------- Breakpoint States and List ---------------------
Every breakpoint has 3 states: ALWAYS, ONCE, DISABLED. Without keyword "once",
a breakpoint is in ALWAYS state. A ONCE breakpoint will turn into DISABLED after
it's hit once. DISABLED breakpoints will not break, but they are kept in the
list, so one can run 'b l' command and 'b t' command to toggle their states.
Use '[b]reak [l]ist' command to view indices of different breakpoints. Then use
those indices to clear or toggle their states. This list of breakpoints and
their states will remain the same when switching to different machines,
sandboxes and threads.
-------------------------- Hard Breakpoints --------------------------
From within PHP code, you can place a function call hphpd_break() to embed a
breakpoint. You may also specify a condition as the function's parameter, so it
breaks when the condition is met. Please read about this function for more
details with '[i]nfo hphpd_break'.
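For example (a hypothetical snippet; the condition is arbitrary),
  function process($id) {
    hphpd_break($id == 42); // breaks here only when the condition is true
    // ...
  }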
-------------------------- Continue Command --------------------------
[c]ontinue continues program execution
{count=1}
Use this command at break to resume program execution. Specify a count
to repeat the same command many times.
---------------------------- Down Command ----------------------------
[d]own {num=1} moves to inner frames (callees) on stacktrace
Use this command to walk down on stacktrace to find out inner callees of
current frame. By default it moves down by one level. Specify a number
to move down several levels at a time.
------------------------- Exception Command -------------------------
[e]xception {cls} breaks if class of exception throws
[e]xception breaks if class of exception throws
{ns}\{cls}
[e]xception error breaks on errors, warnings and notices
[e]xception breaks only if url also matches
{above}@{url}
[e]xception breaks at matching regex pattern
[r]egex {above}
[e]xception breaks just once then disables it
[o]nce {above}
[e]xception breaks if condition meets
{above} if {php}
[e]xception breaks and evaluates an expression
{above} && {php}
Exception command is similar to '[b]reak' command, except it's used to
specify how to break on (or catch) a throw of an exception. Program
stops right before the exception is about to throw. Resuming program
execution will continue to throw the exception as is.
Only a class name can be specified with an optional namespace. All
exceptions of the class or its sub-classes will be matched. To specify an
exact match that excludes sub-classes, use '[e]xception [r]egex
^{exact class name}$', although regex can match in many more ways.
An exception breakpoint can be listed, cleared or toggled with '[b]reak'
commands.
--------------------------- Frame Command ---------------------------
[f]rame {index} jumps to one particular frame
Use '[w]here' command to find out the frame number. Use 'f 0' to jump
back to the most recent frame or the innermost frame. Use 'f 999' or
some big number to jump to the outermost frame.
--------------------------- Global Command ---------------------------
[g]lobal lists all global variables
[g]lobal {text} full-text search global variables
This will print names and values of all global variables, if {text} is
not specified. Otherwise, it will print global variables that contain the
text in their names or values. The search is case-insensitive and
string-based.
---------------------------- Help Command ----------------------------
[h]elp [s]tart displays material for getting started
[h]elp [t]utorial changing tutorial modes
on|off|auto
Please read "Getting Started" material with '[h]elp [s]tart' for first
time use to get yourself familiar with basics.
Tutorial mode displays extra information when something didn't work as
you expected. "auto" mode will display the same information just once.
"on" mode will display it as long as you run into the same situation.
"off" mode completely turns off all tutorial texts.
To get detailed information of a command, type '{cmd} [h]elp' or '{cmd}
?' or 'help {cmd}' or '? {cmd}'.
---------------------------- Info Command ----------------------------
info displays current function's info
info {cls} displays declaration of this class
info {function} displays declaration of this function
info displays declaration of this method
{cls::method}
info displays declaration of this constant
{cls::constant}
info displays declaration of this property
{cls::$property}
Use this command to display declaration of a symbol.
-------------------------- Constant Command --------------------------
[k]onstant lists all constants
[k]onstant {text} full-text search constants
This will print names and values of all constants, if {text} is not
specified. Otherwise, it will print names and values of all constants
that contain the text in their names or values. The search is
case-insensitive and string-based.
---------------------------- List Command ----------------------------
list displays current block of source code
list {line} displays code around specified line
list displays specified block of source code
{line1}-{line2}
list {line1}- displays code starting with the line
list -{line2} displays code ending with the line
list {file} displays beginning lines of the file
list displays code around specified file:line
{file}:{line}
list displays specified block in the file
{file}:{l1}-{l2}
list {file}:{l1}- displays specified block in the file
list {file}:-{l2} displays specified block in the file
list {directory} sets PHP source root directory
Use the list command to display PHP source code. In remote debugging, this
displays source code on the server side. When the server side cannot find
the file, it will fall back to local files.
Hit return to display more lines of code after current display.
When a directory name is specified, this will be set to root directory
for resolving relative paths of PHP files. Files with absolute paths
will not be affected by this setting. This directory will be stored in
configuration file for future sessions as well.
-------------------------- Machine Command ---------------------------
[m]achine debugging remote server natively
[c]onnect {host}
[m]achine debugging remote server natively
[c]onnect
{host}:{port}
[m]achine [r]pc debugging remote server with RPC
{host}
[m]achine [r]pc debugging remote server with RPC
{host}:{port}
[m]achine disconnect, debug only local script
[d]isconnect
[m]achine [l]ist list all sandboxes
[m]achine attach to a sandbox
[a]ttach {index}
[m]achine attach to my sandbox by name
[a]ttach
{sandbox}
[m]achine attach to a sandbox by user and name
[a]ttach {user}
{sandbox}
[m]achine force attach to a sandbox (see below)
[a]ttach [f]orce
{index|sandbox|us
er sandbox}
Use this command to switch between different machines or sandboxes.
If the command prompt says "hphpd", all evaluation of PHP code happens
locally within the debugger. This is the mode when the debugger is started
without a remote server name. No user libraries are pre-loaded in this
mode.
When connecting to a remote server, it will automatically attach to
"default" sandbox under current user. If "default" sandbox does not
exist, it will attach to a random sandbox under current user. In sandbox
mode, a file specified in server's configuration of
"Eval.Debugger.StartupDocument" is pre-loaded.
If there is no sandbox available, it will create a "dummy" sandbox and
attach to it.
When your sandbox is not available, please hit it at least once from
your browser. Then run '[m]achine [l]ist' command again.
If another debugger client is already attached to your sandbox you can
use the '[f]orce' option to '[m]achine [a]ttach'. This will disconnect
the other client and force your client to connect.
If a HipHop server has RPC port open, one can also debug the server in a
very special RPC mode. In this mode, one can type in PHP scripts to run,
but all functions will be executed on server through RPC. Because states
are still maintained locally and only functions are executed remotely,
it may not work with functions or scripts that depend on global
variables or low-level raw resource pointers. As a simple rule,
stateless functions will work just fine. This is true for objects and
method calls as well, except classes will have to be loaded on client
side by '=include("file-containing-the-class.php")'.
---------------------------- Next Command ----------------------------
[n]ext {count=1} steps over lines of code
Use this command at break to step over lines of code. Specify a count to
step over more than one line of code.
---------------------------- Out Command ----------------------------
[o]ut {count=1} steps out function calls
Use this command at break to step out function calls. Specify a count to
step out more than one level of function calls.
--------------------------- Print Command ---------------------------
[p]rint {php} prints result of PHP code
[p]rint r {php} prints result of PHP code, (print_r)
[p]rint v {php} prints result of PHP code, (var_dump)
[p]rint x {php} prints hex encoded string or number
[p]rint [h]ex prints hex encoded string or number
{php}
[p]rint [o]ct prints octal encoded string or number
{php}
[p]rint [d]ec prints as signed integer
{php}
[p]rint prints as unsigned integer
[u]nsigned {php}
[p]rint [t]ime converts between time and timestamp
{php}
[p]rint [a]lways adds a watch expression at break
{above}
[p]rint [l]ist lists watch expressions
[p]rint [c]lear clears a watch expression
{index}
[p]rint [c]lear clears all watch expressions
[a]ll
Prints the result of an expression in a certain format. If '[a]lways' is
specified, the expression will be added to a watch list. At every break,
either at a breakpoint or caused by step commands, these expressions
will be evaluated and printed out.
---------------------------- Quit Command ----------------------------
[q]uit quits this program
After you type this command, you will not see me anymore.
---------------------------- Run Command ----------------------------
[r]un restarts program
[r]un {file} starts a new program
{arg1} {arg2} ...
Aborts current execution and restarts program with specified arguments.
If no arguments are specified, it will reuse the PHP file and old
arguments. If arguments are to be changed, please include the file name,
even if it is unchanged, as the first argument.
In server mode, this command will simply abort current page handling
without restarting anything.
---------------------------- Set Command ----------------------------
set bac on/off on makes debugger bypass access checks on class members
set lf path/off turn logging on and specify log file
set pl level if level > 0, only print out object trees to that depth
set cc count display at most count characters when doing = command
set ss on/off on makes the debugger take small steps (not entire lines)
set sa on/off on makes where command display argument values
set mcl limit display at most limit source lines at breakpoints
Use this command to change default settings. The new values are persisted into
the configuration file that normally can be found at ~/.hphpd.ini.
Level, count and limit can be <= 0, in which case they are unlimited.
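For example (the values are arbitrary),
  set bac on
  set cc 200
  set mcl 20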
---------------------------- Step Command ----------------------------
[s]tep {count=1} steps into lines of code
Use this command at break to step into lines of code. Specify a count to
step more than once.
--------------------------- Thread Command ---------------------------
[t]hread displays current thread's information
[t]hread [l]ist lists all threads at break
[t]hread {index} switches to the specified thread
[t]hread [n]ormal breaks all threads
[t]hread [s]ticky only send command to current thread
[t]hread only break current thread
[e]xclusive
Use '[t]hread' alone to display information of current thread.
When a thread is at break, you may specify how other threads should
behave if they also happen to hit some breakpoints. Normally, other
threads will also break, and they will interrupt the debugger session with
their breakpoints. So breaks from different threads may interleave. If
'[t]hread [s]ticky' is specified, all other threads will wait until
current thread is finished. This will help debugging to focus on just
one thread without losing breaks from other threads. If there is no need
to hold up any other threads, use '[t]hread [e]xclusive'. Then other
threads will not break at all. This mode is useful for live debugging a
production server, without interrupting many threads at a time. Use
'[t]hread [n]ormal' to change thread mode back to normal.
Some debugging commands will automatically turn thread mode to sticky.
These include continue, step, next, or out commands with a counter of
more than 1, as well as the jump command. These commands imply non-interruption
from another thread. The mode will remain even after these commands
until '[t]hread [n]ormal' is issued.
When multiple threads hit breakpoints at the same time, use '[t]hread
[l]ist' command to display their indices, which can be used to switch
between them with '[t]hread {index}'.
----------------------------- Up Command -----------------------------
[u]p {num=1} moves to outer frames (callers) on stacktrace
Use this command to walk up on stacktrace to find out outer callers of
current frame. By default it moves up by one level. Specify a number to
move up several levels at a time.
-------------------------- Variable Command --------------------------
[v]ariable lists all local variables on stack
[v]ariable {text} full-text search local variables
This will print names and values of all variables that are currently
accessible by simple names. Use '[w]here', '[u]p {num}', '[d]own {num}',
'[f]rame {index}' commands to choose a different frame to view variables
at different level of the stack.
Specify some free text to print local variables that contain the text
either in their names or values. The search is case-insensitive and
string-based.
--------------------------- Where Command ---------------------------
[w]here displays current stacktrace
[w]here {num} displays number of innermost frames
[w]here -{num} displays number of outermost frames
Use '[u]p {num}' or '[d]own {num}' to walk up or down the stacktrace.
Use '[f]rame {index}' to jump to one particular frame. At any frame, use
'[v]ariable' command to display all local variables.
-------------------------- Extended Command --------------------------
x {cmd} {arg1} invoke specified command
{arg2} ...
x{cmd} {arg1} invoke specified command
{arg2} ...
where {cmd} can be:
[a]mple
[t]ension
Type 'x [h]elp|? {cmd}' to read their usages.
---------------------------- Zend Command ----------------------------
[z]end running the most recent code snippet in PHP5
This is mainly for comparing results from PHP vs. HipHop. After you type
in some PHP code, it will be evaluated immediately in HipHop. Then you
can type '[z]end' command to re-run the same script in PHP5. Please
note that only the most recent block of code you manually typed in was
evaluated, not any earlier ones, nor the ones from a PHP file.
--------------------------- Macro Command ---------------------------
& [s]tart starts recording of default macro
& [s]tart {name} starts recording of a named macro
& [e]nd stops and saves recorded macro
& [r]eplay replays default macro
& [r]eplay {name} replays a named macro
& [l]ist lists all macros
& [c]lear {index} deletes a macro
Macro command allows you to record a series of debugger commands, so you
can replay them later by name. When a name is not specified, it will use
"default" as the name.
There is also a special macro "startup" that will be replayed every time
when debugger is just started. Use startup macro to load certain PHP
files or perform certain debugging environment setup.
The space between & and command is not needed. '&s' works as well.
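For example (using a hypothetical macro name), the following records two
commands and replays them later:
  &s mybreaks
  b mypage.php:123
  p $a
  &e
  &r mybreaks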
--------------------------- Shell Command ---------------------------
! {cmd} {arg1} {arg2} ... remotely executes shell command
Executes the shell command on connected machine.
The space between ! and command is not needed. '!ls' works as well. |
|
hhvm/hphp/doc/debugger.start | ------------------- Getting Started with Debugger -------------------
1. Quick Overview
(1) from A to Z
All built-in debugger commands are unambiguous with their first
letters. Therefore, a single letter is sufficient to issue the command.
(2) tab, tab, tab
Use TAB to auto-complete.
(3) input PHP code
For a single line of PHP code, use "=" to print an expression's value, OR
use "@" to execute an expression or statement without printing return
values, OR start an assignment with a "$" variable name.
For multi-line PHP code, type "<" then TAB. Now you can type or paste
multiple lines of code. Hit return to start a new line, then TAB. That
will auto-complete "?>" to finish the block. Hit return to execute.
(4) help
Use "help" to read more about command details.
(5) info and list
Use "info" and "list" commands to read more about source code.
(6) readline
The debugger is written with the readline library, which has a rich feature
set, including switching between emacs and vi editing modes. Please read its
[[ http://cnswww.cns.cwru.edu/php/chet/readline/readline.html#SEC1 |
documentation]] for more details.
2. Debugging local script
The command to run a script normally looks like this,
hhvm myscript.php
Simply add "-m debug" to run the script in debugger,
hhvm -m debug myscript.php
Once started, set breakpoints like this,
hphpd> break myscript.php:10
hphpd> break foo()
Then let it run, until it hits the breakpoints,
hphpd> run
The debugger will highlight the current statement or expression that is just
about to be evaluated. Sometimes a statement is highlighted first, then
sub-expressions inside the statement are highlighted one after another
while repeating step commands.
At any breakpoints, examine variables or evaluate expressions,
hphpd> variable
hphpd> print $a
hphpd> =$a
hphpd> <?hh print $a; ?>
hphpd> <?hh
..... print $a;
..... ?>
Optionally, modify variables like this,
hphpd> $a = 10
hphpd> <?hh $a = 10; ?>
hphpd> <?hh
..... $a = 10;
..... ?>
Then let it continue, until it hits more breakpoints,
hphpd> continue
Finally, quit debugger,
hphpd> quit
3. Debugging sandbox
Connect to an HPHP server from command line,
hhvm -m debug -h mymachine.com
Or, connect from within debugger,
hphpd> machine connect mymachine.com
This will try to attach to a default sandbox on that machine.
"Attaching" means it will only debug web requests hitting that sandbox.
To switch to a different sandbox,
mymachine> machine list
mymachine> machine attach 2
In remote debugging mode, a breakpoint can be specific to a URL,
mymachine> break myscript.php:[email protected]
mymachine> break foo()@index.php
You may connect to more than one machine and breakpoints will be shared
by all of them.
4. Understanding dummy sandbox
When a web request hits a breakpoint, debugger will run in a "Web
Request" thread. Use "thread" command to display this information,
mymachine> thread
What will the debugger use when there is no active web request thread,
but we need to set a breakpoint? We created a so-called "dummy
sandbox", purely for taking debugger commands when there is no active
web request. When there is no active request, hit Ctrl-C to break into
the debugger, and use "thread" to display dummy sandbox thread's
information.
Ctrl-C
mymachine> thread
In the dummy sandbox, a PHP file can be pre-loaded, so that we can "info"
functions and classes and execute certain code. This file is specified
on the server side by
Eval.Debugger.StartupDocument = scripts/startup.php
The dummy sandbox will always use the currently attached sandbox's PHP files.
When files are modified, simply reload them by
mymachine> continue
Ctrl-C
5. Colors and Configuration
By default, it will use emacs colors for a dark background. To change
them, run debugger at least once, then look for ~/.hphpd.ini file.
Replace "Code" node with,
Color {
Code : Color.Palette.vim
}
Or, specify your own colors in different places of the configuration
file. |
|
hhvm/hphp/doc/extension.new_functions | <h2>New Extension Functions</h2>
Warning: these functions are not ported to PHP yet. Do NOT use them if the code
is intended to run with PHP as well, unless guarded with function_exists()
tests and backed by an alternative PHP implementation.
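For example, a minimal guard (assuming hphp_get_thread_id from the list below)
might look like:
  if (function_exists('hphp_get_thread_id')) {
    $tid = hphp_get_thread_id();
  } else {
    $tid = null; // running under vanilla PHP; no equivalent available
  }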
- hphp_crash_log
- hphp_stats
- hphp_get_stats
- hphp_output_global_state
- hphp_set_error_page
- hphp_thread_is_warmup_enabled
- hphp_thread_set_warmup_enabled
- hphp_get_thread_id
- hphpd_auth_token
- hphp_debug_session_auth
- hphpd_install_user_command
- hphpd_get_user_commands
- hphpd_break
- register_postsend_function
- clock_getres
- clock_gettime
- mysql_connect added connect_timeout_ms and query_timeout_ms
- mysql_pconnect added connect_timeout_ms and query_timeout_ms
- mysql_set_timeout
- fb_load_local_databases
- fb_parallel_query
- fb_crossall_query
- pagelet_server_is_enabled
- pagelet_server_task_start
- pagelet_server_task_status
- pagelet_server_task_result
- pagelet_server_flush
- xbox_task_start
- xbox_task_status
- xbox_task_result
- fb_serialize
- fb_unserialize
- fb_rename_function
- fb_get_code_coverage
- fb_utf8ize
- fb_enable_code_coverage
- fb_disable_code_coverage
- HH\disable_code_coverage_with_frequency
<h2>New Server Variables</h2>
$_SERVER['THREAD_TYPE'] can be:
- Web Request
- Pagelet Thread
- Xbox Thread
- RPC Thread |
|
hhvm/hphp/doc/extension.type_hints | <h2>Richer type hints</h2>
HipHop extends the support of type hints to primitive types, like bool, int, and
double. It also supports string for type hints.
For example, the following code would only allow passing an integer to the
function foo():
function foo(int $a) {
return $a + 1;
}
The main purposes of type hinting are (1) more efficient execution and (2) more
explicit contracts for functions.
As in vanilla PHP, HipHop allows type-hinted parameters to have null as the
default value, even if the type hint is a primitive type.
function foo(int $a = null) { ... }
foo(null); // then passing null is allowed |
|
hhvm/hphp/doc/extension.yield | <h2>yield and generator</h2>
HipHop extends PHP to include Python and C#-style generators. If you're
unfamiliar with the concept, see
[[http://docs.python.org/tutorial/classes.html#generators | the Python docs]].
As in Python, the <i>yield</i> keyword marks the enclosing function as a
generator:
function foo() {
$a = 123;
yield $a;
$a = 456;
yield $a;
}
foreach (foo() as $a) {
print "$a,";
}
The above program outputs "123,456,". To abort a generator sequence, use "yield
break".
function bar() {
$a = 123;
// this will stop the "foreach" immediately without any value returned
if ($abort) yield break;
yield $a;
$a = 456;
yield $a;
}
Generators must observe the following restrictions:
- Generators are <b>not recursive</b>. In the above example, foo() cannot call
foo() while iterating.
- Generators are <b>called once</b>: foo() cannot be called again after it's
done iterating.
- Do not call the rewind() method of the objects (of class Iterator) returned by
iterator functions.
Yield in HipHop also supports passing a value in from outside the
generator.
function foo() {
$a = yield 5;
yield $a + 1;
}
From outside the generator, instead of resuming the generator with
Generator::next(), one can call Generator::send() to pass a value, which
will be assigned to $a, back into the generator.
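A minimal sketch, assuming the standard Generator methods current() and send():
  $gen = foo();
  echo $gen->current(); // runs foo() to the first yield; prints 5
  echo $gen->send(10);  // resumes foo() with $a = 10; prints 11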
Note that the yield expression in the above example is not really an expression;
it can only appear on its own on the RHS of an assignment statement. This is to
avoid the complicated evaluation order problem in bizarre expressions like
"($a = yield 5) * (yield $a + 3) - ($a = yield 4)". |
|
hhvm/hphp/doc/fuzzer | The fuzzer is an automating testing tool for exposing holes in the HHVM verifier
by generating random .hhas files from existing well-formed inputs. Specifics of
how the fuzzer works can be found at https://fburl.com/gc516ctu.
Currently the fuzzer supports eight kinds of mutations to HHAS programs:
* Changing instruction immediates
* Duplicating instructions
* Deleting instructions
* Reordering sequences of instructions
* Replacing instructions with other random instructions
* Inserting new instructions
* Generating random metadata
* Adding random exception regions
The Fuzzer is a dependency of hh_single_compile, but can also be compiled
standalone by pointing buck to the TARGETS file in the fuzzer directory.
The OCaml base of the tool can be run by itself with
buck-out/bin/hphp/vm/runtime/verifier/fuzzer/fuzzer/fuzzer.opt
and can be built with
buck build @//mode/<MODE> //hphp/runtime/vm/verifier/fuzzer:fuzzer
but this is unlikely to be useful to a user. The Python script is much more
useful for testing purposes.
In either case, both the OCaml base and the Python script are highly
configurable with command-line arguments.
The OCaml base tool requires only an input file, which is passed without an
argument flag. The rest of the options are listed below; an example invocation follows the list:
* -out (string): the output directory for the generated mutations. By default
the mutations are printed to stdout, but this is almost never what you want.
If a directory is specified, the mutations are saved as
<dirname>/mutations<N>.hhas, where <N> denotes the index of the mutation.
* -o (string): alias for the above.
* -prob (float, <=1): The probability of a mutation occurring at each phase,
with a default value of 0.1 (10%). The higher this is, the more likely a
mutation is to occur at each step.
* -magnitude (int): The maximum change to an integer value that can occur as
part of a single mutation, with a default value of 1.
* -immediate (int): The number of immediate mutations to be performed, with a
default value of 1.
* -duplicate (int): The number of duplication mutations to be performed, with a
default value of 1.
* -remove (int): The number of removal mutations to be performed, with a default
value of 1.
* -insert (int): The number of insertion mutations to be performed, with a
default value of 1.
* -reorder (int): The number of reorder mutations to be performed, with a
default value of 1.
* -exception (int): The number of exception region mutations to be performed,
with a default value of 1.
* -metadata (int): The number of metadata mutations to be performed, with a
default value of 1.
* -complete (int): A shorthand way of setting all the mutation numbers to a
single value. This defaults to 0, but when it is N, each mutation will be
performed N times.
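For example, a typical invocation combining several of the options above might
look like this (input.hhas is a placeholder for an existing well-formed file):
  buck-out/bin/hphp/vm/runtime/verifier/fuzzer/fuzzer/fuzzer.opt input.hhas \
    -o mutations -prob 0.2 -complete 2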
----
The Python script can be run with
python3 hphp/runtime/vm/verifier/fuzzer/fuzz.py
The script requires an initial input file (passed with -i). The rest of the
options are listed below; an example invocation follows the list:
* -g (int): The number of generations to run the Fuzzer for, with a default
value of 1. The runtime of the tool increases exponentially with this value.
* -f (int): The failure threshold for generational pruning, with a default
value of 1. All files with more verification errors than this threshold will
be eliminated at the end of each generation. Higher failure thresholds allow
more files to continue to the next generation, increasing the possibility of
discovering a bug but exponentially increasing the runtime of the tool.
* -p (float, <=1): The probability of a mutation occurring at each step, passed
directly to the OCaml code. This defaults to 0.05. Increasing this will
cause the Fuzzer to generate more trivially broken files, decreasing the
probability of finding a bug but also decreasing the runtime of the tool.
* -v: The flag for verbose mode, default off
* -l (string): The path to the logfile. If verbose mode is active, the tool
will log to this file. If verbose mode is on but no logfile is provided,
the information is printed to stdout.
* -t (int): The number of threads to use when running the Fuzzer. The default
is to run with one thread.
* -h: Help flag. Prints the usage text for the script.
* -c: The flag for coverage mode. When turned on, the tool will use coverage
data from HHVM to decide which files to prune after each generation. Turning
this on will drastically increase the amount of time each generation will
take, since collecting coverage data requires actually running HHVM rather
than simply running the verifier. However, over a large number of
generations this will actually decrease the runtime of the tool since
an increasing number of files will be pruned each generation.
* --args (string): Used to pass arguments directly to the OCaml Fuzzer.
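For example (input.hhas is a placeholder for an existing well-formed file):
  python3 hphp/runtime/vm/verifier/fuzzer/fuzz.py -i input.hhas -g 2 -f 1 \
    -p 0.05 -t 4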
The output of the Python tool is stored in the mutations directory, which is
created as a subdirectory of the directory where you ran the script. A summary
of the results of the run, including a list of generated files that successfully
verified but crashed HHVM, is found in mutations/results.txt. |
|
Perl | hhvm/hphp/doc/generate-ir-opcodes.pl | #!/usr/bin/perl -w
use strict;
my $buffer = '';
my $output_buffer = '';
sub process_buffer {
$buffer =~ s/^\s*//g;
$buffer =~ s/\s+/ /g;
$buffer =~ s/^\s*([a-zA-Z0-9]+)<[^>]+>/$1/g;
$output_buffer .= 'O(';
$output_buffer .= $buffer.") \\\n";
$buffer = '';
}
while (<>) {
if ($buffer && !/^\|(.*)$/) {
process_buffer;
next;
}
if (/^\|(.*)/) {
$buffer .= $1;
}
}
######################################################################
# Print but insert some space so opcode names show up in a clear
# column.
sub print_pretty {
my @lines = split /\n/, $output_buffer;
my $max = 0;
foreach (@lines) {
if (/^(O[^,]*)/) {
$max = length $1 if length $1 > $max;
}
}
foreach (sort @lines) {
if (/^(O[^,]*)/) {
my $op = $1;
print $op . ", ";
s/^(O[^,]*),//;
print ' ' x ($max - length $op);
}
print;
print "\n";
}
}
print "// \@".
"generated\n";
print "#define IR_OPCODES \\\n";
print "\\\n";
print_pretty;
print "/**/\n"; |
Shell Script | hhvm/hphp/doc/generate-ir-opcodes.sh | #!/bin/bash
function die {
echo $1 1>&2
exit 1
}
# This fallback is for the cmake build, which won't have an INSTALL_DIR
# environment variable, and runs this from the runtime subdir.
if [ x"$INSTALL_DIR" = x"" ] ; then
cd "$(dirname "$(which "$0")")" || die "Can't find script dir for $0" 1>&2
INSTALL_DIR="$(pwd)/../runtime"
fi
SCRIPT=generate-ir-opcodes.pl
SRC=ir.specification
OUTPUT=$INSTALL_DIR/ir-opcode-generated.h
perl $SCRIPT $SRC > $OUTPUT |
hhvm/hphp/doc/hdf | <h2>Hierarchical Data Format (HDF)</h2>
Please use bin/hdf.el for syntax coloring that can help identify syntax errors.
1. Basic format
[node] = [value]
Where, [node] can be an alphanumeric name, and [value] can be
- booleans: true, false, on, off, yes, no, 1, 0
- numbers
- strings: without any quoting
2. Hierarchies
[node] {
[subnode1] = [value]
[subnode2] = [value]
}
[node] {
[subnode] = [value]
[subnode] {
[subsubnode1] = [value]
[subsubnode2] = [value]
}
}
3. Dotted node names
[node] {
[subnode] = [value1]
[subnode] {
[subsubnode] = [value2]
}
}
is the same as,
[node].[subnode] = [value1]
[node].[subnode].[subsubnode] = [value2]
These dotted node names and paths can appear anywhere a node can appear.
4. Arrays
Use '*' for automatically generated node names that you don't care about:
[node] {
* = [value1]
* = [value2]
}
This is fine, too, except it's harder to maintain if one needs to add/delete:
[node] {
0 = [value1]
1 = [value2]
}
5. Node alias
[node] : [another]
Watch out: this makes the two nodes symbolic links to each other, so this
will modify [another] as well:
[node] : [another]
[node] {
extra = value
}
6. Node copying
To avoid the above accidental modification when aliasing a node, one can do,
[node] := [another]
[node] {
extra = value
}
Now, [node] is a different node than [another], and the modification doesn't
affect [another].
7. Node inheritance
[node] {
@[another]
extra = value
}
is the same as
[node] := [another]
[node] {
extra = value
}
Sometimes it's easier and clearer to write node copying in inheritance format:
[node] {
@[another1]
@[another2]
extra = value
}
8. Special shell commands
[node] != [command]
This will execute the shell command and use its output as the node's value.
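For example (ServerName is a hypothetical node name), the following sets
ServerName to whatever the hostname command prints:
  ServerName != hostname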
9. Include statement
#include "another.hdf"
10. Comments
# only one format of comment is supported
# it has to start from line beginning
Watch out, this is NOT a comment:
[node] = [value] # this will become part of node's value
11. Multiple-line strings
[node] << EOM
1st line
2nd line
...
EOM |
|
hhvm/hphp/doc/hhprof | HHVM's HHProf feature provides memory heap profiling based on optional jemalloc
functionality. For HHProf to function, jemalloc must be built with
--enable-prof, and HHVM must be built with USE_JEMALLOC and ENABLE_HHPROF.
HHProf configuration is controlled in part by the following boolean runtime
configuration options, all of which default to false:
* HHProf.Enabled: If true, enable admin server commands:
/hhprof/start: start profiling
requestType "all" or "next"*
url profile next matching url for "next"
lgSample lg sample rate
profileType "current"* or "cumulative"
/hhprof/status: configuration and current dump status
/hhprof/stop: stop profiling
/pprof/cmdline: program command line
/pprof/heap: heap dump
/pprof/symbol: symbol lookup
retain regex for symbols client cares about
exclude regex for symbols client does not care about
The /hhprof/* endpoints are intended for direct use, whereas the /pprof/*
endpoints support jeprof (included with jemalloc).
* HHProf.Active: If true, activate allocation sampling during startup. This is
useful if the usage goal is to gain an application-global understanding of
memory usage; for request-based profiling, activation/deactivation is
controlled via other means.
* HHProf.Accum: If true, enable cumulative sample statistics in addition to
current sample statistics. Beware that metadata overhead is proportional to
the number of unique backtraces associated with sampled allocations, and with
cumulative statistics enabled, no samples are ever discarded. This means that
if the application's call graph is complex, a combinatorial explosion of
potential backtraces can cause unbounded metadata growth.
* HHProf.Request: If true, enable support for per request profiling. This
causes selected requests to allocate memory such that PHP-level allocations
are independently allocated, thus making their effects visible in heap
profiles.
Following are some common use case examples:
* Per request profiling
Start hhvm as such:
hhvm -m server -vHHProf.Enabled=true -vHHProf.Request=true
Profile the next request:
curl -s 'example.com:8088/hhprof/start?requestType=next&lgSample=12'
Profile the next request to a particular endpoint:
curl -s 'example.com:8088/hhprof/start?requestType=next&url=home.php&lgSample=12'
Note that the default sample rate (on average once per 512 KiB) is too low to
capture meaningful sample sets unless the allocation volume is sufficiently
high. Therefore the above examples set the rate to average one sample per 4
KiB, which is a reasonable setting for most purposes, though the higher the
sample rate, the higher the computational and memory overhead.
Download the resulting profile using jeprof, symbolizing only PHP functions:
jeprof --raw --retain='^PHP::' 'example.com:8088/pprof/heap' > hhprof.raw
Generate a call graph, focusing only on PHP functions:
jeprof --pdf --retain='^PHP::' hhprof.raw > hhprof.pdf
* Whole server profiling
Start hhvm as such:
hhvm -m server -vHHProf.Enabled=true -vHHProf.Active=true
Download a profile for currently allocated memory:
jeprof --raw 'example.com:8088/pprof/heap' > hhprof.raw
Generate a call graph:
jeprof --pdf hhprof.raw > hhprof.pdf |
|
Markdown | hhvm/hphp/doc/ini.md | # Using ini
ini usage in HHVM is fairly similar to that of php-src, albeit currently with
some limitations and enhancements. For example, HHVM currently doesn't support
per-dir ini settings (support coming soon), but it does support vector-based
settings and wildcards for copying and symlinking values from other settings.
## Common usage
Here is the typical format of an ini file (values not necessarily realistic):
```
hhvm.hot_func_count = 11
hhvm.stats.slot_duration = 11
hhvm.env_variables["MYBOOL"] = true
hhvm.env_variables["MYINT"] = 5
hhvm.env_variables["MYSTR"] = "Custom String"
hhvm.env_variables["ANOTHERSTR"] = "Another String"
hhvm.server_variables[] = "Key will be next available int"
hhvm.server_variables[] = "Value will be next available string"
hhvm.error_handling.notice_frequency = 1
hhvm.error_handling.warning_frequency = 1
hhvm.enable_obj_destruct_call = true
hhvm.enable_xhp = true
```
## Copying and Symlinking Settings
**NOTE**: This feature currently only works with core system settings. It
doesn't yet work with extensions, `ini_set()`, etc.
You can also provide wildcards to settings signaling that you want to use the
value of another setting for its value.
* `@`: Copy the value directly into this setting.
* `:`: Symlink the value from the other setting to this setting. If the other
setting changes, then this setting will change with it, and vice-versa.
To use this feature, use the form
```
hhvm.setting[any-sub-key | @ | :][...] = value | "hhvm.substitution-setting"
```
e.g.,
```
hhvm.a = 3
hhvm.b[@] = "hhvm.a"
hhvm.c[d][@] = "hhvm.a"
```
Here is a more complete example:
```
hhvm.hot_func_count = 11
hhvm.stats.slot_duration[@] = "hhvm.hot_func_count"
hhvm.server.allowed_exec_cmds[0] = "ls"
hhvm.server.allowed_exec_cmds[1][@]= "hhvm.env_variables[MYSTR]"
hhvm.server.allowed_exec_cmds[2][@]= "hhvm.env_variables[ANOTHERSTR]"
hhvm.env_variables["MYBOOL"] = true
hhvm.env_variables["MYINT"][:] = "hhvm.hot_func_count"
hhvm.env_variables["MYSTR"] = "Custom String"
hhvm.env_variables["ANOTHERSTR"] = "Another String"
hhvm.server_variables[0] = "Key will be next available int"
hhvm.server_variables[1][@] = "hhvm.server.allowed_exec_cmds[0]"
hhvm.error_handling.notice_frequency = 1
hhvm.error_handling.warning_frequency[:] = "hhvm.error_handling.notice_frequency"
hhvm.enable_obj_destruct_call = true
hhvm.enable_xhp[@]= "hhvm.enable_obj_destruct_call"
```
**NOTE**: If you are using this feature with vector- or map-based settings, where you
can specify `[]` to indicate the next available index of a setting, you must
specify explicit indices because they determine which values to use when copying or symlinking.
hhvm/hphp/doc/ir.specification | *******************************************
* HipHop Intermediate Representation (HHIR)
*******************************************
Introduction
------------
The HipHop Intermediate Representation (IR) is a typed, in-memory,
static-single-assignment, intermediate-level representation of HHBC programs
used for just-in-time compilation, with these goals:
1. Complete. The IR represents a program or program fragment entirely,
without reference to HHBC or other upstream forms of the program.
2. Type-Safe. Since the IR deals directly with refined types and internal VM
types, all operations are typesafe. All instruction parameters have a
parameter type P, and all variables have a type S. Given an instruction
with source parameter type P and variable type S, S must be equal to or
more refined than P (S == P or S <: P).
3. Machine Independent. Since this IR is intended to be used in a JIT
compiler, it will always be used in a machine specific context.
Nevertheless, we rely on machine independence in order to separate
concerns and increase portability of the VM. Passes which manipulate IR
based on PHP or HHBC semantics should be portable. Passes which deal with
machine specifics (such as register allocation) should be done in the
lower level IR (vasm). Types are machine independent.
The unit of compilation is the IRUnit, which is a collection of Blocks
containing IRInstructions that produce and consume SSATmp values. Blocks are
single-entry, single-exit sequences of instructions (i.e. basic
blocks). Instructions may be annotated with a Type parameter which modifies the
instruction's behavior, or with additional compile-time constant data (see
extra-data.h). Each SSATmp has a Type which describes the set of values it may
hold, over its entire live range. Instructions may have side effects, which
occur in execution order.
The static single assignment form guarantees the following two invariants for a
well-formed compilation unit:
1. Each SSATmp is assigned to by exactly one IRInstruction.
2. Definitions dominate uses. Every path to an IRInstruction using an SSATmp
first executes the IRInstruction defining the SSATmp.
Any pass that generates or manipulates IR must preserve these invariants,
however it is possible and expected for the invariants to be temporarily broken
during IR generation or during an optimization pass.
Control Flow
------------
IRUnits have one entry block, zero or more exit blocks, and zero or more catch
blocks. Exit blocks leave the compilation unit in the middle of the same PHP
function using one of several instructions that exit a compilation unit
(e.g. ReqBindJmp). Catch blocks are blocks that are reachable from exceptional
control flow edges, and are executed during unwinding if an exception
propagates through the instruction that had it as a `taken' edge.
No SSATmps are defined on entry to the main Block.
Blocks which are join points may start with a DefLabel with destination
SSATmps. In that case, each predecessor must be a Jmp passing a matching number
of sources. In this case the Jmp acts as a tail-call, passing arguments the
same way a plain call would.
Together, the sources of the Jmp instructions and the destinations of the
DefLabel instructions act as traditional SSA Phi pseudo-functions; The type of
the DefLabel's destination is the type-union of the corresponding sources.
Because the Jmp sources are at the ends of blocks, they do not violate the SSA
dominator rule (rule 2, above).
Types
-----
For an overview of the HHIR type system, see the "Type System" section in
hackers-guide/jit-core.md.
SSATmps
-------
An SSATmp represents a virtual register. Since HHIR uses SSA, an SSATmp may
only be assigned to by one instruction. The type of an SSATmp represents the
set of values it may hold at the point it is defined, which is invariant over
the lifetime of the variable (from the definition point to the last use).
IRInstructions
--------------
An instruction is an executable operation with zero or more inputs (sources),
zero or one result (destination), and possible side effects such as accessing
memory, doing I/O, and which may branch or throw an exception. Some
instructions have a Type parameter which modifies their behavior, or other "extra
data" in an arbitrary C++ struct (see extra-data.h).
Each instruction has a signature which describes its effect, parameter types,
and return type, for example:
IsType<T>, D(Bool), S(Cell), NF
The first column is the instruction name (and optional Type parameter in <>).
The second column describes the result (destination) using one of the D*
macros documented in hphp/runtime/vm/jit/ir-opcode.h, or ND for no destination.
The third column describes the sources, separated by whitespace, using macros
documented in hphp/runtime/vm/jit/ir-opcode.h, or NA if there are no sources.
The fourth column contains the flags, described below. The short name of the
flag (used in this file) is given first, with the long name that it expands to
in hphp/runtime/vm/jit/ir-opcode.cpp in parentheses after it.
NF
The instruction has no flags.
PRc (ProducesRC)
The instruction produces a value with an unconsumed reference that must be
consumed, either by DecRefing it or storing it somewhere in memory.
CRc (ConsumesRC)
The instruction consumes a reference to one or more of its sources, either by
decreasing its refcount or storing the reference to memory.
T (Terminal)
The instruction has no next instruction; it either jumps, returns, or throws.
B (Branch)
The instruction has a (sometimes optional) taken edge. Instructions that are
conditional branches (i.e. a Branch that is not Terminal) will also have a
next edge.
P (Passthrough)
The value of the instruction's dest is the same as one of its inputs; it
differs only in the type of the variable, or some other property that doesn't
affect the value of the variable itself.
LA (Layout-Agnostic)
The instruction is generic over array-like inputs and outputs. Most ops that
deal with array-like types can only handle their default ("Vanilla") layouts.
We whitelist those ops that are generic over layout.
LP (Layout-preserving)
The instruction is closed under vanilla array layouts: if the first argument
is vanilla, so too is the destination. The first argument must be an
array-like type. Layout-preserving implies layout-agnostic.
Instruction set
---------------
1. Checks and Asserts
| CheckType<T>, DRefineS(0), S(Cell), B|P
Check that the type of the src S0 is T, and if so copy it to D, and
fallthrough. If S0 cannot be proven to be T, branch to block B. Note that
this means S0 still /may/ be a subtype of T in block B in some circumstances.
Specifically, subtypes of Type::Static may not be checked precisely,
depending on the type of the source. This means the instruction may take the
branch in some circumstances even when S0 is a subtype of T, if T has a
non-empty intersection with Type::Static.
Also note that many types are not supported as the typeParam right now.
| CheckNullptr, ND, S(Cls|StaticStr|Nullptr), B
If S0 is not a null pointer, branch to block B. This is used to check the
return value of a native helper that returns a potentially null StringData*.
| AssertType<T>, DRefineS(0), S(Cell,Mem), P
Assert that the type of S0 is T, copying it to D.
| CheckTypeMem<T>, ND, S(Mem), B
If the value pointed to by S0 is not type T, branch to the block B.
| CheckIter<iterId,iterType>, ND, S(FramePtr), B
Check that specialization type of the given iterator `iterId` on the frame S0
is `iterType`; if it is not, branch to block B.
| CheckLoc<T,localId>, ND, S(FramePtr), B
Check that type of the given localId on the frame S0 is T; if not, branch to
block B.
| CheckStk<T,offset>, ND, S(StkPtr), B
Check that the type of the cell on the stack pointed to by S0 at offset (in
cells) is T; if not, branch to block B.
| CheckMBase<T>, ND, S(Lval), B
Check that the value pointed to by the member base register S0 has type T; if
not, branch to block B. This is functionally the same as CheckTypeMem.
| AssertLoc<T,localId>, ND, S(FramePtr), NF
Asserts that type of the supplied local on the frame S0 is T. This is used
for local type information, and is similar to CheckLoc except it doesn't
imply a runtime check (the assertion must've already been proven to be true)
and cannot cause control flow.
| AssertStk<T,offset>, ND, S(StkPtr), NF
Assert that stack element at `offset' (in cells) from S0 has type T. This is
similar to a CheckStk except that it does not imply a runtime check and
cannot cause control flow.
| AssertMBase<T>, ND, NA, NF
Assert that the value pointed to by the member base register has type T.
This is similar to a CheckMBase except that it does not imply a runtime check
and cannot cause control flow.
| CheckInit, ND, S(Cell), B
If S0's type is Uninit, branch to block B.
| CheckInitMem, ND, S(Mem), B
If the value pointed to by S0 has type Uninit, branch to block B.
| CheckCold<TransID>, ND, NA, B
Check if the counter associated with translation TransID is cold (i.e. within
a fixed threshold). If it's not (i.e. such translation has reached the
"hotness threshold"), then branch to block B.
| EndGuards, ND, NA, NF
A no-op at runtime, this instruction serves to mark the end of the initial
sequence of guards in a trace.
| CheckNonNull, DSubtract(0, Nullptr), SNullptr(Cls,Func,Obj,Str,Mem,TCA,Dict,Vec), B|P|LA
If the value in S0 is Nullptr, branch to block B. If S0 cannot be Nullptr, or
always is Nullptr, this check may be optimized away.
| AssertNonNull, DSubtract(0, Nullptr), SNullptr(Func,StaticStr), P
Returns S0, with Nullptr removed from its type. This instruction currently
supports a very limited range of types but can be expanded if needed.
| CheckSmashableClass, ND, S(Smashable) S(Cls), B
If the lower 32 bits of S0 do not match the class pointer S1, branch to block B.
2. Arithmetic
| AddInt, D(Int), S(Int) S(Int), NF
| AddOffset, D(VoidPtr), S(VoidPtr) C(Int), NF
| SubInt, D(Int), S(Int) S(Int), NF
| MulInt, D(Int), S(Int) S(Int), NF
| MulIntO, D(Int), S(Int) S(Int), B
| AddDbl, D(Dbl), S(Dbl) S(Dbl), NF
| SubDbl, D(Dbl), S(Dbl) S(Dbl), NF
| MulDbl, D(Dbl), S(Dbl) S(Dbl), NF
| DivDbl, D(Dbl), S(Dbl) S(Dbl), NF
| DivInt, D(Int), S(Int) S(Int), NF
| Floor, D(Dbl), S(Dbl), NF
| Ceil, D(Dbl), S(Dbl), NF
| AbsDbl, D(Dbl), S(Dbl), NF
| Sqrt, D(Dbl), S(Dbl), NF
| AndInt, D(Int), S(Int) S(Int), NF
| OrInt, D(Int), S(Int) S(Int), NF
| XorInt, D(Int), S(Int) S(Int), NF
| Shl, D(Int), S(Int) S(Int), NF
| Shr, D(Int), S(Int) S(Int), NF
| Lshr, D(Int), S(Int) S(Int), NF
Double arithmetic, integer arithmetic, and integer bitwise operations.
Performs the operation described by the opcode name on S0 and S1, and puts
the result in D.
Undefined behavior occurs if Mod is given a divisor of zero, or if the
divisor is -1 and the dividend is the minimum representable integer.
AbsDbl computes the absolute value of a double-precision value.
DivDbl conforms to IEEE 754. In particular, division by zero returns +/- INF
or NAN depending on the dividend; and should the result of a division be zero
the sign will follow the normal sign rules for division.
DivInt will perform integer division of S1 by S0. S0 should not be zero and
must divide S1.
Note that Shr is an arithmetic right shift: The MSB is sign-extended.
Lshr is logical right shift.
Floor returns the largest integral value not greater than its input; Ceil
returns the smallest integral value not less than its input. Their use
requires SSE 4.1; availability should be checked before they are emitted.
MulIntO performs integer arithmetic on S0 and S1, but will branch to
block B on integer overflow.
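As a rough C++ sketch of the Shr/Lshr distinction and of MulIntO's overflow
check (illustrative only, not the emitted code; shift counts are assumed to be
in [0, 63]):

    #include <cstdint>

    // Shr: arithmetic right shift, the sign bit is replicated.
    int64_t shr(int64_t v, int64_t n)  { return v >> n; }

    // Lshr: logical right shift, vacated bits are filled with zeros.
    int64_t lshr(int64_t v, int64_t n) {
      return static_cast<int64_t>(static_cast<uint64_t>(v) >> n);
    }

    // MulIntO-style multiply: returns false on overflow (the "branch to B"
    // case). __builtin_mul_overflow is a GCC/Clang builtin.
    bool mulIntO(int64_t a, int64_t b, int64_t* out) {
      return !__builtin_mul_overflow(a, b, out);
    }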
| XorBool, D(Bool), S(Bool) S(Bool), NF
Logical XOR of the two sources. (Note that && and || do not have
corresponding opcodes because they're handled at the bytecode level, to
implement short-circuiting.)
| Mod, D(Int), S(Int) S(Int), NF
Compute S0 mod S1. If S1 is -1 or 0 the results are undefined.
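For illustration, the undefined cases are a zero divisor and the
INT64_MIN / -1 pair (which traps in the x86 idiv instruction); a guarded
helper might look like this sketch (not necessarily what the JIT emits):

    #include <cstdint>

    int64_t modChecked(int64_t dividend, int64_t divisor) {
      if (divisor == 0)  return 0;  // Mod's result is undefined here
      if (divisor == -1) return 0;  // avoids INT64_MIN % -1, which traps
      return dividend % divisor;
    }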
3. Type conversions
To vec conversions:
| ConvArrLikeToVec, D(Vec), S(ArrLike), PRc|CRc|LP
| ConvObjToVec, D(Vec), S(Obj), PRc|CRc
To dict conversions:
| ConvArrLikeToDict, D(Dict), S(ArrLike), PRc|CRc|LP
| ConvObjToDict, D(Dict), S(Obj), PRc|CRc
To keyset conversions:
| ConvArrLikeToKeyset, D(Keyset), S(ArrLike), PRc|CRc|LP
| ConvObjToKeyset, D(Keyset), S(Obj), PRc|CRc
To bool conversions:
| ConvDblToBool, D(Bool), S(Dbl), NF
| ConvIntToBool, D(Bool), S(Int), NF
| ConvStrToBool, D(Bool), S(Str), NF
| ConvObjToBool, D(Bool), S(Obj), NF
| ConvTVToBool, D(Bool), S(Cell), NF
To double conversions:
| ConvBoolToDbl, D(Dbl), S(Bool), NF
| ConvIntToDbl, D(Dbl), S(Int), NF
| ConvObjToDbl, D(Dbl), S(Obj), NF
| ConvStrToDbl, D(Dbl), S(Str), NF
| ConvResToDbl, D(Dbl), S(Res), NF
| ConvTVToDbl, D(Dbl), S(Cell), NF
To int conversions:
| ConvBoolToInt, D(Int), S(Bool), NF
| ConvDblToInt, D(Int), S(Dbl), NF
| ConvObjToInt, D(Int), S(Obj), NF
| ConvStrToInt, D(Int), S(Str), NF
| ConvResToInt, D(Int), S(Res), NF
| ConvTVToInt, D(Int), S(Cell), NF
To string conversions:
| ConvDblToStr, D(Str), S(Dbl), PRc
| ConvIntToStr, D(Str), S(Int), PRc
| ConvObjToStr, D(Str), S(Obj), PRc
| ConvTVToStr, D(Str), S(Cell), PRc
All the above opcodes convert S0 from its current type to the destination
type, according to the PHP semantics of such a conversion.
| DblAsBits, D(Int), S(Dbl), NF
Reinterpret a double as an integer with the same bit pattern.
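The semantics can be pictured with C++20's std::bit_cast (a sketch of the
meaning, not the generated code):

    #include <bit>
    #include <cstdint>

    int64_t dblAsBits(double d) {
      return std::bit_cast<int64_t>(d);  // same 64 bits, viewed as an integer
    }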
| OrdStr, D(Int), S(Str), NF
Convert the first byte in a string to an unsigned integer.
Intended as an optimization for ord($str)
| OrdStrIdx, D(Int), S(Str) S(Int), NF
Convert the character at position S1 in base string S0 to an unsigned
integer. Raises a notice if the position is out of bounds.
Intended as an optimization for ord($str[$idx]).
| ChrInt, D(StaticStr), S(Int), NF
Convert the integer S0 to the one-character string with ASCII code S0 & 255.
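A plain C++ sketch of the ord/chr semantics these ops optimize (illustrative
only; the out-of-bounds notice raised by OrdStrIdx is elided):

    #include <cstdint>
    #include <string>

    int64_t ordStr(const std::string& s) {
      return s.empty() ? 0 : static_cast<unsigned char>(s[0]);
    }

    int64_t ordStrIdx(const std::string& s, int64_t idx) {
      if (idx < 0 || static_cast<size_t>(idx) >= s.size()) return 0;
      return static_cast<unsigned char>(s[static_cast<size_t>(idx)]);
    }

    std::string chrInt(int64_t i) {
      return std::string(1, static_cast<char>(i & 255));
    }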
| StrictlyIntegerConv, D(Str|Int), S(Str), PRc
If S0 is a string representing an integer value (same criteria as array key
conversion), return that value as an integer. Otherwise return S0.
| ConvPtrToLval, DLvalOfPtr, S(Ptr), NF
Convert S0 to an equivalent lval.
| VoidPtrAsDataType<T>, DParam(Cell), S(VoidPtr), NF
Take VoidPtr S0 and convert it to heap type TParam.
4. Boolean predicates
| GtInt, D(Bool), S(Int) S(Int), NF
| GteInt, D(Bool), S(Int) S(Int), NF
| LtInt, D(Bool), S(Int) S(Int), NF
| LteInt, D(Bool), S(Int) S(Int), NF
| EqInt, D(Bool), S(Int) S(Int), NF
| NeqInt, D(Bool), S(Int) S(Int), NF
| CmpInt, D(Int), S(Int) S(Int), NF
Perform 64-bit integer comparisons.
| GtDbl, D(Bool), S(Dbl) S(Dbl), NF
| GteDbl, D(Bool), S(Dbl) S(Dbl), NF
| LtDbl, D(Bool), S(Dbl) S(Dbl), NF
| LteDbl, D(Bool), S(Dbl) S(Dbl), NF
| EqDbl, D(Bool), S(Dbl) S(Dbl), NF
| NeqDbl, D(Bool), S(Dbl) S(Dbl), NF
| CmpDbl, D(Int), S(Dbl) S(Dbl), NF
Perform comparisons of doubles. Comparisons that are unordered according to
IEEE 754 (such as when at least one operand is NaN) result in false.
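For example, with a NaN operand every ordered comparison is false, so GtDbl
and LtDbl both report false for the same pair (standard IEEE behavior,
sketched in C++):

    #include <cmath>

    const double n = std::nan("");
    const bool gt = (n > 1.0);  // false
    const bool lt = (n < 1.0);  // false
    const bool eq = (n == n);   // false: NaN compares unequal to itself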
| GtStr, D(Bool), S(Str) S(Str), NF
| GteStr, D(Bool), S(Str) S(Str), NF
| LtStr, D(Bool), S(Str) S(Str), NF
| LteStr, D(Bool), S(Str) S(Str), NF
| EqStr, D(Bool), S(Str) S(Str), NF
| NeqStr, D(Bool), S(Str) S(Str), NF
| SameStr, D(Bool), S(Str) S(Str), NF
| NSameStr, D(Bool), S(Str) S(Str), NF
| CmpStr, D(Int), S(Str) S(Str), NF
Performs comparison of strings.
| GtBool, D(Bool), S(Bool) S(Bool), NF
| GteBool, D(Bool), S(Bool) S(Bool), NF
| LtBool, D(Bool), S(Bool) S(Bool), NF
| LteBool, D(Bool), S(Bool) S(Bool), NF
| EqBool, D(Bool), S(Bool) S(Bool), NF
| NeqBool, D(Bool), S(Bool) S(Bool), NF
| CmpBool, D(Int), S(Bool) S(Bool), NF
Performs comparison of booleans.
| GtObj, D(Bool), S(Obj) S(Obj), NF
| GteObj, D(Bool), S(Obj) S(Obj), NF
| LtObj, D(Bool), S(Obj) S(Obj), NF
| LteObj, D(Bool), S(Obj) S(Obj), NF
| EqObj, D(Bool), S(Obj) S(Obj), NF
| NeqObj, D(Bool), S(Obj) S(Obj), NF
| SameObj, D(Bool), S(Obj) S(Obj), NF
| NSameObj, D(Bool), S(Obj) S(Obj), NF
| CmpObj, D(Int), S(Obj) S(Obj), NF
Perform comparison of objects. All versions except for SameObj and NSameObj may
re-enter the VM and therefore may throw exceptions. SameObj and NSameObj never
re-enter or throw.
| GtArrLike, D(Bool), S(ArrLike) S(ArrLike), NF|LA
| GteArrLike, D(Bool), S(ArrLike) S(ArrLike), NF|LA
| LtArrLike, D(Bool), S(ArrLike) S(ArrLike), NF|LA
| LteArrLike, D(Bool), S(ArrLike) S(ArrLike), NF|LA
| EqArrLike, D(Bool), S(ArrLike) S(ArrLike), NF|LA
| NeqArrLike, D(Bool), S(ArrLike) S(ArrLike), NF|LA
| SameArrLike, D(Bool), S(ArrLike) S(ArrLike), NF|LA
| NSameArrLike, D(Bool), S(ArrLike) S(ArrLike), NF|LA
| CmpArrLike, D(Int), S(ArrLike) S(ArrLike), NF|LA
Perform comparison of array-likes. All versions except for SameArrLike and
NSameArrLike may re-enter the VM and therefore may throw exceptions.
SameArrLike and NSameArrLike never re-enter or throw. Relational comparisons
for dicts and keysets are not supported. As keysets only contain ints and
strings, comparisons never re-enter or throw.
| GtRes, D(Bool), S(Res) S(Res), NF
| GteRes, D(Bool), S(Res) S(Res), NF
| LtRes, D(Bool), S(Res) S(Res), NF
| LteRes, D(Bool), S(Res) S(Res), NF
| EqRes, D(Bool), S(Res) S(Res), NF
| NeqRes, D(Bool), S(Res) S(Res), NF
| CmpRes, D(Int), S(Res) S(Res), NF
Perform comparison of resources using PHP semantics. Resource comparisons
never re-enter or throw.
| EqCls, D(Bool), S(Cls) S(Cls), NF
Checks if two Class values are equal.
| EqLazyCls, D(Bool), S(LazyCls) S(LazyCls), NF
Checks if two Lazy class values are equal.
| EqFunc, D(Bool), S(Func) S(Func), NF
Checks if two Func values are equal.
| EqStrPtr, D(Bool), S(Str) S(Str), NF
Checks if two string values represent the same underlying string. That is,
that they point at the same underlying storage.
| EqArrayDataPtr, D(Bool), S(ArrLike) S(ArrLike), LA
Checks if the two arguments represent the same underlying ArrayData. That is,
that they point at the same underlying storage.
| ProfileInstanceCheck, ND, C(StaticStr), NF
Profile that S0 has been used as the RHS of an instance check.
| InstanceOf, D(Bool), S(Cls) S(Cls|Nullptr), NF
Sets D based on whether S0 is a descendant of the class, interface, or trait
in S1. (Note that this is always false for a trait). S1 may be null at
runtime if the class is not defined.
| InstanceOfIface, D(Bool), S(Cls) CStr, NF
Fast path for interface checks. Sets D based on whether S0 implements S1, but
S1 must be a unique interface. This should only be used in repo-authoritative
mode.
| InstanceOfIfaceVtable<iface,canOptimize>, D(Bool), S(Cls), NF
Faster path for interface checks. Sets D based on whether S0 implements
iface, which must be a unique interface with an assigned vtable slot. In
some circumstances, this instruction is ensuring the presence of the
vtableVec; in those cases, canOptimize is false to avoid eliminating the
guard.
| ExtendsClass<cls,strictLikely>, D(Bool), S(Cls), NF
A fast-path for instanceof checks. Sets D based on whether S0 is a descendant
of cls, where cls must be a unique class that is not an interface or a trait.
If strictLikely is true, optimize for the case where S0 is not equal to S1.
| InstanceOfBitmask, D(Bool), S(Cls) CStr, NF
| NInstanceOfBitmask, D(Bool), S(Cls) CStr, NF
A fast-path for instanceof checks. Sets D based on whether S0 is a descendant
of the class named by S1, where S1 must have a bit allocated for it in the
fast instance check bitvector (see class.h).
| InterfaceSupportsArrLike, D(Bool), S(Str), NF
| InterfaceSupportsStr, D(Bool), S(Str), NF
| InterfaceSupportsInt, D(Bool), S(Str), NF
| InterfaceSupportsDbl, D(Bool), S(Str), NF
Returns whether t instanceof S0 returns true when t is of the given type.
| ResolveTypeStruct<class,suppress,offset,size,isOrAsOp>,
| D(Dict), S(StkPtr) S(Cls|Nullptr), LA
Applies class/alias resolution on the type structure that is at the stack
offset given by S0 and offset. If size > 1, combine the type structures on
the stack into the first one's denoted holes. Returns a copy.
S1 is the calling class, used to resolve the this typehint.
If isOrAsOp is set, raises an error if S0 contains traits, function types or
typevars.
If there is an error during type structure resolution, this instruction raises
an error. If suppress is set, this error is demoted to a warning.
| IsTypeStruct<handle>, D(Bool), S(Dict) S(Cell), LA
Returns whether S1 matches the type structure of a defined type in S0 and S1
is a subtype of S0. The input type structure (S0) must be resolved.
Handle is used for caching purposes.
| IsTypeStructCached, D(Bool), S(Dict) S(Cell), B|LA
Checks if S0 is cached in TSClassCache and if so, returns whether S1 is a
subtype of S0. Otherwise, it branches.
| ProfileIsTypeStruct<handle>, ND, S(Dict), LA|ND
Profile S0 to determine whether S0 is a type structure holding a reference to
a Class*.
| ThrowAsTypeStructException, ND, S(Dict) S(Cell), LA|T
Throws an exception indicating why S1 does not match the type structure of a
defined type in S0 or why S1 is not a subtype of S0. The input type structure
(S0) must be resolved.
| RaiseErrorOnInvalidIsAsExpressionType, D(Dict), S(Dict), LA
Raises an error if the type hint for is/as expression contains an invalid
type such as callables, erased type variables and trait type hints.
The input type structure (S0) must be resolved.
| ProfileCoeffectFunParam<handle>, ND, S(Cell), ND
Profile S0 to determine which code paths to emit for coeffect fun param.
| HasToString, D(Bool), S(Obj), NF
Returns whether the object S0 has a toString method.
| IsType<T>, D(Bool), S(Cell), NF
Sets D to true iff S0 holds a value that is of type T. T must not be a
specialized type.
| IsNType<T>, D(Bool), S(Cell), NF
Sets D to true iff S0 holds a value that is not of type T. T must not be a
specialized type.
| IsTypeMem<T>, D(Bool), S(Mem), NF
Sets D to true iff the value referenced by S0 is of type T. T must not be a
specialized type.
The value in S0 must not be a pointer into the evaluation stack or frame
locals.
| IsNTypeMem<T>, D(Bool), S(Mem), NF
Sets D to true iff the value referenced by S0 is not of type T. T must not be
a specialized type.
| IsWaitHandle, D(Bool), S(Obj), NF
Sets D to true iff S0 is a subclass of WaitHandle.
| IsCol, D(Bool), S(Obj), NF
Sets D to true iff S0 is a collection.
| FuncHasReifiedGenerics, D(Bool), S(Func), NF
Set D to true iff S0 is a reified function.
| ClassHasReifiedGenerics, D(Bool), S(Cls), NF
Set D to true iff S0 is a reified class.
| GetClsRGProp, D(Vec), S(Cls) S(Obj), PRc
Get the reified generics property for object S1, using the
index of this property as stored in class S0.
| HasReifiedParent, D(Bool), S(Cls), NF
Set D to true iff S0 has a reified parent.
| CallViolatesModuleBoundary<caller>, D(Bool), S(Func,Cls), NF
Set D to true iff a call from `caller` to S0 violates a module boundary.
Requires that S0 is an internal method.
| CallViolatesDeploymentBoundary<caller>, D(Bool), S(Func,Cls), NF
Set D to true iff a call from `caller` to S0 violates the deployment boundary.
5. Branches
| JmpZero, ND, S(Int,Bool), B
| JmpNZero, ND, S(Int,Bool), B
Conditionally jump to block B based on S0.
| JmpSSwitchDest, ND, S(TCA) S(StkPtr) S(FramePtr), T
Jump to the target of a sswitch statement, leaving the region, where the
target TCA is S0.
| JmpSwitchDest, ND, S(Int) S(StkPtr) S(FramePtr), T
Jump to the target of a switch statement, leaving the region, using table
metadata <JmpSwitchData> and index S0, which must be a valid index in the
jump table.
| ProfileSwitchDest<handle,nCases>, ND, S(Int), NF
Profile a switch statement target.
| CheckSurpriseFlags, ND, S(FramePtr,StkPtr), B
Tests the implementation-specific surprise flags. If they're true, branches
to block B. This is done by comparing an evaluation stack pointer to the RDS
stackLimitAndSurprise word. Note that in a resumed function, the frame
pointer is not pointing into the eval stack, so S0 should be a StkPtr in that
case.
| HandleRequestSurprise, ND, NA, NF
Generate exceptions based on surprise flags on a per request basis.
Make sure CheckSurpriseFlags is true before calling HandleRequestSurprise.
| ReturnHook, ND, S(FramePtr) S(Cell), NF
Surprise flag hook for function returns.
| SuspendHookAwaitEF, ND, S(FramePtr) S(FramePtr) S(Obj), NF
Surprise flag hook for suspending eagerly executing async functions. The S0
frame was already teleported into S1. Decrefs S2 if it throws an exception.
| SuspendHookAwaitEG, ND, S(FramePtr) S(Obj), NF
Surprise flag hook for suspending eagerly executing async generators. The S0
frame has an associated AG, which is already linked to the newly constructed
AGWH in the blocked state. Decrefs S1 if it throws an exception.
| SuspendHookAwaitR, ND, S(FramePtr) S(Obj), NF
Surprise flag hook for suspending async functions and async generators resumed
at Await. The S0 frame has an associated AFWH/AGWH still in the running state,
S1 points to the child WH we are going to block on.
| SuspendHookCreateCont, ND, S(FramePtr) S(FramePtr) S(Obj), NF
Surprise flag hook for suspending generators and async generators during their
invocation. The S0 frame was already teleported into S1. Decrefs S2 if it
throws an exception.
| SuspendHookYield, ND, S(FramePtr), NF
Surprise flag hook for suspending generators and async generators at Yield.
| Unreachable<AssertReason>, ND, NA, T
Indicates an unreachable code path. Any instructions that are post-dominated
by an Unreachable may be treated as unreachable by the optimizer, and the
behavior of a program that attempts to execute an Unreachable is undefined.
| EndBlock<AssertReason>, ND, NA, T
Halt execution, without implying anything about the reachability of
instructions preceding this. Intended for use in internal tests or other code
not meant to be executed.
| Jmp, ND, SVar(Top), B|T
Unconditional jump to block B. In the second form, the target block must
start with a DefLabel with the same number of destinations as Jmp's number of
sources. Jmp parallel-copies its sources to the DefLabel destinations.
| DefLabel, DMulti, NA, NF
DefLabel defines variables received from a previous Jmp. A DefLabel with zero
destinations is a no-op, and the predecessor blocks may not necessarily end
in Jmp. A DefLabel with one or more destinations may only be reached by a Jmp
instruction with the same number of sources. Ordinary branch instructions may
not pass values to a DefLabel.
| Select, DUnion(1,2), S(Bool,Int) S(Top) S(Top), NF
If S0 is true/non-zero, return S1, otherwise return S2.
6. Loads
| LdStk<T,offset>, DParam(Cell), S(StkPtr), NF
Loads from S0 at offset (in cells), and puts the value in D as type T.
| LdLoc<T,localId>, DParam(Cell), S(FramePtr), NF
Loads local slot localId from the frame S0 and puts the value in D as type T.
| LdLocForeign<T>, DParam(Cell), S(FramePtr) S(Int), NF
Loads local slot S1 from the frame S0 and puts the value in D as type T.
Note that it does not perform the local optimizations that LdLoc does.
Users of this opcode need to ensure that the local is not optimized away.
| LdStkAddr<offset>, D(PtrToStk), S(StkPtr), NF
Loads the address of the stack slot given by the pointer in S0 at the given
stack offset (measured in cells).
| LdLocAddr<localId>, D(PtrToFrame), S(FramePtr), NF
Loads the address of the local slot localId from the frame S0 into D.
| LdRDSAddr<T,RDSHandle>, DParam(Ptr), NA, NF
Load the address of a Cell that lives at the specified RDS handle.
| LdInitRDSAddr<T,RDSHandle>, DParam(Ptr), NA, B
Load the address of a Cell that lives at the specified RDS handle. Branch if
the value at that address is Uninit.
| LdPairElem, D(InitCell), S(Obj) S(Int), NF
Load the element at S1 out of the Pair collection at S0.
| LdMem<T>, DParam(Cell), S(Mem), NF
Loads from S0 and puts the value in D.
| LdTVFromRDS<T,RDSHandle,includeAux>, DParam(InitCell), NA, NF
Load the TypedValue from the specified RDS handle. Must load the aux bits if
`includeAux` is true.
| LdContField<T>, DParam(Cell), S(Obj) C(Int), NF
Loads a property from the object referenced by S0 at the offset given by S1
and puts the value in D. S0 must be a Generator.
| LdClsInitElem<idx>, D(Cell), S(PtrToClsInit), NF
Load the cell at index `idx` from the class init vector at S0 into D.
| LdColVec, D(Vec), S(Obj), NF
Load the vec array backing a collection instance in S0, which must be a
Vector or ImmVector, and that specific object type must be known at compile
time.
| LdColDict, D(Dict), S(Obj), NF
Load the dict array backing a collection instance in S0, which must be a
Map, Set, ImmMap, or ImmSet, and that specific object type must be known at
compile time.
| LdIterBase<T,iterId>, DParam(ArrLike), S(FramePtr), LA
Load the base of the iterator with type `T` at `iterId`. `T` must be a valid,
DataTypeSpecific-or-better type for the iterator's base; for example, it may
be based on an earlier call to CheckIter.
| LdIterPos<T,iterId>, DParam(Int|PtrToElem), S(FramePtr), NF
| LdIterEnd<T,iterId>, DParam(Int|PtrToElem), S(FramePtr), NF
Load the specified field of the iterator at `iterId`. These ops should only
be generated for iterators known to have a specialized type (via CheckIter).
The type param `T` should be compatible with this type - i.e. `T` should be
either an int or a pointer based on whether it's an index or pointer iter.
| LdFrameThis, DParam(Obj), S(FramePtr), NF
Loads into D the value of m_this from S0.
| LdFrameCls, DParam(Cls), S(FramePtr), NF
Loads into D the value of m_cls from S0.
| LdClsCtor, D(Func), S(Cls) S(Func), NF
Loads into D the constructor of class S0. If the constructor cannot be called
from the context of the func S1, raise an error.
| LdSmashable, D(Smashable), NA, NF
Loads a smashable value. The initial value is set to (1 << addr) + 1, where
addr is a pointer pointing to the value in TC. The lowest bit is set for
convenience of checking whether the value was already smashed.
| LdSmashableFunc, D(Func), S(Smashable), NF
Loads into D the func pointer stored in the higher 32 bits of S0.
| DefConst<T>, DParam(Top), NA, NF
Define a constant value of type T. D is presumed to be globally available and
the DefConst instruction will not actually appear in the IR instruction
stream.
| Conjure<T>, DParam(Top), NA, NF
Define a value of type T. This instruction aborts at runtime; it is meant to
be used in tests or code that is known to be unreachable.
| ConjureUse, ND, S(Cell), NF
Define a "use" of S0 effectively keeping the value alive. As with Conjure it
should not appear in reachable code.
| LdCls, D(Cls), S(Str) C(Cls|Nullptr), NF
Loads the class named S0 in the context of the class S1. Invokes autoload and
may raise an error if the class is not defined. The explicit context
parameter allows the compiler to simplify this instruction to a DefConst in
some cases. If S0 is constant, this instruction may be simplified to a
LdClsCached.
| LdClsCached, D(Cls), CStr, NF
Loads the class named S0 via the RDS. Invokes autoload and may raise an error
if the class is not defined.
| LdClsCachedSafe, D(Cls), CStr, B
Loads the class whose name is S0 out of the RDS. If the class is not defined,
branch to B.
| LdClsInitData, D(PtrToClsInit), S(Cls), NF
Loads the pointer to the property initializer array for class S0.
| LookupClsRDS, D(Cls|Nullptr), S(Str), NF
Lookup the cached-class RDS handle for a given class name. Dereference that
handle and return the associated Class, or null if not present.
| LdCns, D(InitCell), CStr, B
Load the constant named S0, branching to B if it isn't present.
| LookupCnsE<T,constName>, D(InitCell), CStr, PRc
Load a constant via the RDS. Raises a fatal error if it cannot define the
constant. This should only be executed if LdCns on the same constant has
failed.
| LdClsCns<className,constantName>, DParam(InitCell), NA, B
Load the constant 'constantName' for the class 'className'. If not
initialized, branch to B.
| LookupClsCns, D(InitCell), S(Cls) S(Str), NF
Lookup a class constant. May raise errors if either the class or constant are
not defined.
| LookupClsCtxCns, D(Int), S(Cls) S(Str), NF
Lookup a class context constant. May raise errors if the context constant
is not defined, is abstract or is a type/value constant.
| LdClsCtxCns<slot>, D(Int), S(Cls), NF
Loads the context constant in the slot `slot' on S0. Requires that the slot
contains a valid context constant.
| LdSubClsCns<constantName,slot>, D(Uncounted), S(Cls), NF
Load the constant 'constantName' for the class S0. The constant is known to
be in the given slot. If the returned value is Uninit, the constant is not
initialized and it must be loaded using InitSubClsCns. Counted constants will
never have their slot be initialized and thus always take the slow path.
| InitSubClsCns<constantName,slot>, D(InitCell), S(Cls), NF
Slow path for LdSubClsCns. Used when LdSubClsCns indicates the slot is
uninitialized.
| CheckSubClsCns<constantName,slot>, ND, S(Cls), B
Check that the constant 'constantName' lives in the given slot for the class
S0, and branch if not. S0 must have at least slot+1 constants.
| ProfileSubClsCns<constantName,handle>, D(Cell), S(Cls), NF
Load the constant 'constantName' for the class S0, profiling the observed
slots. If the returned value is Uninit, the constant does not exist or is
abstract or is a type-constant.
| LdClsCnsVecLen, D(Int), S(Cls), NF
Load the size of S0's constant table.
| LdResolvedTypeCns<slot>, D(StaticDict), S(Cls), B|LA
Loads the resolved type constant in the slot `slot' on S0 or branches to B if
the type constant is not resolved.
| LdResolvedTypeCnsNoCheck<slot>, D(StaticDict), S(Cls), LA
Loads the resolved type constant in the slot `slot' on S0. The type constant
is assumed to be present and resolved.
| LdTypeCns, D(StaticDict), S(Cls) S(Str), NF
Loads type constant with name S1 from class S0, performing any necessary
resolution. Raises an error if no such constant could be found, if S0::S1 is
not a type constant, if resolution fails, or if S0::S1 is abstract.
| LdTypeCnsNoThrow, D(StaticDict), S(Cls) S(Str), NF
Loads type constant with name S1 from class S0, performing any necessary
resolution. If no such constant can be found, if S0::S1 is not a type
constant, if resolution fails, or if S0::S1 is abstract, then a "fake"
invalid type-structure is returned.
| LdResolvedTypeCnsClsName<slot>, DTypeCnsClsName, S(Cls), NF
Loads the cached 'classname' field from the resolved type constant in the
slot `slot' on S0. If there is no cached field, returns nullptr.
| LdTypeCnsClsName, D(StaticStr), S(Cls) S(Str), NF
Loads and resolves the type constant with name S1 from class S0, as if by
LdTypeCns. Returns the 'classname' field from that type constant. Raises an
error if the type constant loading fails, or if there is no 'classname' field
present.
| LdClsMethodFCacheFunc<clsName,methodName>, D(Func), NA, B
Loads the target cache entry for a forwarding call to clsName::methodName.
If the method does not exist, or the cache hasn't been filled yet, branch to
B.
| LookupClsMethodFCache<clsName,methodName>,
| D(Func|Nullptr), C(Cls) S(FramePtr),
| NF
Lookup clsName::methodName in the forwarding class method cache. S0 should be
the Class named by clsName and S1 should be the current vm frame pointer. May
return Nullptr if lookup fails using a subset of the required lookup paths,
indicating that a more complete lookup path should be taken. May throw if the
method does not exist.
| LdClsMethodCacheFunc<clsName,methodName>, D(Func), NA, B
Loads the target cache entry for the method clsName::methodName. If the
method does not exist or the cache hasn't been filled yet, branch to B.
| LdClsMethodCacheCls<clsName,methodName>, D(Cls), NA, NF
Loads the target cache class context entry for a call to clsName::methodName
from the current context. This instruction must only be used when the value
is known to not be empty (i.e., LdClsMethodCacheFunc must have succeeded, or
LookupClsMethodCache returned a non-null value).
| LookupClsMethodCache<clsName,methodName>, D(Func|Nullptr), S(FramePtr), NF
Lookup a function in the class method targetcache. The class name and method
name are clsName and methodName, respectively. S0 is the current vm frame
pointer. Returns Nullptr if the method cannot be found using a subset of the
required lookup paths, indicating that a more complete lookup path should be
taken. May throw if the method does not exist.
| LdIfaceMethod<vtableIdx,methodIdx>, D(Func), S(Cls), NF
Load the Func* at methodIdx from the vtable at vtableIdx in S0.
If methodIdx represents an instance method, S0 must be a non-abstract class.
This requirement is naturally satisfied if S0 comes from LdObjClass.
| LdFuncVecLen, D(Int), S(Cls), NF
Load the funcVecLen field from S0.
| LdClsMethod, D(Func), S(Cls) C(Int), NF
Load the Func* in slot S1 of the class method table for class S0. (Note that
this vector is located before the class in memory, so the non-negative slots
will map to negative offset loads.)
| LookupClsMethod, D(Func|Nullptr), S(Cls) S(Str) S(Obj|Nullptr) S(Func), NF
Lookup a pointer to a class method for a given class S0 and method name S1,
assuming caller's $this is S2 and the caller is S3. Throws or
fatals if the method does not exist, is not accessible, or is not a static
method. Returns nullptr if it is an instance method defined in S2's class
hierarchy, indicating that this legacy call should be handled by the interpreter.
| DeserializeLazyProp<index>, ND, S(Obj), NF
Given the `index` of a property in S0 that may be a handle to some lazily-
deserialized APC value, check if the property is lazy and deserialize it in
place if so. After this op, the prop will be a valid TypedValue.
| LdPropAddr<T,index>, D(LvalToProp), S(Obj), NF
Load the address of the object property at physical index `index`.
| LdInitPropAddr<T,index>, D(LvalToProp), S(Obj), B
Load the address of the object property at physical index `index`. Branch if
the value at that address is Uninit.
| LdGblAddr, D(LvalToGbl|Nullptr), S(Str), NF
Loads a pointer to a global with name S0, or a null lval if that global is
not already defined.
| LdGblAddrDef, D(LvalToGbl), S(Str), NF
Loads a pointer to a global with name S0, defining it to be InitNull if it
is not already defined.
| ProfileGlobal, ND, S(Str), NF
Profile an (attempted) access of a global with name S0.
| LdClsPropAddrOrNull<readonly op>, D(PtrToSProp|Nullptr), S(Cls) S(Str)
| C(Func) C(Bool) C(Bool), NF
Loads a pointer to a static class property. S0 points to the class, S1 is the
property name, and S2 is the function representing the context of the code
accessing the property. If class S0 does not have a visible and accessible
static property named S1, then nullptr is returned. An exception
will be thrown if the property is marked LateInit and its value is
Uninit, unless S3 is true. An exception is thrown if S4 is true,
and the property is constant. An exception is also thrown if there is a
readonly violation with the readonly op.
| LdClsPropAddrOrRaise<readonly op>, D(PtrToSProp), S(Cls) S(Str)
| C(Func) C(Bool) C(Bool), NF
Loads a pointer to a static class property. S0 points to the class, S1 is the
property name, and S2 is the function representing the context of the code
accessing the property. If class S0 does not have a visible and accessible
static property named S1, a fatal error is raised. An exception
will be thrown if the property is marked LateInit and its value is
Uninit, unless S3 is true. An exception is thrown if S4 is true,
and the property is constant. An exception is also thrown if there is a
readonly violation with the readonly op.
| LookupSPropSlot, D(Int), S(Cls) S(Str), NF
Lookup the slot index of the static property with the name S1 on the class
S0, returning -1 if not found.
| LdObjMethodD<opt-class, callerFunc>, D(Func), S(Cls) S(Str), NF
Loads a func pointer pointing to the instance method that would be called
if a method named S1 is invoked on an instance of S0. Raises a fatal if the
class does not have an accessible method with the given name.
| LdObjMethodS<methodName, ctx, callerFunc>, D(Func), S(Cls) S(Smashable), NF
Loads a func pointer pointing to the instance method that would be called
if a `methodName` is invoked on an instance of S0 from the calling context `ctx`.
Caches the mapping in the TC cache (using S1) and target cache. Raises a fatal
if the class does not have an accessible method with the given name.
| LdObjInvoke, D(Func|Nullptr), S(Cls), NF
Load a regular (non-static in prologue) __invoke Func from the Class in S0,
or nullptr if it is not present.
| LdObjClass, DLdObjCls, S(Obj), NF
Load the class out of the object in S0 and put it in D.
| LdClsName, D(StaticStr), S(Cls), NF
Load the name of the Class* in S0.
| LdLazyCls, D(LazyCls), S(Cls), NF
Load a lazy class corresponding to the Class* in S0.
| LdLazyClsName, D(StaticStr), S(LazyCls), NF
Load the name of the LazyClass in S0.
| LdFunc, D(Func|Nullptr), S(Str), NF
Loads the Func whose name is S0, invoking autoloader if it is not defined yet.
Fatals if the named function is not defined, and the autoloader fails to
define it. Returns nullptr if S0 contained '::', indicating that this legacy
call should be handled by the interpreter.
| LdFuncCached<funcName>, D(Func), NA, NF
Loads the Func whose name is funcName from the RDS, invoking autoload if it is
not defined yet. Fatals if the function autoloader fails to define it.
| LookupFuncCached<funcName>, D(Func), NA, NF
Loads the Func whose name is funcName, invoking autoload if it is not
defined yet. Fatals if the function autoloader fails to define it. This
instruction does not assume the loaded function will be called immediately,
so it will raise a resolution failure error instead of a call failure error.
| LdFuncNumParams, D(Int), S(Func), NF
Returns the value of func->numParams().
| LdFuncInOutBits, D(Int), S(Func), NF
Loads the Func::m_inoutBits field.
| LdARFunc, D(Func), S(FramePtr), NF
Loads the result of ar->func() where ar is S0.
Users of this opcode need to ensure that the writing of m_funcId to S0 is
not optimized away.
| LdFuncName, D(StaticStr), S(Func), NF
Loads the full name of S0.
| LdMethCallerName<isCls>, D(StaticStr), S(Func), NF
Loads the meth_caller cls or func name.
| LdFuncCls, D(Cls|Nullptr), S(Func), NF
Loads the Func::cls() of S0, assuming !Func::isMethCaller().
| LdStrLen, D(Int), S(Str), NF
Load the length of the string in S0.
| FuncHasAttr<attr>, D(Bool), S(Func), NF
Tests for Func::m_attrs & attr.
| ClassHasAttr<attr>, D(Bool), S(Cls), NF
Tests for Class::m_attrsCopy & attr.
| LdFuncRequiredCoeffects, D(Int), S(Func), NF
Returns the value of func->requiredCoeffects().
| LdCoeffectFunParamNaive<param-idx>, D(Int), S(Cell), NF
Returns the coeffects from S0.
| LdClsFromClsMeth, D(Cls), S(ClsMeth), NF
Load the Class* of the ClsMethDataRef in S0.
| LdFuncFromClsMeth, D(Func), S(ClsMeth), NF
Load the Func* of the ClsMethDataRef in S0.
| LdClsFromRClsMeth, D(Cls), S(RClsMeth), NF
Load the Class* of the RClsMeth* in S0.
| LdFuncFromRClsMeth, D(Func), S(RClsMeth), NF
Load the Func* of the RClsMeth* in S0.
| LdGenericsFromRClsMeth, D(Vec), S(RClsMeth), NF
Load the ArrayData* containing the generics attached to the RClsMeth in S0.
| LdFuncFromRFunc, D(Func), S(RFunc), NF
Load the Func* of the RFuncData in S0.
| LdGenericsFromRFunc, D(Vec), S(RFunc), NF
Load the ArrayData* containing the generics attached to the RFuncData in S0.
| LdImplicitContext, D(Obj|InitNull), NA, NF
Loads implicit context from RDS.
| LdImplicitContextMemoKey, D(Str), S(Obj), NF
Loads memo key from implicit context in S0.
7. Allocation
| AllocInitROM<rom>, D(VoidPtr), NA, NF
Allocate a block of memory for `rom`, and initialize its contents with the
ROM data. Dest is the base of the newly allocated block.
| AllocObj, DAllocObj, S(Cls), PRc
Allocates a new object of class S0.
| InitProps<class>, ND, NA, NF
Calls the property initializer function (86pinit) for class. May throw.
| InitSProps<class>, ND, NA, NF
Calls the static property initializer functions (86sinit and/or 86linit)
for class. May throw.
| CheckRDSInitialized<RDSHandle>, ND, NA, B
Check if the RDS entry at the specified handle is initialized, and branches
if not.
| MarkRDSInitialized<RDSHandle>, ND, NA, NF
Mark the given RDS entry as being initialized.
| MarkRDSAccess<RDSHandle>, ND, NA, NF
Mark the given RDS entry as being accessed for profiling.
| PropTypeRedefineCheck, ND, C(Cls) C(Int), NF
Check that the specified property at the slot S1 on S0, which redeclares a
property in the parent, has a declared type equivalent to the parent
declaration.
| PropTypeValid, ND, C(Cls), NF
Check that all properties on S0 carry a type which is valid for a
property. This is used when any type requires runtime resolution.
| DebugBacktrace, D(Vec), S(Int), PRc
Obtain a stack trace, as if by calling the debug_backtrace() function.
| InitThrowableFileAndLine, ND, S(Obj), NF
Initialize Throwable's file name and line number assuming the stack trace
was already initialized and the current vmfp() is a built-in.
| NewInstanceRaw<class>, DAllocObj, NA, PRc
Allocates an instance of class.
| InitObjProps<class>, ND, S(Obj), NF
Initializes properties of object S0.
| ConstructInstance<class>, DAllocObj, NA, PRc
Call the custom instance constructor of an extension class.
| ConstructClosure<class>, DAllocObj, S(Cls|Obj|Nullptr), CRc|PRc
Call the custom instance constructor of a Closure.
Store the context represented by S0 into the newly constructed closure object.
S0 may be a Nullptr when there is no context (i.e. the closure is being used
in a non-method).
| LockObj, ND, S(Obj), NF
Clear the IsBeingConstructed flag on the object.
| NewLoggingArray, DLoggingArrLike, S(Vec,Dict,Keyset), PRc|CRc|LA
Maybe create a LoggingArray wrapping the vanilla array-like S0. The decision
may be based on flags or sampling. If we don't create an array, we return S0
unchanged. The result will match S0 in type. For static array constructors
(including property initial values), the result will be static.
| ProfileArrLikeProps, ND, S(Obj), NF
Maybe create LoggingArrays wrapping each of the vanilla array-likes in the
properties of the newly-initialized object S0. The decision may be based on
flags or sampling. This op mutates the object's props in place.
| LogArrayReach<TransSrcKey>, ND, S(ArrLike), LA
If the supplied array is a LoggingArray, log the current tracelet and SrcKey
for use during specialization.
| LogGuardFailure<T>, ND, S(ArrLike), LA
Log that we failed to guard input S0 to the given type. Useful for catching
performance bugs in bespoke layout selection, but may have more general use.
| NewDictArray, D(Dict), C(Int), PRc
Allocate a new dict with the expected capacity S0.
| NewKeysetArray<offset,keys>, D(Keyset), S(StkPtr), PRc|CRc
Allocate a new keyset containing N elements off the stack given by S0, at
`offset'. This instruction moves the elements off the stack without
manipulating their reference counts.
| AllocVec<size>, D(Vec), NA, PRc
Allocate a new uninitialized vector array with space for size elements in it.
The array will be initialized with values using either InitVecElem or
InitVecElemLoop.
| InitVecElem<index>, ND, S(Vec) S(Cell), CRc
Store S1 into the slot at `index` in array S0. This instruction assumes
that it doesn't have to incref the value being stored. Used to initialize an
array allocated with AllocVec.
| InitVecElemLoop<offset,size>, ND, S(Vec) S(StkPtr), CRc
Move `size' elements from the stack given by S1, at `offset', into the array
S0. Assumes that the first element on the stack is the last element in the
array. Used to initialize an array allocated with AllocVec that was too
big for us to use a series of InitVecElem ops.
| AllocStructDict<keys...>, D(Dict), NA, PRc
Allocate a new key/value dict or mixed-layout array, given N string literal
immediates for keys. This op initializes the header and hash table of the
new array-like, but does not initialize its elements; use InitDictElem for that.
| InitDictElem<index,key>, ND, S(Dict) S(Cell), CRc
Initialize the element at position `index` in array S0 to have the string
literal `key` as its key and S1 as its value. This instruction assumes that
S1 has already been inc-reffed. Used with the result of AllocStructDict.
| NewStructDict<offset,keys...>, D(Dict), S(StkPtr), PRc|CRc
Allocate a new key/value dict, given N string literal immediates for keys
and N stack elements at `offset` on stack S0 for values. This op assumes it
can move values from the stack without inc-reffing them.
| AllocBespokeStructDict<layout>, DStructDict, NA, PRc
Allocate a new, empty dict with the bespoke StructLayout `layout`.
| InitStructPositions<slots>, ND, SStructDict, NF
Initialize the size and iterator positions of the bespoke StructDict S0 for
the fields specified by `slots`. This instruction does not set the elements
of the dict; use InitStructElem for that.
| InitStructElem<slot,key>, ND, SStructDict S(InitCell), CRc
Initialize the element at slot `slot` corresponding to `key` in StructDict S0
with S1. This instruction assumes that S1 has already been inc-reffed.
Used with the result of AllocBespokeStructDict.
| NewBespokeStructDict<offset,layout,slots...>, DStructDict, S(StkPtr), PRc|CRc
Allocate a new dict with the bespoke StructLayout `layout`. Then, init the
N fields with the given `slots` with N stack elements starting at `offset`.
This op moves stack values with no refcounting.
| NewCol<type>, DCol, NA, PRc
Create an empty new collection of `type'. `type' cannot be Pair.
| NewPair<offset>, DCol, S(Cell) S(Cell), PRc|CRc
Allocate a new Pair and fill it with the given cells. Ownership of the cells
is transferred from S0 and S1 to the pair without manipulating the refcounts.
| NewColFromArray<type>, DCol, S(Vec,Dict), PRc|CRc
Create a collection of `type` from a Vec or Dict kind. `type` cannot be
Pair. S0 must be vec kind when `type` is Vector or ImmVector, and must be
dict kind otherwise. Ownership of S0 is transferred to the collection,
without manipulating the refcount.
| Clone, DofS(0), S(Obj), PRc
Allocate an object by cloning S0.
| NewClsMeth, D(ClsMeth), S(Cls) S(Func), NF
Allocate a new ClsMethDataRef.
| NewRClsMeth, D(RClsMeth), S(Cls) S(Func) S(Vec), LA|PRc
Allocate a new reified class method by storing the Class in S0,
the Func in S1 and the reified generics in S2.
| NewRFunc, D(RFunc), S(Func) S(Vec), LA|PRc
Allocate a new reified function pointer given the Func pointer in S0
and reified generics in S1.
| FuncCred, DAllocObj, S(Func), PRc
Allocate a new FunctionCredential.
8. Call & Return
| BeginInlining<func, offset>, D(FramePtr), S(StkPtr), NF
Defines a new frame pointer for an ActRec at callBCOff for use in an inlined
region. In resumed contexts the new frame is computed relative to S0 as S1 is
not a stack location.
| EnterInlineFrame, ND, S(FramePtr), NF
Marks the start of an inlined function whose stack resides offset cells below
the SP.
| EndInlining, ND, S(FramePtr), NF
Marks the end of an inlined function. S0 is no longer a valid frame location.
| InlineCall<retSPOff>, ND, S(FramePtr) S(FramePtr), NF
Sets the current vmfp for an inlined call to S0, and the m_sfp of S0 to the
previous vmfp, S1.
This instruction is primarily used to represent an inlined frame in the IR
when it cannot be eliminated during a side-exit. It performs callee-side
responsibilities for setting up an activation record (i.e. setting the return
ip, m_func and m_callOff, storing the frame pointer into D).
The caller frame pointer is passed as S1. This is used to keep track of the
call chain of inlined functions for simplification and dead code elimination.
| EnterFrame, D(FramePtr), S(FramePtr) S(FramePtr) S(Int) S(Int), NF
Initializes native portion of the callee's frame at S0, links it to the
previous frame at S1, and defines the new frame pointer.
The function being called is specified in `func'.
S2: ActRec flags (see ActRec::m_callOffAndFlags)
S3: callee func id
| Call<offset,numParams,callOff,destroyLocals>,
| DCall,
| S(StkPtr) S(FramePtr) S(Func) S(Cls|Obj|Nullptr) S(Int),
| CRc|PRc
Transfer control to the prologue of a callee S2, based on the pre-live
activation record and set of args on the stack pointed to by S0 at `offset'.
S1 is the current caller frame pointer. S3 is the context (nullptr, $this or
static::class). S4 is the ambient coeffects.
| CallFuncEntry<target,spOffset,arFlags>,
| DCall,
| S(StkPtr) S(FramePtr) S(Cls|Obj|Nullptr),
| CRc|PRc
Transfer control to the func entry at `target' SrcKey, based on the pre-live
activation record and set of args on the stack pointed to by S0 at `spOffset'.
S1 is the current caller frame pointer. S2 is the context (nullptr, $this or
static::class).
| NativeImpl<func>, ND, S(FramePtr) S(StkPtr), NF
Execute a call to the native builtin specified by the current function. S0
and S1 should be the current vmfp and vmsp, respectively.
| CallBuiltin, DBuiltin, S(FramePtr) S(StkPtr) SVar(Mem,Cell,Nullptr), PRc
Call builtin function with N arguments. S0 and S1 should be the current vmfp
and vmsp, respectively.
The source and destination types correspond to C++ parameter and return types
as follows:
C++ type HHIR type Position
----------------- --------- --------
bool Bool source, destination
int64_t Int source, destination
double Dbl source, destination
const String& Ptr source
const Array& Ptr source
const Object& Ptr source
const Variant& Ptr source
Variant& Ptr source (ref param)
String {Str|InitNull} destination
Array {Arr|InitNull} destination
Object {Obj|InitNull} destination
Variant {Cell-UninitNull} destination
| RetCtrl<spOff,suspendingResumed>, ND, S(StkPtr) S(FramePtr) S(Cell), T
Ensure that S0 + `spOff' (in cells) is stored in rvmsp and that S1's saved
frame pointer is stored in rvmfp, then return to the saved return address in
S1. The return value is S2, which is passed via the rret_*() registers to
the caller. The `suspendingResumed' flag indicates when this instruction is
suspending a resumable rather than performing a normal function return.
| AsyncFuncRetPrefetch, ND, S(FramePtr), NF
Prefetch the WaitHandle that the current (async, resume-mode) frame's AFWH
is blocking. If this AFWH is blocking multiple WaitHandles, we will only
prefetch the first one; in any case, this IR op has no visible semantics.
| AsyncFuncRet<spOff>, ND, S(StkPtr) S(FramePtr) S(Cell), T
Return from a resumed async function, assuming no surprise. Ensures that
S0 + `spOff` (in cells) is stored in rvmsp and that S1 is stored in rvmfp,
packs return value S2 into registers and calls the `asyncFuncRet` unique
stub. The stub stores the result into the wait handle associated with the
frame pointer, marks it as finished, unblocks its parents and if possible,
directly resumes the first parent (fast path), or a pending fast runnable
ResumableWaitHandle (slower path). Otherwise, it will exit VM and return
control to the asio scheduler (slow path). The stack must contain exactly one
cell containing uninitialized garbage, which will be populated by the stub
either to pass the return value to the resumed function, or to return null
to the scheduler.
| AsyncFuncRetSlow<spOff>, ND, S(StkPtr) S(FramePtr) S(Cell), T
Return from a resumed async function, assuming unknown surprise flag state
after the previous surprise was handled by executing "return" event hook.
Calls the `asyncFuncRetSlow` stub, which re-checks the surprise flag and
transfers control to the AsyncFuncRet if it was clear, or performs the slow
path of AsyncFuncRet if it was not, without resuming another function, as
we are not able to call a potential "resume await" event hook from the stub.
| AsyncGenRetR<spOff>, ND, S(StkPtr) S(FramePtr), T
Return from an async generator resumed at Await, assuming unknown surprise
flag state. Ensures that S0 + `spOff` (in cells) is stored in rvmsp and that
S1 is stored in rvmfp and calls the `asyncGenRetR` unique stub. The stub
stores `null` result indicating end of iteration into the async generator
wait handle associated with the generator associated with the frame pointer,
marks the wait handle as finished and unblocks its parents. If the surprise
flag was not set and there is a pending fast runnable ResumableWaitHandle,
it resumes it directly (slow path). Otherwise, it will exit VM and return
control to the asio scheduler (turtle path). The stack must contain exactly
one cell containing uninitialized garbage, which will be populated by the
stub either to pass the return value to the resumed function, or to return
null to the scheduler.
| AsyncGenYieldR<spOff>, ND, S(StkPtr) S(FramePtr) S(Cell) S(Cell), T
Yield from an async generator resumed at Await, assuming unknown surprise
flag state. Works the same as `AsyncGenRetR`, except the async generator
wait handle is populated with `tuple($key, $value)` result of the iteration
step, where `$key` is given by S2 and `$value` by S3.
| AsyncSwitchFast<spOff>, ND, S(StkPtr) S(FramePtr), T
Switch control to another ResumableWaitHandle. Ensures that S0 + `spOff`
(in cells) is stored in rvmsp and that S1 is stored in rvmfp and calls the
`asyncSwitchCtrl` unique stub, which tries to resume a pending fast runnable
ResumableWaitHandle (fast path) if possible, otherwise it will exit VM and
return control to the asio scheduler (slow path). As with AsyncFuncRet, the
stack must contain exactly one cell containing uninitialized garbage.
| LdRetVal<T>, DParam(InitCell), S(FramePtr), NF
Load the return value from the already-returned-from ActRec pointed to by S0
into the dest. This is used by NativeImpl. TODO(#7150575): We want to make
NativeImpl return a TypedValue in the C++ ABI registers.
| GenericRetDecRefs, ND, S(FramePtr), NF
Does decrefs of all the current function's locals, where S0 is a pointer to
the relevant activation record. This instruction may not occur in an inlined
call.
9. Stores
| StPtrAt, ND, S(VoidPtr) S(VoidPtr), NF
Store the pointer value S1 to the location pointed to by S0.
| StTypeAt, ND, S(VoidPtr) S(Cell), NF
Store the type of S1 to pointer S0.
| StClsInitElem<idx>, ND, S(PtrToClsInit) S(Cell), NF
Store S1 into the slot at index `idx` in the class init vector at S0.
| StMem, ND, S(Mem) S(Cell), NF
Store S1 into the location pointed to by S0.
| StMemMeta, ND, S(Mem) S(Cell), NF
Logically store S1 into the location pointed to by S0. This is like StMem,
but emits no code. It exists to update frame state tracking.
| StTVInRDS<RDSHandle,includeAux>, ND, S(InitCell), NF
Store the TypedValue in S0 into the specified RDS handle. Must store the aux
bits if `includeAux` is true.
| StImplicitContext, ND, S(Obj|InitNull), NF
Sets the implicit context to S0.
| StLoc<localId>, ND, S(FramePtr) S(Cell), NF
Store S1 to local number localId on the frame pointed to by S0.
| StLocMeta<localId>, ND, S(FramePtr) S(Cell), NF
Logically store S1 to local number localId on the frame pointed to by
S0. This is like StLoc, but emits no code. It exists to update frame state
tracking.
| StLocRange<localIds>, ND, S(FramePtr) S(Cell), NF
Store S1 to the local variables corresponding to localIds, on the frame
pointed to by S0.
| StIterBase<iterId>, ND, S(FramePtr) S(ArrLike,Nullptr), LA
Sets the base of the iterator at `iterId` to the pointer S1. The array must
match the specialized type of the iterator, or be null (for local iterators).
| StIterType<iterId,iterType>, ND, S(FramePtr), NF
Sets the type of the iterator at `iterId` to `iterType`. This type must be a
specialized type. Also sets auxiliary fields (like next helper index).
| StIterPos<iterId>, ND, S(FramePtr) S(Int|PtrToElem), NF
| StIterEnd<iterId>, ND, S(FramePtr) S(Int|PtrToElem), NF
Store S1 to the given field of the iterator at `iterId`. S1 must be an int if
we're doing index iteration and a pointer if we're doing pointer iteration.
| StStk<offset>, ND, S(StkPtr) S(Cell), NF
Store S1 to the stack pointed to by S0, at a given offset (in cells).
| StStkMeta<offset>, ND, S(StkPtr) S(Cell), NF
Logically store S1 to the stack pointed to by S0, at a given offset (in
cells). This is like StStk, but emits no code. It exists to update frame
state tracking.
| StStkRange<offsets>, ND, S(StkPtr) S(Cell), NF
Store S1 to the stack slots at the given offsets from S0.
| StOutValue<index>, ND, S(FramePtr) S(Cell), NF
Store S1 into the caller-allocated out-value VM stack cell located `index`
cells above S0 on the stack.
| LdOutAddr<index>, D(PtrToOther), S(FramePtr), NF
Load the address of the storage for out parameter `index` provided by the
callee (the address will be a location on the callee stack).
| RecordReifiedGenericsAndGetTSList, D(Vec), S(Vec), CRc|LA
Takes a varray of reified generics from the stack, adds them to the reified
generics table, and returns the input varray of reified generics (possibly
static).
| StFrameCtx, ND, S(FramePtr) S(Obj,Cls), NF
Store object or class S1 on frame S0.
| StFrameFunc<func>, ND, S(FramePtr), NF
Store the func in m_funcId/m_func for S0.
| StFrameMeta<callBCOff,flags>, ND, S(FramePtr), NF
Store the m_callOffAndFlags field of S0.
10. Trace exits
| StVMFP, ND, S(FramePtr), NF
| StVMSP, ND, S(StkPtr), NF
| StVMPC, ND, C(Int), NF
| StVMReturnAddr, ND, C(Int), NF
| StVMRegState, ND, C(Int), NF
Sets the VM register state to the specified state. This is used
to track whether the register state is clean or dirty as
EagerSyncVMRegs is moved around by store elimination.
| ReqBindJmp<target,invSPOff,irSPOff>, ND, S(StkPtr) S(FramePtr) SCrossTrace, T
Emit a jump to a translation starting at `target'. If it does not exist, jump
to a service request that will translate the code and bind this jump to it.
| ReqRetranslate<irSPOff>, ND, S(StkPtr) S(FramePtr), T
Emit a jump to a service request that will chain to a retranslation of this
tracelet.
This instruction is used in exit traces for a type prediction that occurs at
the first bytecode offset of a tracelet.
| ReqRetranslateOpt<irSPOff>, ND, S(StkPtr) S(FramePtr), T
Emit a service request to retranslate the current function with a higher
optimization gear. This instruction is used in exit traces that trigger
profile-guided optimizations.
| ReqInterpBBNoTranslate<target,invSPOff,irSPOff>, ND, S(StkPtr) S(FramePtr), T
Jump to the `interpHelperNoTranslate' stub, which first interprets the basic
block starting at `target' and then continues interpreting basic blocks until
an already existing translation is found.
11. Refcounting and copies
| Mov, DofS(0), S(Top), P
Defines D as S0. May imply register-to-register moves at code generation
time. Does not imply an incref or any other manipulation of S0.
| IncRef, ND, S(Cell), NF
If S0 is a refcounted type, increment its refcount.
| DecRef<locId>, ND, S(Cell), CRc
Decrease the reference count of S0 by one, and call a destructor for types
that require it if it goes to zero.
The locId is just a hint to the runtime indicating which local variable is
being DecRef'd, if any.
| DecRefNZ<locId>, ND, S(Cell), CRc
Decrease the reference count of S0 by one, do not check if it goes to zero.
This instruction can be used for more efficient code when it is provable that
the reference count cannot go to zero.
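A schematic of the DecRef/DecRefNZ difference over a hypothetical intrusive
refcount (not HHVM's actual object layout):

    #include <cstdint>

    struct Value { int32_t count{1}; /* payload ... */ };
    void destroy(Value* v) { delete v; }  // stand-in for running the destructor

    void decRef(Value* v) {               // DecRef: may release the value
      if (--v->count == 0) destroy(v);
    }

    void decRefNZ(Value* v) {             // DecRefNZ: caller proved count stays > 0
      --v->count;
    }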
| ProfileDecRef<locId>, ND, S(Cell), NF
Update the DecRefProfile for the given input as if it were dec-ref-ed, but do
not actually dec-ref it. We can use this op for e.g. iterator output locals,
because we don't specialize iterators in profiling translations.
| ReleaseShallow<locId>, ND, S(ArrLike|Obj), LA
Release memory associated with S0 without performing any other
operations.
| DecReleaseCheck<locId>, ND, S(Cell), B|CRc
Decrease the refcount of S0 and fall through if the refcount goes to zero.
Otherwise, branch to B.
12. Misc
| DefFP<offset>, D(FramePtr), NA, NF
Creates a temporary D representing the current VM frame pointer.
If the offset is specified, the frame is located at that offset from the SP,
otherwise the frame either lives on the heap, or its position on the stack
is unknown.
| DefFrameRelSP<irSPOff, bcSPOff>, D(StkPtr), S(FramePtr), NF
| DefRegSP<irSPOff, bcSPOff>, D(StkPtr), NA, NF
Defines a stack positioned relative to the frame or the rvmsp register.
Creates a temporary D representing the current VM stack pointer:
- DefFrameRelSP: D points to the same location as the frame pointer S0
- DefRegSP: D is the memory address given by the rvmsp register
The logical stack starts at the stack base; its position and size are
defined by these values:
- `irSPOff' is the offset from the stack base to D
- `bcSPOff' is the offset from the stack base to the top of the stack
DefFrameRelSP is used at the beginning of translations of non-resumed
functions to represent the state of the stack on entry.
DefRegSP is used at the beginning of translations of prologues and
resumed functions to represent the state of the stack on entry.
In prologues, the stack base represents a stack without any func arguments,
i.e. it is pointing to the empty space reserved for an ActRec.
In resumables, the stack base represents an empty VM stack.
| LoadBCSP<irSPOff>, D(StkPtr), S(StkPtr), NF
Gets the bytecode stack pointer from IR stack pointer, for use in eager
syncing.
| DefFuncPrologueFlags, D(Int), NA, NF
| DefFuncPrologueCallee, D(Func), NA, NF
| DefFuncPrologueNumArgs, D(Int), NA, NF
| DefFuncPrologueCtx, DParam(Cls|Obj), NA, NF
Creates a temporary D representing prologue flags, callee function pointer,
number of arguments, or context passed to the prologue.
May be used only at the beginning of a prologue or a stub used in a prologue
context.
| DefFuncEntryFP, D(FramePtr), NA, NF
| DefFuncEntryPrevFP, D(FramePtr), NA, NF
Creates a temporary D representing the callee's or the caller's frame
pointer, respectively.
May be used only at the beginning of a func entry.
| DefFuncEntryArFlags, D(Int), NA, NF
| DefFuncEntryCalleeId, D(Int), NA, NF
| DefFuncEntryCtx, DParam(Cls|Obj), NA, NF
| ConvFuncPrologueFlagsToARFlags, D(Int), S(Int), NF
Convert function prologue flags to function entry ActRec flags.
| Count, D(Int), S(Cell), NF
Computes the number of elements in S0. The count of an array is the number of
elements it contains, without recursing into containers in the array.
Subtypes of Bool|Int|Dbl|Str|Res have a count of 1, subtypes of Null have a
count of 0. The count of objects that implement the Countable interface is
computed by returning the value of their count method. Objects that do not
implement Countable have a count of 1.
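As a rough illustration of the dispatch described above, a self-contained
sketch follows (hypothetical names and a simplified value representation, not
HHVM's actual helper):

    #include <cstdint>

    // Simplified stand-in for a typed value; illustration only.
    enum class Kind { Null, Bool, Int, Dbl, Str, Res, Arr, CountableObj, PlainObj };
    struct Value {
      Kind kind;
      int64_t arraySize;          // meaningful only for Kind::Arr
      int64_t countMethodResult;  // meaningful only for Kind::CountableObj
    };

    int64_t countOf(const Value& v) {
      switch (v.kind) {
        case Kind::Null:         return 0;                   // null counts as 0
        case Kind::Bool: case Kind::Int: case Kind::Dbl:
        case Kind::Str:  case Kind::Res:
                                 return 1;                   // scalars count as 1
        case Kind::Arr:          return v.arraySize;         // no recursion
        case Kind::CountableObj: return v.countMethodResult; // obj->count()
        case Kind::PlainObj:     return 1;                   // non-Countable object
      }
      return 1;
    }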
| CountVec, D(Int), S(Vec), LA
| CountDict, D(Int), S(Dict), LA
| CountKeyset, D(Int), S(Keyset), LA
| CountCollection, D(Int), S(Obj), NF
Computes the number of elements in S0 using the same definition as Count,
but with a restriction on the input type that allows for optimizations.
| Nop, ND, NA, NF
Does nothing. It's sometimes useful for the simplifier to insert one of these
in the instruction stream.
| JmpPlaceholder, ND, NA, B
Does nothing if executed. Semantically, this instruction carries a taken edge
to a block of speculatively-generated code during initial IR generation, such
as specialized code for an iterator init or next which we may or may not use.
If it survives irgen, it should be eliminated in the first DCE pass.
| CheckFuncNeedsCoverage<Func>, ND, NA, B
Checks if Func needs to have coverage information recorded for the current
request.
| RecordFuncCall<Func>, ND, NA, NF
Records a call to Func for a function call based code coverage report.
13. Runtime helpers
| VerifyParamCls<func,param,tc>, ND, S(Obj) S(Cls) S(Cls|Nullptr), NF
Verify parameter type for classes or traits. If S1 does not extend (if S2 is
a class) or implement (if S2 is an interface) S2, this instruction will raise
a recoverable fatal error describing the type mismatch.
| VerifyParamCallable<func,param>, ND, S(Cell), NF
If S0 is not callable, as defined by the php function is_callable, this
instruction will raise a recoverable fatal error describing the type
mismatch.
| VerifyParamFail<func,param,tc>, ND, S(Cell) S(Cls|Nullptr), NF
Assumes that the parameter specified in the extra-data of the current function
has failed its type check and could not be satisfied by coercing the value.
Depending on the typehint being verified and a number of runtime options,
may raise a recoverable fatal error describing the type mismatch.
| VerifyParamFailHard<func,param,tc>, ND, S(Cell) S(Cls|Nullptr), T
A terminal version of VerifyParamFail, to be used when the compiler can
statically prove that this failure will result in a fatal error rather than a
type coercion.
| VerifyParam<func,param,tc>, ND, S(Cell) S(Cls|Nullptr), NF
Verify that S0 is compatible with the parameter type hint. The verification
is guaranteed to not coerce. If the parameter type hint is `this`, S1 contains
the class corresponding to the `this` type.
| VerifyParamCoerce<func,param,tc>,
| DVerifyCoerce,
| S(Cell) S(Cls|Nullptr),
| PRc|CRc
Verify that S0 is compatible with the parameter type hint. The verification
may coerce and the updated value will be returned.
| VerifyRetCallable<func,param>, ND, S(Cell), NF
Verify a return type hint.
| VerifyRetCls<func,param,tc>, ND, S(Obj) S(Cls) S(Cls|Nullptr), NF
Verify a return type hint for a class.
| VerifyRetFail<func,param,tc>, ND, S(Cell) S(Cls|Nullptr), NF
Failure to verify a return type hint.
| VerifyRetFailHard<func,param,tc>, ND, S(Cell) S(Cls|Nullptr), T
Terminal version of VerifyRetFail, to be used when the compiler can prove
that this failure will result in a fatal error.
| VerifyRet<func,param,tc>, ND, S(Cell) S(Cls|Nullptr), NF
Verify a return type hint. The verification is guaranteed to not coerce.
| VerifyRetCoerce<func,param,tc>, DVerifyCoerce, S(Cell) S(Cls|Nullptr), PRc|CRc
Verify a return type hint. The verification may coerce.
| VerifyPropCls, ND, S(Cls) S(Int) S(Cls|Nullptr) S(Obj) C(Bool), NF
Verify a property type hint with AnnotType::Object against an object
value. S0 is the class of the object containing the property. S1 is the slot
of the property on the class. S3 is the object which is being set in the
property. If S2 is not nullptr, then the type-hint refers to that Class, and
S3 will be checked to see if it's an instance of S2. Otherwise, the type-hint
refers to a type-alias, and the alias will be resolved and checked against
S3. S4 is true if this is a static property, false otherwise.
| VerifyPropFail, ND, S(Cls) S(Int) S(Cell) C(Bool), NF
Failure to verify a property type hint. S0 is the class of the object
containing the property. S1 is the slot of the property on the class. S2 is
the value which was being set in the property. S3 is true if this is a static
property, false otherwise.
| VerifyPropFailHard, ND, S(Cls) S(Int) S(Cell) C(Bool), T
Terminal version of VerifyPropFail, to be used when the compiler can prove
that this failure will result in a fatal error.
| VerifyProp, ND, S(Cls) S(Int) S(Cell) C(Bool), NF
Verify that S2 is compatible with the type hint for the property at slot S1
on S0. S3 is true if this is a static property, false otherwise.
| VerifyPropAll, ND, S(Cls) S(Int) S(Cell) C(Bool), NF
Verify that S2 is compatible with all type hints for the property at slot
S1 on S0 including upper-bounds. S3 is true if this is a static property,
false otherwise.
| VerifyPropCoerce, DVerifyCoerce, S(Cls) S(Int) S(Cell) C(Bool), PRc|CRc
Verify that S2 is compatible with the type hint for the property at slot S1
on S0. S3 is true if this is a static property, false otherwise. Once support
for coercing class_meth types is removed this ir instruction can also be
removed (T61738946).
| VerifyPropCoerceAll, D(InitCell), S(Cls) S(Int) S(Cell) C(Bool), PRc|CRc
Verify that S2 is compatible with all type hints for the property at slot S1
on S0, including upper-bounds. S3 is true if this is a static property,
false otherwise. Once support for coercing class_meth types is removed this
ir instruction can also be removed (T61738946).
| VerifyReifiedLocalType<func,paramId>, ND, S(Cell) S(Dict) S(Cls|Nullptr), LA
Raises a catchable type hint error if the reified generics of the function
parameter given by paramId (S0) do not match the type structure given in S1.
| VerifyReifiedReturnType<func>, ND, S(Cell) S(Dict) S(Cls|Nullptr), LA
Raises a catchable type hint error if the reified generics of S0 do not
match the type structure given in S1.
| ThrowUninitLoc<localId>, ND, S(Str), T
Throws an UndefinedVariableException on an uninitialized local variable.
| ThrowUndefPropException, ND, S(Obj) CStr, T
Throws an UndefinedPropertyException on an undefined property named S1 on the
class of S0.
| RaiseTooManyArg<func>, ND, S(Vec), CRc|LA
Raise a too-many-arguments warning because extra arguments stored in S0 were
passed to function func.
| RaiseError, ND, S(Str), T
Raises a fatal error with the text in S0 as its message.
| RaiseWarning, ND, S(Str), NF
Raises a warning with the text in S0 as its message.
| RaiseNotice, ND, S(Str), NF
Raises a notice with the text in S0 as its message.
| ThrowHasThisNeedStatic, ND, S(Func), T
Throws a BadMethodCallException to indicate that func was called on an object
but is a static method.
| ThrowMissingArg<func,argc>, ND, NA, T
Throws a RuntimeExceptionObject to indicate that only argc arguments were
passed to function func.
| ThrowMissingThis, ND, S(Func), T
Throws a BadMethodCallException to indicate that an instance method was called
with null $this.
| ThrowCallReifiedFunctionWithoutGenerics, ND, S(Func), T
Throws a BadMethodCallException to indicate that S0 was called without reified
generics.
| CheckInOutMismatch<numArgs, inoutArgs>, ND, S(Func), NF
| ThrowInOutMismatch<numArgs, inoutArgs>, ND, S(Func), T
Throw an exception if the inout-ness of passed arguments given by `inoutArgs'
does not match the inout-ness of parameters of the callee S0. There must be
a mismatch if ThrowInOutMismatch is used.
These don't verify that packed args, if present, are not in a position that
requires inout. You will still need to verify that when unpacking.
| CheckReadonlyMismatch<numArgs, readonlyArgs>, ND, S(Func), NF
Checks whether the readonly-ness of the passed arguments matches that of the
parameters of the callee S0.
| ThrowReadonlyMismatch<numArgs, readonlyArgs>, ND, S(Func), T
Throw an exception if the readonly-ness of passed arguments given by `readonlyArgs'
does not match the readonly-ness of parameters of the callee S0. There must be
a mismatch if ThrowReadonlyMismatch is used.
These don't verify that packed args are not in a position that requires
readonly. You will still need to verify that when unpacking.
| RaiseForbiddenDynCall, ND, S(Func), NF
Depending on the setting of `ForbidDynamicCallsToFunc`,
`ForbidDynamicCallsToClsMeth` and `ForbidDynamicCallsToInstMeth` runtime
options, either raise a warning or throw an exception indicating that the
func specified in S0 was called dynamically (and should not be).
| RaiseForbiddenDynConstruct, ND, S(Cls), NF
Depending on the setting of the `ForbidDynamicConstructs` runtime option, either
raise a warning or throw an exception indicating that the class specified in
S0 was constructed dynamically (and should not be).
| RaiseCoeffectsCallViolation<func>, ND, S(Int) S(Int), NF
Depending on the setting of the `CoeffectEnforcementLevels` runtime option,
either raise a warning or throw an exception indicating that the caller
was violating coeffects enforcement when calling the callee
specified by func. S0 is used to pass the coeffects provided by the caller.
S1 is used to pass the required coeffects of the callee.
| RaiseCoeffectsFunParamTypeViolation<paramIdx>, ND, S(Cell), NF
Raises a warning to indicate a violation for the expected type for S0
at position paramIdx for the coeffect rule FunParam.
| RaiseCoeffectsFunParamCoeffectRulesViolation, ND, S(Func), NF
Raises a warning to indicate that the input S0 to the coeffect rule FunParam
uses polymorphic coeffect rules.
| RaiseModuleBoundaryViolation<ctx, caller>, ND, S(Func,Cls), NF
Raises a warning to indicate a module boundary violation resulting from the
caller attempting to either call (if S0 is a Func) or create a pointer to the
callee in S0, in context `ctx`.
| RaiseModulePropertyViolation<caller, propCls, propName, is_static>, ND, NA, NF
Raises a warning to indicate a module boundary violation resulting from caller
attempting to access a property propName from class propCls.
| RaiseDeploymentBoundaryViolation<ctx, caller>, ND, S(Func,Cls), NF
Raises a warning to indicate a deployment boundary violation resulting from caller
attempting to call the callee in S0 in context `ctx`.
| RaiseImplicitContextStateInvalid<func>, ND, NA, NF
Raises a warning or throws an exception for invalid, soft implicit context
or cleared states.
Can only be used in memoized wrapper functions.
| RaiseStrToClassNotice, ND, S(Str), NF
Raise a notice if a string is implicitly converted to a class.
| CheckClsMethFunc, ND, S(Func), NF
Raises runtime errors if the func in S0 is not a callable static method.
| CheckClsReifiedGenericMismatch, ND, S(Cls) S(Vec), LA
Raises a runtime error unless whether each generic in S1 is reified or erased
exactly matches the expectations of the cls in S0.
| CheckClsRGSoft, ND, S(Cls), NF
Raise a warning if all the reified generics on class S0 are soft,
otherwise raise an error. S0 must be a reified class.
| CheckFunReifiedGenericMismatch, ND, S(Func) S(Vec), LA
Raises a runtime error unless whether each generic in S1 is reified or erased
exactly matches the expectations of the func in S0.
| IsFunReifiedGenericsMatched<func>, D(Bool), S(Int), NF
Load the generics bitmap from prologue flags given by S0 and check whether the
bitmap proves that the number of given generics and positions of reified vs
erased generics matches the expectations of the callee `func' (which must use
reified generics). If this opcode returns false, further checks implemented
by CheckFunReifiedGenericMismatch are needed.
| InitClsCns<className,constName>, DParam(InitCell), NA, NF
Initialize the RDS entry for a constant for a class, invoking autoload if it
is not defined. The initialized value is returned. This instruction may raise
an undefined constant error if autoload cannot define the constant.
| PrintStr, ND, S(Str), CRc
| PrintInt, ND, S(Int), CRc
| PrintBool, ND, S(Bool), CRc
Print for various types.
| ConcatIntStr, D(Str), S(Int) S(Str), PRc
Concatenate S0 and S1 after converting S0 to String.
| ConcatStrInt, D(Str), S(Str) S(Int), CRc|PRc
Concatenate S0 and S1 after converting S1 to String.
| ConcatStrStr, D(Str), S(Str) S(Str), CRc|PRc
Concatenate S0 and S1.
| ConcatStr3, D(Str), S(Str) S(Str) S(Str), CRc|PRc
Concatenate S0, S1, and S2.
| ConcatStr4, D(Str), S(Str) S(Str) S(Str) S(Str), CRc|PRc
Concatenate S0, S1, S2, and S3.
| AddNewElemKeyset, D(Keyset), S(Keyset) S(InitCell), CRc|PRc
| AddNewElemVec, DModified(0), S(Vec) S(InitCell), CRc|PRc
Add S1 as a new element to the array/keyset/vec S0. (Note: S1 must actually
be a subtype of InitCell for array invariants, but we can't assert this yet
in the IR because many eval stack slots are not entirely typed wrt initness
right now.)
| AKExistsDict, D(Bool), S(Dict) S(Int,Str), NF
Has the effects of array_key_exists(S1, S0).
| AKExistsKeyset, D(Bool), S(Keyset) S(Int,Str), NF
Has the effects of array_key_exists(S1, S0).
| AKExistsObj, D(Bool), S(Obj) S(Int,Str), NF
Has the effects of array_key_exists(S1, S0) on an object S0. This does
collection accesses.
| GetMemoKey, DMemoKey, S(Cell), PRc
Given a cell, produces a string or an int that can be used as a memoize cache
key. Valid values for the input include all basic types, arrays and
collections, and objects that implement IMemoizeParam. Any other type will
cause GetMemoKey to throw. This op can only be used within functions marked
as memoize wrappers.
| GetMemoKeyScalar, DMemoKey, S(Uncounted,Str), PRc
Identical to GetMemoKey but only accepts scalar types and cannot produce
errors.
| DictIdx<sizeHint>, DDictElem, S(Dict) S(Int,Str) S(Cell), NF
Checks if S0 contains the key S1 and returns the result if found. Otherwise
S2 is returned. The optimization data `sizeHint` doesn't affect semantics.
(`sizeHint` describes S0; it's one of {Default, SmallStatic}. Default is a
hash lookup. For SmallStatic, we'll do a linear scan for static string keys.)
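The semantics, ignoring the layout-specific lookup strategy, can be pictured
with this small sketch (std::unordered_map stands in for HHVM's dict;
hypothetical names only):

    #include <cstdint>
    #include <string>
    #include <unordered_map>

    using Dict = std::unordered_map<std::string, int64_t>;

    // DictIdx-like behavior: the value for `key` if present, otherwise `def`.
    int64_t dictIdx(const Dict& base, const std::string& key, int64_t def) {
      auto const it = base.find(key);
      return it != base.end() ? it->second : def;
    }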
| KeysetIdx, DKeysetElem, S(Keyset) S(Int,Str) S(Cell), NF
Checks if S0 contains the key S1 and returns the result if found. Otherwise
S2 is returned.
| MethodExists, D(Bool), S(Cls) S(Str), NF
Checks if the method named S1 exists on class S0. S0 must be a normal class
that is not abstract.
| LdBindAddr<SrcKey,spOff>, D(TCA), NA, NF
Creates a service request to bind the given target address. Returns a TCA
pointing either to the service request (before the service request is
satisfied) or to the native code for the given target address (once the
service request is satisfied).
| LdSSwitchDest<numCases, cases, defaultOffset, bcSPOff>, D(TCA), S(Str), NF
Using the cases in the extra data, create a hash table for destination lookup.
Then, look up the destination for the switched value in the table and yield the
default if not present.
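The lookup amounts to a hash-table probe with a default fallback, roughly as in
the following sketch (hypothetical names; the real table is built from the
extra data and maps to translation-cache addresses):

    #include <string>
    #include <unordered_map>

    using TCA = const void*;  // stand-in for a code address

    TCA ldSSwitchDest(const std::unordered_map<std::string, TCA>& cases,
                      TCA defaultDest, const std::string& switched) {
      auto const it = cases.find(switched);
      return it != cases.end() ? it->second : defaultDest;
    }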
| InterpOne<T,spOff,bcOff,numPopped,numPushed>, ND,
| S(StkPtr) S(FramePtr),
| NF
Call the interpreter implementation function for one opcode. S0 + `spOff' (in
cells) and S1 are, respectively, the VM stack and frame pointers before this
instruction. T is only present if the instruction pushes to the stack, in
which case it is the type of the top stack element after the call. `bcOff' is
the bytecode offset. `numPopped' is the number of stack cells consumed by the
instruction, and `numPushed' is the number of stack cells produced by the
instruction.
| InterpOneCF<T,bcOff,numPopped,numPushed>, ND,
| S(StkPtr) S(FramePtr),
| T
Similar to InterpOne, but for instructions that may modify vmpc. This is
implemented as a tail call to a stub, so any exceptions thrown will be thrown
in the context of the stub, not the InterpOneCF instruction.
| OODeclExists<kind>, D(Bool), S(Str) S(Bool), NF
Returns a bool indicating whether the class, interface, or trait named by S0
exists. Invokes autoload if S1 is true.
| SetOpTV<op>, ND, S(Lval) S(Cell), NF
Performs S0 <op>= S1.
| OutlineSetOp<op>, D(InitCell), S(Cell) S(Cell), NF
Similar to SetOpTV, but does not write back the result to S0, instead it is
returned as the dest. This is useful to handle ops that may require a
type check before writing the value back.
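The distinction between the two ops can be pictured with plain integers and a
hypothetical `+=` op (the real ops operate on typed values and may re-enter the
runtime):

    #include <cstdint>

    // SetOpTV-style: apply the op and write the result back through the lval.
    void setOpAdd(int64_t& lval, int64_t rhs) { lval += rhs; }

    // OutlineSetOp-style: compute and return the result; the caller decides
    // whether and how to write it back after any needed type checks.
    int64_t outlineSetOpAdd(int64_t lhs, int64_t rhs) { return lhs + rhs; }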
| GetTime, D(Dbl), NA, NF
Returns a double of the current time in seconds.
| GetTimeNs, D(Int), C(Int), NF
Returns the current time of the given clock id (specified as a clockid_t) in
nanoseconds as an integer. This will call the kernel's clock_gettime_ns() API. Note
that this cannot be used for CLOCK_THREAD_CPUTIME_ID, as HHVM provides
different semantics for that counter.
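For reference, reading a clock in nanoseconds via the POSIX clock_gettime()
interface looks roughly like this (a sketch, not HHVM's actual helper):

    #include <cstdint>
    #include <time.h>

    int64_t getTimeNs(clockid_t clk) {
      timespec ts{};
      clock_gettime(clk, &ts);  // error handling elided
      return static_cast<int64_t>(ts.tv_sec) * 1000000000LL + ts.tv_nsec;
    }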
| LdUnitPerRequestFilepath<handle>, D(StaticStr), NA, NF
Returns the filepath currently bound to the current unit (stored in the given
RDS handle). The RDS handle must be an initialized normal handle. Only valid
when Eval.ReuseUnitsByHash is enabled.
| DirFromFilepath, D(StaticStr), S(StaticStr), NF
Given a string representing a filepath in S0, return only the directory
portion of the path.
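A loose sketch of the operation (hypothetical name; whether the trailing
separator is included is an implementation detail, and this sketch keeps it):

    #include <string>

    std::string dirFromFilepath(const std::string& path) {
      auto const pos = path.find_last_of('/');
      return pos == std::string::npos ? std::string{} : path.substr(0, pos + 1);
    }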
14. Generators & Closures
| LdClosureCls, DParam(Cls), S(Obj), NF
| LdClosureThis, DParam(Obj), S(Obj), NF
Load the context from the closure object S0 into D, assuming `func' is
S0's closure Func.
| StClosureArg<index>, ND, S(Obj) S(Cell), CRc
Store one of the closure environment arguments (i.e. from the closure's use
clause) from S1 into the closure object S0.
| CreateGen, DAllocObj, S(FramePtr) C(Int) S(TCA,Nullptr) C(Int), PRc
Create a Generator object and suspend the ActRec provided by S0 into its
embedded ActRec, allocating S1 slots for locals/iterators. Set the native
resume address to S2 and resume offset to S3.
| CreateAGen, DAllocObj, S(FramePtr) C(Int) S(TCA,Nullptr) C(Int), PRc
Create an AsyncGenerator object and suspend the ActRec provided by S0 into its
embedded ActRec, allocating S1 slots for locals/iterators. Set the native
resume address to S2 and resume offset to S3.
| CreateAFWH, DAllocObj,
| S(FramePtr) C(Int) S(TCA,Nullptr) C(Int) S(Obj),
| CRc|PRc
Create an AsyncFunctionWaitHandle object and suspend the ActRec provided by
S0 into its embedded ActRec, allocating S1 slots for locals/iterators. Set
the native resume address to S2, resume offset to S3, and mark it blocked on
non-finished child S4.
| CreateAGWH, DAllocObj,
| S(FramePtr) S(TCA,Nullptr) C(Int) S(Obj),
| CRc|PRc
Create an AsyncGeneratorWaitHandle object and link it to the AsyncGenerator
associated with the ActRec provided by S0. Set the native resume address
to S1, resume offset to S2, and mark it blocked on non-finished child S3.
| CreateCCWH<local,count>, DAllocObj, S(FramePtr) S(Int), PRc
Create a ConcurrentWaitHandle and add the count elements from the contiguous
frame locals beginning at local and extending count locals. S1 denotes the
total number of non-completed waithandles. All locals must be subclasses of
WaitHandle.
| CreateSSWH, DAllocObj, S(Cell), CRc|PRc
Call c_StaticWaitHandle::CreateSucceeded.
| AFWHPrepareChild, ND, S(FramePtr) S(Obj), NF
Prepare unfinished WaitableWaitHandle object specified by S1 for getting
awaited by an AsyncFunctionWaitHandle object specified by its ActRec
provided by S0.
Injects S1 into the currently running scheduler instance and performs
cross-scheduler and intra-scheduler cycle detection. Throws if the
dependency cannot be established.
| AFWHPushTailFrame, ND, S(Obj) C(Int), B
If S0 is eligible for the tail frame optimization and has any free tail
frame ID slots, pushes S1 as a new tail frame ID. Otherwise, branches to B.
This IR op assumes that S0 is an Awaitable in the blocked state.
S0 is eligible for the optimization if it is an AsyncFunctionWaitHandle,
if this site "owns" it (i.e. if it has a refcount of exactly 2 - this site
and its child's back pointer), and if it has space in its tail-frames list.
| StArResumeAddr<offset>, ND, S(FramePtr) S(TCA), NF
Store the resume address S1 into the Resumable whose ActRec is given by S0,
marking the offset to resume at as `offset'.
| ContEnter<spOffset,callBCOffset>,
| DGenIter,
| S(StkPtr) S(FramePtr) S(FramePtr) S(TCA) S(Cell),
| CRc
Enters a generator body. S0 + `spOffset' (in cells) is a pointer to the
stack, S1 is the current frame pointer, S2 is the generator frame pointer
embedded in the Generator object, S3 is the address to jump to, and S4 is
the value that will be pushed onto the stack to send it to the output of
the yield statement. The `callBCOffset' will be stored to the m_callOff
field of the ActRec in the generator.
| ContCheckNext, ND, S(Obj) C(Bool), B
Check whether the generator S0 can be iterated. If the generator is already
running or finished, or it was not started yet and the S1 check-started flag
is set, the branch B is taken.
| ContValid, D(Bool), S(Obj), NF
Return true if a generator is not done, false otherwise.
| ContArIncKey, ND, S(FramePtr), NF
Special-case key update for the generator whose ActRec is S0: increments the
key of the generator if that generator's key is an Int.
This will cause undefined behavior if the generator's key is not an Int.
| ContArIncIdx, D(Int), S(FramePtr), NF
Increment the internal index in the Generator in S0, and return the new index
value.
| ContArUpdateIdx, ND, S(FramePtr) S(Int), NF
Updates the internal index of the generator with S1 if necessary, i.e. if S1
is larger than the index. S0 is the pointer to the embedded ActRec.
| LdContActRec, D(FramePtr), S(Obj), NF
Loads the Generator object's ActRec, given a pointer to the generator
object in S0.
| LdContResumeAddr, D(TCA|Nullptr), S(Obj), NF
Load the resume addr from the Generator in S0.
| StContArState<state>, ND, S(FramePtr), NF
Change the state of the Generator object which has frame pointer S0.
| LdContArValue, DParam(InitCell), S(FramePtr), NF
Loads 'value' from the Generator object whose ActRec is S0.
| StContArValue, ND, S(FramePtr) S(InitCell), CRc
Stores 'value' into the Generator object whose ActRec is S0. S1 is the
new value.
| LdContArKey, DParam(InitCell), S(FramePtr), NF
Loads 'key' from the Generator object whose ActRec is S0.
| StContArKey, ND, S(FramePtr) S(InitCell), CRc
Stores 'key' into the Generator object whose ActRec is S0. S1 is the
new value.
| AFWHBlockOn, ND, S(FramePtr) S(Obj), CRc
Establish a dependency between the parent AsyncFunctionWaitHandle object, whose
ActRec is given by S0, and child WaitableWaitHandle object referenced by S1.
| LdWHState, D(Int), S(Obj), NF
Loads the state of the WaitHandle in S0, which is a value from the wait
handle states in ext_asio.h. This instruction has undefined behavior if S0 is
not a WaitHandle.
| LdWHResult, DParam(InitCell), S(Obj), NF
Loads the result of the WaitHandle in S0. This instruction has undefined
behavior if S0 is not a WaitHandle, or if S0 is not finished.
| LdWHNotDone, D(Int), S(Obj), NF
Returns 1 if S0 is not finished, and 0 if S0 is finished.
| CountWHNotDone<local,count>, D(Int), S(FramePtr), B
Returns the number of unfinished awaitables contained in the contiguous
locals beginning at local and extending count locals, skipping all nulls.
A branch is taken if a non-Awaitable non-null value is encountered.
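A rough model of the counting loop (hypothetical types; the real op reads the
locals from the frame, and the non-Awaitable case is the branch to B):

    #include <cstdint>
    #include <optional>
    #include <vector>

    struct Awaitable { bool finished; };

    // Count unfinished awaitables among the "locals", skipping nulls
    // (modelled here as empty optionals).
    int64_t countWHNotDone(const std::vector<std::optional<Awaitable>>& locals) {
      int64_t notDone = 0;
      for (auto const& l : locals) {
        if (!l) continue;             // nulls are skipped
        if (!l->finished) ++notDone;  // a non-Awaitable value would branch instead
      }
      return notDone;
    }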
| LdAFWHActRec, D(FramePtr), S(Obj), NF
Loads the AsyncFunctionWaitHandle object's ActRec, given a pointer to the
AsyncFunctionWaitHandle object in S0.
| CreateSpecialImplicitContext, DSpecialIC, S(Int) S(Str|InitNull) S(Func), NF
Creates an implicit context for special circumstances, as if by calling the
create_special_implicit_context builtin function.
15. Debugging, instrumentation, and profiling
| IncStat, ND, C(Int), NF
Increment stat counter. S0 is the implementation defined stat counter index.
| IncProfCounter<TransID>, ND, NA, NF
Increment the profiling counter associated with translation TransID.
| IncCallCounter<callee>, ND, S(FramePtr), NF
Increment the counter associated with the last call, namely from
the function containing the previous translation in the call stack into the
current function given by `callee'.
| DbgAssertRefCount<AssertReason>, ND, S(Cell), NF
Assert that S0 has a valid refcount. If S0 has a reference counted type and
its count is implausible then execute a hardware trap instruction.
| DbgTraceCall<spOffset>, ND, S(FramePtr) S(StkPtr), NF
When EvalHHIRGenerateAsserts is on, this instruction is inserted at the
start of each region, to emit some sanity checking code.
| DbgAssertFunc, ND, S(FramePtr), NF
Assert that the func on the srckey is the current function in Frame S0.
If the assertion fails, execution is aborted via a hardware exception.
| DbgCheckLocalsDecRefd, ND, S(FramePtr), NF
In debug builds, if LocalsDecRefd flag is set on S0, causes runtime failure by
emitting a trap instruction. Otherwise, this instruction does nothing.
| DbgTrashStk<offset>, ND, S(StkPtr), NF
For debugging purposes. Store kTVTrashJITStk to the stack slot pointed to
by S0, at a given offset (in cells).
| DbgTrashFrame<offset>, ND, S(StkPtr), NF
For debugging purposes. Store kTVTrashJITFrame to kNumActRecCells stack
slots starting at the offset (in cells), and going toward higher memory
addresses.
| DbgTrashMem, ND, S(Mem), NF
For debugging purposes. Store kTVTrashJITHeap to a heap slot pointed to by
S0.
| RBTraceEntry, ND, NA, NF
| RBTraceMsg, ND, NA, NF
Ring buffer tracing.
| ZeroErrorLevel, D(Int), NA, NF
| RestoreErrorLevel, ND, S(Int), NF
Helper instructions for fast implementation of the PHP error silencing
operator (@foo()).
16. Iterators
| IterInit<IterData>, D(Bool), S(ArrLike,Obj) S(FramePtr), CRc|LA
| IterInitK<IterData>, D(Bool), S(ArrLike,Obj) S(FramePtr), CRc|LA
| LIterInit<IterData>, D(Bool), S(ArrLike) S(FramePtr), LA
| LIterInitK<IterData>, D(Bool), S(ArrLike) S(FramePtr), LA
<IterData> consists of three indices, iterId, keyId and valId. iterId is
the index of the iterator variable, keyId and valId are indices of local
variables.
Initializes the iterator variable whose index is given by iterId.
This instruction creates the appropriate iterator for the array or object that
S0 references, and rewinds the new iterator to its start. S0 points to the
stack frame containing the iterator and local variables with the indices
iterId, keyId and valId.
If the new iterator is at its end (i.e., has no elements to iterate over),
this instruction decrements the refcount of S0 and returns false; otherwise,
it stores a reference to S0 in the new iterator and returns true. If the
iterator is not at its end, then this instruction stores the iterator's first
value (and key) into the local variable with index valId (and keyId,
respectively).
The LIter variations only accept arrays and do not take ownership of their
base. Instead, the base is provided on each operation on the iterator, so
that we can avoid inc-ref-ing the base (in cases where that's safe).
This instruction has the ConsumesRC property because it either decrements the
reference count of S0 or stores a reference to S0 into the new iterator.
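A simplified model of the init sequence, using a std::map in place of an HHVM
array and ignoring ref-counting (hypothetical names only):

    #include <cstdint>
    #include <map>
    #include <string>

    struct Iter {
      std::map<int64_t, std::string>::const_iterator cur, end;
    };

    // IterInitK-like: returns false if the base is empty; otherwise positions
    // the iterator at the first element and copies its key/value into the
    // key/value "locals".
    bool iterInitK(Iter& it, const std::map<int64_t, std::string>& base,
                   int64_t& keyLocal, std::string& valLocal) {
      it.cur = base.begin();
      it.end = base.end();
      if (it.cur == it.end) return false;  // the base would be dec-ref'd here
      keyLocal = it.cur->first;
      valLocal = it.cur->second;
      return true;
    }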
| IterNext<IterData>, D(Bool), S(FramePtr), NF
| IterNextK<IterData>, D(Bool), S(FramePtr), NF
| LIterNext<IterData>, D(Bool), S(ArrLike) S(FramePtr), LA
| LIterNextK<IterData>, D(Bool), S(ArrLike) S(FramePtr), LA
<IterData> consists of three indices, iterId, keyId and valId. iterId is
the index of the iterator variable, keyId and valId are indices of local
variables. S0 points to the stack frame containing the iterator and local
variables with the indices iterId, keyId and valId.
Advances the iterator variable whose index is given by iterId.
If the iterator has reached the end, this instruction frees the iterator
variable and returns false; otherwise, it returns true. If the iterator has
not reached its end, then this instruction stores the iterator's next value
(and key) into the local variable with index valId (and keyId, respectively).
| IterFree<iterId>, ND, S(FramePtr), NF
Free the iterator variable with id `iterId` in the stack frame of S0.
For non-local iterators, this instruction will dec-ref the stored base.
| KillActRec, ND, S(FramePtr), NF
| KillLoc<localId>, ND, S(FramePtr), NF
| KillIter<iterId>, ND, S(FramePtr), NF
Mark a given ActRec, local, or iterator as being dead for the purposes of
memory effects. It no longer contains a meaningful value.
These operations are used for both correctness and performance. Inserting a
kill can help us do store-elimination; if a write is followed by a kill on
all paths before any reads, then it can be eliminated. However, we also use
kills to mark when a given location cannot be reused by load-elimination or
similar optimizations, in which case, the kill ops are required.
In debug builds, these ops will write poison values to these fields.
| GetDictPtrIter, D(PtrToElem), S(Dict) S(Int), NF
| GetVecPtrIter, D(PtrToElem), S(Vec) S(Int), NF
Returns a pointer to the elm S1 of a vanilla dict or vec S0. S1 does not need
to be a valid array position; for example, it may equal the size of the array
(so that the "elm" returned is the pointer-iteration end for S0).
| AdvanceDictPtrIter<offset>, D(PtrToElem), S(PtrToElem), NF
| AdvanceVecPtrIter<offset>, D(PtrToElem), S(PtrToElem), NF
Advances the pointer S0 to the array element with the given layout by `offset`
positions. `offset` is allowed to be negative.
| LdPtrIterKey<T>, DParam(Int|Str), S(PtrToElem), NF
| LdPtrIterVal<T>, DParam(InitCell), S(PtrToElem), NF
Loads the key or val from the array element pointed to by S0. S0 must be a
valid elm; that is, it can't point to the end of the array data. LdPtrIterKey
can only be used for mixed elms, but LdPtrIterVal supports mixed and packed.
T is used to type the result. For LdPtrIterKey, it must be a valid type for
the array's keys - i.e., a subtype of TInt|TStr.
| EqPtrIter, D(Bool), S(PtrToElem) S(PtrToElem), NF
Compares two pointer iterators for equality.
17. Member instruction support
| LdMIStateTempBaseAddr, D(PtrToMISTemp), NA, NF
Returns a pointer to the tvTempBase field within the current MInstrState.
| LdMBase, DParam(Lval), NA, NF
Load the current value of the member base register.
| StMBase, ND, S(Lval), NF
Store a new value to the member base register. It is illegal for any
instruction other than StMBase or InterpOne (when interpreting a member
instruction) to modify the member base register.
| CheckMROProp, ND, NA, B
If the roProp field of MInstrState is false, branch.
| StMROProp, ND, S(Bool), NF
Set the roProp field of MInstrState to S0.
| FinishMemberOp, ND, NA, NF
Mark the end of a member operation. This has no effect at runtime but exists
to provide information for certain optimizations.
All of the remaining opcodes in this section are simple wrappers around helper
functions (specified in S0) to perform the corresponding vector operation. If
S1 is a ConstCls it represents the context class for the operation.
SetElem, SetProp, and SetNewElem are used to implement part of the SetM hhbc
opcode, which almost always pushes its first stack input or a StaticStr as its
stack result. The combinations of input types that cause SetM to push anything
other than those two values are vanishingly rare in correct PHP programs, so
these three instructions have been optimized for the common cases. SetNewElem
and SetProp have no destination, allowing the compiler to predict that the
SetM's output will be the same as its input (and optimize accordingly). If that
turns out to not be the case at runtime, the instruction will throw an
InvalidSetMException. The exception will hold a Cell containing the value the
SetM should push on the stack instead of its input value. The runtime is
responsible for catching this exception, finishing execution of the SetM
instruction, pushing the value from the exception on the stack, and proceeding
as appropriate (most likely with a side exit to the next bytecode instruction,
since it has pushed an unexpected type onto the stack).
SetElem is similar to SetProp and SetNewElem but can also be used for setting
characters within strings. When given a string base and a valid offset, SetElem
returns a string representation of the newly inserted character. In all other
cases it returns nullptr or throws an InvalidSetMException. It will throw this
exception when it detects invalid input types, or when trying to set a string
offset that would grow the string beyond the maximum supported size.
The input types that will cause the errors described above are listed here:
SetNewElem will fail if the base is not a subtype of {Null|Str|Arr|Obj} and not
Bool<false>.
SetElem has the same base constraint as SetNewElem. In addition, the key must
not be a subtype of {Arr|Obj}.
SetProp will fail if the base is not a subtype of {Obj|Null}.
Any instructions that take a pointer to an MInstrState struct use the various
fields of that struct for holding intermediate values.
| BaseG, D(LvalToGbl|LvalToConst), S(Str), NF
Get a base from the global named S0. If it is not a defining BaseG, it can
also return the init_null_variant.
| PropX, DPropLval, S(Cell) S(Cell) S(PtrToMISTemp|Nullptr), NF
Look up the intermediate property in S0, with key S1. An exception is thrown if
the property is Readonly when it was required to be Mutable.
| PropQ, DPropLval, S(Cell) S(StaticStr) S(PtrToMISTemp|Nullptr), NF
A nullsafe version of PropX, returns null if the base S0 is null. An exception
is thrown if the property is Readonly when it was required to be Mutable.
| PropDX, DPropLval, S(Cell) S(Cell) S(PtrToMISTemp|Nullptr), NF
Like PropX, but used for intermediate property lookups that may modify the
base. An exception is thrown if the property is Readonly when it was
required to be Mutable.
| CGetProp, D(InitCell), S(Cell) S(Cell), PRc
Get property with key S1 from S0.
| CGetPropQ, D(InitCell), S(Cell) S(StaticStr), PRc
A nullsafe version of CGetProp, returns null if the base S0 is null.
| SetProp, ND, S(Cell) S(Cell) S(Cell), NF
Set property with key S1 in S0 to S2.
| UnsetProp, ND, S(Cell) S(Cell), NF
Unset an object property.
| SetOpProp<op>, D(InitCell), S(Cell) S(Cell) S(Cell), PRc
Set op property with key S1 in base S0, using S2 as the right hand side.
| IncDecProp<op>, D(InitCell), S(Cell) S(Cell), PRc
Increment/decrement property with key S1 in base S0.
| IssetProp, D(Bool), S(Cell) S(Cell), NF
Returns true iff the property with key S1 in base S0 is set.
| ElemX, D(InitCell), S(Cell) S(Cell), NF
Get intermediate element with key S1 from base S0. The base will not be
modified and the result will not be inc-ref-ed.
| CheckDictKeys<T>, ND, S(Dict), B
Check that the given mixed array is free of tombstones and that all of its
elements' keys match the type T. If any check fails, branch to block B.
Like CheckDictOffset, this check is allowed to have false negatives -
it may fail even if the array has keys of the given type.
| CheckArrayCOW, DCOW, S(ArrLike), B|PRc|CRc|LP
Check that S0 has a refcount of exactly 1; if not, branch to B.
| CopyArray, DCOW, S(ArrLike), PRc|LP
Make a copy of S0 (regardless of its refcount). The resultant copy will
always be counted with a ref-count of 1. The array must be of a kind that
allows for counted variants.
| ProfileDictAccess, ND, S(Dict) S(Int,Str), NF
Profile access of the element keyed by S1 in S0, tracking sizes and offsets.
| CheckDictOffset<pos>, ND, S(Dict) S(Int,Str), B
Check that `pos' is within the usage bounds of S0 (including tombstones), and
that S1 exactly matches the element key of S0 at `pos'. If any of the checks
fail, branch to B. This check is allowed to have false negatives.
| ProfileKeysetAccess, ND, S(Keyset) S(Int,Str), NF
Profile access of the element keyed by S1 in S0, tracking sizes and offsets.
| CheckKeysetOffset<pos>, ND, S(Keyset) S(Int,Str), B
Check that `pos' is within the usage bounds of S0 (including tombstones), and
that S1 exactly matches the element key of S0 at `pos'. If any of the checks
fail, branch to B. This check is allowed to have false negatives.
| ProfileArrayCOW, ND, S(ArrLike), LA
Profile whether S0 would require a COW before being mutated.
| CheckMissingKeyInArrLike, ND, S(ArrLike) S(StaticStr), B
Uses the StrKeyTable to check if S1 is guaranteed to be missing in S0.
If S1 may be present, branches to block B. If we branch here, the key may or
may not be present.
| ElemDictD, DElemLval, S(Lval) S(Int,Str), NF
| ElemDictU, DElemLval, S(Lval) S(Int,Str), NF
Similar to ElemDX and ElemUX, but specialized for when the base S0 is a
dict and the key S1 is an int/str.
| ElemDictK, DElemLvalPos, S(Dict) S(Int,Str) S(Int) S(Dict), NF
Returns an lval to the element of dict S0 at the known position S2
(corresponding to the key S1). S3 is S0 with a potentially more refined type
and is used to calculate the elem type. It is not used at runtime.
| ElemDX, D(LvalToElemOrConst), S(Lval) S(Cell), NF
Like ElemX, but used for intermediate element lookups that may modify the
base.
| ElemUX, D(LvalToElemOrConst), S(Lval) S(Cell), NF
Like ElemX, but used for intermediate element lookups that may modify the
base as part of an unset operation.
| DictGet, DDictElem, S(Dict) S(Int,Str), NF
Get element with key S1 from base S0, throwing if the element is not present.
| DictGetQuiet, DDictElem, S(Dict) S(Int,Str), NF
Get element with key S1 from base S0, returning null if the element is not
present.
| DictGetK, DDictElem, S(Dict) S(Int,Str) S(Int), NF
Like DictGet, but the element for S1 is at a known position S2 in S0.
| KeysetGet, DKeysetElem, S(Keyset) S(Int,Str), NF
Get element with key S1 from base S0, throwing if the element is not present.
| KeysetGetQuiet, DKeysetElem, S(Keyset) S(Int,Str), NF
Get element with key S1 from base S0, returning null if the element is not
present.
| KeysetGetK, DKeysetElem, S(Keyset) S(Int,Str) S(Int), NF
Like KeysetGet, but the element for S1 is at a known position S2 in S0.
| StringGet, D(StaticStr), S(Str) S(Int), PRc
Get string representing character at position S1 from base string S0. Raises
a notice if the position is out of bounds.
| MapGet, D(InitCell), S(Obj) S(Int,Str), PRc
Get element with key S1 from base S0.
| CGetElem, D(InitCell), S(Cell) S(Cell), PRc
Get element with key S1 from S0.
| MemoGetStaticValue<func,T>, DParam(InitCell), NA, B
Get the memo value associated with the static function "func". If the value
is not present, branch. The returned value is not inc-reffed. This op can
only be used inside a memoize wrapper.
| MemoGetStaticCache<func,keys,T>, DParam(InitCell), S(FramePtr), B
Perform a lookup on the memo cache associated with the static function
"func". The keys for the lookup are read from the locals on the frame pointed
to by S0 (which must be ints or strings). If the lookup fails, branch. The
returned value is not inc-reffed. This op can only be used inside a memoize
wrapper.
| MemoGetLSBValue<func,T>, DParam(InitCell), S(Cls), B
Get the memo value associated with the static function "func" and late static
bound class S0. If the value is not present, branch. The returned value is not
inc-reffed. This op can only be used inside a memoize wrapper.
| MemoGetLSBCache<func,keys,T>, DParam(InitCell), S(FramePtr) S(Cls), B
Perform a lookup on the memo cache associated with the static function
"func" and late static bound class S1. The keys for the lookup are read from
the locals on the frame pointed to by S0 (which must be ints or strings). If
the lookup fails, branch. The returned value is not inc-reffed. This op can
only be used inside a memoize wrapper.
| MemoGetInstanceValue<slot,func,T>, DParam(InitCell), S(Obj), B
Get the memo value at the specified memo slot on S0. If the value is not
present, branch. The returned value is not inc-reffed. This op can only be
used inside a memoize wrapper.
| MemoGetInstanceCache<func,keys,T>, DParam(InitCell), S(FramePtr) S(Obj), B
Perform a lookup on the memo cache at the specified memo slot on S1. The keys
for the lookup are read from the locals on the frame pointed to by S0 (which
must be ints or strings). If the lookup fails, branch. The returned value is
not inc-reffed. This op can only be used inside a memoize wrapper.
| MemoSetStaticValue<func>, ND, S(InitCell), NF
Set S0 as the memo value associated with the static function "func". Store
the value, overwriting any previous value, with appropriate ref-count
manipulations. This op can only be used inside a memoize wrapper.
| MemoSetStaticCache<func,keys>, ND, S(FramePtr) S(InitCell), NF
Store S1 in the memo cache associated with the static function "func". The
keys for the lookup are read from the locals on the frame pointed to by S0
(which must be ints or strings). Store the value, overwriting any previous
value, with appropriate ref-count manipulations. This op can only be used
inside a memoize wrapper.
| MemoSetLSBValue<func>, ND, S(InitCell) S(Cls), NF
Set S0 as the memo value associated with the static function "func" and
late static bound class S1. Store the value, overwriting any previous
value, with appropriate ref-count manipulations. This op can only be used
inside a memoize wrapper.
| MemoSetLSBCache<func,keys>, ND, S(FramePtr) S(Cls) S(InitCell), NF
Store S2 in the memo cache associated with the static function "func" and
late static bound class S1. The keys for the lookup are read from the
locals on the frame pointed to by S0 (which must be ints or strings).
Store the value, overwriting any previous value, with appropriate
ref-count manipulations. This op can only be used inside a memoize wrapper.
| MemoSetInstanceValue<slot,func>, ND, S(Obj) S(InitCell), NF
Set S1 as the memo value at the specified memo slot on S0. Store the value,
overwriting any previous value, with appropriate ref-count
manipulations. This op can only be used inside a memoize wrapper.
| MemoSetInstanceCache<slot,func,keys>, ND, S(FramePtr) S(Obj) S(InitCell), NF
Store S2 in the memo cache at the specified memo slot on S1. Store the value,
overwriting any previous value, with appropriate ref-count
manipulations. This op can only be used inside a memoize wrapper.
| InitObjMemoSlots<class>, ND, S(Obj), NF
Initialize the memoization instance slots for object S0 of the given class.
| DictSet, DArrLikeSet, S(Dict) S(Int,Str) S(InitCell), PRc|CRc
Set element with key S1 in S0 to S2. The dest will be a new Dict that should
replace S0.
| BespokeGet, DBespokeElemUninit, S(Vec,Dict,Keyset) S(Int,Str), LA
Get element with key S1 in the array S0, which may have an arbitrary layout.
This op returns TUninit if the key is not in the array.
| BespokeGetThrow, DBespokeElem, S(Vec,Dict,Keyset) S(Int,Str), LA
Get element with key S1 in the array S0, which may have an arbitrary layout.
This op throws if the key is not in the array.
| BespokeElem, DElemLval, S(Lval) S(Int,Str) C(Bool), LA
Get a half-lval to an element of type T with key S1 in the array S0, which
may have an arbitrary layout. The op will copy or escalate S0 as needed,
with ElemInt/ElemStr semantics.
If the key is missing in the array, we'll use S2 to determine what to do.
If S2 is true, we'll throw; else, we'll return an immutable lval to null.
| BespokeSet, DArrLikeSet, S(Vec,Dict,Keyset) S(Int,Str) S(InitCell), LA|PRc|CRc
Set element with key S1 in the array S0, which may have an arbitrary layout.
This op has SetMove semantics; it consumes a refcount on the input array and
produces one on the output, and does no refcounting on the value S2.
| BespokeUnset, DArrLikeUnset, S(Vec,Dict,Keyset) S(Int,Str), LA|PRc|CRc
Unset element with key S1 in the array S0, which may have an arbitrary layout.
| BespokeAppend, DArrLikeAppend, S(Vec,Dict,Keyset) S(InitCell), LA|PRc|CRc
Append the value S1 to the array S0, which may have an arbitrary layout.
This op has AppendMove semantics; it consumes a refcount on the input array
and produces one on the output, and does no refcounting on the value S1.
| BespokeIterFirstPos, D(Int), S(ArrLike), LA
Obtain the pos corresponding to the first valid element in the non-empty
array S0, which may have an arbitrary layout.
| BespokeIterLastPos, D(Int), S(ArrLike), LA
Obtain the pos corresponding to the last valid element in the non-empty
array S0, which may have an arbitrary layout.
| BespokeIterEnd, D(Int), S(ArrLike), LA
Returns the "end" iterator position for the given array. Unlike the "last"
iterator position, the end is never a valid iterator position.
| BespokeIterGetKey, DBespokePosKey, S(ArrLike) S(Int), LA
Obtain the key at the valid pos S1 in the array S0.
| BespokeIterGetVal, DBespokePosVal, S(ArrLike) S(Int), LA
Obtain the value at the valid pos S1 in the array S0.
| BespokeEscalateToVanilla<Layout>, DEscalateToVanilla, SBespokeArr CStr, PRc
Escalate the bespoke array S0 to a vanilla array for the reason S1.
| LdMonotypeDictTombstones, D(Int), SMonotypeDict, NF
Returns the number of tombstones in the given MonotypeDict's value array.
The MonotypeDict's IterEnd is equal to its size plus its tombstone count.
| LdMonotypeDictKey, DBespokePosKey, SMonotypeDict S(Int), NF
| LdMonotypeDictVal, DBespokePosVal, SMonotypeDict S(Int), NF
Specializations of BespokeIterGetKey and BespokeIterGetVal for when the base
is a monotype dict. The GetKey and GetVal ops require that S1 is a valid
iterator position for S0.
| LdMonotypeVecElem, DBespokeElem, SMonotypeVec S(Int), NF
Loads the element of the monotype vec in S0 at offset S1. This instruction
assumes that the vec actually contains an element at that offset (i.e. the
index is within bounds).
| StructDictUnset, DArrLikeUnset, SStructDict C(StaticStr), PRc|CRc
Unset element with key S1 in the struct-layout dict S0. This op has move
semantics: it consumes a refcount on S0 and produces one on the output.
| StructDictSlot, D(Int), SStructDict S(Str), B
Calculate the slot corresponding to key S1 in struct dict S0, branching if
that key is not present.
| StructDictElemAddr, D(LvalToElem), SStructDict S(Str) S(Int) SStructDict, NF
Calculate the pointer to the value in struct dict S0 corresponding to slot
S2. S1 is the corresponding key used to calculate the slot, and S3 is the
struct dict to use for type calculations (if the struct dict has been COWed,
this will correspond to the pre-COW version). S1 and S3 are only used for
type calculations and not used at runtime.
| StructDictAddNextSlot, ND, SStructDict S(Int), NF
Mark the slot S1 as being used in struct dict S0. The slot must not have been
previously in use (its corresponding value must have been Uninit).
| StructDictTypeBoundCheck, DStructTypeBound, S(InitCell) SStructDict S(Int), B|P
Check if S0 passes the type bound associated with slot S2 in struct dict
S1. If not, branch. The returned value is a passthrough of S0, possibly
refined with what we know statically of the type-bound. This is like a
CheckType, but the type isn't known statically.
| StructDictSlotInPos, D(Int), SStructDict S(Int), NF
Returns the slot at iterator position S1 in the struct dict S0.
| LdStructDictKey, DBespokePosKey, SStructDict S(Int), NF
| LdStructDictVal, DBespokePosVal, SStructDict S(Int), NF
Returns the key or value at slot S1 in the struct dict S0. This slot must be
known to be present. (The result of StructDictSlotInPos is always present.)
| LdTypeStructureVal, DBespokeElem, STypeStructure S(Str), B|LA
Specialization of BespokeGet for when the base is a type structure. Get
element with key S1 in the type structure S0. Branches if the key is not in
the type structure.
| LdTypeStructureValCns<key>, DTypeStructElem, STypeStructure, LA
Simplification of LdTypeStructureVal for when the key is a constant value.
| MapSet, ND, S(Obj) S(Int,Str) S(InitCell), CRc
Set element with key S1 in S0 to S2.
| VectorSet, ND, S(Obj) S(Int,Str) S(InitCell), CRc
Set element with key S1 in S0 to S2.
| SetElem<T>, DSetElem, S(Lval) S(Cell) S(InitCell), NF
Set element with key S1 in S0 to S2. SetElem returns a Nullptr in the common
case, where the logical result of the hhbc SetM is its right hand side. In
the case of string bases, the SetM returns a new string containing the newly
inserted character. So the return value of this instruction is Nullptr unless
SetM needed to return a static string. The type param is the known type of
the base that S0 points to.
Furthermore, in the case of "invalid offsets", SetElem may throw an
InvalidSetMException (see discussion above).
| SetRange, ND, S(Lval) S(Int) S(Cell) S(Int) S(Int), NF
| SetRangeRev, ND, S(Lval) S(Int) S(Cell) S(Int) S(Int), NF
Perform a range set or reverse range set operation, with the same arguments
and semantics as the RangeSet bytecode instruction.
| UnsetElem, ND, S(Lval) S(Cell), NF
Unsets the element at key S1 in the base S0.
| SetOpElem<op>, D(InitCell), S(Lval) S(Cell) S(Cell), PRc
Set op elem with key S1 in base S0, using S2 as the right hand side.
| IncDecElem, D(InitCell), S(Lval) S(Cell), PRc
Increment/decrement element with key S1 in base S0.
| SetNewElem, ND, S(Lval) S(InitCell), NF
Append the value in S1 to S0.
| SetNewElemDict, ND, S(Lval) S(InitCell), NF
| SetNewElemVec, ND, S(Lval) S(InitCell), NF
| SetNewElemKeyset, ND, S(Lval) S(Int,Str), NF
Specializations of SetNewElem for pointers to dicts, vecs, and keysets.
| DictIsset, D(Bool), S(Dict) S(Int,Str), NF
Returns true iff the element at key S1 in the base S0 is set.
| KeysetIsset, D(Bool), S(Keyset) S(Int,Str), NF
Returns true iff the element at key S1 in the base S0 is set.
| StringIsset, D(Bool), S(Str) S(Int), NF
Returns true iff the string S0 has a character at position S1.
| VectorIsset, D(Bool), S(Obj) S(Int), NF
Returns true iff the element at key S1 in the base S0 is set.
| PairIsset, D(Bool), S(Obj) S(Int), NF
Returns true iff the element at key S1 in the base S0 is set.
| MapIsset, D(Bool), S(Obj) S(Int,Str), NF
Returns true iff the element at key S1 in the base S0 is set.
| IssetElem, D(Bool), S(Cell) S(Cell), NF
Returns true iff the element at key S1 in S0 is set.
| CheckRange, D(Bool), S(Int) S(Int), NF
Returns true iff S0 is in the range [0, S1).
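When S1 is known to be non-negative, the two-sided test collapses to a single
unsigned comparison, as in this sketch (not necessarily what the backend
emits):

    #include <cstdint>

    bool checkRange(int64_t idx, int64_t limit) {
      // A negative idx wraps to a huge unsigned value, so one unsigned
      // compare covers both 0 <= idx and idx < limit.
      return static_cast<uint64_t>(idx) < static_cast<uint64_t>(limit);
    }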
| ThrowArrayIndexException, ND, S(ArrLike) S(Int), T
Throws an OutOfBoundsException if S1 is an undefined index for the array S0.
| ThrowArrayKeyException, ND, S(Dict) S(Str), T|LA
Throws an OutOfBoundsException if S1 is an undefined key for the dict S0.
| ThrowOutOfBounds, ND, S(ArrLike|Obj) S(Cell), T|LA
Throws an OutOfBoundsException corresponding to an access of S0 with the key
S1.
| ThrowInvalidArrayKey, ND, S(ArrLike) S(Cell), T|LA
Throws an InvalidArgumentException corresponding to an access of S0 with the
key S1, which has a type invalid for that array.
| ThrowInvalidOperation, ND, S(Str), T
Throws an InvalidOperationException with a message indicating S0.
| ThrowDivisionByZeroException, ND, NA, T
Throws a DivisionByZeroException.
| ThrowLateInitPropError, ND, S(Cls) S(Str) S(Bool), T
Throws an InvalidOperationException indicating an access of an unset LateInit
property. S0 is the class the property was declared on. S1 is the property
name. S2 is true if it's a static property, false otherwise.
| ThrowParameterWrongType<expectedType, func, argNum>, ND, S(Cell), T
Throws a RuntimeException if calling a function with an argument that has the
wrong type.
| ThrowMustBeMutableException<cls>, ND, S(Str), T
Throws an InvalidOperationException indicating a readonly semantics violation
where the property was required to be mutable. cls is the class the property
was declared on. S0 is the property name.
| ThrowMustBeReadonlyException<cls>, ND, S(Str), T
Throws an InvalidOperationException indicating a readonly semantics violation
where the property was required to be readonly. cls is the class the property
was declared on. S0 is the property name.
| ThrowLocalMustBeValueTypeException, ND, S(Str), T
Throws an InvalidOperationException indicating a readonly semantics violation
where the local was required to be a value type. S0 is the local name.
| ThrowMustBeValueTypeException<cls>, ND, S(Str), T
Throws an InvalidOperationException indicating a readonly semantics violation
where the property was required to be a value type. S0 is the property
name. cls is the class the property was declared on.
| ThrowMustBeEnclosedInReadonly<cls>, ND, S(Str), T
Throws an InvalidOperationException indicating a readonly semantics violation
where the property was required to be enclosed in a readonly expression. cls
is the class the property was declared on. S0 is the property name.
| ThrowCannotModifyReadonlyCollection, ND, NA, T
Throws an InvalidOperationException indicating a readonly semantics violation
where a collection is modified.
| ProfileType, ND, S(Cell), NF
Profile the type of S0.
| ProfileCall<rdsHandle>, ND, S(Func), NF
Profile the call to a function S0.
| ProfileMethod<rdsHandle>, ND, S(Cls) S(Func), NF
Profile the method S1 called with class context S0.
| ProfileProp, ND, C(StaticStr) C(StaticStr), NF
Profile the access to property S1 with base class S0.
| CheckVecBounds, ND, S(Vec) S(Int), B|LA
Checks that the index in S1 is within the bounds of the packed array or
vector array in S0. Branches to B if the index is out of bounds.
| LdVecElemAddr, DElemLvalPos, S(Vec) S(Int) S(Vec), NF
Loads the address of the element at index S1 of the vec array in S0. This
instruction assumes the array actually contains an element at that offset
(i.e., the array has the proper length). S2 is S0 with a potentially more
refined type and is used to calculate the elem type. It is not used at runtime.
| ReserveVecNewElem, D(Int), S(CountedVec), B
If there is room in the packed or vec array (which is assumed to be mutable),
increments the array size and returns the index of the new last element
(which you must initialize); else jumps to the taken branch.
| LdVecElem, DVecElem, S(Vec) S(Int), NF
Loads the element of the vec array in S0 at offset S1. This instruction
assumes that the vec actually contains an element at that offset (i.e., the vec
has the proper length).
| LdVectorSize, D(Int), S(Obj), NF
Returns the size of the given Vector collection in S0.
| ColIsEmpty, D(Bool), S(Obj), NF
| ColIsNEmpty, D(Bool), S(Obj), NF
Returns whether a collection instance is empty or not. S0 must be known to
be an instance of a collection class at compile time.
| VecFirst, DFirstElem, S(Vec), NF
Returns the first value from the packed or vec array in S0.
If the array is empty, it will return NULL.
| VecLast, DLastElem, S(Vec), NF
Returns the last value from the packed or vec array in S0.
If the array is empty, it will return NULL.
| DictFirst, DFirstElem, S(Dict), NF
Returns the first value from the mixed or dict array in S0.
If the array is empty, it will return NULL.
| DictLast, DLastElem, S(Dict), NF
Returns the last value from the mixed or dict array in S0.
If the array is empty, it will return NULL.
| DictFirstKey, DFirstKey, S(Dict), NF
Returns the first key from the mixed or dict array in S0.
If the array is empty, it will return NULL.
| DictLastKey, DLastKey, S(Dict), NF
Returns the last key from the mixed or dict array in S0.
If the array is empty, it will return NULL.
| KeysetFirst, DFirstElem, S(Keyset), NF
Returns the first value (key) from the keyset in S0.
If the array is empty, it will return NULL.
| KeysetLast, DLastElem, S(Keyset), NF
Returns the last value (key) from the keyset in S0.
If the array is empty, it will return NULL.
| IsLegacyArrLike, D(Bool), S(Vec|Dict), LA
Returns true iff the given array has the legacy bit set.
| ArrayMarkLegacyShallow, DModified(0), S(Vec,Dict), CRc|PRc|LA
| ArrayMarkLegacyRecursive, DModified(0), S(Vec,Dict), CRc|PRc|LA
| ArrayUnmarkLegacyShallow, DModified(0), S(Vec,Dict), CRc|PRc|LA
| ArrayUnmarkLegacyRecursive, DModified(0), S(Vec,Dict), CRc|PRc|LA
Set or unset the legacy bit on the array-like in S0, copying it if needed.
The recursive variants traverse through the array-like's transitive elements
but stop traversal at any non-array-like.
18. Exception/unwinding support
| BeginCatch, ND, NA, NF
Marks the beginning of a catch region. Exact behavior is implementation and
architecture specific.
| EndCatch<spOffset,mode,stublogue>, ND, S(FramePtr) S(StkPtr), T
Marks the end of a catch region and returns control to the unwinder. The
`spOffset' field represents a logical adjustment to S1 (in cells) to yield
the vm stack pointer; however, the stack pointer is not actually adjusted
before this instruction returns control to the unwinder. The unwinder
instead relies on fixup map information to find the appropriate stack
pointers. The `spOffset' is instead part of this instruction to facilitate
assertions and memory effect analysis.
If the `stublogue' flag is set, the native stack pointer is updated to reflect
the state prior to entering the stublogue context.
| UnwindCheckSideExit, ND, S(FramePtr) S(StkPtr), B
Branches to B if the currently executing catch region should return control
to the unwinder rather than side exiting. Used to control behavior in catch
traces for the InvalidSetMException and TVCoercionException situations.
| LdUnwinderValue<T>, DParam(Cell), NA, PRc
Loads the value contained by the current unwinder exception.
| EnterTCUnwind<spOff,teardown>, ND, S(Obj), CRc|T
Enters tc_unwind_resume by doing a side enter, i.e. skipping
the Itanium ABI. Stores the exception given by S0 to UnwindRDS's exn
field, as well as true to the sideEnter field.
If teardown is set, it notifies tc_unwind_resume to also tear down the locals.
19. Function prologues
| EnterPrologue, ND, NA, NF
Enter prologue context, which operates in the same mode as unique stubs.
Makes sure the native stack is properly aligned.
| ExitPrologue, ND, NA, NF
Exit prologue context. Undoes the work of EnterPrologue and prepares for
a jump to FuncEntry.
| EnterTranslation, ND, NA, NF
Enter translation context. Makes sure the native stack is properly set up.
| CheckStackOverflow, ND, S(StkPtr), NF
Check if the stack depth has exceeded its limit. If it has, jump to the
stack overflow helper stub, which will throw.
| CheckSurpriseFlagsEnter<func,argc>, ND, S(FramePtr), NF
Test the implementation-specific surprise flags. If they're nonzero, call
the function enter helper.
| CheckSurpriseAndStack<func,args>, ND, S(FramePtr), NF
Test surprise flags and stack overflow at the same time.
| LdARFlags, D(Int), S(FramePtr), NF
Load the flags stored on the ActRec pointed to by the frame
pointer S0. Bits not defined as flags may contain arbitrary garbage.
| LdTVAux<ValidBits>, D(Int), S(Cell), NF
Load the value of m_aux from the TypedValue S0. ValidBits is a mask
specifying which bits are allowed to be set. The runtime may ignore it.
Note that when we pass TypedValues around in registers, we usually use a byte
register for the m_type member, and thus ignore m_aux. LdTVAux is only valid
when we know that S0's m_type and m_aux were both materialized into the same
64-bit register.
/* Local Variables: */
/* fill-column: 79 */
/* End: */
vim:textwidth=80 |
|
hhvm/hphp/doc/limitations | <h2>Known Limitations</h2>
1. The PHP setlocale() function is implemented in terms of setlocale(3), so it
is process-wide and will affect all requests as well as, for example, internal
logging. One way to mitigate the effect is to call setlocale(LC_ALL, "C"); at
the end of a request to restore the default locale, but this does not change
the fact that PHP setlocale() is process-wide. |
|
hhvm/hphp/doc/Makefile | help:
@echo "'make daemon' to start up doc server daemon"
@echo "'make server' to start up doc server"
@echo "'make clobber' to clean up directory"
daemon:
sudo ../hhvm/hhvm -m daemon -v "Server.DefaultDocument=index.php" -v "Server.SourceRoot=`pwd`"
server:
sudo ../hhvm/hhvm -m server -v "Server.DefaultDocument=index.php"
DEBUGGER_CMDS = a b c d e f g h i j k l m n o p q r step thread u v w x y z "\&" "!"
# update debugger docs
debugger:
@../hhvm/hhvm -m debug --debug-cmd "help start" > debugger.start
@../hhvm/hhvm -m debug --debug-cmd "help" > debugger.cmds
@../hhvm/hhvm -m debug \
$(patsubst %, --debug-cmd "'h %'", $(DEBUGGER_CMDS)) > debugger.refs
clean: clobber
clobber:
@rm -f *~ |
|
Hierarchical Data Format | hhvm/hphp/doc/mime.hdf | StaticFile {
Extensions {
ai = application/postscript
aif = audio/x-aiff
aifc = audio/x-aiff
aiff = audio/x-aiff
asc = text/plain
atom = application/atom+xml
au = audio/basic
avi = video/x-msvideo
bcpio = application/x-bcpio
bin = application/octet-stream
bmp = image/bmp
cdf = application/x-netcdf
cgm = image/cgm
class = application/octet-stream
cod = application/vnd.rim.cod
cpio = application/x-cpio
cpt = application/mac-compactpro
csh = application/x-csh
css = text/css
dcr = application/x-director
dir = application/x-director
djv = image/vnd.djvu
djvu = image/vnd.djvu
dll = application/octet-stream
dmg = application/octet-stream
dms = application/octet-stream
doc = application/msword
docx = application/vnd.openxmlformats-officedocument.wordprocessingml.document
dotx = application/vnd.openxmlformats-officedocument.wordprocessingml.template
dtd = application/xml-dtd
dvi = application/x-dvi
dxr = application/x-director
eps = application/postscript
etx = text/x-setext
exe = application/octet-stream
ez = application/andrew-inset
flv = video/x-flv
gif = image/gif
gram = application/srgs
grxml = application/srgs+xml
gtar = application/x-gtar
gz = application/x-gzip
hdf = application/x-hdf
hqx = application/mac-binhex40
htm = text/html; charset=UTF-8
html = text/html; charset=UTF-8
ice = x-conference/x-cooltalk
ico = image/x-icon
ics = text/calendar
ief = image/ief
ifb = text/calendar
iges = model/iges
igs = model/iges
jad = text/vnd.sun.j2me.app-descriptor
jnlp = application/x-java-jnlp-file
jpe = image/jpeg
jpeg = image/jpeg
jpg = image/jpeg
js = application/x-javascript
kar = audio/midi
latex = application/x-latex
lha = application/octet-stream
lzh = application/octet-stream
m3u = audio/x-mpegurl
m4u = video/vnd.mpegurl
man = application/x-troff-man
mathml = application/mathml+xml
me = application/x-troff-me
mesh = model/mesh
mid = audio/midi
midi = audio/midi
mif = application/vnd.mif
mov = video/quicktime
movie = video/x-sgi-movie
mp2 = audio/mpeg
mp3 = audio/mpeg
mp4 = video/mp4
mpe = video/mpeg
mpeg = video/mpeg
mpg = video/mpeg
mpga = audio/mpeg
ms = application/x-troff-ms
msh = model/mesh
msi = application/x-msi
mxu = video/vnd.mpegurl
nc = application/x-netcdf
oda = application/oda
ogg = application/ogg
pbm = image/x-portable-bitmap
pdb = chemical/x-pdb
pdf = application/pdf
pgm = image/x-portable-graymap
pgn = application/x-chess-pgn
png = image/png
pnm = image/x-portable-anymap
potx = application/vnd.openxmlformats-officedocument.presentationml.template
ppm = image/x-portable-pixmap
ppsx = application/vnd.openxmlformats-officedocument.presentationml.slideshow
ppt = application/vnd.ms-powerpoint
pptx = application/vnd.openxmlformats-officedocument.presentationml.presentation
ps = application/postscript
qt = video/quicktime
ra = audio/x-pn-realaudio
ram = audio/x-pn-realaudio
ras = image/x-cmu-raster
rdf = application/rdf+xml
rgb = image/x-rgb
rm = application/vnd.rn-realmedia
roff = application/x-troff
rtf = text/rtf
rtx = text/richtext
sgm = text/sgml
sgml = text/sgml
sh = application/x-sh
shar = application/x-shar
silo = model/mesh
sit = application/x-stuffit
skd = application/x-koan
skm = application/x-koan
skp = application/x-koan
skt = application/x-koan
sldx = application/vnd.openxmlformats-officedocument.presentationml.slide
smi = application/smil
smil = application/smil
snd = audio/basic
so = application/octet-stream
spl = application/x-futuresplash
src = application/x-wais-source
sv4cpio = application/x-sv4cpio
sv4crc = application/x-sv4crc
svg = image/svg+xml
swf = application/x-shockwave-flash
t = application/x-troff
tar = application/x-tar
tcl = application/x-tcl
tex = application/x-tex
texi = application/x-texinfo
texinfo = application/x-texinfo
tgz = application/x-tar
tif = image/tiff
tiff = image/tiff
tr = application/x-troff
tsv = text/tab-separated-values
txt = text/plain
ustar = application/x-ustar
vcd = application/x-cdlink
vrml = model/vrml
vxml = application/voicexml+xml
wav = audio/x-wav
wbmp = image/vnd.wap.wbmp
wbxml = application/vnd.wap.wbxml
wml = text/vnd.wap.wml
wmlc = application/vnd.wap.wmlc
wmls = text/vnd.wap.wmlscript
wmlsc = application/vnd.wap.wmlscriptc
wrl = model/vrml
xbm = image/x-xbitmap
xht = application/xhtml+xml
xhtml = application/xhtml+xml
xls = application/vnd.ms-excel
xlsx = application/vnd.openxmlformats-officedocument.spreadsheetml.sheet
xltx = application/vnd.openxmlformats-officedocument.spreadsheetml.template
xml = application/xml
xpi = application/x-xpinstall
xpm = image/x-xpixmap
xsl = application/xml
xslt = application/xslt+xml
xul = application/vnd.mozilla.xul+xml
xwd = image/x-xwindowdump
xyz = chemical/x-xyz
zip = application/zip
}
} |
hhvm/hphp/doc/options.compiled | <h2>Configurable Options for Compiled Program</h2>
Note that, in this documentation, "*" is used for names that the system does
not care about. In reality, one may always use a string or a name to better
describe a node's purpose. Listed values are defaults, unless they are
examples.
= Logging
Log {
Level = None (default) | Error | Warning | Info | Verbose
NoSilencer = false
AlwaysLogUnhandledExceptions = false
RuntimeErrorReportingLevel = 8191
ForceErrorReportingLevel = 0
Header = false
HeaderMangle = 0
NativeStackTrace = true
MaxMessagesPerRequest = -1
- Level, NoSilencer, AlwaysLogUnhandledExceptions, RuntimeErrorReportingLevel,
ForceErrorReportingLevel
These settings control different logging levels. NoSilencer means errors are
logged even when the silencer operator @ is used. Unhandled exceptions are
PHP fatal errors, and AlwaysLogUnhandledExceptions makes sure they get
logged even if a user's error handler is installed for them.
RuntimeErrorReportingLevel is provided for compatibility with PHP.
ForceErrorReportingLevel is a bitmask that will be ORed with
RuntimeErrorReportingLevel to determine the actual error reporting level.
- Header, NativeStackTrace
These settings control log line formats. Header includes timestamp, process id,
thread id, request id (counted from 1 since server starts), message id
(counted from 1 since request started) and extra header text from command line
option (see util/logger.cpp for implementation).
There are two kinds of stacktraces: (1) C++ stacktrace, which is hex-encoded
and printed on every line of logging right after header. These stacktraces can
be translated into human readable frames by running "-m translate" with the
compiled program. (2) PHP stacktrace from code injection. Generated C++ code
injects stacktrace preparation code into every frame of functions and methods.
- HeaderMangle
This setting controls logging of potentially malicious headers. If
HeaderMangle is greater than 0, then HipHop will log one in every n
requests where a header collision has occurred. Such collisions
almost certainly indicate a malicious attempt to set headers which are
either set or filtered by a proxy.
- MaxMessagesPerRequest
Controls maximum number of messages each request can log, in case some pages
flood error logs.
# error log settings
UseLogFile = true
File = filename
UseSyslog = false
# access log settings
AccessLogDefaultFormat = %h %l %u %t \"%r\" %>s %b
Access {
* {
File = filename
Format = some Apache access log format string
}
* {
File = another filename
Format = some Apache access log format string
}
}
# admin server logging
AdminLog {
File = filename
Format = %h %t %s %U
}
}
= Error Handling
ErrorHandling {
# Bitmask of error constants to upgrade to E_USER_ERROR. Only E_WARNING,
# E_USER_WARNING, E_NOTICE, and E_USER_NOTICE are supported.
UpgradeLevel = 0 (default)
CallUserHandlerOnFatals = false
NoticeFrequency = 1 # 1 out of these many notices to log
WarningFrequency = 1 # 1 out of these many warnings to log
}
= Resource Limits
ResourceLimit {
CoreFileSize = 0 # in bytes
MaxSocket = 0
SocketDefaultTimeout = 60 # in seconds
MaxSQLRowCount = 0
SerializationSizeLimit = 0
}
= Server
PidFile = pid filename
# $_SERVER['name'] = value
ServerVariables {
name = value
}
# $_ENV['name'] = value
EnvVariables {
name = value
}
Server {
Host = www.default_domain.com
IP = 0.0.0.0
Port = 80
Type = proxygen | fastcgi
ThreadCount = 50
ThreadDropCacheTimeoutSeconds = 0
ThreadJobLIFO = false
SourceRoot = path to source files and static contents
IncludeSearchPaths {
* = some path
* = another path
}
# Recommended to turn this on when all the file paths in the file invoke
# table are relative for faster dynamic file inclusion.
AlwaysUseRelativePath = false
RequestTimeoutSeconds = -1
RequestMemoryMaxBytes = 0
# maximum POST Content-Length
MaxPostSize = 10MB
# maximum memory size for image processing
ImageMemoryMaxBytes = Upload.UploadMaxFileSize * 2
# If ServerName is not specified for a virtual host, use prefix + this
# suffix to compose one. If "Pattern" was specified, matched pattern,
# either by parentheses for the first match or without parentheses for
# the whole pattern, will be used as prefix for DefaultServerNameSuffix.
DefaultServerNameSuffix = default_domain.com
# Forcing $_SERVER['SERVER_NAME'] to come from request header
ForceServerNameToHeader = false
# Print file paths traversed onto the 404 page
PathDebug = false
# startup options
TakeoverFilename = filename # for port takeover between server instances
DefaultDocument = index.php
StartupDocument = filename
RequestInitFunction = function_name
RequestInitDocument = filename
ErrorDocument404 = 404.php
ErrorDocument500 = 500.php
FatalErrorMessage = some string
# shutdown options
GracefulShutdownWait = 0 # in seconds
HarshShutdown = true
EvilShutdown = true
# SSL options
EnableSSL = false
SSLPort = 443
SSLCertificateFile = <certificate file> # similar to apache
SSLCertificateKeyFile = <certificate file> # similar to apache
- GracefulShutdownWait, HarshShutdown, EvilShutdown
Graceful shutdown will try the admin /stop command and wait for the number of
seconds specified by GracefulShutdownWait. Harsh shutdown looks for the pid
file and tries to kill that process. Evil shutdown kills anything listening on
the server port it's trying to grab.
# HTTP settings
GzipCompressionLevel = 3
EnableKeepAlive = true
EnableOutputBuffering = false
OutputHandler =
ImplicitFlush = false
EnableEarlyFlush = true
ForceChunkedEncoding = false
MaxPostSize = 8 # in MB
To further control idle connections, set
ConnectionTimeoutSeconds = <some value>
This parameter controls how long the server will timeout a connection after
idle on read or write. It takes effect when EnableKeepAlive is enabled.
- EnableEarlyFlush, ForceChunkedEncoding
EnableEarlyFlush allows chunked-encoding responses, and ForceChunkedEncoding
will only send chunked-encoding responses, unless the client does not support
them.
# static contents
FileCache = filename
EnableStaticContentFromDisk = true
ExpiresActive = true
ExpiresDefault = 2592000
DefaultCharsetName = UTF-8
- EnableStaticContentFromDisk
A static content cache creates one single file from all static contents,
including css, js, html, images and any other non-PHP files (or even PHP files,
if CachePHPFile is turned on for compiler options). Normally this is prepared
by the compiler at compilation time, but it can also be prepared at run-time,
if SourceRoot points to a real file directory and EnableStaticContentFromDisk
is true. Otherwise, use FileCache to point to the static content cache file
created by the compiler.
NOTE: FileCache should be set to an absolute path.
- ExpiresActive, ExpiresDefault, DefaultCharsetName
These control static content's response headers. DefaultCharsetName is also
used for PHP responses in case no other charset has been set explicitly.
# file access control
SafeFileAccess = false
FontPath = where to look for font files
AllowedDirectories {
* = /tmp
}
AllowedFiles {
* = specific file to allow
}
# files with these extensions cannot be executed
ForbiddenFileExtensions {
* = ...
}
APC {
EnableApc = true
PrimeLibrary = filename
LoadThread = 2
CompletionKeys {
* = key name
}
- APC Priming
There is a way to prepare APC items in dry format, serialized in binary files,
and these files can be loaded (or "primed") extremely fast at startup time.
To prepare these .cpp files, check apc_sample_serializer.php for one way
of doing it. Once prepared, they can be compiled into a .so that can be loaded
through the PrimeLibrary option. The loading can be done in parallel with
LoadThread count of threads. Once loading is done, the server can write some
specified keys in CompletionKeys to APC to tell the web application about
priming.
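For example, a PHP page can check one of the completion keys to find out
whether priming has finished. A minimal sketch (the key name "priming.done" is
hypothetical; use whatever is listed under CompletionKeys):
  $ok = false;
  $done = apc_fetch('priming.done', $ok);
  if ($ok && $done) {
    // primed data is available to this request
  }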
ExpireOnSets = false
PurgeFrequency = 4096
- ExpireOnSets, PurgeFrequency
ExpireOnSets turns on item purging on expiration, and it's only done once per
PurgeFrequency of sets.
# Light processes have very little forking cost, because they are pre-forked.
# Recommended to turn this on for faster shell command execution.
LightProcessFilePrefix = ./lightprocess
LightProcessCount = 0
# Uploads
Upload {
UploadMaxFileSize = 100 # Size in MB
UploadTmpDir = /tmp/
EnableFileUploads = true
EnableUploadProgress = false
Rfc1867Freq = 262144 # 256K
Rfc1867Prefix = vupload_
Rfc1867Name = video_ptoken
}
}
= Virtual Hosts
# default IpBlockMap that applies to all URLs, if exists
IpBlockMap {
* {
Location = /url
AllowFirst = false
Ip {
Allow {
* = 127.0.0.1
* = 192.0.0.0/8
}
Deny {
* = 192.1.0.0
}
}
}
}
VirtualHost {
* {
Disabled = false
Prefix = prefix.
Pattern = regex pattern
PathTranslation = html
CheckExistenceBeforeRewrite = true
ServerName =
ServerVariables {
name = value
}
RewriteRules {
* {
pattern = regex pattern same as Apache's
to = target format same as Apache's
qsa = false
redirect = 0 (default: off) | 302 | 301 | other status code
conditions {
* {
pattern = regex pattern to match
type = host | request
negate = false
}
}
}
}
IpBlockMap {
# in same format as the IpBlockMap example above
}
# Remove certain query string parameters from access log.
LogFilters {
* {
# regex pattern to match a URL
url = (empty means matching all URLs)
# names of parameters to remove from query string
params = {
* = parameter name
}
# alternatively, use regex pattern to replace with empty string.
pattern = (empty means hiding entire query string)
# optionally, specify what values to replace with
value = (by default it's empty, removing the query parameter)
}
}
}
}
- CheckExistenceBeforeRewrite
With this setting, rewrite rules will not be applied for files that exist in
the original path. This is the default behavior. Set this to false if one wants
to block access to certain files (ex. .htaccess.)
= Administration Server
AdminServer {
Port = 8088
ThreadCount = 1
Password =
}
= Satellite Servers
Satellites {
* {
Type = RPCServer | InternalPageServer
Port = 0 # disabled
ThreadCount = 5
# only for RPCServer
MaxRequest = 500
MaxDuration = 120 # in seconds
TimeoutSeconds = 30 # default to RequestTimeoutSeconds
RequestInitFunction = on_init
RequestInitDocument = filename
Password = authentication
# only for InternalPageServer
BlockMainServer = true
URLs {
* = pattern
}
}
}
- RPCServer
Please refer to its documentation for more details.
- Internal Page Server
Serves internal pages over a private port. These pages become unavailable on
the main HTTP port if BlockMainServer is turned on. Use URLs to specify regex
patterns for pages to serve over this port.
Xbox {
ServerInfo {
ThreadCount = 0
Port = 0
MaxRequest = 500
MaxDuration = 120
RequestInitFunction =
RequestInitDocument =
}
ProcessMessageFunc = xbox_process_message
DefaultLocalTimeoutMilliSeconds = 500
DefaultRemoteTimeoutSeconds = 5
}
- Xbox Server
An xbox server provides cross-machine communication, similar to a message
queuing system. It also allows local processing of messages, in which case it
works as a multithreading facility for PHP execution. More documentation will
be coming for xbox applications.
PageletServer {
ThreadCount = 0
}
- Pagelet Server
A pagelet server is essentially the same as local CURL, except it's more
efficient. This allows parallel execution of a web page, preparing two panels
or iframes at the same time.
= Proxy Server
Proxy {
Origin = the server to proxy to
Retry = 3
# serve these URLs and proxy all others, trumping ProxyURLs settings
ServeURLs = false
ServeURLs {
* = urls not to proxy
}
# proxy these URLs
ProxyURLs = false
ProxyURLs {
* = urls to proxy
}
# proxy these patterns
ProxyPatterns {
* = patterns to proxy
}
# proxy this percentage of pages
Percentage = 0
}
- ServeURLs, ProxyURLs
Please note that these two settings are mutually exclusive, and you may turn on
just one. When ProxyURLs is on, you may choose to use ProxyURLs, ProxyPatterns
or Percentage or any combination of them to specify how it should be proxied.
= Static Content
StaticFile {
Extensions {
bmp = image/bmp
}
Generators {
* = static_resource.php
}
FilesMatch {
* {
pattern = .*\.(dll|exe)
headers {
* = Content-Disposition: attachment
}
}
}
}
- Generators
In addition to Static Content Cache, we also support Dynamic Content Cache. If
static_resource.php generates identical files given the same HTTP input, it
can be listed under Generators, so its generated content can be cached for
next requests.
= Stats
Stats = false
Stats {
Web = false
Memory = false
Malloc = false
APC = false
SQL = false
SQLTable = false
NetworkIO = false
XSL = xsl filename
XSLProxy = url to get the xsl file
SlotDuration = 600 # in seconds
MaxSlot = 72 # 10 minutes x 72 = 12 hours
APCSize {
Enable = false
CountPrime = false
Group = false
Individual = false
FetchStats = false
SpecialPrefix {
* = sv:/
}
SpecialMiddle {
* = :sv:/
}
}
}
= Debug Settings
Debug {
ServerErrorMessage = false
RecordInput = false
ClearInputOnSuccess = true
ProfilerOutputDir = /tmp
CoreDumpEmail = email address
CoreDumpReport = true
CoreDumpReportDirectory = /tmp
StackTraceTimeout = 0
}
- ServerErrorMessage
This setting turns on error messages in HTTP responses.
- RecordInput, ClearInputOnSuccess
With these two settings, we can easily capture an HTTP request in a file that
can be replayed with "-m replay" from the compiled program at command line.
We can easily gdb that way to debug any problems. Watch error log for recorded
file's location. ClearInputOnSuccess can automatically delete requests that
had 200 responses and it's useful to capture 500 errors on production without
capturing good responses.
- StackTraceTimeout
This specifies the maximum number of seconds spent generating a stack trace
when hhvm crashes. The default is 0, which means no timeout. This can be set
to prevent deadlocks in the backtrace handler.
- APCSize
These are options for APC size profiling. If enabled, the overall APC size
will be updated every time a store or delete happens. This allows querying apc
stats on the admin port. 'CountPrime' controls whether size stats include
primed keys. 'Group' controls whether stats broken down by key groups get
profiled. 'SpecialPrefix' and 'SpecialMiddle' are used for aggregating some
key groups. 'Individual' controls whether size profiling is enabled for each
key, which could incur some space overhead. 'FetchStats' can be enabled to
also profile apc_fetch, which further increases time overhead.
'FetchStats' implies 'Individual', and 'Individual' implies 'Group'.
= Sandbox Environment
A sandbox has a pre-defined setup that maps some directory to be the source
root of a web server, eliminating a lot of setup work.
Sandbox {
SandboxMode = false
Pattern = www.[user]-[sandbox].[machine].facebook.com
Home = /home
ConfFile = ~/.hphp
ServerVariables {
name = value
}
}
- Sandbox Configuration
First, pick a name. "default" has special meaning: its URL would be
www.[user].[machine].facebook.com. Say the name is "sandbox"; the configuration
will look like this,
[sandbox].path = path
[sandbox].log = path
[sandbox].accesslog = path
"path" points to PHP source files. "log" points to error log location and
"accesslog" points to access log location.
== Debugger Configuration
By default, HHVM does not listen for connections from the HipHop
Debugger (hphpd). To enable this you need to first setup the sandbox
environment described above, then add the following to your config:
Eval.Debugger {
EnableDebugger = true
EnableDebuggerServer = true
Port = 8089
DefaultSandboxPath = path to source files, similar to Server.SourceRoot
}
This will cause HHVM to start a debugger server, and you should see an
entry in the log indicating the debugger server has started before the
entry about all servers having been started.
- Machine Sharing
The benefit is that the same server can have one "Sandbox" configuration, and
many users can use the same machine to serve their own source files.
= HPHPi Settings
Eval {
CheckSymLink = true
EnableShortTags = true # is <? allowed
EnableXHP = true # XHP extension
NativeXHP = true # Use HPHP to directly handle XHP
VMStackElms = 16384 # Maximum stack size
# debugger
Debugger {
EnableDebugger = false
EnableDebuggerServer = false
Port = 8089
StartupDocument =
DefaultSandboxPath =
RPC {
DefaultPort = 8083
DefaultAuth =
HostDomain =
DefaultTimeout = 30
}
}
TimeoutsUseWallTime = true
# Causes HHVM to disallow constructs that are unavailable when Repo.Authoritative
# is active, without requiring you to run in Repo.Authoritative.
AuthoritativeMode = true
# experimental, please ignore
BytecodeInterpreter = false
DumpBytecode = false
RecordCodeCoverage = false
CodeCoverageOutputFile =
}
- CheckSymLink
Determines whether or not to follow symlinks (and, from a performance
standpoint, make the associated realpath() calls needed) when resolving
includes/requires and loading code. This is enabled by default for parity with
the reference implementation. If not needed for a particular application and/or
configuration, disable this when tuning.
- TimeoutsUseWallTime
Determines whether or not to interpret set_time_limit timeouts as wall time or
CPU time (which the reference implementation uses.) Defaults to wall time.
= MySQL
MySQL {
ReadOnly = false
ConnectTimeout = 1000 # in ms
ReadTimeout = 1000 # in ms
WaitTimeout = -1 # in ms, -1 means "don't set"
SlowQueryThreshold = 1000 # in ms, log slow queries as errors
Socket = # Default location to look for mysql.sock
TypedResults = true
}
- TypedResults
Zend returns strings and NULL only for MySQL results, not integers or floats.
HHVM returns ints (and, sometimes, actual doubles). This behavior can be
disabled by setting TypedResults to false.
= HTTP Monitoring
Http {
DefaultTimeout = 30 # in seconds
SlowQueryThreshold = 5000 # in ms, log slow HTTP requests as errors
}
= Mail
Mail {
SendmailPath = sendmail -t -i
ForceExtraParameters =
}
= PCRE
Preg {
BacktraceLimit = 100000
RecursionLimit = 100000
}
- Eval.PCRETableSize
The number of patterns which can be stored in the PCRE cache.
- Eval.PCRECacheType
May be "static", for a very fast cache which never evicts, "lru", for a cache
which evicts the least-recently used item when full, or "scalable" for a cache
which is slightly slower than "lru" at low concurrency but much faster for a
high-concurrency tight-loop workload.
Default: scalable.
- Eval.PCREExpireInterval
If Eval.PCRECacheType is set to "static", then setting this to an integer
number of seconds will cause the cache to be regularly cleared after the
specified number of seconds.
For "lru" and "scalable" type caches, this is not necessary and not supported.
= Tier overwrites
Tiers {
* {
machine = /regex pattern/
overwrite {
# any config settings described in this documentation
}
}
}
This feature allows a machine to overwrite configurations just by matching
machine names with the specified regex pattern. This block of configuration
can appear anywhere in the file, even at the top.
= PHP File Extensions
By default any file with .php in the URL is treated as PHP source code and is
passed through to the execution engine for processing. This configuration option
allows other file extensions to be treated similarly. Note that .php is
automatically treated as such and does not need to be specified in this list.
PhpFile {
Extensions {
phpt = application/x-phpt
hphp = application/x-hhvm-php
}
}
The content type is not used and is just for descriptive purposes. |
|
hhvm/hphp/doc/options.compiler | <h2>Configurable Options for Compiler</h2>
Compiler has a --config option to take a configuration file in HDF format, and
it has a --config-value to take ad-hoc configurable options in HDF format.
This documentation describes what's available for both of them.
When in doubt, look for how these options are parsed in compiler/options.cpp.
= UseHHBBC
Default is true. Determines whether whole-program optimizations should be
performed on the bytecode using hhbbc as part of the hphp run. When
false, you can still separately run hhbbc on the repo that hphp
produces (useful for development on hhbbc itself).
= AllDynamic
Default is false. When turned on, all functions and methods can be invoked
dynamically even without a literal string. When turned off, some dynamic
function calls may fail if the compiler is not able to tell that a function
may be invoked dynamically. Turning this switch on is recommended.
= AllVolatile
Default is false. When turned on, order-dependent function or class declaration
vs. existence testing can work. When turned off, some function_exists(),
class_exists(), get_defined_functions() or get_declared_classes() may return
different results than PHP does. Most programs don't depend on these
behaviors, so it is recommended to leave this off.
= EnableEval
Default is 0, eval() will throw a fatal error. When 1, eval() is supported in
a limited way, mixed together with compiled program. When 2, eval() is fully
supported as an interpreter mode.
= IncludeRoots
Only needed when --input-list is not used.
In this case, compiler needs to understand every single include. Currently it
only understands these formats (compiler/analysis/dependency_graph.cpp),
include 'literal_file_path';
include $SOME_VARIABLE . 'literal_file_path';
IncludeRoots is designed to look up $SOME_VARIABLE in the second case, so to
resolve file paths at compilation time. It will look like this,
IncludeRoots {
* {
root = $SOME_VARIABLE
path = lib/
}
* {
root = $SOME_OTHER_ROOT
path = lib/another
}
}
= IncludeSearchPaths
Only needed when --input-list is not used.
Specifies more paths to search for include files. For example,
IncludeSearchPaths {
* = lib/thrift/packages
* = lib/common
}
= PackageDirectories
Add all PHP files under these directories. For example,
PackageDirectories {
* = lib/thrift/packages
* = lib/common
}
= PackageExcludeDirs
Exclude files under these directories. Same as the --exclude-dir command line
option. For example,
PackageExcludeDirs {
* = scripts/
* = tests/
}
= PackageExcludeFiles
Exclude these files. Same as --exclude-file command line option. For example,
PackageExcludeFiles {
* = scripts/delete_user.php
* = tests/create_database.php
}
= PackageExcludeStaticFiles
Exclude files matching these patterns from static file content cache. A static
content cache creates one single file from all static contents, including
css, js, html, images and other MIME format files. When building static
content cache, the compiler will look for all non-PHP files to try to include
them. This option allows one to exclude certain files. Same as
--exclude-static-pattern command line option. For example,
PackageExcludeStaticFiles {
* = .*\\.js
* = .*\\.css
}
= CachePHPFile
Default is false. Whether to include PHP files in static content cache.
= ScalarArrayFileCount
Default is 1. Scalar arrays are arrays with scalar values, including literal
strings, numbers and scalar arrays as keys and values, so their values are
entirely determined at compilation time. We pre-generate these arrays during
program startup time just once to avoid initialization costs. These scalar
arrays are grouped into their own files. This option specifies how many files
to generate to split up scalar arrays, so we can compile them faster. For a
large project with a lot of scalar arrays, use a higher value. Then each file
is smaller. When using distcc, normally it finishes compilation faster.
= ScalarArrayOverflowLimit
Default is 2000. Some scalar arrays can become extremely large when nested
with lots of elements and levels. This option controls how to split them up
into small ones, with sub-arrays defined on their own, so as to flatten the
array data structure for faster compilation.
= LiteralStringFileCount
Default is 1. Similar to scalar arrays, we have literal strings pre-generated
in files. This option specifies how many of these files are generated for
faster compilation.
= EnableShortTags
Default is true. Is <? allowed with PHP code?
= EnableXHP
Whether to enable XHP extension. XHP adds some syntax sugar to allow better and
safer HTML templating. For more information, search XHP.
= NativeXHP
Whether to use HPHP to directly handle XHP.
= ParserThreadCount
How many threads to use when parsing PHP files. By default, it's 2x CPU count.
= FlibDirectory
Facebook specific. Ignore.
= PregenerateCPP
Default is false. In case clustering of output files has been requested and this
option is set to true, the files are pre-generated in memory in order to perform
a more precise partitioning.
= GCCOptimization
Default is disabled. This option allows one to selectively decrease the compiler
optimization level for long functions. It is specified as:
GCCOptimization {
O2 = X
O1 = Y
O0 = Z
}
where X, Y, Z are the minimum length functions must have (measured as line
count) for their optimization level to be decreased respectively to O2, O1,
O0. Note that all these parameters are optional, so it is possible for example
to only specify O1 and O0 but not O2, meaning that O2 will never be used as an
optimization level. In general it is expected that if specified, the minimum
line counts will be increasing for a decreasing optimization level.
= DynamicFunctionPrefix
Deprecated. These are options for specifying which functions may be called
dynamically. This turned out to be hard to configure, and it's replaced by
AllDynamic.
= DynamicFunctionPostfix
Deprecated. These are options for specifying which functions may be called
dynamically. This turned out to be hard to configure, and it's replaced by
AllDynamic.
= DynamicMethodPrefix
Deprecated. These are options for specifying which methods may be called
dynamically. This turned out to be hard to configure, and it's replaced by
AllDynamic.
= ConstantFunctions
This is a list of functions and static class methods that may be assumed to
always return a constant value. Each entry should be in the format
function_name|serialized_value, like so:
ConstantFunctions {
* = SomeClass::GetString|s:8:"a_string";
* = some_function|b:0;
}
= CodeGeneration
Under "CodeGeneration", one can specify alternative name prefixes that are
used in different places of code generation.
- IdPrefix
- LambdaPrefix
- UserFilePrefix |
|
hhvm/hphp/doc/printir_json_schema.ts | type BlockId = number; // unsigned int
type CounterName = string; // uncertain of format
type DisasmString = string; // uncertain of format
type ExtraString = string; // uncertain of format
type FileName = string; // uncertain of format
type FuncName = string; // uncertain of format
type FuncString = string; // uncertain of format
type GuardConstraintString = string; // uncertain of format
type InstrId = number; // uint32_t
type LineNum = number; // int
type Offset = number; // int
type Opcode = string; // Some sort of enum
type OptIndex = number; // int
type ProfCount = number; // uint64_t
type ProfileString = string; // uncertain of format
type SSATmpId = number; // uint32_t
type TCA = string;
type TransId = number; // int32_t
type TypeString = string; // uncertain of format
type UnitString = string; // uncertain of format
type Unit = {
blocks: [Block];
translation: TransContext;
opcodeStats: OpcodeStats;
inliningDecisions: [InliningDecision];
};
type Block = {
label: LabelInfo;
profCount: ProfCount;
preds: [BlockId];
next: LabelInfo | null;
instrs: [Instr];
area: Area;
};
type LabelInfo = {
id: BlockId;
isCatch: boolean;
hint: Hint;
};
enum Hint {
Unused,
Unlikely,
Neither,
Likely,
}
enum Area {
Main,
Cold,
Frozen,
}
type Instr = {
marker: {raw: FuncString} | null; // still not 100% sure what this does
phiPseudoInstrs: [PhiPseudoInstr];
opcodeName: Opcode;
typeParam: TypeString | null;
guard: GuardConstraintString | "unused" | null;
extra: ExtraString | null;
id: InstrId | null;
taken: LabelInfo | null;
tc_ranges: [TC_Range] | null; // will be null specifically when asmInfo is null
dsts: [Dst];
srcs: {counterName: CounterName} | [Src];
offset: Offset;
profileData: [ProfileData];
};
type Src = SSATmp;
type Dst = SSATmp;
type SSATmp = {
id: SSATmpId;
type: TypeString;
};
type PhiPseudoInstr = {
srcs: [{
src: Src;
label: LabelInfo;
}];
dst: Dst;
};
type TC_Range = {
area: Area;
start: TCA;
end: TCA;
disasm: DisasmString;
}
type ProfileData = {
offset: Offset;
name: ProfileString;
data: {profileType: ProfileType};
// the rest of the keys in "data" will depend on the value of "profileType"
}
enum ProfileType {
ArrayAccessProfile,
ArrayKindProfile,
CallTargetProfile,
ClsCnsProfile,
DecRefProfile,
IncRefProfile,
MethProfile,
ReleaseVVProfile,
SwitchProfile,
TypeProfile,
}
enum TransKind {
TransAnchor,
TransInterp,
TransLive,
TransProfile,
TransOptimize,
TransLivePrologue,
TransProfPrologue,
TransOptPrologue,
TransInvalid,
}
type TransContext = {
kind: TransKind;
id: TransId;
optIndex: OptIndex;
srcKey: SrcKey;
funcName: FuncName;
sourceFile: FileName;
startLine: LineNum;
endLine: LineNum;
}
type SrcKey = {
func: FuncString;
unit: UnitString;
prologue: boolean;
offset: Offset;
resumeMode: ResumeMode;
hasThis: boolean;
}
type ResumeMode = "" | "ra" | "rg";
type OpcodeStats = {[x in Opcode] : number;};
type InliningDecision = {
wasInlined: boolean;
offset: Offset;
caller: FuncName;
callee: FuncName;
reason: string;
} |
|
hhvm/hphp/doc/server.documents | <h2>Server Documents</h2>
This flow is implemented in hphp/runtime/server/http-server.cpp:
+-------------+
| APC Priming |
+------+------+
|
V
+-----------------+
| StartupDocument |
+--------+--------+
|
|
+-----------------+
|
V
+============================+
| Worker Thread |
|____________________________|
| |
| +-----------+ |
| | | |
| | V |
| | +----------------+ |
| | | RequestInit | |
| | +----------------+ |
| | | |
| | +----<-----+ |
| | V | |
| | +----------------+ R |
| | | URL Handling | P |
| | +----------------+ C |
| | | | |
| | +---->-----+ |
| | | |
| +--(sweep)--+ |
| |
+============================+
1. StartupDocument
This file is executed when web server starts, and it is executed before any
thread gets started but after APC is primed. Example use:
* bootstrapping server
* preparing parameters for service threads (see below): in this case, we have
to use APC to pass those parameters into service threads.
2. This space intentionally left blank.
3. RequestInitDocument and RequestInitFunction
When a worker thread resets, the RequestInitDocument and/or RequestInitFunction
are executed in order to initialize certain states or request specific coding.
If both RequestInitDocument and RequestInitFunction are specified, the
RequestInitDocument is executed before the RequestInitFunction.
4. RPCRequestHandler
RPCRequestHandler will call ExecutionContext::backupSession() right after the
RequestInit function/document, and it will call
ExecutionContext::restoreSession() right after it finishes processing one
request, returning it to the state right after RequestInit was executed.
RPCRequestHandler will reset a worker thread from time to time, either after
the worker thread has processed a certain number of requests or after a
certain amount of time. When this happens, it will perform a sweep and
re-execute the RequestInit function/document. |
|
hhvm/hphp/doc/server.rewrite_rules | <h2>Server Rewrite Rules</h2>
Here's how server processes a URL.
1. Virtual Host
Determine which virtual host the URL is on. This is done by matching virtual
host's "Prefix" or "Pattern" with this URL's domain name.
2. Path Translation
Prepend with virtual host's PathTranslation.
3. Physical Location
If the URL/file exists physically, that will be the final resolution. No
more processing will happen.
4. Rewrite Rules
Otherwise, each rewrite rule will be checked sequentially in the order in
which they appear in the configuration file. Once matched, the URL will be
rewritten according to the rule. |
|
hhvm/hphp/doc/server.rpc_server | <h2> How to execute a PHP function over network</h2>
1. Parameters
When RPC server is turned on, HipHop server will listen on a port that takes
RPC requests in a format like this,
http://[server]:[port]/function_name?params=...
"params" needs to be a JSON encoded array. The server will execute the function
and return its result.
Alternatively, one can pass in one parameter at a time like this,
http://[server]:[port]/function_name?p=[json value]&p=[json value]...
Each "p" is sequentially fed into the function as its first parameter, second
parameter, etc.. Each parameter needs to be encoded in JSON separately.
There is also an "auth" parameter that needs to be passed in, if the server
is configured to authenticate with a simple password. It would normally be a
password specified in /hphp/config.hdf under Satellites.rpc.Password.
2. States
Note that RPC server is considerably faster than an ajax page request, because
it has made an assumption that the function needed to run this way is "pure" or
"stateless", without leaving bad global states to subsequent calls. With this
assumption, the server only initializes libraries once and it will execute
multiple function calls (the number is configurable) within the same loop.
This is very similar to FastCGI's paradigm.
If you need to call a function that leaves bad states to other functions,
please add "reset=1" to the parameter, so the states can be thrown away after
the function is called:
http://[server]:[port]/function_name?reset=1&...
3. Returns
If stdout is needed in addition to function's return, use "output=2", and
HTTP response is a JSON encoded array like this,
array {
"return" => [function's return]
"output" => [stdout from echos or prints]
}
If function's return is not needed, use "output=1" to turn it off. Then HTTP
response is stdout's output (from echos and prints) without JSON encoding.
If none is needed, use "output=-1" to suppress them. Then an empty string will
be returned. This may be useful, if the function is not a query but an action.
If more complete information about the return value is needed, use "output=3"
to obtain a PHP-serialized string.
By default, "output=0" and function's return is encoded in JSON response.
To summarize,
0: (default) just function's return in JSON encoding
1: just stdout as a string without JSON encoding
2: both function's return and stdout in a JSON encoded array
3: just function's return with PHP serialization
-1: none
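As a rough illustration, here is what a client call could look like from PHP.
This is a minimal sketch; the host, port, function name and password are
hypothetical:
  $params = json_encode(array('param1', 2));
  $url = 'http://rpc-host:9999/some_function'.
         '?auth=password&output=0&params='.urlencode($params);
  $ret = json_decode(file_get_contents($url), true); // function's return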
4. File invocation
RPC server now supports direct invocation of a PHP file, under the same
assumption that the request is stateless.
There are two cases. First, the purpose of the RPC call is still to invoke a
function, but the definition of the function is in a file that is not in the
normal initialization path, defined by the RequestInitDocument. In order to
invoke the function reliably, you can add "include=..." or "include_once=..."
to the request:
http://[server]:[port]/function_name?include_once=file_name.php&...
The difference between include and include_once here is just like that in
normal PHP scripts. A file that is included once on an RPC request will only
execute once for the entire life time of an RPC request handler.
The second case is that you can directly invoke a file, without calling a
function:
http://[server]:[port]/?include=file_name.php&...
or just
http://[server]:[port]/file_name.php?...
In this case, you would probably want to use "output=1" for the file
invocation, as a file does not return a value. You would also want to
use "include=...", rather than "include_once=...". |
|
hhvm/hphp/doc/server.ssl | <h2>SSL Server Setup</h2>
1. Prepare Certificates
Prepare two files: one certificate file and one key file. Make sure they are
readable by the user account that runs web server.
2. Configuration Change
Prepare configurations like this,
Server {
EnableSSL = true
SSLPort = 443
SSLCertificateFile = /full_path_to_certificate
SSLCertificateKeyFile = /full_path_to_certificate_file
} |
|
hhvm/hphp/doc/server.stats | <h2>Server Stats</h2>
HHVM collects stats by time slots. Each time slot is configured as
StatsSlotDuration seconds and server internally keeps StatsMaxSlot number
of slots. Inside each slot, we keep a set of stats. These stats include
one built-in ("hit") and many key-value pairs defined by different parts
of the system.
slot:
time:
hit: total counts
details:
key-value pair
key-value pair
key-value pair
...
<h2>Stats Query</h2>
To query stats aggregated into one list of key value pairs encoded in JSON
format, hit admin port with a URL like this,
http://[server]:8088/stats.kvp?keys=...&to=[t2]...
keys: (optional) comma delimited keys to query, each of which can be decorated
[key] just the key's value, e.g. "sql.conn"
[key]/hit average per page hit, e.g. "sql.conn/hit"
[key]/sec per second rate, e.g. "sql.conn/sec"
#[regex]# keys matching the regular expression
(omitted) all available keys
prefix: (optional) adds a prefix and '.' delimiter to all key names
<h2>Available Keys</h2>
1. SQL Stats:
(1) Connections
sql.conn: number of connections newly created
sql.reconn_new: number of connections newly created when trying to reconnect
sql.reconn_ok: number of connections re-picked up when trying to reconnect
sql.reconn_old: number of connections dropped when trying to reconnect
(2) Queries
sql.query: number of queries executed
sql.query.[table].[verb]: per table-verb stats
sql.query.[verb]: per verb stats, where [verb] can be one of these:
- select
- insert
- update
- replace
- delete
- begin
- commit
- rollback
- unknown
2. MemCache Stats:
mcc.madd: number of multi_add() calls
mcc.madd.count: total count of multi added keys
mcc.mreplace: number of multi_replace() calls
mcc.mreplace.count: total count of multi replaced keys
mcc.set: number of set() calls
mcc.add: number of add() calls
mcc.decr: number of decr() calls
mcc.incr: number of incr() calls
mcc.delete: number of delete() calls
mcc.delete_details: number of delete_details() calls
mcc.get: number of get() calls
mcc.mget: number of multi_get() calls
mcc.mget.count: total count of multi got keys
mcc.replace: number of replace() calls
mcc.set: number of set() calls
mcc.stats: number of stats() calls
3. APC Stats:
apc.miss: number of item misses
apc.hit: number of item hits
apc.write: number of writes
4. Memory Stats:
These two stats are only available when the Google heap profiler is turned on
for debugging purposes:
mem.malloc.peak: peak malloc()-ed memory
mem.malloc.leaked: leaked malloc()-ed memory
5. Page Sections:
page.wall.[section]: wall time a page section takes
page.cpu.[section]: CPU time a page section takes
mem.[section]: request-local memory that a page section takes
network.uncompressed: total bytes to be sent before compression
network.compressed: total bytes sent after compression
Section can be one of these:
- queuing
- all
- input
- invoke
- send
- psp
- rollback
- free
6. evhttp Stats:
- evhttp.hit used cached connection
- evhttp.hit.[address] used cached connection by URL
- evhttp.miss no cached connection available
- evhttp.miss.[address] no cached connection available by URL
- evhttp.close cached connection got closed
- evhttp.close.[address] cached connection got closed by URL
- evhttp.skip not set to use cached connection
- evhttp.skip.[address] not set to use cached connection by URL
7. Application Stats:
PHP page can collect application-defined stats by calling
hphp_stats($key, $count);
where $key is arbitrary and $count will be tallied across different calls of
the same key.
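For example, a minimal sketch (the key name is arbitrary):
  // tally one rendering event for this request; it shows up in the stats
  // query as "widget.render" (or "hphp.widget.render" with prefix=hphp)
  hphp_stats('widget.render', 1);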
8. Special Keys:
hit: page hit
load: number of active worker threads
idle: number of idle worker threads
<h2>Example URL</h2>
GET "http://localhost:8088/stats.kvp?prefix=hphp" \
"&keys=apc.hit/sec,hit,load,:sql.query..*.select:," \
"network.compressed/hit,hit/sec"
This URL queries the following data:
hit: page hits
hit/sec: request per second
apc.hit/sec: APC hit per second
load: number of active threads currently
network.compressed/hit: sent bytes per request
:sql.query..*.select: all SELECTs on different tables |
|
hhvm/hphp/doc/server.status | <h2>Server Status</h2>
Server status displays current running status of server process and each worker
thread. To query status, hit admin port with a URL like this,
http://[server]:8088/status.[xml|json|html]
The format is,
[process]
[id]
[build]
...
[threads]
[thread]
[id]
[req]
...
[thread]
...
1. Process Status
- id: process ID
- build: current binary's build ID
- debug: DEBUG build or not
- now: current datetime
- start: when server was started
- up: server up time so far
2. Thread Status
- id: thread ID
- req: number of requests it has processed so far
- bytes: number of bytes it has written to response
- start: thread start time
- duration: how long this thread has been running
- mode: what this thread is doing
<b>idle</b> the thread is not processing any requests
<b>process</b> the thread is processing requests
<b>writing</b> the thread is sending response
<b>psp</b> the thread is doing post-sending processing
- iostatus: which I/O operation this thread is doing
- ioduration: how long this thread has been waiting for the I/O operation
- url: which URL this thread is serving
- client: client IP address
- vhost: virtual host the URL matched |
|
hhvm/hphp/doc/style.css | a:link { color: navy; }
a:visited { color: navy; }
a:active { color: #FF6600; }
a:hover { color: #FF6600; }
body, th, td {
border: 0px solid #bdc7d8;
font-size: 13px;
font-family: "lucida grande", tahoma, verdana, arial, sans-serif
}
b {
color: maroon
}
p {
line-height: 1.6em;
margin-left: 10px;
}
h2 {
font-size: 16px;
background: #3B5998;
color: #EFEFEF;
padding: 4px;
}
h3 {
border-bottom: 1px solid #CCCCCC;
margin-bottom: 0.75em;
margin-top: 2.5em;
margin-left: 10px;
}
ul {
list-style: square;
}
pre {
font-family: "lucida console", monaco, courier;
font-size: 10px;
background: #FFEEBB;
padding: 10px;
margin: 10px;
margin-left: 25px;
border: 1px solid #FFDD88;
white-space: pre;
line-height: 1.4em;
}
.hphp {
font-family: arial;
font-size: 28px;
color: #3B5998;
border-bottom: 5px dotted #DDDDDD;
}
.file {
font-size: 12px;
color: navy;
}
.current_file {
font-size: 12px;
color: #FF6600;
}
.current_file a:link {
color: #FF6600;
}
.topic {
background: #dddddd;
font-size: 12px;
font-weight: bold;
}
.item_header {
margin-left: 25px;
color: #444444;
}
.item_details {
color: #888888;
}
.footer {
border-top: 1px solid #DDDDDD;
padding-bottom: 5px;
font-size: 10px;
} |
|
hhvm/hphp/doc/tcdump_json_schema.ts | // Thin types
type ArchNameStr = string; // uncertain of format
type BCInstrStr = string; // uncertain of format
type BinaryStr = string; // uncertain of format, may be arch-dependent
type BlockId = number; // unsigned int
type CallDestStr = string; // uncertain of format
type CodeStr = string; // uncertain of format
type CodeLen = number; // uint32_t
type ConfigFileStr = string; // maybe empty, should I make this null if empty?
type CounterName = string; // uncertain of format
type DisasmString = string; // uncertain of format
type EventCount = number; // uint64_t
type ExtraString = string; // uncertain of format
type FileName = string; // uncertain of format
type FuncId = number; // uint32_t
type FuncName = string; // uncertain of format
type FuncString = string; // uncertain of format
type GuardString = string; // uncertain of format
type InstrId = number; // uint32_t
type InstrLen = number // uint32_t
type LineNum = number; // int
type Offset = number; // int32_t
type Opcode = string; // Some sort of enum
type OptIndex = number; // int
type ProfCount = number; // uint64_t
type ProfileString = string; // uncertain of format
type RepoSchemaStr = string; // uncertain of format
type SHA1 = string; // SHA1.toString()
type SSATmpId = number; // uint32_t
type TCA = string; // unsigned char*, casted to void* for sformat
type TransId = number; // int32_t
type TypeString = string; // uncertain of format
type UnitFuncStr = string; // maybe fix? see TODO in tc-print.cpp
type TCDump = {
configFile: ConfigFileStr;
repoSchema: RepoSchemaStr;
translations: [Translation | null];
}
type Translation = {
transRec: TransRec;
blocks: [Block];
archName: ArchNameStr;
perfEvents: EventCounts;
regions: {
main: TCARegionInfo | null;
cold: TCARegionInfo | null;
frozen: TCARegionInfo | null;
};
transId: TransId;
ir_annotation: PrintIR_Unit | string;
}
type TransRec = {
id: TransId;
src: TransRecSrc;
kind: TransKind;
hasLoop: boolean;
aStart: TCA;
aLen: CodeLen;
coldStart: TCA;
coldLen: CodeLen;
frozenStart: TCA;
frozenLen: CodeLen;
}
type TransRecSrc = {
sha1: SHA1;
funcId: FuncId;
funcName: FuncName;
resumeMode: ResumeMode;
hasThis: boolean;
prologue: boolean;
bcStartOffset: Offset;
guards: [GuardString];
}
enum ResumeMode {
None,
Async,
GenIter,
}
enum TransKind {
TransAnchor,
TransInterp,
TransLive,
TransProfile,
TransOptimize,
TransLivePrologue,
TransProfPrologue,
TransOptPrologue,
TransInvalid,
}
type Block = {
sha1: SHA1;
start: Offset;
end: Offset;
unit: UnitFuncStr | null;
}
type EventType =
"cycles" |
"branch-misses" |
"L1-icache-misses" |
"L1-dcache-misses" |
"cache-misses" |
"LLC-store-misses" |
"iTLB-misses" |
"dTLB-misses" |
string; // Technically there can be user-defined events too
type EventCounts = {[event in EventType]: EventCount;}
type TCARegionInfo = {
tcRegion: TCRegion;
ranges: [TCARangeInfo];
}
enum TCRegion {
hot,
main,
profile,
cold,
frozen
}
type TCARangeInfo = {
start: TCA;
end: TCA;
bc: Offset | null;
sha1: SHA1 | null;
instrStr: BCInstrStr | null;
lineNum: LineNum | null;
disasm: [TCADisasmInfo];
ir_annotation?: {
area: Area;
start: TCA;
end: TCA;
instrId: InstrId;
blockId: BlockId;
};
}
type TCADisasmInfo = {
binary: BinaryStr;
callDest: CallDestStr;
code: CodeStr;
perfEvents: EventCounts;
ip: TCA;
instrLen: InstrLen;
}
enum Area {
Main,
Cold,
Frozen
}
type PrintIR_Unit = {
transContext: PrintIR_TransContext;
blocks: {[x in string]: PrintIR_Block;};
// This is actually a map from BlockId to Block, but with
// the BlockIds interpreted as strings for JSON object compatibility
inliningDecision: [PrintIR_InliningDecision];
}
type PrintIR_TransContext = {
kind: TransKind;
id: TransId;
optIndex: OptIndex;
srcKey: PrintIR_SrcKey;
funcName: FuncName;
sourceFile: FileName;
startLine: LineNum;
endLine: LineNum;
}
type PrintIR_SrcKey = {
funcStr: FuncString;
unitStr: UnitFuncStr;
prologue: boolean;
offset: Offset;
resumeMode: ResumeMode;
hasThis: boolean;
}
type ResumeMode = "" | "ra" | "rg";
type PrintIR_Block = {
id: BlockId;
isCatch: boolean;
hint: Hint;
profCount: ProfCount;
next: BlockId | null;
instrs: {[x in string]: PrintIR_Instr;};
// This is actually a map from InstrId to Instr, but with
// the InstrIds interpreted as strings for JSON object compatibility
}
enum Hint {
Unused,
Unlikely,
Neither,
Likely,
}
type PrintIR_Instr = {
rawMarker: FuncString | null;
phiPseudoInstrs: [PrintIR_PhiPseudoInstrs];
opcode: Opcode;
typeParam: TypeString | null;
guard: GuardString | null;
extra: ExtraString | null;
id: InstrId;
taken: BlockId | null;
tcRanges: [PrintIR_TCRange];
dsts: [PrintIR_SSATmp];
offset: Offset;
profileData: PrintIR_Profile;
srcs: [PrintIR_SSATmp] | null; // exactly one of srcs and counterName should
counterName: CounterName | null; // be defined
}
type PrintIR_PhiPseudoInstrs = {
srcs: [[PrintIR_SSATmp, BlockId]];
dst: PrintIR_SSATmp;
}
type PrintIR_SSATmp = {
id: SSATmpId;
type: TypeString;
}
type PrintIR_TCRange = {
area: Area;
start: TCA;
end: TCA;
disasm: string;
}
type PrintIR_Profile = {
offset: Offset;
name: ProfileString;
data: {profileType: ProfileType};
// the rest of the keys in "data" will depend on the value of "profileType"
}
enum ProfileType {
ArrayAccessProfile,
ArrayKindProfile,
CallTargetProfile,
ClsCnsProfile,
DecRefProfile,
IncRefProfile,
MethProfile,
ReleaseVVProfile,
SwitchProfile,
TypeProfile,
}
type PrintIR_InliningDecision = {
wasInlined: boolean;
offset: Offset;
callerName: FuncName | null;
calleeName: FuncName | null;
reason: string;
} |
|
hhvm/hphp/doc/threading | <h2> Multi-Tasking and Multi-Threading Support</h2>
To perform parallel execution in PHP without forking a new process, you may
take advantage of one of these new facilities:
1. Pagelet Server
The pagelet server is similar to a CURL call to localhost. Look for "Pagelet
Server" in compiled program's options for how to set it up. The new pagelet
server functions work like this:
// This starts a pagelet server thread to process the URL just as if
// it's a new web request with specified headers and post data.
// The request method would be GET if the post data is empty.
$task = <b>pagelet_server_task_start</b>($url, $headers, $post_data);
// Main thread can now do extra work while pagelet server is processing.
...
// Optionally make this non-blocking call any time to check status.
$status = <b>pagelet_server_task_status</b>($task);
...
// Finally, we make a blocking call to wait for pagelet server's result,
// which is the entire output of the web page, with response headers and
// status code. The status code is set to 0 if a flushed partial response is
// successfully returned and the pagelet server is still active.
//
// A timeout of 0 is interpreted as an infinite timeout. The status code is
// set to -1 in the event of a timeout.
$headers = array(); $code = 0;
$result = <b>pagelet_server_task_result</b>($task, $headers, $code,
$timeout_ms);
2. Xbox Tasks
The xbox task system is designed to provide cross-box messaging as described in
"server.xbox_server" documentation. The xbox task system may also be used to
execute a task on the local machine in a separate thread. Here is an example:
// We start an xbox task by sending to localhost a message.
$task = <b>xbox_task_start</b>($message);
// Main thread can now do extra work while xbox task is processing.
...
// Optionally make this non-blocking call any time to check status.
$status = <b>xbox_task_status</b>($task);
...
// Finally, we make a blocking call to check message processing returns.
$ret = null;
$code = <b>xbox_task_result</b>($task, $timeout_ms, $ret);
On the message processing side, one has to implement a PHP function like this:
function <b>xbox_process_message</b>($msg) {
...
return $ret;
}
Note that an xbox thread starts its execution with its own global state without
sharing anything with the main thread, other than $msg and $ret, which are passed
between the threads at entry and exit points. |
|
hhvm/hphp/doc/tracepoints | User-space defined tracepoints (USDTs) in HHVM.
Tracepoints are implemented with FOLLY_SDT and follow the naming
convention hhvm_{event}_{cause}. {event} describes what happened,
and {cause} describes what caused the event. Some tracepoints
may occur more than once in the source (due to implementation details)
or in the binary (due to inlining). Stack traces and arguments provide
additional context.
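For reference, such a probe could be emitted from C++ with folly's static
tracepoint macro. This is only an illustrative sketch; the exact provider and
probe-name arguments used in HHVM's source may differ:
  #include <folly/tracing/StaticTracepoint.h>
  // fire the probe with the two string lengths as arguments
  FOLLY_SDT(hhvm, hhvm_cow_concat, lsize, rsize);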
Example one-liners using some of the multi-purpose bcc tools:
# do you even have the tools?
$ ll $(which trace.py)
# stack traces of String concatenations triggering copy-on-write:
$ sudo trace.py -U\
'u:path/to/hhvm:hhvm_mut_concat "%d %d", arg1, arg2'
# histogram of RHS of in-place concatenations, on 5s intervals
$ sudo argdist.py -i 5 -H\
'u:path/to/hhvm:hhvm_mut_append():u32:arg2'
# counts of all probes on 5s intervals
$ sudo funccount.py -i 5 "u:path/to/hhvm:hhvm_*"
hhvm_cow_concat lsize rsize
hhvm_mut_concat lsize rsize
Concatenating strings, causing copy-on-write (cow), or in-place appending
to a mutable string. Sometimes the mutable string isn't large enough to
append in place, but we still count it as a "mut" event. When concatenating
more than two strings in a single operation, lsize is the first string
length, and rsize is the sum of the remaining string lengths.
lsize: length in bytes of the left-hand side string
rsize: length in bytes of the right-hand string(s).
hhvm_cow_modifychar lsize index
hhvm_mut_modifychar lsize index
A str[index]= assignment occurred on a string, causing a copy-on-write (cow),
or in-place mutation (mut).
lsize: size of the string being modified
index: index of the character being modified
hhvm_cow_bitnot lsize
hhvm_mut_bitnot lsize
A bitwise-not (~) on a string, resulting in copy-on-write (cow)
or in-place mutation (mut).
lsize: the size of the string copied + mutated.
hhvm_cow_sodium lsize
hhvm_mut_sodium lsize
ext_sodium operations.
lsize: the size of the string copied or mutated.
hhvm_cow_setrange lsize rsize
hhvm_mut_setrange lsize rsize
A SetRange operation copied or mutated a string.
lsize: size in bytes of the original string
rsize: number of bytes copied into the string (size * count) |
|
hhvm/hphp/doc/tracing-graph.dot | digraph Tracing {
// top level
"http-request" -> {"autoload-ensure-updated", "invoke", "onShutdownPostSend"}
"cli-request" // need to look into why this isn't getting logged
// second level
"invoke" -> {"warmup", "dispatch", "enter-tc",
"shutdown-pre-send", "autoload", "autoload-native"}
"warmup" -> "invoke"
  // I'm not 100% sure about the circular dependency here if it comes from above.
// I don't think it's enter-tc -> dispatch but rather the actual execution
// check with Rick
"dispatch" -> {"enter-tc", "dispatch-bb", "autoload", "autoload-native"}
// Jitting code
"enter-tc" -> {"dispatch", "emit-interp-no-translate-stub", "retranslate",
"emit-func-prologue", "svcreq::emitStub", "autoload",
"autoload-native"}
"dispatch-bb"
"retranslate" -> {"select-tracelet", "translate", "emit-translation",
"svcreq::emitStub"}
"emit-func-prologue" -> {"hhir-optimize", "vasm-gen"}
"translate" -> {"hhir-gen", "vasm-gen"} // this refers specifically to region translation
"emit-translation" -> "vasm-emit"
"hhir-gen" -> {"select-tracelet", "hhir-optimize"}
"vasm-gen" -> {"vasm-optimize", "vauto-finish"}
"vasm-emit" -> "emit-X64"
"svcreq::emitStub" -> "vauto-finish"
"select-tracelet"
"hhir-optimize"
"vasm-optimize"
"vauto-finish"
// autoloading
"autoload" -> "lookup-unit"
"autoload-native" -> "lookup-unit"
"autoload-ensure-updated"
// unit loading
"lookup-unit" -> "lookup-unit-non-repo-auth"
"lookup-unit-non-repo-auth" -> {"create-unit-from-file",
"load-unit-non-repo-auth",
"prefetch-symbol-refs",
"realpath", "repo-options"}
"prefetch-symbol-refs" -> "prefetch-unit"
"prefetch-unit" -> {"prefetch-unit-enqueue", "realpath"}
"prefetch-unit-enqueue" -> {"create-unit-from-file", "realpath",
"repo-options"}
"load-unit-non-repo-auth" -> {"unit-cache-lock-acquire", "read-file"}
"unit-cache-lock-acquire" -> {"create-unit-from-file", "read-file"}
"create-unit-from-file" -> {"create-unit-from-file-load", "bccache",
"unit-create"}
"create-unit-from-file-load" -> {"parse", "bccache", "unit-create"}
"parse" -> {"unit-compiler-run", "assemble", "parse-load"}
"parse-load" -> {"unit-compiler-run", "assemble"}
"unit-compiler-run" -> {"unit-compiler-run-load", "hackc"}
"unit-compiler-run-load" -> "hackc"
"hackc" -> {"assemble", "unit-create"}
"assemble"
"unit-create"
// bytecode cache
"bccache" -> {"bccache-uncacheable", "bccache-hit", "bccache-timeout",
"bccache-error", "bccache-miss", "bccache-ice"}
"bccache-hit" -> "bccache-blob-decode"
"bccache-miss" -> "bccache-blob-encode"
"bccache-blob-decode"
} |
|
Markdown | hhvm/hphp/doc/hackers-guide/bytecode-interpreter.md | # The HHVM Bytecode Interpreter
A typical interpreter is implemented using a dispatch loop that looks something
like this:
```cpp
while (true) {
switch(*pc) {
case ADD:
/* add implementation */
pc++;
break;
case SUB:
/* sub implementation */
pc++;
break;
/*
* handlers for other bytecodes
*/
}
}
```
That is, you have a loop that steps through the bytecode program, examining
each instruction in turn. The `switch` statement executes the current bytecode instruction,
advances the program counter, then repeats, until the program terminates.
If you try to find a code structure like this in HHVM, you won’t find it.
Instead, the interpreter is broken up into a number of smaller pieces, some of
which are defined using a series of nested macros. This is to reduce duplication
and to keep it more manageable, although it can make the code seem intimidating
to newcomers.
HHVM's interpreter makes repeated use of the [X
Macro](https://en.wikipedia.org/wiki/X_Macro) pattern to generate code and data
for each bytecode instruction. The list macro is `OPCODES`, defined in
[runtime/vm/hhbc.h](https://github.com/facebook/hhvm/blob/f484e7c597763bff68ad9e0e355aff763b71ec1e/hphp/runtime/vm/hhbc.h#L383).
It contains the name, signature, and attributes for every bytecode instruction.
The "X" macro is `O()`, which you can see is repeatedly invoked by `OPCODES`.
## Bytecode implementations
HHVM's bytecode interpreter lives in
[runtime/vm/bytecode.cpp](../../runtime/vm/bytecode.cpp). For every bytecode
instruction `Foo`, there is a function `iopFoo()`. This function contains the
interpreter implementation of `Foo`, and takes arguments representing all of the
immediate arguments to the instruction. For example, since the `Add` instruction
takes no immediates, `void iopAdd()` takes no arguments. `Int`, on the other
hand, takes a 64-bit integer immediate, so its signature is `void iopInt(int64_t
imm)`.
These functions are not called directly by the dispatch loop. Instead, the
dispatch loop calls `iopWrapFoo()`, giving it a pointer to the appropriate
`iopFoo()` function. These wrapper functions are automatically generated from
each bytecode's signature in `hhbc.h`. Each one decodes the appropriate
immediates, then passes them to the corresponding `iopFoo()` function. You
should not have to modify the machinery that generates these functions unless
you add a new bytecode immediate type.
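As a sketch of the shape of these functions (the signatures below follow the
description above, but the bodies are placeholders, not the real
implementations from `bytecode.cpp`):

```cpp
// Placeholder bodies; the real iop functions manipulate the VM eval stack,
// reference counts, and error handling in runtime/vm/bytecode.cpp.
void iopAdd() {
  // Add has no immediates: pop two values from the eval stack,
  // add them, and push the result.
}

void iopInt(int64_t imm) {
  // Int has one 64-bit immediate, decoded by the generated iopWrapInt()
  // wrapper and passed in here; push it onto the eval stack.
}
```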
## InterpOne
In addition to the hand-written `iop*()` functions and the macro-generated
`iopWrap*()` functions, the `OPCODES` macro is used
[here](https://github.com/facebook/hhvm/blob/f484e7c597763bff68ad9e0e355aff763b71ec1e/hphp/runtime/vm/bytecode.cpp#L7475-L7515)
to create a set of functions named `interpOne*()`. These functions are used by
the JIT when a certain instruction would be too complicated to compile to native
machine code: the JIT syncs all live state to memory, calls the appropriate
`interpOne*()` function, then resumes running the native code after the
problematic bytecode.
## Interpreter dispatch loop
The dispatch loop is in `dispatchImpl()`, defined in
[runtime/vm/bytecode.cpp](../../runtime/vm/bytecode.cpp). It’s declared as:
```cpp
template <bool breakOnCtlFlow> TCA dispatchImpl()
```
Two different versions are instantiated, one for each possible value of
`breakOnCtlFlow`. When `breakOnCtlFlow` is `true`, the function will return to
the caller after a control flow (i.e. branch) instruction is encountered. If
`breakOnCtlFlow` is `false`, the interpreter will continue executing
instructions until the current VM entry finishes.
There are two versions of the interpreter loop. The Windows version (indicated
with `_MSC_VER`) implements an ordinary switch-based interpreter loop, while the
Linux version implements a threaded interpreter. In a threaded interpreter, the
handler for each bytecode jumps directly to the handler for the next bytecode
rather than going to a single central switch statement. This eliminates a jump
to a different cache line and improves branch prediction by allowing the
processor’s branch predictor to find associations between the bytecodes. These
different mechanisms are hidden by the `DISPATCH_ACTUAL` macro.
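For readers unfamiliar with the technique, the following is a generic
computed-goto dispatch loop using GCC's labels-as-values extension. It is only
a sketch of the general idea, not HHVM's actual loop, which is assembled from
the macros described below.

```cpp
// Generic threaded-dispatch sketch (GCC/Clang labels-as-values extension);
// not HHVM's actual dispatch loop.
enum Op { ADD, SUB, HALT };

int run(const Op* pc) {
  // One label per opcode, collected into a dispatch table.
  static void* optab[] = { &&op_add, &&op_sub, &&op_halt };
  int acc = 0;
  goto *optab[*pc];            // initial dispatch

op_add:
  acc += 1;
  goto *optab[*++pc];          // jump straight to the next handler
op_sub:
  acc -= 1;
  goto *optab[*++pc];
op_halt:
  return acc;
}
```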
There are three separate parts to each bytecode handler. One part for dealing
with Hack debugging, one part for tracking code coverage, and a third part which
implements the actual handler. These are defined in the `OPCODE_DEBUG_BODY`,
`OPCODE_COVER_BODY`, and `OPCODE_MAIN_BODY` macros, respectively. In the Windows
version, [defined
here](https://github.com/facebook/hhvm/blob/f484e7c597763bff68ad9e0e355aff763b71ec1e/hphp/runtime/vm/bytecode.cpp#L7621-L7628),
the three macros are put in a `case` for each opcode, like the example at the
beginning of this document.
In the Linux version, [defined
here](https://github.com/facebook/hhvm/blob/f484e7c597763bff68ad9e0e355aff763b71ec1e/hphp/runtime/vm/bytecode.cpp#L7631-L7636),
the threaded interpreter uses a dispatch table with [computed
goto](https://gcc.gnu.org/onlinedocs/gcc/Labels-as-Values.html). Each
instruction gets three labels: one for each `OPCODE_*_BODY` macro. These labels
are collected into three different dispatch tables, [defined
here](https://github.com/facebook/hhvm/blob/f484e7c597763bff68ad9e0e355aff763b71ec1e/hphp/runtime/vm/bytecode.cpp#L7532-L7549).
`optabDirect` points to `OPCODE_MAIN_BODY`, `optabCover` points to
`OPCODE_COVER_BODY`, and `optabDbg` points to `OPCODE_DEBUG_BODY`. The correct
dispatch table is selected at runtime depending on the current code coverage
configuration, and whether or not a debugger is attached.
## Performance
In general, we strongly prefer simplicity over performance in the interpreter.
Any performance-sensitive uses of HHVM rely on the JIT compiler, so we've found
it beneficial to keep the interpreter as straightforward as possible, kind of
like a reference implementation to compare the JIT against. This is an
intentional tradeoff that has resulted in an interpreter that is fairly slow.
One aspect of interpreter performance that we have focused on is inlining
decisions. We've found that most compilers choose to not inline various parts of
the interpreter in ways that measurably hurt performance (even when the
functions are marked `inline`). To counter this, we use the `OPTBLD_INLINE`
macro, which forces the compiler to inline functions in optimized builds. It does
nothing in debug builds, so we can still debug the interpreter. |
Markdown | hhvm/hphp/doc/hackers-guide/data-structures.md | # HHVM Runtime Data Structures
This page contains a brief overview of the most important runtime data
structures in HHVM. It is not meant to be an exhaustive reference for all
details about these data structures, since those details change too often to
reliably document so far away from the code. Rather, reading the descriptions
here should leave you with a good high-level understanding of what each one is
used for and where to find the code implementing it for further investigation.
## Hack-visible values
### `DataType`, `Value`, and `TypedValue`
`DataType`, defined in [datatype.h](../../runtime/base/datatype.h), is an enum
representing the type of a Hack value: `KindOfNull` means `null`,
`KindOfBoolean` means `bool`, `KindOfInt64` means `int`, etc. Some user-visible
types are split into multiple `DataType`s to keep track of whether or not a
value is reference counted: `KindOfPersistentFoo` means "`KindOfFoo` that we
know is not reference counted." The reverse, however, is not true:
`KindOfPersistentFoo` is a subtype of `KindOfFoo`, so it is valid to use
`KindOfFoo` with a non-reference-counted Foo.
`TypedValue`, defined in [typed-value.h](../../runtime/base/typed-value.h)
represents a single Hack value, and appears in many different places in HHVM. It
contains a `DataType` and a `Value`, which is a union with one member for every
possible type. Primitive types (`null`, `bool`, `int`, and `float`) are stored
inline in the `TypedValue`, while all other types heap-allocate a data object
and store a pointer to it in the `TypedValue`.
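A greatly simplified sketch of the layout (the real definitions live in
[typed-value.h](../../runtime/base/typed-value.h) and
[datatype.h](../../runtime/base/datatype.h)):

```cpp
// Simplified sketch only; see runtime/base/typed-value.h for the real thing.
#include <cstdint>

struct StringData;
struct ArrayData;
struct ObjectData;
enum class DataType : int8_t {};  // stand-in for the enum described above

union Value {
  int64_t     num;   // int
  double      dbl;   // float
  StringData* pstr;  // string
  ArrayData*  parr;  // array-like types
  ObjectData* pobj;  // object
};

struct TypedValue {
  Value    m_data;   // inline primitive or pointer to heap data
  DataType m_type;   // which union member is active
};
```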
### `ArrayData`
[`ArrayData`](../../runtime/base/array-data.h) is used to represent all
array-like types in Hack: `array`, `dict`, `vec`, and `keyset`, though you'll
never see any raw `ArrayData` objects created anywhere. Instead, a specific kind
of array is created and tagged using one of the [several current array `HeaderKind` values](https://github.com/facebook/hhvm/blob/HHVM-3.27/hphp/runtime/base/header-kind.h#L46-L49).
We use a custom vtable to dispatch to the appropriate implementation for all
`ArrayData` member functions; the current implementation classes are
[`PackedArray`](../../runtime/base/packed-array.h),
[`VanillaDict`](../../runtime/base/vanilla-dict.h), and
[`SetArray`](../../runtime/base/set-array.h). Note that many of these types
don't directly inherit from `ArrayData`, so they're only subtypes of `ArrayData`
by convention.
Arrays in Hack have value semantics, implemented using copy-on-write. As a
result, most member functions that perform mutations take a `bool copy`
parameter to indicate whether or not the array should be copied before
performing the mutation. It is up to the caller to call `cowCheck()` before any
mutations to determine if a copy is necessary. Additionally, any mutation could
cause reallocation of a new `ArrayData`, either to grow or escalate to a
different array kind. To support this, all mutation functions return a new
`ArrayData*`; the `ArrayData*` that was mutated should be considered dead and
you should use the new one in its place.
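A sketch of the resulting caller-side pattern (the member-function name and
signature here are illustrative; the exact mutation APIs vary by operation):

```cpp
// Hypothetical caller-side sketch of the copy-on-write protocol described
// above; `set()` stands in for whichever mutation function is being called.
ArrayData* setIntKey(ArrayData* ad, int64_t key, TypedValue value) {
  bool copy = ad->cowCheck();                 // shared? mutate a copy instead
  ArrayData* result = ad->set(key, value, copy);
  // The mutation may have returned a different array (copied, grown, or
  // escalated), so the old pointer must be considered dead from here on.
  return result;
}
```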
### `StringData`
[`StringData`](../../runtime/base/string-data.h) represents a Hack `string`
value. Like arrays, strings have value semantics using copy-on-write, so callers
are responsible for calling `cowCheck()` before mutating, although the copy must
be done manually with `StringData::Make()` rather than a `bool copy` parameter
to the mutator function.
Most `StringData`s store their data in space allocated immediately after the
`StringData`. This layout is required in `USE_LOWPTR` builds, so `StringData::m_data`
is [conditionally
defined](https://github.com/facebook/hhvm/blob/e05d2041a598ff655f594c4fec7e5f1708d9466b/hphp/runtime/base/string-data.h#L539-L542).
For normal builds, `m_data` will usually point right after the `StringData`, but
it may point elsewhere for strings from APC.
### `ObjectData`
[`ObjectData`](../../runtime/base/object-data.h) represents a Hack `object`. It
contains a `Class*` describing its type (described below) and a series of
attributes. Declared properties are stored in an array of `TypedValue`s that is
allocated after the `ObjectData`.
### Smart pointer wrappers
All of the types described so far have smart pointer wrappers that are generally
used for high-level non-performance-critical C++ code that needs to work with
one of them. The wrapper type can be found by dropping the `Data` suffix, so
`StringData`'s wrapper is [`String`](../../runtime/base/type-string.h). Note
that like most smart pointer types, these wrappers can all represent a `null`
value, but the pointer in a `TypedValue` representing an array, string, etc.
must *never* be `nullptr`. A `null` value is represented using `KindOfNull`.
## Runtime-internal data structures
### `Unit`
A [`Unit`](../../runtime/vm/unit.h) represents all of the information contained
in one Hack file: classes, functions, constants, top-level code, etc. All
references to entities that could be defined in another file are only referenced
by name, even if they are defined in the current file. This includes, but is not
limited to, function names in call expressions, parent class names, and used
traits. This is to support one of HHBC's core principles: it is always possible
to emit bytecode (and construct a `Unit`) for a single file in complete
isolation. If that file references entities that are undefined at runtime, the
appropriate error will be raised.
### `PreClass` and `Class`
Every `Unit` has a list of classes that are defined in the file it came from.
Each of these classes is stored as a [`PreClass`](../../runtime/vm/preclass.h),
which represents the class definition as it is visible in the source code.
Parent classes, used traits, and any other references to entities other than the
class itself are stored as string names.
When a class is defined at runtime, these references are resolved to concrete
entities, producing a [`Class`](../../runtime/vm/class.h). The resolved parent
class is stored as a `Class*` rather than the string name from the `PreClass`.
This two-level representation of classes is necessary because the mapping
between name and class can change with every request. All classes are redefined
from scratch in each request<sup>[1](#f1)</sup>, and different definitions of a
class can be selected by including different files, or by putting the
definitions on different control flow paths. Properly-typed Hack code will
never have multiple definitions of the same class, but HHVM still supports it.
### `Func`
All Hack functions, including methods on classes, are represented using a
[`Func`](../../runtime/vm/func.h). Each `Func` contains a pointer to the `Unit`
it was defined in, the `Class` it is a member of (if any), its location in the
`Unit`'s bytecode, information about its parameters, and various other metadata.
Every `Func` has a 32-bit `FuncId`, which is assigned in ascending order as each
`Func` is created. There is a global table that allows mapping from this ID back
to a `Func*`, and `FuncId`s are often used in places where we want to store a
reference to a `Func` without a full 64-bit pointer<sup>[2](#f2)</sup>.
`FuncId`s are also guaranteed to be unique for the lifetime of the process,
unlike `Func*`.
--------
<b id="f1">1:</b> This is what HHVM presents to the user, but we have
mechanisms to avoid the work of actually redefining everything in every request
when possible.
<b id="f2">2:</b> HHVM also has a `USE_LOWPTR` build mode that allocates certain
data structures, including `Func`s, in the lower 4GiB of address space, allowing
us to store 32-bit pointers using `HPHP::LowPtr<T>`. However, `LowPtr<T>` is 64
bits in non-`USE_LOWPTR` builds, and some uses of `FuncId` rely on it being 32
bits in all build modes. |
Markdown | hhvm/hphp/doc/hackers-guide/directory-structure.md | ## Directory Structure
There are over 1000 C++ files in HHVM's codebase, split into a number of
different directories. This is a rough overview of what lives where (all paths are under `hphp/`):
`compiler/`: The old parser and bytecode emitter. This is deprecated and is currently being removed; the new replacement is in `hack/src/hhbc`.
`doc/`: Documentation, of varying age and quality.
`hack/src/`: The Hack parser, typechecker, bytecode emitter, and various other Hack-related tools.
`hhbbc/`: HipHop Bytecode-to-Bytecode Compiler, our ahead-of-time static analysis program.
`hhvm/`: `main()` for the `hhvm` binary
`neo/`: Code to read `.hdf` files, used for configuration.
`parser/`: The parser grammar and interface.
`pch/`: Pre-compiled headers for MSVC (not currently supported).
`ppc64-asm/`: Code to read and write [ppc64](https://en.wikipedia.org/wiki/Ppc64) machine code.
`runtime/`: HHVM's runtime.\
`runtime/base/`: Runtime internals related to Hack types and values (strings, objects, etc...).\
`runtime/debugger/`: Support for debugging Hack programs at runtime.\
`runtime/ext/`: Extensions.\
`runtime/server/`: Built-in servers, both for web requests and other HHVM-specific types of requests.\
`runtime/test/`: C++ unit tests for various runtime components.\
`runtime/vm/`: Most of the internals of the VM. The distinction between `runtime/base/` and `runtime/vm` is a bit fuzzy.\
`runtime/vm/jit/`: HHVM's JIT compiler.\
`runtime/vm/verifier/`: The bytecode verifier.
`system/`: systemlib, a collection of Hack code that is embedded into `hhvm` and always available.
`test/`: Integration tests for the whole compiler pipeline, written in PHP and Hack.
`tools/`: Utility programs useful in the development and/or use of HHVM.\
`tools/benchmarks/`: Microbenchmarks. If you run these, take them with a pound of salt; we care much more about macrobenchmarks based on real-life workloads.\
`tools/gdb/`: Python scripts to assist in debugging `hhvm` in `gdb`.\
`tools/hfsort/`: HFSort, the algorithm used to lay out functions in both `hhvm` and JIT-compiled code.\
`tools/tc-print/`: TCPrint, a tool to analyze the output of the JIT compiler.
`util/`: Functions and data structures used by HHVM that could be useful to other programs, and have no dependencies on other parts of HHVM.
`vixl/`: Code to read and write [AArch64](https://en.wikipedia.org/wiki/ARM_architecture#AArch64) machine code.
`zend/`: Code imported from the standard [PHP interpreter](https://github.com/php/php-src). |
Markdown | hhvm/hphp/doc/hackers-guide/faq.md | # FAQ
This document contains answers to some of the most common questions we get from
people as they learn HHVM's codebase. Some of the answers may be pointers to
other pieces of documentation, if that information has a natural home somewhere
else.
## How is HHVM configured?
HHVM has a large number of configuration options that can be set with
command-line arguments of the form `-vName=Value`. Unfortunately, documentation
on them is very sparse, but some can be found in
[doc/options.compiled](../options.compiled) and
[doc/options.compiler](../options.compiler). The authoritative source for core
runtime options is the `RuntimeOption` class, in
[runtime/base/runtime-option.h](../../runtime/base/runtime-option.h) and
[runtime/base/runtime-option.cpp](../../runtime/base/runtime-option.cpp).
Multiple options may be collected in a `.hdf` file, [described here](../hdf),
and passed to HHVM with `-c path/to/config.hdf`. Options given with `-v` will
override any found in the `.hdf`. HHVM also has support for PHP's `.ini` files,
documented [here](../ini.md).
## How do I see the bytecode for my Hack/PHP script?
Use the `Eval.DumpBytecode` runtime option:
```sh
$ hhvm -vEval.DumpBytecode=1 hello.php
```
This will give you output that looks something like
[this](https://gist.github.com/swtaarrs/991c37af6e474733c47911731521a8ec).
[bytecode.specification](../bytecode.specification) contains descriptions of the
behavior of every bytecode instruction. Note that running with
`-vEval.DumpBytecode=1` will still execute the script after printing its
bytecode. To prevent this, add the `--lint` option, which will exit after
parsing and printing the bytecode.
You can also use `Eval.DumpHhas`:
```sh
$ hhvm -vEval.DumpHhas=1 hello.php
```
This gives output similar to
[this](https://gist.github.com/swtaarrs/4b2fffacd74c31d4e65298888922805d).
Running with `-vEval.DumpHhas=1` will *not* execute the script after printing
its bytecode.
Why are there two ways to dump bytecode with slightly different semantics? It's
partly for historical reasons that may be cleaned up at some point, but there
are a few meaningful differences between the two:
- `DumpBytecode` shows bytecode offsets, while `DumpHhas` doesn't. This is because HHAS is designed to be human-writable, so it uses labels rather than raw offsets for control flow. However, the offsets are useful if you're working in the interpreter or JIT, as they use bytecode offsets to locate code.
- `DumpBytecode` writes to the current `TRACE` file (see the next section for details), while `DumpHhas` writes to stdout.
- Metadata is printed explicitly, separately from function bodies, with `DumpBytecode`, while `DumpHhas` either represents the same metadata inline in the bytecode or leaves it out when it can be inferred from the bytecode itself.
In general, if you're working with HackC (the parser and bytecode emitter), you
should use `DumpHhas`, because that's how HHVM and HackC communicate. If you're
working with HHVM's interpreter or JIT, you should use `DumpBytecode`, since its
more verbose format can be helpful during debugging.
## How do I enable debug logging from different parts of HHVM?
The primary method of printing debug information is our tracing system, defined
in [util/trace.h](../../util/trace.h). Most of the functionality is disabled in
release builds for performance reasons, so you should be using a debug build if
you want to enable tracing.
### Basic usage
The `TRACE_MODULES` macro in `trace.h` defines a series of *trace modules*, each
with a short name. To enable one or more modules, set the `TRACE` environment
variable to a comma-separated list of `module:level` pairs. All levels default
to 0, and higher levels are typically more verbose (this is only by convention,
and is not enforced anywhere). For example, to set the `hhir` module to level 1
and the `printir` module to level 2, run:
```sh
$ TRACE=hhir:1,printir:2 hhvm my_script.php
```
If `TRACE` is set, even to an empty value, tracing is written to
`/tmp/hphp.log`; this can be overridden by setting `HPHP_TRACE_FILE` to the file
of your choice. If `TRACE` is not set, all tracing goes to stderr (this applies
to features like `Eval.DumpBytecode`, described above). To send tracing to
stderr even when `TRACE` is set, use `HPHP_TRACE_FILE=/dev/stderr`.
To find out which trace module applies to a specific file, look for a use of the
`TRACE_SET_MOD` macro, usually near the top of the file. To add your own tracing
lines, use either `TRACE(level, "printf-style format", args...);` or
`FTRACE(level, "folly::format-style format", args...);`. The `level` argument is
the lowest level at which the trace will be active.
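For example, a sketch of what adding tracing to a file might look like (the
module choice and messages here are made up):

```cpp
// Illustrative only; pick the trace module that matches the file you're in.
#include "hphp/util/trace.h"

TRACE_SET_MOD(hhir);

void doSomething(int n) {
  TRACE(1, "doSomething called with %d\n", n);
  FTRACE(2, "more verbose detail: n = {}\n", n);
}
```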
### `printir`
If you're working in the JIT, `printir` is probably the most important module to
be familiar with. It controls the printing of HHIR units as they're constructed
and optimized, and `TRACE=printir:1` is the most convenient way to see the
output of the JIT. It's also one of the only modules to have well-defined
meanings for each trace level, defined
[here](https://github.com/facebook/hhvm/blob/38ee69496f66e87528a128e22c38e2ee12da5470/hphp/runtime/vm/jit/print.h#L76-L101).
It is enabled in all build modes, so you don't have to create a debug build to
use it.
## My change is crashing HHVM. What do I do?
HHVM can be debugged with the standard debugging tools for your compiler and
platform. For most of us, that means using
[GDB](https://www.gnu.org/software/gdb/):
```sh
$ gdb --args ./hhvm path/to/script.php
```
There are a variety of support scripts in [tools/gdb](../../tools/gdb) to help
with inspecting many of HHVM's data structures; see the README in that directory
for more information on usage.
# Common Pitfalls
This section contains answers to questions that most people don't think to ask,
usually because they involve some non-obvious or surprising behavior in HHVM.
## `jit::Type`
If you spend any time in the JIT, you'll probably deal with the
[`jit::Type`](../../runtime/vm/jit/type.h) struct. [HHIR](../ir.specification)
is statically typed, so every value is tagged with a `Type` and every
instruction has a set of acceptable `Type`s for its inputs. If you're used to
types in languages like Java or C++, `Type` is much more complicated than
something like `int` or `Foo*`. We recommend reading about it
[here](jit-core.md#type-system) before writing any nontrivial HHIR code. You
don't have to memorize every single `Type`, but you should make sure you
understand the [usage guidelines](jit-core.md#usage-guidelines). |
Markdown | hhvm/hphp/doc/hackers-guide/glossary.md | # Glossary
This document contains a list of terms, abbreviations, and acronyms that you may
come across either in this guide or in the HHVM source code. Don't worry if none
of the definitions mean anything to you yet; it may make more sense to revisit
this page as needed once you've read some more of the guide.
## HHVM/Hack/PHP-specific
The terms in this section are specific to HHVM, Hack, or PHP. Concepts that
apply more generally are listed in the [next
section](glossary.md#general).
### APC
The [Alternative PHP Cache](http://php.net/manual/en/intro.apc.php) was
originally added to PHP as a cache for compiled bytecode, to reduce the overhead
of loading a program's source code with every request. It has since been
replaced, but HHVM still supports it as a process-global hash table, used to
share data between active requests and/or cache data to be reused by later
requests.
### BOLT
[BOLT](https://github.com/facebookincubator/BOLT) is a binary optimizer,
developed by Facebook to optimize the performance of large applications.
### FPI/FPI Region
Function parameter info and corresponding regions in the bytecode. Described in
detail in [bytecode.specification](../bytecode.specification).
### HackC
The Hack Compiler, HHVM's new frontend (as of mid-2018). A parser and bytecode
emitter written completely in OCaml.
### HHAS
HipHop assembly: a human-readable and -writable representation of HHBC, HHVM's
bytecode.
### HHBC
HipHop bytecode, HHVM's custom bytecode. Described in
[bytecode.specification](../bytecode.specification).
### HHBBC
HipHop bytecode-to-bytecode compiler. Our static analysis engine that performs a
number of offline optimizations on bytecode.
### HHIR
HipHop intermediate representation, the primary IR in HHVM's JIT. Described in
[ir.specification](../ir.specification).
### HPHPc
[HipHop for PHP](https://en.wikipedia.org/wiki/HipHop_for_PHP), a transpiler
from PHP to C++ and HHVM's predecessor. A large part of HHVM's runtime is
inherited from HPHPc.
### HNI
[HHVM native interface](https://github.com/facebook/hhvm/wiki/Extension-API).
HHVM's mechanism for calling native code from PHP (name inspired by [JNI](https://en.wikipedia.org/wiki/Java_Native_Interface)).
### RDS/Target Cache
The request data segment, defined in
[runtime/base/rds.h](../../runtime/base/rds.h) is a form of [thread-local
storage](https://en.wikipedia.org/wiki/Thread-local_storage), used to hold
information that can vary between PHP requests, like the definitions of classes
and functions. The target cache is a collection of request-local caches used
by the JIT that live in RDS.
### Repo/Repo authoritative
HHVM stores bytecode in a cache we call the [repo](../repo), short for
"repository". Repo authoritative is an execution mode in which HHVM assumes that
no code exists outside of what's already in the currently-loaded repo, allowing
much more aggressive optimizations.
### TC/Translation cache
The block of memory where HHVM stores JIT-compiled code.
### TCA
Translation cache address: a pointer to code or data in the translation cache.
### tc-print
Translation cache printer: a tool for inspecting the code generated by HHVM,
optionally annotated with performance events.
### Tracelet
A region of bytecode used as the basic unit of compilation in some modes of
HHVM's JIT. No larger than a basic block.
### vasm
Virtual assembly, our low-level intermediate representation. Positioned between
HHIR and machine code.
### XLS
Extended linear scan, the algorithm used by our [register
allocator](../../runtime/vm/jit/vasm-xls.cpp).
## General
If you're new to compilers, performance-sensitive applications, or just curious,
you may want to skim through the following terms:
- [ARM](https://en.wikipedia.org/wiki/ARM_architecture): A family of CPU architectures. In the context of HHVM: [AArch64](https://en.wikipedia.org/wiki/ARM_architecture#AArch64), the 64-bit version of ARM.
- [AST/abstract syntax tree](https://en.wikipedia.org/wiki/Abstract_syntax_tree)
- [Bytecode](https://en.wikipedia.org/wiki/Bytecode)
- [Basic block](https://en.wikipedia.org/wiki/Basic_block)
- **CFG**: [control flow graph](https://en.wikipedia.org/wiki/Control_flow_graph) or [context-free grammar](https://en.wikipedia.org/wiki/Context-free_grammar) (nearly always the former here).
- [Compilation unit/translation unit](https://en.wikipedia.org/wiki/Translation_unit_(programming))
- [COW/copy-on-write](https://en.wikipedia.org/wiki/Copy-on-write)
- [CPU cache](https://en.wikipedia.org/wiki/CPU_cache). Also d-cache, data cache, i-cache, instruction cache.
- [IR/intermediate representation](https://en.wikipedia.org/wiki/Intermediate_representation).
- [FP/frame pointer](https://en.wikipedia.org/wiki/Call_stack#Stack_and_frame_pointers).
- **ICF/identical code folding**: a compiler/linker optimization that detects and de-duplicates identical blocks of code.
- [TLB/iTLB/dTLB](https://en.wikipedia.org/wiki/Translation_lookaside_buffer)
- [Bytecode interpreter](https://en.wikipedia.org/wiki/Interpreter_(computing)#Bytecode_interpreters)
- [JIT/just-in-time compilation](https://en.wikipedia.org/wiki/Just-in-time_compilation)
- **Leaf function**: a function that calls no other functions; a leaf node in the call graph.
- [NUMA/non-uniform memory access](https://en.wikipedia.org/wiki/Non-uniform_memory_access)
- [PGO/profile-guided optimization](https://en.wikipedia.org/wiki/Profile-guided_optimization)
- [RSS/resident set size](https://en.wikipedia.org/wiki/Resident_set_size)
- [SP/stack pointer](https://en.wikipedia.org/wiki/Call_stack#Stack_and_frame_pointers)
- [x86](https://en.wikipedia.org/wiki/X86): A family of CPU architectures. In the context of HHVM: [x86-64](https://en.wikipedia.org/wiki/X86-64), the 64-bit version of x86. |
Markdown | hhvm/hphp/doc/hackers-guide/jit-core.md | # HHVM JIT
HHVM's just-in-time compiler module is responsible for translating sequences of
HipHop Bytecode ([HHBC](../bytecode.specification)) into equivalent sequences
of machine code. The code implementing the JIT lives in the namespace
`HPHP::jit`, in [hphp/runtime/vm/jit](../../runtime/vm/jit). Most file and
class names referenced in this document will be relative to that namespace and
path.
The basic assumption guiding HHVM's JIT is that while PHP is a dynamically
typed language, the types flowing through PHP programs aren't very dynamic in
practice. We observe the types present at runtime and generate machine code
specialized to operate on these types, inserting runtime typechecks where
appropriate to verify assumptions made at compile time.
## Region Selection
The first step in translating HHBC into machine code is deciding exactly which
code to translate, in a process called region selection. The size and shape of
regions depend on many factors, ranging from a single HHBC instruction up to an
entire function with complex control flow. There are a number of different ways
to select a region, all producing a `RegionDesc` struct in the end (defined in
[region-selection.h](../../runtime/vm/jit/region-selection.h)). `RegionDesc`
contains a list of `RegionDesc::Block` structs, and each `Block` represents a
basic block of bytecodes by holding a starting offset and length in
instructions. The list of `Block`s is kept sorted in reverse post order. Blocks
also contain optional metadata about the code they contain and the state of the
VM before, during, and after execution of that code. This metadata includes
type predictions, statically known call destinations, and certain
postconditions.
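Conceptually, the data produced by region selection looks something like this
(a simplified sketch; the real classes in
[region-selection.h](../../runtime/vm/jit/region-selection.h) carry much more
metadata and many accessors):

```cpp
// Simplified sketch of RegionDesc/Block as described above.
#include <memory>
#include <vector>

struct RegionDesc {
  struct Block {
    int offset;   // starting bytecode offset
    int length;   // number of bytecode instructions
    // plus optional metadata: type predictions, statically known call
    // destinations, and postconditions on the VM state
  };
  // Blocks are kept sorted in reverse post order.
  std::vector<std::shared_ptr<Block>> blocks;
};
```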
### Tracelet Region Selection
The first-gear region selector is the tracelet region selector. The name
"tracelet" is somewhat vestigal, and refers to a region of HHBC typically no
larger than a single basic block. This code lives in
[region-tracelet.cpp](../../runtime/vm/jit/region-tracelet.cpp), and its goal is
to select a region given only the current live VM state - most importantly the
current program counter and the types of all locals and eval stack slots. The
`translator` module (see the "HHIR" section) is used to simulate execution of
the bytecode using types instead of values, continuing until a control flow
instruction is encountered or an instruction attempts to consume a value of an
unknown type. These rules are not absolute; there are a number of exceptions. If
an unconditional forward `Jmp` is encountered, the tracelet will continue at the
`Jmp`'s destination. In addition, certain bytecodes (such as `PopC` and
`IsTypeC`) are able to operate on generic values and don't cause a tracelet to
terminate due to imprecise types.
### PGO
HHVM's JIT has support for profile-guided optimizations, or PGO. The code
supporting PGO is spread throughout many parts of the JIT. This section is a
high-level overview of the major pieces.
#### Profiling Regions
The tracelet selector has a special mode for selecting "profiling regions".
There are a few differences in profiling mode, indicated by code that inspects
`env.profiling` in
[region-tracelet.cpp](../../runtime/vm/jit/region-tracelet.cpp):
1. Only the very first instruction in a region is allowed to consume a
referenced value.
2. Certain bytecodes that normally wouldn't break a tracelet do. See the
`instrBreaksProfileBB()` function for the list.
3. All control flow instructions always end the region, including unconditional
`Jmp`s.
The purpose of these restrictions is to create much simpler regions that are
guaranteed to be a single `Block`, with no mid-region side exits due to
referenced values. These smaller regions also contain execution counters to
track the relative hotness of each basic block of bytecode.
#### Hotcfg Region Selection
After translating and executing profiling regions for a while, optimized
retranslation is triggered (the exact details of how and where this happens are
currently changing somewhat rapidly, so this sentence is intentionally vague
for now). Once retranslation is triggered, a number of "hot CFGs" are selected
for each function, by the code in
[region-hot-cfg.cpp](../../runtime/vm/jit/region-hot-cfg.cpp). A hot
[CFG](https://en.wikipedia.org/wiki/Control_flow_graph) is a `RegionDesc`
containing a non-strict subset of the profiling regions from a function,
stitched together into one large region. It may be a single straight-line path
through the hottest part of a function, or it may be the entire body of the
function, containing loops and other control flow. The exact shape of each hot
CFG depends on which parts of the function were executed during the profiling
phase.
## Region Translation
Regardless of the source of the `RegionDesc`, the translation process from this
point forward is the same. The `RegionDesc` containing blocks of HHBC is lowered
into HHIR, which is then lowered to vasm, and finally the vasm is lowered to
x86-64, arm64, or ppc64 machine code, depending on the architecture of the host
CPU. Each level of this pipeline has a number of associated optimization passes.
This section describes each lowering process and some of the optimizations.
## HHIR
Once a bytecode region has been selected, it is lowered into [HipHop
Intermediate Representation](../ir.specification), commonly referred to as
HHIR. HHIR is an
[SSA-form](https://en.wikipedia.org/wiki/Static_single_assignment_form),
strongly-typed intermediate representation positioned between HHBC and machine
code. A few different classes and modules are involved in this process:
* `irgen`: Declared in [irgen.h](../../runtime/vm/jit/irgen.h), this module is
used to convert the bytecode instructions from a RegionDesc into a sequence
of HHIR instructions. One `emitFoo()` function is defined for every HHBC
instruction. The implementations for these functions are grouped into
`irgen-*.cpp` files (e.g.
[irgen-basic.cpp](../../runtime/vm/jit/irgen-basic.cpp),
[irgen-arith.cpp](../../runtime/vm/jit/irgen-arith.cpp)).
* `IRGS`: Defined in [irgen-state.h](../../runtime/vm/jit/irgen-state.h), this
class contains all the state tracked during the irgen process. The two most
important pieces are `IRBuilder` and `IRUnit`:
* `IRBuilder`: Defined in [ir-builder.h](../../runtime/vm/jit/ir-builder.h),
this class tracks state during symbolic execution and performs some very
basic optimizations based on this state.
* `IRUnit`: Defined in [ir-unit.h](../../runtime/vm/jit/ir-unit.h), this class
is responsible for creating and storing the runtime data structures that
represent HHIR instructions, values, and blocks.
* `simplify`: Declared in [simplify.h](../../runtime/vm/jit/simplify.h), this
module is responsible for performing state-free optimizations such as
constant folding and propagation or anything else that only requires
inspecting an instruction and its sources.
### Type System
All values in HHIR have a type, represented by the `Type` class in
[type.h](../../runtime/vm/jit/type.h). A `Type` may represent a primitive type
or any arbitrary union of primitive types. Primitive types exist for
Hack-visible types such as `Int`, `Obj`, and `Bool`, and runtime-internal types
such as `FramePtr`, `Func`, and `Cls`. Primitive types also exist for PHP
references and pointers to PHP/Hack values: for each primitive PHP/Hack type
`T`, `BoxedT`, `PtrToT`, and `PtrToBoxedT` also exist. A number of types
commonly thought of as primitives are actually unions: `Str` is defined as
`{PersistentStr+CountedStr}` and `Arr` is defined as
`{PersistentArr+CountedArr}`. Predefined `constexpr Type` objects are provided
for primitive types and many common union types: simply prepend `T` to the name
of the type (so `TInt` represents the `Int` type, `TCell` represents the `Cell`
type, etc...).
In addition to arbitrary unions of primitive types, `Type` can also represent
constant values and "specialized" types. A constant `Type` may represent the
integer 5 (created with `Type::cns(5)`) or the string "Hello, world!"
(`Type::cns(makeStaticString("Hello, World!"))`), while a specialized type can
represent an object of a specific class (`Type::ExactObj(cls)`) or an array of a
specific kind (`Type::Array(ArrayData::kSomeKind)`).
Since types represent sets of values, we define relations on types in terms of
the sets of values they represent. Two types `S` and `T` are equal (`S == T`)
iff they represent equal sets of values. `S` is a subtype of `T` (`S <= T`) if
the set of values represented by `S` is a subset of the set of values
represented by `T`. `S` and `T` are not related if their intersection is the
empty set (also called `Bottom`).
As previously mentioned, types in HHIR represent a mix of Hack-visible types and
internal types. The following table describes types representing Hack values.
Note that the types used here are more specific than what can be discriminated
by user code (e.g., `StaticStr` and `CountedStr` both appear as type "string" at
the Hack level).
Type | HHVM representation
---------------|-------------------
Uninit | `KindOfUninit`
InitNull | `KindOfNull`
Null | `{Uninit+InitNull}`
Bool | `false=0`, `true=1` (8 bits at runtime)
Int | `int64_t` (64-bit two's complement binary integer)
Dbl | `double` (IEEE 754 64-bit binary floating point)
StaticStr | `StringData*` where `isStatic() == true`
UncountedStr | `StringData*` where `isUncounted() == true`
PersistentStr | `StringData*` `{StaticStr+UncountedStr}`
CountedStr | `StringData*` where `isRefCounted() == true`
Str | `StringData*` `{PersistentStr+CountedStr}`
\*Arr | `ArrayData*` (same variants as `Str`)
\*Vec | `ArrayData*` where `kind() == Vec`
\*Dict | `ArrayData*` where `kind() == Dict`
\*Keyset | `ArrayData*` where `kind() == Keyset`
UncountedInit | `TypedValue`: `{Null+Bool+Int+Dbl+PersistentStr+PersistentArr}`
Uncounted | `TypedValue`: `{UncountedInit+Uninit}`
Obj | `ObjectData*`
Obj<=Class | `ObjectData*` of the specified Class or one of its subclasses
Obj=Class | `ObjectData*` of the specified Class (not a subtype)
Cls | `Class*`
Func | `Func*`
Counted | `{CountedStr+CountedArr+Obj+BoxedCell}`
Cell | `{Null+Bool+Int+Dbl+Str+Arr+Obj}`
The VM also manipulates values of various internal types, which are never
visible at the PHP level.
Type | HHVM representation
---------------|--------------------
PtrToT | Exists for all T in `Cell`. Represents a `TypedValue*`
Bottom | No value, `{}`. Subtype of every other type
Top | Supertype of every other type
VarEnv | `VarEnv*`
NamedEntity | `NamedEntity*`
Cctx | A `Class*` with the lowest bit set (as stored in `ActRec::m_cls`)
Ctx | `{Obj+Cctx}`
RetAddr | Return address
StkPtr | Pointer into VM execution stack
FramePtr | Pointer to a frame on the VM execution stack
TCA | Machine code address
Nullptr | C++ `nullptr`
### Usage guidelines
We've observed some common misuses of `Type` from people new to the codebase.
They are described here, along with how to avoid them.
#### Comparison operators
Since a `Type` represents a set of values, the standard comparison operators on
`Type` perform the corresponding set operations:
- `==`, `!=`: Equality/inequality
- `<`, `>`: Strict subset/strict superset
- `<=`, `>=`: Non-strict subset/non-strict superset
One important consequence of this is that a [strict weak
ordering](https://en.cppreference.com/w/cpp/named_req/Compare) does not exist
for `Type` objects, which means `Type` cannot be used with algorithms like
`std::sort()`. Put another way, many pairs of `Type`s cannot be ordered: both
`TInt < TStr` and `TInt >= TStr` are `false`, for example. As long as you think
in terms of set comparisons and not numerical ordering, it should be fairly
intuitive.
To check if a value is of a certain type, you almost always want to use `<=`.
So, instead of `val->type() == TInt`, use `val->type() <= TInt`, or more
compactly, `val->isA(TInt)`. Using exact equality in this situation would give
unexpected results if `val` had a constant type, like `Int<5>`, or if we ever
added other subtypes of `Int` (with value range information, for example).
A related problem is determining when a value is *not* of a certain type. Here,
the difference between "`val` is not known to be an `Int`" and "`val` is known
to not be an `Int`" is crucial. The former is expressed with `!(val->type() <=
TInt)`, while the latter is `!val->type().maybe(TInt)`. Types like `{Str+Int}`
illustrate the difference between these two predicates: `((TStr | TInt) <= Int)
== false` and `(TStr | TInt).maybe(TInt) == true`.
#### Inner types
HHIR programs are in SSA form, so a value can't change once it has been defined.
This implies that the value's type also can't change, which requires extra
consideration when working with types that have inner types, like `PtrToFoo`.
The immutability of types also applies to these inner types, so when reading
from or writing to a `PtrToInt`, it is safe to assume that the pointee is always
an `Int`.
This may sound obvious, but there are some situations in which you may find
yourself wanting to construct a value with an inner type that can't be relied
on. Flow-sensitive type information should not be used to feed inner types,
especially runtime type guards. Much of HHBBC's type inference is
flow-insensitive, and that can safely feed inner types of pointers.
There is one small tweak to the rule "a value's type cannot change once
defined": if the pointee is destroyed, the pointer can't be safely dereferenced,
and the type doesn't have to be valid anymore. One example of this is an object
property that HHBBC says is always a `Dbl`. It's safe to create a `PtrToDbl`
pointer to the property, because the information from HHBBC is flow-insensitive.
But if the object is freed and a new object is allocated at the same memory
address, it's possible that this pointer will now point to something completely
different. This is fine, because dereferencing the pointer would be analogous to a
use-after-free bug in C++, resulting in undefined behavior.
### Values, Instructions, and Blocks
An HHIR program is made up of `Block`s, each containing one or more
`IRInstruction`s, each of which produces and consumes zero or more `SSATmp`
values. These structures are defined in
[block.h](../../runtime/vm/jit/block.h),
[ir-instruction.h](../../runtime/vm/jit/ir-instruction.h), and
[ssa-tmp.h](../../runtime/vm/jit/ssa-tmp.h), respectively.
An `SSATmp` has a type, an SSA id, and a pointer to the `IRInstruction` that
defined it. Every `SSATmp` is defined by exactly one `IRInstruction`, though one
`IRInstruction` may define more than one `SSATmp`. Every instruction has an
`Opcode`, indicating the operation it represents, and a `BCMarker`, which
contains information about the HHBC instruction it is part of. Depending on the
opcode of the instruction, it may also have one or more `SSATmp*` sources, one
or more `SSATmp*` dests, one or more `Edge*`s to other `Block`s, a `Type`
parameter (known as a `typeParam`), and an `IRExtraData` struct to hold
compile-time constants. `IRInstructions` are typically created with the
`irgen::gen()` function, which takes an `Opcode`, `BCMarker`, and a variadic
number of arguments representing the other properties of the instruction.
A `Block` represents a basic block in a control flow graph. A pointer to one
`Block` is stored in `IRUnit` as the entry point to the program; all other
`Block`s must be reached by traversing the CFG. Certain instructions are "block
end" instructions, meaning they must be the last instruction in their `Block`
and they contain one or more `Edge`s to other `Block`s. `Jmp` is the simplest
block end instruction; it represents an unconditional jump to a destination
block. `CheckType` is an example of an instruction with two `Edge`s: "taken" and
"next". It compares the runtime type of its source value to its `typeParam`,
jumping to "taken" block if the type doesn't match, and jumping to the "next"
block if it does.
While block end instructions may only exist at the end of a `Block`, there are
two instructions that may only exist at the beginning of a `Block`: `DefLabel`
and `BeginCatch`. `DefLabel` serves as a phi-node, joining values at control
flow merge points. `BeginCatch` marks the beginning of a "catch block", which
will be covered in more detail later.
### Control Flow
HHIR can represent arbitrary control flow graphs. In place of traditional
phi-nodes, HHIR uses `Jmp` instructions that take sources and pass them to
`DefLabel` instructions. Consider the following program that performs some
numerical computation:
```
B1:
t1:Int = ...
JmpZero t1:Int -> B3
-> B2
B2:
t2:Int = AddInt t1:Int 5
Jmp t2:Int -> B4
B3:
t3:Dbl = ConvIntToDbl t1:Int
t4:Dbl = MulDbl t3:Dbl 3.14
Jmp t4:Dbl -> B4
B4:
t5:{Int|Dbl} = DefLabel
...
```
After control flow splits at the end of B1, B2 and B3 each do their own
computation and then pass the result to the `DefLabel` at the join point,
B4. This is equivalent to the following phi-node: `t5:{Int|Dbl} = phi(B2 ->
t2:Int, B3 -> t4:Dbl)`
## Optimizations
Two types of basic optimizations are performed on each instruction by
`IRBuilder` as it is generated and added to its `Block`: pre-optimization and
simplification. Pre-optimization performs simple optimizations based on tracked
state in `IRBuilder`, such as replacing a `LdLoc` instruction with a use of an
already known value for that local variable. Simplification performs any
optimizations that only require inspecting an instruction and its sources. This
is primarily constant folding but also includes things like eliminating
`CheckType` instructions that are known at compile-time to succeed. Both
pre-optimization and simplification may return an already-defined value to use
instead of the new instruction, or they may generate one or more different
instructions to use in place of the original.
Once the initial translation pass is complete, a series of more involved
optimizations are run on the entire CFG. These are described in detail in
[jit-optimizations.md](jit-optimizations.md).
## Machine Code Generation
### Register Allocation
### Code Generation |
Markdown | hhvm/hphp/doc/hackers-guide/jit-optimizations.md | # HHVM JIT Optimization Passes
## Guard Relaxation
By default, every live program location read in a translation creates a type
guard. There are currently 10 different primitive types that can be guarded on,
so a translation with just 4 guards can have up to 10<sup>4</sup> unique
combinations of input types, leading to a combinatorial explosion in the number
of retranslations required to support all runtime type combinations. This is
bad news for both JIT code size and runtime performance - these retranslations
chain linearly to each other, so the time it takes to make it past the guards
is O(nTranslations) in the worst case. The runtime option
`Eval.JitMaxTranslations` limits the number of translations allowed per
`SrcKey`, and once this limit is hit any further retranslation requests will
result in a call out to the interpreter. This is almost always less desirable
than generating slightly suboptimal machine code in one or more of the
translations, allowing it to accept a variety of input types. The process we
use to determine which type guards can be loosened is called guard relaxation.
There are two parts to guard relaxation: value constraining and the relaxation
itself. Value constraining happens during the initial IR generation pass, and
is managed by the [IRBuilder](../../runtime/vm/jit/ir-builder.h) class. The
important methods in this process are `IRBuilder::constrainValue()`,
`IRBuilder::constrainLocal()`, and `IRBuilder::constrainStack()`. Whenever
the behavior of a sequence of HHIR depends on the types of one or more values,
the types of those values must be constrained using the constrain* methods
previously mentioned. Each takes the value to be constrained and how it should
be constrained. The possible constraints are defined in the `DataTypeCategory`
enum in [datatype.h](../../runtime/base/datatype.h), listed here in order of
ascending specificity:
* `DataTypeGeneric` indicates that the type of the value does not matter and is
equivalent to not constraining the value at all. This is most often used for
values that are simply moved from place to place.
* `DataTypeCountness` indicates that the only use of the value is to incref or
decref it. If the value's type is not refcounted, the type guard may be
relaxed to `Uncounted`.
* `DataTypeCountnessInit` is similar to `DataTypeCountness`, with the exception
that guards for the type `Uninit` will not be relaxed. This is most commonly
used for bytecodes like `CGetL`, where `Uninit` values cause a notice to be
raised while all other uncounted types are treated equally.
* `DataTypeSpecific` is the default constraint, and indicates that the guard
should not be relaxed past a specific `DataType`.
* `DataTypeSpecialized` indicates that in addition to requiring the value's
`DataType`, there is an additional type tag in the value that must be
checked. Currently this includes object classes and array kinds, used by
[MInstrTranslator](../../runtime/vm/jit/minstr-translator.cpp) to emit more
efficient machine code for some container objects.
The guard relaxation process often needs to track additional information beyond
a single `DataTypeCategory`. The `TypeConstraint` struct, defined in
[type.h](../../runtime/vm/jit/type.h) is used to hold this information. A
value's type constraint is typically specified by passing a `TypeConstraint`
(`DataTypeCategory` implicitly converts to `TypeConstraint`) to value accessor
methods like `HhbcTranslator::popC()` and `HhbcTranslator::ldLoc()`. The former
is used extensively and so its `TypeConstraint` parameter is optional,
defaulting to `DataTypeSpecific`.
`TypeConstraint`, when used with `DataTypeSpecialized`, also requires
additional information about what property of the type is desired. This
information can be specified using the `setWantArrayKind()` and
`setDesiredClass()` methods.
Note that **any** decisions made in the JIT based on a value's type must be
reflected in that value's type constraint. This includes situations where the
absence of code depends on a value's type, such as eliding refcounting
operations on non-refcounted values. Typically, this just means using
`DataTypeSpecific` and giving no further thought to guard relaxation. If,
however, the operation you are translating can tolerate less specific types,
use an appropriate type constraint and ensure that any HHIR opcodes emitted can
tolerate having the types of their inputs loosened.
When `IRBuilder` is instructed to constrain the type of a value, it walks up
the chain of instructions leading to the value's definition, looking for the
instruction that determined the value's type. Sometimes this instruction is an
opcode with a known output type, such as `ConcatStrStr` which always produces a
`Str`. In these cases nothing is constrained, since the value's type does not
depend on a guard. When the source of a value's type is found to be a guard
instruction such as `GuardLoc`, `GuardStk`, or `CheckType`, the current
constraint for that guard is tightened according to the new constraint. All
guards start as `DataTypeGeneric` and may only have their constraints
tightened. The constraints for all guards are stored in a `GuardConstraints`
object owned by `IRBuilder`.
Certain optimizations performed by `Simplifier` are disabled during the initial
IR generation pass. The best example of this pattern is the `IncRef`
opcode. When given an input value of an uncounted type, the IncRef can be
eliminated. However, if the input type of the instruction may be loosened by
guard relaxation, it must not be eliminated. The reoptimize pass, described
below, eliminates instructions that are still eligible for simplification after
guard relaxation runs.
Once initial IR generation is complete, guard relaxation is the first
optimization pass to run. This is to simplify the other passes: any
modifications of the IR performed before guard relaxation would have to be
careful to keep the `GuardConstraints` map updated, and the loosened types
produced by guard relaxation may affect the behavior of other passes. The
relaxation pass is implemented in `relaxGuards()` in
[guard-relaxation.cpp](../../runtime/vm/jit/guard-relaxation.cpp). It is a
fairly simple pass: for each guard instruction in the trace, its constraint is
looked up in the `GuardConstraints`. If the type of the guard is
more specific than is required by its constraint, the type is loosened. Some
guards will be loosened all the way to `Gen`; these guards will be eliminated
in the reoptimize pass. After loosening guard types as needed, one more pass
over the trace is performed to recompute any types derived from the modified
guards.
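The per-guard decision can be sketched as follows (the helper and container
names here are illustrative, not the real API):
```
// Sketch of relaxGuards()'s core loop.
for (IRInstruction* guard : guardsInTrace) {
  auto const tc      = guardConstraints.lookup(guard);  // tightened during IR gen
  auto const relaxed = relaxType(guard->typeParam(), tc.category);
  if (relaxed != guard->typeParam()) {
    guard->setTypeParam(relaxed);  // e.g. Int -> Uncounted, or all the way to Gen
  }
}
```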
Most code in the JIT doesn't have to care about guard relaxation. If you're
implementing an operation that is type agnostic (or can be made type agnostic
without bloating the code much), it may be a good candidate for something more
general than `DataTypeSpecific`. Be aware that guard relaxation's effect on the
generated code is almost always negative, so looser constraints should only be used in
situations where the benefits of having fewer translations for the current
`SrcKey` outweigh the increased complexity of the generated code.
## Dead code elimination |
Markdown | hhvm/hphp/doc/hackers-guide/memory-management.md | ## Memory management
### Reference counting
HHVM's user heap is managed primarily with reference counting. All types that
represent a user-visible value inherit from
[`HeapObject`](../../runtime/base/header-kind.h), which contains the value's
reference count, a `HeaderKind` that describes its type, some mark bits for use
by the garbage collector, and some padding bits available for type-specific
uses. `HeapObject`s are created with a reference count of 1, and this count is
incremented or decremented as references to it are created or destroyed.
When a `HeapObject`'s reference count goes to 0, it is destroyed.
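In simplified form, the discipline looks like this (a sketch; the real
declarations live in header-kind.h and the refcounting helpers elsewhere in the
runtime):
```
// Sketch of the refcounting discipline, not the actual HHVM layout.
struct HeapObjectSketch {
  int32_t count;   // reference count; starts at 1 when the value is created
  uint8_t kind;    // HeaderKind: which kind of heap value this is
  // ...mark bits and padding...
};

void incRef(HeapObjectSketch* obj) { ++obj->count; }

void decRefAndRelease(HeapObjectSketch* obj) {
  if (--obj->count == 0) {
    // No references remain: run the kind-specific destructor and free the
    // memory (details omitted in this sketch).
  }
}
```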
Two reference count values are special:
[`Uncounted`](https://github.com/facebook/hhvm/blob/HHVM-3.27/hphp/runtime/base/header-kind.h#L121)
values live longer than a single Hack request, and have their lifetime managed
by other mechanisms.
[`Static`](https://github.com/facebook/hhvm/blob/HHVM-3.27/hphp/runtime/base/header-kind.h#L122)
values are never freed once created. Together, these are called *persistent*
values, which is where the `KindOfPersistent*` `DataType` names come from.
Persistent values are allocated with `malloc()` and cannot refer to counted
values, which live in request-specific heaps and are allocated by HHVM's request
[memory manager](../../runtime/base/memory-manager.h).
### Garbage collection
HHVM also has a [mark-sweep garbage
collector](https://en.wikipedia.org/wiki/Tracing_garbage_collection) that runs
as a backup for reference counting. `HeapObject`s will be freed by the garbage
collector if they are not reachable from the request's roots, for example when
the only references to them are part of a reference cycle. The collector
primarily lives in [heap-collect.cpp](../../runtime/base/heap-collect.cpp) and
[heap-scan.h](../../runtime/base/heap-scan.h), with [scanning
code](https://github.com/facebook/hhvm/search?q=TYPE_SCAN_CUSTOM&unscoped_q=TYPE_SCAN_CUSTOM)
spread throughout various other files, situated with the types being scanned.
For information on annotations, see [type-scan.h](../../util/type-scan.h). |
Markdown | hhvm/hphp/doc/hackers-guide/README.md | # Hacker's Guide to HHVM
This directory contains documentation on the internal architecture of HHVM,
targeted at C++ developers looking to hack on HHVM itself. If you're a
Hack developer looking for documentation on using HHVM, that can be found
[here](https://docs.hhvm.com/).
HHVM is a [virtual
machine](https://en.wikipedia.org/wiki/Virtual_machine#Process_virtual_machines)
that executes [Hack](http://hacklang.org/) programs using a [bytecode
interpreter](https://en.wikipedia.org/wiki/Interpreter_(computing)#Bytecode_interpreters)
and a [JIT compiler](https://en.wikipedia.org/wiki/Just-in-time_compilation)
(the latter is vastly more complex, and will get much more airtime here).
[PHP](https://php.net) is also currently supported for historical reasons.
You should already be comfortable reading and writing C++ (specifically, HHVM is
written in [C++14](https://en.wikipedia.org/wiki/C%2B%2B14)), as well as
navigating around large codebases using
[grep](https://en.wikipedia.org/wiki/Grep),
[ctags](https://en.wikipedia.org/wiki/Ctags), or any similar tool of your
choice. Prior experience with compilers and runtimes will help but is not
strictly necessary.
## Code References
Since this guide is intended to help familiarize readers with the HHVM codebase,
it naturally contains a number of links to the code. Some of these links are to
the current version of a file, and others are to specific lines in specific
versions of files. If you find links of either kind that are out-of-date with
current `master` or are otherwise misleading, please let us know.
## Building HHVM
Instructions for building HHVM and running our primary test suite can be found
[here](https://docs.hhvm.com/hhvm/installation/building-from-source). The
initial build may take an hour or longer, depending on how fast your machine is.
Subsequent builds should be faster, as long as you're not touching core header
files.
## Architecture Overview
HHVM, like most compilers, is best thought of as a pipeline with many different
stages. Source code goes in one end, and after a number of different
transformations, executable machine code comes out the other end. Some stages
are optional, controlled by runtime options. Each stage is described in detail
in the articles listed below, but here's a quick-and-dirty overview:
Source code enters the compiler frontend and is converted to a token stream by
the [lexer](https://en.wikipedia.org/wiki/Lexical_analysis). The
[parser](https://en.wikipedia.org/wiki/Parsing#Computer_languages) reads this
token stream and converts that to an [abstract syntax
tree](https://en.wikipedia.org/wiki/Abstract_syntax_tree), or AST. This AST is
then converted to a stream of [bytecode
instructions](https://en.wikipedia.org/wiki/Bytecode) by the bytecode emitter.
Everything up to this point is written in OCaml; the rest of HHVM is written in
C++ and assembly.
After the bytecode and associated metadata are created, our bytecode optimizer,
HHBBC, is optionally run. Finally, the bytecode, optimized or not, is stored
into a `.hhbc` file we call a "repo".
If the frontend was invoked directly by HHVM, the bytecode also lives in-memory
in the `hhvm` process, and execution can begin right away. If the frontend was
invoked as an ahead-of-time build step, the bytecode will be loaded from the
repo by `hhvm` when it eventually starts. If the JIT is disabled, the bytecode
interpreter steps through the code one instruction at a time, decoding and
executing each one. Otherwise, the JIT is tasked with compiling the bytecode
into machine code.
The first step in the JIT is region selection, which decides how many bytecode
instructions to compile at once, based on a number of complicated heuristics.
The chosen bytecode region is then lowered to HHIR, our primary [intermediate
representation](https://en.wikipedia.org/wiki/Intermediate_representation). A
series of optimizations are run on the HHIR program, then it is lowered into
vasm, our low-level IR. More optimizations are run on the vasm program, followed
by register allocation, and finally, code generation. Once various metadata is
recorded and the code is relocated to the appropriate place, it is ready to be
executed.
## Getting Started
If you're not sure where to start, skimming these articles is a good first step:
* [Directory structure](directory-structure.md)
* [FAQ](faq.md)
* [Glossary](glossary.md)
## HHVM Internals
The articles in this section go into more detail about their respective
components:
* [HHBC spec](../bytecode.specification)
* Frontend
* Parser
* Emitter
* HHBBC
* ...
* VM Runtime
* [Core data structures](data-structures.md)
* [Hack-visible](data-structures.md#hack-visible-values)
* [Datatype, Value, and TypedValue](data-structures.md#datatype-value-and-typedvalue)
* [ArrayData](data-structures.md#arraydata)
* [StringData](data-structures.md#stringdata)
* [ObjectData](data-structures.md#objectdata)
* [Smart pointer wrappers](data-structures.md#smart-pointer-wrappers)
* [Runtime-internal](data-structures.md#runtime-internal-data-structures)
* [Unit](data-structures.md#unit)
* [PreClass/Class](data-structures.md#preclass-and-class)
* [Func](data-structures.md#func)
* [Memory management](memory-management.md)
* Execution Context
* [Bytecode interpreter](bytecode-interpreter.md)
* Unwinder
* Treadmill
* Debugger
* ...
* JIT Compiler
* [Core concepts](jit-core.md)
* [Optimization passes](jit-optimizations.md)
* ... |
Markdown | hhvm/hphp/doc/hands-on/index.md | Welcome to HHVM! This section of our documentation is a hands-on guide for
people who want to learn about compiler and runtime internals. The goal is to
work through making, testing, and measuring a sample optimization to our
compiler. You'll make the most out of this guide if you work through the
exercises as you read it.
[Lesson 0: Building and running HHVM](lesson0.md)
[Lesson 1: Understanding Hack runtime types](lesson1.md)
[Lesson 2: Understanding Hack bytecode](lesson2.md)
[Lesson 3: Refcounting and value types](lesson3.md)
[Lesson 4: Making an IR optimization](lesson4.md) |
Markdown | hhvm/hphp/doc/hands-on/lesson0.md | # Lesson 0: Building and running HHVM
## Lesson goals
* Understand what HHVM is and what it can be used for
* Set up a devserver and build HHVM
* Run HHVM on a small standalone Hack file, and examine its output
---
## Step 0: Starting a build
Kick off the following steps before proceeding through the guide. They take
time, so we want to get them started now.
> As performance engineers, we're always looking for ways to optimize our lives =)
1. [Consult the main documentation for system requirements.](https://docs.hhvm.com/hhvm/installation/building-from-source)
2. [Check out the HHVM repository.](https://docs.hhvm.com/hhvm/installation/building-from-source#downloading-the-hhvm-source-code)
3. [Kick off a build of HHVM.](https://docs.hhvm.com/hhvm/installation/building-from-source#building-hhvm)
---
## What is HHVM?
HHVM is a compiler and runtime for the dynamically-typed language Hack.
Hack is a variant of PHP. Like PHP, Hack is a good language for writing the
backend of a web application. Large websites are written in Hack, including
[facebook.com](https://www.facebook.com/).
## What are our goals for HHVM?
We'd like to improve HHVM in two main ways:
1. **Better performance:** By optimizing the machine code that HHVM generates,
we can realize significant capacity wins for people who are deploying code
and websites built in Hack.
2. **Language improvements:** We work with the Hack language team to improve
the language itself. These improvements include changes that make Hack
simpler and safer (e.g. [Hack Array Migration](https://hhvm.com/blog/10649/improving-arrays-in-hack))
as well as new language features (e.g. [readonly](https://hhvm.com/blog/2022/02/22/announcing-readonly.html)).
> Performance wins in HHVM can save our users lots of money. That's why there
> are so many $$$ in Hack code!
## How does HHVM work?
HHVM has two strategies for executing Hack code:
* [An interpreter:](https://en.wikipedia.org/wiki/Interpreter_(computing))
basically, a while loop that executes Hack code step-by-step.
* [A just-in-time (JIT) compiler:](https://en.wikipedia.org/wiki/JIT_compilation)
a compiler that can compile a whole sequence of Hack code into optimized
machine code, at runtime. Most webservers running HHVM use the JIT to compile
code while serving web requests.
> Let's say that one more time, because it's a bit strange and magical! **HHVM
> is a JIT compiler, which means that it can compile Hack code while executing
> that code.** We'll discuss this idea at length later.
HHVM's interpreter and JIT are interoperable; to execute a given web request,
we might run some JIT-ed code for some Hack functions and use the interpreter
for others. JIT-ed code is typically 10-50x faster than interpreted code. Our
aim is to maximize overall performance, measured as webserver throughput. To
do so, we preferentially JIT the "hottest" (most expensive) Hack functions.
In typical usage, we may only JIT a small fraction (say 1%) of a web codebase,
but that may be enough coverage for us to spend ~99% of our time in JIT-ed code
and only ~1% of time in the interpreter.
---
## Step 1: Running HHVM
Since you've completed step 0 already ;) you should have an HHVM binary sitting
in your build directory. Let's poke it. Here's what mine says if I ask for its
version. Yours will have a different compiler ID, repo schema, etc. The
compiler ID should match the git hash at which you compiled HHVM:
```
$ hg log -r . -T{node}
e0e849921332a9ccacf5d06a6ad05b87bc0ae0ba
$ ./build/hhvm --version
HipHop VM 4.141.0-dev (dbg) (lowptr)
Compiler: default-0-ge0e849921332a9ccacf5d06a6ad05b87bc0ae0ba
Repo schema: 3eabd3c8d14119f88cbf4ade871a893128d72200
```
Let's compile and run a simple PHP file. Create a directory called ~/php -
that's where all of our code examples will live! - and then put the following
code into ~/php/hello-world.php:
```
<?hh
<<__NEVER_INLINE>>
function add($a, $b) {
return $a + $b;
}
<<__EntryPoint>>
function main() {
var_dump(add(7, 10));
var_dump(add(7.0, 10.0));
}
```
We're going to use a script called "hhvm_wrapper" to run HHVM on this file.
This script is unnecessary for simply running a file, but it makes it much
easier to run HHVM in various compilation modes that mimic its production
behavior, and to see debugging output. As a result, we will **always use
hhvm_wrapper** in our examples. Here's a simple invocation:
```
$ hphp/tools/hhvm_wrapper.php ~/php/hello-world.php
int(17)
float(17)
```
Okay, that's good. HHVM can do simple arithmetic correctly...phew!
What's more interesting about this example is that we called add once with two
integers, and once with two floating-point values. Integers and floats have
completely different representations at the hardware level, and we need to use
different machine code instructions (and even different arithmetic circuitry)
to do math on these two types. And yet, HHVM somehow compiles machine code for
"add" that works for both cases. How does that work?
Well, what we do is...
...
...
...
...
...
...
...
...
...
...
...
...
...
...
...
...
Nope! The answer to that question is way beyond the scope of this lesson.
---
## Step 2: Down the rabbit hole
What we can offer you, for now, is a glimpse into what HHVM is doing behind the
scenes. Let's run hhvm_wrapper in two more modes. First off, let's look at
HHVM's bytecode representation. We'll add the flag `--hdf Eval.DumpHhas=1` to
enable bytecode output. This flag disables code execution, so we'll only see
the bytecode, not the results:
```
$ hphp/tools/hhvm_wrapper.php --hdf Eval.DumpHhas=1 ~/php/hello-world.php
```
There's no guarantee that you'll get exactly what I got, but if you focus on
the bytecode for the "add" function, you might see something like the following
output:
```
.function{} ["__NEVER_INLINE"("""v:0:{}""")] (4,6) <"" N > add($a, $b) {
.srcloc 5:15,5:16;
CGetL $b
CGetL2 $a
Add
.srcloc 5:3,5:17;
RetC
}
```
We're doing something with locals $a and $b - in a funny order! - and then
we're executing the "Add" bytecode. That makes sense, right? The most important
takeaway from this small test case is that Hack bytecode is untyped. When we
compile Hack source code to bytecode, we don't know the types of Hack values,
so we don't emit type-specific code. The "Add" bytecode takes two inputs and
does "whatever + does in Hack" for those inputs.
> Hack is a dynamic language, and we can write functions - like this "add"
> function, or like "Vec\map" - that operate on many different types of inputs.
> As a result, **Hack bytecode (usually) does not contain type information.**
Now, let's take a look at the next steps of compilation: an intermediate
representation (IR) and the resulting machine code. Let's add a few flags to
our original command:
* `TRACE=printir:1` will cause us to print both IR and machine code.
* `-r 2` will make HHVM run the code twice, then recompile the code in a more optimized way.
* `| less -RN` is just a Unix pager for this output. There's gonna be a lot of output!
```
$ TRACE=printir:1 hphp/tools/hhvm_wrapper.php -r 2 ~/php/hello-world.php | less -RN
```
Don't be intimidated by the volume of output here! Focus on the colors. Aren't
they pretty?
Okay, let's examine a tiny piece of this code. Search for "TransOptimize" in
this output. (In less, you can use "/" to search. That means you should type
"/TransOptimize", then press Enter. Then, you can type "n" to jump to the next
instance of that string, and "N" to jump back to the previous one.)
We're searching for "TransOptimize" because we're only looking for optimized
code generation; unoptimized output looks very, very different. The first
optimized function you'll find is probably "main". You can tell where you are
because the function is labeled (in purple, for me) near that header - I see
"Function main" (after the digraph output). Keep jumping to the next instance
of "TransOptimize" until you find one for "add". There should be two compiled
sections for this function - one for integers, and one for floats!
> HHVM's JIT automatically specializes functions for the most common input
> types to that function.
Can you tell which compilations of "add" correspond to integers, and which to
floats? Hints:
* Look at the "CheckLoc" - that is, "check a local's type" - operations that appear early in each compilation of "add".
* Remember that "Dbl", or "double", is a 64-bit float.
---
## Lesson summary
* HHVM is a just-in-time or JIT compiler for the dynamically-typed programming language, Hack.
* A JIT compiler can compile code at runtime, and can choose which functions to compile and which to interpret.
* You can use hhvm_wrapper to execute a given Hack file, or to look at intermediate steps of compiler output.
---
## Exercises
Let's modify our "hello-world.php" script to investigate Hack bytecode and HHVM's IR.
1. If we change "add" to "return $a + 2 * $b", how does its bytecode change?
Which arithmetic operation is done first?
2. With this modified "add" function, how do we implement the "times 2" bit for
integer $b? Look at the printir!
3. Suppose, instead, we modify main to include these two additional lines. How
many TransOptimize compilations do we now generate for the "add" function?
Does this number make sense? Why?
```
var_dump(add(7, 10.0));
var_dump(add(7.0, 10));
```
Now, let's go back to the original version of "hello-world.php", and look at
the printir trace for "main", instead.
1. How many function calls do we make in the TransOptimize compilation for "main"?
2. If we remove the `<<__NEVER_INLINE>>` hint on "add", how does the
TransOptimize compilation for "main" change? How many function calls do we
make there now? |
Markdown | hhvm/hphp/doc/hands-on/lesson1.md | # Lesson 1: Understanding Hack runtime types
## Lesson goals
* Learn about the most common user-visible types in Hack.
* See how HHVM uses a "TypedValue" to represent any Hack value.
* Modify the interpreter to treat "+" as the operator for string concatenation!
---
## Step 0: Pull, update, and re-compile HHVM
If you've been following along, you should have [set up your environment](lesson0.md)
in the prior lesson. If not, do so now.
Since it takes quite a while for HHVM to compile, you should generally kick
off a build whenever you plan to edit its code, before making the edits. We're
going to modify the interpreter in this lesson, so start a build now.
---
## Step 1: User-visible Hack types
Hack is a dynamically-typed language. Most Hack values will be one of the
following types:
* null
* bool
* int
* float
* string
* vec
* dict
* keyset
* object
Let's write and execute a small Hack program that uses these types. Save the
following as ~/php/runtime-types.php:
```
<?hh
class C { function __construct() {} }
function show($x) {
$repr = json_encode($x);
if (!$repr) $repr = '<unserializable>';
print('$x is a '.gettype($x).': '.$repr."\n");
}
<<__EntryPoint>>
function main() {
show(null);
show(true);
show(17);
show(0.5);
show('hello');
show(vec[true, 17]);
show(dict['a' => true, 'b' => 17]);
show(keyset[17, 'hello']);
show(new C());
}
```
As always, we'll run this code with hhvm_wrapper:
```
hphp/tools/hhvm_wrapper.php ~/php/runtime-types.php
```
Now, let's try to see if there are any other types. Modify the file to call
"show" on the following additional values:
```
show(shape('a' => true, 'b' => 17)); // A shape
show($x ==> $x * $x); // A closure
show(show<>); // A function pointer
```
Which of these values have new runtime types?
At this point, you may be confused. Hack - that is, the "hh" type checker -
treats shapes and dicts as different types, but as the above examples show,
they have the same type at runtime. To explain what's going on, we need to look
at how this runtime type system differs from the Hack type system.
---
## Runtime types vs. the Hack type system
So far, we've been talking about the type of a value at runtime. That's related
to, but NOT the same as, the type of the value in the Hack type system! Here's
why a runtime type and a type system's type annotation are different:
* A type annotation constrains what a value **could be**, but a runtime type
tells us what the value **actually is**.
* A type annotation can include union types, like Hack's "arraykey" (int | string),
"num" (int | float), or even "mixed" (union of everything!), but at runtime,
any given value must have some specific runtime type.
Let's consider an example. If a parameter $x is typed as an arraykey, then at
runtime, any given value for $x is EITHER an int OR a string. It doesn't make
any sense for it to be both! At the level of the type system, though, we're
just putting constraints on $x, so "arraykey" does make sense as a type
annotation.
To put it yet another way: the "mixed" type annotation means "no constraint".
**No value is "mixed" at runtime!**
Finally, there's an important fact that's specific to the Hack type system: it
is unsound. That means that there's no guarantee that a given Hack type
constraint for a value will actually match the value's type at runtime. We can
use the Hack type system to identify likely bugs, but we cannot assume that all
of its type annotations are correct. Later on, we'll see that HHVM's JIT
includes an alternate, sound type system, and that this type system is
essential to generating efficient machine code!
> A "sound" type system provides some kind of guarantee about runtime behavior.
Typically, the guarantee is that if the whole program type-checks, then when it
is executed, the runtime type of every value will match its type annotation.
One reason that the Hack type system is unsound is that it includes HH_FIXME
and UNSAFE_CAST as "escape hatches" that allow you to ignore type errors. There
are other reasons, too.
For now, we're going to completely ignore Hack's type system. Type annotations
are optional; HHVM can execute Hack code without any type annotations. But as
we've learned, HHVM still tracks the runtime type of the values it operates on!
---
## Step 2: Representing Hack values
At runtime, HHVM needs a way to represent a Hack value that can be any of
Hack's dozen-or-so runtime types.
We use the classic "C-style" solution to this problem: a tagged union.
Let's consider this idea in C-like pseudocode:
```
enum DataType : uint8_t {
KindOfNull = 0,
KindOfBool = 1,
KindOfInt64 = 2,
...
KindOfObject = 8
};
struct Value {
union {
// Null doesn't need a value
bool m_bool;
int64_t m_int64;
...
ObjectData* m_obj;
};
};
struct TypedValue {
Value m_data;
DataType m_type;
};
```
Basically, this code says that every Hack value is represented as a TypedValue
struct. This struct has two fields: a "tag" byte called DataType, which tells
us what kind of value the TypedValue contains, and the Value, which contains
*overlapping* storage for each of the different kinds of values. (The "union"
keyword means: "the following fields occupy the same location in memory".)
The first thing we should note about this kind of code is: **this kind of C++
data structure is not safe.**
If we read or write to m_data using the wrong interpretation of its union
field, we might accidentally read and use an int64_t value as an ObjectData*
pointer. In doing so, we can arbitrarily corrupt the heap memory of our
runtime!
> Folks like to say that bugs like this one could result in [nasal
> demons](http://catb.org/jargon/html/N/nasal-demons.html), but in practice,
> the most common outcome is that we produce incorrect results for some Hack
> functions, then segfault soon afterwards =)
On the HHVM team, we put up with unsafe practices like the above because it's
the easiest way to express the low-level behavior that we want the machine to
execute. JIT compilation is inherently unsafe, since any bug in the compiled
output is going to produce incorrect behavior that's worse than pretty much any
bug in regular C++ code.
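To stay on the safe side of that union, runtime code always dispatches on the
tag before touching `m_data`. Here's a sketch using the pseudocode structs
above (the `describe()` helper is purely illustrative):
```
// Only read the union member that matches m_type.
const char* describe(const TypedValue& tv) {
  switch (tv.m_type) {
    case KindOfNull:  return "null";
    case KindOfBool:  return tv.m_data.m_bool ? "true" : "false";
    case KindOfInt64: return tv.m_data.m_int64 < 0 ? "a negative int"
                                                   : "a non-negative int";
    default:          return "something else (possibly heap-allocated)";
  }
}
```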
Now, let's look at our actual implementation of this idea. It appears in two files:
* [runtime/base/datatype.h](../../runtime/base/datatype.h)
* [runtime/base/typed-value.h](../../runtime/base/typed-value.h)
Take a look through these files. Do you see the "DataType" enum? What about
"struct TypedValue", in the latter file?
---
## Step 3: Operating on Hack values
Now, we're going to look at the simplest possible way to operate on these Hack
values.
As we saw in the previous lesson, HHVM can execute an "Add" bytecode to do
"whatever Hack + does" for two arbitrary Hack values! Like all operations in
HHVM, this "Add" bytecode is implemented in both the interpreter and the JIT.
In the interpreter, the implementation of "Add" is as simple as it gets:
1. We pop two inputs from the stack. (Our bytecode assumes that we're using a "stack machine".)
2. We check the input types. Today, "Add" only works on "numeric" - int or float - inputs. We throw on other types.
3. We switch over each of the valid input types, and implement the addition logic for each one.
4. We push the resulting TypedValue output back onto the stack.
Now, Hack already has a string concatenation operator: ".". But perhaps you've
wondered: why not use "+" for string concatenation in Hack, as other languages
such as JavaScript and Python do? Let's make it happen!
The logic for the interpreter is in the following file:
* [runtime/vm/bytecode.cpp](../../runtime/vm/bytecode.cpp)
Search in this file for a function called "iopAdd". This function implements
the logic above. Read it, and see how it works. You should find that it uses a
generic helper to implement the "pop 2 inputs from the stack and push 1 output"
logic. This generic helper takes the binary operation to perform on the two
inputs as a parameter, and for "iopAdd", that operation is "tvAdd", which is in
this file:
* [runtime/base/tv-arith.cpp](../../runtime/base/tv-arith.cpp)
Once you've read enough of these functions to have a basic understanding,
modify them so that tvAdd also concatenates two string inputs! Use the
definition of the TypedValue, above, and the API for strings in this file to
help you out:
* [runtime/base/string-data.h](../../runtime/base/string-data.h)
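To give you a sense of the shape of the change, here's a sketch of the new case
(not a complete patch: `c1` and `c2` stand for the two input cells, and the
exact concatenation helper is an assumption; check string-data.h for the real
API):
```
// Sketch: handle two string inputs before reaching the "throw" path.
if (isStringType(c1.m_type) && isStringType(c2.m_type)) {
  auto const s1 = c1.m_data.pstr;
  auto const s2 = c2.m_data.pstr;
  // Assumed helper: build a new string holding s1's characters then s2's.
  StringData* result = StringData::Make(s1, s2->slice());
  return make_tv<KindOfString>(result);
}
```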
After making these changes, recompile HHVM. Remember that your implementation
ONLY modifies the behavior of the interpreter - NOT the JIT! That means that
HHVM will have two different behaviors based on whether we are interpreting or
compiling a piece of Hack code. That's not good, but for now, we can still test
this interpretation by running HHVM with the JIT disabled. Put the following
test case into ~/php/concat.php:
```
<?hh
function add($x, $y) {
return $x + $y;
}
<<__EntryPoint>>
function main() {
var_dump(add(17, 34));
var_dump(add(17.0, 34.0));
var_dump(add("17", "34"));
}
```
Then run:
```
hphp/tools/hhvm_wrapper.php -i ~/php/concat.php
```
If you've done this step, you should see the results 51, 51.0, and "1734"
printed to stdout!
---
## Lesson summary
- Hack is a dynamically-typed language. At runtime, a Hack value has some
specific runtime type.
- A "type system" constrains what types a value may have. A type system can be
"sound", meaning that its claims about types are guaranteed to hold for
values at runtime.
- The Hack type-checker's type system is unsound. HHVM uses a separate, sound
type system for JIT compilation.
- HHVM represents Hack values as a tagged union: a 1-byte DataType enum, which
can be used to interpret an 8-byte Value. The Value may be a pointer (e.g.
strings, objects), an int or bool value, or unused (e.g. for null).
- In the interpreter, we implement Hack bytecode by doing casework on the value
of this DataType.
---
## Exercises
1. What "secret" DataTypes exist in HHVM but are invisible to Hack? What do you think "KindOfUninit" is?
2. Extend your modified tvAdd so that, if at least one of the two inputs is a string, it uses the logic for Hack "string casts" (the syntax: `(string)$x`) to cast the other input to a string, then concatenates the results.
1. String casts have an associated bytecode, "CastString". That means that you can search bytecode.cpp for "iopCastString" to see how string cast is implemented, and share that logic.
2. Test your work by modifying the file to use "add" to concatenate a string and an int, or a string and a float. Does it work? If so, congratulations: **you have just implemented dime-store JavaScript.**
3. Enable the JIT again. How do the results of the "concat.php" file change?
1. You may be surprised to see that even with the JIT enabled, we still use "+" for string concat - or do we?
2. Enable the JIT and also enable the "TRACE=printir:1" debug output from the previous lesson. Look at how many times we compile "add", and for which types. Does the string version of "add" explain your results?
4. In C++, we can ask for the "sizeof" of a given struct, in bytes. Take a look at this example, where we use static_assert (checked at compile time) to check that different types have the sizes we expect: https://godbolt.org/z/bP4xTPMPq
1. Why is a DataType 1 byte?
2. Why is a Value 8 bytes? Does it matter that a bool can fit in 1 byte?
3. How large is a TypedValue? Add static_assert(sizeof(TypedValue) == ...); to confirm.
4. Why is a TypedValue that large? (Hint: read [http://www.catb.org/esr/structure-packing/](http://www.catb.org/esr/structure-packing/)) |
Markdown | hhvm/hphp/doc/hands-on/lesson2.md | # Lesson 2: Understanding Hack bytecode
## Lesson goals:
* Learn how we evaluate Hack expressions using a stack machine.
* See how we implement operators, function calls, and array and object constructors in bytecode.
* Consider how alternative bytecode sequences could result in performance improvements.
* Write Hack code that compiles to specific bytecodes by [reading our documentation](../bytecode.specification).
---
## Step 0: Pull, update, and re-compile HHVM
As in the [previous lessons](lesson0.md), let's start by re-compiling HHVM as
we review some concepts.
---
## Bytecode for a stack machine
Hack bytecode is a linear sequence of instructions that implements the logic of
a Hack program. The main challenge of compiling Hack source code to bytecode is
to flatten an abstract syntax tree (AST) into this form. Compiling for a stack
machine is one natural way to do this flattening.
Let's work through an example. Here's the Hack expression that we're going to
compile to bytecode:
```
$a * f($b, 2, 'x') + $c
```
We're going to assume that we have a working parser for this language, so we
can parse this expression into a tree format. Furthermore, we're going to
assume that this parser handles order of operations for us, so that the root
node of this tree is the "add" operation, even though that operation comes
second. (Note that the root node of the tree is the **last** expression
evaluated!) Here's an AST for this expression:
```
           Add
          /   \
        Mul    $c
       /   \
     $a   Call: f
         /   |   \
       $b    2   'x'
```
Our goal is to produce Hack bytecode that will push the result of an expression
on the "stack". Here, the stack is an abstract construct of HHVM's virtual
machine. It does not necessarily correspond to a computer's native stack, and
depending on how we compile a function to machine code, we may even optimize
away all access to this stack! However, we can define HHVM's behavior in terms
of this abstract stack.
Let's call the function that produces bytecode for an AST node
"CompileToBytecode". We can implement this function recursively. In pseudocode,
that might look like:
```
def CompileToBytecode(ast_node, bytecode):
    if not ast_node.children:
        bytecode.push(CompileLeaf(ast_node.leaf))
        return
    for child in ast_node.children:
        CompileToBytecode(child, bytecode)
    bytecode.push(CompileOp(ast_node.op))
```
> It's funny: whenever I say I'm going to write some pseudocode, what comes out
> is valid Python.
## Evaluating leaf nodes
Let's examine this pseudocode. First off, note that there's a distinction
between "leaf" and "internal" nodes – the operands ($a, $b, 2, 'x', $c) and the
operation nodes (Mul, Call, Add) in our diagram above, respectively. Leaf nodes
are primitive expressions that don't take any
stack inputs. $a is a leaf node, because pushing $a on the stack is
accomplished with the CGetL bytecode:
```
CGetL <Local ID>: push the local with ID <Local ID> onto the stack
```
CGetL does take an input—the ID of the local to push—but this input is a
constant, so it's **part of the bytecode**, not a stack input! In the compilers
world, we often refer to the constant inputs of a low-level instruction as
"immediates". CGetL is a bytecode that takes one immediate - a local ID - and
no stack inputs, and pushes that local onto the stack.
The constant expressions 2 and "x" can similarly be evaluated by bytecodes with
no stack inputs. Again, these bytecodes have immediates - the constant integer
and the constant string to push, respectively. We have:
```
Int <constant int>: push this integer onto the stack
```
and:
```
String <constant string>: push this string onto the stack
```
These three bytecodes - CGetL, Int, and String - are sufficient to cover all
the leaf nodes in our example. Now, let's move onto the internal nodes. The
second half of our CompileToBytecode deals with these nodes.
## Evaluating internal nodes
We've already seen an example of an internal node in the previous lesson: the
"Add" bytecode. This bytecode pops its two inputs from the stack, does
"whatever + does in Hack" for those two inputs, and pushes the result onto the
stack. To evaluate `(expr_1) + (expr_2)` it suffices to recursively evaluate
the two expressions (putting each result on the stack), then execute an "Add"
bytecode.
"Add" and "Mul" are simple examples of internal nodes, in that they consume a
fixed number of stack inputs and take no immediates. Other nodes introduce a
few wrinkles to this basic pattern:
1. Some nodes take immediates. For example, "IsTypeC" (used to evaluate `(expr)
is TYPE`) takes one input—the expression—from the stack, but takes the type
as the immediate.
2. Some nodes take a variable number of arguments from the stack. For example,
"NewVec <constant int>" (used to evaluate `vec[(expr_1), (expr_2), ...
(expr_N)]`) takes the number of inputs `N` as an immediate, pops `N` values
from the stack, and pushes the vec constructed from those values.
3. Some nodes take additional, hidden stack inputs in addition to the syntactic
inputs from the original Hack source.
We'll examine a function call node—the "Call" node in our example above—which
includes all of the complexity above. There are several bytecodes that deal with
function calls in Hack. We support calls to free functions, instance methods,
and class methods, with static and dynamic versions of each, so we need at
least six function call bytecodes. There are several immediates used by all of
these cases, so we package them all up into "FCallArgs":
```
struct FCallArgs {
uint32_t numInputs;
uint32_t numOutputs;
// Other fields for async calls, inout args, coeffects, etc.
};
```
The two most important bits in this struct are the "numInputs" and "numOutputs"
fields, which tell us how many arguments the call bytecode will pop from the
stack, and how many return values it will push. (We model functions with inout
arguments as pushing those results as additional return values, but for any
"normal" function call without such arguments, numOutputs will equal 1.)
The remaining immediates are specific to each type of function call. They
identify which function is being called. Hopefully, the fact that we have
multiple bytecodes for calls makes sense, because we must take different inputs
to identify the "receiver" of free functions, instance methods, and class
methods:
* A free function is identified by its string name.
* An instance method is identified by an object to call the method on and the name of the function to call.
* A class method is identified by a class pointer to call the method on and the name of the function to call.
The bytecode for a static call to a free function, FCallFuncD, takes two
immediates: FCallArgs, and a constant string function name. It has the
following signature:
```
FCallFuncD <FCallArgs> <constant string>:
Call the function with the given string name, popping FCallArgs.numInputs arguments
from the stack and pushing FCallArgs.numOutputs return values to it.
```
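Concretely, for the call `f($b, 2, 'x')` in our running example, the FCallArgs
immediate (using the simplified struct above) would carry:
```
FCallArgs args;
args.numInputs  = 3;  // $b, 2, and 'x' are popped from the stack as arguments
args.numOutputs = 1;  // no inout parameters, so exactly one return value is pushed
```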
The final wrinkle for these function calls is that they require—for now!—two
stack inputs before the syntactic arguments to the function. These inputs only
serve to pad the stack in memory with enough space for a [machine-level
function call frame](https://en.wikipedia.org/wiki/Call_stack#Structure) (also
called an "activation record" or "ActRec").
> The fact that a call takes two hidden stack inputs is contingent on the
> current size, in bytes, of [HHVM's in-memory representation of an ActRec](../../runtime/vm/act-rec.h).
> In particular, stack slots occupy 16 bytes in memory—each one is a
> TypedValue!—and an ActRec occupies 32 bytes. It's rare for Hack bytecode to
> be sensitive to low-level implementation details, but in this case,
> unfortunately, it is.
To handle these hidden inputs, we have to modify our pseudocode above to
account for them in its "children" loop. In particular, for any call bytecode,
the hidden inputs are pushed first, before the syntactic arguments.
## Putting it together
Let's run our pseudocode on the AST above. Here's the execution trace, showing the recursive calls, in order:
1. Compile(Add):
1. Compile(Mul):
1. Compile(Leaf $a) => **CGetL $a**
2. Compile(Call):
1. Push ActRec padding => **2x NullUninit**
2. Compile(Leaf $b) => **CGetL $b**
3. Compile(Leaf 2) => **Int 2**
4. Compile(Leaf 'x') => **String "x"**
5. Push the op => **FCallFuncD {3, 1} "f"**
3. Push the op => **Mul**
2. Compile(Leaf $c) => **CGetL $c**
3. Push the op => **Add**
Remember that the algorithm produces a linear bytecode sequence via this
post-order tree traversal (children first, then the node's own op). Here's the
result. Try stepping through it, and verify
that it computes the result of `$a * f($b, 2, 'x') + $c` and leaves it on the
stack!
```
CGetL $a
NullUninit
NullUninit
CGetL $b
Int 2
String "x"
FCallFuncD {3, 1} "f"
Mul
CGetL $c
Add
```
---
## Step 1: Examining basic expressions
In this section, we're going to look at the actual bytecode that HHVM generates
for different cases. There are a few hundred distinct bytecode operations to
keep track of. Luckily, these operations are all documented in a file:
* [hphp/doc/bytecode.specification](../bytecode.specification)
Let's start by double-checking our prediction for the example above. Do so by
creating Hack script that evaluates the given expression and using the
"Eval.DumpHhas=1" debug flag. Put the following code into `~/php/bytecode.php`:
```
<?hh
function test($a, $b, $c) {
return $a * f($b, 2, 'x') + $c;
}
```
Then run this code with:
```
hphp/tools/hhvm_wrapper.php --hdf Eval.DumpHhas=1 ~/php/bytecode.php | grep -v srcloc
```
(The last part of this command filters out "srcloc" annotations in the
generated bytecode, which attribute bytecode back to a file and line in the Hack
source.) If we run this command on our test file, we'll get a compilation of
"test" which is basically the bytecode needed to compute our example
expression. The only additional bytecode is a RetC, which turns an expression
into a return statement: it pops one element off the stack and returns that
value to the caller. Here's a slightly edited form of what I get:
```
.function{} (3,5) <"" N > test($a, $b, $c) {
NullUninit
NullUninit
CGetL $b;"b"
Int 2
String "x"
FCallFuncD ... 3 1 ... "f"
CGetL2 $a;"a"
Mul
CGetL $c;"c"
Add
RetC
}
```
This bytecode is quite close to our prediction above. There are a few more
components to the FCallArgs struct than what we showed, but that's expected;
Hack supports a variety of special ways to call functions. A bigger difference
is that $a is pushed onto the stack *after* the function call result, and with
a different op—CGetL2, instead of CGetL.
> Wait, what the heck? Shouldn't HHVM evaluate these expressions in order? A
> compiler may re-order expression evaluations as long as doing so has no
> observable effect. For example, we generally can't re-order function calls,
> because function calls could have side effects like writing to the heap,
> throwing errors, or doing IO. Pushing a local onto the stack is side-effect
> free, so in this case, this rewrite is safe.
We should check that this bytecode sequence has the same behavior as the one we
predicted. To do so, we need to understand the semantics of CGetL2. There are a
few ways to find out what a bytecode operation does. You can read:
1. Its docs, by searching for CGetL2 in [bytecode.specification](../bytecode.specification).
2. Its implementation in the interpreter, by searching for "iop$NAME" (i.e. iopCGetL2) in [bytecode.cpp](../../runtime/vm/bytecode.cpp).
3. Its implementation in the JIT, by searching for "emit$NAME" (i.e. emitCGetL2) in the [JIT directory](../../vm/jit).
Here are the docs for CGetL2. (We skip the part about throwing errors, which will not happen in our example.)
```
Get local as cell. If the local variable given by %1 is defined, this
instruction gets the value of the local variable, pushes it onto the stack as
a cell, and then pushes $1 onto the stack.
```
In this explanation, `%1` refers to the first immediate—that is, the local
ID—and `$1` refers to the top element of the stack. Basically, the docs are
saying that CGetL2 pops one element off the stack, pushes the local, then
pushes the popped element on top of it. The net result is that doing CGetL2
after evaluating an expression is equivalent to doing CGetL before evaluating
it.
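Here's the same effect as a sketch in C++, with the evaluation stack modeled as
a vector of TypedValues (an illustration, not the real interpreter code):
```
// Net effect of CGetL2: insert the local's value one slot below the old top.
void cgetl2_sketch(std::vector<TypedValue>& stack,
                   TypedValue* locals, int localId) {
  TypedValue top = stack.back();     // the value that was on top
  stack.pop_back();
  stack.push_back(locals[localId]);  // push the local's value...
  stack.push_back(top);              // ...and put the old top back above it
}
```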
---
## Step 2: Examining more complex expressions
Let's try out a few more expressions with our setup from above. Try this version:
```
<?hh
class C {}
function test_vec() {
return vec[17, new C()];
}
function test_dict($x) {
return dict['a' => 17, 'b' => $x, 'c' => 'd'];
}
function test_concat($x) {
return 'prefix'.(string)$x.'suffix';
}
```
Here's what I get for these examples. Make sure these outputs make sense. Other
than the complex logic needed to implement the "new C()" syntax, these outputs
are an exact match for what our pseudocode would predict.
```
.function test_vec() {
Int 17
NewObjD "C"
Dup
NullUninit
FCallCtor ... {0, 1} ...
PopC
LockObj
NewVec 2
RetC
}
.function test_dict($x) {
Int 17
CGetL $x
String "d"
NewStructDict <"a" "b" "c">
RetC
}
.function test_concat($x) {
String "prefix"
CGetL $x
CastString
Concat
String "suffix"
Concat
RetC
}
```
These examples show us a couple of new bytecodes used to implement these basic
elements of Hack syntax:
1. `NewVec 2` pops two elements from the stack, appends them to a new vec,
and pushes that vec.
2. "new C()" expands to several bytecodes - from `NewObjD "C"` to `LockObj` -
that split up the operations of allocating the object and calling the
constructor. That makes sense, because calling the constructor may require
evaluating further expressions (the constructor arguments).
3. `NewStructDict <"a" "b" "c">` is similar to NewVec, in that it's a bytecode
with a variable number of inputs. It takes its values from the stack, but
the keys are a vector of string immediates.
4. Concatenation happens two inputs at a time - at least, if done with `Concat`
bytecodes.
---
## Lesson summary
* Hack bytecodes operate on a stack machine. Most bytecodes pop inputs from the
stack and push an output to it.
* We can compile an expression to bytecode recursively, by compiling its
subexpressions, then appending a bytecode that applies the top-level
expression transformation to those inputs.
* Bytecodes take "immediates" - constant arguments that are part of the
instruction stream - as well as stack inputs.
* Because Hack is a high-level language, it includes bytecodes for constructing
and operating on complex data structures like objects, strings, vecs, dicts,
and keysets. [You can see a full list of bytecodes here.](../bytecode.specification)
---
## Exercises
1. Search bytecode.specification for other bytecodes related to string concatenation.
1. Is there an alternative bytecode sequence that we could use to implement `test_concat`?
2. Which bytecode sequence would you expect to be faster? Why? (Reading the interpreter may help in this case.)
2. Rewrite `test_dict` from our last example as follows:
```
function test_dict($x, $y, $z) {
return dict['a' => 17, 'b' => $x, 'c' => 'd', $y => $z];
}
```
1. How do we implement this modified dict constructor in bytecode?
2. Are there performance differences between this approach and NewStructDict?
3. Can you provide an alternative bytecode sequence for this expression that could improve its performance?
3. Investigate how HHVM handles "member operations"—basically, read/write access
into array elements or object properties. Take a look at the bytecode for these
functions:
```
function prop($x) {
$x->y = 17;
}
function elem($x) {
$x['y'] = 17;
}
function nested_one_level($x) {
$x['y']->z = 17;
}
function nested_two_levels($x) {
$x['y']->z[] = 17;
}
```
1. How are we handling these sequences of operations in a compositional way?
2. Read the relevant bytecode docs in bytecode.specification, and write Hack
functions that use the bytecodes BaseH, BaseSC, SetOpM, and UnsetM. Confirm
with the Eval.DumpHhas output.
3. What changes if we replace these mutating ops with "read-only" member-ops
like `return $x['y']->z[17];`
4. What state do we need to keep in between steps of a nested member-op write
sequence? Take a look at [struct MInstrState in this file](../../hhbbc/interp-state.h)
to confirm your guess. |
Markdown | hhvm/hphp/doc/hands-on/lesson3.md | # Lesson 3: Refcounting and value types
## Lesson goals:
* Explore a unique feature of the PHP and Hack languages: value-type strings and array-likes.
* Learn about how HHVM uses reference counting to efficiently implement value types and to reclaim memory.
* See how certain Hack bytecodes result in refcounting operations.
* See how HHVM uses an ahead-of-time compilation step to reduce unnecessary refcounting.
* Make and test a small attempt at a performance optimization.
---
## Step 0: Mysterious missing mutations
Save the following Hack code in `~/php/refcounting.php`:
```
<?hh
class C { function __construct(public int $x) {} }
function mutate_int(int $x) {
$x = 34;
}
function mutate_object(C $c) {
$c->x = 34;
}
function mutate_dict(dict<string, int> $d) {
$d['x'] = 34;
}
<<__EntryPoint>>
function main() {
$x = 17;
mutate_int($x);
var_dump($x);
$c = new C(17);
mutate_object($c);
var_dump($c);
$d = dict['x' => 17];
mutate_dict($d);
var_dump($d);
}
```
What would you expect to see if you ran this code? Make a guess before reading on.
...
...
...
...
...
...
...
...
...
...
...
...
...
...
...
...
Okay, let's run the file. I assume you already have HHVM compiled at this point! Run:
```
hphp/tools/hhvm_wrapper.php ~/php/refcounting.php
```
Here's what you should see as output:
```
int(17)
object(C) (1) {
["x"]=>
int(34)
}
dict(1) {
["x"]=>
int(17)
}
```
What is going on here? The caller `main()` sees the modification made to `$c`
in `mutate_object()`, but it doesn't see the modification made to `$d` in
`mutate_dict()`. Clearly, HHVM is broken. It's a compiler bug! Shut it down!
...no. This result is correct. In PHP and Hack, objects are "passed by
reference" or are "reference types", but strings, vecs, dicts, and keysets are
"passed by value" or are "value types". Primitives like ints, floats, booleans,
and the value "null" are also value types.
> Vecs, dicts, and keysets share many similarities. They're heap-allocated
containers of varying size with value-type semantics. We use the term
"array-likes" throughout our codebase to refer to this trio of types.
When we pass an object as a function argument, we're passing a reference to a
unique value. Mutations to that object are visible to anyone who has a
reference to it. On the other hand, **when we pass a value type as a function
argument, the caller's copy and the callee's copy of that value are logically
distinct**. Mutations made to a value type inside a function are not visible
outside it.
Now, it's not surprising that primitives like ints are value types. Indeed, any
other implementation would be quite inefficient! [As we saw in Lesson 1](lesson1.md),
ints, floats, and booleans are stored directly in the TypedValue—they aren't
hidden in heap-memory behind a pointer. When we push `$x` onto the stack to
call `mutate_int()`, the value on the stack is independent of the value in the
local.
It's also not surprising that objects are reference types. Again, from Lesson
1, we saw that objects are heap-allocated. All that we store in a TypedValue is
a pointer to this heap memory; the object's class and its properties are stored
in the heap. When we push the object `$c` onto the stack, we're copying the
pointer, not the object! As a result, when we write through this pointer to
modify the object in `mutate_object()`, the effect is visible in main! (If
`mutate_object()` first replaced `$c` with a new object - i.e. `$c = new C()`
- then modifications to this value would not affect the original one.)
The surprising bit here is that in PHP and Hack, strings and array-likes are
**heap-allocated value types**. Even though these values are represented as a
pointer to the heap, when we pass them to a function, the two pointers act like
distinct values. Take a moment to think about how you might implement this
behavior. One straightforward approach is to copy these values when we pass
them as function arguments, but that would be a performance disaster. For
example, it would turn Shapes::idx from a simple O(1) hash table lookup into an
O(n) dict copy! We need a better way.
---
## Enter refcounting
HHVM implements efficient heap-allocated value types using reference counting.
**In fact, all heap-allocated values are refcounted, regardless of whether
they're reference types or value types.** Refcounting serves two purposes:
1. It's a way to eagerly free heap-allocated values. We also use a
[mark-sweep garbage collector](https://en.wikipedia.org/wiki/Tracing_garbage_collection#Na%C3%AFve_mark-and-sweep)
to handle cycles.
2. It enables [copy-on-write (COW)](https://en.wikipedia.org/wiki/Copy-on-write)
for strings and array-likes, allowing us to pass them as arguments in O(1) time.
The first purpose applies to objects, strings, and array-likes. Like Java and
Python, and unlike C++, Hack automatically manages memory for its users. It
frees heap-allocated values that are no longer used. Refcounting is one
approach to automated memory management: if a heap-allocated value has no
references, we can free its memory. The second purpose only applies to
heap-allocated value types, i.e. strings and array-likes.
> Refcounting is not a complete solution to memory management!
> Read ["Moon instructs a student"](http://www.catb.org/jargon/html/koans.html) to see why.
Let's look at how we implement refcounting. In our code, every heap-allocated
value starts with an 8-byte header, the C++ struct HeapObject. This struct is
defined in [the file header-kind.h](../../runtime/base/header-kind.h).
(Don't ask why. The reason has been lost to the mists of time.) Thus, we can
say that even though strings and array-likes are not Hack objects, all
heap-allocated values are HeapObjects.
> Be careful with the terminology! A HeapObject is any heap-allocated value. It
can be a reference type—e.g. an object—or a value type—e.g. a string. We will
always use "object" to refer to Hack objects, and "HeapObject" to refer to the
union of Hack objects, strings, and array-likes.
Every HeapObject tracks a **reference count**: a count of the number of
references to that value that are currently live. These references might be
locals or stack elements. If a HeapObject appears in a vec, the vec holds a
refcount on the HeapObject. If a HeapObject is stored to an object's property,
then the object holds a refcount to the HeapObject. A HeapObject's refcount
stores the number of distinct places that have a pointer to that HeapObject.
> That's not actually true! There are some cases where HHVM skips decrementing
refcounts, making the refcounts an overestimate. The real constraint is that
**a HeapObject's refcount is an upper bound on the number of distinct places
that have a pointer to that HeapObject**. But let's ignore this subtlety.
Every time we copy a HeapObject pointer—for example, if we push a copy of a
HeapObject local onto the stack—we increment the HeapObject's refcount
("inc-ref"). Every time we overwrite or let go of a HeapObject pointer—for
example, if we overwrite a HeapObject local, or if we pop one off the stack—we
decrement its reference count ("dec-ref").
Using refcounting for memory management is simple. Every time we dec-ref a
HeapObject, we check if its refcount is 0. If it is, then we know that that
object will never be used again, and we can free its associated memory.
Using refcounting to implement efficient heap-allocated value types is a bit
more involved. Whenever we modify a value-type HeapObject, such as a dict, we
use a strategy called copy-on-write:
* If the dict has refcount == 1, we modify it in place. No one besides us has a
pointer to the dict, so we're still maintaining value-type semantics, despite
doing O(1) in-place edits.
* If the dict has refcount != 1, we copy it (to get a copy with refcount == 1)
and then modify the copy. We dec-ref the old value since we've replaced our
pointer to it with a pointer to the copy.
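Here's that check as a C++ sketch (the helper names are hypothetical; the real
logic lives with the array-data implementations):
```
// Copy-on-write: called before a dict (ArrayData) is mutated.
ArrayData* prepareForWrite(ArrayData* ad) {
  if (refCountOf(ad) == 1) {
    return ad;                       // sole owner: mutate in place, O(1)
  }
  ArrayData* fresh = copyArray(ad);  // otherwise copy; the copy starts at 1
  decRef(ad);                        // we no longer point at the original
  return fresh;                      // the caller mutates the fresh copy
}
```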
---
## Refcounting and bytecode
The stack machine semantics of a Hack bytecode imply refcounting bookkeeping
that we have to do to implement it.
We can figure out how to do this refcounting by asking ourselves a question: if
the inputs to a given bytecode are refcounted HeapObjects, how many pointers to
those objects exist after the operation is complete?
Let's start with the simplest case, a CGetL bytecode. From
[bytecode.specification](../bytecode.specification):
```
CGetL <local variable id> [] -> [C]
Get local as cell. If the local variable given by %1 is defined, this instruction
gets its value and pushes it onto the stack. If the local variable is not defined,
this instruction raises a warning and pushes null onto the stack.
```
This bytecode takes a local and pushes it onto the stack. If the local is a
HeapObject, then there is now one additional copy of that heap pointer. **In
addition to pushing a value on the stack, CGetL must inc-ref it.**
PushL is a related bytecode that "pushes" a value onto the stack instead of
"getting" it. The difference is that PushL both pushes the local's value onto
the stack and unsets the local itself (i.e. sets the local to Uninit, which is
Hack's flavor of "undefined").
```
PushL <local variable id> [] -> [C]
Teleport local value to eval stack. The local given by %1 must be defined.
This instruction pushes the local's value on the stack, then unsets it,
equivalent to the behavior of a CGetL / UnsetL pair.
```
If the local that we pushed with PushL is a HeapObject, then we've gained
another copy of that heap pointer on the stack, but we've unset the pointer in
the local. Overall, its refcount is unchanged. **Because PushL "moves" a heap
pointer, it does no refcounting.**
Let's look at one more case, an operation that uses a stack value to set a local:
```
PopL <local variable id> [C] -> []
Teleport value from the stack into a local. This instruction stores the top
value of the stack $1 to the local %1, then pops this value off of the stack.
```
Let's say the value on top of the stack was a HeapObject. As with PushL, we're
gaining one reference to it (the new value of the local), but we're also losing
one reference to it (the one we pop off the stack). We do not need to inc-ref
or dec-ref the stack value. However, there is refcounting associated with PopL:
if the old value of the local was a HeapObject, we must dec-ref it! **PopL does
not need to inc-ref or dec-ref the stack value that it's storing to a local,
but it must dec-ref the local's old value.**
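As a rough sketch, with hypothetical frame and stack helpers, PopL's bookkeeping looks like this:
```
// Hypothetical sketch of PopL: move the top of the stack into a local.
void doPopL(Frame* fp, Stack& stack, int localId) {
  TypedValue incoming = stack.pop();    // "moved": no inc-ref or dec-ref needed
  TypedValue old = fp->local(localId);  // remember the local's previous value
  fp->setLocal(localId, incoming);
  decRefIfCounted(old);                 // the old value just lost a reference
}
```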
Now that you've seen these examples, predict the refcounting needed for these bytecodes:
1. **UnsetL:** "unsets" a local by setting its value to Uninit.
2. **NewVec:** pops n values off the stack and creates a vec containing those elements.
3. **RetC:** pops and returns the top value of the stack, cleaning up the function's frame.
4. **Concat:** pops two values off the stack; if they're not both strings, it throws, else, it concatenates them and pushes the result.
...
...
...
...
...
...
...
...
...
...
...
...
...
...
...
...
If UnsetL is operating on a refcounted local, it must dec-ref the old value.
**UnsetL dec-refs the local's old value.**
NewVec can "move" any HeapObjects it's taking as stack inputs. A stack slot
used to refer to them; now, a vec element refers to them; the overall
refcounting effect is neutral. However, NewVec allocates a new, refcounted vec
and pushes a pointer to it onto the stack. This new vec should have a refcount
equal to 1. **Constructors like NewVec create a new HeapObject with a refcount
of 1.**
RetC signifies a function return. It "frees" the memory associated with the
function's frame. (Frames are usually stack-allocated, so freeing the memory is
just decrementing the stack offset.) Functions may hold references to
HeapObjects in their locals, so before we "free" the frame, we must dec-ref
these values. **RetC dec-refs the function's locals.**
Let's assume Concat is operating on two refcounted strings. Concat should
allocate a new string, so it's acting kind of like a constructor; the new
string will have a refcount of 1. Concat also pops two strings off the stack,
so it must dec-ref those. **In general, stack-only bytecodes like Concat
dec-ref their inputs and either allocate a new output with refcount 1, or
inc-ref an existing value to push it as an output.**
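A sketch of that Concat case, again using hypothetical helper names, might look like:
```
// Hypothetical sketch of Concat on two refcounted strings.
void doConcat(Stack& stack) {
  StringData* rhs = popString(stack);            // the second operand is on top of the stack
  StringData* lhs = popString(stack);
  StringData* out = makeConcatenated(lhs, rhs);  // freshly allocated, refcount == 1
  decRefAndRelease(lhs);                         // the inputs lose their stack references
  decRefAndRelease(rhs);
  pushString(stack, out);
}
```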
---
## Step 1. Visualizing refcounting
When we run HHVM in interpreted mode, we can turn on debug tracing to see refcounting working as above. Put the following code into `~/php/refcounting.php`:
```
<?hh
class LinkedList { function __construct(public $next = null) {} }
<<__EntryPoint>>
function main() {
  $x = new LinkedList();
  $y = new LinkedList($x);
  $z = $y;
  $y = 17;
  var_dump($z);
  $z = 34;
}
```
Then, run this code in interpreter mode with refcounting debug enabled:
```
TRACE=bcinterp:9 hphp/tools/hhvm_wrapper.php -i ~/php/refcounting.php
```
The resulting output shows the types and refcounts of all locals and stack values at each bytecode. For example, just prior to the final RetC that ends the function main(), we have:
```
dispatch: __
dispatch: || === Stack at /home/kshaunak/php/refcounting.php:13 func main ===
dispatch: || {func:main,callOff:0,this:0x0}<C:0x7f06b903e320:c(1):Object(LinkedList) C:0x11 C:0x22> C:Null
dispatch: \/
dispatch: 71: RetC
```
This output says that in function `main()`, local 0 (which is `$x`) contains a
LinkedList with refcount 1 at heap address 0x7f06b903e320. The other two
locals, `$y` and `$z`, are the integers 0x11 and 0x22, respectively, because we
assigned to them earlier. There's one value on the stack, a null, which is the
value that the RetC will return.
Look through this output and see how LinkedList refcounts change with CGetL /
SetL bytecodes. Confirm for yourself that the refcounting semantics match our
predictions above. What is the maximum refcount we see for any object in this
function? Which object has that refcount, which bytecode does it occur at, and
where are all of the references to it?
---
## Step 2: Static strings and array-likes
Strings and array-likes are value types. A bit of Hack code that has a
reference to one of these values can guarantee that no other Hack code can edit
that value. HHVM implements heap-allocated value types by storing a refcount in
their heap data, and blocking mutation for value types with a refcount that is
not equal to 1.
This constraint allows HHVM to make a critical optimization: placing strings
and array-likes in shared memory. Most Hack objects are thread-local. HHVM can
run multiple requests concurrently, and a given object is only referenced on a
single thread: the thread that allocated it. But this approach would be a
disaster for commonly used values like the empty vec or empty dict! Instead,
we'd like to be able to share these values between requests. Multiple requests
would all have concurrent access to a single empty vec and dict.
Here's an approach to sharing these values that does NOT work:
* Store a single, empty vec in a process global with an initial refcount of 1.
* When we retrieve this vec on a request, we inc-ref it, so it starts with a
refcount of at least 2. We inc-ref and dec-ref it as normal when executing
bytecode for each request.
* Because the minimum refcount of this vec, as seen from any Hack code, is 2,
no one will ever modify it.
This approach seems to work. Whenever Hack code is manipulating this global
vec, it will have a refcount of at least 2 (one held by the global itself, and
one held by the value in the request), so it can't be modified. The bug here is
subtle: **modifying any value in shared memory introduces a race condition,
unless done with atomic memory operations**. The upshot is that we can't
actually inc-ref or dec-ref any values in shared memory!
Here's an example of a race condition that can occur when inc-ref-ing the
shared vec:
1. Thread A loads its initial refcount X.
2. Thread B loads its initial refcount X.
3. Thread A writes back the incremented refcount X + 1.
4. Thread B writes back the incremented refcount X + 1.
Even though threads A and B have created two new references to the shared vec,
the refcount has only increased by 1. At some point, when these threads dec-ref
the pointers they hold, they can dec-ref the value to 0 and "free" the array,
causing HHVM to crash!
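In C++ terms, the broken inc-ref is a plain, non-atomic read-modify-write (names hypothetical):
```
// Hypothetical sketch of the broken, non-atomic inc-ref on a shared value.
void incRefRacy(HeapObject* obj) {
  int32_t count = obj->count();  // threads A and B may both load the same value X...
  obj->setCount(count + 1);      // ...and both store back X + 1, losing one increment
}
```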
Instead, here is how HHVM implements shared-memory strings and array-likes:
* The refcount of a string or array-like can be either positive or negative.
* If the refcount is positive, the HeapObject is thread-local; if it is
negative, it is in shared memory.
* When doing any inc-ref or dec-ref op on a string or array-like, we first
check the refcount's sign. If it's negative, we do nothing!
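A sketch of the resulting check, with the same hypothetical names as before:
```
// Hypothetical sketch: inc-ref that leaves shared-memory ("static") values untouched.
void incRefCounted(HeapObject* obj) {
  if (obj->count() < 0) return;     // negative refcount: static value, never mutate it
  obj->setCount(obj->count() + 1);  // positive refcount: thread-local, safe to bump
}
```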
Since concurrent reads on shared memory are safe, this approach doesn't
introduce any race conditions. We can share a single empty vec and empty dict,
and many other constant HeapObjects, between all of our requests! Let's look at
a quick example. HHVM places any "constant value" like a constant string or vec
into shared memory. Put the following code into `~/php/static.php`:
```
<?hh
<<__EntryPoint>>
function main() {
  $x = vec[17, 34];
  var_dump($x);
  $y = 'my constant string';
  var_dump($y);
}
```
Then run it with:
```
TRACE=bcinterp:9 hphp/tools/hhvm_wrapper.php -i ~/php/static.php
```
When we manipulate `$x` and `$y` as locals and as stack values, you should see
a refcount of "c(static)" for them: a sentinel negative refcount value. HHVM's
tvIncRefGen and tvDecRefGen, the generic refcounting helpers, avoid mutating
static strings and array-likes by checking that the refcount is positive before
modifying it. The refcount of a static string or array-like will never
change!
> You might ask, why don't we use atomic increment or decrement operations
like "lock-add" to inc-ref and dec-ref all values? Doing that would save a lot
of instructions: a compare-and-branch on every refcounting op. Unfortunately,
it would also tank performance.
[On x86-64, in order to write to any memory, a core must take exclusive access to that memory address](https://en.wikipedia.org/wiki/MESI_protocol)
(and to the whole cache line containing that address). That means that
inc-ref-ing a shared value is essentially single-threaded.
As compiler engineers, we should be wary of relying on delicate invariants like
"the refcount of a static value is never mutated". Luckily, we have an ace up
our sleeve to help us enforce such guarantees: the debug-mode build! The
refcounts that are 1 less and 1 greater than the "StaticValue" refcount are
considered to be invalid refcounts. In debug mode, if we ever see a heap object
with a negative refcount that's close to, but not equal to, "StaticValue", we
crash with a nice error message.
> Assertions are a critical tool for testing our code. We check these
assertions in debug builds, then compile them out in release builds where
performance is a top priority. In HHVM code, the "assertx" helper checks an
assertion and prints a stack trace and other diagnostics on failure.
**Structure your code so that clear invariants hold, then use assertions to
check them wherever possible!**
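For instance, a simplified sketch of that debug check might look like the following (the real constants and helpers differ; kStaticValueSentinel is an assumed name):
```
// Hypothetical, simplified sketch of a debug-mode refcount sanity check.
void assertCountSane(const HeapObject* obj) {
  auto const count = obj->count();
  // Either a live, thread-local refcount, or exactly the static sentinel value.
  assertx(count > 0 || count == kStaticValueSentinel);
}
```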
---
## Step 3: Refcounting optimizations
As we've seen before, a compiler like HHVM is free to diverge from the
"trivial" implementation of a given bytecode, as long as the logic that HHVM
executes matches the stack machine in all observable behavior. In the previous
lesson, HackC and HHVM used this freedom to re-order some bytecodes, but the
resulting logic was not meaningfully different from the trivial implementation.
Now, we'll look at how we can tweak compilation to gain performance.
**Refcounting is expensive, because it requires accessing heap memory.** Memory
access is one of the most expensive operations on modern CPUs - far more
expensive than doing arithmetic on values in hardware registers. As a result,
saving inc-ref or dec-ref ops is usually a nice win. Furthermore, an inc-ref
and dec-ref on the same value cancel out! The "PushL" bytecode uses this fact
for optimization. Verify for yourself that these bytecode sequences have the
same semantics:
1. CGetL $local; UnsetL $local
2. PushL $local
If we execute the first sequence the "trivial" way, then we'll inc-ref the
local for CGetL (because its value is now in the local AND on the stack), then
immediately dec-ref it (in order to overwrite it with Uninit). Because PushL
does the two ops together, we can cancel these two operations - both when we're
interpreting PushL, and when we're JIT-compiling it. HackC does not directly
emit PushL bytecodes, but HHVM comes with an ahead-of-time bytecode optimizer
called HHBBC that does these rewrites. (HHBBC even adds UnsetL ops for locals
that are "dead" - unread by later code in a function - in order to create more
optimizable sequences.)
> Remember that HHVM converts, or "lowers" bytecode to machine code in stages.
It compiles bytecode to intermediate representation (HHIR), then IR to virtual
assembly (vasm), then vasm to machine code. Most optimizing compilers implement
similar optimizations at multiple stages. For example, HHBBC optimizes
bytecode; it replaces bytecode sequences with equivalent sequences with better
refcounting properties. But if we lower bytecode to IR that does an inc-ref
followed by a dec-ref of the same value, we have IR level optimizations (see:
refcount-opts.cpp) that can cancel those ops as well!
Now, let's try to make our own refcounting optimization! Because HHVM has a
garbage collector (GC), we can skip dec-refs without changing the result of any
Hack operations. **Taken to an extreme, we could modify HHVM's DecRef code
generation to return immediately, so dec-refs are a no-op at the machine code
level!** At this point, we'd leak memory to the GC all over the place, but
refcounting would be a lot faster!
> Does this change affect any observable behavior? It turns out that the notion
of "observable" is a bit fuzzy! If we leak this much memory, there's a good
chance the teams monitoring HHVM on live webservers will see a large
increase in memory used. Worse still, the OS's OOM killer (which kills
processes that use too much memory) might kill HHVM servers. Leaking too much
memory is observable, but leaking a small amount may be okay.
We're going to try a less extreme version of this optimization. The Idx
bytecode takes an array, a key, and a default value as stack inputs. It does
the array access and pushes the result onto the stack, pushing the default
value if the key is not present in the array. Idx must inc-ref the stack output
and dec-ref the stack inputs.
In our codebase, the code that lowers a bytecode $NAME to IR is in a function
called emit$NAME. So, we can find the code to compile Idx by searching for Idx
in the hphp directory. It turns out that it's in
[irgen-builtin.cpp](../../runtime/vm/jit/irgen-builtin.cpp).
1. Modify emitArrayIdx to avoid dec-ref-ing any of the inputs. (It's sufficient to modify the vec and dict cases.)
2. Write a small test file using idx() to confirm that your modification is
working. You can look at the TRACE=printir:1 output to see that it has fewer
dec-refs than before (compare the output between your modified HHVM and the
output from release HHVM). As a shortcut, pass `-b /usr/local/hphpi/bin/hhvm`
to hhvm_wrapper.php to use release HHVM without recompiling!
3. Kick off local unit tests for your code: `hphp/test/run quick slow`
When you have these steps working, you'll have saved a small number of
instructions, loads, and stores, at the cost of a significant memory penalty.
Is this tradeoff an overall CPU win? Only data can tell - but my guess is,
it's a regression! In the next lesson, we'll try to find a performance win.
---
## Lesson summary
* Hack has multiple heap-allocated types: objects, strings, and array-likes
(vecs, dicts, and keysets).
* Of these types, objects are reference types, while strings and array-likes
are value types. A mutation to an object is visible to everyone with a
reference to it. On the other hand, two references to a string behave like
distinct values.
* HHVM uses refcounting for two purposes: to implement copy-on-write value
types, and to eagerly free heap values.
* HHVM uses a negative refcount to indicate that strings and array-likes are in
shared memory. This trick allows us to de-duplicate "static", or constant,
strings and array-likes across requests.
* Because Hack bytecodes can create new references to heap values, HHVM must do
inc-refs and dec-refs to implement these bytecodes. For example, we inc-ref the
stack outputs of a bytecode and dec-ref the stack inputs.
---
## Exercises
1. Remember that HeapObjects include reference types like objects as well as
value types like strings and array-likes. Why doesn't HHVM support "static"
(shared-memory) objects?
2. Take a look at HHVM's tvDecRefGen, which does a generic dec-ref of a
TypedValue. Note that this helper can be used on any TypedValue, including
non-heap-allocated values like integers.
1. How many cases does tvDecRefGen have to handle? Confirm by reading the comments in [decref-profile.h](../../runtime/vm/jit/decref-profile.h).
2. Is it safe to use tvDecRefGen to dec-ref an object?
3. Is it optimal to use tvDecRefGen to dec-ref an object? What could we do instead?
3. Modify your code for the Idx bytecode to skip the inc-ref of the new value.
1. Why is the new code unsafe? Why is it okay to skip a dec-ref, but not an inc-ref?
2. Commit this modified version and run the tests with `hphp/test/run quick slow`
3. If those tests are passing, you can also add other flags to the command:
a. To test "repo mode", pass "-r" to hphp/test/run. This flag turns on HHBBC.
b. To test optimized JIT compilation, add "--retranslate-all 2".
c. Typical HHVM CI will run a battery of tests including these flags.
Here is an hphp/test/run command that includes all of the flags above:
```
hphp/test/run -m jit -r --retranslate-all 2 hphp/test/quick/dict/basic-getters.php
```
Once you can reproduce a failure, fix it by re-adding the inc-refs you
removed and confirm that the test now passes. |
Markdown | hhvm/hphp/doc/hands-on/lesson4.md | # Lesson 4: Making an IR optimization
## Lesson goals:
* Use profiling tools to find an optimization opportunity.
* Learn how to read printir tracing, the IR documentation, and JIT types.
* Improve IR generation to speed up casting collections (e.g. Vector) to arrays (e.g. vec).
* Move this code to "simplify", one of the IR-to-IR optimization routines, to make it more general.
---
## Step 0: Suboptimal code generation
In this step, we're going to hunt for a potential performance win.
Our main tool in this hunt is profiling. One way to do that is to run
[Linux perf](https://perf.wiki.kernel.org/index.php/Main_Page)
on a machine with an HHVM webserver under load. HHVM even outputs a
"perf-PID.map" file that can be used to symbolize stack traces in JIT-ed Hack
code. That said, it's easiest to read perf's numbers for C++ helpers, and many
performance opportunities in HHVM come from optimizing C++ functions that are
used to implement the Hack runtime.
In a sample perf trace I took, I saw that ~0.2% CPU is spent doing
object-to-array-like casts (e.g. convObjToVec, convObjToDict, convObjToKeyset).
What makes these helpers a good potential target for optimization? First off,
there is some opportunity there: if we could eliminate most of the cost of
these helpers, it could be a 0.2% gCPU win. Second, the opportunity is
relatively easy to realize. It's difficult to implement general-purpose code to
cast an object to an array-like. That's why HHVM implements these casts by
calling a C++ helper. But after having read enough web code written in Hack,
I can make an educated guess that most object-to-array casts are casts on
collection objects. These objects are secretly backed by arrays, which makes
it possible for us to generate faster code to do the cast in this case.
When we have a potential perf idea, it's important to write a small test case
and check that the opportunity shows up. Let's put the following code into
~/php/convert.php:
```
<?hh
<<__EntryPoint>>
function main() {
  $x = Vector { 17, 34 };
  var_dump(vec($x));
}
```
Now, when we run this code with tracing enabled, we must do so at the highest
optimization level! If we don't, we might go down a rabbit hole trying to make
an optimization that is irrelevant for our production use case. To see how HHVM
compiles the code above, we have to run it with both HHBBC (the
bytecode-to-bytecode optimizer) and RetranslateAll (the two-phase JIT) enabled:
```
TRACE=printir:1 hphp/tools/hhvm_wrapper.php -c -r 2 ~/php/convert.php | less -RN
```
Then we have to jump to the TransOptimize translation for main. Here's the
relevant part:
```
B4: [profCount=2] (preds B0)
--- bc main(id 1078142128)@5, fp 0, fixupFP 0, spOff 1, [profTrans=0]
ColFromArray Vector
(12) t5:Obj=HH\Vector = NewColFromArray<HH\Vector> vec(0x8018d4e0)=Vanilla
Main:
0x3280000e: mov $0x8018d4e0, %edi
0x32800013: callq 0xa712ff0 <HPHP::collections::allocFromArrayVector(HPHP::ArrayData*)>
0x32800018: mov %rax, %rbx
--- bc main(id 1078142128)@24, fp 0, fixupFP 0, spOff 3, [profTrans=0]
CastVec
(22) t6:Vec = ConvObjToVec t5:Obj=HH\Vector -> B5<Catch>
Main:
0x3280001b: mov %rbx, %rdi
0x3280001e: callq 0xa260c00 <HPHP::jit::convObjToVecHelper(HPHP::ObjectData*)>
-> B6
B6: [profCount=2] (preds B4)
(23) StStk<IRSPOff -4> t1:StkPtr, t6:Vec
Main:
0x32800023: movb $0x17, -0x38(%rbp)
0x32800027: movq %rax, -0x40(%rbp)
```
The important thing here is that, even though we know that t5 is an HH\Vector
in instruction (22) ConvObjToVec, we still generate machine code for it that
makes a call to a C++ function convObjToVecHelper. We can do better!
---
## Understanding IR tracing
We last looked at the TRACE=printir:1 output in the first lesson. Since then,
we've learned enough about HHVM to tease its output apart in more detail. This
tracing is useful because it simultaneously displays three levels of code
representation:
1. The input Hack bytecode, [documented in bytecode.specification](../bytecode.specification)
2. HHVM intermediate representation, [documented in ir.specification](../ir.specification)
3. The output machine code, [documented in an x86 manual](https://www.felixcloutier.com/x86/)
Here's the Hack bytecode annotation for the first IR op above. First, we see
the function and bytecode offset where this bytecode came from, a sort of
annotation from the bytecode back to source code. Then, we see the bytecode's
name and immediates:
```
--- bc main(id 1078142128)@5, fp 0, fixupFP 0, spOff 1, [profTrans=0]
ColFromArray Vector
```
Here's the IR op. Like the bytecode, the IR op takes immediates - the
parameters inside the angle brackets. Unlike the bytecode, the IR takes inputs
and outputs inline. HHVM's IR operates on
[static, single-assignment (SSA) temporary values](https://en.wikipedia.org/wiki/Static_single_assignment_form).
For this op, the input SSATmp is a constant, so we hide its ID; the output
SSATmp is not constant, so we show that it's "t5", of type "Obj=HH\Vector". At
the IR level, we're no longer implicitly operating on a stack machine - all
stack operations are explicit!
```
(12) t5:Obj=HH\Vector = NewColFromArray<HH\Vector> vec(0x8018d4e0)=Vanilla
```
Here's the machine code that we generate for this IR op. Machine code can be
placed in multiple regions of memory, depending on whether we think blocks at
the machine code level are likely to be executed. If they are, we put the code
in "Main"; otherwise, we put it in "Cold" or "Frozen". The machine code for
this op is simple because the heavy lifting is done in C++:
```
Main:
0x3280000e: mov $0x8018d4e0, %edi
0x32800013: callq 0xa712ff0 <HPHP::collections::allocFromArrayVector(HPHP::ArrayData*)>
0x32800018: mov %rax, %rbx
```
After the ColFromArray bytecode comes the CastVec bytecode. We compile this
bytecode to two IR ops. ConvObjToVec is doing the main logic of the Cast op,
and, again, is implemented mostly in C++. StStk ("store stack") pushes the
final result of the cast onto the stack. Unlike the other IR ops here, StStk is
compiled directly to machine code. It's simple enough that we don't need to
shell out to C++ for it.
## Where is the rest of the code?
If we look at the bytecode for these operations, we'll see that the IR trace
above skipped several bytecodes. You can see for yourself by running this
command, which is like our command above, except that:
* We're adding in "grep -v srcloc" to filter out source location attribution.
* We drop `TRACE=printir:1` and add `--hdf Eval.DumpHhas=1` to get a bytecode rather than an IR dump.
* We drop `-r 2` (which expands to `--retranslate-all 2`) because RetranslateAll results in more optimized IR generation - it doesn't affect the bytecode!
```
hphp/tools/hhvm_wrapper.php -c --hdf Eval.DumpHhas=1 ~/php/convert.php | grep -v srcloc
```
Here's what I get if I run that command:
```
.function{} [unique persistent "__EntryPoint"("""v:0:{}""")] (4,7) <"" N > main() {
.declvars _0;
Vec @A_0
ColFromArray Vector
AssertRATL _0 Uninit
AssertRATStk 0 Obj=HH\Vector
PopL _0
NullUninit
NullUninit
AssertRATL _0 Obj=HH\Vector
PushL _0
CastVec
FCallFuncD <SkipRepack SkipCoeffectsCheck> 1 1 "" "" - "" "var_dump"
AssertRATStk 0 InitNull
PopC
Null
AssertRATL _0 Uninit
RetC
}
```
In the bytecode, in between creating the Vector and casting it to a vec, we
store it to local 0 (via PopL), then unset the local and push it back onto the
stack (via PushL). Check bytecode.specification to confirm your understanding!
There are also some type assertion bytecodes (AssertRATL, "assert
repo-authoritative type for a local", and AssertRATStk, for the stack), but it
makes sense that these bytecodes don't compile to machine code. They just add
in type information at compile time.
It makes sense that the bytecode stores the Vector to a local - after all, that's what the source code does! The question is, why don't the IR ops match the bytecode above exactly? If the IR were to implement the exact same operations that the bytecode above claims to do, then we'd have several IR ops for each bytecode in this sequence:
1. **ColFromArray** would do the NewColFromArray, then use StStk to push that value onto the stack.
2. **PopL** would use LdStk ("load stack") to load that value into an SSATmp, then use StLoc ("store local") to store it to a local. In general, PopL also needs to load and dec-ref the old value of the local first, but in this case, the **AssertRATL** above tells us that the local is already Uninit, which is not a refcounted type.
3. **PushL** would use LdLoc ("load local") to load the value, then use StStk to push it onto the stack. It would use StLoc to store an Uninit value to the local to "unset" it.
4. **CastVec** would use LdStk to read that stack value, do the cast, and then use StStk to push the result.
> Compiler engineers love abbreviations! "Loads" and "stores" are just reads
> and writes to memory. Because interacting with memory is ubiquitous in Hack
> code, we have tons of IR ops that use "Ld" or "St" in their names. We also
> use shorthand for many other ops, like CreateAFWH, DecRefNZ, JmpZ, and Shr.
> These names seem opaque at first, but they make reading code easier once you
> are versed in the domain.
> [If you're ever confused about what an IR op does, consult ir.specification.](../ir.specification)
These are sensible and correct translations of the bytecode above into IR.
They're also wasteful. The ColFromArray, PopL, PushL, and CastVec are pushing
and popping a single Vector value. If the net effect of all of these ops is to
do a cast on the Vector's SSATmp, it's better if we just do that and skip the
intermediate stack and local stores and loads. HHVM produces this more
optimized code in two steps:
1. It does a literal translation of each bytecode to IR, exactly as we did above.
2. Then, it analyzes the resulting IR ops and sees which ones can be optimized away.
There are actually two places we do the analysis needed to make these
optimizations. First, the irgen ("IR generation") code for each bytecode uses a
[forward-only analysis called FrameState](../../runtime/vm/jit/frame-state.h),
which eliminates loads that can be trivially removed (e.g. a LdStk
that comes right after a StStk). FrameState can eliminate redundant loads in
straight-line code, but we need to apply a more complex fixed-point algorithm
to handle branches and loops.
[load-elim](../../runtime/vm/jit/load-elim.cpp) and [store-elim](../../runtime/vm/jit/store-elim.cpp)
do these optimizations in general, after irgen is complete.
## Reading the IR spec
[ir.specification](../ir.specification) is actually a kind of executable
documentation. We use [this script](../generate-ir-opcodes.sh)
to process the lines beginning with `|` in this file to produce C++ code
defining a table of IR ops, the single source of truth for the IR. Because the
documentation is the implementation, it's always up to date! Let's look at a
few examples:
```
| ConvObjToVec, D(Vec), S(Obj), PRc|CRc
| StStk<offset>, ND, S(StkPtr) S(Cell), NF
| CheckType<T>, DRefineS(0), S(Cell), B|P
```
The first line says that ConvObjToVec is an IR op taking one SSATmp input that
must be a subtype of TObj, and returning a subtype of TVec. The last bit of
that line defines the op's "flags" - in this case, it consumes a refcount on
the input and produces one on the output. That is: this IR op takes care of the
inc-ref and dec-ref required by the CastVec bytecode.
[For a complete list of flags, look earlier in the file.](../ir.specification?lines=119-159)
The second line says that StStk takes two inputs - a stack pointer and an
arbitrary Hack value (Cell == "mixed") - and doesn't return anything (ND == "no
destination"). That should make sense, as StStk simply writes to memory. StStk
doesn't have any additional flags, but it does have an immediate: the stack
offset, which is a constant offset off of the stack pointer. (Recall that an
immediate is an argument to an instruction that is "immediately" available at
compile time, as opposed to inputs, which are only available at runtime.) The
immediates in ir.specification are just comments; we define the immediates for
each op in code [at the end of the file extra-data.h](../../runtime/vm/jit/extra-data.h).
If you look for StStk there, you'll see that it takes an immediate of type
IRSPRelOffsetData.
The third line says that CheckType takes one input, an arbitrary Hack value,
and returns an output. The return type of this op isn't fixed; instead, it must
be computed at compile time based on the input, and based on the immediate type
T that a given CheckType is checking for. For example, we might compile a
CheckType<Bool> op, which takes a Cell input, checks that it's a bool, and
returns a refined SSATmp of type TBool. If the runtime type check fails, then
instead of returning the result, we'd branch (the "B" flag) to a "taken" block.
## The JIT type system
A key difference between Hack bytecode and HHVM's IR is that each IR op places
type constraints on its inputs, and produces typed outputs. The "CastVec"
bytecode will take any type of input from the stack and "do what vec cast does"
for that input. That could mean leaving the input unchanged (if it's a vec),
doing a cast (if it's a dict, keyset, or object), or throwing an error (all
other types). By contrast, the ConvObjToVec IR op will only implement vec
cast behavior for object inputs. It's up to our irgen code to only generate
IR ops whose type constraints are satisfied.
> In debug builds, after completing irgen for some block of code, we have
> assertions to check these type constraints. The only invariants that hold in
> HHVM are the ones that we actively check in debug mode!
We've already seen some examples of JIT types in the sources and destinations
of each op in ir.specification. These examples are instances of the C++ class
jit::Type, defined in [hphp/runtime/vm/jit/type.h](../../runtime/vm/jit/type.h).
A JIT type constrains what a Hack value could be at runtime. Unlike the Hack
type system, the JIT type system is sound, modulo bugs in HHVM: it provides
actual guarantees about runtime values, and without these guarantees, we
wouldn't be able to produce any nontrivial machine code.
Each instance of a JIT type represents a subset of values that match that type.
For example:
* TCell matches "any Hack value" - that is, an int, an array-like, an object, etc.
* TInt matches "any Hack integer" - some int, but we don't know which.
* TInt=5 matches "the integer 5"
That's right - JIT types can be constants! Like most compilers, HHVM does
[constant folding](https://en.wikipedia.org/wiki/Constant_folding): if an IR
instruction has constant inputs, and if it's side effect free, HHVM may compute
its output at compile time. We check if an IR instruction can be const-folded
by checking if all of that instruction's source SSATmps have types of constant
values.
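In pseudo-C++, that check might be sketched like this (the method names here are assumptions, not necessarily the exact HHVM API):
```
// Hypothetical sketch: can this IR instruction be evaluated at compile time?
bool canConstFold(const IRInstruction* inst) {
  if (mayHaveSideEffects(inst)) return false;  // only pure ops can be folded
  for (auto const src : inst->srcs()) {
    if (!src->hasConstVal()) return false;     // every input must be a known constant
  }
  return true;
}
```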
> Based on the previous lesson, on refcounting, you may have a guess as to why
> we could have constant subtypes of TInt, TStr, TVec, etc., but not of TObj.
> There are even some types, like TInitNull, which always represent a constant!
The set of all JIT types forms [a mathematical structure called a lattice](https://en.wikipedia.org/wiki/Lattice_(order)).
That means that JIT types support two key operations: union and intersection.
The union `|` of two JIT types A and B is the type of values that could be an A
or a B. The intersection `&` of A and B is the type of values that must be both
an A and a B. Here are a few examples:
```
TCell | TInt = TCell // A TCell could be any Hack value, so this union could be, too.
TCell & TInt = TInt // A TCell and an int must be an int.
TStr & TInt = TBottom // No value is an int and a string. TBottom is "the empty set".
TInt | TStr = (Int|Str) // jit::Type can represent this union...
(Int|Str) | TInitNull = (Int|Str|InitNull) // ...and this one.
// jit::Type can represent any union of Hack DataTypes (i.e. "top-level" Hack types)!
TInt=3 | TInt=5 = TInt // We can't represent two constants, so we expand this union.
TInt=3 & TInt=5 = TBottom // No value is ever both the int 3 and the int 5.
```
We use the lattice operations to bound the type of values at compile time:
* **Unions represent merged control flow.** If we compile an if statement, and
we assign local $x a value of type X in the "then" block and a value of type
Y in the "else" block, then afterwards, $x could be either one. It has type X | Y.
* **Intersections represent type checks.** If some SSATmp has the type X, and
we do a CheckType<Y> on it, then if the check passes, both type constraints
apply. The CheckType result has type X & Y.
There's a lot more to learn about this type system, but it's best done by
reading code. Luckily, jit::Type is one of the most heavily-tested parts of
HHVM. In addition to exercising this code via the end-to-end tests in the
`hphp/test` directory, we have
[a battery of unit tests which double as examples of usage and expected behavior](../../runtime/test/type.cpp).
---
## Step 1: Optimizing CastVec IR generation
Phew! That was a lot of information. Take five and step away from the computer.
Go for a walk! Get a cup of coffee!
...but before you do, kick off a build of HHVM =)
...
...
...
...
...
...
...
...
...
...
...
...
...
...
...
...
It's time to apply what we've learned to optimize the example from the start of
this lesson - a simple vec cast on a Vector. At a high level, here's what we
have to do:
1. We want to improve irgen for CastVec. By convention, we know that the
interpreter's logic for this bytecode is in iopCastVec and that the JIT's is
in emitCastVec. **We must edit emitCastVec.**
2. Our optimization can only kick in if we have the right type info for the
input. **We must check if the stack's top value is a subtype of
TObj<=HH\Vector**, the specialized jit::Type representing objects of class
Vector.
3. We need to use better - more specialized! - IR ops that will be more
performant for this cast case. **We must find and compose existing IR ops,
e.g. one that fetches the vec that backs a Vector.**
Let's go through these items together. For item 1, we can simply search in the
`hphp/runtime` directory for "emitCastVec". [Here's the code that I found.](../../runtime/vm/jit/irgen-basic.cpp)
At the rev you're working at, the code may be a bit different, but the
high-level structure is probably similar:
```
void emitCastVec(IRGS& env) {
  auto const src = popC(env);
  auto const raise = [&](const char* type) {
    auto const message =
      makeStaticString(folly::sformat("{} to vec conversion", type));
    gen(env, ThrowInvalidOperation, cns(env, message));
    return cns(env, TBottom);
  };
  push(
    env,
    [&] {
      if (src->isA(TVec)) return src;
      if (src->isA(TArrLike)) return gen(env, ConvArrLikeToVec, src);
      if (src->isA(TClsMeth)) return raise("ClsMeth");
      if (src->isA(TObj)) return gen(env, ConvObjToVec, src);
      if (src->isA(TNull)) return raise("Null");
      if (src->isA(TBool)) return raise("Bool");
      if (src->isA(TInt)) return raise("Int");
      if (src->isA(TDbl)) return raise("Double");
      if (src->isA(TStr)) return raise("String");
      if (src->isA(TFunc)) return raise("Func");
      if (src->isA(TRes)) return raise("Resource");
      PUNT(CastVecUnknown);
    }()
  );
}
```
What's going on here? The C++ lambda inside the "push" call returns an SSATmp
that's the result of doing a cast. TVec, TArrLike, TObj, etc. are all JIT
types; most of them represent values of some DataType, but TArrLike is a
special union type that just means - you guessed it! - TVec | TDict | TKeyset.
In the TVec, TArrLike, and TObj cases, we produce IR to actually do the cast.
In all of the other cases here, the cast is guaranteed to throw, so we emit a
"terminal" (the flag "T" in ir.specification) op `ThrowInvalidOperation`. This
op is guaranteed to halt execution, so we can return a dummy SSATmp of type
TBottom, since we know that value is unreachable.
> When compiling a bytecode to IR, it's critical to keep track of refcounting.
> This bytecode pops one value - the "src" SSATmp - from the stack, and pushes
> one value - the return value of the lambda - onto it. Based on the standard
> refcounting semantics, we must dec-ref the stack input and inc-ref the
> output. But we don't see any DecRef or IncRef IR ops here! That's because the
> ConvArrLikeToVec and ConvObjToVec IR ops handle the refcounting for us. We
> can confirm that by reading their ir.specification entries. The CRc|PRc flags
> on these ops mean that they consume a refcount on their input and produce one
> on their output. If we replace these ops with ones that don't handle
> refcounting, we'll have to generate refcounting IR ops ourselves.
For item 2, we must check the conditions under which our optimization applies.
Since we're trying to optimize vec casts on Vector objects, we only have to
consider the TObj case, but we can't apply our optimization to all objects. We
must use the JIT type system to check for a more specific type here.
Let's make this change by editing the TObj line alone. We can insert another
C++ lambda to give us some syntactic space to make this logic more complex.
Then, we can use the jit::Type's specialized constructor, jit::Type::SubObj, to
produce a type to compare against here:
```
if (src->isA(TObj)) return [&]{
  auto const TVector = Type::SubObj(c_Vector::classof());
  if (src->isA(TVector)) {
    // TODO: We can optimize this case!
  }
  return gen(env, ConvObjToVec, src);
}();
```
> We create and immediately invoke a closure here as a way to pack complex
> control flow in an expression position. It's the same reason that we create
> and invoke the outer closure, the one that produces the output that we push
> onto the stack. You could consider this pattern a "cute" "trick", or you
> could refactor this code to use named helper methods instead. The choice is
> yours!
Make these edits and check that HHVM still compiles. (You may need to include
"hphp/runtime/ext/collections/ext_collections-vector.h" at the top of the file,
too.) On to item 3! We need to find an IR op to extract the vec that backs a
Vector. There are a few ways to look for this op:
* We could do a (case-insensitive) search for "collection" or "vector" in the file.
* We could look for ops that take an object as input, by searching for S(Obj).
* We could look for ops that return a vec as output, by searching for D(Vec).
The first approach quickly leads us to:
```
| LdColVec, D(Vec), S(Obj), NF
Load the vec array backing a collection instance in S0. S0 must be a Vector
or ImmVector, and that specific object type must be known at compile time.
```
That's exactly what we need! However, we now have to generate the appropriate
refcounting IR ops. Unlike ConvObjToVec, LdColVec has no special flags - it
simply does a load. In order to generate any IR ops here, we need to use the
magical templated "gen" helper. This helper takes variadic inputs and builds
an IR instruction from those inputs. In order, its inputs are:
1. The IRGS& environment struct "env".
2. The IR op for the new instruction, specified by its name. (It's a C++ enum.)
3. (Optional) The "taken" branch, a jit::Block to jump to. Only used for control-flow-y ops (with flags "B" or "T").
4. (Optional) The jit::Type for the op. Only used if the op takes a type immediate, like CheckType does.
5. (Optional) Any other immediates associated with that op. (See extra-data.h to see if each op takes one.)
6. The list of source SSATmps for that op, in order.
To get this optimization working, we must gen an LdColVec, an IncRef of the
result, and a DecRef of the input. LdColVec and IncRef don't need any of the
three optional arguments above. DecRef has an associated immediate, but no
other optional arguments. As a result, this code should work for us:
```
if (src->isA(TObj)) return [&]{
  auto const TVector = Type::SubObj(c_Vector::classof());
  if (src->isA(TVector)) {
    auto const result = gen(env, LdColVec, src);
    gen(env, IncRef, result);
    gen(env, DecRef, DecRefData(), src);
    return result;
  }
  return gen(env, ConvObjToVec, src);
}();
```
> It's important that we do the IncRef here before the DecRef. Make sure you understand why!
When you've got this code compiling, rerun the TRACE=printir:1 command with the
test file and your new HHVM. You should see that the C++ call in ConvObjToVec
has been replaced with an offset load. Our code generation for this case is
much improved!
---
## Step 2: Moving the optimization to simplify
When writing a compiler optimization, you should always try to make it as
general as possible. If your optimization only applies in certain special
cases, then users of your language may see large performance differences due to
small edits to their code, which is a frustrating experience! Further,
performance measurement can be noisy, and the more cases your optimization
covers, the greater the chance of measuring an unambiguous improvement on real
benchmarks.
> Compiler engineers are greedy. They want to optimize as many cases as
> possible. Compiler engineers are also lazy. They may give up at the point at
> which it becomes hard to extend an optimization further. As a result of these
> warring impulses, compilers exhibit stepwise behavior. Ubiquitous, simple
> cases are heavily optimized. A variety of common cases are handled somewhat
> well. The general case may be slow as molasses.
There are a variety of ways to extend this optimization. We'll look at simpler
extensions in the exercises, but the most important extension here is to take
advantage of late type information.
[HHVM includes a variety of optimization passes.](../../runtime/vm/jit/opt.cpp)
These passes run after irgen is complete. They take an IR unit that implements
some Hack source code, and modify it in place to produce better IR that
implements the same code. These passes can produce tighter type bounds on
values than we originally had at irgen time; for example, if we inline a
function, we may get a better type for its return value, which recursively
results in tighter types for SSATmps computed based on that value.
If we can do an IR optimization without introducing new control flow, we can
take advantage of these late types by [moving the optimization to simplify.cpp](../../runtime/vm/jit/simplify.cpp).
The simplifier is run on every IR op after every optimization pass. If there's
any opportunity to make the optimization, the simplifier will find it. The
simplifier's contract is, uh, simple...it processes a single IR instruction,
and if it's possible to replace it with zero or more IR instructions, it does
so.
> When can we simplify an operation down to zero IR instructions? When we can
> const-fold it! Take a look at simplifyXorInt for an example. One of the cases
> we optimize here is xor-ing an SSATmp with itself. If we do that, we can
> return cns(env, 0). Since this result is a constant SSATmp, we don't need to
> execute any IR instructions to compute it. The simplifier will remove the
> XorInt op and replace its result with the constant.
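Based on that description, a minimal sketch of just that one case (the real simplifyXorInt handles more cases) would be:
```
// Minimal sketch of one simplifier case: x ^ x is always 0.
SSATmp* simplifyXorInt(State& env, const IRInstruction* inst) {
  auto const lhs = inst->src(0);
  auto const rhs = inst->src(1);
  if (lhs == rhs) return cns(env, 0);  // replace the instruction with the constant 0
  return nullptr;                      // no simplification found; keep the instruction
}
```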
To move the optimization to the simplifier, we need to:
1. **Revert the change in the irgen code.** The simplify version is strictly
more general. If we leave the irgen change around, then when we examine the
printir, it'll be hard to tell which of our changes kicked in.
2. **Add a simplifyConvObjToVec function to simplify.cpp.** We won't be able to
const-fold here, but based on the input's type, we can still replace this IR
instruction with a more optimized sequence of instructions.
3. **Register the IR op in the "X" macro below.** Search for "XorInt" in the
file. It should appear in a few places, but one line in particular will just
say "X(XorInt)". This line is a macro invocation that dispatches to the new
simplify function.
Here's what I've got for the simplify code (as a diff: D34433352):
```
SSATmp* simplifyConvObjToVec(State& env, const IRInstruction* inst) {
  auto const src = inst->src(0);
  auto const TVector = Type::SubObj(c_Vector::classof());
  if (src->isA(TVector)) {
    auto const result = gen(env, LdColVec, src);
    gen(env, IncRef, result);
    gen(env, DecRef, DecRefData(), src);
    return result;
  }
  return nullptr;
}
```
It's basically the same as the irgen version, but by doing it in the right
place, we optimize more cases! Make this change, get it to compile, and verify
that you can see the optimization on the test case. Then kick off diff testing
and benchmarking!
---
## Lesson summary
* HHVM's IR is typed under the sound JIT type system. This type system
constrains which IR operations we can use on a given value, but we can also
use these types as optimization conditions.
* We use profiling tools to find "hot" (high-cost) functions that may be good
targets for optimization.
* Once we've found a potential optimization target, we create a small test case
and verify that the opportunity still exists at the highest optimization
level.
* When we make an optimization, we should apply it to as many cases as
possible. One way to make an optimization more general is to move it to the
simplifier, so it runs whenever we gain type information.
---
## Exercises
Your first exercise is to think about how else we could extend this optimization. Do so before reading on!
...
...
...
...
...
...
...
...
...
...
...
...
...
...
...
...
For all of these extensions, be sure to create a small test case that shows the current, suboptimal code generation first. Then make the optimization and confirm that the test case is improved!
1. Modify the optimization to apply to vec-cast on ImmVector as well as vec-cast on Vector.
2. Add optimizations for doing a dict cast on a Map, ImmMap, Set, and ImmSet.
3. Add optimizations for doing a vec cast on a Map, ImmMap, Set, or ImmSet, and for doing a dict cast on a Vector or ImmVector. (Look for other relevant IR ops, like the ones that convert between different array-likes. Watch out for refcounting!) |
hhvm/hphp/doc/historical/hack.lambda | Lambda expression syntax for Hack
=================================
The proposed "lambda syntax" feature is alternate syntax for creating closure
objects. The two main selling points of "lambda syntax" are:
(1) Lambda syntax is significantly more concise than traditional closure
syntax.
(2) When lambda syntax is used, the captured variables are determined
automatically based on what variables are explicitly referenced in
the source code of the containing function and any relevant lambda
expressions.
Background
==========
PHP 5 supports a language feature known as "closures", which allow
creating anonymous functions. The existing feature is pretty
reasonable, but has a couple drawbacks:
- The syntax is fairly verbose, and gets some defaults weird:
- Captured variables must always be explicitly listed, and the
"function" and "return" keywords take up a lot of space.
- $this is always captured by default, even if it's not used in
the closure. The syntax "static function() { .. }" is
available to avoid capturing $this, but it's too verbose for
real world use.
- The closure body must always be a compound statement.
- PHP closures are also PHP objects, in a user-visible way, and as
a result it is not trivial to avoid the need to perform
allocations or do as-cheap-as-normal-function invocation for
simple (no-capture) closures in a general way.
- PHP closure objects provide some dangerous member functions (bind
and bindTo) that allow breaking encapsulation (accessing
arbitrary private members of any class).
- Changes to variables captured by value are thrown away---repeated
invocations of the closure will see the originally captured
values each time.
The proposal here attempts to address the first group of syntactic
issues, but does nothing about the other issues mentioned. But a goal
here is not to harm anything we might we want to do in the future
about those other issues.
See https://wiki.php.net/rfc/closures for more on the PHP feature.
Overview in Examples
====================
Expression-like lambdas with a single argument:
$foo = $x ==> $x + 1;
$foo(12); // returns 13
$squared = array_map($x ==> $x*$x, array(1,2,3));
// $squared is array(1,4,9)
Expression-like lambdas with no arguments or more than one argument require
parentheses around the parameter list:
$foo = () ==> 73;
$foo(); // returns 73
$bar = ($x,$y) ==> $x + $y;
$bar(3,8); // returns 11
A compound statement can be given as the body of lambda expression by using
curly braces like so:
$dump_map = ($name, $x) ==> {
echo "Map $name has:\n";
foreach ($x as $k => $v) {
echo " $k => $v\n";
}
};
$dump_map(
"My Map",
Map {'a' => 'b', 'c' => 'd'},
);
Variables are captured automatically and transitively (including $this):
$y = 11;
$foo = () ==> {
return $x ==> $x + $y; // captures $y
};
$bar = $foo();
$bar(5); // returns 16
Use parentheses if you need to provide parameter or return type hints, or if
you need to provide default values for parameters:
$captured = "test: ";
$bar = (string $k = "foo"): string ==> $captured . $k;
Async lambdas could be defined by prepending the async keyword:
$async_fn = async $x ==> {
$y = await $x->foo();
return $y->bar();
};
$awaitable = $async_fn($obj);
$bar = $awaitable->join();
For additional examples see the various unit tests in the implementation.
Syntax/Semantics Details
========================
Closures objects may be allocated with either the traditional closure syntax,
or the new proposed lambda syntax. Closures objects retain their current
semantics, and both syntaxes for creating closures will continue to be
supported going forward.
The proposed lambda syntax creates Closure objects that automatically capture
any referenced variables from enclosing lexical function scopes, transitively
(i.e. nested closures that use named variables from several levels out will
cause each intermediate closure to capture that variable so it can pass it to
the inner one).
Variables are only captured when they are statically visible as names in the
enclosing scope. I.e., the capture list is computed statically, not based on
dynamically defined names in the scope. The Hack typechecker could potentially
refine these rules with ahead-of-time errors on some automatic captures that
the runtime allows.
A lambda expression's captured variables are captured with the same "by value"
semantics that are used for variables in the "use" list of a traditional closure
expression that aren't preceded by "&". ("&" before a variable in a traditional
closure's "use" list makes the variable get captured "by reference".) PHP
programmers who use closures rarely put "&" before a variable in the "use" list
of a traditional closure expression; as such they are familiar enough with
the model of capturing variables "by value" that they can be productive and
write correct code under this model.
Lambda expressions do not support capturing variables by reference. If the
programmer wants to capture variables by reference they must use the traditional
closure syntax.
Lambda expressions do not support returning by reference at present, but support
could be added in the future.
hh_server-related
=================
Also proposed (and not part of the proof of concept on the runtime
implementation) are some things for the hh_server side:
- It is a static error if a closure reads a captured variable that is in the
enclosing scope but that is undefined on any control flow path to the
closure allocation site. Hack is already giving this error for traditional
closure expressions, so this just extends it to lambda expressions.
For example:
function foo() {
$y = $x ==> $x + $blah; // hh error: Undefined variable $blah
$blah = 12;
}
function foo2() {
if (something()) {
$blah = 12;
}
$y = $x ==> $x + $blah; // hh error: $blah is not in scope
}
The runtime will also emit an undefined variable warning when the variable
is read in this case.
Note, though, that it's not an error in the case of an assignment: since
lambdas capture variables by value, a variable that is assigned inside the
closure is always defined there:
function foo() {
$y = () ==> $blah = 2; // OK
$blah = 12;
}
- Disallow the new style lambda expressions at the top-level (i.e. in
pseudo-mains). All top-level statements are disallowed in strict mode
anyway, and since this is new we can disallow it in partial as well. |
|
hhvm/hphp/doc/historical/hack.shapes | Notes on the `shapes' feature in Hack:
Shapes in <?hh files are a way for us to add typechecking
incrementally to existing php code that uses php arrays somewhat like
structs.
For example, code like this:
<?php
function dot_product($a, $b) {
return $a['x'] * $b['x'] + $a['y'] * $b['y'];
}
Can be annotated with types to look like this:
<?hh
type Point2D = shape('x' => int, 'y' => int);
function dot_product(Point2D $a, Point2D $b): int {
return $a['x'] * $b['x'] + $a['y'] * $b['y'];
}
This is not a complete design rationale, but contains a few notes to
explain why things are the way they are so far.
Notes:
- Classes with public members (or getters/setters) exist and probably
work fine as a sort of record/struct thing. When people want them
(particularly in newer code), they work.
- This is intended to allow type-checking cases where arrays are
currently being used as aggregates (instead of people using
classes). In order for them to be adopted and do what they are
intended to do, migration has to be as painless as possible (which
is why the object-like structs above are not "enough": no one is
going to switch all of www away from struct-like arrays to that).
- Generally we want most <?hh migration to be primarily about adding
type annotations somewhere in a function signature, not about
significantly changing method bodies.
- To be explicit: this last point means $x['foo'] still needs to be
valid syntax for accessing a shape member to ease conversion in the
short term. For construction sites we are just doing s/array/shape/
when you want them checked as shapes (which will also require at
parse time that the keys are string literals, do not start with
digits, etc).
- People sometimes write code that operates on sub-pieces of these
record-like arrays. This means a traditional (C/C++/Java style)
structure is going to be a little weird to add type annotations
for. You can imagine trying to migrate a function to <?hh that
legitimately takes "any array with elements named 'id' and 'name'
that are int and string", and is called with various other
struct-like arrays that have other elements lying around in them. To
make this possible to migrate with minimal pain, we're using
structural subtyping.
- Because they don't work like the "structs" people are used to,
calling it a "struct" seemed potentially confusing.
- With enough code in Hack so we know the types of things, it might be
possible to automate migration to a new syntax at access
sites. However, in the short term, we definitely can't use $x->foo
since this is an object property access and the runtime can't
compile it differently based on the type of $x. You might also
consider $x.foo (which might work in <?hh files for reasons I'll
leave out), but again for the foreseeable future we probably want
the same syntax to work in a <?php file, and right now this means
the variable $x converted to a string and concatenated with the
value of the constant foo (which will be the string "foo" if
undefined).
- In the syntax for defining shape *types*, our hands are probably a
little less tied. The current choice was to try to keep it somewhat
similar to the access sites and arrays in general (maybe helps so we
can explain this as "shapes are just special/limited arrays"), and
also to fit in with typedefs and possible syntax for adding enums. |
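A rough sketch of the structural-subtyping point above (illustrative only;
the exact checking rules were still being worked out when these notes were
written):
<?hh
type UserRef = shape('id' => int, 'name' => string);
function describe(UserRef $u): string {
  // Only 'id' and 'name' are required by the annotation.
  return $u['name'].' #'.$u['id'];
}
function caller(): string {
  // The argument carries an extra 'status' field; with structural subtyping
  // it is still accepted, and describe() simply ignores the extra field.
  return describe(shape('id' => 1, 'name' => 'pat', 'status' => 'ok'));
}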
|
hhvm/hphp/doc/historical/hack.typedefs | Notes on the typedef feature in Hack:
In HipHop, <?hh files allow a language extension to define new types
for use in typehinting. The syntax looks like this:
type MyInt = int;
type Point = (int, int);
Or
newtype AbstractInt = int;
This document is not a complete design rationale, but may address a
few questions.
Notes:
- We want to support short names for types that should be compatible
with the underlying representation. The syntax used above is found
in several languages (Haskell, OCaml, F#, Rust, others). Go is
similar, but has no equal sign. C#/Java don't have typedefs so they
don't provide much inspiration. C/C++'s typedef/declaration syntax
isn't appropriate (or particularly reasonable).
- Another use case is making types that abstract away the details of
their underlying implementation. For example:
<?hh
newtype SQLSafeString = string;
function scrub_user_input_for_sql(string $str): SQLSafeString {
// ...
}
Haskell uses "newtype" in this case, but requires a named "value
constructor" which you have to use to turn strings into
SQLSafeString in the first place. OCaml doesn't have this kind of
newtype, but you can use the module system to create abstract types.
C++ has strong typedefs only for integral types, with the syntax
"enum class Foo : int {}", which is clearly not something to
emulate. A language proposal for general strong typedefs has
suggested a "using Foo = private int" syntax, which is probably
worth avoiding based on keyword reuse and being a little strange
(and for that matter not actually part of C++ yet, so it may
change).
The planned semantics are that a type declared with "newtype" is
exactly the same as one declared with "type" as far as the runtime
is concerned, but Hack will consider it compatible with its
underlying type only in the file that declared it (see the sketch at
the end of these notes).
- Since we want to use them for signatures on functions, we considered
two choices relating to how typedefs would interact with the
runtime. One option is to "drop" typehints about them the same way
most Hack annotations are dropped. Unfortunately this has an impact
on syntax. Imagine the following function definition:
function is_x_large(Point2D $x): bool { return $x['x'] > 10; }
Since hhvm compiles php files separately, we don't know at bytecode
generation time whether Point2D should be a typehint for a class or
dropped for Hack. One option is to mangle hints for types:
function is_x_large(type Point2D $x): bool { return $x['x'] > 10; }
We figured this syntax (and other variants) might hurt adoption
excessively, and runtime checking of typedefs is also preferable to
wrong behavior.
- Because of this, we need some runtime changes for typedefs. An
identifier in a typehint might be a typedef instead of a class in a
given request.
- We support typedefs from one class to another. For Hack the
motivation is mostly support for typedefs of generics. For example:
newtype LinearModel = Vector<float>;
As far as the runtime is concerned, this just means that the type
LinearModel = Vector. Generic information is currently dropped, and
only checked by Hack.
- Typehints for typedefs only check the "top-level" type for primitive
types. In the case of shapes, this means we only check for now that
the parameter is KindOfArray, and rely on Hack to typecheck the
contents. This is pretty much the best we can do currently: the
runtime doesn't really know much about the "deep" type of most
runtime values.
- We do not support "instanceof SomeTypedef". Rationale: instanceof
is important to perf, and we don't know of an important use case.
Not implementing it yet seems like a safe position, since we could
find a way to add it later if there is a compelling use case.
- You can't allocate a class instance via a typedef name.
Essentially, declaring new types only creates new names for existing
types---it does not create new "value constructors". This may be
changed later if we find it is useful. |
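A small sketch of the "newtype" opacity described above (the file split and
function bodies are illustrative):
<?hh // a.php: declares the newtype, so it may treat it as its underlying type
newtype SQLSafeString = string;
function scrub_user_input_for_sql(string $str): SQLSafeString {
  return $str; // real escaping elided
}
<?hh // b.php: every other file sees SQLSafeString as opaque
function run_query(SQLSafeString $q): void { /* ... */ }
function bad(string $raw): void {
  run_query($raw); // Hack error: string is not SQLSafeString outside a.php
}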
|
hhvm/hphp/doc/historical/hack.types | ********************
* Type annotations *
********************
Introduction
------------
With the introduction of Hack, our runtime types have gradually become richer.
Some type annotations are currently enforced, others trigger warnings, and some
are just ignored. Over time we will probably implement more consistent checks.
Type annotations locations
--------------------------
Type annotations can be used in the following places:
1. to annotate class properties
2. to annotate method/function arguments
3. to annotate method/function return
Code example:
class A {
protected **PROPERTY TYPE** $x;
public function foo(**ARGUMENT TYPE** $a): **RETURN TYPE** {
...
}
}
List of possible annotations
----------------------------
* primitive
e.g.: function foo(int $x) { ... }
list of primitives: int, string, bool, float, resource, array
* class
e.g.: function foo(A $a) { ... }
Can be any user defined class.
Note: Don't try to use object or StdClass to indicate that a function takes
any class. There is no way to express the root class, parent of all.
* xhp
e.g.: function foo(:div $d) { ... }
or: function foo(): :div { ... }
* class with generic
e.g.: function foo(Vector<int> $v) { ... }
or: function foo(Map<string, int> $m) { ... }
or: function foo(Vector<Vector<int>> $v) { ... }
Can be any user defined generic class.
In some cases, the typevar is still present:
class A<T> {
public function foo(T $x) { ... }
}
or:
function foo<T>(T $x) { ... }
or:
function foo<T>(Vector<T> $x) { ... }
Notes:
- it is valid to write function foo(Vector $v) { ... } and is roughly
equivalent to function foo<T>(Vector<T> $v) { ... }.
- array is a generic type, with either one or two type variables, all
three following examples are valid:
function foo(array $a) { ... }
function foo(array<A> $a) { ... }
function foo(array<int, A> $a) { ... }
* typedefs
e.g.: type one = ?int; function foo(one $x) { ... }
or: newtype one = ?int; function foo(one $x) { ... }
* void
e.g.: function foo(): void { ... }
void only makes sense as a return type. A void function actually returns
null, so it's an ambiguous type. In general, ?void or Vector<void>
usually won't make a lot of sense.
* mixed
e.g.: function foo(mixed $x) { ... }
Mixed is a wildcard type and matches everything.
* nullable
e.g.: function foo(?int $x) { ... }
The '?' can be combined with any other type: ?int, ?Vector<?float>, etc.
* tuple
e.g.: function foo((int, string) $x) { ... }
1-element tuples are disallowed (made things easier in the parser).
tuples piggyback on arrays. $x[0] is the first element, $x[1] the second,
etc.
* closure
e.g.: function foo((function(int, string): int) $f) { ... }
or: function foo((function(int)) $f) { ... }
Annotating closures is unfortunately verbose.
Note: In the php docs, closures are called anonymous functions.
* callable
e.g.: function foo(callable $f) { ... }
The callable type is similar to a closure, but is weaker: it does not convey
any signature information about the function. A callable is compatible with
arrays, strings and closures, as long as the array or string 'points' to a
valid function or method (see the example after this list).
* this
e.g.:
class A {
public function foo(): this {
return $this;
}
}
The this type indicates the code always returns $this. Returning a different
instance of A is an error.
Note: The this type can only be used as a return type annotation. It cannot
be used as a parameter or a property annotation.
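A quick example of the callable compatibility mentioned above (the names are
placeholders):
function apply(callable $f): void { $f(); }
function greet(): void { echo "hi\n"; }
class Greeter { public function greet(): void { echo "hi\n"; } }
apply('greet'); // string naming a function
apply(array(new Greeter(), 'greet')); // array of object and method name
apply(function() { echo "hi\n"; }); // closure (anonymous function)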
Type enforcement
----------------
The runtime currently ignores annotations on properties and return types.
For argument annotations, the current implementation is the following:
* primitive types are enforced.
* class types are enforced.
* :xhp types are enforced.
* generic types are partially enforced. Only the base class is checked.
E.g. Vector<int> is enforced as Vector.
* void types are not enforced.
* mixed types are enforced by being ignored.
* nullable types are partially enforced. A nullable type ?t only triggers
warnings (see soft types below).
* tuple types are enforced as arrays.
* closure types are not enforced.
* callable types are enforced.
* this types are not enforced.
If an annotation is prepended with an @ sign, the annotation is called "soft".
A soft annotation mismatch gets logged as a warning, but the runtime continues
execution. This is designed mainly for debugging and for migrating code.
e.g.: function foo(@int $x) { ... }
The presence or absence of the @ modifier does not change how an annotation gets
enforced.
Note: The @ belongs to the annotation, not the type. I.e. Foo<@Bar> is illegal.
Runtime data structures
-----------------------
1. The parser stores the type information in TypeAnnotation objects
(see compiler/type_annotation.h).
2. emitter.cpp uses the TypeAnnotation to populate Func
(see compiler/analysis/emitter.cpp and runtime/vm/func.h).
3. Func uses
- DataType (see runtime/base/datatype.h) for builtin types.
- TypeConstraint (see runtime/vm/type-constraint.h) for checking
the types, stored in m_typeConstraint.
- StringData, used for reflection, stored in m_userType.
4. TypeConstraint contains three pieces of information:
- a Type (see runtime/vm/jit/type.h)
- a set of bits to tell if a type is nullable, soft, etc.
- a StringData which is truncated in the case of generics.
Note: in some cases, e.g. function foo((function(int)) $x), m_typeConstraint
is null since we don't check closures, but m_userType still contains
the reflection information.
5. TODO:
- explain how type information flows into the jit.
- document compiler/analysis/type.h
- document hhbbc/representation.h |
|
Man Page | hhvm/hphp/doc/man/hhvm.1 | .TH HHVM 1
.SH NAME
hhvm \- Execute PHP and Hack files
.SH SYNOPSIS
.B hhvm
.RI [ OPTIONS ]
.RI [ \-f ]
.RI file
.RI [ [ \-\- ] args \.\.\. ]
.br
.B hhvm
.RI \-\-php
.RI [ PHP-OPTIONS ]
.RI [ \-f ]
.RI file
.RI [ [ \-\- ] args \.\.\. ]
.SH DESCRIPTION
.BR hhvm (1)
(aka the HipHop Virtual Machine) is an open-source virtual machine designed for executing programs written in Hack and PHP. HHVM uses a just-in-time compilation approach to run programs. HHVM should be used standalone for command line (CLI) scripts or together with a FastCGI-based web server like nginx or apache to serve websites.
This is the command line interface to HHVM.
The simplest way to parse and execute a program is to provide a filename as the single parameter. Specifying
.B \-f
before the filename does this in a more explicit fashion, and allows other
.IR OPTIONS
to follow.
HHVM has its own set of
.IR OPTIONS
, but it can also behave like PHP 5.x.
When the
.B \-\-php
flag is specified to
.BR hhvm (1)
(or the binary is named
.B php
), options available to PHP 5.x can be used.
You may use:
.nf
.RS
sudo /usr/bin/update-alternatives --install /usr/bin/php php /usr/bin/hhvm 60
.RE
.fi
to force
.BR hhvm (1)
to be used even if PHP 5.x is installed on the system.
.SH FILES
These are the default configuration files for HHVM. Right now both
.B \.ini
and
.B \.hdf
are supported as HHVM continues to migrate completely to
.B \.ini
.
.SS php\.ini
The default ini settings for
.BR hhvm (1)
are found in
.B /etc/hhvm/php\.ini
.SS config\.hdf
The default hdf options for
.BR hhvm (1)
are found in
.B /etc/hhvm/config\.hdf
.SH OPTIONS
These are the options you can specify to the
.BR hhvm (1)
executable.
.TP
.B \-a, \-\-interactive
Shortcut for the
.B \-\-mode
.I debug
command line option.
.TP
.BI \-\-admin\-port " PORT"
Start admin listener at a specified port. The default value for
.I PORT
is
.BR \-1 .
.TP
.BI \-\-arg " ARG"
Other arguments.
.TP
.BI \-\-build\-id " ID"
Unique identifier for the compiled server code.
.TP
.B \-c ", " \-\-config " FILE"
Load specified config (.hdf or .ini)
.IR FILE .
Use multiple
.B \-c
to specify multiple config files.
.TP
.B \-\-compiler\-id
Display the git hash for the compiler.
.TP
.BI \-\-count " NUM"
How many times to repeat execution. The default value for
.I NUM
is
.BR 1 .
.TP
.B \-d ", " \-\-define " SETTING"
Define an ini setting in the same format ( foo[=bar] ) as provided in a .ini file.
.TP
.BI \-\-debug\-cmd " ARG"
In
.B \-\-mode
.I debug
, this executes the debugger command specified by
.I ARG
and returns its output in stdout. Multiple
.B \-\-debug\-cmd
can be specified to execute more than one command.
.TP
.BI \-\-debug\-config " FILE"
In
.B \-\-mode
.I debug
, load the specified debugger configuration
.IR FILE .
.TP
.BI \-\-debug\-extension " ARG"
In
.B \-\-mode
.I debug
,
.I ARG
specifies which debugger extension PHP file to load.
.TP
.BI \-\-debug\-port " PORT"
In
.B \-\-mode
.I debug
, connect to the debugger server at specified
.IR PORT .
.TP
.BI \-\-debug\-sandbox " ARG"
In
.B \-\-mode
.I debug
, initial sandbox to attach to when the debugger is started. The default for
.I ARG
is "default".
.TP
.BI \-\-extra\-header " ARG"
An extra-header to add to the front of each line of logging.
.TP
.B \-f ", " \-\-file " FILE"
In
.B \-\-mode
.I run
, execute the specified
.I FILE .
.TP
.B \-h ", " \-\-debug\-host " ADDRESS"
In
.B \-\-mode
.I debug
, connect to debugger server at specified address. The default for
.I ADDRESS
is
.BR localhost .
.TP
.B \-\-help
Display the list of command line options with short descriptions.
.TP
.BI \-\-instance\-id " ID"
Unique identifier of server instance.
.TP
.B \-l ", " \-\-lint " FILE"
Run lint on the specified
.IR FILE .
.TP
.B \-m ", " \-\-mode " MODE"
.I MODE
can be any of the following values:
.RS
.IP \[bu] 2
run (default): directly executes the program from the command line.
.IP \[bu]
debug (d): starts the debugger.
.IP \[bu]
server (s): starts an HTTP server from command line.
.IP \[bu]
daemon: starts an HTTP server and runs it as a daemon.
.IP \[bu]
replay: replays a previously recorded HTTP request file.
.IP \[bu]
translate (t): translates a hex-encoded stacktrace.
.RE
.TP
.B \-\-no\-config
Do not use the default php.ini
.TP
.BI \-\-no\-safe\-access\-check " BOOL"
Whether to allow any file or directory access without security checking. The default value is
.B 0
(or false).
.TP
.B \-p ", " \-\-port " PORT"
Start an HTTP server at the specified port. The default
.I PORT
is
.BR \-1 .
.TP
.B \-\-php
Emulate the standard
.BR php(1)
command line. You can specify the options allowed in PHP 5.x.
.TP
.B \-\-port\-fd " FD"
Use the specified
.I FD
for HTTP instead of creating a socket. The default value is
.BR \-1 .
.TP
.B \-\-ssl\-port\-fd " FD"
Use the specified
.I FD
for SSL instead of creating a socket. The default value is
.BR \-1 .
.TP
.B \-\-repo\-schema
Display the repository schema id.
.TP
.BI \-\-temp\-file " FILE"
.I FILE
specified is temporary and removed after execution.
.TP
.B \-u ", " \-\-user " USER"
Run server under the specified
.I USER
account.
.TP
.B \-v ", " \-\-config\-value " SETTING"
Individual configuration string in HDF format (name=value), where name can be any valid configuration for a config file. e.g., Section.Name=Value.
.TP
.B \-\-version
Display the
.BR hhvm (1)
version number.
.TP
.B \-w ", " \-\-show " FILE"
Directly output the specified
.I FILE
and do nothing else.
.SH PHP-OPTIONS
These are the options you can specify to the
.BR hhvm (1)
executable when explicitly specifying the
.I \-\-php
flag, or when the binary is named
.B php .
Not all PHP 5.x options are supported at this point in time; we plan
to continue adding support for more options.
.TP
.B \-a, \-\-interactive
Similar to the same option when not specifying
.I \-\-php .
Run HHVM interactively. This lets you enter snippets of PHP code that directly get executed.
.TP
.BI \-c " FILE"
Use the specified
.I FILE
for configuration options. The file should be .ini format.
.TP
.BI \-d " SETTING"
Define INI entry specified by
.I SETTING
of the form
.B foo[=bar] .
.TP
.B \-n
Do not use any configuration (e.g., .ini) file when executing.
.TP
.BI \-r " CODE"
Run HHVM code without using script tags, directly on the command line.
.TP
.B \-v
Output the current HHVM build version number.
.TP
.B \-w
Output source with stripped comments and whitespace.
.TP
.BI \-z " FILE"
Load a dynamic extension file.
.SH EXAMPLES
.TP
\fIhhvm foo\.php\fP
Execute a PHP file.
.TP
\fIhhvm \-c custom\.ini foo\.php\fP
Execute a PHP file given a certain configuration.
.TP
\fIhhvm \-\-php \-r 'echo "Hello Everyone!";'\fP
Execute code from the command line using the --php option and -r.
.TP
\fIhhvm \-m server \-\-port 9090\fP
Execute HHVM in server mode using a specified port.
.SH BUGS
The list of currently known HHVM issues can be found at:
.IR https://github.com/facebook/hhvm/issues
.SH AUTHORS
The contributors to HHVM can be found here:
.IR https://github\.com/facebook/hhvm/graphs/contributors
.SH VERSION INFORMATION
This manpage describes
.BR hhvm (1)
, version 3.0 and greater
.SH COPYRIGHT
HHVM is licensed under the PHP and Zend licenses except as otherwise noted. The full license can be found at:
.IR https://github\.com/facebook/hhvm
.SH SEE ALSO
.BR hphpize (1)
.br
.I http://docs\.hhvm\.com |
Man Page | hhvm/hphp/doc/man/hphpize.1 | .TH HPHPIZE 1
.SH NAME
hphpize \- prepare an HHVM extension for compiling DSOs.
.SH SYNOPSIS
.B hphpize
.SH DESCRIPTION
.BR hphpize (1)
is a shell script that prepares an HHVM extension for compilation as a
dynamic shared object (DSO).
.BR hphpize (1)
requires, at a minimum, a PHP|Hack|C++ extension and a
.B config\.cmake
file that contains the build information for cmake.
.B config\.cmake
must contain at least the following two lines:
.nf
.RS
HHVM_EXTENSION(${extension_name} ${filestobuild})
.RE
.RS
HHVM_SYSTEMLIB(${extension_name} ${PHPfiletobuild})
.RE
.fi
specifying the C++ source file(s) to build and one PHP file to embed in the DSO
as a system library (systemlib).
.B config\.cmake
can contain other information as well, such as links to other
libraries, etc.
.SH EXAMPLE
.I https://github.com/hhvm/extension-example
.SH VERSION INFORMATION
This manpage describes
.BR hphpize (1),
version 3.0 and greater
.SH COPYRIGHT
HHVM is licensed under the PHP and Zend licenses except as otherwise noted. The full license can be found at:
.IR https://github\.com/facebook/hhvm
.SH SEE ALSO
.BR hhvm (1)
.br
.I https://github.com/facebook/hhvm/wiki/extension-api
.br
.I https://github.com/hhvm/extension-example
.br
.I http://docs\.hhvm\.com |
hhvm/hphp/hack/.gitignore | /.vscode/tasks.json
/facebook/cargo/.package-cache
/facebook/cargo/git
/facebook/cargo/registry/
bin/
src/inotify-1.0/
src/_build/
src/parsing/parser.output
src/parsing/lexer.ml
src/parsing/lexer_hack.ml
src/parsing/parser.ml
src/parsing/parser.mli
_obuild/
.ocp/
hhi/**/INDEX
facebook/redirect
facebook/opam2-mini-repository
facebook/opam2-mini-repository-*
facebook/opam2-mini-repository-*.tar.gz
target/
dev_env.sh
.merlin
# Generated by scripts/build_rust_to_ocaml.sh; the script can't delete it
# without creating a race condition when we're doing parallel builds.
#
.merlin.reader |
|
hhvm/hphp/hack/.ocamlformat | # -*- conf -*-
break-cases = all
break-fun-decl = smart
break-infix = fit-or-vertical
break-separators = after
break-sequences = true
break-string-literals = never
cases-exp-indent = 2
disambiguate-non-breaking-match = true
doc-comments = before
dock-collection-brackets = true
exp-grouping = preserve
field-space = tight-decl
if-then-else = k-r
indicate-nested-or-patterns = unsafe-no
leading-nested-match-parens = true
parens-tuple-patterns = always
module-item-spacing = sparse
sequence-blank-line = preserve-one
sequence-style = terminator
space-around-lists = false
space-around-records = true
space-around-variants = true
type-decl = sparse
wrap-fun-args = false |
|
Text | hhvm/hphp/hack/CMakeLists.txt | option(BUILD_HACK "True if we should build the Hack typechecker." ON)
include(CMakeParseArguments)
if (NOT BUILD_HACK)
message(STATUS "Skipping hack")
return()
endif()
message(STATUS "Building hack")
find_package(LZ4)
find_package(LibElf)
# native_libraries: values for `-l` flags
# lib_paths: values for `-L` flags (directories)
# extra_link_opts: opaque options passed to the linker
#
# We need extra_link_opts for:
# - static libraries
# - anything built from third-party: cmake gives us the link flags
unset(extra_include_paths)
unset(extra_native_libraries)
unset(extra_lib_paths)
unset(extra_link_opts)
unset(extra_cc_flags)
# Allows '#include "hphp/path/to/library/"' paths to start from hphp
# project directory which is consistent with fbmake's include paths.
list(APPEND extra_include_paths ${HPHP_HOME})
list(APPEND extra_cc_flags -pthread)
# The Xcode/Ninja generators leave MAKE undefined
if(NOT MAKE)
set(MAKE make)
endif()
if ("${CMAKE_CURRENT_SOURCE_DIR}" STREQUAL "${CMAKE_CURRENT_BINARY_DIR}")
set(DUNE_BUILD_DIR "${CMAKE_CURRENT_SOURCE_DIR}/_build")
set(OPAM_STAMP_FILE "_build/opam.stamp")
set(RUST_FFI_BUILD_ROOT "${DUNE_BUILD_DIR}/rust_ffi")
set(CARGO_HOME "${DUNE_BUILD_DIR}/cargo_home")
else()
set(DUNE_BUILD_DIR "${CMAKE_CURRENT_BINARY_DIR}")
set(RUST_FFI_BUILD_ROOT "${CMAKE_BINARY_DIR}")
set(CARGO_HOME "${CMAKE_CURRENT_BINARY_DIR}/cargo_home")
set(OPAM_STAMP_FILE "opam.stamp")
endif()
set(HACK_BUILD_ROOT "${DUNE_BUILD_DIR}/default")
get_target_property(OPAM_EXECUTABLE opam IMPORTED_LOCATION)
add_custom_command(
OUTPUT "${OPAM_STAMP_FILE}"
DEPENDS opam opam_setup.sh
COMMAND
${CMAKE_CURRENT_SOURCE_DIR}/opam_setup.sh
"${OPAM_EXECUTABLE}"
"${DUNE_BUILD_DIR}"
&& cmake -E touch "${OPAM_STAMP_FILE}"
)
add_custom_target(opam_setup DEPENDS "${OPAM_STAMP_FILE}" opam_setup.sh)
if (SKIP_OPAM)
set(OPAMROOT "~/.opam")
else ()
set(OPAMROOT "${DUNE_BUILD_DIR}/opam")
endif()
if(LZ4_FOUND)
list(APPEND extra_include_paths ${LZ4_INCLUDE_DIR})
get_filename_component(pth ${LZ4_LIBRARY} DIRECTORY)
list(APPEND extra_lib_paths ${pth})
list(APPEND extra_native_libraries "lz4")
else()
get_target_property(LZ4_INCLUDE_DIRS lz4 INTERFACE_INCLUDE_DIRECTORIES)
list(APPEND extra_include_paths ${LZ4_INCLUDE_DIRS})
# If LZ4_FOUND is false either we didn't find lz4 or we found it but it's the
# wrong version. We can't just add the new path and a native_lib because we
# can't control the order (and -l won't accept the raw path to the lib). By
# doing it this way we specify the path explicitly.
get_target_property(LZ4_LIBS lz4 INTERFACE_LINK_LIBRARIES)
list(APPEND extra_link_opts ${LZ4_LIBS})
endif()
get_target_property(ZSTD_INCLUDE_DIRS zstd INTERFACE_INCLUDE_DIRECTORIES)
list(APPEND extra_include_paths ${ZSTD_INCLUDE_DIRS})
get_target_property(ZSTD_LIBS zstd INTERFACE_LINK_LIBRARIES)
list(APPEND extra_link_opts ${ZSTD_LIBS})
list(APPEND extra_include_paths ${LIBSQLITE3_INCLUDE_DIR})
get_filename_component(pth ${LIBSQLITE3_LIBRARY} DIRECTORY)
list(APPEND extra_lib_paths ${pth})
list(APPEND extra_native_libraries "sqlite3")
get_target_property(RUSTC_EXE rustc LOCATION)
get_target_property(CARGO_EXE cargo LOCATION)
get_filename_component(RUSTC_BIN_DIR "${RUSTC_EXE}" DIRECTORY)
get_filename_component(CARGO_BIN_DIR "${CARGO_EXE}" DIRECTORY)
function(invoke_dune name target)
add_custom_target(
${name}
COMMAND
. "${CMAKE_CURRENT_BINARY_DIR}/dev_env.sh" &&
opam exec --
$(MAKE) --makefile=Makefile.dune ${target}
BYTECODE="${EMIT_OCAML_BYTECODE}"
WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
)
add_dependencies(${name} rustc cargo zstd)
if (NOT SKIP_OPAM)
add_dependencies(${name} opam_setup)
endif()
endfunction()
invoke_dune(hack_dune_debug debug)
invoke_dune(hack_dune_test test)
invoke_dune(hack_dune all)
set(INVOKE_CARGO "${CMAKE_SOURCE_DIR}/hphp/hack/scripts/invoke_cargo.sh")
if(DEFINED ENV{HACKDEBUG})
set(PROFILE "debug")
else()
set(PROFILE "release")
endif()
set(RUST_OPCODES_DIR "${CMAKE_BINARY_DIR}/hphp/hack/src/hackc")
set(RUST_OPCODES "${RUST_OPCODES_DIR}/opcodes.rs")
set(HHBC_PREFIX "${CMAKE_CURRENT_SOURCE_DIR}/src/hackc")
set(HHBC_AST_SRCS
"${HHBC_PREFIX}/hhbc/adata.rs"
"${HHBC_PREFIX}/hhbc/attribute.rs"
"${HHBC_PREFIX}/hhbc/body.rs"
"${HHBC_PREFIX}/hhbc/class.rs"
"${HHBC_PREFIX}/hhbc/coeffects.rs"
"${HHBC_PREFIX}/hhbc/constant.rs"
"${HHBC_PREFIX}/hhbc/function.rs"
"${HHBC_PREFIX}/hhbc/id.rs"
"${HHBC_PREFIX}/hhbc/instruct.rs"
"${HHBC_PREFIX}/hhbc/method.rs"
"${HHBC_PREFIX}/hhbc/module.rs"
"${HHBC_PREFIX}/hhbc/param.rs"
"${HHBC_PREFIX}/hhbc/pos.rs"
"${HHBC_PREFIX}/hhbc/property.rs"
"${HHBC_PREFIX}/hhbc/symbol_refs.rs"
"${HHBC_PREFIX}/hhbc/types.rs"
"${HHBC_PREFIX}/hhbc/type_const.rs"
"${HHBC_PREFIX}/hhbc/typedef.rs"
"${HHBC_PREFIX}/hhbc/typed_value.rs"
"${HHBC_PREFIX}/hhbc/unit.rs"
"${HHBC_PREFIX}/hhbc/unit_cbindgen.rs" # Not in a crate.
"${RUST_OPCODES}"
)
set(NAMING_SPECIAL_NAMES_PREFIX "${CMAKE_CURRENT_SOURCE_DIR}/src/naming")
set(NAMING_SPECIAL_NAMES_SRCS
"${NAMING_SPECIAL_NAMES_PREFIX}/naming_special_names.rs"
"${NAMING_SPECIAL_NAMES_PREFIX}/naming_special_names_ffi_cbindgen.rs"
)
set(FFI_PREFIX "${CMAKE_CURRENT_SOURCE_DIR}/src/utils/ffi")
set(FFI_SRCS
"${FFI_PREFIX}/ffi.rs"
"${FFI_PREFIX}/ffi_ffi_cbindgen.rs"
)
add_custom_command(
OUTPUT ${RUST_OPCODES}
COMMAND
${CMAKE_COMMAND} -E make_directory "${RUST_OPCODES_DIR}" &&
. "${CMAKE_CURRENT_BINARY_DIR}/dev_env_rust_only.sh" &&
${INVOKE_CARGO} dump-opcodes dump-opcodes --exe &&
${INVOKE_CARGO} dump-opcodes dump-opcodes --bin dump_opcodes
-o "${RUST_OPCODES}"
COMMENT "Generating Rust opcode struct for cbindgen to use to generate hhbc-ast.h"
DEPENDS "${OPCODE_DATA}"
)
set(HHBC_AST_HEADER "${RUST_FFI_BUILD_ROOT}/hphp/hack/src/hackc/hhbc-ast.h")
set(FFI_HEADER "${RUST_FFI_BUILD_ROOT}/hphp/hack/src/utils/ffi.h")
set(NAMING_SPECIAL_NAMES_HEADER "${RUST_FFI_BUILD_ROOT}/hphp/hack/src/naming/naming-special-names.h")
set(TYPE_CONSTRAINT_HEADER "${CMAKE_SOURCE_DIR}/hphp/runtime/vm/type-constraint-flags.h")
set(FCALL_HEADER "${CMAKE_SOURCE_DIR}/hphp/runtime/vm/fcall-args-flags.h")
set(HHBC_HEADER "${CMAKE_SOURCE_DIR}/hphp/runtime/vm/hhbc-shared.h")
set(ATTR_HEADER "${CMAKE_SOURCE_DIR}/hphp/runtime/base/attr.h")
set(FFI_EXTRA_HEADER "${CMAKE_SOURCE_DIR}/hphp/hack/src/utils/ffi/ffi_extra.h")
add_custom_command(
OUTPUT ${HHBC_AST_HEADER}
COMMAND
. "${CMAKE_CURRENT_BINARY_DIR}/dev_env_rust_only.sh" &&
${INVOKE_CARGO} ffi_cbindgen ffi_cbindgen --exe &&
${INVOKE_CARGO} ffi_cbindgen ffi_cbindgen --bin ffi_cbindgen
--header "${FFI_HEADER}" --namespaces "HPHP,hackc"
--includes "${FFI_EXTRA_HEADER}"
${FFI_SRCS} &&
${INVOKE_CARGO} ffi_cbindgen ffi_cbindgen --bin ffi_cbindgen
--header "${NAMING_SPECIAL_NAMES_HEADER}" --namespaces "HPHP,hackc,hhbc"
${NAMING_SPECIAL_NAMES_SRCS} &&
${INVOKE_CARGO} ffi_cbindgen ffi_cbindgen --bin ffi_cbindgen
--header "${HHBC_AST_HEADER}" --namespaces "HPHP,hackc,hhbc"
--includes "${FFI_HEADER},${NAMING_SPECIAL_NAMES_HEADER},${TYPE_CONSTRAINT_HEADER},${ATTR_HEADER},${FCALL_HEADER},${HHBC_HEADER}"
${HHBC_AST_SRCS}
DEPENDS rustc cargo "${RUST_OPCODES}"
COMMENT "Generating hhbc-ast.h"
)
add_custom_target(
"hhbc_ast_cbindgen"
DEPENDS ${HHBC_AST_HEADER}
)
add_library("hhbc_ast_header" INTERFACE)
add_dependencies("hhbc_ast_header" "hhbc_ast_cbindgen")
add_custom_target(hack_rust_ffi_bridge_targets)
# Compiling cxx entrypoints for hhvm
#
# Usage:
# build_cxx_bridge(
# name
# DIR directory
# [EXTRA_SRCS src [src ...]]
# [LINK_LIBS lib [lib ...]]
# )
#
# Where:
# `name` is the target name of the cxx_bridge.
# `directory` is the required directory of the cxx_bridge sources.
# `src` are extra source files to include in the bridge.
# `lib` are extra link libraries to include in the bridge.
#
function(build_cxx_bridge NAME)
cmake_parse_arguments(CXX_BRIDGE "" "DIR" "EXTRA_SRCS;LINK_LIBS" ${ARGN})
if ("${CXX_BRIDGE_DIR}" STREQUAL "")
message(FATAL_ERROR "Missing DIR parameter")
endif()
if (NOT "${CXX_BRIDGE_UNPARSED_ARGUMENTS}" STREQUAL "")
message(FATAL_ERROR "Unexpected parameters: ${CXX_BRIDGE_UNPARSED_ARGUMENTS}")
endif()
set(FFI_BRIDGE_SRC "${CMAKE_CURRENT_SOURCE_DIR}/${CXX_BRIDGE_DIR}")
set(FFI_BRIDGE_BIN "${RUST_FFI_BUILD_ROOT}/hphp/hack/${CXX_BRIDGE_DIR}")
set(RUST_PART_LIB "${FFI_BRIDGE_BIN}/${PROFILE}/${CMAKE_STATIC_LIBRARY_PREFIX}${NAME}${CMAKE_STATIC_LIBRARY_SUFFIX}")
set(RUST_PART_CXX "${FFI_BRIDGE_BIN}/${NAME}.cpp")
set(RUST_PART_HEADER "${FFI_BRIDGE_BIN}/${NAME}.rs.h")
set(GENERATED "${FFI_BRIDGE_BIN}/cxxbridge/${NAME}/${NAME}")
set(GENERATED_CXXBRIDGE "${FFI_BRIDGE_BIN}/cxxbridge")
add_custom_command(
OUTPUT
${RUST_PART_CXX}
${RUST_PART_HEADER}
${RUST_PART_LIB}
${GENERATED_CXXBRIDGE}
COMMAND
${CMAKE_COMMAND} -E make_directory "${FFI_BRIDGE_BIN}" &&
. "${CMAKE_CURRENT_BINARY_DIR}/dev_env_rust_only.sh" &&
${INVOKE_CARGO} "${NAME}" "${NAME}" --target-dir "${FFI_BRIDGE_BIN}" &&
${CMAKE_COMMAND} -E copy "${GENERATED}.rs.cc" "${RUST_PART_CXX}" &&
${CMAKE_COMMAND} -E copy "${GENERATED}.rs.h" "${RUST_PART_HEADER}"
WORKING_DIRECTORY ${FFI_BRIDGE_SRC}
DEPENDS rustc cargo "${OPCODE_DATA}"
)
add_custom_target(
"${NAME}_cxx"
DEPENDS ${RUST_PART_LIB}
)
add_library("${NAME}" STATIC ${RUST_PART_CXX} ${CXX_BRIDGE_EXTRA_SRCS} )
add_dependencies(hack_rust_ffi_bridge_targets "${NAME}")
add_library("${NAME}_rust_part" STATIC IMPORTED)
add_dependencies("${NAME}_rust_part" "${NAME}_cxx")
# Intentionally create link-time cyclic dependency between ${NAME}_rust_part
# and ${NAME} so that CMake will automatically construct the link line so
# that the linker will scan through involved static libraries multiple times.
set_target_properties(
"${NAME}_rust_part"
PROPERTIES
IMPORTED_LOCATION ${RUST_PART_LIB}
IMPORTED_LINK_DEPENDENT_LIBRARIES "${NAME}"
)
target_link_libraries(
"${NAME}"
PUBLIC
"${NAME}_rust_part"
${CXX_BRIDGE_LINK_LIBS}
)
target_include_directories("${NAME}" INTERFACE "${RUST_FFI_BUILD_ROOT}")
target_include_directories("${NAME}" PRIVATE "${GENERATED_CXXBRIDGE}")
endfunction()
build_cxx_bridge(
package_ffi
DIR "src/package/ffi_bridge"
)
build_cxx_bridge(
parser_ffi
DIR "src/parser/ffi_bridge"
)
build_cxx_bridge(
compiler_ffi
DIR "src/hackc/ffi_bridge"
EXTRA_SRCS "${CMAKE_CURRENT_SOURCE_DIR}/src/hackc/ffi_bridge/external_decl_provider.cpp"
LINK_LIBS hdf
)
build_cxx_bridge(
hdf
DIR "src/utils/hdf"
EXTRA_SRCS "${CMAKE_CURRENT_SOURCE_DIR}/src/utils/hdf/hdf-wrap.cpp"
LINK_LIBS folly
)
build_cxx_bridge(
hhvm_types_ffi
DIR "src/hackc/hhvm_cxx/hhvm_types"
EXTRA_SRCS "${CMAKE_CURRENT_SOURCE_DIR}/src/hackc/hhvm_cxx/hhvm_types/as-base-ffi.cpp"
)
build_cxx_bridge(
hhvm_hhbc_defs_ffi
DIR "src/hackc/hhvm_cxx/hhvm_hhbc_defs"
EXTRA_SRCS "${CMAKE_CURRENT_SOURCE_DIR}/src/hackc/hhvm_cxx/hhvm_hhbc_defs/as-hhbc-ffi.cpp"
)
if (NOT LZ4_FOUND)
add_dependencies(hack_dune lz4)
add_dependencies(hack_dune_debug lz4)
add_dependencies(hack_dune_test lz4)
endif()
# Intentionally not using `hack_dune_debug` as it generates output files of a
# different format (bytecode instead of native executables), which is useful if
# you're working on Hack. Keep it around, but require it to be explicitly used.
add_custom_target(hack ALL DEPENDS hack_dune "${OPCODE_DATA}")
add_custom_target(hack_test DEPENDS hack_dune_test "${OPCODE_DATA}")
configure_file(dev_env.sh.in dev_env.sh ESCAPE_QUOTES @ONLY)
configure_file(dev_env_common.sh.in dev_env_common.sh ESCAPE_QUOTES @ONLY)
configure_file(dev_env_rust_only.sh.in dev_env_rust_only.sh ESCAPE_QUOTES @ONLY)
install(PROGRAMS ${CMAKE_CURRENT_BINARY_DIR}/bin/hh_client
DESTINATION bin
COMPONENT dev)
install(PROGRAMS ${CMAKE_CURRENT_BINARY_DIR}/bin/hh_server
DESTINATION bin
COMPONENT dev)
install(PROGRAMS ${CMAKE_CURRENT_BINARY_DIR}/bin/hackfmt
DESTINATION bin
COMPONENT dev)
install(PROGRAMS ${CMAKE_CURRENT_BINARY_DIR}/bin/hh_parse
DESTINATION bin
COMPONENT dev) |
hhvm/hphp/hack/configure | #!/usr/bin/env bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/../../"
if [ "$1" = '--help' ] || [ "$1" = '-h' ]; then
echo "usage: $0 -Dvariable=argument ..."
echo ''
echo 'Variables: '
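# The pipeline below generates the help text: it extracts each option(...)
# line from CMake/Options.cmake and rewrites it as
# " -DNAME=ON|OFF : <description> : Default: <value>".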
options=`cat $DIR/CMake/Options.cmake | grep option | sed -e 's/^[ \t]*//' |
sed 's/\s*option(/ -D/; s/ "/=ON|OFF : /;
s/" / : Default: /; s/)$//' | sort`
options=" -DCMAKE_BUILD_TYPE=Debug|Release|RelWithDebInfo|MinSizeRel : Sets build type \
: Default: Release
$options"
if which column > /dev/null; then
options=`echo "$options" | column -t -s : `
fi
echo "$options"
exit 2
fi
options+=" -DCMAKE_MODULE_PATH=$DIR/CMake"
cmake $options "$@" . |
|
Inno Setup Script | hhvm/hphp/hack/dev_env.sh.in | # Copyright (c) 2019, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the "hack" directory of this source tree.
# This file is processed by cmake; the produced file is intended for both
# internal usage by the build system, and for direct usage by people working on
# Hack itself from CMake builds:
#
# source $BUILD_DIR/hphp/hack/dev_env.sh
. "@CMAKE_CURRENT_BINARY_DIR@/dev_env_common.sh"
eval $(opam env) |
Inno Setup Script | hhvm/hphp/hack/dev_env_common.sh.in | #!/bin/sh
# Copyright (c) 2019, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the "hack" directory of this source tree.
# Do not use this file directly - either use dev_env.sh or dev_env_rust_only.sh
export CMAKE_SOURCE_DIR="@CMAKE_SOURCE_DIR@"
export CMAKE_INSTALL_FULL_SYSCONFDIR="@CMAKE_INSTALL_FULL_SYSCONFDIR@"
export CMAKE_INSTALL_FULL_BINDIR="@CMAKE_INSTALL_FULL_BINDIR@"
export HACK_NO_CARGO_VENDOR=true
export OPAMROOT="@OPAMROOT@"
export PYTHONPATH="@HPHP_HOME@" # needed for verify.py for `hack_dune_test`
export CARGO_HOME="@CARGO_HOME@"
export CMAKE_BINARY_DIR="@CMAKE_BINARY_DIR@"
export RUSTC="@RUSTC_BIN_DIR@/rustc"
export DUNE_BUILD_DIR="@DUNE_BUILD_DIR@"
export HACK_SOURCE_ROOT="@CMAKE_CURRENT_SOURCE_DIR@"
export HACK_BUILD_ROOT="@HACK_BUILD_ROOT@"
export HACK_BIN_DIR="@CMAKE_BINARY_DIR@/hphp/hack/bin"
export PATH="@RUSTC_BIN_DIR@:@CARGO_BIN_DIR@:$(dirname "@OPAM_EXECUTABLE@"):$PATH"
export HACK_EXTRA_INCLUDE_PATHS="@extra_include_paths@"
export HACK_EXTRA_LINK_OPTS="@extra_link_opts@"
export HACK_EXTRA_LIB_PATHS="@extra_lib_paths@"
export HACK_EXTRA_NATIVE_LIBRARIES="@extra_native_libraries@" |
Inno Setup Script | hhvm/hphp/hack/dev_env_rust_only.sh.in | # Copyright (c) 2019, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the "hack" directory of this source tree.
# This file is processed by cmake; the produced file is intended for both
# internal usage by the build system, and for direct usage by people working on
# Hack itself from CMake builds:
#
# . $BUILD_DIR/hphp/hack/dev_env_rust_only.sh
. "@CMAKE_CURRENT_BINARY_DIR@/dev_env_common.sh"
# Nothing else to do for rust :) |
hhvm/hphp/hack/dune | (data_only_dirs bin doc facebook man tools)
(alias
(name all-hack)
(deps
(alias_rec exe)
(alias_rec debug))) |
|
hhvm/hphp/hack/LICENSE | MIT License
Copyright (c) 2013-present, Facebook, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
|
hhvm/hphp/hack/Makefile.dune | BYTECODE=
ROOT=$(shell pwd)
# Dune creates read-only files as of https://github.com/ocaml/dune/pull/3092
#
# We later copy these executables into the `bin/` dir; if we just use `cp`,
# this will only succeed once: a second build will fail as `cp` will refuse
# to overwrite files that do not have write permissions.
COPY_EXE := install -m 755
################################################################################
# Rules #
################################################################################
.NOTPARALLEL:
all: build-hack copy-hack-files
debug: build-hack-debug copy-hack-debug-files
clean:
find ./bin -mindepth 1 -not -path ./bin/README -delete
dune clean
# First argument is the extension to use.
# Second argument is an optional suffix for the rule names.
#
# The only supported configurations are:
# 1=exe 2= (literally nothing, not even a space)
# 1=bc 2=-debug
define build_hack
$(eval ext := $(if $(filter $(2),-debug),".bc",""))
build-hack$(2):
dune build \
src/hh_server.$(1) \
src/hh_client.$(1) \
src/hh_single_type_check.$(1) \
src/hackfmt.$(1) \
src/hh_parse.$(1) \
src/generate_full_fidelity.$(1) \
src/hh_fanout/hh_fanout.$(1)
copy-hack$(2)-files: build-hack$(2)
mkdir -p "$(HACK_BIN_DIR)"
${COPY_EXE} "$(DUNE_BUILD_DIR)/default/hack/src/hh_server.$(1)" "$(HACK_BIN_DIR)/hh_server$(ext)"
${COPY_EXE} "$(DUNE_BUILD_DIR)/default/hack/src/hh_client.$(1)" "$(HACK_BIN_DIR)/hh_client$(ext)"
${COPY_EXE} "$(DUNE_BUILD_DIR)/default/hack/src/hh_single_type_check.$(1)" "$(HACK_BIN_DIR)/hh_single_type_check$(ext)"
${COPY_EXE} "$(DUNE_BUILD_DIR)/default/hack/src/hackfmt.$(1)" "$(HACK_BIN_DIR)/hackfmt$(ext)"
${COPY_EXE} "$(DUNE_BUILD_DIR)/default/hack/src/hh_parse.$(1)" "$(HACK_BIN_DIR)/hh_parse$(ext)"
${COPY_EXE} "$(DUNE_BUILD_DIR)/default/hack/src/generate_full_fidelity.$(1)" "$(HACK_BIN_DIR)/generate_full_fidelity$(ext)"
${COPY_EXE} "$(DUNE_BUILD_DIR)/default/hack/src/hh_fanout/hh_fanout.$(1)" "$(HACK_BIN_DIR)/hh_fanout$(ext)"
endef
# Define rules for normal build / debug build
# The names of the rules are important as they match what is expected by cmake
$(eval $(call build_hack,exe,))
$(eval $(call build_hack,bc,-debug))
.PHONY: test do-test
test: build-hack copy-hack-files
$(MAKE) -f Makefile.dune do-test
do-test:
dune runtest
# python3 ./test/integration/runner.py ./bin/hh_server ./bin/hh_client |
|
Shell Script | hhvm/hphp/hack/ocaml_deps_data.sh | #!/bin/bash
export OCAML_VERSION="4.14.0+options"
export HACK_OPAM_DEPS=(
base.v0.15.1
base64.3.5.0
camlp4.4.14+1
camlp-streams.5.0.1
cmdliner.1.1.1
core_kernel.v0.15.0
core_unix.v0.15.2
dtoa.0.3.2
dune.3.5.0
fileutils.0.6.4
fmt.0.9.0
landmarks-ppx.1.4
lru.0.3.1
lwt.5.6.1
lwt_log.1.1.2
lwt_ppx.2.1.0
memtrace.0.2.3
merlin.4.6-414
mtime.1.4.0
ocp-indent.1.8.1
ounit2.2.2.6
pcre.7.5.0
ppx_deriving.5.2.1
ppx_gen_rec.2.0.0
ppx_sexp_conv.v0.15.1
ppx_yojson_conv.v0.15.1
sedlex.3.0
sexplib.v0.15.1
sqlite3.5.1.0
uchar.0.0.2
uutf.1.0.3
visitors.20210608
wtf8.1.0.2
yojson.2.0.2
ocaml-option-flambda
)
# The rest of the file exports variables based on the above configuration.
export HACK_OCAML_VERSION="${OCAML_VERSION}"
export OCAML_BASE_NAME=ocaml-variants
export OCAML_COMPILER_NAME="${OCAML_BASE_NAME}.${HACK_OCAML_VERSION}"
UNAME=$(uname -s)
if [ "$UNAME" != "Linux" ]; then
# Some variants are not supported on other platforms, so we use the base
# version instead.
# +fp is known not to work on Macs, but other combinations have not been
# tested.
echo 'Non linux platform detected, skipping +fp'
else
HACK_OPAM_DEPS+=(ocaml-option-fp)
export HACK_OPAM_DEPS
fi |
Shell Script | hhvm/hphp/hack/opam_cleanup.sh | #!/bin/sh
# Copyright (c) 2019, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the "hack" directory of this source tree.
HACK_DIR="$(realpath "$(dirname "$0")")"
# cleanup OSS locations
if [ -d "${HACK_DIR}/_build" ]; then
rm -rf "${HACK_DIR}/_build/opam"
fi
# cleanup FB locations
if [ -d "${HACK_DIR}/facebook" ]; then
rm -rf "${HACK_DIR}/facebook/redirect/opam"
rm -rf "${HACK_DIR}/facebook/opam2-mini-repository"
fi |
Shell Script | hhvm/hphp/hack/opam_helpers.sh | #!/bin/bash
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# To be sourced by main script
# Shamelessly copied from
# https://github.com/facebook/infer/blob/master/scripts/opam_utils.sh
# Many thanks to the infer team :D
opam_require_version_2 () {
local status=0
local version=0
{ version=$(opam --version 2>/dev/null); status=$?; }
if [ "$status" != 0 ]; then
# Suppress warning: the backquoted `opam --version` inside single quotes on the next line is intentional
# shellcheck disable=SC2016
printf '*** ERROR: `opam --version` failed, please install opam version 2\n' >&2
env >&2
exit 1
fi
case $version in
2*) ;;
*)
printf '*** ERROR: opam version "%s" is not supported, please install opam version 2\n' "$version" >&2
printf '*** NOTE: opam is "%s"\n' "$(command -v opam)" >&2
env >&2
exit 1
esac
} |
Shell Script | hhvm/hphp/hack/opam_setup.sh | #!/bin/bash
# Copyright (c) 2017, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the "hack" directory of this source tree.
set -euf
unset DUNE_BUILD_DIR
if [ -z "$1" ]; then
echo "Usage: $0 /path/to/bin/opam [/path/to/build/dir]"
exit 1
fi
SOURCE_ROOT="$(dirname "$0")"
OPAM_EXECUTABLE="$1"
BUILD_ROOT="${2:-"${SOURCE_ROOT}/_build"}"
OPAM_EXECUTABLE_DIR="$(dirname "$OPAM_EXECUTABLE")"
export PATH="$OPAM_EXECUTABLE_DIR:$PATH"
# detect if we are building inside FB by checking a specific dune file
if [ -e "$SOURCE_ROOT/src/facebook/dune" ]; then
# FB script must have already set OPAMROOT, and we reuse it
echo "FB build"
if [ -z ${OPAMROOT+x} ]; then
echo "OPAMROOT must be set by dune.sh"
exit 1
fi
echo "OPAMROOT = $OPAMROOT"
else
echo "Non-FB build"
OPAMROOT="${BUILD_ROOT}/opam"
fi
export OPAMROOT="$OPAMROOT"
mkdir -p "$OPAMROOT"
export OPAMYES="1"
# Prevents opam from trying to invoke brew install and dpkg, because all the
# dependencies should have been installed by Nix, Apt or Homebrew.
export OPAMASSUMEDEPEXTS="1"
export OPAMNODEPEXTS="1"
# shellcheck disable=SC1090
source "$SOURCE_ROOT/opam_helpers.sh"
# shellcheck disable=SC1090
source "$SOURCE_ROOT/ocaml_deps_data.sh"
# Shamelessly copied from
# https://github.com/facebook/infer/blob/master/scripts/opam_utils.sh
# Many thanks to the infer team :D
# assumes opam is available and initialized
opam_switch_create_if_needed () {
local name=$1
local switch=$2
local switch_exists=no
for installed_switch in $(opam switch list --short); do
if [ "$installed_switch" == "$name" ]; then
switch_exists=yes
break
fi
done
if [ "$switch_exists" = "no" ]; then
opam switch create "$name" "$switch"
eval "$(opam env)"
fi
}
opam_require_version_2
# End of shame
HACK_OPAM_SWITCH="${HACK_OCAML_VERSION}"
HACK_OPAM_DEFAULT_NAME="hack-switch"
HACK_OPAM_NAME=${HACK_OPAM_NAME:-$HACK_OPAM_DEFAULT_NAME}
SKIP_MINI_REPO=${SKIP_MINI_REPO:-0}
OCAML_PATCH=${OCAML_PATCH:-""}
if [[ "${SKIP_MINI_REPO}" -eq 1 ]]; then
echo "SKIP_MINI_REPO is set."
echo "This setup will fetch from the internet."
echo "Make sure you know what you are doing."
export http_proxy=http://fwdproxy:8080
export https_proxy=http://fwdproxy:8080
fi
MINI_REPO_FETCH_SCRIPT="${SOURCE_ROOT}/facebook/fetch_opam2_repo_hack.sh"
# OSS does not provide bubblewrap yet so we disable it
if [[ -f "${MINI_REPO_FETCH_SCRIPT}" && "${SKIP_MINI_REPO}" -eq 0 ]]; then
MINI_REPO_DIR="$("${MINI_REPO_FETCH_SCRIPT}")"
MINI_REPO_TARBALL="${MINI_REPO_DIR}.tar.gz"
rm -rf "$MINI_REPO_DIR" ||:
TARGET_OPAM_DIR="$SOURCE_ROOT/facebook/$(basename "$MINI_REPO_DIR")"
mkdir "$TARGET_OPAM_DIR"
tar xzf "$MINI_REPO_TARBALL" -C "$TARGET_OPAM_DIR"
opam init --disable-sandboxing --reinit offline_clone "$MINI_REPO_DIR" --no-setup --bare
else
opam init --disable-sandboxing --reinit --no-setup --bare
fi
opam_switch_create_if_needed "$HACK_OPAM_NAME" "$HACK_OPAM_SWITCH"
opam switch "$HACK_OPAM_NAME"
eval "$(opam env)"
opam install "${HACK_OPAM_DEPS[@]}"
dune_version=$(dune --version)
echo ""
echo "opam switch correctly installed at $OPAMROOT"
echo "dune version is $dune_version" |
Markdown | hhvm/hphp/hack/README.md | # What is Hack?
Hack is a programming language for [HHVM](https://hhvm.com) that interoperates
seamlessly with PHP. Hack reconciles the fast development cycle of PHP with the
discipline provided by static typing, while adding many features commonly found
in other modern programming languages.
Hack provides instantaneous type checking via a local server that watches the
filesystem. It typically runs in less than 200 milliseconds, making it easy to
integrate into your development workflow without introducing a noticeable delay.
For more information, see the [website](http://hacklang.org/). |
hhvm/hphp/hack/README.ocpbuild | This document describes an alternative build-system for Hack. This is
currently the only build system available on Windows. Meanwhile, this
document focuses on using this alternative build system on Linux and
OS X; for Windows, see `README.win32`.
### ocp-build, a build system for OCaml applications
ocp-build is a build system for OCaml applications, based on simple
descriptions of packages. ocp-build combines the descriptions of
packages in different directories, and optimize the parallel incremental
compilation of files depending on the number of cores and the
automatically-inferred dependencies between source files.
For more details, you can take a look at
http://www.typerex.org/ocp-build.html
### Download and install `ocp-build` by using OPAM
OPAM is a source-based package manager for OCaml. It supports multiple
simultaneous compiler installations, flexible package constraints, and
a Git-friendly development workflow.
To install OPAM, please check http://opam.ocaml.org/doc/Install.html
Once OPAM is installed correctly with an OCaml compiler, you can set up
your configuration and install `ocp-build`:
$ opam init
$ eval $(opam config env)
$ opam install ocp-build
Note that you will always need to use `eval $(opam config env)` when restarting
a shell in a terminal, to get your PATH properly configured.
### Compiling Hack with ocp-build
For Windows, see `README.win32`. Otherwise, in `hack/src`, run:
$ make fast
Optionally, you may run:
$ make test-ocp |
|
hhvm/hphp/hack/README.win32 | We are currently porting Hack and Flow to Windows 64 bits. This
document describes the build system, if you want to test
the prototype. Be aware that this work will take some time
and most tests are currently known to fail.
### Dependency: OCPWin
OCPWin is a binary OCaml distribution for Windows. It is
self-contained and runs natively on Windows, which means you can
generate OCaml applications with it without installing Cygwin or
Windows SDK.
To download and install OCPWin, go to http://www.typerex.org/ocpwin.html,
and choose the "full" version. OCPWin installer is known to have problems
with PATHs exceeding 1024 characters. Be careful to save your PATH before
the installation to be able to restore it in case of problem, until the
problem is fixed.
### Configuration and Compilation
In the Hack root directory, edit the file `00_config.ocp`
and adjust the following variables:
debug = false (* true if you want to activate the debug mode *)
Then, execute the `make.bat` script:
./make
This will call `ocp-build` and copy the generated files into the bin/
sub-directory.
If you want to clean your project, you may use:
./make clean
Optionally, to run the test suite, adjust the path to `python.exe`
in `make.bat`, and run:
./make test
### Installing
Coming soon
### FAQ
Q: While installing OCPWin, I got this message error:
This version of C:\xxxx is not compatible with the version of
Windows that you're running. Check your computer's system information
and then contact the software publisher.
What should I do ?
A: You probably installed the wrong version of OCPWin; check that you
got the 32-bit or 64-bit version matching your system. Note that Hack
and Flow currently only work on 64-bit systems.
|
hhvm/hphp/hack/THIRD-PARTY | The repository contains 3rd-party code in the following locations and
under the following licenses:
- src/third-party/avl: Files from the OCaml standard library, under the GNU
Library General Public License 2 with a linking exception. License can be
found in src/third-party/avl/LICENSE.
- src/third-party/core: Files from Jane Street's Core libraries, under the
Apache 2.0 License. License can be found in src/third-party/core/LICENSE.
- src/third-party/inotify: Files from ocaml-inotify, under the GNU Library
General Public License 2.1 with a linking exception. License can be found in
src/third-party/inotify/LICENSE.
- src/third-party/libancillary: Files from Nicolas George's libancillary, under
its license. See license in src/third-party/libancillary/COPYING
- src/third-party/ppx_deriving: Files from whitequark's ppx_deriving library,
under the MIT License. See license in src/third-party/ppx_deriving/LICENSE. |
|
Markdown | hhvm/hphp/hack/doc/async/await-as-an-expression-spec.md | For a summary, see the [await-as-an-expression](https://docs.hhvm.com/hack/asynchronous-operations/await-as-an-expression) docs.
The guiding principle of “unconditionally consumed with the statement” is to prevent computation from being thrown away. Since we could run the awaits before the statement, if they turn out to not be consumed, it will result in waste. We intentionally ignore the possibility of Exceptions being thrown for the definition of “conditional”.
For a position to be considered "unconditionally consumed", we require that all parents of the await expression, up to the closest enclosing statement, pass this check.
Valid positions:
* ConditionalExpression: Only first position is allowed
* `(await $yes) ? (await $no) : (await $no)`
* FunctionCallExpression: Any position is allowed as long as the call isn't conditionally executed. Only the receiver position is allowed if the call is conditional (e.g. `?->`).
* `(await $yes)->foo(await $yes, await $yes)`
* `(await $yes)?->foo(await $no, await $no)`
* SafeMemberSelectionExpression: Only the object is allowed.
* `(await $yes)?->(await $no)`
* Allow any valid expression position of all of the following expressions:
* CastExpression: `(<token>)(await $yes)`
* MemberSelectionExpression: `(await $yes)->(await $yes)`
* ScopeResolutionExpression: `(await $yes)::(await $yes)`
* IsExpression: `(await $yes) is Int`
* AsExpression: `(await $yes) as Int`
* NullableAsExpression: `(await $yes) ?as Int`
* EmptyExpression: `empty(await $yes)`
* IssetExpression: `isset(await $yes)`
* ParenthesizedExpression: `(await $yes)`
* BracedExpression: `{await $yes}`
* EmbeddedBracedExpression: `“{await $yes}"`
* CollectionLiteralExpression: `Map { await $yes => await $yes }` or `Vector { await $yes }`
* ObjectCreationExpression, ConstructorCall: `new (await $yes)(await $yes)`
    * ShapeExpression, FieldInitializer: `shape('key' => await $yes)`
* TupleExpression: `tuple(await $yes, await $yes)`
* ArrayCreationExpression: `array(await $yes => await $yes)`
* ArrayIntrinsicExpression: `[await $yes => await $yes]`
* DarrayIntrinsicExpression: `darray[await $yes => await $yes]`
* VarrayIntrinsicExpression: `varray[await $yes]`
* DictionaryIntrinsicExpression: `dict[await $yes => await $yes]`
* KeysetIntrinsicExpression: `keyset[await $yes]`
* VectorIntrinsicExpression: `vec[await $yes]`
* ElementInitializer: `await $yes => await $yes`
* SubscriptExpression: `(await $yes)[await $yes]`
* EmbeddedSubscriptExpression: `{(await $yes)[await $yes]}`
* YieldExpression: `yield (await $yes)`
* SyntaxList, ListItem: `await $yes, await $yes`
* PrefixUnaryExpression | PostfixUnaryExpression | DecoratedExpression
* `!`: `!(await $yes)`
* `~`: `~(await $yes)`
* `+`: `+(await $yes)`
* `-`: `-(await $yes)`
* `@`: `@(await $yes)`
* `clone`: `clone (await $yes)`
* `print`: `print (await $yes)`
* BinaryExpression
* Binary operators that only allow await in the left position:
* `AND`: `(await $yes) AND (await $no)`
* `OR`: `(await $yes) OR (await $no)`
* `||`: `(await $yes) || (await $no)`
* `&&`: `(await $yes) && (await $no)`
* `?:`: `(await $yes) ?: (await $no)`
* `??`: `(await $yes) ?? (await $no)`
* Binary operators that do assignment:
* `=`: `(await $no) = (await $yes)`
* `|=`: `(await $no) |= (await $yes)`
* `+=`: `(await $no) += (await $yes)`
* `*=`: `(await $no) *= (await $yes)`
* `**=`: `(await $no) **= (await $yes)`
* `/=`: `(await $no) /= (await $yes)`
* `.=`: `(await $no) .= (await $yes)`
* `-=`: `(await $no) -= (await $yes)`
* `%=`: `(await $no) %= (await $yes)`
* `^=`: `(await $no) ^= (await $yes)`
* `&=`: `(await $no) &= (await $yes)`
* `<<=`: `(await $no) <<= (await $yes)`
* `>>=`: `(await $no) >>= (await $yes)`
* Null Coalescing Assignment is both, so it doesn't allow either:
* `??=`: `(await $no) ??= (await $no)`
* Binary operators that allow await in both positions:
* `+`: `(await $yes) + (await $yes)`
* `-`: `(await $yes) - (await $yes)`
* `*`: `(await $yes) * (await $yes)`
* `/`: `(await $yes) / (await $yes)`
* `**`: `(await $yes) ** (await $yes)`
* `===`: `(await $yes) === (await $yes)`
* `<`: `(await $yes) < (await $yes)`
* `>`: `(await $yes) > (await $yes)`
* `==`: `(await $yes) == (await $yes)`
* `%`: `(await $yes) % (await $yes)`
* `.`: `(await $yes) . (await $yes)`
* `!=`: `(await $yes) != (await $yes)`
* `<>`: `(await $yes) <> (await $yes)`
* `!==`: `(await $yes) !== (await $yes)`
* `<=`: `(await $yes) <= (await $yes)`
* `<=>`: `(await $yes) <=> (await $yes)`
* `>=`: `(await $yes) >= (await $yes)`
* `&`: `(await $yes) & (await $yes)`
* `|`: `(await $yes) | (await $yes)`
* `<<`: `(await $yes) << (await $yes)`
* `>>`: `(await $yes) >> (await $yes)`
* `^`: `(await $yes) ^ (await $yes)`
* Pipe BinaryExpression (`|>`): Since we disallow dependent awaits in a single statement (or concurrent block), we need to disallow awaits in pipe operators that de-sugar to nested awaits. The simple rule to disallow this: we treat the `$$` as an `await` if the left side contains an `await`.
* Disallowed: `(await $x) |> (await $$)` de-sugars into `(await (await $x))`
* Allowed: `$x |> (await $$)` de-sugars into `(await $x)`
* Allowed: `(await $x) |> f($$)` de-sugars into `f(await $x)`
* Allowed: `(await $x) |> (f($$) + await $y)` de-sugars into `f(await $x) + await $y`
* Disallowed: `(await $x) |> f($$) |> await g($$)` de-sugars into `await g(f(await $x))`
    * Disallowed: `(await $x) |> $y ?? $$` de-sugars into `$y ?? (await $x)`
* Statement position:
* ExpressionStatement: `await $yes;`
* ReturnStatement: `return await $yes;`
* UnsetStatement: `unset($a[await $yes]);`
* EchoStatement: `echo (await $yes);`
* PrintStatement: `print (await $yes);`
* IfStatement: `if (await $yes) { await $yes_but_new_statement; }`
* ThrowStatement: `throw (await $yes);`
* SwitchStatement: `switch (await $yes) { ... }`
* ForeachStatement: `foreach ((await $yes) [await] as ... ) { ... }`
* ForStatement: `for (await $yes; await $no; await $no) { ... }`
* Disallowed in all other unmentioned positions. |
hhvm/hphp/hack/doc/coeffects/context_to_capabilities.dot | digraph G {
subgraph cluster_legend {
label="Legend: semantic model"
Cap2 -> Cap1 [label="extends"];
context [shape="rect", color=lightsteelblue1, style=filled];
context -> Cap1 [label="alias", style=dashed, color=blue];
context -> Cap2 [label="unsafe alias", color=red];
intersect_context [shape="rect", color=lightsteelblue1, style=filled];
intersect_context -> Cap3 [label="alias to (Cap3 & Cap4)", style=dashed, color=blue];
intersect_context -> Cap4 [style=dashed, color=blue];
};
// contexts
"defaults" [shape="rect", color=orange, style=filled];
"rx" [shape="rect", color=lightsteelblue1, style=filled];
"rx_shallow" [shape="rect", color=lightsteelblue1, style=filled];
"rx_local" [shape="rect", color=lightsteelblue1, style=filled];
"cipp_global" [shape="rect", color=lightsteelblue1, style=filled];
"cipp" [shape="rect", color=lightsteelblue1, style=filled];
"cipp_of<T>" [shape="rect", color=lightsteelblue1, style=filled];
"local" [shape="rect", color=lightsteelblue1, style=filled];
"local" -> WriteProperty [color="blue", style="dashed"];
"WriteProperty" -> "IFCMutations"
// cipp domain
"CippContextOf<T>" -> "CippContext"
// reactive domain
"RxShallow" -> "Rx"
"RxLocal" -> "RxShallow"
// unsafe relations
"rx_local" -> "defaults" [color="red"];
"rx_shallow" -> "RxLocal" [color="red"];
// safe relations
"cipp_global" -> "AccessStaticVariables" [color="blue", style="dashed"];
"cipp_global" -> "Output" [color="blue", style="dashed"];
"cipp_global" -> "IFCMutations" [color="blue", style="dashed"];
"cipp" -> "CippContext" [color="blue", style="dashed"];
"cipp_of<T>" -> "CippContextOf<T>" [color="blue", style="dashed"];
"rx_shallow" -> "rx" [color="blue", style="dashed"];
"rx_local" -> "rx_shallow" [color="blue", style="dashed"];
// defaults intersection
"defaults" -> "rx_local" [color="blue", style="dashed"];
"defaults" -> "cipp_global" [color="blue", style="dashed"];
"defaults" -> "WriteProperty" [color="blue", style="dashed"];
// we could expand the type aliases, but the graph gets messier
"cipp" -> "cipp_global" [color="blue", style="dashed"];
"cipp_of<T>" -> "cipp" [color="blue", style="dashed"];
// reactive domain
subgraph cluster_rx {
"rx" -> "Rx" [color="blue", style="dashed"];
};
subgraph cluster_rxl {
"rx_local" -> "RxLocal" [color="blue", style="dashed"];
};
subgraph cluster_rxs {
"rx_shallow" -> "RxShallow" [color="blue", style="dashed"];
};
// makes arrows point bottom to top
rankdir = "BT"
} |
|
hhvm/hphp/hack/doc/coeffects/graph_v0.dot | digraph G {
subgraph cluster_legend {
label="Legend: semantic model"
Cap2 -> Cap1 [label="extends"];
context [shape="rect", color=lightsteelblue1, style=filled];
context -> Cap1 [label="alias", style=dashed, color=blue];
context -> Cap2 [label="unsafe alias", style=dotted, color=red];
intersect_context [shape="rect", color=lightsteelblue1, style=filled];
intersect_context -> Cap3 [label="alias to (Cap3 & Cap4)", style=dashed, color=blue];
intersect_context -> Cap4 [style=dashed, color=blue];
};
// contexts
"defaults" [shape="rect", color=orange, style=filled, label="defaults = nothing"];
"local" [shape="rect", color=lightsteelblue1, style=filled];
"rx" [shape="rect", color=lightsteelblue1, style=filled];
"rx_local" [shape="rect", color=lightsteelblue1, style=filled];
"rx_shallow" [shape="rect", color=lightsteelblue1, style=filled];
// capabilities
"AccessStaticVariable";
"Output";
"WriteProperty";
"RxLocal" -> "RxShallow"
"RxShallow" -> "Rx"
// safe arrows
"defaults" -> "WriteProperty" [color=blue, style=dashed];
"defaults" -> "AccessStaticVariable" [color=blue, style=dashed];
"defaults" -> "Output" [color=blue, style=dashed];
"local" -> "WriteProperty" [color=blue, style=dashed];
"rx" -> "WriteProperty" [color=blue, style=dashed];
"rx_local" -> "rx_shallow" [color=blue, style=dashed];
"rx_shallow" -> "rx" [color=blue, style=dashed];
// unsafe arrows
// note: defaults=nothing already connects defaults to everything!
"rx_local" -> "defaults" [color=red, style=dotted];
"rx_shallow" -> "RxLocal" [color=red, style=dotted];
subgraph cluster_rx {
"rx" -> "Rx" [color=blue, style=dashed];
};
subgraph cluster_rxl {
"rx_local" -> "RxLocal" [color=blue, style=dashed];
};
subgraph cluster_rxs {
"rx_shallow" -> "RxShallow" [color=blue, style=dashed];
};
// cosmetic changes
"defaults" -> "(any capability)" [label="...", color=blue, style=dashed]
dummy1 [ style=invis ]
"rx_local" -> dummy1 [ style=invis ]
dummy1 -> "defaults" [ style=invis ]
// makes arrows point bottom to top
rankdir = BT
} |
|
Markdown | hhvm/hphp/hack/doc/HIPs/contexts_and_coeffects.md | ### Feature Name: Contexts and CoEffects
### Start Date: July 14, 2020
### Status: Candidate
# Summary:
A generalized system for the description and enforcement of permissions and restrictions of a context.
This HIP presents feature that overlays a coeffect system into the Hack type system through lightweight context annotations that logically map into a set of capabilities. These capabilities establish both the calling conventions (i.e., which functions/method may call which other functions/methods) as well as the operations permitted within the present context.
“Contexts are like onions.”
# Feature motivation:
Several important in-progress language features require us to alter behavior dependent on the executing context. Examples of this include:
* **Context Implicit Purpose Policies (CIPP)**: Inside of a CIPP context, there are stricter rules about how data can be handled in order to ensure their purposes are maintained.
* **Purity/Reactivity:** In a pure/reactive context, the programmer cannot access global data nor mutate external references in order to achieve enforced determinism.
In order to support execution contexts with different permissions/restrictions, we propose adding **Contextual Effects (Coeffects)** to the Hack type system. Coeffects are expressive enough to support generalized execution scopes while also lending themselves nicely to syntactic sugar that will make them easy to use and require minimal changes to typechecker internals.
# Definitions
In the following document we will use a few terms either not previously defined within the context of Hack or else potentially nonspecific. For the sake of clarity, we will first have a list of definitions for reference throughout the rest of the document.
Capability: a permission or description of a permission. For example, one might consider the ability to do `io` or access globals as capabilities.
Context: a higher level representation of a set of capabilities. A function may be comprised of one or more contexts which represent the set union of the underlying capabilities.
Example context: for the purpose for the rest of the definitions, a context created solely for the purpose of this document rather than for intended release.
The Pure Context: A simple way of saying the context representing the empty list of capabilities `{}`.
`rand`: An example context representing the capabilities required to do random number generation. Refers to the set of capabilities `{Rand}`.
`io`: An example context representing the capabilities required to do IO. Refers to the set of capabilities `{IO}`.
`throws<T>`: A parameterized example context representing the capabilities required to throw exception type `T` or any children of `T`. Refers to the set of capabilities `{Throws<T>}`. One may consider that the type parameter of this context is covariant.
`defaults`: The context representing the set of capabilities present in a function prior to the introduction of this feature. Refers to the set of capabilities `{Throws<mixed>, IO, Rand}`.
Higher order functions: A function accepting, as one of its arguments, a value containing a function-type such as a function pointer or closure.
# User Experience - Syntax
Note that all of the below syntax is still very much up for bikeshedding and may not be finalized.
The specification for the syntax of a function including co-effects is as follows, with the list of co-effects itself being optional.
```
function function_name<list_of_generics>(
parameter_type_hint variable_name,
...
)[list_of_contexts] : return_type_hint where_clauses {
function_body
}
```
## Declaring Contexts and Capabilities
This is done natively within the typechecker and the runtime. More information on this is provided under those sections.
## Basic Declarations and Closures
A function or method may optionally choose to list one or more contexts:
```
function no_contexts(): void {...}
function one_context()[C]: void {...}
function many_context()[C1, C2, ..., Cn]: void {...}
```
Additionally, the context list may appear in a function type:
```
function has_fn_args(
(function (): void) $no_list,
(function ()[io, rand]: void) $list,
(function ()[]: void) $empty_list,
): void {...}
```
As with standard functions, closures may optionally choose to list one or more contexts. Note that the outer function may or may not have its own context list. Lambdas wishing to specify a list of contexts must include a (possibly empty) parenthesized argument list.
```
function some_function(): void {
$no_list = () ==> {...};
$single = ()[C] ==> {...};
$multiple = ()[C1, C2, ..., Cn] ==> {...};
$with_types = ()[C]: void ==> {...};
// legacy functions work too
$legacy = function()[C]: void {};
// does not parse
$x[C] ==> {}
}
```
## Higher Order Functions With Dependent Contexts
One may define a higher order function whose context depends on the dynamic context of one or more passed in function arguments.
```
function has_dependent_fn_arg(
(function()[_]: void) $f,
)[rand, ctx $f]: void {... $f(); ...}
function has_dependent_fn_args(
(function()[_]: void) $f,
(function()[_]: void) $f2,
)[rand, ctx $f, ctx $f2]: void {... $f(); ... $f2(); ...}
```
One may reference the dependent context of a function argument in later arguments as well as in the return type.
```
function has_double_dependent_fn_arg(
(function()[_]: void) $f1,
(function()[ctx $f1]: void) $f2,
)[rand, ctx $f1]: void {$f1(); $f2(); }
function has_dependent_return(
(function()[_]: void) $f,
)[rand, ctx $f]: (function()[ctx $f]: void) {
$f();
return $f;
}
```
Attempting to use the dependent context of an argument before it is defined will result in an error about using an undefined variable.
Note that the special `_` placeholder context may only be used on function arguments.
```
// The following are all disallowed
type A = (function()[_]: void);
newtype A = (function()[_]: void);
Class Example {
public (function()[_]: void) $f;
public static (function()[_]: void) $f2;
}
function example(): (function()[_]: void) {}
```
## Constants
In addition to standard and type constants, classes may define context constants:
```
class WithConstant {
const ctx C = [io];
...
}
```
Context constants may be abstract, and possibly have a default. If they are abstract, they may additionally contain one or both of the as and super constraints.
```
interface WithAbstractConstants<T> {
abstract const ctx C1; // bare abstract
abstract const ctx C2 = [io]; // abstract with default
abstract const ctx C3 as [io, rand]; // abstract with bound
abstract const ctx C4 super [io, rand] = [io]; // abstract with bound and default
// Disallowed: Concrete with bound.
const ctx C5 super [io, rand] = [io];
}
```
Context constants are accessed off of function arguments in a similar manner to function-type arguments. The same restrictions about use-before-define apply.
```
function type_const(SomeClassWithConstant $t)[$t::C]: void { $t->f(); }
```
Context constants are accessed off `this` or a specific type directly within the contexts list:
```
public function access_directly()[this::C1, MySpecificType::C2]: T {...}
```
Context constants may not be referenced off of a dependent/nested type. Said another way, context constants may only have the form `$arg::C`, not `$arg::T::C`, etc. It is possible we will relax this restriction in a future version.
```
interface IHasCtx {
abstract ctx C;
}
interface IHasConst {
abstract const type TC as IHasCtx;
}
// Disallowed: nested type access
function type_const(IHasConst $t)[$t::TC::C]: void {}
abstract class MyClass implements IHasConst {
// Disallowed: also applies to dependent types of this
public function type_const()[this::TC::C]: void{}
}
```
For the sake of simplicity, `this` must be used in lieu of `$this` within the context list.
## Additional Dependent Contexts Information
Dependent contexts may be accessed off of nullable parameters. If the dynamic value of the parameter is `null`, then the contexts list will be empty.
```
function type_const(
?SomeClassWithConstant $t,
?(function()[_]: void) $f,
)[$t::C, ctx $f]: void {
$t?->foo();
if ($f is nonnull) {
$f();
}
}
```
Parameters used for accessing a dependent context may not be reassigned within the function body.
```
function type_const(SomeClassWithConstant $t)[$t::C]: void {
// disallowed
$t = get_some_other_value();
}
function has_dependent_fn_arg((function()[_]: void) $f)[ctx $f]: void {
// disallowed
$f = get_some_other_value();
}
```
Dependent contexts may not be referenced within the body of a function. This restriction may be relaxed in a future version.
```
function f(
(function()[_]: void $f,
SomeClassWithConstant $t,
)[rand, ctx $f, $t::C]: void {
(()[ctx $f] ==> 1)(); // Disallowed
(()[$t::C] ==> 1)(); // Disallowed
(()[rand] ==> 1)(); // Allowed, not a dependent context
(()[] ==> 1)(); // Allowed
(() ==> 1)(); // Allowed. Note that this is logically equivalent to [rand, ctx $f, $t::C]
}
```
# User Experience - Semantics
## Basic Function Declarations
As this feature is fully opt-in, the lack of a context list results in implicity having the `defaults` context.
I.E. the following to definitions are functionally identical:
```
function default_context1(): void {...}
function default_context2()[defaults]: void {...}
```
## How Contextful Functions Interact
Contexts represent a set of capabilities. A list of contexts represent the set union of their capabilities. In order to invoke a function, one must have access to all capabilities required by the callee. However, the caller may have more capabilities than is required by the callee, in which case simply not all capabilities are "passed" to the callee.
This is perhaps more easily seen via example:
```
function pure_fun()[]: void { /* has {} capability set */}
function rand_int()[rand]: void {/* has {Rand} capability set */}
function rand_fun()[rand]: void {
pure_fun(); // fine: {} ⊆ {Rand}
rand_int(); // fine: {Rand} ⊆ {Rand}
}
function unannotated_fun(): void {
rand_fun(); // fine: {Rand} ⊆ {IO, Rand, Throws<mixed>} aka the default set
}
function pure_fun2()[]: void {
rand_fun(); // error: {Rand} ⊈ {}
}
```
Above, `rand_fun` is logically safe, as its ability to do nondeterministic computation shouldn’t prevent it from invoking functions without that ability. However, note that `pure_fun2` is unsafe, as it does not have that capability, and therefore must refrain from invoking those that require it.
Now consider the following:
```
function pp_coinflip()[io, rand]: void {
pretty(conflip());
}
function coinflip()[rand]: bool {
return rand_int() == 0;
}
function pretty(bool $b)[io]: void {
print($b ? "heads" : "tails");
}
function pure_fun()[]: void {
pp_coinflip(); // whoops
}
```
The invocation of `pp_coinflip` from `pure_fun` is obviously unsafe, as invoking `pure_fun` could, in fact, actually result in impure actions. Therefore, functions with the pure context only invoke other functions with the pure context. Note, however, that `pp_coinflip` is fine to invoke `coinflip` and `pretty`.
## Subtyping & Hierarchies
Semantically, capabilities work as if they were required parameters
to functions, and are thus contravariant. This means that, for example,
a closure that requires a `[rand]` or `[]` (pure) context may be passed
where the expected type is a function that requires `[rand, io]`.
(The converse is disallowed because that would mean giving an
additional capability for randomness out of thin air.)
The errors then fall out by normal subtyping rules by internally
treating permissions as (implicit) arguments of a function/method.
```
class Parent {
public function maybeRand()[rand]: void {...} // {Rand}
public function maybePure(): void {...} // {Throws<mixed>, IO, Rand}
}
class Mid extends Parent {
public function maybeRand()[rand]: void {...} // {Rand} -> fine {Rand} ⊆ {Rand}
public function maybePure()[io]: void {...} // {IO} -> fine {IO} ⊆ {Throws<mixed>, IO, Rand}
}
class Child extends Mid {
public function maybeRand()[]: void {...} // {} -> fine {} ⊆ {Rand}
public function maybePure()[]: void {...} // {} -> fine {} ⊆ {IO}
}
```
In the above, the contexts on the methods in `Parent` and `Child` are required for `Mid` to typecheck successfully. Note also that `maybePure` in `Parent` need not be the pure context, and that `maybeRand` in `Child` need not be `rand`.
### Capability subtyping
In reality, there may also exist a subtyping relationship between
capabilities; suppose that a new capability `FileInput` is defined.
Since reading from a file does *not* preclude one from reading
a special file such as `/dev/random` on a UNIX-like system,
the semantic model should conservatively assume that a function
with capability `FileInput` must also have the `Rand` capability.
Therefore, `FileInput` must be a subtype (subcapability) of `Rand`.
This has an important consequence that falls out by design:
whenever some capability `B` that is subtype of capability `A`
is available (in scope), any function (or operation) that
requires `A` can be called (or performed, respectively).
## Interaction with closures
By default, closures require the same capabilities as the context in which they are created. Explicitly annotating the closure can be used to opt-out of this implicit behaviour. This is most useful when requiring the capabilities of the outer scope result in unnecessary restrictions, such as if the closure is returned rather than being invoked within the enclosing scope.
```
function foo()[io]: void { // scope has {IO}
$callable1 = () ==> {...}; // requires {IO} - By far the most common usage
$callable2 = ()[] ==> {...}; // does not require {IO}, requires {}
$uncallable1 = ()[rand] ==> {...}; // does not require {IO}, requires {Rand}
$uncallable2 = ()[defaults] ==> {...}; // does not require {IO}, requires the default set
}
```
Note that in the previous example, `$uncallable1` cannot be called as `foo` cannot provide the required `Rand` capability. `$callable2` is invocable because it requires strictly fewer capabilities than `foo` can provide.
## Higher-order Functions With Dependent Contexts
Higher-order functions are typically used for generalization purposes, with common examples including standard `map` and `filter` functions. For these functions, a common pattern is to generalize over the inputs and/or outputs of their function-typed arguments. It is imperative that the addition of contexts does not remove this generalizability while maintaining simplicity of their definitions.
Consider the following higher-order function declaration and calling functions. In order to maintain generality, safety, and backwards compatibility, the end result needs to be that `good_caller` and `nocontext_caller` should typecheck while `bad_caller` should not. We solve this problem via the use of dependent contexts, defined above.
```
function callee(
(function()[_]: void) $f,
)[rand, ctx $f]: void {... $f(); ...}
function good_caller()[io, rand]: void {
// pass pure closure
callee(()[] ==> {...}); // callee is {Rand}
// pass {IO} closure
callee(()[io] ==> echo "output"); // callee is {Rand, IO}
// pass {IO, Rand} closure
callee(() ==> echo "output"); // callee is {Rand, IO}
callee(() ==> {...}); // callee is {Rand, IO}
}
function bad_caller()[]: void {
// pass {} closure but tries to do IO
callee(()[] ==> echo "output"); // // C is {} -> callee is {Rand}
// pass {} closure
callee(() ==> {...}); // C is {} -> callee is {Rand}
}
function nocontext_caller(): void {
// this closure requires the default context
callee(() ==> {...}); // callee is {Rand, Throws<mixed>, IO}
}
```
Note that, logically, this suggests and requires that all other statements within `callee` require only the `Rand` capability, as the actual `C` passed cannot be depended upon to be any specific capability (and can in fact be the empty set of capabilities).
A potentially more compelling example is the `Vec\map` function in the Hack Standard Library.
```
function map<Tv1, Tv2>(
Traversable<Tv1> $traversable,
(function(Tv1)[_]: Tv2) $value_func,
)[ctx $value_func]: vec<Tv2> { ... }
```
## Partially Contextful Hierarchies aka Context Constants
### Background
It is not uncommon to want your API to accept an object implementing an interface and then invoke a method appearing on that interface. That is fine in the common case, wherein the context of that function within the hierarchy is invariant. However, it is possible for a hierarchy to exist for which it is difficult or impossible to guarantee that all implementations of a method have the same context. There are a good number of these situations within the Facebook codebase, but the easiest example is the `Traversable` hierarchy.
The following is an oversimplification of the hierarchy and methodology for how `Traversable`s work in Hack. Do not consider this an actual reference to their inner workings. Consider a `Traversable` interface that defines a `next` function which gives you the next item and an `isDone` function that tells you if there are more elements.
```
interface Traversable<T> {
public function next()[???]: T; // what do we put here?
public function isDone()[]: bool; // always pure
}
```
The most common children of `Traversable` are the builtin `Containers`, with children like `vec` and `Map`. However, non-builtin objects are allowed to extend `Traversable` as well, creating arbitrarily traversable objects.
```
interfact Container<T> implements Traversable<T> {
public function next()[]: T; // {}
}
final class CreateTenNumbers implements Traversable<int> {
private int $nums = 0;
private bool $done = false;
public function isDone()[]: bool { return $this->done; } // {}
public function next()[rand]: int { // {Rand}
invariant(!$this->done, 'off the end');
if ($this->nums++ === 10) { $this->done = true; }
return rand_int();
}
}
```
Now consider the following function:
```
function sum(Traversable<int> $nums)[]: int { // Has {}!!!
$sum = 0;
while(!$nums->isDone()) {
// if $nums is CreateTenNumbers, this is unsafe!
$sum += $nums->next(); // hmmmmm
}
return $sum;
}
```
This code should not typecheck! The `sum` function has no capabilities, but what are the capability requirements of the call to `next`?
### Solution
The solution to this problem is the capability constants described above; the idea simply being that the interface has an abstract capability list, usable on methods of the interface, and concretized by children. In our case, we would use such a capability constant to describe the `next` function:
```
interface Traversable<T> {
abstract const ctx C;
public function next()[this::C]: T;
public function isDone()[]: T;
}
interface Container<T> implements Traversable<T> {
const ctx C = [];
}
final class CreateTenNumbers implements Traversable<int> {
...
const ctx C = [rand];
public function next()[rand]: int { ... }
...
}
function sum(Traversable<int> $nums)[$nums::C]: int { ... }
```
In fact, the `Vec\map` function above would likely actually look something like this:
```
function map<Tv1, Tv2>(
Traversable<Tv1> $traversable,
(function(Tv1)[_]: Tv2) $value_func,
)[$traversable::C, ctx value_func]: vec<Tv2> { ... }
```
As with normal type constants, one cannot override a concrete capability constant.
### Context Constant Defaults
As with normal type constants, we don’t want to force all children to specify the constant. Thus we expose abstract context constants with defaults.
```
abstract const ctx C = [defaults];
```
Here, the first non-abstract class that doesn’t define a concrete context for `C` gets `[defaults]` synthesized for it.
## Local operations
Examples of coeffect-enforced operations include printing,
static property or superglobal access, as well as throwing exceptions.
For output such as from invocation of the `echo` built-in, the `IO`
capability must be locally available in the function/method body.
```
function io_good()[io]: void {
echo "good"; // ok: {IO} ⊆ {IO}
}
function io_bad()[]: void {
echo "bad"; // error: {} ⊈ {IO}
}
```
## Parameterized Contexts / Capabilities
Thus far, we’ve covered examples of relatively simple contexts, effectively representing a binary option. However, contexts must also be usable for situations requiring more than that binary choice. Consider, now in more detail, the `throw` context defined above. Rather than describing that a function can throw, this would describe `which` classes of exceptions a function can throw. In that scenario, the context would require a parameter representing the exception class: `throws<-T as Exception>`.
One might note, however, that it would be reasonable for a function to throw multiple kinds of exceptions, often without a unified hierarchy. While it is possible to use an additional interface to unify those exceptions, that would quickly result in a combinatorial explosion. Instead, the result would look like this:
```
function throws_foo_exception()[throws<FooException>]: void { // {Throws<FooException>}
throw new FooException();
}
function throws_bar_exception()[throws<BarException>]: void { // {Throws<BarException>}
throw new BarException();
}
function throws_foo_or_bar_exception(bool $cond)[
throws<FooException>, throws<BarException> // {Throws<FooException>, Throws<BarException>}
]: void {
if ($cond) {
throws_foo_exception();
} else {
throws_bar_exception();
}
}
```
The above would indicate that `throws_foo_or_bar_exception` may throw any of the listed exception classes.
This also applies to to the introduction of additional instances of a parameterized context due to dependent contexts:
```
function throws(
(function()[_]: void) $f,
)[throws<FooException>, ctx $f]: void {...}
```
In the above, if `ctx $f` is `throws<BarException>` then `throws($f)` would be `{Throws<FooException>, Throws<BarException>}`.
# IDE experience:
The IDE experience for this feature will be similar to that of standard types. Autocomplete, syntax highlighting, etc will all be available.
The error messages will be customizable for each context such that the errors make it clear
1. What’s missing in order to typecheck function calls successfully
2. What illegal operation is taking place within the body of a contextful function.
For example, attempting to `echo` within a function having the pure context would simply indicate that doing IO is not permissable due to that context.
# Implementation details:
## Typechecker
The proposed system is modeled as implicit parameters in the
typechecker. These parameters do not exist at runtime, and the
locals that carry them are not denotable by users (`$#` prefix).
The parameters are automatically looked up and checked during type
inference of call expressions via the `$#capability` local.
Coeffect-enforced local operations (such as I/O and throwing)
look up and check if the `$#local_capability` has the
appropriate type. Multiple capabilities are encoded using
an intersection type, i.e., a capability set `{C1, C2, C3}`
would be represented as `(C1 & C2 & C3)`, assuming an
oversimplification (for now) that there exists a one-to-one mapping
between contexts and same-named types. In either case,
the *available* capability needs to be a subtype of the *required*
one in order for type-checking to succeed (note that a set is a
subtype of any of its supersets). This way, we reuse existing
typing infrastructure, and get subtyping for free.
```
namespace HH\Capabilities {
interface A {}
interface B extends A {}
interface A2 {}
}
function f(/* A $#capability */)[A]: void {
f(/* $#capability */); // ok
g(/* $#capability */); // bad, A </: B
} // available^ ^required
function g(/* B $#capability */)[B]: void {
g(/* $#capability */); // ok
f(/* $#capability */); // ok (B <: A)
} // available^ ^required
function h(/* (A & A2) $#capability */)[A, A2]: void {
g(/* $#capability */); // bad (A & A2) </: B)
f(/* $#capability */); // ok (A & A2) <: A)
} // available^^^^^^ ^required
```
So, a function with a `mixed` coeffect has *none* of the capabilities we define, and can be called by any function. By comparison, a function with a nothing coeffect has every capability, but can only be called by other functions whose coeffect is `nothing`. In practice, we won’t use `nothing`, but rather an intersection type that covers a *default* set of capabilities, namely `defaults`.
### Mapping of contexts to capabilities
The intended place to define new contexts and capabilities is an
`.hhi` file under `hphp/hack/src/hhi/coeffect`. In the same directory,
there is also a GraphViz visualization that concisely describes the
relationship between contexts and capabilities system in a way
that may be more understandable to an average Hack developer.
The kind of coeffects coincides with the kind of types. The syntactic
piece `[C1, ..., Cn]` is interpreted during naming as follows. Each
annotation `Ck` must either be:
* a *fully* namespace-qualified type using the preferred CamelCase naming,
e.g., `\HH\Capabilities\Rand`;
* or an *unqualified* type representing a *context* using a snake case name,
e.g., `rand`, which is an alias to one or more of the above
(multiple types are intersected on the right-hand side)
The former would be sufficient and constitutes a sound (co)effect system;
however, we encourage uniformly using the latter instead because
it provides a general mechanism that facilitates top-down migrations
(more details on that below).
In case a context requires and provides multiple capabilities, there
are two choices:
* declare a (sealed interface) supertype of the desired capabilities; or
* declare an alias and use intersection types (preferred).
Declaring an alias to an intersection of capabilities (or other
aliases) is strongly preferred as it allow the requirements to be
constructible in multiple ways; e.g., if a context `composite_context`
maps to capabilities (and contexts) `Cap1` (`context1`) and `Cap2
`(`context2`), then the following code would still work fine as expected,
```
function callee()[composite_context]: void {}
function caller()[context1, context2]: void {
callee(); // ok (composite_context = context1 & context2)
}
```
unlike the former hierarchy-based approach
(`CompositeCap extends CompositeCap1, CompositeCap2`)
that would fail with the following message:
```
This call is not allowed because its coeffects are incompatible with the context (Typing[4390])
... context of this function body provides the capability set
{Capability1, Capability2}
... But the function being called requires the capability
CompositeCapability
```
### Enforcing local operations
To enforce local operations (such as throwing exceptions),
the typechecker keeps track of a fake local variable
`$#local_capability`. At the beginning of `f`'s body, it has type:
`\HH\Contexts\context1 & ... & \HH\Contexts\contextN`
where `context1` through `contextN` are the listed on `f`:
```
function f(/*params*/)[context1, ..., contextN]: ReturnType
```
Since each context `contextI` maps to some underlying set of
capabilities, the type of `$#local_capability` simplifies to:
`Cap1 & ... & CapK`
This kind of coeffect(s) is tracked orthogonally to the one used to
enforce calling conventions and they are always sound; no context
can *unsafely* conjure a capability to perform a local operation.
### Enforcing calling conventions
To establish calling conventions in the typechecker,
user-denoted contexts are mapped to two sets of capabilities:
- *C*: intersection of types define by `\HH\Contexts\contextI`
(exactly as described in the previous subsection)
- *Cunsafe*: intersection of types defined by
`\HH\Contexts\Unsafe\contextI`
(analogous to the above modulo the namespace resolution)
Then the `$#capability` local is typed as `(C & Cunsafe)`;
intuitively, this means that the set of available capabilities
for performing a function/method call is the set union of
the safe and unsafe counterparts of each context, where `mixed`
corresponds to an empty set (i.e., it is no-op). Notably,
this means that usage of a context is *always* sound if the
underlying type `\HH\Contexts\Unsafe\context = mixed`.
On the contrary, `\HH\Contexts\Unsafe\context = (C1 & C2)`,
for example, would mean that the usage of the context is unsound
as it *unsafely* conjures capabilities `C1` and `C2`,
thereby potentially allowing calls into contexts that require
capabilities `C1` and `C2` that would otherwise not be available.
The errors then fall out by normal subtyping rules by internally
treating `$#capability` as an implicit function/method argument
that has type: `C & Cunsafe`.
### A comprehensive example
```
// defined in an .hhi
namespace \HH\Contexts {
type io = IO;
namespace Unsafe { type safe_context = mixed; } // == {}
type unsafe_context = (Rand & Throws<Exception>);
namespace Unsafe { type unsafe_context = IO; }
}
// user code (WWW)
function do_io()[io]: void {
// $#local_capability has type: IO
// $#capability has type: (mixed & IO) = IO
echo(/* $#local_capability */) "good"; // ok (IO <: IO)
}
function cannot_make_local_io_but_can_call_into_io_ctx()[
unsafe_context
]: void {
// $#local_capability has type: Rand & Throws<Exception>
// $#capability has type: IO & (Rand & Throws<Exception>)
if (coinflip(/* $#capability */)) { // <: Rand (ok)
echo(/* $#local_capability */) "bad"; // </: IO
} else {
do_io(/* capability); // ok
} // ^ (Rand & Throws<Exception>) & IO <: IO
}
```
### Capturing of capabilities/contexts
When a context list is omitted from a lambda, the type-checker
does not need redefine the two coeffects mentioned above
(for calling and for performing local operations); instead
it exploits capturing of `$#capability` and `$#local_capability`
from the *enclosing scope* (since they are local variables).
This enables memory and CPU savings during type-checking.
Observe that there is no semantic difference between inheriting
capabilities on lambda vs capturing (some of) them from the
enclosing function/method, e.g.:
```
function pp_coinflip()[io, rand]: void {
$coinflip = () ==> { // capture `rand` */
// VS. ()[io, rand] ==>
return rand_int() == 0;
};
...
}
```
Such capturing is disallowed when context list is present
by merely overwriting the locals with the types resolved
during the context resolution into capabilities. Partial
capturing (i.e., capturing some capabilities but explicitly
requiring others through the context list) is anyway
discouraged for reasons explained in the [vision document](https://www.internalfb.com/intern/diffusion/FBS/browsefile/master/fbcode/hphp/hack/facebook/vision_docs/tracking_effects_and_coeffects.md):
```
function with_rand_and_io()[rand, io]: void {
// doesn't make much sense (has benefits of neither)
$bad = ()[rand] ==> { /* capture `io` */ ... };
}
```
## HHVM
### Runtime Semantics of Co-effects
This section will discuss the user observable behavior of co-effects in the runtime. Note that this section does not assume the code type checks via the Hack typechecker as Hack is not a fully static and sound language as well as the typechecker can be bypassed via HH_FIXME and other means. That being said, this section does assume that the code is syntactically correct. For syntactically incorrect programs, the parser will produce an error that will invalidate the entire file in which the syntactical incorrectness is present.
Co-effects will be enforced natively as part of the calling convention; however, the enforcement will be opt-in by the author of the co-effect. This means that each co-effect will always be enforced or never enforced. We will not allow Hack developers to opt out of enforcement; however, we will selectively provide local and shallow enforcement for migratory purposes as well as a global mode where we raise warnings instead of exception. Using the local, shallow or warning level enforcement will not allow you to use features that are enabled by the enforcement but it will be a stepping stone for full enforcement (See migration section for more information).
We will separate co-effects into two buckets from the runtime’s perspective: 1) Erased co-effects, 2) Enforced co-effects.
The erased co-effects represents co-effects where the enforcement is not needed for correctness in runtime nor gives any benefit to the runtime. Hence these co-effects do not need to be enforced in the runtime. These will be dropped from runtime after their syntactical correctness is ensured
The enforced co-effects represents co-effects where the enforcement is required in order to establish correctness and power potential features such as reactive cache and implicit contexts respectively. From here onwards, we are going to assume the co-effects discussed will all be enforced co-effects.
Runtime will have a native knowledge of each co-effect. We will not allow co-effects to be defined in user-land code. All possible list of co-effects will be defined in the runtime via the RuntimeOptions. In addition to their definition, the enforcement level(which includes whether we raise a warning, throw an exception or nothing upon enforcement failures) will also be configurable via the RuntimeOptions to enable safe migration. An important aspect to note here is that certain co-effects that need deep support from runtime such as `io` and `pure` will need to be implemented in the runtime (i.e. runtime is aware of them without the RuntimeOptions definition) and RuntimeOption definition of it only denotes the enforcement level of these co-effects.
These restrictions are in place due to performance and correctness requirements of the implementation. Correctness requirement is presence and absence of some of the enforced co-effects enable usage of various features discussed above and in order to grant access to these features, runtime needs to natively know these co-effects. Performance requirement is that the runtime will need to implement some sort of validation and the number of co-effects present will heavily influence the cost of this check. Specific runtime strategies for this check is discussed in implementation details section.
The runtime will enforce the co-effects as a part of the calling convention. The exact order of these checks is still subject to change as most of these checks do not depend on each other for correctness but for those that do, the runtime will create the best topological sorting of the checks. The list of checks that happen as a part of calling convention are the following in their current order:
1) Function name resolution
2) Visibility checks
3) Inout arity and parity checks
4) Forbidden dynamic-call checks
5) Reified generic arity/parity checks
6) Function parameter arity checks
7) Co-effect enforcement
8) Parameter type enforcement
This means that co-effect enforcement errors will be triggered prior to parameter type errors and other similar errors that trigger the error handler. Co-effect enforcement error will trigger a `BadMethodCallException` that can be caught in the userland to recover.
If preferable, one may think of the runtime tracking of coeffects as an implicit parameter containing a bitset of active capabilities that is passed to and from all function invocations. At the call boundary, a special check is made to ensure that the parameter as requested is sufficiently provided by the caller. Following that check, however, the passed in implicit parameter is not simply forwarded, but logically replaced by one exactly matching the requirements of the called function. This is similar to "dropping" the unrequired coeffects from the passed in implicit parameter, such that they may not be used inside the callee even if provided by the caller.
### Why enforce in HHVM at all?
One may ask why it is necessary to enforce these guarantees at runtime. The language as a whole is generally unsound, and we're working to progress that forward. Why not just add this to the list of pieces that will get iteratively fixed with the rest of the language? Further, what terrible thing actually happens if the rules aren't enforced dynamically?
The answer to the first question is relatively simple. The planned system for a sound dynamic type requires that types implementing it make some guarantees about their properties, methods, etc, such that writing into one via a dynamic type won't result in unsoundness. If all functions have unenforced capabilities, then they would all necessarily preempt a type from implementing dynamic.
There are multiple planned contexts for whom the goal is to make strong guarantees about chain of trust. Without dynamic enforcement, chain of trust is broken and those guarantees aren't successful. This would be tantamount to a constant recurring SEV.
### Rules received by HHVM and how they get translated from the syntax
As a part of bytecode emission, HHVM will receive a list of rules for how to compute the enforced capability set for each function declaration (including closure declarations). The final capability set for an invocation of that function is the set union of the results of computing the rules.
The rules are as follows:
1. **STATIC\<set of zero or more capabilities>**: The set of statically known capabilities.
2. **FUN_ARG\<arg num>**: Argument <arg num> is a function-type argument. Read the list of capabilities for the passed in function. Read the empty set if the value is null. Fatal if the argument is not null, a function pointer, a closure, or a PHP-style reference to a function.
3. **CC_ARG\<arg num, context constant name>**: Argument <arg num> is an object that defines a concrete context constant named <context constant name>. Read the contents of that context constant. Read empty set if the value is null. Fatal if the context constant doesn’t exist, is abstract, or if the argument is not null or an object. Arrays are special-cased to have any abstract context constants defined in their class hierarchy.
4. **CC_THIS\<context constant name>**: `this` defines a concrete context constant named <context constant name>. Read the contents of that context constant. Fatal if the context constant doesn’t exist or is abstract.
Given a list of coeffects [*concrete<sub>1</sub>*, ... *concrete<sub>n</sub>*, *this::C<sub>1</sub>*, ... *this::C<sub>n</sub>*, *ctx $arg<sub>a</sub>*, ... *ctx $arg<sub>z</sub>*, *$arg<sub>a</sub>::C<sub>1</sub>*, ... *$arg<sub>z</sub>::C<sub>n</sub>*]
* *concrete<sub>k</sub>* -> STATIC<{concrete<sub>k</sub>}>
* *ctx $arg<sub>k</sub>* -> if *$arg<sub>k</sub>* is the name of argument *i* -> FUN_ARG<*i*>. If no arguments are named *$arg<sub>k</sub>*, raise an error.
* *$arg<sub>k</sub>::C<sub>l</sub>* -> if *$arg<sub>k</sub>* is the name of argument *i* -> CC_ARG<*i*, *C<sub>l</sub>*>. If no arguments are named *$arg<sub>k</sub>*, raise an error.
* *this::C<sub>k</sub>* -> CC_THIS<*C<sub>k</sub>*>
Following generation, the STATIC rules are set-unioned into a single rule, and rules are otherwise deduplicated syntactically.
Some examples are as follows along with the emitted list of rules for that declaration.
```
function f(mixed $x)[io, rand] {}
Rules: [STATIC<{io, rand}>]
function f((function()[_]: void) $x)[io, ctx $x] {}
Rules: [STATIC<{io}>, FUN_ARG<0>]
function f(
(function()[_]: void) $x1,
(function()[_]: void) $x2,
)[io, ctx $x1, ctx $x2] {}
Rules: [STATIC<{io}>, FUN_ARG<0>, FUN_ARG<1>]
function f(Something $x)[io, $x::C] {}
Rules: [STATIC<{io}>, CC_ARG<0, C>]
function f(Something $x1, Something $x2)[$x1::C, $x2::C] {}
Rules: [STATIC<{}>, CC_ARG<0, C>, CC_ARG<1, C>]
public function f()[this::C, rand] {}
Rules: [STATIC<{rand}>, CC_THIS<C>]
public function f(Something $x1)[$x1::C, this::C] {}
Rules: [STATIC<{rand}>, CC_ARG<0, C>, CC_THIS<C>]
public function f(
Something $x1,
(function()[_]: void) $x2,
)[$x1::C, ctx $x2, this::C, IO] {}
Rules: [STATIC<{io}>, CC_ARG<0, C>, FUN_ARG<1>, CC_THIS<C>]
```
### What HHVM does with the given rules
HHVM receives a list of rules from the bytecode emitter and processes these rules to create a set of capabilities that will be used to enforce the capabilities of the function call. Note that HHVM will have a list of capabilities but not a mapping between capabilities and the generics that Hack sees which means that the runtime will not not have access to the original forms of the capabilities, generics and context types. This means that the capabilities cannot be reified, used in is/as expressions or accessed via reflection.
HHVM will convert each rule into a set of capabilities and union the sets generated by each rule to finalize the set of capabilities for the function. When HHVM is unable to convert a rule into a set of capabilities, a `BadCapabilityException` will be raised.
Next, we will discuss how each rule gets converted to a set of capabilities and we will follow it with how HHVM compares the rules for a function call.
For all the following rules, runtime will enforce that <arg num> field is within the boundaries of the function and that <context constant name> exists and is not abstract. One more aspect to note here is that only certain type of defaulted arguments may be used for capability rules: null and compile time known static Hack Arrays. This is a limitation of the runtime due to performance reasons as runtime needs to enforce capabilities prior to executing the default value initializers.
1. **STATIC\<set of zero or more capabilities>**
This rule is the most straight forward of the rules. Runtime will iterate over the capabilities and add them to the result set. If any of the capabilities listed on this list is not natively known to the runtime, runtime will raise an exception.
2. **FUN_ARG\<arg num>**
Runtime will access the closure object, or the function pointer after loading it, associated with the function in argument slot <arg num> and extract its set of capabilities and return it. Runtime will store the capabilities on the closure object.
3. **CC_ARG\<arg num, context constant name>**
Runtime will fetch the argument slot <arg num> and access the late static bound class of this object. On the runtime class object, runtime will return the set of capabilities named <context constant name>.
4. **CC_THIS\<context constant name>**
Runtime will access the runtime class of the current function and return the set of capabilities named <context constant name>. If the current function is not a non-static method, `BadCapabilityException will` be raised.
Once set of capabilities are extracted from each rule, the runtime will union the capabilities and generate a final list of capabilities for the function. Note that this list of capabilities contains unique capabilities that are each known to the runtime. At this point, no capability is polymorphic or based off of some other context. We will call this set of capabilities the **ambient set of capabilities** of the function. As a result of this, polymorphic co-effects cannot be be referenced inside the body of the function, as they have already been resolved at that point.
Due to runtime limitations, specifically, runtime not being able to distinguish between each polymorphic co-effect since runtime operates over the entire set of co-effects, polymorphic co-effects cannot be be referenced inside the body of the function.
Now we will discuss how the runtime enforces a function call based on these set of capabilities. Note that from here on the runtime is operating over the ambient set of capabilities, we will no more need to discuss polymorphism or any other co-effect capabilities.
When a function `f` calls a function `g`, the runtime will fetch the ambient set of capabilities for each function. In order to enforce the correctness of capabilities, the caller environment must be able to satisfy all the capabilities of the callee. More concretely, in order for the function call from `f` to `g` to happen, the ambient set of capabilities of `f` must be a superset of that of `g`. If this is true, the function call will happen with the enforcement guarantee. Otherwise, the runtime will raise an exception, raise a warning or silently ignore based on the enforcement level specified by the compiler configuration.
Due to runtime limitations, specifically, runtime not being able to distinguish between each polymorphic co-effect since runtime operates over the entire set of co-effects, polymorphic co-effects cannot be be referenced inside the body of the function.
### End-to-end Example
In this section, we will walk through a real life example end-to-end to illustrate each component. Note that the following example has two function calls to `map_with_logging`, one good and one bad. We will discuss how and why the good one passes the enforcement and similarly how and why the bad one fails.
```
function map_with_logging<Tv1, Tv2,>(
Traversable<Tv1> $traversable,
(function(Tv1)[_]: Tv2) $value_func,
)[io, $traversable::C, ctx $value_func]: vec<Tv2> { ... }
function caller_good(vec<int> $v)[rand, io]: void {
map_with_logging($v, ($i)[rand] ==> generate_rand_int());
}
function caller_bad(vec<int> $v)[rand]: void {
map_with_logging($v, ($i)[rand] ==> generate_rand_int());
}
```
First, the bytecode generator will generate the following rules for each function.
```
Rules for map_with_logging: [Static<{io}>, CC_ARG<0, C>, FUN_ARG<1>]
Rules for caller_good: [Static<{rand, io}>]
Rules for caller_bad: [Static<{rand}>]
```
Secondly, the runtime will extract the ambient set of capabilities of each function.
```
caller_good: [Static<{rand, io}>] => {rand, io}
caller_bad: [Static<{rand}>] => {rand}
```
Now, let’s inspect map_with_logging for each call.
```
map_with_logging called by caller_good: [Static<{io}>, CC_ARG<0, C>, FUN_ARG<1>]
Static<{io}> => {io}
CC_ARG<0, C> => {}
FUN_ARG<1> => {rand}
Ambient: {io} | {} | {rand} = {rand, io}
```
```
map_with_logging called by caller_bad: [Static<{io}>, CC_ARG<0, C>, FUN_ARG<1>]
Static<{io}> => {io}
CC_ARG<0, C> => {}
FUN_ARG<1> => {rand}
Ambient: {io} | {} | {rand} = {rand, io}
```
Notice in both examples the ambient set of capabilities of `map_with_logging` resulted in `{rand, io}`. This is due to both functions passing the lambda argument with same capabilities.
Lastly, we will execute the enforcement section.
```
Ambient set of capabilities of caller_good: {rand, io}
Ambient set of capabilities of map_with_logging called by caller_good: {rand, io}
{rand, io} is a superset of {rand, io}. Function call succeeds.
Ambient set of capabilities of caller_good: {rand}
Ambient set of capabilities of map_with_logging called by caller_bad: {rand, io}
{rand} is NOT a superset of {rand, io}. Function call fails.
```
### Eager versus Lazy Enforcement of Conditional Co-effects
The above implementation discusses the eager enforcement of conditional co-effects. Another way to enforce co-effects would be to lazily enforce them. This type of enforcement changes the user observe-able effects of the enforcement since with eager enforcement note that we enforce the lambda on the above example when the lambda is passed as an argument, whereas; with lazy enforcement the enforcement would be done when the lambda is used. This also means that with lazy enforcement more programs would be valid as if the lambda is not used, the enforcement would never be done. Lazy enforcement would allow us to enforce more programs that would otherwise be disallowed with eager enforcement such as passing a vector of co-effectful lambdas. With eager enforcement, since the runtime currently does not enforce inner type of containers, we would disallow such programs from being syntactically correct; however, with lazy enforcement since we’d be enforcing them when the lambda is used, we could allow them.
It is worth re-iterating that lazy enforcement would potentially allow other types of illegal programs to execute:
```
function callee((function(): void)[_] $f, dynamic $d)[ctx $f] {
// callee inherits only the co-effects from $f (io in this case)
// $d is a dynamic function pointer to a function that requires the rand co-effect
// Because of lazy enforcement we simply forward all co-effects from
// caller() to callee() and work them out later. In this case, we erroneously
// pick up the "rand" co-effect which would allow the invocation of
// $d to succeed incorrectly.
$d();
}
function caller()[io, rand] {
callee(() [io] ==> print(1), random_int<>);
}
```
We have decided the employ the eager enforcement since we believe that it will be more performant for the runtime and less complex since after each function call, we know that everything is enforced and we do not need to keep track of what needs to be enforced.
Since eager enforcement is stricter than lazy, it will be possible to switch to lazy enforcement if needed in the future and we could employ other strategies to get the benefits of lazy enforcement as well by introducing some sort of runtime tagging of containers.
### Trade-offs and Restrictions
Historically, adding enforcement to the runtime (parameter and return type enforcement, upper bound generic enforcement, property type enforcement, etc) has often come at a cost as the associated checks are not always free. Ideally the information we gain from this enforcement improves the efficiency of generated code by allowing the JIT to specialize for the enforced properties. This benefit will in many cases offset the cost of the enforcement by allowing the JIT to specialize in places where without enforcement the search space would be too large. In this case, however, the runtime cannot take advantage of the co-effect enforcement to improve code emission. Later, we will discussed some strategies to mitigate possible runtime cost that of this enforcement. Before that we will discuss trade-offs and requirements in order to enforce co-effects with minimal runtime cost.
1. Emit significantly more code for these functions: Allowing any arbitrary function to have these polymorphic co-effects would lead to code bloat and instruction cache problems. Our assumption is that polymorphic co-effects will only be common for library functions, and since they are extremely hot and mostly inlined, this would be a good tradeoff.
2. Have the runtime aware of each possible co-effect: This means that each co-effect must be defined in the runtime.
3. Have a hard limit on how many co-effects there can be: A bounded set of co-effects may be efficiently enforced in O(1) time, while an unbounded set of co-effects will require O(n) checks on each call. The chosen bound will be influenced by call ABI and implementation specific constraints within the runtime.
4. Build tracking of more types than we currently do: Currently HHVM does not do any sort of tracking of types for lambdas and type constants. In order to efficiently enforce the above requirements we would need to build such tracking in HHBBC as well as the JIT so that the aforementioned specialized translations can be done. This is, in general, a good direction for the runtime; however, it is also massive amounts of work.
5. Future co-effects which may influence runtime behavior and have additional correctness requirements: An example to this would be, without going into too much detail, banning memoization of functions that use polymorphic co-effects and co-effect type constants. For co-effects such as cipp, we will be depending on the enforcement to assure correctness for implicit contexts. Since for memoized functions we need to know at JIT time whether to memoize the context or not, and we cannot specialize on this for correctness reasons. Any function that must be memoized along with the context must be annotated with non-polymorphic coeffect list, such as [cipp] and have the __Memoize attribute. Runtime will not need to determine dynamically whether to capture the context during memoization.
6. We will disallow partially forwarding polymorphic co-effects: This is discussed with an example above.
# Motivating usecases
## Purity
Pure code is not able to do very much, but most standard library functions fit neatly into the model. The most important aspect of an example for this is that both pure and impure code can safely and cleanly utilize the pure framework code.
For hack, as above, the pure context is simply one with the empty context list.
```
function map<Tv1, Tv2>(
Traversable<Tv1> $traversable,
(function(Tv1)[_]: Tv2) $value_func,
)[$traversable::C, ctx value_func]: vec<Tv2> { ... }
function double_it(vec<int> $v)[]: vec<int> { // {}
return map($v, $i ==> $i*2); // C -> {}, Ce -> {}, overall -> {}
}
// impure code -> not an error
function double_and_print_it(vec<int> $v)[io]: vec<int> {
return map($v, $i ==> {
$doubled = $i*2;
echo "$doubled\n";
return $doubled;
});
}
// ERROR (both with and without [io] on lambda)
function whoops(vec<int> $v)[]: vec<int> {
return map($v, ($i)[io] ==> {
$doubled = $i*2;
echo "$doubled\n";
return $doubled;
});
}
```
## CIPP
For CIPP, the most important capability is having access to the runtime Purpose Policy. This is a capability that needs to be added to the context by setting the implicit. Most likely, this will be done using a magic builtin function:
```
function with_implicit<TIn, TOut>(
TIn $implicit,
(function()[implicit<TIn>]: TOut) $f
): TOut {
..
}
```
Here, the `implicit<T>` capability gives code the ability to access a value of type `T` at runtime. The `cipp<T>` capability is an alias signaling that code should have access to an implicit `T` and also must be analyzed for data leaks. The API for entering into a CIPP context might look like this:
```
function eval<Tcipp, T>(
Tcipp $purpose_policy,
(function()[cipp<Tcipp>]: T) $f,
): Wrapped<Tcipp, T> {
...
}
```
Developers would then do something approximating this:
```
$wrapped_result = Cipp\eval(Policy::MY_POLICY, () ==> {
// Do My Policy things
...
});
```
# Design rationale and alternatives:
The [vision document on tracking (co)effects](https://www.internalfb.com/intern/diffusion/FBS/browsefile/master/fbcode/hphp/hack/facebook/vision_docs/tracking_effects_and_coeffects.md)
motivates our decision to use a *capability*-based **coeffect** system
as opposed to alternatives documented there.
## Why this needs to be a language feature
The major question to consider in this section is whether the feature is truly necessary or whether it is possible to emulate this within the language presently. It is generally possible to emulate the cross-function interaction via exposing the hierarchies of permissions in user-space, and then mimicking the behavior with explicit parameters. However, the user experience of that would be absolutely horrendous, especially consider the planned usage of this in such things as the Hack Standard Library. Further, implementing the dependent contexts behavior in user-space, if even possible, would be very complicated and likely extremely confusing.
However, even if one were determined to implement the cross-function behavior of contexts in user-space, there is no way to replicate the special typing rules and effects on function bodies that this proposal provides. Consider the pure context - logically, this requires banning the usage of the `echo` statement within the body of that function. This is only implementable via a lint rule, which will never be 100% effective, resulting in any given context being transitively untrustable.
## Drawbacks
The current design involves checking an extra argument at every function invocation in the codebase. Because the types in question are big intersections, this has the potential to considerably slow down overall typechecking.
Initially, the default context will have 2-3 capabilities (permission for external mutable references, global state, and to call reactive/pure code). Reactive code would have 1 capability and pure would have 0. Multiple capabilities are currently represented via an intersection type in the typechecker, so we need only a single subtyping check for each function/method call, albeit with a large constant factor as subtyping type intersections (and unions) could be expensive.
Options to mitigate this (within the typechecker, not shown to users):
1. Special-case the most common cases to avoid full intersection type subtyping/simplification, e.g. if both caller and callee are unannotated, both are fully pure/CIPP, etc. This is probably the simplest optimization to try; it introduces a fast path and makes the slow path only a tiny bit slower.
2. Ad-hoc replace intersections of common combinations of capabilities with a single interface type, so that the check is cheap. This wouldn't be too conservative (incomplete but sound) if we can prove that 2 or more capabilities in the intersection of interest cannot be introduced independently (i.e., they always appear in tandem). See subsection "Encoding multiple capabilities" in the Vision doc
# Prior art:
See this [vision document](https://www.internalfb.com/intern/diffusion/FBS/browsefile/master/fbcode/hphp/hack/facebook/vision_docs/tracking_effects_and_coeffects.md), section Prior art, for existing systems, which also deal with the challenge of minimizing the burden on users via a “lightweight” syntax.
## Effects in Haskell
Haskell is one of the few programming languages that already has an effect system. Although effects in Haskell are monadic rather than capability-based, we can still learn from how they are used. Haskell functions are pure by default and Haskell has many of the same effects that we have discussed in this document including failure (Either), implicit arguments (Reader), mutation (State), nondeterminism (Random), and IO. Most monads are single purpose, but the IO monad can do nearly everything. IO in Haskell is similar to the default set of capabilities in Hack.
Although Haskell has monad transformers, which allow users to combine effects, the common usage pattern is to use a single monad if you need a single effect, or IO if you need many effects. It is fair to assume that the same will be true in Hack; the best practice should be to use the default rather than specifying a long list of individual effects.
# Unresolved questions:
## Tracking Dynamic calls
A major issue for this proposal, as well as hack generally, is the existence of dynamic invocations. This is worse for contexts, as a dynamic invocation down the stack breaks the transitive guarantees up the stack. A pure function might actually result in IO due to some dynamic call far down the stack. This is not a problem for contexts whose calling conventions HHVM will enforce, but that will almost certainly not be universally the case.
Somewhat ironically, the solution for this is an additional context: `can_dyn_call`. `can_dyn_call` gives the capability to invoke functions on receivers that are not known to hack to be static function types. Common examples include `HH\dynamic_fun` and invoking off of `mixed` via a `HH_FIXME`.
```
function call_dynamically_bad(string $funname, mixed $fun): void {
// HH_FIXME[4009]
$fun('foobar'); // this would be unfixmeable
HH\dynamic_fun($funname)('foobar');
}
function call_dynamically_safe(string $funname, mixed $fun)[can_dyn_call]: void {
// HH_FIXME[4009] - still needs to be fixmed
$fun('foobar'); // but no additional unfixmeable error
HH\dynamic_fun($funname)('foobar');
}
```
This would extend to other types of dynamic invocations as well, such as invoking off a dynamic type, TAny, etc.
Note that this context gives a capability to functions, therefore requiring it to be present all the way up the stack from the entry point to the call that invokes the dynamic function.
Unfortunately, even the above is not sufficient due to the following case:
```
function callit(
(function()[_]: void) $f,
)[ctx $f]: void { $f(); }
function breakit(mixed $fun): void {
/* HH_FIXME[4110] */
callit($fun); // whoops
/* HH_FIXME[4110] */
callit(HH\dynamic_fun($fun)); // still whoops
}
```
Therefore, we would also require an additional annotation: whether a function type *may* contain a value that would trigger a dynamic invocation error if used directly.
In the above case, something like the following would be required:
`function callit(<<__MaybeDynFun>> (function(): void) $f): void { $f(); }`
Combining the above two features allows us to generally trust the transitive promises of unenforced contexts.
There is technically one further issue, but we do not believe it is a blocking one:
```
function callit((function(): void) $f): void { $f(); }
function breakit(mixed $var_containing_callit, mixed $fun)[can_dyn_call]: void {
/* HH_FIXME[4009] HH_FIXME[4110] */
$var_containing_callit($fun); // whoops
}
```
Note that in the above case, this is only possible because `breakit` is marked as `can_dyn_call`, so even though `callit` isn’t marked as such, we’re not accidentally introducing a capability not provided by the call stack. This is similar to the question of coercibility for like types.
However, due to the above, we would need to ban passing mismatching function-type args unless the caller’s arg is marked as `<<__MaybeDynFun>>`.
This work would necessarily be a prerequisite for adding sound capabilities whose calling conventions aren’t guaranteed by the runtime.
Further questions: Does the ability to do `$x->foo = $my_fun` where `$x` is unknown break this because `$foo` could be not marked as `__MaybeDynFun?` Do we need to hard ban this? What does this mean for generics, reified generics, and higher kinded types?
Even with all of the above, it might not generally be possible to guarantee safety in all cases, meaning that we can’t soundly roll out unenforced contexts.
# Future possibilities:
One can imagine any number of applications for this feature. This is quite a powerful and flexible feature that can be used to represent a number of things, depending on our wishes. Some ideas are given below.
## Use Capabilities to Model Async/Await
Rather than the current version:
```
async function gen_do_things(): Awaitable<void> {
await gen_other_things();
}
```
We could model the permission to use await as a capability:
```
function gen_do_things()[async]: Awaitable<void> {
await gen_other_things();
}
```
Besides uniform treatment (resulting in a simpler overall system), this would allow for abstracting over synchronicity (i.e., `async` vs normal). E.g., the following snippet would type-check:
```
function do_later((function()[_]: void) $work)[ctx $work]: void { ... }
function with_async_io()[async, io]: void {
do_later(() ==> print("quick")); // captures `io` capability
do_later(() ==> await slow_fun()); // captures `async` cap.
do_later(()[async] ==> await slow_fun()); // explicitly has `async` cap
}
```
One could further use dependent contexts and have fine-grained callbacks:
```
function wrap_callback(
(function()[_]: void) $f
):(function()[ctx $f, io]: void) {
return () ==> {
$ret = $f();
print("done");
return $ret;
};
}
function demo_callback(): void {
wrap_callback(() ==> 42);
wrap_callback(()[async] ==> await slow_fun());
}
```
## Modules
Modules are a proposed Hack feature that would enforce the boundaries between Prod, Intern, and Test code. The idea is that the typechecker would prevent developers from calling from Prod into Intern or Test because that code is not available on production machines and would otherwise fail at runtime. This feature can be easily implemented using capabilities. Prod code simply would not have the permission required to call into intern or test code.
```
// file_in_prod.php
module Prod; // implicitly gives [prod] context
function calls_into_intern()/*[prod]*/: void {
callee_in_intern(); // ERROR
}
function callee_in_prod()/*[prod]*/: void { ... }
// file_in_intern.php
module Intern; // implicitly gives [intern] context
function calls_into_prod()/*[intern]*/: void {
callee_in_prod(); // OK
}
function callee_in_intern()/*[intern]*/: void { ... }
```
## Ability to log to Stdout/Stderr
An unfortunately common mistake is one in which a developer adds a `slog`
statement within their code for the purpose of debugging but
then forgets to remove it before committing. We could make the capability
to log to stderr/stdout represented via the context system, such that most
code utilizing it would have a hack error, rendering it uncommittable.
A debugging or development environment could conjure this capability
to facilitate a quick development cycle (see the previous subsection).
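A rough sketch of how this could look; the `stdout` context name and the functions here are invented for illustration and nothing about them is decided:
```
// Hypothetical: writing to stdout/stderr requires a `stdout` capability that is
// not part of the default context.
function debug_dump(mixed $x)[stdout]: void {
  slog($x);
}

function prod_handler(): void { // defaults: no `stdout` capability
  debug_dump(42); // Hack error, so this cannot be committed accidentally
}
```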
## Levels of Dynamic Coercibility
In the forthcoming like-types project, there is the concept of coercion for dynamic values. In the base version, there will be some ability for a dynamic value to be passed where another type is expected. Much of this is still up in the air, but one example is passing a dynamic value into an enforced typehint.
There has been some discussion of having a way to turn off that implicit coercion. We could represent this permission as a capability via the context system, regardless of the desired default.
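Purely as an illustration (the `coerce_dynamic` context name is an assumption, and which behaviour the default context gets remains open):
```
function takes_int(int $x): void { ... }

// Holders of the capability may rely on implicit coercion of dynamic values
// into enforced typehints.
function lenient(dynamic $d)[coerce_dynamic]: void {
  takes_int($d); // coercion permitted at the enforced boundary
}

// A context without the capability (whatever the chosen default) would reject it.
function strict(dynamic $d)[]: void {
  takes_int($d); // ERROR under this hypothetical design
}
```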
## Flow-sensitive capabilities
For `throws` context to be fully practical, we would need to refine the
local and calling capabilities depending on the language constructs.
Specifically, a `try`-block would introduce the `Throws` capability
regardless of whether the function provides it, and the keyword
`throw` would require that capability or error otherwise, e.g.:
```
function try_once<T>(
(function()[throws<Foo>]: T) $f
)[rand]: ?T { $t = null;
try { // adds Throws<mixed> as a local & calling capability
if (coinflip()) {
throw "ok"; // {Rand, Throws<mixed>} ⊆ {Throws<mixed>}
} else {
$t = $f(); // ok: {Rand, Throws<mixed>} ⊆ {Throws<mixed>}
}
} catch(...) { // Throws<Foo> is no longer available here
throw "bad"; // error: {Rand} ⊈ {Throws<mixed>}
}
return $t;
}
```
It is questionable whether the runtime would ever support throwing, since
capability violations result in exceptions (chicken and egg problem).
However, the typechecker could track the capabilities as follows:
```
function try_once<T>(
(function()[throws<Foo>]: T) $f
)[rand]: ?T {
$t = null;
// $#local_capability = Rand
try { // $#local_capability = Rand & Throws<mixed>
if (...) {
throw(/* $#local_capability */) "ok";
// $#local_capability = Rand & Throws<mixed>
// <: Throws<Foo>
}
// $#capability = $#capability & $#local_capability
// == Rand & Throws<mixed>
$t = $f(/* $#capability */); // ok: Rand & Throws<mixed>
// <: Throws<mixed>
// <: Throws<Foo>
} catch (...) { // $#local_capability = Rand
throw(/* $#local_capability */) "bad"; // error
// $#local_capability = Rand </: Throws<Foo>
}
return $t;
}
``` |
Markdown | hhvm/hphp/hack/doc/HIPs/converging_constants.md | Status: This is not a full HIP following the usual template/process, but shared as a HIP for external visibility.
# Converging constants in Hack and HHVM
I’d like to ease a few HHVM restrictions on type constant overriding, and along the way to consolidate the rules around constant inheritance in general.
## Motivation
The attribute-based approach to conditional purity (and conditional reactivity) worked like this.
```Hack
abstract class A {
<<__Pure, __OnlyRxIfImpl(I::class)>>
abstract public function f(): void;
}
interface I {
<<__Pure>>
public function f(): void;
}
class C extends A implements I {
<<__Pure>>
public function f(): void {}
}
class D extends A {
// impure
public function f(): void {}
}
```
The `__OnlyRxIfImpl` attribute says that a method invocation on a value of type A is pure only if that value is also an instance of interface I. Under coeffects, we express this conditional purity with an abstract context constant.
```Hack
abstract class A {
abstract const ctx C = [defaults];
abstract public function f()[this::C]: void;
}
interface I {
const ctx C = [];
}
class C extends A implements I {
public function f()[]: void {}
}
class D extends A {
public function f(): void {}
}
```
HHVM doesn’t allow non-abstract constants in an interface to conflict with non-abstract constants in the parent class, and since abstract type constants with defaults are simple non-abstract constants at runtime, we can’t use the current HHVM constant semantics to express the definition of class C.
## Background
*Note: I’ve compacted class definitions as I think the examples are easier to read this way.*
Constants in HHVM can be overridden in subclasses, which is a behavior inherited from PHP.
```Hack
class A { const int X = 3; }
class B extends A { const int X = 4; /* allowed*/ }
```
Hack allows this behavior to match the runtime. However, if an *interface* declares a constant with the same name, then we have an error when the subclass is loaded.
```Hack
class A { const int X = 3; }
interface I { const int X = 4; }
class B extends A implements I {} // runtime error
```
If X were instead an abstract constant, then class B can be defined.
```Hack
abstract class A { abstract const int X; }
interface I { const int X = 4; }
class B extends A implements I {} // ok
```
Of course, if the interface weren’t present, class B would have to define the constant itself to satisfy the requirements of A.
## **Type Constants**
The type constants feature of Hack, which provides a form of path-dependent types via where constraints, was built on top of class constants. A type constant can be thought of as a class constant whose value is a type structure. As such, the overriding rules for type constants in the runtime are the same — the following code runs without error.
```Hack
class A { const type T = int; }
class B extends A { const type T = string; } // runtime ok, Hack error
```
For type safety, Hack bans this overriding behavior. We’ve reached our first major point of divergence between Hack and HHVM.
There is an escape hatch in Hack, however, in the form of partially abstract type constants. These type constants have an `as` constraint and their values can be overridden in a subclass. At runtime, the `as` constraint is simply erased and the constant is interpreted as a regular, non-abstract type constant. In other words, the following example is identical to the previous example from the runtime’s perspective.
```Hack
class A { const type T as arraykey = arraykey; }
// runtime sees `const type T = arraykey;`
class B extends A { const type T = int; } // no Hack error, runtime remains ok
```
This is convenient to developers, who use the feature to define a value for a type constant in a superclass and only override it in a few subclasses, but it adds complexity because it requires Hack to make judgements about the type `this::T` based on the exactness of types.
I added the ability to define default values for abstract type constants.
```Hack
abstract class A { abstract const type T = arraykey; }
// runtime sees `const type T = arraykey;`
class B extends A { const type T = int; }
```
At runtime, this uses the same machinery as partially abstract type constants, which allows for drop in replacements without risking behavior changes. In the typechecker, abstract type constants are generally much better behaved than partially abstract type constants. They cannot appear in concrete classes, ban direct references via `self::` and `A::`, and cannot be read with `type_structure`. Without partially abstract type constants, `this::T` can be unambiguously resolved in concrete classes.
Unfortunately, the combination of all the implementation details discussed above prevents the following convenient use case.
```Hack
abstract class A { abstract const type T = arraykey; }
interface I { const type T = int; }
class B extends A implements I {} // runtime error
```
This is the mirror of the context constant example discussed in the motivation section.
Finally, [traits now support constants](https://hhvm.com/blog/2021/02/16/hhvm-4.97.html). Their previous ban of constants could be trivially circumvented by having the trait implement an interface. However, if a constant conflict comes from an interface via a trait, the conflict is silently dropped. With this in mind, the prior interface conflict example can be side-stepped using a trait.
```Hack
class A { const int X = 3; }
interface I { const int X = 4; }
trait T implements I {}
class B extends A { use T; } // no runtime error!
```
## Proposal
* We use the abstract keyword instead of the presence of a value to denote abstractness for type constants in HHVM. We have committed changes to expose this in HackC and HHVM.
* We require that any classlike (class, interface, trait) has *at most* a single, canonical concrete constant for a given name. If there is a conflict in two defaults for an abstract type constant, we require the developer to redeclare the desired one. Finally, if there are two conflicting defaults but also a single concrete type constant in the hierarchy, the concrete one wins without error.
* Trivial, existing behavior
```Hack
abstract class A { abstract const type T = int; }
class C extends A {} /* const type T = int; */
class D extends A { const type T = string; }
class X extends C {
const type T = string; // error, cannot override concrete C::T
}
```
* Inherited concrete winning over an inherited abstract
```Hack
abstract class A { abstract const type T = int; }
interface I { const type T = string; }
class C extends A implements I {} // ok, C::T = I::T
class D extends A implements I {
const type T = int; // error, conflicts with I::T
}
```
* Two conflicting defaults
```Hack
abstract class A { abstract const type T = int; }
interface IAbs { abstract const type T = string; }
class C extends A implements IAbs {
// error, must declare a concrete T
}
abstract class D extends A implements IAbs {
// error, must redeclare T and choose a default
// (can do abstract or concrete)
}
```
* Two conflicting defaults, but concrete wins
```Hack
abstract class A { abstract const type T = int; }
interface IAbs { abstract const type T = string; }
interface IConc { const type T = float; }
class C extends A implements IAbs, IConc {
// ok, C::T = IConc::T
}
```
* **Design question**: Two conflicting defaults **with same default**
```Hack
abstract class A { abstract const type T = int; }
interface IAbs { abstract const type T = int; }
class C extends A implements IAbs {
// should this error?
}
```
* Abstract override concrete (from class)
```Hack
abstract class A { const type T = int; }
abstract class B extends A {
// error, abstract cannot override concrete
abstract const type T = string;
};
interface I { // same error
require extends A;
abstract const type T = bool;
}
trait T { // same error
require extends A;
abstract const type T = float;
}
```
* Abstract override concrete (from interface)
* Note: the runtime doesn’t check `require extends` or `require implements`, so those local errors would be in Hack only. The runtime would error later when the interface is implemented or the trait is used.
```Hack
interface I { const type T = int; }
abstract class A implements I {
// error, abstract cannot override concrete
abstract const type T = string;
};
interface I1 extends I { // same error
abstract const type T = bool;
}
trait T1 implements I { // same error
abstract const type T = float;
}
trait T2 { // same error
require implements I;
abstract const type T = float;
}
```
* Abstract override concrete (from trait)
```Hack
trait T { const type T = int; }
abstract class A {
// error, abstract cannot override concrete
use T;
abstract const type T = string;
};
trait U { // same error
use T;
abstract const type T = float;
}
```
* Two conflicting concretes
```Hack
interface I { const type T = int; }
interface J { const type T = string; }
interface K extends I {
const type T = string; // error, conflict with I::T;
}
interface L extends I, J {} // error, J::T conflicts with I::T
```
* Inherited concrete winning over inherited abstract
```Hack
interface I { abstract const type T = int; }
trait Tr { const type T = string; }
class C implements I { use Tr; } // C::T = string
```
```Hack
interface I { const type T = int; }
trait Tr { abstract const type T = string; }
class C implements I { use Tr; } // C::T = int
```
Note that while we are requiring users to redeclare in cases of conflicting defaults, the bounds in the redeclared type constant must still satisfy all of the bounds in all of the parents. All of the examples above are `as mixed super nothing`. Example:
```Hack
abstract class A { abstract const type T as arraykey = arraykey; }
interface IAbs { abstract const type T as int; }
interface IConc { const type T = float; }
class C extends A implements IConc {
// error, IConc::T = float, but T is `as arraykey`
}
abstract class D extends A implements IAbs {
// error, IAbs::T brings in constraint `as int`
// must redeclare T with a default that satisfies this constraint
}
```
* The first draft of this proposal did not require users to resolve default conflicts, instead taking the last default in the linearization. Feedback was to require explicit resolution and drop the inherited conflicting defaults.
* We make type_structure start failing on abstract type constants with defaults, to push them to behave closer to abstract type constants without defaults. We also do the same for direct references via the class. Of course, as with all runtime changes, we start by logging and sampling before moving to hard enforcement.
```Hack
abstract class A {
abstract const type T;
abstract const type U = int;
}
type_structure("A", "T"); // this fatals
type_structure("A", "U"); // this currently does not
function f(
A::T $a, // this fatals
A::U $b, // this currently does not
): void {}
```
* Optional: We ban overriding of class constants in Hack to match the type constant behavior. A benefit of this is that `static::X` and `self::X` will have identical meanings in concrete classes. We then ban overriding of concrete class constants and concrete type constants in HHVM.
```Hack
class A { const int X = 3; }
class B extends A { const int X = 4; /* ban */ }
```
* Possibly for coherence, we should add default values to abstract class constants as well.
```Hack
abstract class A { abstract const int X = 3; }
class B extends A {}
class C extends A { const int X = 4; /* override */ }
```
* If we implement this and the previous bullet, the semantics of class constants will become identical to type constants across HHVM and Hack. |
Markdown | hhvm/hphp/hack/doc/HIPs/data_classes.md | Title: Data Classes
Status: Draft
## The Problem
The Hack runtime does not know which fields are defined in an arbitrary
shape, and does not enforce them. It also cannot reorder shape fields, and
cannot store them contiguously.
This forces the runtime to use data representations that are slower
and use more memory.
By defining a new data type with runtime enforcement, we can provide a
high performance data structure where HHVM knows the fields.
## Design Summary
Data classes are high-performance immutable classes, enforced at
runtime, with syntactic sugar for creation and updates.
### Basic Usage
Data classes are defined using a syntax similar to other forms of
classes.
```
data class Person {
string $name;
bool $admin;
?string $email = null;
}
```
Data classes live in the type namespace, so they must have a name
distinct from other classes.
Data classes are instantiated with `[]`. This enables HackC to produce
different bytecodes for normal class instantiation and data class
instantiation.
```
$p = Person[name = "wilfred", admin = false];
```
Properties on data classes are accessed with `->`, consistent with
normal classes.
```
// Access
$n = $p->name;
```
Data classes are immutable. To create a new data class with slightly
modified fields, use `...` splat syntax.
```
$p2 = Person[...$p, name = "anonymous"];
```
### Inheritance
Data classes can inherit from other data classes with single
inheritance. Child classes may only add new fields.
```
abstract data class Named {
string $name;
}
data class User extends Named {
bool $admin;
}
```
Only abstract data classes may have children. Abstract data classes
may not be instantiated.
### Runtime Type Information
Data classes have distinct type tags. This enables HHVM to enforce
the names and types of properties. These runtime tags may also be
inspected with `is` and `as`, consistent with normal classes.
```
$p is Person; // true
```
This enables runtime type enforcement, unlike shapes.
```
// Type checker error and runtime error.
$p = Person[...$p, name = 123];
```
Function boundaries can also enforce types at runtime.
```
data class Server {
string $name;
}
data class Person {
string $name;
}
// Type checker error and runtime error.
takes_person(Server[name = 'db-001']);
```
This is nominal subtyping. Note that shapes have structural subtyping,
so `takes_person_shape(shape('name' => 'db-001'))` produces no errors.
## Release and Migration
HHVM will have a feature flag to prevent people using data
classes.
This enables us to ship frontend support (parser, codegen, type
checker) for data classes without accidentally allowing users to use this.
This does not replace any existing Hack features, so we do not
anticipate large scale codemods. Hotspots in performance sensitive
code will be worth migrating. Users may also prefer to migrate shape
code that doesn't heavily rely on structural subtyping.
## Implementation Details
Since data classes have a distinct runtime representation, HHVM will
need updating so `->` bytecodes work with this new datatype.
Data class splat updates may require a new bytecode entirely.
## Alternatives Considered
### Structs
Adding a simple 'bag of data' struct was also a [design we
explored](https://fb.intern.workplace.com/groups/468328013809203/permalink/541895673119103/).
```
record Person {
string $name;
bool $admin;
?string $email = null;
}
```
Since we want a nominally typed data model for performance, it would
not be feasible to codemod shapes code to use structs. We
would end up with a larger programming language with more features
that users must learn.
This also makes it difficult for us to explore adding methods in future.
### Alternative update syntax
```
$p2 = clone $p with name = "anonymous", email = "[email protected]";
```
Whilst `clone` is legal PHP/Hack syntax, it's not well known. The name
is also unfortunate, as it's a shallow copy. It's more verbose too.
```
$p2 = $p with name = "anonymous", email = "[email protected]";
```
This syntax would enable good code completion, but has little Hack
precedent. It also means data classes have more distinct syntax from
normal classes, making it hard for us to unify syntax in future.
### Alternative instantiation syntax
We plan to emit a different bytecode for data class
instantiation. This requires syntax that unambiguously represents a
record instantiation.
This bytecode exploits that data classes cannot be in a partially
constructed state, because they don't have constructors. HHVM can
treat it specially.
```
$p = Person(name = "anonymous");
$p2 = Person();
```
This is ambiguous with function calls when all the properties have
default values.
```
$p = new Person(name = "anonymous");
$p2 = new Person();
```
This is ambiguous with normal classes when all the properties have
default values.
### Reusing class declaration syntax
```
final data class User {
public string $name;
public bool $admin;
public ?string $email = null;
}
```
This is significantly more verbose for limited benefit. We want this
to be a compelling alternative to shapes, which have a lightweight
syntax.
### Open Data Classes
```
// '...' allowing additional fields, like shapes.
data class OpenUser {
string $name;
bool $admin;
...
}
```
Similar to open shapes, this would allow additional fields to be added
dynamically.
This would require additional runtime work, and wouldn't enjoy the
performance benefits. We believe that an explicit `extra` property is
sufficient.
```
data class OpenUser {
string $name;
bool $admin;
shape(...) $extra = shape();
}
```
## Open Questions
### Equality
We'd probably want structural equality for `===`.
```
Person[name = "x"] === Person[name = "x"]; // true
```
This is inconsistent with other forms of classes though.
It's also unclear what semantics `==` should have for data classes.
### Serialization
It's not clear how data classes should serialize and deserialize.
### Shape Interoperability
It's not clear what APIs we will offer for converting between data
classes and shapes. This is potentially a lossy transform, due to the
nominal typing of data classes and the observable ordering of shapes.
### Generics
We have avoided considering generics so far, to keep the typing rules
simple. We're hoping to ship v1 without generics.
This is definitely a feature we want to add after v1 is feature
complete and shipped.
### Nested Records
Shapes are copy-on-write, allowing updates of nested items.
```
$s['x']['y'] = 42;
```
There is no equivalent syntax for data classes, making this use case
more verbose.
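For comparison, a hypothetical nested update under this proposal (the class names are invented) has to rebuild each level explicitly with the splat syntax:
```
// Assuming data classes Outer { Inner $x; } and Inner { int $y; }
$s = Outer[...$s, x = Inner[...$s->x, y = 42]];
```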
### `$this` semantics
If we do add methods in the future, it is not clear how we handle
`$this`. Would methods be able to change `$this`? If not, what can
methods return?
### Field Ordering
HHVM has had performance wins by reordering fields in classes. We'd
like to retain that flexibility with data classes. If we really need
observable ordering for data classes, it should probably be opt-in.
## Prior Art
C# has [value
types](https://docs.microsoft.com/en-us/dotnet/standard/base-types/common-type-system?redirectedfrom=MSDN#structures)
which are introduced with the `struct` keyword. They do not support inheritance.
Scala has [case
classes](https://docs.scala-lang.org/tour/case-classes.html). They are
immutable classes with public properties, and also provide a `.copy`
method for creating modified copies. They use structural equality.
Kotlin has [data
classes](https://kotlinlang.org/docs/reference/data-classes.html). They
support inheritance and interfaces, and automatically provide equality
and copy methods. |
Markdown | hhvm/hphp/hack/doc/HIPs/enum-class-labels.md | # HIP: Enum Class Labels
### Status: Candidate
**Depends on:** Enum Classes project.
**Implementation status:** Done. Depends on a couple of flags
* ~~Enum classes flag, now enabled by default. (HHVM: `-v Hack.Lang.EnableEnumClasses=1`)~~ This is now removed
* ~~Unstable Features `enum_class_label` (HHVM: `-v Hack.Lang.AllowUnstableFeatures=1`)~~ No longer an unstable feature
* ~~common `disallow-hash-comments` flag~~ this flag has been removed from the parser.
## Motivation
Enums and Enum classes are sets of values. Enum constants and enum class values can be accessed via bindings, but the bindings themselves do not have denotable expressions. Consider for instance:
```
enum E : int {
A = 42;
B = 42;
}
function f(E $x): void {
switch ($x) {
case E::A: echo "from A I get "; break;
case E::B: echo "from B I get "; break;
}
echo "$x\n";
}
function test(): void {
// Both echoes "from A I get 42"
f(E::A);
f(E::B);
}
```
In the code above, `E::A` and `E::B` are values of type `E` and the programmer used the `switch` statement to recover which binding was used to access the enum constant bound to `$x`. However, since the runtime is only aware of enum constants and not bindings, in both invocations of `f` the runtime will always match the constant `42` (bound in both cases to `$x`) against the constant `42` (obtained via `E::A` in the first case statement): in both invocations the first case statement is selected and `"from A I get 42"` is displayed twice.
While using Enum classes to replace codegen with generic programming, we faced some situations where we needed these bindings to be denotable and first class. Neither enums nor enum classes provide this at the moment. We also realised that using enum classes in www increases verbosity compared to the old codegened APIs: a call to an "old-fashioned" codegened `getName()` method might now look like `get(MyLongClassNameParameters::Name)`.
## Proposal
This proposal introduces the concept of **labels** for **enum class** constants. Fully qualified labels are new expressions of the form `Foo#Bar`, where `Foo` is the name of an enum class and `Bar` is the name of one of `Foo`'s constants. Labels provide first-class bindings for enum classes and can be used in two new builtin functions to obtain the name and value of the corresponding enum class constant. Labels also have a short syntax, of the form `#Bar`, that limits verbosity whenever type inference can be leveraged to reconstruct internally the full `Foo#Bar` label.
**Expressions & types.** A label from the enum class `Foo` that binds a constant of type `T` has type `HH\EnumClass\Label<Foo, T>`. The new type `HH\EnumClass\Label` is opaque.
```
enum class E : I {
C Name = ...;
}
E#Name : HH\EnumClass\Label<E, C>
$x = E#Name; // ok
f(E#Name); // ok
```
In practice, Hack often has enough information to infer the correct enum class name, so we can make it implicit: the `f(E#Name)` call can then be shortened to `f(#Name)`. When type information is not sufficient, Hack reports an error and asks for a fully qualified annotation. We currently only allow the short form in some function calls, but we plan to allow more locations in the future.
```
$x = #Name; // error, not enough context to infer the enum
f(#Name); // ok, because Hack knows f's first arg type. It behaves like f(E#Name)
```
**New builtin functions.** This proposal extends the definition of enum classes to provide two new static methods that can consume labels:
* `nameOf` which takes a label and returns a string representation of it
* `valueOf` which takes a label and returns the value associated with this label
Here is a short example of their use:
```
function show_label<T>(HH\EnumClass\Label<E, T> $label) : void {
echo E::nameOf($label);
}
function get<T>(HH\EnumClass\Label<E, T> $label) : T {
return E::valueOf($label);
}
```
In this example:
* `show_label(E#Name)` will display a string representation of `E#Name`. For now, it would display the string `Name`.
* `get(E#Name)` returns the value `E::Name`.
As long as Hack can infer the enum class name, fully qualified and short expressions can be used transparently:
Since `get(#Name)` behaves like `get(E#Name)`, it will return the same value `E::Name`.
### Sugar coating requested by Beta testers
As requested by some beta testers, we also provide an alternative way to write the special case where the **first** function argument is a label, allowing developers to write `f(#X, ...)` as `f#X(...)`. This will enable developers to use a syntax closer to the old-fashioned `getName()`, by writing `get#Name()`.
Note that this is only syntactic sugar and does not provide additional possibilities like taking a function pointer or a lambda: `get#Name<>` is forbidden, as it would be a function pointer on a partially applied function.
### IDE Integration
* Automatic completion provides constant names when writing a valid prefix like `getParam#` (implemented by Vassil)
* Documentation hover shows types with docs for labels and for function-call sugar like `get#Name()` (implemented by Tom)
## Implementation
### Forewords
It is important to note that until recently, the `#` symbol was a marker for a one line comment. This has been removed, and the `#` symbol is now reused by this proposal. This is not a limitation as Hack still supports one line comments using the `//` syntax.
### Runtime
This proposal introduces two new expressions in the AST, the fully qualified enum class labels (`E#Name`) and the short form (`#Name`). The current sample runtime implementation works as follows:
* both fully qualified labels and short labels are compiled to the `Name` label identifier, discarding the `E` part;
* the base class of enum classes, `BuiltinEnumClass`, is extended with two new final static methods, `nameOf` and `valueOf`. Given a label identifier and the class name they are invoked on, these can perform name and value resolution via a special HHVM intrinsic.
### Type checking
To type fully qualified enum class labels, we introduce a new type `HH\EnumClass\Label<-Tenum, Tdata>` which reflects the enum class where the label is defined, and its associated type. The variance of the first type parameter is the same as the one for the `HH\MemberOf` type, introduced by enum classes. Please refer to the enum class HIP for a detailed discussion of variance.
The use of fully qualified enum class labels does not require any type checker change. The shorter version instead needs minor changes to type checking function calls. We detail how Hack’s type inference is modified to reconstruct the enum class name.
Let us take a step back and consider Hack's handling of function calls. At any point in a program, Hack has access to the signature of all previously defined functions. Suppose that Hack is inferring the type of the expression `f(expr)`:
* it already knows the type of the top level function/method `f` from the context: `A -> B`
* it recursively infers the type of `expr`: `C`
* it checks that `C` is a valid subtype of `A`: `C <: A`
* if that’s the case, the whole expression has type `B`
In general the type of `f` is not needed to infer the type of `expr`. However, when short syntax is encountered, the type information about `f` arguments is used to infer the type of a label. As an example, let us consider again the `get` function:
```
function get<T>(HH\EnumClass\Label<E, T> $x) : T
```
When inferring the type of `get(#Name)` Hack can perform the following steps:
* from the context, it knows that `get` has type `HH\EnumClass\Label<E, T> -> T`
* it recursively infers the type of `#Name`, *knowing that a constant from `E` is expected*
* there is indeed a constant named `Name` in the enum class `E` so the call is valid
* the full expression will thus behave as if the developer wrote `get(E#Name)`
We currently support this mechanism only for function calls where the top-level type of the parameter is a `Label`. In the next section, we'll discuss ways to support the short syntax in more locations, but this HIP only guarantees support for function calls.
## Future extensions
### Extension to enumeration types
Standard enumeration types could benefit from the same usage of fully qualified and short labels. In this proposal, we only focus on enum classes, but the similar machinery might be ported to enums, and extend the expressivity of labels to them. Since there is currently no demand for it, we restrict this feature to enum classes for now.
The main differences we expect are:
* A dedicated label type, `HH\Enum\Label<Tenum, Tdata>`, with different constraints on the type parameters (`Tenum` will probably be invariant since enum inclusion does not entail subtyping, and `Tdata` constrained by `arraykey`)
* The `nameOf` and `valueOf` methods would have to be defined in the `BuiltinEnum` class, and the signature of `valueOf` would need to be adapted.
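A speculative sketch of what this could look like for a plain enum; the `Size` enum is invented, and the `HH\Enum\Label` type and its `nameOf`/`valueOf` methods do not exist today:
```
enum Size: string {
  SMALL = 'small';
  LARGE = 'large';
}

function describe(HH\Enum\Label<Size, string> $label): void {
  echo Size::nameOf($label); // "SMALL"
  echo Size::valueOf($label); // "small"
}

describe(Size#SMALL);
describe(#LARGE); // short form, inferred from describe's signature
```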
### Allowing short syntax at more locations
Internally, Hack keeps optional **expected type** information when type checking expressions. We plan to use this information to allow the short labels more broadly, e.g. in return statements.
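For instance, reusing the enum class `E` from above, something like the following could be allowed (hypothetical, not currently supported):
```
function pick(): HH\EnumClass\Label<E, C> {
  return #Name; // the expected return type would supply the enclosing enum class E
}
```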
### Performance sensitive code
The current proposal only addresses the verbosity issue for labels. For enum class constants themselves, the full name must still be passed to functions that expect constants, not labels, as in the example below:
```
function get<T>(HH\MemberOf<E, T> $member) : T {
return $member;
}
get(E::Name); // ok
get(#Name); // not allowed
```
A solution to this issue is to call `valueOf` by hand and rewrite this `get` method like this:
```
function get<T>(HH\EnumClass\Label<E, T> $label) : T {
return E::valueOf($label);
}
get(E::Name); // not allowed
get(#Name); // ok
```
In performance-sensitive code, this extra function call might be too expensive. **_If such code exists_** and the shorter enum class invocation syntax needs to be supported, we might provide an attribute that enables the short notation for constants too, e.g.:
```
function get_via_label<T>(<<__ViaLabel>>HH\MemberOf<E, T> $member) : T {
return $member;
}
get_via_label(E::Name); // not allowed
get_via_label(#Name); // ok
```
The logic to deal with labels has been added at function definition sites: until more type information is available at compile time, or information from HHBBC can be exploited, we cannot support `<<__ViaLabel>>` by instrumenting call sites. Paul Bissonnette has ideas for that, but it won't be discussed in this proposal.
We are emitting a special bytecode sequence when `__ViaLabel` is used to transform the label (just a constant name) into the right class constant access, in the function **prologue**. This implies that inside `get_via_label`, even if called with the label `#Name`, `$member` is the *constant* value `E::Name`, not the label. There is no way to fetch back the label.
We currently have a running prototype to test this feature, but I don’t think we should ship this unless there are effectively some performance issues in the framework that will use labels. |
Markdown | hhvm/hphp/hack/doc/HIPs/equality_and_identity.md | Title: Equality, Identity, and Comparisons
Start Date: June 24 2020
Status: Draft
## Summary
Unify `==` and `===`, introduce sane alternatives for classes that need to use
behaviour similar to the old versions.
## Feature motivation
Hack has two main comparison operators `==` and `===`. They're individually
comprised of behaviours both good and bad, and combine to a muddied, confusing,
and overall broken experience. `==` does coercing value equality, including on
objects, and compares arrays (sometimes unorderedly). `===` does pointer equality
checks on objects (including collections), and breaks some invariants for
reactive and pure code.
We'd like to simplify and correct the mistakes of the previous design.
### The current state
1. `===`
1. Objects: pointer equality (note that closures are instances of different objects)
2. arrays: compares all elements (keys and values) using `===`, requiring identical internal ordering.
3. values: value equality
4. Hack Collections: pointer equality (they are objects)
2. `==`
1. Objects: compares all properties using `==`
1. note that closures still don’t compare equal because they’re instances of different objects
2. arrays: compares all elements (keys and values) using `===` for keys and `==` for values, ignoring order for `dicts`/`darrays`/`keysets`
3. values: value equality following complex PHP coercion rules.
4. Hack Collections: compares all elements using `==`, ignoring order for `Map`/`Set`
5. If comparing different types will attempt to coerce one side to the type of the other before comparing
3. `<=`, `>=`: Use `==` under the hood. No `===` variant.
4. Sorts and switch statements implicitly use the == behaviour under the hood
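A few concrete consequences of the rules above (behaviour as described in this document; exact coercion details vary between HHVM versions):
```
dict['a' => 1, 'b' => 2] == dict['b' => 2, 'a' => 1]; // true: order ignored
dict['a' => 1, 'b' => 2] === dict['b' => 2, 'a' => 1]; // false: internal order differs
'1' == 1; // true: coerces before comparing
'1' === 1; // false: different types
new Foo() == new Foo(); // true if all of Foo's properties compare equal with ==
new Foo() === new Foo(); // false: two distinct instances
```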
## User experience
1. To test object pointer equality, use a builtin `is_same_obj($a, $b)` that does a pointer check (see the sketch after this list)
1. Potentially would require a marker interface such as `IIdentityTestable` (name to be bikeshed).
2. In that case, whether `is_same_obj` is free standing or works as `$a->isSame($b)` is to be bikeshed
1. In the method case, this is non-overridable
2. To get object structural equality require explicit opt-in via an interface such as `IEquatable`
1. To be bikeshed: does this allow using `==` or does it do something like `$a->eq($b)`
1. In the method case, this is non-overridable
2. Would require all properties to be either values or also `IEquatable`
3. Note that this would probably do an identity test first as an optimization
1. This also would require reflexivity for the optimization to be sound.
4. If we can’t compare Collections and Arrays, how do props of those types work? Do we allow it implicitly in this case?
3. For the previous two, if we go the object method route of comparison, we absolutely cannot allow overriding implementations.
4. `==` works as follows
1. never coerces the arguments
2. Objects: see 1 and 2.
1. If `IEquatable` means `==` can be used, do we return false or throw when it is not present?
2. What happens when objects have different types?
3. Bikeshed more: Cannot compare arrays/collections. Use HSL or Collections methods
4. Values: work the same
5. Closures are not `IEquatable`, and pointer equality on two of them is always false (or throws?).
1. This will cause issues with reflexivity.
6. bikeshed: `resource` and `NaN`? Other edge cases?
1. Note that NaN is already weird. Currently `NAN !== NAN` but `vec[NAN] === vec[NAN]`
5. `===` doesn’t exist
6. `<=`,`>=`, etc inherit the changes to ==
1. Are `IEquatable` objects comparable this way? I expect not.
2. Do we allow them on Containers?
7. Having an object arg to a memoized `pure`/`rx` function requires it to be `IEquatable`
8. Sorts and switch statements use the new sane == under the hood
9. Will likely need a builtin coercing_eq_yikity_yikes() that does the old == behaviour for migration purposes
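A sketch of how the proposed pieces might fit together (everything here, including the names and whether `==` is even allowed on `IEquatable` objects, is still being bikeshed above):
```
final class Point implements IEquatable {
  public function __construct(public int $x, public int $y) {}
}

function demo(Point $a, Point $b): void {
  $identical = is_same_obj($a, $b); // proposed pointer-identity builtin
  $equal = $a == $b; // structural equality, permitted only because Point is IEquatable
}
```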
## IDE experience
N/A
## Implementation details
Mostly still open questions above or below. Did I miss any?
What are the typing rules of == under this proposal? Is it still a function that takes two values of type mixed?
## Design rationale and alternatives
TODO once we get more consensus about the open questions above
## Drawbacks
TODO
It's a ton of work. Probably not HAM level, but definitely a major project from the HF perspective
## Prior art
TODO
Note rust's ord, eq, partial ord, and partial eq. Rust (which took these ideas from Haskell) only allows you to compare values that implement these traits (typeclasses in Haskell).
## Unresolved Questions (in addition to the currently inline ones above)
For opt-in structural equality, what if I have a non-opted-in parent class? By opting in, do I also opt its fields in to being inspected? What about traits?
Should switches always use pointer equality on objects? This seems very relevant to enum classes.
How do function pointers work? And are you guaranteed to get the same pointer for two different calls?
Does structural equality on well-formed IEquatable classes always work?
```
class ListNode implements IEquatable {
public function __construct(
public int $val,
public ?ListNode $prev = null,
public ?ListNode $next = null,
) {}
}
function compare_nodes(): void {
// Build a linked list [1, 2].
$n = new ListNode(1);
$n2 = new ListNode(2, $n);
$n->next = $n2;
// What value does $b have?
$b = $n == $n2;
}
```
The obvious recursive function for structural equality would loop infinitely on $n == $n2.
## Future possibilities
I think this is mostly N/A? Unless we want to reuse === for something? |