text
stringlengths
0
2.2M
// If the state of the core being proxied had a callback that allows inline
// execution, maintain this information in the proxy
futures::detail::InlineContinuation allowInline =
(priorState == State::OnlyCallbackAllowInline
? futures::detail::InlineContinuation::permit
: futures::detail::InlineContinuation::forbid);
state_.store(State::Empty, std::memory_order_relaxed);
proxy_->setExecutor(std::move(executor_));
proxy_->setCallback_(std::move(callback_), std::move(context_), allowInline);
proxy_->detachFuture();
context_.~Context();
callback_.~Callback();
}
void CoreBase::detachOne() noexcept {
  // Drop one attachment (producer or consumer side). The caller releasing
  // the final attachment is responsible for destroying the core.
  const auto remaining = attached_.fetch_sub(1, std::memory_order_acq_rel);
  assert(remaining >= 1);
  if (remaining == 1) {
    delete this;
  }
}
void CoreBase::derefCallback() noexcept {
  // Release one reference to the stored callback/context pair. Whichever
  // caller drops the last reference runs the destructors explicitly, since
  // the core manages the lifetime of these members by hand.
  const auto remaining = callbackReferences_.fetch_sub(1, std::memory_order_acq_rel);
  assert(remaining >= 1);
  if (remaining == 1) {
    context_.~Context();
    callback_.~Callback();
  }
}
#if FOLLY_USE_EXTERN_FUTURE_UNIT
// Explicit instantiation of the most common Core specialization.
// NOTE(review): presumably paired with an `extern template` declaration in a
// header so other translation units avoid re-instantiating it — confirm.
template class Core<folly::Unit>;
#endif
} // namespace detail
} // namespace futures
} // namespace folly
#include <torch/csrc/jit/codegen/fuser/kernel_cache.h>
#include <torch/csrc/jit/passes/canonicalize.h>
#include <torch/csrc/jit/passes/shape_analysis.h>
#include <cstdint>
#include <mutex>
#include <unordered_map>
namespace torch {
namespace jit {
namespace fuser {
// Process-wide storage for compiled fusion kernel specs, shared through
// getKernelCache(). All access must hold mutex_.
struct KernelCacheImpl {
// Note: std::unordered_map does not invalidate references even if rehashing
// occurs. This is a critical property for thread-safety.
std::mutex mutex_;
// Monotonically increasing source of fresh fusion keys (guarded by mutex_).
int64_t kernel_counter{0};
// Map of fusion key to KernelSpec
std::unordered_map<int64_t, KernelSpec> specMap_;
// Map of pretty-printed graph string to fusion key
// Used to check if a graph has already been cached in specMap_
std::unordered_map<std::string, int64_t> graphToKey_;
};
static KernelCacheImpl& getKernelCache() {
  // Meyers singleton: constructed on first use; initialization is
  // thread-safe per the C++11 magic-statics guarantee.
  static KernelCacheImpl instance;
  return instance;
}
int64_t debugNumCachedKernelSpecs() {
auto& cache = getKernelCache();
std::lock_guard<std::mutex> guard{cache.mutex_};
return cache.specMap_.size();
}
std::shared_ptr<Graph> normalizeGraphForCache(
    const std::shared_ptr<Graph>& graph) {
  // Canonicalize without unique names and strip shape information so that
  // graphs differing only in those details hash to the same cache entry.
  auto normalized = Canonicalize(graph, /*keep_unique_names=*/false);
  EraseShapeInformation(normalized);
  return normalized;
}
// TODO: lookup by historic string key to start, then issue key
// as appropriate for faster lookup in the future
// precondition: graph has been normalized via normalizeGraphForCache
// Inserts a normalized graph into the cache and returns its freshly issued
// fusion key. Precondition: graph was produced by normalizeGraphForCache.
int64_t store(std::shared_ptr<Graph> graph) {
  auto& cache = getKernelCache();
  // Serialize the graph before taking the lock to keep the critical
  // section as short as possible.
  std::string graph_repr = graph->toString(false);
  std::lock_guard<std::mutex> lock{cache.mutex_};
  const auto key = cache.kernel_counter++;
  cache.specMap_.emplace(
      std::piecewise_construct,
      std::forward_as_tuple(key),
      std::forward_as_tuple(key, graph));
  cache.graphToKey_.emplace(std::move(graph_repr), key);
  return key;
}