Llama-3.1-8B-DALv0.1
/
venv
/lib
/python3.12
/site-packages
/torchgen
/packaged
/autograd
/templates
/Functions.h
// ${generated_comment}
namespace torch { namespace autograd { namespace generated {
using at::Scalar;
using at::Tensor;
using at::IntArrayRef;
using at::ArrayRef;
using at::Type;
using at::TensorGeometry;
using at::ScalarType;
using std::optional;
using c10::fmap;
// Unpacks each SavedVariable in `xs` into a plain Tensor.
//
// @param xs        saved variables to unpack
// @param saved_for optional Node the variables were saved for; forwarded to
//                  SavedVariable::unpack
// @return vector of unpacked Tensors, in the same order as `xs`
inline std::vector<Tensor> unpack_list(at::ArrayRef<SavedVariable> xs, std::shared_ptr<Node> saved_for = nullptr) {
  std::vector<Tensor> unpacked;
  unpacked.reserve(xs.size());
  for (const SavedVariable& sv : xs) {
    // The explicit conversion is deliberate: unpack() yields a Variable,
    // and we want the collected element type to be Tensor.
    // TODO(crcrpar): Use `std::move(saved_for)` to avoid incrementing refcount, which would need refactoring.
    unpacked.push_back(static_cast<Tensor>(sv.unpack(saved_for)));
  }
  return unpacked;
}
// Unpacks each SavedVariable in `xs` into an optional-Tensor list, mapping
// undefined tensors to nullopt.
//
// @param xs        saved variables to unpack
// @param saved_for optional Node the variables were saved for; forwarded to
//                  SavedVariable::unpack
// @return c10::List whose elements are the unpacked Tensors, or std::nullopt
//         for entries that unpack to an undefined tensor
inline c10::List<std::optional<Tensor>> unpack_opt_list(at::ArrayRef<SavedVariable> xs, std::shared_ptr<Node> saved_for = nullptr) {
  // Name the list type consistently with the declared return type
  // (torch::List is just an alias for c10::List), and use std::nullopt to
  // match the std::optional element type used throughout this header.
  c10::List<std::optional<Tensor>> result;
  result.reserve(xs.size());
  for (const SavedVariable& v : xs) {
    auto var = v.unpack(saved_for);
    // Move the unpacked tensor into the optional instead of copying it.
    result.push_back(var.defined() ? std::optional<Tensor>(std::move(var)) : std::nullopt);
  }
  return result;
}
using torch::autograd::TypeAndSize;
${autograd_function_declarations}
}}} // namespace torch::autograd::generated