// ${generated_comment}

// NOTE: [TORCH_ASSERT_ONLY_METHOD_OPERATORS]
//
// In ATen, certain generated header files include the definitions of
// every single operator in PyTorch. Unfortunately, this means that every
// time an operator signature is updated or changed in
// native_functions.yaml, you (and every other PyTorch developer) need
// to recompile every source file that includes any of these headers.
//
// To break up these header dependencies and improve incremental
// build times for all PyTorch developers, these headers are split
// into per-operator headers in the `ATen/ops` folder. This limits
// incremental builds to only changes to methods of `Tensor`, or files
// that use the specific operator being changed. With `at::sum` as an
// example, you should include
//
//   <ATen/ops/sum.h>               // instead of ATen/Functions.h
//   <ATen/ops/sum_native.h>        // instead of ATen/NativeFunctions.h
//   <ATen/ops/sum_ops.h>           // instead of ATen/Operators.h
//   <ATen/ops/sum_cpu_dispatch.h>  // instead of ATen/CPUFunctions.h
//
// However, even if you're careful to use per-operator headers in your
// own code, `Functions.h` might still be included indirectly through
// another header without you realising it. To avoid this, you can add
//
//   #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
//
// to the top of your source file. This way, any time one of the
// non-specific headers is included, the compiler will error out.
//
// Also, be aware that the `ATen/ops` headers are not available in all
// build configurations (namely fb-internal), so you must guard these
// includes with `#ifdef AT_PER_OPERATOR_HEADERS`, e.g.
//
//   #ifndef AT_PER_OPERATOR_HEADERS
//   #include <ATen/Functions.h>
//   #else
//   #include <ATen/ops/sum.h>
//   #endif
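//
// Putting the two together, a source file that opts in might look like
// the following sketch (`my_kernel.cpp` and `sum_all` are illustrative
// names, not real PyTorch files):
//
//   // my_kernel.cpp
//   #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
//   #include <ATen/core/Tensor.h>
//
//   #ifndef AT_PER_OPERATOR_HEADERS
//   #include <ATen/Functions.h>
//   #else
//   #include <ATen/ops/sum.h>
//   #endif
//
//   at::Tensor sum_all(const at::Tensor& t) {
//     return at::sum(t);  // only needs the per-operator header
//   }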
${Functions_includes}

namespace at {

${Functions_declarations}
// Special C++-only overloads for std()-like functions (see gh-40287).
// These are needed because the int -> bool conversion takes precedence
// over int -> IntArrayRef, so, for example, std(0) would otherwise
// select the std(unbiased=False) overload.
TORCH_API inline Tensor var(const Tensor& self, int dim) {
  return at::var(self, IntArrayRef{dim});
}

TORCH_API inline std::tuple<Tensor, Tensor> var_mean(const Tensor& self, int dim) {
  return at::var_mean(self, IntArrayRef{dim});
}

TORCH_API inline Tensor std(const Tensor& self, int dim) {
  return at::std(self, IntArrayRef{dim});
}

TORCH_API inline std::tuple<Tensor, Tensor> std_mean(const Tensor& self, int dim) {
  return at::std_mean(self, IntArrayRef{dim});
}
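
// A minimal sketch of the pitfall these overloads address (`t` is an
// illustrative name): without the int overloads above, an integer
// argument would convert to bool and silently select the
// std(unbiased=False) overload instead of reducing over a dimension.
//
//   at::Tensor t = at::rand({3, 4});
//   at::std(t, 0);                   // reduces over dim 0 via the
//                                    // int overload above
//   at::std(t, at::IntArrayRef{0});  // equivalent, fully explicit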
// Free-function forms of common Tensor accessors. Each simply forwards
// to the corresponding Tensor method, so generic code can call them
// uniformly as at::f(tensor).
inline int64_t numel(const Tensor& tensor) {
  return tensor.numel();
}

inline int64_t size(const Tensor& tensor, int64_t dim) {
  return tensor.size(dim);
}

inline int64_t stride(const Tensor& tensor, int64_t dim) {
  return tensor.stride(dim);
}

inline bool is_complex(const Tensor& tensor) {
  return tensor.is_complex();
}

inline bool is_floating_point(const Tensor& tensor) {
  return tensor.is_floating_point();
}

inline bool is_signed(const Tensor& tensor) {
  return tensor.is_signed();
}

inline bool is_inference(const Tensor& tensor) {
  return tensor.is_inference();
}

inline bool _is_zerotensor(const Tensor& tensor) {
  return tensor._is_zerotensor();
}

inline bool is_conj(const Tensor& tensor) {
  return tensor.is_conj();
}

inline Tensor conj(const Tensor& tensor) {
  return tensor.conj();
}

inline bool is_neg(const Tensor& tensor) {
  return tensor.is_neg();
}
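
// A quick illustrative sketch of the free-function forms (`t` is a
// hypothetical tensor name):
//
//   at::Tensor t = at::rand({2, 3});
//   at::size(t, 0);     // same as t.size(0)       -> 2
//   at::is_complex(t);  // same as t.is_complex()  -> false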
} // namespace at