#pragma once

#include <torch/torch.h>
|
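// Returns true when the CUTLASS scaled_mm kernels can be used with FP8 on a
// device of the given CUDA compute capability (presumably encoded as
// major * 10 + minor, e.g. 89 or 90).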
bool cutlass_scaled_mm_supports_fp8(int64_t cuda_device_capability);
|
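// CUTLASS-based quantized GEMM. Presumably computes
//   out = (a_scales * a) @ (b_scales * b) + bias
// where a and b hold quantized values (e.g. int8 or FP8) and the scales are
// per-tensor or per-row/per-column.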
void cutlass_scaled_mm(torch::Tensor& out, torch::Tensor const& a,
                       torch::Tensor const& b, torch::Tensor const& a_scales,
                       torch::Tensor const& b_scales,
                       c10::optional<torch::Tensor> const& bias);
|
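// Variant of cutlass_scaled_mm for asymmetric (zero-point) quantization.
// azp_adj, and optionally the per-token azp, presumably carry the precomputed
// zero-point correction terms applied to the result.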
void cutlass_scaled_mm_azp(torch::Tensor& out, torch::Tensor const& a,
                           torch::Tensor const& b,
                           torch::Tensor const& a_scales,
                           torch::Tensor const& b_scales,
                           torch::Tensor const& azp_adj,
                           c10::optional<torch::Tensor> const& azp,
                           c10::optional<torch::Tensor> const& bias);
|
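// Quantizes `input` into the int8 tensor `out` using the provided static
// `scale`; the optional `azp` presumably supplies a zero point for asymmetric
// quantization.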
void static_scaled_int8_quant(torch::Tensor& out, torch::Tensor const& input,
                              torch::Tensor const& scale,
                              c10::optional<torch::Tensor> const& azp);
|
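// Int8 quantization that computes `scales` (and, when given, `azp`) on the
// fly, presumably per token, before quantizing `input` into `out`.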
void dynamic_scaled_int8_quant(torch::Tensor& out, torch::Tensor const& input,
                               torch::Tensor& scales,
                               c10::optional<torch::Tensor> const& azp);
|
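// GEMM of `a` against a GPTQ-packed weight matrix (qzeros, scales and g_idx
// as produced by GPTQ); `bit` is the weight bit-width and `use_exllama`
// presumably selects the exllama kernel path.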
torch::Tensor gptq_gemm(torch::Tensor a, torch::Tensor b_q_weight,
                        torch::Tensor b_gptq_qzeros,
                        torch::Tensor b_gptq_scales, torch::Tensor b_g_idx,
                        bool use_exllama, int64_t bit);
|
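// Presumably reorders the GPTQ-packed `q_weight` according to `q_perm` into
// the layout expected by the exllama kernels.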
void gptq_shuffle(torch::Tensor q_weight, torch::Tensor q_perm, int64_t bit);
|
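// Quantizes `input` into the FP8 tensor `out` using the provided per-tensor
// `scale`.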
void static_scaled_fp8_quant(torch::Tensor& out, torch::Tensor const& input,
                             torch::Tensor const& scale);
|
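// FP8 quantization that computes the per-tensor `scale` on the fly before
// quantizing `input` into `out`.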
void dynamic_scaled_fp8_quant(torch::Tensor& out, torch::Tensor const& input,
                              torch::Tensor& scale);
|
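// FP8 quantization with dynamically computed per-token scales; `scale_ub`,
// when given, presumably acts as an upper bound on the computed scales.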
void dynamic_per_token_scaled_fp8_quant(
    torch::Tensor& out, torch::Tensor const& input, torch::Tensor& scale,
    c10::optional<torch::Tensor> const& scale_ub);
|
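// Marlin-based GEMM for FP8-quantized weights. size_m/size_n/size_k give the
// problem shape, `num_bits` is presumably the packed weight width, and
// `workspace` is scratch space required by the kernel.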
torch::Tensor fp8_marlin_gemm(torch::Tensor& a, torch::Tensor& b_q_weight,
                              torch::Tensor& b_scales,
                              torch::Tensor& workspace, int64_t num_bits,
                              int64_t size_m, int64_t size_n, int64_t size_k);
|