// File: hf_public_repos/candle/candle-core/src/layout.rs
//! Tensor Layouts including contiguous or sparse strides
use crate::{Error, Result, Shape};
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct Layout {
shape: Shape,
// The strides are given in number of elements and not in bytes.
stride: Vec<usize>,
start_offset: usize,
}
impl Layout {
pub fn new(shape: Shape, stride: Vec<usize>, start_offset: usize) -> Self {
Self {
shape,
stride,
start_offset,
}
}
pub fn contiguous_with_offset<S: Into<Shape>>(shape: S, start_offset: usize) -> Self {
let shape = shape.into();
let stride = shape.stride_contiguous();
Self {
shape,
stride,
start_offset,
}
}
pub fn contiguous<S: Into<Shape>>(shape: S) -> Self {
Self::contiguous_with_offset(shape, 0)
}
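// Example (illustrative, not part of the original source): contiguous strides
// are the running product of the dim sizes from the right, so a (2, 3, 4)
// shape gets strides [12, 4, 1]:
//
//     let l = Layout::contiguous((2, 3, 4));
//     assert_eq!(l.stride(), &[12, 4, 1]);
//     assert_eq!(l.start_offset(), 0);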
pub fn dims(&self) -> &[usize] {
self.shape.dims()
}
/// The dimension size for a specified dimension index.
pub fn dim<D: crate::shape::Dim>(&self, dim: D) -> Result<usize> {
let dim = dim.to_index(&self.shape, "dim")?;
Ok(self.dims()[dim])
}
pub fn shape(&self) -> &Shape {
&self.shape
}
pub fn stride(&self) -> &[usize] {
&self.stride
}
pub fn start_offset(&self) -> usize {
self.start_offset
}
/// Returns the appropriate start and stop offsets if the data is stored in a C
/// contiguous (aka row major) way.
pub fn contiguous_offsets(&self) -> Option<(usize, usize)> {
if self.is_contiguous() {
let start_o = self.start_offset;
Some((start_o, start_o + self.shape.elem_count()))
} else {
None
}
}
/// Returns true if the data is stored in a C contiguous (aka row major) way.
/// Note that this does not imply that the start offset is 0 or that there are no extra
/// elements at the end of the storage.
pub fn is_contiguous(&self) -> bool {
self.shape.is_contiguous(&self.stride)
}
/// Returns true if the data is stored in a Fortran contiguous (aka column major) way.
pub fn is_fortran_contiguous(&self) -> bool {
self.shape.is_fortran_contiguous(&self.stride)
}
pub fn narrow(&self, dim: usize, start: usize, len: usize) -> Result<Self> {
let dims = self.shape().dims();
if dim >= dims.len() {
Err(Error::DimOutOfRange {
shape: self.shape().clone(),
dim: dim as i32,
op: "narrow",
}
.bt())?
}
if start + len > dims[dim] {
Err(Error::NarrowInvalidArgs {
shape: self.shape.clone(),
dim,
start,
len,
msg: "start + len > dim_len",
}
.bt())?
}
let mut dims = dims.to_vec();
dims[dim] = len;
Ok(Self {
shape: Shape::from(dims),
stride: self.stride.clone(),
start_offset: self.start_offset + self.stride[dim] * start,
})
}
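// Example (illustrative, not part of the original source): narrowing a
// contiguous (4, 3) layout along dim 0, keeping 2 rows starting at row 1,
// keeps the strides [3, 1] and shifts the start offset by stride[0] * start:
//
//     let l = Layout::contiguous((4, 3)).narrow(0, 1, 2)?;
//     assert_eq!(l.dims(), &[2, 3]);
//     assert_eq!(l.start_offset(), 3);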
pub fn transpose(&self, dim1: usize, dim2: usize) -> Result<Self> {
let rank = self.shape.rank();
if rank <= dim1 || rank <= dim2 {
Err(Error::UnexpectedNumberOfDims {
expected: usize::max(dim1, dim2),
got: rank,
shape: self.shape().clone(),
}
.bt())?
}
let mut stride = self.stride().to_vec();
let mut dims = self.shape().dims().to_vec();
dims.swap(dim1, dim2);
stride.swap(dim1, dim2);
Ok(Self {
shape: Shape::from(dims),
stride,
start_offset: self.start_offset,
})
}
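// Example (illustrative): transposing swaps both dims and strides, so the
// result views the same storage but is no longer C contiguous:
//
//     let l = Layout::contiguous((2, 3)).transpose(0, 1)?;
//     assert_eq!(l.dims(), &[3, 2]);
//     assert_eq!(l.stride(), &[1, 3]);
//     assert!(!l.is_contiguous());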
pub fn permute(&self, idxs: &[usize]) -> Result<Self> {
let is_permutation =
idxs.len() == self.shape.rank() && (0..idxs.len()).all(|i| idxs.contains(&i));
if !is_permutation {
crate::bail!(
"dimension mismatch in permute, tensor {:?}, dims: {:?}",
self.dims(),
idxs
)
}
let stride = self.stride();
let dims = self.shape().dims();
let mut perm_stride = stride.to_vec();
let mut perm_dims = dims.to_vec();
for (i, &idx) in idxs.iter().enumerate() {
perm_stride[i] = stride[idx];
perm_dims[i] = dims[idx];
}
Ok(Self {
shape: Shape::from(perm_dims),
stride: perm_stride,
start_offset: self.start_offset,
})
}
pub fn broadcast_as<S: Into<Shape>>(&self, shape: S) -> Result<Self> {
let shape = shape.into();
if shape.rank() < self.shape().rank() {
return Err(Error::BroadcastIncompatibleShapes {
src_shape: self.shape().clone(),
dst_shape: shape,
}
.bt());
}
let added_dims = shape.rank() - self.shape().rank();
let mut stride = vec![0; added_dims];
for (&dst_dim, (&src_dim, &src_stride)) in shape.dims()[added_dims..]
.iter()
.zip(self.dims().iter().zip(self.stride()))
{
let s = if dst_dim == src_dim {
src_stride
} else if src_dim != 1 {
return Err(Error::BroadcastIncompatibleShapes {
src_shape: self.shape().clone(),
dst_shape: shape,
}
.bt());
} else {
0
};
stride.push(s)
}
Ok(Self {
shape,
stride,
start_offset: self.start_offset,
})
}
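// Example (illustrative): broadcasting prepends zero strides for the added
// dims (and uses stride 0 for size-1 dims), so the same data is re-read for
// every index along those dims:
//
//     let l = Layout::contiguous(3).broadcast_as((2, 3))?;
//     assert_eq!(l.stride(), &[0, 1]);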
pub(crate) fn strided_index(&self) -> crate::StridedIndex {
crate::StridedIndex::from_layout(self)
}
pub(crate) fn strided_blocks(&self) -> crate::StridedBlocks {
let mut block_len = 1;
let mut contiguous_dims = 0; // These are counted from the right.
for (&stride, &dim) in self.stride().iter().zip(self.dims().iter()).rev() {
if stride != block_len {
break;
}
block_len *= dim;
contiguous_dims += 1;
}
let index_dims = self.dims().len() - contiguous_dims;
if index_dims == 0 {
crate::StridedBlocks::SingleBlock {
start_offset: self.start_offset,
len: block_len,
}
} else {
let block_start_index = crate::StridedIndex::new(
&self.dims()[..index_dims],
&self.stride[..index_dims],
self.start_offset,
);
crate::StridedBlocks::MultipleBlocks {
block_start_index,
block_len,
}
}
}
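// Example (illustrative): a contiguous (2, 3) layout folds both dims into a
// single block, SingleBlock { start_offset: 0, len: 6 }; its transpose, with
// dims [3, 2] and strides [1, 3], matches no dim on the right and so yields
// MultipleBlocks with block_len 1 and one start offset per element.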
// Returns the contiguous offsets with broadcast if applicable.
pub(crate) fn offsets_b(&self) -> Option<ContiguousOffsetsWithBroadcast> {
let mut left_broadcast = 1;
let mut right_broadcast = 1;
let strides = self.stride();
let dims = self.dims();
let mut start_cont = 0;
let mut end_cont = dims.len();
for (&s, &d) in strides.iter().zip(dims.iter()) {
if s != 0 {
break;
}
start_cont += 1;
left_broadcast *= d;
}
if start_cont == dims.len() {
return Some(ContiguousOffsetsWithBroadcast {
start: self.start_offset,
len: 1,
left_broadcast,
right_broadcast: 1,
});
}
for (&s, &d) in strides.iter().zip(dims.iter()).rev() {
if s != 0 {
break;
}
end_cont -= 1;
right_broadcast *= d;
}
// Check that the inner dims are contiguous
let strides = &strides[start_cont..end_cont];
let dims = &dims[start_cont..end_cont];
let mut len = 1;
for (&stride, &dim) in strides.iter().zip(dims.iter()).rev() {
if stride != len {
return None;
}
len *= dim;
}
Some(ContiguousOffsetsWithBroadcast {
start: self.start_offset,
len,
left_broadcast,
right_broadcast,
})
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ContiguousOffsetsWithBroadcast {
pub start: usize,
pub len: usize,
pub left_broadcast: usize,
pub right_broadcast: usize,
}
// File: hf_public_repos/candle/candle-core/src/conv.rs
//! 1D and 2D Convolutions
//!
use crate::{op::BackpropOp, op::Op, Error, Result, Tensor};
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ParamsConv1D {
pub(crate) b_size: usize,
// Maybe we should have a version without l_in as this bit depends on the input and not only on
// the weights.
pub(crate) l_in: usize,
pub(crate) c_out: usize,
pub(crate) c_in: usize,
pub(crate) k_size: usize,
pub(crate) padding: usize,
pub(crate) stride: usize,
pub(crate) dilation: usize,
}
impl ParamsConv1D {
pub(crate) fn l_out(&self) -> usize {
(self.l_in + 2 * self.padding - self.dilation * (self.k_size - 1) - 1) / self.stride + 1
}
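// Worked example (illustrative): l_in = 10, k_size = 3, padding = 0,
// stride = 1, dilation = 1 gives l_out = (10 + 0 - 2 - 1) / 1 + 1 = 8.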
pub(crate) fn out_dims(&self) -> Vec<usize> {
let l_out = self.l_out();
vec![self.b_size, self.c_out, l_out]
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ParamsConvTranspose1D {
pub(crate) b_size: usize,
pub(crate) l_in: usize,
pub(crate) c_out: usize,
pub(crate) c_in: usize,
pub(crate) k_size: usize,
pub(crate) padding: usize,
pub(crate) output_padding: usize,
pub(crate) stride: usize,
pub(crate) dilation: usize,
}
impl ParamsConvTranspose1D {
pub(crate) fn l_out(&self) -> usize {
(self.l_in - 1) * self.stride - 2 * self.padding
+ self.dilation * (self.k_size - 1)
+ self.output_padding
+ 1
}
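// Worked example (illustrative): l_in = 8, k_size = 3, padding = 0,
// output_padding = 0, stride = 1, dilation = 1 gives
// l_out = (8 - 1) * 1 - 0 + 2 + 0 + 1 = 10, inverting the conv1d example.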
pub(crate) fn out_dims(&self) -> Vec<usize> {
let l_out = self.l_out();
vec![self.b_size, self.c_out, l_out]
}
}
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum CudnnFwdAlgo {
ImplicitGemm,
ImplicitPrecompGemm,
Gemm,
Direct,
Fft,
FftTiling,
Winograd,
WinogradNonFused,
Count,
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ParamsConv2D {
pub(crate) b_size: usize,
pub(crate) i_h: usize,
pub(crate) i_w: usize,
pub(crate) k_h: usize,
pub(crate) k_w: usize,
pub(crate) c_out: usize,
pub(crate) c_in: usize,
pub(crate) padding: usize,
pub(crate) stride: usize,
pub(crate) dilation: usize,
pub cudnn_fwd_algo: Option<CudnnFwdAlgo>,
}
impl ParamsConv2D {
pub(crate) fn out_h(&self) -> usize {
(self.i_h + 2 * self.padding - self.dilation * (self.k_h - 1) - 1) / self.stride + 1
}
pub(crate) fn out_w(&self) -> usize {
(self.i_w + 2 * self.padding - self.dilation * (self.k_w - 1) - 1) / self.stride + 1
}
pub(crate) fn out_dims(&self) -> Vec<usize> {
vec![self.b_size, self.c_out, self.out_h(), self.out_w()]
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ParamsConvTranspose2D {
pub(crate) b_size: usize,
pub(crate) i_h: usize,
pub(crate) i_w: usize,
pub(crate) k_h: usize,
pub(crate) k_w: usize,
pub(crate) c_out: usize,
pub(crate) c_in: usize,
pub(crate) padding: usize,
pub(crate) output_padding: usize,
pub(crate) stride: usize,
pub(crate) dilation: usize,
}
impl ParamsConvTranspose2D {
pub(crate) fn out_h(&self) -> usize {
(self.i_h - 1) * self.stride + self.dilation * (self.k_h - 1) + self.output_padding + 1
- 2 * self.padding
}
pub(crate) fn out_w(&self) -> usize {
(self.i_w - 1) * self.stride + self.dilation * (self.k_w - 1) + self.output_padding + 1
- 2 * self.padding
}
pub(crate) fn out_dims(&self) -> Vec<usize> {
vec![self.b_size, self.c_out, self.out_h(), self.out_w()]
}
}
impl Tensor {
fn conv1d_single_group(&self, kernel: &Self, params: &ParamsConv1D) -> Result<Self> {
let storage =
self.storage()
.conv1d(self.layout(), &kernel.storage(), kernel.layout(), params)?;
let op = BackpropOp::new2(self, kernel, |arg, kernel| Op::Conv1D {
arg,
kernel,
padding: params.padding,
stride: params.stride,
dilation: params.dilation,
});
let out_dims = params.out_dims();
Ok(crate::tensor::from_storage(storage, out_dims, op, false))
}
/// Applies a 1D convolution over the input tensor.
pub fn conv1d(
&self,
kernel: &Self,
padding: usize,
stride: usize,
dilation: usize,
groups: usize,
) -> Result<Self> {
let (c_out, c_in_k, k_size) = kernel.dims3()?;
let (b_size, c_in, l_in) = self.dims3()?;
if c_in != c_in_k * groups {
Err(Error::Conv1dInvalidArgs {
inp_shape: self.shape().clone(),
k_shape: kernel.shape().clone(),
padding,
stride,
msg: "the number of in-channels on the input doesn't match the kernel size",
}
.bt())?
}
let params = ParamsConv1D {
b_size,
l_in,
c_out: c_out / groups,
c_in: c_in / groups,
k_size,
padding,
stride,
dilation,
};
if groups == 1 {
self.conv1d_single_group(kernel, ¶ms)
} else {
let blocks = self.chunk(groups, 1)?;
let kernel = kernel.chunk(groups, 0)?;
let blocks = blocks
.iter()
.zip(&kernel)
.map(|(block, kernel)| block.conv1d_single_group(kernel, ¶ms))
.collect::<Result<Vec<_>>>()?;
Tensor::cat(&blocks, 1)
}
}
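// Shape sketch for the grouped case above (illustrative): with groups = 2, the
// (b, c_in, l) input is chunked along dim 1 into two (b, c_in / 2, l) blocks
// and the (c_out, c_in / 2, k) kernel is chunked along dim 0 into two
// (c_out / 2, c_in / 2, k) blocks; each pair is convolved independently and
// the outputs are concatenated along dim 1, giving (b, c_out, l_out).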
fn conv_transpose1d_single_group(
&self,
kernel: &Self,
params: &ParamsConvTranspose1D,
) -> Result<Self> {
let storage = self.storage().conv_transpose1d(
self.layout(),
&kernel.storage(),
kernel.layout(),
params,
)?;
let op = BackpropOp::new2(self, kernel, |arg, kernel| Op::ConvTranspose1D {
arg,
kernel,
padding: params.padding,
output_padding: params.output_padding,
stride: params.stride,
dilation: params.dilation,
});
let out_dims = params.out_dims();
Ok(crate::tensor::from_storage(storage, out_dims, op, false))
}
/// Applies a 1D transposed convolution over the input tensor.
pub fn conv_transpose1d(
&self,
kernel: &Self,
padding: usize,
output_padding: usize,
stride: usize,
dilation: usize,
groups: usize,
) -> Result<Self> {
let (c_in_k, c_out, k_size) = kernel.dims3()?;
let (b_size, c_in, l_in) = self.dims3()?;
if c_in != c_in_k {
crate::bail!("in_channel mismatch between input ({c_in}) and kernel ({c_in_k})")
}
if c_in % groups != 0 {
crate::bail!("in_channel {c_in} is not divisible by the number of groups")
}
let params = ParamsConvTranspose1D {
b_size,
l_in,
k_size,
c_out,
c_in: c_in / groups,
padding,
output_padding,
stride,
dilation,
};
if groups == 1 {
self.conv_transpose1d_single_group(kernel, ¶ms)
} else {
let blocks = self.chunk(groups, 1)?;
let kernel = kernel.chunk(groups, 0)?;
let blocks = blocks
.iter()
.zip(&kernel)
.map(|(block, kernel)| block.conv_transpose1d_single_group(kernel, ¶ms))
.collect::<Result<Vec<_>>>()?;
Tensor::cat(&blocks, 1)
}
}
fn conv2d_single_group(&self, kernel: &Self, params: &ParamsConv2D) -> Result<Self> {
let storage =
self.storage()
.conv2d(self.layout(), &kernel.storage(), kernel.layout(), params)?;
let op = BackpropOp::new2(self, kernel, |arg, kernel| Op::Conv2D {
arg,
kernel,
padding: params.padding,
stride: params.stride,
dilation: params.dilation,
});
let out_dims = params.out_dims();
Ok(crate::tensor::from_storage(storage, out_dims, op, false))
}
/// Applies a 2D convolution over the input tensor.
pub fn conv2d(
&self,
kernel: &Self,
padding: usize,
stride: usize,
dilation: usize,
groups: usize,
) -> Result<Self> {
let (b_size, c_in, i_h, i_w) = self.dims4()?;
let (c_out, c_in_k, k_h, k_w) = kernel.dims4()?;
if c_in != c_in_k * groups {
crate::bail!(
"in_channel mismatch between input ({c_in}, groups {groups}) and kernel ({c_in_k})"
)
}
let params = ParamsConv2D {
b_size,
i_h,
i_w,
k_h,
k_w,
c_out: c_out / groups,
c_in: c_in / groups,
padding,
stride,
dilation,
cudnn_fwd_algo: None,
};
if groups == 1 {
self.conv2d_single_group(kernel, ¶ms)
} else {
let blocks = self.chunk(groups, 1)?;
let kernel = kernel.chunk(groups, 0)?;
let blocks = blocks
.iter()
.zip(&kernel)
.map(|(block, kernel)| block.conv2d_single_group(kernel, ¶ms))
.collect::<Result<Vec<_>>>()?;
Tensor::cat(&blocks, 1)
}
}
/// Applies a 2D transposed convolution over the input tensor.
pub fn conv_transpose2d(
&self,
kernel: &Self,
padding: usize,
output_padding: usize,
stride: usize,
dilation: usize,
) -> Result<Self> {
let (b_size, c_in, i_h, i_w) = self.dims4()?;
let (c_in_k, c_out, k_h, k_w) = kernel.dims4()?;
if c_in != c_in_k {
crate::bail!("in_channel mismatch between input ({c_in}) and kernel ({c_in_k})")
}
let params = ParamsConvTranspose2D {
b_size,
i_h,
i_w,
k_h,
k_w,
c_out,
c_in,
padding,
output_padding,
stride,
dilation,
};
let storage = self.storage().conv_transpose2d(
self.layout(),
&kernel.storage(),
kernel.layout(),
¶ms,
)?;
let op = BackpropOp::new2(self, kernel, |arg, kernel| Op::ConvTranspose2D {
arg,
kernel,
padding: params.padding,
output_padding: params.output_padding,
stride: params.stride,
dilation: params.dilation,
});
let out_dims = params.out_dims();
Ok(crate::tensor::from_storage(storage, out_dims, op, false))
}
}
// File: hf_public_repos/candle/candle-core/src/variable.rs
// Variables are wrappers around tensors that can be modified; they are typically used to hold
// weights that get updated by gradient descent.
// We do not expose a public way to create variables as this would break the invariant that the
// tensor within a variable actually has `is_variable` set to `true`.
use crate::{DType, Device, Error, Result, Shape, Tensor};
/// A variable is a wrapper around a tensor, however variables can have their content modified
/// whereas tensors are immutable.
#[derive(Clone, Debug)]
pub struct Var(Tensor);
impl std::fmt::Display for Var {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
std::fmt::Display::fmt(&self.0, f)
}
}
impl std::ops::Deref for Var {
type Target = Tensor;
fn deref(&self) -> &Self::Target {
self.0.as_ref()
}
}
impl Var {
pub fn zeros<S: Into<Shape>>(shape: S, dtype: DType, device: &Device) -> Result<Self> {
let inner = Tensor::zeros_impl(shape, dtype, device, true)?;
Ok(Self(inner))
}
pub fn ones<S: Into<Shape>>(shape: S, dtype: DType, device: &Device) -> Result<Self> {
let inner = Tensor::ones_impl(shape, dtype, device, true)?;
Ok(Self(inner))
}
// Converts a tensor to a variable; if the tensor is already a variable it is returned as-is.
pub fn from_tensor(t: &Tensor) -> Result<Self> {
if t.is_variable() {
Ok(Self(t.clone()))
} else {
let inner = t.make_var()?;
Ok(Self(inner))
}
}
pub fn rand_f64<S: Into<Shape>>(
lo: f64,
up: f64,
s: S,
dtype: DType,
device: &Device,
) -> Result<Self> {
let inner = Tensor::rand_f64_impl(lo, up, s, dtype, device, true)?;
Ok(Self(inner))
}
pub fn randn_f64<S: Into<Shape>>(
mean: f64,
std: f64,
s: S,
dtype: DType,
device: &Device,
) -> Result<Self> {
let inner = Tensor::randn_f64_impl(mean, std, s, dtype, device, true)?;
Ok(Self(inner))
}
pub fn rand<S: Into<Shape>, T: crate::FloatDType>(
lo: T,
up: T,
s: S,
device: &Device,
) -> Result<Self> {
let inner = Tensor::rand_impl(lo, up, s, device, true)?;
Ok(Self(inner))
}
pub fn randn<S: Into<Shape>, T: crate::FloatDType>(
mean: T,
std: T,
s: S,
device: &Device,
) -> Result<Self> {
let inner = Tensor::randn_impl(mean, std, s, device, true)?;
Ok(Self(inner))
}
/// Creates a new tensor on the specified device using the content and shape of the input.
/// This is similar to `new` but the resulting tensor is a variable.
pub fn new<A: crate::device::NdArray>(array: A, device: &Device) -> Result<Self> {
let shape = array.shape()?;
let inner = Tensor::new_impl(array, shape, device, true)?;
Ok(Self(inner))
}
pub fn from_vec<S: Into<Shape>, D: crate::WithDType>(
data: Vec<D>,
shape: S,
device: &Device,
) -> Result<Self> {
let inner = Tensor::from_vec_impl(data, shape, device, true)?;
Ok(Self(inner))
}
pub fn from_slice<S: Into<Shape>, D: crate::WithDType>(
array: &[D],
shape: S,
device: &Device,
) -> Result<Self> {
let inner = Tensor::new_impl(array, shape.into(), device, true)?;
Ok(Self(inner))
}
pub fn as_detached_tensor(&self) -> Tensor {
self.0.detach()
}
pub fn as_tensor(&self) -> &Tensor {
&self.0
}
/// Consumes this `Var` and returns the underlying tensor.
pub fn into_inner(self) -> Tensor {
self.0
}
/// Sets the content of the inner tensor; this does not require a mutable reference as inner
/// mutability is used.
pub fn set(&self, src: &Tensor) -> Result<()> {
if self.same_storage(src) {
let msg = "cannot set a variable to a tensor that is derived from its value";
Err(Error::CannotSetVar { msg }.bt())?
}
let (mut dst, layout) = self.storage_mut_and_layout();
if !layout.is_contiguous() {
let msg = "cannot set a non-contiguous variable";
Err(Error::CannotSetVar { msg }.bt())?
}
let (src, src_l) = src.storage_and_layout();
if layout.shape() != src_l.shape() {
Err(Error::ShapeMismatchBinaryOp {
lhs: layout.shape().clone(),
rhs: src_l.shape().clone(),
op: "set",
}
.bt())?
}
src.copy_strided_src(&mut dst, layout.start_offset(), src_l)?;
Ok(())
}
}
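// Illustrative usage sketch (not part of the original source), assuming a CPU
// device:
//
//     let v = Var::zeros((2, 2), DType::F32, &Device::Cpu)?;
//     let t = Tensor::ones((2, 2), DType::F32, &Device::Cpu)?;
//     v.set(&t)?; // updates the variable contents through inner mutability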
// File: hf_public_repos/candle/candle-core/src/storage.rs
use crate::backend::BackendStorage;
use crate::op::{self, CmpOp, ReduceOp};
use crate::{CpuStorage, CudaStorage, DType, Device, Error, Layout, MetalStorage, Result, Shape};
use crate::{CustomOp1, CustomOp2, CustomOp3, InplaceOp1, InplaceOp2, InplaceOp3};
// We do not want to implement Clone on Storage as cloning may fail because of
// an out-of-memory error. Instead, `try_clone` should be used.
#[derive(Debug)]
pub enum Storage {
Cpu(CpuStorage),
Cuda(CudaStorage),
Metal(MetalStorage),
}
impl Storage {
pub fn try_clone(&self, layout: &Layout) -> Result<Self> {
match self {
Self::Cpu(storage) => Ok(Self::Cpu(storage.clone())),
Self::Cuda(storage) => {
let storage = storage.try_clone(layout)?;
Ok(Self::Cuda(storage))
}
Self::Metal(storage) => {
let storage = storage.try_clone(layout)?;
Ok(Self::Metal(storage))
}
}
}
pub fn device(&self) -> Device {
match self {
Self::Cpu(_) => Device::Cpu,
Self::Cuda(storage) => Device::Cuda(storage.device().clone()),
Self::Metal(storage) => Device::Metal(storage.device().clone()),
}
}
pub fn dtype(&self) -> DType {
match self {
Self::Cpu(storage) => storage.dtype(),
Self::Cuda(storage) => storage.dtype(),
Self::Metal(storage) => storage.dtype(),
}
}
pub(crate) fn same_device(&self, rhs: &Self, op: &'static str) -> Result<()> {
let lhs_device = self.device();
let rhs_device = rhs.device();
let lhs = lhs_device.location();
let rhs = rhs_device.location();
let same_device = if self.device().is_metal() {
// On Metal, we require the device to be exactly the same rather than
// just having the same location. On CUDA this is not necessary as all CudaDevices on
// the same GPU will use the same CUDA stream.
lhs_device.same_device(&rhs_device)
} else {
lhs == rhs
};
if !same_device {
Err(Error::DeviceMismatchBinaryOp { lhs, rhs, op }.bt())
} else {
Ok(())
}
}
pub(crate) fn same_dtype(&self, rhs: &Self, op: &'static str) -> Result<()> {
let lhs = self.dtype();
let rhs = rhs.dtype();
if lhs != rhs {
Err(Error::DTypeMismatchBinaryOp { lhs, rhs, op }.bt())
} else {
Ok(())
}
}
pub(crate) fn affine(&self, layout: &Layout, mul: f64, add: f64) -> Result<Self> {
match self {
Storage::Cpu(storage) => {
let storage = storage.affine(layout, mul, add)?;
Ok(Self::Cpu(storage))
}
Self::Cuda(storage) => {
let storage = storage.affine(layout, mul, add)?;
Ok(Self::Cuda(storage))
}
Self::Metal(storage) => {
let storage = storage.affine(layout, mul, add)?;
Ok(Self::Metal(storage))
}
}
}
pub(crate) fn powf(&self, layout: &Layout, alpha: f64) -> Result<Self> {
match self {
Storage::Cpu(storage) => {
let storage = storage.powf(layout, alpha)?;
Ok(Self::Cpu(storage))
}
Self::Cuda(storage) => {
let storage = storage.powf(layout, alpha)?;
Ok(Self::Cuda(storage))
}
Self::Metal(storage) => {
let storage = storage.powf(layout, alpha)?;
Ok(Self::Metal(storage))
}
}
}
pub(crate) fn elu(&self, layout: &Layout, alpha: f64) -> Result<Self> {
match self {
Storage::Cpu(storage) => {
let storage = storage.elu(layout, alpha)?;
Ok(Self::Cpu(storage))
}
Self::Cuda(storage) => {
let storage = storage.elu(layout, alpha)?;
Ok(Self::Cuda(storage))
}
Self::Metal(storage) => {
let storage = storage.elu(layout, alpha)?;
Ok(Self::Metal(storage))
}
}
}
pub(crate) fn cmp(
&self,
op: CmpOp,
rhs: &Self,
lhs_layout: &Layout,
rhs_layout: &Layout,
) -> Result<Self> {
self.same_device(rhs, "cmp")?;
self.same_dtype(rhs, "cmp")?;
match (self, rhs) {
(Storage::Cpu(lhs), Storage::Cpu(rhs)) => {
let storage = lhs.cmp(op, rhs, lhs_layout, rhs_layout)?;
Ok(Self::Cpu(storage))
}
(Self::Cuda(lhs), Self::Cuda(rhs)) => {
let storage = lhs.cmp(op, rhs, lhs_layout, rhs_layout)?;
Ok(Self::Cuda(storage))
}
(Self::Metal(lhs), Self::Metal(rhs)) => {
let storage = lhs.cmp(op, rhs, lhs_layout, rhs_layout)?;
Ok(Self::Metal(storage))
}
(lhs, rhs) => {
// Should not happen because of the same device check above but we're defensive
// anyway.
Err(Error::DeviceMismatchBinaryOp {
lhs: lhs.device().location(),
rhs: rhs.device().location(),
op: "cmp",
}
.bt())
}
}
}
pub(crate) fn reduce_op(&self, op: ReduceOp, layout: &Layout, s: &[usize]) -> Result<Self> {
match self {
Storage::Cpu(storage) => {
let storage = storage.reduce_op(op, layout, s)?;
Ok(Self::Cpu(storage))
}
Self::Cuda(storage) => {
let storage = storage.reduce_op(op, layout, s)?;
Ok(Self::Cuda(storage))
}
Self::Metal(storage) => {
let storage = storage.reduce_op(op, layout, s)?;
Ok(Self::Metal(storage))
}
}
}
pub(crate) fn to_dtype(&self, layout: &Layout, dtype: DType) -> Result<Self> {
match self {
Storage::Cpu(storage) => {
let storage = storage.to_dtype(layout, dtype)?;
Ok(Self::Cpu(storage))
}
Self::Cuda(storage) => {
let storage = storage.to_dtype(layout, dtype)?;
Ok(Self::Cuda(storage))
}
Self::Metal(storage) => {
let storage = storage.to_dtype(layout, dtype)?;
Ok(Self::Metal(storage))
}
}
}
pub(crate) fn apply_op1(&self, l: &Layout, c: &dyn CustomOp1) -> Result<(Self, Shape)> {
match self {
Self::Cpu(storage) => {
let (storage, shape) = c.cpu_fwd(storage, l)?;
Ok((Self::Cpu(storage), shape))
}
Self::Cuda(storage) => {
let (storage, shape) = c.cuda_fwd(storage, l)?;
Ok((Self::Cuda(storage), shape))
}
Self::Metal(storage) => {
let (storage, shape) = c.metal_fwd(storage, l)?;
Ok((Self::Metal(storage), shape))
}
}
}
pub(crate) fn apply_op2(
&self,
l1: &Layout,
t2: &Self,
l2: &Layout,
c: &dyn CustomOp2,
) -> Result<(Self, Shape)> {
self.same_device(t2, c.name())?;
match (self, t2) {
(Self::Cpu(s1), Self::Cpu(s2)) => {
let (s, shape) = c.cpu_fwd(s1, l1, s2, l2)?;
Ok((Self::Cpu(s), shape))
}
(Self::Cuda(s1), Self::Cuda(s2)) => {
let (s, shape) = c.cuda_fwd(s1, l1, s2, l2)?;
Ok((Self::Cuda(s), shape))
}
(Self::Metal(s1), Self::Metal(s2)) => {
let (s, shape) = c.metal_fwd(s1, l1, s2, l2)?;
Ok((Self::Metal(s), shape))
}
_ => unreachable!(),
}
}
pub(crate) fn apply_op3(
&self,
l1: &Layout,
t2: &Self,
l2: &Layout,
t3: &Self,
l3: &Layout,
c: &dyn CustomOp3,
) -> Result<(Self, Shape)> {
self.same_device(t2, c.name())?;
self.same_device(t3, c.name())?;
match (self, t2, t3) {
(Self::Cpu(s1), Self::Cpu(s2), Self::Cpu(s3)) => {
let (s, shape) = c.cpu_fwd(s1, l1, s2, l2, s3, l3)?;
Ok((Self::Cpu(s), shape))
}
(Self::Cuda(s1), Self::Cuda(s2), Self::Cuda(s3)) => {
let (s, shape) = c.cuda_fwd(s1, l1, s2, l2, s3, l3)?;
Ok((Self::Cuda(s), shape))
}
(Self::Metal(s1), Self::Metal(s2), Self::Metal(s3)) => {
let (s, shape) = c.metal_fwd(s1, l1, s2, l2, s3, l3)?;
Ok((Self::Metal(s), shape))
}
_ => unreachable!(),
}
}
pub(crate) fn inplace_op1(&mut self, l: &Layout, c: &dyn InplaceOp1) -> Result<()> {
match self {
Self::Cpu(storage) => c.cpu_fwd(storage, l),
Self::Cuda(storage) => c.cuda_fwd(storage, l),
Self::Metal(storage) => c.metal_fwd(storage, l),
}
}
pub(crate) fn inplace_op2(
&mut self,
l1: &Layout,
t2: &Self,
l2: &Layout,
c: &dyn InplaceOp2,
) -> Result<()> {
self.same_device(t2, c.name())?;
match (self, t2) {
(Self::Cpu(s1), Self::Cpu(s2)) => c.cpu_fwd(s1, l1, s2, l2),
(Self::Cuda(s1), Self::Cuda(s2)) => c.cuda_fwd(s1, l1, s2, l2),
(Self::Metal(s1), Self::Metal(s2)) => c.metal_fwd(s1, l1, s2, l2),
_ => unreachable!(),
}
}
pub(crate) fn inplace_op3(
&mut self,
l1: &Layout,
t2: &Self,
l2: &Layout,
t3: &Self,
l3: &Layout,
c: &dyn InplaceOp3,
) -> Result<()> {
self.same_device(t2, c.name())?;
self.same_device(t3, c.name())?;
match (self, t2, t3) {
(Self::Cpu(s1), Self::Cpu(s2), Self::Cpu(s3)) => c.cpu_fwd(s1, l1, s2, l2, s3, l3),
(Self::Cuda(s1), Self::Cuda(s2), Self::Cuda(s3)) => c.cuda_fwd(s1, l1, s2, l2, s3, l3),
(Self::Metal(s1), Self::Metal(s2), Self::Metal(s3)) => {
c.metal_fwd(s1, l1, s2, l2, s3, l3)
}
_ => unreachable!(),
}
}
pub(crate) fn unary_impl<B: op::UnaryOpT>(&self, layout: &Layout) -> Result<Self> {
match self {
Storage::Cpu(storage) => {
let storage = storage.unary_impl::<B>(layout)?;
Ok(Self::Cpu(storage))
}
Self::Cuda(storage) => {
let storage = storage.unary_impl::<B>(layout)?;
Ok(Self::Cuda(storage))
}
Self::Metal(storage) => {
let storage = storage.unary_impl::<B>(layout)?;
Ok(Self::Metal(storage))
}
}
}
pub(crate) fn binary_impl<B: op::BinaryOpT>(
&self,
rhs: &Self,
lhs_layout: &Layout,
rhs_layout: &Layout,
) -> Result<Self> {
self.same_device(rhs, B::NAME)?;
self.same_dtype(rhs, B::NAME)?;
match (self, rhs) {
(Storage::Cpu(lhs), Storage::Cpu(rhs)) => {
let storage = lhs.binary_impl::<B>(rhs, lhs_layout, rhs_layout)?;
Ok(Self::Cpu(storage))
}
(Self::Cuda(lhs), Self::Cuda(rhs)) => {
let storage = lhs.binary_impl::<B>(rhs, lhs_layout, rhs_layout)?;
Ok(Self::Cuda(storage))
}
(Self::Metal(lhs), Self::Metal(rhs)) => {
let storage = lhs.binary_impl::<B>(rhs, lhs_layout, rhs_layout)?;
Ok(Self::Metal(storage))
}
(lhs, rhs) => {
// Should not happen because of the same device check above but we're defensive
// anyway.
Err(Error::DeviceMismatchBinaryOp {
lhs: lhs.device().location(),
rhs: rhs.device().location(),
op: B::NAME,
}
.bt())
}
}
}
pub(crate) fn conv1d(
&self,
l: &Layout,
kernel: &Self,
kernel_l: &Layout,
params: &crate::conv::ParamsConv1D,
) -> Result<Self> {
self.same_device(kernel, "conv1d")?;
self.same_dtype(kernel, "conv1d")?;
match (self, &kernel) {
(Storage::Cpu(inp), Storage::Cpu(kernel)) => {
let s = inp.conv1d(l, kernel, kernel_l, params)?;
Ok(Self::Cpu(s))
}
(Storage::Cuda(inp), Storage::Cuda(kernel)) => {
let s = inp.conv1d(l, kernel, kernel_l, params)?;
Ok(Self::Cuda(s))
}
(Storage::Metal(inp), Storage::Metal(kernel)) => {
let s = inp.conv1d(l, kernel, kernel_l, params)?;
Ok(Self::Metal(s))
}
(lhs, rhs) => Err(Error::DeviceMismatchBinaryOp {
lhs: lhs.device().location(),
rhs: rhs.device().location(),
op: "conv1d",
}
.bt()),
}
}
pub(crate) fn conv_transpose1d(
&self,
l: &Layout,
kernel: &Self,
kernel_l: &Layout,
params: &crate::conv::ParamsConvTranspose1D,
) -> Result<Self> {
self.same_device(kernel, "conv-transpose1d")?;
self.same_dtype(kernel, "conv-transpose1d")?;
match (self, &kernel) {
(Storage::Cpu(inp), Storage::Cpu(kernel)) => {
let s = inp.conv_transpose1d(l, kernel, kernel_l, params)?;
Ok(Self::Cpu(s))
}
(Storage::Cuda(inp), Storage::Cuda(kernel)) => {
let s = inp.conv_transpose1d(l, kernel, kernel_l, params)?;
Ok(Self::Cuda(s))
}
(Storage::Metal(inp), Storage::Metal(kernel)) => {
let s = inp.conv_transpose1d(l, kernel, kernel_l, params)?;
Ok(Self::Metal(s))
}
(lhs, rhs) => Err(Error::DeviceMismatchBinaryOp {
lhs: lhs.device().location(),
rhs: rhs.device().location(),
op: "conv-transpose1d",
}
.bt()),
}
}
pub(crate) fn conv2d(
&self,
l: &Layout,
kernel: &Self,
kernel_l: &Layout,
params: &crate::conv::ParamsConv2D,
) -> Result<Self> {
self.same_device(kernel, "conv2d")?;
self.same_dtype(kernel, "conv2d")?;
match (self, &kernel) {
(Storage::Cpu(inp), Storage::Cpu(kernel)) => {
let s = inp.conv2d(l, kernel, kernel_l, params)?;
Ok(Self::Cpu(s))
}
(Storage::Cuda(inp), Storage::Cuda(kernel)) => {
let s = inp.conv2d(l, kernel, kernel_l, params)?;
Ok(Self::Cuda(s))
}
(Storage::Metal(inp), Storage::Metal(kernel)) => {
let s = inp.conv2d(l, kernel, kernel_l, params)?;
Ok(Self::Metal(s))
}
(lhs, rhs) => Err(Error::DeviceMismatchBinaryOp {
lhs: lhs.device().location(),
rhs: rhs.device().location(),
op: "conv2d",
}
.bt()),
}
}
pub(crate) fn conv_transpose2d(
&self,
l: &Layout,
kernel: &Self,
kernel_l: &Layout,
params: &crate::conv::ParamsConvTranspose2D,
) -> Result<Self> {
self.same_device(kernel, "conv_transpose2d")?;
self.same_dtype(kernel, "conv_transpose2d")?;
match (self, &kernel) {
(Storage::Cpu(inp), Storage::Cpu(kernel)) => {
let s = inp.conv_transpose2d(l, kernel, kernel_l, params)?;
Ok(Self::Cpu(s))
}
(Storage::Cuda(inp), Storage::Cuda(kernel)) => {
let s = inp.conv_transpose2d(l, kernel, kernel_l, params)?;
Ok(Self::Cuda(s))
}
(Storage::Metal(inp), Storage::Metal(kernel)) => {
let s = inp.conv_transpose2d(l, kernel, kernel_l, params)?;
Ok(Self::Metal(s))
}
(lhs, rhs) => Err(Error::DeviceMismatchBinaryOp {
lhs: lhs.device().location(),
rhs: rhs.device().location(),
op: "conv_transpose2d",
}
.bt()),
}
}
pub(crate) fn avg_pool2d(
&self,
layout: &Layout,
kernel_size: (usize, usize),
stride: (usize, usize),
) -> Result<Self> {
match self {
Storage::Cpu(storage) => {
let storage = storage.avg_pool2d(layout, kernel_size, stride)?;
Ok(Self::Cpu(storage))
}
Self::Cuda(storage) => {
let storage = storage.avg_pool2d(layout, kernel_size, stride)?;
Ok(Self::Cuda(storage))
}
Self::Metal(storage) => {
let storage = storage.avg_pool2d(layout, kernel_size, stride)?;
Ok(Self::Metal(storage))
}
}
}
pub(crate) fn max_pool2d(
&self,
layout: &Layout,
kernel_size: (usize, usize),
stride: (usize, usize),
) -> Result<Self> {
match self {
Storage::Cpu(storage) => {
let storage = storage.max_pool2d(layout, kernel_size, stride)?;
Ok(Self::Cpu(storage))
}
Self::Cuda(storage) => {
let storage = storage.max_pool2d(layout, kernel_size, stride)?;
Ok(Self::Cuda(storage))
}
Self::Metal(storage) => {
let storage = storage.max_pool2d(layout, kernel_size, stride)?;
Ok(Self::Metal(storage))
}
}
}
pub(crate) fn upsample_nearest1d(&self, layout: &Layout, sz: usize) -> Result<Self> {
match self {
Storage::Cpu(storage) => {
let storage = storage.upsample_nearest1d(layout, sz)?;
Ok(Self::Cpu(storage))
}
Self::Cuda(storage) => {
let storage = storage.upsample_nearest1d(layout, sz)?;
Ok(Self::Cuda(storage))
}
Self::Metal(storage) => {
let storage = storage.upsample_nearest1d(layout, sz)?;
Ok(Self::Metal(storage))
}
}
}
pub(crate) fn upsample_nearest2d(&self, layout: &Layout, h: usize, w: usize) -> Result<Self> {
match self {
Storage::Cpu(storage) => {
let storage = storage.upsample_nearest2d(layout, h, w)?;
Ok(Self::Cpu(storage))
}
Self::Cuda(storage) => {
let storage = storage.upsample_nearest2d(layout, h, w)?;
Ok(Self::Cuda(storage))
}
Self::Metal(storage) => {
let storage = storage.upsample_nearest2d(layout, h, w)?;
Ok(Self::Metal(storage))
}
}
}
pub(crate) fn where_cond(
&self,
layout: &Layout,
t: &Self,
layout_t: &Layout,
f: &Self,
layout_f: &Layout,
) -> Result<Self> {
self.same_device(t, "where")?;
self.same_device(f, "where")?;
t.same_dtype(f, "where")?;
match (self, t, f) {
(Storage::Cpu(cond), Storage::Cpu(t), Storage::Cpu(f)) => {
let storage = cond.where_cond(layout, t, layout_t, f, layout_f)?;
Ok(Self::Cpu(storage))
}
(Self::Cuda(cond), Self::Cuda(t), Self::Cuda(f)) => {
let storage = cond.where_cond(layout, t, layout_t, f, layout_f)?;
Ok(Self::Cuda(storage))
}
(Self::Metal(cond), Self::Metal(t), Self::Metal(f)) => {
let storage = cond.where_cond(layout, t, layout_t, f, layout_f)?;
Ok(Self::Metal(storage))
}
(_, lhs, rhs) => Err(Error::DeviceMismatchBinaryOp {
lhs: lhs.device().location(),
rhs: rhs.device().location(),
op: "where",
}
.bt()),
}
}
pub(crate) fn gather(
&self,
l: &Layout,
indexes: &Self,
indexes_l: &Layout,
d: usize,
) -> Result<Self> {
self.same_device(indexes, "gather")?;
match (self, indexes) {
(Self::Cpu(s), Self::Cpu(indexes)) => {
let storage = s.gather(l, indexes, indexes_l, d)?;
Ok(Self::Cpu(storage))
}
(Self::Cuda(s), Self::Cuda(indexes)) => {
let storage = s.gather(l, indexes, indexes_l, d)?;
Ok(Self::Cuda(storage))
}
(Self::Metal(s), Self::Metal(indexes)) => {
let storage = s.gather(l, indexes, indexes_l, d)?;
Ok(Self::Metal(storage))
}
_ => unreachable!(),
}
}
pub(crate) fn scatter_add(
&self,
l: &Layout,
indexes: &Self,
indexes_l: &Layout,
source: &Self,
source_l: &Layout,
d: usize,
) -> Result<Self> {
self.same_device(indexes, "scatter-add")?;
self.same_device(source, "scatter-add")?;
match (self, indexes, source) {
(Self::Cpu(s), Self::Cpu(indexes), Self::Cpu(source)) => {
let storage = s.scatter_add(l, indexes, indexes_l, source, source_l, d)?;
Ok(Self::Cpu(storage))
}
(Self::Cuda(s), Self::Cuda(indexes), Self::Cuda(source)) => {
let storage = s.scatter_add(l, indexes, indexes_l, source, source_l, d)?;
Ok(Self::Cuda(storage))
}
(Self::Metal(s), Self::Metal(indexes), Self::Metal(source)) => {
let storage = s.scatter_add(l, indexes, indexes_l, source, source_l, d)?;
Ok(Self::Metal(storage))
}
_ => unreachable!(),
}
}
pub(crate) fn index_add(
&self,
l: &Layout,
indexes: &Self,
indexes_l: &Layout,
source: &Self,
source_l: &Layout,
d: usize,
) -> Result<Self> {
self.same_device(indexes, "index-add")?;
self.same_device(source, "index-add")?;
match (self, indexes, source) {
(Self::Cpu(s), Self::Cpu(indexes), Self::Cpu(source)) => {
let storage = s.index_add(l, indexes, indexes_l, source, source_l, d)?;
Ok(Self::Cpu(storage))
}
(Self::Cuda(s), Self::Cuda(indexes), Self::Cuda(source)) => {
let storage = s.index_add(l, indexes, indexes_l, source, source_l, d)?;
Ok(Self::Cuda(storage))
}
(Self::Metal(s), Self::Metal(indexes), Self::Metal(source)) => {
let storage = s.index_add(l, indexes, indexes_l, source, source_l, d)?;
Ok(Self::Metal(storage))
}
_ => unreachable!(),
}
}
pub(crate) fn index_select(
&self,
rhs: &Self,
lhs_l: &Layout,
rhs_l: &Layout,
d: usize,
) -> Result<Self> {
self.same_device(rhs, "index-select")?;
match (self, rhs) {
(Self::Cpu(lhs), Self::Cpu(rhs)) => {
let storage = lhs.index_select(rhs, lhs_l, rhs_l, d)?;
Ok(Self::Cpu(storage))
}
(Self::Cuda(lhs), Self::Cuda(rhs)) => {
let storage = lhs.index_select(rhs, lhs_l, rhs_l, d)?;
Ok(Self::Cuda(storage))
}
(Self::Metal(lhs), Self::Metal(rhs)) => {
let storage = lhs.index_select(rhs, lhs_l, rhs_l, d)?;
Ok(Self::Metal(storage))
}
(lhs, rhs) => Err(Error::DeviceMismatchBinaryOp {
lhs: lhs.device().location(),
rhs: rhs.device().location(),
op: "index-select",
}
.bt()),
}
}
pub(crate) fn matmul(
&self,
rhs: &Self,
bmnk: (usize, usize, usize, usize),
lhs_layout: &Layout,
rhs_layout: &Layout,
) -> Result<Self> {
self.same_device(rhs, "matmul")?;
self.same_dtype(rhs, "matmul")?;
match (self, rhs) {
(Self::Cpu(lhs), Self::Cpu(rhs)) => {
let storage = lhs.matmul(rhs, bmnk, lhs_layout, rhs_layout)?;
Ok(Self::Cpu(storage))
}
(Self::Cuda(lhs), Self::Cuda(rhs)) => {
let storage = lhs.matmul(rhs, bmnk, lhs_layout, rhs_layout)?;
Ok(Self::Cuda(storage))
}
(Self::Metal(lhs), Self::Metal(rhs)) => {
let storage = lhs.matmul(rhs, bmnk, lhs_layout, rhs_layout)?;
Ok(Self::Metal(storage))
}
(lhs, rhs) => Err(Error::DeviceMismatchBinaryOp {
lhs: lhs.device().location(),
rhs: rhs.device().location(),
op: "matmul",
}
.bt()),
}
}
// `self`, the source, can be strided whereas `dst` is contiguous.
pub(crate) fn copy_strided_src(
&self,
dst: &mut Self,
dst_offset: usize,
src_l: &Layout,
) -> Result<()> {
match (self, dst) {
(Self::Cpu(src), Self::Cpu(dst)) => src.copy_strided_src(dst, dst_offset, src_l),
(Self::Cuda(src), Self::Cuda(dst)) => Ok(src.copy_strided_src(dst, dst_offset, src_l)?),
(Self::Metal(src), Self::Metal(dst)) => {
Ok(src.copy_strided_src(dst, dst_offset, src_l)?)
}
(lhs, rhs) => Err(Error::DeviceMismatchBinaryOp {
lhs: lhs.device().location(),
rhs: rhs.device().location(),
op: "copy",
}
.bt()),
}
}
#[allow(clippy::too_many_arguments)]
pub(crate) fn copy2d(
&self,
dst: &mut Self,
d1: usize,
d2: usize,
src_s: usize,
dst_s: usize,
src_o: usize,
dst_o: usize,
) -> Result<()> {
match (self, dst) {
(Self::Cpu(src), Self::Cpu(dst)) => src.copy2d(dst, d1, d2, src_s, dst_s, src_o, dst_o),
(Self::Cuda(src), Self::Cuda(dst)) => {
Ok(src.copy2d(dst, d1, d2, src_s, dst_s, src_o, dst_o)?)
}
(Self::Metal(src), Self::Metal(dst)) => {
Ok(src.copy2d(dst, d1, d2, src_s, dst_s, src_o, dst_o)?)
}
(lhs, rhs) => Err(Error::DeviceMismatchBinaryOp {
lhs: lhs.device().location(),
rhs: rhs.device().location(),
op: "copy2d",
}
.bt()),
}
}
}
// File: hf_public_repos/candle/candle-core/src/accelerate.rs
#![allow(dead_code)]
use libc::{c_char, c_double, c_float, c_int, c_long, c_ulong};
mod ffi {
use super::*;
extern "C" {
// It would be nice to be able to switch to the NEWLAPACK version of the function but this
// seems to trigger some link error. Available function names can be seen here:
// /Library/Developer/CommandLineTools/SDKs/MacOSX13.3.sdk/System/Library/Frameworks/Accelerate.framework/Versions/A/Accelerate.tbd
#[link_name = "sgemm_"]
pub fn sgemm_ffi(
transa: *const c_char,
transb: *const c_char,
m: *const c_int,
n: *const c_int,
k: *const c_int,
alpha: *const c_float,
a: *const c_float,
lda: *const c_int,
b: *const c_float,
ldb: *const c_int,
beta: *const c_float,
c: *mut c_float,
ldc: *const c_int,
);
#[link_name = "dgemm_"]
pub fn dgemm_ffi(
transa: *const c_char,
transb: *const c_char,
m: *const c_int,
n: *const c_int,
k: *const c_int,
alpha: *const c_double,
a: *const c_double,
lda: *const c_int,
b: *const c_double,
ldb: *const c_int,
beta: *const c_double,
c: *mut c_double,
ldc: *const c_int,
);
pub fn vvexpf(dst: *mut c_float, src: *const c_float, len: *const c_int);
pub fn vvexp(dst: *mut c_double, src: *const c_double, len: *const c_int);
pub fn vvsqrtf(dst: *mut c_float, src: *const c_float, len: *const c_int);
pub fn vvsqrt(dst: *mut c_double, src: *const c_double, len: *const c_int);
pub fn vvsinf(dst: *mut c_float, src: *const c_float, len: *const c_int);
pub fn vvsin(dst: *mut c_double, src: *const c_double, len: *const c_int);
pub fn vvcosf(dst: *mut c_float, src: *const c_float, len: *const c_int);
pub fn vvcos(dst: *mut c_double, src: *const c_double, len: *const c_int);
pub fn vvlogf(dst: *mut c_float, src: *const c_float, len: *const c_int);
pub fn vvlog(dst: *mut c_double, src: *const c_double, len: *const c_int);
pub fn vvtanhf(dst: *mut c_float, src: *const c_float, len: *const c_int);
pub fn vvtanh(dst: *mut c_double, src: *const c_double, len: *const c_int);
pub fn vDSP_vaddD(
_: *const c_double,
_: c_long,
_: *const c_double,
_: c_long,
_: *mut c_double,
_: c_long,
_: c_ulong,
);
pub fn vDSP_vadd(
_: *const c_float,
_: c_long,
_: *const c_float,
_: c_long,
_: *mut c_float,
_: c_long,
_: c_ulong,
);
pub fn vDSP_vsubD(
_: *const c_double,
_: c_long,
_: *const c_double,
_: c_long,
_: *mut c_double,
_: c_long,
_: c_ulong,
);
pub fn vDSP_vsub(
_: *const c_float,
_: c_long,
_: *const c_float,
_: c_long,
_: *mut c_float,
_: c_long,
_: c_ulong,
);
pub fn vDSP_vmulD(
_: *const c_double,
_: c_long,
_: *const c_double,
_: c_long,
_: *mut c_double,
_: c_long,
_: c_ulong,
);
pub fn vDSP_vmul(
_: *const c_float,
_: c_long,
_: *const c_float,
_: c_long,
_: *mut c_float,
_: c_long,
_: c_ulong,
);
pub fn vDSP_vdivD(
_: *const c_double,
_: c_long,
_: *const c_double,
_: c_long,
_: *mut c_double,
_: c_long,
_: c_ulong,
);
pub fn vDSP_vdiv(
_: *const c_float,
_: c_long,
_: *const c_float,
_: c_long,
_: *mut c_float,
_: c_long,
_: c_ulong,
);
pub fn vDSP_vminD(
_: *const c_double,
_: c_long,
_: *const c_double,
_: c_long,
_: *mut c_double,
_: c_long,
_: c_ulong,
);
pub fn vDSP_vmin(
_: *const c_float,
_: c_long,
_: *const c_float,
_: c_long,
_: *mut c_float,
_: c_long,
_: c_ulong,
);
pub fn vDSP_vmaxD(
_: *const c_double,
_: c_long,
_: *const c_double,
_: c_long,
_: *mut c_double,
_: c_long,
_: c_ulong,
);
pub fn vDSP_vmax(
_: *const c_float,
_: c_long,
_: *const c_float,
_: c_long,
_: *mut c_float,
_: c_long,
_: c_ulong,
);
}
}
#[allow(clippy::too_many_arguments)]
#[inline]
pub unsafe fn sgemm(
transa: u8,
transb: u8,
m: i32,
n: i32,
k: i32,
alpha: f32,
a: &[f32],
lda: i32,
b: &[f32],
ldb: i32,
beta: f32,
c: &mut [f32],
ldc: i32,
) {
ffi::sgemm_ffi(
&(transa as c_char),
&(transb as c_char),
&m,
&n,
&k,
&alpha,
a.as_ptr(),
&lda,
b.as_ptr(),
&ldb,
&beta,
c.as_mut_ptr(),
&ldc,
)
}
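// Illustrative usage sketch (not part of the original source): computing
// C = A * B in column-major order with A the 2x2 identity, so C ends up equal
// to B. `b'N'` means "no transpose" for the corresponding operand:
//
//     let a = [1.0f32, 0.0, 0.0, 1.0]; // 2x2 identity, column-major
//     let b = [1.0f32, 2.0, 3.0, 4.0];
//     let mut c = [0.0f32; 4];
//     unsafe { sgemm(b'N', b'N', 2, 2, 2, 1.0, &a, 2, &b, 2, 0.0, &mut c, 2) };
//     assert_eq!(c, b);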
#[allow(clippy::too_many_arguments)]
#[inline]
pub unsafe fn dgemm(
transa: u8,
transb: u8,
m: i32,
n: i32,
k: i32,
alpha: f64,
a: &[f64],
lda: i32,
b: &[f64],
ldb: i32,
beta: f64,
c: &mut [f64],
ldc: i32,
) {
ffi::dgemm_ffi(
&(transa as c_char),
&(transb as c_char),
&m,
&n,
&k,
&alpha,
a.as_ptr(),
&lda,
b.as_ptr(),
&ldb,
&beta,
c.as_mut_ptr(),
&ldc,
)
}
#[inline]
pub fn vs_exp(a: &[f32], y: &mut [f32]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vvexpf(y.as_mut_ptr(), a.as_ptr(), &(a_len as i32)) }
}
#[inline]
pub fn vd_exp(a: &[f64], y: &mut [f64]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vvexp(y.as_mut_ptr(), a.as_ptr(), &(a_len as i32)) }
}
#[inline]
pub fn vs_sqrt(a: &[f32], y: &mut [f32]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vvsqrtf(y.as_mut_ptr(), a.as_ptr(), &(a_len as i32)) }
}
#[inline]
pub fn vd_sqrt(a: &[f64], y: &mut [f64]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vvsqrt(y.as_mut_ptr(), a.as_ptr(), &(a_len as i32)) }
}
#[inline]
pub fn vs_sin(a: &[f32], y: &mut [f32]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vvsinf(y.as_mut_ptr(), a.as_ptr(), &(a_len as i32)) }
}
#[inline]
pub fn vd_sin(a: &[f64], y: &mut [f64]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vvsin(y.as_mut_ptr(), a.as_ptr(), &(a_len as i32)) }
}
#[inline]
pub fn vs_cos(a: &[f32], y: &mut [f32]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vvcosf(y.as_mut_ptr(), a.as_ptr(), &(a_len as i32)) }
}
#[inline]
pub fn vd_cos(a: &[f64], y: &mut [f64]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vvcos(y.as_mut_ptr(), a.as_ptr(), &(a_len as i32)) }
}
#[inline]
pub fn vs_tanh(a: &[f32], y: &mut [f32]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vvtanhf(y.as_mut_ptr(), a.as_ptr(), &(a_len as i32)) }
}
#[inline]
pub fn vd_tanh(a: &[f64], y: &mut [f64]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vvtanh(y.as_mut_ptr(), a.as_ptr(), &(a_len as i32)) }
}
#[inline]
pub fn vs_ln(a: &[f32], y: &mut [f32]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vvlogf(y.as_mut_ptr(), a.as_ptr(), &(a_len as i32)) }
}
#[inline]
pub fn vd_ln(a: &[f64], y: &mut [f64]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
unsafe { ffi::vvlog(y.as_mut_ptr(), a.as_ptr(), &(a_len as i32)) }
}
#[inline]
pub fn vs_sqr(a: &[f32], y: &mut [f32]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
y.iter_mut().zip(a.iter()).for_each(|(y, a)| *y = *a * *a)
}
#[inline]
pub fn vd_sqr(a: &[f64], y: &mut [f64]) {
let a_len = a.len();
let y_len = y.len();
if a_len != y_len {
panic!("a and y have different lengths {a_len} <> {y_len}")
}
y.iter_mut().zip(a.iter()).for_each(|(y, a)| *y = *a * *a)
}
#[inline]
pub fn vs_tanh_inplace(y: &mut [f32]) {
unsafe { ffi::vvtanhf(y.as_mut_ptr(), y.as_ptr(), &(y.len() as i32)) }
}
#[inline]
pub fn vd_tanh_inplace(y: &mut [f64]) {
unsafe { ffi::vvtanh(y.as_mut_ptr(), y.as_ptr(), &(y.len() as i32)) }
}
#[inline]
pub fn vs_exp_inplace(y: &mut [f32]) {
unsafe { ffi::vvexpf(y.as_mut_ptr(), y.as_ptr(), &(y.len() as i32)) }
}
#[inline]
pub fn vd_exp_inplace(y: &mut [f64]) {
unsafe { ffi::vvexp(y.as_mut_ptr(), y.as_ptr(), &(y.len() as i32)) }
}
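// The gelu helpers below use the tanh approximation
// gelu(x) ≈ 0.5 * x * (1 + tanh(sqrt(2/π) * (x + 0.044715 * x^3))): the first
// loop builds the tanh argument, the in-place tanh applies it, and the second
// loop applies the outer 0.5 * x * (1 + t) scaling.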
#[inline]
pub fn vs_gelu(vs: &[f32], ys: &mut [f32]) {
for (&v, y) in vs.iter().zip(ys.iter_mut()) {
*y = (2.0f32 / std::f32::consts::PI).sqrt() * v * (1.0 + 0.044715 * v * v)
}
vs_tanh_inplace(ys);
for (&v, y) in vs.iter().zip(ys.iter_mut()) {
*y = 0.5 * v * (1.0 + *y)
}
}
#[inline]
pub fn vd_gelu(vs: &[f64], ys: &mut [f64]) {
for (&v, y) in vs.iter().zip(ys.iter_mut()) {
*y = (2.0f64 / std::f64::consts::PI).sqrt() * v * (1.0 + 0.044715 * v * v)
}
vd_tanh_inplace(ys);
for (&v, y) in vs.iter().zip(ys.iter_mut()) {
*y = 0.5 * v * (1.0 + *y)
}
}
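// The silu helpers compute silu(x) = x * sigmoid(x) = x / (1 + e^(-x)):
// negate, exponentiate in place, then divide.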
#[inline]
pub fn vs_silu(vs: &[f32], ys: &mut [f32]) {
for (&v, y) in vs.iter().zip(ys.iter_mut()) {
*y = -v
}
vs_exp_inplace(ys);
for (&v, y) in vs.iter().zip(ys.iter_mut()) {
*y = v / (1.0 + *y)
}
}
#[inline]
pub fn vd_silu(vs: &[f64], ys: &mut [f64]) {
for (&v, y) in vs.iter().zip(ys.iter_mut()) {
*y = -v
}
vd_exp_inplace(ys);
for (&v, y) in vs.iter().zip(ys.iter_mut()) {
*y = v / (1.0 + *y)
}
}
macro_rules! binary_op {
($fn_name:ident, $ty:ty, $accelerate_name:ident) => {
#[inline]
pub fn $fn_name(a: &[$ty], b: &[$ty], y: &mut [$ty]) {
let a_len = a.len();
let b_len = b.len();
let y_len = y.len();
if a_len != y_len || b_len != y_len {
panic!(
"{} a,b,y len mismatch {a_len} {b_len} {y_len}",
stringify!($fn_name)
);
}
unsafe {
// Weird quirk of Accelerate: the rhs comes before the lhs.
ffi::$accelerate_name(
b.as_ptr(),
1,
a.as_ptr(),
1,
y.as_mut_ptr(),
1,
a_len as u64,
)
}
}
};
}
binary_op!(vs_add, f32, vDSP_vadd);
binary_op!(vd_add, f64, vDSP_vaddD);
binary_op!(vs_sub, f32, vDSP_vsub);
binary_op!(vd_sub, f64, vDSP_vsubD);
binary_op!(vs_mul, f32, vDSP_vmul);
binary_op!(vd_mul, f64, vDSP_vmulD);
binary_op!(vs_div, f32, vDSP_vdiv);
binary_op!(vd_div, f64, vDSP_vdivD);
binary_op!(vs_max, f32, vDSP_vmax);
binary_op!(vd_max, f64, vDSP_vmaxD);
binary_op!(vs_min, f32, vDSP_vmin);
binary_op!(vd_min, f64, vDSP_vminD);
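// Illustrative usage sketch (not part of the original source):
//
//     let a = [1.0f32, 2.0, 3.0];
//     let b = [4.0f32, 5.0, 6.0];
//     let mut y = [0.0f32; 3];
//     vs_add(&a, &b, &mut y);
//     assert_eq!(y, [5.0, 7.0, 9.0]);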
// File: hf_public_repos/candle/candle-core/src/strided_index.rs
use crate::Layout;
/// An iterator over the offset positions of the items of an N-dimensional array stored in a
/// flat buffer using some potential strides.
#[derive(Debug)]
pub struct StridedIndex<'a> {
next_storage_index: Option<usize>,
multi_index: Vec<usize>,
dims: &'a [usize],
stride: &'a [usize],
}
impl<'a> StridedIndex<'a> {
pub(crate) fn new(dims: &'a [usize], stride: &'a [usize], start_offset: usize) -> Self {
let elem_count: usize = dims.iter().product();
let next_storage_index = if elem_count == 0 {
None
} else {
// The product over an empty dims slice is 1, so this also covers the scalar case.
Some(start_offset)
};
StridedIndex {
next_storage_index,
multi_index: vec![0; dims.len()],
dims,
stride,
}
}
pub(crate) fn from_layout(l: &'a Layout) -> Self {
Self::new(l.dims(), l.stride(), l.start_offset())
}
}
impl Iterator for StridedIndex<'_> {
type Item = usize;
fn next(&mut self) -> Option<Self::Item> {
let storage_index = match self.next_storage_index {
None => return None,
Some(storage_index) => storage_index,
};
let mut updated = false;
let mut next_storage_index = storage_index;
for ((multi_i, max_i), stride_i) in self
.multi_index
.iter_mut()
.zip(self.dims.iter())
.zip(self.stride.iter())
.rev()
{
let next_i = *multi_i + 1;
if next_i < *max_i {
*multi_i = next_i;
updated = true;
next_storage_index += stride_i;
break;
} else {
next_storage_index -= *multi_i * stride_i;
*multi_i = 0
}
}
self.next_storage_index = if updated {
Some(next_storage_index)
} else {
None
};
Some(storage_index)
}
}
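// Example (illustrative): for dims [2, 3] with strides [1, 2] (the transpose
// of a contiguous 3x2 tensor) and start offset 0, the iterator yields
// 0, 2, 4, 1, 3, 5, i.e. the storage offsets in the logical row-major order
// of the transposed view.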
#[derive(Debug)]
pub enum StridedBlocks<'a> {
SingleBlock {
start_offset: usize,
len: usize,
},
MultipleBlocks {
block_start_index: StridedIndex<'a>,
block_len: usize,
},
}
// File: hf_public_repos/candle/candle-core/src/tensor.rs
//! Tensors are N-dimensional matrices of elements using a single data type.
#![allow(clippy::redundant_closure_call)]
use crate::backend::{BackendDevice, BackendStorage};
use crate::op::{BackpropOp, BinaryOp, CmpOp, Op, ReduceOp, UnaryOp};
use crate::scalar::TensorOrScalar;
use crate::shape::{Dim, Dims};
use crate::{bail, storage::Storage, DType, Device, Error, Layout, Result, Shape};
use std::sync::{Arc, RwLock};
/// Unique identifier for tensors.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct TensorId(usize);
impl TensorId {
fn new() -> Self {
// https://users.rust-lang.org/t/idiomatic-rust-way-to-generate-unique-id/33805
use std::sync::atomic;
static COUNTER: atomic::AtomicUsize = atomic::AtomicUsize::new(1);
Self(COUNTER.fetch_add(1, atomic::Ordering::Relaxed))
}
}
pub struct Tensor_ {
id: TensorId,
// As we provide inner mutability on the tensor content, the alternatives are:
// - Using a mutex: this would have the highest cost when retrieving the storage but would
// prevent errors when concurrent access takes place. A mutex would also be subject to
// deadlocks, for example with the current code if the same tensor were used twice by a single
// binary op.
// - Using a refcell: this would have some intermediary cost, borrow checking would be
// verified dynamically, but the resulting tensors would not be Send or Sync.
// - Using an unsafe cell would have the lowest cost but undefined behavior on concurrent
// accesses.
// Ideally, we would use Arc<Storage> for tensors on which we don't plan on modifying the data
// and Arc<Mutex<Storage>> for tensors where the data could be modified, e.g. variables but
// that's tricky to encode in the current setup.
storage: Arc<RwLock<Storage>>,
layout: Layout,
op: BackpropOp,
is_variable: bool,
dtype: DType,
device: Device,
}
impl AsRef<Tensor> for Tensor {
fn as_ref(&self) -> &Tensor {
self
}
}
// Tensors are refcounted so that cloning is cheap when building the op graph.
// Storages are also refcounted independently so that it's possible to avoid
// copying the storage for operations that only modify the shape or stride.
#[derive(Clone)]
/// The core struct for manipulating tensors.
///
/// ```rust
/// use candle_core::{Tensor, DType, Device};
///
/// let a = Tensor::arange(0f32, 6f32, &Device::Cpu)?.reshape((2, 3))?;
/// let b = Tensor::arange(0f32, 12f32, &Device::Cpu)?.reshape((3, 4))?;
///
/// let c = a.matmul(&b)?;
/// # Ok::<(), candle_core::Error>(())
/// ```
///
/// Tensors are reference counted with [`Arc`] so cloning them is cheap.
pub struct Tensor(Arc<Tensor_>);
impl std::ops::Deref for Tensor {
type Target = Tensor_;
fn deref(&self) -> &Self::Target {
self.0.as_ref()
}
}
macro_rules! unary_op {
($fn_name:ident, $op_name:ident) => {
pub fn $fn_name(&self) -> Result<Self> {
let shape = self.shape();
if shape.elem_count() == 0 {
return Ok(self.clone());
}
let storage = self
.storage()
.unary_impl::<crate::op::$op_name>(self.layout())?;
let op = BackpropOp::new1(self, |s| Op::Unary(s, UnaryOp::$op_name));
Ok(from_storage(storage, shape.clone(), op, false))
}
};
}
macro_rules! binary_op {
($fn_name:ident, $op_name:ident) => {
pub fn $fn_name(&self, rhs: &Self) -> Result<Self> {
let shape = self.same_shape_binary_op(rhs, stringify!($fn_name))?;
if shape.elem_count() == 0 {
return Ok(self.clone());
}
let storage = self.storage().binary_impl::<crate::op::$op_name>(
&*rhs.storage(),
self.layout(),
rhs.layout(),
)?;
let op = BackpropOp::new2(self, rhs, |t1, t2| Op::Binary(t1, t2, BinaryOp::$op_name));
Ok(from_storage(storage, shape.clone(), op, false))
}
};
}
macro_rules! binary_op_scalar {
($fn_name:ident, $op_name:ident) => {
pub fn $fn_name<T: TensorOrScalar>(&self, rhs: T) -> Result<Self> {
let rhs = match rhs.to_tensor_scalar()? {
crate::scalar::TensorScalar::Tensor(rhs) => rhs,
crate::scalar::TensorScalar::Scalar(rhs) => rhs
.to_dtype(self.dtype())?
.to_device(self.device())?
.broadcast_as(self.shape())?,
};
let shape = self.same_shape_binary_op(&rhs, stringify!($fn_name))?;
if self.elem_count() == 0 {
return Ok(self.clone());
}
let storage = self.storage().binary_impl::<crate::op::$op_name>(
&*rhs.storage(),
self.layout(),
rhs.layout(),
)?;
let op = BackpropOp::new2(self, &rhs, |t1, t2| Op::Binary(t1, t2, BinaryOp::$op_name));
Ok(from_storage(storage, shape.clone(), op, false))
}
};
}
macro_rules! broadcast_binary_op {
($fn_name:ident, $inner_fn_name:ident) => {
pub fn $fn_name(&self, rhs: &Self) -> Result<Self> {
let lhs = self;
let shape = lhs
.shape()
.broadcast_shape_binary_op(rhs.shape(), stringify!($fn_name))?;
let l_broadcast = shape != *lhs.shape();
let r_broadcast = shape != *rhs.shape();
match (l_broadcast, r_broadcast) {
(true, true) => lhs
.broadcast_as(&shape)?
.$inner_fn_name(&rhs.broadcast_as(&shape)?),
(false, true) => lhs.$inner_fn_name(&rhs.broadcast_as(&shape)?),
(true, false) => lhs.broadcast_as(&shape)?.$inner_fn_name(rhs),
(false, false) => lhs.$inner_fn_name(rhs),
}
}
};
}
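// Example (illustrative): for an op generated by this macro, combining a
// (2, 3) lhs with a (3,) rhs first computes the broadcast shape (2, 3), then
// takes the (false, true) arm above, broadcasting only the rhs before the
// inner op.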
/// Creates a fresh tensor structure based on a storage and a shape; this uses contiguous strides.
pub(crate) fn from_storage<S: Into<Shape>>(
storage: Storage,
shape: S,
op: BackpropOp,
is_variable: bool,
) -> Tensor {
let dtype = storage.dtype();
let device = storage.device();
let tensor_ = Tensor_ {
id: TensorId::new(),
storage: Arc::new(RwLock::new(storage)),
layout: Layout::contiguous(shape),
op,
is_variable,
dtype,
device,
};
Tensor(Arc::new(tensor_))
}
impl Tensor {
pub(crate) fn ones_impl<S: Into<Shape>>(
shape: S,
dtype: DType,
device: &Device,
is_variable: bool,
) -> Result<Self> {
let none = BackpropOp::none();
let shape = shape.into();
let storage = device.ones(&shape, dtype)?;
Ok(from_storage(storage, shape, none, is_variable))
}
/// Creates a new tensor filled with ones.
///
/// ```rust
/// use candle_core::{Tensor, DType, Device};
/// let a = Tensor::ones((2, 3), DType::F32, &Device::Cpu)?;
/// let b = Tensor::from_slice(&[1.0f32, 1.0, 1.0, 1.0, 1.0, 1.0], (2, 3), &Device::Cpu)?;
/// // a == b
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn ones<S: Into<Shape>>(shape: S, dtype: DType, device: &Device) -> Result<Self> {
Self::ones_impl(shape, dtype, device, false)
}
/// Creates a new tensor filled with ones with the same shape, dtype, and device as the other tensor.
///
/// ```rust
/// use candle_core::{Tensor, DType, Device};
/// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?;
/// let b = a.ones_like()?;
/// // b == a + 1
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn ones_like(&self) -> Result<Self> {
Tensor::ones(self.shape(), self.dtype(), self.device())
}
// Do not expose outside of the crate; the `is_variable=true` case should only be accessed from
// the variable module.
pub(crate) fn zeros_impl<S: Into<Shape>>(
shape: S,
dtype: DType,
device: &Device,
is_variable: bool,
) -> Result<Self> {
let none = BackpropOp::none();
let shape = shape.into();
let storage = device.zeros(&shape, dtype)?;
Ok(from_storage(storage, shape, none, is_variable))
}
/// Creates a new tensor filled with zeros.
///
/// ```rust
/// use candle_core::{Tensor, DType, Device};
/// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?;
/// let b = Tensor::from_slice(&[0.0f32, 0.0, 0.0, 0.0, 0.0, 0.0], (2, 3), &Device::Cpu)?;
/// // a == b
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn zeros<S: Into<Shape>>(shape: S, dtype: DType, device: &Device) -> Result<Self> {
Self::zeros_impl(shape, dtype, device, false)
}
    /// Creates a new tensor filled with zeros with the same shape, dtype, and device as the
    /// other tensor.
///
/// ```rust
/// use candle_core::{Tensor, DType, Device};
/// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?;
/// let b = a.zeros_like()?;
/// // b is on CPU f32.
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn zeros_like(&self) -> Result<Self> {
Tensor::zeros(self.shape(), self.dtype(), self.device())
}
pub(crate) fn rand_impl<S: Into<Shape>, T: crate::FloatDType>(
lo: T,
up: T,
s: S,
device: &Device,
is_variable: bool,
) -> Result<Self> {
let s = s.into();
let storage = device.rand_uniform(lo, up, &s)?;
let none = BackpropOp::none();
Ok(from_storage(storage, s, none, is_variable))
}
pub(crate) fn rand_f64_impl<S: Into<Shape>>(
lo: f64,
up: f64,
s: S,
dtype: DType,
device: &Device,
is_variable: bool,
) -> Result<Self> {
let s = s.into();
let storage = device.rand_uniform_f64(lo, up, &s, dtype)?;
let none = BackpropOp::none();
Ok(from_storage(storage, s, none, is_variable))
}
/// Creates a new tensor initialized with values sampled uniformly between `lo` and `up`.
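    ///
    /// A minimal usage sketch; the values are random, so only the shape is checked:
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let a = Tensor::rand(0f32, 1f32, (2, 3), &Device::Cpu)?;
    /// assert_eq!(a.dims(), &[2, 3]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```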
pub fn rand<S: Into<Shape>, T: crate::FloatDType>(
lo: T,
up: T,
s: S,
device: &Device,
) -> Result<Self> {
Self::rand_impl(lo, up, s, device, false)
}
    /// Creates a new tensor with the same shape, dtype, and device as `self`, initialized with
    /// values sampled uniformly between `lo` and `up`.
    pub fn rand_like(&self, lo: f64, up: f64) -> Result<Self> {
Tensor::rand_f64_impl(lo, up, self.shape(), self.dtype(), self.device(), false)
}
pub(crate) fn randn_impl<S: Into<Shape>, T: crate::FloatDType>(
mean: T,
std: T,
s: S,
device: &Device,
is_variable: bool,
) -> Result<Self> {
let s = s.into();
let storage = device.rand_normal(mean, std, &s)?;
let none = BackpropOp::none();
Ok(from_storage(storage, s, none, is_variable))
}
pub(crate) fn randn_f64_impl<S: Into<Shape>>(
mean: f64,
std: f64,
s: S,
dtype: DType,
device: &Device,
is_variable: bool,
) -> Result<Self> {
let s = s.into();
let storage = device.rand_normal_f64(mean, std, &s, dtype)?;
let none = BackpropOp::none();
Ok(from_storage(storage, s, none, is_variable))
}
    /// Creates a new tensor with the same shape, dtype, and device as `self`, initialized with
    /// values sampled from a normal distribution with the specified `mean` and standard
    /// deviation `stdev`.
    pub fn randn_like(&self, mean: f64, stdev: f64) -> Result<Self> {
Tensor::randn_f64_impl(
mean,
stdev,
self.shape(),
self.dtype(),
self.device(),
false,
)
}
/// Creates a new tensor initialized with values sampled from a normal distribution with the
/// specified `mean` and standard deviation `std`.
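    ///
    /// A minimal usage sketch; the values are random, so only the shape is checked:
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let a = Tensor::randn(0f32, 1f32, (2, 3), &Device::Cpu)?;
    /// assert_eq!(a.dims(), &[2, 3]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```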
pub fn randn<S: Into<Shape>, T: crate::FloatDType>(
mean: T,
std: T,
s: S,
device: &Device,
) -> Result<Self> {
Self::randn_impl(mean, std, s, device, false)
}
pub(crate) fn new_impl<A: crate::device::NdArray>(
array: A,
shape: Shape,
device: &Device,
is_variable: bool,
) -> Result<Self> {
let n: usize = shape.elem_count();
let buffer_size: usize = array.shape()?.elem_count();
if buffer_size != n {
return Err(Error::ShapeMismatch { buffer_size, shape }.bt());
}
let storage = device.storage(array)?;
let none = BackpropOp::none();
Ok(from_storage(storage, shape, none, is_variable))
}
/// Creates a new tensor on the specified device using the content and shape of the input.
pub fn new<A: crate::device::NdArray>(array: A, device: &Device) -> Result<Self> {
let shape = array.shape()?;
Self::new_impl(array, shape, device, false)
}
    /// Returns a new tensor with all the elements having the same specified value. Note that
    /// the tensor is not contiguous, so you may need to call `.contiguous()` on it.
///```rust
/// use candle_core::{Tensor, Device};
/// let a = Tensor::full(3.5, (2, 4), &Device::Cpu)?;
///
/// assert_eq!(a.to_vec2::<f64>()?, &[
/// [3.5, 3.5, 3.5, 3.5],
/// [3.5, 3.5, 3.5, 3.5],
/// ]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```
pub fn full<D: crate::WithDType, S: Into<Shape>>(
value: D,
shape: S,
device: &Device,
) -> Result<Self> {
Self::from_vec_impl(vec![value], (), device, false)?.broadcast_as(shape)
}
/// Creates a new 1D tensor from an iterator.
///```rust
/// use candle_core::{Tensor, Device};
    /// let a = Tensor::from_iter([1.0, 2.0, 3.0, 4.0].into_iter(), &Device::Cpu)?;
///
/// assert_eq!(a.to_vec1::<f64>()?, &[1.0, 2.0, 3.0, 4.0]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn from_iter<D: crate::WithDType>(
iter: impl IntoIterator<Item = D>,
device: &Device,
) -> Result<Self> {
let data = iter.into_iter().collect::<Vec<_>>();
let len = data.len();
Self::from_vec_impl(data, len, device, false)
}
/// Creates a new 1D tensor with values from the interval `[start, end)` taken with a common
/// difference `1` from `start`.
///```rust
/// use candle_core::{Tensor, Device};
/// let a = Tensor::arange(2., 5., &Device::Cpu)?;
///
/// assert_eq!(a.to_vec1::<f64>()?, &[2., 3., 4.]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn arange<D: crate::WithDType>(start: D, end: D, device: &Device) -> Result<Self> {
Self::arange_step(start, end, D::one(), device)
}
/// Creates a new 1D tensor with values from the interval `[start, end)` taken with a common
/// difference `step` from `start`.
///```rust
/// use candle_core::{Tensor, Device};
/// let a = Tensor::arange_step(2.0, 4.0, 0.5, &Device::Cpu)?;
///
/// assert_eq!(a.to_vec1::<f64>()?, &[2.0, 2.5, 3.0, 3.5]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn arange_step<D: crate::WithDType>(
start: D,
end: D,
step: D,
device: &Device,
) -> Result<Self> {
if D::is_zero(&step) {
bail!("step cannot be zero")
}
let mut data = vec![];
let mut current = start;
if step >= D::zero() {
while current < end {
data.push(current);
current += step;
}
} else {
while current > end {
data.push(current);
current += step;
}
}
let len = data.len();
Self::from_vec_impl(data, len, device, false)
}
pub(crate) fn from_vec_impl<S: Into<Shape>, D: crate::WithDType>(
data: Vec<D>,
shape: S,
device: &Device,
is_variable: bool,
) -> Result<Self> {
let shape = shape.into();
let buffer_size = data.len();
if buffer_size != shape.elem_count() {
return Err(Error::ShapeMismatch { buffer_size, shape }.bt());
}
let storage = device.storage_owned(data)?;
let none = BackpropOp::none();
Ok(from_storage(storage, shape, none, is_variable))
}
/// Creates a new tensor initialized with values from the input vector. The number of elements
/// in this vector must be the same as the number of elements defined by the shape.
    /// If the device is CPU, no data copy is made.
///```rust
/// use candle_core::{Tensor, Device};
/// let a = Tensor::from_vec(vec!{1., 2., 3., 4., 5., 6.}, (2, 3), &Device::Cpu)?;
///
/// assert_eq!(a.to_vec2::<f64>()?, &[
/// [1., 2., 3.],
/// [4., 5., 6.]
/// ]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn from_vec<S: Into<Shape>, D: crate::WithDType>(
data: Vec<D>,
shape: S,
device: &Device,
) -> Result<Self> {
Self::from_vec_impl(data, shape, device, false)
}
    /// Creates a new tensor initialized with values from the input slice. The number of elements
    /// in this slice must be the same as the number of elements defined by the shape.
///```rust
/// use candle_core::{Tensor, Device};
/// let values = vec![1., 2., 3., 4., 5., 6., 7., 8.];
/// let a = Tensor::from_slice(&values[1..7], (2, 3), &Device::Cpu)?;
///
/// assert_eq!(a.to_vec2::<f64>()?, &[
/// [2., 3., 4.],
/// [5., 6., 7.]
/// ]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn from_slice<S: Into<Shape>, D: crate::WithDType>(
array: &[D],
shape: S,
device: &Device,
) -> Result<Self> {
let shape = shape.into();
let n: usize = shape.elem_count();
let buffer_size: usize = array.len();
if buffer_size != n {
return Err(Error::ShapeMismatch { buffer_size, shape }.bt());
}
let storage = device.storage_from_slice(array)?;
let none = BackpropOp::none();
Ok(from_storage(storage, shape, none, false))
}
pub(crate) fn same_shape_binary_op(&self, rhs: &Self, op: &'static str) -> Result<&Shape> {
let lhs = self.shape();
let rhs = rhs.shape();
if lhs != rhs {
Err(Error::ShapeMismatchBinaryOp {
lhs: lhs.clone(),
rhs: rhs.clone(),
op,
}
.bt())
} else {
Ok(lhs)
}
}
    /// Returns true if the computation graph should track this op, that is, if it is
    /// a variable or if it depends on some variable.
pub fn track_op(&self) -> bool {
self.is_variable || self.op.is_some()
}
// TODO: Also make an inplace version or a pre-allocated? This could be tricky
// if this can create cycles in the compute graph.
binary_op!(add, Add);
binary_op!(mul, Mul);
binary_op!(sub, Sub);
binary_op!(div, Div);
binary_op_scalar!(maximum, Maximum);
binary_op_scalar!(minimum, Minimum);
broadcast_binary_op!(broadcast_add, add);
broadcast_binary_op!(broadcast_mul, mul);
broadcast_binary_op!(broadcast_sub, sub);
broadcast_binary_op!(broadcast_div, div);
broadcast_binary_op!(broadcast_maximum, maximum);
broadcast_binary_op!(broadcast_minimum, minimum);
broadcast_binary_op!(broadcast_eq, eq);
broadcast_binary_op!(broadcast_ne, ne);
broadcast_binary_op!(broadcast_lt, lt);
broadcast_binary_op!(broadcast_le, le);
broadcast_binary_op!(broadcast_gt, gt);
broadcast_binary_op!(broadcast_ge, ge);
unary_op!(recip, Recip);
unary_op!(neg, Neg);
unary_op!(exp, Exp);
unary_op!(log, Log);
unary_op!(sin, Sin);
unary_op!(cos, Cos);
unary_op!(tanh, Tanh);
unary_op!(abs, Abs);
unary_op!(sqr, Sqr);
unary_op!(sqrt, Sqrt);
unary_op!(gelu, Gelu);
unary_op!(gelu_erf, GeluErf);
unary_op!(erf, Erf);
unary_op!(relu, Relu);
unary_op!(silu, Silu);
unary_op!(ceil, Ceil);
unary_op!(floor, Floor);
unary_op!(round, Round);
unary_op!(sign, Sign);
    /// Round each element of the input tensor to the nearest integer.
///
/// If the number of decimals is negative, it specifies the number of positions to the left of
/// the decimal point.
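    ///
    /// A minimal usage sketch (values chosen so the results are exact):
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let a = Tensor::new(&[127f32, 123.], &Device::Cpu)?;
    /// // With `decimals = -1`, values are rounded to the nearest multiple of ten.
    /// let b = a.round_to(-1)?;
    /// assert_eq!(b.to_vec1::<f32>()?, &[130., 120.]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```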
pub fn round_to(&self, decimals: i32) -> Result<Self> {
let mult = 10f64.powi(decimals);
(self * mult)?.round()? * (1f64 / mult)
}
    /// Retrieves the single scalar value held in the tensor. If the tensor is not
    /// zero-dimensional, an error is returned instead.
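    ///
    /// A minimal usage sketch:
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let t = Tensor::new(3.0f64, &Device::Cpu)?;
    /// assert_eq!(t.to_scalar::<f64>()?, 3.0);
    /// # Ok::<(), candle_core::Error>(())
    /// ```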
pub fn to_scalar<S: crate::WithDType>(&self) -> Result<S> {
if self.rank() != 0 {
Err(Error::UnexpectedNumberOfDims {
expected: 0,
got: self.rank(),
shape: self.shape().clone(),
}
.bt())?
}
let from_cpu_storage = |cpu_storage: &crate::CpuStorage| {
let data = S::cpu_storage_as_slice(cpu_storage)?;
Ok::<_, Error>(data[self.layout().start_offset()])
};
match &*self.storage() {
Storage::Cpu(cpu_storage) => from_cpu_storage(cpu_storage),
Storage::Cuda(storage) => from_cpu_storage(&storage.to_cpu_storage()?),
Storage::Metal(storage) => from_cpu_storage(&storage.to_cpu_storage()?),
}
}
/// An alias for `to_scalar`.
pub fn to_vec0<S: crate::WithDType>(&self) -> Result<S> {
self.to_scalar::<S>()
}
/// Repeat this tensor along the specified dimensions.
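    ///
    /// A minimal usage sketch (values chosen for illustration):
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let a = Tensor::new(&[1f32, 2.], &Device::Cpu)?;
    /// let b = a.repeat(2)?;
    /// assert_eq!(b.to_vec1::<f32>()?, &[1., 2., 1., 2.]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```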
pub fn repeat<S: Into<Shape>>(&self, shape: S) -> Result<Tensor> {
// Similar to PyTorch, we extend the number of dimensions of self if needed.
let repeats = shape.into();
let repeats = repeats.dims();
let mut inp = if self.rank() < repeats.len() {
let shape = [vec![1; repeats.len() - self.rank()], self.dims().to_vec()].concat();
self.reshape(shape)?
} else {
self.clone()
};
for (idx, &repeat) in repeats.iter().enumerate() {
if repeat > 1 {
inp = Tensor::cat(&vec![&inp; repeat], idx)?
}
}
Ok(inp)
}
/// Creates grids of coordinates specified by the 1D inputs.
///
/// # Arguments
///
/// * `args` - A slice of 1D tensors.
/// * `xy_indexing` - Whether to use xy indexing or ij indexing. If xy is selected, the
/// first dimension corresponds to the cardinality of the second input and the second
/// dimension corresponds to the cardinality of the first input. If ij is selected, the
/// dimensions are in the same order as the cardinality of the inputs.
///
/// # Examples
///
/// ```rust
/// use candle_core::{Tensor, Device, Shape};
/// let x = Tensor::new(&[1f32, 2., 3.], &Device::Cpu)?;
/// let y = Tensor::new(&[4f32, 5., 6.], &Device::Cpu)?;
///
/// let grids_xy = Tensor::meshgrid(&[&x, &y], true)?;
///
/// assert_eq!(grids_xy.len(), 2);
/// assert_eq!(grids_xy[0].dims(), &[3, 3]);
///
/// assert_eq!(grids_xy[0].to_vec2::<f32>()?, &[[1., 2., 3.], [1., 2., 3.], [1., 2., 3.]]);
/// assert_eq!(grids_xy[1].to_vec2::<f32>()?, &[[4., 4., 4.], [5., 5., 5.], [6., 6., 6.]]);
///
/// let grids_ij = Tensor::meshgrid(&[&x, &y], false)?;
///
/// assert_eq!(grids_ij[0].to_vec2::<f32>()?, &[[1., 1., 1.], [2., 2., 2.], [3., 3., 3.]]);
/// assert_eq!(grids_ij[1].to_vec2::<f32>()?, &[[4., 5., 6.], [4., 5., 6.], [4., 5., 6.]]);
/// # Ok::<(), candle_core::Error>(())
/// ```
///
/// # Errors
///
    /// * Will return `Err` if `args` contains fewer than 2 tensors.
///
pub fn meshgrid<A: AsRef<Tensor>>(args: &[A], xy_indexing: bool) -> Result<Vec<Self>> {
if args.len() <= 1 {
Err(Error::OpRequiresAtLeastTwoTensors { op: "meshgrid" }.bt())?
}
let args: Vec<_> = if xy_indexing {
args.iter().rev().collect()
} else {
args.iter().collect()
};
let mut shape = Vec::with_capacity(args.len());
for arg in args.iter() {
shape.push(arg.as_ref().dims1()?)
}
let mut grids = Vec::with_capacity(args.len());
for idx in 0..args.len() {
let mut ones = vec![1usize; args.len()];
ones[idx] = shape[idx];
let arg = args[idx].as_ref().reshape(ones)?;
let mut repeats = shape.clone();
repeats[idx] = 1;
let repeated_tensor = arg.repeat(repeats)?;
grids.push(repeated_tensor);
}
if xy_indexing {
grids.reverse();
}
Ok(grids)
}
    /// This operation multiplies the input tensor by `mul`, then adds `add` and returns the
    /// result. The input values `mul` and `add` are cast to the appropriate type, so some
    /// rounding might be performed.
///
/// ```rust
/// use candle_core::{Tensor, Device};
/// let a = Tensor::new(&[[0f32, 1.], [2., 3.]], &Device::Cpu)?;
/// let a = a.affine(4., -2.)?;
/// assert_eq!(a.to_vec2::<f32>()?, &[[-2.0, 2.0], [6.0, 10.0]]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn affine(&self, mul: f64, add: f64) -> Result<Self> {
if self.elem_count() == 0 {
return Ok(self.clone());
}
let storage = self.storage().affine(self.layout(), mul, add)?;
let op = BackpropOp::new1(self, |arg| Op::Affine { arg, mul, add });
Ok(from_storage(storage, self.shape(), op, false))
}
/// Applies the Exponential Linear Unit (ELU) function on each element of the input tensor.
pub fn elu(&self, alpha: f64) -> Result<Self> {
if self.elem_count() == 0 {
return Ok(self.clone());
}
let storage = self.storage().elu(self.layout(), alpha)?;
let op = BackpropOp::new1(self, |t| Op::Elu(t, alpha));
Ok(from_storage(storage, self.shape(), op, false))
}
    /// Raise each element of the tensor to the float exponent `e`.
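    ///
    /// A minimal usage sketch (values chosen so the results are exact):
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let a = Tensor::new(&[1f32, 2., 3.], &Device::Cpu)?;
    /// let b = a.powf(2.)?;
    /// assert_eq!(b.to_vec1::<f32>()?, &[1., 4., 9.]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```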
pub fn powf(&self, e: f64) -> Result<Self> {
if self.elem_count() == 0 {
return Ok(self.clone());
}
let storage = self.storage().powf(self.layout(), e)?;
let op = BackpropOp::new1(self, |t| Op::Powf(t, e));
Ok(from_storage(storage, self.shape(), op, false))
}
pub(crate) fn check_dim(&self, dim: usize, op: &'static str) -> Result<()> {
if dim >= self.dims().len() {
Err(Error::DimOutOfRange {
shape: self.shape().clone(),
dim: dim as i32,
op,
}
.bt())?
} else {
Ok(())
}
}
    /// Split a tensor into the specified number of chunks; this may return fewer chunks than
    /// specified.
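    ///
    /// A minimal usage sketch (values chosen for illustration):
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let a = Tensor::arange(0f32, 6f32, &Device::Cpu)?;
    /// let chunks = a.chunk(3, 0)?;
    /// assert_eq!(chunks.len(), 3);
    /// assert_eq!(chunks[1].to_vec1::<f32>()?, &[2., 3.]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```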
pub fn chunk<D: Dim>(&self, chunks: usize, dim: D) -> Result<Vec<Self>> {
let dim = dim.to_index(self.shape(), "chunk")?;
let size = self.dim(dim)?;
if size < chunks {
(0..size).map(|i| self.narrow(dim, i, 1)).collect()
} else {
let chunk_size = size / chunks;
let cnt_additional = size % chunks;
let mut tensors = vec![];
let mut sum_chunk_size = 0;
for i in 0..chunks {
let chunk_size = if i < cnt_additional {
chunk_size + 1
} else {
chunk_size
};
let tensor = self.narrow(dim, sum_chunk_size, chunk_size)?;
tensors.push(tensor);
sum_chunk_size += chunk_size
}
Ok(tensors)
}
}
/// Returns a new tensor that is a narrowed version of the input, the dimension `dim`
/// ranges from `start` to `start + len`.
    /// ```rust
/// use candle_core::{Tensor, Device};
/// let a = Tensor::new(&[
/// [0f32, 1., 2.],
/// [3. , 4., 5.],
/// [6. , 7., 8.]
/// ], &Device::Cpu)?;
///
/// let b = a.narrow(0, 1, 2)?;
/// assert_eq!(b.shape().dims(), &[2, 3]);
/// assert_eq!(b.to_vec2::<f32>()?, &[
/// [3., 4., 5.],
/// [6., 7., 8.]
/// ]);
///
/// let c = a.narrow(1, 1, 1)?;
/// assert_eq!(c.shape().dims(), &[3, 1]);
/// assert_eq!(c.to_vec2::<f32>()?, &[
/// [1.],
/// [4.],
/// [7.]
/// ]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn narrow<D: Dim>(&self, dim: D, start: usize, len: usize) -> Result<Self> {
let dims = self.dims();
let dim = dim.to_index(self.shape(), "narrow")?;
let err = |msg| {
Err::<(), _>(
Error::NarrowInvalidArgs {
shape: self.shape().clone(),
dim,
start,
len,
msg,
}
.bt(),
)
};
if start > dims[dim] {
err("start > dim_len")?
}
if start.saturating_add(len) > dims[dim] {
err("start + len > dim_len")?
}
if start == 0 && dims[dim] == len {
Ok(self.clone())
} else {
let op = BackpropOp::new1(self, |t| Op::Narrow(t, dim, start, len));
let layout = self.layout().narrow(dim, start, len)?;
let tensor_ = Tensor_ {
id: TensorId::new(),
storage: self.storage.clone(),
layout,
op,
is_variable: false,
dtype: self.dtype,
device: self.device.clone(),
};
Ok(Tensor(Arc::new(tensor_)))
}
}
fn squeeze_dims(self, dims: &[usize]) -> Result<Self> {
match dims {
[] => Ok(self),
[i] => self.squeeze(*i),
dims => {
let dims = self
.dims()
.iter()
.enumerate()
.filter_map(|(dim_idx, &v)| {
if dims.contains(&dim_idx) {
None
} else {
Some(v)
}
})
.collect::<Vec<_>>();
self.reshape(dims)
}
}
}
fn reduce_impl<D: Dim>(&self, dim: D, keepdim: bool, op: ReduceOp) -> Result<Self> {
let dim = dim.to_index(self.shape(), op.name())?;
let storage = self.storage().reduce_op(op, self.layout(), &[dim])?;
let mut dims = self.dims().to_vec();
dims[dim] = 1;
let op = match op {
ReduceOp::Sum | ReduceOp::Min | ReduceOp::Max => {
BackpropOp::new1(self, |arg| Op::Reduce(arg, op, dims.to_vec()))
}
ReduceOp::ArgMin | ReduceOp::ArgMax => BackpropOp::none(),
};
let res = from_storage(storage, dims, op, false);
if keepdim {
Ok(res)
} else {
res.squeeze_dims(&[dim])
}
}
fn sum_impl<D: Dims>(&self, sum_dims: D, keepdim: bool) -> Result<Self> {
let sum_dims = sum_dims.to_indexes(self.shape(), "sum")?;
let storage = self
.storage()
.reduce_op(ReduceOp::Sum, self.layout(), &sum_dims)?;
let mut dims = self.dims().to_vec();
for &sum_dim in sum_dims.iter() {
dims[sum_dim] = 1
}
let op = BackpropOp::new1(self, |a| Op::Reduce(a, ReduceOp::Sum, dims.to_vec()));
let sum = from_storage(storage, dims, op, false);
if keepdim {
Ok(sum)
} else {
sum.squeeze_dims(&sum_dims)
}
}
    /// Roll the input tensor along the given dimension.
/// Elements that are shifted beyond the last position are re-introduced at the first position.
///
/// ```rust
/// # use candle_core::{Tensor, Device};
/// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?;
/// let tensor = tensor.roll(1, 0)?;
/// assert_eq!(tensor.to_vec2::<f32>()?, &[[4., 5.], [0., 1.], [2., 3.]]);
/// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?;
/// let tensor = tensor.roll(-1, 0)?;
/// assert_eq!(tensor.to_vec2::<f32>()?, &[[2., 3.], [4., 5.], [0., 1.]]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn roll<D>(&self, shift: i32, dim: D) -> Result<Self>
where
D: Dim + Clone,
{
let dim = dim.to_index(self.shape(), "roll")?;
let dim_size = self.dim(dim)?;
let shift = shift.rem_euclid(dim_size as i32) as usize;
if shift == 0 {
Ok(self.clone())
} else {
let a = self.narrow(dim, 0, dim_size - shift)?;
let b = self.narrow(dim, dim_size - shift, shift)?;
Tensor::cat(&[&b, &a], dim)
}
}
    /// Returns the sum of the elements over the dimensions specified in `sum_dims`.
///
/// The resulting tensor has a shape that is similar to the shape of the input tensor, except
/// that the number of elements for each dimension index in `sum_dims` is 1.
///
/// ```rust
/// use candle_core::{Tensor, Device};
/// let a = Tensor::new(&[[0f32, 1.], [2., 3.]], &Device::Cpu)?;
/// let s = a.sum_keepdim(0)?;
/// assert_eq!(s.to_vec2::<f32>()?, &[[2., 4.]]);
/// let s = a.sum_keepdim(1)?;
/// assert_eq!(s.to_vec2::<f32>()?, &[[1.], [5.]]);
/// let s = a.sum_keepdim((0, 1))?;
/// assert_eq!(s.to_vec2::<f32>()?, &[[6.]]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn sum_keepdim<D: Dims>(&self, sum_dims: D) -> Result<Self> {
self.sum_impl(sum_dims, true)
}
    /// Returns the sum of the elements over the dimensions specified in `sum_dims`; compared to
    /// `sum_keepdim`, these dimensions are squeezed rather than kept.
pub fn sum<D: Dims>(&self, sum_dims: D) -> Result<Self> {
self.sum_impl(sum_dims, false)
}
    /// Returns the mean of the elements over the dimensions specified in `mean_dims`.
///
/// The resulting tensor has a shape that is similar to the shape of the input tensor, except
/// that the number of elements for each dimension index in `mean_dims` is 1.
///
/// ```rust
/// use candle_core::{Tensor, Device};
/// let a = Tensor::new(&[[0f32, 1.], [2., 3.]], &Device::Cpu)?;
/// let s = a.mean_keepdim(0)?;
/// assert_eq!(s.to_vec2::<f32>()?, &[[1., 2.]]);
/// let s = a.mean_keepdim(1)?;
/// assert_eq!(s.to_vec2::<f32>()?, &[[0.5], [2.5]]);
/// let s = a.mean_keepdim((0, 1))?;
/// assert_eq!(s.to_vec2::<f32>()?, &[[1.5]]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn mean_keepdim<D: Dims>(&self, mean_dims: D) -> Result<Self> {
let mean_dims = mean_dims.to_indexes(self.shape(), "mean-keepdim")?;
let reduced_dim: usize = mean_dims.iter().map(|i| self.dims()[*i]).product();
let scale = 1f64 / (reduced_dim as f64);
self.sum_impl(mean_dims, true)? * scale
}
    /// Returns the mean of the elements over the dimensions specified in `mean_dims`; compared
    /// to `mean_keepdim`, these dimensions are squeezed rather than kept.
pub fn mean<D: Dims>(&self, mean_dims: D) -> Result<Self> {
let mean_dims = mean_dims.to_indexes(self.shape(), "mean")?;
let reduced_dim: usize = mean_dims.iter().map(|i| self.dims()[*i]).product();
let scale = 1f64 / (reduced_dim as f64);
self.sum_impl(mean_dims, false)? * scale
}
/// Returns the unbiased variance over the selected dimension.
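    ///
    /// A minimal usage sketch (values chosen for illustration):
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let a = Tensor::new(&[1f32, 3., 5.], &Device::Cpu)?;
    /// // The mean is 3, the squared deviations are [4, 0, 4], so the unbiased variance is 8 / 2 = 4.
    /// let v = a.var_keepdim(0)?;
    /// assert_eq!(v.to_vec1::<f32>()?, &[4.]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```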
pub fn var_keepdim<D: Dim>(&self, dim: D) -> Result<Self> {
let dim = dim.to_index(self.shape(), "var")?;
let mean = self.mean_keepdim(dim)?;
let squares = self.broadcast_sub(&mean)?.sqr()?;
squares.sum_impl(dim, true)? / (self.dim(dim)? - 1) as f64
}
/// Returns the unbiased variance over the selected dimension.
pub fn var<D: Dim>(&self, dim: D) -> Result<Self> {
let dim = dim.to_index(self.shape(), "var")?;
self.var_keepdim(dim)?.squeeze(dim)
}
/// Gathers the maximum value across the selected dimension. The resulting shape has the same
    /// number of dimensions as the original tensor and the selected dimension has a single element.
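    ///
    /// A minimal usage sketch (values chosen for illustration):
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let a = Tensor::new(&[[1f32, 5.], [3., 2.]], &Device::Cpu)?;
    /// assert_eq!(a.max_keepdim(1)?.to_vec2::<f32>()?, &[[5.], [3.]]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```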
pub fn max_keepdim<D: Dim>(&self, dim: D) -> Result<Self> {
self.reduce_impl(dim, true, ReduceOp::Max)
}
/// Similar to `max_keepdim` but the target dimension is squeezed.
pub fn max<D: Dim>(&self, dim: D) -> Result<Self> {
self.reduce_impl(dim, false, ReduceOp::Max)
}
/// Gathers the minimum value across the selected dimension. The resulting shape has the same
    /// number of dimensions as the original tensor and the selected dimension has a single element.
pub fn min_keepdim<D: Dim>(&self, dim: D) -> Result<Self> {
self.reduce_impl(dim, true, ReduceOp::Min)
}
/// Similar to `min_keepdim` but the target dimension is squeezed.
pub fn min<D: Dim>(&self, dim: D) -> Result<Self> {
self.reduce_impl(dim, false, ReduceOp::Min)
}
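    /// Gathers the index of the maximum value across the selected dimension. The resulting shape
    /// has the same number of dimensions as the original tensor and the selected dimension has a
    /// single element. A minimal sketch, assuming the indexes are returned as `u32`:
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let a = Tensor::new(&[[1f32, 5.], [3., 2.]], &Device::Cpu)?;
    /// assert_eq!(a.argmax_keepdim(1)?.to_vec2::<u32>()?, &[[1], [0]]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```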
pub fn argmax_keepdim<D: Dim>(&self, dim: D) -> Result<Self> {
self.reduce_impl(dim, true, ReduceOp::ArgMax)
}
/// Similar to `argmax_keepdim` but the target dimension is squeezed.
pub fn argmax<D: Dim>(&self, dim: D) -> Result<Self> {
self.reduce_impl(dim, false, ReduceOp::ArgMax)
}
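    /// Gathers the index of the minimum value across the selected dimension. The resulting shape
    /// has the same number of dimensions as the original tensor and the selected dimension has a
    /// single element.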
pub fn argmin_keepdim<D: Dim>(&self, dim: D) -> Result<Self> {
self.reduce_impl(dim, true, ReduceOp::ArgMin)
}
/// Similar to `argmin_keepdim` but the target dimension is squeezed.
pub fn argmin<D: Dim>(&self, dim: D) -> Result<Self> {
self.reduce_impl(dim, false, ReduceOp::ArgMin)
}
/// Element-wise comparison between two tensors, e.g. equality, greater than, ... The actual
/// comparison operation is specified by the `op` argument.
///
/// The returned tensor has the same shape as the original tensors and uses `u8` elements.
pub fn cmp<T: TensorOrScalar>(&self, rhs: T, op: CmpOp) -> Result<Self> {
let rhs = match rhs.to_tensor_scalar()? {
crate::scalar::TensorScalar::Tensor(rhs) => rhs,
crate::scalar::TensorScalar::Scalar(rhs) => rhs
.to_dtype(self.dtype())?
.to_device(self.device())?
.broadcast_as(self.shape())?,
};
let shape = self.same_shape_binary_op(&rhs, "cmp")?;
let storage = self
.storage()
.cmp(op, &rhs.storage(), self.layout(), rhs.layout())?;
let op = BackpropOp::new1(self, |a| Op::Cmp(a, op));
Ok(from_storage(storage, shape.dims(), op, false))
}
/// Element-wise equality.
pub fn eq<T: TensorOrScalar>(&self, rhs: T) -> Result<Self> {
self.cmp(rhs, CmpOp::Eq)
}
/// Element-wise non-equality.
pub fn ne<T: TensorOrScalar>(&self, rhs: T) -> Result<Self> {
self.cmp(rhs, CmpOp::Ne)
}
/// Element-wise comparison with lower-than, the returned tensor uses value 1 where `self <
/// rhs` and 0 otherwise.
pub fn lt<T: TensorOrScalar>(&self, rhs: T) -> Result<Self> {
self.cmp(rhs, CmpOp::Lt)
}
/// Element-wise comparison with greater-than, the returned tensor uses value 1 where `self >
/// rhs` and 0 otherwise.
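    ///
    /// A minimal usage sketch comparing against a scalar:
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let a = Tensor::new(&[1f32, 2., 3.], &Device::Cpu)?;
    /// let mask = a.gt(2f32)?;
    /// assert_eq!(mask.to_vec1::<u8>()?, &[0, 0, 1]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```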
pub fn gt<T: TensorOrScalar>(&self, rhs: T) -> Result<Self> {
self.cmp(rhs, CmpOp::Gt)
}
/// Element-wise comparison with greater-equal, the returned tensor uses value 1 where `self >=
/// rhs` and 0 otherwise.
pub fn ge<T: TensorOrScalar>(&self, rhs: T) -> Result<Self> {
self.cmp(rhs, CmpOp::Ge)
}
/// Element-wise comparison with lower-equal, the returned tensor uses value 1 where `self <=
/// rhs` and 0 otherwise.
pub fn le<T: TensorOrScalar>(&self, rhs: T) -> Result<Self> {
self.cmp(rhs, CmpOp::Le)
}
/// Clamp the tensor values to be between `min` and `max`.
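    ///
    /// A minimal usage sketch (values chosen for illustration):
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let a = Tensor::new(&[-1f32, 0.5, 2.], &Device::Cpu)?;
    /// let b = a.clamp(0f32, 1f32)?;
    /// assert_eq!(b.to_vec1::<f32>()?, &[0., 0.5, 1.]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```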
pub fn clamp<T1: TensorOrScalar, T2: TensorOrScalar>(&self, min: T1, max: T2) -> Result<Self> {
self.maximum(min)?.minimum(max)
}
    /// Interpolate the input tensor to the `target_size` length, taking the value of the nearest element.
///
/// The input tensor should have three dimensions, `(batch, channels, l)`, the returned
/// tensor also has three dimensions, `(batch, channels, target_size)`.
pub fn interpolate1d(&self, target_size: usize) -> Result<Self> {
let (n, c, _l) = self.dims3()?;
let op = BackpropOp::new1(self, |arg| Op::UpsampleNearest1D { arg, target_size });
let storage = self
.storage()
.upsample_nearest1d(self.layout(), target_size)?;
Ok(from_storage(storage, (n, c, target_size), op, false))
}
/// Alias for `interpolate1d`.
pub fn upsample_nearest1d(&self, target_size: usize) -> Result<Self> {
self.interpolate1d(target_size)
}
/// Interpolate the input tensor to the `(target_h, target_w)` size, taking the value of the
/// nearest element.
///
/// The input tensor should have four dimensions, `(batch, channels, h, w)`, the returned
/// tensor also has four dimensions, `(batch, channels, target_h, target_w)`.
pub fn interpolate2d(&self, target_h: usize, target_w: usize) -> Result<Self> {
let (n, c, _h, _w) = self.dims4()?;
let op = BackpropOp::new1(self, |arg| Op::UpsampleNearest2D {
arg,
target_h,
target_w,
});
let storage = self
.storage()
.upsample_nearest2d(self.layout(), target_h, target_w)?;
Ok(from_storage(storage, (n, c, target_h, target_w), op, false))
}
/// Alias for `interpolate2d`.
pub fn upsample_nearest2d(&self, target_h: usize, target_w: usize) -> Result<Self> {
self.interpolate2d(target_h, target_w)
}
/// 2D average pooling over an input tensor with multiple channels.
///
/// The input tensor should have four dimensions, `(batch, channels, h, w)`, the returned
/// tensor also has four dimensions, `(batch, channels, h', w')`. The pooling is performed on
/// the two last dimensions using a kernel of size `sz`. The returned element is the average
/// value over the kernel window.
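    ///
    /// A minimal usage sketch on a single 2x2 channel:
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let a = Tensor::new(&[1f32, 2., 3., 4.], &Device::Cpu)?.reshape((1, 1, 2, 2))?;
    /// let b = a.avg_pool2d(2)?;
    /// assert_eq!(b.dims(), &[1, 1, 1, 1]);
    /// assert_eq!(b.flatten_all()?.to_vec1::<f32>()?, &[2.5]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```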
pub fn avg_pool2d<T: crate::ToUsize2>(&self, sz: T) -> Result<Self> {
let sz = sz.to_usize2();
self.avg_pool2d_with_stride(sz, sz)
}
/// Same as `avg_pool2d` but with a `stride` that can be set to a value different from the
/// kernel size.
pub fn avg_pool2d_with_stride<T: crate::ToUsize2>(
&self,
kernel_size: T,
stride: T,
) -> Result<Self> {
let kernel_size = kernel_size.to_usize2();
let stride = stride.to_usize2();
let (n, c, h, w) = self.dims4()?;
if h < kernel_size.0 || w < kernel_size.1 {
bail!("kernel-size {kernel_size:?} is larger than the input size {h},{w}")
}
// https://pytorch.org/docs/stable/generated/torch.nn.AvgPool2d.html#torch.nn.AvgPool2d
let h_out = (h - kernel_size.0) / stride.0 + 1;
let w_out = (w - kernel_size.1) / stride.1 + 1;
let op = BackpropOp::new1(self, |arg| Op::AvgPool2D {
arg,
kernel_size,
stride,
});
let storage = self
.storage()
.avg_pool2d(self.layout(), kernel_size, stride)?;
Ok(from_storage(storage, (n, c, h_out, w_out), op, false))
}
/// 2D max pooling over an input tensor with multiple channels.
///
/// The input tensor should have four dimensions, `(batch, channels, h, w)`, the returned
/// tensor also has four dimensions, `(batch, channels, h', w')`. The pooling is performed on
    /// the two last dimensions using a kernel of size `sz`. The returned element is the maximum
    /// value over the kernel window.
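    ///
    /// A minimal usage sketch on a single 2x2 channel:
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let a = Tensor::new(&[1f32, 2., 3., 4.], &Device::Cpu)?.reshape((1, 1, 2, 2))?;
    /// let b = a.max_pool2d(2)?;
    /// assert_eq!(b.flatten_all()?.to_vec1::<f32>()?, &[4.]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```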
pub fn max_pool2d<T: crate::ToUsize2>(&self, sz: T) -> Result<Self> {
let sz = sz.to_usize2();
self.max_pool2d_with_stride(sz, sz)
}
/// Same as `max_pool2d` but with a `stride` that can be set to a value different from the
/// kernel size.
pub fn max_pool2d_with_stride<T: crate::ToUsize2>(
&self,
kernel_size: T,
stride: T,
) -> Result<Self> {
let kernel_size = kernel_size.to_usize2();
let stride = stride.to_usize2();
let (n, c, h, w) = self.dims4()?;
if h < kernel_size.0 || w < kernel_size.1 {
bail!("kernel-size {kernel_size:?} is larger than the input size {h},{w}")
}
// https://pytorch.org/docs/stable/generated/torch.nn.MaxPool2d.html#torch.nn.MaxPool2d
let h_out = (h - kernel_size.0) / stride.0 + 1;
let w_out = (w - kernel_size.1) / stride.1 + 1;
let op = BackpropOp::new1(self, |arg| Op::MaxPool2D {
arg,
kernel_size,
stride,
});
let storage = self
.storage()
.max_pool2d(self.layout(), kernel_size, stride)?;
Ok(from_storage(storage, (n, c, h_out, w_out), op, false))
}
/// Returns the matrix-multiplication of the input tensor with the other provided tensor.
///
/// # Arguments
///
/// * `self` - A tensor with dimensions `b1, b2, ..., bi, m, k`.
/// * `rhs` - A tensor with dimensions `b1, b2, ..., bi, k, n`.
///
/// The resulting tensor has dimensions `b1, b2, ..., bi, m, n`.
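    ///
    /// A minimal usage sketch (values chosen for illustration):
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let a = Tensor::new(&[[1f32, 2.], [3., 4.]], &Device::Cpu)?;
    /// let b = Tensor::new(&[[5f32, 6.], [7., 8.]], &Device::Cpu)?;
    /// let c = a.matmul(&b)?;
    /// assert_eq!(c.to_vec2::<f32>()?, &[[19., 22.], [43., 50.]]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```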
pub fn matmul(&self, rhs: &Self) -> Result<Self> {
let a_dims = self.shape().dims();
let b_dims = rhs.shape().dims();
let dim = a_dims.len();
if dim < 2 || b_dims.len() != dim {
Err(Error::ShapeMismatchBinaryOp {
lhs: self.shape().clone(),
rhs: rhs.shape().clone(),
op: "matmul",
}
.bt())?
}
let m = a_dims[dim - 2];
let k = a_dims[dim - 1];
let k2 = b_dims[dim - 2];
let n = b_dims[dim - 1];
let c_shape = Shape::from(&a_dims[..dim - 2]).extend(&[m, n]);
if c_shape.elem_count() == 0 || k == 0 {
return Tensor::zeros(c_shape, self.dtype(), self.device());
}
let batching: usize = a_dims[..dim - 2].iter().product();
let batching_b: usize = b_dims[..dim - 2].iter().product();
if k != k2 || batching != batching_b {
Err(Error::ShapeMismatchBinaryOp {
lhs: self.shape().clone(),
rhs: rhs.shape().clone(),
op: "matmul",
}
.bt())?
}
let storage = self.storage().matmul(
&rhs.storage(),
(batching, m, n, k),
self.layout(),
rhs.layout(),
)?;
let op = BackpropOp::new2(self, rhs, Op::Matmul);
Ok(from_storage(storage, c_shape, op, false))
}
/// Matrix-multiplication with broadcasting support.
///
    /// Compared to `matmul`, the two matrices are allowed to have different dimensions as long as
/// they are compatible for broadcast. E.g. if `self` has shape `(j, 1, n, k)` and `rhs` has
/// shape `(l, k, m)`, the output will have shape `(j, l, n, m)`.
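    ///
    /// A minimal shape-level sketch: a batch of matrices multiplied by a single shared matrix.
    ///
    /// ```rust
    /// use candle_core::{Tensor, DType, Device};
    /// let lhs = Tensor::ones((2, 3, 4), DType::F32, &Device::Cpu)?;
    /// let rhs = Tensor::ones((4, 5), DType::F32, &Device::Cpu)?;
    /// let out = lhs.broadcast_matmul(&rhs)?;
    /// assert_eq!(out.dims(), &[2, 3, 5]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```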
pub fn broadcast_matmul(&self, rhs: &Self) -> Result<Self> {
let lhs = self;
let (l_shape, r_shape) = lhs.shape().broadcast_shape_matmul(rhs.shape())?;
let l_broadcast = l_shape != *lhs.shape();
let r_broadcast = r_shape != *rhs.shape();
        // TODO: Avoid concretising the broadcasted matrices via contiguous.
match (l_broadcast, r_broadcast) {
(true, true) => lhs
.broadcast_as(&l_shape)?
.contiguous()?
.matmul(&rhs.broadcast_as(&r_shape)?.contiguous()?),
(false, true) => lhs.matmul(&rhs.broadcast_as(&r_shape)?.contiguous()?),
(true, false) => lhs.broadcast_as(&l_shape)?.contiguous()?.matmul(rhs),
(false, false) => lhs.matmul(rhs),
}
}
    /// Returns a tensor with the same shape as the input tensor; the values are taken from
    /// `on_true` if the input tensor value is not zero, and from `on_false` at the positions
    /// where the input tensor is equal to zero.
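    ///
    /// A minimal usage sketch with a `u8` mask:
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let cond = Tensor::new(&[1u8, 0, 1], &Device::Cpu)?;
    /// let on_true = Tensor::new(&[1f32, 2., 3.], &Device::Cpu)?;
    /// let on_false = Tensor::new(&[4f32, 5., 6.], &Device::Cpu)?;
    /// let out = cond.where_cond(&on_true, &on_false)?;
    /// assert_eq!(out.to_vec1::<f32>()?, &[1., 5., 3.]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```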
pub fn where_cond(&self, on_true: &Self, on_false: &Self) -> Result<Self> {
        let _shape = self.same_shape_binary_op(on_true, "where_cond")?;
let shape = self.same_shape_binary_op(on_false, "where_cond")?;
let storage = self.storage().where_cond(
self.layout(),
&on_true.storage(),
on_true.layout(),
&on_false.storage(),
on_false.layout(),
)?;
let op = BackpropOp::new3(self, on_true, on_false, Op::WhereCond);
Ok(from_storage(storage, shape, op, false))
}
    /// Returns a tensor with the values from the `self` tensor at the indexes corresponding to
    /// the values held in the `ids` tensor.
///
/// # Arguments
///
/// * `self` - A tensor with dimensions `v, h`.
/// * `ids` - A tensor with dimensions `s` and with integer values between 0 and v (exclusive).
///
/// The resulting tensor has dimensions `s, h`. `s` is called the sequence length, `v` the
/// vocabulary size, and `h` the hidden size.
///
/// ```rust
/// use candle_core::{Tensor, Device};
/// let values = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?;
/// let ids = Tensor::new(&[2u32, 1u32, 2u32], &Device::Cpu)?;
/// let emb = values.embedding(&ids)?;
/// assert_eq!(emb.to_vec2::<f32>()?, &[[4., 5.], [2., 3.], [4., 5.]]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn embedding(&self, ids: &Self) -> Result<Self> {
if self.rank() != 2 || ids.rank() != 1 {
Err(Error::ShapeMismatchBinaryOp {
lhs: self.shape().clone(),
rhs: ids.shape().clone(),
op: "embedding",
}
.bt())?
}
self.index_select(ids, 0)
}
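    /// Scatter-adds the values from `source` into `self` along dimension `dim`, accumulating at
    /// the positions given by `indexes`. `indexes` and `source` must have the same shape, and
    /// `self` must match `source` on every dimension except `dim`.
    ///
    /// A minimal usage sketch (values chosen for illustration):
    ///
    /// ```rust
    /// use candle_core::{Tensor, DType, Device};
    /// let init = Tensor::zeros(4, DType::F32, &Device::Cpu)?;
    /// let ids = Tensor::new(&[0u32, 2, 0], &Device::Cpu)?;
    /// let src = Tensor::new(&[1f32, 2., 3.], &Device::Cpu)?;
    /// // Values scattered to the same index are accumulated: 1. and 3. both land at index 0.
    /// let out = init.scatter_add(&ids, &src, 0)?;
    /// assert_eq!(out.to_vec1::<f32>()?, &[4., 0., 2., 0.]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```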
pub fn scatter_add<D: Dim>(&self, indexes: &Self, source: &Self, dim: D) -> Result<Self> {
let dim = dim.to_index(self.shape(), "scatter-add")?;
let source_dims = source.dims();
let self_dims = self.dims();
let mismatch = if source_dims.len() != self_dims.len() {
true
} else {
let mut mismatch = false;
for (i, (&d1, &d2)) in self_dims.iter().zip(source_dims.iter()).enumerate() {
if i != dim && d1 != d2 {
mismatch = true;
break;
}
}
mismatch
};
if mismatch {
Err(Error::ShapeMismatchBinaryOp {
op: "scatter-add (self, src)",
lhs: self.shape().clone(),
rhs: source.shape().clone(),
}
.bt())?
}
if indexes.dims() != source.dims() {
Err(Error::ShapeMismatchBinaryOp {
op: "scatter-add (indexes, src)",
lhs: indexes.shape().clone(),
rhs: source.shape().clone(),
}
.bt())?
}
let storage = self.storage().scatter_add(
self.layout(),
&indexes.storage(),
indexes.layout(),
&source.storage(),
source.layout(),
dim,
)?;
let op = BackpropOp::new3(self, indexes, source, |t1, t2, t3| {
Op::ScatterAdd(t1, t2, t3, dim)
});
Ok(from_storage(storage, self.shape(), op, false))
}
/// Embeds the values of the `src` tensor into the `self` tensor on the specified dimension.
pub fn slice_scatter<D: Dim>(&self, src: &Self, dim: D, start: usize) -> Result<Self> {
let dim = dim.to_index(self.shape(), "slice-scatter")?;
if dim == 0 {
self.slice_scatter0(src, start)
} else {
// TODO: Maybe we want to add a more efficient implementation at some point.
self.transpose(0, dim)?
.slice_scatter0(&src.transpose(0, dim)?, start)?
.transpose(0, dim)
}
}
/// Embeds the values of the `src` tensor into the `self` tensor on the first dimension.
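    ///
    /// A minimal usage sketch (values chosen for illustration):
    ///
    /// ```rust
    /// use candle_core::{Tensor, DType, Device};
    /// let a = Tensor::zeros((3, 2), DType::F32, &Device::Cpu)?;
    /// let src = Tensor::ones((1, 2), DType::F32, &Device::Cpu)?;
    /// // Overwrites one row of `a`, starting at row 1.
    /// let out = a.slice_scatter0(&src, 1)?;
    /// assert_eq!(out.to_vec2::<f32>()?, &[[0., 0.], [1., 1.], [0., 0.]]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```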
pub fn slice_scatter0(&self, src: &Self, start: usize) -> Result<Self> {
if self.dtype() != src.dtype() {
Err(Error::DTypeMismatchBinaryOp {
lhs: self.dtype(),
rhs: src.dtype(),
op: "slice-scatter",
}
.bt())?
}
if self.device().location() != src.device.location() {
Err(Error::DeviceMismatchBinaryOp {
lhs: self.device().location(),
rhs: src.device().location(),
op: "slice-scatter",
}
.bt())?
}
if self.rank() != src.rank() {
Err(Error::UnexpectedNumberOfDims {
expected: self.rank(),
got: src.rank(),
shape: src.shape().clone(),
}
.bt())?
}
let shape_ok =
self.dims()
.iter()
.zip(src.dims().iter())
.enumerate()
.all(|(dim_idx, (&d1, &d2))| {
if 0 == dim_idx {
d2 + start <= d1
} else {
d1 == d2
}
});
if !shape_ok {
Err(Error::ShapeMismatchBinaryOp {
op: "slice-scatter (self, src)",
lhs: self.shape().clone(),
rhs: src.shape().clone(),
}
.bt())?
}
let mut storage = unsafe { self.device().alloc_uninit(self.shape(), self.dtype())? };
self.storage()
.copy_strided_src(&mut storage, 0, self.layout())?;
let offset = start * src.dims()[1..].iter().product::<usize>();
src.storage()
.copy_strided_src(&mut storage, offset, src.layout())?;
let op = BackpropOp::new2(self, src, |t1, t2| Op::SliceScatter0(t1, t2, start));
Ok(from_storage(storage, self.shape(), op, false))
}
    /// Accumulate elements from `source` at the positions specified by `indexes` and add them to `self`.
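    ///
    /// A minimal usage sketch (values chosen for illustration):
    ///
    /// ```rust
    /// use candle_core::{Tensor, DType, Device};
    /// let init = Tensor::zeros((3, 2), DType::F32, &Device::Cpu)?;
    /// let ids = Tensor::new(&[0u32, 2], &Device::Cpu)?;
    /// let src = Tensor::new(&[[1f32, 1.], [2., 2.]], &Device::Cpu)?;
    /// // Row 0 of `src` is added at index 0 and row 1 at index 2.
    /// let out = init.index_add(&ids, &src, 0)?;
    /// assert_eq!(out.to_vec2::<f32>()?, &[[1., 1.], [0., 0.], [2., 2.]]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```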
pub fn index_add<D: Dim>(&self, indexes: &Self, source: &Self, dim: D) -> Result<Self> {
let dim = dim.to_index(self.shape(), "index-add")?;
let source_dims = source.dims();
let self_dims = self.dims();
let mismatch = if source_dims.len() != self_dims.len() {
true
} else {
let mut mismatch = false;
for (i, (&d1, &d2)) in self_dims.iter().zip(source_dims.iter()).enumerate() {
if i != dim && d1 != d2 {
mismatch = true;
break;
}
}
mismatch
};
if mismatch {
Err(Error::ShapeMismatchBinaryOp {
op: "index-add (self, source)",
lhs: self.shape().clone(),
rhs: source.shape().clone(),
}
.bt())?
}
        // The number of elements in `indexes` must match the size of the dimension on which
        // the add is performed in the source tensor (the index values from `indexes` refer to
        // positions in the target tensor `self`).
let indexes_len = indexes.dims1()?;
if source_dims[dim] != indexes_len {
Err(Error::ShapeMismatchBinaryOp {
op: "index-add (ids, source))",
lhs: indexes.shape().clone(),
rhs: source.shape().clone(),
}
.bt())?
}
let storage = self.storage().index_add(
self.layout(),
&indexes.storage(),
indexes.layout(),
&source.storage(),
source.layout(),
dim,
)?;
let op = BackpropOp::new3(self, indexes, source, |t1, t2, t3| {
Op::IndexAdd(t1, t2, t3, dim)
});
Ok(from_storage(storage, self.shape(), op, false))
}
/// Gather values across the target dimension.
///
/// # Arguments
///
/// * `self` - The input tensor.
    /// * `indexes` - The indices of elements to gather; this should have the same number of
    ///   dimensions as `self`, and `indexes.dims()[d] <= self.dims()[d]` for all dimensions
    ///   `d != dim`.
/// * `dim` - the target dimension.
///
    /// The resulting tensor has the same shape as `indexes` and uses values from `self` indexed
    /// on dimension `dim` by the values in `indexes`.
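    ///
    /// A minimal usage sketch gathering one element per row:
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let a = Tensor::new(&[[1f32, 2.], [3., 4.]], &Device::Cpu)?;
    /// let ids = Tensor::new(&[[0u32], [1u32]], &Device::Cpu)?;
    /// let out = a.gather(&ids, 1)?;
    /// assert_eq!(out.to_vec2::<f32>()?, &[[1.], [4.]]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```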
pub fn gather<D: Dim>(&self, indexes: &Self, dim: D) -> Result<Self> {
let dim = dim.to_index(self.shape(), "gather")?;
let self_dims = self.dims();
let indexes_dims = indexes.dims();
let mismatch = if indexes_dims.len() != self_dims.len() {
true
} else {
let mut mismatch = false;
for (i, (&d1, &d2)) in self_dims.iter().zip(indexes_dims.iter()).enumerate() {
if i != dim && d1 < d2 {
mismatch = true;
break;
}
}
mismatch
};
if mismatch {
Err(Error::ShapeMismatchBinaryOp {
op: "gather",
lhs: self.shape().clone(),
rhs: indexes.shape().clone(),
}
.bt())?
}
let storage =
self.storage()
.gather(self.layout(), &indexes.storage(), indexes.layout(), dim)?;
let op = BackpropOp::new2(self, indexes, |t1, t2| Op::Gather(t1, t2, dim));
Ok(from_storage(storage, indexes.shape(), op, false))
}
/// Select values for the input tensor at the target indexes across the specified dimension.
///
    /// The `indexes` argument is an integer tensor with a single dimension.
    /// The output has the same number of dimensions as the `self` input. The target dimension of
    /// the output has the same length as `indexes`, and the values are taken from `self` using
    /// the indices from `indexes`. Other dimensions have the same number of elements as the
    /// input tensor.
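    ///
    /// A minimal usage sketch selecting rows:
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let a = Tensor::new(&[[1f32, 2.], [3., 4.], [5., 6.]], &Device::Cpu)?;
    /// let ids = Tensor::new(&[2u32, 0], &Device::Cpu)?;
    /// let out = a.index_select(&ids, 0)?;
    /// assert_eq!(out.to_vec2::<f32>()?, &[[5., 6.], [1., 2.]]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```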
pub fn index_select<D: Dim>(&self, indexes: &Self, dim: D) -> Result<Self> {
let dim = dim.to_index(self.shape(), "index-select")?;
let indexes_len = match indexes.dims() {
[l] => *l,
_ => Err(Error::ShapeMismatchBinaryOp {
lhs: self.shape().clone(),
rhs: indexes.shape().clone(),
op: "index-select",
}
.bt())?,
};
let storage = self.storage().index_select(
&indexes.storage(),
self.layout(),
indexes.layout(),
dim,
)?;
let mut dims = self.dims().to_vec();
dims[dim] = indexes_len;
let op = BackpropOp::new2(self, indexes, |t1, t2| Op::IndexSelect(t1, t2, dim));
Ok(from_storage(storage, dims, op, false))
}
    /// Returns an iterator over the positions of the elements in the storage when ranging over
    /// the index tuples in lexicographic order.
pub fn strided_index(&self) -> crate::StridedIndex {
self.layout.strided_index()
}
    /// Similar to `strided_index` but returns the position of the start of each contiguous block
    /// as well as the length of the contiguous blocks. For a contiguous tensor, the iterator
    /// will only return the start offset, and the size will be the number of elements in the
    /// tensor.
pub fn strided_blocks(&self) -> crate::StridedBlocks {
self.layout.strided_blocks()
}
/// Returns the data contained in a 1D tensor as a vector of scalar values.
pub fn to_vec1<S: crate::WithDType>(&self) -> Result<Vec<S>> {
if self.rank() != 1 {
Err(Error::UnexpectedNumberOfDims {
expected: 1,
got: self.rank(),
shape: self.shape().clone(),
}
.bt())?
}
let from_cpu_storage = |cpu_storage: &crate::CpuStorage| {
let data = S::cpu_storage_as_slice(cpu_storage)?;
let data = match self.layout.contiguous_offsets() {
Some((o1, o2)) => data[o1..o2].to_vec(),
None => self.strided_index().map(|i| data[i]).collect(),
};
Ok::<Vec<_>, Error>(data)
};
match &*self.storage() {
Storage::Cpu(storage) => from_cpu_storage(storage),
Storage::Cuda(storage) => from_cpu_storage(&storage.to_cpu_storage()?),
Storage::Metal(storage) => from_cpu_storage(&storage.to_cpu_storage()?),
}
}
/// Returns the data contained in a 2D tensor as a vector of vector of scalar values.
pub fn to_vec2<S: crate::WithDType>(&self) -> Result<Vec<Vec<S>>> {
let (dim1, dim2) = self.dims2()?;
let from_cpu_storage = |cpu_storage: &crate::CpuStorage| {
let data = S::cpu_storage_as_slice(cpu_storage)?;
let mut rows = vec![];
match self.layout.contiguous_offsets() {
Some((o1, o2)) => {
let data = &data[o1..o2];
for idx_row in 0..dim1 {
rows.push(data[idx_row * dim2..(idx_row + 1) * dim2].to_vec())
}
}
None => {
let mut src_index = self.strided_index();
for _idx_row in 0..dim1 {
let row = (0..dim2).map(|_| data[src_index.next().unwrap()]).collect();
rows.push(row)
}
assert!(src_index.next().is_none());
}
}
Ok(rows)
};
match &*self.storage() {
Storage::Cpu(storage) => from_cpu_storage(storage),
Storage::Cuda(storage) => from_cpu_storage(&storage.to_cpu_storage()?),
Storage::Metal(storage) => from_cpu_storage(&storage.to_cpu_storage()?),
}
}
/// Returns the data contained in a 3D tensor.
pub fn to_vec3<S: crate::WithDType>(&self) -> Result<Vec<Vec<Vec<S>>>> {
let (dim1, dim2, dim3) = self.dims3()?;
let from_cpu_storage = |cpu_storage: &crate::CpuStorage| {
let data = S::cpu_storage_as_slice(cpu_storage)?;
let mut top_rows = vec![];
match self.layout.contiguous_offsets() {
Some((o1, o2)) => {
let data = &data[o1..o2];
let dim23 = dim2 * dim3;
for idx1 in 0..dim1 {
let data = &data[idx1 * dim23..(idx1 + 1) * dim23];
let mut rows = vec![];
for idx2 in 0..dim2 {
rows.push(data[idx2 * dim3..(idx2 + 1) * dim3].to_vec())
}
top_rows.push(rows);
}
}
None => {
let mut src_index = self.strided_index();
for _idx in 0..dim1 {
let mut rows = vec![];
for _jdx in 0..dim2 {
let row = (0..dim3).map(|_| data[src_index.next().unwrap()]).collect();
rows.push(row)
}
top_rows.push(rows);
}
assert!(src_index.next().is_none());
}
}
Ok(top_rows)
};
match &*self.storage() {
Storage::Cpu(storage) => from_cpu_storage(storage),
Storage::Cuda(storage) => from_cpu_storage(&storage.to_cpu_storage()?),
Storage::Metal(storage) => from_cpu_storage(&storage.to_cpu_storage()?),
}
}
/// The dtype for the elements stored in the input tensor.
pub fn dtype(&self) -> DType {
self.dtype
}
/// The device on which the input tensor is located.
pub fn device(&self) -> &Device {
&self.device
}
/// The tensor shape, i.e. dimension sizes on each axis.
pub fn shape(&self) -> &Shape {
self.layout().shape()
}
/// The dimension size for this tensor on each axis.
pub fn dims(&self) -> &[usize] {
self.shape().dims()
}
/// The dimension size for a specified dimension index.
pub fn dim<D: Dim>(&self, dim: D) -> Result<usize> {
let dim = dim.to_index(self.shape(), "dim")?;
Ok(self.dims()[dim])
}
/// The layout of the input tensor, this stores both the shape of the tensor as well as the
/// strides and the start offset to apply to the underlying storage.
pub fn layout(&self) -> &Layout {
&self.layout
}
pub fn stride(&self) -> &[usize] {
self.layout.stride()
}
/// The number of dimensions for this tensor, 0 for a scalar tensor, 1 for a 1D tensor, etc.
pub fn rank(&self) -> usize {
self.shape().rank()
}
/// The number of elements stored in this tensor.
pub fn elem_count(&self) -> usize {
self.shape().elem_count()
}
/// The unique identifier for this tensor.
pub fn id(&self) -> TensorId {
self.id
}
    /// Whether this tensor is a variable or not. A variable is a tensor for which gradients are
    /// tracked and on which backpropagation can be performed.
pub fn is_variable(&self) -> bool {
self.is_variable
}
pub(crate) fn op(&self) -> &Option<Op> {
&self.op
}
/// Computes the max of all the elements in this tensor and returns a tensor holding this
/// scalar with zero dimensions.
///
/// ```rust
/// use candle_core::{Tensor, Device};
/// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?;
/// let tensor = tensor.max_all()?;
/// assert_eq!(tensor.to_scalar::<f32>()?, 5.);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn max_all(&self) -> Result<Tensor> {
if self.rank() == 0 {
Ok(self.clone())
} else {
self.flatten_all()?.max(0)
}
}
/// Computes the min of all the elements in this tensor and returns a tensor holding this
/// scalar with zero dimensions.
///
/// ```rust
/// use candle_core::{Tensor, Device};
/// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?;
/// let tensor = tensor.min_all()?;
/// assert_eq!(tensor.to_scalar::<f32>()?, 0.);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn min_all(&self) -> Result<Tensor> {
if self.rank() == 0 {
Ok(self.clone())
} else {
self.flatten_all()?.min(0)
}
}
/// Computes the sum of all the elements in this tensor and returns a tensor holding this
/// scalar with zero dimensions.
///
/// ```rust
/// use candle_core::{Tensor, Device};
/// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?;
/// let tensor = tensor.sum_all()?;
/// assert_eq!(tensor.to_scalar::<f32>()?, 15.);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn sum_all(&self) -> Result<Tensor> {
let dims: Vec<_> = (0..self.rank()).collect();
self.sum(dims)
}
    /// Computes the mean of all the elements in this tensor and returns a tensor holding this
    /// scalar with zero dimensions.
    pub fn mean_all(&self) -> Result<Tensor> {
self.sum_all()? / self.elem_count() as f64
}
fn flatten_<D1: Dim, D2: Dim>(
&self,
start_dim: Option<D1>,
end_dim: Option<D2>,
) -> Result<Tensor> {
if self.rank() == 0 {
self.reshape(1)
} else {
let start_dim = match start_dim {
None => 0,
Some(dim) => dim.to_index(self.shape(), "flatten")?,
};
let end_dim = match end_dim {
None => self.rank() - 1,
Some(dim) => dim.to_index(self.shape(), "flatten")?,
};
if start_dim < end_dim {
let dims = self.dims();
let mut dst_dims = dims[..start_dim].to_vec();
dst_dims.push(dims[start_dim..end_dim + 1].iter().product::<usize>());
if end_dim + 1 < dims.len() {
dst_dims.extend(&dims[end_dim + 1..]);
}
self.reshape(dst_dims)
} else {
Ok(self.clone())
}
}
}
/// Flattens the input tensor on the dimension indexes from `start_dim` to `end_dim` (both
/// inclusive).
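    ///
    /// A minimal shape-level sketch:
    ///
    /// ```rust
    /// use candle_core::{Tensor, DType, Device};
    /// let a = Tensor::zeros((2, 3, 4), DType::F32, &Device::Cpu)?;
    /// assert_eq!(a.flatten(0, 1)?.dims(), &[6, 4]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```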
pub fn flatten<D1: Dim, D2: Dim>(&self, start_dim: D1, end_dim: D2) -> Result<Tensor> {
self.flatten_(Some(start_dim), Some(end_dim))
}
/// Flattens the input tensor on the dimension indexes from `0` to `end_dim` (inclusive).
pub fn flatten_to<D: Dim>(&self, end_dim: D) -> Result<Tensor> {
self.flatten_(None::<usize>, Some(end_dim))
}
/// Flattens the input tensor on the dimension indexes from `start_dim` (inclusive) to the last
/// dimension.
pub fn flatten_from<D: Dim>(&self, start_dim: D) -> Result<Tensor> {
self.flatten_(Some(start_dim), None::<usize>)
}
/// Flattens the input tensor by reshaping it into a one dimension tensor.
///
/// ```rust
/// use candle_core::{Tensor, Device};
/// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?;
/// let tensor = tensor.flatten_all()?;
/// assert_eq!(tensor.to_vec1::<f32>()?, &[0., 1., 2., 3., 4., 5.]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn flatten_all(&self) -> Result<Tensor> {
self.flatten_(None::<usize>, None::<usize>)
}
/// Returns the sub-tensor fixing the index at `i` on the first dimension.
///
/// ```rust
/// use candle_core::{Tensor, Device};
/// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?;
/// let t = tensor.get(0)?;
/// assert_eq!(t.to_vec1::<f32>()?, &[0., 1.]);
/// let t = tensor.get(1)?;
/// assert_eq!(t.to_vec1::<f32>()?, &[2., 3.]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn get(&self, i: usize) -> Result<Tensor> {
let dims = self.dims();
if dims.is_empty() {
Ok(self.clone())
} else {
self.narrow(0, i, 1)?.reshape(&dims[1..])
}
}
/// Returns the sub-tensor fixing the index at `index` on the dimension `dim`.
///
/// ```rust
/// use candle_core::{Tensor, Device};
/// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?;
/// let t = tensor.get_on_dim(1, 0)?;
/// assert_eq!(t.to_vec1::<f32>()?, &[0., 2., 4.]);
/// let t = tensor.get_on_dim(1, 1)?;
/// assert_eq!(t.to_vec1::<f32>()?, &[1., 3., 5.]);
/// let t = tensor.get_on_dim(0, 1)?;
/// assert_eq!(t.to_vec1::<f32>()?, &[2., 3.]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn get_on_dim<D: Dim>(&self, dim: D, index: usize) -> Result<Tensor> {
let dim = dim.to_index(self.shape(), "get_on_dim")?;
self.narrow(dim, index, 1)?.squeeze(dim)
}
/// Returns a tensor that is a transposed version of the input, the two last dimensions of the
/// input are swapped.
///
/// ```rust
/// use candle_core::{Tensor, Device};
/// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?;
/// let tensor = tensor.t()?;
/// assert_eq!(tensor.to_vec2::<f32>()?, &[[0.0, 2.0, 4.0], [1.0, 3.0, 5.0]]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn t(&self) -> Result<Tensor> {
let rank = self.rank();
if rank < 2 {
Err(Error::UnexpectedNumberOfDims {
expected: 2,
got: rank,
shape: self.shape().clone(),
}
.bt())?
}
self.transpose(rank - 2, rank - 1)
}
/// Returns a tensor that is a transposed version of the input, the given dimensions are
/// swapped.
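    ///
    /// A minimal usage sketch (values chosen for illustration):
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let a = Tensor::new(&[[0f32, 1., 2.], [3., 4., 5.]], &Device::Cpu)?;
    /// let b = a.transpose(0, 1)?;
    /// assert_eq!(b.to_vec2::<f32>()?, &[[0., 3.], [1., 4.], [2., 5.]]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```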
pub fn transpose<D1: Dim, D2: Dim>(&self, dim1: D1, dim2: D2) -> Result<Tensor> {
let dim1 = dim1.to_index(self.shape(), "transpose")?;
let dim2 = dim2.to_index(self.shape(), "transpose")?;
if dim1 == dim2 {
return Ok(self.clone());
}
let op = BackpropOp::new1(self, |t| Op::Transpose(t, dim1, dim2));
let tensor_ = Tensor_ {
id: TensorId::new(),
storage: self.storage.clone(),
layout: self.layout.transpose(dim1, dim2)?,
op,
is_variable: false,
dtype: self.dtype,
device: self.device.clone(),
};
Ok(Tensor(Arc::new(tensor_)))
}
/// Returns a tensor with the same data as the input where the dimensions have been permuted.
/// dims must be a permutation, i.e. include each dimension index exactly once.
///
/// ```rust
/// use candle_core::{Tensor, Device};
/// let tensor = Tensor::arange(0u32, 120u32, &Device::Cpu)?.reshape((2, 3, 4, 5))?;
/// assert_eq!(tensor.dims(), &[2, 3, 4, 5]);
/// let tensor = tensor.permute((2, 3, 1, 0))?;
/// assert_eq!(tensor.dims(), &[4, 5, 3, 2]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn permute<D: Dims>(&self, dims: D) -> Result<Tensor> {
let dims = dims.to_indexes(self.shape(), "permute")?;
// O(n^2) permutation check but these arrays are small.
let is_permutation =
dims.len() == self.rank() && (0..dims.len()).all(|i| dims.contains(&i));
if !is_permutation {
bail!(
"dimension mismatch in permute, tensor {:?}, dims: {:?}",
self.dims(),
dims
)
}
let op = BackpropOp::new1(self, |t| Op::Permute(t, dims.clone()));
let tensor_ = Tensor_ {
id: TensorId::new(),
storage: self.storage.clone(),
layout: self.layout.permute(&dims)?,
op,
is_variable: false,
dtype: self.dtype,
device: self.device.clone(),
};
Ok(Tensor(Arc::new(tensor_)))
}
/// Returns true if the data is stored in a C contiguous (aka row major) way.
pub fn is_contiguous(&self) -> bool {
self.layout.is_contiguous()
}
/// Returns true if the data is stored in a Fortran contiguous (aka column major) way.
pub fn is_fortran_contiguous(&self) -> bool {
self.layout.is_fortran_contiguous()
}
    /// Compared to `clone`, this copies the actual storage, but it may fail if the device runs
    /// out of memory.
pub fn copy(&self) -> Result<Tensor> {
let op = BackpropOp::new1(self, Op::Copy);
let tensor_ = Tensor_ {
id: TensorId::new(),
storage: Arc::new(RwLock::new(self.storage().try_clone(self.layout())?)),
layout: self.layout.clone(),
op,
is_variable: false,
dtype: self.dtype,
device: self.device.clone(),
};
Ok(Tensor(Arc::new(tensor_)))
}
    /// Returns a new tensor detached from the current graph; gradients are not propagated through
/// this new node. The storage of this tensor is shared with the initial tensor.
///
/// If the tensor is already detached from the computation graph, the same tensor is returned.
pub fn detach(&self) -> Tensor {
if self.op.is_none() && !self.is_variable {
self.clone()
} else {
let tensor_ = Tensor_ {
id: TensorId::new(),
storage: self.storage.clone(),
layout: self.layout.clone(),
op: BackpropOp::none(),
is_variable: false,
dtype: self.dtype,
device: self.device.clone(),
};
Tensor(Arc::new(tensor_))
}
}
/// If the target device is the same as the tensor device, only a shallow copy is performed.
pub fn to_device(&self, device: &Device) -> Result<Tensor> {
if self.device().same_device(device) {
Ok(self.clone())
} else {
let storage = match (&*self.storage(), device) {
(Storage::Cpu(storage), Device::Cuda(cuda)) => {
Storage::Cuda(cuda.storage_from_cpu_storage(storage)?)
}
(Storage::Cpu(storage), Device::Metal(metal)) => {
Storage::Metal(metal.storage_from_cpu_storage(storage)?)
}
(Storage::Cuda(storage), Device::Cpu) => Storage::Cpu(storage.to_cpu_storage()?),
(Storage::Metal(storage), Device::Cpu) => Storage::Cpu(storage.to_cpu_storage()?),
(Storage::Cuda(storage), Device::Cuda(cuda)) => {
// TODO: Avoid passing through the cpu storage here, especially if the gpu ids
// are the same.
let cpu_storage = storage.to_cpu_storage()?;
Storage::Cuda(cuda.storage_from_cpu_storage(&cpu_storage)?)
}
(Storage::Cpu(storage), Device::Cpu) => Storage::Cpu(storage.clone()),
_ => {
bail!(
"not implemented yet, self.device: {:?}, device: {:?}",
self.device(),
device
)
}
};
let op = BackpropOp::new1(self, Op::ToDevice);
let tensor_ = Tensor_ {
id: TensorId::new(),
storage: Arc::new(RwLock::new(storage)),
layout: self.layout.clone(),
op,
is_variable: false,
dtype: self.dtype,
device: device.clone(),
};
Ok(Tensor(Arc::new(tensor_)))
}
}
/// Returns a new tensor duplicating data from the original tensor. New dimensions are inserted
/// on the left.
pub fn broadcast_left<S: Into<Shape>>(&self, left_shape: S) -> Result<Self> {
let left_shape = left_shape.into();
let mut dims = left_shape.into_dims();
dims.extend(self.dims());
self.broadcast_as(dims)
}
/// Broadcast the input tensor to the target shape. This returns an error if the input shape is
/// not compatible with the target shape.
///
/// If the input shape is `i_1, i_2, ... i_k`, the target shape has to have `k` dimensions or
/// more and shape `j_1, ..., j_l, t_1, t_2, ..., t_k`. The dimensions `j_1` to `j_l` can have
/// any value, the dimension `t_a` must be equal to `i_a` if `i_a` is different from 1. If
/// `i_a` is equal to 1, any value can be used.
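    ///
    /// A minimal usage sketch broadcasting a row vector:
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let a = Tensor::new(&[[1f32, 2., 3.]], &Device::Cpu)?;
    /// let b = a.broadcast_as((2, 3))?;
    /// assert_eq!(b.to_vec2::<f32>()?, &[[1., 2., 3.], [1., 2., 3.]]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```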
pub fn broadcast_as<S: Into<Shape>>(&self, shape: S) -> Result<Self> {
let tensor_ = Tensor_ {
id: TensorId::new(),
storage: self.storage.clone(),
layout: self.layout.broadcast_as(shape)?,
op: BackpropOp::new1(self, Op::Broadcast),
is_variable: false,
dtype: self.dtype,
device: self.device.clone(),
};
Ok(Tensor(Arc::new(tensor_)))
}
/// An alias for broadcast_as.
pub fn expand<S: Into<Shape>>(&self, shape: S) -> Result<Self> {
self.broadcast_as(shape)
}
/// Casts the input tensor to the target `dtype`.
///
/// ```rust
/// use candle_core::{Tensor, Device};
/// let tensor = Tensor::new(3.14159265358979f64, &Device::Cpu)?;
/// assert_eq!(tensor.to_scalar::<f64>()?, 3.14159265358979);
/// let tensor = tensor.to_dtype(candle_core::DType::F32)?;
/// assert_eq!(tensor.to_scalar::<f32>()?, 3.1415927);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn to_dtype(&self, dtype: DType) -> Result<Self> {
if self.dtype() == dtype {
Ok(self.clone())
} else {
let shape = self.shape();
let storage = self.storage().to_dtype(self.layout(), dtype)?;
let op = BackpropOp::new1(self, Op::ToDType);
Ok(from_storage(storage, shape.clone(), op, false))
}
}
/// Returns a tensor that is in row major order. This is the same as the original tensor if it
/// was already contiguous, otherwise a copy is triggered.
pub fn contiguous(&self) -> Result<Tensor> {
if self.is_contiguous() {
Ok(self.clone())
} else {
let shape = self.shape();
let mut storage = unsafe { self.device().alloc_uninit(shape, self.dtype())? };
self.storage()
.copy_strided_src(&mut storage, 0, self.layout())?;
let op = BackpropOp::new1(self, Op::Copy);
Ok(from_storage(storage, shape.clone(), op, false))
}
}
/// Returns a tensor that is in row major order. This always makes a copy.
pub fn force_contiguous(&self) -> Result<Tensor> {
let shape = self.shape();
let mut storage = unsafe { self.device().alloc_uninit(shape, self.dtype())? };
self.storage()
.copy_strided_src(&mut storage, 0, self.layout())?;
let op = BackpropOp::new1(self, Op::Copy);
Ok(from_storage(storage, shape.clone(), op, false))
}
/// Create a variable based on the values currently stored in a tensor. The storage is always
/// copied.
pub(crate) fn make_var(&self) -> Result<Tensor> {
let shape = self.shape().clone();
let mut storage = unsafe { self.device().alloc_uninit(&shape, self.dtype())? };
self.storage()
.copy_strided_src(&mut storage, 0, self.layout())?;
Ok(from_storage(storage, shape, BackpropOp::none(), true))
}
/// Reshape returns a tensor with the target shape provided that the number of elements of the
/// original tensor is the same.
/// If the input tensor is contiguous, this is a view on the original data. Otherwise this uses
    /// a new storage and copies the data over; the returned tensor is always contiguous.
///
/// The shape can be specified using a tuple of `usize` and at most one `()` in which case
/// the behavior is the same as when using `-1` in PyTorch: this dimension size is adjusted so
/// as to match the number of elements in the tensor.
///
/// ```rust
/// # use candle_core::{Tensor, DType, Device, D};
/// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?;
///
/// let c = a.reshape((1, 6))?;
/// assert_eq!(c.shape().dims(), &[1, 6]);
///
/// let c = a.reshape((3, 2))?;
/// assert_eq!(c.shape().dims(), &[3, 2]);
///
/// let c = a.reshape((2, (), 1))?;
/// assert_eq!(c.shape().dims(), &[2, 3, 1]);
///
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn reshape<S: crate::shape::ShapeWithOneHole>(&self, s: S) -> Result<Tensor> {
let shape = s.into_shape(self.elem_count())?;
if shape.elem_count() != self.elem_count() {
return Err(Error::ShapeMismatchBinaryOp {
lhs: self.shape().clone(),
rhs: shape,
op: "reshape",
}
.bt());
}
let op = BackpropOp::new1(self, Op::Reshape);
if self.is_contiguous() {
let tensor_ = Tensor_ {
id: TensorId::new(),
storage: self.storage.clone(),
layout: Layout::contiguous_with_offset(shape, self.layout.start_offset()),
op,
is_variable: false,
dtype: self.dtype,
device: self.device.clone(),
};
Ok(Tensor(Arc::new(tensor_)))
} else {
let mut storage = unsafe { self.device().alloc_uninit(&shape, self.dtype())? };
self.storage()
.copy_strided_src(&mut storage, 0, self.layout())?;
Ok(from_storage(storage, shape, op, false))
}
}
/// Creates a new tensor with the specified dimension removed if its size was one.
///
/// ```rust
/// # use candle_core::{Tensor, DType, Device, D};
/// let a = Tensor::zeros((2, 3, 1), DType::F32, &Device::Cpu)?;
///
/// let c = a.squeeze(2)?;
/// assert_eq!(c.shape().dims(), &[2, 3]);
///
/// let c = a.squeeze(D::Minus1)?;
/// assert_eq!(c.shape().dims(), &[2, 3]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn squeeze<D: Dim>(&self, dim: D) -> Result<Self> {
// The PyTorch semantics are to return the same tensor if the target dimension
// does not have a size of 1.
let dims = self.dims();
let dim = dim.to_index(self.shape(), "squeeze")?;
if dims[dim] == 1 {
let mut dims = dims.to_vec();
let mut strides = self.stride().to_vec();
dims.remove(dim);
strides.remove(dim);
let tensor_ = Tensor_ {
id: TensorId::new(),
storage: self.storage.clone(),
layout: Layout::new(dims.into(), strides, self.layout.start_offset()),
op: BackpropOp::new1(self, Op::Reshape),
is_variable: false,
dtype: self.dtype,
device: self.device.clone(),
};
Ok(Tensor(Arc::new(tensor_)))
} else {
Ok(self.clone())
}
}
/// Creates a new tensor with a dimension of size one inserted at the specified position.
///
/// ```rust
/// # use candle_core::{Tensor, DType, Device, D};
/// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?;
///
/// let c = a.unsqueeze(0)?;
/// assert_eq!(c.shape().dims(), &[1, 2, 3]);
///
/// let c = a.unsqueeze(D::Minus1)?;
/// assert_eq!(c.shape().dims(), &[2, 3, 1]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn unsqueeze<D: Dim>(&self, dim: D) -> Result<Self> {
let mut dims = self.dims().to_vec();
let mut strides = self.stride().to_vec();
let dim = dim.to_index_plus_one(self.shape(), "unsqueeze")?;
// Cannot panic because to_index_plus_one already checks dimensions
dims.insert(dim, 1);
        // Any stride would work here, but we pick one so as to maximize the probability of remaining
// C contiguous.
let stride = if dim < strides.len() { strides[dim] } else { 1 };
strides.insert(dim, stride);
let tensor_ = Tensor_ {
id: TensorId::new(),
storage: self.storage.clone(),
layout: Layout::new(dims.into(), strides, self.layout.start_offset()),
op: BackpropOp::new1(self, Op::Reshape),
is_variable: false,
dtype: self.dtype,
device: self.device.clone(),
};
Ok(Tensor(Arc::new(tensor_)))
}
/// Stacks two or more tensors along a particular dimension.
///
    /// All tensors must have the same rank, and the output has one additional dimension.
///
/// ```rust
/// # use candle_core::{Tensor, DType, Device};
/// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?;
/// let b = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?;
///
/// let c = Tensor::stack(&[&a, &b], 0)?;
/// assert_eq!(c.shape().dims(), &[2, 2, 3]);
///
/// let c = Tensor::stack(&[&a, &b], 2)?;
/// assert_eq!(c.shape().dims(), &[2, 3, 2]);
/// # Ok::<(), candle_core::Error>(())
/// ```
pub fn stack<A: AsRef<Tensor>, D: Dim>(args: &[A], dim: D) -> Result<Self> {
if args.is_empty() {
Err(Error::OpRequiresAtLeastOneTensor { op: "stack" }.bt())?
}
let dim = dim.to_index_plus_one(args[0].as_ref().shape(), "stack")?;
let args = args
.iter()
.map(|t| t.as_ref().unsqueeze(dim))
.collect::<Result<Vec<_>>>()?;
Self::cat(&args, dim)
}
/// Pad the input tensor using 0s along dimension `dim`. This adds `left` elements before the
/// input tensor values and `right` elements after.
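    ///
    /// Illustrative example (a sketch, assuming the CPU device):
    ///
    /// ```rust
    /// # use candle_core::{Tensor, Device};
    /// let t = Tensor::new(&[1f32, 2., 3.], &Device::Cpu)?;
    /// let p = t.pad_with_zeros(0, 1, 2)?;
    /// assert_eq!(p.to_vec1::<f32>()?, [0., 1., 2., 3., 0., 0.]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```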
pub fn pad_with_zeros<D: Dim>(&self, dim: D, left: usize, right: usize) -> Result<Self> {
if left == 0 && right == 0 {
Ok(self.clone())
} else if left == 0 {
let dim = dim.to_index(self.shape(), "pad_with_zeros")?;
let mut dims = self.dims().to_vec();
dims[dim] = right;
let right = Tensor::zeros(dims.as_slice(), self.dtype, self.device())?;
Tensor::cat(&[self, &right], dim)
} else if right == 0 {
let dim = dim.to_index(self.shape(), "pad_with_zeros")?;
let mut dims = self.dims().to_vec();
dims[dim] = left;
let left = Tensor::zeros(dims.as_slice(), self.dtype, self.device())?;
Tensor::cat(&[&left, self], dim)
} else {
let dim = dim.to_index(self.shape(), "pad_with_zeros")?;
let mut dims = self.dims().to_vec();
dims[dim] = left;
let left = Tensor::zeros(dims.as_slice(), self.dtype, self.device())?;
dims[dim] = right;
let right = Tensor::zeros(dims.as_slice(), self.dtype, self.device())?;
Tensor::cat(&[&left, self, &right], dim)
}
}
    /// Pad the input tensor by replicating its edge values along dimension `dim`. This adds
    /// `left` copies of the first value before the input tensor values and `right` copies of
    /// the last value after.
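    ///
    /// Illustrative example (a sketch, assuming the CPU device):
    ///
    /// ```rust
    /// # use candle_core::{Tensor, Device};
    /// let t = Tensor::new(&[1f32, 2., 3.], &Device::Cpu)?;
    /// let p = t.pad_with_same(0, 1, 2)?;
    /// assert_eq!(p.to_vec1::<f32>()?, [1., 1., 2., 3., 3., 3.]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```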
pub fn pad_with_same<D: Dim>(&self, dim: D, left: usize, right: usize) -> Result<Self> {
if left == 0 && right == 0 {
Ok(self.clone())
} else if self.elem_count() == 0 {
bail!("cannot use pad_with_same on an empty tensor")
} else if left == 0 {
let dim = dim.to_index(self.shape(), "pad_with_same")?;
let r = self.narrow(dim, self.dim(dim)? - 1, 1)?;
let mut v = vec![self];
for _ in 0..right {
v.push(&r)
}
Tensor::cat(&v, dim)
} else if right == 0 {
let dim = dim.to_index(self.shape(), "pad_with_same")?;
let l = self.narrow(dim, 0, 1)?;
let mut v = vec![];
for _ in 0..left {
v.push(&l)
}
v.push(self);
Tensor::cat(&v, dim)
} else {
let dim = dim.to_index(self.shape(), "pad_with_same")?;
let l = self.narrow(dim, 0, 1)?;
let r = self.narrow(dim, self.dim(dim)? - 1, 1)?;
let mut v = vec![];
for _ in 0..left {
v.push(&l)
}
v.push(self);
for _ in 0..right {
v.push(&r)
}
Tensor::cat(&v, dim)
}
}
/// Run the `forward` method of `m` on `self`.
pub fn apply<M: crate::Module>(&self, m: &M) -> Result<Self> {
m.forward(self)
}
/// Run the `forward` method of `m` on `self`.
pub fn apply_t<M: crate::ModuleT>(&self, m: &M, train: bool) -> Result<Self> {
m.forward_t(self, train)
}
pub(crate) fn storage(&self) -> std::sync::RwLockReadGuard<'_, Storage> {
self.storage.read().unwrap()
}
pub(crate) fn storage_mut(&self) -> std::sync::RwLockWriteGuard<'_, Storage> {
self.storage.write().unwrap()
}
// If we extend the visibility of this function to be usable outside of this crate, we should
// make it unsafe.
pub(crate) fn storage_mut_and_layout(
&self,
) -> (std::sync::RwLockWriteGuard<'_, Storage>, &Layout) {
let storage = self.storage.write().unwrap();
(storage, &self.layout)
}
/// The storage used by this tensor, together with the layout to use to access it safely.
pub fn storage_and_layout(&self) -> (std::sync::RwLockReadGuard<'_, Storage>, &Layout) {
let storage = self.storage.read().unwrap();
(storage, &self.layout)
}
pub(crate) fn same_storage(&self, rhs: &Self) -> bool {
let lhs: &RwLock<Storage> = self.storage.as_ref();
let rhs: &RwLock<Storage> = rhs.storage.as_ref();
std::ptr::eq(lhs, rhs)
}
/// Normalize a 'relative' axis value: positive values are kept, negative
    /// values mean counting the dimensions from the back.
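    ///
    /// A small doctest sketch (CPU device):
    ///
    /// ```rust
    /// # use candle_core::{Tensor, DType, Device};
    /// let t = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?;
    /// assert_eq!(t.normalize_axis(0)?, 0);
    /// assert_eq!(t.normalize_axis(-1)?, 1);
    /// # Ok::<(), candle_core::Error>(())
    /// ```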
pub fn normalize_axis(&self, axis: i64) -> Result<usize> {
let rank = self.rank() as i64;
if rank <= axis {
bail!("axis {axis} is too large, tensor rank {rank}")
} else if 0 <= axis {
Ok(axis as usize)
} else {
let naxis = rank + axis;
if naxis < 0 {
bail!("axis {axis} is too small, tensor rank {rank}")
}
Ok(naxis as usize)
}
}
/// Returns a lower triangular matrix of ones of size n by n.
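    ///
    /// Illustrative example (a sketch, assuming the CPU device):
    ///
    /// ```rust
    /// # use candle_core::{Tensor, DType, Device};
    /// let t = Tensor::tril2(3, DType::F32, &Device::Cpu)?;
    /// assert_eq!(
    ///     t.to_vec2::<f32>()?,
    ///     [[1., 0., 0.], [1., 1., 0.], [1., 1., 1.]]
    /// );
    /// # Ok::<(), candle_core::Error>(())
    /// ```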
pub fn tril2(n: usize, dtype: DType, device: &Device) -> Result<Self> {
let t = Tensor::arange(0u32, n as u32, device)?;
let t1 = t.reshape((1, n))?.broadcast_as((n, n))?;
let t2 = t.reshape((n, 1))?.broadcast_as((n, n))?;
t1.le(&t2)?.to_dtype(dtype)
}
/// Returns an upper triangular matrix of ones of size n by n.
pub fn triu2(n: usize, dtype: DType, device: &Device) -> Result<Self> {
let t = Tensor::arange(0u32, n as u32, device)?;
let t1 = t.reshape((1, n))?.broadcast_as((n, n))?;
let t2 = t.reshape((n, 1))?.broadcast_as((n, n))?;
t1.ge(&t2)?.to_dtype(dtype)
}
/// Returns a matrix with a diagonal of ones of size n by n.
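    ///
    /// A small doctest sketch (CPU device):
    ///
    /// ```rust
    /// # use candle_core::{Tensor, DType, Device};
    /// let t = Tensor::eye(2, DType::F32, &Device::Cpu)?;
    /// assert_eq!(t.to_vec2::<f32>()?, [[1., 0.], [0., 1.]]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```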
pub fn eye(n: usize, dtype: DType, device: &Device) -> Result<Self> {
let t = Tensor::arange(0u32, n as u32, device)?;
let t1 = t.reshape((1, n))?.broadcast_as((n, n))?;
let t2 = t.reshape((n, 1))?.broadcast_as((n, n))?;
t1.eq(&t2)?.to_dtype(dtype)
}
/// Returns the cumulative sum of elements of the input tensor summed over the specified
/// dimension.
///
/// This operation is most efficient when dim is the last dimension of the tensor.
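    ///
    /// Illustrative example (a sketch, assuming the CPU device):
    ///
    /// ```rust
    /// # use candle_core::{Tensor, Device};
    /// let t = Tensor::new(&[1f32, 2., 3.], &Device::Cpu)?;
    /// assert_eq!(t.cumsum(0)?.to_vec1::<f32>()?, [1., 3., 6.]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```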
pub fn cumsum<D: Dim>(&self, dim: D) -> Result<Self> {
let dim = dim.to_index(self.shape(), "cumsum")?;
let rank = self.rank();
if rank == 0 {
return Ok(self.clone());
}
let n_axis = self.dim(dim)?;
let triu = Tensor::triu2(n_axis, self.dtype(), self.device())?;
if rank == 1 {
self.unsqueeze(0)?.matmul(&triu)?.squeeze(0)
} else {
let last = rank - 1;
let t = self.transpose(dim, last)?;
let t = t.broadcast_matmul(&triu)?;
t.transpose(dim, last)
}
}
/// Returns a copy of `self` where the values within `ranges` have been replaced with the
/// content of `src`.
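    ///
    /// Illustrative example (a sketch, assuming the CPU device):
    ///
    /// ```rust
    /// # use candle_core::{Tensor, DType, Device};
    /// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?;
    /// let src = Tensor::ones((1, 2), DType::F32, &Device::Cpu)?;
    /// let b = a.slice_assign(&[0..1, 1..3], &src)?;
    /// assert_eq!(b.to_vec2::<f32>()?, [[0., 1., 1.], [0., 0., 0.]]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```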
pub fn slice_assign<D: std::ops::RangeBounds<usize>>(
&self,
ranges: &[D],
src: &Tensor,
) -> Result<Self> {
let src_dims = src.dims();
let self_dims = self.dims();
if self_dims.len() != src_dims.len() {
bail!(
"slice-assign requires input with the same rank {} <> {}",
self_dims.len(),
src_dims.len()
)
}
if self_dims.len() != ranges.len() {
bail!(
"slice-assign requires input with the same rank as there are ranges {} <> {}",
self_dims.len(),
ranges.len()
)
}
let mut src = src.clone();
let mut mask = Self::ones(src.shape(), DType::U8, src.device())?;
for (i, range) in ranges.iter().enumerate() {
let start_included = match range.start_bound() {
std::ops::Bound::Unbounded => 0,
std::ops::Bound::Included(v) => *v,
std::ops::Bound::Excluded(v) => *v + 1,
};
let end_excluded = match range.end_bound() {
std::ops::Bound::Unbounded => self_dims[i],
std::ops::Bound::Included(v) => *v + 1,
std::ops::Bound::Excluded(v) => *v,
};
if end_excluded <= start_included {
bail!("slice-assign: empty range for dim {i}, {start_included} {end_excluded}")
}
if self_dims[i] < end_excluded {
bail!(
"slice-assign: upper bound is out of range for dim {i}, {end_excluded} {}",
self_dims[i]
)
}
if end_excluded - start_included != src_dims[i] {
bail!(
"slice-assign: the range for dim {i} ({start_included}..{end_excluded}) does not match the size of src {}", src_dims[i]
)
}
src = src.pad_with_zeros(i, start_included, self_dims[i] - end_excluded)?;
mask = mask.pad_with_zeros(i, start_included, self_dims[i] - end_excluded)?
}
mask.where_cond(/* on_true= */ &src, /* on_false= */ self)
}
/// Returns log(sum(exp(tensor), dim)).
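    ///
    /// A small doctest sketch (CPU device; each output value here equals ln(2)):
    ///
    /// ```rust
    /// # use candle_core::{Tensor, Device};
    /// let t = Tensor::new(&[[0f32, 0.], [0., 0.]], &Device::Cpu)?;
    /// let l = t.log_sum_exp(1)?;
    /// assert_eq!(l.dims(), &[2]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```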
pub fn log_sum_exp<D: Dims>(&self, sum_dims: D) -> Result<Self> {
let sum_dims = sum_dims.to_indexes(self.shape(), "log-sum-exp")?;
if sum_dims.is_empty() {
return Ok(self.clone());
}
let max = sum_dims[1..]
.iter()
.try_fold(self.max_keepdim(sum_dims[0])?, |max, &dim| {
max.max_keepdim(dim)
})?;
let exp = self.broadcast_sub(&max)?.exp()?;
let sum = exp.sum(sum_dims.clone())?;
sum.log()? + max.squeeze_dims(&sum_dims)
}
    /// Pointwise pow operation, computed as `exp(rhs * log(self))`.
pub fn pow(&self, rhs: &Tensor) -> Result<Self> {
rhs.mul(&self.log()?)?.exp()
}
/// Broadcasting version of `pow`.
pub fn broadcast_pow(&self, rhs: &Tensor) -> Result<Self> {
rhs.broadcast_mul(&self.log()?)?.exp()
}
}
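// The macro below generates the `std::ops` binary trait impls between `Tensor` and
// `Result<Tensor>` operands; the scalar (f64) variants are routed through `affine`
// using the `$mul`/`$add` closures.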
macro_rules! bin_trait {
($trait:ident, $fn1:ident, $mul:expr, $add:expr) => {
impl<B: std::borrow::Borrow<Tensor>> std::ops::$trait<B> for Tensor {
type Output = Result<Tensor>;
fn $fn1(self, rhs: B) -> Self::Output {
Tensor::$fn1(&self, rhs.borrow())
}
}
impl<B: std::borrow::Borrow<Tensor>> std::ops::$trait<B> for &Tensor {
type Output = Result<Tensor>;
fn $fn1(self, rhs: B) -> Self::Output {
Tensor::$fn1(&self, rhs.borrow())
}
}
impl<B: std::borrow::Borrow<Tensor>> std::ops::$trait<Tensor> for Result<B> {
type Output = Result<Tensor>;
fn $fn1(self, rhs: Tensor) -> Self::Output {
Tensor::$fn1(self?.borrow(), &rhs)
}
}
impl<B: std::borrow::Borrow<Tensor>> std::ops::$trait<&Tensor> for Result<B> {
type Output = Result<Tensor>;
fn $fn1(self, rhs: &Tensor) -> Self::Output {
Tensor::$fn1(self?.borrow(), rhs)
}
}
impl<B: std::borrow::Borrow<Tensor>> std::ops::$trait<Result<B>> for Tensor {
type Output = Result<Tensor>;
fn $fn1(self, rhs: Result<B>) -> Self::Output {
Tensor::$fn1(&self, rhs?.borrow())
}
}
impl<B: std::borrow::Borrow<Tensor>> std::ops::$trait<Result<B>> for &Tensor {
type Output = Result<Tensor>;
fn $fn1(self, rhs: Result<B>) -> Self::Output {
Tensor::$fn1(&self, rhs?.borrow())
}
}
impl std::ops::$trait<f64> for Tensor {
type Output = Result<Tensor>;
fn $fn1(self, rhs: f64) -> Self::Output {
self.affine($mul(rhs), $add(rhs))
}
}
impl std::ops::$trait<f64> for &Tensor {
type Output = Result<Tensor>;
fn $fn1(self, rhs: f64) -> Self::Output {
self.affine($mul(rhs), $add(rhs))
}
}
};
}
bin_trait!(Add, add, |_| 1., |v| v);
bin_trait!(Sub, sub, |_| 1., |v: f64| -v);
bin_trait!(Mul, mul, |v| v, |_| 0.);
bin_trait!(Div, div, |v| 1. / v, |_| 0.);
impl std::ops::Add<Tensor> for f64 {
type Output = Result<Tensor>;
fn add(self, rhs: Tensor) -> Self::Output {
rhs + self
}
}
impl std::ops::Add<&Tensor> for f64 {
type Output = Result<Tensor>;
fn add(self, rhs: &Tensor) -> Self::Output {
rhs + self
}
}
impl std::ops::Mul<Tensor> for f64 {
type Output = Result<Tensor>;
fn mul(self, rhs: Tensor) -> Self::Output {
rhs * self
}
}
impl std::ops::Mul<&Tensor> for f64 {
type Output = Result<Tensor>;
fn mul(self, rhs: &Tensor) -> Self::Output {
rhs * self
}
}
impl std::ops::Sub<Tensor> for f64 {
type Output = Result<Tensor>;
fn sub(self, rhs: Tensor) -> Self::Output {
rhs.affine(-1., self)
}
}
impl std::ops::Sub<&Tensor> for f64 {
type Output = Result<Tensor>;
fn sub(self, rhs: &Tensor) -> Self::Output {
rhs.affine(-1., self)
}
}
impl std::ops::Div<Tensor> for f64 {
type Output = Result<Tensor>;
#[allow(clippy::suspicious_arithmetic_impl)]
fn div(self, rhs: Tensor) -> Self::Output {
rhs.recip()? * self
}
}
impl std::ops::Div<&Tensor> for f64 {
type Output = Result<Tensor>;
#[allow(clippy::suspicious_arithmetic_impl)]
fn div(self, rhs: &Tensor) -> Self::Output {
rhs.recip()? * self
}
}
| 6 |
0 | hf_public_repos/candle/candle-core | hf_public_repos/candle/candle-core/src/sort.rs | use crate::{Result, Tensor};
use rayon::prelude::*;
#[derive(Debug, Clone, Copy)]
struct ArgSort {
asc: bool,
last_dim: usize,
}
impl ArgSort {
fn asort<T: crate::WithDType>(&self, vs: &[T], layout: &crate::Layout) -> Vec<u32> {
#[allow(clippy::uninit_vec)]
// Safety: indexes are set later in the parallelized section.
let mut sort_indexes = unsafe {
let el_count = layout.shape().elem_count();
let mut v = Vec::with_capacity(el_count);
v.set_len(el_count);
v
};
if self.asc {
sort_indexes
.par_chunks_exact_mut(self.last_dim)
.zip(vs.par_chunks_exact(self.last_dim))
.for_each(|(indexes, vs)| {
indexes
.iter_mut()
.enumerate()
.for_each(|(i, v)| *v = i as u32);
indexes.sort_by(|&i, &j| {
vs[i as usize]
.partial_cmp(&vs[j as usize])
.unwrap_or(std::cmp::Ordering::Greater)
})
});
} else {
sort_indexes
.par_chunks_exact_mut(self.last_dim)
.zip(vs.par_chunks_exact(self.last_dim))
.for_each(|(indexes, vs)| {
indexes
.iter_mut()
.enumerate()
.for_each(|(i, v)| *v = i as u32);
indexes.sort_by(|&j, &i| {
vs[i as usize]
.partial_cmp(&vs[j as usize])
.unwrap_or(std::cmp::Ordering::Greater)
})
});
}
sort_indexes
}
}
impl crate::CustomOp1 for ArgSort {
fn name(&self) -> &'static str {
"argsort"
}
fn cpu_fwd(
&self,
storage: &crate::CpuStorage,
layout: &crate::Layout,
) -> Result<(crate::CpuStorage, crate::Shape)> {
let sort_indexes = match storage {
crate::CpuStorage::U8(vs) => self.asort(vs, layout),
crate::CpuStorage::U32(vs) => self.asort(vs, layout),
crate::CpuStorage::I64(vs) => self.asort(vs, layout),
crate::CpuStorage::BF16(vs) => self.asort(vs, layout),
crate::CpuStorage::F16(vs) => self.asort(vs, layout),
crate::CpuStorage::F32(vs) => self.asort(vs, layout),
crate::CpuStorage::F64(vs) => self.asort(vs, layout),
};
let sort_indexes = crate::CpuStorage::U32(sort_indexes);
Ok((sort_indexes, layout.shape().into()))
}
#[cfg(feature = "cuda")]
fn cuda_fwd(
&self,
storage: &crate::CudaStorage,
layout: &crate::Layout,
) -> Result<(crate::CudaStorage, crate::Shape)> {
use crate::cuda_backend::cudarc::driver::{
CudaSlice, DeviceRepr, LaunchAsync, LaunchConfig, ValidAsZeroBits,
};
use crate::cuda_backend::{kernel_name, kernels, CudaStorageSlice as S, Map1Any, WrapErr};
use crate::{CudaDevice, WithDType};
impl Map1Any for ArgSort {
fn f<T: DeviceRepr + WithDType + ValidAsZeroBits, W: Fn(CudaSlice<T>) -> S>(
&self,
src: &CudaSlice<T>,
dev: &CudaDevice,
layout: &crate::Layout,
_wrap: W,
) -> Result<S> {
let slice = match layout.contiguous_offsets() {
None => crate::bail!("input has to be contiguous"),
Some((o1, o2)) => src.slice(o1..o2),
};
let elem_count = layout.shape().elem_count();
let dst = unsafe { dev.alloc::<u32>(elem_count) }.w()?;
let func = if self.asc {
dev.get_or_load_func(&kernel_name::<T>("asort_asc"), kernels::SORT)?
} else {
dev.get_or_load_func(&kernel_name::<T>("asort_desc"), kernels::SORT)?
};
let ncols = self.last_dim;
let nrows = elem_count / ncols;
let ncols_pad = next_power_of_2(ncols);
let params = (&slice, &dst, ncols as i32, ncols_pad as i32);
let cfg = LaunchConfig {
grid_dim: (1, nrows as u32, 1),
block_dim: (ncols_pad as u32, 1, 1),
shared_mem_bytes: (ncols_pad * std::mem::size_of::<u32>()) as u32,
};
unsafe { func.launch(cfg, params) }.w()?;
Ok(S::U32(dst))
}
}
use crate::backend::BackendStorage;
let dev = storage.device();
let slice = self.map(&storage.slice, dev, layout)?;
let dst = crate::cuda_backend::CudaStorage {
slice,
device: dev.clone(),
};
Ok((dst, layout.shape().clone()))
}
#[cfg(feature = "metal")]
fn metal_fwd(
&self,
storage: &crate::MetalStorage,
layout: &crate::Layout,
) -> Result<(crate::MetalStorage, crate::Shape)> {
use crate::backend::BackendStorage;
use crate::DType;
let name = {
if self.asc {
match storage.dtype() {
DType::BF16 => "asort_asc_bf16",
DType::F16 => "asort_asc_f16",
DType::F32 => "asort_asc_f32",
DType::F64 => "asort_asc_f64",
DType::U8 => "asort_asc_u8",
DType::U32 => "asort_asc_u32",
DType::I64 => "asort_asc_i64",
}
} else {
match storage.dtype() {
DType::BF16 => "asort_desc_bf16",
DType::F16 => "asort_desc_f16",
DType::F32 => "asort_desc_f32",
DType::F64 => "asort_desc_f64",
DType::U8 => "asort_desc_u8",
DType::U32 => "asort_desc_u32",
DType::I64 => "asort_desc_i64",
}
}
};
let device = storage.device();
let kernels = device.kernels();
let command_buffer = device.command_buffer()?;
let el = layout.shape().elem_count();
let ncols = self.last_dim;
let nrows = el / ncols;
let src = crate::metal_backend::buffer_o(storage.buffer(), layout, storage.dtype());
let dst = device.new_buffer(el, DType::U32, "asort")?;
let mut ncols_pad = 1;
while ncols_pad < ncols {
ncols_pad *= 2;
}
candle_metal_kernels::call_arg_sort(
device.metal_device(),
&command_buffer,
kernels,
name,
nrows,
ncols,
ncols_pad,
src,
&dst,
)
.map_err(crate::Error::wrap)?;
let dst = crate::MetalStorage::new(dst, device.clone(), el, DType::U32);
Ok((dst, layout.shape().clone()))
}
}
#[allow(unused)]
fn next_power_of_2(x: usize) -> usize {
let mut n = 1;
while n < x {
n *= 2
}
n
}
impl Tensor {
/// Returns the indices that sort the tensor along the last dimension.
///
/// If `asc` is `true`, sorting is in ascending order. Otherwise sorting is performed in
    /// descending order. The sort is unstable so there are no guarantees on the final order in
    /// case of ties.
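    ///
    /// Illustrative example (a sketch, assuming the CPU device):
    ///
    /// ```rust
    /// # use candle_core::{Tensor, Device};
    /// let t = Tensor::new(&[3f32, 1., 2.], &Device::Cpu)?;
    /// let idx = t.arg_sort_last_dim(true)?;
    /// assert_eq!(idx.to_vec1::<u32>()?, [1, 2, 0]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```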
pub fn arg_sort_last_dim(&self, asc: bool) -> Result<Tensor> {
if !self.is_contiguous() {
return Err(crate::Error::RequiresContiguous {
op: "arg_sort_last_dim",
});
}
let last_dim = match self.dims().last() {
None => crate::bail!("empty last-dim in arg-sort"),
Some(last_dim) => *last_dim,
};
// No need for a backward pass for arg sort.
self.apply_op1_no_bwd(&ArgSort { asc, last_dim })
}
/// Sorts the tensor along the last dimension, returns the sorted tensor together with the
/// sorted indexes.
///
/// If `asc` is `true`, sorting is in ascending order. Otherwise sorting is performed in
    /// descending order. The sort is unstable so there are no guarantees on the final order in
    /// case of ties.
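    ///
    /// A small doctest sketch (CPU device):
    ///
    /// ```rust
    /// # use candle_core::{Tensor, Device};
    /// let t = Tensor::new(&[3f32, 1., 2.], &Device::Cpu)?;
    /// let (sorted, idx) = t.sort_last_dim(true)?;
    /// assert_eq!(sorted.to_vec1::<f32>()?, [1., 2., 3.]);
    /// assert_eq!(idx.to_vec1::<u32>()?, [1, 2, 0]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```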
pub fn sort_last_dim(&self, asc: bool) -> Result<(Tensor, Tensor)> {
if !self.is_contiguous() {
return Err(crate::Error::RequiresContiguous {
op: "sort_last_dim",
});
}
let asort = self.arg_sort_last_dim(asc)?;
let sorted = self.gather(&asort, crate::D::Minus1)?;
Ok((sorted, asort))
}
}
| 7 |
0 | hf_public_repos/candle/candle-core | hf_public_repos/candle/candle-core/src/backprop.rs | //! Methods for backpropagation of gradients.
use crate::op::{BinaryOp, Op, ReduceOp, UnaryOp};
use crate::{Error, Result, Tensor, TensorId};
use std::collections::HashMap;
// arg has been reduced to node over reduced_dims, expand the gradient back to the shape
// of arg. This has to handle keepdims.
fn broadcast_back(arg: &Tensor, node: &Tensor, reduced_dims: &[usize]) -> Result<Tensor> {
if arg.rank() == node.rank() {
// keepdim = true
node.broadcast_as(arg.shape())
} else {
// keepdim = false
// first expand the reduced dims.
node.reshape(reduced_dims)?.broadcast_as(arg.shape())
}
}
thread_local! {
static CANDLE_GRAD_DO_NOT_DETACH: bool = {
match std::env::var("CANDLE_GRAD_DO_NOT_DETACH") {
Ok(s) => {
!s.is_empty() && s != "0"
},
Err(_) => false,
}
}
}
impl Tensor {
    /// Return all the nodes that lead to this value in a topologically sorted vec: earlier
    /// elements depend on later ones, so the first element, if any, is the tensor this
    /// method was called on.
/// This assumes that the op graph is a DAG.
fn sorted_nodes(&self) -> Vec<&Tensor> {
// The vec of sorted nodes is passed as an owned value rather than a mutable reference
// to get around some lifetime limitations.
fn walk<'a>(
node: &'a Tensor,
nodes: Vec<&'a Tensor>,
already_seen: &mut HashMap<TensorId, bool>,
) -> (bool, Vec<&'a Tensor>) {
if let Some(&tg) = already_seen.get(&node.id()) {
return (tg, nodes);
}
let mut track_grad = false;
let mut nodes = if node.is_variable() {
// Do not call recursively on the "leaf" nodes.
track_grad = true;
nodes
} else if node.dtype().is_int() {
nodes
} else if let Some(op) = node.op() {
match op {
Op::IndexAdd(t1, t2, t3, _)
| Op::ScatterAdd(t1, t2, t3, _)
| Op::CustomOp3(t1, t2, t3, _)
| Op::WhereCond(t1, t2, t3) => {
let (tg, nodes) = walk(t1, nodes, already_seen);
track_grad |= tg;
let (tg, nodes) = walk(t2, nodes, already_seen);
track_grad |= tg;
let (tg, nodes) = walk(t3, nodes, already_seen);
track_grad |= tg;
nodes
}
Op::Conv1D {
arg: lhs,
kernel: rhs,
..
}
| Op::ConvTranspose1D {
arg: lhs,
kernel: rhs,
..
}
| Op::Conv2D {
arg: lhs,
kernel: rhs,
..
}
| Op::ConvTranspose2D {
arg: lhs,
kernel: rhs,
..
}
| Op::CustomOp2(lhs, rhs, _)
| Op::Binary(lhs, rhs, _)
| Op::Gather(lhs, rhs, _)
| Op::IndexSelect(lhs, rhs, _)
| Op::Matmul(lhs, rhs)
| Op::SliceScatter0(lhs, rhs, _) => {
let (tg, nodes) = walk(lhs, nodes, already_seen);
track_grad |= tg;
let (tg, nodes) = walk(rhs, nodes, already_seen);
track_grad |= tg;
nodes
}
Op::Cat(args, _) => args.iter().fold(nodes, |nodes, arg| {
let (tg, nodes) = walk(arg, nodes, already_seen);
track_grad |= tg;
nodes
}),
Op::Affine { arg, mul, .. } => {
if *mul == 0. {
nodes
} else {
let (tg, nodes) = walk(arg, nodes, already_seen);
track_grad |= tg;
nodes
}
}
Op::Unary(_node, UnaryOp::Ceil)
| Op::Unary(_node, UnaryOp::Floor)
| Op::Unary(_node, UnaryOp::Round)
| Op::Unary(_node, UnaryOp::Sign) => nodes,
Op::Reshape(node)
| Op::UpsampleNearest1D { arg: node, .. }
| Op::UpsampleNearest2D { arg: node, .. }
| Op::AvgPool2D { arg: node, .. }
| Op::MaxPool2D { arg: node, .. }
| Op::Copy(node)
| Op::Broadcast(node)
| Op::Cmp(node, _)
| Op::Reduce(node, ReduceOp::Min | ReduceOp::Sum | ReduceOp::Max, _)
| Op::ToDevice(node)
| Op::Transpose(node, _, _)
| Op::Permute(node, _)
| Op::Narrow(node, _, _, _)
| Op::Unary(node, _)
| Op::Elu(node, _)
| Op::Powf(node, _)
| Op::CustomOp1(node, _) => {
let (tg, nodes) = walk(node, nodes, already_seen);
track_grad |= tg;
nodes
}
Op::ToDType(node) => {
if node.dtype().is_float() {
let (tg, nodes) = walk(node, nodes, already_seen);
track_grad |= tg;
nodes
} else {
nodes
}
}
Op::Reduce(_, ReduceOp::ArgMin | ReduceOp::ArgMax, _) => nodes,
}
} else {
nodes
};
already_seen.insert(node.id(), track_grad);
if track_grad {
nodes.push(node);
}
(track_grad, nodes)
}
let (_tg, mut nodes) = walk(self, vec![], &mut HashMap::new());
nodes.reverse();
nodes
}
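    /// Runs the backward pass and returns a [`GradStore`] mapping each tracked tensor to
    /// its gradient.
    ///
    /// Illustrative example (a sketch; it assumes the crate's `Var` type for trainable
    /// variables):
    ///
    /// ```rust
    /// # use candle_core::{Device, Var};
    /// let x = Var::new(3f32, &Device::Cpu)?;
    /// let y = (x.as_tensor() * x.as_tensor())?;
    /// let grads = y.backward()?;
    /// // d(x^2)/dx = 2x = 6
    /// assert_eq!(grads.get(&x).unwrap().to_scalar::<f32>()?, 6.);
    /// # Ok::<(), candle_core::Error>(())
    /// ```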
pub fn backward(&self) -> Result<GradStore> {
let sorted_nodes = self.sorted_nodes();
let mut grads = GradStore::new();
grads.insert(self, self.ones_like()?.contiguous()?);
for node in sorted_nodes.iter() {
if node.is_variable() {
continue;
}
let grad = grads
.remove(node)
.expect("candle internal error - grad not populated");
// https://github.com/huggingface/candle/issues/1241
// Ideally, we would make these operations in place where possible to ensure that we
// do not have to allocate too often. Here we just call `.detach` to avoid computing
// the backprop graph of the backprop itself. This would be an issue for second order
// derivatives but these are out of scope at the moment.
let do_not_detach = CANDLE_GRAD_DO_NOT_DETACH.with(|b| *b);
let grad = if do_not_detach { grad } else { grad.detach() };
if let Some(op) = node.op() {
match op {
Op::Binary(lhs, rhs, BinaryOp::Add) => {
let lhs_sum_grad = grads.or_insert(lhs)?;
*lhs_sum_grad = lhs_sum_grad.add(&grad)?;
let rhs_sum_grad = grads.or_insert(rhs)?;
*rhs_sum_grad = rhs_sum_grad.add(&grad)?;
}
Op::Binary(lhs, rhs, BinaryOp::Sub) => {
let lhs_sum_grad = grads.or_insert(lhs)?;
*lhs_sum_grad = lhs_sum_grad.add(&grad)?;
let rhs_sum_grad = grads.or_insert(rhs)?;
*rhs_sum_grad = rhs_sum_grad.sub(&grad)?;
}
Op::Binary(lhs, rhs, BinaryOp::Mul) => {
let lhs_grad = grad.mul(rhs)?;
let lhs_sum_grad = grads.or_insert(lhs)?;
*lhs_sum_grad = lhs_sum_grad.add(&lhs_grad)?;
let rhs_grad = grad.mul(lhs)?;
let rhs_sum_grad = grads.or_insert(rhs)?;
*rhs_sum_grad = rhs_sum_grad.add(&rhs_grad)?;
}
Op::Binary(lhs, rhs, BinaryOp::Div) => {
let lhs_grad = grad.div(rhs)?;
let lhs_sum_grad = grads.or_insert(lhs)?;
*lhs_sum_grad = lhs_sum_grad.add(&lhs_grad)?;
let rhs_grad = grad.mul(lhs)?.div(&rhs.sqr()?)?;
let rhs_sum_grad = grads.or_insert(rhs)?;
*rhs_sum_grad = rhs_sum_grad.sub(&rhs_grad)?;
}
Op::Binary(lhs, rhs, BinaryOp::Minimum)
| Op::Binary(lhs, rhs, BinaryOp::Maximum) => {
let mask_lhs = node.eq(lhs)?.to_dtype(grad.dtype())?;
let mask_rhs = node.eq(rhs)?.to_dtype(grad.dtype())?;
                        // If both masks are 1 on the same point, we want to scale the
// gradient by 0.5 rather than 1.
let lhs_grad = mask_lhs.mul(&grad)?.div(&(&mask_rhs + 1.)?)?;
let lhs_sum_grad = grads.or_insert(lhs)?;
*lhs_sum_grad = lhs_sum_grad.add(&lhs_grad)?;
let rhs_grad = mask_rhs.mul(&grad)?.div(&(&mask_lhs + 1.)?)?;
let rhs_sum_grad = grads.or_insert(rhs)?;
*rhs_sum_grad = rhs_sum_grad.add(&rhs_grad)?;
}
Op::WhereCond(pred, t, f) => {
let zeros = grad.zeros_like()?;
let t_sum_grad = grads.or_insert(t)?;
let t_grad = pred.where_cond(&grad, &zeros)?;
*t_sum_grad = t_sum_grad.add(&t_grad)?;
let f_sum_grad = grads.or_insert(f)?;
let f_grad = pred.where_cond(&zeros, &grad)?;
*f_sum_grad = f_sum_grad.add(&f_grad)?;
}
Op::Conv1D {
arg,
kernel,
padding,
stride,
dilation,
} => {
                        // The output length for conv_transpose1d is:
// (l_in - 1) * stride - 2 * padding + dilation * (k_size - 1) + out_padding + 1
let grad_l_in = grad.dim(2)?;
let k_size = kernel.dim(2)?;
let out_size =
(grad_l_in - 1) * stride + dilation * (k_size - 1) + 1 - 2 * padding;
let out_padding = arg.dim(2)? - out_size;
let grad_arg = grad.conv_transpose1d(
kernel,
*padding,
out_padding,
*stride,
*dilation,
/* groups */ 1,
)?;
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&grad_arg)?;
let grad_kernel = arg
.transpose(0, 1)?
.conv1d(&grad.transpose(0, 1)?, *padding, *dilation, *stride, 1)?
.transpose(0, 1)?;
let sum_grad = grads.or_insert(kernel)?;
let (_, _, k0) = kernel.dims3()?;
let (_, _, g_k0) = grad_kernel.dims3()?;
let grad_kernel = if g_k0 != k0 {
grad_kernel.narrow(2, 0, k0)?
} else {
grad_kernel
};
*sum_grad = sum_grad.add(&grad_kernel)?;
}
Op::Conv2D {
arg,
kernel,
padding,
stride,
dilation,
} => {
// The output height for conv_transpose2d is:
// (i_h - 1) * stride - 2 * padding + dilation * (k_h - 1) + out_padding + 1
let grad_h = grad.dim(2)?;
let k_h = kernel.dim(2)?;
let out_size =
(grad_h - 1) * stride + dilation * (k_h - 1) + 1 - 2 * padding;
let out_padding = arg.dim(2)? - out_size;
let grad_arg = grad.conv_transpose2d(
kernel,
*padding,
out_padding,
*stride,
*dilation,
)?;
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&grad_arg)?;
let grad_kernel = arg
.transpose(0, 1)?
.conv2d(&grad.transpose(0, 1)?, *padding, *dilation, *stride, 1)?
.transpose(0, 1)?;
let sum_grad = grads.or_insert(kernel)?;
let (_, _, k0, k1) = kernel.dims4()?;
let (_, _, g_k0, g_k1) = grad_kernel.dims4()?;
let grad_kernel = if g_k0 != k0 || g_k1 != k1 {
grad_kernel.narrow(2, 0, k0)?.narrow(3, 0, k1)?
} else {
grad_kernel
};
*sum_grad = sum_grad.add(&grad_kernel)?;
}
Op::ConvTranspose1D { .. } => Err(Error::BackwardNotSupported {
op: "conv-transpose1d",
})?,
Op::ConvTranspose2D {
arg,
kernel,
padding,
stride,
dilation,
output_padding: _output_padding,
} => {
let grad_arg = grad.conv2d(kernel, *padding, *stride, *dilation, 1)?;
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&grad_arg)?;
let grad_kernel = grad
.transpose(0, 1)?
.conv2d(&arg.transpose(0, 1)?, *padding, *dilation, *stride, 1)?
.transpose(0, 1)?;
let sum_grad = grads.or_insert(kernel)?;
let (_, _, k0, k1) = kernel.dims4()?;
let (_, _, g_k0, g_k1) = grad_kernel.dims4()?;
let grad_kernel = if g_k0 != k0 || g_k1 != k1 {
grad_kernel.narrow(2, 0, k0)?.narrow(3, 0, k1)?
} else {
grad_kernel
};
*sum_grad = sum_grad.add(&grad_kernel)?;
}
Op::AvgPool2D {
arg,
kernel_size,
stride,
} => {
if kernel_size != stride {
crate::bail!("backward not supported for avgpool2d if ksize {kernel_size:?} != stride {stride:?}")
}
let (_n, _c, h, w) = arg.dims4()?;
let grad_arg = grad.upsample_nearest2d(h, w)?;
let grad_arg =
(grad_arg * (1f64 / (kernel_size.0 * kernel_size.1) as f64))?;
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&grad_arg)?;
}
Op::MaxPool2D {
arg,
kernel_size,
stride,
} => {
if kernel_size != stride {
crate::bail!("backward not supported for maxpool2d if ksize {kernel_size:?} != stride {stride:?}")
}
let (_n, _c, h, w) = arg.dims4()?;
// For computing the max-pool gradient, we compute a mask where a 1 means
// that the element is the maximum, then we apply this mask to the
// upsampled gradient (taking into account that multiple max may exist so
// we scale the gradient for this case).
let node_upsampled = node.upsample_nearest2d(h, w)?;
let mask = arg.eq(&node_upsampled)?.to_dtype(arg.dtype())?;
let avg = mask.avg_pool2d_with_stride(*kernel_size, *stride)?;
let grad_arg = ((grad * avg)?.upsample_nearest2d(h, w)? * mask)?;
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&grad_arg)?;
}
Op::UpsampleNearest1D { arg, target_size } => {
let (_n, c, size) = arg.dims3()?;
if target_size % size != 0 {
crate::bail!("backward not supported for non integer upscaling factors")
}
let scale = target_size / size;
let kernel = Tensor::ones((c, 1, scale), arg.dtype(), arg.device())?;
let conv_sum = grad.conv1d(&kernel, 0, scale, 1, c)?;
let sum_grad = grads.or_insert(arg)?;
*sum_grad = conv_sum;
}
Op::UpsampleNearest2D {
arg,
target_h,
target_w,
} => {
let (_n, c, h, w) = arg.dims4()?;
if target_h % h != 0 || target_w % w != 0 {
crate::bail!("backward not supported for non integer upscaling factors")
}
let scale_h = target_h / h;
let scale_w = target_w / w;
if scale_h != scale_w {
crate::bail!("backward not supported for non uniform upscaling factors")
};
let kernel =
Tensor::ones((c, 1, scale_h, scale_w), arg.dtype(), arg.device())?;
let conv_sum = grad.conv2d(&kernel, 0, scale_h, 1, c)?;
let sum_grad = grads.or_insert(arg)?;
*sum_grad = conv_sum;
}
Op::SliceScatter0(lhs, rhs, start_rhs) => {
let rhs_sum_grad = grads.or_insert(rhs)?;
let rhs_grad = grad.narrow(0, *start_rhs, rhs.dim(0)?)?;
*rhs_sum_grad = rhs_sum_grad.add(&rhs_grad)?;
let lhs_sum_grad = grads.or_insert(lhs)?;
let lhs_grad = grad.slice_scatter0(&rhs.zeros_like()?, *start_rhs)?;
*lhs_sum_grad = lhs_sum_grad.add(&lhs_grad)?
}
Op::Gather(arg, indexes, dim) => {
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.scatter_add(indexes, &grad, *dim)?;
}
Op::ScatterAdd(init, indexes, src, dim) => {
let init_sum_grad = grads.or_insert(init)?;
*init_sum_grad = init_sum_grad.add(&grad)?;
let src_grad = grad.gather(indexes, *dim)?;
let src_sum_grad = grads.or_insert(src)?;
*src_sum_grad = src_sum_grad.add(&src_grad)?;
}
Op::IndexAdd(init, indexes, src, dim) => {
let init_sum_grad = grads.or_insert(init)?;
*init_sum_grad = init_sum_grad.add(&grad)?;
let src_grad = grad.index_select(indexes, *dim)?;
let src_sum_grad = grads.or_insert(src)?;
*src_sum_grad = src_sum_grad.add(&src_grad)?;
}
Op::IndexSelect(arg, indexes, dim) => {
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.index_add(indexes, &grad, *dim)?;
}
Op::Matmul(lhs, rhs) => {
                        // The forward op already succeeded, so we skip the
                        // matmul size checks here.
let lhs_grad = grad.matmul(&rhs.t()?)?;
let lhs_sum_grad = grads.or_insert(lhs)?;
*lhs_sum_grad = lhs_sum_grad.add(&lhs_grad)?;
let rhs_grad = lhs.t()?.matmul(&grad)?;
let rhs_sum_grad = grads.or_insert(rhs)?;
*rhs_sum_grad = rhs_sum_grad.add(&rhs_grad)?;
}
Op::Cat(args, dim) => {
let mut start_idx = 0;
for arg in args {
let len = arg.dims()[*dim];
let arg_grad = grad.narrow(*dim, start_idx, len)?;
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&arg_grad)?;
start_idx += len;
}
}
Op::Broadcast(arg) => {
let arg_dims = arg.dims();
let node_dims = node.dims();
// The number of dims that have been inserted on the left.
let left_dims = node_dims.len() - arg_dims.len();
let mut sum_dims: Vec<usize> = (0..left_dims).collect();
for (dim, (node_dim, arg_dim)) in node_dims[left_dims..]
.iter()
.zip(arg_dims.iter())
.enumerate()
{
if node_dim != arg_dim {
sum_dims.push(dim + left_dims)
}
}
let mut arg_grad = grad.sum_keepdim(sum_dims.as_slice())?;
for _i in 0..left_dims {
arg_grad = arg_grad.squeeze(0)?
}
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&arg_grad.broadcast_as(sum_grad.dims())?)?;
}
Op::Reduce(arg, ReduceOp::Sum, reduced_dims) => {
let grad = broadcast_back(arg, &grad, reduced_dims)?;
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&grad)?;
}
Op::Reduce(arg, ReduceOp::Max, reduced_dims) => {
let node = broadcast_back(arg, node, reduced_dims)?;
let grad = broadcast_back(arg, &grad, reduced_dims)?;
let grad = node.eq(arg)?.to_dtype(grad.dtype())?.mul(&grad)?;
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&grad.broadcast_as(sum_grad.dims())?)?;
}
Op::Reduce(arg, ReduceOp::Min, reduced_dims) => {
let node = broadcast_back(arg, node, reduced_dims)?;
let grad = broadcast_back(arg, &grad, reduced_dims)?;
let grad = node.eq(arg)?.to_dtype(grad.dtype())?.mul(&grad)?;
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&grad.broadcast_as(sum_grad.dims())?)?;
}
Op::ToDType(arg) => {
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&grad.to_dtype(arg.dtype())?)?
}
Op::Copy(arg) => {
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&grad)?
}
Op::Affine { arg, mul, .. } => {
let arg_grad = grad.affine(*mul, 0.)?;
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&arg_grad)?
}
Op::Unary(arg, UnaryOp::Log) => {
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&(grad / arg)?)?
}
Op::Unary(arg, UnaryOp::Sin) => {
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&(&grad * arg.cos())?)?
}
Op::Unary(arg, UnaryOp::Cos) => {
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.sub(&(&grad * arg.sin())?)?
}
Op::Unary(arg, UnaryOp::Tanh) => {
let sum_grad = grads.or_insert(arg)?;
let minus_dtanh = (node.sqr()? - 1.)?;
*sum_grad = sum_grad.sub(&(&grad * &minus_dtanh)?)?
}
Op::Unary(arg, UnaryOp::Abs) => {
let sum_grad = grads.or_insert(arg)?;
let ones = arg.ones_like()?;
let abs_grad = arg.ge(&arg.zeros_like()?)?.where_cond(&ones, &ones.neg()?);
*sum_grad = sum_grad.add(&(&grad * abs_grad)?)?
}
Op::Unary(arg, UnaryOp::Exp) => {
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&(&grad * *node)?)?
}
Op::Unary(arg, UnaryOp::Neg) => {
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.sub(&grad)?
}
Op::Unary(arg, UnaryOp::Recip) => {
let sum_grad = grads.or_insert(arg)?;
let grad = (grad / arg.sqr()?)?;
*sum_grad = sum_grad.sub(&grad)?
}
&Op::Narrow(ref arg, dim, start_idx, len) => {
let arg_dims = arg.dims();
let left_pad = if start_idx == 0 {
None
} else {
let mut dims = arg_dims.to_vec();
dims[dim] = start_idx;
Some(Tensor::zeros(dims, grad.dtype(), grad.device())?)
};
let right_pad = arg_dims[dim] - start_idx - len;
let right_pad = if right_pad == 0 {
None
} else {
let mut dims = arg_dims.to_vec();
dims[dim] = right_pad;
Some(Tensor::zeros(dims, grad.dtype(), grad.device())?)
};
let arg_grad = match (left_pad, right_pad) {
(None, None) => grad,
(Some(l), None) => Tensor::cat(&[&l, &grad], dim)?,
(None, Some(r)) => Tensor::cat(&[&grad, &r], dim)?,
(Some(l), Some(r)) => Tensor::cat(&[&l, &grad, &r], dim)?,
};
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&arg_grad)?
}
Op::Unary(_, UnaryOp::Floor)
| Op::Unary(_, UnaryOp::Round)
| Op::Reduce(_, ReduceOp::ArgMin, _)
| Op::Reduce(_, ReduceOp::ArgMax, _)
| Op::Unary(_, UnaryOp::Sign)
| Op::Cmp(_, _) => {}
Op::Reshape(arg) => {
let arg_grad = grad.reshape(arg.dims())?;
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&arg_grad)?
}
Op::Unary(_, UnaryOp::Ceil) => Err(Error::BackwardNotSupported { op: "ceil" })?,
Op::Unary(arg, UnaryOp::Gelu) => {
let sum_grad = grads.or_insert(arg)?;
let cube = arg.powf(3.)?;
let tanh = (0.0356774 * &cube + (0.797885 * arg)?)?.tanh()?;
let gelu_grad = (((0.5 * &tanh)?
+ (0.0535161 * cube + (0.398942 * arg)?)? * (1. - tanh.powf(2.)?))?
+ 0.5)?;
*sum_grad = sum_grad.add(&(&grad * gelu_grad)?)?
}
Op::Unary(arg, UnaryOp::Erf) => {
let sum_grad = grads.or_insert(arg)?;
// d/dx erf(x) = 2/sqrt(pi) * e^(-x^2)
let erf_grad =
(2. / std::f64::consts::PI.sqrt()) * (arg.sqr()?.neg()?).exp()?;
*sum_grad = sum_grad.add(&(&grad * erf_grad)?)?
}
Op::Unary(arg, UnaryOp::GeluErf) => {
let sum_grad = grads.or_insert(arg)?;
// d/dx gelu_erf(x) = 0.5 + 0.398942 e^(-x^2/2) x + 0.5 erf(x/sqrt(2))
let neg_half_square = (arg.sqr()?.neg()? / 2.)?;
let scaled_exp_arg = (0.398942 * neg_half_square.exp()? * arg)?;
let arg_scaled_sqrt = (arg / 2f64.sqrt())?;
let erf_scaled_sqrt = (0.5 * arg_scaled_sqrt.erf()?)?;
let gelu_erf_grad = (0.5 + scaled_exp_arg + erf_scaled_sqrt)?;
*sum_grad = sum_grad.add(&(&grad * gelu_erf_grad)?)?;
}
Op::Unary(arg, UnaryOp::Relu) => {
let sum_grad = grads.or_insert(arg)?;
let relu_grad = arg.ge(&arg.zeros_like()?)?.to_dtype(arg.dtype())?;
*sum_grad = sum_grad.add(&(&grad * relu_grad)?)?
}
Op::Unary(arg, UnaryOp::Silu) => {
let sum_grad = grads.or_insert(arg)?;
// d/dx silu = sigmoid(x) * (1 + x * (1 - sigmoid(x))) = sigmoid(x) * (1 - node) + node
let sigmoid_arg = (arg.neg()?.exp()? + 1.)?.recip()?;
let silu_grad = &sigmoid_arg * (1. - *node) + *node;
*sum_grad = sum_grad.add(&(&grad * silu_grad)?)?
}
Op::Elu(arg, alpha) => {
// d/dx elu(x) = 1 for x > 0, alpha * e^x for x <= 0
let sum_grad = grads.or_insert(arg)?;
let zeros = arg.zeros_like()?;
let positive_mask = arg.gt(&zeros)?.to_dtype(arg.dtype())?;
let negative_mask = arg.le(&zeros)?.to_dtype(arg.dtype())?;
// node == alpha * (e^x - 1) for x <= 0, reuse it
let negative_exp_mask = (negative_mask * (*node + *alpha))?;
let combined_mask = (positive_mask + negative_exp_mask)?;
*sum_grad = sum_grad.add(&(grad * combined_mask)?)?
}
Op::Powf(arg, e) => {
let arg_grad = (&(grad * arg.powf(e - 1.)?)? * *e)?;
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&arg_grad)?
}
Op::CustomOp1(arg, c) => {
if let Some(arg_grad) = c.bwd(arg, node, &grad)? {
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&arg_grad)?
}
}
Op::CustomOp2(arg1, arg2, c) => {
let (arg_grad1, arg_grad2) = c.bwd(arg1, arg2, node, &grad)?;
if let Some(arg_grad1) = arg_grad1 {
let sum_grad = grads.or_insert(arg1)?;
*sum_grad = sum_grad.add(&arg_grad1)?
}
if let Some(arg_grad2) = arg_grad2 {
let sum_grad = grads.or_insert(arg2)?;
*sum_grad = sum_grad.add(&arg_grad2)?
}
}
Op::CustomOp3(arg1, arg2, arg3, c) => {
let (arg_grad1, arg_grad2, arg_grad3) =
c.bwd(arg1, arg2, arg3, node, &grad)?;
if let Some(arg_grad1) = arg_grad1 {
let sum_grad = grads.or_insert(arg1)?;
*sum_grad = sum_grad.add(&arg_grad1)?
}
if let Some(arg_grad2) = arg_grad2 {
let sum_grad = grads.or_insert(arg2)?;
*sum_grad = sum_grad.add(&arg_grad2)?
}
if let Some(arg_grad3) = arg_grad3 {
let sum_grad = grads.or_insert(arg3)?;
*sum_grad = sum_grad.add(&arg_grad3)?
}
}
Op::Unary(arg, UnaryOp::Sqr) => {
let arg_grad = arg.mul(&grad)?.affine(2., 0.)?;
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&arg_grad)?
}
Op::Unary(arg, UnaryOp::Sqrt) => {
let arg_grad = grad.div(node)?.affine(0.5, 0.)?;
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&arg_grad)?
}
Op::ToDevice(arg) => {
let sum_grad = grads.or_insert(arg)?;
let arg_grad = grad.to_device(sum_grad.device())?;
*sum_grad = sum_grad.add(&arg_grad)?
}
Op::Transpose(arg, dim1, dim2) => {
let arg_grad = grad.transpose(*dim1, *dim2)?;
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&arg_grad)?
}
Op::Permute(arg, dims) => {
let mut inv_dims = vec![0; dims.len()];
for (i, &dim_idx) in dims.iter().enumerate() {
inv_dims[dim_idx] = i
}
let arg_grad = grad.permute(inv_dims)?;
let sum_grad = grads.or_insert(arg)?;
*sum_grad = sum_grad.add(&arg_grad)?
}
};
}
}
Ok(grads)
}
}
/// A store for gradients, associating a tensor id to the corresponding gradient tensor, used for back propagation.
#[derive(Debug)]
pub struct GradStore(HashMap<TensorId, Tensor>);
impl GradStore {
/// Create a new gradient store
fn new() -> Self {
GradStore(HashMap::new())
}
/// Get the gradient tensor corresponding to the given tensor id
pub fn get_id(&self, id: TensorId) -> Option<&Tensor> {
self.0.get(&id)
}
/// Get the gradient tensor associated with the given tensor
pub fn get(&self, tensor: &Tensor) -> Option<&Tensor> {
self.0.get(&tensor.id())
}
/// Remove the gradient tensor associated with the given tensor, returning it if it exists
pub fn remove(&mut self, tensor: &Tensor) -> Option<Tensor> {
self.0.remove(&tensor.id())
}
/// Insert a gradient tensor associated with the given tensor, returning the previous gradient tensor if it existed
pub fn insert(&mut self, tensor: &Tensor, grad: Tensor) -> Option<Tensor> {
self.0.insert(tensor.id(), grad)
}
/// Get the gradient tensor associated with the given tensor, or, if it does not exist,
    /// insert a tensor of zeros with the same shape and dtype as the given tensor, and return it
fn or_insert(&mut self, tensor: &Tensor) -> Result<&mut Tensor> {
use std::collections::hash_map::Entry;
let grad = match self.0.entry(tensor.id()) {
Entry::Occupied(entry) => entry.into_mut(),
Entry::Vacant(entry) => {
let grad = tensor.zeros_like()?;
entry.insert(grad)
}
};
Ok(grad)
}
/// Get the tensor ids of the stored gradient tensors
pub fn get_ids(&self) -> impl Iterator<Item = &TensorId> {
self.0.keys()
}
}
| 8 |
0 | hf_public_repos/candle/candle-core | hf_public_repos/candle/candle-core/src/convert.rs | //! Implement conversion traits for tensors
use crate::{DType, Device, Error, Tensor, WithDType};
use half::{bf16, f16, slice::HalfFloatSliceExt};
use std::convert::TryFrom;
impl<T: WithDType> TryFrom<&Tensor> for Vec<T> {
type Error = Error;
fn try_from(tensor: &Tensor) -> Result<Self, Self::Error> {
tensor.to_vec1::<T>()
}
}
impl<T: WithDType> TryFrom<&Tensor> for Vec<Vec<T>> {
type Error = Error;
fn try_from(tensor: &Tensor) -> Result<Self, Self::Error> {
tensor.to_vec2::<T>()
}
}
impl<T: WithDType> TryFrom<&Tensor> for Vec<Vec<Vec<T>>> {
type Error = Error;
fn try_from(tensor: &Tensor) -> Result<Self, Self::Error> {
tensor.to_vec3::<T>()
}
}
impl<T: WithDType> TryFrom<Tensor> for Vec<T> {
type Error = Error;
fn try_from(tensor: Tensor) -> Result<Self, Self::Error> {
Vec::<T>::try_from(&tensor)
}
}
impl<T: WithDType> TryFrom<Tensor> for Vec<Vec<T>> {
type Error = Error;
fn try_from(tensor: Tensor) -> Result<Self, Self::Error> {
Vec::<Vec<T>>::try_from(&tensor)
}
}
impl<T: WithDType> TryFrom<Tensor> for Vec<Vec<Vec<T>>> {
type Error = Error;
fn try_from(tensor: Tensor) -> Result<Self, Self::Error> {
Vec::<Vec<Vec<T>>>::try_from(&tensor)
}
}
impl<T: WithDType> TryFrom<&[T]> for Tensor {
type Error = Error;
fn try_from(v: &[T]) -> Result<Self, Self::Error> {
Tensor::from_slice(v, v.len(), &Device::Cpu)
}
}
impl<T: WithDType> TryFrom<Vec<T>> for Tensor {
type Error = Error;
fn try_from(v: Vec<T>) -> Result<Self, Self::Error> {
let len = v.len();
Tensor::from_vec(v, len, &Device::Cpu)
}
}
macro_rules! from_tensor {
($typ:ident) => {
impl TryFrom<&Tensor> for $typ {
type Error = Error;
fn try_from(tensor: &Tensor) -> Result<Self, Self::Error> {
tensor.to_scalar::<$typ>()
}
}
impl TryFrom<Tensor> for $typ {
type Error = Error;
fn try_from(tensor: Tensor) -> Result<Self, Self::Error> {
$typ::try_from(&tensor)
}
}
impl TryFrom<$typ> for Tensor {
type Error = Error;
fn try_from(v: $typ) -> Result<Self, Self::Error> {
Tensor::new(v, &Device::Cpu)
}
}
};
}
from_tensor!(f64);
from_tensor!(f32);
from_tensor!(f16);
from_tensor!(bf16);
from_tensor!(i64);
from_tensor!(u32);
from_tensor!(u8);
impl Tensor {
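    /// Writes the flattened tensor data to `f` as raw little-endian bytes.
    ///
    /// Illustrative example (a sketch, assuming the CPU device):
    ///
    /// ```rust
    /// # use candle_core::{Tensor, Device};
    /// let t = Tensor::new(&[1u8, 2, 3], &Device::Cpu)?;
    /// let mut bytes: Vec<u8> = vec![];
    /// t.write_bytes(&mut bytes)?;
    /// assert_eq!(bytes, [1u8, 2, 3]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```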
pub fn write_bytes<W: std::io::Write>(&self, f: &mut W) -> crate::Result<()> {
use byteorder::{LittleEndian, WriteBytesExt};
let vs = self.flatten_all()?;
match self.dtype() {
DType::BF16 => {
let vs = vs.to_vec1::<bf16>()?;
for &v in vs.reinterpret_cast() {
f.write_u16::<LittleEndian>(v)?
}
}
DType::F16 => {
let vs = vs.to_vec1::<f16>()?;
for &v in vs.reinterpret_cast() {
f.write_u16::<LittleEndian>(v)?
}
}
DType::F32 => {
// TODO: Avoid using a buffer when data is already on the CPU.
for v in vs.to_vec1::<f32>()? {
f.write_f32::<LittleEndian>(v)?
}
}
DType::F64 => {
for v in vs.to_vec1::<f64>()? {
f.write_f64::<LittleEndian>(v)?
}
}
DType::U32 => {
for v in vs.to_vec1::<u32>()? {
f.write_u32::<LittleEndian>(v)?
}
}
DType::I64 => {
for v in vs.to_vec1::<i64>()? {
f.write_i64::<LittleEndian>(v)?
}
}
DType::U8 => {
let vs = vs.to_vec1::<u8>()?;
f.write_all(&vs)?;
}
}
Ok(())
}
}
| 9 |
0 | hf_public_repos/api-inference-community/docker_images/stanza/app | hf_public_repos/api-inference-community/docker_images/stanza/app/pipelines/__init__.py | from app.pipelines.base import Pipeline, PipelineException # isort:skip
from app.pipelines.token_classification import TokenClassificationPipeline
| 0 |
0 | hf_public_repos/api-inference-community/docker_images/stanza/app | hf_public_repos/api-inference-community/docker_images/stanza/app/pipelines/token_classification.py | import os
from typing import Any, Dict, List
import stanza
from app.pipelines import Pipeline
from stanza import Pipeline as pipeline
class TokenClassificationPipeline(Pipeline):
def __init__(
self,
model_id: str,
):
namespace, model_name = model_id.split("/")
path = os.path.join(
os.environ.get("HUGGINGFACE_HUB_CACHE", "."), namespace, model_name
)
lang = model_name.replace("stanza-", "")
stanza.download(model_dir=path, lang=lang)
self.model = pipeline(model_dir=path, lang=lang)
def __call__(self, inputs: str) -> List[Dict[str, Any]]:
"""
Args:
inputs (:obj:`str`):
a string containing some text
Return:
            A :obj:`list`. The object returned should be like [{"entity_group": "XXX", "word": "some word", "start": 3, "end": 6, "score": 0.82}] containing:
            - "entity_group": A string representing what the entity is.
            - "word": A substring of the original string that was detected as an entity.
            - "start": the starting offset within `inputs` of the detected entity. inputs[start:end] == word
            - "end": the ending offset within `inputs` of the detected entity. inputs[start:end] == word
- "score": A score between 0 and 1 describing how confident the model is for this entity.
"""
doc = self.model(inputs)
entities = []
if "ner_model_path" in self.model.config.keys():
for entity in doc.entities:
entity_dict = {
"entity_group": entity.type,
"word": entity.text,
"start": entity.start_char,
"end": entity.end_char,
"score": 1.0,
}
entities.append(entity_dict)
else:
for sent in doc.sentences:
for entity in sent.words:
entity_dict = {
"entity_group": entity.upos,
"word": entity.text,
"start": entity.start_char,
"end": entity.end_char,
"score": 1.0,
}
entities.append(entity_dict)
return entities
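# Illustrative usage sketch (not executed here; the model id matches the ones used
# in the tests and the weights are downloaded on first use):
#
#   pipe = TokenClassificationPipeline("stanfordnlp/stanza-en")
#   print(pipe("Hello, my name is John and I live in New York"))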
| 1 |
0 | hf_public_repos/api-inference-community/docker_images/stanza | hf_public_repos/api-inference-community/docker_images/stanza/tests/test_docker_build.py | import os
import subprocess
from unittest import TestCase
class cd:
"""Context manager for changing the current working directory"""
def __init__(self, newPath):
self.newPath = os.path.expanduser(newPath)
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
class DockerBuildTestCase(TestCase):
def test_can_build_docker_image(self):
with cd(os.path.dirname(os.path.dirname(__file__))):
subprocess.check_output(["docker", "build", "."])
| 2 |
0 | hf_public_repos/api-inference-community/docker_images/stanza | hf_public_repos/api-inference-community/docker_images/stanza/tests/test_api.py | import os
from typing import Dict, List
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS, get_pipeline
# Must contain at least one example of each implemented pipeline
# Tests do not check the actual values of the model output, so small dummy
# models are recommended for faster tests.
TESTABLE_MODELS: Dict[str, List[Dict]] = {
"token-classification": [
{"stanfordnlp/stanza-en": "Hello, my name is John and I live in New York"},
{"stanfordnlp/stanza-tr": "Merhaba, adım Merve ve İstanbul'da yaşıyorum."},
]
}
ALL_TASKS = {
"audio-classification",
"audio-to-audio",
"automatic-speech-recognition",
"feature-extraction",
"image-classification",
"question-answering",
"sentence-similarity",
"structured-data-classification",
"speech-segmentation",
"text-to-speech",
"token-classification",
}
class PipelineTestCase(TestCase):
@skipIf(
os.path.dirname(os.path.dirname(__file__)).endswith("common"),
"common is a special case",
)
def test_has_at_least_one_task_enabled(self):
self.assertGreater(
len(ALLOWED_TASKS.keys()), 0, "You need to implement at least one task"
)
def test_unsupported_tasks(self):
unsupported_tasks = ALL_TASKS - ALLOWED_TASKS.keys()
for unsupported_task in unsupported_tasks:
with self.subTest(msg=unsupported_task, task=unsupported_task):
os.environ["TASK"] = unsupported_task
os.environ["MODEL_ID"] = "XX"
with self.assertRaises(EnvironmentError):
get_pipeline()
| 3 |
0 | hf_public_repos/api-inference-community/docker_images/stanza | hf_public_repos/api-inference-community/docker_images/stanza/tests/test_api_token_classification.py | import json
import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from parameterized import parameterized_class
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
"token-classification" not in ALLOWED_TASKS,
"token-classification not implemented",
)
@parameterized_class(
("model_id", "inputs"),
[
[[model_id, lang[model_id]] for model_id in lang]
for lang in TESTABLE_MODELS["token-classification"]
],
)
class TokenClassificationTestCase(TestCase):
def setUp(self):
model_id = self.model_id[0] # changed
self.old_model_id = os.getenv("MODEL_ID")
self.old_task = os.getenv("TASK")
os.environ["MODEL_ID"] = model_id
os.environ["TASK"] = "token-classification"
from app.main import app
self.app = app
@classmethod
def setUpClass(cls):
from app.main import get_pipeline
get_pipeline.cache_clear()
def tearDown(self):
if self.old_model_id is not None:
os.environ["MODEL_ID"] = self.old_model_id
else:
del os.environ["MODEL_ID"]
if self.old_task is not None:
os.environ["TASK"] = self.old_task
else:
del os.environ["TASK"]
def test_simple(self):
inputs = self.model_id[1] # changed
with TestClient(self.app) as client:
response = client.post("/", json={"inputs": inputs})
self.assertEqual(
response.status_code,
200,
)
content = json.loads(response.content)
self.assertEqual(type(content), list)
self.assertEqual(
set(k for el in content for k in el.keys()),
{"entity_group", "word", "start", "end", "score"},
)
with TestClient(self.app) as client:
response = client.post("/", json=inputs)
self.assertEqual(
response.status_code,
200,
)
content = json.loads(response.content)
self.assertEqual(type(content), list)
self.assertEqual(
set(k for el in content for k in el.keys()),
{"entity_group", "word", "start", "end", "score"},
)
def test_malformed_question(self):
with TestClient(self.app) as client:
response = client.post("/", data=b"\xc3\x28")
self.assertEqual(
response.status_code,
400,
)
self.assertEqual(
response.content,
b'{"error":"\'utf-8\' codec can\'t decode byte 0xc3 in position 0: invalid continuation byte"}',
)
| 4 |
0 | hf_public_repos/api-inference-community/docker_images | hf_public_repos/api-inference-community/docker_images/allennlp/requirements.txt | starlette==0.27.0
numpy==1.22.0
allennlp>=2.5.0,<3.0.0
# Even though it is not imported, it is actually required.
allennlp_models>=2.5.0,<3.0.0
api-inference-community==0.0.23
huggingface_hub==0.5.1
| 5 |
0 | hf_public_repos/api-inference-community/docker_images | hf_public_repos/api-inference-community/docker_images/allennlp/Dockerfile | FROM tiangolo/uvicorn-gunicorn:python3.8
LABEL maintainer="me <[email protected]>"
# Add any system dependency here
# RUN apt-get update -y && apt-get install libXXX -y
COPY ./requirements.txt /app
RUN pip install --no-cache-dir -r requirements.txt
RUN pip install spacy && python -m spacy download en_core_web_sm
COPY ./prestart.sh /app/
# Most DL models are quite large in terms of memory, using workers is a HUGE
# slowdown because of the fork and GIL with python.
# Using multiple pods seems like a better default strategy.
# Feel free to override if it does not make sense for your library.
ARG max_workers=1
ENV MAX_WORKERS=$max_workers
ENV ALLENNLP_CACHE_ROOT=/data
ENV NLTK_DATA=/data
ENV HOME=/data
# Necessary on GPU environment docker.
# TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose
# rendering TIMEOUT defined by uvicorn impossible to use correctly
# We're overriding it to be renamed UVICORN_TIMEOUT
# UVICORN_TIMEOUT is a useful variable for very large models that take more
# than 30s (the default) to load in memory.
# If UVICORN_TIMEOUT is too low, uvicorn will simply never load as it will
# keep killing workers before they finish loading.
RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py
COPY ./app /app/app
| 6 |
0 | hf_public_repos/api-inference-community/docker_images | hf_public_repos/api-inference-community/docker_images/allennlp/prestart.sh | python app/main.py
| 7 |
0 | hf_public_repos/api-inference-community/docker_images/allennlp | hf_public_repos/api-inference-community/docker_images/allennlp/app/main.py | import functools
import logging
import os
from typing import Dict, Type
from api_inference_community.routes import pipeline_route, status_ok
from app.pipelines import Pipeline, QuestionAnsweringPipeline
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.gzip import GZipMiddleware
from starlette.routing import Route
TASK = os.getenv("TASK")
MODEL_ID = os.getenv("MODEL_ID")
logger = logging.getLogger(__name__)
# Add the allowed tasks
# Supported tasks are:
# - text-generation
# - text-classification
# - token-classification
# - translation
# - summarization
# - automatic-speech-recognition
# - ...
# For instance
# from app.pipelines import AutomaticSpeechRecognitionPipeline
# ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline}
# You can check the requirements and expectations of each pipelines in their respective
# directories. Implement directly within the directories.
ALLOWED_TASKS: Dict[str, Type[Pipeline]] = {
"question-answering": QuestionAnsweringPipeline
}
@functools.lru_cache()
def get_pipeline() -> Pipeline:
task = os.environ["TASK"]
model_id = os.environ["MODEL_ID"]
if task not in ALLOWED_TASKS:
        raise EnvironmentError(f"{task} is not a valid pipeline for model: {model_id}")
return ALLOWED_TASKS[task](model_id)
routes = [
Route("/{whatever:path}", status_ok),
Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]
middleware = [Middleware(GZipMiddleware, minimum_size=1000)]
if os.environ.get("DEBUG", "") == "1":
from starlette.middleware.cors import CORSMiddleware
middleware.append(
Middleware(
CORSMiddleware,
allow_origins=["*"],
allow_headers=["*"],
allow_methods=["*"],
)
)
app = Starlette(routes=routes, middleware=middleware)
@app.on_event("startup")
async def startup_event():
logger = logging.getLogger("uvicorn.access")
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
logger.handlers = [handler]
# Link between `api-inference-community` and framework code.
app.get_pipeline = get_pipeline
try:
get_pipeline()
except Exception:
# We can fail so we can show exception later.
pass
if __name__ == "__main__":
try:
get_pipeline()
except Exception:
# We can fail so we can show exception later.
pass
| 8 |
0 | hf_public_repos/api-inference-community/docker_images/allennlp/app | hf_public_repos/api-inference-community/docker_images/allennlp/app/pipelines/base.py | from abc import ABC, abstractmethod
from typing import Any, Optional
class Pipeline(ABC):
task: Optional[str] = None
model_id: Optional[str] = None
@abstractmethod
def __init__(self, model_id: str):
raise NotImplementedError("Pipelines should implement an __init__ method")
@abstractmethod
def __call__(self, inputs: Any) -> Any:
raise NotImplementedError("Pipelines should implement a __call__ method")
class PipelineException(Exception):
pass
| 9 |
0 | hf_public_repos | hf_public_repos/blog/fine-video.md | ---
title: "FineVideo: behind the scenes"
thumbnail: /blog/assets/186_fine_video/thumbnail.png
authors:
- user: mfarre
- user: andito
- user: lewtun
- user: lvwerra
- user: pcuenq
- user: thomwolf
---
<center>
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/finevideo/logo.png" alt="FineVideo logo" style="width: 50%;"><br>
</center>
Open video datasets are scarce, and this scarcity is slowing down the development of open-source video AI. For this reason, we built [FineVideo](https://huggingface.co/spaces/HuggingFaceFV/FineVideo-Explorer), a dataset of 43k videos spanning 3.4k hours, annotated with rich descriptions, narrative details, scene splits, and QA pairs.
FineVideo contains a highly diverse collection of videos and metadata which makes it a good ingredient to train models to understand video content, train diffusion models to generate videos from a text description or train computer vision models using its structured data as input.
Wait, you haven’t seen FineVideo yet? Take a look at it through the [dataset explorer page](https://huggingface.co/spaces/HuggingFaceFV/FineVideo-Explorer).
<center>
<br>
<a href="https://huggingface.co/spaces/HuggingFaceFV/FineVideo-Explorer">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/finevideo/finevideo.gif" alt="FineVideo Explorer" style="width: 60%;">
</a>
<br><br>
</center>
## Table of Contents
- [Table of Contents](#table-of-contents)
- [About this blog post](#about-this-blog-post)
- [Building the Raw dataset](#building-the-raw-dataset)
- [Filtering YouTube-Commons](#filtering-youtube-commons)
- [Downloading the videos](#downloading-the-videos)
- [Keeping dynamic content](#keeping-dynamic-content)
- [Word density filtering](#word-density-filtering)
- [Visual dynamism filtering](#visual-dynamism-filtering)
- [Video Categorization](#video-categorization)
- [Custom built Taxonomy](#custom-built-taxonomy)
- [Content annotation](#content-annotation)
- [Feedback loop taxonomy - content annotation](#feedback-loop-taxonomy---content-annotation)
- [Contributing descriptive metadata](#contributing-descriptive-metadata)
- [Long videos \& Gemini 1.5 Pro](#long-videos--gemini-15-pro)
- [Content selection](#content-selection)
- [Annotating with Gemini 1.5 Pro and Structured Output with GPT4o](#annotating-with-gemini-15-pro-and-structured-output-with-gpt4o)
- [Fine Alignment and anomaly filtering](#fine-alignment-and-anomaly-filtering)
- [Future Work](#future-work)
## About this blog post
In this blog post, we share the technical details and code involved in developing FineVideo: a journey that starts with 1.9M videos in [YouTube-Commons](https://huggingface.co/datasets/PleIAs/YouTube-Commons) and ends with 44K videos with all details annotated.
A good way to start is taking a look at the different steps of our journey. Those steps involve content filtering, annotation and output structuring.
<center>
<br>
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/finevideo/dataset-creation.png" alt="Dataset Creation" style="width: 70%;">
<figcaption style="font-style: italic;">FineVideo video filtering and annotation pipeline</figcaption>
<br><br>
</center>
In the following sections we discuss each of the steps and provide references to relevant parts of the code. If you prefer to navigate the code directly, take a look at our FineVideo repository on [Github](https://github.com/mfarre/fineVideo).
First, let’s have a look how we got an initial list of YouTube videos and how we apply some first filters.
<br>
## Building the Raw dataset
Our journey starts in [YouTube-Commons](https://huggingface.co/datasets/PleIAs/YouTube-Commons): a collection of audio transcripts of videos shared on YouTube under a CC-By license. The project was created and is currently maintained by [PleIAs](https://pleias.fr/) as part of their corpus collection projects.
<br>
### Filtering YouTube-Commons
YouTube-Commons contains videos and transcripts in a diverse set of languages; our initial task is to narrow its content down to a single language.
We filter YouTube-Commons for videos in English and at the same time we gather relevant metadata. From this initial filtering, we collect 1.9M videos, their closed captions and metadata.
Below are some details on the filters and metadata fields that we keep:
**Filters**
<div style="text-align: center;margin: auto; width: 80%;">
| **Field** | **Filter value** | **Description** |
| --- | --- | --- |
| original_language | en | videos in English|
| transcription_language | en | transcripts in English |
</div>
<br>
**Metadata fields**
<div style="text-align: center;margin: auto; width: 80%;">
<details>
<summary>Click to Expand Metadata Fields</summary>
<table style="width: 100%; margin-top: 10px;">
<tr>
<th>Field</th>
<th>Description</th>
</tr>
<tr>
<td>acodec</td>
<td>audio codec</td>
</tr>
<tr>
<td>age_limit</td>
<td>YouTube age restrictions for the video</td>
</tr>
<tr>
<td>categories</td>
<td>YouTube video category</td>
</tr>
<tr>
<td>channel</td>
<td>YouTube channel</td>
</tr>
<tr>
<td>channel_follower_count</td>
<td>Number of subscribed users to the channel</td>
</tr>
<tr>
<td>channel_id</td>
<td>YouTube channel identifier</td>
</tr>
<tr>
<td>character_count</td>
<td>Number of characters in the closed caption</td>
</tr>
<tr>
<td>comment_count</td>
<td>Number of comments in YouTube</td>
</tr>
<tr>
<td>description</td>
<td>YouTube video description</td>
</tr>
<tr>
<td>duration_string</td>
<td>Video duration in hh:mm:ss format</td>
</tr>
<tr>
<td>license</td>
<td>Video License</td>
</tr>
<tr>
<td>like_count</td>
<td>Number of video likes in YouTube</td>
</tr>
<tr>
<td>resolution</td>
<td>Pixel resolution of the video in the format Width x Height</td>
</tr>
<tr>
<td>tags</td>
<td>YouTube free text tags associated with the video</td>
</tr>
<tr>
<td>text</td>
<td>Closed Caption</td>
</tr>
<tr>
<td>title</td>
<td>YouTube video title</td>
</tr>
<tr>
<td>upload_date</td>
<td>YouTube upload date</td>
</tr>
<tr>
<td>vcodec</td>
<td>Video Codec</td>
</tr>
<tr>
<td>video_id</td>
<td>YouTube video identifier</td>
</tr>
<tr>
<td>view_count</td>
<td>Number of views in YouTube</td>
</tr>
<tr>
<td>word_count</td>
<td>Number of words in the closed caption</td>
</tr>
</table>
</details>
</div>
<br>
Code for content filtering and metadata gathering available here [[link](https://github.com/mfarre/fineVideo/blob/main/rawdataset/filter-yt-commons.py)]
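As an illustration, the filtering step might look like the sketch below with 🤗 Datasets. The field names come from the tables above, but the loading details are an assumption; the linked script is the authoritative version.

```python
from datasets import load_dataset

# Sketch: stream YouTube-Commons and keep English videos with English transcripts.
yt_commons = load_dataset("PleIAs/YouTube-Commons", split="train", streaming=True)

english_rows = yt_commons.filter(
    lambda row: row["original_language"] == "en"
    and row["transcription_language"] == "en"
)

for row in english_rows.take(3):  # peek at a few filtered rows
    print(row["video_id"], row["title"])
```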
### Downloading the videos
Once we had a target list of 1.9M videos, we managed to successfully download 1.8M of them (some of the videos were removed by the channel owners and some had their permissions changed).
We explored two different approaches for distributed downloading.
<u><b>Option 1: Video2dataset</b></u>
video2dataset is an open-source project [[link](https://github.com/iejMac/video2dataset)] that focuses on distributed video download, transformation and packaging in different dataset formats. The project natively supports Slurm Workload Manager and therefore we could run it in our CPU cluster.
<center>
<br>
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/finevideo/video2dataset_overview.png" alt="Dataset Creation" style="width: 60%;">
<figcaption style="font-style: italic;">Source: Video2Dataset GitHub page</figcaption>
<br><br>
</center>
As all our cluster instances face the internet with the same public IP, we contributed to the project the ability to specify a proxy to facilitate video downloads. While the feature is not yet merged, you can patch video2dataset with our PR [[link](https://github.com/iejMac/video2dataset/pull/350)] to use the proxy capabilities.
<br>
<u><b>Option 2: Cloud batch jobs</b></u>
<br><br>
Most cloud providers make it possible to run jobs by simply defining the type of instance that will execute each job, defining a queue, and providing a container with the code to be executed.
We used Google Cloud and AWS to run a custom-made docker container that downloads videos and metadata with [ytdlp](https://github.com/yt-dlp/yt-dlp) and pushes the results to S3.
The files to build the Docker container can be found here [[code](https://github.com/mfarre/fineVideo/tree/main/rawdataset/ytdlps3)].
<u><b>Our conclusion</b></u>
While Video2Dataset was functional with a proxy and allowed us to do additional processing steps, the number of requests per second we could send through the proxy became a bottleneck. This made us pivot towards cloud batch jobs.
## Keeping dynamic content
In our search for the best videos, we narrowed down our selection to content where there is both visual action and people speaking at a mid-fast pace. We achieve this with word density filtering and visual dynamism filtering.
### Word density filtering
We took the density of words in the video as a proxy of audio dynamism. The definition of word density being:
`Word density = Number of words in closed captions / Total video length in seconds`
By sampling and visually evaluating the quality of the content at different density thresholds, we decided to remove all videos with a word density lower than 0.5 words/second.
Examples:
<div style="text-align: center;margin: auto; width: 50%;">
| **Word density** | **Example** |
| --- | --- |
| 0.25 | <iframe width="200" height="113" src="https://www.youtube.com/embed/mqAeYCSP1wA" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> |
| 0.5 | <iframe width="200" height="113" src="https://www.youtube.com/embed/eLtOfmzdU_o" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> |
| 0.75 | <iframe width="200" height="113" src="https://www.youtube.com/embed/nx9yfGgXK6s" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> |
| 1.0 | <iframe width="200" height="113" src="https://www.youtube.com/embed/7xMDfivSrkg" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> |
</div>
The code to filter by word density and explore examples can be found here [[link](https://github.com/mfarre/fineVideo/blob/main/dynamicfilters/worddensityfiltering.py)]
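As a minimal sketch of this filter (the threshold is the one stated above; the linked script is the authoritative version):

```python
WORD_DENSITY_THRESHOLD = 0.5  # words per second, chosen by visually evaluating samples

def word_density(num_caption_words: int, duration_s: float) -> float:
    # Word density = number of words in closed captions / total video length in seconds
    return num_caption_words / duration_s

def keep_video(num_caption_words: int, duration_s: float) -> bool:
    return word_density(num_caption_words, duration_s) >= WORD_DENSITY_THRESHOLD
```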
### Visual dynamism filtering
We repurposed FFMPEG’s [Freezedetect filter](https://ffmpeg.org/ffmpeg-filters.html#freezedetect) to judge the dynamism of the video. While this filter is designed to identify frozen sections of a video (multiple equal frames placed one after the other), we could also identify chunks with low movement by exaggerating the `noise` parameter to a very high value.
Rather than running freezedetect across the full video, we analyzed the video in temporal segments and voted on whether the video was static based on the share of segments categorized as static. Through manual evaluation, we set a threshold to discard a video if 40% of its analyzed segments have low movement.
Some types of content discarded after this filtering:
<div style="text-align: center;margin: auto; width: 50%;">
| **Type** | **Example** |
| --- | --- |
| Static image with music | <iframe width="200" height="113" src="https://www.youtube.com/embed/-3PjwEGxu9w" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> |
| Presentation screen cast | <iframe width="200" height="113" src="https://www.youtube.com/embed/-72DqMfjtF8" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> |
| Highly static people talking to camera | <iframe width="200" height="113" src="https://www.youtube.com/embed/0-KRYKbg_T8" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> |
</div>
The DockerFile and code to classify video by its dynamism can be found here [[link](https://github.com/mfarre/fineVideo/tree/main/dynamicfilters/videodynamismfiltering)]
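For illustration, invoking freezedetect on one temporal segment and voting across segments might look like the sketch below. The `noise` tolerance, segment length, and function names are our own illustrative choices here, not the production settings.

```python
import subprocess

def segment_is_static(path: str, start_s: float, length_s: float) -> bool:
    """Run FFmpeg's freezedetect on one temporal segment and report whether it
    flagged a freeze. The noise tolerance is exaggerated so that low-movement
    (not just frozen) footage is detected; values are illustrative."""
    cmd = [
        "ffmpeg", "-hide_banner", "-nostats",
        "-ss", str(start_s), "-t", str(length_s), "-i", path,
        "-vf", "freezedetect=n=0.05:d=2",  # high noise tolerance, 2s minimum freeze
        "-f", "null", "-",
    ]
    result = subprocess.run(cmd, capture_output=True, text=True)
    return "freeze_start" in result.stderr

def video_is_static(path: str, duration_s: float, segment_s: float = 60.0) -> bool:
    starts = range(0, int(duration_s), int(segment_s))
    flags = [segment_is_static(path, s, segment_s) for s in starts]
    # Discard the video when 40% or more of its segments show low movement.
    return sum(flags) / max(len(flags), 1) >= 0.40
```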
From the 1.8M videos analyzed, after this step we keep 600K dynamic videos. At this stage, we dig deeper into the content of the videos, which will be key to ensure diversity in the dataset.
## Video Categorization
In order to achieve the most diverse content selection, we categorized the 600K filtered assets using the closed captioning and YouTube metadata. As a way to gain control over the categorization, we created a taxonomy and guided the annotation process to adhere to it.
### Custom built Taxonomy
We bootstrapped the custom-built taxonomy using GPT4-o, and an information scientist reviewed and adjusted it. The taxonomy contains 126 fine-grained categories aggregated across multiple levels. This multi-level approach allows users of FineVideo to slice the dataset to fit their particular use case.

The taxonomy is also available in JSON [[link](https://github.com/mfarre/fineVideo/blob/main/videocategorization/content_taxonomy.json)]
We started content annotation with an initial version of the taxonomy and, by looking at the annotation results together with an information scientist, adjusted the taxonomy accordingly.
### Content annotation
We categorized the videos using Llama 3.1 70B served through Text Generation Inference [TGI](https://github.com/huggingface/text-generation-inference) [[code](https://github.com/mfarre/fineVideo/tree/main/videocategorization)].
The prompt required multiple iterations to ensure the answer is strictly a category in our taxonomy. During our prompt evaluation we learned that by removing the existing YouTube tags and categories from the prompt, the quality of our results increased drastically: YouTube metadata was biasing the text generated by Llama 3.1 towards one of the categories provided by YouTube.
```python
prompt_template = """
Given those categories: {leaves}
Classify a youtube video given its closed captioning and some metadata details. RETURN ONLY the selected category and nothing else!
Title: {title}
Description: {description}
Channel: {channel}
Closed Caption: {closed_caption}
"""
```
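To illustrate how such a prompt can be served through TGI, here is a sketch using `InferenceClient` from `huggingface_hub`. The endpoint URL, the example taxonomy leaves, the `video` record, and the decoding parameters are all placeholders.

```python
from huggingface_hub import InferenceClient

# Hypothetical TGI endpoint serving Llama 3.1 70B; the URL is a placeholder.
client = InferenceClient("http://localhost:8080")

prompt = prompt_template.format(
    leaves=", ".join(["Cooking", "Travel", "Education"]),  # taxonomy leaf names
    title=video["title"],
    description=video["description"],
    channel=video["channel"],
    closed_caption=video["text"],
)

# Low temperature keeps the answer close to a single category name.
category = client.text_generation(prompt, max_new_tokens=16, temperature=0.1).strip()
```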
### Feedback loop taxonomy - content annotation
<center>
<br>
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/finevideo/categorization-feedback-loop.png" alt="Categorization feedback loop" style="width: 40%;">
<figcaption style="font-style: italic;">Taxonomy adjustments during content categorization</figcaption>
<br><br>
</center>
One of the roles of information scientists is to curate taxonomies over time, adding new categories or extra degrees of differentiation when needed.
Using LLMs to categorize content compresses the taxonomy adjustment cycle from months or years down to hours. Furthermore, in some cases, we created categories specifically to discard sensitive videos such as the ones falling under `Firearms & Weapons` and `Substance Use & Drugs`.
## Contributing descriptive metadata
At this point of the process, we have three sources of video level metadata:
* video category (inferred with Llama 3.1)
* YouTube Metadata (title, description)
* Transcripts from YouTube-Commons
In order to contribute to the field of video understanding, we decided to go deeper into timecode-level metadata, covering for example activities, objects, narrative, and editing aspects.
We considered human annotation as part of an active learning setup in which one or more models propose annotations and a human performs a QA step. However, as we will discuss in the next sections, we found Gemini to be a good solution, especially once we constrained the input video length and the output format.
### Long videos & Gemini 1.5 Pro
We dug deeper into Gemini 1.5 Pro, iterating on our prompt and testing it with different content lengths.
Given its limitation of 1M tokens, which is approximately equivalent to ~1 hour of video, we were forced to drop videos longer than 1 hour.
An idea to overcome this situation was to accelerate videos longer than one hour so that they could fit in Gemini’s context.
<center>
<br>
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/finevideo/gemini-context-cartoon.png" alt="Gemini context" style="width: 80%;">
<figcaption style="font-style: italic;">Exploration: accelerating videos to fit more content in Gemini's context</figcaption>
<br><br>
</center>
While it seemed to work at a high level, when we started looking at the details we realized that only the first minutes of the video were accurately annotated.
Finding that quality drops on long videos made us wonder: is this an issue impacting the rest of our videos? By sampling videos of different lengths and inspecting the video coverage of the annotations, we found a reduction in quality for videos longer than 10 minutes.
Aligned with our goal to bring high-quality data back to the community, we dropped videos longer than 10 minutes.
### Content selection
Given that each hour of video costs more than $5 to annotate with Gemini, we can’t annotate all the videos remaining after filtering. Therefore, we wanted to make sure that we have good coverage of all topics, and we searched for a good compromise between content diversity for late-pre-training / fine-tuning tasks and budget. We set this size constraint to 4K hours of video.
In order to go from 600K videos to 4K hours of content we prepared an algorithm that balances content categories, user engagement, and channel representation to achieve the targeted duration.
<div style="display: flex; align-items: flex-start;">
<!-- Image on the left -->
<div style="flex: 1; text-align: center;">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/finevideo/oracle-flow.png" alt="Oracle Flow" style="max-width: 100%; height: auto;clip-path: inset(0px 0px 3px 0px);">
<p><em>Algorithm flow diagram</em></p>
</div>
<!-- Text on the right -->
<div style="flex: 1; padding-left: 20px;">
<br><br><br>
<h3>Some key parts of the content selection algorithm:</h3>
<ul>
<li><strong>Activity Score</strong>: We calculate an engagement metric for each video by combining comment, view, and like counts with weighted importance. This score helps prioritize videos that have resonated well with viewers.</li><br><br>
<li><strong>Video Selection</strong>: This step iteratively selects videos to meet the target duration while ensuring diversity. It balances between high-engagement content and representation from various categories and channels, using a penalty system to avoid overrepresentation of any single channel.</li><br><br>
<li><strong>Final Adjustment</strong>: We adjust the selection to match the target duration as closely as possible without exceeding it. It sorts the selected videos by duration and adds them to the final list until reaching the closest possible total duration to the target.</li>
</ul>
</div>
</div>
<!-- Additional text beneath the image and text -->
<div style="margin-top: 20px;">
<p>The code can be found in the repository <a href="https://github.com/mfarre/fineVideo/blob/main/contentselection/oracle.py" target="_blank">[link]</a>; a simplified sketch of the selection loop follows below.</p>
</div>
<br>
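A heavily simplified sketch of the selection loop described above: the engagement weights, the channel penalty, and the `duration_s` field are illustrative assumptions, and category balancing is omitted for brevity (the linked `oracle.py` is the real implementation).

```python
def activity_score(video: dict) -> float:
    # Engagement metric; the weights here are illustrative, not the production values.
    return 0.5 * video["like_count"] + 0.3 * video["comment_count"] + 0.2 * video["view_count"]

def select_videos(videos: list[dict], target_hours: float = 4000.0) -> list[dict]:
    target_s = target_hours * 3600
    selected, total_s = [], 0.0
    channel_counts: dict[str, int] = {}
    remaining = list(videos)
    while remaining and total_s < target_s:
        # Re-rank remaining videos, penalizing channels that are already well represented.
        remaining.sort(
            key=lambda v: activity_score(v) / (1 + channel_counts.get(v["channel"], 0)),
            reverse=True,
        )
        video = remaining.pop(0)
        if total_s + video["duration_s"] > target_s:
            continue  # final adjustment: only add videos that still fit the budget
        selected.append(video)
        total_s += video["duration_s"]
        channel_counts[video["channel"]] = channel_counts.get(video["channel"], 0) + 1
    return selected
```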
### Annotating with Gemini 1.5 Pro and Structured Output with GPT4o
<u><b>Why structured data?</b></u>
One of our goals with FineVideo is to provide structured data as a way to empower our community: if you are working on MultiModal LLMs, you can slice the data and decide which categories fit your pre-training or fine-tuning mix. If you are more into computer vision, you can directly use the dataset to train classifiers based on the numerical categories included in FineVideo such as the dynamism score, scene boundaries or audio/video correlation score.
<u><b>Structured data and Gemini 1.5</u></b>
Gemini 1.5 Pro supports JSON-based outputs when provided with a schema. We explored this feature and quickly realized two issues:
- We could not fit our original schema into Gemini because our schema is highly complex
- When we tried with slightly simpler schemas (still quite complex), the quality of the Gemini results dropped substantially: most of the scene-level data (characters, activities, props) was dropped. We tried splitting the prompt into multiple prompts and matching the different prompts to different parts of the schema, without much success.
What we observed completely matched what other researchers experienced: adding concrete schema constraints can decrease performance. ([Let Me Speak Freely? A Study on the Impact of Format Restrictions on Performance of Large Language Models](https://huggingface.co/papers/2408.02442)).
Our solution relied on generating free text with Gemini 1.5 and adding a second processing step to align Gemini's results with our schema.
The Gemini prompt that we used is the following:
```
Study the video and provide the following details about the video and the semantic scenes that compose it:
- characterList: a list of characters that appear in the whole video and a visual description that should allow me to identify them just seeing an image of them.
- scenes: a list of the scenes with the following properties:
- start/end timestamps of the scene
- list of all the characters that appear in the scene
- list of all activities and their timestamps
- list of all props and their timestamps
- list of all video editing details and their start/end timestamps. Details include transitions, effects, music as well as suggestions like segments of the scene that could be removed and why
- scene mood with notes on how the visuals, audio and context contribute to it. Use the following taxonomy returning only the name in your answer {"moods":{"Positive":[{"name":"Happy","description":"Feeling joyful, content, or delighted."},{"name":"Excited","description":"Feeling enthusiastic, energetic, or eager."},{"name":"Calm","description":"Feeling peaceful, relaxed, or serene."},{"name":"Grateful","description":"Feeling appreciative or thankful."},{"name":"Proud","description":"Feeling satisfied with one's achievements or the achievements of others."}],"Negative":[{"name":"Sad","description":"Feeling down, unhappy, or sorrowful."},{"name":"Angry","description":"Feeling irritated, frustrated, or furious."},{"name":"Anxious","description":"Feeling nervous, worried, or uneasy."},{"name":"Lonely","description":"Feeling isolated, disconnected, or abandoned."},{"name":"Bored","description":"Feeling uninterested, disengaged, or restless."}],"Neutral":[{"name":"Indifferent","description":"Feeling neither particularly positive nor negative."},{"name":"Content","description":"Feeling satisfied but not overly excited."},{"name":"Curious","description":"Feeling interested or inquisitive without strong emotion."},{"name":"Confused","description":"Feeling uncertain or unclear but without strong negative feelings."},{"name":"Pensive","description":"Feeling thoughtful or reflective without strong emotional engagement."}]}}
- specific mood changing moments inside the scene, report the timestamp and what we transition from/to in any of the dimensions (visual / auditive)
- scene narrative progression and plot development
- specific narrative moments inside the scene. Report the timestamp and what happened
- character interaction and dynamics descriptions and their start/end timestamps
- specific thematic elements and descriptions
- specific relevant happenings to create deeper meanings and subtexts not explicitly stated that contribute to the richness and depth of the content, timestamp and descriptions
- dynamism score of the scene. Score between 0 and 1. 1 is highly dynamic
- audio - visual correlation score. Score between 0 and 1. 0 what we see is not correlated with the speech and 1 is highly correlated
- storylines: a list of the different storylines found and which scenes belong to it.
- Specify where is the climax (scene and timestamp) and if the content is being presented a narrative story, or is it more like a collection of facts or non-narrative information
- if there are scenes not matching storylines, explain how those scenes contribute to the video
- looking at the overall video and the storylines, which segments of the video could be trimmed to make it more dynamic?
- q&a: a list of 5 questions/answers about the video that focus on fine details (objects and or activities), overall story reasoning and mood. Focus on Q&A aspects captured on the audio and the video whenever possible difficult to get only by looking at the transcription.
```
<br>
<u><b>Adding Instructor</u></b>
Once Gemini produced its result, we parsed it with [Instructor](https://github.com/jxnl/instructor/): a library built on top of Pydantic to achieve structured outputs given a schema. See the table with an example below.
Instructor allowed us to experiment with different models to convert the free text from Gemini into the schema that we defined in Pydantic. We tried Gemini and GPT4o, and we stuck with GPT4o given its higher success rate.
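A minimal sketch of this second step with Instructor: the Pydantic models are simplified stand-ins for our real schema, `gemini_free_text` is a placeholder for Gemini's free-text output, and the `from_openai` entry point assumes a recent Instructor version.

```python
import instructor
from openai import OpenAI
from pydantic import BaseModel

class Scene(BaseModel):
    title: str
    start: str
    end: str
    activities: list[str]

class VideoAnnotation(BaseModel):
    characterList: list[str]
    scenes: list[Scene]

client = instructor.from_openai(OpenAI())

annotation = client.chat.completions.create(
    model="gpt-4o",
    response_model=VideoAnnotation,  # Instructor validates (and retries) against this schema
    messages=[{"role": "user", "content": f"Map this annotation to the schema:\n{gemini_free_text}"}],
)
```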
<div style="text-align: center;margin: auto; width: 100%; font-size: 12px;">
<table>
<thead>
<tr>
<th><strong>Video</strong></th>
<th><strong>Gemini Output</strong></th>
<th><strong>Instructor Output</strong></th>
</tr>
</thead>
<tbody>
<tr>
<td>
<iframe width="200" height="113" src="https://www.youtube.com/embed/-E9Te2S1gXY" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
</td>
<td>
<pre style="line-height: 1.0;"><code>
CharacterList:
Man Slim build, brown eyes, shaved sides, black hoodie with colorful logo, black pants.
Scenes
Scene 1
Start 0:00 End 0:55
Characters: [Man]
Activities:
Introduces bus
Describes peaceful location with cows
Props: Bus, cows, deck.
Mood:Excited, adventure.
Narrative Progression:
Introduction to bus.
Tour begins outside, highlighting nature and relaxation.
Dynamism Score 0.7
Audio-Visual Correlation 1
</code></pre>
</td>
<td>
<pre style="line-height: 1.0;"><code>
{
"title": "Bertie the Bus Tour",
"description": "Guided tour of converted bus.",
"characterList": [
{
"name": "Narrator",
"description": "Slim build, brown eyes, shaved sides, black hoodie with colorful logo, black pants."
}
],
"scenes": [
{
"sceneId": 1,
"title": "Introduction to Bus",
"timestamps": {
"start": "0:00",
"end": "0:55"
},
"cast": ["Narrator"],
"activities": [
"Narrator speaks in front of bus",
"Shows outdoor deck with chairs, cows nearby."
],
"props": ["Bus", "Deck", "Cows"],
"mood": "Excited, adventure."
}
],
"dynamismScore": 0.7,
"audioVisualCorrelation": 1
}
</code></pre>
</td>
</tr>
</tbody>
</table>
</div>
<br>
It is worth highlighting that Gemini's content filtering dropped some videos, which is something that can also happen to you if you use Gemini. In our case, given the amount of content we were targeting, the total minutes of content dropped by Gemini's filtering were negligible.
The full code to annotate video can be found here [[link](https://github.com/mfarre/fineVideo/blob/main/contentannotation/video2annotation.py)].
<br>
## Fine Alignment and anomaly filtering
With the videos annotated and the data properly aligned to our schema, we look at the temporal domain of the data and ensure its alignment with the video: Gemini 1.5 reads video at 1 frame per second, while videos quite frequently have 25 - 29 frames per second. In our fine alignment, we make sure the scene boundaries provided by Gemini 1.5 match the correct frames in the video.
We also use this temporal alignment to discard cases where Gemini stopped providing useful data and part of the video was wrongly annotated. Notice that thanks to dropping all content longer than 10 minutes earlier in the pipeline, the number of videos with bad-quality data was negligible (lower than 0.5%).
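As an illustrative sketch of the timestamp-to-frame conversion at the heart of this step (function names and the tolerance logic are ours, not the linked script's):

```python
from typing import List, Optional

def timestamp_to_frame(ts: str, fps: float) -> int:
    """Convert a Gemini 'mm:ss' or 'hh:mm:ss' timestamp (annotated at 1 frame per
    second) to a frame index in the original 25-29 fps video."""
    parts = [int(p) for p in ts.split(":")]
    seconds = sum(p * 60**i for i, p in enumerate(reversed(parts)))
    return round(seconds * fps)

def snap_to_shot_boundary(frame: int, shot_boundaries: List[int], tolerance: int) -> Optional[int]:
    """Snap a Gemini scene boundary to the nearest detected shot boundary; return
    None (flag as an outlier) when no shot boundary lies within the tolerance."""
    nearest = min(shot_boundaries, key=lambda b: abs(b - frame))
    return nearest if abs(nearest - frame) <= tolerance else None
```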
<center>
<br>
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/finevideo/fine-alignment.png" alt="Fine Alignment" style="width: 60%;">
<figcaption style="font-style: italic;">Fine metadata - video scene boundary to shot alignment as a mechanism to discard outliers</figcaption>
<br><br>
</center>
Link to video alignment code here [[link](https://github.com/mfarre/fineVideo/blob/main/finealignment/video_alignment.py)]
## Future Work
We are currently preparing the training of a multi-modal LLM with FineVideo, and we plan to share the model weights and training recipe with the community as soon as it is completed.
We are also open to other extensions of FineVideo; speak up and tell us what you would like to see! | 0 |
0 | hf_public_repos | hf_public_repos/blog/deepspeed-to-fsdp-and-back.md | ---
title: "From DeepSpeed to FSDP and Back Again with Hugging Face Accelerate"
thumbnail: /blog/assets/deepspeed-to-fsdp-and-back/thumbnail.png
authors:
- user: mirinflim
guest: true
org: IBM
- user: aldopareja
guest: true
org: IBM
- user: muellerzr
- user: stas
guest: true
org: ContextualAI
---
# A Hugging Face Accelerate Story of Multiple Backends: FSDP and DeepSpeed
There are two popular implementations of the [ZeRO Redundancy Optimizer (ZeRO)](https://arxiv.org/abs/1910.02054) algorithm in the community, one from [DeepSpeed](https://github.com/microsoft/DeepSpeed) and the other from [PyTorch](https://pytorch.org/docs/stable/fsdp.html). Hugging Face [Accelerate](https://huggingface.co/docs/accelerate/en/index) exposes both of these frameworks for end users to train/tune their models. This blog highlights the differences in how these backends are exposed through Accelerate. To enable users to seamlessly switch between these backends, we [upstreamed a precision-related change](https://github.com/huggingface/accelerate/issues/2624) and wrote a [concept guide](https://huggingface.co/docs/accelerate/concept_guides/fsdp_and_deepspeed).
## Are FSDP and DeepSpeed Interchangeable?
Recently, we tried running a training pipeline with DeepSpeed and PyTorch FSDP. We noticed that the results obtained differed. The specific model was Mistral-7B base and it was loaded in half-precision (`bfloat16`). While the DeepSpeed (blue) loss had converged well, the FSDP (orange) loss was not decreasing, as can be seen in Figure 1.

We hypothesized that the learning rate may need scaling by the number of GPUs and bumped up the learning rate by 4x since we were using 4 GPUs. Then, we saw the following loss behavior, shown in Figure 2.

It looked like the desired behavior had been achieved by scaling the FSDP learning rate by the number of GPUs! However, when we tried a different learning rate (`1e-5`) without scaling, we observed similar loss and gradient norm characteristics for both frameworks, shown in Figure 3.

## Precision Matters
Inside the `DeepSpeed` codebase, specifically in the implementation of
`DeepSpeedZeroOptimizer_Stage3` (as the name implies, what handles doing Stage 3 optimizer sharding), we noticed that the `trainable_param_groups`, the parameter groups being trained on, pass through an
internal `_setup_for_real_optimizer` function call, which calls another function called `_create_fp32_partitions`.
As the `fp32` in the name suggests, `DeepSpeed` was performing upcasting internally, and it always keeps its master weights in `fp32` by design. This upcasting to full precision meant that the optimizer could converge at learning rates at which it would not converge in lower precision. The earlier observations were artifacts of this precision difference.
In FSDP, before the model and optimizer parameters are distributed across GPUs, they are first "flattened" to a one-dimensional tensor. FSDP and DeepSpeed use different `dtype`s for these "flattened" parameters which has ramifications for PyTorch optimizers. Table 1 outlines the processes for both frameworks; the "Local" column indicates the process occurring per-GPU, therefore the memory overhead from upcasting is amortized by the number of GPUs.
| **Process** | **Local?** | **Framework** | **Details** |
| ---------------------------------------------------------------------------------------- | ---------- | ----------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| Loading the model in (such as `AutoModel.from_pretrained(..., torch_dtype=torch_dtype)`) | ❌ | | |
| Preparation, such as creation of the "flattened parameters" | ✅ | FSDP<br>DeepSpeed | utilizes `torch_dtype`<br>disregards `torch_dtype` and is created in `float32` |
| Optimizer initialization | ✅ | FSDP<br>DeepSpeed | creates parameters in `torch_dtype`<br>creates parameters in `float32` |
| Training Step (forward, backward, reduction) | ❌ | FSDP<br>DeepSpeed | follows [fsdp.MixedPrecision](https://pytorch.org/docs/stable/fsdp.html#torch.distributed.fsdp.MixedPrecision)<br>follows `deepspeed_config_file` mixed precision settings |
| Optimizer (pre-step) | ✅ | FSDP<br>DeepSpeed | upcasting (if any) to `torch_dtype`<br>upcasting everything to `float32` |
| Optimizer (actual step) | ✅ | FSDP<br>DeepSpeed | occurs in `torch_dtype`<br>occurs in `float32` |
> Table 1: Summary of how FSDP and DeepSpeed handle mixed precision
A few takeaway points:
* As noted in an [🤗 Accelerate issue](https://github.com/huggingface/accelerate/issues/2624#issuecomment-2058402753), a rule of thumb when performing mixed precision is to keep trainable parameters in `float32`.
* Upcasting, as is done in `DeepSpeed`, may have a negligible effect on memory consumption when sharding over a large number of GPUs. However, when using `DeepSpeed` on a small number of GPUs, the 2x increase in memory consumption can be significant.
* The torch-native implementation of FSDP does not force upcasting, allowing a user to operate PyTorch optimizers in low precision. This offers more flexibility than the native upcasting of `DeepSpeed`.
## Harmonizing DeepSpeed and FSDP in 🤗 Accelerate
To better align DeepSpeed and FSDP in 🤗 Accelerate, we can perform upcasting automatically for FSDP when mixed precision is enabled. We created a pull request with this change that was included in the [0.30.0 release](https://github.com/huggingface/accelerate/releases/tag/v0.30.0).

The result of this PR is to allow FSDP to operate in two modes:
- A “mixed-precision” mode like the DeepSpeed counterpart
- A low precision mode for memory constrained scenarios, as shown in Figure 4.
The two new FSDP modes are summarized in Table 2 and compared with DeepSpeed.
| **Framework** | **Model Loading (`torch_dtype`)** | **Mixed Precision** | **Preparation (Local)** | **Training** | **Optimizer (Local)** |
| ------------------------- | --------------------------------- | ------------------- | ----------------------- | ------------ | --------------------- |
| FSDP (memory-constrained) | `bf16` | default (none) | `bf16` | `bf16` | `bf16` |
| FSDP (mixed precision mode) | `bf16` | `bf16` | `fp32` | `bf16` | `fp32` |
| DeepSpeed | `bf16` | `bf16` | `fp32` | `bf16` | `fp32` |
> Table 2: Summary of the two new FSDP modes and comparisons with DeepSpeed
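For concreteness, a sketch of how the two modes are selected from user code follows; the FSDP sharding itself is configured through the Accelerate config file, and the model choice here is just an example.

```python
import torch
from accelerate import Accelerator
from transformers import AutoModelForCausalLM

# Sketch only: with Accelerate >= 0.30.0 and an FSDP config, requesting mixed
# precision makes Accelerate upcast the flattened parameters to float32 for the
# optimizer ("mixed-precision mode"), matching DeepSpeed's behavior.
accelerator = Accelerator(mixed_precision="bf16")
# Omitting mixed_precision keeps parameters, gradients, and optimizer states in
# bf16 ("memory-constrained mode"):
# accelerator = Accelerator()

model = AutoModelForCausalLM.from_pretrained(
    "ibm-granite/granite-7b-base", torch_dtype=torch.bfloat16
)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-5)
model, optimizer = accelerator.prepare(model, optimizer)
```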
## Throughput results
We use the [IBM Granite 7B](https://huggingface.co/ibm-granite/granite-7b-base) model (which follows the Meta Llama2 architecture) for throughput comparisons. We compare Model Flops Utilization (MFU) and tokens/sec/GPU metrics and show them for FSDP (full sharding) and DeepSpeed (Zero3).
We used four A100 GPUs as before with the following hyperparameters:
- Batch size of 8
- Model loaded in `torch.bfloat16`
- Mixed precision set to the same dtype (`bf16`)
Table 3 shows that FSDP and DeepSpeed are expected to perform similarly.
> We intend to follow up with a comprehensive throughput comparison and approaches to improve throughput (e.g., 4D masks with packing, torch.compile, selective activation checkpointing) as large scale alignment techniques like [InstructLab](https://github.com/instructlab) and [GLAN](https://arxiv.org/abs/2402.13064) become popular.
| **Framework** | **Tokens / sec / device** | **Step time (s)** | **Model Flops Utilization (MFU)** |
| ------------------- | ------------------------- | ----------------- | --------------------------------- |
| FSDP (aligned mode) | 3158.7 | 10.4 | 0.41 |
| DeepSpeed | 3094.5 | 10.6 | 0.40 |
> Table 3: Ballpark throughput comparisons between FSDP and DeepSpeed on four A100 GPUs.
## Closing thoughts
We provided a [new concept guide](https://huggingface.co/docs/accelerate/v0.31.0/en/concept_guides/fsdp_and_deepspeed) to help users migrate between the two frameworks. The guide helps users answer questions such as:
* How do we achieve equivalent sharding strategies?
* How do we perform efficient model loading?
* How is weight prefetching managed in FSDP and DeepSpeed?
* What is the equivalent of FSDP wrapping in DeepSpeed?
We consider various modes of configuring these frameworks in 🤗 Accelerate,
- From the command line during `accelerate launch`
- From the various `Plugin` classes 🤗 Accelerate provides for [`DeepSpeed`](https://huggingface.co/docs/accelerate/main/en/package_reference/deepspeed) and [`FSDP`](https://huggingface.co/docs/accelerate/main/en/package_reference/fsdp)
🤗 Accelerate makes it almost **trivial** to switch between FSDP and DeepSpeed, with the majority of it being an Accelerate config file change (see the new concept guide for instructions on this).
Besides the config
change, some of the other considerations (also outlined in the guide) are differences in how checkpoints are handled, etc.
All experiments in this blog can be reproduced with the code from the [original 🤗 Accelerate issue](https://github.com/huggingface/accelerate/issues/2624).
We intend to follow up with throughput comparisons at scale and techniques to better utilize those GPUs for tuning and alignment jobs while maintaining model quality.
## Acknowledgements
This is an effort that involved several teams across multiple organizations to come together. It started at IBM Research, specifically Aldo Pareja, who found the issue, and Fabian Lim, who identified the precision gaps and fixed this issue. Zach Mueller and [Stas Bekman](https://github.com/stas00) have been phenomenal in providing feedback and the fixes to accelerate. Less Wright from the PyTorch Team at Meta was very helpful with questions on FSDP parameters. Finally, we would also like to thank the [DeepSpeed](https://www.deepspeed.ai/) team for providing feedback on this blog.
| 1 |
0 | hf_public_repos | hf_public_repos/blog/eu-ai-act-oss.md | ---
title: "AI Policy @🤗: Open ML Considerations in the EU AI Act"
thumbnail: /blog/assets/eu_ai_act_oss/thumbnailEU.png
authors:
- user: yjernite
---
# AI Policy @🤗: Open ML Considerations in the EU AI Act
Like everyone else in Machine Learning, we’ve been following the EU AI Act closely at Hugging Face.
It’s a ground-breaking piece of legislation that is poised to shape how democratic inputs interact with AI technology development around the world.
It’s also the outcome of extensive work and negotiations between organizations representing many different components of society –
a process we’re particularly sensitive to as a community-driven company.
In the present <a href="/blog/assets/eu_ai_act_oss/supporting_OS_in_the_AIAct.pdf">position paper</a> written in coalition with [Creative Commons](https://creativecommons.org/),
[Eleuther AI](https://www.eleuther.ai/), [GitHub](https://github.com/), [LAION](https://laion.ai/), and [Open Future](http://openfuture.eu/),
we aim to contribute to this process by sharing our experience of the necessary role of open ML development
in supporting the goals of the Act – and conversely, by outlining specific ways in which the regulation
can better account for the needs of open, modular, and collaborative ML development.
Hugging Face is where it is today thanks to its community of developers, so we’ve seen firsthand what open development brings to the table
to support more robust innovation for more diverse and context-specific use cases;
where developers can easily share innovative new techniques, mix and match ML components to suit their own needs,
and reliably work with full visibility into their entire stack.
We’re also acutely aware of the necessary role of transparency in supporting more accountability and inclusivity of the technology –
which we’ve worked on fostering through better documentation and accessibility of ML artifacts, education efforts,
and hosting large-scale multidisciplinary collaborations, among others.
Thus, as the EU AI Act moves toward its final phase, we believe accounting for the specific needs and strengths of open and open-source development of ML systems will be instrumental in supporting its long-term goals.
Along with our co-signed partner organizations, we make the following five recommendations to that end:
1. Define AI components clearly,
2. Clarify that collaborative development of open source AI components and making them available in public repositories does not subject developers to the requirements in the AI Act, building on and improving the Parliament text’s Recitals 12a-c and Article 2(5e),
3. Support the AI Office’s coordination and inclusive governance with the open source ecosystem, building on the Parliament’s text,
4. Ensure the R&D exception is practical and effective, by permitting limited testing in real-world conditions, combining aspects of the Council’s approach and an amended version of the Parliament’s Article 2(5d),
5. Set proportional requirements for “foundation models,” recognizing and distinctly treating different uses and development modalities, including open source approaches, tailoring the Parliament’s Article 28b.
You can find more detail and context for those in the <a href="/blog/assets/eu_ai_act_oss/supporting_OS_in_the_AIAct.pdf">full paper here!</a>
| 2 |
0 | hf_public_repos | hf_public_repos/blog/overview-quantization-transformers.md | ---
title: "Overview of natively supported quantization schemes in 🤗 Transformers"
thumbnail: /blog/assets/163_overview_quantization_transformers/thumbnail.jpg
authors:
- user: ybelkada
- user: marcsun13
- user: IlyasMoutawwakil
- user: clefourrier
- user: fxmarty
---
# Overview of natively supported quantization schemes in 🤗 Transformers
We aim to give a clear overview of the pros and cons of each quantization scheme supported in transformers to help you decide which one you should go for.
Currently, quantized models are used for two main purposes:
- Running inference of a large model on a smaller device
- Fine-tune adapters on top of quantized models
So far, two integration efforts have been made and are **natively** supported in transformers: *bitsandbytes* and *auto-gptq*.
Note that some additional quantization schemes are also supported in the [🤗 optimum library](https://github.com/huggingface/optimum), but this is out of scope for this blogpost.
To learn more about each of the supported schemes, please have a look at one of the resources shared below. Please also have a look at the appropriate sections of the documentation.
Note also that the details shared below are only valid for `PyTorch` models; Tensorflow and Flax/JAX models are currently out of scope.
## Table of contents
- [Resources](#resources)
- [Comparing bitsandbytes and auto-gptq](#comparing-bitsandbytes-and-auto-gptq)
- [Diving into speed benchmarks](#diving-into-speed-benchmarks)
- [Conclusion and final words](#conclusion-and-final-words)
- [Acknowledgements](#acknowledgements)
## Resources
- [GPTQ blogpost](https://huggingface.co/blog/gptq-integration) – gives an overview on what is the GPTQ quantization method and how to use it.
- [bitsandbytes 4-bit quantization blogpost](https://huggingface.co/blog/4bit-transformers-bitsandbytes) - This blogpost introduces 4-bit quantization and QLoRa, an efficient finetuning approach.
- [bitsandbytes 8-bit quantization blogpost](https://huggingface.co/blog/hf-bitsandbytes-integration) - This blogpost explains how 8-bit quantization works with bitsandbytes.
- [Basic usage Google Colab notebook for GPTQ](https://colab.research.google.com/drive/1_TIrmuKOFhuRRiTWN94iLKUFu6ZX4ceb?usp=sharing) - This notebook shows how to quantize your transformers model with the GPTQ method, how to do inference, and how to do fine-tuning with the quantized model.
- [Basic usage Google Colab notebook for bitsandbytes](https://colab.research.google.com/drive/1ge2F1QSK8Q7h0hn3YKuBCOAS0bK8E0wf?usp=sharing) - This notebook shows how to use 4-bit models in inference with all their variants, and how to run GPT-neo-X (a 20B parameter model) on a free Google Colab instance.
- [Merve's blogpost on quantization](https://huggingface.co/blog/merve/quantization) - This blogpost provides a gentle introduction to quantization and the quantization methods supported natively in transformers.
## Comparing bitsandbytes and auto-gptq
In this section, we will go over the pros and cons of bitsandbytes and gptq quantization. Note that these are based on the feedback from the community and they can evolve over time as some of these features are in the roadmap of the respective libraries.
### What are the benefits of bitsandbytes?
**easy**: bitsandbytes remains the easiest way to quantize any model as it does not require calibrating the quantized model with input data (also called zero-shot quantization). It is possible to quantize any model out of the box as long as it contains `torch.nn.Linear` modules. Whenever a new architecture is added in transformers, as long as it can be loaded with accelerate’s `device_map="auto"`, users benefit from bitsandbytes quantization straight out of the box with minimal performance degradation. Quantization is performed on model load; there is no need to run any post-processing or preparation step.
**cross-modality interoperability**: As the only condition to quantize a model is to contain a `torch.nn.Linear` layer, quantization works out of the box for any modality, making it possible to load models such as Whisper, ViT, Blip2, etc. in 8-bit or 4-bit out of the box.
**0 performance degradation when merging adapters**: (Read more about adapters and PEFT in [this blogpost](https://huggingface.co/blog/peft) if you are not familiar with them). If you train adapters on top of the quantized base model, the adapters can be merged on top of the base model for deployment, with no inference performance degradation. You can also [merge](https://github.com/huggingface/peft/pull/851/files) the adapters on top of the dequantized model! This is not supported for GPTQ.
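For example, loading a model in 4-bit is only a config change away; the model name below is just an example, and any architecture with `torch.nn.Linear` layers works the same way:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,  # fast 4-bit kernels with bitsandbytes>=0.41.1
)
model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-7b-hf",  # example model; no calibration data is needed
    quantization_config=bnb_config,
    device_map="auto",
)
```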
### What are the benefits of autoGPTQ?
**fast for text generation**: GPTQ quantized models are fast compared to bitsandbytes quantized models for [text generation](https://huggingface.co/docs/transformers/main_classes/text_generation). We will address the speed comparison in an appropriate section.
**n-bit support**: The GPTQ algorithm makes it possible to quantize models down to 2 bits! However, this might come with severe quality degradation. The recommended number of bits is 4, which seems to be a great tradeoff for GPTQ at this time.
**easily-serializable**: GPTQ models support serialization for any number of bits. Loading models from TheBloke namespace: https://huggingface.co/TheBloke (look for those that end with the `-GPTQ` suffix) is supported out of the box, as long as you have the required packages installed. Bitsandbytes supports 8-bit serialization but does not support 4-bit serialization as of today.
**AMD support**: The integration should work out of the box for AMD GPUs!
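For example, loading a pre-quantized checkpoint from the Hub requires no extra arguments, since the checkpoint carries its own quantization config (the model name below is one example):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Requires the auto-gptq and optimum packages to be installed.
model = AutoModelForCausalLM.from_pretrained(
    "TheBloke/Llama-2-7B-GPTQ", device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("TheBloke/Llama-2-7B-GPTQ")
```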
### What are the potential areas of improvement for bitsandbytes?
**slower than GPTQ for text generation**: bitsandbytes 4-bit models are slow compared to GPTQ when using [`generate`](https://huggingface.co/docs/transformers/main_classes/text_generation).
**4-bit weights are not serializable**: Currently, 4-bit models cannot be serialized. This is a frequent community request, and we believe it should be addressed very soon by the bitsandbytes maintainers as it's in their roadmap!
### What are the potential areas of improvement for autoGPTQ?
**calibration dataset**: The need for a calibration dataset might discourage some users from going for GPTQ. Furthermore, it can take several hours to quantize a model (e.g., 4 GPU-hours for a 175B-scale model [according to the paper](https://arxiv.org/pdf/2210.17323.pdf) - section 2).
**works only for language models (for now)**: As of today, the API for quantizing a model with auto-GPTQ has been designed to support only language models. It should be possible to quantize non-text (or multimodal) models using the GPTQ algorithm, but the process has not been elaborated in the original paper or in the auto-gptq repository. If the community is excited about this topic this might be considered in the future.
## Diving into speed benchmarks
We decided to provide an extensive benchmark for both inference and fine-tuning adapters using bitsandbytes and auto-gptq on different hardware. The inference benchmark should give users an idea of the speed difference they might get between the different approaches we propose for inference, and the adapter fine-tuning benchmark should give a clear idea to users when it comes to deciding which approach to use when fine-tuning adapters on top of bitsandbytes and GPTQ base models.
We will use the following setup:
- bitsandbytes: 4-bit quantization with `bnb_4bit_compute_dtype=torch.float16`. Make sure to use `bitsandbytes>=0.41.1` for fast 4-bit kernels.
- auto-gptq: 4-bit quantization with exllama kernels. You will need `auto-gptq>=0.4.0` to use ex-llama kernels.
### Inference speed (forward pass only)
This benchmark measures only the prefill step, which corresponds to the forward pass during training. It was run on a single NVIDIA A100-SXM4-80GB GPU with a prompt length of 512. The model we used was `meta-llama/Llama-2-13b-hf`.
with batch size = 1:
|quantization |act_order|bits|group_size|kernel|Load time (s)|Per-token latency (ms)|Throughput (tok/s)|Peak memory (MB)|
|-----|---------|----|----------|------|-------------|----------------------|------------------|----------------|
|fp16|None |None|None |None |26.0 |36.958 |27.058 |29152.98 |
|gptq |False |4 |128 |exllama|36.2 |33.711 |29.663 |10484.34 |
|bitsandbytes|None |4|None |None |37.64 |52.00 |19.23 |11018.36 |
with batch size = 16:
|quantization |act_order|bits|group_size|kernel|Load time (s)|Per-token latency (ms)|Throughput (tok/s)|Peak memory (MB)|
|-----|---------|----|----------|------|-------------|----------------------|------------------|----------------|
|fp16|None |None|None |None |26.0 |69.94 |228.76 |53986.51 |
|gptq |False |4 |128 |exllama|36.2 |95.41 |167.68 |34777.04 |
|bitsandbytes|None |4|None |None |37.64 |113.98 |140.38 |35532.37 |
From the benchmark, we can see that bitsandbytes and GPTQ are equivalent, with GPTQ being slightly faster for large batch sizes. Check this [link](https://github.com/huggingface/optimum/blob/main/tests/benchmark/README.md#prefill-only-benchmark-results) for more details on these benchmarks.
### Generate speed
The following benchmarks measure the generation speed of the model during inference. The benchmarking script can be found [here](https://gist.github.com/younesbelkada/e576c0d5047c0c3f65b10944bc4c651c) for reproducibility.
#### use_cache
Let's test `use_cache` to better understand the impact of caching the hidden state during the generation.
The benchmark was run on an A100 with a prompt length of 30 and we generated exactly 30 tokens. The model we used was `meta-llama/Llama-2-7b-hf`.
with `use_cache=True`

with `use_cache=False`

From the two benchmarks, we conclude that generation is faster when we use attention caching, as expected. Moreover, GPTQ is, in general, faster than bitsandbytes. For example, with `batch_size=4` and `use_cache=True`, it is twice as fast! Therefore let’s use `use_cache` for the next benchmarks. Note that `use_cache` will consume more memory.
#### Hardware
In the following benchmark, we will try different hardware to see the impact on the quantized model. We used a prompt length of 30 and we generated exactly 30 tokens. The model we used was `meta-llama/Llama-2-7b-hf`.
with an NVIDIA A100:

with an NVIDIA T4:

with a Titan RTX:

From the benchmark above, we can conclude that GPTQ is faster than bitsandbytes for those three GPUs.
#### Generation length
In the following benchmark, we will try different generation lengths to see their impact on the quantized model. It was run on an A100, we used a prompt length of 30, and we varied the number of generated tokens. The model we used was `meta-llama/Llama-2-7b-hf`.
with 30 tokens generated:

with 512 tokens generated:

From the benchmark above, we can conclude that GPTQ is faster than bitsandbytes independently of the generation length.
### Adapter fine-tuning (forward + backward)
It is not possible to perform pure training on a quantized model. However, you can fine-tune quantized models by leveraging parameter-efficient fine-tuning (PEFT) methods and train adapters on top of them. The fine-tuning method relies on a recent approach called "Low-Rank Adapters" (LoRA): instead of fine-tuning the entire model, you only fine-tune these adapters and load them properly inside the model. Let's compare the fine-tuning speed!
The benchmark was run on an NVIDIA A100 GPU, and we used the `meta-llama/Llama-2-7b-hf` model from the Hub. Note that for the GPTQ model, we had to disable the exllama kernels, as they are not supported for fine-tuning.
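Sketched out, the setup looks roughly as follows (the checkpoint and LoRA hyperparameters are illustrative, not the exact ones we benchmarked; `disable_exllama` is the relevant `GPTQConfig` flag in the transformers version current at the time of writing):
```python
import torch
from transformers import AutoModelForCausalLM, GPTQConfig
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

# Disable the exllama kernels, which do not support fine-tuning
quantization_config = GPTQConfig(bits=4, disable_exllama=True)
model = AutoModelForCausalLM.from_pretrained(
    "TheBloke/Llama-2-7B-GPTQ",
    quantization_config=quantization_config,
    device_map="auto",
)
model = prepare_model_for_kbit_training(model)

lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()
```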

From the result, we conclude that bitsandbytes is faster than GPTQ for fine-tuning.
### Performance degradation
Quantization is great for reducing memory consumption. However, it does come with performance degradation. Let's compare the performance using the [Open-LLM leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)!
with 7b model:
| model_id | Average | ARC | Hellaswag | MMLU | TruthfulQA |
|------------------------------------|---------|-------|-----------|-------|------------|
| meta-llama/llama-2-7b-hf | **54.32** | 53.07 | 78.59 | 46.87 | 38.76 |
| meta-llama/llama-2-7b-hf-bnb-4bit | **53.4** | 53.07 | 77.74 | 43.8 | 38.98 |
| TheBloke/Llama-2-7B-GPTQ | **53.23** | 52.05 | 77.59 | 43.99 | 39.32 |
with 13b model:
| model_id | Average | ARC | Hellaswag | MMLU | TruthfulQA |
|------------------------------------|---------|-------|-----------|-------|------------|
| meta-llama/llama-2-13b-hf | **58.66** | 59.39 | 82.13 | 55.74 | 37.38 |
| TheBloke/Llama-2-13B-GPTQ (revision = 'gptq-4bit-128g-actorder_True')| **58.03** | 59.13 | 81.48 | 54.45 | 37.07 |
| TheBloke/Llama-2-13B-GPTQ | **57.56** | 57.25 | 81.66 | 54.81 | 36.56 |
| meta-llama/llama-2-13b-hf-bnb-4bit | **56.9** | 58.11 | 80.97 | 54.34 | 34.17 |
From the results above, we conclude that there is less degradation in bigger models. More interestingly, the degradation is minimal!
## Conclusion and final words
In this blogpost, we compared bitsandbytes and GPTQ quantization across multiple setups. We saw that bitsandbytes is better suited for fine-tuning while GPTQ is better for generation. From this observation, one way to get better merged models would be to:
- (1) quantize the base model using bitsandbytes (zero-shot quantization)
- (2) add and fine-tune the adapters
- (3) merge the trained adapters on top of the base model or the [dequantized model](https://github.com/huggingface/peft/pull/851/files)!
- (4) quantize the merged model using GPTQ and use it for deployment
We hope that this overview will make it easier for everyone to use LLMs in their applications and use cases, and we are looking forward to seeing what you will build with it!
## Acknowledgements
We would like to thank [Ilyas](https://huggingface.co/IlyasMoutawwakil), [Clémentine](https://huggingface.co/clefourrier) and [Felix](https://huggingface.co/fxmarty) for their help on the benchmarking.
Finally, we would like to thank [Pedro Cuenca](https://github.com/pcuenca) for his help with the writing of this blogpost.
---
title: "Diffusers welcomes Stable Diffusion 3.5 Large"
thumbnail: /blog/assets/sd3-5/thumbnail.png
authors:
- user: YiYiXu
- user: a-r-r-o-w
- user: dn6
- user: sayakpaul
- user: linoyts
- user: multimodalart
- user: OzzyGT
- user: ariG23498
---
# 🧨 Diffusers welcomes Stable Diffusion 3.5 Large
Stable Diffusion 3.5 is the improved variant of its predecessor, [Stable Diffusion 3](https://huggingface.co/blog/sd3).
As of today, the models are available on the Hugging Face Hub and can be used with 🧨 Diffusers.
The release comes with [two checkpoints](https://huggingface.co/collections/stabilityai/stable-diffusion-35-671785cca799084f71fa2838):
- A large (8B) model
- A large (8B) timestep-distilled model enabling few-step inference
In this post, we will focus on how to use Stable Diffusion 3.5 (SD3.5) with Diffusers, covering both inference and training.
## Table Of Contents
- [Architectural changes](#architectural-changes)
- [Using SD3.5 with Diffusers](#using-sd35-with-diffusers)
- [Running inference with quantization](#running-inference-with-quantization)
- [Training LoRAs with quantization](#training-loras-with-sd35-large-with-quantization)
- [Using single-file loading](#using-single-file-loading-with-the-stable-diffusion-35-transformer)
- [Important links](#important-links)
## Architectural changes
The transformer architecture of SD3.5 (large) is very similar to SD3 (medium), with the following changes:
- QK normalization: For training large transformer models, [QK normalization](https://research.google/blog/scaling-vision-transformers-to-22-billion-parameters/) has now become a standard, and SD3.5 Large is no exception.
- Dual attention layers: Instead of using single attention layers for each stream of modality in the MMDiT blocks, SD3.5 uses dual attention layers.
The rest of the details in terms of the text encoders, VAE, and noise scheduler stay exactly the same as in SD3 Medium. For more on SD3, we recommend checking out the [original paper](https://arxiv.org/abs/2403.03206).
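To give an intuition for the first change: QK normalization normalizes the query and key projections before the attention scores are computed, which keeps the attention logits from growing unbounded as models scale. A rough sketch of the idea, not the exact SD3.5 implementation (which uses learnable RMSNorm layers):
```python
import torch
import torch.nn.functional as F

def qk_norm_attention(q, k, v, eps=1e-6):
    # RMS-normalize queries and keys over the head dimension before attention
    q = q * torch.rsqrt(q.pow(2).mean(dim=-1, keepdim=True) + eps)
    k = k * torch.rsqrt(k.pow(2).mean(dim=-1, keepdim=True) + eps)
    return F.scaled_dot_product_attention(q, k, v)
```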
## Using SD3.5 with Diffusers
Make sure you install the latest version of diffusers:
```bash
pip install -U diffusers
```
As the model is gated, before using it with diffusers, you first need to go to the [Stable Diffusion 3.5 Large Hugging Face page](link), fill in the form and accept the gate.
Once you are in, you need to log in so that your system knows you’ve accepted the gate. Use the command below to log in:
```bash
huggingface-cli login
```
The following snippet will download the 8B parameter version of SD3.5 in `torch.bfloat16` precision.
This is the format used in the original checkpoint published by Stability AI, and is the recommended way to run inference.
```python
import torch
from diffusers import StableDiffusion3Pipeline
pipe = StableDiffusion3Pipeline.from_pretrained(
"stabilityai/stable-diffusion-3.5-large", torch_dtype=torch.bfloat16
).to("cuda")
image = pipe(
prompt="a photo of a cat holding a sign that says hello world",
negative_prompt="",
num_inference_steps=40,
height=1024,
width=1024,
guidance_scale=4.5,
).images[0]
image.save("sd3_hello_world.png")
```

The release also comes with a **“timestep-distilled”** model that eliminates classifier-free guidance and lets us generate images in fewer steps (typically in 4-8 steps).
```python
import torch
from diffusers import StableDiffusion3Pipeline
pipe = StableDiffusion3Pipeline.from_pretrained(
"stabilityai/stable-diffusion-3.5-large-turbo", torch_dtype=torch.bfloat16
).to("cuda")
image = pipe(
prompt="a photo of a cat holding a sign that says hello world",
num_inference_steps=4,
height=1024,
width=1024,
guidance_scale=1.0,
).images[0]
image.save("sd3_hello_world.png")
```

All the examples shown in our [SD3 blog post](https://huggingface.co/blog/sd3) and the [official Diffusers documentation](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/stable_diffusion_3) should already work with SD3.5.
In particular, both of those resources dive deep into optimizing the memory requirements to run inference.
Since SD3.5 Large is significantly larger than SD3 Medium, memory optimization becomes crucial to allow inference on consumer hardware.
## Running inference with quantization
Diffusers natively supports working with [`bitsandbytes`](https://github.com/bitsandbytes-foundation/bitsandbytes) quantization, which optimizes memory even more.
First, make sure to install all the libraries necessary:
```bash
pip install -Uq git+https://github.com/huggingface/transformers@main
pip install -Uq bitsandbytes
```
Then load the transformer in [“NF4” precision](https://huggingface.co/blog/4bit-transformers-bitsandbytes):
```python
from diffusers import BitsAndBytesConfig, SD3Transformer2DModel
import torch
model_id = "stabilityai/stable-diffusion-3.5-large"
nf4_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_quant_type="nf4",
bnb_4bit_compute_dtype=torch.bfloat16
)
model_nf4 = SD3Transformer2DModel.from_pretrained(
model_id,
subfolder="transformer",
quantization_config=nf4_config,
torch_dtype=torch.bfloat16
)
```
And now, we’re ready to run inference:
```python
from diffusers import StableDiffusion3Pipeline
pipeline = StableDiffusion3Pipeline.from_pretrained(
model_id,
transformer=model_nf4,
torch_dtype=torch.bfloat16
)
pipeline.enable_model_cpu_offload()
prompt = "A whimsical and creative image depicting a hybrid creature that is a mix of a waffle and a hippopotamus, basking in a river of melted butter amidst a breakfast-themed landscape. It features the distinctive, bulky body shape of a hippo. However, instead of the usual grey skin, the creature's body resembles a golden-brown, crispy waffle fresh off the griddle. The skin is textured with the familiar grid pattern of a waffle, each square filled with a glistening sheen of syrup. The environment combines the natural habitat of a hippo with elements of a breakfast table setting, a river of warm, melted butter, with oversized utensils or plates peeking out from the lush, pancake-like foliage in the background, a towering pepper mill standing in for a tree. As the sun rises in this fantastical world, it casts a warm, buttery glow over the scene. The creature, content in its butter river, lets out a yawn. Nearby, a flock of birds take flight"
image = pipeline(
prompt=prompt,
negative_prompt="",
num_inference_steps=28,
guidance_scale=4.5,
max_sequence_length=512,
).images[0]
image.save("whimsical.png")
```

You can control other knobs in the `BitsAndBytesConfig`. Refer to the [documentation](https://huggingface.co/docs/diffusers/main/en/quantization/bitsandbytes) for details.
It is also possible to directly load a model quantized with the same `nf4_config` as above.
This is particularly helpful for machines with low RAM. Refer to [this Colab Notebook](https://colab.research.google.com/drive/1nK5hOCPY3RoGi0yqddscGdKvo1r-rHqE?usp=sharing) for an end-to-end example.
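Continuing from the snippet above, the flow could look like this (a sketch assuming your installed `diffusers`/`bitsandbytes` versions support serializing 4-bit models; the local path is illustrative):
```python
# Persist the NF4-quantized transformer once...
model_nf4.save_pretrained("sd3.5-large-nf4")

# ...and later reload it directly, skipping the on-the-fly quantization step
model_nf4 = SD3Transformer2DModel.from_pretrained(
    "sd3.5-large-nf4", torch_dtype=torch.bfloat16
)
```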
## Training LoRAs with SD3.5 Large with quantization
Thanks to libraries like `bitsandbytes` and `peft`, it is possible to fine-tune large models like SD3.5 Large on consumer GPUs with 24GB of VRAM. It is already possible to leverage our existing [SD3 training script](https://huggingface.co/blog/sd3#dreambooth-and-lora-fine-tuning) for training LoRAs.
The training command below already works:
```bash
accelerate launch train_dreambooth_lora_sd3.py \
--pretrained_model_name_or_path="stabilityai/stable-diffusion-3.5-large" \
--dataset_name="Norod78/Yarn-art-style" \
--output_dir="yart_art_sd3-5_lora" \
--mixed_precision="bf16" \
--instance_prompt="Frog, yarn art style" \
--caption_column="text"\
--resolution=768 \
--train_batch_size=1 \
--gradient_accumulation_steps=1 \
--learning_rate=4e-4 \
--report_to="wandb" \
--lr_scheduler="constant" \
--lr_warmup_steps=0 \
--max_train_steps=700 \
--rank=16 \
--seed="0" \
--push_to_hub
```
However, to make it work with quantization, we need to tweak a couple of knobs. Below, we provide pointers on how to do that:
- We initialize `transformer` either with a quantization config or load a quantized checkpoint directly.
- Then, we prepare it by using the `prepare_model_for_kbit_training()` from `peft`.
- The rest of the process remains the same, thanks to `peft`'s strong support for `bitsandbytes`!
Refer to [this example script](https://gist.github.com/sayakpaul/05afd428bc089b47af7c016e42004527) for the full details.
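Sketched out, those changes look roughly like this (the LoRA rank and target module names are illustrative assumptions, not the exact configuration of the training script):
```python
import torch
from diffusers import BitsAndBytesConfig, SD3Transformer2DModel
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

nf4_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
transformer = SD3Transformer2DModel.from_pretrained(
    "stabilityai/stable-diffusion-3.5-large",
    subfolder="transformer",
    quantization_config=nf4_config,
    torch_dtype=torch.bfloat16,
)
# Freeze the base weights and prepare the quantized model for training
transformer = prepare_model_for_kbit_training(transformer)
lora_config = LoraConfig(
    r=16,
    lora_alpha=16,
    target_modules=["to_q", "to_k", "to_v", "to_out.0"],
)
transformer = get_peft_model(transformer, lora_config)
```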
## Using single-file loading with the Stable Diffusion 3.5 Transformer
You can load the Stable Diffusion 3.5 Transformer model using the original checkpoint files published by Stability AI with the `from_single_file` method:
```python
import torch
from diffusers import SD3Transformer2DModel, StableDiffusion3Pipeline
transformer = SD3Transformer2DModel.from_single_file(
"https://huggingface.co/stabilityai/stable-diffusion-3.5-large-turbo/blob/main/sd3.5_large.safetensors",
torch_dtype=torch.bfloat16,
)
pipe = StableDiffusion3Pipeline.from_pretrained(
"stabilityai/stable-diffusion-3.5-large",
transformer=transformer,
torch_dtype=torch.bfloat16,
)
pipe.enable_model_cpu_offload()
image = pipe("a cat holding a sign that says hello world").images[0]
image.save("sd35.png")
```
## Important links
- Stable Diffusion 3.5 Large [collection](https://huggingface.co/collections/stabilityai/stable-diffusion-35-671785cca799084f71fa2838) on the Hub
- Official Diffusers [documentation](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/stable_diffusion_3) for Stable Diffusion 3.5
- [Colab Notebook](https://colab.research.google.com/drive/1nK5hOCPY3RoGi0yqddscGdKvo1r-rHqE?usp=sharing) to run inference with quantization
- [Training LoRAs](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/README_sd3.md)
- Stable Diffusion 3 [paper](https://arxiv.org/abs/2403.03206)
- Stable Diffusion 3 [blog post](https://huggingface.co/blog/sd3)
_Acknowledgements: [Daniel Frank](https://www.pexels.com/@fr3nks/) for the background photo used in the thumbnail of this blog post. Thanks to [Pedro Cuenca](https://huggingface.co/pcuenq) and [Tom Aarsen](https://huggingface.co/tomaarsen) for their reviews on the post draft._
---
title: "Fine-tuning LLMs to 1.58bit: extreme quantization made easy"
thumbnail: /blog/assets/1_58_llm_extreme_quantization/thumbnail.png
authors:
- user: medmekk
- user: marcsun13
- user: lvwerra
- user: pcuenq
- user: osanseviero
- user: thomwolf
---
# Fine-tuning LLMs to 1.58bit: extreme quantization made easy
As Large Language Models (LLMs) grow in size and complexity, finding ways to reduce their computational and energy costs has become a critical challenge. One popular solution is quantization, where the precision of parameters is reduced from the standard 16-bit floating-point (FP16) or 32-bit floating-point (FP32) to lower-bit formats like 8-bit or 4-bit. While this approach significantly cuts down on memory usage and speeds up computation, it often comes at the expense of accuracy. Reducing the precision too much can cause models to lose crucial information, resulting in degraded performance.
[BitNet](https://arxiv.org/abs/2402.17764) is a special transformers architecture that represents each parameter with only three values: `(-1, 0, 1)`, offering an extreme quantization of just 1.58 ( \\( \log_2(3) \\) ) bits per parameter. However, it requires training a model from scratch. While the results are impressive, not everybody has the budget to pre-train an LLM. To overcome this limitation, we explored a few tricks that allow fine-tuning an existing model to 1.58 bits! Keep reading to find out how!
## Table of Contents
- [TL;DR](#tldr)
- [What is BitNet in More Depth?](#what-is-bitnet-in-more-depth)
- [Pre-training Results in 1.58b](#pre-training-results-in-158b)
- [Fine-tuning in 1.58b](#fine-tuning-in-158bit)
- [Kernels used & Benchmarks](#kernels-used--benchmarks)
- [Conclusion](#conclusion)
- [Acknowledgements](#acknowledgements)
- [Additional Resources](#additional-resources)
## TL;DR
[BitNet](https://arxiv.org/abs/2402.17764) is an architecture introduced by Microsoft Research that uses extreme quantization, representing each parameter with only three values: -1, 0, and 1. This results in a model that uses just 1.58 bits per parameter, significantly reducing computational and memory requirements.
This architecture uses INT8 addition calculations when performing matrix multiplication, in contrast to LLaMA LLM's FP16 addition and multiplication operations.
<figure style="text-align: center;">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/1.58llm_extreme_quantization/matmulfree.png" alt="The new computation paradigm of BitNet b1.58" style="width: 100%;"/>
<figcaption>The new computation paradigm of BitNet b1.58 (source: BitNet paper https://arxiv.org/abs/2402.17764)</figcaption>
</figure>
This results in a theoretically reduced energy consumption, with BitNet b1.58 using 71.4 times less energy than the Llama baseline for the arithmetic operations of matrix multiplication.
<figure style="text-align: center;">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/1.58llm_extreme_quantization/energy_consumption.png" alt="Energy consumption of BitNet b1.58 compared to LLaMA" style="width: 100%;"/>
<figcaption>Energy consumption of BitNet b1.58 compared to LLama (source: BitNet paper https://arxiv.org/abs/2402.17764)</figcaption>
</figure>
We have successfully fine-tuned a [Llama3 8B model](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct) using the BitNet architecture, achieving strong performance on downstream tasks. The 8B models we developed are released under the [HF1BitLLM](https://huggingface.co/HF1BitLLM) organization. Two of these models were fine-tuned on 10B tokens with different training setups, while the third was fine-tuned on 100B tokens. Notably, our models surpass the Llama 1 7B model in MMLU benchmarks.
### How to Use with Transformers
To integrate the BitNet architecture into Transformers, we introduced a new quantization method called "bitnet" ([PR](https://github.com/huggingface/transformers/pull/33410)). This method involves replacing the standard Linear layers with specialized BitLinear layers that are compatible with the BitNet architecture, with appropriate dynamic quantization of activations, weight unpacking, and matrix multiplication.
Loading and testing the model in Transformers is incredibly straightforward, and there are zero changes to the API:
```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained(
"HF1BitLLM/Llama3-8B-1.58-100B-tokens",
device_map="cuda",
torch_dtype=torch.bfloat16
)
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
input_text = "Daniel went back to the the the garden. Mary travelled to the kitchen. Sandra journeyed to the kitchen. Sandra went to the hallway. John went to the bedroom. Mary went back to the garden. Where is Mary?\nAnswer:"
input_ids = tokenizer.encode(input_text, return_tensors="pt").cuda()
output = model.generate(input_ids, max_new_tokens=10)
generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
print(generated_text)
```
With this code, everything is managed seamlessly behind the scenes, so there's no need to worry about additional complexities; you just need to install the latest version of transformers.
For a quick test of the model, check out this [notebook](https://colab.research.google.com/drive/1ovmQUOtnYIdvcBkwEE4MzVL1HKfFHdNT?usp=sharing).
## What is BitNet In More Depth?
[BitNet](https://arxiv.org/abs/2402.17764) replaces traditional Linear layers in Multi-Head Attention and Feed-Forward Networks with specialized layers called BitLinear that use ternary precision (or even binary, in the initial version). The BitLinear layers we use in this project quantize the weights using ternary precision (with values of -1, 0, and 1), and we quantize the activations to 8-bit precision. We use a different implementation of BitLinear for training than we do for inference, as we'll see in the next section.
The main obstacle to training in ternary precision is that the weight values are discretized (via the `round()` function) and thus non-differentiable. BitLinear solves this with a nice trick: [STE (Straight Through Estimator)](https://arxiv.org/abs/1903.05662). The STE allows gradients to flow through the non-differentiable rounding operation by approximating its gradient as 1 (treating `round()` as equivalent to the identity function). Another way to view it is that, instead of stopping the gradient at the rounding step, the STE lets the gradient pass through as if the rounding never occurred, enabling weight updates using standard gradient-based optimization techniques.
<figure style="text-align: center;">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/1.58llm_extreme_quantization/bitlinear.png" alt="The architecture of BitNet with BitLinear layers" style="width: 100%"/>
<figcaption>The architecture of BitNet with BitLinear layers (source: BitNet paper https://arxiv.org/pdf/2310.11453)</figcaption>
</figure>
### Training
We train in full precision, but quantize the weights into ternary values as we go, using symmetric per-tensor quantization. First, we compute the average of the absolute values of the weight matrix and use this as a scale. We then multiply the weights by the scale, round the values, constrain them between -1 and 1, and finally divide by the scale to continue in full precision.
\\( scale_w = \frac{1}{\frac{1}{nm} \sum_{ij} |W_{ij}|} \\)
\\( W_q = \text{clamp}_{[-1,1]}(\text{round}(W \times scale_w)) \\)
\\( W_{dequantized} = W_q*scale_w \\)
Activations are then quantized to a specified bit-width (8-bit, in our case) using per-token absmax quantization (for a comprehensive introduction to quantization methods, check out this [post](https://mlabonne.github.io/blog/posts/Introduction_to_Weight_Quantization.html)). This involves scaling the activations into the range `[-128, 127]` for an 8-bit bit-width. The quantization formula is:
\\( scale_x = \frac{127}{|X|_{\text{max}, \, \text{dim}=-1}} \\)
\\( X_q = \text{clamp}_{[-128,127]}(\text{round}(X \times scale_x)) \\)
\\( X_{dequantized} = X_q * scale_x \\)
To make the formulas clearer, here are examples of weight and activation quantization using a 3x3 matrix:
---
<details>
<summary>Example 1: Weight Matrix Quantization</summary>
Let the weight matrix \\( W \\) be:
\\( W =
\begin{bmatrix}
0.8 & -0.5 & 1.2 \\
-1.5 & 0.4 & -0.9 \\
1.3 & -0.7 & 0.2
\end{bmatrix} \\)
**Step 1: Compute the Scale for Weights**
Using the formula:
\\( scale_w = \frac{1}{\frac{1}{nm} \sum_{ij} |W_{ij}|} \\)
we calculate the average absolute value of \( W \):
\\( \frac{1}{nm} \sum_{ij} |W_{ij}| = \frac{1}{9}(0.8 + 0.5 + 1.2 + 1.5 + 0.4 + 0.9 + 1.3 + 0.7 + 0.2) = \frac{1}{9}(7.5) = 0.8333 \\)
Now, the scale factor is:
\\( scale_w = \frac{1}{0.8333} \approx 1.2 \\)
**Step 2: Quantize the Weight Matrix**
Using the formula:
\\( W_q = \text{clamp}_{[-1, 1]}(\text{round}(W \times scale_w)) \\)
We first scale the weights by \\( scale_w \approx 1.2 \\):
\\( W \times scale_w =
\begin{bmatrix}
0.8 \times 1.2 & -0.5 \times 1.2 & 1.2 \times 1.2 \\
-1.5 \times 1.2 & 0.4 \times 1.2 & -0.9 \times 1.2 \\
1.3 \times 1.2 & -0.7 \times 1.2 & 0.2 \times 1.2
\end{bmatrix}
=
\begin{bmatrix}
0.96 & -0.6 & 1.44 \\
-1.8 & 0.48 & -1.08 \\
1.56 & -0.84 & 0.24
\end{bmatrix} \\)
Next, we round the values and clamp them to the range \\( [-1, 1] \\):
\\( W_q =
\begin{bmatrix}
1 & -1 & 1 \\
-1 & 0 & -1 \\
1 & -1 & 0
\end{bmatrix} \\)
**Step 3: Dequantize the Weights**
Finally, we dequantize the weights using:
\\( W_{dequantized} = W_q \times scale_w \\)
Substituting \\( scale_w \approx 1.2 \\), we get:
\\( W_{dequantized} =
\begin{bmatrix}
1 \times 1.2 & -1 \times 1.2 & 1 \times 1.2 \\
-1 \times 1.2 & 0 \times 1.2 & -1 \times 1.2 \\
1 \times 1.2 & -1 \times 1.2 & 0 \times 1.2
\end{bmatrix}
=
\begin{bmatrix}
1.2 & -1.2 & 1.2 \\
-1.2 & 0 & -1.2 \\
1.2 & -1.2 & 0
\end{bmatrix} \\)
</details>
<details>
<summary>Example 2: Activation Matrix Quantization</summary>
Let the activation matrix \\( X \\) be:
\\( X =
\begin{bmatrix}
1.0 & -0.6 & 0.7 \\
-0.9 & 0.4 & -1.2 \\
0.8 & -0.5 & 0.3
\end{bmatrix} \\)
**Step 1: Compute the Scale for Activations**
For each row (or channel), compute the maximum absolute value:
- **Row 1**: Maximum absolute value = 1.0
- **Row 2**: Maximum absolute value = 1.2
- **Row 3**: Maximum absolute value = 0.8
Compute the scale factors for each row:
\\( \text{scale} = \begin{bmatrix}
\frac{127}{1.0} \\
\frac{127}{1.2} \\
\frac{127}{0.8}
\end{bmatrix}
=
\begin{bmatrix}
127 \\
105.83 \\
158.75
\end{bmatrix} \\)
**Step 2: Quantize the Activation Matrix**
Using the formula:
\\( X_q = \text{clamp}_{[-128,127]}(\text{round}(X \times \text{scale})) \\)
Scale the activations:
\\( X \times \text{scale} =
\begin{bmatrix}
1.0 \times 127 & -0.6 \times 127 & 0.7 \times 127 \\
-0.9 \times 105.83 & 0.4 \times 105.83 & -1.2 \times 105.83 \\
0.8 \times 158.75 & -0.5 \times 158.75 & 0.3 \times 158.75
\end{bmatrix}
=
\begin{bmatrix}
127 & -76.2 & 88.9 \\
-95.2 & 42.3 & -127 \\
127 & -79.4 & 47.6
\end{bmatrix} \\)
Round the values and clamp them to the range \\([-128, 127] \\):
\\( X_q =
\begin{bmatrix}
127 & -76 & 89 \\
-95 & 42 & -127 \\
127 & -79 & 48
\end{bmatrix} \\)
**Step 3: Dequantize the Activations**
Finally, dequantize the activations using:
\\( X_{dequantized} = X_q \times \frac{1}{\text{scale}} \\)
Substituting the scales:
\\( X_{dequantized} =
\begin{bmatrix}
127 \times \frac{1}{127} & -76 \times \frac{1}{127} & 89 \times \frac{1}{127} \\
-95 \times \frac{1}{105.83} & 42 \times \frac{1}{105.83} & -127 \times \frac{1}{105.83} \\
127 \times \frac{1}{158.75} & -79 \times \frac{1}{158.75} & 48 \times \frac{1}{158.75}
\end{bmatrix}
=
\begin{bmatrix}
1.0 & -0.6 & 0.7 \\
-0.9 & 0.4 & -1.2 \\
0.8 & -0.5 & 0.3
\end{bmatrix} \\)
</details>
---
We apply Layer Normalization (LN) before quantizing the activations to maintain the variance of the output:
\\( \text{LN}(x) = \frac{x - E(x)}{\sqrt{\text{Var}(x) + \epsilon}} \\)
where \\( \epsilon \\) is a small constant that prevents division by zero.
The `round()` function is not differentiable, as mentioned before. We use `detach()` as a trick to implement a differentiable straight-through estimator in the backward pass:
```python
# Adapted from https://github.com/microsoft/unilm/blob/master/bitnet/The-Era-of-1-bit-LLMs__Training_Tips_Code_FAQ.pdf
import torch
import torch.nn as nn
import torch.nn.functional as F
def LN(x, eps=1e-5):
    # Parameter-free layer normalization, as defined in the text above
    return F.layer_norm(x, x.shape[-1:], eps=eps)

def activation_quant(x):
scale = 127.0 / x.abs().max(dim=-1, keepdim=True).values.clamp_(min=1e-5)
y = (x * scale).round().clamp_(-128, 127) / scale
return y
def weight_quant(w):
scale = 1.0 / w.abs().mean().clamp_(min=1e-5)
u = (w * scale).round().clamp_(-1, 1) / scale
return u
class BitLinear(nn.Linear):
"""
Only for training
"""
def forward(self, x):
w = self.weight
x_norm = LN(x)
        # A trick for implementing the Straight-Through Estimator (STE) using detach()
x_quant = x_norm + (activation_quant(x_norm) - x_norm).detach()
w_quant = w + (weight_quant(w) - w).detach()
# Perform quantized linear transformation
y = F.linear(x_quant, w_quant)
return y
```
### Inference
During inference, we simply quantize the weights to ternary values without rescaling. We apply the same approach to activations using 8-bit precision, then perform a matrix multiplication with an efficient kernel, followed by dividing by both the weight and activation scales. This should significantly improve inference speed, particularly with optimized hardware. You can see that the rescaling process differs during training, as matrix multiplications are kept in fp16/bf16/fp32 for proper training.
```python
# Adapted from https://github.com/microsoft/unilm/blob/master/bitnet/The-Era-of-1-bit-LLMs__Training_Tips_Code_FAQ.pdf
import torch
import torch.nn as nn
import torch.nn.functional as F
# `LN` is the parameter-free layer norm helper from the training snippet above;
# `efficient_kernel` stands in for the custom low-bit matmul kernel.
def activation_quant_inference(x):
x = LN(x)
scale = 127.0 / x.abs().max(dim=-1, keepdim=True).values.clamp_(min=1e-5)
y = (x * scale).round().clamp_(-128, 127)
return y, scale
class BitLinear(nn.Linear):
"""
    Only for inference
"""
def forward(self, x):
w = self.weight # weights here are already quantized to (-1, 0, 1)
w_scale = self.w_scale
x_quant, x_scale = activation_quant_inference(x)
y = efficient_kernel(x_quant, w) / w_scale / x_scale
return y
```
## Pre-training Results in 1.58b
Before attempting fine-tuning, we first tried to reproduce the results of the BitNet paper with pre-training. We started with a small dataset, [tinystories](https://huggingface.co/datasets/roneneldan/TinyStories), and a [Llama3 8B model](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct). We confirmed that adding a normalization function, like the paper does, improves performance. For example, after 2000 steps of training, we had a perplexity on the validation set equal to 6.3 without normalization, and 5.9 with normalization. Training was stable in both cases.
<figure style="text-align: center;">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/1.58llm_extreme_quantization/pre-training.png" alt="Pre-training plots without (blue) & with (green) layer normalisation" style="width: 100%;"/>
<figcaption>Pre-training plots without (blue) & with (orange) layer normalisation </figcaption>
</figure>
While this approach looks very interesting for pre-training, only a few institutions can afford doing it at the necessary scale. However, there is already a wide range of strong pretrained models, and it would be extremely useful if they could be converted to 1.58bit after pre-training. Other groups had reported that fine-tuning results were not as strong as those achieved with pre-training, so we set out on an investigation to see if we could make 1.58-bit fine-tuning work.
## Fine-tuning in 1.58bit
When we began fine-tuning from the pre-trained Llama3 8B weights, the model performed slightly better but not as well as we expected.
> **Note:** All our experiments were conducted using [Nanotron](https://github.com/huggingface/nanotron). If you're interested in trying 1.58bit pre-training or fine-tuning, you can check out this [PR](https://github.com/huggingface/nanotron/pull/180).
<figure style="text-align: center;">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/1.58llm_extreme_quantization/finetuning_basic.png" alt="Fine-tuning plot compared to pre-training plot" style="width: 100%;"/>
<figcaption>Fine-tuning plot compared to pre-training plot</figcaption>
</figure>
To understand why, we tried to inspect both the weight distributions of the randomly initialized model and the pre-trained model to identify potential issues.
<div style="display: flex; justify-content: center;">
<figure style="margin-right: 20px; text-align: center;">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/1.58llm_extreme_quantization/poids_aléatoires.png" alt="Random weights distribution (2 merged stds)" style="width: 400px;" />
<figcaption>Random weights distribution (2 merged stds)</figcaption>
</figure>
<figure style="text-align: center;">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/1.58llm_extreme_quantization/poids_llama3.png" alt="Pre-trained Llama3 weights distribution" style="width: 400px;" />
<figcaption>Pre-trained Llama3 weights distribution</figcaption>
</figure>
</div>
And the scale values for the two distributions are, respectively:
<div style="display: flex; justify-content: center;">
<figure style="margin-right: 20px; text-align: center;">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/1.58llm_extreme_quantization/scales_random.png" alt="Random weights scales distribution" style="width: 400px;" />
<figcaption>Random weights scales distribution</figcaption>
</figure>
<figure style="text-align: center;">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/1.58llm_extreme_quantization/scales_llama3.png" alt="Pre-trained Llama3 weights distribution" style="width: 400px;" />
<figcaption>Pre-trained Llama3 weights distribution</figcaption>
</figure>
</div>
The initial random weight distribution is a mix of two normal distributions:
- One with a standard deviation (std) of \\( 0.025 \\)
- Another with a std of \\( \frac{0.025}{\sqrt{2 \cdot \text{num\_hidden\_layers}}} = 0.00325 \\)
This results from using different stds for column linear and row linear weights in `nanotron`. In the quantized version, all matrices have only 2 weight scales (50.25 and 402), which are the inverse of the mean absolute value of the weights for each matrix: `scale = 1.0 / w.abs().mean().clamp_(min=1e-5)`
- For \\(\text{scale} = 50.25 \\), \\( w.abs().mean() = 0.0199 \\), leading to \\(\text{std} = 0.025 \\) which matches our first standard deviation. The formula used to derive the std is based on the expectation of the half-normal distribution of \\( |w| \\):
\\( \mathbb{E}(|w|) = \text{std}(w) \cdot \sqrt{\frac{2}{\pi}} \\)
- For \\( \text{scale} = 402 \\), \\( w.abs().mean() = 0.0025 \\), leading to \\(\text{std} = 0.00325 \\)
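This is easy to verify numerically (a quick sanity check, not taken from the original experiments):
```python
import torch

std = 0.025
w = torch.randn(4096, 4096) * std

# E(|w|) for a zero-mean normal is std * sqrt(2 / pi) ≈ 0.0199
print(w.abs().mean())        # ~0.0199
print(1.0 / w.abs().mean())  # ~50, matching the first scale value above
```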
On the other hand, the pretrained weights' distribution looks like a normal distribution with \\( \text{std} = 0.013 \\).
Clearly, the pretrained model starts with more information (scales), while the randomly initialized model starts with practically no information and adds to it over time. Our conclusion was that starting with random weights gives the model minimal initial information, enabling a gradual learning process, while during fine-tuning the introduction of BitLinear layers makes the model lose all of its prior information.
To improve the fine-tuning results, we tried different techniques. For example, instead of using per-tensor quantization, we tried per-row and per-column quantization to keep more information from the Llama 3 weights. We also tried to change the way the scale is computed: instead of just taking the mean absolute value of the weights as a scale, we take the mean absolute value of the outliers as a scale (an outlier value is a value that exceeds k*mean_absolute_value, where k is a constant we tried to vary in our experiments), but we didn’t notice big improvements.
```python
import torch

def scale_outliers(tensor, threshold_factor=1):
mean_absolute_value = torch.mean(torch.abs(tensor))
threshold = threshold_factor * mean_absolute_value
outliers = tensor[torch.abs(tensor) > threshold]
mean_outlier_value = torch.mean(torch.abs(outliers))
return mean_outlier_value
def weight_quant_scaling(w):
scale = 1.0 / scale_outliers(w).clamp_(min=1e-5)
quantized_weights = (w * scale).round().clamp_(-1, 1) / scale
return quantized_weights
```
We observed that both the random weights and the Llama 3 weights resulted in losses starting at approximately the same value of 13. This suggests that the Llama 3 model loses all of its prior information when quantization is introduced. To further investigate how much information the model loses during this process, we experimented with per-group quantization.
As a sanity check, we first set the group size to 1, which essentially means no quantization. In this scenario, the loss started at 1.45, same as we see during normal fine-tuning. However, when we increased the group size to 2, the loss jumped to around 11. This indicates that even with a minimal group size of 2, the model still loses nearly all of its information.
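A minimal sketch of what such a per-group variant looks like (a hypothetical helper following the same pattern as `weight_quant` above):
```python
import torch

def weight_quant_groupwise(w, group_size):
    # Hypothetical per-group variant of `weight_quant`: one scale per group of
    # `group_size` consecutive values (numel must be divisible by group_size).
    g = w.reshape(-1, group_size)
    scale = 1.0 / g.abs().mean(dim=1, keepdim=True).clamp_(min=1e-5)
    q = (g * scale).round().clamp_(-1, 1) / scale
    # With group_size=1 the scale is 1/|w|, so the round-trip returns w
    # unchanged, which is why the loss matches normal fine-tuning.
    return q.reshape(w.shape)
```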
To address this issue, we considered the possibility of introducing quantization gradually rather than applying it abruptly to the weights and activations for each tensor. To achieve this, we implemented a lambda value to control the process:
```python
lambda_ = ?
x_quant = x + lambda_ * (activation_quant(x) - x).detach()
w_quant = w + lambda_ * (weight_quant(w) - w).detach()
```
When `lambda` is set to 0, there is essentially no quantization occurring, while at `lambda=1`, full quantization is applied.
We initially tested some discrete `lambda` values, such as 0.25, 0.5, 0.75, and 1. However, this approach did not lead to any significant improvement in results, mainly because even `lambda=0.25` is already high enough to make the loss start very high.
<figure style="text-align: center;">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/1.58llm_extreme_quantization/lambda_0.25.png" alt="Fine-tuning plot with lambda = 0.25->0.5->0.75->1" style="width: 100%;"/>
<figcaption>Fine-tuning plot with lambda = 0.25->0.5->0.75->1</figcaption>
</figure>
As a result, we decided to experiment with a `lambda` value that adjusts dynamically based on the training step.
```python
lambda_ = training_step / total_training_steps
```
Using this dynamic `lambda` value led to better loss convergence, but the perplexity (ppl) results during inference, when `lambda` was set to 1, were still far from satisfactory. We realized this was likely because the model hadn't been trained long enough with `lambda=1`. To address this, we adjusted our `lambda` value to improve the training process.
```python
lambda_ = min(2 * training_step / total_training_steps, 1)
```
With this configuration, after 2000 steps we have:
<figure style="text-align: center;">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/1.58llm_extreme_quantization/lambda_training_step.png" alt="Fine-tuning plot with lambda = min(2*training_step/total_training_steps, 1)" style="width: 100%;"/>
<figcaption>Fine-tuning plot with lambda = min(2*training_step/total_training_steps, 1)</figcaption>
</figure>
Our fine-tuning method shows better convergence overall. You can observe a slight increase in the loss curve around 1,000 steps, which corresponds to when we begin approaching `lambda=1`, or full quantization. However, immediately after this point, the loss starts to converge again, leading to an improved perplexity of approximately 4.
Despite this progress, when we tested the quantized model on the WikiText dataset (instead of the tinystories one we used for fine-tuning), it showed a very high perplexity. This suggests that fine-tuning the model in low-bit mode on a specific dataset causes it to lose much of its general knowledge. This issue might arise because the minimal representations we aim for with ternary weights can vary significantly from one dataset to another. To address this problem, we scaled our training process to include the larger [FineWeb-edu](https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu) dataset. We maintained a `lambda` value of:
```python
lambda_ = min(training_step/1000, 1)
```
We chose this `lambda` value because it seemed to be a good starting point for warming up the model. We then trained the model using a learning rate of 1e-4 for 5,000 steps on the FineWeb-edu dataset. The training used a batch size (BS) of 2 million tokens, totaling 10 billion tokens.
Finding the right learning rate and the right decay was challenging; it seems to be a crucial factor in the model's performance.
<figure style="text-align: center;">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/1.58llm_extreme_quantization/fineweb-edu.png" alt="Fine-tuning plot with warmup quantization on Fineweb-edu" style="width: 100%;"/>
<figcaption>Fine-tuning plot with warmup quantization on Fineweb-edu</figcaption>
</figure>
After the fine-tuning process on FineWeb-edu, the perplexity on the WikiText dataset reached 12.2, which is quite impressive given that we only used 10 billion tokens. The other evaluation metrics also show strong performance considering the limited amount of data (see the results table below).
We also tried to smooth out the sharp increase when lambda approaches 1. To do this, we considered using lambda schedulers that grow exponentially at first, then level off as they get closer to 1.
```python
def scheduler(step, total_steps, k):
normalized_step = step / total_steps
return 1 - (1 - normalized_step)**k
```
For different k values, with the total number of warmup steps normalized to 1, we have plots like the following:
<figure style="text-align: center;">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/1.58llm_extreme_quantization/exp_scheduler.png" alt="Exponential scheduler for different k values" style="width: 100%;"/>
<figcaption>Exponential scheduler for different k values</figcaption>
</figure>
We ran 4 experiments using the best-performing learning rate of 1e-4, testing values of k in [4, 6, 8, 10].
<figure style="text-align: center;">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/1.58llm_extreme_quantization/exp_scheduler_results.png" alt="Fine-tuning plots with exponential scheduler" style="width: 100%;"/>
<figcaption>Fine-tuning plots with exponential scheduler</figcaption>
</figure>
The smoothing worked well, as there's no spike like with the linear scheduler. However, the perplexity isn't great, staying around ~15, and the performance on downstream tasks is not better.
We also noticed the spike at the beginning, which the model struggled to recover from. With `lambda=0`, there's essentially no quantization, so the loss starts low, around ~2. But right after the first step, there's a spike, similar to what happened with the linear scheduler (as seen in the blue plot above). So, we tried a different scheduler, a sigmoid one, that starts slowly, rises sharply to 1, and then levels off as it approaches 1.
```python
import numpy as np

def sigmoid_scheduler(step, total_steps, k):
# Sigmoid-like curve: slow start, fast middle, slow end
normalized_step = step / total_steps
return 1 / (1 + np.exp(-k * (normalized_step - 0.5)))
```
For different k values, we have the following curves:
<figure style="text-align: center;">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/1.58llm_extreme_quantization/sig_scheduler.png" alt="Sigmoid scheduler for different k values" style="width: 100%;"/>
<figcaption>Sigmoid scheduler for different k values</figcaption>
</figure>
We ran 5 experiments this time with k in [15, 20, 25, 40, 100]:
<figure style="text-align: center;">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/1.58llm_extreme_quantization/sig_scheduler_exps.png" alt="Finetuning plots with sigmoid scheduler" style="width: 100%;"/>
<figcaption>Fine-tuning plots with sigmoid scheduler</figcaption>
</figure>
The sharp increase in lambda caused instability around the 500th step and didn’t fix the first divergence issue. However, for \\( k = 100 \\), we observed some improvement in downstream tasks (see the results table), although perplexity remained around ~13.5. Despite this, it didn’t show a clear performance boost over a linear scheduler.
Additionally, we experimented with training models from scratch using random weights and various learning rates. This allowed us to compare the effectiveness of our fine-tuning approach against traditional pre-training methods.
<figure style="text-align: center;">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/1.58llm_extreme_quantization/exp-randoms.png" alt="Different Pre-training plots with different learning rates" style="width: 100%;"/>
<figcaption>Different Pre-training plots with different learning rates</figcaption>
</figure>
None of the models trained from random weights performed better than our fine-tuned model. The best perplexity we achieved with those models was 26, which falls short compared to the results from our fine-tuning approach.
### Scaling to 100B Tokens!
We scaled our experiments to 100 billion tokens to see if we could match the performance of Llama 3 8B. We conducted longer training runs, starting from our best-performing checkpoint from the shorter runs with the linear scheduler, and continued fine-tuning for 45,000 steps. We experimented with different learning rates, and while the model performed closely to the Llama 3 model in some metrics, on average, it still lagged behind.
Here are some examples of the metrics we evaluated at various checkpoints during the training:
<figure style="text-align: center;">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/1.58llm_extreme_quantization/metrics_100B.png" alt="Metrics evaluations during the training for different lrs" style="width: 100%;"/>
<figcaption>Metrics evaluations during the training for different lrs</figcaption>
</figure>
And the average score looks like:
<figure style="text-align: center;">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/1.58llm_extreme_quantization/metric_avg.png" alt="Average evaluation during the training for different lrs" style="width: 100%;"/>
<figcaption>Average evaluation during the training for different lrs</figcaption>
</figure>
### Experiments on Smaller Models
In our initial experiments with smaller models like SmolLM, we observed that the warmup quantization technique didn’t yield as much improvement as it did with larger models. This suggests that the effectiveness of warmup quantization could be more closely related to model size and complexity.
For example, here are the loss curves for the [SmolLM 135M](https://huggingface.co/HuggingFaceTB/SmolLM-135M) model, comparing warmup quantization with full quantization from the start. Interestingly, the curves closely align, and the resulting perplexities aren't significantly different.
<figure style="text-align: center;">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/1.58llm_extreme_quantization/smol_llm_exp.png" alt="Smoll LLm fine-tuning experiment with & without warmup quantization" style="width: 100%;"/>
<figcaption>Smoll LLm fine-tuning experiment with & without warmup quantization</figcaption>
</figure>
### Results & Comparison
BitNet is effective in delivering strong performance compared to baseline methods, especially at lower bit levels. According to the paper, BitNet achieves scores that are on par with 8-bit models but with significantly lower inference costs. In the case of 4-bit models, methods that only quantize weights outperform those that quantize both weights and activations, as activations are harder to quantize. However, BitNet, which uses 1.58-bit weights, surpasses both weight-only and weight-and-activation quantization methods.
The table below presents the results for various metrics after the 10B-token fine-tuning of Llama3 8B. These results are compared against those from other model architectures to provide a comprehensive overview of performance (all evaluations were conducted using [Lighteval](https://github.com/huggingface/lighteval) on models in the [Nanotron](https://github.com/huggingface/nanotron) format).
<figure style="text-align: center;">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/1.58llm_extreme_quantization/metrics_comparison_updated.png" alt="Metrics comparison with Llama models" style="width: 100%;"/>
<figcaption>Metrics comparison with Llama models : Linear means Linear lambda scheduler, and Sigmoid means Sigmoid lambda scheduler (in our case k = 100)</figcaption>
</figure>
After fine-tuning on just 10 billion tokens using ternary weights, the model demonstrates impressive performance, especially when compared to other models that underwent much more extensive training. For instance, it outperforms the Bitnet 7B model, which was trained on a significantly larger dataset of 100 billion tokens. Additionally, it performs better than the FBI LLM (Fully Binarized LLM), a model that was distilled on an even more massive 1.26 trillion tokens. This highlights the model's efficiency and effectiveness despite the relatively smaller scale of its fine-tuning process.
For the 100B-token experiments, the best-performing checkpoint we have is the following:
<figure style="text-align: center;">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/1.58llm_extreme_quantization/metrics_100B_table.png" alt="Metrics comparison with Llama models for the model trained on 100B tokens" style="width: 100%;"/>
<figcaption>Metrics comparison with Llama models for the model trained on 100B tokens</figcaption>
</figure>
To replicate these results, you can check out this [PR](https://github.com/huggingface/nanotron/pull/174) to convert models to the Nanotron format, unpack the weights (check the function [unpack_weights](https://gist.github.com/MekkCyber/78c1532e8767e8da0588b778faf61866)), and use Lighteval.
Note that even though the models are fine-tuned from an Instruct-tuned model, they still need to be fine-tuned using an Instruct dataset as well. These can be considered base models.
## Custom Kernels & Benchmarks
To benefit from the BitNet low-precision weights, we pack them into an `int8` tensor (this makes the number of stored parameters go from 8B to 2.8B!). During inference, these weights must be unpacked before performing matrix multiplication. We implemented custom kernels in CUDA and Triton to handle the on-the-fly unpacking during the matrix multiplication process. For the matrix multiplication itself, we employed the cached tiled matrix multiplication technique. To fully grasp this approach, let's first review some CUDA programming fundamentals.
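To make the packing concrete, here is a sketch of the layout the Triton kernel below assumes: the ternary values are shifted from `{-1, 0, 1}` to `{0, 1, 2}` so each fits in 2 bits, and the K dimension is split into four slabs that share a byte (an illustration of the kernel's indexing, not the exact packing utility we ship):
```python
import torch

def pack_ternary(w_q):
    # w_q: (K, N) int8 tensor of ternary values in {-1, 0, 1}, K divisible by 4
    K = w_q.shape[0]
    q = (w_q + 1).to(torch.uint8).reshape(4, K // 4, -1)  # {-1,0,1} -> {0,1,2}
    # Byte r of the packed matrix stores, in bits (2i, 2i+1), the weight at
    # original row `i * K/4 + r`, which is what the kernel's mask/shift undoes.
    return q[0] | (q[1] << 2) | (q[2] << 4) | (q[3] << 6)  # (K // 4, N)

def unpack_ternary(packed):
    slabs = [(((packed >> (2 * i)) & 3).to(torch.int8) - 1) for i in range(4)]
    return torch.cat(slabs, dim=0)  # back to (K, N) ternary values
```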
### Basic GPU Concepts: Threads, Blocks, and Shared Memory
Before diving into cached tiled matrix multiplication, it's important to understand some basic GPU concepts:
- **Threads and Blocks**: GPUs execute thousands of threads simultaneously. These threads are grouped into blocks, and each block runs independently. The grid is made up of these blocks, and it represents the entire problem space. For example, in matrix multiplication, each thread might be responsible for computing a single element of the output matrix.
- **Shared Memory**: Each block has access to a limited amount of shared memory, which is much faster than global memory (the main memory on the GPU). However, shared memory is limited in size and shared among all threads within a block. Using shared memory effectively is key to improving performance in GPU programs.
### Challenges in Matrix Multiplication
A simple implementation of matrix multiplication on a GPU might involve each thread computing a single element of the result matrix by directly reading the necessary elements from global memory. However, this approach can be inefficient for the following reasons:
- **Memory Bandwidth**: Accessing global memory is relatively slow compared to the speed at which the GPU cores can perform computations. If each thread reads matrix elements directly from global memory, the memory access times can become a bottleneck.
- **Redundant Data Access**: In matrix multiplication, many elements of the input matrices are used multiple times. If each thread fetches the required data from global memory independently, the same data might be loaded into the GPU multiple times, leading to inefficiency. For example, if each thread is used to compute a single element in the output matrix, the thread responsible for calculating the element at position (i, j) will need to load the i-th row of matrix A and the j-th column of matrix B from global memory. However, other threads, such as the one computing the element at position (i+1, j), cannot reuse this data and will have to reload the same j-th column from global memory again.
### The Idea of Tiling
Tiling is a technique used to address these challenges; it was notably used in FlashAttention to improve the kernel's efficiency. The basic idea is to divide the matrices into smaller sub-matrices, called tiles, which can fit into the shared memory of the GPU. Instead of computing the entire output matrix in one go, the computation is broken down into smaller pieces that are processed tile by tile.
In the context of matrix multiplication, this means dividing matrices A and B into blocks (tiles), loading these tiles into shared memory, and then performing the multiplication on these smaller blocks. This approach allows the threads to reuse data stored in the fast shared memory, reducing the need to access global memory repeatedly.
Here’s how it works:
- **Loading Tiles into Shared Memory**: Each block of threads cooperatively loads a tile of matrix A and a corresponding tile of matrix B from global memory into shared memory. This operation is done once per tile, and then the tile is reused multiple times by the threads in the block.
- **Computing Partial Products**: Once the tiles are loaded into shared memory, each thread computes a partial product. Since all threads in a block are working on the same tiles in shared memory, they can efficiently reuse the data without additional global memory accesses.
- **Accumulating Results**: After computing the partial products for one tile, the threads load the next tiles from matrices A and B into shared memory and repeat the process. The results are accumulated in a register (or local memory), and once all tiles have been processed, the final value for the output matrix element is written back to global memory.
<figure style="text-align: center;">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/1.58llm_extreme_quantization/illustration_tiling.png" alt="Tiled Matrix multiplication illustration" style="width: 100%;"/>
<figcaption>Tiled Matrix multiplication illustration (source https://cnugteren.github.io/tutorial/pages/page4.html)</figcaption>
</figure>
**Practical Considerations**
When implementing cached tiled matrix multiplication, several factors are considered:
- **Tile Size**: The size of the tiles should be chosen to balance the trade-off between the amount of data that can fit into shared memory and the number of global memory accesses.
- **Memory Coalescing**: Global memory accesses should be coalesced, meaning that adjacent threads access adjacent memory locations.
- **Occupancy**: The number of threads per block and the number of blocks in the grid should be chosen to ensure high occupancy, which means having as many active warps (a warp is a set of 32 threads) as possible on the GPU to hide memory latency.
### Triton Kernel
Here is the Triton kernel we benchmarked:
```python
@triton.autotune(
configs=get_cuda_autotune_config(),
key=['M', 'N', 'K'],
)
@triton.jit
def matmul_kernel(
a_ptr, b_ptr, c_ptr,
M, N, K,
stride_am, stride_ak,
stride_bk, stride_bn,
stride_cm, stride_cn,
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
GROUP_SIZE_M: tl.constexpr,
):
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + ((pid % num_pid_in_group) % group_size_m)
pid_n = (pid % num_pid_in_group) // group_size_m
offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak)
b_ptrs = b_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn)
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.int32)
    for i in range(4):
b_ptrs = b_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn)
for j in range(0, tl.cdiv(K // 4, BLOCK_SIZE_K) ):
k = i * tl.cdiv(K // 4, BLOCK_SIZE_K) + j
# BLOCK_SIZE_K must be a divisor of K / 4
a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0)
b_uint8 = tl.load(b_ptrs, mask=offs_k[:, None] < K // 4 - j * BLOCK_SIZE_K, other=0)
mask = 3<<(2*i)
b = ((b_uint8 & mask) >> (2*i))
# We accumulate the tiles along the K dimension.
tensor_full = tl.full((1,), 1, dtype=tl.int8)
accumulator += tl.dot(a, (b.to(tl.int8) - tensor_full), out_dtype=tl.int32)
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs += BLOCK_SIZE_K * stride_bk
c = accumulator
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = c_ptr + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :]
c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
tl.store(c_ptrs, c, mask=c_mask)
def matmul(a, b):
assert a.shape[1] == b.shape[0] * 4, "Incompatible dimensions, the weight matrix need to be packed"
assert a.is_contiguous(), "Matrix A must be contiguous"
M, K = a.shape
_, N = b.shape
c = torch.empty((M, N), device=a.device, dtype=torch.float16)
grid = lambda META: (triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']), )
matmul_kernel[grid](
a, b, c,
M, N, K,
a.stride(0), a.stride(1),
b.stride(0), b.stride(1),
c.stride(0), c.stride(1),
)
return c
```
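Before calling the wrapper, the ternary weight matrix must be packed four values to a byte. The snippet below is a minimal sketch of a packing routine consistent with the unpacking loop above - each 2-bit field stores the weight plus one, and the `i`-th quarter of the rows of the unpacked matrix goes into bits `2*i` - with illustrative shapes and dtypes:
```python
import torch

def pack_ternary(w):
    # w: (K, N) int8 tensor with values in {-1, 0, 1}; K must be a multiple of 4.
    w = (w + 1).to(torch.uint8)                       # map {-1, 0, 1} -> {0, 1, 2}
    K, N = w.shape
    w = w.view(4, K // 4, N)                          # quarter i of the rows -> bits 2*i
    shifts = (2 * torch.arange(4, dtype=torch.uint8, device=w.device)).view(4, 1, 1)
    return (w << shifts).sum(dim=0).to(torch.uint8)   # packed (K // 4, N) matrix

a = torch.randint(-128, 127, (16, 4096), dtype=torch.int8, device="cuda")
w = torch.randint(-1, 2, (4096, 1024), dtype=torch.int8, device="cuda")
c = matmul(a, pack_ternary(w))                        # (16, 1024) output
```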
### Code Breakdown
1. **Determining Tile Positions**
The kernel first determines which tile (block) of the output matrix each thread block is responsible for:
- `pid` is the unique identifier for each thread block, obtained using `tl.program_id(axis=0)`.
- The grid is divided into groups of thread blocks (`GROUP_SIZE_M`). Each group processes a portion of the output matrix.
- `pid_m` and `pid_n` are the coordinates of the tile in the M and N dimensions, respectively.
- Offsets (`offs_am`, `offs_bn`, `offs_k`) are calculated to determine which elements of matrices A and B each thread in the block will work on (see the index-mapping sketch below).
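The grouped ordering is easier to see in isolation than in the kernel, so here is a small pure-Python sketch (with made-up grid sizes) that reproduces the mapping from a flat `pid` to tile coordinates:
```python
# Pure-Python sketch of the kernel's grouped tile ordering (illustrative sizes).
num_pid_m, num_pid_n, GROUP_SIZE_M = 4, 6, 2
num_pid_in_group = GROUP_SIZE_M * num_pid_n
for pid in range(num_pid_m * num_pid_n):
    group_id = pid // num_pid_in_group
    first_pid_m = group_id * GROUP_SIZE_M
    group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
    pid_m = first_pid_m + ((pid % num_pid_in_group) % group_size_m)
    pid_n = (pid % num_pid_in_group) // group_size_m
    print(pid, "->", (pid_m, pid_n))  # tiles are visited group by group along M
```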
2. **Loading and Computing Tiles**
The kernel uses a loop to iterate over the K dimension in chunks of `BLOCK_SIZE_K`. For each chunk:
- **Load Tiles**: tiles from matrices A and B are loaded from global memory.
- **Unpacking Matrix B**: The kernel assumes that matrix B is packed with `uint8` values, meaning each byte actually holds four smaller 2-bit values. The unpacking happens within the loop:
- `b_uint8` is loaded from global memory as packed `uint8`.
- Each 2-bit value is extracted with a bitmask and shift, then mapped back to a ternary weight by subtracting 1.
- **Dot Product**: The kernel computes the dot product of the loaded tiles from A and B, accumulating the results in the `accumulator`. The `accumulator` stores the partial results for the tile of the output matrix C.
3. **Storing Results**
After all tiles along the K dimension have been processed, the final results stored in the `accumulator` are converted to `float16` and written back to the corresponding tile of matrix C in global memory. The writing process respects memory boundaries using a mask to ensure that only valid elements are written.
For a more detailed explanation of the code, check out this [PR](https://github.com/linkedin/Liger-Kernel/pull/195/files).
### Benchmark
We benchmarked our kernel against the method of unpacking the weights using `@torch.compile` followed by performing the matmul in BF16 precision, and found that both approaches achieved approximately the same performance. To ensure accurate benchmarking, we performed the matmul operation over 2000 iterations and averaged the time taken during the last 1000 iterations, to eliminate any inefficiencies related to initial loading or compilation. Below is a graph showing the benchmark results. We also tested various matrix sizes, with the x-axis representing the number of multiplications on a log scale, and the y-axis showing the average time in ms.
<figure style="text-align: center;">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/1.58llm_extreme_quantization/without_bitblas.png" alt="Triton kernel compared to torch.compile" style="width: 100%;"/>
<figcaption>Triton kernel compared to torch.compile</figcaption>
</figure>
We also tried using BitBlas, which is a software library designed to perform matrix operations with mixed precision. It helps optimize these operations by allowing calculations to be done in lower precision formats like INT8, INT4, or even INT2, instead of the traditional FP32 or FP16 formats.
The benchmark results are promising, as BitBlas outperforms both our custom kernel and Torch's `matmul` function in low precision, as shown in the graph.
<figure style="text-align: center;">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/1.58llm_extreme_quantization/with_bitblas.png" alt="Bitblas benchmark" style="width: 100%;"/>
<figcaption>Bitblas benchmark</figcaption>
</figure>
However, during model loading, BitBlas needs to compile kernels tailored to the shape of the weight matrix and store them in a local database, which can increase the initial loading time.
## Conclusion
In conclusion, as LLMs continue to expand, reducing their computational demands through quantization is essential. This blog has explored the approach of 1.58-bit quantization, which uses ternary weights. While pre-training models in 1.58 bits is resource-intensive, we’ve demonstrated that, with some tricks, it’s possible to fine-tune existing models to this precision level, achieving efficient performance without sacrificing accuracy. By optimizing inference speed through specialized kernels, BitNet opens new possibilities for making LLMs more practical and scalable.
## Acknowledgements
We would like to express our sincere gratitude to Leandro von Werra, Thomas Wolf, and Marc Sun for their invaluable assistance and insights throughout this project. We also extend our thanks to Omar Sanseviero and Pedro Cuenca for their contributions in refining this blog post, helping to communicate our findings clearly and effectively to the AI community.
Furthermore, we want to acknowledge the GeneralAI team for their pioneering work on the BitNet project. Their research has been foundational to our efforts, and we are particularly grateful for the clear and precise figures provided in their paper.
## Additional Resources
1. H. Wang et al., *BitNet: Scaling 1-bit Transformers for Large Language Models*. [arxiv paper](https://arxiv.org/pdf/2310.11453)
2. S. Ma et al., *The Era of 1-bit LLMs: All Large Language Models are in 1.58 Bits*. [arxiv paper](https://arxiv.org/pdf/2402.17764)
3. S. Ma et al., *The Era of 1-bit LLMs: Training Tips, Code and FAQ*. [link](https://github.com/microsoft/unilm/blob/master/bitnet/The-Era-of-1-bit-LLMs__Training_Tips_Code_FAQ.pdf)
4. RJ. Honicky, *Are All Large Language Models Really in 1.58 Bits?*. [blogpost](https://learning-exhaust.hashnode.dev/are-all-large-language-models-really-in-158-bits)
5. L. Mao, *CUDA Matrix Multiplication Optimization*. [blogpost](https://leimao.github.io/article/CUDA-Matrix-Multiplication-Optimization/)
6. *Tutorial: OpenCL SGEMM tuning for Kepler*. [link](https://cnugteren.github.io/tutorial/pages/page4.html)
7. *CUDAMODE*. [github](https://github.com/cuda-mode), [youtube](https://www.youtube.com/channel/UCJgIbYl6C5no72a0NUAPcTA)
8. Wen-mei W. Hwu, David B. Kirk, Izzat El Hajj, *Programming Massively Parallel Processors : A Hands-on Approach* | 5 |
0 | hf_public_repos | hf_public_repos/blog/perceiver.md | ---
title: "Perceiver IO: a scalable, fully-attentional model that works on any modality"
thumbnail: /blog/assets/41_perceiver/thumbnail.png
authors:
- user: nielsr
---
# Perceiver IO: a scalable, fully-attentional model that works on any modality
### TLDR
We've added [Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver) to Transformers, the first Transformer-based neural network that works on all kinds of modalities (text, images, audio, video, point clouds,...) and combinations thereof. Take a look at the following Spaces to view some examples:
- predicting [optical flow](https://huggingface.co/spaces/nielsr/perceiver-optical-flow) between images
- [classifying images](https://huggingface.co/spaces/nielsr/perceiver-image-classification).
We also provide [several notebooks](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/Perceiver).
Below, you can find a technical explanation of the model.
### Introduction
The [Transformer](https://arxiv.org/abs/1706.03762), originally introduced by
Vaswani et al. in 2017, caused a revolution in the AI community, initially improving
state-of-the-art (SOTA) results in machine translation. In 2018, [BERT](https://arxiv.org/abs/1810.04805)
was released, a Transformer encoder-only model that crushed the benchmarks of natural language
processing (NLP), most famously the [GLUE benchmark](https://gluebenchmark.com/).
Not long after that, AI researchers started to apply the idea of BERT to other domains. To name a few examples:
* [Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2) by Facebook AI illustrated that the architecture could be extended to audio
* the [Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit) by Google AI showed that the architecture works really well for vision
* most recently the [Video Vision transformer (ViViT)](https://arxiv.org/abs/2103.15691), also by Google AI, applied the architecture to video.
In all of these domains, state-of-the-art results were improved dramatically, thanks to the combination of this powerful architecture with large-scale pre-training.
However, there's an important limitation to the architecture of the Transformer: due to its [self-attention mechanism](https://jalammar.github.io/illustrated-transformer/), it scales [very poorly](https://arxiv.org/abs/2009.06732v2) in both compute and memory. In every layer, all inputs are used to produce queries and keys, for which a pairwise dot product is computed. Hence, it is not possible to apply self-attention on high-dimensional data without some form of preprocessing. Wav2Vec2, for example, solves this by employing a feature encoder to turn a raw waveform into a sequence of time-based features. The Vision Transformer (ViT) divides an image into a sequence of non-overlapping patches, which serve as "tokens". The Video Vision Transformer (ViViT) extracts non-overlapping, spatio-temporal
“tubes” from a video, which serve as "tokens". To make the Transformer work on a particular modality, one typically discretizes it into a sequence of tokens.
## The Perceiver
The [Perceiver](https://arxiv.org/abs/2103.03206) aims to solve this limitation by employing the self-attention mechanism on a set of latent variables, rather than on the inputs. The `inputs` (which could be text, image, audio, video) are only used for doing cross-attention with the latents. This has the advantage that the bulk of compute happens in a latent space, where compute is cheap (one typically uses 256 or 512 latents). The resulting architecture has no quadratic dependence on the input size: the Transformer encoder only depends linearly on the input size, while latent attention is independent of it. In a follow-up paper, called [Perceiver IO](https://arxiv.org/abs/2107.14795), the authors extend this idea to let the Perceiver also handle arbitrary outputs. The idea is similar: one only uses the outputs for doing cross-attention with the latents. Note that I'll use the terms "Perceiver" and "Perceiver IO" interchangeably to refer to the Perceiver IO model throughout this blog post.
In the following section, we look in a bit more detail at how Perceiver IO actually works by going over its implementation in [HuggingFace Transformers](https://github.com/huggingface/transformers), a popular library that initially implemented Transformer-based models for NLP, but is now starting to implement them for other domains as well. In the sections below, we explain in detail - in terms of shapes of tensors - how the Perceiver actually pre and post processes modalities of any kind.
All Perceiver variants in HuggingFace Transformers are based on the `PerceiverModel` class. To initialize a `PerceiverModel`, one can provide 3 additional instances to the model:
- a preprocessor
- a decoder
- a postprocessor.
Note that each of these is optional. A `preprocessor` is only required in case one hasn't already embedded the `inputs` (such as text, image, audio, video) themselves. A `decoder` is only required in case one wants to decode the output of the Perceiver encoder (i.e. the last hidden states of the latents) into something more useful, such as classification logits or optical flow. A `postprocessor` is only required in case one wants to turn the output of the decoder into a specific feature (this is only required when doing auto-encoding, as we will see further). An overview of the architecture is depicted below.
<img src="assets/41_perceiver/perceiver_architecture.png" width="800">
<small>The Perceiver architecture.</small>
In other words, the `inputs` (which could be any modality, or a combination thereof) are first optionally preprocessed using a `preprocessor`. Next, the preprocessed inputs perform a cross-attention operation with the latent variables of the Perceiver encoder. In this operation, the latent variables produce queries (Q), while the preprocessed inputs produce keys and values (KV). After this operation, the Perceiver encoder employs a (repeatable) block of self-attention layers to update the embeddings of the latents. The encoder will finally produce a tensor of shape (batch_size, num_latents, d_latents), containing the last hidden states of the latents. Next, there's an optional `decoder`, which can be used to decode the final hidden states of the latents into something more useful, such as classification logits. This is done by performing a cross-attention operation, in which trainable embeddings are used to produce queries (Q), while the latents are used to produce keys and values (KV). Finally, there's an optional `postprocessor`, which can be used to postprocess the decoder outputs to specific features.
Let's start off by showing how the Perceiver is implemented to work on text.
## Perceiver for text
Suppose that one wants to apply the Perceiver to perform text classification. As the memory and time requirements of the Perceiver's self-attention mechanism don't depend on the size of the inputs, one can directly provide raw UTF-8 bytes to the model. This is beneficial, as familiar Transformer-based models (like [BERT](https://arxiv.org/abs/1810.04805) and [RoBERTa](https://arxiv.org/abs/1907.11692)) all employ some form of explicit tokenization, such as [WordPiece](https://research.google/pubs/pub37842/), [BPE](https://arxiv.org/abs/1508.07909) or [SentencePiece](https://arxiv.org/abs/1808.06226), which [may be harmful](https://arxiv.org/abs/2004.03720). For a fair comparison to BERT (which uses a sequence length of 512 subword tokens), the authors used input sequences of 2048 bytes. Let's say one also adds a batch dimension, then the `inputs` to the model are of shape (batch_size, 2048). The `inputs` contain the byte IDs (similar to the `input_ids` of BERT) for a single piece of text. One can use `PerceiverTokenizer` to turn a text into a sequence of byte IDs, padded up to a length of 2048:
``` python
from transformers import PerceiverTokenizer
tokenizer = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
text = "hello world"
inputs = tokenizer(text, padding="max_length", return_tensors="pt").input_ids
```
In this case, one provides `PerceiverTextPreprocessor` as preprocessor to the model, which will take care of embedding the `inputs` (i.e. turn each byte ID into a corresponding vector), as well as adding absolute position embeddings. As decoder, one provides `PerceiverClassificationDecoder` to the model (which will turn the last hidden states of the latents into classification logits). No postprocessor is required. In other words, a Perceiver model for text classification (which is called `PerceiverForSequenceClassification` in HuggingFace Transformers) is implemented as follows:
``` python
from torch import nn
from transformers import PerceiverModel
from transformers.models.perceiver.modeling_perceiver import PerceiverTextPreprocessor, PerceiverClassificationDecoder

class PerceiverForSequenceClassification(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.perceiver = PerceiverModel(
            config,
            input_preprocessor=PerceiverTextPreprocessor(config),
            decoder=PerceiverClassificationDecoder(
                config,
                num_channels=config.d_latents,
                trainable_position_encoding_kwargs=dict(num_channels=config.d_latents, index_dims=1),
                use_query_residual=True,
            ),
        )
```
One can already see here that the decoder is initialized with trainable position encoding arguments. Why is that? Well, let's take a look in detail at how Perceiver IO works. At initialization, `PerceiverModel` internally defines a set of latent variables, as follows:
``` python
import torch
from torch import nn

self.latents = nn.Parameter(torch.randn(config.num_latents, config.d_latents))
```
In the Perceiver IO paper, one uses 256 latents, and sets the dimensionality of the latents to 1280. If one also adds a batch dimension, the Perceiver has latents of shape (batch_size, 256, 1280). First, the preprocessor (which one provides at initialization) will take care of embedding the UTF-8 byte IDs to embedding vectors. Hence, `PerceiverTextPreprocessor` will turn the `inputs` of shape (batch_size, 2048) to a tensor of shape (batch_size, 2048, 768) - assuming that each byte ID is turned into a vector of size 768 (this is determined by the `d_model` attribute of `PerceiverConfig`).
After this, Perceiver IO applies cross-attention between the latents (which produce queries) of shape (batch_size, 256, 1280) and the preprocessed inputs (which produce keys and values) of shape (batch_size, 2048, 768). The output of this initial cross-attention operation is a tensor that has the same shape as the queries (which are the latents, in this case). In other words, the output of the cross-attention operation is of shape (batch_size, 256, 1280).
Next, a (repeatable) block of self-attention layers is applied to update the representations of the latents. Note that these don't depend on the length of the inputs (i.e. the bytes) one provided, as these were only used during the cross-attention operation. In the Perceiver IO paper, a single block of 26 self-attention layers (each of which has 8 attention heads) was used to update the representations of the latents of the text model. Note that the output after these 26 self-attention layers still has the same shape as what one initially provided as input to the encoder: (batch_size, 256, 1280). These are also called the "last hidden states" of the latents. This is very similar to the "last hidden states" of the tokens one provides to BERT.
Ok, so now one has final hidden states of shape (batch_size, 256, 1280). Great, but one actually wants to turn these into classification logits of shape (batch_size, num_labels). How can we make the Perceiver output these?
This is handled by `PerceiverClassificationDecoder`. The idea is very similar to what was done when mapping the inputs to the latent space: one uses cross-attention. But now, the latent variables will produce keys and values, and one provides a tensor of whatever shape we'd like - in this case we'll provide a tensor of shape (batch_size, 1, num_labels) which will act as queries (the authors refer to these as "decoder queries", because they are used in the decoder). This tensor will be randomly initialized at the beginning of training, and trained end-to-end. As one can see, one just provides a dummy sequence length dimension of 1. Note that the output of a QKV attention layer always has the same shape as the shape of the queries - hence the decoder will output a tensor of shape (batch_size, 1, num_labels). The decoder then simply squeezes this tensor to have shape (batch_size, num_labels) and boom, one has classification logits<sup id="a1">[1](#f1)</sup>.
Great, isn't it? The Perceiver authors also show that it is straightforward to pre-train the Perceiver for masked language modeling, similar to BERT. This model is also available in HuggingFace Transformers, and called `PerceiverForMaskedLM`. The only difference with `PerceiverForSequenceClassification` is that it doesn't use `PerceiverClassificationDecoder` as decoder, but rather `PerceiverBasicDecoder`, to decode the latents to a tensor of shape (batch_size, 2048, 1280). After this, a language modeling head is added, which turns it into a tensor of shape (batch_size, 2048, vocab_size). The vocabulary size of the Perceiver is only 262, namely the 256 UTF-8 byte IDs, as well as 6 special tokens. By pre-training the Perceiver on English Wikipedia and [C4](https://arxiv.org/abs/1910.10683), the authors show that it is possible to achieve an overall score of 81.8 on GLUE after fine-tuning.
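To try this out, here is a short inference sketch with the pre-trained checkpoint, mirroring the example from the Transformers documentation (the byte offsets 52:61 correspond to the characters of " missing.", shifted by one for the leading special token):
``` python
import torch
from transformers import PerceiverTokenizer, PerceiverForMaskedLM

tokenizer = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
model = PerceiverForMaskedLM.from_pretrained("deepmind/language-perceiver")

text = "This is an incomplete sentence where some words are missing."
encoding = tokenizer(text, padding="max_length", return_tensors="pt")
# mask the bytes corresponding to " missing."
encoding["input_ids"][0, 52:61] = tokenizer.mask_token_id

with torch.no_grad():
    outputs = model(**encoding)

logits = outputs.logits  # shape (batch_size, 2048, 262)
predictions = logits[0, 52:61].argmax(dim=-1)
print(tokenizer.decode(predictions))
```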
## Perceiver for images
Now that we've seen how to apply the Perceiver to perform text classification, it is straightforward to apply the Perceiver to do image classification. The only difference is that we'll provide a different `preprocessor` to the model, which will embed the image `inputs`. The Perceiver authors actually tried out 3 different ways of preprocessing:
- flattening the pixel values, applying a convolutional layer with kernel size 1 and adding learned absolute 1D position embeddings.
- flattening the pixel values and adding fixed 2D Fourier position embeddings.
- applying a 2D convolutional + maxpool layer and adding fixed 2D Fourier position embeddings.
Each of these are implemented in the Transformers library, and called `PerceiverForImageClassificationLearned`, `PerceiverForImageClassificationFourier` and `PerceiverForImageClassificationConvProcessing` respectively. They only differ in their configuration of `PerceiverImagePreprocessor`. Let's take a closer look at `PerceiverForImageClassificationLearned`. It initializes a `PerceiverModel` as follows:
``` python
from torch import nn
from transformers import PerceiverModel
from transformers.models.perceiver.modeling_perceiver import PerceiverImagePreprocessor, PerceiverClassificationDecoder

class PerceiverForImageClassificationLearned(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.perceiver = PerceiverModel(
            config,
            input_preprocessor=PerceiverImagePreprocessor(
                config,
                prep_type="conv1x1",
                spatial_downsample=1,
                out_channels=256,
                position_encoding_type="trainable",
                concat_or_add_pos="concat",
                project_pos_dim=256,
                trainable_position_encoding_kwargs=dict(num_channels=256, index_dims=config.image_size ** 2),
            ),
            decoder=PerceiverClassificationDecoder(
                config,
                num_channels=config.d_latents,
                trainable_position_encoding_kwargs=dict(num_channels=config.d_latents, index_dims=1),
                use_query_residual=True,
            ),
        )
```
One can see that `PerceiverImagePreprocessor` is initialized with `prep_type = "conv1x1"` and that one adds arguments for the trainable position encodings. So how does this preprocessor work in detail? Suppose that one provides a batch of images to the model. Let's say one applies center cropping to a resolution of 224 and normalization of the color channels first, such that the `inputs` are of shape (batch_size, num_channels, height, width) = (batch_size, 3, 224, 224). One can use `PerceiverImageProcessor` for this, as follows:
``` python
from transformers import PerceiverImageProcessor
import requests
from PIL import Image
processor = PerceiverImageProcessor.from_pretrained("deepmind/vision-perceiver")
url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(image, return_tensors="pt").pixel_values
```
`PerceiverImagePreprocessor` (with the settings defined above) will first apply a convolutional layer with kernel size (1, 1) to turn the `inputs` into a tensor of shape (batch_size, 256, 224, 224) - hence increasing the channel dimension. It will then place the channel dimension last - so now one has a tensor of shape (batch_size, 224, 224, 256). Next, it flattens the spatial (height + width) dimensions such that one has a tensor of shape (batch_size, 50176, 256). Next, it concatenates it with trainable 1D position embeddings. As the dimensionality of the position embeddings is defined to be 256 (see the `num_channels` argument above), one is left with a tensor of shape (batch_size, 50176, 512). This tensor will be used for the cross-attention operation with the latents.
The authors use 512 latents for all image models, and set the dimensionality of the latents to 1024. Hence, the latents are a tensor of shape (batch_size, 512, 1024) - assuming we add a batch dimension. The cross-attention layer takes the queries of shape (batch_size, 512, 1024) and keys + values of shape (batch_size, 50176, 512) as input, and produces a tensor that has the same shape as the queries, so outputs a new tensor of shape (batch_size, 512, 1024). Next, a block of 6 self-attention layers is applied repeatedly (8 times), to produce final hidden states of the latents of shape (batch_size, 512, 1024). To turn these into classification logits, `PerceiverClassificationDecoder` is used, which works similarly to the one for text classification: it uses the latents as keys + values, and uses trainable position embeddings of shape (batch_size, 1, num_labels) as queries. The output of the cross-attention operation is a tensor of shape (batch_size, 1, num_labels), which is squeezed to have classification logits of shape (batch_size, num_labels).
The Perceiver authors show that the model is capable of achieving strong results compared to models designed primarily for image classification (such as [ResNet](https://arxiv.org/abs/1512.03385) or [ViT](https://arxiv.org/abs/2010.11929)). After large-scale pre-training on [JFT](https://paperswithcode.com/dataset/jft-300m), the model that uses conv+maxpool preprocessing (`PerceiverForImageClassificationConvProcessing`) achieves 84.5 top-1 accuracy on ImageNet. Remarkably, `PerceiverForImageClassificationLearned`, the model that only employs a 1D fully learned position encoding, achieves a top-1 accuracy of 72.7 despite having no privileged information about the 2D structure of images.
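Putting the pieces together, inference with the learned-position-encoding variant takes only a few lines. This sketch assumes the `deepmind/vision-perceiver-learned` checkpoint on the Hub and reuses the `image` loaded earlier:
``` python
import torch
from transformers import PerceiverImageProcessor, PerceiverForImageClassificationLearned

processor = PerceiverImageProcessor.from_pretrained("deepmind/vision-perceiver-learned")
model = PerceiverForImageClassificationLearned.from_pretrained("deepmind/vision-perceiver-learned")

inputs = processor(image, return_tensors="pt").pixel_values
with torch.no_grad():
    logits = model(inputs=inputs).logits  # shape (batch_size, num_labels)
print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
```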
## Perceiver for optical flow
The authors show that it's straightforward to make the Perceiver also work on optical flow, which is a decades-old problem in computer vision, with many broader applications. For an introduction to optical flow, I refer to [this blog post](https://medium.com/swlh/what-is-optical-flow-and-why-does-it-matter-in-deep-learning-b3278bb205b5). Given two images of the same scene (e.g. two consecutive frames of a video), the task is to estimate the 2D displacement for each pixel in the first image. Existing algorithms are quite hand-engineered and complex, however with the Perceiver, this becomes relatively simple. The model is implemented in the Transformers library, and available as `PerceiverForOpticalFlow`. It is implemented as follows:
``` python
from torch import nn
from transformers import PerceiverModel
from transformers.models.perceiver.modeling_perceiver import PerceiverImagePreprocessor, PerceiverOpticalFlowDecoder

class PerceiverForOpticalFlow(nn.Module):
    def __init__(self, config):
        super().__init__()
        fourier_position_encoding_kwargs_preprocessor = dict(
            num_bands=64,
            max_resolution=config.train_size,
            sine_only=False,
            concat_pos=True,
        )
        fourier_position_encoding_kwargs_decoder = dict(
            concat_pos=True, max_resolution=config.train_size, num_bands=64, sine_only=False
        )
        image_preprocessor = PerceiverImagePreprocessor(
            config,
            prep_type="patches",
            spatial_downsample=1,
            conv_after_patching=True,
            conv_after_patching_in_channels=54,
            temporal_downsample=2,
            position_encoding_type="fourier",
            # position_encoding_kwargs
            fourier_position_encoding_kwargs=fourier_position_encoding_kwargs_preprocessor,
        )
        self.perceiver = PerceiverModel(
            config,
            input_preprocessor=image_preprocessor,
            decoder=PerceiverOpticalFlowDecoder(
                config,
                num_channels=image_preprocessor.num_channels,
                output_image_shape=config.train_size,
                rescale_factor=100.0,
                use_query_residual=False,
                output_num_channels=2,
                position_encoding_type="fourier",
                fourier_position_encoding_kwargs=fourier_position_encoding_kwargs_decoder,
            ),
        )
```
As one can see, `PerceiverImagePreprocessor` is used as preprocessor (i.e. to prepare the 2 images for the cross-attention operation with the latents) and `PerceiverOpticalFlowDecoder` is used as decoder (i.e. to decode the final hidden states of the latents to an actual predicted flow). For each of the 2 frames, the authors extract a 3 x 3 patch around each pixel, leading to 3 x 3 x 3 = 27 values for each pixel (as each pixel also has 3 color channels). The authors use a training resolution of (368, 496). If one stacks 2 frames of size (368, 496) of each training example on top of each other, the `inputs` to the model are of shape (batch_size, 2, 27, 368, 496).
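The patch extraction can be expressed compactly with `torch.nn.functional.unfold`. The following is a sketch that reproduces the shapes described above (the actual preprocessing in Transformers may differ in details such as channel ordering):
``` python
import torch
import torch.nn.functional as F

# Extract a 3x3 patch around every pixel of two stacked frames.
frames = torch.randn(1, 2, 3, 368, 496)  # (batch, time, channels, height, width)
b, t, c, h, w = frames.shape
patches = F.unfold(frames.view(b * t, c, h, w), kernel_size=3, padding=1)  # (b*t, 27, h*w)
patches = patches.view(b, t, 3 * 3 * c, h, w)  # 27 values per pixel
print(patches.shape)  # torch.Size([1, 2, 27, 368, 496])
```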
The preprocessor (with the settings defined above) will first concatenate the frames along the channel dimension, leading to a tensor of shape (batch_size, 368, 496, 54) - assuming one also moves the channel dimension to be last. The authors explain in their paper (page 8) why concatenation along the channel dimension makes sense. Since `conv_after_patching` is set, a linear projection then maps the 54 channels to 64. Next, the spatial dimensions are flattened, leading to a tensor of shape (batch_size, 368*496, 64) = (batch_size, 182528, 64). Then, fixed Fourier position embeddings (each of which has dimensionality 258) are concatenated, leading to a final preprocessed input of shape (batch_size, 182528, 322). These will be used to perform cross-attention with the latents.
The authors use 2048 latents for the optical flow model (yes, 2048!), with a dimensionality of 512 for each latent. Hence, the latents have shape (batch_size, 2048, 512). After the cross-attention, one again has a tensor of the same shape (as the latents act as queries). Next, a single block of 24 self-attention layers (each of which has 16 attention heads) are applied to update the embeddings of the latents.
To decode the final hidden states of the latents to an actual predicted flow, `PerceiverOpticalFlowDecoder` simply uses the preprocessed inputs of shape (batch_size, 182528, 322) as queries for the cross-attention operation. Next, these are projected to a tensor of shape (batch_size, 182528, 2). Finally, one rescales and reshapes this back to the original image size to get a predicted flow of shape (batch_size, 368, 496, 2). The authors claim state-of-the-art results on important benchmarks including [Sintel](https://link.springer.com/chapter/10.1007/978-3-642-33783-3_44) and [KITTI](http://www.cvlibs.net/publications/Menze2015CVPR.pdf) when training on [AutoFlow](https://arxiv.org/abs/2104.14544), a large synthetic dataset of 400,000 annotated image pairs.
The video below shows the predicted flow on 2 examples.
<p float="left">
<img src="https://lh3.googleusercontent.com/Rkhzc3Ckl4oWrOjxviohVmK4ZYGvGGrxaXCaOgBl3YGdBuHeFcQG_0-QjenoHKlTsHR6_6LpmCYu2bghEEzWdpYYp6QksFi0nkI3RNkdJEP-6c13bg=w2048-rw-v1" width="300" style="display:inline" />
<img src="https://lh3.googleusercontent.com/p51q5x-JYJKltxxUtp60lUViVguTnxBpw7dQFfs47FTWpaj3iTmz2RJCGuiIEEpIoJKhZBU19W_k85lJ-8AtywD9YiVXc5KbiubvZakz2qFrNMj-cA=w2048-rw-v1" width="300" style="display:inline" />
<img src="assets/41_perceiver/flow_legend.jpeg" width="300" />
</p>
<small> Optical flow estimation by Perceiver IO. The colour of each pixel shows the direction and speed of motion estimated by the model, as indicated by the legend on the right.</small>
## Perceiver for multimodal autoencoding
The authors also use the Perceiver for multimodal autoencoding. The goal of multimodal autoencoding is to learn a model that can accurately reconstruct multimodal inputs in the presence of a bottleneck induced by an architecture. The authors train the model on the [Kinetics-700 dataset](https://deepmind.com/research/open-source/kinetics), in which each example consists of a sequence of images (i.e. frames), audio and a class label (one of 700 possible labels). This model is also implemented in HuggingFace Transformers, and available as `PerceiverForMultimodalAutoencoding`. For brevity, I will omit the code of defining this model, but important to note is that it uses `PerceiverMultimodalPreprocessor` to prepare the `inputs` for the model. This preprocessor will first use the respective preprocessor for each modality (image, audio, label) separately. Suppose one has a video of 16 frames of resolution 224x224 and 30,720 audio samples, then the modalities are preprocessed as follows:
- The images - actually a sequence of frames - of shape (batch_size, 16, 3, 224, 224) are turned into a tensor of shape (batch_size, 50176, 243) using `PerceiverImagePreprocessor`. This is a “space to depth” transformation, after which fixed 2D Fourier position embeddings are concatenated.
- The audio has shape (batch_size, 30720, 1) and is turned into a tensor of shape (batch_size, 1920, 401) using `PerceiverAudioPreprocessor` (which concatenates fixed Fourier position embeddings to the raw audio).
- The class label of shape (batch_size, 700) is turned into a tensor of shape (batch_size, 1, 700) using `PerceiverOneHotPreprocessor`. In other words, this preprocessor just adds a dummy time (index) dimension. Note that one initializes the class label with a tensor of zeros during evaluation, so as to let the model act as a video classifier.
Next, `PerceiverMultimodalPreprocessor` will pad the preprocessed modalities with modality-specific trainable embeddings to make concatenation along the time dimension possible. In this case, the modality with the highest channel dimension is the class label (it has 700 channels). The authors enforce a minimum padding size of 4, hence each modality will be padded to have 704 channels. They can then be concatenated, hence the final preprocessed input is a tensor of shape (batch_size, 50176 + 1920 + 1, 704) = (batch_size, 52097, 704).
The authors use 784 latents, with a dimensionality of 512 for each latent. Hence, the latents have shape (batch_size, 784, 512). After the cross-attention, one again has a tensor of the same shape (as the latents act as queries). Next, a single block of 8 self-attention layers (each of which has 8 attention heads) is applied to update the embeddings of the latents.
Next, there is `PerceiverMultimodalDecoder`, which will first create output queries for each modality separately. However, as it is not possible to decode an entire video in a single forward pass, the authors instead auto-encode in chunks. Each chunk will subsample certain index dimensions for every modality. Let's say we process the video in 128 chunks, then the decoder queries will be produced as follows:
- For the image modality, the total size of the decoder query is 16x3x224x224 = 802,816. However, when auto-encoding the first chunk, one subsamples the first 802,816/128 = 6272 values. The shape of the image output query is (batch_size, 6272, 195) - the 195 comes from the fact that fixed Fourier position embeddings are used.
- For the audio modality, the total input has 30,720 values. However, one only subsamples the first 30720/128/16 = 15 values. Hence, the shape of the audio query is (batch_size, 15, 385). Here, the 385 comes from the fact that fixed Fourier position embeddings are used.
- For the class label modality, there's no need to subsample. Hence, the subsampled index is set to 1. The shape of the label output query is (batch_size, 1, 1024). One uses trainable position embeddings (of size 1024) for the queries.
Similarly to the preprocessor, `PerceiverMultimodalDecoder` pads the different modalities to the same number of channels, to make concatenation of the modality-specific queries possible along the time dimension. Here, the class label has again the highest number of channels (1024), and the authors enforce a minimum padding size of 2, hence every modality will be padded to have 1026 channels. After concatenation, the final decoder query has shape (batch_size, 6272 + 15 + 1, 1026) = (batch_size, 6288, 1026). This tensor produces queries in the cross-attention operation, while the latents act as keys and values. Hence, the output of the cross-attention operation is a tensor of shape (batch_size, 6288, 1026). Next, `PerceiverMultimodalDecoder` employs a linear layer to reduce the output channels to get a tensor of shape (batch_size, 6288, 512).
Finally, there is `PerceiverMultimodalPostprocessor`. This class postprocesses the output of the decoder to produce an actual reconstruction of each modality. It first splits up the time dimension of the decoder output according to the different modalities: (batch_size, 6272, 512) for image, (batch_size, 15, 512) for audio and (batch_size, 1, 512) for the class label. Next, the respective postprocessors for each modality are applied:
- The image post processor (which is called `PerceiverProjectionPostprocessor` in Transformers) simply turns the (batch_size, 6272, 512) tensor into a tensor of shape (batch_size, 6272, 3) - i.e. it projects the final dimension to RGB values.
- `PerceiverAudioPostprocessor` turns the (batch_size, 15, 512) tensor into a tensor of shape (batch_size, 240).
- `PerceiverClassificationPostprocessor` simply takes the first (and only) index, to get a tensor of shape (batch_size, 700).
So now one ends up with tensors containing the reconstruction of the image, audio and class label modalities respectively. As one auto-encodes an entire video in chunks, one needs to concatenate the reconstruction of each chunk to have a final reconstruction of an entire video. The figure below shows an example:
<p float="left">
<img src="assets/41_perceiver/original_video.gif" width="200" style="display:inline">
<img src="assets/41_perceiver/reconstructed_video.gif" width="200" style="display:inline">
<img src="assets/41_perceiver/perceiver_audio_autoencoding.png" width="400">
</p>
<small>Above: original video (left), reconstruction of the first 16 frames (right). Video taken from the [UCF101 dataset](https://www.crcv.ucf.edu/data/UCF101.php). Below: reconstructed audio (taken from the paper). </small>
<img src="assets/41_perceiver/predicted_labels.png" width="500">
<small>Top 5 predicted labels for the video above. By masking the class label, the Perceiver becomes a video classifier. </small>
With this approach, the model learns a joint distribution across 3 modalities. The authors do note that because the latent variables are shared across modalities and not explicitly allocated between them, the quality of reconstructions for each modality is sensitive to the weight of its loss term and other training hyperparameters. By putting stronger emphasis on classification accuracy, they are able to reach 45% top-1 accuracy while maintaining 20.7 PSNR (peak signal-to-noise ratio) for video.
## Other applications of the Perceiver
Note that there are no limits on the applications of the Perceiver! In the original [Perceiver paper](https://arxiv.org/abs/2103.03206), the authors showed that the architecture can be used to process 3D point clouds – a common concern for self-driving cars equipped with Lidar sensors. They trained the model on [ModelNet40](https://modelnet.cs.princeton.edu/), a dataset of point clouds derived from 3D triangular meshes spanning 40 object categories. The model was shown to achieve a top-1 accuracy of 85.7 % on the test set, competing with [PointNet++](https://arxiv.org/abs/1706.02413), a highly specialized model that uses extra geometric features and performs more advanced augmentations.
The authors also used the Perceiver to replace the original Transformer in [AlphaStar](https://deepmind.com/blog/article/alphastar-mastering-real-time-strategy-game-starcraft-ii), the state-of-the-art reinforcement learning system for the complex game of [StarCraft II](https://starcraft2.com/en-us/). Without tuning any additional parameters, the authors observed that the resulting agent reached the same level of performance as the original AlphaStar agent, reaching an 87% win-rate versus the Elite bot after [behavioral cloning](https://proceedings.neurips.cc/paper/1988/file/812b4ba287f5ee0bc9d43bbf5bbe87fb-Paper.pdf) on human data.
It is important to note that the models currently implemented (such as `PerceiverForImageClassificationLearned`, `PerceiverForOpticalFlow`) are just examples of what you can do with the Perceiver. Each of these are different instances of `PerceiverModel`, just with a different preprocessor and/or decoder (and optionally, a postprocessor as is the case for multimodal autoencoding). People can come up with new preprocessors, decoders and postprocessors to make the model solve different problems. For instance, one could extend the Perceiver to perform named-entity recognition (NER) or question-answering similar to BERT, audio classification similar to Wav2Vec2 or object detection similar to DETR.
## Conclusion
In this blog post, we went over the architecture of Perceiver IO, an extension of the Perceiver by Google DeepMind, and showed its generality in handling all kinds of modalities. The big advantage of the Perceiver is that the compute and memory requirements of the self-attention mechanism don't depend on the size of the inputs and outputs, as the bulk of compute happens in a latent space (a not-too-large set of vectors). Despite its task-agnostic architecture, the model is capable of achieving great results on modalities such as language, vision, multimodal data, and point clouds. In the future, it might be interesting to train a single (shared) Perceiver encoder on several modalities at the same time, and use modality-specific preprocessors and postprocessors. As [Karpathy puts it](https://twitter.com/karpathy/status/1424469507658031109), it may well be that this architecture can unify all modalities into a shared space, with a library of encoders/decoders.
Speaking of a library, the model is available in [HuggingFace Transformers](https://github.com/huggingface/transformers) as of today. It will be exciting to see what people build with it, as its applications seem endless!
### Appendix
The implementation in HuggingFace Transformers is based on the original JAX/Haiku implementation which can be found [here](https://github.com/deepmind/deepmind-research/tree/master/perceiver).
The documentation of the Perceiver IO model in HuggingFace Transformers is available [here](https://huggingface.co/docs/transformers/model_doc/perceiver).
Tutorial notebooks regarding the Perceiver on several modalities can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/Perceiver).
## Footnotes
<b id="f1">1</b> Note that in the official paper, the authors used a two-layer MLP to generate the output logits, which was omitted here for brevity. [↩](#a1)
| 6 |
0 | hf_public_repos | hf_public_repos/blog/few-shot-learning-gpt-neo-and-inference-api.md | ---
title: 'Few-shot learning in practice: GPT-Neo and the 🤗 Accelerated Inference API'
# thumbnail: /blog/assets/22_few_shot_learning_gpt_neo_and_inference_api/thumbnail.png
authors:
- user: philschmid
---
# Few-shot learning in practice: GPT-Neo and the 🤗 Accelerated Inference API
In many Machine Learning applications, the amount of available labeled data is a barrier to producing a high-performing model. The latest developments in NLP show that you can overcome this limitation by providing a few examples at inference time with a large language model - a technique known as Few-Shot Learning. In this blog post, we'll explain what Few-Shot Learning is, and explore how a large language model called GPT-Neo, and the 🤗 Accelerated Inference API, can be used to generate your own predictions.
## What is Few-Shot Learning?
Few-Shot Learning refers to the practice of feeding a machine learning model with a very small amount of training data to guide its predictions, like a few examples at inference time, as opposed to standard fine-tuning techniques which require a relatively large amount of training data for the pre-trained model to adapt to the desired task with accuracy.
This technique has been mostly used in computer vision, but with some of the latest Language Models, like [EleutherAI GPT-Neo](https://www.eleuther.ai/research/projects/gpt-neo/) and [OpenAI GPT-3](https://openai.com/blog/gpt-3-apps/), we can now use it in Natural Language Processing (NLP).
In NLP, Few-Shot Learning can be used with Large Language Models, which have learned to perform a wide number of tasks implicitly during their pre-training on large text datasets. This enables the model to generalize, that is to understand related but previously unseen tasks, with just a few examples.
Few-Shot NLP examples consist of three main components:
- **Task Description**: A short description of what the model should do, e.g. "Translate English to French"
- **Examples**: A few examples showing the model what it is expected to predict, e.g. "sea otter => loutre de mer"
- **Prompt**: The beginning of a new example, which the model should complete by generating the missing text, e.g. "cheese => "
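Concretely, the three components are concatenated into a single string that the model is asked to complete:
```python
prompt = (
    "Translate English to French\n"  # task description
    "sea otter => loutre de mer\n"   # example
    "cheese => "                     # prompt for the model to complete
)
```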

<small>Image from <a href="https://arxiv.org/abs/2005.14165" target="_blank">Language Models are Few-Shot Learners</a></small>
Creating these few-shot examples can be tricky, since you need to articulate the “task” you want the model to perform through them. A common issue is that models, especially smaller ones, are very sensitive to the way the examples are written.
An approach to optimize Few-Shot Learning in production is to learn a common representation for a task and then train task-specific classifiers on top of this representation.
OpenAI showed in the [GPT-3 Paper](https://arxiv.org/abs/2005.14165) that the few-shot prompting ability improves with the number of language model parameters.

<small>Image from <a href="https://arxiv.org/abs/2005.14165" target="_blank">Language Models are Few-Shot Learners</a></small>
Let's now take a look at how GPT-Neo and the 🤗 Accelerated Inference API can be used to generate your own Few-Shot Learning predictions!
---
## What is GPT-Neo?
GPT-Neo is a family of transformer-based language models from [EleutherAI](https://www.eleuther.ai/projects/gpt-neo/) based on the GPT architecture. [EleutherAI](https://www.eleuther.ai)'s primary goal is to train a model that is equivalent in size to GPT-3 and make it available to the public under an open license.
All of the currently available GPT-Neo checkpoints are trained with the Pile dataset, a large text corpus that is extensively documented in ([Gao et al., 2021](https://arxiv.org/abs/2101.00027)). As such, it is expected to function better on the text that matches the distribution of its training text; we recommend keeping this in mind when designing your examples.
---
## 🤗 Accelerated Inference API
The [Accelerated Inference API](https://huggingface.co/inference-api) is our hosted service to run inference on any of the 10,000+ models publicly available on the 🤗 Model Hub, or your own private models, via simple API calls. The API includes acceleration on CPU and GPU with [up to 100x speedup](https://huggingface.co/blog/accelerated-inference) compared to out of the box deployment of Transformers.
To integrate Few-Shot Learning predictions with `GPT-Neo` in your own apps, you can use the 🤗 Accelerated Inference API with the code snippet below. You can find your API Token [here](https://huggingface.co/settings/token), if you don't have an account you can get started [here](https://huggingface.co/pricing).
```python
import json

import requests

API_TOKEN = ""

def query(payload='', parameters=None, options={'use_cache': False}):
    API_URL = "https://api-inference.huggingface.co/models/EleutherAI/gpt-neo-2.7B"
    headers = {"Authorization": f"Bearer {API_TOKEN}"}
    body = {"inputs": payload, 'parameters': parameters, 'options': options}
    response = requests.request("POST", API_URL, headers=headers, data=json.dumps(body))
    try:
        response.raise_for_status()
    except requests.exceptions.HTTPError:
        return "Error: " + str(response.json()['error'])
    else:
        return response.json()[0]['generated_text']

parameters = {
    'max_new_tokens': 25,  # number of generated tokens
    'temperature': 0.5,    # controlling the randomness of generations
    'end_sequence': "###"  # stopping sequence for generation
}

prompt = "...."  # few-shot prompt

data = query(prompt, parameters)
```
---
## Practical Insights
Here are some practical insights, which help you get started using `GPT-Neo` and the 🤗 Accelerated Inference API.
Since `GPT-Neo` (2.7B) is about 60x smaller than `GPT-3` (175B), it does not generalize as well to zero-shot problems and needs 3-4 examples to achieve good results. When you provide more examples, `GPT-Neo` understands the task and takes the `end_sequence` into account, which allows us to control the generated text pretty well.

The hyperparameters `End Sequence`, `Token Length` & `Temperature` can be used to control the `text-generation` of the model, and you can use this to your advantage to solve the task you need. The `Temperature` controls the randomness of your generations: lower temperatures result in less random generations, while higher temperatures result in more random ones.

In the example, you can see how important it is to define your hyperparameters. These can make the difference between solving your task and failing miserably.
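Putting this together, a complete sentiment-classification request could look like the following. The prompt is purely illustrative, and `query` and `parameters` are the helper and settings defined above:
```python
prompt = """Tweet: "I loved the new Batman movie!"
Sentiment: Positive
###
Tweet: "The service was painfully slow."
Sentiment: Negative
###
Tweet: "The pizza was okay, nothing special."
Sentiment: Neutral
###
Tweet: "I can't wait for the weekend!"
Sentiment:"""

data = query(prompt, parameters)
print(data)
```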
---
## Responsible Use
Few-Shot Learning is a powerful technique but also presents unique pitfalls that need to be taken into account when designing use cases.
To illustrate this, let's consider the default `Sentiment Analysis` setting provided in the widget. After seeing three examples of sentiment classification, the model makes the following predictions 4 times out of 5, with `temperature` set to 0.1:
> ###
> Tweet: "I'm a disabled happy person"
> Sentiment: Negative
What could go wrong? Imagine that you are using sentiment analysis to aggregate reviews of products on an online shopping website: a possible outcome could be that items useful to people with disabilities would be automatically down-ranked - a form of automated discrimination. For more on this specific issue, we recommend the ACL 2020 paper [Social Biases in NLP Models as Barriers for Persons with Disabilities](https://www.aclweb.org/anthology/2020.acl-main.487.pdf). Because Few-Shot Learning relies more directly on information and associations picked up from pre-training, it is more sensitive to this type of failure.
How to minimize the risk of harm? Here are some practical recommendations.
### Best practices for responsible use
- Make sure people know which parts of their user experience depend on the outputs of the ML system
- If possible, give users the ability to opt-out
- Provide a mechanism for users to give feedback on the model decision, and to override it
- Monitor feedback, especially model failures, for groups of users that may be disproportionately affected
What needs most to be avoided is to use the model to automatically make decisions for, or about, a user, without opportunity for a human to provide input or correct the output. Several regulations, such as [GDPR](https://gdpr-info.eu/) in Europe, require that users be provided an explanation for automatic decisions made about them.
---
To use GPT-Neo or any Hugging Face model in your own application, you can [start a free trial](https://huggingface.co/pricing) of the 🤗 Accelerated Inference API.
If you need help mitigating bias in models and AI systems, or leveraging Few-Shot Learning, the 🤗 Expert Acceleration Program can [offer your team direct premium support from the Hugging Face team](https://huggingface.co/support).
| 7 |
0 | hf_public_repos | hf_public_repos/blog/audioldm2.md | ---
title: "AudioLDM 2, but faster ⚡️"
thumbnail: /blog/assets/161_audioldm2/thumbnail.png
authors:
- user: sanchit-gandhi
---
# AudioLDM 2, but faster ⚡️
<a target="_blank" href="https://colab.research.google.com/github/sanchit-gandhi/notebooks/blob/main/AudioLDM-2.ipynb">
<img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/>
</a>
AudioLDM 2 was proposed in [AudioLDM 2: Learning Holistic Audio Generation with Self-supervised Pretraining](https://arxiv.org/abs/2308.05734)
by Haohe Liu et al. AudioLDM 2 takes a text prompt as input and predicts the corresponding audio. It can generate realistic
sound effects, human speech and music.
While the generated audios are of high quality, running inference with the original implementation is very slow: a 10
second audio sample takes upwards of 30 seconds to generate. This is due to a combination of factors, including a deep
multi-stage modelling approach, large checkpoint sizes, and un-optimised code.
In this blog post, we showcase how to use AudioLDM 2 in the Hugging Face 🧨 Diffusers library, exploring a range of code
optimisations such as half-precision, flash attention, and compilation, and model optimisations such as scheduler choice
and negative prompting, to reduce the inference time by over **10 times**, with minimal degradation in quality of the
output audio. The blog post is also accompanied by a more streamlined [Colab notebook](https://colab.research.google.com/github/sanchit-gandhi/notebooks/blob/main/AudioLDM-2.ipynb),
that contains all the code but fewer explanations.
Read to the end to find out how to generate a 10 second audio sample in just 1 second!
## Model overview
Inspired by [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/overview), AudioLDM 2
is a text-to-audio _latent diffusion model (LDM)_ that learns continuous audio representations from text embeddings.
The overall generation process is summarised as follows:
1. Given a text input \\(\boldsymbol{x}\\), two text encoder models are used to compute the text embeddings: the text-branch of [CLAP](https://huggingface.co/docs/transformers/main/en/model_doc/clap), and the text-encoder of [Flan-T5](https://huggingface.co/docs/transformers/main/en/model_doc/flan-t5)
$$
\boldsymbol{E}_{1} = \text{CLAP}\left(\boldsymbol{x} \right); \quad \boldsymbol{E}_{2} = \text{T5}\left(\boldsymbol{x}\right)
$$
The CLAP text embeddings are trained to be aligned with the embeddings of the corresponding audio sample, whereas the Flan-T5 embeddings give a better representation of the semantics of the text.
2. These text embeddings are projected to a shared embedding space through individual linear projections:
$$
\boldsymbol{P}_{1} = \boldsymbol{W}_{\text{CLAP}} \boldsymbol{E}_{1}; \quad \boldsymbol{P}_{2} = \boldsymbol{W}_{\text{T5}}\boldsymbol{E}_{2}
$$
In the `diffusers` implementation, these projections are defined by the [AudioLDM2ProjectionModel](https://huggingface.co/docs/diffusers/api/pipelines/audioldm2/AudioLDM2ProjectionModel).
3. A [GPT2](https://huggingface.co/docs/transformers/main/en/model_doc/gpt2) language model (LM) is used to auto-regressively generate a sequence of \\(N\\) new embedding vectors, conditional on the projected CLAP and Flan-T5 embeddings:
$$
\tilde{\boldsymbol{E}}_{i} = \text{GPT2}\left(\boldsymbol{P}_{1}, \boldsymbol{P}_{2}, \tilde{\boldsymbol{E}}_{1:i-1}\right) \qquad \text{for } i=1,\dots,N
$$
4. The generated embedding vectors \\(\tilde{\boldsymbol{E}}_{1:N}\\) and Flan-T5 text embeddings \\(\boldsymbol{E}_{2}\\) are used as cross-attention conditioning in the LDM, which *de-noises*
a random latent via a reverse diffusion process. The LDM is run in the reverse diffusion process for a total of \\(T\\) inference steps:
$$
\boldsymbol{z}_{t} = \text{LDM}\left(\boldsymbol{z}_{t-1} | \tilde{\boldsymbol{E}}_{1:N}, \boldsymbol{E}_{2}\right) \qquad \text{for } t = 1, \dots, T
$$
where the initial latent variable \\(\boldsymbol{z}_{0}\\) is drawn from a normal distribution \\(\mathcal{N} \left(\boldsymbol{0}, \boldsymbol{I} \right)\\).
The [UNet](https://huggingface.co/docs/diffusers/api/pipelines/audioldm2/AudioLDM2UNet2DConditionModel) of the LDM is unique in
the sense that it takes **two** sets of cross-attention embeddings, \\(\tilde{\boldsymbol{E}}_{1:N}\\) from the GPT2 language model and \\(\boldsymbol{E}_{2}\\)
from Flan-T5, as opposed to one cross-attention conditioning as in most other LDMs.
5. The final de-noised latents \\(\boldsymbol{z}_{T}\\) are passed to the VAE decoder to recover the Mel spectrogram \\(\boldsymbol{s}\\):
$$
\boldsymbol{s} = \text{VAE}_{\text{dec}} \left(\boldsymbol{z}_{T}\right)
$$
6. The Mel spectrogram is passed to the vocoder to obtain the output audio waveform \\(\mathbf{y}\\):
$$
\boldsymbol{y} = \text{Vocoder}\left(\boldsymbol{s}\right)
$$
The diagram below demonstrates how a text input is passed through the text conditioning models, with the two prompt embeddings used as cross-conditioning in the LDM:
<p align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/161_audioldm2/audioldm2.png?raw=true" width="600"/>
</p>
For full details on how the AudioLDM 2 model is trained, the reader is referred to the [AudioLDM 2 paper](https://arxiv.org/abs/2308.05734).
Hugging Face 🧨 Diffusers provides an end-to-end inference pipeline class [`AudioLDM2Pipeline`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/audioldm2) that wraps this multi-stage generation process into a single callable object, enabling you to generate audio samples from text in just a few lines of code.
AudioLDM 2 comes in three variants. Two of these checkpoints are applicable to the general task of text-to-audio generation. The third checkpoint is trained exclusively on text-to-music generation. See the table below for details on the three official checkpoints, which can all be found on the [Hugging Face Hub](https://huggingface.co/models?search=cvssp/audioldm2):
| Checkpoint | Task | Model Size | Training Data / h |
|-----------------------------------------------------------------------|---------------|------------|-------------------|
| [cvssp/audioldm2](https://huggingface.co/cvssp/audioldm2) | Text-to-audio | 1.1B | 1150k |
| [cvssp/audioldm2-music](https://huggingface.co/cvssp/audioldm2-music) | Text-to-music | 1.1B | 665k |
| [cvssp/audioldm2-large](https://huggingface.co/cvssp/audioldm2-large) | Text-to-audio | 1.5B | 1150k |
Now that we've covered a high-level overview of how the AudioLDM 2 generation process works, let's put this theory into practice!
## Load the pipeline
For the purposes of this tutorial, we'll initialise the pipeline with the pre-trained weights from the base checkpoint,
[cvssp/audioldm2](https://huggingface.co/cvssp/audioldm2). We can load the entirety of the pipeline using the
[`.from_pretrained`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/overview#diffusers.DiffusionPipeline.from_pretrained)
method, which will instantiate the pipeline and load the pre-trained weights:
```python
from diffusers import AudioLDM2Pipeline
model_id = "cvssp/audioldm2"
pipe = AudioLDM2Pipeline.from_pretrained(model_id)
```
**Output:**
```
Loading pipeline components...: 100%|███████████████████████████████████████████| 11/11 [00:01<00:00, 7.62it/s]
```
The pipeline can be moved to the GPU in much the same way as a standard PyTorch `nn.Module`:
```python
pipe.to("cuda");
```
Great! We'll define a `Generator` and set a seed for reproducibility. This will allow us to tweak our prompts and observe
the effect that they have on the generations by fixing the starting latents in the LDM:
```python
import torch
generator = torch.Generator("cuda").manual_seed(0)
```
Now we're ready to perform our first generation! We'll use the same running example throughout this notebook, where we'll
condition the audio generations on a fixed text prompt and use the same seed throughout. The [`audio_length_in_s`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/audioldm2#diffusers.AudioLDM2Pipeline.__call__.audio_length_in_s)
argument controls the length of the generated audio. It defaults to the audio length that the LDM was trained on
(10.24 seconds):
```python
prompt = "The sound of Brazilian samba drums with waves gently crashing in the background"
audio = pipe(prompt, audio_length_in_s=10.24, generator=generator).audios[0]
```
**Output:**
```
100%|███████████████████████████████████████████| 200/200 [00:13<00:00, 15.27it/s]
```
Cool! That generation took about 13 seconds. Let's have a listen to the output audio:
```python
from IPython.display import Audio
Audio(audio, rate=16000)
```
<audio controls>
<source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/161_audioldm2/sample_1.wav" type="audio/wav">
Your browser does not support the audio element.
</audio>
Sounds much like our text prompt! The quality is good, but there are still some background noise artefacts. We can provide the
pipeline with a [*negative prompt*](https://huggingface.co/docs/diffusers/main/en/api/pipelines/audioldm2#diffusers.AudioLDM2Pipeline.__call__.negative_prompt)
to discourage the pipeline from generating certain features. In this case, we'll pass a negative prompt that discourages
the model from generating low quality audio in the outputs. We'll omit the `audio_length_in_s` argument and leave it to
take its default value:
```python
negative_prompt = "Low quality, average quality."
audio = pipe(prompt, negative_prompt=negative_prompt, generator=generator.manual_seed(0)).audios[0]
```
**Output:**
```
100%|███████████████████████████████████████████| 200/200 [00:12<00:00, 16.50it/s]
```
The inference time is unchanged when using a negative prompt \\({}^1\\): we simply replace the unconditional input to the
LDM with the negative input, so any gains in audio quality come for free.
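For intuition, the negative prompt enters through the classifier-free guidance update, which extrapolates the noise prediction away from the negative (or unconditional) prediction towards the text-conditional one. Here is a minimal sketch of this rule; the tensor shapes and guidance scale are illustrative assumptions, not the pipeline's exact internals:
```python
import torch

def classifier_free_guidance(noise_negative, noise_text, guidance_scale=3.5):
    # Extrapolate away from the negative (or unconditional) prediction,
    # towards the text-conditional prediction
    return noise_negative + guidance_scale * (noise_text - noise_negative)

# Toy example with random stand-ins for the two noise predictions
eps_neg, eps_text = torch.randn(1, 8, 16, 16), torch.randn(1, 8, 16, 16)
eps = classifier_free_guidance(eps_neg, eps_text)
```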
Let's take a listen to the resulting audio:
```python
Audio(audio, rate=16000)
```
<audio controls>
<source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/161_audioldm2/sample_2.wav" type="audio/wav">
Your browser does not support the audio element.
</audio>
There's definitely an improvement in the overall audio quality - there are fewer noise artefacts and the audio generally
sounds sharper.
\\({}^1\\) Note that in practice, we typically see a reduction in inference time going from our first generation to our
second. This is due to a CUDA "warm-up" that occurs the first time we run the computation. The second generation is a
better benchmark for our actual inference time.
## Optimisation 1: Flash Attention
PyTorch 2.0 and upwards includes an optimised and memory-efficient implementation of the attention operation through the
[`torch.nn.functional.scaled_dot_product_attention`](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention) (SDPA) function. This function automatically applies several in-built optimisations depending on the inputs, and runs faster and more memory-efficiently than the vanilla attention implementation. Overall, the SDPA function gives similar behaviour to *flash attention*, as proposed in the paper [FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness](https://arxiv.org/abs/2205.14135) by Dao et al.
These optimisations will be enabled by default in Diffusers if PyTorch 2.0 is installed and if `torch.nn.functional.scaled_dot_product_attention`
is available. To use it, just install torch 2.0 or higher as per the [official instructions](https://pytorch.org/get-started/locally/),
and then use the pipeline as is 🚀
```python
audio = pipe(prompt, negative_prompt=negative_prompt, generator=generator.manual_seed(0)).audios[0]
```
**Output:**
```
100%|███████████████████████████████████████████| 200/200 [00:12<00:00, 16.60it/s]
```
For more details on the use of SDPA in `diffusers`, refer to the corresponding [documentation](https://huggingface.co/docs/diffusers/optimization/torch2.0).
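To see what SDPA computes in isolation, here's a minimal standalone example; the shapes are arbitrary, and on suitable inputs PyTorch dispatches to a fused, flash-attention style kernel:
```python
import torch
import torch.nn.functional as F

# (batch, heads, sequence length, head dimension)
q, k, v = (torch.randn(1, 8, 64, 64, device="cuda", dtype=torch.float16) for _ in range(3))

# Equivalent to softmax(q @ k^T / sqrt(d)) @ v, but fused and memory-efficient
out = F.scaled_dot_product_attention(q, k, v)
```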
## Optimisation 2: Half-Precision
By default, the `AudioLDM2Pipeline` loads the model weights in float32 (full) precision. All the model computations are
also performed in float32 precision. For inference, we can safely convert the model weights and computations to float16
(half) precision, which will give us an improvement in inference time and GPU memory usage, with an imperceptible change to
generation quality.
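As a quick back-of-the-envelope sketch of the weight memory saving, using the 1.1B parameter count from the checkpoint table above, halving the precision roughly halves the weight memory:
```python
params = 1.1e9  # base checkpoint parameter count, from the table above
print(f"float32: {params * 4 / 1024**3:.1f} GiB")  # 4 bytes per parameter -> ~4.1 GiB
print(f"float16: {params * 2 / 1024**3:.1f} GiB")  # 2 bytes per parameter -> ~2.0 GiB
```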
We can load the weights in float16 precision by passing the [`torch_dtype`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/overview#diffusers.DiffusionPipeline.from_pretrained.torch_dtype)
argument to `.from_pretrained`:
```python
pipe = AudioLDM2Pipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipe.to("cuda");
```
Let's run generation in float16 precision and listen to the audio outputs:
```python
audio = pipe(prompt, negative_prompt=negative_prompt, generator=generator.manual_seed(0)).audios[0]
Audio(audio, rate=16000)
```
**Output:**
```
100%|███████████████████████████████████████████| 200/200 [00:09<00:00, 20.94it/s]
```
<audio controls>
<source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/161_audioldm2/sample_3.wav" type="audio/wav">
Your browser does not support the audio element.
</audio>
The audio quality is largely unchanged from the full precision generation, with an inference speed-up of about 2 seconds.
In our experience, we've not seen any significant audio degradation using `diffusers` pipelines with float16 precision,
but consistently reap a substantial inference speed-up. Thus, we recommend using float16 precision by default.
## Optimisation 3: Torch Compile
To get an additional speed-up, we can use the new `torch.compile` feature. Since the UNet of the pipeline is usually the
most computationally expensive component, we wrap it with `torch.compile`, leaving the rest of the sub-models (text encoders
and VAE) as is:
```python
pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
```
After wrapping the UNet with `torch.compile`, the first inference step we run is typically going to be slow, due to the
overhead of compiling the forward pass of the UNet. Let's run the pipeline forward once to get this longer compilation run
out of the way. Note that the first inference step might take up to 2 minutes to compile, so be patient!
```python
audio = pipe(prompt, negative_prompt=negative_prompt, generator=generator.manual_seed(0)).audios[0]
```
**Output:**
```
100%|███████████████████████████████████████████| 200/200 [01:23<00:00, 2.39it/s]
```
Great! Now that the UNet is compiled, we can run the full diffusion process and reap the benefits of faster inference:
```python
audio = pipe(prompt, negative_prompt=negative_prompt, generator=generator.manual_seed(0)).audios[0]
```
**Output:**
```
100%|███████████████████████████████████████████| 200/200 [00:04<00:00, 48.98it/s]
```
Only 4 seconds to generate! In practice, you will only have to compile the UNet once, and then get faster inference for
all successive generations. This means that the time taken to compile the model is amortised by the gains in subsequent
inference time. For more information and options regarding `torch.compile`, refer to the
[torch compile](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html) docs.
## Optimisation 4: Scheduler
Another option is to reduce the number of inference steps. Choosing a more efficient scheduler can help decrease the
number of steps without sacrificing the output audio quality. You can find which schedulers are compatible with the
`AudioLDM2Pipeline` by calling the [`schedulers.compatibles`](https://huggingface.co/docs/diffusers/v0.20.0/en/api/schedulers/overview#diffusers.SchedulerMixin)
attribute:
```python
pipe.scheduler.compatibles
```
**Output:**
```
[diffusers.schedulers.scheduling_lms_discrete.LMSDiscreteScheduler,
diffusers.schedulers.scheduling_k_dpm_2_discrete.KDPM2DiscreteScheduler,
diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler,
diffusers.schedulers.scheduling_unipc_multistep.UniPCMultistepScheduler,
diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler,
diffusers.schedulers.scheduling_pndm.PNDMScheduler,
diffusers.schedulers.scheduling_dpmsolver_singlestep.DPMSolverSinglestepScheduler,
diffusers.schedulers.scheduling_heun_discrete.HeunDiscreteScheduler,
diffusers.schedulers.scheduling_ddpm.DDPMScheduler,
diffusers.schedulers.scheduling_deis_multistep.DEISMultistepScheduler,
diffusers.utils.dummy_torch_and_torchsde_objects.DPMSolverSDEScheduler,
diffusers.schedulers.scheduling_ddim.DDIMScheduler,
diffusers.schedulers.scheduling_k_dpm_2_ancestral_discrete.KDPM2AncestralDiscreteScheduler,
diffusers.schedulers.scheduling_euler_ancestral_discrete.EulerAncestralDiscreteScheduler]
```
Alright! We've got a long list of schedulers to choose from 📝. By default, AudioLDM 2 uses the [`DDIMScheduler`](https://huggingface.co/docs/diffusers/api/schedulers/ddim),
and requires 200 inference steps to get good quality audio generations. However, more performant schedulers, like [`DPMSolverMultistepScheduler`](https://huggingface.co/docs/diffusers/main/en/api/schedulers/multistep_dpm_solver#diffusers.DPMSolverMultistepScheduler),
require only **20-25 inference steps** to achieve similar results.
Let's see how we can switch the AudioLDM 2 scheduler from DDIM to DPM Multistep. We'll use the [`ConfigMixin.from_config()`](https://huggingface.co/docs/diffusers/main/en/api/configuration#diffusers.ConfigMixin.from_config)
method to load a [`DPMSolverMultistepScheduler`](https://huggingface.co/docs/diffusers/main/en/api/schedulers/multistep_dpm_solver#diffusers.DPMSolverMultistepScheduler)
from the configuration of our original [`DDIMScheduler`](https://huggingface.co/docs/diffusers/api/schedulers/ddim):
```python
from diffusers import DPMSolverMultistepScheduler
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
```
Let's set the number of inference steps to 20 and re-run the generation with the new scheduler.
Since the shape of the LDM latents is unchanged, we don't have to repeat the compilation step:
```python
audio = pipe(prompt, negative_prompt=negative_prompt, num_inference_steps=20, generator=generator.manual_seed(0)).audios[0]
```
**Output:**
```
100%|███████████████████████████████████████████| 20/20 [00:00<00:00, 49.14it/s]
```
That took less than **1 second** to generate the audio! Let's have a listen to the resulting generation:
```python
Audio(audio, rate=16000)
```
<audio controls>
<source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/161_audioldm2/sample_4.wav" type="audio/wav">
Your browser does not support the audio element.
</audio>
More or less the same as our original audio sample, but only a fraction of the generation time! 🧨 Diffusers pipelines
are designed to be *composable*, allowing you to swap out schedulers and other components for more performant counterparts
with ease.
## What about memory?
The length of the audio sample we want to generate dictates the *width* of the latent variables we de-noise in the LDM.
Since the memory of the cross-attention layers in the UNet scales with sequence length (width) squared, generating very
long audio samples might lead to out-of-memory errors. Our batch size, which controls the number of samples we generate,
also governs our memory usage.
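As a rough sketch of why this blows up, the attention score matrix alone grows with the square of the latent width (sequence length); the head count and float16 dtype below are assumptions for illustration:
```python
def attn_scores_gib(seq_len, num_heads=8, bytes_per_el=2):  # float16 scores
    # One (seq_len x seq_len) score matrix per attention head
    return seq_len**2 * num_heads * bytes_per_el / 1024**3

for seq_len in (256, 1024, 4096):
    print(f"{seq_len:>5}: {attn_scores_gib(seq_len):.3f} GiB per attention layer")
```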
We've already mentioned that loading the model in float16 half precision gives strong memory savings. Using PyTorch 2.0
SDPA also gives a memory improvement, but this might not be sufficient for extremely large sequence lengths.
Let's try generating an audio sample 2.5 minutes (150 seconds) in duration. We'll also generate 4 candidate audios by
setting [`num_waveforms_per_prompt`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/audioldm2#diffusers.AudioLDM2Pipeline.__call__.num_waveforms_per_prompt)`=4`.
Once [`num_waveforms_per_prompt`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/audioldm2#diffusers.AudioLDM2Pipeline.__call__.num_waveforms_per_prompt)`>1`,
automatic scoring is performed between the generated audios and the text prompt: the audios and text prompts are embedded
in the CLAP audio-text embedding space, and then ranked based on their cosine similarity scores. We can access the 'best'
waveform as the one in position `0`.
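A minimal sketch of this automatic scoring, with random tensors standing in for the actual CLAP audio and text embeddings (the 512-dim embedding size is an assumption):
```python
import torch
import torch.nn.functional as F

text_emb = torch.randn(1, 512)    # CLAP embedding of the text prompt
audio_embs = torch.randn(4, 512)  # CLAP embeddings of the 4 generated waveforms

scores = F.cosine_similarity(audio_embs, text_emb, dim=-1)
best_first = scores.argsort(descending=True)  # top-scoring waveform ends up in position 0
```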
Since we've changed the width of the latent variables in the UNet, we would have to perform another torch compilation step
with the new latent variable shapes. In the interest of time, we'll re-load the pipeline without torch compile, so that
we're not hit with a lengthy compilation step up front:
```python
pipe = AudioLDM2Pipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipe.to("cuda")
audio = pipe(prompt, negative_prompt=negative_prompt, num_waveforms_per_prompt=4, audio_length_in_s=150, num_inference_steps=20, generator=generator.manual_seed(0)).audios[0]
```
**Output:**
```
---------------------------------------------------------------------------
OutOfMemoryError Traceback (most recent call last)
<ipython-input-33-c4cae6410ff5> in <cell line: 5>()
3 pipe.to("cuda")
4
----> 5 audio = pipe(prompt, negative_prompt=negative_prompt, num_waveforms_per_prompt=4, audio_length_in_s=150, num_inference_steps=20, generator=generator.manual_seed(0)).audios[0]
23 frames
/usr/local/lib/python3.10/dist-packages/torch/nn/modules/linear.py in forward(self, input)
112
113 def forward(self, input: Tensor) -> Tensor:
--> 114 return F.linear(input, self.weight, self.bias)
115
116 def extra_repr(self) -> str:
OutOfMemoryError: CUDA out of memory. Tried to allocate 1.95 GiB. GPU 0 has a total capacty of 14.75 GiB of which 1.66 GiB is free. Process 414660 has 13.09 GiB memory in use. Of the allocated memory 10.09 GiB is allocated by PyTorch, and 1.92 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
```
Unless you have a GPU with high RAM, the code above probably returned an OOM error. While the AudioLDM 2 pipeline involves
several components, only the model being used has to be on the GPU at any one time. The remainder of the modules can be
offloaded to the CPU. This technique, called *CPU offload*, can reduce memory usage, with a very low penalty to inference time.
We can enable CPU offload on our pipeline with the function [enable_model_cpu_offload()](https://huggingface.co/docs/diffusers/main/en/api/pipelines/audioldm2#diffusers.AudioLDM2Pipeline.enable_model_cpu_offload):
```python
pipe.enable_model_cpu_offload()
```
Running generation with CPU offload is then the same as before:
```python
audio = pipe(prompt, negative_prompt=negative_prompt, num_waveforms_per_prompt=4, audio_length_in_s=150, num_inference_steps=20, generator=generator.manual_seed(0)).audios[0]
```
**Output:**
```
100%|███████████████████████████████████████████| 20/20 [00:36<00:00, 1.82s/it]
```
And with that, we can generate four samples, each of 150 seconds in duration, all in one call to the pipeline! Using the
large AudioLDM 2 checkpoint will result in higher overall memory usage than the base checkpoint, since the UNet is over
twice the size (750M parameters compared to 350M), so this memory saving trick is particularly beneficial here.
## Conclusion
In this blog post, we showcased four optimisation methods that are available out of the box with 🧨 Diffusers, taking
the generation time of AudioLDM 2 from around 13 seconds down to less than 1 second. We also highlighted how to employ memory
saving tricks, such as half-precision and CPU offload, to reduce peak memory usage for long audio samples or large
checkpoint sizes.
Blog post by [Sanchit Gandhi](https://huggingface.co/sanchit-gandhi). Many thanks to [Vaibhav Srivastav](https://huggingface.co/reach-vb)
and [Sayak Paul](https://huggingface.co/sayakpaul) for their constructive comments. Spectrogram image source: [Getting to Know the Mel Spectrogram](https://towardsdatascience.com/getting-to-know-the-mel-spectrogram-31bca3e2d9d0).
Waveform image source: [Aalto Speech Processing](https://speechprocessingbook.aalto.fi/Representations/Waveform.html).
| 8 |
0 | hf_public_repos | hf_public_repos/blog/paddlepaddle.md | ---
title: "Welcome PaddlePaddle to the Hugging Face Hub"
thumbnail: /blog/assets/126_paddlepaddle/thumbnail.jpg
authors:
- user: PaddlePaddle
guest: true
---
# Welcome PaddlePaddle to the Hugging Face Hub
We are happy to share an open source collaboration between Hugging Face and [PaddlePaddle](https://www.paddlepaddle.org.cn/en) on a shared mission to advance and democratize AI through open source!
First open sourced by Baidu in 2016, PaddlePaddle enables developers of all skill levels to adopt and implement Deep Learning at scale. As of Q4 2022, PaddlePaddle is being used by more than 5.35 million developers and 200,000 enterprises, ranking first in terms of market share among Deep Learning platforms in China. PaddlePaddle features popular open source repositories such as the [Paddle](https://github.com/PaddlePaddle/Paddle) Deep Learning Framework, model libraries across different modalities (e.g. [PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR), [PaddleDetection](https://github.com/PaddlePaddle/PaddleDetection), [PaddleNLP](https://github.com/PaddlePaddle/PaddleNLP), [PaddleSpeech](https://github.com/PaddlePaddle/PaddleSpeech)), [PaddleSlim](https://github.com/PaddlePaddle/PaddleSlim) for model compression, [FastDeploy](https://github.com/PaddlePaddle/FastDeploy) for model deployment and many more.

**With [PaddleNLP](https://huggingface.co/docs/hub/paddlenlp) leading the way, PaddlePaddle will gradually integrate its libraries with the Hugging Face Hub.** You will soon be able to play with the full suite of awesome pre-trained PaddlePaddle models across text, image, audio, video and multi-modalities on the Hub!
## Find PaddlePaddle Models
You can find all PaddlePaddle models on the Model Hub by filtering with the [PaddlePaddle library tag](https://huggingface.co/models?library=paddlepaddle).
<p align="center">
<img src="assets/126_paddlepaddle/paddle_tag.png" alt="PaddlePaddle Tag"/>
</p>
There are already over 75 PaddlePaddle models on the Hub. As an example, you can find our multi-task Information Extraction model series [UIE](https://huggingface.co/PaddlePaddle/uie-base), the State-of-the-Art Chinese Language Model [ERNIE 3.0 model series](https://huggingface.co/PaddlePaddle/ernie-3.0-nano-zh), the novel document pre-training model [Ernie-Layout](https://huggingface.co/PaddlePaddle/ernie-layoutx-base-uncased) with layout knowledge enhancement across the whole workflow, and more.
You are also welcome to check out the [PaddlePaddle](https://huggingface.co/PaddlePaddle) org on the HuggingFace Hub. In addition to the above-mentioned models, you can also explore our Spaces, including our text-to-image [Ernie-ViLG](https://huggingface.co/spaces/PaddlePaddle/ERNIE-ViLG), cross-modal Information Extraction engine [UIE-X](https://huggingface.co/spaces/PaddlePaddle/UIE-X) and awesome multilingual OCR toolkit [PaddleOCR](https://huggingface.co/spaces/PaddlePaddle/PaddleOCR).
## Inference API and Widgets
PaddlePaddle models are available through the [Inference API](https://huggingface.co/docs/hub/models-inference), which you can access through HTTP with cURL, Python’s requests library, or your preferred method for making network requests.

Models that support a [task](https://huggingface.co/tasks) are equipped with an interactive widget that allows you to play with the model directly in the browser.

## Use Existing Models
If you want to see how to load a specific model, you can click `Use in paddlenlp` (or other PaddlePaddle libraries in the future) and you will be given a working snippet to load it!

## Share Models
Depending on the PaddlePaddle library, you may be able to share your models by pushing to the Hub. For example, you can share PaddleNLP models by using the `save_to_hf_hub` method.
```python
from paddlenlp.transformers import AutoTokenizer, AutoModelForMaskedLM
tokenizer = AutoTokenizer.from_pretrained("PaddlePaddle/ernie-3.0-base-zh", from_hf_hub=True)
model = AutoModelForMaskedLM.from_pretrained("PaddlePaddle/ernie-3.0-base-zh", from_hf_hub=True)
tokenizer.save_to_hf_hub(repo_id="<my_org_name>/<my_repo_name>")
model.save_to_hf_hub(repo_id="<my_org_name>/<my_repo_name>")
```
## Conclusion
PaddlePaddle is an open source Deep Learning platform that originated from industrial practice and has been open-sourcing innovative and industry-grade projects since 2016. We are excited to join the Hub to share our work with the HuggingFace community and you can expect more fun and State-of-the-Art projects from us soon! To stay up to date with the latest news, you can follow us on Twitter at [@PaddlePaddle](https://twitter.com/PaddlePaddle).
| 9 |
0 | hf_public_repos/api-inference-community/docker_images/nemo/app | hf_public_repos/api-inference-community/docker_images/nemo/app/pipelines/base.py | from abc import ABC, abstractmethod
from typing import Any
class Pipeline(ABC):
@abstractmethod
def __init__(self, model_id: str):
raise NotImplementedError("Pipelines should implement an __init__ method")
@abstractmethod
def __call__(self, inputs: Any) -> Any:
raise NotImplementedError("Pipelines should implement a __call__ method")
class PipelineException(Exception):
pass
| 0 |
0 | hf_public_repos/api-inference-community/docker_images/nemo/app | hf_public_repos/api-inference-community/docker_images/nemo/app/pipelines/__init__.py | from app.pipelines.base import Pipeline, PipelineException # isort:skip
from app.pipelines.automatic_speech_recognition import (
AutomaticSpeechRecognitionPipeline,
)
| 1 |
0 | hf_public_repos/api-inference-community/docker_images/nemo/app | hf_public_repos/api-inference-community/docker_images/nemo/app/pipelines/automatic_speech_recognition.py | import os
import tempfile
import uuid
from typing import Dict
import librosa
import nemo.collections.asr as nemo_asr
import numpy as np
import soundfile
from app.pipelines import Pipeline
from huggingface_hub import hf_hub_download
from huggingface_hub.hf_api import HfFolder
class AutomaticSpeechRecognitionPipeline(Pipeline):
def __init__(self, model_id: str):
# IMPLEMENT_THIS
# Preload all the elements you are going to need at inference.
# For instance your model, processors, tokenizer that might be needed.
# This function is only called once, so do all the heavy processing I/O here
# Precheck for API key
is_token_available = HfFolder.get_token() is not None
# Prepare file name from model_id
filename = model_id.split("/")[-1] + ".nemo"
path = hf_hub_download(
repo_id=model_id, filename=filename, use_auth_token=is_token_available
)
# Load model
self.model = nemo_asr.models.ASRModel.restore_from(path)
self.model.freeze()
# Pre-Initialize RNNT decoding strategy
if hasattr(self.model, "change_decoding_strategy"):
self.model.change_decoding_strategy(None)
# IMPLEMENT_THIS : Please define a `self.sampling_rate` for this pipeline
# to automatically read the input correctly
self.sampling_rate = self.model.cfg.sample_rate
def __call__(self, inputs: np.array) -> Dict[str, str]:
"""
Args:
inputs (:obj:`np.array`):
The raw waveform of audio received. By default at self.sampling_rate, otherwise 16KHz.
Return:
            A :obj:`dict`: The returned object should be like {"text": "XXX"}, containing
            the transcribed text of the input audio
"""
inputs = self.process_audio_file(inputs)
with tempfile.TemporaryDirectory() as tmpdir:
audio_path = os.path.join(tmpdir, f"audio_{uuid.uuid4()}.wav")
soundfile.write(audio_path, inputs, self.sampling_rate)
transcriptions = self.model.transcribe([audio_path])
# if transcriptions form a tuple (from RNNT), extract just "best" hypothesis
if isinstance(transcriptions, tuple) and len(transcriptions) == 2:
transcriptions = transcriptions[0]
audio_transcription = transcriptions[0]
return {"text": audio_transcription}
def process_audio_file(self, data):
# monochannel
data = librosa.to_mono(data)
return data
| 2 |
0 | hf_public_repos/api-inference-community/docker_images/nemo | hf_public_repos/api-inference-community/docker_images/nemo/tests/test_docker_build.py | import os
import subprocess
from unittest import TestCase
class cd:
"""Context manager for changing the current working directory"""
def __init__(self, newPath):
self.newPath = os.path.expanduser(newPath)
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
class DockerBuildTestCase(TestCase):
def test_can_build_docker_image(self):
with cd(os.path.dirname(os.path.dirname(__file__))):
subprocess.check_output(["docker", "build", "."])
| 3 |
0 | hf_public_repos/api-inference-community/docker_images/nemo | hf_public_repos/api-inference-community/docker_images/nemo/tests/test_api_automatic_speech_recognition.py | import json
import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
"automatic-speech-recognition" not in ALLOWED_TASKS,
"automatic-speech-recognition not implemented",
)
class AutomaticSpeechRecognitionTestCase(TestCase):
def setUp(self):
model_id = TESTABLE_MODELS["automatic-speech-recognition"]
self.old_model_id = os.getenv("MODEL_ID")
self.old_task = os.getenv("TASK")
os.environ["MODEL_ID"] = model_id
os.environ["TASK"] = "automatic-speech-recognition"
from app.main import app
self.app = app
@classmethod
def setUpClass(cls):
from app.main import get_pipeline
get_pipeline.cache_clear()
def tearDown(self):
if self.old_model_id is not None:
os.environ["MODEL_ID"] = self.old_model_id
else:
del os.environ["MODEL_ID"]
if self.old_task is not None:
os.environ["TASK"] = self.old_task
else:
del os.environ["TASK"]
def read(self, filename: str) -> bytes:
dirname = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(dirname, "samples", filename)
with open(filename, "rb") as f:
bpayload = f.read()
return bpayload
def test_simple(self):
bpayload = self.read("sample1.flac")
with TestClient(self.app) as client:
response = client.post("/", data=bpayload)
self.assertEqual(
response.status_code,
200,
)
content = json.loads(response.content)
self.assertEqual(set(content.keys()), {"text"})
def test_malformed_audio(self):
bpayload = self.read("malformed.flac")
with TestClient(self.app) as client:
response = client.post("/", data=bpayload)
self.assertEqual(
response.status_code,
400,
)
self.assertEqual(response.content, b'{"error":"Malformed soundfile"}')
def test_dual_channel_audiofile(self):
bpayload = self.read("sample1_dual.ogg")
with TestClient(self.app) as client:
response = client.post("/", data=bpayload)
self.assertEqual(
response.status_code,
200,
)
content = json.loads(response.content)
self.assertEqual(set(content.keys()), {"text"})
def test_webm_audiofile(self):
bpayload = self.read("sample1.webm")
with TestClient(self.app) as client:
response = client.post("/", data=bpayload)
self.assertEqual(
response.status_code,
200,
)
content = json.loads(response.content)
self.assertEqual(set(content.keys()), {"text"})
| 4 |
0 | hf_public_repos/api-inference-community/docker_images/nemo | hf_public_repos/api-inference-community/docker_images/nemo/tests/test_api.py | import os
from typing import Dict
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS, get_pipeline
# Must contain at least one example of each implemented pipeline
# Tests do not check the actual values of the model output, so small dummy
# models are recommended for faster tests.
TESTABLE_MODELS: Dict[str, str] = {
# IMPLEMENT_THIS
"automatic-speech-recognition": "nvidia/stt_en_conformer_ctc_small",
}
ALL_TASKS = {
    "audio-classification",
    "audio-to-audio",
    "automatic-speech-recognition",
    "conversational",
    "feature-extraction",
    "fill-mask",
    "image-classification",
    "question-answering",
    "sentence-similarity",
    "speech-segmentation",
    "structured-data-classification",
    "summarization",
    "table-question-answering",
    "text-classification",
    "text-to-image",
    "text-to-speech",
    "text2text-generation",
    "token-classification",
    "zero-shot-classification",
}
class PipelineTestCase(TestCase):
@skipIf(
os.path.dirname(os.path.dirname(__file__)).endswith("common"),
"common is a special case",
)
def test_has_at_least_one_task_enabled(self):
self.assertGreater(
len(ALLOWED_TASKS.keys()), 0, "You need to implement at least one task"
)
def test_unsupported_tasks(self):
unsupported_tasks = ALL_TASKS - ALLOWED_TASKS.keys()
for unsupported_task in unsupported_tasks:
with self.subTest(msg=unsupported_task, task=unsupported_task):
os.environ["TASK"] = unsupported_task
os.environ["MODEL_ID"] = "XX"
with self.assertRaises(EnvironmentError):
get_pipeline()
| 5 |
0 | hf_public_repos/api-inference-community/docker_images | hf_public_repos/api-inference-community/docker_images/adapter_transformers/requirements.txt | starlette==0.37.2
api-inference-community==0.0.32
torch==2.3.0
adapters==0.2.1
huggingface_hub==0.23.0
| 6 |
0 | hf_public_repos/api-inference-community/docker_images | hf_public_repos/api-inference-community/docker_images/adapter_transformers/Dockerfile | FROM tiangolo/uvicorn-gunicorn:python3.8
LABEL maintainer="me <[email protected]>"
# Add any system dependency here
# RUN apt-get update -y && apt-get install libXXX -y
COPY ./requirements.txt /app
RUN pip install --no-cache-dir -r requirements.txt
COPY ./prestart.sh /app/
# Most DL models are quite large in terms of memory, using workers is a HUGE
# slowdown because of the fork and GIL with python.
# Using multiple pods seems like a better default strategy.
# Feel free to override if it does not make sense for your library.
ARG max_workers=1
ENV MAX_WORKERS=$max_workers
ENV HUGGINGFACE_HUB_CACHE=/data
ENV HF_HOME=/data
ENV TORCH_HOME=/data
# Necessary on GPU environment docker.
# TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose
# rendering TIMEOUT defined by uvicorn impossible to use correctly
# We're overriding it to be renamed UVICORN_TIMEOUT
# UVICORN_TIMEOUT is a useful variable for very large models that take more
# than 30s (the default) to load in memory.
# If UVICORN_TIMEOUT is too low, uvicorn will simply never loads as it will
# kill workers all the time before they finish.
RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py
COPY ./app /app/app
| 7 |
0 | hf_public_repos/api-inference-community/docker_images | hf_public_repos/api-inference-community/docker_images/adapter_transformers/prestart.sh | python app/main.py
| 8 |
0 | hf_public_repos/api-inference-community/docker_images/adapter_transformers | hf_public_repos/api-inference-community/docker_images/adapter_transformers/app/main.py | import functools
import logging
import os
from typing import Dict, Type
from api_inference_community.routes import pipeline_route, status_ok
from app.pipelines import (
Pipeline,
QuestionAnsweringPipeline,
SummarizationPipeline,
TextClassificationPipeline,
TextGenerationPipeline,
TokenClassificationPipeline,
)
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.gzip import GZipMiddleware
from starlette.routing import Route
TASK = os.getenv("TASK")
MODEL_ID = os.getenv("MODEL_ID")
logger = logging.getLogger(__name__)
# Add the allowed tasks
# Supported tasks are:
# - text-generation
# - text-classification
# - token-classification
# - translation
# - summarization
# - automatic-speech-recognition
# - ...
# For instance
# from app.pipelines import AutomaticSpeechRecognitionPipeline
# ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline}
# You can check the requirements and expectations of each pipelines in their respective
# directories. Implement directly within the directories.
ALLOWED_TASKS: Dict[str, Type[Pipeline]] = {
"question-answering": QuestionAnsweringPipeline,
"summarization": SummarizationPipeline,
"text-classification": TextClassificationPipeline,
"text-generation": TextGenerationPipeline,
"token-classification": TokenClassificationPipeline,
# IMPLEMENT_THIS: Add your implemented tasks here !
}
@functools.lru_cache()
def get_pipeline() -> Pipeline:
task = os.environ["TASK"]
model_id = os.environ["MODEL_ID"]
if task not in ALLOWED_TASKS:
raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}")
return ALLOWED_TASKS[task](model_id)
routes = [
Route("/{whatever:path}", status_ok),
Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]
middleware = [Middleware(GZipMiddleware, minimum_size=1000)]
if os.environ.get("DEBUG", "") == "1":
from starlette.middleware.cors import CORSMiddleware
middleware.append(
Middleware(
CORSMiddleware,
allow_origins=["*"],
allow_headers=["*"],
allow_methods=["*"],
)
)
app = Starlette(routes=routes, middleware=middleware)
@app.on_event("startup")
async def startup_event():
logger = logging.getLogger("uvicorn.access")
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
logger.handlers = [handler]
# Link between `api-inference-community` and framework code.
app.get_pipeline = get_pipeline
try:
get_pipeline()
except Exception:
# We can fail so we can show exception later.
pass
if __name__ == "__main__":
try:
get_pipeline()
except Exception:
# We can fail so we can show exception later.
pass
| 9 |
0 | hf_public_repos | hf_public_repos/candle/CHANGELOG.md | # Changelog
This documents the main changes to the `candle` crate.
## v0.3.1 - Unreleased
### Added
### Modified
## v0.3.0 - 2023-10-01
### Added
- Added the Mistral 7b v0.1 model
[983](https://github.com/huggingface/candle/pull/983).
- Quantized version of the Mistral model
[1009](https://github.com/huggingface/candle/pull/1009).
- Add the gelu-erf op and activation function
[969](https://github.com/huggingface/candle/pull/969).
- Add the mixformer/phi-v1.5 model
[930](https://github.com/huggingface/candle/pull/930).
- Add the slice-scatter op
[927](https://github.com/huggingface/candle/pull/927).
- Add the Wuerstchen diffusion model
[911](https://github.com/huggingface/candle/pull/911).
### Modified
- Support for simd128 intrinsics in some quantized vecdots
[982](https://github.com/huggingface/candle/pull/982).
- Optimize the index-select cuda kernel
[976](https://github.com/huggingface/candle/pull/976).
- Self-contained safetensor wrappers
[946](https://github.com/huggingface/candle/pull/946).
## v0.2.2 - 2023-09-18
### Added
- Support for `top_p` sampling
[819](https://github.com/huggingface/candle/pull/819).
- T5 model including decoding
[864](https://github.com/huggingface/candle/pull/864).
- 1-d upsampling
[839](https://github.com/huggingface/candle/pull/839).
### Modified
- Bugfix for conv2d
[820](https://github.com/huggingface/candle/pull/820).
- Support tensor based indexing using `.i`
[842](https://github.com/huggingface/candle/pull/842).
## v0.2.1 - 2023-09-11
### Added
- Add some RNNs (GRU and LSTM) in `candle-nn`
[674](https://github.com/huggingface/candle/pull/674),
[688](https://github.com/huggingface/candle/pull/688).
- gguf v2 support
[725](https://github.com/huggingface/candle/pull/725).
- Quantized llama example in Python using the pyo3 api
[716](https://github.com/huggingface/candle/pull/716).
- `candle-nn` layer for conv2d-transposed
[760](https://github.com/huggingface/candle/pull/760).
- Add the Segment-Anything Model (SAM) as an example
[773](https://github.com/huggingface/candle/pull/773).
- TinyViT backbone for the segment anything example
[787](https://github.com/huggingface/candle/pull/787).
- Shape with holes support
[770](https://github.com/huggingface/candle/pull/770).
### Modified
- Dilations are now supported in conv-transpose2d.
[671](https://github.com/huggingface/candle/pull/671).
- Interactive mode for the quantized model
[690](https://github.com/huggingface/candle/pull/690).
- Faster softmax operation
[747](https://github.com/huggingface/candle/pull/747).
- Faster convolution operations on CPU and CUDA via im2col
[802](https://github.com/huggingface/candle/pull/802).
- Moving some models to a more central location
[796](https://github.com/huggingface/candle/pull/796).
## v0.2.0 - 2023-08-30
### Added
- Add the powf op
[664](https://github.com/huggingface/candle/pull/664).
- Stable Diffusion XL support
[647](https://github.com/huggingface/candle/pull/647).
- Add the conv-transpose2d op
[635](https://github.com/huggingface/candle/pull/635).
- Refactor the VarBuilder api
[627](https://github.com/huggingface/candle/pull/627).
- Add some quantization command
[625](https://github.com/huggingface/candle/pull/625).
- Support more quantized types, e.g. Q2K, Q4K, Q5K...
[586](https://github.com/huggingface/candle/pull/586).
- Add pose estimation to the yolo example
[589](https://github.com/huggingface/candle/pull/589).
- Api to write GGUF files
[585](https://github.com/huggingface/candle/pull/585).
- Support more quantization types
[580](https://github.com/huggingface/candle/pull/580).
- Add EfficientNet as an example Computer Vision model
[572](https://github.com/huggingface/candle/pull/572).
- Add a group parameter to convolutions
[566](https://github.com/huggingface/candle/pull/566).
- New dtype: int64
[563](https://github.com/huggingface/candle/pull/563).
- Handling of the GGUF file format.
[559](https://github.com/huggingface/candle/pull/559).
## v0.1.2 - 2023-08-21
| 0 |
0 | hf_public_repos | hf_public_repos/candle/Cargo.toml | [workspace]
members = [
"candle-core",
"candle-datasets",
"candle-examples",
"candle-book",
"candle-nn",
"candle-pyo3",
"candle-transformers",
"candle-wasm-examples/*",
"candle-wasm-tests",
"tensor-tools",
]
exclude = [
"candle-flash-attn",
"candle-kernels",
"candle-metal-kernels",
"candle-onnx",
]
resolver = "2"
[workspace.package]
version = "0.8.0"
edition = "2021"
description = "Minimalist ML framework."
repository = "https://github.com/huggingface/candle"
keywords = ["blas", "tensor", "machine-learning"]
categories = ["science"]
license = "MIT OR Apache-2.0"
[workspace.dependencies]
ab_glyph = "0.2.23"
accelerate-src = { version = "0.3.2" }
anyhow = { version = "1", features = ["backtrace"] }
byteorder = "1.4.3"
candle = { path = "./candle-core", package = "candle-core", version = "0.8.0" }
candle-datasets = { path = "./candle-datasets", version = "0.8.0" }
candle-flash-attn = { path = "./candle-flash-attn", version = "0.8.0" }
candle-kernels = { path = "./candle-kernels", version = "0.8.0" }
candle-metal-kernels = { path = "./candle-metal-kernels", version = "0.8.0" }
candle-nn = { path = "./candle-nn", version = "0.8.0" }
candle-onnx = { path = "./candle-onnx", version = "0.8.0" }
candle-transformers = { path = "./candle-transformers", version = "0.8.0" }
clap = { version = "4.2.4", features = ["derive"] }
criterion = { version = "0.5.1", default-features=false }
cudarc = { version = "0.12.1", features = ["std", "cublas", "cublaslt", "curand", "driver", "nvrtc", "f16", "cuda-version-from-build-system", "dynamic-linking"], default-features=false }
fancy-regex = "0.13.0"
gemm = { version = "0.17.0", features = ["wasm-simd128-enable"] }
hf-hub = { version = "0.3.3", package = "candle-hf-hub" }
half = { version = "2.3.1", features = ["num-traits", "use-intrinsics", "rand_distr"] }
hound = "3.5.1"
image = { version = "0.25.2", default-features = false, features = ["jpeg", "png"] }
imageproc = { version = "0.24.0", default-features = false }
intel-mkl-src = { version = "0.8.1", features = ["mkl-static-lp64-iomp"] }
libc = { version = "0.2.147" }
log = "0.4"
memmap2 = { version = "0.9.3", features = ["stable_deref_trait"] }
num_cpus = "1.15.0"
num-traits = "0.2.15"
parquet = { version = "51.0.0" }
rand = "0.8.5"
rand_distr = "0.4.3"
rayon = "1.7.0"
safetensors = "0.4.1"
serde = { version = "1.0.171", features = ["derive"] }
serde_plain = "1.0.2"
serde_json = "1.0.99"
thiserror = "1"
tokenizers = { version = "0.19.1", default-features = false }
tracing = "0.1.37"
tracing-chrome = "0.7.1"
tracing-subscriber = "0.3.7"
ug = "0.0.2"
ug-cuda = "0.0.2"
ug-metal = "0.0.2"
yoke = { version = "0.7.2", features = ["derive"] }
zip = { version = "1.1.1", default-features = false }
metal = { version = "0.27.0", features = ["mps"]}
[profile.release-with-debug]
inherits = "release"
debug = true
| 1 |
0 | hf_public_repos | hf_public_repos/candle/LICENSE-MIT | Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
| 2 |
0 | hf_public_repos | hf_public_repos/candle/LICENSE-APACHE | Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| 3 |
0 | hf_public_repos | hf_public_repos/candle/test.onnx | backend-test:J
xytest"Relu
SingleReluZ
x
b
y
B | 4 |
0 | hf_public_repos | hf_public_repos/candle/.pre-commit-config.yaml | repos:
- repo: https://github.com/Narsil/pre-commit-rust
rev: 2eed6366172ef2a5186e8785ec0e67243d7d73d0
hooks:
- id: fmt
name: "Rust (fmt)"
- id: clippy
name: "Rust (clippy)"
args:
[
"--tests",
"--examples",
"--",
"-Dwarnings",
]
| 5 |
0 | hf_public_repos | hf_public_repos/candle/README.md | # candle
[](https://discord.gg/hugging-face-879548962464493619)
[](https://crates.io/crates/candle-core)
[](https://docs.rs/candle-core)
[](https://github.com/huggingface/candle/blob/main/LICENSE-MIT)
[](https://github.com/huggingface/candle/blob/main/LICENSE-APACHE)
Candle is a minimalist ML framework for Rust with a focus on performance (including GPU support)
and ease of use. Try our online demos:
[whisper](https://huggingface.co/spaces/lmz/candle-whisper),
[LLaMA2](https://huggingface.co/spaces/lmz/candle-llama2),
[T5](https://huggingface.co/spaces/radames/Candle-T5-Generation-Wasm),
[yolo](https://huggingface.co/spaces/lmz/candle-yolo),
[Segment
Anything](https://huggingface.co/spaces/radames/candle-segment-anything-wasm).
## Get started
Make sure that you have [`candle-core`](https://github.com/huggingface/candle/tree/main/candle-core) correctly installed as described in [**Installation**](https://huggingface.github.io/candle/guide/installation.html).
Let's see how to run a simple matrix multiplication.
Write the following to your `myapp/src/main.rs` file:
```rust
use candle_core::{Device, Tensor};
fn main() -> Result<(), Box<dyn std::error::Error>> {
let device = Device::Cpu;
let a = Tensor::randn(0f32, 1., (2, 3), &device)?;
let b = Tensor::randn(0f32, 1., (3, 4), &device)?;
let c = a.matmul(&b)?;
println!("{c}");
Ok(())
}
```
`cargo run` should display a tensor of shape `Tensor[[2, 4], f32]`.
Having installed `candle` with Cuda support, simply define the `device` to be on GPU:
```diff
- let device = Device::Cpu;
+ let device = Device::new_cuda(0)?;
```
For more advanced examples, please have a look at the following section.
## Check out our examples
These online demos run entirely in your browser:
- [yolo](https://huggingface.co/spaces/lmz/candle-yolo): pose estimation and
object recognition.
- [whisper](https://huggingface.co/spaces/lmz/candle-whisper): speech recognition.
- [LLaMA2](https://huggingface.co/spaces/lmz/candle-llama2): text generation.
- [T5](https://huggingface.co/spaces/radames/Candle-T5-Generation-Wasm): text generation.
- [Phi-1.5, and Phi-2](https://huggingface.co/spaces/radames/Candle-Phi-1.5-Wasm): text generation.
- [Segment Anything Model](https://huggingface.co/spaces/radames/candle-segment-anything-wasm): Image segmentation.
- [BLIP](https://huggingface.co/spaces/radames/Candle-BLIP-Image-Captioning): image captioning.
We also provide some command line based examples using state of the art models:
- [LLaMA v1, v2, and v3](./candle-examples/examples/llama/): general LLM, includes
the SOLAR-10.7B variant.
- [Falcon](./candle-examples/examples/falcon/): general LLM.
- [Codegeex4](./candle-examples/examples/codegeex4-9b/): code completion, code interpreter, web search, function calling, repository-level tasks.
- [GLM4](./candle-examples/examples/glm4/): Open Multilingual Multimodal Chat LMs by THUDM
- [Gemma v1 and v2](./candle-examples/examples/gemma/): 2b and 7b+/9b general LLMs from Google Deepmind.
- [RecurrentGemma](./candle-examples/examples/recurrent-gemma/): 2b and 7b
Griffin based models from Google that mix attention with an RNN-like state.
- [Phi-1, Phi-1.5, Phi-2, and Phi-3](./candle-examples/examples/phi/): 1.3b,
2.7b, and 3.8b general LLMs with performance on par with 7b models.
- [StableLM-3B-4E1T](./candle-examples/examples/stable-lm/): a 3b general LLM
pre-trained on 1T tokens of English and code datasets. Also supports
StableLM-2, a 1.6b LLM trained on 2T tokens, as well as the code variants.
- [Mamba](./candle-examples/examples/mamba/): an inference only
implementation of the Mamba state space model.
- [Mistral7b-v0.1](./candle-examples/examples/mistral/): a 7b general LLM with
better performance than all publicly available 13b models as of 2023-09-28.
- [Mixtral8x7b-v0.1](./candle-examples/examples/mixtral/): a sparse mixture of
experts 8x7b general LLM with better performance than a Llama 2 70B model with
much faster inference.
- [StarCoder](./candle-examples/examples/bigcode/) and
[StarCoder2](./candle-examples/examples/starcoder2/): LLM specialized to code generation.
- [Qwen1.5](./candle-examples/examples/qwen/): Bilingual (English/Chinese) LLMs.
- [RWKV v5 and v6](./candle-examples/examples/rwkv/): An RNN with transformer level LLM
performance.
- [Replit-code-v1.5](./candle-examples/examples/replit-code/): a 3.3b LLM specialized for code completion.
- [Yi-6B / Yi-34B](./candle-examples/examples/yi/): two bilingual
(English/Chinese) general LLMs with 6b and 34b parameters.
- [Quantized LLaMA](./candle-examples/examples/quantized/): quantized version of
the LLaMA model using the same quantization techniques as
[llama.cpp](https://github.com/ggerganov/llama.cpp).
<img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/quantized/assets/aoc.gif" width="600">
- [Stable Diffusion](./candle-examples/examples/stable-diffusion/): text to
image generative model, with support for the 1.5, 2.1, SDXL 1.0 and Turbo versions.
<img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/stable-diffusion/assets/stable-diffusion-xl.jpg" width="200">
- [Wuerstchen](./candle-examples/examples/wuerstchen/): another text to
image generative model.
<img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/wuerstchen/assets/cat.jpg" width="200">
- [yolo-v3](./candle-examples/examples/yolo-v3/) and
[yolo-v8](./candle-examples/examples/yolo-v8/): object detection and pose
estimation models.
<img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/yolo-v8/assets/bike.od.jpg" width="200"><img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/yolo-v8/assets/bike.pose.jpg" width="200">
- [segment-anything](./candle-examples/examples/segment-anything/): image
segmentation model with prompt.
<img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/segment-anything/assets/sam_merged.jpg" width="200">
- [SegFormer](./candle-examples/examples/segformer/): transformer based semantic segmentation model.
- [Whisper](./candle-examples/examples/whisper/): speech recognition model.
- [EnCodec](./candle-examples/examples/encodec/): high-quality audio compression
model using residual vector quantization.
- [MetaVoice](./candle-examples/examples/metavoice/): foundational model for
text-to-speech.
- [Parler-TTS](./candle-examples/examples/parler-tts/): large text-to-speech
model.
- [T5](./candle-examples/examples/t5), [Bert](./candle-examples/examples/bert/),
[JinaBert](./candle-examples/examples/jina-bert/): useful for sentence embeddings.
- [DINOv2](./candle-examples/examples/dinov2/): computer vision model trained
using self-supervision (can be used for imagenet classification, depth
evaluation, segmentation).
- [VGG](./candle-examples/examples/vgg/),
[RepVGG](./candle-examples/examples/repvgg): computer vision models.
- [BLIP](./candle-examples/examples/blip/): image to text model, can be used to
generate captions for an image.
- [CLIP](./candle-examples/examples/clip/): multi-modal vision and language
model.
- [TrOCR](./candle-examples/examples/trocr/): a transformer OCR model, with
dedicated submodels for hand-writing and printed recognition.
- [Marian-MT](./candle-examples/examples/marian-mt/): neural machine translation
model, generates the translated text from the input text.
- [Moondream](./candle-examples/examples/moondream/): tiny computer-vision model
that can answer real-world questions about images.
Run them using commands like:
```
cargo run --example quantized --release
```
In order to use **CUDA** add `--features cuda` to the example command line. If
you have cuDNN installed, use `--features cudnn` for even more speedups.
There are also some wasm examples for whisper and
[llama2.c](https://github.com/karpathy/llama2.c). You can either build them with
`trunk` or try them online:
[whisper](https://huggingface.co/spaces/lmz/candle-whisper),
[llama2](https://huggingface.co/spaces/lmz/candle-llama2),
[T5](https://huggingface.co/spaces/radames/Candle-T5-Generation-Wasm),
[Phi-1.5, and Phi-2](https://huggingface.co/spaces/radames/Candle-Phi-1.5-Wasm),
[Segment Anything Model](https://huggingface.co/spaces/radames/candle-segment-anything-wasm).
For LLaMA2, run the following command to retrieve the weight files and start a
test server:
```bash
cd candle-wasm-examples/llama2-c
wget https://huggingface.co/spaces/lmz/candle-llama2/resolve/main/model.bin
wget https://huggingface.co/spaces/lmz/candle-llama2/resolve/main/tokenizer.json
trunk serve --release --port 8081
```
And then head over to
[http://localhost:8081/](http://localhost:8081/).
<!--- ANCHOR: useful_libraries --->
## Useful External Resources
- [`candle-tutorial`](https://github.com/ToluClassics/candle-tutorial): A
very detailed tutorial showing how to convert a PyTorch model to Candle.
- [`candle-lora`](https://github.com/EricLBuehler/candle-lora): Efficient and
ergonomic LoRA implementation for Candle. `candle-lora` has
out-of-the-box LoRA support for many models from Candle, which can be found
[here](https://github.com/EricLBuehler/candle-lora/tree/master/candle-lora-transformers/examples).
- [`optimisers`](https://github.com/KGrewal1/optimisers): A collection of optimisers
including SGD with momentum, AdaGrad, AdaDelta, AdaMax, NAdam, RAdam, and RMSprop.
- [`candle-vllm`](https://github.com/EricLBuehler/candle-vllm): Efficient platform for inference and
serving local LLMs including an OpenAI compatible API server.
- [`candle-ext`](https://github.com/mokeyish/candle-ext): An extension library to Candle that provides PyTorch functions not currently available in Candle.
- [`candle-coursera-ml`](https://github.com/vishpat/candle-coursera-ml): Implementation of ML algorithms from Coursera's [Machine Learning Specialization](https://www.coursera.org/specializations/machine-learning-introduction) course.
- [`kalosm`](https://github.com/floneum/floneum/tree/master/interfaces/kalosm): A multi-modal meta-framework in Rust for interfacing with local pre-trained models with support for controlled generation, custom samplers, in-memory vector databases, audio transcription, and more.
- [`candle-sampling`](https://github.com/EricLBuehler/candle-sampling): Sampling techniques for Candle.
- [`gpt-from-scratch-rs`](https://github.com/jeroenvlek/gpt-from-scratch-rs): A port of Andrej Karpathy's _Let's build GPT_ tutorial on YouTube showcasing the Candle API on a toy problem.
- [`candle-einops`](https://github.com/tomsanbear/candle-einops): A pure rust implementation of the python [einops](https://github.com/arogozhnikov/einops) library.
- [`atoma-infer`](https://github.com/atoma-network/atoma-infer): A Rust library for fast inference at scale, leveraging FlashAttention2 for efficient attention computation, PagedAttention for efficient KV-cache memory management, and multi-GPU support. It is OpenAI api compatible.
If you have an addition to this list, please submit a pull request.
<!--- ANCHOR_END: useful_libraries --->
<!--- ANCHOR: features --->
## Features
- Simple syntax, looks and feels like PyTorch.
- Model training.
- Embed user-defined ops/kernels, such as [flash-attention v2](https://github.com/huggingface/candle/blob/89ba005962495f2bfbda286e185e9c3c7f5300a3/candle-flash-attn/src/lib.rs#L152).
- Backends.
- Optimized CPU backend with optional MKL support for x86 and Accelerate for macs.
- CUDA backend for efficiently running on GPUs, multiple GPU distribution via NCCL.
- WASM support, run your models in a browser.
- Included models.
- Language Models.
- LLaMA v1, v2, and v3 with variants such as SOLAR-10.7B.
- Falcon.
- StarCoder, StarCoder2.
- Phi 1, 1.5, 2, and 3.
- Mamba, Minimal Mamba
- Gemma v1 2b and 7b+, v2 2b and 9b.
- Mistral 7b v0.1.
- Mixtral 8x7b v0.1.
- StableLM-3B-4E1T, StableLM-2-1.6B, Stable-Code-3B.
- Replit-code-v1.5-3B.
- Bert.
- Yi-6B and Yi-34B.
- Qwen1.5, Qwen1.5 MoE.
- RWKV v5 and v6.
- Quantized LLMs.
- Llama 7b, 13b, 70b, as well as the chat and code variants.
- Mistral 7b, and 7b instruct.
- Mixtral 8x7b.
- Zephyr 7b a and b (Mistral-7b based).
- OpenChat 3.5 (Mistral-7b based).
- Text to text.
- T5 and its variants: FlanT5, UL2, MADLAD400 (translation), CoEdit (Grammar correction).
- Marian MT (Machine Translation).
- Text to image.
- Stable Diffusion v1.5, v2.1, XL v1.0.
- Wurstchen v2.
- Image to text.
- BLIP.
- TrOCR.
- Audio.
- Whisper, multi-lingual speech-to-text.
- EnCodec, audio compression model.
- MetaVoice-1B, text-to-speech model.
- Parler-TTS, text-to-speech model.
- Computer Vision Models.
- DINOv2, ConvMixer, EfficientNet, ResNet, ViT, VGG, RepVGG, ConvNeXT,
ConvNeXTv2, MobileOne, EfficientVit (MSRA), MobileNetv4, Hiera, FastViT.
- yolo-v3, yolo-v8.
- Segment-Anything Model (SAM).
- SegFormer.
- File formats: load models from safetensors, npz, ggml, or PyTorch files.
- Serverless (on CPU), small and fast deployments.
- Quantization support using the llama.cpp quantized types.
<!--- ANCHOR_END: features --->
## How to use
<!--- ANCHOR: cheatsheet --->
Cheatsheet:
| | Using PyTorch | Using Candle |
|------------|------------------------------------------|------------------------------------------------------------------|
| Creation | `torch.Tensor([[1, 2], [3, 4]])` | `Tensor::new(&[[1f32, 2.], [3., 4.]], &Device::Cpu)?` |
| Creation | `torch.zeros((2, 2))` | `Tensor::zeros((2, 2), DType::F32, &Device::Cpu)?` |
| Indexing | `tensor[:, :4]` | `tensor.i((.., ..4))?` |
| Operations | `tensor.view((2, 2))` | `tensor.reshape((2, 2))?` |
| Operations | `a.matmul(b)` | `a.matmul(&b)?` |
| Arithmetic | `a + b` | `&a + &b` |
| Device | `tensor.to(device="cuda")` | `tensor.to_device(&Device::new_cuda(0)?)?` |
| Dtype | `tensor.to(dtype=torch.float16)` | `tensor.to_dtype(&DType::F16)?` |
| Saving | `torch.save({"A": A}, "model.bin")` | `candle::safetensors::save(&HashMap::from([("A", A)]), "model.safetensors")?` |
| Loading | `weights = torch.load("model.bin")` | `candle::safetensors::load("model.safetensors", &device)` |
<!--- ANCHOR_END: cheatsheet --->
## Structure
- [candle-core](./candle-core): Core ops, devices, and `Tensor` struct definition
- [candle-nn](./candle-nn/): Tools to build real models
- [candle-examples](./candle-examples/): Examples of using the library in realistic settings
- [candle-kernels](./candle-kernels/): CUDA custom kernels
- [candle-datasets](./candle-datasets/): Datasets and data loaders.
- [candle-transformers](./candle-transformers): transformers-related utilities.
- [candle-flash-attn](./candle-flash-attn): Flash attention v2 layer.
- [candle-onnx](./candle-onnx/): ONNX model evaluation.
## FAQ
### Why should I use Candle?
Candle's core goal is to *make serverless inference possible*. Full machine learning frameworks like PyTorch
are very large, which makes creating instances on a cluster slow. Candle allows deployment of lightweight
binaries.
Secondly, Candle lets you *remove Python* from production workloads. Python overhead can seriously hurt performance,
and the [GIL](https://www.backblaze.com/blog/the-python-gil-past-present-and-future/) is a notorious source of headaches.
Finally, Rust is cool! A lot of the HF ecosystem already has Rust crates, like [safetensors](https://github.com/huggingface/safetensors) and [tokenizers](https://github.com/huggingface/tokenizers).
### Other ML frameworks
- [dfdx](https://github.com/coreylowman/dfdx) is a formidable crate, with shapes being included
in types. This prevents a lot of headaches by getting the compiler to complain about shape mismatches right off the bat.
However, we found that some features still require nightly, and writing code can be a bit daunting for non-Rust experts.
We're leveraging and contributing to other core crates for the runtime so hopefully both crates can benefit from each
other.
- [burn](https://github.com/burn-rs/burn) is a general crate that can leverage multiple backends so you can choose the best
engine for your workload.
- [tch-rs](https://github.com/LaurentMazare/tch-rs.git): bindings to the torch library in Rust. Extremely versatile, but they
bring the entire torch library into the runtime. The main contributor of `tch-rs` is also involved in the development
of `candle`.
### Common Errors
#### Missing symbols when compiling with the mkl feature.
If you get some missing symbols when compiling binaries/tests using the mkl
or accelerate features, e.g. for mkl you get:
```
= note: /usr/bin/ld: (....o): in function `blas::sgemm':
.../blas-0.22.0/src/lib.rs:1944: undefined reference to `sgemm_' collect2: error: ld returned 1 exit status
= note: some `extern` functions couldn't be found; some native libraries may need to be installed or have their path specified
= note: use the `-l` flag to specify native libraries to link
= note: use the `cargo:rustc-link-lib` directive to specify the native libraries to link with Cargo
```
or for accelerate:
```
Undefined symbols for architecture arm64:
"_dgemm_", referenced from:
candle_core::accelerate::dgemm::h1b71a038552bcabe in libcandle_core...
"_sgemm_", referenced from:
candle_core::accelerate::sgemm::h2cf21c592cba3c47 in libcandle_core...
ld: symbol(s) not found for architecture arm64
```
This is likely due to a missing linker flag that was needed to enable the mkl library. You
can try adding the following for mkl at the top of your binary:
```rust
extern crate intel_mkl_src;
```
or for accelerate:
```rust
extern crate accelerate_src;
```
#### Cannot run the LLaMA examples: access to source requires login credentials
```
Error: request error: https://huggingface.co/meta-llama/Llama-2-7b-hf/resolve/main/tokenizer.json: status code 401
```
This is likely because you haven't been granted access to the LLaMA-v2 model. To fix
this, you have to register on the huggingface-hub, accept the [LLaMA-v2 model
conditions](https://huggingface.co/meta-llama/Llama-2-7b-hf), and set up your
authentication token. See issue
[#350](https://github.com/huggingface/candle/issues/350) for more details.
#### Missing cute/cutlass headers when compiling flash-attn
```
In file included from kernels/flash_fwd_launch_template.h:11:0,
from kernels/flash_fwd_hdim224_fp16_sm80.cu:5:
kernels/flash_fwd_kernel.h:8:10: fatal error: cute/algorithm/copy.hpp: No such file or directory
#include <cute/algorithm/copy.hpp>
^~~~~~~~~~~~~~~~~~~~~~~~~
compilation terminated.
Error: nvcc error while compiling:
```
[cutlass](https://github.com/NVIDIA/cutlass) is provided as a git submodule, so you may want to run the following command to check it out properly.
```bash
git submodule update --init
```
#### Compiling with flash-attention fails
```
/usr/include/c++/11/bits/std_function.h:530:146: error: parameter packs not expanded with ‘...’:
```
This is a bug in gcc-11 triggered by the Cuda compiler. To fix this, install a different, supported gcc version - for example gcc-10, and specify the path to the compiler in the NVCC_CCBIN environment variable.
```
env NVCC_CCBIN=/usr/lib/gcc/x86_64-linux-gnu/10 cargo ...
```
#### Linking error on windows when running rustdoc or mdbook tests
```
Couldn't compile the test.
---- .\candle-book\src\inference\hub.md - Using_the_hub::Using_in_a_real_model_ (line 50) stdout ----
error: linking with `link.exe` failed: exit code: 1181
//very long chain of linking
= note: LINK : fatal error LNK1181: cannot open input file 'windows.0.48.5.lib'
```
Make sure you link all native libraries that might be located outside a project target, e.g., to run mdbook tests, you should run:
```
mdbook test candle-book -L .\target\debug\deps\ `
-L native=$env:USERPROFILE\.cargo\registry\src\index.crates.io-6f17d22bba15001f\windows_x86_64_msvc-0.42.2\lib `
-L native=$env:USERPROFILE\.cargo\registry\src\index.crates.io-6f17d22bba15001f\windows_x86_64_msvc-0.48.5\lib
```
#### Extremely slow model load time with WSL
This may be caused by the models being loaded from `/mnt/c`, more details on
[stackoverflow](https://stackoverflow.com/questions/68972448/why-is-wsl-extremely-slow-when-compared-with-native-windows-npm-yarn-processing).
#### Tracking down errors
You can set `RUST_BACKTRACE=1` to be provided with backtraces when a candle
error is generated.
#### CudaRC error
If on Windows you encounter an error like `called `Result::unwrap()` on an `Err` value: LoadLibraryExW { source: Os { code: 126, kind: Uncategorized, message: "The specified module could not be found." } }`, copy and rename the following 3 files (and make sure they are in your path). The paths depend on your CUDA version.
`c:\Windows\System32\nvcuda.dll` -> `cuda.dll`
`c:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\bin\cublas64_12.dll` -> `cublas.dll`
`c:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\bin\curand64_10.dll` -> `curand.dll`
| 6 |
0 | hf_public_repos/candle | hf_public_repos/candle/candle-wasm-tests/webdriver.json | {
"moz:firefoxOptions": {
"prefs": {
"media.navigator.streams.fake": true,
"media.navigator.permission.disabled": true
},
"args": []
},
"goog:chromeOptions": {
"args": [
"--use-fake-device-for-media-stream",
"--use-fake-ui-for-media-stream"
]
}
}
| 7 |
0 | hf_public_repos/candle | hf_public_repos/candle/candle-wasm-tests/Cargo.toml | [package]
name = "candle-wasm-tests"
version.workspace = true
edition.workspace = true
description = "WASM tests for candle"
keywords.workspace = true
categories.workspace = true
[dependencies]
candle = { workspace = true }
rand = { workspace = true }
getrandom = { version = "0.2", features = ["js"] }
[dev-dependencies]
wasm-bindgen-test = "0.3.0"
| 8 |
0 | hf_public_repos/candle | hf_public_repos/candle/candle-wasm-tests/README.md | Run the tests with:
```bash
RUST_LOG=wasm_bindgen_test_runner wasm-pack test --chrome --headless
```
Or:
```bash
wasm-pack test --chrome
```
If you get an "invalid session id" failure in headless mode, check the logs; it
may well be that your ChromeDriver is not at the same version as your browser.
| 9 |
0 | hf_public_repos/api-inference-community/docker_images/fastai | hf_public_repos/api-inference-community/docker_images/fastai/tests/test_api.py | import os
from typing import Dict
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS, get_pipeline
# Must contain at least one example of each implemented pipeline
# Tests do not check the actual values of the model output, so small dummy
# models are recommended for faster tests.
TESTABLE_MODELS: Dict[str, str] = {
"image-classification": ["fastai/fastbook_02_bears_classifier"]
}
ALL_TASKS = {
"image-classification",
}
class PipelineTestCase(TestCase):
@skipIf(
os.path.dirname(os.path.dirname(__file__)).endswith("common"),
"common is a special case",
)
def test_has_at_least_one_task_enabled(self):
self.assertGreater(
len(ALLOWED_TASKS.keys()), 0, "You need to implement at least one task"
)
# def test_unsupported_tasks(self):
# unsupported_tasks = ALL_TASKS - ALLOWED_TASKS.keys()
# for unsupported_task in unsupported_tasks:
# with self.subTest(msg=unsupported_task, task=unsupported_task):
# os.environ["TASK"] = unsupported_task
# os.environ["MODEL_ID"] = "XX"
# with self.assertRaises(EnvironmentError):
# get_pipeline()
def test_unsupported_tasks(self):
unsupported_tasks = ALL_TASKS - ALLOWED_TASKS.keys()
for unsupported_task in unsupported_tasks:
with self.subTest(msg=unsupported_task, task=unsupported_task):
with self.assertRaises(EnvironmentError):
get_pipeline(unsupported_task, model_id="XX")
| 0 |
0 | hf_public_repos/api-inference-community/docker_images/fastai | hf_public_repos/api-inference-community/docker_images/fastai/tests/test_api_image_classification.py | import json
import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from parameterized import parameterized_class
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
"image-classification" not in ALLOWED_TASKS,
"image-classification not implemented",
)
@parameterized_class(
[{"model_id": model_id} for model_id in TESTABLE_MODELS["image-classification"]]
)
class ImageClassificationTestCase(TestCase):
def setUp(self):
self.old_model_id = os.getenv("MODEL_ID")
self.old_task = os.getenv("TASK")
os.environ["MODEL_ID"] = self.model_id
os.environ["TASK"] = "image-classification"
from app.main import app, get_pipeline
get_pipeline.cache_clear()
self.app = app
@classmethod
def setUpClass(cls):
from app.main import get_pipeline
get_pipeline.cache_clear()
def tearDown(self):
if self.old_model_id is not None:
os.environ["MODEL_ID"] = self.old_model_id
else:
del os.environ["MODEL_ID"]
if self.old_task is not None:
os.environ["TASK"] = self.old_task
else:
del os.environ["TASK"]
def read(self, filename: str) -> bytes:
dirname = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(dirname, "samples", filename)
with open(filename, "rb") as f:
bpayload = f.read()
return bpayload
def test_simple(self):
bpayload = self.read("plane.jpg")
with TestClient(self.app) as client:
response = client.post("/", data=bpayload)
self.assertEqual(
response.status_code,
200,
)
content = json.loads(response.content)
self.assertEqual(type(content), list)
self.assertEqual(set(type(el) for el in content), {dict})
self.assertEqual(
set((k, type(v)) for el in content for (k, v) in el.items()),
{("label", str), ("score", float)},
)
def test_different_resolution(self):
bpayload = self.read("plane2.jpg")
with TestClient(self.app) as client:
response = client.post("/", data=bpayload)
self.assertEqual(
response.status_code,
200,
)
content = json.loads(response.content)
self.assertEqual(type(content), list)
self.assertEqual(set(type(el) for el in content), {dict})
self.assertEqual(
set(k for el in content for k in el.keys()), {"label", "score"}
)
| 1 |
0 | hf_public_repos/api-inference-community/docker_images | hf_public_repos/api-inference-community/docker_images/common/requirements.txt | starlette==0.27.0
api-inference-community==0.0.32
huggingface_hub==0.11.0
| 2 |
0 | hf_public_repos/api-inference-community/docker_images | hf_public_repos/api-inference-community/docker_images/common/Dockerfile | FROM tiangolo/uvicorn-gunicorn:python3.8
LABEL maintainer="me <[email protected]>"
# Add any system dependency here
# RUN apt-get update -y && apt-get install libXXX -y
COPY ./requirements.txt /app
RUN pip install --no-cache-dir -r requirements.txt
COPY ./prestart.sh /app/
# Most DL models are quite large in terms of memory, using workers is a HUGE
# slowdown because of the fork and GIL with python.
# Using multiple pods seems like a better default strategy.
# Feel free to override if it does not make sense for your library.
ARG max_workers=1
ENV MAX_WORKERS=$max_workers
ENV HUGGINGFACE_HUB_CACHE=/data
# Necessary on GPU docker environments: the TIMEOUT env variable is used by
# nvcr.io/nvidia/pytorch:xx for another purpose, making the TIMEOUT defined by
# uvicorn impossible to use correctly.
# We're overriding it by renaming it to UVICORN_TIMEOUT.
# UVICORN_TIMEOUT is a useful variable for very large models that take more
# than 30s (the default) to load in memory.
# If UVICORN_TIMEOUT is too low, uvicorn will simply never load, as the
# workers get killed before they finish loading.
RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py
COPY ./app /app/app
| 3 |
0 | hf_public_repos/api-inference-community/docker_images | hf_public_repos/api-inference-community/docker_images/common/prestart.sh | python app/main.py
| 4 |
0 | hf_public_repos/api-inference-community/docker_images/common | hf_public_repos/api-inference-community/docker_images/common/app/main.py | import functools
import logging
import os
from typing import Dict, Type
from api_inference_community.routes import pipeline_route, status_ok
from app.pipelines import Pipeline
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.gzip import GZipMiddleware
from starlette.routing import Route
TASK = os.getenv("TASK")
MODEL_ID = os.getenv("MODEL_ID")
logger = logging.getLogger(__name__)
# Add the allowed tasks
# Supported tasks are:
# - text-generation
# - text-classification
# - token-classification
# - translation
# - summarization
# - automatic-speech-recognition
# - ...
# For instance
# from app.pipelines import AutomaticSpeechRecognitionPipeline
# ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline}
# You can check the requirements and expectations of each pipelines in their respective
# directories. Implement directly within the directories.
ALLOWED_TASKS: Dict[str, Type[Pipeline]] = {
# IMPLEMENT_THIS: Add your implemented tasks here !
}
@functools.lru_cache()
def get_pipeline() -> Pipeline:
task = os.environ["TASK"]
model_id = os.environ["MODEL_ID"]
if task not in ALLOWED_TASKS:
raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}")
return ALLOWED_TASKS[task](model_id)
routes = [
Route("/{whatever:path}", status_ok),
Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]
middleware = [Middleware(GZipMiddleware, minimum_size=1000)]
if os.environ.get("DEBUG", "") == "1":
from starlette.middleware.cors import CORSMiddleware
middleware.append(
Middleware(
CORSMiddleware,
allow_origins=["*"],
allow_headers=["*"],
allow_methods=["*"],
)
)
app = Starlette(routes=routes, middleware=middleware)
@app.on_event("startup")
async def startup_event():
logger = logging.getLogger("uvicorn.access")
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
logger.handlers = [handler]
# Link between `api-inference-community` and framework code.
app.get_pipeline = get_pipeline
try:
get_pipeline()
except Exception:
    # This can fail; the exception will be shown later.
pass
if __name__ == "__main__":
try:
get_pipeline()
except Exception:
        # This can fail; the exception will be shown later.
pass
| 5 |
0 | hf_public_repos/api-inference-community/docker_images/common/app | hf_public_repos/api-inference-community/docker_images/common/app/pipelines/conversational.py | from typing import Any, Dict, List, Union
from app.pipelines import Pipeline
class ConversationalPipeline(Pipeline):
def __init__(self, model_id: str):
# IMPLEMENT_THIS
# Preload all the elements you are going to need at inference.
# For instance your model, processors, tokenizer that might be needed.
# This function is only called once, so do all the heavy processing I/O here
raise NotImplementedError(
"Please implement ConversationalPipeline __init__ function"
)
def __call__(self, inputs: Dict[str, Union[str, List[str]]]) -> Dict[str, Any]:
"""
Args:
inputs (:obj:`dict`): a dictionary containing the following key values:
text (`str`, *optional*):
The initial user input to start the conversation
past_user_inputs (`List[str]`, *optional*):
Eventual past history of the conversation of the user. You don't need to pass it manually if you use the
pipeline interactively but if you want to recreate history you need to set both `past_user_inputs` and
`generated_responses` with equal length lists of strings
generated_responses (`List[str]`, *optional*):
Eventual past history of the conversation of the model. You don't need to pass it manually if you use the
pipeline interactively but if you want to recreate history you need to set both `past_user_inputs` and
`generated_responses` with equal length lists of strings
Return:
A :obj:`dict`: a dictionary containing the following key values:
generated_text (`str`):
The answer of the bot
conversation (`Dict[str, List[str]]`):
A facility dictionary to send back for the next input (with the new user input addition).
past_user_inputs (`List[str]`)
List of strings. The last inputs from the user in the conversation, after the model has run.
generated_responses (`List[str]`)
List of strings. The last outputs from the model in the conversation, after the model has run.
"""
# IMPLEMENT_THIS
raise NotImplementedError(
"Please implement ConversationalPipeline __call__ function"
)
| 6 |
0 | hf_public_repos/api-inference-community/docker_images/common/app | hf_public_repos/api-inference-community/docker_images/common/app/pipelines/text_to_image.py | from typing import TYPE_CHECKING
from app.pipelines import Pipeline
if TYPE_CHECKING:
from PIL import Image
class TextToImagePipeline(Pipeline):
def __init__(self, model_id: str):
# IMPLEMENT_THIS
# Preload all the elements you are going to need for inference.
# For instance your model, processors, tokenizer that might be needed.
# This function is only called once, so do all the heavy processing I/O here
raise NotImplementedError(
"Please implement TextToImagePipeline.__init__ function"
)
def __call__(self, inputs: str) -> "Image.Image":
"""
Args:
inputs (:obj:`str`):
a string containing some text
Return:
A :obj:`PIL.Image` with the raw image representation as PIL.
"""
# IMPLEMENT_THIS
raise NotImplementedError(
"Please implement TextToImagePipeline.__call__ function"
)
| 7 |
0 | hf_public_repos/api-inference-community/docker_images/common/app | hf_public_repos/api-inference-community/docker_images/common/app/pipelines/tabular_regression_pipeline.py | from typing import Dict, List, Union
from app.pipelines import Pipeline
class TabularRegressionPipeline(Pipeline):
def __init__(self, model_id: str):
# IMPLEMENT_THIS
# Preload all the elements you are going to need at inference.
# For instance your model, processors, tokenizer that might be needed.
# This function is only called once, so do all the heavy processing I/O here
raise NotImplementedError(
"Please implement TabularRegressionPipeline __init__ function"
)
def __call__(
self, inputs: Dict[str, Dict[str, List[Union[int, str, float]]]]
) -> List[float]:
"""
Args:
inputs (:obj:`dict`):
a dictionary containing a key 'data' mapping to a dict in which
the values represent each column.
Return:
A :obj:`list` of float: The regression output for each row.
"""
# IMPLEMENT_THIS
raise NotImplementedError(
"Please implement TabularRegressionPipeline __init__ function"
)
| 8 |
0 | hf_public_repos/api-inference-community/docker_images/common/app | hf_public_repos/api-inference-community/docker_images/common/app/pipelines/text_to_speech.py | from typing import Tuple
import numpy as np
from app.pipelines import Pipeline
class TextToSpeechPipeline(Pipeline):
def __init__(self, model_id: str):
# IMPLEMENT_THIS
# Preload all the elements you are going to need at inference.
# For instance your model, processors, tokenizer that might be needed.
# This function is only called once, so do all the heavy processing I/O here
raise NotImplementedError(
"Please implement TextToSpeechPipeline __init__ function"
)
def __call__(self, inputs: str) -> Tuple[np.array, int]:
"""
Args:
inputs (:obj:`str`):
The text to generate audio from
Return:
A :obj:`np.array` and a :obj:`int`: The raw waveform as a numpy array, and the sampling rate as an int.
"""
# IMPLEMENT_THIS
raise NotImplementedError(
"Please implement TextToSpeechPipeline __call__ function"
)
| 9 |
0 | hf_public_repos/blog | hf_public_repos/blog/zh/informer.md | ---
title: "使用 Informer 进行多元概率时间序列预测"
thumbnail: /blog/assets/134_informer/thumbnail.png
authors:
- user: elisim
guest: true
- user: nielsr
- user: kashif
translators:
- user: innovation64
---
# Multivariate Probabilistic Time Series Forecasting with Informer
<script async defer src="https://unpkg.com/medium-zoom-element@0/dist/medium-zoom-element.min.js"></script>
<a target="_blank" href="https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multivariate_informer.ipynb">
<img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/>
</a>
## Introduction
A few months ago we introduced the [Time Series Transformer](https://huggingface.co/blog/time-series-transformers), which is the vanilla Transformer ([Vaswani et al., 2017](https://arxiv.org/abs/1706.03762)) applied to forecasting, and showed an example for the **univariate** probabilistic forecasting task (i.e. predicting each time series' 1-d distribution individually). In this post, we introduce the _Informer_ model ([Zhou, Haoyi, et al., 2021](https://arxiv.org/abs/2012.07436)), the AAAI21 best paper, which is now [available](https://huggingface.co/docs/transformers/main/en/model_doc/informer) in 🤗 Transformers. We will show how to use the Informer model for the **multivariate** probabilistic forecasting task, i.e., predicting the distribution of a future **vector** of time-series target values. Note that this will also work for the vanilla Time Series Transformer model.
## Multivariate Probabilistic Time Series Forecasting
As far as the modeling aspect of probabilistic forecasting is concerned, the Transformer/Informer requires no change when dealing with multivariate time series. In both the univariate and multivariate setting, the model will receive a sequence of vectors, and thus the only change lies in the final output, i.e. the model's emissions.
Modeling the full joint conditional distribution of high-dimensional data can get computationally very expensive, and thus one resorts to some approximation of the distribution, the easiest being to model the data as independent distributions from the same family, or some low-rank approximation of the full covariance, etc. Here we will just resort to independent (or diagonal) emissions, which are supported by the families of distributions we have [implemented](https://huggingface.co/docs/transformers/main/en/internal/time_series_utils). A small illustrative sketch of such a diagonal emission follows below.
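To make the independent (diagonal) emission idea concrete, here is a minimal sketch of how such a distribution can be built from per-dimension parameters with `torch.distributions` (an illustration only; the library wires this up internally, and the parameter values below are arbitrary assumptions):
```python
import torch
from torch.distributions import Independent, StudentT
num_variates = 862                    # e.g. the number of series in our dataset
df = 2.0 + torch.rand(num_variates)   # per-dimension degrees of freedom
loc = torch.zeros(num_variates)       # per-dimension location
scale = torch.ones(num_variates)      # per-dimension scale
# Reinterpret the last batch dimension as an event dimension, i.e. an
# 862-dimensional distribution with a diagonal dependency structure:
diagonal_emission = Independent(StudentT(df, loc, scale), 1)
sample = diagonal_emission.sample()
print(sample.shape)                        # torch.Size([862])
print(diagonal_emission.log_prob(sample))  # a single joint log-probability
```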
## Informer - Under The Hood
Based on the vanilla Transformer ([Vaswani et al., 2017](https://arxiv.org/abs/1706.03762)), Informer employs two major improvements. To understand these improvements, let's recall the drawbacks of the vanilla Transformer:
1. **Quadratic computation of canonical self-attention:** the vanilla Transformer has a computational complexity of \\(O(T^2 D)\\) where \\(T\\) is the time series length and \\(D\\) is the dimension of the hidden states. For long-sequence time-series forecasting (also known as the _LSTF problem_), this can be very computationally expensive. To solve this problem, Informer employs a new self-attention mechanism called _ProbSparse_ attention, which has \\(O(T \log T)\\) time and space complexity.
2. **Memory bottleneck when stacking layers:** when stacking \\(N\\) encoder/decoder layers, the vanilla Transformer has a memory usage of \\(O(N T^2)\\), which limits the model's capacity for long sequences. Informer uses a _distilling_ operation to reduce the input size between layers to its half slice, thereby reducing the whole memory usage to \\(O(N\cdot T \log T)\\).
As you can see, the motivation behind the Informer model is similar to Longformer ([Beltagy et el., 2020](https://arxiv.org/abs/2004.05150)), Sparse Transformer ([Child et al., 2019](https://arxiv.org/abs/1904.10509)) and other NLP papers that reduce the quadratic complexity of the self-attention mechanism **when the input sequence is long**. Now, let's dive into the _ProbSparse_ attention mechanism and the _distilling_ operation with code examples.
### ProbSparse Attention
The main idea of ProbSparse is that the canonical self-attention scores form a long-tail distribution, where the "active" queries lie in the "head" scores and the "lazy" queries lie in the "tail" area. By an "active" query we mean a query \\(q_i\\) such that the dot-product \\(\langle q_i,k_i \rangle\\) **contributes** to the major attention, whereas a "lazy" query forms a dot-product that generates **trivial** attention. Here, \\(q_i\\) and \\(k_i\\) are the \\(i\\)-th rows of the attention matrices \\(Q\\) and \\(K\\) respectively.
|  |
|:--:|
| Vanilla self-attention vs ProbSparse attention, from [Autoformer (Wu, Haixu, et al., 2021)](https://wuhaixu2016.github.io/pdf/NeurIPS2021_Autoformer.pdf) |
Given the idea of "active" and "lazy" queries, ProbSparse attention selects the "active" queries and creates a reduced query matrix \\(Q_{reduce}\\), which is used to calculate the attention weights in \\(O(T \log T)\\). Let's see this in more detail with a code example.
Recall the canonical self-attention formula:
$$
\textrm{Attention}(Q, K, V) = \textrm{softmax}(\frac{QK^T}{\sqrt{d_k}} )V
$$
where \\(Q\in \mathbb{R}^{L_Q \times d}\\), \\(K\in \mathbb{R}^{L_K \times d}\\) and \\(V\in \mathbb{R}^{L_V \times d}\\). Note that in practice, the input lengths of the queries and keys are typically equal in the self-attention computation, i.e. \\(L_Q = L_K = T\\) where \\(T\\) is the time series length. Therefore, the \\(QK^T\\) multiplication takes \\(O(T^2 \cdot d)\\) computational complexity. In ProbSparse attention, our goal is to create a new \\(Q_{reduce}\\) matrix and define:
$$
\textrm{ProbSparseAttention}(Q, K, V) = \textrm{softmax}(\frac{Q_{reduce}K^T}{\sqrt{d_k}} )V
$$
where the \\(Q_{reduce}\\) matrix only selects the Top \\(u\\) "active" queries. Here, \\(u = c \cdot \log L_Q\\), and \\(c\\) is called the _sampling factor_ hyperparameter of ProbSparse attention. Since \\(Q_{reduce}\\) selects only the Top \\(u\\) queries, its size is \\(c\cdot \log L_Q \times d\\), so the multiplication \\(Q_{reduce}K^T\\) takes only \\(O(L_K \log L_Q) = O(T \log T)\\).
This is good! But how can we select the \\(u\\) "active" queries to create \\(Q_{reduce}\\)? Let's define the _Query Sparsity Measurement_.
#### Query Sparsity Measurement
The Query Sparsity Measurement \\(M(q_i, K)\\) is used to select the \\(u\\) "active" queries \\(q_i\\) in \\(Q\\) in order to create \\(Q_{reduce}\\). In theory, the dominant \\(\langle q_i,k_i \rangle\\) pairs encourage the probability distribution of an "active" \\(q_i\\) to move **away** from the uniform distribution, as can be seen in the figure below. Hence, the [KL divergence](https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence) between the actual query distribution and the uniform distribution is used to define the sparsity measurement.
|  |
|:--:|
| An illustration of ProbSparse attention from the official [repository](https://github.com/zhouhaoyi/Informer2020) |
In practice, the measurement is defined as:
$$
M(q_i, K) = \max_j \frac{q_ik_j^T}{\sqrt{d}}-\frac{1}{L_k} \sum_{j=1}^{L_k}\frac{q_ik_j^T}{\sqrt{d}}
$$
The important thing to understand here is that when \\(M(q_i, K)\\) is larger, the query \\(q_i\\) should be in \\(Q_{reduce}\\), and vice versa.
But how can we compute the term \\(q_ik_j^T\\) in non-quadratic time? Recall that most of the dot-products \\(\langle q_i,k_i \rangle\\) generate trivial attention anyway (i.e. the long-tail distribution property), so it is enough to randomly sample a subset of keys from \\(K\\), which is called `K_sample` in the code.
Now, let's have a look at the code of `probsparse_attention`:
```python
import math
import numpy as np
import torch
from torch import nn
def probsparse_attention(query_states, key_states, value_states, sampling_factor=5):
"""
Compute the probsparse self-attention.
Input shape: Batch x Time x Channel
Note the additional `sampling_factor` input.
"""
# get input sizes with logs
L_K = key_states.size(1)
L_Q = query_states.size(1)
log_L_K = np.ceil(np.log1p(L_K)).astype("int").item()
log_L_Q = np.ceil(np.log1p(L_Q)).astype("int").item()
# calculate a subset of samples to slice from K and create Q_K_sample
U_part = min(sampling_factor * L_Q * log_L_K, L_K)
# create Q_K_sample (the q_i * k_j^T term in the sparsity measurement)
index_sample = torch.randint(0, L_K, (U_part,))
K_sample = key_states[:, index_sample, :]
Q_K_sample = torch.bmm(query_states, K_sample.transpose(1, 2))
# calculate the query sparsity measurement with Q_K_sample
M = Q_K_sample.max(dim=-1)[0] - torch.div(Q_K_sample.sum(dim=-1), L_K)
# calculate u to find the Top-u queries under the sparsity measurement
u = min(sampling_factor * log_L_Q, L_Q)
M_top = M.topk(u, sorted=False)[1]
# calculate Q_reduce as query_states[:, M_top]
dim_for_slice = torch.arange(query_states.size(0)).unsqueeze(-1)
Q_reduce = query_states[dim_for_slice, M_top] # size: c*log_L_Q x channel
# and now, same as the canonical
d_k = query_states.size(-1)
attn_scores = torch.bmm(Q_reduce, key_states.transpose(-2, -1)) # Q_reduce x K^T
attn_scores = attn_scores / math.sqrt(d_k)
attn_probs = nn.functional.softmax(attn_scores, dim=-1)
attn_output = torch.bmm(attn_probs, value_states)
return attn_output, attn_scores
```
And that's it! Note that this is only a partial implementation of `probsparse_attention`; the full implementation can be found in 🤗 Transformers.
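As a quick sanity check of the partial implementation above, one can feed it random tensors and inspect the shapes (a minimal sketch; the `2, 64, 32` sizes are arbitrary assumptions, and note that unlike the full implementation, this partial version returns only the \\(u\\) selected query rows):
```python
# Illustrative shape check for the partial implementation above:
B, T, D = 2, 64, 32  # arbitrary batch, time and channel sizes
q, k, v = (torch.randn(B, T, D) for _ in range(3))
out, scores = probsparse_attention(q, k, v)
# u = min(5 * ceil(log1p(T)), T) = 25 "active" queries are kept,
# so the reduced output has shape (batch, u, channel):
print(out.shape)  # torch.Size([2, 25, 32])
```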
### Distilling
Because of the ProbSparse self-attention mechanism, the encoder's feature map has some redundancy that can be removed. Therefore, the distilling operation is used to reduce the input size between encoder layers to its half slice, which in theory removes this redundancy.
In practice, Informer's "distilling" operation just adds 1D convolution layers with max pooling between each of the encoder layers. Let \\(X_n\\) be the output of the \\(n\\)-th encoder layer; the distilling operation is then defined as:
$$
X_{n+1} = \textrm{MaxPool}(\textrm{ELU}(\textrm{Conv1d}(X_n)))
$$
Let's see this in code:
```python
from torch import nn
# ConvLayer is a class with forward pass applying ELU and MaxPool1d
def informer_encoder_forward(x_input, encoder_layers, distil=True):
    num_encoder_layers = len(encoder_layers)
    # Initialize the convolution layers
    if distil:
        conv_layers = nn.ModuleList([ConvLayer() for _ in range(num_encoder_layers - 1)])
        conv_layers.append(None)
    else:
        conv_layers = [None] * num_encoder_layers
    # Apply conv_layer between each encoder_layer, chaining the outputs
    output = x_input
    for encoder_layer, conv_layer in zip(encoder_layers, conv_layers):
        output = encoder_layer(output)
        if conv_layer is not None:
            output = conv_layer(output)
    return output
```
By reducing the input of each layer by half, we get a memory usage of \\(O(N\cdot T \log T)\\) instead of \\(O(N\cdot T^2)\\), where \\(N\\) is the number of encoder/decoder layers. This is what we want!
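To get a feel for the size of this saving, here is a back-of-the-envelope comparison (an illustrative count of "units of work" with hypothetical sizes, not a real memory measurement):
```python
# Illustrative comparison of the two complexity terms for a long series:
import math
T, N = 10_000, 3  # hypothetical sequence length and number of layers
print(N * T**2)                    # 300000000 units for O(N*T^2)
print(round(N * T * math.log(T)))  # ~276310 units for O(N*T*log(T))
```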
The Informer model is [now available](https://huggingface.co/docs/transformers/main/en/model_doc/informer) in the 🤗 Transformers library, simply called `InformerModel`. In the sections below, we will show how to train this model on a custom multivariate time-series dataset.
## Set-up Environment
First, let's install the necessary libraries: 🤗 Transformers, 🤗 Datasets, 🤗 Evaluate, 🤗 Accelerate and [GluonTS](https://github.com/awslabs/gluonts).
As we will show, GluonTS will be used for transforming the data to create features, as well as for creating appropriate training, validation and test batches.
```python
!pip install -q git+https://github.com/huggingface/transformers.git datasets evaluate accelerate gluonts ujson
```
## Load Dataset
In this blog post, we'll use the `traffic_hourly` dataset, which is available on the [Hugging Face Hub](https://huggingface.co/datasets/monash_tsf). The dataset contains the San Francisco Traffic dataset used by [Lai et al. (2017)](https://arxiv.org/abs/1703.07015). It contains 862 hourly time series showing the road occupancy rates in the range \\([0, 1]\\) on the San Francisco Bay Area freeways from 2015 to 2016.
This dataset is part of the [Monash Time Series Forecasting](https://forecastingdata.org/) repository, a collection of time series datasets from a number of domains. It can be viewed as the [GLUE benchmark](https://gluebenchmark.com/) of time series forecasting.
```python
from datasets import load_dataset
dataset = load_dataset("monash_tsf", "traffic_hourly")
```
As can be seen, the dataset contains 3 splits: train, validation and test.
```python
dataset
>>> DatasetDict({
train: Dataset({
features: ['start', 'target', 'feat_static_cat', 'feat_dynamic_real', 'item_id'],
num_rows: 862
})
test: Dataset({
features: ['start', 'target', 'feat_static_cat', 'feat_dynamic_real', 'item_id'],
num_rows: 862
})
validation: Dataset({
features: ['start', 'target', 'feat_static_cat', 'feat_dynamic_real', 'item_id'],
num_rows: 862
})
})
```
Each example contains a few keys, of which `start` and `target` are the most important ones. Let us have a look at the first time series in the dataset:
```python
train_example = dataset["train"][0]
train_example.keys()
>>> dict_keys(['start', 'target', 'feat_static_cat', 'feat_dynamic_real', 'item_id'])
```
The `start` simply indicates the start of the time series (as a datetime), and the `target` contains the actual values of the time series.
The `start` will be useful to add time-related features to the time series values as extra inputs to the model (such as "month of year"). Since we know the frequency of the data is `hourly`, we know for instance that the second value has the timestamp `2015-01-01 01:00:01`, the third `2015-01-01 02:00:01`, etc.
```python
print(train_example["start"])
print(len(train_example["target"]))
>>> 2015-01-01 00:00:01
17448
```
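Those timestamps can be materialized explicitly with pandas; a small illustrative sketch:
```python
# Illustrative: reconstruct the first few timestamps from `start` and the freq
import pandas as pd
timestamps = pd.period_range(start="2015-01-01 00:00:01", periods=3, freq="1H")
print(list(timestamps))
# [Period('2015-01-01 00:00', 'H'), Period('2015-01-01 01:00', 'H'), Period('2015-01-01 02:00', 'H')]
```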
The validation set contains the same data as the training set, just for a `prediction_length` longer amount of time. This allows us to validate the model's predictions against the ground truth.
The test set is again one `prediction_length` longer than the validation set (or some multiple of `prediction_length` longer than the training set, for testing on multiple rolling windows).
```python
validation_example = dataset["validation"][0]
validation_example.keys()
>>> dict_keys(['start', 'target', 'feat_static_cat', 'feat_dynamic_real', 'item_id'])
```
The initial values are exactly the same as the corresponding training example. However, this example has `prediction_length=48` (48 hours, or 2 days) additional values compared to the training example. Let us verify it.
```python
freq = "1H"
prediction_length = 48
assert len(train_example["target"]) + prediction_length == len(
dataset["validation"][0]["target"]
)
```
Let's visualize this:
```python
import matplotlib.pyplot as plt
num_of_samples = 150
figure, axes = plt.subplots()
axes.plot(train_example["target"][-num_of_samples:], color="blue")
axes.plot(
validation_example["target"][-num_of_samples - prediction_length :],
color="red",
alpha=0.5,
)
plt.show()
```

Let's split up the data:
```python
train_dataset = dataset["train"]
test_dataset = dataset["test"]
```
## Update `start` to `pd.Period`
The first thing we'll do is convert the `start` feature of each time series to a pandas `Period` index using the data's `freq`:
```python
from functools import lru_cache
import pandas as pd
import numpy as np
@lru_cache(10_000)
def convert_to_pandas_period(date, freq):
return pd.Period(date, freq)
def transform_start_field(batch, freq):
batch["start"] = [convert_to_pandas_period(date, freq) for date in batch["start"]]
return batch
```
We now use `datasets`' [`set_transform`](https://huggingface.co/docs/datasets/v2.7.0/en/package_reference/main_classes#datasets.Dataset.set_transform) functionality to do this on-the-fly, in place:
```python
from functools import partial
train_dataset.set_transform(partial(transform_start_field, freq=freq))
test_dataset.set_transform(partial(transform_start_field, freq=freq))
```
Now, let's convert the dataset into a multivariate time series using the `MultivariateGrouper` from GluonTS. This grouper will convert the individual 1-dimensional time series into a single 2-dimensional matrix.
```python
from gluonts.dataset.multivariate_grouper import MultivariateGrouper
num_of_variates = len(train_dataset)
train_grouper = MultivariateGrouper(max_target_dim=num_of_variates)
test_grouper = MultivariateGrouper(
max_target_dim=num_of_variates,
num_test_dates=len(test_dataset) // num_of_variates, # number of rolling test windows
)
multi_variate_train_dataset = train_grouper(train_dataset)
multi_variate_test_dataset = test_grouper(test_dataset)
```
Note that the target is now 2-dimensional, where the first dimension is the number of variates (number of time series) and the second one contains the time series values (the time dimension):
```python
multi_variate_train_example = multi_variate_train_dataset[0]
print("multi_variate_train_example["target"].shape =", multi_variate_train_example["target"].shape)
>>> multi_variate_train_example["target"].shape = (862, 17448)
```
## Define the Model
Next, let's instantiate a model. The model will be trained from scratch, hence we won't use the `from_pretrained` method here, but rather randomly initialize the model from a [`config`](https://huggingface.co/docs/transformers/main/en/model_doc/informer#transformers.InformerConfig).
We specify a couple of additional parameters to the model:
- `prediction_length` (in our case, `48` hours): this is the horizon that the decoder of the Informer will learn to predict for;
- `context_length`: the model will set the `context_length` (the input of the encoder) equal to the `prediction_length`, if no `context_length` is specified;
- `lags` for a given frequency: these specify an efficient "look back" mechanism, where we concatenate values from the past to the current values as additional features, e.g. for a `daily` frequency we might consider a look back of `[1, 7, 30, ...]`, or for `minute` data we might consider `[1, 30, 60, 60*24, ...]`, etc.;
- the number of time features: in our case, this will be `5`, as we'll add `HourOfDay`, `DayOfWeek`, ..., and `Age` features (see below).
Let's check the default lags provided by GluonTS for the given frequency ("hourly"):
```python
from gluonts.time_feature import get_lags_for_frequency
lags_sequence = get_lags_for_frequency(freq)
print(lags_sequence)
>>> [1, 2, 3, 4, 5, 6, 7, 23, 24, 25, 47, 48, 49, 71, 72, 73, 95, 96, 97, 119, 120,
121, 143, 144, 145, 167, 168, 169, 335, 336, 337, 503, 504, 505, 671, 672, 673, 719, 720, 721]
```
This means that this would look back up to 721 hours (~30 days) for each time step, as additional features. However, the resulting feature vector would end up being of size `len(lags_sequence)*num_of_variates`, which for our case is 34480! This is not going to work, so we will use our own sensible lags.
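A quick check of that number, using the variables defined earlier:
```python
# 40 lags, each a full 862-dimensional copy of the multivariate vector:
print(len(lags_sequence) * num_of_variates)  # 40 * 862 = 34480
```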
We also check the default time features that GluonTS provides us with:
```python
from gluonts.time_feature import time_features_from_frequency_str
time_features = time_features_from_frequency_str(freq)
print(time_features)
>>> [<function hour_of_day at 0x7f3809539240>, <function day_of_week at 0x7f3809539360>, <function day_of_month at 0x7f3809539480>, <function day_of_year at 0x7f38095395a0>]
```
In this case, there are four additional features, namely "hour of day", "day of week", "day of month" and "day of year". This means that for each time step, we'll add these features as scalar values. For example, consider the timestamp `2015-01-01 01:00:01`. The four additional features are:
```python
from pandas.core.arrays.period import period_array
timestamp = pd.Period("2015-01-01 01:00:01", freq=freq)
timestamp_as_index = pd.PeriodIndex(data=period_array([timestamp]))
additional_features = [
(time_feature.__name__, time_feature(timestamp_as_index))
for time_feature in time_features
]
print(dict(additional_features))
>>> {'hour_of_day': array([-0.45652174]), 'day_of_week': array([0.]), 'day_of_month': array([-0.5]), 'day_of_year': array([-0.5])}
```
Note that hours and days are encoded as values between `[-0.5, 0.5]` by GluonTS. For more information about `time_features`, please see [this](https://github.com/awslabs/gluonts/blob/dev/src/gluonts/time_feature/_base.py). Besides those 4 features, we'll also add an "age" feature, as we'll see later on in the data transformations.
We now have everything to define the model:
```python
from transformers import InformerConfig, InformerForPrediction
config = InformerConfig(
# in the multivariate setting, input_size is the number of variates in the time series per time step
input_size=num_of_variates,
# prediction length:
prediction_length=prediction_length,
# context length:
context_length=prediction_length * 2,
# lags value copied from 1 week before:
lags_sequence=[1, 24 * 7],
# we'll add 5 time features ("hour_of_day", ..., and "age"):
num_time_features=len(time_features) + 1,
# informer params:
dropout=0.1,
encoder_layers=6,
decoder_layers=4,
# project input from num_of_variates*len(lags_sequence)+num_time_features to:
d_model=64,
)
model = InformerForPrediction(config)
```
By default, the model uses a diagonal Student-t distribution (but this is [configurable](https://huggingface.co/docs/transformers/main/en/internal/time_series_utils)):
```python
model.config.distribution_output
>>> 'student_t'
```
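If a different emission family is preferred, it can be switched at configuration time. A minimal sketch (assuming `"normal"` is among the supported `distribution_output` values, alongside `"student_t"` and `"negative_binomial"`):
```python
# Illustrative: configure a diagonal Normal emission instead of Student-t
normal_config = InformerConfig(
    input_size=num_of_variates,
    prediction_length=prediction_length,
    distribution_output="normal",
)
normal_config.distribution_output
>>> 'normal'
```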
## Define Transformations
Next, we define the transformations for the data, in particular for the creation of the time features (based on the dataset or universal ones).
Again, we'll use the GluonTS library for this. We define a `Chain` of transformations (which is somewhat comparable to `torchvision.transforms.Compose` for images). It allows us to combine several transformations into a single pipeline.
```python
from gluonts.time_feature import TimeFeature
from gluonts.dataset.field_names import FieldName
from gluonts.transform import (
AddAgeFeature,
AddObservedValuesIndicator,
AddTimeFeatures,
AsNumpyArray,
Chain,
ExpectedNumInstanceSampler,
InstanceSplitter,
RemoveFields,
SelectFields,
SetField,
TestSplitSampler,
Transformation,
ValidationSplitSampler,
VstackFeatures,
RenameFields,
)
```
The transformations below are annotated with comments to explain what they do. At a high level, we will iterate over the individual time series of our dataset and add/remove fields or features:
```python
from transformers import PretrainedConfig
def create_transformation(freq: str, config: PretrainedConfig) -> Transformation:
# create list of fields to remove later
remove_field_names = []
if config.num_static_real_features == 0:
remove_field_names.append(FieldName.FEAT_STATIC_REAL)
if config.num_dynamic_real_features == 0:
remove_field_names.append(FieldName.FEAT_DYNAMIC_REAL)
if config.num_static_categorical_features == 0:
remove_field_names.append(FieldName.FEAT_STATIC_CAT)
return Chain(
# step 1: remove static/dynamic fields if not specified
[RemoveFields(field_names=remove_field_names)]
# step 2: convert the data to NumPy (potentially not needed)
+ (
[
AsNumpyArray(
field=FieldName.FEAT_STATIC_CAT,
expected_ndim=1,
dtype=int,
)
]
if config.num_static_categorical_features > 0
else []
)
+ (
[
AsNumpyArray(
field=FieldName.FEAT_STATIC_REAL,
expected_ndim=1,
)
]
if config.num_static_real_features > 0
else []
)
+ [
AsNumpyArray(
field=FieldName.TARGET,
# we expect an extra dim for the multivariate case:
expected_ndim=1 if config.input_size == 1 else 2,
),
# step 3: handle the NaN's by filling in the target with zero
# and return the mask (which is in the observed values)
# true for observed values, false for nan's
# the decoder uses this mask (no loss is incurred for unobserved values)
# see loss_weights inside the xxxForPrediction model
AddObservedValuesIndicator(
target_field=FieldName.TARGET,
output_field=FieldName.OBSERVED_VALUES,
),
# step 4: add temporal features based on freq of the dataset
# these serve as positional encodings
AddTimeFeatures(
start_field=FieldName.START,
target_field=FieldName.TARGET,
output_field=FieldName.FEAT_TIME,
time_features=time_features_from_frequency_str(freq),
pred_length=config.prediction_length,
),
# step 5: add another temporal feature (just a single number)
# tells the model where in the life the value of the time series is
# sort of running counter
AddAgeFeature(
target_field=FieldName.TARGET,
output_field=FieldName.FEAT_AGE,
pred_length=config.prediction_length,
log_scale=True,
),
# step 6: vertically stack all the temporal features into the key FEAT_TIME
VstackFeatures(
output_field=FieldName.FEAT_TIME,
input_fields=[FieldName.FEAT_TIME, FieldName.FEAT_AGE]
+ (
[FieldName.FEAT_DYNAMIC_REAL]
if config.num_dynamic_real_features > 0
else []
),
),
# step 7: rename to match HuggingFace names
RenameFields(
mapping={
FieldName.FEAT_STATIC_CAT: "static_categorical_features",
FieldName.FEAT_STATIC_REAL: "static_real_features",
FieldName.FEAT_TIME: "time_features",
FieldName.TARGET: "values",
FieldName.OBSERVED_VALUES: "observed_mask",
}
),
]
)
```
## Define `InstanceSplitter`
For training/validation/testing we next create an `InstanceSplitter`, which is used to sample windows from the dataset (since, remember, we can't pass the entire history of values to the model due to time and memory constraints).
The instance splitter samples random windows of size `context_length` and a subsequent window of size `prediction_length` from the data, and appends a `past_` or `future_` key to any temporal keys of the respective windows. This makes sure that the `values` will be split into `past_values` and subsequent `future_values` keys, which will serve as the encoder and decoder inputs respectively. The same happens for any keys in the `time_series_fields` argument:
```python
from gluonts.transform.sampler import InstanceSampler
from typing import Optional
def create_instance_splitter(
config: PretrainedConfig,
mode: str,
train_sampler: Optional[InstanceSampler] = None,
validation_sampler: Optional[InstanceSampler] = None,
) -> Transformation:
assert mode in ["train", "validation", "test"]
instance_sampler = {
"train": train_sampler
or ExpectedNumInstanceSampler(
num_instances=1.0, min_future=config.prediction_length
),
"validation": validation_sampler
or ValidationSplitSampler(min_future=config.prediction_length),
"test": TestSplitSampler(),
}[mode]
return InstanceSplitter(
target_field="values",
is_pad_field=FieldName.IS_PAD,
start_field=FieldName.START,
forecast_start_field=FieldName.FORECAST_START,
instance_sampler=instance_sampler,
past_length=config.context_length + max(config.lags_sequence),
future_length=config.prediction_length,
time_series_fields=["time_features", "observed_mask"],
)
```
## Create PyTorch DataLoaders
Next, it's time to create the PyTorch DataLoaders, which allow us to have batches of (input, output) pairs - or in other words (`past_values`, `future_values`).
```python
from typing import Iterable
import torch
from gluonts.itertools import Cached, Cyclic
from gluonts.dataset.loader import as_stacked_batches
def create_train_dataloader(
config: PretrainedConfig,
freq,
data,
batch_size: int,
num_batches_per_epoch: int,
shuffle_buffer_length: Optional[int] = None,
cache_data: bool = True,
**kwargs,
) -> Iterable:
PREDICTION_INPUT_NAMES = [
"past_time_features",
"past_values",
"past_observed_mask",
"future_time_features",
]
if config.num_static_categorical_features > 0:
PREDICTION_INPUT_NAMES.append("static_categorical_features")
if config.num_static_real_features > 0:
PREDICTION_INPUT_NAMES.append("static_real_features")
TRAINING_INPUT_NAMES = PREDICTION_INPUT_NAMES + [
"future_values",
"future_observed_mask",
]
transformation = create_transformation(freq, config)
transformed_data = transformation.apply(data, is_train=True)
if cache_data:
transformed_data = Cached(transformed_data)
# we initialize a Training instance
instance_splitter = create_instance_splitter(config, "train")
# the instance splitter will sample a window of
# context length + lags + prediction length (from all the possible transformed time series, 1 in our case)
# randomly from within the target time series and return an iterator.
stream = Cyclic(transformed_data).stream()
training_instances = instance_splitter.apply(
stream, is_train=True
)
return as_stacked_batches(
training_instances,
batch_size=batch_size,
shuffle_buffer_length=shuffle_buffer_length,
field_names=TRAINING_INPUT_NAMES,
output_type=torch.tensor,
num_batches_per_epoch=num_batches_per_epoch,
)
```
```python
def create_test_dataloader(
config: PretrainedConfig,
freq,
data,
batch_size: int,
**kwargs,
):
PREDICTION_INPUT_NAMES = [
"past_time_features",
"past_values",
"past_observed_mask",
"future_time_features",
]
if config.num_static_categorical_features > 0:
PREDICTION_INPUT_NAMES.append("static_categorical_features")
if config.num_static_real_features > 0:
PREDICTION_INPUT_NAMES.append("static_real_features")
transformation = create_transformation(freq, config)
transformed_data = transformation.apply(data, is_train=False)
# we create a Test Instance splitter which will sample the very last
# context window seen during training only for the encoder.
instance_sampler = create_instance_splitter(config, "test")
# we apply the transformations in test mode
testing_instances = instance_sampler.apply(transformed_data, is_train=False)
return as_stacked_batches(
testing_instances,
batch_size=batch_size,
output_type=torch.tensor,
field_names=PREDICTION_INPUT_NAMES,
)
```
```python
train_dataloader = create_train_dataloader(
config=config,
freq=freq,
data=multi_variate_train_dataset,
batch_size=256,
num_batches_per_epoch=100,
num_workers=2,
)
test_dataloader = create_test_dataloader(
config=config,
freq=freq,
data=multi_variate_test_dataset,
batch_size=32,
)
```
Let's check the first batch:
```python
batch = next(iter(train_dataloader))
for k, v in batch.items():
print(k, v.shape, v.type())
>>> past_time_features torch.Size([256, 264, 5]) torch.FloatTensor
past_values torch.Size([256, 264, 862]) torch.FloatTensor
past_observed_mask torch.Size([256, 264, 862]) torch.FloatTensor
future_time_features torch.Size([256, 48, 5]) torch.FloatTensor
future_values torch.Size([256, 48, 862]) torch.FloatTensor
future_observed_mask torch.Size([256, 48, 862]) torch.FloatTensor
```
As can be seen, we don't feed `input_ids` and `attention_mask` to the encoder (as would be the case for NLP models), but rather `past_values`, along with `past_observed_mask`, `past_time_features` and `static_real_features`.
The decoder inputs consist of `future_values`, `future_observed_mask` and `future_time_features`. The `future_values` can be seen as the equivalent of `decoder_input_ids` in NLP.
We refer to the [docs](https://huggingface.co/docs/transformers/main/en/model_doc/informer#transformers.InformerModel.forward.past_values) for a detailed explanation of each of them.
## Forward Pass
Let's perform a single forward pass with the batch we just created:
```python
# perform forward pass
outputs = model(
past_values=batch["past_values"],
past_time_features=batch["past_time_features"],
past_observed_mask=batch["past_observed_mask"],
static_categorical_features=batch["static_categorical_features"]
if config.num_static_categorical_features > 0
else None,
static_real_features=batch["static_real_features"]
if config.num_static_real_features > 0
else None,
future_values=batch["future_values"],
future_time_features=batch["future_time_features"],
future_observed_mask=batch["future_observed_mask"],
output_hidden_states=True,
)
```
```python
print("Loss:", outputs.loss.item())
>>> Loss: -1071.5718994140625
```
Note that the model is returning a loss. This is possible as the decoder automatically shifts the `future_values` one position to the right in order to obtain the labels. This allows computing a loss between the predicted values and the labels. The loss is the negative log-likelihood of the predicted distribution with respect to the ground truth values, and it tends towards negative infinity.
Also note that the decoder uses a causal mask so as not to look into the future, since the values it needs to predict are in the `future_values` tensor.
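Conceptually, the negative log-likelihood loss looks like the sketch below (a simplification assuming a diagonal Student-t output; the actual computation, including the `loss_weights` masking, lives inside `InformerForPrediction`):
```python
import torch
from torch.distributions import StudentT
def nll_loss(df, loc, scale, targets, observed_mask):
    # Negative log-likelihood per value, masked so that unobserved
    # (NaN-filled) positions incur no loss:
    distr = StudentT(df=df, loc=loc, scale=scale)
    nll = -distr.log_prob(targets) * observed_mask
    return nll.sum() / observed_mask.sum().clamp(min=1)
```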
## Train the Model
It's time to train the model! We'll use a standard PyTorch training loop.
We will use the 🤗 [Accelerate](https://huggingface.co/docs/accelerate/index) library here, which automatically places the model, optimizer and dataloader on the appropriate `device`.
```python
from accelerate import Accelerator
from torch.optim import AdamW
epochs = 25
loss_history = []
accelerator = Accelerator()
device = accelerator.device
model.to(device)
optimizer = AdamW(model.parameters(), lr=6e-4, betas=(0.9, 0.95), weight_decay=1e-1)
model, optimizer, train_dataloader = accelerator.prepare(
model,
optimizer,
train_dataloader,
)
model.train()
for epoch in range(epochs):
for idx, batch in enumerate(train_dataloader):
optimizer.zero_grad()
outputs = model(
static_categorical_features=batch["static_categorical_features"].to(device)
if config.num_static_categorical_features > 0
else None,
static_real_features=batch["static_real_features"].to(device)
if config.num_static_real_features > 0
else None,
past_time_features=batch["past_time_features"].to(device),
past_values=batch["past_values"].to(device),
future_time_features=batch["future_time_features"].to(device),
future_values=batch["future_values"].to(device),
past_observed_mask=batch["past_observed_mask"].to(device),
future_observed_mask=batch["future_observed_mask"].to(device),
)
loss = outputs.loss
# Backpropagation
accelerator.backward(loss)
optimizer.step()
loss_history.append(loss.item())
if idx % 100 == 0:
print(loss.item())
>>> -1081.978515625
...
-2877.723876953125
```
```python
# view training
loss_history = np.array(loss_history).reshape(-1)
x = range(loss_history.shape[0])
plt.figure(figsize=(10, 5))
plt.plot(x, loss_history, label="train")
plt.title("Loss", fontsize=15)
plt.legend(loc="upper right")
plt.xlabel("iteration")
plt.ylabel("nll")
plt.show()
```

## Inference
At inference time, it's recommended to use the `generate()` method for autoregressive generation, similar to NLP models.
Forecasting involves getting data from the test instance sampler, which will sample the very last `context_length`-sized window of values from each time series in the dataset, and pass it to the model. Note that we pass `future_time_features`, which are known ahead of time, to the decoder.
The model will autoregressively sample a certain number of values from the predicted distribution and pass them back to the decoder to return the prediction outputs:
```python
model.eval()
forecasts_ = []
for batch in test_dataloader:
outputs = model.generate(
static_categorical_features=batch["static_categorical_features"].to(device)
if config.num_static_categorical_features > 0
else None,
static_real_features=batch["static_real_features"].to(device)
if config.num_static_real_features > 0
else None,
past_time_features=batch["past_time_features"].to(device),
past_values=batch["past_values"].to(device),
future_time_features=batch["future_time_features"].to(device),
past_observed_mask=batch["past_observed_mask"].to(device),
)
forecasts_.append(outputs.sequences.cpu().numpy())
```
The model outputs a tensor of shape (`batch_size`, `number of samples`, `prediction length`, `input_size`).
In this case, we get `100` possible values for the next `48` hours for each of the `862` time series (for each example in the batch, which is of size `1` since we only have a single multivariate time series):
```python
forecasts_[0].shape
>>> (1, 100, 48, 862)
```
We'll stack them vertically to get forecasts for all time series in the test dataset (just in case there are more time series in the test set):
```python
forecasts = np.vstack(forecasts_)
print(forecasts.shape)
>>> (1, 100, 48, 862)
```
We can evaluate the resulting forecast with respect to the ground truth, based on the sample values present in the test set. For that, we'll use the 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index) library, which includes the [MASE](https://huggingface.co/spaces/evaluate-metric/mase) and [sMAPE](https://huggingface.co/spaces/evaluate-metric/smape) metrics.
We calculate both metrics for each time series variate in the dataset:
```python
from evaluate import load
from gluonts.time_feature import get_seasonality
mase_metric = load("evaluate-metric/mase")
smape_metric = load("evaluate-metric/smape")
forecast_median = np.median(forecasts, 1).squeeze(0).T
mase_metrics = []
smape_metrics = []
for item_id, ts in enumerate(test_dataset):
training_data = ts["target"][:-prediction_length]
ground_truth = ts["target"][-prediction_length:]
mase = mase_metric.compute(
predictions=forecast_median[item_id],
references=np.array(ground_truth),
training=np.array(training_data),
periodicity=get_seasonality(freq),
)
mase_metrics.append(mase["mase"])
smape = smape_metric.compute(
predictions=forecast_median[item_id],
references=np.array(ground_truth),
)
smape_metrics.append(smape["smape"])
```
```python
print(f"MASE: {np.mean(mase_metrics)}")
>>> MASE: 1.1913437728068093
print(f"sMAPE: {np.mean(smape_metrics)}")
>>> sMAPE: 0.5322665081607634
```
```python
plt.scatter(mase_metrics, smape_metrics, alpha=0.2)
plt.xlabel("MASE")
plt.ylabel("sMAPE")
plt.show()
```

To plot the prediction for any time series variate with respect to the ground truth, we define the following helper:
```python
import matplotlib.dates as mdates
def plot(ts_index, mv_index):
fig, ax = plt.subplots()
index = pd.period_range(
start=multi_variate_test_dataset[ts_index][FieldName.START],
periods=len(multi_variate_test_dataset[ts_index][FieldName.TARGET]),
freq=multi_variate_test_dataset[ts_index][FieldName.START].freq,
).to_timestamp()
ax.xaxis.set_minor_locator(mdates.HourLocator())
ax.plot(
index[-2 * prediction_length :],
multi_variate_test_dataset[ts_index]["target"][mv_index, -2 * prediction_length :],
label="actual",
)
ax.plot(
index[-prediction_length:],
forecasts[ts_index, ..., mv_index].mean(axis=0),
label="mean",
)
ax.fill_between(
index[-prediction_length:],
forecasts[ts_index, ..., mv_index].mean(0)
- forecasts[ts_index, ..., mv_index].std(axis=0),
forecasts[ts_index, ..., mv_index].mean(0)
+ forecasts[ts_index, ..., mv_index].std(axis=0),
alpha=0.2,
interpolate=True,
label="+/- 1-std",
)
ax.legend()
fig.autofmt_xdate()
```
For example:
```python
plot(0, 344)
```

## Conclusion
How do we compare against other models? The [Monash Time Series Repository](https://forecastingdata.org/#results) has a comparison table of test-set MASE metrics, which we can add to:
|Dataset | SES| Theta | TBATS| ETS | (DHR-)ARIMA| PR| CatBoost | FFNN | DeepAR | N-BEATS | WaveNet| Transformer (uni.) | **Informer (mv. our)**|
|:------------------:|:-----------------:|:--:|:--:|:--:|:--:|:--:|:--:|:---:|:---:|:--:|:--:|:--:|:--:|
|Traffic Hourly | 1.922 | 1.922 | 2.482 | 2.294| 2.535| 1.281| 1.571 |0.892| 0.825 |1.100| 1.066 | **0.821** | 1.191 |
As can be seen, and perhaps surprisingly to some, the multivariate forecasts are typically _worse_ than the univariate ones, the reason being the difficulty in estimating cross-series correlations/relationships. The additional variance added by the estimates often harms the resulting forecasts, or the model learns spurious correlations. We refer to [this paper](https://openreview.net/forum?id=GpW327gxLTF) for further reading. Multivariate models tend to work well when trained on a lot of data.
So the vanilla Transformer still performs best here! In the future, we hope to better benchmark these models in a central place to ease reproducing the results of several papers. Stay tuned for more!
## Resources
We recommend checking out the [Informer docs](https://huggingface.co/docs/transformers/main/en/model_doc/informer) and the [example notebook](https://github.com/huggingface/notebooks/blob/main/examples/multivariate_informer.ipynb) linked at the top of this blog post.
---
title: "Universal image segmentation with Mask2Former and OneFormer"
thumbnail: /blog/assets/127_mask2former/thumbnail.png
authors:
- user: nielsr
- user: shivi
- user: adirik
translators:
- user: hoi2022
---
# Universal image segmentation with Mask2Former and OneFormer
<script async defer src="https://unpkg.com/medium-zoom-element@0/dist/medium-zoom-element.min.js"></script>
**This post introduces two leading neural networks for image segmentation: Mask2Former and OneFormer. The models are now available in [`🤗 transformers`](https://huggingface.co/transformers), an open-source library that offers easy-to-use implementations of state-of-the-art models. Along the way, you'll also learn about the differences between the various forms of image segmentation.**
## Image segmentation
Image segmentation is the task of identifying different "segments" in an image, like people or cars. More technically, image segmentation is the task of grouping pixels with different semantics. Refer to the Hugging Face [task page](https://huggingface.co/tasks/image-segmentation) for a brief introduction.
Image segmentation can largely be split into 3 subtasks - instance, semantic and panoptic segmentation - with numerous methods and model architectures to perform each subtask.
- **Instance segmentation** is the task of identifying different "instances", like individual people, in an image. Instance segmentation is very similar to object detection, except that we'd like to output a set of binary segmentation masks, rather than bounding boxes, with corresponding class labels. Instances are oftentimes also called "objects" or "things". Note that individual instances may overlap.
- **Semantic segmentation** is the task of identifying different "semantic categories", like "person" or "sky", for each pixel in an image. Contrary to instance segmentation, no distinction is made between individual instances of a given semantic category; one just wants to come up with a mask for the "person" category, for instance, rather than for the individual people. Semantic categories which don't have individual instances, like "sky" or "grass", are oftentimes referred to as "stuff", to make the distinction with "things". Note that no overlap between semantic categories is possible, as each pixel belongs to exactly one category.
- **Panoptic segmentation**, introduced in 2018 by [Kirillov et al.](https://arxiv.org/abs/1801.00868), aims to unify instance and semantic segmentation by making models simply identify a set of "segments", each with a corresponding binary mask and class label. Segments can be both "things" and "stuff". Unlike in instance segmentation, no overlap between different segments is possible.
The figure below illustrates the difference between the 3 subtasks (taken from [this blog post](https://www.v7labs.com/blog/panoptic-segmentation-guide)).
<p align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/127_mask2former/semantic_vs_semantic_vs_panoptic.png" alt="drawing" width=500>
</p>
In recent years, researchers have come up with several architectures that were typically tailored to either instance, semantic or panoptic segmentation. Instance and panoptic segmentation were typically solved by outputting a set of binary masks + corresponding class labels per object instance (very similar to object detection, except that one outputs a binary mask instead of a bounding box per instance). This is oftentimes called "binary mask classification". Semantic segmentation, on the other hand, was typically solved by making models output a single "segmentation map" with one label per pixel, and was hence treated as a "per-pixel classification" problem. Popular semantic segmentation models adopting this paradigm are [SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer), on which we wrote an extensive [blog post](https://huggingface.co/blog/zh/fine-tune-segformer), and [UPerNet](https://huggingface.co/docs/transformers/main/en/model_doc/upernet).
## Universal image segmentation
Luckily, since around 2020, people started coming up with models that can solve all 3 tasks (instance, semantic and panoptic segmentation) with a unified architecture, using the same paradigm. This started with [DETR](https://huggingface.co/docs/transformers/model_doc/detr), which was the first model to solve panoptic segmentation using a "binary mask classification" paradigm, by treating "things" and "stuff" classes in a unified way. Its key innovation was to have a Transformer decoder come up with a set of binary masks + classes in a parallel way. This was then improved upon in the [MaskFormer](https://huggingface.co/docs/transformers/model_doc/maskformer) paper, which showed that the "binary mask classification" paradigm also works really well for semantic segmentation.
[Mask2Former](https://huggingface.co/docs/transformers/main/model_doc/mask2former) extends this to instance segmentation by further improving the neural network architecture. Hence, we've evolved from separate, task-specific architectures to what researchers now refer to as "universal image segmentation" architectures, capable of solving any image segmentation task. Interestingly, these universal models all adopt the "mask classification" paradigm, discarding the "per-pixel classification" paradigm entirely. A figure illustrating Mask2Former's architecture is depicted below (taken from the [original paper](https://arxiv.org/abs/2112.01527)).
<p align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/mask2former_architecture.jpg" alt="drawing" width=500>
</p>
In short, an image is first sent through a backbone - which, in the paper, is either [ResNet](https://huggingface.co/docs/transformers/model_doc/resnet) or [Swin Transformer](https://huggingface.co/docs/transformers/model_doc/swin) - to get a list of low-resolution feature maps. Next, these feature maps are enhanced into high-resolution features using a module called the pixel decoder. Finally, a Transformer decoder takes in a set of queries and transforms them, conditioned on the features from the previous step, into a set of binary mask and class predictions.
Note that Mask2Former still needs to be trained on each task separately to obtain state-of-the-art results. This has been improved by [OneFormer](https://arxiv.org/abs/2211.06220), which reaches state-of-the-art performance by training only on a panoptic version of the datasets. OneFormer adds a text encoder, conditioning the model on a text input ("instance", "semantic" or "panoptic"). This model is [available in 🤗 Transformers as of today](https://huggingface.co/docs/transformers/main/en/model_doc/oneformer). It's even more accurate than Mask2Former, but comes with slower latency due to the additional text encoder. See the figure below for an overview of OneFormer, which uses either Swin Transformer or the new [DiNAT](https://huggingface.co/docs/transformers/model_doc/dinat) model as backbone.
<p align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/oneformer_architecture.png" alt="drawing" width=500>
</p>
## Inference with Mask2Former and OneFormer in Transformers
Using Mask2Former and OneFormer is pretty straightforward, and very similar to their predecessor MaskFormer. Let's instantiate a Mask2Former model from the Hub trained on the COCO panoptic dataset, along with its processor. Note that no less than [30 checkpoints trained on various datasets](https://huggingface.co/models?other=mask2former) have been released.
```
from transformers import AutoImageProcessor, Mask2FormerForUniversalSegmentation
processor = AutoImageProcessor.from_pretrained("facebook/mask2former-swin-base-coco-panoptic")
model = Mask2FormerForUniversalSegmentation.from_pretrained("facebook/mask2former-swin-base-coco-panoptic")
```
Next, let's load a picture of cats from the COCO dataset, on which we'll perform inference.
```
import requests
from PIL import Image
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
image
```
<img src="../assets/78_annotated-diffusion/output_cats.jpeg" width="400" />
We prepare the image for the model using the image processor, and forward it through the model.
```
import torch

inputs = processor(image, return_tensors="pt")
with torch.no_grad():
outputs = model(**inputs)
```
The model outputs a set of binary masks and corresponding class logits. The raw outputs of Mask2Former can be post-processed using the processor to get the final instance, semantic or panoptic segmentation predictions:
```
prediction = processor.post_process_panoptic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
print(prediction.keys())
```
<div class="output stream stdout">
Output:
----------------------------------------------------------------------------------------------------
dict_keys(['segmentation', 'segments_info'])
</div>
In panoptic segmentation, the final `prediction` contains 2 things: a `segmentation` map of shape (height, width) where each value encodes the instance ID of a given pixel, as well as the corresponding `segments_info`, which contains additional information about the individual segments of the map (such as their class / category ID). Note that Mask2Former outputs binary mask proposals of shape (96, 96) for efficiency, and the `target_sizes` argument is used to resize the final mask to the original image size.
Let's visualize the results:
```
from collections import defaultdict
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib import cm
def draw_panoptic_segmentation(segmentation, segments_info):
# get the used color map
viridis = cm.get_cmap('viridis', torch.max(segmentation))
fig, ax = plt.subplots()
ax.imshow(segmentation)
instances_counter = defaultdict(int)
handles = []
# for each segment, draw its legend
for segment in segments_info:
segment_id = segment['id']
segment_label_id = segment['label_id']
segment_label = model.config.id2label[segment_label_id]
label = f"{segment_label}-{instances_counter[segment_label_id]}"
instances_counter[segment_label_id] += 1
color = viridis(segment_id)
handles.append(mpatches.Patch(color=color, label=label))
ax.legend(handles=handles)
draw_panoptic_segmentation(**prediction)
```
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/127_mask2former/cats_panoptic_result.png" width="400" />
Here, we can see that the model is capable of distinguishing the individual cats and remotes in the image. Semantic segmentation, in contrast, would just create a single mask for the "cat" category.
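For comparison, a semantic segmentation map can be obtained from the same raw outputs. The sketch below uses the processor's semantic post-processing method (part of the Mask2Former image processor API); note that a checkpoint fine-tuned for semantic segmentation would be needed for the most meaningful results:
```python
semantic_map = processor.post_process_semantic_segmentation(
    outputs, target_sizes=[image.size[::-1]]
)[0]
# `semantic_map` is a (height, width) tensor holding a semantic class id per pixel
print(semantic_map.shape)
```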
If you want to try out OneFormer, which has an almost identical API except that it additionally takes a text prompt as input, you can check out this [demo notebook](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/OneFormer).
## Fine-tuning Mask2Former and OneFormer in Transformers
Readers can refer to these [demo notebooks](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/MaskFormer/Fine-tuning) for fine-tuning Mask2Former or OneFormer on a custom instance, semantic or panoptic segmentation dataset. MaskFormer, Mask2Former and OneFormer share a similar API, so improving upon an existing MaskFormer script is easy and requires only minimal changes.
The demo notebooks use `MaskFormerForInstanceSegmentation` to load the model, whereas you'll have to switch to using either `Mask2FormerForUniversalSegmentation` or `OneFormerForUniversalSegmentation`. For image processing with Mask2Former, you'll also have to switch to using `Mask2FormerImageProcessor`. You can also load the image processor using the `AutoImageProcessor` class, which automatically takes care of loading the correct processor for your model. OneFormer, on the other hand, requires a `OneFormerProcessor`, since it prepares not only the images but also a text input for the model. A minimal sketch of this swap is shown below.
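The checkpoint and `id2label` mapping below are placeholders for your own dataset:
```python
from transformers import AutoImageProcessor, Mask2FormerForUniversalSegmentation

# hypothetical label mapping for a custom dataset
id2label = {0: "background", 1: "cat", 2: "remote"}

processor = AutoImageProcessor.from_pretrained("facebook/mask2former-swin-base-coco-panoptic")
model = Mask2FormerForUniversalSegmentation.from_pretrained(
    "facebook/mask2former-swin-base-coco-panoptic",
    id2label=id2label,
    label2id={v: k for k, v in id2label.items()},
    ignore_mismatched_sizes=True,  # re-initialize the classification head for the new labels
)
```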
# Conclusion
That's it! You now know the difference between instance, semantic and panoptic segmentation, as well as how to use "universal architectures" such as Mask2Former and OneFormer using [🤗 transformers](https://huggingface.co/transformers).
We hope you enjoyed this post and learned something. Feel free to let us know whether you are satisfied with the results when fine-tuning Mask2Former or OneFormer.
If you'd like to dive deeper, we recommend the following resources:
- Our demo notebooks for [MaskFormer](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/MaskFormer), [Mask2Former](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/Mask2Former) and [OneFormer](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/OneFormer), which give a broader overview of inference (including visualization) as well as fine-tuning.
- The live demo spaces for [Mask2Former](https://huggingface.co/spaces/shivi/mask2former-demo) and [OneFormer](https://huggingface.co/spaces/shi-labs/OneFormer) on the Hugging Face Hub, which let you quickly try out the models on sample inputs of your choice.
---
title: "开发 Diffusers 库的道德行为指南"
thumbnail: /blog/assets/ethics-diffusers/thumbnail.png
authors:
- user: giadap
translators:
- user: innovation64
- user: zhongdongy
proofreader: true
---
# Ethical guidelines for developing the Diffusers library
We are on a journey to make our libraries more responsible, one release at a time!
We're proud to announce that we have released our [ethical guidelines](https://huggingface.co/docs/diffusers/main/en/conceptual/ethical_guidelines) as part of the [Diffusers library documentation](https://huggingface.co/docs/diffusers/main/en/index).
Given diffusion models' real-world use cases and their potential negative impacts on society, these guidelines aim to steer the technical decisions that the Diffusers library maintainers make about community contributions. We wish to be more transparent about how we make decisions and, above all, we aim to clarify what values guide those decisions.
We see ethics as a continuous process that leverages guiding values, takes concrete actions, and keeps adapting to new conditions. For this reason, we are committed to adjusting our guidelines over time, following the evolution of the Diffusers project and continuously gathering feedback from the community to keep the guidelines relevant.
# Ethical guidelines
- **Transparency**: we are committed to being transparent in managing PRs, explaining our choices to users, and making technical decisions.
- **Consistency**: we are committed to guaranteeing our users the same level of attention in project management, keeping the project technically stable and consistent.
- **Simplicity**: with a desire to make the Diffusers library easy to use and exploit, we are committed to keeping the project's goals lean and coherent.
- **Accessibility**: the Diffusers project helps lower the entry bar for contributors, who can help run it even without technical expertise. Doing so makes research artifacts more accessible to the community.
- **Reproducibility**: we aim to be transparent about the reproducibility of upstream code, models, and datasets when made available through the Diffusers library.
- **Responsibility**: as a community and through teamwork, we hold a collective responsibility to our users by anticipating and mitigating this technology's potential risks and dangers.
# Safety features and mechanisms
In addition, we provide a non-exhaustive - and hopefully continuously expanding - list of safety features and mechanisms implemented by the Hugging Face team and the broader community.
- **[Community tab](https://huggingface.co/docs/hub/repositories-pull-requests-discussions)**: it enables the community to discuss and better collaborate on projects.
- **Tag feature**: authors of a repository can tag their content as being "Not For All Eyes".
- **Bias exploration and evaluation**: the Hugging Face team provides a [Space](https://huggingface.co/spaces/society-ethics/DiffusionBiasExplorer) to demonstrate the biases in Stable Diffusion and DALL-E interactively. In this sense, we support and encourage bias exploration and evaluation.
- **Encouraging safety in deployment**
- **[Safe Stable Diffusion](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion_safe)**: it mitigates the well-known issue that models like Stable Diffusion, trained on unfiltered, web-crawled datasets, tend to suffer from inappropriate degeneration. Related paper: [Safe Latent Diffusion: Mitigating Inappropriate Degeneration in Diffusion Models](https://arxiv.org/abs/2211.05105).
- **Staged releases on the Hub**: in particularly sensitive situations, access to some repositories should be restricted. A staged release is an intermediary step that allows the repository's authors to have more control over its use.
- **Licensing**: [OpenRAILs](https://huggingface.co/blog/open_rail), a new type of license, allow us to ensure free access while placing a set of restrictions that ensure more responsible use.
---
title: "是什么让对话代理有用?"
thumbnail: /blog/assets/dialog-agents/thumbnail.png
authors:
- user: nazneen
- user: natolambert
- user: VictorSanh
- user: ThomWolf
translators:
- user: MatrixYao
- user: zhongdongy
proofreader: true
---
# What Makes a Dialog Agent Useful?
## The techniques behind ChatGPT: RLHF, IFT, CoT, red teaming, and more
Recently, ChatGPT took the world by storm, and with its success, obscure acronyms such as RLHF, SFT, IFT and CoT have entered public discussion. What do these obscure acronyms mean, and why are they so important? We surveyed all the important papers on these topics to categorize the works, summarize takeaways from what has been done, and share what remains to be explored.
Let's start by looking at the landscape of language-model-based conversational agents. ChatGPT is not the first; in fact, many organizations published their language model dialog agents before OpenAI, including Meta's [BlenderBot](https://arxiv.org/abs/2208.03188), Google's [LaMDA](https://arxiv.org/abs/2201.08239), DeepMind's [Sparrow](https://arxiv.org/abs/2209.14375), and Anthropic's [Assistant](https://arxiv.org/abs/2204.05862) (*Anthropic's Claude was developed partly on the basis of Assistant*). Some groups have also announced their plans to build an open-source chatbot and publicly shared a roadmap (such as LAION's [Open Assistant](https://github.com/LAION-AI/Open-Assistant)); others surely are doing so and have not announced it yet.
The following table compares these AI chatbots based on details of public access, training data, model architecture, and evaluation directions. ChatGPT is not documented, so we instead use the details of InstructGPT, an instruction-fine-tuned model from OpenAI that is believed to have served as the foundation of ChatGPT.
| | LaMDA | BlenderBot 3 | Sparrow | ChatGPT / InstructGPT | Assistant |
| --- | --- | --- | --- | --- | --- |
| **Organization** | Google | Meta | DeepMind | OpenAI | Anthropic |
| **Publicly accessible** | No | Yes | No | Limited | No |
| **Size** | 137B | 175B | 70B | 175B | 52B |
| **Pre-trained<br>base model** | Unknown | OPT | Chinchilla | GPT-3.5 | Unknown |
| **Pre-training corpus size** (# tokens) | 2.81T | 180B | 1.4T | Unknown | 400B |
| **Model can<br>access the web** | ✔ | ✔ | ✔ | ✖️ | ✖️ |
| **Supervised<br>fine-tuning** | ✔ | ✔ | ✔ | ✔ | ✔ |
| **Fine-tuning<br>data size** | Quality: 6.4K<br>Safety: 8K<br>Groundedness: 4K<br>IR: 49K | 20 NLP datasets ranging in size from 18K to 1.2M | Unknown | 12.7K (for InstructGPT; likely much more for ChatGPT) | 150K + LM-generated data |
| **RLHF** | ✖️ | ✖️ | ✔ | ✔ | ✔ |
| **Hand-written rules for safety** | ✔ | ✖️ | ✔ | ✖️ | ✔ |
| **Evaluation criteria** | 1. Quality (sensibleness, specificity, interestingness)<br>2. Safety (bias)<br>3. Groundedness | 1. Quality (engagingness, use of knowledge)<br>2. Safety (toxicity, bias) | 1. Alignment (helpful, harmless, correct)<br>2. Evidence (from the web)<br>3. Rule violation<br>4. Bias and stereotypes<br>5. Trustworthiness | 1. Alignment (helpful, harmless, truthful)<br>2. Bias | 1. Alignment (helpful, harmless, honest)<br>2. Bias |
| **Crowdsourcing platform used for data labeling** | U.S.-based vendor | Amazon MTurk | Unknown | Upwork and Scale AI | Surge AI, Amazon MTurk, and Upwork |
We observe that, albeit there are many differences in the training data, models, and fine-tuning, there are also some commonalities. One common goal of all the above chatbots is **instruction following**, i.e., to follow user-specified instructions. For example, instructing ChatGPT to write a poem on fine-tuning.

### From predicting text to following instructions
Usually, the language-modeling objective of the base model is not sufficient for a model to learn to follow a user's direction in a helpful way. Model creators use **Instruction Fine-Tuning (IFT)**, which involves fine-tuning the base model on demonstrations of written directions across a very diverse set of tasks, in addition to classical NLP tasks such as sentiment analysis, text classification, and summarization. These instruction demonstrations are made up of three main components - the instruction, the inputs and the outputs. The inputs are optional; some tasks only require instructions, as in the open-ended generation example with ChatGPT above. An input and output, when present, form an **instance**. There can be multiple instances of inputs and outputs for a given instruction. See below for an example (taken from [Wang et al., '22]):

The training data for IFT is usually a collection of human-written instructions and instances bootstrapped using a language model. For bootstrapping, the LM is prompted in a few-shot setting with examples (as in the figure above) and instructed to generate new instructions, inputs, and outputs. In each round, the model is prompted with samples chosen from both the human-written and model-generated data. The amount of human and model contributions to creating a dataset spans a spectrum; see the figure below:

On one end of the spectrum are purely model-generated IFT datasets such as Unnatural Instructions ([Honovich et al., '22](https://arxiv.org/abs/2212.09689)); on the other end are large community efforts of hand-crafted instructions such as Super-natural instructions ([Wang et al., '22](https://arxiv.org/abs/2204.07705)). In between these two are works that use a small set of high-quality seed datasets followed by bootstrapping, such as Self-Instruct ([Wang et al., '22](https://arxiv.org/pdf/2212.10560.pdf)). Yet another way of collating a dataset for IFT is to take existing high-quality crowdsourced NLP datasets on various tasks (including prompting) and cast them as instructions using a unified schema or diverse templates. This line of work includes T0 ([Sanh et al., '22](https://arxiv.org/pdf/2110.08207.pdf)), the Natural instructions dataset ([Mishra et al., '22](https://arxiv.org/pdf/2104.08773.pdf)), the FLAN LM ([Wei et al., '22](https://arxiv.org/pdf/2109.01652.pdf)), and OPT-IML ([Iyer et al., '22](https://arxiv.org/pdf/2212.12017.pdf)).
### Safely following instructions
Instruction-fine-tuned LMs, however, may not always generate responses that are **helpful** and **safe**. Examples of this kind of behavior include being evasive by always giving an unhelpful response like "I'm sorry, I don't understand.", or generating an unsafe response to user inputs on a sensitive topic. To alleviate such behavior, model developers use **Supervised Fine-tuning (SFT)**, fine-tuning the base language model on high-quality human-annotated data for helpfulness and harmlessness. For example, see the table below (taken from Appendix F of the Sparrow paper).
SFT and IFT are very closely linked. Instruction fine-tuning can be seen as a subset of supervised fine-tuning. In the recent literature, the SFT phase has often been used for safety topics, rather than for improving instruction specificity, which is done after IFT. In the future, this taxonomy and delineation should mature into clearer use cases and methodology.

Google's LaMDA is also fine-tuned on a dialog dataset with safety annotations, based on a set of rules (Appendix A of the paper). These rules are usually pre-defined and developed by the model creators, and encompass a wide range of topics including harm, discrimination, and misinformation.
### Fine-tuning the models
On the other hand, OpenAI's InstructGPT, DeepMind's Sparrow, and Anthropic's Constitutional AI use **Reinforcement Learning From Human Feedback (RLHF)** to fine-tune the model, which uses annotations based on human preferences. In RLHF, a set of model responses is ranked based on human feedback (e.g., choosing a preferred text blurb over another). Next, a preference model is trained on those annotated responses to return a scalar reward for the RL optimizer. Finally, the dialog agent is trained via reinforcement learning to simulate the preference model. See our previous blog post, [Illustrating Reinforcement Learning from Human Feedback (RLHF)](https://huggingface.co/blog/zh/rlhf), for more details.
**Chain-of-thought (CoT)** prompting ([Wei et al., '22](https://arxiv.org/abs/2201.11903)) is a special case of instruction demonstration that generates output by eliciting step-by-step reasoning from the dialog agent. Models fine-tuned with CoT use human-annotated instruction datasets with step-by-step reasoning. This is the origin of the famous prompt, **[Let's think step by step](https://arxiv.org/abs/2205.11916)**. The example below is taken from [Chung et al., '22](https://arxiv.org/pdf/2210.11416.pdf); the orange highlight is the instruction, the pink shows the input and the output, and the blue is the CoT reasoning.

Models fine-tuned with CoT have been shown to perform much better on tasks involving commonsense, arithmetic, and symbolic reasoning, as described in [Chung et al., '22](https://arxiv.org/pdf/2210.11416.pdf).
CoT fine-tuning has also been shown to be very effective for harmlessness (sometimes doing better than RLHF), and the model doesn't get evasive by producing responses such as "Sorry, I can't answer this question" for sensitive prompts, as shown in the work of [Bai et al., '22](https://www.anthropic.com/constitutional.pdf). See Appendix D of their paper for more examples.

## Takeaways
1. You only need a very tiny fraction of data for instruction fine-tuning compared to the pre-training data (a few hundred orders of magnitude less);
2. Supervised fine-tuning with human annotations makes model outputs safer and more helpful;
3. CoT fine-tuning improves model performance on tasks requiring step-by-step thinking and makes the models less evasive on sensitive topics.
## Further work on dialog agents
This blog post summarizes much of the existing work on what makes a dialog agent useful. But there are still many open questions yet to be explored. We list some of them here.
1. How important is RL in learning from human feedback? Can we get the performance of RLHF by training on higher-quality data in IFT or SFT?
2. How does SFT + RLHF, as done in Sparrow, compare to using just SFT, as done in LaMDA, from a safety standpoint?
3. How much pre-training is necessary, given that we have IFT, SFT, CoT, and RLHF? What are the trade-offs? What are the best base models people should use (both publicly available ones and those that are not)?
4. Many of the models referenced in this post have been carefully engineered with [red-teaming](https://arxiv.org/abs/2209.07858), where engineers specifically search for failure modes and improve future training (prompts and methods) based on the problems that have been unveiled. How do we systematically record the effects of these methods and reproduce them?
P.s. Please let us know if you find any information in this blog post missing or incorrect.
****************Citation****************
`Rajani et al., "What Makes a Dialog Agent Useful?", Hugging Face Blog, 2023.`
BibTeX citation:
```
@article{rajani2023ift,
author = {Rajani, Nazneen and Lambert, Nathan and Sanh, Victor and Wolf, Thomas},
title = {What Makes a Dialog Agent Useful?},
journal = {Hugging Face Blog},
year = {2023},
note = {https://huggingface.co/blog/dialog-agents},
}
```
---
title: "通用辅助生成:使用任意辅助模型加速解码"
thumbnail: /blog/assets/optimum_intel/intel_thumbnail.png
authors:
- user: danielkorat
guest: true
org: Intel
- user: orenpereg
guest: true
org: Intel
- user: mber
guest: true
org: Intel
- user: jmamou
guest: true
org: Intel
- user: joaogante
- user: lewtun
- user: Nadav-Timor
guest: true
org: weizmannscience
- user: moshew
guest: true
org: Intel
translators:
- user: MatrixYao
- user: zhongdongy
proofreader: true
---
# Universal Assisted Generation: Faster Decoding with Any Assistant Model
<em>TL;DR</em>: Many LLMs such as `gemma-2-9b` and `Mixtral-8x22B-Instruct-v0.1` lack a much smaller counterpart to use with [assisted generation](https://huggingface.co/blog/zh/assisted-generation). In this blog post, we present _Universal Assisted Generation_, a technique developed jointly by Intel Labs and Hugging Face that lets an LLM be paired with **any** SLM for assisted generation. As a result, it is now possible to accelerate inference of _any_ decoder or [Mixture of Experts](https://huggingface.co/blog/zh/moe) model by **1.5x-2.0x** at almost zero cost 🔥🔥🔥! Let's dive in!
## Introduction
Nowadays, the strongest open-weight LLMs typically have billions to hundreds of billions of parameters (hello Llama-3.1-405B 👋), and deploying these beasts in production environments poses a range of engineering challenges. One such challenge is that generating text with these large models is slow, which has prompted the community to develop a wide range of techniques to accelerate the decoding process. Assisted generation, also known as [speculative decoding](https://arxiv.org/abs/2211.17192), is a very popular and practical approach for accelerating LLM inference without accuracy loss. In this blog post, we take a look at how assisted generation works and share our research to extend it towards _any_ of the [140,000 language models](https://huggingface.co/models?pipeline_tag=text-generation&sort=trending) on the Hugging Face Hub 🚀!
## Assisted generation
The core idea behind assisted generation involves a pair of models, referred to as the _target_ and _assistant_ models. The assistant model is a smaller version of the target model; for example, you can use [`Llama-3.2-1B`](https://huggingface.co/meta-llama/Llama-3.2-1B) as the assistant model for the larger [`Llama-3.1-70b`](https://huggingface.co/meta-llama/Llama-3.1-70b) target model. Generation is an iterative process: in each cycle, the assistant model first generates multiple tokens autoregressively, one at a time; the target model then verifies all of the assistant's tokens from that cycle in a single forward pass. The speedup comes from the fact that the target model can verify multiple tokens per forward pass, rather than generating just one token at a time. For a more detailed explanation, see the [original blog post](https://huggingface.co/blog/zh/assisted-generation). Combined with the recently introduced [Dynamic Speculation](https://huggingface.co/blog/dynamic_speculation_lookahead) strategy, assisted generation accelerates text generation by 1.5x to 3x, depending on the task and the models used.
However, assisted generation is not without limitations. The most notable one is that the target and assistant models must share the same tokenizer, which means they must come from the same model family. Many widely-used models lack a suitable "small and fast" counterpart, and are therefore excluded from such a significant latency reduction. In our experience, the assistant typically needs to be at least 50-100 times smaller than the target model for a meaningful speedup. To name a few examples, [`CodeLlama-13b`](https://huggingface.co/meta-llama/CodeLlama-13b-Instruct-hf) has no smaller model, and [`gemma-2-9b`](https://huggingface.co/google/gemma-2-9b) only has a `2b` variant, which is obviously not small or fast enough for a noticeable speedup.
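For reference, standard same-tokenizer assisted generation in 🤗 Transformers only requires passing `assistant_model` to `generate()`. A minimal sketch (the model pairing below is illustrative):
```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.1-70B")
model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.1-70B")
assistant_model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B")

inputs = tokenizer("Alice and Bob", return_tensors="pt")
outputs = model.generate(**inputs, assistant_model=assistant_model)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
```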
## Universal assisted generation
In order to mitigate this pain point, Intel Labs, together with Hugging Face, developed Universal Assisted Generation (UAG). UAG enables selecting any pair of target and assistant models regardless of their tokenizers. For example, `gemma-2-9b` can be used as the target model, with the tiny [`vicuna-68m`](https://huggingface.co/double7/vicuna-68m) as the assistant.
The main idea behind the technique is 2-way tokenizer translation: once the assistant model completes a generation iteration, its output token sequence is decoded into text, which is then encoded into a token sequence with the target model's tokenizer. After the verification step, the target token sequence is similarly converted back into the assistant's token format, and is then appended to the assistant model's context for the next iteration.
Since the assistant and target tokenizers use different vocabularies, the resulting discrepancies must be handled as well. To accurately re-encode the newly generated assistant tokens, a few additional preceding tokens of context must be prepended. The entire sequence is then re-encoded into the target token format and aligned with the most recently generated target tokens, to pinpoint the exact position where the newly generated tokens should be appended. This process is illustrated in the video below.
<!-- [GIF 1 -- FWD PASS] -->
<figure class="image table text-center m-0 w-full">
<video
style="max-width: 80%; margin: auto;"
autoplay loop muted playsinline
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/universal-assisted-generation/method-animation.mov"
></video>
</figure>
Token re-encoding from target tokens back to assistant tokens follows a process similar to the one shown in the video above. At this point, any mismatching tokens must be discarded from the assistant model's key-value (KV) cache to ensure data integrity. A minimal sketch of the core re-encoding idea follows.
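The sketch below is illustrative only; the actual implementation in Transformers additionally handles the context window, alignment, and KV-cache truncation described above:
```python
def reencode(token_ids, from_tokenizer, to_tokenizer):
    # Decode a token sequence with one tokenizer and re-encode it with the other.
    text = from_tokenizer.decode(token_ids, skip_special_tokens=True)
    return to_tokenizer(text, add_special_tokens=False).input_ids
```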
## Benchmarks
The table below shows the latency improvements observed for various target models when paired with assistant models that use different tokenizers.
| Target model | Assistant model | Dataset | Task | Speedup |
|----------------------|---------------------|---------------------------|---------------------------|---------------------------|
| `codellama/CodeLlama-13b-Instruct-hf` | `bigcode/tiny_starcoder_py` | [`openai/humaneval`](https://huggingface.co/datasets/openai/openai_humaneval) | code generation | **1.90x** |
| [`mistralai/Mixtral-8x22B-Instruct-v0.1`](https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1) | `double7/vicuna-68m` | [`cnn_dailymail`](https://huggingface.co/datasets/cnn_dailymail) | summarization | **1.52x** |
| `google/gemma-2-9b` | `double7/vicuna-68m` | [`cnn_dailymail`](https://huggingface.co/datasets/cnn_dailymail) | summarization | **1.76x** |
| `mistralai/Mixtral-8x22B-Instruct-v0.1` | `Qwen/Qwen2-0.5B-Instruct` | [`tau/scrolls`](https://huggingface.co/datasets/tau/scrolls) | long-context summarization | **1.78x** |
| `meta-llama/Llama-3.1-70B` | `Qwen/Qwen2-0.5B-Instruct` | [`tau/scrolls`](https://huggingface.co/datasets/tau/scrolls) | long-context summarization | **1.78x** |
| `microsoft/Phi-3-medium-128k-instruct` | `Qwen/Qwen2-0.5B-Instruct` | [`tau/scrolls`](https://huggingface.co/datasets/tau/scrolls) | long-context summarization | **1.91x** |
Note that, under the standard assisted decoding scheme, all the target models in the table above suffer from a lack of a small enough (under 1 billion parameters) model from the same family.
Each experiment above was conducted on 100 randomly selected examples. Experiments with the `Llama` and `Mixtral` target models used 2 and 4 A100 GPUs, respectively; all other experiments ran on a single A6000 GPU.
## Code
Universal Assisted Generation has been integrated into release [4.46.0](https://github.com/huggingface/transformers/releases/tag/v4.46.0) of 🤗 Transformers.
To use it, pass `tokenizer` and `assistant_tokenizer` to `generate()`:
```python
from transformers import AutoModelForCausalLM, AutoTokenizer
prompt = "Alice and Bob"
checkpoint = "google/gemma-2-9b"
assistant_checkpoint = "double7/vicuna-68m"
assistant_tokenizer = AutoTokenizer.from_pretrained(assistant_checkpoint)
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
inputs = tokenizer(prompt, return_tensors="pt")
model = AutoModelForCausalLM.from_pretrained(checkpoint)
assistant_model = AutoModelForCausalLM.from_pretrained(assistant_checkpoint)
outputs = model.generate(**inputs, assistant_model=assistant_model, tokenizer=tokenizer, assistant_tokenizer=assistant_tokenizer)
tokenizer.batch_decode(outputs, skip_special_tokens=True)
```
Which produces the following output:
```
['Alice and Bob are sitting in a bar. Alice is drinking a beer and Bob is drinking a']
```
## What's next?
While passing `do_sample=True` with the standard assisted generation scheme uses the speculative sampling algorithm ([Algorithm 1 from the paper](https://arxiv.org/pdf/2211.17192.pdf)), UAG currently supports multinomial sampling only. In multinomial sampling, if the target model doesn't sample the same token as the assistant model, that token is automatically rejected, which is not the case with speculative sampling. In practice, this means that UAG with `do_sample=True` will have a lower throughput compared to the standard, shared-tokenizer scheme. In the future, we plan to add support for speculative sampling with UAG.
In addition, we intend to integrate UAG into 🤗 Transformers pipelines, for more concise and streamlined usage.
## References
- [Fast Inference from Transformers via Speculative Decoding](https://arxiv.org/pdf/2211.17192)
- [Assisted Generation: a new direction toward low-latency text generation](https://huggingface.co/blog/zh/assisted-generation)
---
title: "大语言模型快速推理:在 Habana Gaudi2 上推理 BLOOMZ"
thumbnail: /blog/assets/habana-gaudi-2-bloom/thumbnail.png
authors:
- user: regisss
translators:
- user: MatrixYao
---
# Fast Inference on Large Language Models: BLOOMZ on Habana Gaudi2 Accelerator
This article will show you how to use 🤗 [Optimum Habana](https://huggingface.co/docs/optimum/habana/index), the bridge between [Habana® Gaudi®2](https://habana.ai/training/gaudi2/) and the 🤗 Transformers library. We designed and implemented a large-model inference benchmark demonstrating that, with Optimum Habana, Gaudi2 delivers **faster inference than any GPU currently available on the market**.
As models get bigger and bigger, deploying them into production to run inference has become increasingly challenging. Both hardware and software have seen a lot of innovation to address these challenges, so let's dive in to see how Optimum Habana efficiently overcomes them!
## BLOOMZ
[BLOOM](https://arxiv.org/abs/2211.05100) is a 176-billion-parameter autoregressive model trained for text generation. It can handle 46 different languages and 13 programming languages. Designed and trained as part of the [BigScience](https://bigscience.huggingface.co/) initiative, BLOOM is an open-science project in which a large number of researchers and engineers from all over the world participated. More recently, another model with the exact same architecture was released: [BLOOMZ](https://arxiv.org/abs/2211.01786), a fine-tuned version of BLOOM on several tasks, with better generalization and zero-shot[^1] capabilities.
Such large models raise new challenges in terms of memory and speed for both [training](https://huggingface.co/blog/bloom-megatron-deepspeed) and [inference](https://huggingface.co/blog/bloom-inference-optimization). Even in 16-bit precision, one instance requires 352 GB of memory! You will probably struggle to find a single device with that much memory today, but advanced hardware like Habana Gaudi2 makes low-latency inference with BLOOM and BLOOMZ models possible.
## Habana Gaudi2
[Gaudi2](https://habana.ai/training/gaudi2/) is the second-generation AI hardware accelerator designed by Habana Labs. A single server contains 8 accelerator devices (called Habana Processing Units, or HPUs) with 96GB of memory each, which provides room to host very large models. But hosting a model is not very interesting if the computation is slow. Fortunately, Gaudi2 shines on that front: it differs from GPUs in that its architecture enables the accelerator to perform General Matrix Multiplication (GeMM) and other operations in parallel, which speeds up deep learning workflows. These features make Gaudi2 a great candidate for LLM training and inference.
Habana's SDK, SynapseAI™, supports PyTorch and DeepSpeed for accelerating LLM training and inference. The [SynapseAI graph compiler](https://docs.habana.ai/en/latest/Gaudi_Overview/SynapseAI_Software_Suite.html#graph-compiler-and-runtime) optimizes the execution of the whole computation graph (through operator fusion, data layout management, parallelization, pipelining, memory management, graph-level optimizations, and more).
Moreover, support for [HPU graphs](https://docs.habana.ai/en/latest/PyTorch/Inference_on_PyTorch/Inference_Using_HPU_Graphs.html) and [DeepSpeed-inference](https://docs.habana.ai/en/latest/PyTorch/DeepSpeed/Inference_Using_DeepSpeed.html) was recently introduced in SynapseAI, and both are well-suited for latency-sensitive applications, as the benchmark results below show.
All these features are integrated into the 🤗 [Optimum Habana](https://github.com/huggingface/optimum-habana) library, so that deploying your model on Gaudi is very simple. Check out the [quickstart here](https://huggingface.co/docs/optimum/habana/quickstart).
If you would like to try Gaudi2, go to the [Intel Developer Cloud](https://www.intel.com/content/www/us/en/secure/developer/devcloud/cloud-launchpad.html) and follow [this guide](https://huggingface.co/blog/habana-gaudi-2-benchmark#how-to-get-access-to-gaudi2) to request access.
## Benchmarks
In this section, we provide an early benchmark of BLOOMZ on Gaudi2, first-generation Gaudi, and Nvidia A100 80GB. Although these devices have quite a lot of memory, the model is so large that a single device is not enough to contain a single instance of BLOOMZ. To solve this issue, we use [DeepSpeed](https://www.deepspeed.ai/), a deep learning optimization library that implements many memory and speed optimizations to accelerate model inference. In particular, we rely on [DeepSpeed-inference](https://arxiv.org/abs/2207.00032), which introduces several features such as [model (or pipeline) parallelism](https://huggingface.co/blog/bloom-megatron-deepspeed#pipeline-parallelism) to make the most of the available devices. For Gaudi2, we use [Habana's DeepSpeed fork](https://github.com/HabanaAI/deepspeed), which adds support for HPUs.
### Latency
We measured latencies (batch size = 1) for two different sizes of BLOOMZ, both with billions of parameters:
- [176 billion](https://huggingface.co/bigscience/bloomz) parameters
- [7 billion](https://huggingface.co/bigscience/bloomz-7b1) parameters
Runs were performed with DeepSpeed-inference in 16-bit precision on 8 devices, with the [key-value cache](https://huggingface.co/docs/transformers/v4.27.1/en/model_doc/bloom#transformers.BloomForCausalLM.forward.use_cache) optimization enabled. Note that while [CUDA graphs](https://developer.nvidia.com/blog/cuda-graphs/) are not currently compatible with model parallelism in DeepSpeed (DeepSpeed v0.8.2; see [here](https://github.com/microsoft/DeepSpeed/blob/v0.8.2/deepspeed/inference/engine.py#L158)), HPU graphs are supported in Habana's DeepSpeed fork. All benchmarks use [greedy search](https://huggingface.co/blog/how-to-generate#greedy-search) to generate 100 tokens. The input prompt is:
> "DeepSpeed is a machine learning framework"
which is tokenized into 7 tokens by BLOOM's tokenizer.
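You can verify the token count yourself with a quick, illustrative check:
```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom")
input_ids = tokenizer("DeepSpeed is a machine learning framework").input_ids
print(len(input_ids))  # 7, as stated above
```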
The results for inference latency are displayed in the table below (in *seconds*).
| Model | Number of devices | Gaudi2 latency (seconds) | A100-80GB latency (seconds) | First-gen Gaudi latency (seconds) |
|:-----------:|:-----------------:|:-------------------------:|:-----------------:|:----------------------------------:|
| BLOOMZ | 8 | 3.717 | 4.402 | / |
| BLOOMZ-7B | 8 | 0.737 | 2.417 | 3.029 |
| BLOOMZ-7B | 1 | 1.066 | 2.119 | 2.865 |
The Habana team recently introduced support for DeepSpeed-inference in SynapseAI 1.8, thereby quickly enabling inference for models with more than 100 billion parameters. **For the 176-billion-parameter model, Gaudi2 is 1.2x faster than the A100 80GB.** Results on smaller checkpoints are even more interesting: **Gaudi2 is 3x faster than the A100 for BLOOMZ-7B!** It is also interesting to note that even a model of BLOOMZ-7B's size can benefit from model parallelism.
We also ran these models on first-generation Gaudi. While it is slower than Gaudi2, it is interesting from a price perspective, as a DL1 instance on AWS costs approximately $13 per hour. The latency of BLOOMZ-7B on first-gen Gaudi is 2.865 seconds. Thus, **for the 7-billion-parameter model, first-gen Gaudi offers a better price/performance ratio than the A100 - saving more than $30 per hour!**
We expect the Habana team to keep optimizing the performance of these models in upcoming SynapseAI releases. For example, in our last benchmark we saw that [Gaudi2 performs Stable Diffusion inference 2.2x faster than the A100](https://huggingface.co/blog/habana-gaudi-2-benchmark#generating-images-from-text-with-stable-diffusion), and this has since been further improved to 2.37x with the latest optimizations provided by Habana. With a preview version of SynapseAI 1.9, we saw the inference latency of BLOOMZ-176B further drop to 3.5 seconds. We will update these numbers as new versions of SynapseAI are released and integrated into Optimum Habana.
### Running inference on a complete dataset
Our script enables the model to complete text on an entire dataset, sentence by sentence. This is useful if you want to try BLOOMZ inference on Gaudi2 with your own dataset.
Here is an example with the [*tldr_news*](https://huggingface.co/datasets/JulesBelveze/tldr_news/viewer/all/test) dataset. Each sample contains the headline and the content of an article (you can visualize the data on the Hugging Face Hub). We keep only the *content* column, truncate each sample to its first 16 tokens, and let the model generate the following 50 tokens. The first five samples look like:
```
Batch n°1
Input: ['Facebook has released a report that shows what content was most widely viewed by Americans between']
Output: ['Facebook has released a report that shows what content was most widely viewed by Americans between January and June of this year. The report, which is based on data from the company’s mobile advertising platform, shows that the most popular content on Facebook was news, followed by sports, entertainment, and politics. The report also shows that the most']
--------------------------------------------------------------------------------------------------
Batch n°2
Input: ['A quantum effect called superabsorption allows a collection of molecules to absorb light more']
Output: ['A quantum effect called superabsorption allows a collection of molecules to absorb light more strongly than the sum of the individual absorptions of the molecules. This effect is due to the coherent interaction of the molecules with the electromagnetic field. The superabsorption effect has been observed in a number of systems, including liquid crystals, liquid crystals in']
--------------------------------------------------------------------------------------------------
Batch n°3
Input: ['A SpaceX Starship rocket prototype has exploded during a pressure test. It was']
Output: ['A SpaceX Starship rocket prototype has exploded during a pressure test. It was the first time a Starship prototype had been tested in the air. The explosion occurred at the SpaceX facility in Boca Chica, Texas. The Starship prototype was being tested for its ability to withstand the pressure of flight. The explosion occurred at']
--------------------------------------------------------------------------------------------------
Batch n°4
Input: ['Scalene is a high-performance CPU and memory profiler for Python.']
Output: ['Scalene is a high-performance CPU and memory profiler for Python. It is designed to be a lightweight, portable, and easy-to-use profiler. Scalene is a Python package that can be installed on any platform that supports Python. Scalene is a lightweight, portable, and easy-to-use profiler']
--------------------------------------------------------------------------------------------------
Batch n°5
Input: ['With the rise of cheap small "Cube Satellites", startups are now']
Output: ['With the rise of cheap small "Cube Satellites", startups are now able to launch their own satellites for a fraction of the cost of a traditional launch. This has led to a proliferation of small satellites, which are now being used for a wide range of applications. The most common use of small satellites is for communications,']
```
In the next section, we show how to use the script we wrote to perform this benchmark, and how to apply it to any dataset you like from the Hugging Face Hub!
### How to reproduce these results?
The script used for benchmarking BLOOMZ on Gaudi2 and first-gen Gaudi is available [here](https://github.com/huggingface/optimum-habana/tree/main/examples/text-generation). Before running it, please make sure that the latest versions of SynapseAI and the Gaudi drivers are installed following [the instructions given by Habana](https://docs.habana.ai/en/latest/Installation_Guide/index.html).
Then, run the following:
```bash
git clone https://github.com/huggingface/optimum-habana.git
cd optimum-habana && pip install . && cd examples/text-generation
pip install git+https://github.com/HabanaAI/[email protected]
```
Finally, you can launch the script as follows:
```bash
python ../gaudi_spawn.py --use_deepspeed --world_size 8 run_generation.py --model_name_or_path bigscience/bloomz --use_hpu_graphs --use_kv_cache --max_new_tokens 100
```
For multi-node inference, you can follow [this guide](https://huggingface.co/docs/optimum/habana/usage_guides/multi_node_training) from the Optimum Habana documentation.
You can also load any dataset from the Hugging Face Hub to get prompts for text generation, using the argument `--dataset_name my_dataset_name`.
This benchmark was based on Transformers v4.27.1 and SynapseAI v1.8.0, with Optimum Habana installed from source.
For GPUs, [this repository](https://github.com/huggingface/transformers-bloom-inference/tree/main/bloom-inference-scripts) contains [the scripts that led to the results reproduced in this post](https://huggingface.co/blog/bloom-inference-pytorch-scripts). To use CUDA graphs, static input shapes are necessary, and this is not supported in 🤗 Transformers. You can use [this code](https://github.com/HabanaAI/Model-References/tree/1.8.0/PyTorch/nlp/bloom) written by the Habana team to enable CUDA graphs or HPU graphs.
## Conclusion
As shown in this article, **Habana Gaudi2 performs BLOOMZ inference faster than the Nvidia A100 80GB**. And there is no need to write complicated scripts, as 🤗 [Optimum Habana](https://huggingface.co/docs/optimum/habana/index) provides easy-to-use tools to run inference with billion-parameter models on HPUs. Upcoming releases of Habana's SynapseAI SDK are expected to improve performance further, so we will update this benchmark regularly as LLM inference optimizations on SynapseAI continue to advance. We are also looking forward to the performance benefits that FP8 inference will bring on Gaudi2.
We also presented the results achieved with first-generation Gaudi. For smaller models, it can perform on par with - or even better than - the A100, at almost a third of the price. It is a good alternative to GPUs for running inference with large models such as BLOOMZ.
If you are interested in accelerating your machine learning training and inference workflows using the latest AI hardware accelerators and software libraries, check out our [Expert Acceleration Program](https://huggingface.co/support). To learn more about Habana solutions, [read about our partnership and how to contact them here](https://huggingface.co/hardware/habana). To learn more about Hugging Face's efforts to make AI hardware accelerators easy to use, check out our [Hardware Partner Program](https://huggingface.co/hardware).
### Related topics
- [Faster Training and Inference: Habana Gaudi-2 vs Nvidia A100 80GB](https://huggingface.co/blog/habana-gaudi-2-benchmark)
- [Leverage DeepSpeed to Train Faster and Cheaper Large Scale Transformer Models with Hugging Face and Habana Labs Gaudi](https://developer.habana.ai/events/leverage-deepspeed-to-train-faster-and-cheaper-large-scale-transformer-models-with-hugging-face-and-habana-labs-gaudi/)
---
Thanks for reading! If you have any questions, feel free to contact me, either through [Github](https://github.com/huggingface/optimum-habana) or on the [forum](https://discuss.huggingface.co/c/optimum/59). You can also connect with me on [LinkedIn](https://www.linkedin.com/in/regispierrard/).
[^1]: "Zero-shot" refers to the ability of a model to complete a task on new or unseen input data, i.e., without having been provided any training examples of this kind of data. We provide the model with a prompt and a sequence of text that describes what we want our model to do, in natural language. Zero-shot classification excludes any examples of the desired task being completed. This differs from single- or few-shot classification, as those tasks include one or a few examples of the selected task.
---
title: "使用 Gradio 的“热重载”模式快速开发 AI 应用"
thumbnail: /blog/assets/gradio-reload/thumbnail_compressed.png
authors:
- user: freddyaboulton
translators:
- user: chenglu
---
# Rapidly Develop AI Apps with Gradio's Reload Mode
In this post, I will show you how to build a functional AI application quickly with Gradio's reload mode. But before we get to that, I want to explain what reload mode does and why Gradio implements its own auto-reloading logic. If you are already familiar with Gradio and want to get to building, please skip ahead to the third section, [Building a Document Analyzer Application](#building-a-document-analyzer-application).
## What does reload mode do?
To put it simply, it pulls in the latest changes from your source files without restarting the Gradio server. If that does not make sense yet, please keep reading.
Gradio is a popular Python library for creating interactive machine learning apps. Gradio developers declare their UI layout entirely in Python, and add some Python logic that triggers whenever a UI event happens. It's easy to learn if you know basic Python. Check out this [quickstart](https://www.gradio.app/guides/quickstart) if you are not familiar with Gradio yet.
Typically, a Gradio application is started like any other Python script: just run `python app.py` (the file with the Gradio code can be called anything). This starts an HTTP server that renders your app's UI and responds to user actions. If you want to make changes to your app, you stop the server (typically with `Ctrl + C`), edit your source file, and then re-run the script.
Having to stop and relaunch the server while developing introduces noticeable latency. It would be better if there were a way to pull in the latest code changes automatically, so you could test new ideas instantly.
That's exactly what Gradio's reload mode does! Simply run `gradio app.py` instead of `python app.py` to launch your app in reload mode!
## Why did Gradio build its own reloader?
Gradio applications are run with [uvicorn](https://www.uvicorn.org/), an asynchronous server for Python web frameworks. While uvicorn offers [auto-reloading](https://www.uvicorn.org/), Gradio implements its own reloading logic for the following reasons:
1. **Faster reloading**: Uvicorn's auto-reload is faster than restarting manually, but it is still too slow when developing a Gradio app. Gradio developers build their UI in Python, so they want to see the updated UI as soon as a change is made. This is standard in the Javascript ecosystem, but it's new to Python.
2. **Selective reloading**: Gradio applications are AI applications, which typically load an AI model into memory or connect to a datastore like a vector database. Restarting the server during development would mean reloading that model or reconnecting to that database, which introduces unnecessary latency between development cycles. To solve this issue, Gradio provides the `if gr.NO_RELOAD:` code block, which you can use to mark code that should not be reloaded. This is only possible because Gradio implements its own reloading logic.
I will now show you how to use Gradio's reload mode to quickly build an AI app.
## Building a Document Analyzer Application
Our application will allow users to upload pictures of documents and ask questions about them, and it will respond with answers in natural language. We will use the free [Hugging Face Inference API](https://huggingface.co/docs/huggingface_hub/guides/inference), so you should be able to follow along from your own computer - no GPU required!
To get started, let's type the following code into a file called `app.py` and launch it in reload mode with `gradio app.py`:
```python
import gradio as gr
demo = gr.Interface(lambda x: x, "text", "text")
if __name__ == "__main__":
demo.launch()
```
This creates the following simple UI.

Since I want to let users upload image files along with their questions, I will switch the input component to be a `gr.MultimodalTextbox()`. Notice how the UI updates instantly!

This UI works, but I think it would be better if the input textbox was below the output textbox. I can do this with the `Blocks` API. I'm also customizing the input textbox by adding a placeholder text to guide users.

Now that I'm satisfied with the UI, I will start implementing the logic of `chat_fn`.
Since I'll be using Hugging Face's Inference API, I will import the `InferenceClient` from the `huggingface_hub` package (it comes pre-installed with Gradio). I'll be using the [`impira/layoutlm-document-qa`](https://huggingface.co/impira/layoutlm-document-qa) model to answer the user's question, and then the [HuggingFaceH4/zephyr-7b-beta](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta) LLM to provide a natural-language response.
```python
from huggingface_hub import InferenceClient
client = InferenceClient()
def chat_fn(multimodal_message):
question = multimodal_message["text"]
image = multimodal_message["files"][0]
answer = client.document_question_answering(image=image, question=question, model="impira/layoutlm-document-qa")
answer = [{"answer": a.answer, "confidence": a.score} for a in answer]
user_message = {"role": "user", "content": f"Question: {question}, answer: {answer}"}
message = ""
for token in client.chat_completion(messages=[user_message],
max_tokens=200,
stream=True,
model="HuggingFaceH4/zephyr-7b-beta"):
if token.choices[0].finish_reason is not None:
continue
message += token.choices[0].delta.content
yield message
```
Here is a demo of our application!

I will also add a system message so that the LLM keeps its answers short and does not include the raw confidence scores. To avoid re-instantiating the `InferenceClient` on every change, I will place it inside a no-reload code block.
```python
if gr.NO_RELOAD:
client = InferenceClient()
system_message = {
"role": "system",
"content": """
You are a helpful assistant.
You will be given a question and a set of answers along with a confidence score between 0 and 1 for each answer.
You job is to turn this information into a short, coherent response.
For example:
Question: "Who is being invoiced?", answer: {"answer": "John Doe", "confidence": 0.98}
You should respond with something like:
With a high degree of confidence, I can say John Doe is being invoiced.
Question: "What is the invoice total?", answer: [{"answer": "154.08", "confidence": 0.75}, {"answer": "155", "confidence": 0.25}
You should respond with something like:
I believe the invoice total is $154.08 but it can also be $155.
"""}
```
Here is our demo now! The system message really helped keep the bot's answers short and free of long decimals.

As a final improvement, I will add a Markdown header to the page:

## Conclusion
In this post, I developed a working AI application with Gradio and the Hugging Face Inference API. When I started developing it, I didn't know what the final product would look like, so having the UI and server logic reload instantly let me iterate on different ideas very quickly. It took me about an hour to develop this entire app!
If you'd like to see the entire code for this demo, please check out this [Space application](https://huggingface.co/spaces/freddyaboulton/document-analyzer)!
---
title: "在 Google Cloud 上轻松部署开放大语言模型"
thumbnail: /blog/assets/173_gcp-partnership/thumbnail.jpg
authors:
- user: philschmid
- user: jeffboudier
translators:
- user: chenglu
---
# Easily Deploy Open LLMs on Google Cloud
Today, we are thrilled to announce the launch of **Deploy on Google Cloud**!
This is a new integration on the Hugging Face Hub that lets developers easily deploy thousands of foundation models to Google Cloud using Vertex AI or Google Kubernetes Engine (GKE).
Model Garden is a tool within Google Cloud's Vertex AI platform that lets users discover, customize, and deploy a wide variety of models from Google and its partners. Whether starting from a Hugging Face model page or a Vertex AI Model Garden page, developers can easily deploy open models as API endpoints within their own Google Cloud account with just a few clicks. We are also enabling the most popular open models on Hugging Face for inference, all powered by our production solution, [Text Generation Inference](https://github.com/huggingface/text-generation-inference/).
With Deploy on Google Cloud, developers can build production-ready generative AI applications directly within their own secure Google Cloud environment, without having to manage infrastructure and servers.
## Built for AI developers
This new experience expands upon the [strategic partnership we announced earlier this year](https://huggingface.co/blog/gcp-partnership) to simplify the access and deployment of open generative AI models for Google customers. One of the main problems developers and organizations face is the time and resources it takes to deploy models, and the need to make sure deployments are secure and reliable.
Deploy on Google Cloud offers an easy, managed solution with dedicated configurations and assets for Hugging Face models. Creating a production-ready endpoint on Google Cloud's Vertex AI takes just a few simple clicks.
"Vertex AI's Model Garden integration with the Hugging Face Hub makes it seamless to discover and deploy open models on Vertex AI and GKE, whether you start your journey on the Hub or directly in the Google Cloud console," says Wenming Ye, Product Manager at Google. "We can't wait to see what Google developers build with Hugging Face models."
## Deploying models from the Hugging Face Hub
Deploying Hugging Face models on Google Cloud is super easy. Below is a step-by-step guide on how to deploy [Zephyr Gemma](https://console.cloud.google.com/vertex-ai/publishers/HuggingFaceH4/model-garden/zephyr-7b-gemma-v0.1;hfSource=true;action=deploy?authuser=1). Starting today, all models with the [text-generation-inference](https://huggingface.co/models?pipeline_tag=text-generation-inference&sort=trending) tag are supported.

Simply open the "Deploy" menu and select "Google Cloud". This will bring you straight to the Google Cloud console, where you can deploy Zephyr Gemma on Vertex AI or GKE in one click.

Once you are in the Vertex AI Model Garden, you can select Vertex AI or GKE as your deployment environment. With Vertex AI, you can deploy the model with one click on "Deploy". With GKE, you can follow the provided instructions and templates to deploy the model on a new or existing Kubernetes cluster.
## Deploying models from Vertex AI Model Garden
Vertex AI Model Garden is where Google developers can find ready-to-use models for their generative AI projects. Starting today, Vertex Model Garden offers a new experience that allows developers to easily deploy the most popular open LLMs available on Hugging Face!
Within the Google Vertex AI Model Garden, you'll find a new "Deploy From Hugging Face" option, which lets you search for and deploy Hugging Face models directly within the Google Cloud console.

When you click on "Deploy From Hugging Face", a form will appear where you can quickly look up model IDs. Hundreds of the most popular open LLMs on Hugging Face are available, with ready-to-use, tested hardware configurations.

Once you find the model you want to deploy, select it, and Vertex AI will prefill all the required configurations to deploy your model to Vertex AI or GKE. You can even double-check that you selected the right model with the "View on Hugging Face" feature. If you're using a gated model, make sure to provide your Hugging Face access token so the model download can be authorized.

And that's it! Deploying a model like Zephyr Gemma directly from the Vertex AI Model Garden into your own Google Cloud account takes just a couple of clicks.
## This is just the beginning
We are excited to collaborate with Google Cloud to make AI more open and accessible. Deploying open models on Google Cloud has never been easier, whether you start from the Hugging Face Hub or within the Google Cloud console. And we're not going to stop there - stay tuned as we enable more experiences to build AI with open models on Google Cloud!
---
title: "欢迎 Stable Diffusion 3 加入 🧨 Diffusers"
thumbnail: /blog/assets/sd3/thumbnail.png
authors:
- user: dn6
- user: YiYiXu
- user: sayakpaul
- user: OzzyGT
- user: kashif
- user: multimodalart
translators:
- user: hugging-hoi2022
- user: zhongdongy
proofreader: true
---
# Welcoming Stable Diffusion 3 to 🧨 Diffusers
[Stable Diffusion 3](https://stability.ai/news/stable-diffusion-3-research-paper) (SD3), the latest model in Stability AI's Stable Diffusion family, is now available on the Hugging Face Hub and can be used with 🧨 Diffusers.
The model released today is Stable Diffusion 3 Medium, with 2B parameters.
As part of this release, we have provided:
1. Models on the Hub for download
2. Diffusers integration
3. SD3 Dreambooth and LoRA training scripts
## Table of contents
- [What's new with SD3](#whats-new-with-sd3)
- [Using SD3 with Diffusers](#using-sd3-with-diffusers)
- [Memory optimizations to run SD3 on a variety of hardware](#memory-optimizations-for-sd3)
- [Performance optimizations to speed things up](#performance-optimizations-for-sd3)
- [Fine-tuning and creating LoRAs for SD3](#fine-tuning-with-dreambooth-and-lora)
## What's new with SD3
### The model
As a latent diffusion model, SD3 comprises three different text encoders ([CLIP L/14](https://huggingface.co/openai/clip-vit-large-patch14), [OpenCLIP bigG/14](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k), and [T5-v1.1-XXL](https://huggingface.co/google/t5-v1_1-xxl)), a newly proposed Multimodal Diffusion Transformer (MMDiT) model, and a 16-channel AutoEncoder model (similar to the one used in [Stable Diffusion XL](https://arxiv.org/abs/2307.01952)).
SD3 processes text inputs and visual latents as sequences of embeddings. Positional encodings are added to 2x2 patches of the latents, which are then flattened into a patch embedding sequence. This sequence, along with the text embedding sequence, is fed into the MMDiT blocks, where both are projected to a common dimensionality, concatenated, and passed through a series of attention blocks and MLPs.
To account for the differences between the two modalities, the MMDiT blocks use two separate sets of weights to project the text and image sequences to a common dimensionality. The two sequences are joined before the attention operation. This design allows both representations to work in their own space, while still taking each other into account via the attention mechanism [1]. This two-way flow of information between text and image differs from previous text-to-image models, where text information was incorporated via cross-attention using a fixed text representation that did not vary across layers.
Additionally, SD3 also uses the pooled text embeddings from both of its CLIP models as part of the timestep conditioning. These pooled embeddings are concatenated, added to the timestep embedding, and then passed into each of the MMDiT blocks.
### Training with rectified flow matching
In addition to architectural innovations, SD3 uses a [conditional flow-matching](https://arxiv.org/html/2403.03206v1#S2) objective to train the model. In this approach, the forward noising process is defined as a [rectified flow](https://arxiv.org/html/2403.03206v1#S3) that connects the data and noise distributions on a straight line.
The sampling process also becomes simpler, and the model performs well even when the number of sampling steps is reduced. To that end, we have introduced a new scheduler (`FlowMatchEulerDiscreteScheduler`) that implements the rectified flow-matching formulation with Euler-method sampling steps, along with a resolution-dependent `shift` parameter. For higher resolutions, increasing the `shift` value handles noise scaling better. For the 2B model, we recommend setting `shift=3.0`.
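As an illustrative sketch, the `shift` value can be overridden when re-creating the scheduler from its config (this assumes an already-loaded `pipe`, as shown in the next section):
```python
from diffusers import FlowMatchEulerDiscreteScheduler

# assumes `pipe` is a loaded StableDiffusion3Pipeline
pipe.scheduler = FlowMatchEulerDiscreteScheduler.from_config(
    pipe.scheduler.config, shift=3.0
)
```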
To quickly try out SD3, you can use the Gradio application below:
<script type="module" src="https://gradio.s3-us-west-2.amazonaws.com/4.36.1/gradio.js"> </script>
<gradio-app theme_mode="light" space="stabilityai/stable-diffusion-3-medium"></gradio-app>
## 在 Diffusers 中使用 SD3
To use SD3 with Diffusers, first make sure you have upgraded to the latest Diffusers release:
```bash
pip install --upgrade diffusers
```
As the model is gated, before using it you first need to go to the [Stable Diffusion 3 Medium Hugging Face page](https://huggingface.co/stabilityai/stable-diffusion-3-medium-diffusers), fill in the form, and accept the gate. Once you are in, you need to log in to your Hugging Face account:
```bash
huggingface-cli login
```
The snippets below will download the 2B-parameter version of SD3 in `fp16` precision. This is the format used in the original checkpoint published by Stability AI, and it is the recommended precision for inference.
### Text-To-Image
```python
import torch
from diffusers import StableDiffusion3Pipeline
pipe = StableDiffusion3Pipeline.from_pretrained("stabilityai/stable-diffusion-3-medium-diffusers", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
image = pipe(
"A cat holding a sign that says hello world",
negative_prompt="",
num_inference_steps=28,
guidance_scale=7.0,
).images[0]
image
```

### Image-To-Image
```python
import torch
from diffusers import StableDiffusion3Img2ImgPipeline
from diffusers.utils import load_image
pipe = StableDiffusion3Img2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-3-medium-diffusers", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
prompt = "cat wizard, gandalf, lord of the rings, detailed, fantasy, cute, adorable, Pixar, Disney, 8k"
image = pipe(prompt, image=init_image).images[0]
image
```

You can check out the SD3 documentation [here](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/stable_diffusion_3).
## Memory optimizations for SD3
SD3 uses three text encoders, one of which is the very large [T5-XXL model](https://huggingface.co/google/t5-v1_1-xxl). This makes it challenging to run the model on GPUs with less than 24GB of VRAM, even when using `fp16` precision.
To account for this, the Diffusers integration ships with memory optimizations that allow SD3 to be run on a wider range of GPUs.
### Running inference with model offloading
The most commonly used memory optimization in Diffusers is model offloading, which lets you offload model components that are not currently needed to the CPU during inference in order to save GPU memory, at the cost of a slight increase in inference latency. Model offloading will only move a model component onto the GPU when it needs to participate in computation, keeping the remaining components on the CPU.
```python
import torch
from diffusers import StableDiffusion3Pipeline
pipe = StableDiffusion3Pipeline.from_pretrained("stabilityai/stable-diffusion-3-medium-diffusers", torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()
prompt = "smiling cartoon dog sits at a table, coffee mug on hand, as a room goes up in flames. “This is fine,” the dog assures himself."
image = pipe(prompt).images[0]
```
### Dropping the T5 text encoder during inference
[Removing the memory-intensive 4.7B-parameter T5-XXL text encoder](https://arxiv.org/html/2403.03206v1#S5.F9) during inference can significantly decrease the memory requirements with only a slight loss in performance.
```python
import torch
from diffusers import StableDiffusion3Pipeline
pipe = StableDiffusion3Pipeline.from_pretrained("stabilityai/stable-diffusion-3-medium-diffusers", text_encoder_3=None, tokenizer_3=None, torch_dtype=torch.float16)
pipe = pipe.to("cuda")
prompt = "smiling cartoon dog sits at a table, coffee mug on hand, as a room goes up in flames. “This is fine,” the dog assures himself."
image = pipe("").images[0]
```
### Using a quantized version of the T5-XXL model
You can also load the T5-XXL model in 8-bit quantized form using the `bitsandbytes` library, reducing the memory requirements further.
```python
import torch
from diffusers import StableDiffusion3Pipeline
from transformers import T5EncoderModel, BitsAndBytesConfig
# Make sure you have `bitsandbytes` installed.
quantization_config = BitsAndBytesConfig(load_in_8bit=True)
model_id = "stabilityai/stable-diffusion-3-medium-diffusers"
text_encoder = T5EncoderModel.from_pretrained(
model_id,
subfolder="text_encoder_3",
quantization_config=quantization_config,
)
pipe = StableDiffusion3Pipeline.from_pretrained(
model_id,
text_encoder_3=text_encoder,
device_map="balanced",
torch_dtype=torch.float16
)
```
_You can find the full snippet [here](https://gist.github.com/sayakpaul/82acb5976509851f2db1a83456e504f1)._
### Summary of memory optimizations
All benchmarking runs were conducted using the 2B version of the SD3 model on a single A100-80G GPU, with `fp16` precision and PyTorch 2.3.
For each inference call, we ran 10 iterations and recorded the average peak memory usage and the average time taken for 20 sampling steps.
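A minimal sketch of how such a peak-memory measurement can be taken (assuming a loaded `pipe` and a `prompt`, as in the snippets above):
```python
import torch

torch.cuda.reset_peak_memory_stats()
image = pipe(prompt, num_inference_steps=20).images[0]
print(f"peak memory: {torch.cuda.max_memory_allocated() / 1024**3:.2f} GB")
```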
## Performance optimizations for SD3
To speed up inference, we can use `torch.compile()` to obtain an optimized compute graph for the `vae` and `transformer` components.
```python
import torch
from diffusers import StableDiffusion3Pipeline
torch.set_float32_matmul_precision("high")
torch._inductor.config.conv_1x1_as_mm = True
torch._inductor.config.coordinate_descent_tuning = True
torch._inductor.config.epilogue_fusion = False
torch._inductor.config.coordinate_descent_check_all_directions = True
pipe = StableDiffusion3Pipeline.from_pretrained(
"stabilityai/stable-diffusion-3-medium-diffusers",
torch_dtype=torch.float16
).to("cuda")
pipe.set_progress_bar_config(disable=True)
pipe.transformer.to(memory_format=torch.channels_last)
pipe.vae.to(memory_format=torch.channels_last)
pipe.transformer = torch.compile(pipe.transformer, mode="max-autotune", fullgraph=True)
pipe.vae.decode = torch.compile(pipe.vae.decode, mode="max-autotune", fullgraph=True)
# Warm Up
prompt = "a photo of a cat holding a sign that says hello world",
for _ in range(3):
_ = pipe(prompt=prompt, generator=torch.manual_seed(1))
# Run Inference
image = pipe(prompt=prompt, generator=torch.manual_seed(1)).images[0]
image.save("sd3_hello_world.png")
```
_Refer to [this gist](https://gist.github.com/sayakpaul/508d89d7aad4f454900813da5d42ca97) for the full snippet._
We benchmarked the inference speed of SD3 with `torch.compile()` (on an A100-80G, with `fp16` inference and PyTorch 2.3). We ran 10 iterations per generation task, with 20 sampling steps each. The average inference time was **0.585 seconds**, _four times faster than eager execution_.
## Fine-tuning with DreamBooth and LoRA
Lastly, we provide a [DreamBooth](https://dreambooth.github.io/) fine-tuning script that uses [LoRA](https://huggingface.co/blog/lora). Besides fine-tuning the model, this script can also serve as a reference if you want to train a model with rectified flow. Another popular implementation of rectified flow is [minRF](https://github.com/cloneofsimo/minRF/).
To get started with the script, first make sure your setup is complete and a dataset is available (such as [this one](https://huggingface.co/datasets/diffusers/dog-example)). You'll need to install `peft` and `bitsandbytes`, and then you can launch the training script:
```bash
export MODEL_NAME="stabilityai/stable-diffusion-3-medium-diffusers"
export INSTANCE_DIR="dog"
export OUTPUT_DIR="dreambooth-sd3-lora"
accelerate launch train_dreambooth_lora_sd3.py \
--pretrained_model_name_or_path=${MODEL_NAME} \
--instance_data_dir=${INSTANCE_DIR} \
--output_dir=/raid/.cache/${OUTPUT_DIR} \
--mixed_precision="fp16" \
--instance_prompt="a photo of sks dog" \
--resolution=1024 \
--train_batch_size=1 \
--gradient_accumulation_steps=4 \
--learning_rate=1e-5 \
--report_to="wandb" \
--lr_scheduler="constant" \
--lr_warmup_steps=0 \
--max_train_steps=500 \
--weighting_scheme="logit_normal" \
--validation_prompt="A photo of sks dog in a bucket" \
--validation_epochs=25 \
--seed="0" \
--push_to_hub
```
## Acknowledgements
Thanks to the Stability AI team for making Stable Diffusion 3 happen, open-sourcing it, and providing us with early access, and to [Linoy](https://huggingface.co/linoyts) for helping with this post.
---
title: "面向生产的 LLM 优化"
thumbnail: /blog/assets/163_getting_most_out_of_llms/optimize_llm.png
authors:
- user: patrickvonplaten
translators:
- user: MatrixYao
- user: zhongdongy
proofreader: true
---
# Optimizing your LLM in production
<a target="_blank" href="https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/Getting_the_most_out_of_LLMs.ipynb">
<img src="https://colab.research.google.com/assets/colab-badge.svg" alt=" 在 Colab 中打开 "/>
</a>
_**Note**_: _This blog post is also available as a documentation page on [Transformers](https://huggingface.co/docs/transformers/llm_tutorial_optimization)._
Large Language Models (LLMs) such as GPT3/4, [Falcon](https://huggingface.co/tiiuae/falcon-40b), and [LLama](https://huggingface.co/meta-llama/Llama-2-70b-hf) are rapidly advancing in their ability to tackle human-centric tasks, establishing themselves as essential tools in modern knowledge-based industries.
Deploying these models in real-world tasks remains challenging, however:
- To exhibit near-human text understanding and generation capabilities, LLMs currently need to be composed of billions of parameters (see the discussions in [Kaplan et al.](https://arxiv.org/abs/2001.08361) and [Wei et al.](https://arxiv.org/abs/2206.07682)), which consequently amplifies the memory demands for inference.
- In many real-world tasks, LLMs need to be given extensive contextual information, which requires the model to be capable of handling very long input sequences during inference.
The crux of these challenges lies in augmenting the computational and memory capabilities of LLMs, especially when handling long input sequences.
In this blog post, we go over the most effective techniques to date for tackling the challenges of efficient LLM deployment:
1. **Lower precision**: Research has shown that operating at reduced numerical precision, namely 8-bit and 4-bit, can achieve computational advantages without a considerable decline in model performance.
2. **Flash Attention**: Flash Attention is a variation of the attention algorithm that not only provides a more memory-efficient approach, but also realizes increased efficiency through optimized GPU memory utilization.
3. **Architectural innovations**: Considering that LLMs are always deployed in the same way during inference - namely autoregressive text generation with a long input context - specialized model architectures have been proposed that allow for more efficient inference. The most important advancements here are [Alibi](https://arxiv.org/abs/2108.12409), [Rotary embeddings](https://arxiv.org/abs/2104.09864), [Multi-Query Attention (MQA)](https://arxiv.org/abs/1911.02150), and [Grouped Query Attention (GQA)](https://arxiv.org/abs/2305.13245).
Throughout this post, we offer an analysis of autoregressive generation from a tensor's perspective. We delve into the pros and cons of adopting lower precision, provide a comprehensive exploration of the latest attention algorithms, and discuss improved LLM architectures. Along the way, we run practical examples showcasing each of the improvements.
## 1. Harnessing the power of lower precision
The memory requirements of LLMs are best understood by viewing the LLM as a set of weight matrices and vectors, and the text inputs as a sequence of vectors. In the following, the term _weights_ refers to all of the model's weight matrices and vectors.
At the time of writing, LLMs consist of at least a couple of billion parameters. Each parameter is a decimal number, e.g. `4.5689`, which is usually stored in [float32](https://en.wikipedia.org/wiki/Single-precision_floating-point_format), [bfloat16](https://en.wikipedia.org/wiki/Bfloat16_floating-point_format), or [float16](https://en.wikipedia.org/wiki/Half-precision_floating-point_format) format. This allows us to easily compute the memory needed to load an LLM:
> _Loading the weights of a model with $X$ billion parameters requires roughly 4 * $X$ GB of VRAM in float32 precision_
Nowadays, models are rarely trained in full float32 precision; they are usually trained in bfloat16 precision or, less frequently, in float16 precision. The rule of thumb therefore becomes:
> _Loading the weights of a model with $X$ billion parameters requires roughly 2 * $X$ GB of VRAM in bfloat16/float16 precision_
For shorter text inputs (fewer than 1024 tokens), the memory requirement for inference is dominated by the memory required to load the weights. Therefore, for now, let's assume that the memory requirement for inference is equal to the memory requirement to load the model into GPU VRAM.
To give some examples of how much VRAM it roughly takes to load a model in bfloat16:
- **GPT3** requires 2 \* 175 GB = **350 GB** VRAM
- [**Bloom**](https://huggingface.co/bigscience/bloom) requires 2 \* 176 GB = **352 GB** VRAM
- [**Llama-2-70b**](https://huggingface.co/meta-llama/Llama-2-70b-hf) requires 2 \* 70 GB = **140 GB** VRAM
- [**Falcon-40b**](https://huggingface.co/tiiuae/falcon-40b) requires 2 \* 40 GB = **80 GB** VRAM
- [**MPT-30b**](https://huggingface.co/mosaicml/mpt-30b) requires 2 \* 30 GB = **60 GB** VRAM
- [**bigcode/starcoder**](https://huggingface.co/bigcode/starcoder) requires 2 \* 15.5 = **31 GB** VRAM
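The rule of thumb above is easy to script. Below is a tiny helper reproducing these back-of-the-envelope numbers (a sketch only; real allocations also include activations and framework overhead):
```python
def vram_gb(n_params_billions: float, bytes_per_param: int = 2) -> float:
    """Rule-of-thumb VRAM needed to load the weights (2 bytes/param for bf16/fp16)."""
    return n_params_billions * bytes_per_param

print(vram_gb(70))      # Llama-2-70b  -> ~140 GB
print(vram_gb(15.5))    # starcoder    -> ~31 GB
print(vram_gb(175, 4))  # GPT3 in fp32 -> ~700 GB
```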
As of this writing, the GPU chip with the most VRAM on the market is the A100 with 80GB. Most of the models listed above require more than 80GB just to be loaded and therefore necessarily need [tensor parallelism](https://huggingface.co/docs/transformers/perf_train_gpu_many#tensor-parallelism) and/or [pipeline parallelism](https://huggingface.co/docs/transformers/perf_train_gpu_many#naive-model-parallelism-vertical-and-pipeline-parallelism).
🤗 Transformers does not support tensor parallelism out of the box, as it requires the model architecture to be written in a specific way. If you're interested in writing models in a tensor-parallelism-friendly way, feel free to have a look at the [TGI (text generation inference) library](https://github.com/huggingface/text-generation-inference/tree/main/server/text_generation_server/models/custom_modeling).
🤗 Transformers supports naive pipeline parallelism out of the box. For this, simply load the model with `device_map="auto"`, which automatically places the different layers on the available GPUs, as explained [here](https://huggingface.co/docs/accelerate/v0.22.0/en/concept_guides/big_model_inference).
Note, however, that while very effective, this naive pipeline parallelism does not solve the problem of GPU idling. More advanced pipeline parallelism techniques are explained [here](https://huggingface.co/docs/transformers/v4.15.0/parallelism#naive-model-parallel-vertical-and-pipeline-parallel).
If you have access to an 8 x 80GB A100 node, you could load BLOOM as follows:
```bash
!pip install transformers accelerate bitsandbytes optimum
```
```python
# from transformers import AutoModelForCausalLM
# model = AutoModelForCausalLM.from_pretrained("bigscience/bloom", device_map="auto", pad_token_id=0)
```
By using `device_map="auto"`, the attention layers are distributed evenly across all available GPUs.
In this post, we use the [bigcode/octocoder](https://huggingface.co/bigcode/octocoder) model, as it can be run on a single 40GB A100 GPU. Note that all the memory and speed optimizations that follow apply equally to models that require model or tensor parallelism.
Since the model is loaded in bfloat16 precision, our rule of thumb above predicts that running inference with `"bigcode/octocoder"` should require around 31 GB of VRAM. Let's give it a try!
We first load the model and tokenizer and then pass both to Transformers' [pipeline](https://huggingface.co/docs/transformers/main_classes/pipelines).
```python
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import torch
model = AutoModelForCausalLM.from_pretrained("bigcode/octocoder", torch_dtype=torch.bfloat16, device_map="auto", pad_token_id=0)
tokenizer = AutoTokenizer.from_pretrained("bigcode/octocoder")
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
```
```python
prompt = "Question: Please write a function in Python that transforms bytes to Giga bytes.\n\nAnswer:"
result = pipe(prompt, max_new_tokens=60)[0]["generated_text"][len(prompt):]
result
```
**Output**:
```
Here is a Python function that transforms bytes to Giga bytes:\n\n```python\ndef bytes_to_giga_bytes(bytes):\n return bytes / 1024 / 1024 / 1024\n```\n\nThis function takes a single
```
Nice, we can now directly use this generated function to convert bytes into gigabytes.
```python
def bytes_to_giga_bytes(bytes):
return bytes / 1024 / 1024 / 1024
```
Let's call `torch.cuda.max_memory_allocated` to measure the peak GPU memory allocation.
```python
bytes_to_giga_bytes(torch.cuda.max_memory_allocated())
```
**Output**:
```bash
29.0260648727417
```
Pretty close to our back-of-the-envelope estimate! We can see the number is not exactly right, as going from bytes to kilobytes requires a multiplication by 1024 rather than 1000. The rule-of-thumb formula can therefore also be read as "at most $X$ GB".
Note that if we had tried to run the model in full float32 precision, a whopping 64GB of VRAM would have been required.
> Almost all models are trained in bfloat16 nowadays; there is no reason to run inference in float32 if [your GPU supports bfloat16](https://discuss.pytorch.org/t/bfloat16-native-support/117155/5). Float32 won't give better inference results than the precision the model was trained in.
If you are unsure about the precision of a model's weights on the Hub, you can always look at the `torch_dtype` entry of the model's config file, _e.g._ [here](https://huggingface.co/meta-llama/Llama-2-7b-hf/blob/6fdf2e60f86ff2481f2241aaee459f85b5b0bbb9/config.json#L21). It is recommended to set the precision to the same type as written in the config when loading with `from_pretrained(..., torch_dtype=...)`; the default for this argument is float32. This way you can run inference in `float16` or `bfloat16`.
Let's now define a `flush(...)` function that frees all allocated memory, so that we can accurately measure the peak allocated GPU memory.
```python
del pipe
del model
import gc
import torch
def flush():
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()
```
We can call it now for the next experiment.
```python
flush()
```
In recent versions of the accelerate library, you can also use a utility method called `release_memory()`.
```python
from accelerate.utils import release_memory
# ...
release_memory(model)
```
Now what if your GPU doesn't have 32GB of VRAM? It has been found that model weights can be quantized to 8-bit or 4-bit without a significant loss in output quality (see [Dettmers et al.](https://arxiv.org/abs/2208.07339)).
Models can even be quantized to 3 or 2 bits with an acceptable loss in output quality, as shown in the recent [GPTQ paper](https://arxiv.org/pdf/2210.17323.pdf) 🤯.
Generally speaking, quantization schemes aim at reducing the precision of the weights while keeping the model's inference results as accurate as possible (_i.e._ as close as possible to bfloat16).
Note that quantization works especially well for text generation, since all we care about is the _distribution of the most likely next tokens_ and not the exact _logit_ values of the next token. As long as the ordering of the next-token _logits_ stays the same, an `argmax` or `topk` operation will give identical results.
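To make this intuition concrete, here is a tiny sketch (with made-up logit values) showing that greedy decoding is unaffected as long as the quantization error does not change the ordering of the logits:
```python
import torch

logits = torch.tensor([4.1, 2.3, 7.8, 0.5])            # "exact" logits
quant_error = torch.tensor([0.03, -0.02, 0.01, 0.04])  # small perturbation
perturbed = logits + quant_error

# the ordering is preserved, so argmax/topk pick the same tokens
assert torch.equal(logits.argsort(), perturbed.argsort())
print(logits.argmax().item(), perturbed.argmax().item())  # 2 2
```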
There are many quantization techniques, which we won't discuss in detail here, but in general, all of them work as follows:
1. Quantize all weights to the target precision
2. Load the quantized weights and pass the input sequence of vectors in `bfloat16` precision
3. Dynamically dequantize the weights to `bfloat16` and perform the computation against the inputs in `bfloat16` precision
4. Quantize the weights back to the target precision after the computation. [Translator's note: this last step is generally not needed]
In a nutshell, this means that every _input-weight matrix multiplication_, with $X$ being the _input_, $W$ a weight matrix, and $Y$ the output:
$$ Y = X \times W $$
becomes:
$$ Y = X \times \text{dequantize}(W); \text{quantize}(W) $$
Dequantization and re-quantization are performed sequentially for all weight matrices as the input vectors run through the model's computation graph.
As a consequence, inference time is often **not** reduced when using quantized weights; it rather increases.
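As a rough illustration of steps 1-3, here is a minimal sketch of symmetric absmax int8 quantization with a single per-tensor scale. Real schemes such as LLM.int8() or GPTQ use per-channel or per-block scales and outlier handling, so treat this purely as an illustration:
```python
import torch

def quantize_int8(w: torch.Tensor):
    # step 1: quantize the weights to int8 with one absmax scale
    scale = w.abs().max() / 127.0
    q = (w / scale).round().clamp(-127, 127).to(torch.int8)
    return q, scale

def dequantize(q: torch.Tensor, scale: torch.Tensor) -> torch.Tensor:
    # step 3: dequantize back to bf16 right before the matmul
    return q.to(torch.bfloat16) * scale

w = torch.randn(1024, 1024, dtype=torch.bfloat16)  # a weight matrix
x = torch.randn(1, 1024, dtype=torch.bfloat16)     # step 2: bf16 inputs
y = x @ dequantize(*quantize_int8(w))              # compute happens in bf16
print((y - x @ w).abs().mean())                    # small quantization error
```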
Enough theory, let's give it a try! To quantize weights with Transformers, make sure the
[`bitsandbytes`](https://github.com/TimDettmers/bitsandbytes) library is installed.
```bash
# !pip install bitsandbytes
```
We can then load the model in 8-bit quantization by simply adding the `load_in_8bit=True` flag to `from_pretrained`.
```python
model = AutoModelForCausalLM.from_pretrained("bigcode/octocoder", load_in_8bit=True, pad_token_id=0)
```
Now let's run our example again and measure the memory usage.
```python
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
result = pipe(prompt, max_new_tokens=60)[0]["generated_text"][len(prompt):]
result
```
**Output**:
```
Here is a Python function that transforms bytes to Giga bytes:\n\n```python\ndef bytes_to_giga_bytes(bytes):\n return bytes / 1024 / 1024 / 1024\n```\n\nThis function takes a single
```
Nice, we get the same result as before, so no loss in accuracy! Let's look at how much memory was used this time.
```python
bytes_to_giga_bytes(torch.cuda.max_memory_allocated())
```
**Output**:
```
15.219234466552734
```
Significantly less! We're down to just a bit over 15GB, which makes it possible to run this model on consumer GPUs like a 4090.
We're seeing a very nice gain in memory efficiency with more or less no degradation of the model's output. At the same time, we also notice a slight slowdown in inference.
Let's delete the model and flush the memory again.
```python
del model
del pipe
```
```python
flush()
```
Next, let's see the peak GPU memory consumption of 4-bit quantization. The model can be quantized to 4-bit with the same API as before - this time by passing `load_in_4bit=True` instead of `load_in_8bit=True`.
```python
model = AutoModelForCausalLM.from_pretrained("bigcode/octocoder", load_in_4bit=True, low_cpu_mem_usage=True, pad_token_id=0)
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
result = pipe(prompt, max_new_tokens=60)[0]["generated_text"][len(prompt):]
result
```
**Output**:
```
Here is a Python function that transforms bytes to Giga bytes:\n\n```\ndef bytes_to_gigabytes(bytes):\n return bytes / 1024 / 1024 / 1024\n```\n\nThis function takes a single argument
```
The output is almost the same as before - only the word `python` is missing right before the code snippet. Let's see how much memory was required.
```python
bytes_to_giga_bytes(torch.cuda.max_memory_allocated())
```
**Output**:
```
9.543574333190918
```
Just 9.5GB! That's really not a lot for a model with more than 15 billion parameters.
While we see very little degradation in accuracy for our model here, 4-bit quantization can, in practice, often lead to results different from those of 8-bit quantization or full `bfloat16` inference. It is up to the user to try it out.
Also note that inference is again a bit slower here compared to 8-bit quantization, due to the more aggressive quantization method used for 4-bit, which makes $\text{quantize}$ and $\text{dequantize}$ take longer during inference.
```python
del model
del pipe
```
```python
flush()
```
Overall, we saw that running `OctoCoder` in 8-bit precision reduced the required GPU VRAM from 32GB to only 15GB, and running the model in 4-bit precision further reduced the required GPU VRAM to just a bit over 9GB.
4-bit quantization allows the model to be run on GPUs such as the RTX3090, V100, and T4, which are quite accessible to most people.
For more information on quantization, and on how to quantize models so that they require even less GPU VRAM than with 4-bit, we recommend looking into the [`AutoGPTQ`](https://huggingface.co/docs/transformers/main/en/main_classes/quantization#autogptq-integration%60) implementation.
> To summarize, it is important to remember that model quantization trades improved memory efficiency for accuracy and, in some cases, inference time.
If GPU memory is not a constraint for your use case, there is often no need to look into quantization. However, many GPUs simply can't run LLMs without it, and in that case, 4-bit and 8-bit quantization schemes are extremely useful tools.
For more in-detail usage information, we strongly recommend taking a look at the [Transformers quantization docs](https://huggingface.co/docs/transformers/main_classes/quantization#general-usage).
Next, let's look into how computational and memory efficiency can be improved with better algorithms and an improved model architecture.
# 2. Flash Attention: A Leap Forward in Speed
Today's top-performing LLMs share more or less the same fundamental architecture, consisting of feed-forward layers, activation layers, layer normalization layers, and, most crucially, self-attention layers.
Self-attention layers are central to Large Language Models (LLMs) in that they enable the model to understand the contextual relationships between input tokens. However, both the compute and the peak memory of self-attention layers grow _quadratically_ with the number of input tokens (also called _sequence length_, denoted $N$ below).
While this is not really noticeable for shorter input sequences (fewer than 1000 input tokens), it becomes a serious problem for longer ones (e.g. around 16000 input tokens).
Let's take a closer look. The formula to compute the output $\mathbf{O}$ of a self-attention layer for an input sequence $\mathbf{X}$ of length $N$ is:
$$ \textbf{O} = \text{Attn}(\mathbf{X}) = \mathbf{V} \times \text{Softmax}(\mathbf{QK}^T) \text{ , where } \mathbf{Q} = \mathbf{W}_q \mathbf{X}, \mathbf{V} = \mathbf{W}_v \mathbf{X}, \mathbf{K} = \mathbf{W}_k \mathbf{X} $$
$\mathbf{X} = (\mathbf{x}_1, \ldots, \mathbf{x}_N)$ is the input sequence to the attention layer. The projections $\mathbf{Q}$ and $\mathbf{K}$ are likewise sequences of $N$ vectors, so their product $\mathbf{QK}^T$ has $N^2$ entries.
LLMs usually have multiple attention heads, thus running multiple self-attention computations in parallel.
Assuming the LLM has 40 attention heads and runs in bfloat16 precision, the memory needed to store the $\mathbf{QK}^T$ matrices amounts to $40 \times 2 \times N^2$ bytes. For $N=1000$, only around 50 MB of VRAM is needed, but for $N=16000$ we would need 19 GB, and for $N=100,000$ we would need almost 1TB just to store the $\mathbf{QK}^T$ matrices.
In short, the default self-attention algorithm quickly becomes prohibitively memory-expensive as the input context grows.
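The formula above is straightforward to turn into a helper (a sketch; the figures follow the formula literally and match the order of magnitude of the rounded numbers quoted in the text):
```python
def qk_t_memory_gb(seq_len: int, n_heads: int = 40, bytes_per_el: int = 2) -> float:
    # n_heads * 2 bytes * N^2 entries, converted to GB
    return n_heads * bytes_per_el * seq_len**2 / 1024**3

for n in (16_000, 100_000):
    print(f"N={n}: {qk_t_memory_gb(n):.1f} GB")  # ~19 GB and ~745 GB
```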
As LLMs improve in text comprehension and generation, they are being applied to increasingly complex tasks. While models once handled the translation or summarization of a few sentences, they now manage entire pages of text, which requires the capability to process long input lengths.
How can we get rid of the exorbitant memory requirements for long text inputs? We need a new way of computing the self-attention mechanism that does away with the $\mathbf{QK}^T$ matrix. [Tri Dao et al.](https://arxiv.org/abs/2205.14135) developed exactly such an algorithm and called it **Flash Attention**.
In a nutshell, Flash Attention breaks the $\mathbf{V} \times \text{Softmax}(\mathbf{QK}^T)$ computation apart and instead computes the output in smaller chunks by iterating over multiple softmax computation steps:
$$ \textbf{O}_i \leftarrow s^a_{ij} \times \textbf{O}_i + s^b_{ij} \times \mathbf{V}_{j} \times \text{Softmax}(\mathbf{QK}^T_{i,j}) \text{, iterating over } i, j $$
where $s^a_{ij}$ and $s^b_{ij}$ are softmax normalization statistics that are updated for every $i$ and $j$ iteration.
Please note that the whole of Flash Attention is a bit more complex and is greatly simplified here. For a deeper understanding, have a look at the [Flash Attention paper](https://arxiv.org/pdf/2205.14135.pdf).
The key takeaway is:
> By keeping track of the softmax normalization statistics and using some smart mathematics, Flash Attention produces outputs that are **numerically identical** to those of the default self-attention layer, at a memory cost that grows only linearly with $N$.
Looking at the formula, one would intuitively say that Flash Attention must be much slower than the default attention formula, as more computation needs to be done. Indeed, Flash Attention requires more FLOPs than normal attention, since the softmax normalization statistics have to be constantly recomputed (see the [paper](https://arxiv.org/pdf/2205.14135.pdf) for more details if interested).
> However, Flash Attention is much faster at inference than default attention, because it significantly reduces the demands on the slower, high-bandwidth GPU memory (VRAM), making more use of the faster on-chip memory (SRAM).
Essentially, Flash Attention makes sure that all intermediate write and read operations can be done using the fast _on-chip_ SRAM, instead of having to access the slower VRAM to compute the output vector $\mathbf{O}$.
In practice, if Flash Attention is available, there is no reason not to use it. The algorithm produces mathematically identical outputs while being both faster and more memory-efficient.
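To get a feeling for the softmax-statistics trick, here is a tiny, self-contained sketch of the *online softmax* idea that Flash Attention builds on: the softmax of a stream of scores is computed in one pass, keeping only a running maximum and a running normalizer instead of the full score matrix. This is a pedagogical sketch, not the actual tiled GPU kernel:
```python
import math

def online_softmax(scores):
    m = float("-inf")  # running maximum
    s = 0.0            # running normalizer: sum of exp(x - m)
    for x in scores:
        m_new = max(m, x)
        # rescale the old normalizer to the new maximum, then add the new term
        s = s * math.exp(m - m_new) + math.exp(x - m_new)
        m = m_new
    return [math.exp(x - m) / s for x in scores]

print(online_softmax([1.0, 3.0, 2.0]))  # identical to a regular softmax
```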
Let's look at a practical example.
Our `OctoCoder` model now gets a significantly longer input prompt, which includes a so-called "system prompt". System prompts are used to steer the LLM toward the specific task of the user.
In the following, we use a system prompt that will make `OctoCoder` a better coding assistant.
```python
system_prompt = """Below are a series of dialogues between various people and an AI technical assistant.
The assistant tries to be helpful, polite, honest, sophisticated, emotionally aware, and humble but knowledgeable.
The assistant is happy to help with code questions and will do their best to understand exactly what is needed.
It also tries to avoid giving false or misleading information, and it caveats when it isn't entirely sure about the right answer.
That said, the assistant is practical really does its best, and doesn't let caution get too much in the way of being useful.
The Starcoder models are a series of 15.5B parameter models trained on 80+ programming languages from The Stack (v1.2) (excluding opt-out requests).
The model uses Multi Query Attention, was trained using the Fill-in-the-Middle objective, and with 8,192 tokens context window for a trillion tokens of heavily deduplicated data.
-----
Question: Write a function that takes two lists and returns a list that has alternating elements from each input list.
Answer: Sure. Here is a function that does that.
def alternating(list1, list2):
results = []
for i in range(len(list1)):
results.append(list1[i])
results.append(list2[i])
return results
Question: Can you write some test cases for this function?
Answer: Sure, here are some tests.
assert alternating([10, 20, 30], [1, 2, 3]) == [10, 1, 20, 2, 30, 3]
assert alternating([True, False], [4, 5]) == [True, 4, False, 5]
assert alternating([], []) == []
Question: Modify the function so that it returns all input elements when the lists have uneven length. The elements from the longer list should be at the end.
Answer: Here is the modified function.
def alternating(list1, list2):
results = []
for i in range(min(len(list1), len(list2))):
results.append(list1[i])
results.append(list2[i])
if len(list1) > len(list2):
results.extend(list1[i+1:])
else:
results.extend(list2[i+1:])
return results
-----
"""
```
For demonstration purposes, we duplicate the system prompt ten times so that the input length is long enough to observe Flash Attention's memory savings, and then append the original prompt `"Question: Please write a function in Python that transforms bytes to Giga bytes.\n\nAnswer: Here"`:
```python
long_prompt = 10 * system_prompt + prompt
```
We instantiate the model again in bfloat16 precision.
```python
model = AutoModelForCausalLM.from_pretrained("bigcode/octocoder", torch_dtype=torch.bfloat16, device_map="auto")
tokenizer = AutoTokenizer.from_pretrained("bigcode/octocoder")
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
```
Now we can run the model just like before, while measuring its peak GPU memory requirement and inference time.
```python
import time
start_time = time.time()
result = pipe(long_prompt, max_new_tokens=60)[0]["generated_text"][len(long_prompt):]
print(f"Generated in {time.time() - start_time} seconds.")
result
```
**Output**:
```
Generated in 10.96854019165039 seconds.
Sure. Here is a function that does that.\n\ndef bytes_to_giga(bytes):\n return bytes / 1024 / 1024 / 1024\n\nAnswer: Sure. Here is a function that does that.\n\ndef
```
We're getting the same output as before, but this time the model repeats the answer multiple times until the 60-token cut-off is reached. This is not surprising, as we repeated the system prompt ten times for demonstration purposes, thereby cueing the model to repeat itself.
**Note** that the system prompt should not be repeated ten times in real-world applications - one time is enough!
Let's measure the peak GPU memory requirement.
```python
bytes_to_giga_bytes(torch.cuda.max_memory_allocated())
```
**Output**:
```bash
37.668193340301514
```
As we can see, the peak GPU memory requirement is now significantly higher than before, largely due to the longer input sequence. The whole generation also takes a little over a minute now.
We call `flush()` to free GPU memory for our next experiment.
```python
flush()
```
For comparison, let's run the same function, but with Flash Attention enabled.
To do so, we convert the model to [BetterTransformers](https://huggingface.co/docs/optimum/bettertransformer/overview), which in turn enables PyTorch's [SDPA self-attention](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention), whose implementation is based on Flash Attention.
```python
model.to_bettertransformer()
```
Now we run the exact same code snippet as before, and under the hood, Transformers will make use of Flash Attention.
```py
start_time = time.time()
with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
result = pipe(long_prompt, max_new_tokens=60)[0]["generated_text"][len(long_prompt):]
print(f"Generated in {time.time() - start_time} seconds.")
result
```
**Output**:
```
Generated in 3.0211617946624756 seconds.
Sure. Here is a function that does that.\n\ndef bytes_to_giga(bytes):\n return bytes / 1024 / 1024 / 1024\n\nAnswer: Sure. Here is a function that does that.\n\ndef
```
We get the exact same result as before, but thanks to Flash Attention, we can observe a very significant speed-up.
Let's measure the memory consumption one last time.
```python
bytes_to_giga_bytes(torch.cuda.max_memory_allocated())
```
**Output**:
```
32.617331981658936
```
And we're almost back to our original 29GB of peak GPU memory from the beginning.
We can observe that we use only roughly 100MB more GPU memory when passing a very long input sequence with Flash Attention, compared to the short input sequence from the start.
```py
flush()
```
## 3. The Science Behind LLM Architectures: Strategic Choices for Long Text Inputs and Chat
So far, we have looked into improving computational and memory efficiency by:
- Casting the weights to a lower precision format
- Replacing the self-attention algorithm with a more memory- and compute-efficient version
Let's now look into how we can change the architecture of an LLM so that it becomes more efficient for tasks that require long text inputs, _e.g._:
- Retrieval-augmented question answering
- Summarization
- Chat
Note that _chat_ applications not only require the LLM to handle long text inputs, but also require it to efficiently handle multi-turn dialogue between user and assistant (as in ChatGPT).
Once trained, the fundamental architecture of an LLM is difficult to change, so it is important to think about the LLM's tasks beforehand and optimize the model's architecture accordingly. There are two components of the model architecture that quickly become memory and/or performance bottlenecks for long input sequences:
- The positional embeddings
- The key-value cache
Let's go over each in more detail.
### 3.1 Improving positional embeddings of LLMs
The self-attention mechanism puts each token in relation to every other token. As an example, the $\text{Softmax}(\mathbf{QK}^T)$ matrix of the text input sequence _"Hello", "I", "love", "you"_ could look as follows:

Each word token is assigned a probability mass at which it attends to every other word token. For example, the word _"love"_ attends to the word _"Hello"_ with 0.05%, to _"I"_ with 0.3%, and to itself with 0.65%.
An LLM based on self-attention, but without positional embeddings, would have great difficulty understanding the relative positions of the text inputs to each other. This is because the probability scores computed via $\mathbf{QK}^T$ relate the tokens to each other in $O(1)$ computations, regardless of their relative positional distance. For an LLM without positional embeddings, each token therefore appears equidistant to all other tokens, and distinguishing _"Hello I love you"_ from _"You love I hello"_ would be rather difficult.
For the LLM to understand sentence order, an additional _cue_ is needed, usually injected in the form of _positional encodings_ (also called _positional embeddings_). Positional encodings encode the position of each token into numbers that the LLM can leverage to better understand sentence order.
The [_Attention Is All You Need_](https://arxiv.org/abs/1706.03762) paper introduced sinusoidal positional embeddings $\mathbf{P} = \mathbf{p}_1, \ldots, \mathbf{p}_N$, where each vector $\mathbf{p}_i$ is a sinusoidal function of its position $i$. The positional encodings are then simply added to the input sequence vectors, $\mathbf{\hat{X}} = \mathbf{\hat{x}}_1, \ldots, \mathbf{\hat{x}}_N = \mathbf{x}_1 + \mathbf{p}_1, \ldots, \mathbf{x}_N + \mathbf{p}_N$, thereby cueing the model to better learn sentence order.
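For illustration, here is a minimal sketch of such sinusoidal positional embeddings, following the sin/cos formulation of the paper (`dim` is assumed to be even):
```python
import torch

def sinusoidal_positions(n_pos: int, dim: int) -> torch.Tensor:
    pos = torch.arange(n_pos, dtype=torch.float32).unsqueeze(1)  # (n_pos, 1)
    k = torch.arange(0, dim, 2, dtype=torch.float32)             # even indices
    angles = pos / torch.pow(10_000.0, k / dim)                  # (n_pos, dim/2)
    pe = torch.zeros(n_pos, dim)
    pe[:, 0::2] = torch.sin(angles)
    pe[:, 1::2] = torch.cos(angles)
    return pe

# the embeddings are simply added to the inputs: x_hat = x + sinusoidal_positions(N, d)
```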
Instead of using fixed positional embeddings, other works (such as [Devlin et al.](https://arxiv.org/abs/1810.04805)) used learned positional encodings, for which the positional embeddings $\mathbf{P}$ are learned during training.
Sinusoidal and learned positional embeddings used to be the predominant methods for encoding sentence order into LLMs, but a couple of problems with them were found:
1. Sinusoidal and learned positional embeddings are both absolute positional embeddings, _i.e._ they generate a unique embedding for each position id ($0, \ldots, N$). As shown by [Huang et al.](https://arxiv.org/abs/2009.13658) and [Su et al.](https://arxiv.org/abs/2104.09864), absolute positional embeddings lead to poor LLM performance on long text inputs. For long text inputs, it is advantageous if the model learns the relative positional distances between input tokens instead of their absolute positions.
2. When using learned positional embeddings, the LLM has to be trained on a fixed input length $N$, which makes it awkward to extrapolate to input lengths longer than those seen during training.
Recently, relative positional embeddings that can tackle the problems above have become more popular, most notably:
- [Rotary Position Embedding (RoPE)](https://arxiv.org/abs/2104.09864)
- [ALiBi](https://arxiv.org/abs/2108.12409)
Both _RoPE_ and _ALiBi_ argue that it is best to cue the LLM about sentence order directly in the self-attention algorithm, as that is where tokens are put into relation with each other. More specifically, sentence order should be cued by modifying the $\mathbf{QK}^T$ computation.
Without going into too much detail, _RoPE_ notes that positional information can be encoded into `query-key` pairs, _e.g._ $\mathbf{q}_i$ and $\mathbf{x}_j$, by rotating each vector by angles $\theta \times i$ and $\theta \times j$ respectively, with $i, j$ describing each vector's position in the sentence:
$$ \mathbf{\hat{q}}_i^T \mathbf{\hat{x}}_j = \mathbf{q}_i^T \mathbf{R}_{\theta, i - j} \mathbf{x}_j. $$
$\mathbf{R}_{\theta, i - j}$ denotes a rotation matrix. $\theta$ is _not_ learned, but set to a pre-defined value that depends on the maximum input sequence length during training.
> By doing so, the probability score between $\mathbf{q}_i$ and $\mathbf{q}_j$ is only affected if $i \ne j$, and it depends solely on the relative distance $i - j$, regardless of the vectors' specific positions $i$ and $j$.
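A minimal sketch of the rotation itself, using the interleaved-pair formulation (actual implementations, e.g. in Llama, differ in layout details; this is only meant to illustrate the idea):
```python
import torch

def rope(x: torch.Tensor, base: float = 10_000.0) -> torch.Tensor:
    # x: (seq_len, dim) with even dim; rotate each pair (x[2k], x[2k+1])
    # by an angle theta_k * position, with theta_k = base^(-2k/dim)
    seq_len, dim = x.shape
    pos = torch.arange(seq_len, dtype=torch.float32).unsqueeze(1)
    theta = base ** (-torch.arange(0, dim, 2, dtype=torch.float32) / dim)
    angles = pos * theta                       # (seq_len, dim // 2)
    cos, sin = angles.cos(), angles.sin()
    x1, x2 = x[:, 0::2], x[:, 1::2]
    out = torch.empty_like(x)
    out[:, 0::2] = x1 * cos - x2 * sin
    out[:, 1::2] = x1 * sin + x2 * cos
    return out

# applying rope to both queries and keys makes q_i . k_j depend only on i - j
```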
_RoPE_ is used in multiple of today's most important LLMs, such as:
- [**Falcon**](https://huggingface.co/tiiuae/falcon-40b)
- [**Llama**](https://arxiv.org/abs/2302.13971)
- [**PaLM**](https://arxiv.org/pdf/2204.02311.pdf)
As an alternative, _ALiBi_ proposes a much simpler relative position encoding scheme. Right before the softmax computation, each entry of the $\mathbf{QK}^T$ matrix has the relative distance between the corresponding two vectors, scaled by a pre-defined coefficient `m`, subtracted from it.

As shown in the [ALiBi](https://arxiv.org/abs/2108.12409) paper, this simple relative positional encoding allows the model to retain high performance even on very long text input sequences.
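A minimal sketch of the ALiBi bias matrix for causal attention follows; the per-head slopes follow the geometric sequence from the paper for head counts that are powers of two, which is an illustrative assumption here:
```python
import torch

def alibi_bias(seq_len: int, n_heads: int) -> torch.Tensor:
    pos = torch.arange(seq_len)
    # entry (i, j) holds j - i, i.e. a non-positive relative distance for j <= i
    distance = (pos.unsqueeze(0) - pos.unsqueeze(1)).tril().float()
    # per-head slopes m: 2^(-8/n), 2^(-16/n), ...
    slopes = 2.0 ** (-8.0 * torch.arange(1, n_heads + 1).float() / n_heads)
    return slopes.view(n_heads, 1, 1) * distance  # added to QK^T before softmax

print(alibi_bias(4, 2)[0])  # head 0: zeros on the diagonal, more negative further back
```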
_ALiBi_ is likewise used in several of today's most important LLMs, such as:
- [**MPT**](https://huggingface.co/mosaicml/mpt-30b)
- [**BLOOM**](https://huggingface.co/bigscience/bloom)
Both _RoPE_ and _ALiBi_ position encodings can extrapolate to input lengths not seen during training, and it has been shown that, out of the box, extrapolation works much better for _ALiBi_ than for _RoPE_. For ALiBi, one simply increases the values of the lower triangular position matrix to match the length of the input sequence. For _RoPE_, keeping the $\theta$ used during training leads to poor results when the inputs are much longer than those seen in training, _c.f._ [Press et al.](https://arxiv.org/abs/2108.12409). However, the community has found a couple of effective tricks for adapting $\theta$, allowing _RoPE_ positional embeddings to work well for extrapolated input sequences as well (see [here](https://github.com/huggingface/transformers/pull/24653)).
> Both RoPE and ALiBi are relative positional embeddings whose parameters are _not_ learned during training, and both are based on the following intuitions:
- Positional cues about the text inputs should be given directly to the $QK^T$ matrix of the self-attention layer
- The LLM should be incentivized to learn a constant _relative_ distance positional encoding
- The further apart input tokens are from each other, the lower their `query-key` probability should be. Both RoPE and ALiBi lower the `query-key` probability of tokens far away from each other: RoPE by increasing the angle between the `query-key` vectors, thereby decreasing their vector product, and ALiBi by subtracting larger numbers from the vector product.
In conclusion, LLMs intended for tasks that require handling long text inputs are better trained with relative positional embeddings such as RoPE and ALiBi. Also note that an LLM with RoPE or ALiBi that has been trained only on a fixed length of, say, $N_1 = 2048$ can still handle text inputs much longer than $N_1$, like $N_2 = 8192 > N_1$, by extrapolating the positional embeddings at inference time.
### 3.2 The key-value cache
Auto-regressive text generation with LLMs works by iteratively feeding in an input sequence, sampling the next token, appending that token to the input sequence, and continuing to do so until the LLM produces a token that signifies the end of generation.
Please have a look at [Transformer's text generation tutorial](https://huggingface.co/docs/transformers/llm_tutorial#generate-text) for a more visual explanation of how auto-regressive generation works.
Below, we run a quick code snippet to show how auto-regression works in practice. We simply take the most likely next token via `torch.argmax`.
```python
input_ids = tokenizer(prompt, return_tensors="pt")["input_ids"].to("cuda")
for _ in range(5):
next_logits = model(input_ids)["logits"][:, -1:]
next_token_id = torch.argmax(next_logits,dim=-1)
input_ids = torch.cat([input_ids, next_token_id], dim=-1)
print("shape of input_ids", input_ids.shape)
generated_text = tokenizer.batch_decode(input_ids[:, -5:])
generated_text
```
**Output**:
```
shape of input_ids torch.Size([1, 21])
shape of input_ids torch.Size([1, 22])
shape of input_ids torch.Size([1, 23])
shape of input_ids torch.Size([1, 24])
shape of input_ids torch.Size([1, 25])
[' Here is a Python function']
```
As we can see, each time we append the just-sampled token to the input text.
With very few exceptions, LLMs are trained with the causal language modeling objective, so we don't need the upper triangle of the attention matrix - this is why in the two diagrams above the upper-triangular attention scores are left blank (_i.e._ have 0 probability). For a quick recap of causal language modeling, you can refer to the [_Illustrated Self-Attention_](https://jalammar.github.io/illustrated-gpt2/#part-2-illustrated-self-attention) blog post.
As a consequence, the current token _never_ depends on the tokens after it. More specifically, the $\mathbf{q}_i$ vector is never put in relation with any key or value vectors with $j > i$; instead, $\mathbf{q}_i$ only attends to the previous key-value vectors $\mathbf{k}_{m < i}, \mathbf{v}_{m < i} \text{, } m \in {0, \ldots, i - 1}$. To save unnecessary computation, one can therefore cache each layer's key-value vectors for all previous steps.
Below, we tell the LLM to make use of the key-value cache at each forward pass to reduce computation. In Transformers, we do so by passing the `use_cache` flag to the `forward` call, after which only the current token needs to be passed to `forward` at each step.
```python
past_key_values = None # past_key_values is the key-value cache
generated_tokens = []
next_token_id = tokenizer(prompt, return_tensors="pt")["input_ids"].to("cuda")
for _ in range(5):
next_logits, past_key_values = model(next_token_id, past_key_values=past_key_values, use_cache=True).to_tuple()
next_logits = next_logits[:, -1:]
next_token_id = torch.argmax(next_logits, dim=-1)
print("shape of input_ids", input_ids.shape)
print("length of key-value cache", len(past_key_values[0][0])) # past_key_values are of shape [num_layers, 0 for k, 1 for v, batch_size, length, hidden_dim]
generated_tokens.append(next_token_id.item())
generated_text = tokenizer.batch_decode(generated_tokens)
generated_text
```
**Output**:
```
shape of input_ids torch.Size([1, 20])
length of key-value cache 20
shape of input_ids torch.Size([1, 20])
length of key-value cache 21
shape of input_ids torch.Size([1, 20])
length of key-value cache 22
shape of input_ids torch.Size([1, 20])
length of key-value cache 23
shape of input_ids torch.Size([1, 20])
length of key-value cache 24
[' Here', ' is', ' a', ' Python', ' function']
```
As we can see, when using the key-value cache the text input does _not_ grow in length; it remains a single input vector each time. The length of the key-value cache, on the other hand, increases by one at every decoding step.
> Making use of the key-value cache means that $\mathbf{QK}^T$ is essentially reduced to $\mathbf{q}_c\mathbf{K}^T$, where $\mathbf{q}_c$ is the query projection of the current input token, which is _always_ just a single vector.
Using the key-value cache has two advantages:
- A significant gain in computational efficiency compared to computing the full $\mathbf{QK}^T$ matrix, which translates into faster inference
- The maximum required memory grows only linearly with the number of generated tokens, not quadratically.
> One should _always_ make use of the key-value cache, as it produces identical results and gives a significant speed-up for longer input sequences. Transformers enables the key-value cache by default when using the text pipeline or the [`generate` method](https://huggingface.co/docs/transformers/main_classes/text_generation).
Note that the key-value cache is especially useful for applications such as chat, where multiple passes of auto-regressive decoding are required. Let's look at an example.
```
User: How many people live in France?
Assistant: Roughly 75 million people live in France
User: And how many are in Germany?
Assistant: Germany has ca. 81 million inhabitants
```
In this chat example, the LLM runs auto-regressive decoding twice:
1. The first time, the key-value cache is empty and the input prompt is `"User: How many people live in France?"`. The model auto-regressively generates the text `"Roughly 75 million people live in France"`, growing the key-value cache at every decoding step.
2. The second time, the input prompt is `"User: How many people live in France? \n Assistant: Roughly 75 million people live in France \n User: And how many in Germany?"`. Thanks to the cache, all key-value vectors of the first two sentences have already been computed, so the input prompt only consists of `"User: And how many in Germany?"`. While processing this shortened input prompt, the newly computed key-value vectors are appended to the key-value cache of the first pass. The assistant then auto-regressively generates the answer to the second question, `"Germany has ca. 81 million inhabitants"`, using the key-value cache, which holds the encoded vector sequence of `"User: How many people live in France? \n Assistant: Roughly 75 million people live in France \n User: And how many are in Germany?"`.
Two things should be noted here:
1. Keeping all the context is crucial for LLMs deployed in chat, so that the LLM understands everything said earlier in the conversation. In the example above, the LLM needs to understand that the user refers to the population when asking `"And how many are in Germany"`.
2. The key-value cache is extremely useful for chat, as it allows us to continuously grow the encoded chat history instead of re-encoding the chat history from scratch (as we would have to do with an encoder-decoder architecture).
There is, however, a catch. While the peak memory required for the $\mathbf{QK}^T$ matrix is significantly reduced, holding the key-value cache in memory can become very expensive for long input sequences or multi-turn chat. Remember that the key-value cache needs to store the key-value vectors of all previous input vectors $\mathbf{x}_i \text{, for } i \in {1, \ldots, c - 1}$, for all layers and all attention heads.
Let's compute the number of float values that need to be stored in the key-value cache for the LLM `bigcode/octocoder` that we used before. This number equals two times the sequence length times the number of attention heads times the attention-head dimension times the number of layers. For a hypothetical input sequence length of 16000, we get:
```python
config = model.config
2 * 16_000 * config.n_layer * config.n_head * config.n_embd // config.n_head
```
**Output**:
```
7864320000
```
Roughly 8 billion float values! Storing 8 billion floats in `float16` precision requires around 15 GB of VRAM - about half as much as the model weights themselves!
Researchers have proposed two methods that significantly reduce the memory cost of the key-value cache:
1. [Multi-Query-Attention (MQA)](https://arxiv.org/abs/1911.02150)
Multi-Query-Attention was proposed by Noam Shazeer in the paper _Fast Transformer Decoding: One Write-Head is All You Need_. As the title says, Noam found that instead of using `n_head` key-value projection weight pairs, a single pair can be shared across all attention heads without significantly degrading the model's performance.
> By sharing a single key-value projection weight pair, the key-value vectors $\mathbf{k}_i, \mathbf{v}_i$ become identical across all attention heads, which means we only need to cache 1 key-value projection pair instead of `n_head` pairs.
Since most LLMs use between 20 and 100 attention heads, MQA significantly reduces the memory consumption of the key-value cache. For the LLM used in this post, at an input sequence length of 16000, the required memory consumption therefore drops from 15 GB to less than 400 MB.
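Plugging octocoder-like numbers from the computation above (40 layers, 48 heads, head dimension 128) into a small helper illustrates the reduction:
```python
def kv_cache_gb(seq_len, n_layers, n_kv_heads, head_dim, bytes_per_el=2):
    # 2x for keys and values, stored for every layer and every kv head
    return 2 * seq_len * n_layers * n_kv_heads * head_dim * bytes_per_el / 1024**3

print(kv_cache_gb(16_000, 40, 48, 128))  # multi-head attention: ~14.6 GB
print(kv_cache_gb(16_000, 40, 1, 128))   # multi-query attention: ~0.3 GB
```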
In addition to memory savings, MQA also improves computational efficiency. In auto-regressive decoding, large key-value vectors need to be reloaded and concatenated with the current key-value vector pair, then fed into the $\mathbf{q}_c\mathbf{K}^T$ computation at every step. The memory bandwidth required for this constant reloading can become a serious performance bottleneck for auto-regressive decoding. By reducing the size of the key-value vectors, less memory needs to be accessed, thereby relieving the memory-bandwidth bottleneck. For more details, have a look at [Noam's paper](https://arxiv.org/abs/1911.02150).
The important part to understand here is that reducing the number of key-value attention heads to 1 only makes sense when a key-value cache is used. Without a key-value cache, the model's peak memory consumption for a single forward pass stays unchanged, since every attention head still has a unique query vector and therefore a distinct $\mathbf{QK}^T$ matrix.
MQA has seen wide adoption by the community and is now used by many popular LLMs:
- [**Falcon**](https://huggingface.co/tiiuae/falcon-40b)
- [**PaLM**](https://arxiv.org/pdf/2204.02311.pdf)
- [**MPT**](https://huggingface.co/mosaicml/mpt-30b)
- [**BLOOM**](https://huggingface.co/bigscience/bloom)
Also, the checkpoint used in this post - `bigcode/octocoder` - makes use of MQA.
2. [Grouped-Query-Attention (GQA)](https://arxiv.org/abs/2305.13245)
Grouped-Query-Attention was proposed by Ainslie et al. from Google, who found that using MQA often leads to a quality degradation compared to the original multi-head key-value projections. The paper argues that more model performance can be kept by reducing the number of query-head projection weights less drastically: instead of a single key-value projection weight pair, `n < n_head` key-value projection weights should be used. By choosing `n` to be much smaller than `n_head`, such as 2, 4, or 8, almost all of the memory and speed gains of MQA can be kept while sacrificing less model capacity and, arguably, only marginal performance.
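A minimal sketch of how `n < n_head` key-value heads are shared across groups of query heads at attention time (hypothetical shapes; real implementations usually fuse this expansion into the attention kernel):
```python
import torch

n_head, n_kv_head, head_dim, seq_len = 48, 8, 128, 16
q = torch.randn(n_head, seq_len, head_dim)
k = torch.randn(n_kv_head, seq_len, head_dim)  # only n_kv_head key projections
v = torch.randn(n_kv_head, seq_len, head_dim)  # only n_kv_head value projections

# each group of n_head // n_kv_head query heads shares one key-value head
k = k.repeat_interleave(n_head // n_kv_head, dim=0)
v = v.repeat_interleave(n_head // n_kv_head, dim=0)
scores = q @ k.transpose(1, 2) / head_dim**0.5  # (n_head, seq_len, seq_len)
out = torch.softmax(scores, dim=-1) @ v         # (n_head, seq_len, head_dim)
```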
Moreover, the authors of GQA found that existing model checkpoints can be _uptrained_ to the GQA architecture with as little as 5% of the original pre-training compute. While 5% of the original pre-training compute can still be a massive amount, GQA _uptraining_ allows existing checkpoints to be upgraded into checkpoints that can handle long input sequences, which is quite appealing.
GQA was only proposed recently, which is why adoption is still limited at the time of writing. Its most notable application is [Llama-v2](https://huggingface.co/meta-llama/Llama-2-70b-hf).
> In conclusion, it is strongly recommended to use GQA or MQA if the LLM is deployed with auto-regressive decoding and needs to handle long input sequences, as is for example the case for chat.
## Conclusion
The research community is constantly coming up with new, clever ways to speed up inference for ever-larger LLMs. As an example, one promising research direction is [speculative decoding](https://arxiv.org/abs/2211.17192), where "easy tokens" are generated by smaller, faster language models, and only "hard tokens" are generated by the LLM itself. Going into more detail is beyond the scope of this post, but you can read about it in this [nice blog post](https://huggingface.co/blog/cn/assisted-generation).
The reason massive LLMs such as GPT3/4, Llama-2-70b, Claude, and PaLM can run so quickly in chat interfaces such as [Hugging Face Chat](https://huggingface.co/chat/) or ChatGPT is, to a large extent, thanks to the improvements in precision, algorithms, and architecture described above. Going forward, accelerators such as GPUs and TPUs will only get faster and offer more memory, but one should nevertheless always make sure to use the best available algorithms and architectures to get the most bang for the buck 🤗.
| 9 |
0 | hf_public_repos/candle/candle-examples/examples | hf_public_repos/candle/candle-examples/examples/silero-vad/main.rs | #[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use anyhow::Result;
use clap::Parser;
use candle::{DType, Tensor};
#[derive(Clone, Debug, Copy, PartialEq, Eq, clap::ValueEnum)]
enum Which {
#[value(name = "silero")]
Silero,
}
#[derive(Clone, Debug, Copy, PartialEq, Eq, clap::ValueEnum)]
enum SampleRate {
#[value(name = "8000")]
Sr8k,
#[value(name = "16000")]
Sr16k,
}
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
/// Enable tracing (generates a trace-timestamp.json file).
#[arg(long)]
tracing: bool,
#[arg(long)]
input: Option<String>,
#[arg(long)]
sample_rate: SampleRate,
#[arg(long)]
model_id: Option<String>,
#[arg(long)]
config_file: Option<String>,
/// The model to use.
#[arg(long, default_value = "silero")]
which: Which,
}
/// An iterator that reads consecutive frames of little-endian i16 values from a reader.
struct I16Frames<R> {
rdr: R,
buf: Box<[u8]>,
len: usize,
eof: bool,
}
impl<R> I16Frames<R> {
fn new(rdr: R, frame_size: usize) -> Self {
I16Frames {
rdr,
buf: vec![0; frame_size * std::mem::size_of::<i16>()].into_boxed_slice(),
len: 0,
eof: false,
}
}
}
impl<R: std::io::Read> Iterator for I16Frames<R> {
type Item = std::io::Result<Vec<f32>>;
fn next(&mut self) -> Option<Self::Item> {
if self.eof {
return None;
}
self.len += match self.rdr.read(&mut self.buf[self.len..]) {
Ok(0) => {
self.eof = true;
0
}
Ok(n) => n,
Err(e) => return Some(Err(e)),
};
if self.eof || self.len == self.buf.len() {
let buf = self.buf[..self.len]
.chunks(2)
.map(|bs| match bs {
[a, b] => i16::from_le_bytes([*a, *b]),
_ => unreachable!(),
})
.map(|i| i as f32 / i16::MAX as f32)
.collect();
self.len = 0;
Some(Ok(buf))
} else {
self.next()
}
}
}
fn main() -> Result<()> {
use tracing_chrome::ChromeLayerBuilder;
use tracing_subscriber::prelude::*;
let args = Args::parse();
let _guard = if args.tracing {
let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
tracing_subscriber::registry().with(chrome_layer).init();
Some(guard)
} else {
None
};
println!(
"avx: {}, neon: {}, simd128: {}, f16c: {}",
candle::utils::with_avx(),
candle::utils::with_neon(),
candle::utils::with_simd128(),
candle::utils::with_f16c()
);
let start = std::time::Instant::now();
let model_id = match &args.model_id {
Some(model_id) => std::path::PathBuf::from(model_id),
None => match args.which {
Which::Silero => hf_hub::api::sync::Api::new()?
.model("onnx-community/silero-vad".into())
.get("onnx/model.onnx")?,
// TODO: candle-onnx doesn't support Int8 dtype
// Which::SileroQuantized => hf_hub::api::sync::Api::new()?
// .model("onnx-community/silero-vad".into())
// .get("onnx/model_quantized.onnx")?,
},
};
let (sample_rate, frame_size, context_size): (i64, usize, usize) = match args.sample_rate {
SampleRate::Sr8k => (8000, 256, 32),
SampleRate::Sr16k => (16000, 512, 64),
};
println!("retrieved the files in {:?}", start.elapsed());
let start = std::time::Instant::now();
let device = candle_examples::device(args.cpu)?;
let model = candle_onnx::read_file(model_id)?;
println!("loaded the model in {:?}", start.elapsed());
let start = std::time::Instant::now();
struct State {
frame_size: usize,
sample_rate: Tensor,
state: Tensor,
context: Tensor,
}
let mut state = State {
frame_size,
sample_rate: Tensor::new(sample_rate, &device)?,
state: Tensor::zeros((2, 1, 128), DType::F32, &device)?,
context: Tensor::zeros((1, context_size), DType::F32, &device)?,
};
let mut res = vec![];
for chunk in I16Frames::new(std::io::stdin().lock(), state.frame_size) {
let chunk = chunk.unwrap();
if chunk.len() < state.frame_size {
continue;
}
let next_context = Tensor::from_slice(
&chunk[state.frame_size - context_size..],
(1, context_size),
&device,
)?;
let chunk = Tensor::from_vec(chunk, (1, state.frame_size), &device)?;
let chunk = Tensor::cat(&[&state.context, &chunk], 1)?;
let inputs = std::collections::HashMap::from_iter([
("input".to_string(), chunk),
("sr".to_string(), state.sample_rate.clone()),
("state".to_string(), state.state.clone()),
]);
let out = candle_onnx::simple_eval(&model, inputs).unwrap();
let out_names = &model.graph.as_ref().unwrap().output;
let output = out.get(&out_names[0].name).unwrap().clone();
state.state = out.get(&out_names[1].name).unwrap().clone();
assert_eq!(state.state.dims(), &[2, 1, 128]);
state.context = next_context;
let output = output.flatten_all()?.to_vec1::<f32>()?;
assert_eq!(output.len(), 1);
let output = output[0];
println!("vad chunk prediction: {output}");
res.push(output);
}
println!("calculated prediction in {:?}", start.elapsed());
let res_len = res.len() as f32;
let prediction = res.iter().sum::<f32>() / res_len;
println!("vad average prediction: {prediction}");
Ok(())
}
| 0 |
0 | hf_public_repos/candle/candle-examples/examples | hf_public_repos/candle/candle-examples/examples/silero-vad/README.md | # silero-vad: Voice Activity Detection
[Silero VAD (v5)](https://github.com/snakers4/silero-vad) detects voice activity in streaming audio.
This example uses the model available on the Hugging Face Hub at [onnx-community/silero-vad](https://huggingface.co/onnx-community/silero-vad).
## Running the example
```bash
$ arecord -t raw -f S16_LE -r 16000 -c 1 -d 5 - | cargo run --example silero-vad --release --features onnx -- --sample-rate 16000
```
| 1 |
0 | hf_public_repos/candle/candle-examples/examples | hf_public_repos/candle/candle-examples/examples/llama/main.rs | // An implementation of LLaMA https://github.com/facebookresearch/llama
//
// This is based on nanoGPT in a similar way to:
// https://github.com/Lightning-AI/lit-llama/blob/main/lit_llama/model.py
//
// The tokenizer config can be retrieved from:
// https://huggingface.co/hf-internal-testing/llama-tokenizer/raw/main/tokenizer.json
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
use anyhow::{bail, Error as E, Result};
use clap::{Parser, ValueEnum};
use candle::{DType, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::generation::{LogitsProcessor, Sampling};
use hf_hub::{api::sync::Api, Repo, RepoType};
use std::io::Write;
use candle_transformers::models::llama as model;
use model::{Llama, LlamaConfig};
const EOS_TOKEN: &str = "</s>";
const DEFAULT_PROMPT: &str = "My favorite theorem is ";
#[derive(Clone, Debug, Copy, PartialEq, Eq, ValueEnum)]
enum Which {
V1,
V2,
V3,
V31,
V3Instruct,
V31Instruct,
V32_1b,
V32_1bInstruct,
V32_3b,
V32_3bInstruct,
#[value(name = "solar-10.7b")]
Solar10_7B,
#[value(name = "tiny-llama-1.1b-chat")]
TinyLlama1_1BChat,
#[value(name = "SmolLM2-1.7B")]
SmolLM2_1B,
#[value(name = "SmolLM2-1.7B-Instruct")]
SmolLM2_1BInstruct,
#[value(name = "SmolLM2-360M")]
SmolLM2_360M,
#[value(name = "SmolLM2-360M-Instruct")]
SmolLM2_360MInstruct,
#[value(name = "SmolLM2-135M")]
SmolLM2_135M,
#[value(name = "SmolLM2-135M-Instruct")]
SmolLM2_135MInstruct,
}
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
/// The temperature used to generate samples.
#[arg(long, default_value_t = 0.8)]
temperature: f64,
/// Nucleus sampling probability cutoff.
#[arg(long)]
top_p: Option<f64>,
/// Only sample among the top K samples.
#[arg(long)]
top_k: Option<usize>,
/// The seed to use when generating random samples.
#[arg(long, default_value_t = 299792458)]
seed: u64,
/// The length of the sample to generate (in tokens).
#[arg(short = 'n', long, default_value_t = 10000)]
sample_len: usize,
/// Disable the key-value cache.
#[arg(long)]
no_kv_cache: bool,
/// The initial prompt.
#[arg(long)]
prompt: Option<String>,
/// Use different dtype than f16
#[arg(long)]
dtype: Option<String>,
/// Enable tracing (generates a trace-timestamp.json file).
#[arg(long)]
tracing: bool,
#[arg(long)]
model_id: Option<String>,
#[arg(long)]
revision: Option<String>,
/// The model size to use.
#[arg(long, default_value = "v3")]
which: Which,
#[arg(long)]
use_flash_attn: bool,
/// Penalty to be applied for repeating tokens, 1. means no penalty.
#[arg(long, default_value_t = 1.1)]
repeat_penalty: f32,
/// The context size to consider for the repeat penalty.
#[arg(long, default_value_t = 128)]
repeat_last_n: usize,
}
fn main() -> Result<()> {
use tokenizers::Tokenizer;
use tracing_chrome::ChromeLayerBuilder;
use tracing_subscriber::prelude::*;
let args = Args::parse();
let _guard = if args.tracing {
let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
tracing_subscriber::registry().with(chrome_layer).init();
Some(guard)
} else {
None
};
let device = candle_examples::device(args.cpu)?;
let dtype = match args.dtype.as_deref() {
Some("f16") => DType::F16,
Some("bf16") => DType::BF16,
Some("f32") => DType::F32,
Some(dtype) => bail!("Unsupported dtype {dtype}"),
None => DType::F16,
};
let (llama, tokenizer_filename, mut cache, config) = {
let api = Api::new()?;
let model_id = args.model_id.unwrap_or_else(|| {
let str = match args.which {
Which::V1 => "Narsil/amall-7b",
Which::V2 => "meta-llama/Llama-2-7b-hf",
Which::V3 => "meta-llama/Meta-Llama-3-8B",
Which::V3Instruct => "meta-llama/Meta-Llama-3-8B-Instruct",
Which::V31 => "meta-llama/Llama-3.1-8B",
Which::V31Instruct => "meta-llama/Llama-3.1-8B-Instruct",
Which::V32_1b => "meta-llama/Llama-3.2-1B",
Which::V32_1bInstruct => "meta-llama/Llama-3.2-1B-Instruct",
Which::V32_3b => "meta-llama/Llama-3.2-3B",
Which::V32_3bInstruct => "meta-llama/Llama-3.2-3B-Instruct",
Which::Solar10_7B => "upstage/SOLAR-10.7B-v1.0",
Which::TinyLlama1_1BChat => "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
Which::SmolLM2_135M => "HuggingFaceTB/SmolLM2-135M",
Which::SmolLM2_135MInstruct => "HuggingFaceTB/SmolLM2-135M-Instruct",
Which::SmolLM2_360M => "HuggingFaceTB/SmolLM2-360M",
Which::SmolLM2_360MInstruct => "HuggingFaceTB/SmolLM2-360M-Instruct",
Which::SmolLM2_1B => "HuggingFaceTB/SmolLM2-1.7B",
Which::SmolLM2_1BInstruct => "HuggingFaceTB/SmolLM2-1.7B-Instruct",
};
str.to_string()
});
println!("loading the model weights from {model_id}");
let revision = args.revision.unwrap_or("main".to_string());
let api = api.repo(Repo::with_revision(model_id, RepoType::Model, revision));
let tokenizer_filename = api.get("tokenizer.json")?;
let config_filename = api.get("config.json")?;
let config: LlamaConfig = serde_json::from_slice(&std::fs::read(config_filename)?)?;
let config = config.into_config(args.use_flash_attn);
let filenames = match args.which {
Which::V1
| Which::V2
| Which::V3
| Which::V3Instruct
| Which::V31
| Which::V31Instruct
| Which::V32_3b
| Which::V32_3bInstruct
| Which::Solar10_7B => {
candle_examples::hub_load_safetensors(&api, "model.safetensors.index.json")?
}
Which::SmolLM2_360M
| Which::SmolLM2_360MInstruct
| Which::SmolLM2_135M
| Which::SmolLM2_135MInstruct
| Which::SmolLM2_1B
| Which::SmolLM2_1BInstruct
| Which::V32_1b
| Which::V32_1bInstruct
| Which::TinyLlama1_1BChat => {
vec![api.get("model.safetensors")?]
}
};
let cache = model::Cache::new(!args.no_kv_cache, dtype, &config, &device)?;
let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, dtype, &device)? };
(Llama::load(vb, &config)?, tokenizer_filename, cache, config)
};
let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?;
let eos_token_id = config.eos_token_id.or_else(|| {
tokenizer
.token_to_id(EOS_TOKEN)
.map(model::LlamaEosToks::Single)
});
let prompt = args.prompt.as_ref().map_or(DEFAULT_PROMPT, |p| p.as_str());
let mut tokens = tokenizer
.encode(prompt, true)
.map_err(E::msg)?
.get_ids()
.to_vec();
let mut tokenizer = candle_examples::token_output_stream::TokenOutputStream::new(tokenizer);
println!("starting the inference loop");
print!("{prompt}");
let mut logits_processor = {
let temperature = args.temperature;
let sampling = if temperature <= 0. {
Sampling::ArgMax
} else {
match (args.top_k, args.top_p) {
(None, None) => Sampling::All { temperature },
(Some(k), None) => Sampling::TopK { k, temperature },
(None, Some(p)) => Sampling::TopP { p, temperature },
(Some(k), Some(p)) => Sampling::TopKThenTopP { k, p, temperature },
}
};
LogitsProcessor::from_sampling(args.seed, sampling)
};
let mut start_gen = std::time::Instant::now();
let mut index_pos = 0;
let mut token_generated = 0;
for index in 0..args.sample_len {
let (context_size, context_index) = if cache.use_kv_cache && index > 0 {
(1, index_pos)
} else {
(tokens.len(), 0)
};
if index == 1 {
start_gen = std::time::Instant::now()
}
let ctxt = &tokens[tokens.len().saturating_sub(context_size)..];
let input = Tensor::new(ctxt, &device)?.unsqueeze(0)?;
let logits = llama.forward(&input, context_index, &mut cache)?;
let logits = logits.squeeze(0)?;
let logits = if args.repeat_penalty == 1. {
logits
} else {
let start_at = tokens.len().saturating_sub(args.repeat_last_n);
candle_transformers::utils::apply_repeat_penalty(
&logits,
args.repeat_penalty,
&tokens[start_at..],
)?
};
index_pos += ctxt.len();
let next_token = logits_processor.sample(&logits)?;
token_generated += 1;
tokens.push(next_token);
match eos_token_id {
Some(model::LlamaEosToks::Single(eos_tok_id)) if next_token == eos_tok_id => {
break;
}
Some(model::LlamaEosToks::Multiple(ref eos_ids)) if eos_ids.contains(&next_token) => {
break;
}
_ => (),
}
if let Some(t) = tokenizer.next_token(next_token)? {
print!("{t}");
std::io::stdout().flush()?;
}
}
if let Some(rest) = tokenizer.decode_rest().map_err(E::msg)? {
print!("{rest}");
}
let dt = start_gen.elapsed();
println!(
"\n\n{} tokens generated ({} token/s)\n",
token_generated,
(token_generated - 1) as f64 / dt.as_secs_f64(),
);
Ok(())
}
| 2 |
0 | hf_public_repos/candle/candle-examples/examples | hf_public_repos/candle/candle-examples/examples/moondream/main.rs | #[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use anyhow::{Error as E, Result};
use clap::Parser;
use candle::{DType, Device, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::{
generation::LogitsProcessor,
models::{moondream, quantized_moondream},
};
use tokenizers::Tokenizer;
enum Model {
Moondream(moondream::Model),
Quantized(quantized_moondream::Model),
}
struct TextGeneration {
model: Model,
device: Device,
tokenizer: Tokenizer,
logits_processor: LogitsProcessor,
repeat_penalty: f32,
repeat_last_n: usize,
verbose_prompt: bool,
}
impl TextGeneration {
#[allow(clippy::too_many_arguments)]
fn new(
model: Model,
tokenizer: Tokenizer,
seed: u64,
temp: Option<f64>,
top_p: Option<f64>,
repeat_penalty: f32,
repeat_last_n: usize,
verbose_prompt: bool,
device: &Device,
) -> Self {
let logits_processor = LogitsProcessor::new(seed, temp, top_p);
Self {
model,
tokenizer,
logits_processor,
repeat_penalty,
repeat_last_n,
verbose_prompt,
device: device.clone(),
}
}
fn run(&mut self, prompt: &str, image_embeds: &Tensor, sample_len: usize) -> Result<()> {
use std::io::Write;
println!("starting the inference loop");
let tokens = self.tokenizer.encode(prompt, true).map_err(E::msg)?;
if tokens.is_empty() {
anyhow::bail!("Empty prompts are not supported in the Moondream model.")
}
if self.verbose_prompt {
for (token, id) in tokens.get_tokens().iter().zip(tokens.get_ids().iter()) {
let token = token.replace('▁', " ").replace("<0x0A>", "\n");
println!("{id:7} -> '{token}'");
}
}
let mut tokens = tokens.get_ids().to_vec();
let mut generated_tokens = 0usize;
// Moondream tokenizer bos_token and eos_token is "<|endoftext|>"
// https://huggingface.co/vikhyatk/moondream2/blob/main/special_tokens_map.json
let special_token = match self.tokenizer.get_vocab(true).get("<|endoftext|>") {
Some(token) => *token,
None => anyhow::bail!("cannot find the special token"),
};
let (bos_token, eos_token) = (special_token, special_token);
let start_gen = std::time::Instant::now();
let mut load_t = std::time::Duration::from_secs_f64(0f64);
for index in 0..sample_len {
let context_size = if index > 0 { 1 } else { tokens.len() };
let ctxt = &tokens[tokens.len().saturating_sub(context_size)..];
let input = Tensor::new(ctxt, &self.device)?.unsqueeze(0)?;
let logits = if index > 0 {
match self.model {
Model::Moondream(ref mut model) => model.text_model.forward(&input)?,
Model::Quantized(ref mut model) => model.text_model.forward(&input)?,
}
} else {
let bos_token = Tensor::new(&[bos_token], &self.device)?.unsqueeze(0)?;
let logits = match self.model {
Model::Moondream(ref mut model) => {
model
.text_model
.forward_with_img(&bos_token, &input, image_embeds)?
}
Model::Quantized(ref mut model) => {
model
.text_model
.forward_with_img(&bos_token, &input, image_embeds)?
}
};
load_t = start_gen.elapsed();
println!("load_t: {:?}", load_t);
logits
};
let logits = logits.squeeze(0)?.to_dtype(DType::F32)?;
let logits = if self.repeat_penalty == 1. {
logits
} else {
let start_at = tokens.len().saturating_sub(self.repeat_last_n);
candle_transformers::utils::apply_repeat_penalty(
&logits,
self.repeat_penalty,
&tokens[start_at..],
)?
};
let next_token = self.logits_processor.sample(&logits)?;
tokens.push(next_token);
generated_tokens += 1;
if next_token == eos_token || tokens.ends_with(&[27, 10619, 29] /* <END> */) {
break;
}
let token = self.tokenizer.decode(&[next_token], true).map_err(E::msg)?;
print!("{token}");
std::io::stdout().flush()?;
}
let dt = start_gen.elapsed() - load_t;
println!(
"\ngenerated in {} seconds\n{generated_tokens} tokens generated ({:.2} token/s)",
dt.as_secs_f64(),
(generated_tokens - 1) as f64 / dt.as_secs_f64()
);
Ok(())
}
}
#[derive(Parser)]
struct Args {
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
/// Enable tracing (generates a trace-timestamp.json file).
#[arg(long)]
tracing: bool,
/// Display the token for the specified prompt.
#[arg(long)]
verbose_prompt: bool,
#[arg(long)]
prompt: String,
#[arg(long)]
image: String,
/// The temperature used to generate samples.
#[arg(long)]
temperature: Option<f64>,
/// Nucleus sampling probability cutoff.
#[arg(long)]
top_p: Option<f64>,
/// The seed to use when generating random samples.
#[arg(long, default_value_t = 0)]
seed: u64,
#[arg(long, default_value_t = 5000)]
sample_len: usize,
/// Penalty to be applied for repeating tokens, 1. means no penalty.
#[arg(long, default_value_t = 1.0)]
repeat_penalty: f32,
/// The context size to consider for the repeat penalty.
#[arg(long, default_value_t = 64)]
repeat_last_n: usize,
#[arg(long)]
model_id: Option<String>,
#[arg(long)]
revision: Option<String>,
#[arg(long)]
quantized: bool,
/// Use f16 precision for all the computations rather than f32.
#[arg(long)]
f16: bool,
#[arg(long)]
model_file: Option<String>,
#[arg(long)]
tokenizer_file: Option<String>,
}
/// Loads an image from disk using the image crate; returns a tensor with shape
/// (3, 378, 378).
pub fn load_image<P: AsRef<std::path::Path>>(p: P) -> candle::Result<Tensor> {
let img = image::ImageReader::open(p)?
.decode()
.map_err(candle::Error::wrap)?
.resize_to_fill(378, 378, image::imageops::FilterType::Triangle); // Adjusted to 378x378
let img = img.to_rgb8();
let data = img.into_raw();
let data = Tensor::from_vec(data, (378, 378, 3), &Device::Cpu)?.permute((2, 0, 1))?;
let mean = Tensor::new(&[0.5f32, 0.5, 0.5], &Device::Cpu)?.reshape((3, 1, 1))?;
let std = Tensor::new(&[0.5f32, 0.5, 0.5], &Device::Cpu)?.reshape((3, 1, 1))?;
(data.to_dtype(candle::DType::F32)? / 255.)?
.broadcast_sub(&mean)?
.broadcast_div(&std)
}
#[tokio::main]
async fn main() -> anyhow::Result<()> {
use tracing_chrome::ChromeLayerBuilder;
use tracing_subscriber::prelude::*;
let args = Args::parse();
let _guard = if args.tracing {
let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
tracing_subscriber::registry().with(chrome_layer).init();
Some(guard)
} else {
None
};
println!(
"avx: {}, neon: {}, simd128: {}, f16c: {}",
candle::utils::with_avx(),
candle::utils::with_neon(),
candle::utils::with_simd128(),
candle::utils::with_f16c()
);
println!(
"temp: {:.2} repeat-penalty: {:.2} repeat-last-n: {}",
args.temperature.unwrap_or(0.),
args.repeat_penalty,
args.repeat_last_n
);
let start = std::time::Instant::now();
let api = hf_hub::api::tokio::Api::new()?;
let (model_id, revision) = match args.model_id {
Some(model_id) => (model_id.to_string(), None),
None => {
if args.quantized {
("santiagomed/candle-moondream".to_string(), None)
} else {
(
"vikhyatk/moondream2".to_string(),
Some("30c7cdf3fa6914f50bee3956694374143f5cc884"),
)
}
}
};
let revision = match (args.revision, revision) {
(Some(r), _) => r,
(None, Some(r)) => r.to_string(),
(None, None) => "main".to_string(),
};
let repo = api.repo(hf_hub::Repo::with_revision(
model_id,
hf_hub::RepoType::Model,
revision,
));
let model_file = match args.model_file {
Some(m) => m.into(),
None => {
if args.quantized {
repo.get("model-q4_0.gguf").await?
} else {
repo.get("model.safetensors").await?
}
}
};
let tokenizer = match args.tokenizer_file {
Some(m) => m.into(),
None => repo.get("tokenizer.json").await?,
};
println!("retrieved the files in {:?}", start.elapsed());
let tokenizer = Tokenizer::from_file(tokenizer).map_err(E::msg)?;
let start = std::time::Instant::now();
let device = candle_examples::device(args.cpu)?;
let config = moondream::Config::v2();
let dtype = if args.quantized {
if args.f16 {
anyhow::bail!("Quantized model does not support f16");
}
DType::F32
} else if device.is_cuda() || args.f16 {
DType::F16
} else {
DType::F32
};
let model = if args.quantized {
let vb = candle_transformers::quantized_var_builder::VarBuilder::from_gguf(
&model_file,
&device,
)?;
let model = quantized_moondream::Model::new(&config, vb)?;
Model::Quantized(model)
} else {
let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], dtype, &device)? };
let model = moondream::Model::new(&config, vb)?;
Model::Moondream(model)
};
println!("loaded the model in {:?}", start.elapsed());
let start = std::time::Instant::now();
let image = load_image(args.image)?
.to_device(&device)?
.to_dtype(dtype)?;
let image_embeds = image.unsqueeze(0)?;
let image_embeds = match model {
Model::Moondream(ref m) => image_embeds.apply(m.vision_encoder())?,
Model::Quantized(ref m) => image_embeds.apply(m.vision_encoder())?,
};
println!(
"loaded and encoded the image {image:?} in {:?}",
start.elapsed()
);
let prompt = format!("\n\nQuestion: {0}\n\nAnswer:", args.prompt);
let mut pipeline = TextGeneration::new(
model,
tokenizer,
args.seed,
args.temperature,
args.top_p,
args.repeat_penalty,
args.repeat_last_n,
args.verbose_prompt,
&device,
);
pipeline.run(&prompt, &image_embeds, args.sample_len)?;
Ok(())
}
| 3 |
0 | hf_public_repos/candle/candle-examples/examples | hf_public_repos/candle/candle-examples/examples/moondream/README.md | # candle-moondream
[Moondream](https://github.com/vikhyat/moondream) is a computer-vision model that can answer real-world questions about images. It's tiny by today's standards, with only 1.6B parameters. That enables it to run on a variety of devices, including mobile phones and edge devices.
## Running some examples
First download an example image
```bash
$ wget https://raw.githubusercontent.com/vikhyat/moondream/main/assets/demo-1.jpg
```
<img src="https://raw.githubusercontent.com/vikhyat/moondream/main/assets/demo-1.jpg" width="200">
Now you can run Moondream from the `candle-examples` crate:
```bash
$ cargo run --example moondream --release -- --prompt "What is the girl eating?" --image "./demo-1.jpg"
avx: false, neon: true, simd128: false, f16c: false
temp: 0.00 repeat-penalty: 1.00 repeat-last-n: 64
retrieved the files in 3.395583ms
Running on CPU, to run on GPU(metal), build this example with `--features metal`
loaded the model in 5.485493792s
loaded and encoded the image Tensor[dims 3, 378, 378; f32] in 4.801396417s
starting the inference loop
The girl is eating a hamburger.<
9 tokens generated (0.68 token/s)
``` | 4 |
0 | hf_public_repos/candle/candle-examples/examples | hf_public_repos/candle/candle-examples/examples/convmixer/main.rs | #[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use clap::Parser;
use candle::{DType, IndexOp, D};
use candle_nn::{Module, VarBuilder};
use candle_transformers::models::convmixer;
#[derive(Parser)]
struct Args {
#[arg(long)]
model: Option<String>,
#[arg(long)]
image: String,
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
}
pub fn main() -> anyhow::Result<()> {
let args = Args::parse();
let device = candle_examples::device(args.cpu)?;
let image = candle_examples::imagenet::load_image224(args.image)?.to_device(&device)?;
println!("loaded image {image:?}");
let model_file = match args.model {
None => {
let api = hf_hub::api::sync::Api::new()?;
let api = api.model("lmz/candle-convmixer".into());
api.get("convmixer_1024_20_ks9_p14.safetensors")?
}
Some(model) => model.into(),
};
let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], DType::F32, &device)? };
let model = convmixer::c1024_20(1000, vb)?;
println!("model built");
let logits = model.forward(&image.unsqueeze(0)?)?;
let prs = candle_nn::ops::softmax(&logits, D::Minus1)?
.i(0)?
.to_vec1::<f32>()?;
let mut prs = prs.iter().enumerate().collect::<Vec<_>>();
prs.sort_by(|(_, p1), (_, p2)| p2.total_cmp(p1));
for &(category_idx, pr) in prs.iter().take(5) {
println!(
"{:24}: {:.2}%",
candle_examples::imagenet::CLASSES[category_idx],
100. * pr
);
}
Ok(())
}
| 5 |
0 | hf_public_repos/candle/candle-examples/examples | hf_public_repos/candle/candle-examples/examples/convnext/main.rs | #[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use clap::{Parser, ValueEnum};
use candle::{DType, IndexOp, D};
use candle_nn::{Module, VarBuilder};
use candle_transformers::models::convnext;
#[derive(Clone, Copy, Debug, ValueEnum)]
enum Which {
Atto,
Femto,
Pico,
Nano,
Tiny,
Small,
Base,
Large,
AttoV2,
FemtoV2,
PicoV2,
NanoV2,
TinyV2,
BaseV2,
LargeV2,
XLarge,
Huge,
}
impl Which {
fn model_filename(&self) -> String {
let name = match self {
Self::Atto => "convnext_atto.d2_in1k",
Self::Femto => "convnext_femto.d1_in1k",
Self::Pico => "convnext_pico.d1_in1k",
Self::Nano => "convnext_nano.d1h_in1k",
Self::Tiny => "convnext_tiny.fb_in1k",
Self::Small => "convnext_small.fb_in1k",
Self::Base => "convnext_base.fb_in1k",
Self::Large => "convnext_large.fb_in1k",
Self::AttoV2 => "convnextv2_atto.fcmae_ft_in1k",
Self::FemtoV2 => "convnextv2_femto.fcmae_ft_in1k",
Self::PicoV2 => "convnextv2_pico.fcmae_ft_in1k",
Self::NanoV2 => "convnextv2_nano.fcmae_ft_in1k",
Self::TinyV2 => "convnextv2_tiny.fcmae_ft_in1k",
Self::BaseV2 => "convnextv2_base.fcmae_ft_in1k",
Self::LargeV2 => "convnextv2_large.fcmae_ft_in1k",
Self::XLarge => "convnext_xlarge.fb_in22k_ft_in1k",
Self::Huge => "convnextv2_huge.fcmae_ft_in1k",
};
format!("timm/{name}")
}
fn config(&self) -> convnext::Config {
match self {
Self::Atto | Self::AttoV2 => convnext::Config::atto(),
Self::Femto | Self::FemtoV2 => convnext::Config::femto(),
Self::Pico | Self::PicoV2 => convnext::Config::pico(),
Self::Nano | Self::NanoV2 => convnext::Config::nano(),
Self::Tiny | Self::TinyV2 => convnext::Config::tiny(),
Self::Small => convnext::Config::small(),
Self::Base | Self::BaseV2 => convnext::Config::base(),
Self::Large | Self::LargeV2 => convnext::Config::large(),
Self::XLarge => convnext::Config::xlarge(),
Self::Huge => convnext::Config::huge(),
}
}
}
#[derive(Parser)]
struct Args {
#[arg(long)]
model: Option<String>,
#[arg(long)]
image: String,
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
#[arg(value_enum, long, default_value_t=Which::Tiny)]
which: Which,
}
pub fn main() -> anyhow::Result<()> {
let args = Args::parse();
let device = candle_examples::device(args.cpu)?;
let image = candle_examples::imagenet::load_image224(args.image)?.to_device(&device)?;
println!("loaded image {image:?}");
let model_file = match args.model {
None => {
let model_name = args.which.model_filename();
let api = hf_hub::api::sync::Api::new()?;
let api = api.model(model_name);
api.get("model.safetensors")?
}
Some(model) => model.into(),
};
let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], DType::F32, &device)? };
let model = convnext::convnext(&args.which.config(), 1000, vb)?;
println!("model built");
let logits = model.forward(&image.unsqueeze(0)?)?;
let prs = candle_nn::ops::softmax(&logits, D::Minus1)?
.i(0)?
.to_vec1::<f32>()?;
let mut prs = prs.iter().enumerate().collect::<Vec<_>>();
prs.sort_by(|(_, p1), (_, p2)| p2.total_cmp(p1));
for &(category_idx, pr) in prs.iter().take(5) {
println!(
"{:24}: {:.2}%",
candle_examples::imagenet::CLASSES[category_idx],
100. * pr
);
}
Ok(())
}
| 6 |
0 | hf_public_repos/candle/candle-examples/examples | hf_public_repos/candle/candle-examples/examples/convnext/README.md | # candle-convnext
[A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) and
[ConvNeXt V2: Co-designing and Scaling ConvNets with Masked Autoencoders](https://arxiv.org/abs/2301.00808).
This candle implementation uses a pre-trained ConvNeXt network for inference. The
classification head has been trained on the ImageNet dataset and returns the
probabilities for the top-5 classes.
## Running an example
```
$ cargo run --example convnext --release -- --image candle-examples/examples/yolo-v8/assets/bike.jpg --which tiny
loaded image Tensor[dims 3, 224, 224; f32]
model built
mountain bike, all-terrain bike, off-roader: 84.09%
bicycle-built-for-two, tandem bicycle, tandem: 4.15%
maillot : 0.74%
crash helmet : 0.54%
unicycle, monocycle : 0.44%
```
| 7 |
0 | hf_public_repos/candle/candle-examples/examples | hf_public_repos/candle/candle-examples/examples/t5/main.rs | #[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use std::io::Write;
use std::path::PathBuf;
use candle_transformers::models::t5;
use anyhow::{Error as E, Result};
use candle::{DType, Device, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::generation::LogitsProcessor;
use clap::{Parser, ValueEnum};
use hf_hub::{api::sync::Api, Repo, RepoType};
use tokenizers::Tokenizer;
const DTYPE: DType = DType::F32;
#[derive(Clone, Debug, Copy, ValueEnum)]
enum Which {
T5Base,
T5Small,
T5Large,
T5_3B,
Mt5Base,
Mt5Small,
Mt5Large,
}
#[derive(Parser, Debug, Clone)]
#[command(author, version, about, long_about = None)]
struct Args {
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
/// Enable tracing (generates a trace-timestamp.json file).
#[arg(long)]
tracing: bool,
/// The model repository to use on the HuggingFace hub.
#[arg(long)]
model_id: Option<String>,
#[arg(long)]
revision: Option<String>,
#[arg(long)]
model_file: Option<String>,
#[arg(long)]
tokenizer_file: Option<String>,
#[arg(long)]
config_file: Option<String>,
/// Enable decoding.
#[arg(long)]
decode: bool,
/// Disable the key-value cache.
#[arg(long, default_value = "false")]
disable_cache: bool,
/// Use this prompt, otherwise compute sentence similarities.
#[arg(long)]
prompt: Option<String>,
/// If set along with --decode, will use this prompt to initialize the decoder.
#[arg(long)]
decoder_prompt: Option<String>,
/// L2 normalization for embeddings.
#[arg(long, default_value = "true")]
normalize_embeddings: bool,
/// The temperature used to generate samples.
#[arg(long, default_value_t = 0.8)]
temperature: f64,
/// Nucleus sampling probability cutoff.
#[arg(long)]
top_p: Option<f64>,
/// Penalty to be applied for repeating tokens, 1. means no penalty.
#[arg(long, default_value_t = 1.1)]
repeat_penalty: f32,
/// The context size to consider for the repeat penalty.
#[arg(long, default_value_t = 64)]
repeat_last_n: usize,
/// The model to be used.
#[arg(long, default_value = "t5-small")]
which: Which,
}
struct T5ModelBuilder {
device: Device,
config: t5::Config,
weights_filename: Vec<PathBuf>,
}
impl T5ModelBuilder {
pub fn load(args: &Args) -> Result<(Self, Tokenizer)> {
let device = candle_examples::device(args.cpu)?;
let (default_model, default_revision) = match args.which {
Which::T5Base => ("t5-base", "main"),
Which::T5Small => ("t5-small", "refs/pr/15"),
Which::T5Large => ("t5-large", "main"),
Which::T5_3B => ("t5-3b", "main"),
Which::Mt5Base => ("google/mt5-base", "refs/pr/5"),
Which::Mt5Small => ("google/mt5-small", "refs/pr/6"),
Which::Mt5Large => ("google/mt5-large", "refs/pr/2"),
};
let default_model = default_model.to_string();
let default_revision = default_revision.to_string();
let (model_id, revision) = match (args.model_id.to_owned(), args.revision.to_owned()) {
(Some(model_id), Some(revision)) => (model_id, revision),
(Some(model_id), None) => (model_id, "main".to_string()),
(None, Some(revision)) => (default_model, revision),
(None, None) => (default_model, default_revision),
};
let repo = Repo::with_revision(model_id.clone(), RepoType::Model, revision);
let api = Api::new()?;
let repo = api.repo(repo);
let config_filename = match &args.config_file {
None => repo.get("config.json")?,
Some(f) => f.into(),
};
let tokenizer_filename = match &args.tokenizer_file {
None => match args.which {
Which::Mt5Base => api
.model("lmz/mt5-tokenizers".into())
.get("mt5-base.tokenizer.json")?,
Which::Mt5Small => api
.model("lmz/mt5-tokenizers".into())
.get("mt5-small.tokenizer.json")?,
Which::Mt5Large => api
.model("lmz/mt5-tokenizers".into())
.get("mt5-large.tokenizer.json")?,
_ => repo.get("tokenizer.json")?,
},
Some(f) => f.into(),
};
let weights_filename = match &args.model_file {
Some(f) => f.split(',').map(|v| v.into()).collect::<Vec<_>>(),
None => {
if model_id == "google/flan-t5-xxl" || model_id == "google/flan-ul2" {
candle_examples::hub_load_safetensors(&repo, "model.safetensors.index.json")?
} else {
vec![repo.get("model.safetensors")?]
}
}
};
let config = std::fs::read_to_string(config_filename)?;
let mut config: t5::Config = serde_json::from_str(&config)?;
config.use_cache = !args.disable_cache;
let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?;
Ok((
Self {
device,
config,
weights_filename,
},
tokenizer,
))
}
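    // Note: memory-mapping the safetensors weights is `unsafe` because the mapped file
    // must not be modified while in use; in exchange the tensors are loaded without an
    // extra copy into memory.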
pub fn build_encoder(&self) -> Result<t5::T5EncoderModel> {
let vb = unsafe {
VarBuilder::from_mmaped_safetensors(&self.weights_filename, DTYPE, &self.device)?
};
Ok(t5::T5EncoderModel::load(vb, &self.config)?)
}
pub fn build_conditional_generation(&self) -> Result<t5::T5ForConditionalGeneration> {
let vb = unsafe {
VarBuilder::from_mmaped_safetensors(&self.weights_filename, DTYPE, &self.device)?
};
Ok(t5::T5ForConditionalGeneration::load(vb, &self.config)?)
}
}
fn main() -> Result<()> {
use tracing_chrome::ChromeLayerBuilder;
use tracing_subscriber::prelude::*;
let args = Args::parse();
let _guard = if args.tracing {
let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
tracing_subscriber::registry().with(chrome_layer).init();
Some(guard)
} else {
None
};
let (builder, mut tokenizer) = T5ModelBuilder::load(&args)?;
let device = &builder.device;
let tokenizer = tokenizer
.with_padding(None)
.with_truncation(None)
.map_err(E::msg)?;
match args.prompt {
Some(prompt) => {
let tokens = tokenizer
.encode(prompt, true)
.map_err(E::msg)?
.get_ids()
.to_vec();
let input_token_ids = Tensor::new(&tokens[..], device)?.unsqueeze(0)?;
if !args.decode {
let mut model = builder.build_encoder()?;
let start = std::time::Instant::now();
let ys = model.forward(&input_token_ids)?;
println!("{ys}");
println!("Took {:?}", start.elapsed());
} else {
let mut model = builder.build_conditional_generation()?;
let mut output_token_ids = [builder
.config
.decoder_start_token_id
.unwrap_or(builder.config.pad_token_id)
as u32]
.to_vec();
if let Some(decoder_prompt) = &args.decoder_prompt {
print!("{decoder_prompt}");
output_token_ids.extend(
tokenizer
.encode(decoder_prompt.to_string(), false)
.map_err(E::msg)?
.get_ids()
.to_vec(),
);
}
let temperature = if args.temperature <= 0. {
None
} else {
Some(args.temperature)
};
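                // A fixed RNG seed (the speed of light in m/s) keeps sampling reproducible
                // for a given temperature/top-p setting.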
let mut logits_processor = LogitsProcessor::new(299792458, temperature, args.top_p);
let encoder_output = model.encode(&input_token_ids)?;
let start = std::time::Instant::now();
for index in 0.. {
if output_token_ids.len() > 512 {
break;
}
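                    // With the KV cache enabled, past keys/values are reused, so only the
                    // newly sampled token needs to be fed back in after the first step;
                    // with the cache disabled, the full sequence is re-decoded every step.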
let decoder_token_ids = if index == 0 || !builder.config.use_cache {
Tensor::new(output_token_ids.as_slice(), device)?.unsqueeze(0)?
} else {
let last_token = *output_token_ids.last().unwrap();
Tensor::new(&[last_token], device)?.unsqueeze(0)?
};
let logits = model
.decode(&decoder_token_ids, &encoder_output)?
.squeeze(0)?;
let logits = if args.repeat_penalty == 1. {
logits
} else {
let start_at = output_token_ids.len().saturating_sub(args.repeat_last_n);
candle_transformers::utils::apply_repeat_penalty(
&logits,
args.repeat_penalty,
&output_token_ids[start_at..],
)?
};
let next_token_id = logits_processor.sample(&logits)?;
if next_token_id as usize == builder.config.eos_token_id {
break;
}
output_token_ids.push(next_token_id);
if let Some(text) = tokenizer.id_to_token(next_token_id) {
let text = text.replace('▁', " ").replace("<0x0A>", "\n");
print!("{text}");
std::io::stdout().flush()?;
}
}
let dt = start.elapsed();
println!(
"\n{} tokens generated ({:.2} token/s)\n",
output_token_ids.len(),
output_token_ids.len() as f64 / dt.as_secs_f64(),
);
}
}
None => {
let mut model = builder.build_encoder()?;
let sentences = [
"The cat sits outside",
"A man is playing guitar",
"I love pasta",
"The new movie is awesome",
"The cat plays in the garden",
"A woman watches TV",
"The new movie is so great",
"Do you like pizza?",
];
let n_sentences = sentences.len();
let mut all_embeddings = Vec::with_capacity(n_sentences);
for sentence in sentences {
let tokens = tokenizer
.encode(sentence, true)
.map_err(E::msg)?
.get_ids()
.to_vec();
let token_ids = Tensor::new(&tokens[..], model.device())?.unsqueeze(0)?;
let embeddings = model.forward(&token_ids)?;
println!("generated embeddings {:?}", embeddings.shape());
                // Average-pool by taking the mean embedding over all tokens (no padding is applied here).
let (_n_sentence, n_tokens, _hidden_size) = embeddings.dims3()?;
let embeddings = (embeddings.sum(1)? / (n_tokens as f64))?;
let embeddings = if args.normalize_embeddings {
normalize_l2(&embeddings)?
} else {
embeddings
};
println!("pooled embeddings {:?}", embeddings.shape());
all_embeddings.push(embeddings)
}
let mut similarities = vec![];
for (i, e_i) in all_embeddings.iter().enumerate() {
for (j, e_j) in all_embeddings
.iter()
.enumerate()
.take(n_sentences)
.skip(i + 1)
{
let sum_ij = (e_i * e_j)?.sum_all()?.to_scalar::<f32>()?;
let sum_i2 = (e_i * e_i)?.sum_all()?.to_scalar::<f32>()?;
let sum_j2 = (e_j * e_j)?.sum_all()?.to_scalar::<f32>()?;
let cosine_similarity = sum_ij / (sum_i2 * sum_j2).sqrt();
similarities.push((cosine_similarity, i, j))
}
}
similarities.sort_by(|u, v| v.0.total_cmp(&u.0));
for &(score, i, j) in similarities[..5].iter() {
println!("score: {score:.2} '{}' '{}'", sentences[i], sentences[j])
}
}
}
Ok(())
}
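/// L2-normalizes along dim 1: each row is divided by the square root of its sum of squares.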
pub fn normalize_l2(v: &Tensor) -> Result<Tensor> {
Ok(v.broadcast_div(&v.sqr()?.sum_keepdim(1)?.sqrt()?)?)
}
| 8 |
0 | hf_public_repos/candle/candle-examples/examples | hf_public_repos/candle/candle-examples/examples/t5/README.md | # candle-t5
## Encoder-decoder example:
```bash
$ cargo run --example t5 --release -- --model-id "t5-small" --prompt "translate to German: A beautiful candle." --decode
...
Eine schöne Kerze.
9 tokens generated (2.42 token/s)
```
Variants such as [flan-t5](https://huggingface.co/google/flan-t5-small), [flan-ul2](https://huggingface.co/google/flan-ul2) (with `--revision "refs/pr/25"`), and [Co-EdIT](https://huggingface.co/grammarly/coedit-large) are also supported.
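For instance, a flan-t5 run could look like the following (a sketch: the exact model id is an assumption, and some checkpoints may additionally need a `--revision` pointing at a safetensors conversion):
```bash
$ cargo run --example t5 --release -- --model-id "google/flan-t5-small" --prompt "translate English to German: A beautiful candle." --decode
```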
## Translation with [MADLAD-400](https://arxiv.org/abs/2309.04662)
MADLAD-400 is a series of multilingual machine translation T5 models trained on 250 billion tokens covering over 450 languages using publicly available data. These models are competitive with significantly larger models. The `<2de>` token at the start of the prompt selects the target language (German here).
```bash
cargo run --example t5 --release -- \
--model-id "jbochi/madlad400-3b-mt" \
--prompt "<2de> How are you, my friend?" \
--decode --temperature 0
...
Wie geht es dir, mein Freund?
```
## Sentence embedding example
```bash
$ cargo run --example t5 --release -- --model-id "t5-small" --prompt "A beautiful candle."
...
[[[ 0.0515, -0.0541, -0.0761, ..., -0.0392, 0.1511, -0.0265],
[-0.0974, 0.0998, -0.1659, ..., -0.2450, 0.1738, -0.0164],
[ 0.0624, -0.1024, 0.0430, ..., -0.1388, 0.0564, -0.2962],
[-0.0389, -0.1173, 0.0026, ..., 0.1064, -0.1065, 0.0990],
[ 0.1300, 0.0027, -0.0326, ..., 0.0026, -0.0317, 0.0851]]]
Tensor[[1, 5, 512], f32]
Took 303.766583ms
```
| 9 |
0 | hf_public_repos/accelerate/src/accelerate/test_utils/scripts | hf_public_repos/accelerate/src/accelerate/test_utils/scripts/external_deps/__init__.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 0 |
0 | hf_public_repos/accelerate/src/accelerate/test_utils/scripts | hf_public_repos/accelerate/src/accelerate/test_utils/scripts/external_deps/test_performance.py | # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
from pathlib import Path
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
"""
Creates a set of `DataLoader`s for the `glue` dataset.
Args:
accelerator (`Accelerator`):
An `Accelerator` object
batch_size (`int`, *optional*):
The batch size for the train and validation DataLoaders.
        model_name (`str`, *optional*):
            The name of the model to use.
    """
tokenizer = AutoTokenizer.from_pretrained(model_name)
datasets = load_dataset("glue", "mrpc")
def tokenize_function(examples):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
tokenized_datasets = datasets.map(
tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.XLA:
return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
return tokenizer.pad(examples, padding="longest", return_tensors="pt")
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
)
eval_dataloader = DataLoader(
tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
)
return train_dataloader, eval_dataloader
def training_function(config, args):
# Initialize accelerator
accelerator = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config["lr"]
num_epochs = int(config["num_epochs"])
seed = int(config["seed"])
batch_size = int(config["batch_size"])
model_name = args.model_name_or_path
set_seed(seed)
train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
# Instantiate optimizer
optimizer_cls = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
optimizer = optimizer_cls(params=model.parameters(), lr=lr)
max_training_steps = len(train_dataloader) * num_epochs
# Instantiate scheduler
linear_decay_scheduler = False
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=0,
num_training_steps=max_training_steps,
)
linear_decay_scheduler = True
else:
lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
    # We also need to keep track of the starting epoch so files are named properly
starting_epoch = 0
# Now we train the model
metric = evaluate.load("glue", "mrpc")
best_performance = 0
performance_metric = {}
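    # With zero warmup steps, the linear schedule decays the LR from `lr` down to 0 over
    # the per-process number of optimizer updates, so after the first update the LR
    # should equal lr * (1 - 1 / updates).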
expected_lr_after_first_optim_step = lr * (
1 - 1 / (max_training_steps / accelerator.num_processes / accelerator.gradient_accumulation_steps)
)
lr_scheduler_check_completed = False
for epoch in range(starting_epoch, num_epochs):
model.train()
for step, batch in enumerate(train_dataloader):
with accelerator.accumulate(model):
outputs = model(**batch)
loss = outputs.loss
accelerator.backward(loss)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# assert the learning rate after first optimizer step
if (
accelerator.sync_gradients
and not lr_scheduler_check_completed
and linear_decay_scheduler
and accelerator.state.mixed_precision == "no"
):
assert (
lr_scheduler.get_last_lr()[0] == expected_lr_after_first_optim_step
), f"Wrong lr found at second step, expected {expected_lr_after_first_optim_step}, got {lr_scheduler.get_last_lr()[0]}"
lr_scheduler_check_completed = True
model.eval()
samples_seen = 0
for step, batch in enumerate(eval_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
# It is slightly faster to call this once, than multiple times
predictions, references = accelerator.gather(
(predictions, batch["labels"])
) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(eval_dataloader) - 1:
predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
references = references[: len(eval_dataloader.dataset) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=predictions,
references=references,
)
eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:", eval_metric)
performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]
if best_performance < eval_metric["accuracy"]:
best_performance = eval_metric["accuracy"]
# check that the LR is 0
if linear_decay_scheduler and accelerator.state.mixed_precision == "no":
assert (
lr_scheduler.get_last_lr()[0] == 0
), f"Wrong lr found at last step, expected 0, got {lr_scheduler.get_last_lr()[0]}"
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
json.dump(performance_metric, f)
# Finally try saving the model
accelerator.save_model(model, args.output_dir)
accelerator.wait_for_everyone()
assert Path(
args.output_dir, "model.safetensors"
).exists(), "Model was not saved when calling `Accelerator.save_model`"
accelerator.end_training()
def main():
parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
parser.add_argument(
"--model_name_or_path",
type=str,
default="bert-base-cased",
help="Path to pretrained model or model identifier from huggingface.co/models.",
required=False,
)
parser.add_argument(
"--output_dir",
type=str,
default=".",
help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
)
parser.add_argument(
"--performance_lower_bound",
type=float,
default=None,
help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
)
parser.add_argument(
"--num_epochs",
type=int,
default=3,
help="Number of train epochs.",
)
args = parser.parse_args()
config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(config, args)
if __name__ == "__main__":
main()
| 1 |
0 | hf_public_repos/accelerate/src/accelerate/test_utils/scripts | hf_public_repos/accelerate/src/accelerate/test_utils/scripts/external_deps/test_zero3_integration.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.distributed
from accelerate.test_utils import require_huggingface_suite, torch_device
from accelerate.utils import is_transformers_available
if is_transformers_available():
from transformers import AutoModel, TrainingArguments
GPT2_TINY = "sshleifer/tiny-gpt2"
@require_huggingface_suite
def init_torch_dist_then_launch_deepspeed():
backend = "ccl" if torch_device == "xpu" else "nccl"
torch.distributed.init_process_group(backend=backend)
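    # ZeRO stage 3 shards model parameters, gradients, and optimizer states across ranks;
    # the "auto" values below are resolved by the transformers Trainer from its own arguments.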
deepspeed_config = {
"zero_optimization": {
"stage": 3,
},
"train_batch_size": "auto",
"train_micro_batch_size_per_gpu": "auto",
}
train_args = TrainingArguments(
output_dir="./",
deepspeed=deepspeed_config,
)
model = AutoModel.from_pretrained(GPT2_TINY)
assert train_args is not None
assert model is not None
def main():
init_torch_dist_then_launch_deepspeed()
if __name__ == "__main__":
main()
| 2 |
0 | hf_public_repos/accelerate/src/accelerate/test_utils/scripts | hf_public_repos/accelerate/src/accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py | # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import is_mlu_available, is_musa_available, is_npu_available, is_xpu_available
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
# Converting Bytes to Megabytes
def b2mb(x):
return int(x / 2**20)
# This context manager is used to track the peak memory usage of the process
class TorchTracemalloc:
def __enter__(self):
gc.collect()
if torch.cuda.is_available():
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
self.begin = torch.cuda.memory_allocated()
elif is_mlu_available():
torch.mlu.empty_cache()
torch.mlu.reset_max_memory_allocated() # reset the peak gauge to zero
self.begin = torch.mlu.memory_allocated()
elif is_musa_available():
torch.musa.empty_cache()
torch.musa.reset_max_memory_allocated() # reset the peak gauge to zero
self.begin = torch.musa.memory_allocated()
elif is_npu_available():
torch.npu.empty_cache()
torch.npu.reset_max_memory_allocated() # reset the peak gauge to zero
self.begin = torch.npu.memory_allocated()
elif is_xpu_available():
torch.xpu.empty_cache()
torch.xpu.reset_max_memory_allocated() # reset the peak gauge to zero
self.begin = torch.xpu.memory_allocated()
return self
def __exit__(self, *exc):
gc.collect()
if torch.cuda.is_available():
torch.cuda.empty_cache()
self.end = torch.cuda.memory_allocated()
self.peak = torch.cuda.max_memory_allocated()
        elif is_mlu_available():
            torch.mlu.empty_cache()
            self.end = torch.mlu.memory_allocated()
            self.peak = torch.mlu.max_memory_allocated()
        elif is_musa_available():
            torch.musa.empty_cache()
            self.end = torch.musa.memory_allocated()
            self.peak = torch.musa.max_memory_allocated()
elif is_npu_available():
torch.npu.empty_cache()
self.end = torch.npu.memory_allocated()
self.peak = torch.npu.max_memory_allocated()
elif is_xpu_available():
torch.xpu.empty_cache()
self.end = torch.xpu.memory_allocated()
self.peak = torch.xpu.max_memory_allocated()
self.used = b2mb(self.end - self.begin)
self.peaked = b2mb(self.peak - self.begin)
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(
accelerator: Accelerator,
batch_size: int = 16,
model_name: str = "bert-base-cased",
n_train: int = 320,
n_val: int = 160,
):
"""
Creates a set of `DataLoader`s for the `glue` dataset.
Args:
accelerator (`Accelerator`):
An `Accelerator` object
batch_size (`int`, *optional*):
The batch size for the train and validation DataLoaders.
model_name (`str`, *optional*):
The name of the model to use.
n_train (`int`, *optional*):
The number of training examples to use.
n_val (`int`, *optional*):
The number of validation examples to use.
"""
tokenizer = AutoTokenizer.from_pretrained(model_name)
datasets = load_dataset(
"glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
)
def tokenize_function(examples):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
tokenized_datasets = datasets.map(
tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.XLA:
return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
return tokenizer.pad(examples, padding="longest", return_tensors="pt")
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
)
eval_dataloader = DataLoader(
tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
)
return train_dataloader, eval_dataloader
def training_function(config, args):
# Initialize accelerator
accelerator = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config["lr"]
num_epochs = int(config["num_epochs"])
seed = int(config["seed"])
batch_size = int(config["batch_size"])
model_name = args.model_name_or_path
set_seed(seed)
train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
# Instantiate optimizer
optimizer_cls = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
optimizer = optimizer_cls(params=model.parameters(), lr=lr)
if accelerator.state.deepspeed_plugin is not None:
gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
gradient_accumulation_steps = 1
max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=0,
num_training_steps=max_training_steps,
)
else:
lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
# We need to keep track of how many total steps we have iterated over
overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
starting_epoch = 0
# Now we train the model
train_total_peak_memory = {}
for epoch in range(starting_epoch, num_epochs):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(train_dataloader):
outputs = model(**batch)
loss = outputs.loss
loss = loss / gradient_accumulation_steps
accelerator.backward(loss)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print(f"Memory before entering the train : {b2mb(tracemalloc.begin)}")
accelerator.print(f"Memory consumed at the end of the train (end-begin): {tracemalloc.used}")
accelerator.print(f"Peak Memory consumed during the train (max-begin): {tracemalloc.peaked}")
accelerator.print(
f"Total Peak Memory consumed during the train (max): {tracemalloc.peaked + b2mb(tracemalloc.begin)}"
)
train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
json.dump(train_total_peak_memory, f)
accelerator.end_training()
def main():
parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
parser.add_argument(
"--model_name_or_path",
type=str,
default="bert-base-cased",
help="Path to pretrained model or model identifier from huggingface.co/models.",
required=False,
)
parser.add_argument(
"--output_dir",
type=str,
default=".",
help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
)
parser.add_argument(
"--peak_memory_upper_bound",
type=float,
default=None,
help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
)
parser.add_argument(
"--n_train",
type=int,
default=320,
help="Number of training examples to use.",
)
parser.add_argument(
"--n_val",
type=int,
default=160,
help="Number of validation examples to use.",
)
parser.add_argument(
"--num_epochs",
type=int,
default=1,
help="Number of train epochs.",
)
args = parser.parse_args()
config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(config, args)
if __name__ == "__main__":
main()
| 3 |
0 | hf_public_repos/accelerate | hf_public_repos/accelerate/examples/complete_cv_example.py | # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator, DataLoaderConfiguration
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a ResNet50 on the Oxford-IIT Pet Dataset
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Function to get the label from the filename
def extract_label(fname):
stem = fname.split(os.path.sep)[-1]
return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class PetsDataset(Dataset):
def __init__(self, file_names, image_transform=None, label_to_id=None):
self.file_names = file_names
self.image_transform = image_transform
self.label_to_id = label_to_id
def __len__(self):
return len(self.file_names)
def __getitem__(self, idx):
fname = self.file_names[idx]
raw_image = PIL.Image.open(fname)
image = raw_image.convert("RGB")
if self.image_transform is not None:
image = self.image_transform(image)
label = extract_label(fname)
if self.label_to_id is not None:
label = self.label_to_id[label]
return {"image": image, "label": label}
def training_function(config, args):
# Initialize accelerator
dataloader_config = DataLoaderConfiguration(use_stateful_dataloader=args.use_stateful_dataloader)
if args.with_tracking:
accelerator = Accelerator(
cpu=args.cpu,
mixed_precision=args.mixed_precision,
log_with="all",
project_dir=args.project_dir,
dataloader_config=dataloader_config,
)
else:
accelerator = Accelerator(
cpu=args.cpu, mixed_precision=args.mixed_precision, dataloader_config=dataloader_config
)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config["lr"]
num_epochs = int(config["num_epochs"])
seed = int(config["seed"])
batch_size = int(config["batch_size"])
image_size = config["image_size"]
if not isinstance(image_size, (list, tuple)):
image_size = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps, "isdigit"):
if args.checkpointing_steps == "epoch":
checkpointing_steps = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
checkpointing_steps = int(args.checkpointing_steps)
else:
raise ValueError(
f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
)
else:
checkpointing_steps = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
run = os.path.split(__file__)[-1].split(".")[0]
accelerator.init_trackers(run, config)
# Grab all the image filenames
file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]
# Build the label correspondences
all_labels = [extract_label(fname) for fname in file_names]
id_to_label = list(set(all_labels))
id_to_label.sort()
label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}
# Set the seed before splitting the data.
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# Split our filenames between train and validation
random_perm = np.random.permutation(len(file_names))
cut = int(0.8 * len(file_names))
train_split = random_perm[:cut]
eval_split = random_perm[cut:]
# For training we use a simple RandomResizedCrop
train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
train_dataset = PetsDataset(
[file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
)
# For evaluation, we use a deterministic Resize
eval_tfm = Compose([Resize(image_size), ToTensor()])
eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)
# Instantiate dataloaders.
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
model = model.to(accelerator.device)
# Freezing the base model
for param in model.parameters():
param.requires_grad = False
for param in model.get_classifier().parameters():
param.requires_grad = True
# We normalize the batches of images to be a bit faster.
mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)
# Instantiate optimizer
optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)
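    # `lr / 25` matches OneCycleLR's default starting LR (max_lr / div_factor, with
    # div_factor=25), so the optimizer begins exactly where the schedule below starts.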
# Instantiate learning rate scheduler
lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
# We need to keep track of how many total steps we have iterated over
overall_step = 0
# We also need to keep track of the starting epoch so files are named properly
starting_epoch = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None and args.resume_from_checkpoint != "":
accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
accelerator.load_state(args.resume_from_checkpoint)
path = os.path.basename(args.resume_from_checkpoint)
else:
# Get the most recent checkpoint
dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
dirs.sort(key=os.path.getctime)
path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
training_difference = os.path.splitext(path)[0]
if "epoch" in training_difference:
starting_epoch = int(training_difference.replace("epoch_", "")) + 1
resume_step = None
else:
resume_step = int(training_difference.replace("step_", ""))
starting_epoch = resume_step // len(train_dataloader)
resume_step -= starting_epoch * len(train_dataloader)
# Now we train the model
for epoch in range(starting_epoch, num_epochs):
model.train()
if args.with_tracking:
total_loss = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
active_dataloader = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch = {k: v.to(accelerator.device) for k, v in batch.items()}
inputs = (batch["image"] - mean) / std
outputs = model(inputs)
loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(loss)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(checkpointing_steps, int):
output_dir = f"step_{overall_step}"
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)
accelerator.save_state(output_dir)
model.eval()
accurate = 0
num_elems = 0
for step, batch in enumerate(eval_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch = {k: v.to(accelerator.device) for k, v in batch.items()}
inputs = (batch["image"] - mean) / std
with torch.no_grad():
outputs = model(inputs)
predictions = outputs.argmax(dim=-1)
predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
accurate_preds = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
eval_metric = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(train_dataloader),
"epoch": epoch,
},
step=overall_step,
)
if checkpointing_steps == "epoch":
output_dir = f"epoch_{epoch}"
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)
accelerator.save_state(output_dir)
accelerator.end_training()
def main():
parser = argparse.ArgumentParser(description="Simple example of training script.")
parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
parser.add_argument(
"--mixed_precision",
type=str,
default=None,
choices=["no", "fp16", "bf16", "fp8"],
help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.",
)
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
parser.add_argument(
"--checkpointing_steps",
type=str,
default=None,
help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
)
parser.add_argument(
"--output_dir",
type=str,
default=".",
help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
)
parser.add_argument(
"--resume_from_checkpoint",
type=str,
default=None,
help="If the training should continue from a checkpoint folder.",
)
parser.add_argument(
"--use_stateful_dataloader",
action="store_true",
help="If the dataloader should be a resumable stateful dataloader.",
)
parser.add_argument(
"--with_tracking",
action="store_true",
help="Whether to load in all available experiment trackers from the environment and use them for logging.",
)
parser.add_argument(
"--project_dir",
type=str,
default="logs",
help="Location on where to store experiment tracking logs` and relevent project information",
)
args = parser.parse_args()
config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
training_function(config, args)
if __name__ == "__main__":
main()
| 4 |
0 | hf_public_repos/accelerate | hf_public_repos/accelerate/examples/nlp_example.py | # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
"""
Creates a set of `DataLoader`s for the `glue` dataset,
using "bert-base-cased" as the tokenizer.
Args:
accelerator (`Accelerator`):
An `Accelerator` object
batch_size (`int`, *optional*):
The batch size for the train and validation DataLoaders.
"""
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
datasets = load_dataset("glue", "mrpc")
def tokenize_function(examples):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
remove_columns=["idx", "sentence1", "sentence2"],
)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
def collate_fn(examples):
        # For TorchXLA, it's best to pad everything to the same length or training will be very slow.
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
pad_to_multiple_of = 16
elif accelerator.mixed_precision != "no":
pad_to_multiple_of = 8
else:
pad_to_multiple_of = None
return tokenizer.pad(
examples,
padding="longest",
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors="pt",
)
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
)
eval_dataloader = DataLoader(
tokenized_datasets["validation"],
shuffle=False,
collate_fn=collate_fn,
batch_size=EVAL_BATCH_SIZE,
drop_last=(accelerator.mixed_precision == "fp8"),
)
return train_dataloader, eval_dataloader
def training_function(config, args):
# Initialize accelerator
accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config["lr"]
num_epochs = int(config["num_epochs"])
seed = int(config["seed"])
batch_size = int(config["batch_size"])
metric = evaluate.load("glue", "mrpc")
# If the batch size is too big we use gradient accumulation
gradient_accumulation_steps = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA:
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
batch_size = MAX_GPU_BATCH_SIZE
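        # e.g. a requested batch size of 64 becomes 4 micro-batches of 16 with gradients
        # accumulated across them, keeping the effective batch size unchanged.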
set_seed(seed)
train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
model = model.to(accelerator.device)
# Instantiate optimizer
optimizer = AdamW(params=model.parameters(), lr=lr)
# Instantiate scheduler
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=100,
num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
# Now we train the model
for epoch in range(num_epochs):
model.train()
for step, batch in enumerate(train_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
outputs = model(**batch)
loss = outputs.loss
loss = loss / gradient_accumulation_steps
accelerator.backward(loss)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(eval_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=predictions,
references=references,
)
eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:", eval_metric)
accelerator.end_training()
def main():
parser = argparse.ArgumentParser(description="Simple example of training script.")
parser.add_argument(
"--mixed_precision",
type=str,
default=None,
choices=["no", "fp16", "bf16", "fp8"],
help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.",
)
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
args = parser.parse_args()
config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(config, args)
if __name__ == "__main__":
main()
| 5 |
0 | hf_public_repos/accelerate | hf_public_repos/accelerate/examples/cv_example.py | # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a ResNet50 on the Oxford-IIT Pet Dataset
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Function to get the label from the filename
def extract_label(fname):
stem = fname.split(os.path.sep)[-1]
return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class PetsDataset(Dataset):
def __init__(self, file_names, image_transform=None, label_to_id=None):
self.file_names = file_names
self.image_transform = image_transform
self.label_to_id = label_to_id
def __len__(self):
return len(self.file_names)
def __getitem__(self, idx):
fname = self.file_names[idx]
raw_image = PIL.Image.open(fname)
image = raw_image.convert("RGB")
if self.image_transform is not None:
image = self.image_transform(image)
label = extract_label(fname)
if self.label_to_id is not None:
label = self.label_to_id[label]
return {"image": image, "label": label}
def training_function(config, args):
# Initialize accelerator
accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config["lr"]
num_epochs = int(config["num_epochs"])
seed = int(config["seed"])
batch_size = int(config["batch_size"])
image_size = config["image_size"]
if not isinstance(image_size, (list, tuple)):
image_size = (image_size, image_size)
# Grab all the image filenames
file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]
# Build the label correspondences
all_labels = [extract_label(fname) for fname in file_names]
id_to_label = list(set(all_labels))
id_to_label.sort()
label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}
# Set the seed before splitting the data.
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# Split our filenames between train and validation
random_perm = np.random.permutation(len(file_names))
cut = int(0.8 * len(file_names))
train_split = random_perm[:cut]
eval_split = random_perm[cut:]
# For training we use a simple RandomResizedCrop
train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
train_dataset = PetsDataset(
[file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
)
# For evaluation, we use a deterministic Resize
eval_tfm = Compose([Resize(image_size), ToTensor()])
eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)
# Instantiate dataloaders.
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
model = model.to(accelerator.device)
# Freezing the base model
for param in model.parameters():
param.requires_grad = False
for param in model.get_classifier().parameters():
param.requires_grad = True
# We normalize the batches of images to be a bit faster.
mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)
# Instantiate optimizer
optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)
# Instantiate learning rate scheduler
lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
# Now we train the model
for epoch in range(num_epochs):
model.train()
for step, batch in enumerate(train_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch = {k: v.to(accelerator.device) for k, v in batch.items()}
inputs = (batch["image"] - mean) / std
outputs = model(inputs)
loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
accelerator.backward(loss)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
accurate = 0
num_elems = 0
for _, batch in enumerate(eval_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch = {k: v.to(accelerator.device) for k, v in batch.items()}
inputs = (batch["image"] - mean) / std
with torch.no_grad():
outputs = model(inputs)
predictions = outputs.argmax(dim=-1)
predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
accurate_preds = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
eval_metric = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
accelerator.end_training()
def main():
parser = argparse.ArgumentParser(description="Simple example of training script.")
parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
parser.add_argument(
"--mixed_precision",
type=str,
default=None,
choices=["no", "fp16", "bf16", "fp8"],
help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.",
)
parser.add_argument(
"--checkpointing_steps",
type=str,
default=None,
help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
)
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
args = parser.parse_args()
config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
training_function(config, args)
if __name__ == "__main__":
main()
| 6 |
0 | hf_public_repos/accelerate | hf_public_repos/accelerate/examples/requirements.txt | accelerate # used to be installed in Amazon SageMaker environment
evaluate
datasets==2.3.2
schedulefree
huggingface_hub>=0.20.0
| 7 |
0 | hf_public_repos/accelerate | hf_public_repos/accelerate/examples/multigpu_remote_launcher.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import runhouse as rh
import torch
from nlp_example import training_function
from accelerate.utils import PrepareForLaunch, patch_environment
def launch_train(*args):
num_processes = torch.cuda.device_count()
print(f"Device count: {num_processes}")
with patch_environment(
world_size=num_processes, master_addr="127.0.0.1", master_port="29500", mixed_precision=args[1].mixed_precision
):
launcher = PrepareForLaunch(training_function, distributed_type="MULTI_GPU")
torch.multiprocessing.start_processes(launcher, args=args, nprocs=num_processes, start_method="spawn")
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/main/rh_primitives/cluster.html#hardware-setup
# for cloud access setup instructions (if using on-demand hardware), and for API specifications.
# on-demand GPU
# gpu = rh.cluster(name='rh-cluster', instance_type='V100:1', provider='cheapest', use_spot=False) # single GPU
gpu = rh.cluster(name="rh-cluster", instance_type="V100:4", provider="cheapest", use_spot=False) # multi GPU
gpu.up_if_not()
# on-prem GPU
# gpu = rh.cluster(
# ips=["ip_addr"], ssh_creds={ssh_user:"<username>", ssh_private_key:"<key_path>"}, name="rh-cluster"
# )
# Set up remote function
reqs = [
"pip:./",
"transformers",
"datasets",
"evaluate",
"tqdm",
"scipy",
"scikit-learn",
"tensorboard",
"torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117",
]
launch_train_gpu = rh.function(fn=launch_train, system=gpu, reqs=reqs, name="train_bert_glue")
# Define train args/config, run train function
train_args = argparse.Namespace(cpu=False, mixed_precision="fp16")
config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
launch_train_gpu(config, train_args, stream_logs=True)
# Alternatively, we can just run as instructed in the README (but only because there's already a wrapper CLI):
# gpu.install_packages(reqs)
# gpu.run(['accelerate launch --multi_gpu accelerate/examples/nlp_example.py'])
| 8 |
0 | hf_public_repos/accelerate | hf_public_repos/accelerate/examples/complete_nlp_example.py | # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DataLoaderConfiguration, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# This example also demonstrates the checkpointing and sharding capabilities
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def training_function(config, args):
# Initialize accelerator
dataloader_config = DataLoaderConfiguration(use_stateful_dataloader=args.use_stateful_dataloader)
if args.with_tracking:
accelerator = Accelerator(
cpu=args.cpu,
mixed_precision=args.mixed_precision,
dataloader_config=dataloader_config,
log_with="all",
project_dir=args.project_dir,
)
else:
accelerator = Accelerator(
cpu=args.cpu, mixed_precision=args.mixed_precision, dataloader_config=dataloader_config
)
if hasattr(args.checkpointing_steps, "isdigit"):
if args.checkpointing_steps == "epoch":
checkpointing_steps = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
checkpointing_steps = int(args.checkpointing_steps)
else:
raise ValueError(
f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
)
else:
checkpointing_steps = None
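# A worked sketch of the parsing above (hypothetical CLI values):
#   --checkpointing_steps 500   -> checkpointing_steps = 500 (save every 500 steps)
#   --checkpointing_steps epoch -> checkpointing_steps = "epoch" (save once per epoch)
#   flag omitted                -> checkpointing_steps = None (never save)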
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config["lr"]
num_epochs = int(config["num_epochs"])
seed = int(config["seed"])
batch_size = int(config["batch_size"])
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
run = os.path.split(__file__)[-1].split(".")[0]
accelerator.init_trackers(run, config)
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
datasets = load_dataset("glue", "mrpc")
metric = evaluate.load("glue", "mrpc")
def tokenize_function(examples):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
remove_columns=["idx", "sentence1", "sentence2"],
)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
# If the batch size is too big we use gradient accumulation
gradient_accumulation_steps = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA:
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
batch_size = MAX_GPU_BATCH_SIZE
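# Worked example (hypothetical values): with batch_size = 64 and MAX_GPU_BATCH_SIZE = 16
# we get gradient_accumulation_steps = 64 // 16 = 4, so each optimizer step accumulates
# gradients over 4 micro-batches of 16, keeping the effective batch size at 64.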
def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
pad_to_multiple_of = 16
elif accelerator.mixed_precision != "no":
pad_to_multiple_of = 8
else:
pad_to_multiple_of = None
return tokenizer.pad(
examples,
padding="longest",
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors="pt",
)
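# Padding sketch (illustrative): with fp16, pad_to_multiple_of = 8, so a batch whose longest
# sequence has 37 tokens is padded to 40 tokens; with fp8 the multiple is 16. Tensor-core
# kernels are fastest on such round multiples.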
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
)
eval_dataloader = DataLoader(
tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
)
set_seed(seed)
# Instantiate the model (we build the model here so that the seed also controls new weight initialization)
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation, otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
model = model.to(accelerator.device)
# Instantiate optimizer
optimizer = AdamW(params=model.parameters(), lr=lr)
# Instantiate scheduler
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=100,
num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
)
# Prepare everything
# There is no specific order to remember; we just need to unpack the objects in the same order we gave them to the
# prepare method.
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
# We need to keep track of how many total steps we have iterated over
overall_step = 0
# We also need to keep track of the starting epoch so files are named properly
starting_epoch = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None and args.resume_from_checkpoint != "":
accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
accelerator.load_state(args.resume_from_checkpoint)
path = os.path.basename(args.resume_from_checkpoint)
else:
# Get the most recent checkpoint
dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
dirs.sort(key=os.path.getctime)
path = dirs[-1] # Sorts folders by creation time, so the most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
training_difference = os.path.splitext(path)[0]
if "epoch" in training_difference:
starting_epoch = int(training_difference.replace("epoch_", "")) + 1
resume_step = None
else:
resume_step = int(training_difference.replace("step_", ""))
starting_epoch = resume_step // len(train_dataloader)
resume_step -= starting_epoch * len(train_dataloader)
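# Worked example (hypothetical numbers): resuming from "step_2500" with
# len(train_dataloader) == 1000 gives starting_epoch = 2500 // 1000 = 2 and
# resume_step = 2500 - 2 * 1000 = 500, i.e. we skip the first 500 batches of epoch 2.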
# Now we train the model
for epoch in range(starting_epoch, num_epochs):
model.train()
if args.with_tracking:
total_loss = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
if not args.use_stateful_dataloader:
active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
else:
active_dataloader = train_dataloader
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
active_dataloader = train_dataloader
for step, batch in enumerate(active_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
outputs = model(**batch)
loss = outputs.loss
loss = loss / gradient_accumulation_steps
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(loss)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(checkpointing_steps, int):
output_dir = f"step_{overall_step}"
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)
accelerator.save_state(output_dir)
model.eval()
for step, batch in enumerate(eval_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=predictions,
references=references,
)
eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:", eval_metric)
if args.with_tracking:
accelerator.log(
{
"accuracy": eval_metric["accuracy"],
"f1": eval_metric["f1"],
"train_loss": total_loss.item() / len(train_dataloader),
"epoch": epoch,
},
step=epoch,
)
if checkpointing_steps == "epoch":
output_dir = f"epoch_{epoch}"
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)
accelerator.save_state(output_dir)
accelerator.end_training()
def main():
parser = argparse.ArgumentParser(description="Simple example of training script.")
parser.add_argument(
"--mixed_precision",
type=str,
default=None,
choices=["no", "fp16", "bf16", "fp8"],
help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.",
)
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
parser.add_argument(
"--checkpointing_steps",
type=str,
default=None,
help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
)
parser.add_argument(
"--resume_from_checkpoint",
type=str,
default=None,
help="If the training should continue from a checkpoint folder.",
)
parser.add_argument(
"--use_stateful_dataloader",
action="store_true",
help="If the dataloader should be a resumable stateful dataloader.",
)
parser.add_argument(
"--with_tracking",
action="store_true",
help="Whether to load in all available experiment trackers from the environment and use them for logging.",
)
parser.add_argument(
"--output_dir",
type=str,
default=".",
help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
)
parser.add_argument(
"--project_dir",
type=str,
default="logs",
help="Location on where to store experiment tracking logs` and relevent project information",
)
args = parser.parse_args()
config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(config, args)
if __name__ == "__main__":
main()
| 9 |
0 | hf_public_repos/candle/candle-transformers/src | hf_public_repos/candle/candle-transformers/src/models/bert.rs | //! BERT (Bidirectional Encoder Representations from Transformers)
//!
//! Bert is a general-purpose transformer encoder model that can be used for various language tasks:
//! - Compute sentence embeddings for a prompt.
//! - Compute similarities between a set of sentences.
//! - [Arxiv](https://arxiv.org/abs/1810.04805) "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding"
//! - Upstream [Github repo](https://github.com/google-research/bert).
//! - See bert in [candle-examples](https://github.com/huggingface/candle/tree/main/candle-examples/) for runnable code
//!
use super::with_tracing::{layer_norm, linear, LayerNorm, Linear};
use candle::{DType, Device, Result, Tensor};
use candle_nn::{embedding, Embedding, Module, VarBuilder};
use serde::Deserialize;
pub const DTYPE: DType = DType::F32;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum HiddenAct {
Gelu,
GeluApproximate,
Relu,
}
struct HiddenActLayer {
act: HiddenAct,
span: tracing::Span,
}
impl HiddenActLayer {
fn new(act: HiddenAct) -> Self {
let span = tracing::span!(tracing::Level::TRACE, "hidden-act");
Self { act, span }
}
fn forward(&self, xs: &Tensor) -> candle::Result<Tensor> {
let _enter = self.span.enter();
match self.act {
// https://github.com/huggingface/transformers/blob/cd4584e3c809bb9e1392ccd3fe38b40daba5519a/src/transformers/activations.py#L213
HiddenAct::Gelu => xs.gelu_erf(),
HiddenAct::GeluApproximate => xs.gelu(),
HiddenAct::Relu => xs.relu(),
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Default)]
#[serde(rename_all = "lowercase")]
enum PositionEmbeddingType {
#[default]
Absolute,
}
// https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/configuration_bert.py#L1
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct Config {
vocab_size: usize,
hidden_size: usize,
num_hidden_layers: usize,
num_attention_heads: usize,
intermediate_size: usize,
pub hidden_act: HiddenAct,
hidden_dropout_prob: f64,
max_position_embeddings: usize,
type_vocab_size: usize,
initializer_range: f64,
layer_norm_eps: f64,
pad_token_id: usize,
#[serde(default)]
position_embedding_type: PositionEmbeddingType,
#[serde(default)]
use_cache: bool,
classifier_dropout: Option<f64>,
model_type: Option<String>,
}
impl Default for Config {
fn default() -> Self {
Self {
vocab_size: 30522,
hidden_size: 768,
num_hidden_layers: 12,
num_attention_heads: 12,
intermediate_size: 3072,
hidden_act: HiddenAct::Gelu,
hidden_dropout_prob: 0.1,
max_position_embeddings: 512,
type_vocab_size: 2,
initializer_range: 0.02,
layer_norm_eps: 1e-12,
pad_token_id: 0,
position_embedding_type: PositionEmbeddingType::Absolute,
use_cache: true,
classifier_dropout: None,
model_type: Some("bert".to_string()),
}
}
}
impl Config {
fn _all_mini_lm_l6_v2() -> Self {
// https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2/blob/main/config.json
Self {
vocab_size: 30522,
hidden_size: 384,
num_hidden_layers: 6,
num_attention_heads: 12,
intermediate_size: 1536,
hidden_act: HiddenAct::Gelu,
hidden_dropout_prob: 0.1,
max_position_embeddings: 512,
type_vocab_size: 2,
initializer_range: 0.02,
layer_norm_eps: 1e-12,
pad_token_id: 0,
position_embedding_type: PositionEmbeddingType::Absolute,
use_cache: true,
classifier_dropout: None,
model_type: Some("bert".to_string()),
}
}
}
struct Dropout {
#[allow(dead_code)]
pr: f64,
}
impl Dropout {
fn new(pr: f64) -> Self {
Self { pr }
}
}
impl Module for Dropout {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
// TODO: apply dropout during training; for now this is an inference-only no-op.
Ok(x.clone())
}
}
// https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L180
struct BertEmbeddings {
word_embeddings: Embedding,
position_embeddings: Option<Embedding>,
token_type_embeddings: Embedding,
layer_norm: LayerNorm,
dropout: Dropout,
span: tracing::Span,
}
impl BertEmbeddings {
fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
let word_embeddings = embedding(
config.vocab_size,
config.hidden_size,
vb.pp("word_embeddings"),
)?;
let position_embeddings = embedding(
config.max_position_embeddings,
config.hidden_size,
vb.pp("position_embeddings"),
)?;
let token_type_embeddings = embedding(
config.type_vocab_size,
config.hidden_size,
vb.pp("token_type_embeddings"),
)?;
let layer_norm = layer_norm(
config.hidden_size,
config.layer_norm_eps,
vb.pp("LayerNorm"),
)?;
Ok(Self {
word_embeddings,
position_embeddings: Some(position_embeddings),
token_type_embeddings,
layer_norm,
dropout: Dropout::new(config.hidden_dropout_prob),
span: tracing::span!(tracing::Level::TRACE, "embeddings"),
})
}
fn forward(&self, input_ids: &Tensor, token_type_ids: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let (_bsize, seq_len) = input_ids.dims2()?;
let input_embeddings = self.word_embeddings.forward(input_ids)?;
let token_type_embeddings = self.token_type_embeddings.forward(token_type_ids)?;
let mut embeddings = (&input_embeddings + token_type_embeddings)?;
if let Some(position_embeddings) = &self.position_embeddings {
// TODO: Proper absolute positions?
let position_ids = (0..seq_len as u32).collect::<Vec<_>>();
let position_ids = Tensor::new(&position_ids[..], input_ids.device())?;
embeddings = embeddings.broadcast_add(&position_embeddings.forward(&position_ids)?)?
}
let embeddings = self.layer_norm.forward(&embeddings)?;
let embeddings = self.dropout.forward(&embeddings)?;
Ok(embeddings)
}
}
struct BertSelfAttention {
query: Linear,
key: Linear,
value: Linear,
dropout: Dropout,
num_attention_heads: usize,
attention_head_size: usize,
span: tracing::Span,
span_softmax: tracing::Span,
}
impl BertSelfAttention {
fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
let attention_head_size = config.hidden_size / config.num_attention_heads;
let all_head_size = config.num_attention_heads * attention_head_size;
let dropout = Dropout::new(config.hidden_dropout_prob);
let hidden_size = config.hidden_size;
let query = linear(hidden_size, all_head_size, vb.pp("query"))?;
let value = linear(hidden_size, all_head_size, vb.pp("value"))?;
let key = linear(hidden_size, all_head_size, vb.pp("key"))?;
Ok(Self {
query,
key,
value,
dropout,
num_attention_heads: config.num_attention_heads,
attention_head_size,
span: tracing::span!(tracing::Level::TRACE, "self-attn"),
span_softmax: tracing::span!(tracing::Level::TRACE, "softmax"),
})
}
fn transpose_for_scores(&self, xs: &Tensor) -> Result<Tensor> {
let mut new_x_shape = xs.dims().to_vec();
new_x_shape.pop();
new_x_shape.push(self.num_attention_heads);
new_x_shape.push(self.attention_head_size);
let xs = xs.reshape(new_x_shape.as_slice())?.transpose(1, 2)?;
xs.contiguous()
}
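// Shape sketch (illustrative, assuming hidden_size = 768 and 12 attention heads):
// (batch, seq_len, 768) -> reshape -> (batch, seq_len, 12, 64)
// -> transpose(1, 2) -> (batch, 12, seq_len, 64),
// so each head attends over the sequence with its own 64-dim slice.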
fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let query_layer = self.query.forward(hidden_states)?;
let key_layer = self.key.forward(hidden_states)?;
let value_layer = self.value.forward(hidden_states)?;
let query_layer = self.transpose_for_scores(&query_layer)?;
let key_layer = self.transpose_for_scores(&key_layer)?;
let value_layer = self.transpose_for_scores(&value_layer)?;
let attention_scores = query_layer.matmul(&key_layer.t()?)?;
let attention_scores = (attention_scores / (self.attention_head_size as f64).sqrt())?;
let attention_scores = attention_scores.broadcast_add(attention_mask)?;
let attention_probs = {
let _enter_sm = self.span_softmax.enter();
candle_nn::ops::softmax(&attention_scores, candle::D::Minus1)?
};
let attention_probs = self.dropout.forward(&attention_probs)?;
let context_layer = attention_probs.matmul(&value_layer)?;
let context_layer = context_layer.transpose(1, 2)?.contiguous()?;
let context_layer = context_layer.flatten_from(candle::D::Minus2)?;
Ok(context_layer)
}
}
struct BertSelfOutput {
dense: Linear,
layer_norm: LayerNorm,
dropout: Dropout,
span: tracing::Span,
}
impl BertSelfOutput {
fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
let dense = linear(config.hidden_size, config.hidden_size, vb.pp("dense"))?;
let layer_norm = layer_norm(
config.hidden_size,
config.layer_norm_eps,
vb.pp("LayerNorm"),
)?;
let dropout = Dropout::new(config.hidden_dropout_prob);
Ok(Self {
dense,
layer_norm,
dropout,
span: tracing::span!(tracing::Level::TRACE, "self-out"),
})
}
fn forward(&self, hidden_states: &Tensor, input_tensor: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let hidden_states = self.dense.forward(hidden_states)?;
let hidden_states = self.dropout.forward(&hidden_states)?;
self.layer_norm.forward(&(hidden_states + input_tensor)?)
}
}
// https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L392
struct BertAttention {
self_attention: BertSelfAttention,
self_output: BertSelfOutput,
span: tracing::Span,
}
impl BertAttention {
fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
let self_attention = BertSelfAttention::load(vb.pp("self"), config)?;
let self_output = BertSelfOutput::load(vb.pp("output"), config)?;
Ok(Self {
self_attention,
self_output,
span: tracing::span!(tracing::Level::TRACE, "attn"),
})
}
fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let self_outputs = self.self_attention.forward(hidden_states, attention_mask)?;
let attention_output = self.self_output.forward(&self_outputs, hidden_states)?;
Ok(attention_output)
}
}
// https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L441
struct BertIntermediate {
dense: Linear,
intermediate_act: HiddenActLayer,
span: tracing::Span,
}
impl BertIntermediate {
fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
let dense = linear(config.hidden_size, config.intermediate_size, vb.pp("dense"))?;
Ok(Self {
dense,
intermediate_act: HiddenActLayer::new(config.hidden_act),
span: tracing::span!(tracing::Level::TRACE, "inter"),
})
}
}
impl Module for BertIntermediate {
fn forward(&self, hidden_states: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let hidden_states = self.dense.forward(hidden_states)?;
let ys = self.intermediate_act.forward(&hidden_states)?;
Ok(ys)
}
}
// https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L456
struct BertOutput {
dense: Linear,
layer_norm: LayerNorm,
dropout: Dropout,
span: tracing::Span,
}
impl BertOutput {
fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
let dense = linear(config.intermediate_size, config.hidden_size, vb.pp("dense"))?;
let layer_norm = layer_norm(
config.hidden_size,
config.layer_norm_eps,
vb.pp("LayerNorm"),
)?;
let dropout = Dropout::new(config.hidden_dropout_prob);
Ok(Self {
dense,
layer_norm,
dropout,
span: tracing::span!(tracing::Level::TRACE, "out"),
})
}
fn forward(&self, hidden_states: &Tensor, input_tensor: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let hidden_states = self.dense.forward(hidden_states)?;
let hidden_states = self.dropout.forward(&hidden_states)?;
self.layer_norm.forward(&(hidden_states + input_tensor)?)
}
}
// https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L470
struct BertLayer {
attention: BertAttention,
intermediate: BertIntermediate,
output: BertOutput,
span: tracing::Span,
}
impl BertLayer {
fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
let attention = BertAttention::load(vb.pp("attention"), config)?;
let intermediate = BertIntermediate::load(vb.pp("intermediate"), config)?;
let output = BertOutput::load(vb.pp("output"), config)?;
Ok(Self {
attention,
intermediate,
output,
span: tracing::span!(tracing::Level::TRACE, "layer"),
})
}
fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let attention_output = self.attention.forward(hidden_states, attention_mask)?;
// TODO: Support cross-attention?
// https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L523
// TODO: Support something similar to `apply_chunking_to_forward`?
let intermediate_output = self.intermediate.forward(&attention_output)?;
let layer_output = self
.output
.forward(&intermediate_output, &attention_output)?;
Ok(layer_output)
}
}
// https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L556
struct BertEncoder {
layers: Vec<BertLayer>,
span: tracing::Span,
}
impl BertEncoder {
fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
let layers = (0..config.num_hidden_layers)
.map(|index| BertLayer::load(vb.pp(format!("layer.{index}")), config))
.collect::<Result<Vec<_>>>()?;
let span = tracing::span!(tracing::Level::TRACE, "encoder");
Ok(BertEncoder { layers, span })
}
fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let mut hidden_states = hidden_states.clone();
// Use a loop rather than a fold as it's easier to modify when adding debug/...
for layer in self.layers.iter() {
hidden_states = layer.forward(&hidden_states, attention_mask)?
}
Ok(hidden_states)
}
}
// https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L874
pub struct BertModel {
embeddings: BertEmbeddings,
encoder: BertEncoder,
pub device: Device,
span: tracing::Span,
}
impl BertModel {
pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
let (embeddings, encoder) = match (
BertEmbeddings::load(vb.pp("embeddings"), config),
BertEncoder::load(vb.pp("encoder"), config),
) {
(Ok(embeddings), Ok(encoder)) => (embeddings, encoder),
(Err(err), _) | (_, Err(err)) => {
if let Some(model_type) = &config.model_type {
if let (Ok(embeddings), Ok(encoder)) = (
BertEmbeddings::load(vb.pp(format!("{model_type}.embeddings")), config),
BertEncoder::load(vb.pp(format!("{model_type}.encoder")), config),
) {
(embeddings, encoder)
} else {
return Err(err);
}
} else {
return Err(err);
}
}
};
Ok(Self {
embeddings,
encoder,
device: vb.device().clone(),
span: tracing::span!(tracing::Level::TRACE, "model"),
})
}
pub fn forward(
&self,
input_ids: &Tensor,
token_type_ids: &Tensor,
attention_mask: Option<&Tensor>,
) -> Result<Tensor> {
let _enter = self.span.enter();
let embedding_output = self.embeddings.forward(input_ids, token_type_ids)?;
let attention_mask = match attention_mask {
Some(attention_mask) => attention_mask.clone(),
None => input_ids.ones_like()?,
};
// https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L995
let attention_mask = get_extended_attention_mask(&attention_mask, DType::F32)?;
let sequence_output = self.encoder.forward(&embedding_output, &attention_mask)?;
Ok(sequence_output)
}
}
fn get_extended_attention_mask(attention_mask: &Tensor, dtype: DType) -> Result<Tensor> {
let attention_mask = match attention_mask.rank() {
3 => attention_mask.unsqueeze(1)?,
2 => attention_mask.unsqueeze(1)?.unsqueeze(1)?,
_ => candle::bail!("Wrong shape for input_ids or attention_mask"),
};
let attention_mask = attention_mask.to_dtype(dtype)?;
// torch.finfo(dtype).min
(attention_mask.ones_like()? - &attention_mask)?
.broadcast_mul(&Tensor::try_from(f32::MIN)?.to_device(attention_mask.device())?)
}
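// Mask sketch (illustrative): a padding mask [1, 1, 0] of shape (batch, seq) becomes
// shape (batch, 1, 1, seq) holding [0.0, 0.0, f32::MIN]; adding it to the attention
// scores leaves real tokens untouched and pushes padded positions towards -inf
// before the softmax.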
//https://github.com/huggingface/transformers/blob/1bd604d11c405dfb8b78bda4062d88fc75c17de0/src/transformers/models/bert/modeling_bert.py#L752-L766
struct BertPredictionHeadTransform {
dense: Linear,
activation: HiddenActLayer,
layer_norm: LayerNorm,
}
impl BertPredictionHeadTransform {
fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
let dense = linear(config.hidden_size, config.hidden_size, vb.pp("dense"))?;
let activation = HiddenActLayer::new(config.hidden_act);
let layer_norm = layer_norm(
config.hidden_size,
config.layer_norm_eps,
vb.pp("LayerNorm"),
)?;
Ok(Self {
dense,
activation,
layer_norm,
})
}
}
impl Module for BertPredictionHeadTransform {
fn forward(&self, hidden_states: &Tensor) -> Result<Tensor> {
let hidden_states = self
.activation
.forward(&self.dense.forward(hidden_states)?)?;
self.layer_norm.forward(&hidden_states)
}
}
// https://github.com/huggingface/transformers/blob/1bd604d11c405dfb8b78bda4062d88fc75c17de0/src/transformers/models/bert/modeling_bert.py#L769C1-L790C1
pub struct BertLMPredictionHead {
transform: BertPredictionHeadTransform,
decoder: Linear,
}
impl BertLMPredictionHead {
pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
let transform = BertPredictionHeadTransform::load(vb.pp("transform"), config)?;
let decoder = linear(config.hidden_size, config.vocab_size, vb.pp("decoder"))?;
Ok(Self { transform, decoder })
}
}
impl Module for BertLMPredictionHead {
fn forward(&self, hidden_states: &Tensor) -> Result<Tensor> {
self.decoder
.forward(&self.transform.forward(hidden_states)?)
}
}
// https://github.com/huggingface/transformers/blob/1bd604d11c405dfb8b78bda4062d88fc75c17de0/src/transformers/models/bert/modeling_bert.py#L792
pub struct BertOnlyMLMHead {
predictions: BertLMPredictionHead,
}
impl BertOnlyMLMHead {
pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
let predictions = BertLMPredictionHead::load(vb.pp("predictions"), config)?;
Ok(Self { predictions })
}
}
impl Module for BertOnlyMLMHead {
fn forward(&self, sequence_output: &Tensor) -> Result<Tensor> {
self.predictions.forward(sequence_output)
}
}
pub struct BertForMaskedLM {
bert: BertModel,
cls: BertOnlyMLMHead,
}
impl BertForMaskedLM {
pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> {
let bert = BertModel::load(vb.pp("bert"), config)?;
let cls = BertOnlyMLMHead::load(vb.pp("cls"), config)?;
Ok(Self { bert, cls })
}
pub fn forward(
&self,
input_ids: &Tensor,
token_type_ids: &Tensor,
attention_mask: Option<&Tensor>,
) -> Result<Tensor> {
let sequence_output = self
.bert
.forward(input_ids, token_type_ids, attention_mask)?;
self.cls.forward(&sequence_output)
}
}
| 0 |
0 | hf_public_repos/candle/candle-transformers/src | hf_public_repos/candle/candle-transformers/src/models/llama.rs | //! Llama inference implementation.
//!
//! See ["LLaMA: Open and Efficient Foundation Language Models"](https://arxiv.org/abs/2302.13971)
//!
//! Implementation based on Hugging Face's [transformers](https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py)
use super::with_tracing::{linear_no_bias as linear, Linear, RmsNorm};
use candle::{DType, Device, IndexOp, Result, Tensor, D};
use candle_nn::{embedding, Embedding, Module, VarBuilder};
use std::{collections::HashMap, f32::consts::PI};
pub const DEFAULT_MAX_SEQ_LEN: usize = 4096;
#[derive(Debug, Clone, serde::Deserialize, Default)]
pub enum Llama3RopeType {
#[serde(rename = "llama3")]
Llama3,
#[default]
#[serde(rename = "default")]
Default,
}
#[derive(Debug, Clone, serde::Deserialize, Default)]
pub struct Llama3RopeConfig {
pub factor: f32,
pub low_freq_factor: f32,
pub high_freq_factor: f32,
pub original_max_position_embeddings: usize,
pub rope_type: Llama3RopeType,
}
#[derive(Debug, Clone, serde::Deserialize)]
#[serde(untagged)]
pub enum LlamaEosToks {
Single(u32),
Multiple(Vec<u32>),
}
#[derive(Debug, Clone, serde::Deserialize)]
pub struct LlamaConfig {
pub hidden_size: usize,
pub intermediate_size: usize,
pub vocab_size: usize,
pub num_hidden_layers: usize,
pub num_attention_heads: usize,
pub num_key_value_heads: Option<usize>,
pub rms_norm_eps: f64,
#[serde(default = "default_rope")]
pub rope_theta: f32,
pub bos_token_id: Option<u32>,
pub eos_token_id: Option<LlamaEosToks>,
pub rope_scaling: Option<Llama3RopeConfig>,
pub max_position_embeddings: usize,
pub tie_word_embeddings: Option<bool>,
}
impl LlamaConfig {
pub fn num_key_value_heads(&self) -> usize {
self.num_key_value_heads.unwrap_or(self.num_attention_heads)
}
}
fn default_rope() -> f32 {
10_000.0
}
impl LlamaConfig {
pub fn into_config(self, use_flash_attn: bool) -> Config {
Config {
hidden_size: self.hidden_size,
intermediate_size: self.intermediate_size,
vocab_size: self.vocab_size,
num_hidden_layers: self.num_hidden_layers,
num_attention_heads: self.num_attention_heads,
num_key_value_heads: self.num_key_value_heads(),
rms_norm_eps: self.rms_norm_eps,
rope_theta: self.rope_theta,
use_flash_attn,
bos_token_id: self.bos_token_id,
eos_token_id: self.eos_token_id,
rope_scaling: self.rope_scaling,
max_position_embeddings: self.max_position_embeddings,
tie_word_embeddings: self.tie_word_embeddings.unwrap_or(false),
}
}
}
#[derive(Debug, Clone)]
pub struct Config {
pub hidden_size: usize,
pub intermediate_size: usize,
pub vocab_size: usize,
pub num_hidden_layers: usize,
pub num_attention_heads: usize,
pub num_key_value_heads: usize,
pub use_flash_attn: bool,
pub rms_norm_eps: f64,
pub rope_theta: f32,
pub bos_token_id: Option<u32>,
pub eos_token_id: Option<LlamaEosToks>,
pub rope_scaling: Option<Llama3RopeConfig>,
pub max_position_embeddings: usize,
pub tie_word_embeddings: bool,
}
impl Config {
pub fn config_7b_v1(use_flash_attn: bool) -> Self {
Self {
hidden_size: 4096,
intermediate_size: 11008,
vocab_size: 32000,
num_hidden_layers: 32,
num_attention_heads: 32,
num_key_value_heads: 32,
use_flash_attn,
rms_norm_eps: 1e-6,
rope_theta: 10_000.0,
bos_token_id: None,
eos_token_id: None,
rope_scaling: None,
max_position_embeddings: DEFAULT_MAX_SEQ_LEN,
tie_word_embeddings: false,
}
}
pub fn config_7b_v2(use_flash_attn: bool) -> Self {
Self {
hidden_size: 4096,
intermediate_size: 11008,
vocab_size: 32000,
num_hidden_layers: 32,
num_attention_heads: 32,
num_key_value_heads: 32,
use_flash_attn,
rms_norm_eps: 1e-5,
rope_theta: 10_000.0,
bos_token_id: None,
eos_token_id: None,
rope_scaling: None,
max_position_embeddings: DEFAULT_MAX_SEQ_LEN,
tie_word_embeddings: false,
}
}
}
#[derive(Debug, Clone)]
pub struct Cache {
masks: HashMap<usize, Tensor>,
pub use_kv_cache: bool,
kvs: Vec<Option<(Tensor, Tensor)>>,
cos: Tensor,
sin: Tensor,
device: Device,
}
fn calculate_default_inv_freq(cfg: &Config) -> Vec<f32> {
let head_dim = cfg.hidden_size / cfg.num_attention_heads;
(0..head_dim)
.step_by(2)
.map(|i| 1f32 / cfg.rope_theta.powf(i as f32 / head_dim as f32))
.collect()
}
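// Frequency sketch (illustrative, head_dim = 128, rope_theta = 10_000):
// inv_freq = 1 / 10_000^(i / 128) for i = 0, 2, ..., 126, decaying from 1.0 down to
// roughly 1.15e-4, so each pair of head dimensions rotates at its own speed.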
impl Cache {
pub fn new(use_kv_cache: bool, dtype: DType, config: &Config, device: &Device) -> Result<Self> {
// precompute freqs_cis
let theta = match &config.rope_scaling {
None
| Some(Llama3RopeConfig {
rope_type: Llama3RopeType::Default,
..
}) => calculate_default_inv_freq(config),
Some(rope_scaling) => {
let low_freq_wavelen = rope_scaling.original_max_position_embeddings as f32
/ rope_scaling.low_freq_factor;
let high_freq_wavelen = rope_scaling.original_max_position_embeddings as f32
/ rope_scaling.high_freq_factor;
calculate_default_inv_freq(config)
.into_iter()
.map(|freq| {
let wavelen = 2. * PI / freq;
if wavelen < high_freq_wavelen {
freq
} else if wavelen > low_freq_wavelen {
freq / rope_scaling.factor
} else {
let smooth = (rope_scaling.original_max_position_embeddings as f32
/ wavelen
- rope_scaling.low_freq_factor)
/ (rope_scaling.high_freq_factor - rope_scaling.low_freq_factor);
(1. - smooth) * freq / rope_scaling.factor + smooth * freq
}
})
.collect::<Vec<_>>()
}
};
let theta = Tensor::new(theta, device)?;
let idx_theta = Tensor::arange(0, config.max_position_embeddings as u32, device)?
.to_dtype(DType::F32)?
.reshape((config.max_position_embeddings, 1))?
.matmul(&theta.reshape((1, theta.elem_count()))?)?;
// This is different from the paper, see:
// https://github.com/huggingface/transformers/blob/6112b1c6442aaf7affd2b0676a1cd4eee30c45cf/src/transformers/models/llama/modeling_llama.py#L112
let cos = idx_theta.cos()?.to_dtype(dtype)?;
let sin = idx_theta.sin()?.to_dtype(dtype)?;
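// Shape note (illustrative): idx_theta has shape (max_position_embeddings, head_dim / 2),
// e.g. (4096, 64) for the 7b configs, and cos/sin share that shape; apply_rotary_emb
// later narrows out the rows [index_pos, index_pos + seq_len) from these tables.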
Ok(Self {
masks: HashMap::new(),
use_kv_cache,
kvs: vec![None; config.num_hidden_layers],
device: device.clone(),
cos,
sin,
})
}
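// Causal mask sketch (illustrative): for t = 3 the flattened mask built below is
// [0, 1, 1,
//  0, 0, 1,
//  0, 0, 0]
// where 1 marks "future" positions (j > i) that masked_fill later sets to -inf.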
fn mask(&mut self, t: usize) -> Result<Tensor> {
if let Some(mask) = self.masks.get(&t) {
Ok(mask.clone())
} else {
let mask: Vec<_> = (0..t)
.flat_map(|i| (0..t).map(move |j| u8::from(j > i)))
.collect();
let mask = Tensor::from_slice(&mask, (t, t), &self.device)?;
self.masks.insert(t, mask.clone());
Ok(mask)
}
}
}
#[derive(Debug, Clone)]
struct CausalSelfAttention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
o_proj: Linear,
num_attention_heads: usize,
num_key_value_heads: usize,
head_dim: usize,
use_flash_attn: bool,
span: tracing::Span,
span_rot: tracing::Span,
max_position_embeddings: usize,
}
#[cfg(feature = "flash-attn")]
fn flash_attn(
q: &Tensor,
k: &Tensor,
v: &Tensor,
softmax_scale: f32,
causal: bool,
) -> Result<Tensor> {
candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal)
}
#[cfg(not(feature = "flash-attn"))]
fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> {
unimplemented!("compile with '--features flash-attn'")
}
impl CausalSelfAttention {
fn apply_rotary_emb(&self, x: &Tensor, index_pos: usize, cache: &Cache) -> Result<Tensor> {
let _enter = self.span_rot.enter();
let (_b_sz, _, seq_len, _hidden_size) = x.dims4()?;
let cos = cache.cos.narrow(0, index_pos, seq_len)?;
let sin = cache.sin.narrow(0, index_pos, seq_len)?;
candle_nn::rotary_emb::rope(x, &cos, &sin)
}
fn forward(
&self,
x: &Tensor,
index_pos: usize,
block_idx: usize,
cache: &mut Cache,
) -> Result<Tensor> {
let _enter = self.span.enter();
let (b_sz, seq_len, hidden_size) = x.dims3()?;
let q = self.q_proj.forward(x)?;
let k = self.k_proj.forward(x)?;
let v = self.v_proj.forward(x)?;
let q = q
.reshape((b_sz, seq_len, self.num_attention_heads, self.head_dim))?
.transpose(1, 2)?
.contiguous()?;
let k = k
.reshape((b_sz, seq_len, self.num_key_value_heads, self.head_dim))?
.transpose(1, 2)?
.contiguous()?;
let mut v = v
.reshape((b_sz, seq_len, self.num_key_value_heads, self.head_dim))?
.transpose(1, 2)?;
let q = self.apply_rotary_emb(&q, index_pos, cache)?;
let mut k = self.apply_rotary_emb(&k, index_pos, cache)?;
if cache.use_kv_cache {
if let Some((cache_k, cache_v)) = &cache.kvs[block_idx] {
k = Tensor::cat(&[cache_k, &k], 2)?.contiguous()?;
v = Tensor::cat(&[cache_v, &v], 2)?.contiguous()?;
let k_seq_len = k.dims()[2];
if k_seq_len > self.max_position_embeddings {
k = k
.narrow(
2,
k_seq_len - self.max_position_embeddings,
self.max_position_embeddings,
)?
.contiguous()?
}
let v_seq_len = v.dims()[2];
if v_seq_len > self.max_position_embeddings {
v = v
.narrow(
2,
v_seq_len - self.max_position_embeddings,
self.max_position_embeddings,
)?
.contiguous()?
}
}
cache.kvs[block_idx] = Some((k.clone(), v.clone()))
}
let k = self.repeat_kv(k)?;
let v = self.repeat_kv(v)?;
let y = if self.use_flash_attn {
// flash-attn expects (b_sz, seq_len, nheads, head_dim)
let q = q.transpose(1, 2)?;
let k = k.transpose(1, 2)?;
let v = v.transpose(1, 2)?;
let softmax_scale = 1f32 / (self.head_dim as f32).sqrt();
flash_attn(&q, &k, &v, softmax_scale, seq_len > 1)?.transpose(1, 2)?
} else {
let in_dtype = q.dtype();
let q = q.to_dtype(DType::F32)?;
let k = k.to_dtype(DType::F32)?;
let v = v.to_dtype(DType::F32)?;
let att = (q.matmul(&k.t()?)? / (self.head_dim as f64).sqrt())?;
let att = if seq_len == 1 {
att
} else {
let mask = cache.mask(seq_len)?.broadcast_as(att.shape())?;
masked_fill(&att, &mask, f32::NEG_INFINITY)?
};
let att = candle_nn::ops::softmax_last_dim(&att)?;
// Convert to contiguous as matmul doesn't support strided vs for now.
att.matmul(&v.contiguous()?)?.to_dtype(in_dtype)?
};
let y = y.transpose(1, 2)?.reshape(&[b_sz, seq_len, hidden_size])?;
let y = self.o_proj.forward(&y)?;
Ok(y)
}
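// Grouped-query attention sketch (illustrative): with e.g. 32 attention heads and
// 8 key/value heads, repeat_kv tiles each kv head 32 / 8 = 4 times so the
// (b, 8, seq, head_dim) k/v tensors line up with the (b, 32, seq, head_dim) queries.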
fn repeat_kv(&self, x: Tensor) -> Result<Tensor> {
crate::utils::repeat_kv(x, self.num_attention_heads / self.num_key_value_heads)
}
fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let span = tracing::span!(tracing::Level::TRACE, "attn");
let span_rot = tracing::span!(tracing::Level::TRACE, "attn-rot");
let size_in = cfg.hidden_size;
let size_q = (cfg.hidden_size / cfg.num_attention_heads) * cfg.num_attention_heads;
let size_kv = (cfg.hidden_size / cfg.num_attention_heads) * cfg.num_key_value_heads;
let q_proj = linear(size_in, size_q, vb.pp("q_proj"))?;
let k_proj = linear(size_in, size_kv, vb.pp("k_proj"))?;
let v_proj = linear(size_in, size_kv, vb.pp("v_proj"))?;
let o_proj = linear(size_q, size_in, vb.pp("o_proj"))?;
Ok(Self {
q_proj,
k_proj,
v_proj,
o_proj,
num_attention_heads: cfg.num_attention_heads,
num_key_value_heads: cfg.num_key_value_heads,
head_dim: cfg.hidden_size / cfg.num_attention_heads,
use_flash_attn: cfg.use_flash_attn,
span,
span_rot,
max_position_embeddings: cfg.max_position_embeddings,
})
}
}
fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> {
let shape = mask.shape();
let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?;
let m = mask.where_cond(&on_true, on_false)?;
Ok(m)
}
#[derive(Debug, Clone)]
struct Mlp {
c_fc1: Linear,
c_fc2: Linear,
c_proj: Linear,
span: tracing::Span,
}
impl Mlp {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let x = (candle_nn::ops::silu(&self.c_fc1.forward(x)?)? * self.c_fc2.forward(x)?)?;
self.c_proj.forward(&x)
}
fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let span = tracing::span!(tracing::Level::TRACE, "mlp");
let h_size = cfg.hidden_size;
let i_size = cfg.intermediate_size;
let c_fc1 = linear(h_size, i_size, vb.pp("gate_proj"))?;
let c_fc2 = linear(h_size, i_size, vb.pp("up_proj"))?;
let c_proj = linear(i_size, h_size, vb.pp("down_proj"))?;
Ok(Self {
c_fc1,
c_fc2,
c_proj,
span,
})
}
}
#[derive(Debug, Clone)]
struct Block {
rms_1: RmsNorm,
attn: CausalSelfAttention,
rms_2: RmsNorm,
mlp: Mlp,
span: tracing::Span,
}
impl Block {
fn forward(
&self,
x: &Tensor,
index_pos: usize,
block_idx: usize,
cache: &mut Cache,
) -> Result<Tensor> {
let _enter = self.span.enter();
let residual = x;
let x = self.rms_1.forward(x)?;
let x = (self.attn.forward(&x, index_pos, block_idx, cache)? + residual)?;
let residual = &x;
let x = (self.mlp.forward(&self.rms_2.forward(&x)?)? + residual)?;
Ok(x)
}
fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let span = tracing::span!(tracing::Level::TRACE, "block");
let attn = CausalSelfAttention::load(vb.pp("self_attn"), cfg)?;
let mlp = Mlp::load(vb.pp("mlp"), cfg)?;
let rms_1 = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?;
let rms_2 = RmsNorm::new(
cfg.hidden_size,
cfg.rms_norm_eps,
vb.pp("post_attention_layernorm"),
)?;
Ok(Self {
rms_1,
attn,
rms_2,
mlp,
span,
})
}
}
#[derive(Debug, Clone)]
pub struct Llama {
wte: Embedding,
blocks: Vec<Block>,
ln_f: RmsNorm,
lm_head: Linear,
}
impl Llama {
// required by LLaVA
pub fn embed(&self, x: &Tensor) -> Result<Tensor> {
self.wte.forward(x)
}
// required by LLaVA
pub fn forward_input_embed(
&self,
input_embed: &Tensor,
index_pos: usize,
cache: &mut Cache,
) -> Result<Tensor> {
let (_, seq_len, _) = input_embed.dims3()?;
let mut x = input_embed.clone();
for (block_idx, block) in self.blocks.iter().enumerate() {
x = block.forward(&x, index_pos, block_idx, cache)?;
}
let x = self.ln_f.forward(&x)?;
let x = x.i((.., seq_len - 1, ..))?.contiguous()?;
let logits = self.lm_head.forward(&x)?;
logits.to_dtype(DType::F32)
}
pub fn forward(&self, x: &Tensor, index_pos: usize, cache: &mut Cache) -> Result<Tensor> {
let (_b_sz, seq_len) = x.dims2()?;
let mut x = self.wte.forward(x)?;
for (block_idx, block) in self.blocks.iter().enumerate() {
x = block.forward(&x, index_pos, block_idx, cache)?;
}
let x = self.ln_f.forward(&x)?;
let x = x.i((.., seq_len - 1, ..))?.contiguous()?;
let logits = self.lm_head.forward(&x)?;
logits.to_dtype(DType::F32)
}
pub fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let wte = embedding(cfg.vocab_size, cfg.hidden_size, vb.pp("model.embed_tokens"))?;
let lm_head = if cfg.tie_word_embeddings {
Linear::from_weights(wte.embeddings().clone(), None)
} else {
linear(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?
};
let ln_f = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("model.norm"))?;
let blocks: Vec<_> = (0..cfg.num_hidden_layers)
.map(|i| Block::load(vb.pp(format!("model.layers.{i}")), cfg).unwrap())
.collect();
Ok(Self {
wte,
blocks,
ln_f,
lm_head,
})
}
}
| 1 |
0 | hf_public_repos/candle/candle-transformers/src | hf_public_repos/candle/candle-transformers/src/models/vit.rs | //! Vision Transformer (ViT) implementation.
//!
//! Vision Transformer applies transformer architecture to image classification
//! by splitting images into patches and processing them as a sequence.
//!
//! Key characteristics:
//! - Image patches as sequence tokens
//! - Self-attention between patches
//! - Position embeddings
//! - CLS token for classification
//! - Layer normalization
//!
//! References:
//! - [ViT Paper](https://arxiv.org/abs/2010.11929)
//! - [Model Card](https://huggingface.co/google/vit-base-patch16-224)
//!
use crate::models::with_tracing::{conv2d, linear, linear_no_bias, Conv2d, Linear};
use candle::{IndexOp, Module, Result, Tensor, D};
use candle_nn::{layer_norm, LayerNorm, VarBuilder};
// https://github.com/huggingface/transformers/blob/main/src/transformers/models/vit/configuration_vit.py
#[derive(Debug, Clone, serde::Deserialize)]
pub struct Config {
pub hidden_size: usize,
pub num_hidden_layers: usize,
pub num_attention_heads: usize,
pub intermediate_size: usize,
pub hidden_act: candle_nn::Activation,
pub layer_norm_eps: f64,
pub image_size: usize,
pub patch_size: usize,
pub num_channels: usize,
pub qkv_bias: bool,
}
impl Config {
// https://huggingface.co/google/vit-base-patch16-224/blob/main/config.json
pub fn vit_base_patch16_224() -> Self {
Self {
hidden_size: 768,
num_hidden_layers: 12,
num_attention_heads: 12,
intermediate_size: 3072,
hidden_act: candle_nn::Activation::Gelu,
layer_norm_eps: 1e-12,
image_size: 224,
patch_size: 16,
num_channels: 3,
qkv_bias: true,
}
}
pub fn microsoft_trocr_base_handwritten() -> Self {
Self {
hidden_size: 768,
num_hidden_layers: 12,
num_attention_heads: 12,
intermediate_size: 3072,
hidden_act: candle_nn::Activation::Gelu,
layer_norm_eps: 1e-12,
image_size: 384,
patch_size: 16,
num_channels: 3,
qkv_bias: false,
}
}
}
#[derive(Debug, Clone)]
struct PatchEmbeddings {
num_patches: usize,
projection: Conv2d,
}
impl PatchEmbeddings {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let image_size = cfg.image_size;
let patch_size = cfg.patch_size;
let num_patches = (image_size / patch_size) * (image_size / patch_size);
let conv_cfg = candle_nn::Conv2dConfig {
stride: patch_size,
..Default::default()
};
let projection = conv2d(
cfg.num_channels,
cfg.hidden_size,
patch_size,
conv_cfg,
vb.pp("projection"),
)?;
Ok(Self {
num_patches,
projection,
})
}
}
impl Module for PatchEmbeddings {
fn forward(&self, pixel_values: &Tensor) -> Result<Tensor> {
let (_b_size, _num_channels, _height, _width) = pixel_values.dims4()?;
self.projection
.forward(pixel_values)?
.flatten_from(2)?
.transpose(1, 2)
}
}
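// Patch sketch (illustrative, vit_base_patch16_224): a (b, 3, 224, 224) image passes
// through the stride-16 conv to give (b, 768, 14, 14); flatten_from(2) then
// transpose(1, 2) yields (b, 196, 768), i.e. 14 * 14 = 196 patch tokens of 768 dims each.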
#[derive(Debug, Clone)]
pub struct Embeddings {
cls_token: Tensor,
mask_token: Option<Tensor>,
patch_embeddings: PatchEmbeddings,
position_embeddings: Tensor,
hidden_size: usize,
}
impl Embeddings {
pub fn new(cfg: &Config, use_mask_token: bool, vb: VarBuilder) -> Result<Self> {
let hidden_size = cfg.hidden_size;
let cls_token = vb.get((1, 1, hidden_size), "cls_token")?;
let mask_token = if use_mask_token {
Some(vb.get((1, 1, hidden_size), "mask_token")?)
} else {
None
};
let patch_embeddings = PatchEmbeddings::new(cfg, vb.pp("patch_embeddings"))?;
let num_patches = patch_embeddings.num_patches;
let position_embeddings =
vb.get((1, num_patches + 1, hidden_size), "position_embeddings")?;
Ok(Self {
cls_token,
mask_token,
patch_embeddings,
position_embeddings,
hidden_size,
})
}
fn interpolate_pos_encoding(
&self,
_embeddings: &Tensor,
_height: usize,
_width: usize,
) -> Result<Tensor> {
todo!()
}
pub fn forward(
&self,
pixel_values: &Tensor,
bool_masked_pos: Option<&Tensor>,
interpolate_pos_encoding: bool,
) -> Result<Tensor> {
let (b_size, _num_channels, height, width) = pixel_values.dims4()?;
let embeddings = self.patch_embeddings.forward(pixel_values)?;
let embeddings = match (bool_masked_pos, &self.mask_token) {
(None, _) => embeddings,
(Some(_), None) => candle::bail!("bool_masked_pos set without mask_token"),
(Some(bool_masked_pos), Some(mask_tokens)) => {
let seq_len = embeddings.dim(1)?;
let mask_tokens = mask_tokens.broadcast_as((b_size, seq_len, self.hidden_size))?;
let mask = bool_masked_pos
.unsqueeze(D::Minus1)?
.to_dtype(mask_tokens.dtype())?;
((mask_tokens * &mask)? - (embeddings * (mask - 1.)?)?)?
}
};
let cls_tokens = self.cls_token.broadcast_as((b_size, 1, self.hidden_size))?;
let embeddings = Tensor::cat(&[&cls_tokens, &embeddings], 1)?;
if interpolate_pos_encoding {
let pos = self.interpolate_pos_encoding(&embeddings, height, width)?;
embeddings.broadcast_add(&pos)
} else {
embeddings.broadcast_add(&self.position_embeddings)
}
}
}
#[derive(Debug, Clone)]
struct SelfAttention {
query: Linear,
key: Linear,
value: Linear,
num_attention_heads: usize,
attention_head_size: usize,
}
impl SelfAttention {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let attention_head_size = cfg.hidden_size / cfg.num_attention_heads;
let num_attention_heads = cfg.num_attention_heads;
let all_head_size = num_attention_heads * attention_head_size;
let linear = |name| {
if cfg.qkv_bias {
linear(cfg.hidden_size, all_head_size, vb.pp(name))
} else {
linear_no_bias(cfg.hidden_size, all_head_size, vb.pp(name))
}
};
let query = linear("query")?;
let key = linear("key")?;
let value = linear("value")?;
Ok(Self {
query,
key,
value,
num_attention_heads,
attention_head_size,
})
}
fn transpose_for_scores(&self, xs: &Tensor) -> Result<Tensor> {
let (b_size, seq_len, _) = xs.dims3()?;
xs.reshape((
b_size,
seq_len,
self.num_attention_heads,
self.attention_head_size,
))?
.permute((0, 2, 1, 3))
}
}
impl Module for SelfAttention {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let query = self.query.forward(xs)?;
let key = self.key.forward(xs)?;
let value = self.value.forward(xs)?;
let query = self.transpose_for_scores(&query)?.contiguous()?;
let key = self.transpose_for_scores(&key)?.contiguous()?;
let value = self.transpose_for_scores(&value)?.contiguous()?;
let attention_scores =
(query.matmul(&key.t()?)? / f64::sqrt(self.attention_head_size as f64))?;
let attention_probs = candle_nn::ops::softmax_last_dim(&attention_scores)?;
attention_probs
.matmul(&value)?
.permute((0, 2, 1, 3))?
.contiguous()?
.flatten_from(D::Minus2)
}
}
#[derive(Debug, Clone)]
struct SelfOutput {
dense: Linear,
}
impl SelfOutput {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let dense = linear(cfg.hidden_size, cfg.hidden_size, vb.pp("dense"))?;
Ok(Self { dense })
}
}
impl Module for SelfOutput {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.dense)
}
}
#[derive(Debug, Clone)]
struct Attention {
attention: SelfAttention,
output: SelfOutput,
}
impl Attention {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let attention = SelfAttention::new(cfg, vb.pp("attention"))?;
let output = SelfOutput::new(cfg, vb.pp("output"))?;
Ok(Self { attention, output })
}
}
impl Module for Attention {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.attention)?.apply(&self.output)
}
}
#[derive(Debug, Clone)]
struct Intermediate {
dense: Linear,
intermediate_act_fn: candle_nn::Activation,
}
impl Intermediate {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let dense = linear(cfg.hidden_size, cfg.intermediate_size, vb.pp("dense"))?;
Ok(Self {
dense,
intermediate_act_fn: cfg.hidden_act,
})
}
}
impl Module for Intermediate {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.dense)?.apply(&self.intermediate_act_fn)
}
}
#[derive(Debug, Clone)]
struct Output {
dense: Linear,
}
impl Output {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let dense = linear(cfg.intermediate_size, cfg.hidden_size, vb.pp("dense"))?;
Ok(Self { dense })
}
fn forward(&self, xs: &Tensor, input_tensor: &Tensor) -> Result<Tensor> {
xs.apply(&self.dense)? + input_tensor
}
}
#[derive(Debug, Clone)]
struct Layer {
attention: Attention,
intermediate: Intermediate,
output: Output,
layernorm_before: LayerNorm,
layernorm_after: LayerNorm,
}
impl Layer {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let attention = Attention::new(cfg, vb.pp("attention"))?;
let intermediate = Intermediate::new(cfg, vb.pp("intermediate"))?;
let output = Output::new(cfg, vb.pp("output"))?;
let h_sz = cfg.hidden_size;
let layernorm_before = layer_norm(h_sz, cfg.layer_norm_eps, vb.pp("layernorm_before"))?;
let layernorm_after = layer_norm(h_sz, cfg.layer_norm_eps, vb.pp("layernorm_after"))?;
Ok(Self {
attention,
intermediate,
output,
layernorm_after,
layernorm_before,
})
}
}
impl Module for Layer {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let xs = (xs.apply(&self.layernorm_before)?.apply(&self.attention)? + xs)?;
let ys = xs.apply(&self.layernorm_after)?.apply(&self.intermediate)?;
self.output.forward(&ys, &xs)
}
}
#[derive(Debug, Clone)]
pub struct Encoder {
layers: Vec<Layer>,
}
impl Encoder {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb = vb.pp("layer");
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
for i in 0..cfg.num_hidden_layers {
let layer = Layer::new(cfg, vb.pp(i))?;
layers.push(layer)
}
Ok(Self { layers })
}
}
impl Module for Encoder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let mut xs = xs.clone();
for layer in self.layers.iter() {
xs = xs.apply(layer)?
}
Ok(xs)
}
}
#[derive(Debug, Clone)]
pub struct Model {
embeddings: Embeddings,
encoder: Encoder,
layernorm: LayerNorm,
// no need for pooling layer for image classification
classifier: Linear,
}
impl Model {
pub fn new(cfg: &Config, num_labels: usize, vb: VarBuilder) -> Result<Self> {
let vb_v = vb.pp("vit");
let embeddings = Embeddings::new(cfg, false, vb_v.pp("embeddings"))?;
let encoder = Encoder::new(cfg, vb_v.pp("encoder"))?;
let layernorm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb_v.pp("layernorm"))?;
let classifier = linear(cfg.hidden_size, num_labels, vb.pp("classifier"))?;
Ok(Self {
embeddings,
encoder,
layernorm,
classifier,
})
}
pub fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let embedding_output = self.embeddings.forward(xs, None, false)?;
let encoder_outputs = self.encoder.forward(&embedding_output)?;
encoder_outputs
.i((.., 0, ..))?
.apply(&self.layernorm)?
.apply(&self.classifier)
}
}
| 2 |
0 | hf_public_repos/candle/candle-transformers/src | hf_public_repos/candle/candle-transformers/src/models/gemma.rs | //! Gemma inference implementation.
//!
//! See ["Gemma: Open Models Based on Gemini Technology"](https://blog.google/technology/developers/gemma-open-ai-model/)
//!
//! Based on implementation from Google and PyTorch
use std::sync::Arc;
use candle::{DType, Device, Module, Result, Tensor, D};
use candle_nn::{linear_b as linear, Activation, Linear, VarBuilder};
fn default_max_position_embeddings() -> usize {
4096
}
#[derive(serde::Deserialize, Debug, Clone)]
pub struct Config {
pub attention_bias: bool,
pub head_dim: usize,
// The code gemma configs include both hidden_act and hidden_activation.
pub hidden_act: Option<Activation>,
pub hidden_activation: Option<Activation>,
pub hidden_size: usize,
pub intermediate_size: usize,
pub num_attention_heads: usize,
pub num_hidden_layers: usize,
pub num_key_value_heads: usize,
pub rms_norm_eps: f64,
pub rope_theta: f64,
pub vocab_size: usize,
#[serde(default = "default_max_position_embeddings")]
pub max_position_embeddings: usize,
}
impl Config {
fn hidden_act(&self) -> Result<Activation> {
match (self.hidden_act, self.hidden_activation) {
(None, Some(act)) | (Some(act), None) => Ok(act),
(Some(_), Some(_)) => candle::bail!("both hidden_act and hidden_activation are set"),
            (None, None) => candle::bail!("neither hidden_act nor hidden_activation is set"),
}
}
}
#[derive(Debug, Clone)]
struct RmsNorm {
weight: Tensor,
eps: f64,
}
impl RmsNorm {
fn new(dim: usize, eps: f64, vb: VarBuilder) -> Result<Self> {
let weight = vb.get(dim, "weight")?;
Ok(Self { weight, eps })
}
}
impl Module for RmsNorm {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let x_dtype = x.dtype();
let internal_dtype = match x_dtype {
DType::F16 | DType::BF16 => DType::F32,
d => d,
};
let hidden_size = x.dim(D::Minus1)?;
let x = x.to_dtype(internal_dtype)?;
let norm_x = (x.sqr()?.sum_keepdim(D::Minus1)? / hidden_size as f64)?;
let x_normed = x.broadcast_div(&(norm_x + self.eps)?.sqrt()?)?;
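        // Gemma applies a (1 + weight) scale rather than weight alone: the stored
        // RMSNorm weight is zero-centered in the reference checkpoints.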
x_normed
.to_dtype(x_dtype)?
.broadcast_mul(&(&self.weight + 1.0)?)
}
}
#[derive(Debug, Clone)]
struct RotaryEmbedding {
sin: Tensor,
cos: Tensor,
}
impl RotaryEmbedding {
fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> {
let dim = cfg.head_dim;
let max_seq_len = cfg.max_position_embeddings;
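        // Standard RoPE inverse frequencies: inv_freq[i] = theta^(-2i / dim).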
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / dim as f64) as f32)
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
.to_dtype(dtype)?
.reshape((max_seq_len, 1))?;
let freqs = t.matmul(&inv_freq)?;
Ok(Self {
sin: freqs.sin()?,
cos: freqs.cos()?,
})
}
fn apply_rotary_emb_qkv(
&self,
q: &Tensor,
k: &Tensor,
seqlen_offset: usize,
) -> Result<(Tensor, Tensor)> {
let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
let cos = self.cos.narrow(0, seqlen_offset, seq_len)?;
let sin = self.sin.narrow(0, seqlen_offset, seq_len)?;
let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &cos, &sin)?;
let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &cos, &sin)?;
Ok((q_embed, k_embed))
}
}
#[derive(Debug, Clone)]
#[allow(clippy::upper_case_acronyms)]
struct MLP {
gate_proj: Linear,
up_proj: Linear,
down_proj: Linear,
act_fn: candle_nn::Activation,
}
impl MLP {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let intermediate_sz = cfg.intermediate_size;
let gate_proj = linear(hidden_sz, intermediate_sz, false, vb.pp("gate_proj"))?;
let up_proj = linear(hidden_sz, intermediate_sz, false, vb.pp("up_proj"))?;
let down_proj = linear(intermediate_sz, hidden_sz, false, vb.pp("down_proj"))?;
Ok(Self {
gate_proj,
up_proj,
down_proj,
act_fn: cfg.hidden_act()?,
})
}
}
impl Module for MLP {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let lhs = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?;
let rhs = xs.apply(&self.up_proj)?;
(lhs * rhs)?.apply(&self.down_proj)
}
}
#[derive(Debug, Clone)]
struct Attention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
o_proj: Linear,
num_heads: usize,
num_kv_heads: usize,
num_kv_groups: usize,
head_dim: usize,
rotary_emb: Arc<RotaryEmbedding>,
kv_cache: Option<(Tensor, Tensor)>,
use_flash_attn: bool,
}
impl Attention {
fn new(
rotary_emb: Arc<RotaryEmbedding>,
use_flash_attn: bool,
cfg: &Config,
vb: VarBuilder,
) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let num_heads = cfg.num_attention_heads;
let num_kv_heads = cfg.num_key_value_heads;
let num_kv_groups = num_heads / num_kv_heads;
let head_dim = cfg.head_dim;
let bias = cfg.attention_bias;
let q_proj = linear(hidden_sz, num_heads * head_dim, bias, vb.pp("q_proj"))?;
let k_proj = linear(hidden_sz, num_kv_heads * head_dim, bias, vb.pp("k_proj"))?;
let v_proj = linear(hidden_sz, num_kv_heads * head_dim, bias, vb.pp("v_proj"))?;
let o_proj = linear(num_heads * head_dim, hidden_sz, bias, vb.pp("o_proj"))?;
Ok(Self {
q_proj,
k_proj,
v_proj,
o_proj,
num_heads,
num_kv_heads,
num_kv_groups,
head_dim,
rotary_emb,
kv_cache: None,
use_flash_attn,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let (b_sz, q_len, _) = xs.dims3()?;
let query_states = self.q_proj.forward(xs)?;
let key_states = self.k_proj.forward(xs)?;
let value_states = self.v_proj.forward(xs)?;
let query_states = query_states
.reshape((b_sz, q_len, self.num_heads, self.head_dim))?
.transpose(1, 2)?;
let key_states = key_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let value_states = value_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let (query_states, key_states) =
self.rotary_emb
.apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?;
let (key_states, value_states) = match &self.kv_cache {
None => (key_states, value_states),
Some((prev_k, prev_v)) => {
let key_states = Tensor::cat(&[prev_k, &key_states], 2)?;
let value_states = Tensor::cat(&[prev_v, &value_states], 2)?;
(key_states, value_states)
}
};
self.kv_cache = Some((key_states.clone(), value_states.clone()));
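        // Grouped-query attention: each KV head serves `num_kv_groups` query heads,
        // so replicate the K/V tensors to match the number of query heads.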
let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?.contiguous()?;
let value_states =
crate::utils::repeat_kv(value_states, self.num_kv_groups)?.contiguous()?;
let attn_output = if self.use_flash_attn {
// flash-attn expects (b_sz, seq_len, nheads, head_dim)
let q = query_states.transpose(1, 2)?;
let k = key_states.transpose(1, 2)?;
let v = value_states.transpose(1, 2)?;
let scale = 1f32 / (self.head_dim as f32).sqrt();
flash_attn(&q, &k, &v, scale, attention_mask.is_some())?.transpose(1, 2)?
} else {
let scale = 1f64 / f64::sqrt(self.head_dim as f64);
let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?;
let attn_weights = match attention_mask {
None => attn_weights,
Some(mask) => attn_weights.broadcast_add(mask)?,
};
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
attn_weights.matmul(&value_states)?
};
attn_output
.transpose(1, 2)?
.reshape((b_sz, q_len, ()))?
.apply(&self.o_proj)
}
fn clear_kv_cache(&mut self) {
self.kv_cache = None
}
}
#[cfg(feature = "flash-attn")]
fn flash_attn(
q: &Tensor,
k: &Tensor,
v: &Tensor,
softmax_scale: f32,
causal: bool,
) -> Result<Tensor> {
candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal)
}
#[cfg(not(feature = "flash-attn"))]
fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> {
unimplemented!("compile with '--features flash-attn'")
}
#[derive(Debug, Clone)]
struct DecoderLayer {
self_attn: Attention,
mlp: MLP,
input_layernorm: RmsNorm,
post_attention_layernorm: RmsNorm,
}
impl DecoderLayer {
fn new(
rotary_emb: Arc<RotaryEmbedding>,
use_flash_attn: bool,
cfg: &Config,
vb: VarBuilder,
) -> Result<Self> {
let self_attn = Attention::new(rotary_emb, use_flash_attn, cfg, vb.pp("self_attn"))?;
let mlp = MLP::new(cfg, vb.pp("mlp"))?;
let input_layernorm =
RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?;
let post_attention_layernorm = RmsNorm::new(
cfg.hidden_size,
cfg.rms_norm_eps,
vb.pp("post_attention_layernorm"),
)?;
Ok(Self {
self_attn,
mlp,
input_layernorm,
post_attention_layernorm,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let residual = xs;
let xs = self.input_layernorm.forward(xs)?;
let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?;
residual + xs
}
fn clear_kv_cache(&mut self) {
self.self_attn.clear_kv_cache()
}
}
#[derive(Debug, Clone)]
pub struct Model {
embed_tokens: candle_nn::Embedding,
layers: Vec<DecoderLayer>,
norm: RmsNorm,
lm_head: Linear,
device: Device,
dtype: DType,
hidden_size: usize,
}
impl Model {
pub fn new(use_flash_attn: bool, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb_m = vb.pp("model");
let embed_tokens =
candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?;
let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?);
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
let vb_l = vb_m.pp("layers");
for layer_idx in 0..cfg.num_hidden_layers {
let layer =
DecoderLayer::new(rotary_emb.clone(), use_flash_attn, cfg, vb_l.pp(layer_idx))?;
layers.push(layer)
}
let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?;
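        // The LM head ties its weights to the token embedding matrix.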
let lm_head = Linear::new(embed_tokens.embeddings().clone(), None);
Ok(Self {
embed_tokens,
layers,
norm,
lm_head,
device: vb.device().clone(),
dtype: vb.dtype(),
hidden_size: cfg.hidden_size,
})
}
pub fn embed_tokens(&self) -> &candle_nn::Embedding {
&self.embed_tokens
}
fn prepare_decoder_attention_mask(
&self,
b_size: usize,
tgt_len: usize,
seqlen_offset: usize,
) -> Result<Tensor> {
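        // Causal mask: position i may attend to positions j <= i; entries above
        // the diagonal are -inf so they vanish after the softmax. Previously cached
        // positions (`seqlen_offset`) are always visible, hence the zero prefix.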
let mask: Vec<_> = (0..tgt_len)
.flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. }))
.collect();
let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?;
let mask = if seqlen_offset > 0 {
let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?;
Tensor::cat(&[&mask0, &mask], D::Minus1)?
} else {
mask
};
mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))?
.to_dtype(self.dtype)
}
pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
let (b_size, seq_len) = input_ids.dims2()?;
let attention_mask = if seq_len <= 1 {
None
} else {
let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?;
Some(mask)
};
let xs = self.embed_tokens.forward(input_ids)?;
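        // Gemma scales the token embeddings by sqrt(hidden_size) before the first layer.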
let mut xs = (xs * (self.hidden_size as f64).sqrt())?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)?
}
xs.narrow(1, seq_len - 1, 1)?
.apply(&self.norm)?
.apply(&self.lm_head)
}
pub fn forward_embeds(
&mut self,
xs: &Tensor,
attn_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let (_, seq_len, _) = xs.dims3()?;
let mut xs = (xs * (self.hidden_size as f64).sqrt())?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, attn_mask, seqlen_offset)?
}
xs.narrow(1, seq_len - 1, 1)?
.apply(&self.norm)?
.apply(&self.lm_head)
}
// Forward the model and return the hidden states without the lm_head
pub fn forward_embeds_without_projection(
&mut self,
xs: &Tensor,
attn_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let (_, _, _) = xs.dims3()?;
let mut xs = (xs * (self.hidden_size as f64).sqrt())?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, attn_mask, seqlen_offset)?
}
Ok(xs)
}
pub fn clear_kv_cache(&mut self) {
for layer in self.layers.iter_mut() {
layer.clear_kv_cache()
}
}
}
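// Example (sketch): a single decoding step, assuming `model: Model` and tokenized
// `input_ids: Tensor` of shape (batch, seq_len) are built elsewhere. The returned
// logits cover only the last position.
//
//     let logits = model.forward(&input_ids, 0)?; // (batch, 1, vocab_size)
//     model.clear_kv_cache();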
| 3 |
0 | hf_public_repos/candle/candle-transformers/src | hf_public_repos/candle/candle-transformers/src/models/with_tracing.rs | use candle::{Module, Result, Tensor};
use candle_nn::VarBuilder;
#[derive(Debug, Clone)]
pub struct Embedding {
inner: candle_nn::Embedding,
span: tracing::Span,
}
impl Embedding {
pub fn new(d1: usize, d2: usize, vb: VarBuilder) -> Result<Self> {
let inner = candle_nn::embedding(d1, d2, vb)?;
let span = tracing::span!(tracing::Level::TRACE, "embedding");
Ok(Self { inner, span })
}
pub fn from_weights(weights: Tensor) -> Result<Self> {
let (_in_size, out_size) = weights.dims2()?;
let inner = candle_nn::Embedding::new(weights, out_size);
let span = tracing::span!(tracing::Level::TRACE, "embedding");
Ok(Self { inner, span })
}
pub fn embeddings(&self) -> &Tensor {
self.inner.embeddings()
}
}
impl Module for Embedding {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
self.inner.forward(xs)
}
}
#[derive(Debug, Clone)]
pub struct Linear {
inner: candle_nn::Linear,
span: tracing::Span,
}
impl Linear {
pub fn from_weights(weights: Tensor, bias: Option<Tensor>) -> Self {
let inner = candle_nn::Linear::new(weights, bias);
let span = tracing::span!(tracing::Level::TRACE, "linear");
Self { inner, span }
}
}
pub fn linear_b(d1: usize, d2: usize, b: bool, vb: VarBuilder) -> Result<Linear> {
let inner = candle_nn::linear_b(d1, d2, b, vb)?;
let span = tracing::span!(tracing::Level::TRACE, "linear");
Ok(Linear { inner, span })
}
pub fn linear(d1: usize, d2: usize, vb: VarBuilder) -> Result<Linear> {
let inner = candle_nn::linear(d1, d2, vb)?;
let span = tracing::span!(tracing::Level::TRACE, "linear");
Ok(Linear { inner, span })
}
pub fn linear_no_bias(d1: usize, d2: usize, vb: VarBuilder) -> Result<Linear> {
let inner = candle_nn::linear_no_bias(d1, d2, vb)?;
let span = tracing::span!(tracing::Level::TRACE, "linear");
Ok(Linear { inner, span })
}
impl Module for Linear {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
self.inner.forward(xs)
}
}
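// Example (sketch): wrapping a projection with the traced `Linear` so that each
// forward pass is recorded as a "linear" tracing span. `vb` and `xs` are assumed
// to come from the surrounding model code.
//
//     let proj = linear(768, 3072, vb.pp("dense"))?;
//     let ys = xs.apply(&proj)?;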
// Wrap the conv2d op to provide some tracing.
#[derive(Debug, Clone)]
pub struct Conv2d {
inner: candle_nn::Conv2d,
span: tracing::Span,
}
impl Module for Conv2d {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
self.inner.forward(x)
}
}
pub fn conv2d(
in_channels: usize,
out_channels: usize,
kernel_size: usize,
cfg: candle_nn::Conv2dConfig,
vs: candle_nn::VarBuilder,
) -> Result<Conv2d> {
let span = tracing::span!(tracing::Level::TRACE, "conv2d");
let inner = candle_nn::conv2d(in_channels, out_channels, kernel_size, cfg, vs)?;
Ok(Conv2d { inner, span })
}
// QMatMul wrapper adding some tracing.
#[derive(Clone)]
pub struct QMatMul {
inner: candle::quantized::QMatMul,
span: tracing::Span,
}
impl QMatMul {
pub fn new(
out_dim: usize,
in_dim: usize,
vb: crate::quantized_var_builder::VarBuilder,
) -> Result<Self> {
let ws = vb.get((in_dim, out_dim), "weight")?;
let inner = candle::quantized::QMatMul::from_arc(ws)?;
let span = tracing::span!(tracing::Level::TRACE, "qmatmul");
Ok(Self { inner, span })
}
pub fn from_weights(ws: std::sync::Arc<candle::quantized::QTensor>) -> Result<Self> {
let inner = candle::quantized::QMatMul::from_arc(ws)?;
let span = tracing::span!(tracing::Level::TRACE, "qmatmul");
Ok(Self { inner, span })
}
}
impl Module for QMatMul {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
self.inner.forward(xs)
}
}
impl std::fmt::Debug for QMatMul {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "QMatMul")
}
}
#[derive(Clone, Debug)]
pub struct LayerNorm {
inner: candle_nn::LayerNorm,
span: tracing::Span,
}
impl LayerNorm {
pub fn new(weight: Tensor, bias: Tensor, eps: f64) -> Self {
let inner = candle_nn::LayerNorm::new(weight, bias, eps);
let span = tracing::span!(tracing::Level::TRACE, "layer-norm");
Self { inner, span }
}
}
impl Module for LayerNorm {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
self.inner.forward(xs)
}
}
pub fn layer_norm<C: Into<candle_nn::LayerNormConfig>>(
size: usize,
c: C,
vb: VarBuilder,
) -> Result<LayerNorm> {
let inner = candle_nn::layer_norm(size, c, vb)?;
let span = tracing::span!(tracing::Level::TRACE, "layer-norm");
Ok(LayerNorm { inner, span })
}
#[derive(Debug, Clone)]
pub struct RmsNorm {
inner: candle_nn::RmsNorm,
span: tracing::Span,
}
impl RmsNorm {
pub fn new(size: usize, eps: f64, vb: VarBuilder) -> Result<Self> {
let span = tracing::span!(tracing::Level::TRACE, "rms-norm");
let inner = candle_nn::rms_norm(size, eps, vb)?;
Ok(Self { inner, span })
}
pub fn forward_diff(&self, x: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
self.inner.forward_diff(x)
}
}
impl Module for RmsNorm {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
self.inner.forward(x)
}
}
| 4 |
0 | hf_public_repos/candle/candle-transformers/src | hf_public_repos/candle/candle-transformers/src/models/mod.rs | //! Candle implementations for various deep learning models
//!
//! This crate provides implementations of popular machine learning models and architectures for different modalities.
//!
//! - Large language models: [`llama`], [`phi3`], [`mamba`], [`mixtral`], [`bert`], ...
//! - Text to text models: [`t5`], ...
//! - Image to text models: [`blip`], ...
//! - Text to image models: [`stable_diffusion`] and [`wuerstchen`], ...
//! - Audio models: [`whisper`], [`encodec`], [`metavoice`], [`parler_tts`], ...
//! - Computer vision models: [`dinov2`], [`convmixer`], [`efficientnet`], ...
//!
//! Some of the models also have quantized variants, e.g. [`quantized_blip`], [`quantized_llama`] and [`quantized_qwen2`].
//!
//! The implementations aim to be readable while maintaining good performance. For more information
//! on each model see the model's module docs in the links below.
pub mod based;
pub mod beit;
pub mod bert;
pub mod bigcode;
pub mod blip;
pub mod blip_text;
pub mod chatglm;
pub mod chinese_clip;
pub mod clip;
pub mod codegeex4_9b;
pub mod colpali;
pub mod convmixer;
pub mod convnext;
pub mod dac;
pub mod depth_anything_v2;
pub mod dinov2;
pub mod dinov2reg4;
pub mod distilbert;
pub mod efficientnet;
pub mod efficientvit;
pub mod encodec;
pub mod eva2;
pub mod falcon;
pub mod fastvit;
pub mod flux;
pub mod gemma;
pub mod gemma2;
pub mod glm4;
pub mod granite;
pub mod hiera;
pub mod jina_bert;
pub mod llama;
pub mod llama2_c;
pub mod llama2_c_weights;
pub mod llava;
pub mod mamba;
pub mod marian;
pub mod metavoice;
pub mod mimi;
pub mod mistral;
pub mod mixformer;
pub mod mixtral;
pub mod mmdit;
pub mod mobileclip;
pub mod mobilenetv4;
pub mod mobileone;
pub mod moondream;
pub mod mpt;
pub mod olmo;
pub mod openclip;
pub mod paligemma;
pub mod parler_tts;
pub mod persimmon;
pub mod phi;
pub mod phi3;
pub mod pixtral;
pub mod quantized_blip;
pub mod quantized_blip_text;
pub mod quantized_llama;
pub mod quantized_llama2_c;
pub mod quantized_metavoice;
pub mod quantized_mistral;
pub mod quantized_mixformer;
pub mod quantized_moondream;
pub mod quantized_mpt;
pub mod quantized_phi;
pub mod quantized_phi3;
pub mod quantized_qwen2;
pub mod quantized_recurrent_gemma;
pub mod quantized_rwkv_v5;
pub mod quantized_rwkv_v6;
pub mod quantized_stable_lm;
pub mod quantized_t5;
pub mod qwen2;
pub mod qwen2_moe;
pub mod recurrent_gemma;
pub mod repvgg;
pub mod resnet;
pub mod rwkv_v5;
pub mod rwkv_v6;
pub mod segformer;
pub mod segment_anything;
pub mod siglip;
pub mod stable_diffusion;
pub mod stable_lm;
pub mod starcoder2;
pub mod stella_en_v5;
pub mod t5;
pub mod trocr;
pub mod vgg;
pub mod vit;
pub mod whisper;
pub mod with_tracing;
pub mod wuerstchen;
pub mod yi;
| 5 |
0 | hf_public_repos/candle/candle-transformers/src | hf_public_repos/candle/candle-transformers/src/models/stella_en_v5.rs | //! Stella v5 model implementation.
//!
//! Stella is a dense text embedding model optimized for retrieval and similarity tasks.
//! This implementation provides support for multiple embedding dimensions.
//!
//! Key characteristics:
//! - Dense text embeddings optimized for similarity search
//! - Multiple output dimension support (256 to 8192)
//! - Grouped query attention (GQA)
//! - RMSNorm for layer normalization
//! - Rotary positional embeddings (RoPE)
//!
//! References:
//! - [MRL Framework](https://arxiv.org/abs/2205.13147)
//! - [Model Card](https://huggingface.co/dunzhang/stella_en_1.5B_v5)
//!
use crate::models::with_tracing::{linear, linear_no_bias, Linear, RmsNorm};
use candle::{DType, Device, Error, IndexOp, Module, Result, Tensor, D};
use candle_nn::{layer_norm, Activation, LayerNorm, VarBuilder};
use std::sync::Arc;
// internal representation for identifying which model is being used
#[derive(Debug, Copy, Clone, PartialEq, serde::Deserialize)]
pub enum ModelVariant {
Large, // 1.5B
Small, // 400M
}
impl Default for ModelVariant {
fn default() -> Self {
Self::Large
}
}
// Same as the `qwen2` family of models, except for the `embed_head`:
// the final causal language-modelling `output` head is replaced by a learned `dense` layer, `embed_head`
#[derive(Debug, Default, Clone, PartialEq, serde::Deserialize)]
pub struct Config {
pub variant: ModelVariant,
pub vocab_size: usize,
pub hidden_size: usize,
pub intermediate_size: usize,
pub num_hidden_layers: usize,
pub num_attention_heads: usize,
pub max_position_embeddings: usize,
pub rope_theta: f64,
pub embed_head: EmbedHead,
pub norm_eps: f64, // RMSNorm for 1.5B || LayerNorm for 400M
pub activation_fn: Activation, // Silu for 1.5B || Gelu for 400M
// Unique to 1.5B
pub num_key_value_heads: usize,
// Unique to 400M
pub type_vocab_size: usize,
pub scaling_factor: f64,
}
// Excerpt from `stella` model card:
// `Stella_en_1.5B_v5` models have been trained on [MRL](https://arxiv.org/abs/2205.13147) enabling multiple output dimensions
// Embed head represents the config for various embedding dims supported
#[derive(Debug, Default, Clone, PartialEq, serde::Deserialize)]
pub struct EmbedHead {
pub in_features: usize,
pub out_features: usize,
}
/// An enum representing the embedding head dimensions `stella` is trained on
/// As the [model-card](https://huggingface.co/dunzhang/stella_en_1.5B_v5#introduction) suggests, `Dim1024` is good enough for most cases
#[derive(Debug, Clone, Copy)]
pub enum EmbedDim {
Dim256,
Dim768,
Dim1024,
Dim2048,
Dim4096,
Dim6144,
Dim8192,
}
impl Default for EmbedDim {
fn default() -> Self {
Self::Dim1024
}
}
impl EmbedDim {
pub fn config(&self, in_features: usize) -> EmbedHead {
EmbedHead {
in_features,
out_features: match &self {
Self::Dim256 => 256,
Self::Dim768 => 768,
Self::Dim1024 => 1024,
Self::Dim2048 => 2048,
Self::Dim4096 => 4096,
Self::Dim6144 => 6144,
Self::Dim8192 => 8192,
},
}
}
}
// Initialize a new `stella_en` model, either the 400M or the 1.5B variant
impl Config {
    /// Initialize a new `stella_en_1.5B_v5` model with the given embedding dim
pub fn new_1_5_b_v5(embed_dim: EmbedDim) -> Self {
// Representing config.json at https://huggingface.co/dunzhang/stella_en_1.5B_v5/blob/main/config.json
// Removed `sliding_window` related config which is basically being carried forward from `qwen2` but not used here
Self {
variant: ModelVariant::Large,
activation_fn: candle_nn::Activation::Silu,
vocab_size: 151646,
hidden_size: 1536,
intermediate_size: 8960,
num_hidden_layers: 28,
num_attention_heads: 12,
num_key_value_heads: 2,
max_position_embeddings: 131072,
rope_theta: 1000000.,
norm_eps: 1e-06,
embed_head: embed_dim.config(1536),
..Default::default()
}
}
/// Initialize new `stella_en_400M_v5`
pub fn new_400_m_v5(embed_dim: EmbedDim) -> Self {
Self {
variant: ModelVariant::Small,
vocab_size: 30528,
hidden_size: 1024,
intermediate_size: 4096,
num_hidden_layers: 24,
num_attention_heads: 16,
max_position_embeddings: 8192,
type_vocab_size: 2,
norm_eps: 1e-12,
scaling_factor: 2.0,
rope_theta: 160000.0,
activation_fn: Activation::Gelu,
embed_head: embed_dim.config(1024),
..Default::default()
}
}
}
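// Example (sketch): build the 1.5B config with the default 1024-dim embedding head.
//
//     let cfg = Config::new_1_5_b_v5(EmbedDim::default());
//     assert_eq!(cfg.embed_head.out_features, 1024);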
#[derive(Debug, Clone)]
struct RotaryEmbedding {
sin: Tensor,
cos: Tensor,
}
impl RotaryEmbedding {
fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> {
let dim = cfg.hidden_size / cfg.num_attention_heads;
// Factoring in `scaling factor` for `400M` variant
let max_seq_len = if cfg.scaling_factor == 0. {
cfg.max_position_embeddings
} else {
((cfg.max_position_embeddings as f64) * cfg.scaling_factor) as usize
};
// let rot_dim = if cfg.variant == ModelVariant::Small { dim / 2 } else { dim };
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| {
// Scaled rope_theta for 400M variant
let rope_theta = if cfg.scaling_factor == 0. {
cfg.rope_theta
} else {
cfg.rope_theta * cfg.scaling_factor
};
let mut freq = 1. / rope_theta.powf(i as f64 / dim as f64);
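                // NTK-style adjustment for the 400M variant: additionally divide
                // each frequency by scaling_factor^(2 / dim) to stretch the context.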
if cfg.scaling_factor != 0. {
freq /= cfg.scaling_factor.powf(2.0 / (dim as f64))
}
freq as f32
})
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
// Calculate position embeddings with scaled sequence length
let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
.to_dtype(dtype)?
.reshape((max_seq_len, 1))?;
let freqs = t.matmul(&inv_freq)?;
// if cfg.variant == ModelVariant::Small {
// freqs = Tensor::cat(&[&freqs, &freqs], 1)?
// }
Ok(Self {
sin: freqs.sin()?,
cos: freqs.cos()?,
})
}
// TODO: re-visit this
fn apply_rotary_emb_qkv(&self, q: &Tensor, k: &Tensor) -> Result<(Tensor, Tensor)> {
let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
let cos = self.cos.narrow(0, 0, seq_len)?;
let sin = self.sin.narrow(0, 0, seq_len)?;
let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &cos, &sin)?;
let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &cos, &sin)?;
Ok((q_embed, k_embed))
}
}
#[derive(Debug, Clone)]
#[allow(clippy::upper_case_acronyms)]
struct MLP {
variant: ModelVariant,
gate_proj: Linear,
up_proj: Option<Linear>, // `up_proj` only for 1.5B variant
down_proj: Linear,
act_fn: Activation,
}
impl MLP {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let intermediate_sz = cfg.intermediate_size;
let (gate_proj, up_proj, down_proj) = match cfg.variant {
ModelVariant::Large => (
linear_no_bias(hidden_sz, intermediate_sz, vb.pp("gate_proj"))?,
Some(linear_no_bias(
hidden_sz,
intermediate_sz,
vb.pp("up_proj"),
)?),
linear_no_bias(intermediate_sz, hidden_sz, vb.pp("down_proj"))?,
),
ModelVariant::Small => (
linear_no_bias(hidden_sz, intermediate_sz * 2, vb.pp("up_gate_proj"))?,
None,
linear(intermediate_sz, hidden_sz, vb.pp("down_proj"))?,
),
};
Ok(Self {
variant: cfg.variant,
gate_proj,
up_proj,
down_proj,
act_fn: cfg.activation_fn,
})
}
}
impl Module for MLP {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let up = self.gate_proj.forward(xs)?;
let (lhs, rhs) = match self.variant {
ModelVariant::Large => {
let lhs = up.apply(&self.act_fn)?;
let rhs = xs.apply(self.up_proj.as_ref().unwrap())?;
(lhs, rhs)
}
ModelVariant::Small => {
// Get the dimensions
let (_batch_size, _seq_len, hidden_dim) = up.dims3()?;
let split_size = hidden_dim / 2;
// Split along the last dimension (hidden_dim)
let up_states = up.narrow(2, 0, split_size)?;
let gate = up.narrow(2, split_size, split_size)?.apply(&self.act_fn)?;
(up_states, gate)
}
};
(lhs * rhs)?.apply(&self.down_proj)
}
}
#[derive(Debug, Clone)]
struct Attention {
qkv_proj: Linear,
o_proj: Linear,
num_heads: usize,
num_kv_heads: usize,
num_kv_groups: usize,
head_dim: usize,
hidden_size: usize,
rotary_emb: Arc<RotaryEmbedding>,
variant: ModelVariant,
}
impl Attention {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let num_heads = cfg.num_attention_heads;
let num_kv_heads = cfg.num_key_value_heads;
let num_kv_groups = if num_kv_heads > 0 {
num_heads / num_kv_heads
} else {
0
};
let head_dim = hidden_sz / num_heads;
let (qkv_proj, o_proj) = match cfg.variant {
ModelVariant::Large => {
                // The 1.5B variant ships separate `q`, `k`, `v` layers; merge them
                // into a single `qkv` projection to standardize the forward pass.
// Weights
let q_w = vb
.pp("q_proj")
.get((num_heads * head_dim, hidden_sz), "weight")?;
let k_w = vb
.pp("k_proj")
.get((num_kv_heads * head_dim, hidden_sz), "weight")?;
let v_w = vb
.pp("v_proj")
.get((num_kv_heads * head_dim, hidden_sz), "weight")?;
// Biases
let q_b = vb.pp("q_proj").get(num_heads * head_dim, "bias")?;
let k_b = vb.pp("k_proj").get(num_kv_heads * head_dim, "bias")?;
let v_b = vb.pp("v_proj").get(num_kv_heads * head_dim, "bias")?;
let qkv_w = Tensor::cat(&[&q_w, &k_w, &v_w], 0)?;
let qkv_b = Tensor::cat(&[&q_b, &k_b, &v_b], 0)?;
(
Linear::from_weights(qkv_w, Some(qkv_b)),
linear_no_bias(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?,
)
}
ModelVariant::Small => (
linear(hidden_sz, 3 * num_heads * head_dim, vb.pp("qkv_proj"))?,
linear(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?,
),
};
Ok(Self {
qkv_proj,
o_proj,
num_heads,
num_kv_heads,
num_kv_groups,
head_dim,
hidden_size: hidden_sz,
rotary_emb,
variant: cfg.variant,
})
}
fn forward(&mut self, xs: &Tensor, attention_mask: Option<&Tensor>) -> Result<Tensor> {
let (b_sz, q_len, _) = xs.dims3()?;
let qkv = self.qkv_proj.forward(xs)?;
let n_kv_heads = match self.variant {
ModelVariant::Large => self.num_kv_heads,
ModelVariant::Small => self.num_heads,
};
let (query_states, key_states, value_states) = match self.variant {
ModelVariant::Large => {
let q_sz = self.num_heads * self.head_dim;
let kv_sz = n_kv_heads * self.head_dim;
let q = qkv.narrow(D::Minus1, 0, q_sz)?.reshape((
b_sz,
q_len,
self.num_heads,
self.head_dim,
))?;
let k = qkv.narrow(D::Minus1, q_sz, kv_sz)?.reshape((
b_sz,
q_len,
n_kv_heads,
self.head_dim,
))?;
let v = qkv.narrow(D::Minus1, q_sz + kv_sz, kv_sz)?.reshape((
b_sz,
q_len,
n_kv_heads,
self.head_dim,
))?;
(q, k, v)
}
ModelVariant::Small => {
// Split into Q, K, V and reshape to match PyTorch shapes
let qkv = qkv.reshape((b_sz, q_len, 3, self.num_heads, self.head_dim))?;
(
qkv.i((.., .., 0, .., ..))?,
qkv.i((.., .., 1, .., ..))?,
qkv.i((.., .., 2, .., ..))?,
)
}
};
let query_states = query_states.transpose(1, 2)?.contiguous()?;
let key_states = key_states.transpose(1, 2)?.contiguous()?;
let value_states = value_states.transpose(1, 2)?.contiguous()?;
let (query_states, key_states) = self
.rotary_emb
.apply_rotary_emb_qkv(&query_states, &key_states)?;
// The 1.5B is expected to have grouped query attention
let (key_states, value_states) = if self.variant == ModelVariant::Large {
(
crate::utils::repeat_kv(key_states, self.num_kv_groups)?.contiguous()?,
crate::utils::repeat_kv(value_states, self.num_kv_groups)?.contiguous()?,
)
} else {
(key_states, value_states)
};
let attn_output = {
let scale = 1f64 / f64::sqrt(self.head_dim as f64);
let attn_weights = query_states.matmul(&key_states.transpose(2, 3)?)?;
let attn_weights = (attn_weights * scale)?;
let attn_weights = match attention_mask {
None => attn_weights,
Some(mask) => attn_weights.broadcast_add(mask)?,
};
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
attn_weights.matmul(&value_states)?
};
attn_output
.transpose(1, 2)?
.reshape((b_sz, q_len, self.hidden_size))?
.apply(&self.o_proj)
}
}
#[derive(Debug, Clone)]
enum NormType {
Layer(LayerNorm),
Rms(RmsNorm),
}
#[derive(Debug, Clone)]
struct Layer {
variant: ModelVariant,
attention: Attention,
mlp: MLP,
// For 1.5B: this is `input_layernorm`
// For 400M: this is `output_layernorm`
layernorm: NormType,
post_attention_layernorm: NormType,
}
impl Layer {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let attention = Attention::new(
rotary_emb,
cfg,
vb.pp(if cfg.variant == ModelVariant::Large {
"self_attn"
} else {
"attention"
}),
)?;
let mlp = MLP::new(cfg, vb.pp("mlp"))?;
let (layernorm, post_attention_layernorm) = match cfg.variant {
ModelVariant::Large => (
NormType::Rms(RmsNorm::new(
cfg.hidden_size,
cfg.norm_eps,
vb.pp("input_layernorm"),
)?),
NormType::Rms(RmsNorm::new(
cfg.hidden_size,
cfg.norm_eps,
vb.pp("post_attention_layernorm"),
)?),
),
ModelVariant::Small => (
NormType::Layer(layer_norm(
cfg.hidden_size,
candle_nn::LayerNormConfig {
eps: cfg.norm_eps,
..Default::default()
},
vb.pp("mlp_ln"),
)?),
NormType::Layer(layer_norm(
cfg.hidden_size,
candle_nn::LayerNormConfig {
eps: cfg.norm_eps,
..Default::default()
},
vb.pp("attn_ln"),
)?),
),
};
Ok(Self {
variant: cfg.variant,
attention,
mlp,
layernorm,
post_attention_layernorm,
})
}
fn forward(&mut self, xs: &Tensor, attention_mask: Option<&Tensor>) -> Result<Tensor> {
// Here, the application of normalizations and activation calculations differ
// For Large [1.5B]:
// residual = x
// state = other_layernorm(xs)
// state = attention(state)
// state += residual
// residual = state
// state = mlp(attention_layernorm(state))
// -> residual + state
// For Small [400M]:
// residual = x;
// state = attention(x)
// state += residual
// state = attention_layernorm(state)
// residual = state
// state = mlp(state)
// state += residual
// -> other_layernorm(state)
let residual = xs;
match self.variant {
ModelVariant::Large => {
let (attn_ln, input_ln) = if let (NormType::Rms(attn_ln), NormType::Rms(input_ln)) =
(&self.post_attention_layernorm, &self.layernorm)
{
(attn_ln, input_ln)
} else {
return Err(candle::error::Error::Msg(
"Stella 1.5B expects RMSNorm".to_string(),
));
};
let xs = input_ln.forward(xs)?;
let xs = (self.attention.forward(&xs, attention_mask)? + residual)?;
let residual = &xs;
let xs = xs.apply(attn_ln)?.apply(&self.mlp)?;
residual + xs
}
ModelVariant::Small => {
                let (attn_ln, output_ln) =
                    if let (NormType::Layer(attn_ln), NormType::Layer(output_ln)) =
                        (&self.post_attention_layernorm, &self.layernorm)
                    {
                        (attn_ln, output_ln)
                    } else {
                        return Err(candle::error::Error::Msg(
                            "Stella 400M expects LayerNorm".to_string(),
                        ));
                    };
let xs = (self.attention.forward(xs, attention_mask)? + residual)?;
let xs = attn_ln.forward(&xs)?;
let residual = &xs;
let xs = (self.mlp.forward(&xs)? + residual)?;
output_ln.forward(&xs)
}
}
}
}
#[derive(Debug, Clone)]
pub struct Embeddings {
variant: ModelVariant,
// For 1.5B: this is the `embed_tokens`
// For 400M: this is the `word_embeddings`
embeddings: candle_nn::Embedding,
    // The following fields are specific to the 400M variant
token_type_embeddings: Option<candle_nn::Embedding>,
layer_norm: Option<LayerNorm>,
position_ids: Option<Tensor>,
}
impl Embeddings {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let (embeddings, token_type_embeddings, layer_norm, position_ids) = match cfg.variant {
ModelVariant::Large => (
candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb.pp("embed_tokens"))?,
None,
None,
None,
),
ModelVariant::Small => {
let vb = vb.pp("embeddings");
let weight = vb.pp("LayerNorm").get_with_hints(
cfg.hidden_size,
"weight",
candle_nn::Init::Const(1.0),
)?;
let bias = vb.pp("LayerNorm").get_with_hints(
cfg.hidden_size,
"bias",
candle_nn::Init::Const(0.0),
)?;
let dev = bias.device().clone();
let layer_norm = candle_nn::LayerNorm::new(weight, bias, cfg.norm_eps);
(
candle_nn::embedding(
cfg.vocab_size,
cfg.hidden_size,
vb.pp("word_embeddings"),
)?,
Some(candle_nn::embedding(
cfg.type_vocab_size,
cfg.hidden_size,
vb.pp("token_type_embeddings"),
)?),
Some(layer_norm),
Some(Tensor::arange(
0u32,
cfg.max_position_embeddings as u32,
&dev,
)?),
)
}
};
Ok(Self {
variant: cfg.variant,
embeddings,
token_type_embeddings,
layer_norm,
position_ids,
})
}
}
impl Module for Embeddings {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let embd = self.embeddings.forward(xs)?;
// For 1.5B just forward the embeddings
if self.variant == ModelVariant::Large {
return Ok(embd);
}
let (token_type_embed, layer_norm, pos_ids) =
if let (Some(token_type_embd), Some(layer_norm), Some(position_ids)) = (
&self.token_type_embeddings,
&self.layer_norm,
&self.position_ids,
) {
(token_type_embd, layer_norm, position_ids)
} else {
return Err(Error::Msg(
"Stella 400M requires `token_type_embeddings`, `layer_norm` and `position_ids`"
.to_string(),
));
};
let (batch_size, seq_length) = xs.dims2()?;
let pos_ids = pos_ids
.as_ref()
.narrow(0, 0, seq_length)?
.expand((batch_size, seq_length))?;
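        // The token type ids are all zeros here; `pos_ids` only provides the shape
        // for the zeros tensor since positional information comes from RoPE.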
layer_norm.forward(&embd.add(&token_type_embed.forward(&pos_ids.zeros_like()?)?)?)
}
}
#[derive(Debug, Clone)]
pub struct Model {
embeddings: Embeddings,
layers: Vec<Layer>,
norm: Option<RmsNorm>,
device: Device,
dtype: DType,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb_m = match cfg.variant {
ModelVariant::Large => vb.pp("model"),
ModelVariant::Small => vb.pp("new"),
};
// let embed_tokens =
// candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?;
let embeddings = Embeddings::new(cfg, vb_m.clone())?;
let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?);
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
let vb_l = match cfg.variant {
ModelVariant::Large => vb_m.pp("layers"),
ModelVariant::Small => vb_m.pp("encoder").pp("layer"),
};
for layer_idx in 0..cfg.num_hidden_layers {
let layer = Layer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?;
layers.push(layer)
}
let norm = match cfg.variant {
ModelVariant::Large => Some(RmsNorm::new(
cfg.hidden_size,
cfg.norm_eps,
vb_m.pp("norm"),
)?),
ModelVariant::Small => None,
};
Ok(Self {
embeddings,
layers,
norm,
device: vb.device().clone(),
dtype: vb.dtype(),
})
}
fn prepare_attention_mask(&self, attn_mask: &Tensor) -> Result<Tensor> {
let (b_sz, sql_len) = attn_mask.dims2()?;
let mut mask: Vec<Tensor> = vec![];
for b in 0..b_sz {
mask.push(attn_mask.i((b, ..))?.expand((1, 1, sql_len, sql_len))?);
}
let mask = Tensor::cat(&mask, 0)?;
let on_true = mask.zeros_like()?.to_dtype(self.dtype)?;
let on_false = Tensor::new(f32::NEG_INFINITY, &self.device)?
.broadcast_as(mask.shape())?
.to_dtype(self.dtype)?;
mask.where_cond(&on_true, &on_false)
}
pub fn forward(&mut self, input_ids: &Tensor, mask: &Tensor) -> Result<Tensor> {
let (_, seq_len) = input_ids.dims2()?;
let attention_mask = if seq_len <= 1 {
None
} else {
            // This is not a causal language-modelling task, so we prepare a non-causal attention mask
Some(self.prepare_attention_mask(mask)?)
};
let mut xs = self.embeddings.forward(input_ids)?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, attention_mask.as_ref())?
}
if let Some(n) = &self.norm {
xs.apply(n)
} else {
Ok(xs)
}
}
}
#[derive(Debug)]
pub struct EmbeddingModel {
base_model: Model,
lm_head: Linear,
}
impl EmbeddingModel {
pub fn new(cfg: &Config, base_vb: VarBuilder, embed_vb: VarBuilder) -> Result<Self> {
let base_model = Model::new(cfg, base_vb.clone())?;
let lm_head = linear(
cfg.embed_head.in_features,
cfg.embed_head.out_features,
embed_vb.pp("linear"),
)?;
Ok(Self {
base_model,
lm_head,
})
}
pub fn forward(&mut self, input_ids: &Tensor, mask: &Tensor) -> Result<Tensor> {
let x = self.base_model.forward(input_ids, mask)?;
let x = self.pool(&x, mask)?;
        // Keeping the final activations in F32 helps accuracy regardless of the model dtype
self.lm_head.forward(&x.to_dtype(DType::F32)?) // [B_sz, dim_size]
}
/// Same as forward pass but normalizes the output
pub fn forward_norm(&mut self, input_ids: &Tensor, mask: &Tensor) -> Result<Tensor> {
let x = self.forward(input_ids, mask)?;
        // L2-normalize each embedding so that cosine similarity reduces to a dot product
x.broadcast_div(&x.sqr()?.sum_keepdim(1)?.sqrt()?)
}
fn pool(&self, x: &Tensor, mask: &Tensor) -> Result<Tensor> {
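        // Masked mean pooling: zero out padded positions, then divide the per-batch
        // sum by the number of real (unmasked) tokens.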
let mask = mask.to_dtype(x.dtype())?; // [B_Sz, Seq_len]
let (batch_size, seq_len, hidden_dim) = x.dims3()?;
// expanding the shape of the mask from [B_Sz, Seq_len] -> [B_Sz, Seq_len, Hidden_size]
let mask_expanded = mask
.unsqueeze(2)?
.broadcast_as((batch_size, seq_len, hidden_dim))?; // [B_Sz, Seq_len, Hidden_dim]
let x = (x * &mask_expanded)?;
// Sum
let sum_mask = mask
.sum(1)?
.unsqueeze(1)?
.expand((batch_size, hidden_dim))?;
x.sum(1)? / sum_mask
}
}
| 6 |
0 | hf_public_repos/candle/candle-transformers/src | hf_public_repos/candle/candle-transformers/src/models/quantized_mixformer.rs | //! Module containing quantized MixFormer model implementation.
//!
//! MixFormer is an efficient transformer variant for text generation that uses
//! mixture-of-experts and parallel attention/feed-forward blocks.
//! This implementation provides quantization for reduced memory usage.
//!
//! Key features:
//! - Parallel attention and feed-forward computation
//! - Rotary positional embeddings
//! - Optional key-value caching
//! - Support for 8-bit quantization
//!
use crate::quantized_nn::{layer_norm, linear, Linear};
pub use crate::quantized_var_builder::VarBuilder;
use candle::{DType, Device, IndexOp, Module, Result, Tensor, D};
use candle_nn::Activation;
pub use crate::models::mixformer::Config;
const MAX_SEQ_LEN: usize = 4096;
#[derive(Debug, Clone)]
struct Embedding {
wte: crate::quantized_nn::Embedding,
}
impl Embedding {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let wte = crate::quantized_nn::Embedding::new(cfg.vocab_size, cfg.n_embd, vb.pp("wte"))?;
Ok(Self { wte })
}
}
impl Module for Embedding {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
self.wte.forward(xs)
}
}
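// Build an upper-triangular mask where entry (i, j) is 1 when j > i, i.e. the
// future positions that causal attention must not attend to.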
fn get_mask(size: usize, device: &Device) -> Result<Tensor> {
let mask: Vec<_> = (0..size)
.flat_map(|i| (0..size).map(move |j| u8::from(j > i)))
.collect();
Tensor::from_slice(&mask, (size, size), device)
}
fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> {
let shape = mask.shape();
let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?;
let m = mask.where_cond(&on_true, on_false)?;
Ok(m)
}
#[derive(Debug, Clone)]
struct RotaryEmbedding {
sin: Tensor,
cos: Tensor,
}
impl RotaryEmbedding {
fn new(dim: usize, max_seq_len: usize, dev: &Device) -> Result<Self> {
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / 10000f32.powf(i as f32 / dim as f32))
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?;
let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
.to_dtype(DType::F32)?
.reshape((max_seq_len, 1))?;
let freqs = t.matmul(&inv_freq)?;
Ok(Self {
sin: freqs.sin()?,
cos: freqs.cos()?,
})
}
fn apply_rotary_emb_qkv(
&self,
qkv: &Tensor,
seqlen_offset: usize,
) -> Result<(Tensor, Tensor, Tensor)> {
let (_b_size, seqlen, three, _, _headdim) = qkv.dims5()?;
if three != 3 {
candle::bail!("unexpected shape for qkv {:?}", qkv.shape())
}
let (_rotary_seqlen, rotary_dim) = self.cos.dims2()?;
let rotary_dim = rotary_dim * 2;
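        // Partial rotary embedding: only the first `rotary_dim` features of each
        // head are rotated; the remaining features pass through unchanged.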
let q_rot = qkv.i((.., .., 0, .., ..rotary_dim))?;
let q_pass = qkv.i((.., .., 0, .., rotary_dim..))?;
let k_rot = qkv.i((.., .., 1, .., ..rotary_dim))?;
let k_pass = qkv.i((.., .., 1, .., rotary_dim..))?;
let q12 = q_rot.chunk(2, D::Minus1)?;
let k12 = k_rot.chunk(2, D::Minus1)?;
let (q1, q2) = (&q12[0], &q12[1]);
let (k1, k2) = (&k12[0], &k12[1]);
let c = self.cos.narrow(0, seqlen_offset, seqlen)?.unsqueeze(1)?;
let s = self.sin.narrow(0, seqlen_offset, seqlen)?.unsqueeze(1)?;
let q_rot = Tensor::cat(
&[
(q1.broadcast_mul(&c)? - q2.broadcast_mul(&s)?)?,
(q1.broadcast_mul(&s)? + q2.broadcast_mul(&c)?)?,
],
D::Minus1,
)?;
let k_rot = Tensor::cat(
&[
(k1.broadcast_mul(&c)? - k2.broadcast_mul(&s)?)?,
(k1.broadcast_mul(&s)? + k2.broadcast_mul(&c)?)?,
],
D::Minus1,
)?;
let q = Tensor::cat(&[&q_rot, &q_pass], D::Minus1)?;
let k = Tensor::cat(&[&k_rot, &k_pass], D::Minus1)?;
let v = qkv.i((.., .., 2))?;
Ok((q, k, v))
}
}
#[derive(Debug, Clone)]
#[allow(clippy::upper_case_acronyms)]
struct MLP {
fc1: Linear,
fc2: Linear,
act: Activation,
}
impl MLP {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let n_inner = cfg.n_inner.unwrap_or(4 * cfg.n_embd);
let fc1 = linear(cfg.n_embd, n_inner, vb.pp("fc1"))?;
let fc2 = linear(n_inner, cfg.n_embd, vb.pp("fc2"))?;
Ok(Self {
fc1,
fc2,
act: cfg.activation_function,
})
}
}
impl Module for MLP {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.fc1)?.apply(&self.act)?.apply(&self.fc2)
}
}
#[derive(Debug, Clone)]
struct CausalLMHead {
ln: candle_nn::LayerNorm,
linear: Linear,
}
impl CausalLMHead {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let ln = layer_norm(cfg.n_embd, cfg.layer_norm_epsilon, vb.pp("ln"))?;
let linear = linear(cfg.n_embd, cfg.vocab_size, vb.pp("linear"))?;
Ok(Self { ln, linear })
}
}
impl Module for CausalLMHead {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.ln)?
.apply(&self.linear)?
.to_dtype(DType::F32)
}
}
#[derive(Debug, Clone)]
#[allow(clippy::upper_case_acronyms)]
struct MHA {
wqkv: Linear,
out_proj: Linear,
rotary_emb: RotaryEmbedding,
kv_cache: Option<(Tensor, Tensor)>,
head_dim: usize,
n_head: usize,
softmax_scale: f64,
span: tracing::Span,
}
impl MHA {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let head_dim = cfg.n_embd / cfg.n_head;
let op_size = cfg.n_embd;
let wqkv = linear(cfg.n_embd, 3 * op_size, vb.pp("Wqkv"))?;
let out_proj = linear(op_size, cfg.n_embd, vb.pp("out_proj"))?;
let rotary_emb = RotaryEmbedding::new(cfg.rotary_dim, MAX_SEQ_LEN, vb.device())?;
let softmax_scale = 1f64 / (head_dim as f64).sqrt();
Ok(Self {
wqkv,
out_proj,
head_dim,
n_head: cfg.n_head,
kv_cache: None,
rotary_emb,
softmax_scale,
span: tracing::span!(tracing::Level::TRACE, "mha"),
})
}
fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> {
let _enter = self.span.enter();
let (b_size, seq_len, _n_embd) = xs.dims3()?;
let qkv = self
.wqkv
.forward(xs)?
.reshape((b_size, seq_len, 3, (), self.head_dim))?;
let seqlen_offset = match &self.kv_cache {
None => 0,
Some((prev_k, _)) => prev_k.dim(1)?,
};
// In the python implementation, a single tensor is returned with the third axis of size 3.
let (q, k, v) = self.rotary_emb.apply_rotary_emb_qkv(&qkv, seqlen_offset)?;
let (k, v) = match &self.kv_cache {
None => (k, v),
Some((prev_k, prev_v)) => {
let k = Tensor::cat(&[prev_k, &k], 1)?;
let v = Tensor::cat(&[prev_v, &v], 1)?;
(k, v)
}
};
self.kv_cache = Some((k.clone(), v.clone()));
// scores = torch.einsum('bthd,bshd->bhts', q, k * softmax_scale)
let q = q.transpose(1, 2)?.flatten_to(1)?; // b*h, t, d
let k = k.transpose(1, 2)?.flatten_to(1)?; // b*h, s, d
let v = v.transpose(1, 2)?.flatten_to(1)?; // b*h, s, d
let attn_weights = (q.matmul(&k.t()?)? * self.softmax_scale)?; // b*h, t, s
// causal_mask = torch.triu(torch.full((seqlen_q, seqlen_k), -10000.0, device=scores.device), 1)
// scores = scores + causal_mask.to(dtype=scores.dtype)
let attn_weights = match mask {
None => attn_weights,
Some(mask) => masked_fill(
&attn_weights,
&mask.broadcast_left(b_size * self.n_head)?,
f32::NEG_INFINITY,
)?,
};
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
// output = torch.einsum('bhts,bshd->bthd', attention_drop, v)
// attn_weights: b*h,t,s, v: b*h,s,d
let attn_output = attn_weights.matmul(&v)?;
// b*h,t,d
let attn_output = attn_output
.reshape((b_size, (), seq_len, self.head_dim))?
.transpose(1, 2)?
.flatten_from(D::Minus2)?;
attn_output.apply(&self.out_proj)
}
fn clear_kv_cache(&mut self) {
self.kv_cache = None
}
}
#[derive(Debug, Clone)]
struct ParallelBlock {
ln: candle_nn::LayerNorm,
mixer: MHA,
mlp: MLP,
span: tracing::Span,
}
impl ParallelBlock {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let ln = layer_norm(cfg.n_embd, cfg.layer_norm_epsilon, vb.pp("ln"))?;
let mixer = MHA::new(cfg, vb.pp("mixer"))?;
let mlp = MLP::new(cfg, vb.pp("mlp"))?;
Ok(Self {
ln,
mixer,
mlp,
span: tracing::span!(tracing::Level::TRACE, "block"),
})
}
fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> {
let _enter = self.span.enter();
let residual = xs;
let xs = xs.apply(&self.ln)?;
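        // Parallel block: the attention and the MLP both consume the same
        // normalized input, and their outputs are summed with the residual.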
let attn_outputs = self.mixer.forward(&xs, mask)?;
let feed_forward_hidden_states = self.mlp.forward(&xs)?;
attn_outputs + feed_forward_hidden_states + residual
}
fn clear_kv_cache(&mut self) {
self.mixer.clear_kv_cache()
}
}
#[derive(Debug, Clone)]
pub struct MixFormerSequentialForCausalLM {
embedding: Embedding,
blocks: Vec<ParallelBlock>,
head: CausalLMHead,
span: tracing::Span,
}
impl MixFormerSequentialForCausalLM {
pub fn new_v2(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb_head = vb.pp("lm_head");
let vb = vb.pp("transformer");
let embedding = Embedding::new(cfg, vb.pp("embd"))?;
let mut blocks = Vec::new();
for i in 0..cfg.n_layer {
let block = ParallelBlock::new(cfg, vb.pp("h").pp(i))?;
blocks.push(block)
}
let head = CausalLMHead::new(cfg, vb_head)?;
Ok(Self {
embedding,
blocks,
head,
span: tracing::span!(tracing::Level::TRACE, "mixformer"),
})
}
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb = vb.pp("layers");
let embedding = Embedding::new(cfg, vb.pp(0))?;
let mut blocks = Vec::new();
for i in 0..cfg.n_layer {
let block = ParallelBlock::new(cfg, vb.pp(i + 1))?;
blocks.push(block);
}
let head = CausalLMHead::new(cfg, vb.pp(cfg.n_layer + 1))?;
Ok(Self {
embedding,
blocks,
head,
span: tracing::span!(tracing::Level::TRACE, "mixformer"),
})
}
pub fn forward(&mut self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let (_b_size, seq_len) = xs.dims2()?;
let mut xs = xs.apply(&self.embedding)?;
let mask = if seq_len <= 1 {
None
} else {
Some(get_mask(seq_len, xs.device())?)
};
for block in self.blocks.iter_mut() {
xs = block.forward(&xs, mask.as_ref())?;
}
xs.narrow(1, seq_len - 1, 1)?.apply(&self.head)?.squeeze(1)
}
pub fn forward_with_img(
&mut self,
bos_token: &Tensor,
xs: &Tensor,
img_embeds: &Tensor,
) -> Result<Tensor> {
let _enter = self.span.enter();
let xs = xs.apply(&self.embedding)?;
let bos_token = bos_token.apply(&self.embedding)?;
// Python implementation sequence order is <bos token embedding><img embedding><rest of text embedding>
// https://github.com/vikhyat/moondream/blob/a9d788a20d1543fb1479edc54106e88cff7759d3/moondream/moondream.py#L43-L56
let mut xs = Tensor::cat(&[bos_token, img_embeds.clone(), xs], 1)?;
let (_b_size, seq_len, _embds) = xs.dims3()?;
let mask = Some(get_mask(seq_len, xs.device())?);
for block in self.blocks.iter_mut() {
xs = block.forward(&xs, mask.as_ref())?
}
let xs = xs
.narrow(1, seq_len - 1, 1)?
.apply(&self.head)?
.squeeze(1)?;
Ok(xs)
}
pub fn clear_kv_cache(&mut self) {
self.blocks.iter_mut().for_each(|b| b.clear_kv_cache())
}
}
| 7 |
0 | hf_public_repos/candle/candle-transformers/src | hf_public_repos/candle/candle-transformers/src/models/yi.rs | //! Yi model implementation.
//!
//! This candle implementation uses a pre-trained Yi decoder-only large language model for inference.
//! The model was trained by 01.AI and follows a standard transformer architecture similar to LLaMA.
//!
//! Original code:
//! - 💻 [Yi Model](https://huggingface.co/01-ai/Yi-6B)
//! - 💻 [Yi Modeling Code](https://huggingface.co/01-ai/Yi-6B/blob/main/modeling_yi.py)
//! - 📝 [Technical Report](https://arxiv.org/abs/2403.04652) Yi: Open Foundation Models by 01.AI
//!
//! Key characteristics:
//! - Multi-head attention with rotary positional embeddings
//! - RMS normalization
//! - SwiGLU activation in feed-forward layers
//! - Grouped-query attention for efficient inference
//!
use crate::models::with_tracing::{linear_no_bias, Linear, RmsNorm};
use candle::{DType, Device, Module, Result, Tensor, D};
use candle_nn::{Activation, VarBuilder};
use std::sync::Arc;
#[derive(Debug, Clone, PartialEq)]
pub struct Config {
pub(crate) vocab_size: usize,
pub(crate) hidden_size: usize,
pub(crate) intermediate_size: usize,
pub(crate) num_hidden_layers: usize,
pub(crate) num_attention_heads: usize,
pub(crate) num_key_value_heads: usize,
pub(crate) hidden_act: Activation,
pub(crate) max_position_embeddings: usize,
pub(crate) rms_norm_eps: f64,
pub(crate) rope_theta: f64,
}
impl Config {
pub fn config_6b() -> Self {
Self {
vocab_size: 64000,
hidden_size: 4096,
intermediate_size: 11008,
num_hidden_layers: 32,
num_attention_heads: 32,
num_key_value_heads: 4,
hidden_act: Activation::Silu,
max_position_embeddings: 4096,
rms_norm_eps: 1e-5,
rope_theta: 5_000_000.,
}
}
pub fn config_34b() -> Self {
Self {
vocab_size: 64000,
hidden_size: 7168,
intermediate_size: 20480,
num_hidden_layers: 60,
num_attention_heads: 56,
num_key_value_heads: 8,
hidden_act: Activation::Silu,
max_position_embeddings: 4096,
rms_norm_eps: 1e-5,
rope_theta: 5_000_000.,
}
}
}
#[derive(Debug, Clone)]
struct RotaryEmbedding {
sin: Tensor,
cos: Tensor,
}
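// Rotate the two halves of the last dimension: (x1, x2) -> (-x2, x1), the
// 90-degree rotation used by rotary position embeddings.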
fn rotate_half(xs: &Tensor) -> Result<Tensor> {
let last_dim = xs.dim(D::Minus1)?;
let xs1 = xs.narrow(D::Minus1, 0, last_dim / 2)?;
let xs2 = xs.narrow(D::Minus1, last_dim / 2, last_dim - last_dim / 2)?;
Tensor::cat(&[&xs2.neg()?, &xs1], D::Minus1)
}
impl RotaryEmbedding {
fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> {
let dim = cfg.hidden_size / cfg.num_attention_heads;
let max_seq_len = cfg.max_position_embeddings;
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / 10000f32.powf(i as f32 / dim as f32))
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
.to_dtype(dtype)?
.reshape((max_seq_len, 1))?;
let freqs = t.matmul(&inv_freq)?;
let freqs = Tensor::cat(&[&freqs, &freqs], D::Minus1)?;
Ok(Self {
sin: freqs.sin()?,
cos: freqs.cos()?,
})
}
fn apply_rotary_emb_qkv(
&self,
q: &Tensor,
k: &Tensor,
seqlen_offset: usize,
) -> Result<(Tensor, Tensor)> {
let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
let cos = self.cos.narrow(0, seqlen_offset, seq_len)?;
let sin = self.sin.narrow(0, seqlen_offset, seq_len)?;
let cos = cos.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim)
let sin = sin.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim)
let q_embed = (q.broadcast_mul(&cos)? + rotate_half(q)?.broadcast_mul(&sin))?;
let k_embed = (k.broadcast_mul(&cos)? + rotate_half(k)?.broadcast_mul(&sin))?;
Ok((q_embed, k_embed))
}
}
#[derive(Debug, Clone)]
#[allow(clippy::upper_case_acronyms)]
struct MLP {
gate_proj: Linear,
up_proj: Linear,
down_proj: Linear,
act_fn: Activation,
}
impl MLP {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let intermediate_sz = cfg.intermediate_size;
let gate_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("gate_proj"))?;
let up_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("up_proj"))?;
let down_proj = linear_no_bias(intermediate_sz, hidden_sz, vb.pp("down_proj"))?;
Ok(Self {
gate_proj,
up_proj,
down_proj,
act_fn: cfg.hidden_act,
})
}
}
impl Module for MLP {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let lhs = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?;
let rhs = xs.apply(&self.up_proj)?;
(lhs * rhs)?.apply(&self.down_proj)
}
}
#[derive(Debug, Clone)]
struct Attention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
o_proj: Linear,
num_heads: usize,
num_kv_heads: usize,
num_kv_groups: usize,
head_dim: usize,
hidden_size: usize,
rotary_emb: Arc<RotaryEmbedding>,
kv_cache: Option<(Tensor, Tensor)>,
}
impl Attention {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let num_heads = cfg.num_attention_heads;
let num_kv_heads = cfg.num_key_value_heads;
let num_kv_groups = num_heads / num_kv_heads;
let head_dim = hidden_sz / num_heads;
let q_proj = linear_no_bias(hidden_sz, num_heads * head_dim, vb.pp("q_proj"))?;
let k_proj = linear_no_bias(hidden_sz, num_kv_heads * head_dim, vb.pp("k_proj"))?;
let v_proj = linear_no_bias(hidden_sz, num_kv_heads * head_dim, vb.pp("v_proj"))?;
let o_proj = linear_no_bias(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?;
Ok(Self {
q_proj,
k_proj,
v_proj,
o_proj,
num_heads,
num_kv_heads,
num_kv_groups,
head_dim,
hidden_size: hidden_sz,
rotary_emb,
kv_cache: None,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let (b_sz, q_len, _) = xs.dims3()?;
let query_states = self.q_proj.forward(xs)?;
let key_states = self.k_proj.forward(xs)?;
let value_states = self.v_proj.forward(xs)?;
let query_states = query_states
.reshape((b_sz, q_len, self.num_heads, self.head_dim))?
.transpose(1, 2)?;
let key_states = key_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let value_states = value_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let (query_states, key_states) =
self.rotary_emb
.apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?;
let (key_states, value_states) = match &self.kv_cache {
None => (key_states, value_states),
Some((prev_k, prev_v)) => {
let key_states = Tensor::cat(&[prev_k, &key_states], 2)?;
let value_states = Tensor::cat(&[prev_v, &value_states], 2)?;
(key_states, value_states)
}
};
self.kv_cache = Some((key_states.clone(), value_states.clone()));
let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?;
let value_states = crate::utils::repeat_kv(value_states, self.num_kv_groups)?;
let attn_output = {
let scale = 1f64 / f64::sqrt(self.head_dim as f64);
let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?;
let attn_weights = match attention_mask {
None => attn_weights,
Some(mask) => attn_weights.broadcast_add(mask)?,
};
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
attn_weights.matmul(&value_states)?
};
attn_output
.transpose(1, 2)?
.reshape((b_sz, q_len, self.hidden_size))?
.apply(&self.o_proj)
}
}
#[derive(Debug, Clone)]
struct DecoderLayer {
self_attn: Attention,
mlp: MLP,
ln1: RmsNorm,
ln2: RmsNorm,
}
impl DecoderLayer {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?;
let mlp = MLP::new(cfg, vb.pp("mlp"))?;
let ln1 = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?;
let ln2 = RmsNorm::new(
cfg.hidden_size,
cfg.rms_norm_eps,
vb.pp("post_attention_layernorm"),
)?;
Ok(Self {
self_attn,
mlp,
ln1,
ln2,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let residual = xs;
let xs = self.ln1.forward(xs)?;
let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = xs.apply(&self.ln2)?.apply(&self.mlp)?;
residual + xs
}
}
#[derive(Debug, Clone)]
pub struct Model {
embed_tokens: candle_nn::Embedding,
layers: Vec<DecoderLayer>,
norm: RmsNorm,
lm_head: Linear,
device: Device,
dtype: DType,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb_m = vb.pp("model");
let embed_tokens =
candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?;
let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?);
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
let vb_l = vb_m.pp("layers");
for layer_idx in 0..cfg.num_hidden_layers {
let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?;
layers.push(layer)
}
let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?;
let lm_head = linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?;
Ok(Self {
embed_tokens,
layers,
norm,
lm_head,
device: vb.device().clone(),
dtype: vb.dtype(),
})
}
fn prepare_decoder_attention_mask(
&self,
b_size: usize,
tgt_len: usize,
seqlen_offset: usize,
) -> Result<Tensor> {
        // Plain causal mask: position i attends to all positions j <= i.
        // (TODO: should a sliding window mask be applied here?)
let mask: Vec<_> = (0..tgt_len)
.flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. }))
.collect();
let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?;
let mask = if seqlen_offset > 0 {
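            // Tokens already in the KV cache (positions before this chunk) stay fully visible.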
let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?;
Tensor::cat(&[&mask0, &mask], D::Minus1)?
} else {
mask
};
mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))?
.to_dtype(self.dtype)
}
pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
let (b_size, seq_len) = input_ids.dims2()?;
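        // A single decoded token attends to everything in the cache, so no mask is needed.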
let attention_mask = if seq_len <= 1 {
None
} else {
let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?;
Some(mask)
};
let mut xs = self.embed_tokens.forward(input_ids)?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)?
}
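        // Only the last position's hidden state is needed to predict the next token.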
xs.narrow(1, seq_len - 1, 1)?
.apply(&self.norm)?
.apply(&self.lm_head)
}
}
| 8 |
0 | hf_public_repos/candle/candle-transformers/src | hf_public_repos/candle/candle-transformers/src/models/mixtral.rs | //! Mixtral Model, a sparse mixture of expert model based on the Mistral architecture
//!
//! See Mixtral model details at:
//! - [Hugging Face](https://huggingface.co/docs/transformers/model_doc/mixtral)
//! - [Mixtral-8x7B Blog Post](https://mistral.ai/news/mixtral-of-experts/)
//!
//! The model uses a mixture of experts architecture with:
//! - 8 experts per layer
//! - Top 2 expert routing
//! - Sliding window attention
//! - RoPE embeddings
//!
//! References:
//! - [Hugging Face Implementation](https://github.com/huggingface/transformers/blob/main/src/transformers/models/mixtral/modeling_mixtral.py)
//! - [Mixtral Blog Post](https://mistral.ai/news/mixtral-of-experts/)
//!
use crate::models::with_tracing::{linear_no_bias, Linear, RmsNorm};
use candle::{DType, Device, Module, Result, Tensor, D};
use candle_nn::{Activation, VarBuilder};
use serde::Deserialize;
use std::sync::Arc;
/// https://github.com/huggingface/transformers/blob/1a585c1222a56bcaecc070966d558d4a9d862e83/src/transformers/models/mixtral/configuration_mixtral.py#L113
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct Config {
pub(crate) vocab_size: usize,
pub(crate) hidden_size: usize,
pub(crate) intermediate_size: usize,
pub(crate) num_hidden_layers: usize,
pub(crate) num_attention_heads: usize,
pub(crate) num_key_value_heads: usize,
pub(crate) hidden_act: Activation,
pub(crate) max_position_embeddings: usize,
pub(crate) rms_norm_eps: f64,
pub(crate) rope_theta: f64,
pub(crate) sliding_window: usize,
pub(crate) num_experts_per_tok: usize,
pub(crate) num_local_experts: usize,
pub(crate) use_flash_attn: bool,
}
impl Config {
/// https://huggingface.co/mistralai/Mixtral-8x7B-v0.1/blob/main/config.json
pub fn v0_1_8x7b(use_flash_attn: bool) -> Self {
Self {
vocab_size: 32000,
hidden_size: 4096,
intermediate_size: 14336,
num_hidden_layers: 32,
num_attention_heads: 32,
num_key_value_heads: 8,
hidden_act: Activation::Silu,
max_position_embeddings: 32768,
rms_norm_eps: 1e-5,
rope_theta: 1e6,
sliding_window: 4096,
num_experts_per_tok: 2,
num_local_experts: 8,
use_flash_attn,
}
}
}
#[derive(Debug, Clone)]
struct RotaryEmbedding {
sin: Tensor,
cos: Tensor,
}
fn rotate_half(xs: &Tensor) -> Result<Tensor> {
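    // Maps the two halves (x1, x2) of the last dimension to (-x2, x1), as used by rotary embeddings.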
let last_dim = xs.dim(D::Minus1)?;
let xs1 = xs.narrow(D::Minus1, 0, last_dim / 2)?;
let xs2 = xs.narrow(D::Minus1, last_dim / 2, last_dim - last_dim / 2)?;
Tensor::cat(&[&xs2.neg()?, &xs1], D::Minus1)
}
impl RotaryEmbedding {
fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> {
let dim = cfg.hidden_size / cfg.num_attention_heads;
let max_seq_len = cfg.max_position_embeddings;
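        // Standard RoPE inverse frequencies: 1 / rope_theta^(2i / dim) for i in 0..dim/2.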
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / (cfg.rope_theta as f32).powf(i as f32 / dim as f32))
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
.to_dtype(dtype)?
.reshape((max_seq_len, 1))?;
let freqs = t.matmul(&inv_freq)?;
let freqs = Tensor::cat(&[&freqs, &freqs], D::Minus1)?;
Ok(Self {
sin: freqs.sin()?,
cos: freqs.cos()?,
})
}
fn apply_rotary_emb_qkv(
&self,
q: &Tensor,
k: &Tensor,
seqlen_offset: usize,
) -> Result<(Tensor, Tensor)> {
let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
let cos = self.cos.narrow(0, seqlen_offset, seq_len)?;
let sin = self.sin.narrow(0, seqlen_offset, seq_len)?;
let cos = cos.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim)
let sin = sin.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim)
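        // RoPE rotation: x_rot = x * cos + rotate_half(x) * sin, applied to both q and k.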
let q_embed = (q.broadcast_mul(&cos)? + rotate_half(q)?.broadcast_mul(&sin))?;
let k_embed = (k.broadcast_mul(&cos)? + rotate_half(k)?.broadcast_mul(&sin))?;
Ok((q_embed, k_embed))
}
}
#[cfg(feature = "flash-attn")]
fn flash_attn(
q: &Tensor,
k: &Tensor,
v: &Tensor,
softmax_scale: f32,
causal: bool,
) -> Result<Tensor> {
candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal)
}
#[cfg(not(feature = "flash-attn"))]
fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> {
unimplemented!("compile with '--features flash-attn'")
}
#[derive(Debug, Clone)]
struct Attention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
o_proj: Linear,
num_heads: usize,
num_kv_heads: usize,
num_kv_groups: usize,
head_dim: usize,
hidden_size: usize,
rotary_emb: Arc<RotaryEmbedding>,
kv_cache: Option<(Tensor, Tensor)>,
use_flash_attn: bool,
}
impl Attention {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let num_heads = cfg.num_attention_heads;
let num_kv_heads = cfg.num_key_value_heads;
let num_kv_groups = num_heads / num_kv_heads;
let head_dim = hidden_sz / num_heads;
let q_proj = linear_no_bias(hidden_sz, num_heads * head_dim, vb.pp("q_proj"))?;
let k_proj = linear_no_bias(hidden_sz, num_kv_heads * head_dim, vb.pp("k_proj"))?;
let v_proj = linear_no_bias(hidden_sz, num_kv_heads * head_dim, vb.pp("v_proj"))?;
let o_proj = linear_no_bias(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?;
Ok(Self {
q_proj,
k_proj,
v_proj,
o_proj,
num_heads,
num_kv_heads,
num_kv_groups,
head_dim,
hidden_size: hidden_sz,
rotary_emb,
kv_cache: None,
use_flash_attn: cfg.use_flash_attn,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let (b_sz, q_len, _) = xs.dims3()?;
let query_states = self.q_proj.forward(xs)?;
let key_states = self.k_proj.forward(xs)?;
let value_states = self.v_proj.forward(xs)?;
let query_states = query_states
.reshape((b_sz, q_len, self.num_heads, self.head_dim))?
.transpose(1, 2)?;
let key_states = key_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let value_states = value_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let (query_states, key_states) =
self.rotary_emb
.apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?;
let (key_states, value_states) = match &self.kv_cache {
None => (key_states, value_states),
Some((prev_k, prev_v)) => {
let key_states = Tensor::cat(&[prev_k, &key_states], 2)?;
let value_states = Tensor::cat(&[prev_v, &value_states], 2)?;
(key_states, value_states)
}
};
self.kv_cache = Some((key_states.clone(), value_states.clone()));
let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?;
let value_states = crate::utils::repeat_kv(value_states, self.num_kv_groups)?;
let attn_output = if self.use_flash_attn {
// flash-attn expects (b_sz, seq_len, nheads, head_dim)
let q = query_states.transpose(1, 2)?;
let k = key_states.transpose(1, 2)?;
let v = value_states.transpose(1, 2)?;
let softmax_scale = 1f32 / (self.head_dim as f32).sqrt();
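            // The causal flag is only needed when more than one query position is processed.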
flash_attn(&q, &k, &v, softmax_scale, q_len > 1)?.transpose(1, 2)?
} else {
let scale = 1f64 / f64::sqrt(self.head_dim as f64);
let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?;
let attn_weights = match attention_mask {
None => attn_weights,
Some(mask) => attn_weights.broadcast_add(mask)?,
};
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
attn_weights.matmul(&value_states)?
};
attn_output
.transpose(1, 2)?
.reshape((b_sz, q_len, self.hidden_size))?
.apply(&self.o_proj)
}
}
#[derive(Debug, Clone)]
struct BlockSparseTop2MLP {
w1: Linear,
w2: Linear,
w3: Linear,
act_fn: Activation,
}
impl BlockSparseTop2MLP {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let intermediate_sz = cfg.intermediate_size;
let w1 = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("w1"))?;
let w2 = linear_no_bias(intermediate_sz, hidden_sz, vb.pp("w2"))?;
let w3 = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("w3"))?;
Ok(Self {
w1,
w2,
w3,
act_fn: cfg.hidden_act,
})
}
}
impl Module for BlockSparseTop2MLP {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let lhs = xs.apply(&self.w1)?.apply(&self.act_fn)?;
let rhs = xs.apply(&self.w3)?;
(lhs * rhs)?.apply(&self.w2)
}
}
#[derive(Debug, Clone)]
struct SparseMoeBlock {
gate: Linear,
experts: Vec<BlockSparseTop2MLP>,
num_experts_per_tok: usize,
}
impl SparseMoeBlock {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let gate = linear_no_bias(cfg.hidden_size, cfg.num_local_experts, vb.pp("gate"))?;
let mut experts = Vec::with_capacity(cfg.num_local_experts);
let vb = vb.pp("experts");
for idx in 0..cfg.num_local_experts {
let expert = BlockSparseTop2MLP::new(cfg, vb.pp(idx))?;
experts.push(expert)
}
Ok(SparseMoeBlock {
gate,
experts,
num_experts_per_tok: cfg.num_experts_per_tok,
})
}
}
impl Module for SparseMoeBlock {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let (b_size, seq_len, hidden_dim) = xs.dims3()?;
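        // Flatten batch and sequence dimensions: expert routing is decided per token.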
let xs = xs.reshape(((), hidden_dim))?;
let router_logits = xs.apply(&self.gate)?;
let routing_weights = candle_nn::ops::softmax_last_dim(&router_logits)?;
// In order to extract topk, we extract the data from the tensor and manipulate it
// directly. Maybe we will want to use some custom ops instead at some point.
let routing_weights = routing_weights.to_dtype(DType::F32)?.to_vec2::<f32>()?;
// routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
// top_x contains the row indexes to evaluate for each expert.
let mut top_x = vec![vec![]; self.experts.len()];
let mut selected_rws = vec![vec![]; self.experts.len()];
for (row_idx, rw) in routing_weights.iter().enumerate() {
let mut dst = (0..rw.len() as u32).collect::<Vec<u32>>();
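            // Sort expert indices by routing weight, descending; the first
            // `num_experts_per_tok` entries are this token's selected experts.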
dst.sort_by(|&i, &j| rw[j as usize].total_cmp(&rw[i as usize]));
let mut sum_routing_weights = 0f32;
for &expert_idx in dst.iter().take(self.num_experts_per_tok) {
let expert_idx = expert_idx as usize;
let routing_weight = rw[expert_idx];
sum_routing_weights += routing_weight;
top_x[expert_idx].push(row_idx as u32);
}
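            // Renormalize the kept routing weights so they sum to 1 for this token.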
for &expert_idx in dst.iter().take(self.num_experts_per_tok) {
let expert_idx = expert_idx as usize;
let routing_weight = rw[expert_idx];
selected_rws[expert_idx].push(routing_weight / sum_routing_weights)
}
}
// routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
// expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0)
let mut ys = xs.zeros_like()?;
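        // Scatter-accumulate: each expert processes only its routed rows, and the weighted
        // results are added back into the corresponding rows of the output.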
for (expert_idx, expert_layer) in self.experts.iter().enumerate() {
let top_x = &top_x[expert_idx];
if top_x.is_empty() {
continue;
}
let top_x = Tensor::new(top_x.as_slice(), xs.device())?;
let selected_rws =
Tensor::new(selected_rws[expert_idx].as_slice(), xs.device())?.reshape(((), 1))?;
// Index the correct hidden states and compute the expert hidden state for
// the current expert. We need to make sure to multiply the output hidden
// states by `routing_weights` on the corresponding tokens (top-1 and top-2)
let current_state = xs.index_select(&top_x, 0)?.reshape(((), hidden_dim))?;
// current_hidden_states = expert_layer(current_state, routing_weights[top_x_list, idx_list, None])
            let current_hidden_states = expert_layer.forward(&current_state)?;
let current_hidden_states = current_hidden_states.broadcast_mul(&selected_rws)?;
            ys = ys.index_add(&top_x, &current_hidden_states, 0)?;
}
let ys = ys.reshape((b_size, seq_len, hidden_dim))?;
Ok(ys)
}
}
#[derive(Debug, Clone)]
struct DecoderLayer {
self_attn: Attention,
block_sparse_moe: SparseMoeBlock,
input_layernorm: RmsNorm,
post_attention_layernorm: RmsNorm,
}
impl DecoderLayer {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?;
let block_sparse_moe = SparseMoeBlock::new(cfg, vb.pp("block_sparse_moe"))?;
let input_layernorm =
RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?;
let post_attention_layernorm = RmsNorm::new(
cfg.hidden_size,
cfg.rms_norm_eps,
vb.pp("post_attention_layernorm"),
)?;
Ok(Self {
self_attn,
block_sparse_moe,
input_layernorm,
post_attention_layernorm,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let residual = xs;
let xs = self.input_layernorm.forward(xs)?;
let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = xs
.apply(&self.post_attention_layernorm)?
.apply(&self.block_sparse_moe)?;
residual + xs
}
}
#[derive(Debug, Clone)]
pub struct Model {
embed_tokens: candle_nn::Embedding,
layers: Vec<DecoderLayer>,
norm: RmsNorm,
lm_head: Linear,
sliding_window: usize,
device: Device,
dtype: DType,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb_m = vb.pp("model");
let embed_tokens =
candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?;
let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?);
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
let vb_l = vb_m.pp("layers");
for layer_idx in 0..cfg.num_hidden_layers {
let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?;
layers.push(layer)
}
let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?;
let lm_head = linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?;
Ok(Self {
embed_tokens,
layers,
norm,
lm_head,
sliding_window: cfg.sliding_window,
device: vb.device().clone(),
dtype: vb.dtype(),
})
}
fn prepare_decoder_attention_mask(
&self,
b_size: usize,
tgt_len: usize,
seqlen_offset: usize,
) -> Result<Tensor> {
        // Causal mask restricted to the sliding window: position i attends to
        // positions j with i - sliding_window <= j <= i.
let mask: Vec<_> = (0..tgt_len)
.flat_map(|i| {
(0..tgt_len).map(move |j| {
if i < j || j + self.sliding_window < i {
f32::NEG_INFINITY
} else {
0.
}
})
})
.collect();
let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?;
let mask = if seqlen_offset > 0 {
let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?;
Tensor::cat(&[&mask0, &mask], D::Minus1)?
} else {
mask
};
mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))?
.to_dtype(self.dtype)
}
pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
let (b_size, seq_len) = input_ids.dims2()?;
let attention_mask = if seq_len <= 1 {
None
} else {
let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?;
Some(mask)
};
let mut xs = self.embed_tokens.forward(input_ids)?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)?
}
xs.narrow(1, seq_len - 1, 1)?
.apply(&self.norm)?
.apply(&self.lm_head)
}
}
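// Minimal usage sketch (hypothetical weight-file list and input tensor; assumes safetensors
// weights laid out under the `model.` / `lm_head.` prefixes expected above):
//
//     let device = Device::cuda_if_available(0)?;
//     let vb = unsafe {
//         VarBuilder::from_mmaped_safetensors(&weight_files, DType::BF16, &device)?
//     };
//     let mut model = Model::new(&Config::v0_1_8x7b(false), vb)?;
//     let logits = model.forward(&input_ids, 0)?; // (batch, 1, vocab_size)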
| 9 |