Dataset columns: text (string, 7 to 318k characters), id (string, 14 to 166 characters), metadata (dict), __index_level_0__ (int64, 0 to 439).
use crate::{Error, Result, Shape}; #[derive(Debug, PartialEq, Eq, Clone)] pub struct Layout { shape: Shape, // The strides are given in number of elements and not in bytes. stride: Vec<usize>, start_offset: usize, } impl Layout { pub fn new(shape: Shape, stride: Vec<usize>, start_offset: usize) -> Self { Self { shape, stride, start_offset, } } pub fn contiguous_with_offset<S: Into<Shape>>(shape: S, start_offset: usize) -> Self { let shape = shape.into(); let stride = shape.stride_contiguous(); Self { shape, stride, start_offset, } } pub fn contiguous<S: Into<Shape>>(shape: S) -> Self { Self::contiguous_with_offset(shape, 0) } pub fn dims(&self) -> &[usize] { self.shape.dims() } pub fn shape(&self) -> &Shape { &self.shape } pub fn stride(&self) -> &[usize] { &self.stride } pub fn start_offset(&self) -> usize { self.start_offset } /// Returns the appropriate start and stop offset if the data is stored in a C /// contiguous (aka row major) way. pub fn contiguous_offsets(&self) -> Option<(usize, usize)> { if self.is_contiguous() { let start_o = self.start_offset; Some((start_o, start_o + self.shape.elem_count())) } else { None } } /// Returns true if the data is stored in a C contiguous (aka row major) way. /// Note that this does not implies that the start offset is 0 or that there are no extra /// elements at the end of the storage. pub fn is_contiguous(&self) -> bool { self.shape.is_contiguous(&self.stride) } /// Returns true if the data is stored in a Fortran contiguous (aka column major) way. pub fn is_fortran_contiguous(&self) -> bool { self.shape.is_fortran_contiguous(&self.stride) } pub(crate) fn narrow(&self, dim: usize, start: usize, len: usize) -> Result<Self> { let dims = self.shape().dims(); if dim >= dims.len() { Err(Error::DimOutOfRange { shape: self.shape().clone(), dim: dim as i32, op: "narrow", } .bt())? } if start + len > dims[dim] { Err(Error::NarrowInvalidArgs { shape: self.shape.clone(), dim, start, len, msg: "start + len > dim_len", } .bt())? } let mut dims = dims.to_vec(); dims[dim] = len; Ok(Self { shape: Shape::from(dims), stride: self.stride.clone(), start_offset: self.start_offset + self.stride[dim] * start, }) } pub(crate) fn transpose(&self, dim1: usize, dim2: usize) -> Result<Self> { let rank = self.shape.rank(); if rank <= dim1 || rank <= dim2 { Err(Error::UnexpectedNumberOfDims { expected: usize::max(dim1, dim2), got: rank, shape: self.shape().clone(), } .bt())? 
} let mut stride = self.stride().to_vec(); let mut dims = self.shape().dims().to_vec(); dims.swap(dim1, dim2); stride.swap(dim1, dim2); Ok(Self { shape: Shape::from(dims), stride, start_offset: self.start_offset, }) } pub(crate) fn permute(&self, idxs: &[usize]) -> Result<Self> { let is_permutation = idxs.len() == self.shape.rank() && (0..idxs.len()).all(|i| idxs.contains(&i)); if !is_permutation { crate::bail!( "dimension mismatch in permute, tensor {:?}, dims: {:?}", self.dims(), idxs ) } let stride = self.stride(); let dims = self.shape().dims(); let mut perm_stride = stride.to_vec(); let mut perm_dims = dims.to_vec(); for (i, &idx) in idxs.iter().enumerate() { perm_stride[i] = stride[idx]; perm_dims[i] = dims[idx]; } Ok(Self { shape: Shape::from(perm_dims), stride: perm_stride, start_offset: self.start_offset, }) } pub fn broadcast_as<S: Into<Shape>>(&self, shape: S) -> Result<Self> { let shape = shape.into(); if shape.rank() < self.shape().rank() { return Err(Error::BroadcastIncompatibleShapes { src_shape: self.shape().clone(), dst_shape: shape, } .bt()); } let added_dims = shape.rank() - self.shape().rank(); let mut stride = vec![0; added_dims]; for (&dst_dim, (&src_dim, &src_stride)) in shape.dims()[added_dims..] .iter() .zip(self.dims().iter().zip(self.stride())) { let s = if dst_dim == src_dim { src_stride } else if src_dim != 1 { return Err(Error::BroadcastIncompatibleShapes { src_shape: self.shape().clone(), dst_shape: shape, } .bt()); } else { 0 }; stride.push(s) } Ok(Self { shape, stride, start_offset: self.start_offset, }) } pub(crate) fn strided_index(&self) -> crate::StridedIndex { crate::StridedIndex::from_layout(self) } pub(crate) fn strided_blocks(&self) -> crate::StridedBlocks { let mut block_len = 1; let mut contiguous_dims = 0; // These are counted from the right. for (&stride, &dim) in self.stride().iter().zip(self.dims().iter()).rev() { if stride != block_len { break; } block_len *= dim; contiguous_dims += 1; } let index_dims = self.dims().len() - contiguous_dims; if index_dims == 0 { crate::StridedBlocks::SingleBlock { start_offset: self.start_offset, len: block_len, } } else { let block_start_index = crate::StridedIndex::new( &self.dims()[..index_dims], &self.stride[..index_dims], self.start_offset, ); crate::StridedBlocks::MultipleBlocks { block_start_index, block_len, } } } // Returns the contiguous offsets with broadcast if applicable. 
pub(crate) fn offsets_b(&self) -> Option<ContiguousOffsetsWithBroadcast> { let mut left_broadcast = 1; let mut right_broadcast = 1; let strides = self.stride(); let dims = self.dims(); let mut start_cont = 0; let mut end_cont = dims.len(); for (&s, &d) in strides.iter().zip(dims.iter()) { if s != 0 { break; } start_cont += 1; left_broadcast *= d; } if start_cont == dims.len() { return Some(ContiguousOffsetsWithBroadcast { start: self.start_offset, len: 1, left_broadcast, right_broadcast: 1, }); } for (&s, &d) in strides.iter().zip(dims.iter()).rev() { if s != 0 { break; } end_cont -= 1; right_broadcast *= d; } // Check that the inner dims are contiguous let strides = &strides[start_cont..end_cont]; let dims = &dims[start_cont..end_cont]; let mut len = 1; for (&stride, &dim) in strides.iter().zip(dims.iter()).rev() { if stride != len { return None; } len *= dim; } Some(ContiguousOffsetsWithBroadcast { start: self.start_offset, len, left_broadcast, right_broadcast, }) } } #[derive(Debug, Clone, PartialEq, Eq)] pub struct ContiguousOffsetsWithBroadcast { pub start: usize, pub len: usize, pub left_broadcast: usize, pub right_broadcast: usize, }
candle/candle-core/src/layout.rs/0
{ "file_path": "candle/candle-core/src/layout.rs", "repo_id": "candle", "token_count": 4361 }
18
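The `Layout` type above stores strides in numbers of elements: for a C-contiguous tensor the stride of a dimension is the product of the dimensions to its right, and `broadcast_as` gives broadcast dimensions a stride of 0 so that every index along them maps to the same storage element. A standalone sketch of that convention, in plain Rust with no candle dependency (the helper names are illustrative):

```rust
// Row-major (C-contiguous) strides for a shape, in elements, matching the
// convention used by `Layout`: stride[d] = product of the dims to the right of d.
fn contiguous_strides(dims: &[usize]) -> Vec<usize> {
    let mut strides = vec![1usize; dims.len()];
    for d in (0..dims.len().saturating_sub(1)).rev() {
        strides[d] = strides[d + 1] * dims[d + 1];
    }
    strides
}

// Flat storage offset of a multi-dimensional index, given strides and a start offset.
fn flat_offset(index: &[usize], strides: &[usize], start_offset: usize) -> usize {
    start_offset + index.iter().zip(strides).map(|(i, s)| i * s).sum::<usize>()
}

fn main() {
    let dims = [2, 3, 4];
    let strides = contiguous_strides(&dims);
    assert_eq!(strides, vec![12, 4, 1]);
    // Element [1, 2, 3] of a contiguous (2, 3, 4) tensor lives at offset 23.
    assert_eq!(flat_offset(&[1, 2, 3], &strides, 0), 23);
    // A broadcast dimension has stride 0, so every index along it hits the same element.
    assert_eq!(flat_offset(&[5, 2, 3], &[0, 4, 1], 0), 11);
}
```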
use crate::{DType, Device, Error, Result, Tensor, WithDType}; use safetensors::tensor as st; use safetensors::tensor::SafeTensors; use std::borrow::Cow; use std::collections::HashMap; use std::path::Path; impl From<DType> for st::Dtype { fn from(value: DType) -> Self { match value { DType::U8 => st::Dtype::U8, DType::U32 => st::Dtype::U32, DType::I64 => st::Dtype::I64, DType::BF16 => st::Dtype::BF16, DType::F16 => st::Dtype::F16, DType::F32 => st::Dtype::F32, DType::F64 => st::Dtype::F64, } } } impl TryFrom<st::Dtype> for DType { type Error = Error; fn try_from(value: st::Dtype) -> Result<Self> { match value { st::Dtype::U8 => Ok(DType::U8), st::Dtype::U32 => Ok(DType::U32), st::Dtype::I64 => Ok(DType::I64), st::Dtype::BF16 => Ok(DType::BF16), st::Dtype::F16 => Ok(DType::F16), st::Dtype::F32 => Ok(DType::F32), st::Dtype::F64 => Ok(DType::F64), dtype => Err(Error::UnsupportedSafeTensorDtype(dtype)), } } } impl st::View for Tensor { fn dtype(&self) -> st::Dtype { self.dtype().into() } fn shape(&self) -> &[usize] { self.shape().dims() } fn data(&self) -> Cow<[u8]> { // This copies data from GPU to CPU. // TODO: Avoid the unwrap here. Cow::Owned(convert_back(self).unwrap()) } fn data_len(&self) -> usize { let n: usize = self.shape().elem_count(); let bytes_per_element = self.dtype().size_in_bytes(); n * bytes_per_element } } impl st::View for &Tensor { fn dtype(&self) -> st::Dtype { (*self).dtype().into() } fn shape(&self) -> &[usize] { self.dims() } fn data(&self) -> Cow<[u8]> { // This copies data from GPU to CPU. // TODO: Avoid the unwrap here. Cow::Owned(convert_back(self).unwrap()) } fn data_len(&self) -> usize { let n: usize = self.dims().iter().product(); let bytes_per_element = (*self).dtype().size_in_bytes(); n * bytes_per_element } } impl Tensor { pub fn save_safetensors<P: AsRef<Path>>(&self, name: &str, filename: P) -> Result<()> { let data = [(name, self.clone())]; Ok(st::serialize_to_file(data, &None, filename.as_ref())?) } } fn convert_slice<T: WithDType>(data: &[u8], shape: &[usize], device: &Device) -> Result<Tensor> { let size_in_bytes = T::DTYPE.size_in_bytes(); let elem_count = data.len() / size_in_bytes; if (data.as_ptr() as usize) % size_in_bytes == 0 { // SAFETY This is safe because we just checked that this // was correctly aligned. let data: &[T] = unsafe { std::slice::from_raw_parts(data.as_ptr() as *const T, elem_count) }; Tensor::from_slice(data, shape, device) } else { // XXX: We need to specify `T` here, otherwise the compiler will infer u8 because of the following cast // Making this vector too small to fit a full f16/f32/f64 weights, resulting in out-of-bounds access let mut c: Vec<T> = Vec::with_capacity(elem_count); // SAFETY: We just created c, so the allocated memory is necessarily // contiguous and non overlapping with the view's data. // We're downgrading the `c` pointer from T to u8, which removes alignment // constraints. unsafe { std::ptr::copy_nonoverlapping(data.as_ptr(), c.as_mut_ptr() as *mut u8, data.len()); c.set_len(elem_count) } Tensor::from_slice(&c, shape, device) } } fn convert_slice_with_cast<T: Sized + Copy, U: WithDType, F: Fn(T) -> Result<U>>( data: &[u8], shape: &[usize], device: &Device, conv: F, ) -> Result<Tensor> { let size_in_bytes = std::mem::size_of::<T>(); let elem_count = data.len() / size_in_bytes; if (data.as_ptr() as usize) % size_in_bytes == 0 { // SAFETY This is safe because we just checked that this // was correctly aligned. 
let data: &[T] = unsafe { std::slice::from_raw_parts(data.as_ptr() as *const T, elem_count) }; let data = data.iter().map(|t| conv(*t)).collect::<Result<Vec<_>>>()?; Tensor::from_vec(data, shape, device) } else { // XXX: We need to specify `T` here, otherwise the compiler will infer u8 because of the following cast // Making this vector too small to fit a full f16/f32/f64 weights, resulting in out-of-bounds access let mut c: Vec<T> = Vec::with_capacity(elem_count); // SAFETY: We just created c, so the allocated memory is necessarily // contiguous and non overlapping with the view's data. // We're downgrading the `c` pointer from T to u8, which removes alignment // constraints. unsafe { std::ptr::copy_nonoverlapping(data.as_ptr(), c.as_mut_ptr() as *mut u8, data.len()); c.set_len(elem_count) } let c = c.into_iter().map(conv).collect::<Result<Vec<_>>>()?; Tensor::from_vec(c, shape, device) } } fn convert_with_cast_<T: Sized + Copy, U: WithDType, F: Fn(T) -> Result<U>>( view: &st::TensorView<'_>, device: &Device, conv: F, ) -> Result<Tensor> { convert_slice_with_cast::<T, U, F>(view.data(), view.shape(), device, conv) } fn convert_<T: WithDType>(view: &st::TensorView<'_>, device: &Device) -> Result<Tensor> { convert_slice::<T>(view.data(), view.shape(), device) } fn convert_back_<T: WithDType>(mut vs: Vec<T>) -> Vec<u8> { let size_in_bytes = T::DTYPE.size_in_bytes(); let length = vs.len() * size_in_bytes; let capacity = vs.capacity() * size_in_bytes; let ptr = vs.as_mut_ptr() as *mut u8; // Don't run the destructor for Vec<T> std::mem::forget(vs); // SAFETY: // // Every T is larger than u8, so there is no issue regarding alignment. // This re-interpret the Vec<T> as a Vec<u8>. unsafe { Vec::from_raw_parts(ptr, length, capacity) } } pub trait Load { fn load(&self, device: &Device) -> Result<Tensor>; } impl<'a> Load for st::TensorView<'a> { fn load(&self, device: &Device) -> Result<Tensor> { convert(self, device) } } impl Tensor { pub fn from_raw_buffer( data: &[u8], dtype: DType, shape: &[usize], device: &Device, ) -> Result<Self> { match dtype { DType::U8 => convert_slice::<u8>(data, shape, device), DType::U32 => convert_slice::<u32>(data, shape, device), DType::I64 => convert_slice::<i64>(data, shape, device), DType::BF16 => convert_slice::<half::bf16>(data, shape, device), DType::F16 => convert_slice::<half::f16>(data, shape, device), DType::F32 => convert_slice::<f32>(data, shape, device), DType::F64 => convert_slice::<f64>(data, shape, device), } } } fn convert(view: &st::TensorView<'_>, device: &Device) -> Result<Tensor> { match view.dtype() { st::Dtype::U8 => convert_::<u8>(view, device), st::Dtype::U16 => { let conv = |x| Ok(u32::from(x)); convert_with_cast_::<u16, u32, _>(view, device, conv) } st::Dtype::U32 => convert_::<u32>(view, device), st::Dtype::I32 => { let conv = |x| Ok(i64::from(x)); convert_with_cast_::<i32, i64, _>(view, device, conv) } st::Dtype::I64 => convert_::<i64>(view, device), st::Dtype::BF16 => convert_::<half::bf16>(view, device), st::Dtype::F16 => convert_::<half::f16>(view, device), st::Dtype::F32 => convert_::<f32>(view, device), st::Dtype::F64 => convert_::<f64>(view, device), dtype => Err(Error::UnsupportedSafeTensorDtype(dtype)), } } fn convert_back(tensor: &Tensor) -> Result<Vec<u8>> { // TODO: This makes an unnecessary copy when the tensor is on the cpu. 
let tensor = tensor.flatten_all()?; match tensor.dtype() { DType::U8 => Ok(convert_back_::<u8>(tensor.to_vec1()?)), DType::U32 => Ok(convert_back_::<u32>(tensor.to_vec1()?)), DType::I64 => Ok(convert_back_::<i64>(tensor.to_vec1()?)), DType::F16 => Ok(convert_back_::<half::f16>(tensor.to_vec1()?)), DType::BF16 => Ok(convert_back_::<half::bf16>(tensor.to_vec1()?)), DType::F32 => Ok(convert_back_::<f32>(tensor.to_vec1()?)), DType::F64 => Ok(convert_back_::<f64>(tensor.to_vec1()?)), } } pub fn load<P: AsRef<Path>>(filename: P, device: &Device) -> Result<HashMap<String, Tensor>> { let data = std::fs::read(filename.as_ref())?; load_buffer(&data[..], device) } pub fn load_buffer(data: &[u8], device: &Device) -> Result<HashMap<String, Tensor>> { let st = safetensors::SafeTensors::deserialize(data)?; st.tensors() .into_iter() .map(|(name, view)| Ok((name, view.load(device)?))) .collect() } pub fn save<K: AsRef<str> + Ord + std::fmt::Display, P: AsRef<Path>>( tensors: &HashMap<K, Tensor>, filename: P, ) -> Result<()> { Ok(st::serialize_to_file(tensors, &None, filename.as_ref())?) } #[derive(yoke::Yokeable)] struct SafeTensors_<'a>(SafeTensors<'a>); pub struct MmapedSafetensors { safetensors: Vec<yoke::Yoke<SafeTensors_<'static>, memmap2::Mmap>>, routing: Option<HashMap<String, usize>>, } impl MmapedSafetensors { /// Creates a wrapper around a memory mapped file and deserialize the safetensors header. /// /// # Safety /// /// The unsafe is inherited from [`memmap2::MmapOptions`]. pub unsafe fn new<P: AsRef<Path>>(p: P) -> Result<Self> { let p = p.as_ref(); let file = std::fs::File::open(p).map_err(|e| Error::from(e).with_path(p))?; let file = memmap2::MmapOptions::new() .map(&file) .map_err(|e| Error::from(e).with_path(p))?; let safetensors = yoke::Yoke::<SafeTensors_<'static>, memmap2::Mmap>::try_attach_to_cart( file, |data: &[u8]| { let st = safetensors::SafeTensors::deserialize(data) .map_err(|e| Error::from(e).with_path(p))?; Ok::<_, Error>(SafeTensors_(st)) }, )?; Ok(Self { safetensors: vec![safetensors], routing: None, }) } /// Creates a wrapper around multiple memory mapped file and deserialize the safetensors headers. /// /// If a tensor name appears in multiple files, the last entry is returned. /// /// # Safety /// /// The unsafe is inherited from [`memmap2::MmapOptions`]. 
pub unsafe fn multi<P: AsRef<Path>>(paths: &[P]) -> Result<Self> { let mut routing = HashMap::new(); let mut safetensors = vec![]; for (index, p) in paths.iter().enumerate() { let p = p.as_ref(); let file = std::fs::File::open(p).map_err(|e| Error::from(e).with_path(p))?; let file = memmap2::MmapOptions::new() .map(&file) .map_err(|e| Error::from(e).with_path(p))?; let data = yoke::Yoke::<SafeTensors_<'static>, memmap2::Mmap>::try_attach_to_cart( file, |data: &[u8]| { let st = safetensors::SafeTensors::deserialize(data) .map_err(|e| Error::from(e).with_path(p))?; Ok::<_, Error>(SafeTensors_(st)) }, )?; for k in data.get().0.names() { routing.insert(k.to_string(), index); } safetensors.push(data) } Ok(Self { safetensors, routing: Some(routing), }) } pub fn load(&self, name: &str, dev: &Device) -> Result<Tensor> { self.get(name)?.load(dev) } pub fn tensors(&self) -> Vec<(String, st::TensorView<'_>)> { let mut tensors = vec![]; for safetensors in self.safetensors.iter() { tensors.push(safetensors.get().0.tensors()) } tensors.into_iter().flatten().collect() } pub fn get(&self, name: &str) -> Result<st::TensorView<'_>> { let index = match &self.routing { None => 0, Some(routing) => { let index = routing.get(name).ok_or_else(|| { Error::CannotFindTensor { path: name.to_string(), } .bt() })?; *index } }; Ok(self.safetensors[index].get().0.tensor(name)?) } } pub struct BufferedSafetensors { safetensors: yoke::Yoke<SafeTensors_<'static>, Vec<u8>>, } impl BufferedSafetensors { /// Creates a wrapper around a binary buffer and deserialize the safetensors header. pub fn new(buffer: Vec<u8>) -> Result<Self> { let safetensors = yoke::Yoke::<SafeTensors_<'static>, Vec<u8>>::try_attach_to_cart( buffer, |data: &[u8]| { let st = safetensors::SafeTensors::deserialize(data)?; Ok::<_, Error>(SafeTensors_(st)) }, )?; Ok(Self { safetensors }) } pub fn load(&self, name: &str, dev: &Device) -> Result<Tensor> { self.get(name)?.load(dev) } pub fn tensors(&self) -> Vec<(String, st::TensorView<'_>)> { self.safetensors.get().0.tensors() } pub fn get(&self, name: &str) -> Result<st::TensorView<'_>> { Ok(self.safetensors.get().0.tensor(name)?) } } pub struct MmapedFile { path: std::path::PathBuf, inner: memmap2::Mmap, } impl MmapedFile { /// Creates a wrapper around a memory mapped file from which you can retrieve /// tensors using [`MmapedFile::deserialize`] /// /// # Safety /// /// The unsafe is inherited from [`memmap2::MmapOptions`]. 
pub unsafe fn new<P: AsRef<Path>>(p: P) -> Result<Self> { let p = p.as_ref(); let file = std::fs::File::open(p).map_err(|e| Error::from(e).with_path(p))?; let inner = memmap2::MmapOptions::new() .map(&file) .map_err(|e| Error::from(e).with_path(p))?; Ok(Self { inner, path: p.to_path_buf(), }) } pub fn deserialize(&self) -> Result<SafeTensors<'_>> { let st = safetensors::SafeTensors::deserialize(&self.inner) .map_err(|e| Error::from(e).with_path(&self.path))?; Ok(st) } } #[cfg(test)] mod tests { use super::*; use std::collections::HashMap; #[test] fn save_single_tensor() { let t = Tensor::zeros((2, 2), DType::F32, &Device::Cpu).unwrap(); t.save_safetensors("t", "t.safetensors").unwrap(); let bytes = std::fs::read("t.safetensors").unwrap(); assert_eq!(bytes, b"@\0\0\0\0\0\0\0{\"t\":{\"dtype\":\"F32\",\"shape\":[2,2],\"data_offsets\":[0,16]}} \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"); std::fs::remove_file("t.safetensors").unwrap(); } #[test] fn save_load_multiple_tensors() { let t = Tensor::zeros((2, 2), DType::F32, &Device::Cpu).unwrap(); let u = Tensor::zeros((1, 2), DType::F32, &Device::Cpu).unwrap(); let map: HashMap<_, _> = [("t", t), ("u", u)].into_iter().collect(); save(&map, "multi.safetensors").unwrap(); let weights = load("multi.safetensors", &Device::Cpu).unwrap(); assert_eq!(weights.get("t").unwrap().dims(), &[2, 2]); assert_eq!(weights.get("u").unwrap().dims(), &[1, 2]); let bytes = std::fs::read("multi.safetensors").unwrap(); assert_eq!(bytes, b"x\0\0\0\0\0\0\0{\"t\":{\"dtype\":\"F32\",\"shape\":[2,2],\"data_offsets\":[0,16]},\"u\":{\"dtype\":\"F32\",\"shape\":[1,2],\"data_offsets\":[16,24]}} \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"); std::fs::remove_file("multi.safetensors").unwrap(); } }
candle/candle-core/src/safetensors.rs/0
{ "file_path": "candle/candle-core/src/safetensors.rs", "repo_id": "candle", "token_count": 7743 }
19
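The module above exposes `save` and `load` helpers for whole maps of named tensors, plus memory-mapped and buffered variants. A hedged round-trip sketch using those two helpers; the crate path `candle_core` and the file name are assumptions for illustration (inside the repository the crate is usually referenced as `candle`):

```rust
use std::collections::HashMap;

use candle_core::{DType, Device, Result, Tensor};

fn main() -> Result<()> {
    let device = Device::Cpu;
    let mut tensors = HashMap::new();
    tensors.insert("weight".to_string(), Tensor::zeros((4, 4), DType::F32, &device)?);
    tensors.insert("bias".to_string(), Tensor::zeros((1, 4), DType::F32, &device)?);

    // `save` writes every tensor in the map into a single .safetensors file.
    candle_core::safetensors::save(&tensors, "example.safetensors")?;

    // `load` reads the file back into a name -> Tensor map on the given device.
    let loaded = candle_core::safetensors::load("example.safetensors", &device)?;
    assert_eq!(loaded["weight"].dims(), &[4, 4]);
    assert_eq!(loaded["bias"].dims(), &[1, 4]);

    std::fs::remove_file("example.safetensors")?;
    Ok(())
}
```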
use candle_core::{test_device, test_utils, Device, IndexOp, Result, Tensor}; // https://github.com/huggingface/candle/issues/364 fn avg_pool2d(dev: &Device) -> Result<()> { let data: Vec<f32> = vec![ 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., ]; let t = Tensor::from_vec(data, (1, 1, 4, 4), dev)?; let pool = t.avg_pool2d(2)?.squeeze(0)?.squeeze(0)?; assert_eq!(pool.to_vec2::<f32>()?, [[0.5f32, 1.], [1., 1.]]); let data: Vec<f32> = vec![ 1., 2., 1., 3., 0., 0., 1., 1., 1., 1., 1., 1., 5., 1., 1., 1., ]; let t = Tensor::from_vec(data, (1, 1, 2, 8), dev)?; let pool = t.avg_pool2d(2)?.squeeze(0)?.squeeze(0)?; assert_eq!(pool.to_vec2::<f32>()?, [[5. / 4., 6. / 4., 6. / 4., 1.]]); Ok(()) } fn max_pool2d(dev: &Device) -> Result<()> { let data: Vec<f32> = vec![ 1., 2., 1., 3., 0., 0., 1., 1., 1., 1., 1., 1., 5., 1., 1., 1., ]; let t = Tensor::from_vec(data, (1, 1, 4, 4), dev)?; let pool = t.max_pool2d(2)?.squeeze(0)?.squeeze(0)?; assert_eq!(pool.to_vec2::<f32>()?, [[2f32, 3.], [5., 1.]]); let t = t.reshape((1, 1, 2, 8))?; let pool = t.max_pool2d(2)?.squeeze(0)?.squeeze(0)?; assert_eq!(pool.to_vec2::<f32>()?, [[2.0, 3.0, 5.0, 1.0]]); Ok(()) } /* This test corresponds to the following PyTorch script. import torch torch.manual_seed(4242) t = torch.randn((1, 2, 4, 4)) print(t.flatten()) res = torch.nn.functional.avg_pool2d(t, 2) print(res) */ fn avg_pool2d_pytorch(dev: &Device) -> Result<()> { let t = Tensor::new( &[ 0.4056f32, -0.8689, -0.0773, -1.5630, -2.8012, -1.5059, 0.3972, 1.0852, 0.4997, 3.0616, 1.6541, 0.0964, -0.8338, -1.6523, -0.8323, -0.1699, 0.0823, 0.3526, 0.6843, 0.2395, 1.2279, -0.9287, -1.7030, 0.1370, 0.6047, 0.3770, -0.6266, 0.3529, 2.2013, -0.6836, 0.2477, 1.3127, ], dev, )? .reshape((1, 2, 4, 4))?; let pool = t.avg_pool2d(2)?.squeeze(0)?; assert_eq!( test_utils::to_vec3_round(&pool, 4)?, [ [[-1.1926, -0.0395], [0.2688, 0.1871]], [[0.1835, -0.1606], [0.6249, 0.3217]] ] ); let pool = t.avg_pool2d(3)?.squeeze(0)?; assert_eq!( test_utils::to_vec3_round(&pool, 4)?, [[[0.085]], [[0.0078]]] ); let t = t.reshape((1, 1, 4, 8))?; let pool = t.avg_pool2d(2)?.squeeze(0)?.squeeze(0)?; assert_eq!( test_utils::to_vec2_round(&pool, 4)?, [ [0.7745, 0.0276, -1.6983, 0.12], [0.3542, 0.1625, 0.4542, -0.0014] ] ); Ok(()) } fn upsample_nearest2d(dev: &Device) -> Result<()> { let t = Tensor::arange(0f32, 6f32, dev)?.reshape((1, 1, 2, 3))?; let upsampled = t.upsample_nearest2d(4, 6)?.i(0)?.i(0)?; assert_eq!( t.i(0)?.i(0)?.to_vec2::<f32>()?, [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]] ); assert_eq!( upsampled.to_vec2::<f32>()?, [ [0.0, 0.0, 1.0, 1.0, 2.0, 2.0], [0.0, 0.0, 1.0, 1.0, 2.0, 2.0], [3.0, 3.0, 4.0, 4.0, 5.0, 5.0], [3.0, 3.0, 4.0, 4.0, 5.0, 5.0] ] ); Ok(()) } test_device!(avg_pool2d, avg_pool2d_cpu, avg_pool2d_gpu, avg_pool2d_metal); test_device!( avg_pool2d_pytorch, avg_pool2d_pytorch_cpu, avg_pool2d_pytorch_gpu, avg_pool2d_pytorch_metal ); test_device!(max_pool2d, max_pool2d_cpu, max_pool2d_gpu, max_pool2d_metal); test_device!( upsample_nearest2d, upsample_nearest2d_cpu, upsample_nearest2d_gpu, upsample_nearest2d_metal );
candle/candle-core/tests/pool_tests.rs/0
{ "file_path": "candle/candle-core/tests/pool_tests.rs", "repo_id": "candle", "token_count": 2083 }
20
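The first assertion in `avg_pool2d` above follows directly from averaging each non-overlapping 2x2 window of the 4x4 input. A plain-Rust sketch of that arithmetic, with no candle dependency and an illustrative function name:

```rust
// 2x2 average pooling over a 4x4 input, as checked by the first test above:
// each non-overlapping 2x2 window is averaged into one output value.
fn avg_pool2d_2x2(input: &[[f32; 4]; 4]) -> [[f32; 2]; 2] {
    let mut out = [[0f32; 2]; 2];
    for oy in 0..2 {
        for ox in 0..2 {
            let mut sum = 0f32;
            for dy in 0..2 {
                for dx in 0..2 {
                    sum += input[2 * oy + dy][2 * ox + dx];
                }
            }
            out[oy][ox] = sum / 4.0;
        }
    }
    out
}

fn main() {
    let input = [
        [1., 1., 1., 1.],
        [0., 0., 1., 1.],
        [1., 1., 1., 1.],
        [1., 1., 1., 1.],
    ];
    // Matches the expected output of the candle test: [[0.5, 1.], [1., 1.]].
    assert_eq!(avg_pool2d_2x2(&input), [[0.5, 1.0], [1.0, 1.0]]);
}
```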
[package] name = "candle-examples" version.workspace = true edition.workspace = true description.workspace = true repository.workspace = true keywords.workspace = true categories.workspace = true license.workspace = true readme = "README.md" [dependencies] accelerate-src = { workspace = true, optional = true } candle = { workspace = true } candle-datasets = { workspace = true } candle-nn = { workspace = true } candle-transformers = { workspace = true } candle-flash-attn = { workspace = true, optional = true } candle-onnx = { workspace = true, optional = true } csv = "1.3.0" cudarc = { workspace = true, optional = true } half = { workspace = true, optional = true } hf-hub = { workspace = true, features=["tokio"]} image = { workspace = true } intel-mkl-src = { workspace = true, optional = true } num-traits = { workspace = true } pyo3 = { version = "0.20.0", features = ["auto-initialize"], optional = true } rayon = { workspace = true } safetensors = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } tokenizers = { workspace = true, features = ["onig"] } [dev-dependencies] anyhow = { workspace = true } byteorder = { workspace = true } clap = { workspace = true } imageproc = { workspace = true } memmap2 = { workspace = true } rand = { workspace = true } rusttype = { workspace = true } tracing = { workspace = true } tracing-chrome = { workspace = true } tracing-subscriber = { workspace = true } wav = { workspace = true } # Necessary to disambiguate with tokio in wasm examples which are 1.28.1 tokio = "1.29.1" [build-dependencies] anyhow = { workspace = true } bindgen_cuda = { version = "0.1.1", optional = true } [features] default = [] accelerate = ["dep:accelerate-src", "candle/accelerate", "candle-nn/accelerate", "candle-transformers/accelerate"] cuda = ["candle/cuda", "candle-nn/cuda", "candle-transformers/cuda", "dep:bindgen_cuda"] cudnn = ["candle/cudnn"] flash-attn = ["cuda", "candle-transformers/flash-attn", "dep:candle-flash-attn"] mkl = ["dep:intel-mkl-src", "candle/mkl", "candle-nn/mkl", "candle-transformers/mkl"] nccl = ["cuda", "cudarc/nccl", "dep:half"] onnx = ["candle-onnx"] metal = ["candle/metal", "candle-nn/metal"] [[example]] name = "llama_multiprocess" required-features = ["cuda", "nccl", "flash-attn"] [[example]] name = "reinforcement-learning" required-features = ["pyo3"] [[example]] name = "onnx" required-features = ["onnx"] [[example]] name = "onnx_basics" required-features = ["onnx"]
candle/candle-examples/Cargo.toml/0
{ "file_path": "candle/candle-examples/Cargo.toml", "repo_id": "candle", "token_count": 882 }
21
# candle-distilbert

DistilBert is a distilled version of the Bert model.

## Sentence embeddings

DistilBert is used to compute the sentence embeddings for a prompt. The model weights are downloaded from the hub on the first run.

```bash
cargo run --example distilbert --release -- --prompt "Here is a test sentence"

> [[[ 0.5109, 0.1280, -0.2635, ..., 0.3462, -1.0434, 0.1441],
>  [ 0.1735, 0.0818, -0.5549, ..., 0.3472, -0.8264, -0.0244],
>  [ 0.0702, -0.1311, -0.4914, ..., 0.3483, -0.6194, 0.1829],
>  ...
>  [ 0.2993, -0.0106, -0.4640, ..., 0.2844, -0.6732, 0.0042],
>  [ 0.1066, -0.0081, -0.4299, ..., 0.3435, -0.7729, 0.0190],
>  [ 0.8903, 0.2055, -0.2541, ..., 0.3208, -0.6585, 0.0586]]]
> Tensor[[1, 7, 768], f32]
```
candle/candle-examples/examples/distilbert/README.md/0
{ "file_path": "candle/candle-examples/examples/distilbert/README.md", "repo_id": "candle", "token_count": 367 }
22
# candle-phi: 1.3b and 2.7b LLM with state of the art performance for <10b models.

[Phi-1.5](https://huggingface.co/microsoft/phi-1_5) and [Phi-2](https://huggingface.co/microsoft/phi-2) are language models using only 1.3 and 2.7 billion parameters but with state of the art performance compared to models with up to 10 billion parameters.

The candle implementation provides both the standard version as well as a quantized variant.

## Running some examples

For the v2 version.
```bash
$ cargo run --example phi --release -- --model 2 \
  --prompt "A skier slides down a frictionless slope of height 40m and length 80m. What's the skier speed at the bottom?"

A skier slides down a frictionless slope of height 40m and length 80m. What's the skier speed at the bottom?

Solution: The potential energy of the skier is converted into kinetic energy as it slides down the slope. The formula for potential energy is mgh, where m is mass, g is acceleration due to gravity (9.8 m/s^2), and h is height. Since there's no friction, all the potential energy is converted into kinetic energy at the bottom of the slope. The formula for kinetic energy is 1/2mv^2, where v is velocity. We can equate these two formulas:
mgh = 1/2mv^2

Solving for v, we get:
v = sqrt(2gh)

Substituting the given values, we get:
v = sqrt(2*9.8*40) = 28 m/s

Therefore, the skier speed at the bottom of the slope is 28 m/s.
```

For the v1.5 version.
```bash
$ cargo run --example phi --release -- --prompt "def print_prime(n): "

def print_prime(n):
    print("Printing prime numbers")
    for i in range(2, n+1):
        if is_prime(i):
            print(i)

def is_prime(n):
    if n <= 1:
        return False
    for i in range(2, int(math.sqrt(n))+1):
        if n % i == 0:
            return False
    return True

$ cargo run --example phi --release -- \
  --prompt "Explain how to find the median of an array and write the corresponding python function.\nAnswer:" \
  --quantized --sample-len 200

Explain how to find the median of an array and write the corresponding python function.
Answer: The median is the middle value in an array. If the array has an even number of elements, the median is the average of the two middle values.

def median(arr):
    arr.sort()
    n = len(arr)
    if n % 2 == 0:
        return (arr[n//2 - 1] + arr[n//2]) / 2
    else:
        return arr[n//2]
```

This also supports the [Puffin Phi v2 model](https://huggingface.co/teknium/Puffin-Phi-v2) for human interaction.
```
$ cargo run --example phi --release -- \
  --prompt "USER: What would you do on a sunny day in Paris?\nASSISTANT:" \
  --sample-len 200 --model puffin-phi-v2 --quantized

USER: What would you do on a sunny day in Paris?
ASSISTANT: On a sunny day in Paris, you could visit the Musée du Louvre to admire the famous painting "Mona Lisa" by Leonardo da Vinci. You might also want to stroll along the Champs-Élysées and enjoy the beautiful architecture of the buildings around you. Don't forget to stop by a café for a cup of coffee and to soak up the sun!"
```
candle/candle-examples/examples/phi/README.md/0
{ "file_path": "candle/candle-examples/examples/phi/README.md", "repo_id": "candle", "token_count": 1011 }
23
# candle-repvgg

[RepVGG: Making VGG-style ConvNets Great Again](https://arxiv.org/abs/2101.03697).

This candle implementation uses a pre-trained RepVGG network for inference. The classification head has been trained on the ImageNet dataset and returns the probabilities for the top-5 classes.

## Running an example

```
$ cargo run --example repvgg --release -- --image candle-examples/examples/yolo-v8/assets/bike.jpg

loaded image Tensor[dims 3, 224, 224; f32]
model built
mountain bike, all-terrain bike, off-roader: 61.70%
bicycle-built-for-two, tandem bicycle, tandem: 33.14%
unicycle, monocycle : 4.88%
crash helmet : 0.15%
moped : 0.04%
```
candle/candle-examples/examples/repvgg/README.md/0
{ "file_path": "candle/candle-examples/examples/repvgg/README.md", "repo_id": "candle", "token_count": 254 }
24
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use std::io::Write; use std::path::PathBuf; use candle_transformers::models::t5; use anyhow::{Error as E, Result}; use candle::{DType, Device, Tensor}; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; use clap::Parser; use hf_hub::{api::sync::Api, Repo, RepoType}; use tokenizers::Tokenizer; const DTYPE: DType = DType::F32; #[derive(Parser, Debug, Clone)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, /// The model repository to use on the HuggingFace hub. #[arg(long)] model_id: Option<String>, #[arg(long)] revision: Option<String>, /// Enable decoding. #[arg(long)] decode: bool, // Enable/disable decoding. #[arg(long, default_value = "false")] disable_cache: bool, /// Use this prompt, otherwise compute sentence similarities. #[arg(long)] prompt: Option<String>, /// If set along with --decode, will use this prompt to initialize the decoder. #[arg(long)] decoder_prompt: Option<String>, /// L2 normalization for embeddings. #[arg(long, default_value = "true")] normalize_embeddings: bool, /// The temperature used to generate samples. #[arg(long, default_value_t = 0.8)] temperature: f64, /// Nucleus sampling probability cutoff. #[arg(long)] top_p: Option<f64>, /// Penalty to be applied for repeating tokens, 1. means no penalty. #[arg(long, default_value_t = 1.1)] repeat_penalty: f32, /// The context size to consider for the repeat penalty. #[arg(long, default_value_t = 64)] repeat_last_n: usize, } struct T5ModelBuilder { device: Device, config: t5::Config, weights_filename: Vec<PathBuf>, } impl T5ModelBuilder { pub fn load(args: &Args) -> Result<(Self, Tokenizer)> { let device = candle_examples::device(args.cpu)?; let default_model = "t5-small".to_string(); let default_revision = "refs/pr/15".to_string(); let (model_id, revision) = match (args.model_id.to_owned(), args.revision.to_owned()) { (Some(model_id), Some(revision)) => (model_id, revision), (Some(model_id), None) => (model_id, "main".to_string()), (None, Some(revision)) => (default_model, revision), (None, None) => (default_model, default_revision), }; let repo = Repo::with_revision(model_id.clone(), RepoType::Model, revision); let api = Api::new()?; let api = api.repo(repo); let config_filename = api.get("config.json")?; let tokenizer_filename = api.get("tokenizer.json")?; let weights_filename = if model_id == "google/flan-t5-xxl" || model_id == "google/flan-ul2" { candle_examples::hub_load_safetensors(&api, "model.safetensors.index.json")? } else { vec![api.get("model.safetensors")?] }; let config = std::fs::read_to_string(config_filename)?; let mut config: t5::Config = serde_json::from_str(&config)?; config.use_cache = !args.disable_cache; let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; Ok(( Self { device, config, weights_filename, }, tokenizer, )) } pub fn build_encoder(&self) -> Result<t5::T5EncoderModel> { let vb = unsafe { VarBuilder::from_mmaped_safetensors(&self.weights_filename, DTYPE, &self.device)? }; Ok(t5::T5EncoderModel::load(vb, &self.config)?) } pub fn build_conditional_generation(&self) -> Result<t5::T5ForConditionalGeneration> { let vb = unsafe { VarBuilder::from_mmaped_safetensors(&self.weights_filename, DTYPE, &self.device)? 
}; Ok(t5::T5ForConditionalGeneration::load(vb, &self.config)?) } } fn main() -> Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; let (builder, mut tokenizer) = T5ModelBuilder::load(&args)?; let device = &builder.device; let tokenizer = tokenizer .with_padding(None) .with_truncation(None) .map_err(E::msg)?; match args.prompt { Some(prompt) => { let tokens = tokenizer .encode(prompt, true) .map_err(E::msg)? .get_ids() .to_vec(); let input_token_ids = Tensor::new(&tokens[..], device)?.unsqueeze(0)?; if !args.decode { let mut model = builder.build_encoder()?; let start = std::time::Instant::now(); let ys = model.forward(&input_token_ids)?; println!("{ys}"); println!("Took {:?}", start.elapsed()); } else { let mut model = builder.build_conditional_generation()?; let mut output_token_ids = [builder .config .decoder_start_token_id .unwrap_or(builder.config.pad_token_id) as u32] .to_vec(); if let Some(decoder_prompt) = &args.decoder_prompt { print!("{decoder_prompt}"); output_token_ids.extend( tokenizer .encode(decoder_prompt.to_string(), false) .map_err(E::msg)? .get_ids() .to_vec(), ); } let temperature = if args.temperature <= 0. { None } else { Some(args.temperature) }; let mut logits_processor = LogitsProcessor::new(299792458, temperature, args.top_p); let encoder_output = model.encode(&input_token_ids)?; let start = std::time::Instant::now(); for index in 0.. { if output_token_ids.len() > 512 { break; } let decoder_token_ids = if index == 0 || !builder.config.use_cache { Tensor::new(output_token_ids.as_slice(), device)?.unsqueeze(0)? } else { let last_token = *output_token_ids.last().unwrap(); Tensor::new(&[last_token], device)?.unsqueeze(0)? }; let logits = model .decode(&decoder_token_ids, &encoder_output)? .squeeze(0)?; let logits = if args.repeat_penalty == 1. { logits } else { let start_at = output_token_ids.len().saturating_sub(args.repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, args.repeat_penalty, &output_token_ids[start_at..], )? }; let next_token_id = logits_processor.sample(&logits)?; if next_token_id as usize == builder.config.eos_token_id { break; } output_token_ids.push(next_token_id); if let Some(text) = tokenizer.id_to_token(next_token_id) { let text = text.replace('▁', " ").replace("<0x0A>", "\n"); print!("{text}"); std::io::stdout().flush()?; } } let dt = start.elapsed(); println!( "\n{} tokens generated ({:.2} token/s)\n", output_token_ids.len(), output_token_ids.len() as f64 / dt.as_secs_f64(), ); } } None => { let mut model = builder.build_encoder()?; let sentences = [ "The cat sits outside", "A man is playing guitar", "I love pasta", "The new movie is awesome", "The cat plays in the garden", "A woman watches TV", "The new movie is so great", "Do you like pizza?", ]; let n_sentences = sentences.len(); let mut all_embeddings = Vec::with_capacity(n_sentences); for sentence in sentences { let tokens = tokenizer .encode(sentence, true) .map_err(E::msg)? 
.get_ids() .to_vec(); let token_ids = Tensor::new(&tokens[..], model.device())?.unsqueeze(0)?; let embeddings = model.forward(&token_ids)?; println!("generated embeddings {:?}", embeddings.shape()); // Apply some avg-pooling by taking the mean embedding value for all tokens (including padding) let (_n_sentence, n_tokens, _hidden_size) = embeddings.dims3()?; let embeddings = (embeddings.sum(1)? / (n_tokens as f64))?; let embeddings = if args.normalize_embeddings { normalize_l2(&embeddings)? } else { embeddings }; println!("pooled embeddings {:?}", embeddings.shape()); all_embeddings.push(embeddings) } let mut similarities = vec![]; for (i, e_i) in all_embeddings.iter().enumerate() { for (j, e_j) in all_embeddings .iter() .enumerate() .take(n_sentences) .skip(i + 1) { let sum_ij = (e_i * e_j)?.sum_all()?.to_scalar::<f32>()?; let sum_i2 = (e_i * e_i)?.sum_all()?.to_scalar::<f32>()?; let sum_j2 = (e_j * e_j)?.sum_all()?.to_scalar::<f32>()?; let cosine_similarity = sum_ij / (sum_i2 * sum_j2).sqrt(); similarities.push((cosine_similarity, i, j)) } } similarities.sort_by(|u, v| v.0.total_cmp(&u.0)); for &(score, i, j) in similarities[..5].iter() { println!("score: {score:.2} '{}' '{}'", sentences[i], sentences[j]) } } } Ok(()) } pub fn normalize_l2(v: &Tensor) -> Result<Tensor> { Ok(v.broadcast_div(&v.sqr()?.sum_keepdim(1)?.sqrt()?)?) }
candle/candle-examples/examples/t5/main.rs/0
{ "file_path": "candle/candle-examples/examples/t5/main.rs", "repo_id": "candle", "token_count": 5920 }
25
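The sentence-similarity branch above mean-pools the encoder output over tokens, optionally L2-normalizes it, and then ranks sentence pairs by cosine similarity computed as `sum_ij / (sum_i2 * sum_j2).sqrt()`. The same formula on plain vectors, as a standalone sketch with made-up sample values:

```rust
// Cosine similarity, as used to rank sentence pairs above:
// cos(i, j) = <e_i, e_j> / (||e_i|| * ||e_j||).
fn cosine_similarity(a: &[f32], b: &[f32]) -> f32 {
    let sum_ij: f32 = a.iter().zip(b).map(|(x, y)| x * y).sum();
    let sum_i2: f32 = a.iter().map(|x| x * x).sum();
    let sum_j2: f32 = b.iter().map(|x| x * x).sum();
    sum_ij / (sum_i2 * sum_j2).sqrt()
}

fn main() {
    let e_i = [1.0f32, 0.0, 1.0];
    let e_j = [0.5f32, 0.5, 0.5];
    // <e_i, e_j> = 1.0, ||e_i|| = sqrt(2), ||e_j|| = sqrt(0.75), so cos ≈ 0.816.
    let c = cosine_similarity(&e_i, &e_j);
    assert!((c - 0.8165).abs() < 1e-3);
}
```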
pub mod coco_classes; pub mod imagenet; pub mod token_output_stream; use candle::utils::{cuda_is_available, metal_is_available}; use candle::{Device, Result, Tensor}; pub fn device(cpu: bool) -> Result<Device> { if cpu { Ok(Device::Cpu) } else if cuda_is_available() { Ok(Device::new_cuda(0)?) } else if metal_is_available() { Ok(Device::new_metal(0)?) } else { #[cfg(all(target_os = "macos", target_arch = "aarch64"))] { println!( "Running on CPU, to run on GPU(metal), build this example with `--features metal`" ); } #[cfg(not(all(target_os = "macos", target_arch = "aarch64")))] { println!("Running on CPU, to run on GPU, build this example with `--features cuda`"); } Ok(Device::Cpu) } } pub fn load_image<P: AsRef<std::path::Path>>( p: P, resize_longest: Option<usize>, ) -> Result<(Tensor, usize, usize)> { let img = image::io::Reader::open(p)? .decode() .map_err(candle::Error::wrap)?; let (initial_h, initial_w) = (img.height() as usize, img.width() as usize); let img = match resize_longest { None => img, Some(resize_longest) => { let (height, width) = (img.height(), img.width()); let resize_longest = resize_longest as u32; let (height, width) = if height < width { let h = (resize_longest * height) / width; (h, resize_longest) } else { let w = (resize_longest * width) / height; (resize_longest, w) }; img.resize_exact(width, height, image::imageops::FilterType::CatmullRom) } }; let (height, width) = (img.height() as usize, img.width() as usize); let img = img.to_rgb8(); let data = img.into_raw(); let data = Tensor::from_vec(data, (height, width, 3), &Device::Cpu)?.permute((2, 0, 1))?; Ok((data, initial_h, initial_w)) } pub fn load_image_and_resize<P: AsRef<std::path::Path>>( p: P, width: usize, height: usize, ) -> Result<Tensor> { let img = image::io::Reader::open(p)? .decode() .map_err(candle::Error::wrap)? .resize_to_fill( width as u32, height as u32, image::imageops::FilterType::Triangle, ); let img = img.to_rgb8(); let data = img.into_raw(); Tensor::from_vec(data, (width, height, 3), &Device::Cpu)?.permute((2, 0, 1)) } /// Saves an image to disk using the image crate, this expects an input with shape /// (c, height, width). 
pub fn save_image<P: AsRef<std::path::Path>>(img: &Tensor, p: P) -> Result<()> { let p = p.as_ref(); let (channel, height, width) = img.dims3()?; if channel != 3 { candle::bail!("save_image expects an input of shape (3, height, width)") } let img = img.permute((1, 2, 0))?.flatten_all()?; let pixels = img.to_vec1::<u8>()?; let image: image::ImageBuffer<image::Rgb<u8>, Vec<u8>> = match image::ImageBuffer::from_raw(width as u32, height as u32, pixels) { Some(image) => image, None => candle::bail!("error saving image {p:?}"), }; image.save(p).map_err(candle::Error::wrap)?; Ok(()) } pub fn save_image_resize<P: AsRef<std::path::Path>>( img: &Tensor, p: P, h: usize, w: usize, ) -> Result<()> { let p = p.as_ref(); let (channel, height, width) = img.dims3()?; if channel != 3 { candle::bail!("save_image expects an input of shape (3, height, width)") } let img = img.permute((1, 2, 0))?.flatten_all()?; let pixels = img.to_vec1::<u8>()?; let image: image::ImageBuffer<image::Rgb<u8>, Vec<u8>> = match image::ImageBuffer::from_raw(width as u32, height as u32, pixels) { Some(image) => image, None => candle::bail!("error saving image {p:?}"), }; let image = image::DynamicImage::from(image); let image = image.resize_to_fill(w as u32, h as u32, image::imageops::FilterType::CatmullRom); image.save(p).map_err(candle::Error::wrap)?; Ok(()) } /// Loads the safetensors files for a model from the hub based on a json index file. pub fn hub_load_safetensors( repo: &hf_hub::api::sync::ApiRepo, json_file: &str, ) -> Result<Vec<std::path::PathBuf>> { let json_file = repo.get(json_file).map_err(candle::Error::wrap)?; let json_file = std::fs::File::open(json_file)?; let json: serde_json::Value = serde_json::from_reader(&json_file).map_err(candle::Error::wrap)?; let weight_map = match json.get("weight_map") { None => candle::bail!("no weight map in {json_file:?}"), Some(serde_json::Value::Object(map)) => map, Some(_) => candle::bail!("weight map in {json_file:?} is not a map"), }; let mut safetensors_files = std::collections::HashSet::new(); for value in weight_map.values() { if let Some(file) = value.as_str() { safetensors_files.insert(file.to_string()); } } let safetensors_files = safetensors_files .iter() .map(|v| repo.get(v).map_err(candle::Error::wrap)) .collect::<Result<Vec<_>>>()?; Ok(safetensors_files) }
candle/candle-examples/src/lib.rs/0
{ "file_path": "candle/candle-examples/src/lib.rs", "repo_id": "candle", "token_count": 2436 }
26
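A hedged usage sketch for the helpers above: `device` prefers CUDA, then Metal, then falls back to the CPU, and `load_image`/`save_image` work with (3, height, width) u8 tensors. The crate paths (`candle_examples`, `candle_core`) and the image file names are assumptions for illustration; inside the repository the core crate is referenced as `candle`:

```rust
use candle_core::Result;

fn main() -> Result<()> {
    // Picks CUDA if available, then Metal, otherwise falls back to the CPU
    // (printing a hint about the relevant cargo feature).
    let device = candle_examples::device(false)?;
    println!("running on {device:?}");

    // `load_image` returns a (3, height, width) u8 tensor plus the original size,
    // here resizing so that the longest side becomes 512 pixels.
    let (img, initial_h, initial_w) = candle_examples::load_image("input.jpg", Some(512))?;
    println!("loaded {initial_w}x{initial_h} image as {:?}", img.shape());

    // `save_image` expects exactly that (3, height, width) layout.
    candle_examples::save_image(&img, "output.png")?;
    Ok(())
}
```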
/****************************************************************************** * Copyright (c) 2023, Tri Dao. ******************************************************************************/ #pragma once #include <assert.h> #include <stdint.h> #include <stdlib.h> #include <cuda_fp16.h> #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 #include <cuda_bf16.h> #endif #include <cute/algorithm/copy.hpp> #include <cute/algorithm/gemm.hpp> #include <cutlass/array.h> #include <cutlass/cutlass.h> #include <cutlass/numeric_conversion.h> #include <cutlass/numeric_types.h> //////////////////////////////////////////////////////////////////////////////////////////////////// namespace flash { //////////////////////////////////////////////////////////////////////////////////////////////////// template<typename T> inline __device__ uint32_t relu2(const uint32_t x); template<> inline __device__ uint32_t relu2<cutlass::half_t>(const uint32_t x) { uint32_t res; const uint32_t zero = 0u; #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 asm volatile("max.f16x2 %0, %1, %2;\n" : "=r"(res) : "r"(x), "r"(zero)); #else asm volatile( \ "{\n" \ "\t .reg .f16x2 sela;\n" \ "\t set.gtu.u32.f16x2 sela, %1, %2;\n" \ "\t and.b32 %0, sela, %1;\n" "}\n" : "=r"(res) : "r"(x), "r"(zero)); #endif return res; } #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 template<> inline __device__ uint32_t relu2<cutlass::bfloat16_t>(const uint32_t x) { uint32_t res; const uint32_t zero = 0u; asm volatile("max.bf16x2 %0, %1, %2;\n" : "=r"(res) : "r"(x), "r"(zero)); return res; } #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 template<typename T> inline __device__ uint32_t convert_relu2(const float2 x); template<> inline __device__ uint32_t convert_relu2<cutlass::half_t>(const float2 x) { uint32_t res; const uint32_t a = reinterpret_cast<const uint32_t&>(x.x); const uint32_t b = reinterpret_cast<const uint32_t&>(x.y); asm volatile("cvt.rn.relu.f16x2.f32 %0, %1, %2;\n" : "=r"(res) : "r"(b), "r"(a)); return res; } template<> inline __device__ uint32_t convert_relu2<cutlass::bfloat16_t>(const float2 x) { uint32_t res; const uint32_t a = reinterpret_cast<const uint32_t&>(x.x); const uint32_t b = reinterpret_cast<const uint32_t&>(x.y); asm volatile("cvt.rn.relu.bf16x2.f32 %0, %1, %2;\n" : "=r"(res) : "r"(b), "r"(a)); return res; } #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template<typename T> struct MaxOp { __device__ inline T operator()(T const & x, T const & y) { return x > y ? 
x : y; } }; template <> struct MaxOp<float> { // This is slightly faster __device__ inline float operator()(float const &x, float const &y) { return max(x, y); } }; //////////////////////////////////////////////////////////////////////////////////////////////////// template<typename T> struct SumOp { __device__ inline T operator()(T const & x, T const & y) { return x + y; } }; //////////////////////////////////////////////////////////////////////////////////////////////////// template<int THREADS> struct Allreduce { static_assert(THREADS == 32 || THREADS == 16 || THREADS == 8 || THREADS == 4); template<typename T, typename Operator> static __device__ inline T run(T x, Operator &op) { constexpr int OFFSET = THREADS / 2; x = op(x, __shfl_xor_sync(uint32_t(-1), x, OFFSET)); return Allreduce<OFFSET>::run(x, op); } }; //////////////////////////////////////////////////////////////////////////////////////////////////// template<> struct Allreduce<2> { template<typename T, typename Operator> static __device__ inline T run(T x, Operator &op) { x = op(x, __shfl_xor_sync(uint32_t(-1), x, 1)); return x; } }; //////////////////////////////////////////////////////////////////////////////////////////////////// template<bool A_in_regs=false, bool B_in_regs=false, typename Tensor0, typename Tensor1, typename Tensor2, typename Tensor3, typename Tensor4, typename TiledMma, typename TiledCopyA, typename TiledCopyB, typename ThrCopyA, typename ThrCopyB> inline __device__ void gemm(Tensor0 &acc, Tensor1 &tCrA, Tensor2 &tCrB, Tensor3 const& tCsA, Tensor4 const& tCsB, TiledMma tiled_mma, TiledCopyA smem_tiled_copy_A, TiledCopyB smem_tiled_copy_B, ThrCopyA smem_thr_copy_A, ThrCopyB smem_thr_copy_B) { CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(acc)); // MMA_M CUTE_STATIC_ASSERT_V(size<1>(tCrB) == size<2>(acc)); // MMA_N CUTE_STATIC_ASSERT_V(size<2>(tCrA) == size<2>(tCrB)); // MMA_K Tensor tCrA_copy_view = smem_thr_copy_A.retile_D(tCrA); CUTE_STATIC_ASSERT_V(size<1>(tCsA) == size<1>(tCrA_copy_view)); // M Tensor tCrB_copy_view = smem_thr_copy_B.retile_D(tCrB); CUTE_STATIC_ASSERT_V(size<1>(tCsB) == size<1>(tCrB_copy_view)); // N if (!A_in_regs) { cute::copy(smem_tiled_copy_A, tCsA(_, _, _0{}), tCrA_copy_view(_, _, _0{})); } if (!B_in_regs) { cute::copy(smem_tiled_copy_B, tCsB(_, _, _0{}), tCrB_copy_view(_, _, _0{})); } #pragma unroll for (int i = 0; i < size<2>(tCrA); ++i) { if (i < size<2>(tCrA) - 1) { if (!A_in_regs) { cute::copy(smem_tiled_copy_A, tCsA(_, _, i + 1), tCrA_copy_view(_, _, i + 1)); } if (!B_in_regs) { cute::copy(smem_tiled_copy_B, tCsB(_, _, i + 1), tCrB_copy_view(_, _, i + 1)); } } cute::gemm(tiled_mma, tCrA(_, _, i), tCrB(_, _, i), acc); } } //////////////////////////////////////////////////////////////////////////////////////////////////// template<typename Tensor0, typename Tensor1, typename Tensor2, typename Tensor3, typename TiledMma, typename TiledCopy, typename ThrCopy> inline __device__ void gemm_A_in_regs(Tensor0 &acc, Tensor1 &tCrA, Tensor2 &tCrB, Tensor3 const& tCsB, TiledMma tiled_mma, TiledCopy smem_tiled_copy_B, ThrCopy smem_thr_copy_B) { CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(acc)); // MMA_M CUTE_STATIC_ASSERT_V(size<1>(tCrB) == size<2>(acc)); // MMA_N CUTE_STATIC_ASSERT_V(size<2>(tCrA) == size<2>(tCrB)); // MMA_K Tensor tCrB_copy_view = smem_thr_copy_B.retile_D(tCrB); CUTE_STATIC_ASSERT_V(size<1>(tCsB) == size<1>(tCrB_copy_view)); // N cute::copy(smem_tiled_copy_B, tCsB(_, _, _0{}), tCrB_copy_view(_, _, _0{})); #pragma unroll for (int i = 0; i < size<2>(tCrA); ++i) { if (i 
< size<2>(tCrA) - 1) { cute::copy(smem_tiled_copy_B, tCsB(_, _, i + 1), tCrB_copy_view(_, _, i + 1)); } cute::gemm(tiled_mma, tCrA(_, _, i), tCrB(_, _, i), acc); } } //////////////////////////////////////////////////////////////////////////////////////////////////// // Convert acc_layout from (MMA=4, MMA_M, MMA_N) to (nrow=(2, MMA_M), ncol=(2, MMA_N)) template<typename Layout> inline __device__ auto convert_layout_acc_rowcol(Layout acc_layout) { static_assert(decltype(size<0>(acc_layout))::value == 4); static_assert(decltype(rank(acc_layout))::value == 3); auto l = logical_divide(acc_layout, Shape<_2>{}); // ((2, 2), MMA_M, MMA_N) // TD [2023-08-13]: Idk why but get<0, 1>(l) doesn't work for Cutlass 3.2, I'm getting // "int_tuple.hpp(74): error: conversion to inaccessible base class" // return make_layout(make_layout(get<0, 1>(l), get<1>(l)), make_layout(get<0, 0>(l), get<2>(l))); return make_layout(make_layout(get<1>(get<0>(l)), get<1>(l)), make_layout(get<0>(get<0>(l)), get<2>(l))); }; //////////////////////////////////////////////////////////////////////////////////////////////////// // Convert rowcol_layout from (nrow=(2, MMA_M), ncol=(2, MMA_N)) to ((2, 2, 2), MMA_M, MMA_N / 2) // if using m16n8k16, or to ((2, 2, 1), MMA_M, MMA_N) if using m16n8k8. template<typename MMA_traits, typename Layout> inline __device__ auto convert_layout_rowcol_Aregs(Layout rowcol_layout) { using X = Underscore; static_assert(decltype(size<0, 0>(rowcol_layout))::value == 2); static_assert(decltype(size<1, 0>(rowcol_layout))::value == 2); constexpr int mma_shape_K = get<2>(typename MMA_traits::Shape_MNK{}); static_assert(mma_shape_K == 8 || mma_shape_K == 16); constexpr int MMA_N_divisor = mma_shape_K == 8 ? 1 : 2; auto l = logical_divide(rowcol_layout, Shape<X, Shape<X, Int<MMA_N_divisor>>>{}); // ((2, MMA_M), (2, (2, MMA_N / 2))) // TD [2023-08-13]: Same error as above on Cutlass 3.2 // return make_layout(make_layout(get<1, 0>(l), get<0, 0>(l), get<1, 1, 0>(l)), // get<0, 1>(l), // get<1, 1, 1>(l)); return make_layout(make_layout(get<0>(get<1>(l)), get<0>(get<0>(l)), get<0>(get<1>(get<1>(l)))), get<1>(get<0>(l)), get<1>(get<1>(get<1>(l)))); }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <typename To_type, typename Engine, typename Layout> inline __device__ auto convert_type(Tensor<Engine, Layout> const &tensor) { using From_type = typename Engine::value_type; constexpr int numel = decltype(size(tensor))::value; cutlass::NumericArrayConverter<To_type, From_type, numel> convert_op; // HACK: this requires tensor to be "contiguous" auto frag = convert_op(*reinterpret_cast<const cutlass::Array<From_type, numel> *>(tensor.data())); return make_tensor(make_rmem_ptr<To_type>(&frag), tensor.layout()); } //////////////////////////////////////////////////////////////////////////////////////////////////// template <typename Engine, typename Layout> inline __device__ void relu_(Tensor<Engine, Layout> &tensor) { constexpr int numel = decltype(size(tensor))::value; static_assert(numel % 2 == 0); using value_t = typename Engine::value_type; // HACK: this requires tensor to be "contiguous" Tensor tensor_uint32 = recast<uint32_t>(tensor); #pragma unroll for (int i = 0; i < size(tensor_uint32); ++i) { tensor_uint32(i) = relu2<value_t>(tensor_uint32(i)); } } //////////////////////////////////////////////////////////////////////////////////////////////////// // On SM80 and above, we can fuse fp32 -> fp16/bf16 conversion and relu into 1 instruction template 
<typename To_type, typename Engine, typename Layout> inline __device__ auto convert_type_relu(Tensor<Engine, Layout> const &tensor) { using From_type = typename Engine::value_type; static_assert(std::is_same_v<To_type, cutlass::half_t> || std::is_same_v<To_type, cutlass::bfloat16_t>); static_assert(std::is_same_v<float, From_type>); constexpr int numel = decltype(size(tensor))::value; static_assert(numel % 2 == 0); #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 // HACK: this requires tensor to be "contiguous" Tensor tensor_float2 = recast<float2>(tensor); Tensor out_uint32 = make_tensor<uint32_t>(tensor_float2.layout()); #pragma unroll for (int i = 0; i < size(out_uint32); ++i) { out_uint32(i) = convert_relu2<To_type>(tensor_float2(i)); } Tensor out = make_tensor(make_rmem_ptr<To_type>(out_uint32.data()), tensor.layout()); #else Tensor out = flash::convert_type<To_type>(tensor); flash::relu_(out); #endif return out; } //////////////////////////////////////////////////////////////////////////////////////////////////// // Blocks until all but N previous cp.async.commit_group operations have committed. // This differs from cute::cp_async_wait in that when N = 0 we don't call cp.async.wait_all // (which is equivalent to commit_group then wait_group 0). // Instead we just call cp.async.wait_group 0, which is slightly faster. // https://github.com/NVIDIA/cutlass/blob/master/include/cute/arch/copy_sm80.hpp#L113 template <int N> CUTE_HOST_DEVICE void cp_async_wait() { #if defined(CUTE_ARCH_CP_ASYNC_SM80_ENABLED) asm volatile("cp.async.wait_group %0;\n" :: "n"(N)); #endif } //////////////////////////////////////////////////////////////////////////////////////////////////// template <bool Is_even_MN=true, bool Is_even_K=true, bool Clear_OOB_MN=false, bool Clear_OOB_K=true, typename TiledCopy, typename Engine0, typename Layout0, typename Engine1, typename Layout1, typename Engine2, typename Layout2, typename Engine3, typename Layout3> inline __device__ void copy(TiledCopy tiled_copy, Tensor<Engine0, Layout0> const &S, Tensor<Engine1, Layout1> &D, Tensor<Engine2, Layout2> const &identity_MN, Tensor<Engine3, Layout3> const &predicate_K, const int max_MN=0) { CUTE_STATIC_ASSERT_V(rank(S) == Int<3>{}); CUTE_STATIC_ASSERT_V(rank(D) == Int<3>{}); CUTE_STATIC_ASSERT_V(size<0>(S) == size<0>(D)); // MMA CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(D)); // MMA_M CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(D)); // MMA_K // There's no case where !Clear_OOB_K && Clear_OOB_MN static_assert(!(Clear_OOB_MN && !Clear_OOB_K)); #pragma unroll for (int m = 0; m < size<1>(S); ++m) { if (Is_even_MN || get<0>(identity_MN(0, m, 0)) < max_MN) { #pragma unroll for (int k = 0; k < size<2>(S); ++k) { if (Is_even_K || predicate_K(k)) { cute::copy(tiled_copy, S(_, m, k), D(_, m, k)); } else if (Clear_OOB_K) { cute::clear(D(_, m, k)); } } } else if (Clear_OOB_MN) { cute::clear(D(_, m, _)); } } // TD [2023-04-13]: Strange that the code below can cause race condition. // I think it's because the copies are under an if statement. 
// if (Is_even_K) { // #pragma unroll // for (int m = 0; m < size<1>(S); ++m) { // if (Is_even_MN || get<0>(identity_MN(0, m, 0)) < max_MN) { // copy(tiled_copy, S(_, m, _), D(_, m, _)); // } else if (Clear_OOB_MN) { // clear(D(_, m, _)); // } // } // } else { // It's slightly faster in this case if iterate over K first // #pragma unroll // for (int k = 0; k < size<2>(S); ++k) { // if (predicate_K(k)) { // #pragma unroll // for (int m = 0; m < size<1>(S); ++m) { // if (Is_even_MN || get<0>(identity_MN(0, m, 0)) < max_MN) { // copy(tiled_copy, S(_, m, k), D(_, m, k)); // } else if (Clear_OOB_MN) { // clear(D(_, m, k)); // } // } // } else if (Clear_OOB_K) { // There's no case where !Clear_OOB_K && Clear_OOB_MN // if (Clear_OOB_MN || Is_even_MN) { // clear(D(_, _, k)); // } else { // #pragma unroll // for (int m = 0; m < size<1>(S); ++m) { // if (!(Is_even_MN || get<0>(identity_MN(0, m, 0)) < max_MN)) { // clear(D(_, m, k)); // } // } // } // } // } // } } //////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace flash
candle/candle-flash-attn/kernels/utils.h/0
{ "file_path": "candle/candle-flash-attn/kernels/utils.h", "repo_id": "candle", "token_count": 6965 }
27
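// Illustrative sketch of how the fused kernels behind candle-flash-attn/kernels/utils.h are
// reached from Rust. The `flash_attn` entry point and the (batch, seq_len, num_heads, head_dim)
// layout are assumptions about the crate's public API rather than something shown above; the
// call requires f16/bf16 tensors on a CUDA device with compute capability >= 8.0.
use candle::{DType, Device, Result, Tensor};

fn flash_attention_example() -> Result<Tensor> {
    let dev = Device::new_cuda(0)?;
    let q = Tensor::randn(0f32, 1.0, (1, 128, 8, 64), &dev)?.to_dtype(DType::F16)?;
    let k = q.clone();
    let v = q.clone();
    let softmax_scale = 1.0 / (64f32).sqrt();
    // Causal masking happens inside the kernel, so no explicit mask tensor is built here.
    candle_flash_attn::flash_attn(&q, &k, &v, softmax_scale, true)
}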
pub const AFFINE: &str = include_str!(concat!(env!("OUT_DIR"), "/affine.ptx"));
pub const BINARY: &str = include_str!(concat!(env!("OUT_DIR"), "/binary.ptx"));
pub const CAST: &str = include_str!(concat!(env!("OUT_DIR"), "/cast.ptx"));
pub const CONV: &str = include_str!(concat!(env!("OUT_DIR"), "/conv.ptx"));
pub const FILL: &str = include_str!(concat!(env!("OUT_DIR"), "/fill.ptx"));
pub const INDEXING: &str = include_str!(concat!(env!("OUT_DIR"), "/indexing.ptx"));
pub const REDUCE: &str = include_str!(concat!(env!("OUT_DIR"), "/reduce.ptx"));
pub const TERNARY: &str = include_str!(concat!(env!("OUT_DIR"), "/ternary.ptx"));
pub const UNARY: &str = include_str!(concat!(env!("OUT_DIR"), "/unary.ptx"));
candle/candle-kernels/src/lib.rs/0
{ "file_path": "candle/candle-kernels/src/lib.rs", "repo_id": "candle", "token_count": 298 }
28
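// Hypothetical build.rs sketch showing why the constants above can use include_str! on OUT_DIR:
// a build script compiles each .cu kernel to PTX before rustc runs. The real candle-kernels
// build uses its own tooling, so the plain nvcc invocation below is only an assumed, minimal
// stand-in for illustration.
use std::{env, path::PathBuf, process::Command};

fn main() {
    let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap());
    let kernels = [
        "affine", "binary", "cast", "conv", "fill", "indexing", "reduce", "ternary", "unary",
    ];
    for kernel in kernels {
        let src = format!("src/{kernel}.cu");
        let dst = out_dir.join(format!("{kernel}.ptx"));
        println!("cargo:rerun-if-changed={src}");
        let status = Command::new("nvcc")
            .args(["--ptx", src.as_str(), "-o"])
            .arg(&dst)
            .status()
            .expect("failed to spawn nvcc");
        assert!(status.success(), "nvcc failed for {src}");
    }
}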
#include <metal_stdlib> # using namespace metal; METAL_FUNC uint get_strided_index( uint idx, constant size_t &num_dims, constant size_t *dims, constant size_t *strides ) { uint strided_i = 0; for (uint d = 0; d < num_dims; d++) { uint dim_idx = num_dims - 1 - d; strided_i += (idx % dims[dim_idx]) * strides[dim_idx]; idx /= dims[dim_idx]; } return strided_i; } template<typename T, typename ID> METAL_FUNC void where_cond( constant size_t &numel, constant size_t &num_dims, constant size_t *dims, constant size_t *strides, constant size_t *strides_t, constant size_t *strides_f, device const ID *ids, device const T *t, device const T *f, device T *out, uint i [[ thread_position_in_grid ]] ) { if (i >= numel){ return; } uint strided_i = get_strided_index(i, num_dims, dims, strides); uint strided_i_t = get_strided_index(i, num_dims, dims, strides_t); uint strided_i_f = get_strided_index(i, num_dims, dims, strides_f); out[i] = ids[strided_i] ? t[strided_i_t] : f[strided_i_f]; } #define WHERE_OP(T, ID, FN_NAME) \ kernel void FN_NAME( \ constant size_t &numel, \ constant size_t &num_dims, \ constant size_t *dims, \ constant size_t *strides, \ constant size_t *strides_t, \ constant size_t *strides_f, \ device const ID *ids, \ device const T *t, \ device const T *f, \ device T *out, \ uint i [[ thread_position_in_grid ]] \ ) { \ where_cond<T, ID>(numel, num_dims, dims, strides, strides_t, strides_f, ids, t, f, out, i); \ } \ // WHERE_OP(float, int64_t, where_i64_f32) // WHERE_OP(double, int64_t, where_i64_f64) // WHERE_OP(uint8_t, int64_t, where_i64_u8) // WHERE_OP(uint32_t, int64_t, where_i64_u32) // WHERE_OP(int64_t, int64_t, where_i64_i64) // // WHERE_OP(float, uint32_t, where_u32_f32) // WHERE_OP(double, uint32_t, where_u32_f64) // WHERE_OP(uint8_t, uint32_t, where_u32_u8) // WHERE_OP(uint32_t, uint32_t, where_u32_u32) // WHERE_OP(int64_t, uint32_t, where_u32_i64) WHERE_OP(float, uint8_t, where_u8_f32) WHERE_OP(half, uint8_t, where_u8_f16) WHERE_OP(uint8_t, uint8_t, where_u8_u8) WHERE_OP(uint32_t, uint8_t, where_u8_u32) #if __METAL_VERSION__ >= 220 WHERE_OP(int64_t, uint8_t, where_u8_i64) #endif #if defined(__HAVE_BFLOAT__) WHERE_OP(bfloat, uint8_t, where_u8_bf16) #endif
candle/candle-metal-kernels/src/ternary.metal/0
{ "file_path": "candle/candle-metal-kernels/src/ternary.metal", "repo_id": "candle", "token_count": 2209 }
29
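// The ternary.metal kernel above relies on get_strided_index to gather from non-contiguous
// buffers. This is the same computation written as standalone Rust, which may be easier to step
// through: the flat index is peeled one dimension at a time, innermost first.
fn get_strided_index(mut idx: usize, dims: &[usize], strides: &[usize]) -> usize {
    let mut strided_i = 0;
    for d in (0..dims.len()).rev() {
        strided_i += (idx % dims[d]) * strides[d];
        idx /= dims[d];
    }
    strided_i
}

#[test]
fn strided_index_is_identity_for_contiguous_layouts() {
    // A contiguous 2x3 tensor has strides [3, 1], so the mapping is the identity.
    let (dims, strides) = ([2usize, 3], [3usize, 1]);
    for i in 0..6 {
        assert_eq!(get_strided_index(i, &dims, &strides), i);
    }
}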
//! Layers defined by closures. use candle::{Result, Tensor}; use std::sync::Arc; /// A layer defined by a simple closure. #[derive(Clone)] pub struct Func<'a> { #[allow(clippy::type_complexity)] f: Arc<dyn 'a + Fn(&Tensor) -> Result<Tensor> + Send + Sync>, } impl<'a> std::fmt::Debug for Func<'a> { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "func") } } pub fn func<'a, F>(f: F) -> Func<'a> where F: 'a + Fn(&Tensor) -> Result<Tensor> + Send + Sync, { Func { f: Arc::new(f) } } impl<'a> super::Module for Func<'a> { fn forward(&self, xs: &Tensor) -> Result<Tensor> { (*self.f)(xs) } } impl<'a> Func<'a> { pub fn new<F>(f: F) -> Self where F: 'a + Fn(&Tensor) -> Result<Tensor> + Send + Sync, { Self { f: Arc::new(f) } } } /// A layer defined by a simple closure. #[derive(Clone)] pub struct FuncT<'a> { #[allow(clippy::type_complexity)] f: Arc<dyn 'a + Fn(&Tensor, bool) -> Result<Tensor> + Send + Sync>, } impl<'a> std::fmt::Debug for FuncT<'a> { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "func") } } pub fn func_t<'a, F>(f: F) -> FuncT<'a> where F: 'a + Fn(&Tensor, bool) -> Result<Tensor> + Send + Sync, { FuncT { f: Arc::new(f) } } impl<'a> super::ModuleT for FuncT<'a> { fn forward_t(&self, xs: &Tensor, train: bool) -> Result<Tensor> { (*self.f)(xs, train) } } impl<'a> FuncT<'a> { pub fn new<F>(f: F) -> Self where F: 'a + Fn(&Tensor, bool) -> Result<Tensor> + Send + Sync, { Self { f: Arc::new(f) } } }
candle/candle-nn/src/func.rs/0
{ "file_path": "candle/candle-nn/src/func.rs", "repo_id": "candle", "token_count": 804 }
30
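// Usage sketch for the closure layers defined in candle-nn/src/func.rs above: `candle_nn::func`
// turns a plain closure into a `Module`, which is convenient for small custom layers. Only the
// closure body here is invented; the `func`/`Module` API is the one shown above.
use candle::{Device, Result, Tensor};
use candle_nn::Module;

fn main() -> Result<()> {
    let scale_then_relu = candle_nn::func(|xs: &Tensor| xs.affine(2.0, 0.0)?.relu());
    let xs = Tensor::new(&[-1f32, 0.5, 2.0], &Device::Cpu)?;
    let ys = scale_then_relu.forward(&xs)?;
    println!("{ys}");
    Ok(())
}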
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use candle::test_utils::to_vec0_round; use candle::{Device, Result, Tensor}; /* Equivalent python code: import torch import torch.nn.functional as F input = torch.tensor([ [ 1.1050, 0.3013, -1.5394, -2.1528, -0.8634], [ 1.0730, -0.9419, -0.1670, -0.6582, 0.5061], [ 0.8318, 1.1154, -0.3610, 0.5351, 1.0830]]) target = torch.tensor([1, 0, 4]) print(F.nll_loss(F.log_softmax(input, dim=1), target)) print(F.cross_entropy(input, target)) */ #[test] fn nll_and_cross_entropy() -> Result<()> { let cpu = Device::Cpu; let input = Tensor::new( &[ [1.1050f32, 0.3013, -1.5394, -2.1528, -0.8634], [1.0730, -0.9419, -0.1670, -0.6582, 0.5061], [0.8318, 1.1154, -0.3610, 0.5351, 1.0830], ], &cpu, )?; let target = Tensor::new(&[1u32, 0, 4], &cpu)?; let log_softmax = candle_nn::ops::log_softmax(&input, 1)?; let loss = candle_nn::loss::nll(&log_softmax, &target)?; assert_eq!(to_vec0_round(&loss, 4)?, 1.1312); let loss = candle_nn::loss::cross_entropy(&input, &target)?; assert_eq!(to_vec0_round(&loss, 4)?, 1.1312); Ok(()) } /* Equivalent python code: import torch import torch.nn.functional as F inp = torch.Tensor([[ 2.3611, -0.8813, -0.5006, -0.2178], [ 0.0419, 0.0763, -1.0457, -1.6692], [-1.0494, 0.8111, 1.5723, 1.2315], [ 1.3081, 0.6641, 1.1802, -0.2547], [ 0.5292, 0.7636, 0.3692, -0.8318]]) target = torch.Tensor([[0., 1., 0., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [1., 0., 0., 0.], [0., 0., 1., 0.]]) print(F.binary_cross_entropy_with_logits(inp, target)) */ #[test] fn binary_cross_entropy_with_logit() -> Result<()> { let cpu = Device::Cpu; let inp = [ [2.3611f32, -0.8813, -0.5006, -0.2178], [0.0419, 0.0763, -1.0457, -1.6692], [-1.0494, 0.8111, 1.5723, 1.2315], [1.3081, 0.6641, 1.1802, -0.2547], [0.5292, 0.7636, 0.3692, -0.8318], ]; let target = [ [0.0f32, 1., 0., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [1., 0., 0., 0.], [0., 0., 1., 0.], ]; let inp = Tensor::new(&inp, &cpu)?; let target = Tensor::new(&target, &cpu)?; let loss = candle_nn::loss::binary_cross_entropy_with_logit(&inp, &target)?; assert_eq!(to_vec0_round(&loss, 4)?, 0.8224); Ok(()) }
candle/candle-nn/tests/loss.rs/0
{ "file_path": "candle/candle-nn/tests/loss.rs", "repo_id": "candle", "token_count": 1344 }
31
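// Sketch of what the nll/cross_entropy values checked in the tests above correspond to: the
// negative mean of the log-probability assigned to each row's target class. The gather-based
// reduction below assumes candle's usual Tensor API (gather/mean_all/neg) and is only meant to
// make the formula concrete, not to replace candle_nn::loss::nll.
use candle::{Device, Result, Tensor};

fn manual_nll(logits: &Tensor, target: &Tensor) -> Result<Tensor> {
    let log_sm = candle_nn::ops::log_softmax(logits, 1)?;
    // Pick the log-probability of the target class for every row, then average and negate.
    let picked = log_sm.gather(&target.unsqueeze(1)?, 1)?;
    picked.mean_all()?.neg()
}

fn main() -> Result<()> {
    let dev = Device::Cpu;
    let logits = Tensor::new(&[[2.0f32, 0.5, 0.1], [0.2, 1.5, 0.3]], &dev)?;
    let target = Tensor::new(&[0u32, 1], &dev)?;
    println!("{}", manual_nll(&logits, &target)?);
    Ok(())
}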
from typing import Union, Sequence

class Tensor:
    """
    This contains the type hints for the magic methods of the `candle.Tensor` class.
    """

    def __add__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Add a scalar to a tensor or two tensors together.
        """
        pass

    def __radd__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Add a scalar to a tensor or two tensors together.
        """
        pass

    def __sub__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Subtract a scalar from a tensor or one tensor from another.
        """
        pass

    def __truediv__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Divide a tensor by a scalar or one tensor by another.
        """
        pass

    def __mul__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Multiply a tensor by a scalar or one tensor by another.
        """
        pass

    def __rmul__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Multiply a tensor by a scalar or one tensor by another.
        """
        pass

    def __richcmp__(self, rhs: Union["Tensor", "Scalar"], op) -> "Tensor":
        """
        Compare a tensor with a scalar or one tensor with another.
        """
        pass

    def __getitem__(self, index: Union["Index", "Tensor", Sequence["Index"]]) -> "Tensor":
        """
        Return a slice of a tensor.
        """
        pass

    def __eq__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Compare a tensor with a scalar or one tensor with another.
        """
        pass

    def __ne__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Compare a tensor with a scalar or one tensor with another.
        """
        pass

    def __lt__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Compare a tensor with a scalar or one tensor with another.
        """
        pass

    def __le__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Compare a tensor with a scalar or one tensor with another.
        """
        pass

    def __gt__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Compare a tensor with a scalar or one tensor with another.
        """
        pass

    def __ge__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Compare a tensor with a scalar or one tensor with another.
        """
        pass
candle/candle-pyo3/_additional_typing/__init__.py/0
{ "file_path": "candle/candle-pyo3/_additional_typing/__init__.py", "repo_id": "candle", "token_count": 1174 }
32
# Generated content DO NOT EDIT from typing import Any, Callable, Dict, List, Optional, Tuple, Union, Sequence from os import PathLike from candle.typing import _ArrayLike, Device, Scalar, Index, Shape from candle import Tensor, DType, QTensor class ONNXModel: """ A wrapper around an ONNX model. """ def __init__(self, path: str): pass @property def doc_string(self) -> str: """ The doc string of the model. """ pass @property def domain(self) -> str: """ The domain of the operator set of the model. """ pass def initializers(self) -> Dict[str, Tensor]: """ Get the weights of the model. """ pass @property def inputs(self) -> Optional[Dict[str, ONNXTensorDescription]]: """ The inputs of the model. """ pass @property def ir_version(self) -> int: """ The version of the IR this model targets. """ pass @property def model_version(self) -> int: """ The version of the model. """ pass @property def outputs(self) -> Optional[Dict[str, ONNXTensorDescription]]: """ The outputs of the model. """ pass @property def producer_name(self) -> str: """ The producer of the model. """ pass @property def producer_version(self) -> str: """ The version of the producer of the model. """ pass def run(self, inputs: Dict[str, Tensor]) -> Dict[str, Tensor]: """ Run the model on the given inputs. """ pass class ONNXTensorDescription: """ A wrapper around an ONNX tensor description. """ @property def dtype(self) -> DType: """ The data type of the tensor. """ pass @property def shape(self) -> Tuple[Union[int, str, Any]]: """ The shape of the tensor. """ pass
candle/candle-pyo3/py_src/candle/onnx/__init__.pyi/0
{ "file_path": "candle/candle-pyo3/py_src/candle/onnx/__init__.pyi", "repo_id": "candle", "token_count": 939 }
33
import candle from candle import Tensor, QTensor from candle.nn import Module, Linear from candle.utils import cuda_is_available import pytest def test_module_can_be_constructed(): class A(Module): pass a = A() assert a is not None assert len(list(a.buffers())) == 0 def test_module_registers_tensors(): class A(Module): def __init__(self): super().__init__() self.t = Tensor(42.0) a = A() named_buffers = dict(a.named_buffers()) assert len(named_buffers) == 1 assert "t" in named_buffers def test_module_registers_submodules(): class A(Module): def __init__(self): super().__init__() self.linear = Linear(10, 20) a = A() named_modules = dict(a.named_modules()) named_buffers = dict(a.named_buffers()) assert len(named_buffers) == 2 assert "linear" in named_modules assert "linear.weight" in named_buffers assert "linear.bias" in named_buffers def test_module_can_dump_statedict(): class A(Module): def __init__(self): super().__init__() self.linear = Linear(10, 20) self.t = Tensor(42.0) a = A() state_dict = a.state_dict() assert hasattr(state_dict, "_metadata") assert "t" in state_dict assert "linear.weight" in state_dict assert "linear.bias" in state_dict assert len(state_dict) == 3 def test_module_can_load_statedict(): class A(Module): def __init__(self): super().__init__() self.linear = Linear(10, 20) self.t = Tensor(42.0) statedict = { "linear.weight": candle.ones((20, 10)), "linear.bias": candle.zeros((20,)), "t": Tensor(42.0), } a = A() a.load_state_dict(statedict) def test_module_throws_on_shape_mismatch(): class A(Module): def __init__(self): super().__init__() self.t = Tensor(42.0) statedict = { "t": candle.ones((20,)), } a = A() with pytest.raises(RuntimeError) as excinfo: a.load_state_dict(statedict) assert "size mismatch" in str(excinfo.value) def test_module_throws_on_missing_key(): class A(Module): def __init__(self): super().__init__() self.t = Tensor(42.0) statedict = { "not_t": Tensor(42.0), } a = A() with pytest.raises(RuntimeError) as excinfo: a.load_state_dict(statedict) assert 'Missing key(s) in state_dict: "t".' in str(excinfo.value) def test_module_can_load_quantized_tensors(): class A(Module): def __init__(self): super().__init__() self.t = candle.randn((16, 256)) self._quantizable_buffers.add("t") statedict = { "t": candle.ones((16, 256)).quantize("q4_0"), } a = A() a.load_state_dict(statedict) assert isinstance(a.t, QTensor) assert a.t.ggml_dtype == "Q4_0" def test_module_dequantizes_tensors_automatically(): class A(Module): def __init__(self): super().__init__() self.t = candle.randn((16, 256)) statedict = { "t": candle.ones((16, 256)).quantize("q4_0"), } a = A() a.load_state_dict(statedict) assert isinstance(a.t, Tensor) @pytest.mark.skipif(not cuda_is_available(), reason="CUDA is not available") def test_module_can_be_moved_to_cuda(): class A(Module): def __init__(self): super().__init__() self.t = candle.randn((16, 256)) a = A() a.cuda() assert a.t.device == "cuda" @pytest.mark.skipif(not cuda_is_available(), reason="CUDA is not available") def test_module_can_be_moved_from_cuda_to_cpu(): class A(Module): def __init__(self): super().__init__() self.t = candle.randn((16, 256)) a = A() a.cuda() assert a.t.device == "cuda" a.cpu() assert a.t.device == "cpu"
candle/candle-pyo3/tests/bindings/test_module.py/0
{ "file_path": "candle/candle-pyo3/tests/bindings/test_module.py", "repo_id": "candle", "token_count": 1853 }
34
use candle::{Result, Tensor, D}; use candle_nn as nn; use nn::{Module, VarBuilder}; // Based on the Python version from torchvision. // https://github.com/pytorch/vision/blob/0d75d9e5516f446c9c0ef93bd4ed9fea13992d06/torchvision/models/efficientnet.py#L47 #[derive(Debug, Clone, Copy)] pub struct MBConvConfig { expand_ratio: f64, kernel: usize, stride: usize, input_channels: usize, out_channels: usize, num_layers: usize, } fn make_divisible(v: f64, divisor: usize) -> usize { let min_value = divisor; let new_v = usize::max( min_value, (v + divisor as f64 * 0.5) as usize / divisor * divisor, ); if (new_v as f64) < 0.9 * v { new_v + divisor } else { new_v } } fn bneck_confs(width_mult: f64, depth_mult: f64) -> Vec<MBConvConfig> { let bneck_conf = |e, k, s, i, o, n| { let input_channels = make_divisible(i as f64 * width_mult, 8); let out_channels = make_divisible(o as f64 * width_mult, 8); let num_layers = (n as f64 * depth_mult).ceil() as usize; MBConvConfig { expand_ratio: e, kernel: k, stride: s, input_channels, out_channels, num_layers, } }; vec![ bneck_conf(1., 3, 1, 32, 16, 1), bneck_conf(6., 3, 2, 16, 24, 2), bneck_conf(6., 5, 2, 24, 40, 2), bneck_conf(6., 3, 2, 40, 80, 3), bneck_conf(6., 5, 1, 80, 112, 3), bneck_conf(6., 5, 2, 112, 192, 4), bneck_conf(6., 3, 1, 192, 320, 1), ] } impl MBConvConfig { pub fn b0() -> Vec<Self> { bneck_confs(1.0, 1.0) } pub fn b1() -> Vec<Self> { bneck_confs(1.0, 1.1) } pub fn b2() -> Vec<Self> { bneck_confs(1.1, 1.2) } pub fn b3() -> Vec<Self> { bneck_confs(1.2, 1.4) } pub fn b4() -> Vec<Self> { bneck_confs(1.4, 1.8) } pub fn b5() -> Vec<Self> { bneck_confs(1.6, 2.2) } pub fn b6() -> Vec<Self> { bneck_confs(1.8, 2.6) } pub fn b7() -> Vec<Self> { bneck_confs(2.0, 3.1) } } /// Conv2D with same padding. #[derive(Debug)] struct Conv2DSame { conv2d: nn::Conv2d, s: usize, k: usize, } impl Conv2DSame { fn new( vb: VarBuilder, i: usize, o: usize, k: usize, stride: usize, groups: usize, bias: bool, ) -> Result<Self> { let conv_config = nn::Conv2dConfig { stride, groups, ..Default::default() }; let conv2d = if bias { nn::conv2d(i, o, k, conv_config, vb)? } else { nn::conv2d_no_bias(i, o, k, conv_config, vb)? 
}; Ok(Self { conv2d, s: stride, k, }) } } impl Module for Conv2DSame { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let s = self.s; let k = self.k; let (_, _, ih, iw) = xs.dims4()?; let oh = (ih + s - 1) / s; let ow = (iw + s - 1) / s; let pad_h = usize::max((oh - 1) * s + k - ih, 0); let pad_w = usize::max((ow - 1) * s + k - iw, 0); if pad_h > 0 || pad_w > 0 { let xs = xs.pad_with_zeros(2, pad_h / 2, pad_h - pad_h / 2)?; let xs = xs.pad_with_zeros(3, pad_w / 2, pad_w - pad_w / 2)?; self.conv2d.forward(&xs) } else { self.conv2d.forward(xs) } } } #[derive(Debug)] struct ConvNormActivation { conv2d: Conv2DSame, bn2d: nn::BatchNorm, activation: bool, } impl ConvNormActivation { fn new( vb: VarBuilder, i: usize, o: usize, k: usize, stride: usize, groups: usize, ) -> Result<Self> { let conv2d = Conv2DSame::new(vb.pp("0"), i, o, k, stride, groups, false)?; let bn2d = nn::batch_norm(o, 1e-3, vb.pp("1"))?; Ok(Self { conv2d, bn2d, activation: true, }) } fn no_activation(self) -> Self { Self { activation: false, ..self } } } impl Module for ConvNormActivation { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = self.conv2d.forward(xs)?.apply_t(&self.bn2d, false)?; if self.activation { swish(&xs) } else { Ok(xs) } } } #[derive(Debug)] struct SqueezeExcitation { fc1: Conv2DSame, fc2: Conv2DSame, } impl SqueezeExcitation { fn new(vb: VarBuilder, in_channels: usize, squeeze_channels: usize) -> Result<Self> { let fc1 = Conv2DSame::new(vb.pp("fc1"), in_channels, squeeze_channels, 1, 1, 1, true)?; let fc2 = Conv2DSame::new(vb.pp("fc2"), squeeze_channels, in_channels, 1, 1, 1, true)?; Ok(Self { fc1, fc2 }) } } impl Module for SqueezeExcitation { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let residual = xs; // equivalent to adaptive_avg_pool2d([1, 1]) let xs = xs.mean_keepdim(D::Minus2)?.mean_keepdim(D::Minus1)?; let xs = self.fc1.forward(&xs)?; let xs = swish(&xs)?; let xs = self.fc2.forward(&xs)?; let xs = nn::ops::sigmoid(&xs)?; residual.broadcast_mul(&xs) } } #[derive(Debug)] struct MBConv { expand_cna: Option<ConvNormActivation>, depthwise_cna: ConvNormActivation, squeeze_excitation: SqueezeExcitation, project_cna: ConvNormActivation, config: MBConvConfig, } impl MBConv { fn new(vb: VarBuilder, c: MBConvConfig) -> Result<Self> { let vb = vb.pp("block"); let exp = make_divisible(c.input_channels as f64 * c.expand_ratio, 8); let expand_cna = if exp != c.input_channels { Some(ConvNormActivation::new( vb.pp("0"), c.input_channels, exp, 1, 1, 1, )?) } else { None }; let start_index = if expand_cna.is_some() { 1 } else { 0 }; let depthwise_cna = ConvNormActivation::new(vb.pp(start_index), exp, exp, c.kernel, c.stride, exp)?; let squeeze_channels = usize::max(1, c.input_channels / 4); let squeeze_excitation = SqueezeExcitation::new(vb.pp(start_index + 1), exp, squeeze_channels)?; let project_cna = ConvNormActivation::new(vb.pp(start_index + 2), exp, c.out_channels, 1, 1, 1)? 
.no_activation(); Ok(Self { expand_cna, depthwise_cna, squeeze_excitation, project_cna, config: c, }) } } impl Module for MBConv { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let use_res_connect = self.config.stride == 1 && self.config.input_channels == self.config.out_channels; let ys = match &self.expand_cna { Some(expand_cna) => expand_cna.forward(xs)?, None => xs.clone(), }; let ys = self.depthwise_cna.forward(&ys)?; let ys = self.squeeze_excitation.forward(&ys)?; let ys = self.project_cna.forward(&ys)?; if use_res_connect { ys + xs } else { Ok(ys) } } } fn swish(s: &Tensor) -> Result<Tensor> { s * nn::ops::sigmoid(s)? } #[derive(Debug)] pub struct EfficientNet { init_cna: ConvNormActivation, blocks: Vec<MBConv>, final_cna: ConvNormActivation, classifier: nn::Linear, } impl EfficientNet { pub fn new(p: VarBuilder, configs: Vec<MBConvConfig>, nclasses: usize) -> Result<Self> { let f_p = p.pp("features"); let first_in_c = configs[0].input_channels; let last_out_c = configs.last().unwrap().out_channels; let final_out_c = 4 * last_out_c; let init_cna = ConvNormActivation::new(f_p.pp(0), 3, first_in_c, 3, 2, 1)?; let nconfigs = configs.len(); let mut blocks = vec![]; for (index, cnf) in configs.into_iter().enumerate() { let f_p = f_p.pp(index + 1); for r_index in 0..cnf.num_layers { let cnf = if r_index == 0 { cnf } else { MBConvConfig { input_channels: cnf.out_channels, stride: 1, ..cnf } }; blocks.push(MBConv::new(f_p.pp(r_index), cnf)?) } } let final_cna = ConvNormActivation::new(f_p.pp(nconfigs + 1), last_out_c, final_out_c, 1, 1, 1)?; let classifier = nn::linear(final_out_c, nclasses, p.pp("classifier.1"))?; Ok(Self { init_cna, blocks, final_cna, classifier, }) } } impl Module for EfficientNet { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let mut xs = self.init_cna.forward(xs)?; for block in self.blocks.iter() { xs = block.forward(&xs)? } let xs = self.final_cna.forward(&xs)?; // Equivalent to adaptive_avg_pool2d([1, 1]) -> squeeze(-1) -> squeeze(-1) let xs = xs.mean(D::Minus1)?.mean(D::Minus1)?; self.classifier.forward(&xs) } }
candle/candle-transformers/src/models/efficientnet.rs/0
{ "file_path": "candle/candle-transformers/src/models/efficientnet.rs", "repo_id": "candle", "token_count": 5123 }
35
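// Usage sketch for the EfficientNet definition above: build a B0 classifier from a VarBuilder
// and run a dummy 224x224 batch through it. A zero-initialised VarMap is used only so the
// example is self-contained; in practice the VarBuilder would come from pretrained safetensors
// weights.
use candle::{DType, Device, Result, Tensor};
use candle_nn::{Module, VarBuilder, VarMap};
use candle_transformers::models::efficientnet::{EfficientNet, MBConvConfig};

fn main() -> Result<()> {
    let dev = Device::Cpu;
    let varmap = VarMap::new();
    let vb = VarBuilder::from_varmap(&varmap, DType::F32, &dev);
    let model = EfficientNet::new(vb, MBConvConfig::b0(), 1000)?;
    // NCHW input: one 3-channel 224x224 image.
    let image = Tensor::zeros((1, 3, 224, 224), DType::F32, &dev)?;
    let logits = model.forward(&image)?;
    println!("{:?}", logits.dims()); // expected: [1, 1000]
    Ok(())
}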
use candle::{Result, Tensor}; use candle_nn::{layer_norm, LayerNorm, Linear, Module, VarBuilder}; #[derive(Debug)] struct Attention { q_proj: Linear, k_proj: Linear, v_proj: Linear, out_proj: Linear, num_heads: usize, } impl Attention { fn new( embedding_dim: usize, num_heads: usize, downsample_rate: usize, vb: VarBuilder, ) -> Result<Self> { let internal_dim = embedding_dim / downsample_rate; let q_proj = candle_nn::linear(embedding_dim, internal_dim, vb.pp("q_proj"))?; let k_proj = candle_nn::linear(embedding_dim, internal_dim, vb.pp("k_proj"))?; let v_proj = candle_nn::linear(embedding_dim, internal_dim, vb.pp("v_proj"))?; let out_proj = candle_nn::linear(internal_dim, embedding_dim, vb.pp("out_proj"))?; Ok(Self { q_proj, k_proj, v_proj, out_proj, num_heads, }) } fn separate_heads(&self, x: &Tensor) -> Result<Tensor> { let (b, n, c) = x.dims3()?; x.reshape((b, n, self.num_heads, c / self.num_heads))? .transpose(1, 2)? .contiguous() } fn recombine_heads(&self, x: &Tensor) -> Result<Tensor> { let (b, n_heads, n_tokens, c_per_head) = x.dims4()?; x.transpose(1, 2)? .reshape((b, n_tokens, n_heads * c_per_head)) } fn forward(&self, q: &Tensor, k: &Tensor, v: &Tensor) -> Result<Tensor> { let q = self.q_proj.forward(&q.contiguous()?)?; let k = self.k_proj.forward(&k.contiguous()?)?; let v = self.v_proj.forward(&v.contiguous()?)?; let q = self.separate_heads(&q)?; let k = self.separate_heads(&k)?; let v = self.separate_heads(&v)?; let (_, _, _, c_per_head) = q.dims4()?; let attn = (q.matmul(&k.t()?)? / (c_per_head as f64).sqrt())?; let attn = candle_nn::ops::softmax_last_dim(&attn)?; let out = attn.matmul(&v)?; self.recombine_heads(&out)?.apply(&self.out_proj) } } #[derive(Debug)] struct TwoWayAttentionBlock { self_attn: Attention, norm1: LayerNorm, cross_attn_token_to_image: Attention, norm2: LayerNorm, mlp: super::MlpBlock, norm3: LayerNorm, norm4: LayerNorm, cross_attn_image_to_token: Attention, skip_first_layer_pe: bool, } impl TwoWayAttentionBlock { fn new( embedding_dim: usize, num_heads: usize, mlp_dim: usize, skip_first_layer_pe: bool, vb: VarBuilder, ) -> Result<Self> { let norm1 = layer_norm(embedding_dim, 1e-5, vb.pp("norm1"))?; let norm2 = layer_norm(embedding_dim, 1e-5, vb.pp("norm2"))?; let norm3 = layer_norm(embedding_dim, 1e-5, vb.pp("norm3"))?; let norm4 = layer_norm(embedding_dim, 1e-5, vb.pp("norm4"))?; let self_attn = Attention::new(embedding_dim, num_heads, 1, vb.pp("self_attn"))?; let cross_attn_token_to_image = Attention::new( embedding_dim, num_heads, 2, vb.pp("cross_attn_token_to_image"), )?; let cross_attn_image_to_token = Attention::new( embedding_dim, num_heads, 2, vb.pp("cross_attn_image_to_token"), )?; let mlp = super::MlpBlock::new( embedding_dim, mlp_dim, candle_nn::Activation::Relu, vb.pp("mlp"), )?; Ok(Self { self_attn, norm1, cross_attn_image_to_token, norm2, mlp, norm3, norm4, cross_attn_token_to_image, skip_first_layer_pe, }) } fn forward( &self, queries: &Tensor, keys: &Tensor, query_pe: &Tensor, key_pe: &Tensor, ) -> Result<(Tensor, Tensor)> { // Self attention block let queries = if self.skip_first_layer_pe { self.self_attn.forward(queries, queries, queries)? } else { let q = (queries + query_pe)?; let attn_out = self.self_attn.forward(&q, &q, queries)?; (queries + attn_out)? 
}; let queries = self.norm1.forward(&queries)?; // Cross attention block, tokens attending to image embedding let q = (&queries + query_pe)?; let k = (keys + key_pe)?; let attn_out = self.cross_attn_token_to_image.forward(&q, &k, keys)?; let queries = (&queries + attn_out)?; let queries = self.norm2.forward(&queries)?; // MLP block let mlp_out = self.mlp.forward(&queries); let queries = (queries + mlp_out)?; let queries = self.norm3.forward(&queries)?; // Cross attention block, image embedding attending to tokens let q = (&queries + query_pe)?; let k = (keys + key_pe)?; let attn_out = self.cross_attn_image_to_token.forward(&k, &q, &queries)?; let keys = (keys + attn_out)?; let keys = self.norm4.forward(&keys)?; Ok((queries, keys)) } } #[derive(Debug)] pub struct TwoWayTransformer { layers: Vec<TwoWayAttentionBlock>, final_attn_token_to_image: Attention, norm_final_attn: LayerNorm, } impl TwoWayTransformer { pub fn new( depth: usize, embedding_dim: usize, num_heads: usize, mlp_dim: usize, vb: VarBuilder, ) -> Result<Self> { let vb_l = vb.pp("layers"); let mut layers = Vec::with_capacity(depth); for i in 0..depth { let layer = TwoWayAttentionBlock::new(embedding_dim, num_heads, mlp_dim, i == 0, vb_l.pp(i))?; layers.push(layer) } let final_attn_token_to_image = Attention::new( embedding_dim, num_heads, 2, vb.pp("final_attn_token_to_image"), )?; let norm_final_attn = layer_norm(embedding_dim, 1e-5, vb.pp("norm_final_attn"))?; Ok(Self { layers, final_attn_token_to_image, norm_final_attn, }) } pub fn forward( &self, image_embedding: &Tensor, image_pe: &Tensor, point_embedding: &Tensor, ) -> Result<(Tensor, Tensor)> { let image_embedding = image_embedding.flatten_from(2)?.permute((0, 2, 1))?; let image_pe = image_pe.flatten_from(2)?.permute((0, 2, 1))?; let mut queries = point_embedding.clone(); let mut keys = image_embedding; for layer in self.layers.iter() { (queries, keys) = layer.forward(&queries, &keys, point_embedding, &image_pe)? } let q = (&queries + point_embedding)?; let k = (&keys + image_pe)?; let attn_out = self.final_attn_token_to_image.forward(&q, &k, &keys)?; let queries = (queries + attn_out)?.apply(&self.norm_final_attn)?; Ok((queries, keys)) } }
candle/candle-transformers/src/models/segment_anything/transformer.rs/0
{ "file_path": "candle/candle-transformers/src/models/segment_anything/transformer.rs", "repo_id": "candle", "token_count": 3597 }
36
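// Standalone sketch of the reshaping done by separate_heads/recombine_heads in the attention
// block above: a (b, n, c) activation is viewed as (b, num_heads, n, c / num_heads) for the
// attention matmuls and then restored. Only the toy sizes below are invented.
use candle::{Device, Result, Tensor};

fn main() -> Result<()> {
    let dev = Device::Cpu;
    let (b, n, c, num_heads) = (2usize, 5usize, 8usize, 4usize);
    let x = Tensor::randn(0f32, 1.0, (b, n, c), &dev)?;
    let split = x
        .reshape((b, n, num_heads, c / num_heads))?
        .transpose(1, 2)?
        .contiguous()?; // (b, num_heads, n, c / num_heads)
    let merged = split.transpose(1, 2)?.reshape((b, n, c))?;
    assert_eq!(merged.dims(), x.dims());
    Ok(())
}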
use crate::models::vit::{Config, Embeddings, Encoder}; use candle::{Result, Tensor}; use candle_nn::{ embedding, layer_norm, linear_no_bias, Embedding, LayerNorm, Linear, Module, VarBuilder, }; use serde::Deserialize; #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct TrOCRConfig { pub vocab_size: usize, pub d_model: usize, pub hidden_size: usize, pub decoder_layers: usize, pub decoder_attention_heads: usize, pub decoder_ffn_dim: usize, pub activation_function: candle_nn::Activation, pub max_position_embeddings: usize, pub dropout: f64, pub attention_dropout: f64, pub activation_dropout: f64, pub decoder_start_token_id: u32, pub init_std: f64, pub decoder_layerdrop: f64, pub use_cache: bool, pub scale_embedding: bool, pub use_learned_position_embeddings: bool, pub layernorm_embedding: bool, pub pad_token_id: usize, pub bos_token_id: usize, pub eos_token_id: u32, pub num_attention_heads: usize, pub decoder_vocab_size: Option<usize>, } impl Default for TrOCRConfig { fn default() -> Self { Self { vocab_size: 50265, d_model: 1024, hidden_size: 768, decoder_layers: 12, decoder_attention_heads: 16, decoder_ffn_dim: 4096, activation_function: candle_nn::Activation::Gelu, max_position_embeddings: 512, dropout: 0.1, attention_dropout: 0.0, activation_dropout: 0.0, decoder_start_token_id: 2, init_std: 0.02, decoder_layerdrop: 0.0, use_cache: true, scale_embedding: false, use_learned_position_embeddings: true, layernorm_embedding: true, pad_token_id: 1, bos_token_id: 0, eos_token_id: 2, num_attention_heads: 12, decoder_vocab_size: Some(50265), } } } #[derive(Debug, Clone)] struct TrOCRLearnedPositionalEmbedding { offset: usize, weights: Embedding, } impl TrOCRLearnedPositionalEmbedding { fn load(vb: VarBuilder, cfg: &TrOCRConfig) -> Result<Self> { let offset: usize = 2; let num_embeddings = cfg.max_position_embeddings; let embedding_dim = cfg.d_model; let weights = embedding(num_embeddings + offset, embedding_dim, vb)?; Ok(Self { offset, weights }) } fn forward(&mut self, input_ids: &Tensor, past_key_values_length: u32) -> Result<Tensor> { let (b_sz, seq_len) = input_ids.dims2()?; let mut positions = Tensor::arange( past_key_values_length, seq_len as u32 + past_key_values_length, input_ids.device(), )? .expand((b_sz, seq_len))?; positions = positions.broadcast_add(&Tensor::new(self.offset as u32, input_ids.device())?)?; self.weights.forward(&positions) } } #[derive(Debug, Clone)] struct TrOCRAttention { head_dim: usize, num_heads: usize, is_decoder: bool, scaling: f64, k_proj: Linear, v_proj: Linear, q_proj: Linear, out_proj: Linear, kv_cache: Option<(Tensor, Tensor)>, } impl TrOCRAttention { fn load( vb: VarBuilder, cfg: &TrOCRConfig, kdim: Option<usize>, vdim: Option<usize>, ) -> Result<Self> { let embed_dim = cfg.d_model; let num_heads = cfg.decoder_attention_heads; let head_dim = embed_dim / num_heads; let kdim = kdim.unwrap_or(embed_dim); let vdim = vdim.unwrap_or(embed_dim); let k_proj = linear_no_bias(kdim, embed_dim, vb.pp("k_proj"))?; let v_proj = linear_no_bias(vdim, embed_dim, vb.pp("v_proj"))?; let q_proj = linear_no_bias(embed_dim, embed_dim, vb.pp("q_proj"))?; let out_proj = linear_no_bias(embed_dim, embed_dim, vb.pp("out_proj"))?; Ok(Self { head_dim, num_heads, is_decoder: true, scaling: 1. / (head_dim as f64).sqrt(), k_proj, v_proj, q_proj, out_proj, kv_cache: None, }) } fn reset_kv_cache(&mut self) { self.kv_cache = None } fn _shape(&self, tensor: &Tensor, bsz: usize) -> Result<Tensor> { tensor .reshape((bsz, (), self.num_heads, self.head_dim))? .transpose(1, 2)? 
.contiguous() } fn forward( &mut self, xs: &Tensor, kv_states: Option<&Tensor>, attn_mask: Option<&Tensor>, ) -> Result<Tensor> { let (b_sz, tgt_len, _) = xs.dims3()?; let query_states = (xs.apply(&self.q_proj)? * self.scaling)?; let (key_states, value_states) = match kv_states { None => { let key_states = self._shape(&xs.apply(&self.k_proj)?, b_sz)?; let value_states = self._shape(&xs.apply(&self.v_proj)?, b_sz)?; if self.is_decoder { let kv_states = match &self.kv_cache { None => (key_states, value_states), Some((p_key_states, p_value_states)) => { let key_states = Tensor::cat(&[p_key_states, &key_states], 2)?; let value_states = Tensor::cat(&[p_value_states, &value_states], 2)?; (key_states, value_states) } }; self.kv_cache = Some(kv_states.clone()); kv_states } else { (key_states, value_states) } } Some(kv_states) => { let key_states = self._shape(&kv_states.apply(&self.k_proj)?, b_sz)?; let value_states = self._shape(&kv_states.apply(&self.v_proj)?, b_sz)?; (key_states, value_states) } }; let proj_shape = (b_sz * self.num_heads, (), self.head_dim); let query_states = self._shape(&query_states, b_sz)?.reshape(proj_shape)?; let key_states = key_states.reshape(proj_shape)?; let value_states = value_states.reshape(proj_shape)?; let attn_weights = query_states.matmul(&key_states.transpose(1, 2)?)?; let attn_weights = match attn_mask { None => attn_weights, Some(attn_mask) => attn_weights.broadcast_add(attn_mask)?, }; let attn_probs = candle_nn::ops::softmax_last_dim(&attn_weights)?; let attn_output = attn_probs.matmul(&value_states)?; attn_output .reshape((b_sz, self.num_heads, tgt_len, self.head_dim))? .transpose(1, 2)? .reshape((b_sz, tgt_len, self.head_dim * self.num_heads))? .apply(&self.out_proj) } } #[derive(Debug, Clone)] struct TrOCRDecoderLayer { self_attn: TrOCRAttention, activation_fn: candle_nn::Activation, self_attn_layer_norm: LayerNorm, encoder_attn: TrOCRAttention, encoder_attn_layer_norm: LayerNorm, fc1: Linear, fc2: Linear, final_layer_norm: LayerNorm, } impl TrOCRDecoderLayer { fn load(vb: VarBuilder, cfg: &TrOCRConfig) -> Result<Self> { let embed_dim = cfg.d_model; let self_attn = TrOCRAttention::load(vb.pp("self_attn"), cfg, None, None)?; let self_attn_layer_norm = layer_norm(embed_dim, 1e-5, vb.pp("self_attn_layer_norm"))?; let encoder_attn = TrOCRAttention::load( vb.pp("encoder_attn"), cfg, Some(cfg.hidden_size), Some(cfg.hidden_size), )?; let encoder_attn_layer_norm = layer_norm(embed_dim, 1e-5, vb.pp("encoder_attn_layer_norm"))?; let fc1 = linear_no_bias(embed_dim, cfg.decoder_ffn_dim, vb.pp("fc1"))?; let fc2 = linear_no_bias(cfg.decoder_ffn_dim, embed_dim, vb.pp("fc2"))?; let final_layer_norm = layer_norm(embed_dim, 1e-5, vb.pp("final_layer_norm"))?; let activation_fn = candle_nn::Activation::Gelu; Ok(Self { self_attn, activation_fn, self_attn_layer_norm, encoder_attn, encoder_attn_layer_norm, fc1, fc2, final_layer_norm, }) } fn reset_kv_cache(&mut self) { self.self_attn.reset_kv_cache(); } fn forward( &mut self, xs: &Tensor, attention_mask: &Tensor, encoder_hidden_states: Option<&Tensor>, ) -> Result<Tensor> { let residual = xs.clone(); let xs = self.self_attn.forward(xs, None, Some(attention_mask))?; let xs = (xs + residual)?; let mut xs = self.self_attn_layer_norm.forward(&xs)?; if let Some(encoder_hidden_states) = &encoder_hidden_states { let residual = xs.clone(); let encoder_attention_mask = attention_mask.clone(); // TODO xs = self.encoder_attn.forward( &xs, Some(encoder_hidden_states), Some(&encoder_attention_mask), )?; xs = (xs + residual)?; xs = 
self.encoder_attn_layer_norm.forward(&xs)? } let residual = xs.clone(); let xs = self.fc1.forward(&xs)?; let xs = self.activation_fn.forward(&xs)?; let xs = self.fc2.forward(&xs)?; let xs = (xs + residual)?; let xs = self.final_layer_norm.forward(&xs)?; Ok(xs) } } #[derive(Debug, Clone)] pub struct TrOCRDecoder { layers: Vec<TrOCRDecoderLayer>, embed_scale: Option<f64>, embed_tokens: Embedding, embed_positions: TrOCRLearnedPositionalEmbedding, } impl TrOCRDecoder { fn new(cfg: &TrOCRConfig, vb: VarBuilder) -> Result<Self> { let vb = vb.pp("decoder.model.decoder"); let embed_tokens = embedding(cfg.vocab_size, cfg.d_model, vb.pp("embed_tokens"))?; let embed_positions = TrOCRLearnedPositionalEmbedding::load(vb.pp("embed_positions"), cfg)?; let mut layers = Vec::with_capacity(cfg.decoder_layers); let vb_l = vb.pp("layers"); for idx in 0..cfg.decoder_layers { let layer = TrOCRDecoderLayer::load(vb_l.pp(idx), cfg)?; layers.push(layer) } let embed_scale = if cfg.scale_embedding { Some((cfg.d_model as f64).sqrt()) } else { None }; Ok(Self { layers, embed_scale, embed_tokens, embed_positions, }) } fn reset_kv_cache(&mut self) { self.layers.iter_mut().for_each(|l| l.reset_kv_cache()) } pub fn forward( &mut self, xs: &Tensor, encoder_xs: Option<&Tensor>, past_kv_len: usize, attn_mask: &Tensor, ) -> Result<Tensor> { let embed_pos = self.embed_positions.forward(xs, past_kv_len as u32)?; let xs = xs.apply(&self.embed_tokens)?; let xs = match self.embed_scale { None => xs, Some(scale) => (xs * scale)?, }; let mut xs = xs.broadcast_add(&embed_pos)?; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, attn_mask, encoder_xs)?; } Ok(xs) } } #[derive(Debug, Clone)] pub struct TrOCREncoder { embeddings: Embeddings, encoder: Encoder, layernorm: LayerNorm, } impl TrOCREncoder { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb_v = vb.pp("encoder"); let embeddings = Embeddings::new(cfg, false, vb_v.pp("embeddings"))?; let encoder = Encoder::new(cfg, vb_v.pp("encoder"))?; let layernorm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb_v.pp("layernorm"))?; Ok(Self { embeddings, encoder, layernorm, }) } pub fn forward(&self, xs: &Tensor) -> Result<Tensor> { let embedding_output = self.embeddings.forward(xs, None, false)?; let encoder_outputs = self.encoder.forward(&embedding_output)?; self.layernorm.forward(&encoder_outputs) } } #[derive(Debug, Clone)] pub struct TrOCRForCausalLM { decoder: TrOCRDecoder, output_projection: Linear, } impl TrOCRForCausalLM { pub fn new(decoder_cfg: &TrOCRConfig, vb: VarBuilder) -> Result<Self> { let decoder = TrOCRDecoder::new(decoder_cfg, vb.clone())?; let output_projection = candle_nn::Linear::new(decoder.embed_tokens.embeddings().clone(), None); Ok(Self { decoder, output_projection, }) } pub fn forward( &mut self, xs: &Tensor, encoder_xs: Option<&Tensor>, past_kv_len: usize, attn_mask: &Tensor, ) -> Result<Tensor> { let xs = self .decoder .forward(xs, encoder_xs, past_kv_len, attn_mask)?; let xs = xs.apply(&self.output_projection)?; Ok(xs) } fn reset_kv_cache(&mut self) { self.decoder.reset_kv_cache(); } } #[derive(Debug, Clone)] pub struct TrOCRModel { encoder: TrOCREncoder, decoder: TrOCRForCausalLM, } impl TrOCRModel { pub fn new(encoder_cfg: &Config, decoder_cfg: &TrOCRConfig, vb: VarBuilder) -> Result<Self> { let encoder = TrOCREncoder::new(encoder_cfg, vb.clone())?; let decoder = TrOCRForCausalLM::new(decoder_cfg, vb)?; Ok(Self { encoder, decoder }) } pub fn encoder(&mut self) -> &mut TrOCREncoder { &mut self.encoder } pub fn decoder(&mut self) -> 
&mut TrOCRForCausalLM { &mut self.decoder } pub fn decode( &mut self, xs: &Tensor, encoder_xs: &Tensor, past_kv_len: usize, ) -> Result<Tensor> { let seq_len = xs.dim(1)?; let mask: Vec<_> = (0..seq_len) .flat_map(|i| (0..seq_len).map(move |j| if j > i { f32::NEG_INFINITY } else { 0f32 })) .collect(); let mask = Tensor::from_vec(mask, (seq_len, seq_len), xs.device())?; self.decoder .forward(xs, Some(encoder_xs), past_kv_len, &mask) } pub fn reset_kv_cache(&mut self) { self.decoder.reset_kv_cache(); } }
candle/candle-transformers/src/models/trocr.rs/0
{ "file_path": "candle/candle-transformers/src/models/trocr.rs", "repo_id": "candle", "token_count": 7391 }
37
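// Minimal greedy-decoding sketch around the TrOCRModel API shown above (encoder().forward,
// decode, reset_kv_cache). Model construction, tokenizer handling and the exact logits shape are
// assumptions; treat this as an outline of the decode loop rather than a drop-in implementation.
use candle::{Result, Tensor};
use candle_transformers::models::trocr::TrOCRModel;

fn greedy_decode(
    model: &mut TrOCRModel,
    image: &Tensor,
    max_len: usize,
    start_token: u32,
    eos_token: u32,
) -> Result<Vec<u32>> {
    let encoder_xs = model.encoder().forward(image)?;
    let mut tokens = vec![start_token];
    for past_kv_len in 0..max_len {
        // With the kv-cache populated, only the most recent token is fed back in.
        let last_token = [*tokens.last().unwrap()];
        let xs = Tensor::new(&last_token, image.device())?.unsqueeze(0)?;
        let logits = model.decode(&xs, &encoder_xs, past_kv_len)?; // (1, seq, vocab)
        let logits = logits.squeeze(0)?;
        let last = logits.get(logits.dim(0)? - 1)?;
        let next = last.argmax(0)?.to_scalar::<u32>()?;
        tokens.push(next);
        if next == eos_token {
            break;
        }
    }
    model.reset_kv_cache();
    Ok(tokens)
}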
/// A bounding box around an object.
#[derive(Debug, Clone)]
pub struct Bbox<D> {
    pub xmin: f32,
    pub ymin: f32,
    pub xmax: f32,
    pub ymax: f32,
    pub confidence: f32,
    pub data: D,
}

#[derive(Debug, Clone, Copy, PartialEq)]
pub struct KeyPoint {
    pub x: f32,
    pub y: f32,
    pub mask: f32,
}

/// Intersection over union of two bounding boxes.
pub fn iou<D>(b1: &Bbox<D>, b2: &Bbox<D>) -> f32 {
    let b1_area = (b1.xmax - b1.xmin + 1.) * (b1.ymax - b1.ymin + 1.);
    let b2_area = (b2.xmax - b2.xmin + 1.) * (b2.ymax - b2.ymin + 1.);
    let i_xmin = b1.xmin.max(b2.xmin);
    let i_xmax = b1.xmax.min(b2.xmax);
    let i_ymin = b1.ymin.max(b2.ymin);
    let i_ymax = b1.ymax.min(b2.ymax);
    let i_area = (i_xmax - i_xmin + 1.).max(0.) * (i_ymax - i_ymin + 1.).max(0.);
    i_area / (b1_area + b2_area - i_area)
}

pub fn non_maximum_suppression<D>(bboxes: &mut [Vec<Bbox<D>>], threshold: f32) {
    // Perform non-maximum suppression.
    for bboxes_for_class in bboxes.iter_mut() {
        bboxes_for_class.sort_by(|b1, b2| b2.confidence.partial_cmp(&b1.confidence).unwrap());
        let mut current_index = 0;
        for index in 0..bboxes_for_class.len() {
            let mut drop = false;
            for prev_index in 0..current_index {
                let iou = iou(&bboxes_for_class[prev_index], &bboxes_for_class[index]);
                if iou > threshold {
                    drop = true;
                    break;
                }
            }
            if !drop {
                bboxes_for_class.swap(current_index, index);
                current_index += 1;
            }
        }
        bboxes_for_class.truncate(current_index);
    }
}
candle/candle-transformers/src/object_detection.rs/0
{ "file_path": "candle/candle-transformers/src/object_detection.rs", "repo_id": "candle", "token_count": 894 }
38
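// Usage sketch for the helpers defined in object_detection.rs above: score two overlapping
// detections and let non_maximum_suppression keep only the stronger one. Everything used here is
// defined above; only the unit data payload is a placeholder.
fn main() {
    let b1 = Bbox { xmin: 0.0, ymin: 0.0, xmax: 10.0, ymax: 10.0, confidence: 0.9, data: () };
    let b2 = Bbox { xmin: 1.0, ymin: 1.0, xmax: 11.0, ymax: 11.0, confidence: 0.6, data: () };
    println!("iou = {:.3}", iou(&b1, &b2)); // ~0.704 for these boxes
    let mut per_class = vec![vec![b1, b2]];
    non_maximum_suppression(&mut per_class, 0.5);
    assert_eq!(per_class[0].len(), 1); // the weaker, heavily-overlapping box is dropped
}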
use yew_agent::PublicWorker;

fn main() {
    console_error_panic_hook::set_once();
    candle_wasm_example_llama2::Worker::register();
}
candle/candle-wasm-examples/llama2-c/src/bin/worker.rs/0
{ "file_path": "candle/candle-wasm-examples/llama2-c/src/bin/worker.rs", "repo_id": "candle", "token_count": 54 }
39
use candle::{DType, Device, Tensor}; use candle_nn::VarBuilder; use candle_wasm_example_sam as sam; use wasm_bindgen::prelude::*; struct Embeddings { original_width: u32, original_height: u32, width: u32, height: u32, data: Tensor, } #[wasm_bindgen] pub struct Model { sam: sam::Sam, embeddings: Option<Embeddings>, } #[wasm_bindgen] impl Model { #[wasm_bindgen(constructor)] pub fn new(weights: Vec<u8>, use_tiny: bool) -> Result<Model, JsError> { console_error_panic_hook::set_once(); let dev = &Device::Cpu; let vb = VarBuilder::from_buffered_safetensors(weights, DType::F32, dev)?; let sam = if use_tiny { sam::Sam::new_tiny(vb)? // tiny vit_t } else { sam::Sam::new(768, 12, 12, &[2, 5, 8, 11], vb)? // sam_vit_b }; Ok(Self { sam, embeddings: None, }) } pub fn set_image_embeddings(&mut self, image_data: Vec<u8>) -> Result<(), JsError> { sam::console_log!("image data: {}", image_data.len()); let image_data = std::io::Cursor::new(image_data); let image = image::io::Reader::new(image_data) .with_guessed_format()? .decode() .map_err(candle::Error::wrap)?; let (original_height, original_width) = (image.height(), image.width()); let (height, width) = (original_height, original_width); let resize_longest = sam::IMAGE_SIZE as u32; let (height, width) = if height < width { let h = (resize_longest * height) / width; (h, resize_longest) } else { let w = (resize_longest * width) / height; (resize_longest, w) }; let image_t = { let img = image.resize_exact(width, height, image::imageops::FilterType::CatmullRom); let data = img.to_rgb8().into_raw(); Tensor::from_vec( data, (img.height() as usize, img.width() as usize, 3), &Device::Cpu, )? .permute((2, 0, 1))? }; let data = self.sam.embeddings(&image_t)?; self.embeddings = Some(Embeddings { original_width, original_height, width, height, data, }); Ok(()) } pub fn mask_for_point(&self, input: JsValue) -> Result<JsValue, JsError> { let input: PointsInput = serde_wasm_bindgen::from_value(input).map_err(|m| JsError::new(&m.to_string()))?; let transformed_points = input.points; for &(x, y, _bool) in &transformed_points { if !(0.0..=1.0).contains(&x) { return Err(JsError::new(&format!( "x has to be between 0 and 1, got {}", x ))); } if !(0.0..=1.0).contains(&y) { return Err(JsError::new(&format!( "y has to be between 0 and 1, got {}", y ))); } } let embeddings = match &self.embeddings { None => Err(JsError::new("image embeddings have not been set"))?, Some(embeddings) => embeddings, }; let (mask, iou_predictions) = self.sam.forward_for_embeddings( &embeddings.data, embeddings.height as usize, embeddings.width as usize, &transformed_points, false, )?; let iou = iou_predictions.flatten(0, 1)?.to_vec1::<f32>()?[0]; let mask_shape = mask.dims().to_vec(); let mask_data = mask.ge(0f32)?.flatten_all()?.to_vec1::<u8>()?; let mask = Mask { iou, mask_shape, mask_data, }; let image = Image { original_width: embeddings.original_width, original_height: embeddings.original_height, width: embeddings.width, height: embeddings.height, }; Ok(serde_wasm_bindgen::to_value(&MaskImage { mask, image })?) 
} } #[derive(serde::Serialize, serde::Deserialize)] struct Mask { iou: f32, mask_shape: Vec<usize>, mask_data: Vec<u8>, } #[derive(serde::Serialize, serde::Deserialize)] struct Image { original_width: u32, original_height: u32, width: u32, height: u32, } #[derive(serde::Serialize, serde::Deserialize)] struct MaskImage { mask: Mask, image: Image, } #[derive(serde::Serialize, serde::Deserialize)] struct PointsInput { points: Vec<(f64, f64, bool)>, } fn main() { console_error_panic_hook::set_once(); }
candle/candle-wasm-examples/segment-anything/src/bin/m.rs/0
{ "file_path": "candle/candle-wasm-examples/segment-anything/src/bin/m.rs", "repo_id": "candle", "token_count": 2400 }
40
<html> <head> <meta content="text/html;charset=utf-8" http-equiv="Content-Type" /> <title>Candle Whisper Rust/WASM</title> </head> <body></body> </html> <!DOCTYPE html> <html> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" /> <style> @import url("https://fonts.googleapis.com/css2?family=Source+Code+Pro:wght@200;300;400&family=Source+Sans+3:wght@100;200;300;400;500;600;700;800;900&display=swap"); html, body { font-family: "Source Sans 3", sans-serif; } </style> <script src="https://cdn.tailwindcss.com"></script> <script type="module"> // base url for audio examples const AUDIO_BASE_URL = "https://huggingface.co/datasets/Narsil/candle-examples/resolve/main/"; // models base url const MODELS = { tiny_multilingual: { base_url: "https://huggingface.co/openai/whisper-tiny/resolve/main/", model: "model.safetensors", tokenizer: "tokenizer.json", config: "config.json", size: "151 MB", }, tiny_en: { base_url: "https://huggingface.co/openai/whisper-tiny.en/resolve/main/", model: "model.safetensors", tokenizer: "tokenizer.json", config: "config.json", size: "151 MB", }, tiny_quantized_multilingual_q80: { base_url: "https://huggingface.co/lmz/candle-whisper/resolve/main/", model: "model-tiny-q80.gguf", tokenizer: "tokenizer-tiny.json", config: "config-tiny.json", size: "41.5 MB", }, tiny_en_quantized_q80: { base_url: "https://huggingface.co/lmz/candle-whisper/resolve/main/", model: "model-tiny-q80.gguf", tokenizer: "tokenizer-tiny-en.json", config: "config-tiny-en.json", size: "41.8 MB", }, distil_medium_en: { base_url: "https://huggingface.co/distil-whisper/distil-medium.en/resolve/main/", model: "model.safetensors", tokenizer: "tokenizer.json", config: "config.json", size: "789 MB", }, }; const modelEl = document.querySelector("#model"); Object.keys(MODELS).forEach((modelID) => { const model = MODELS[modelID]; const option = document.createElement("option"); option.value = modelID; option.textContent = `${modelID} (${model.size})`; modelEl.appendChild(option); }); const whisperWorker = new Worker("./whisperWorker.js", { type: "module", }); async function classifyAudio( weightsURL, // URL to the weights file modelID, // model ID tokenizerURL, // URL to the tokenizer file configURL, // model config URL mel_filtersURL, // URL to the mel filters file audioURL, // URL to the audio file updateStatus // function to update the status ) { return new Promise((resolve, reject) => { whisperWorker.postMessage({ weightsURL, modelID, tokenizerURL, configURL, mel_filtersURL, audioURL, }); function messageHandler(event) { console.log(event.data); if ("status" in event.data) { updateStatus(event.data); } if ("error" in event.data) { whisperWorker.removeEventListener("message", messageHandler); reject(new Error(event.data.error)); } if (event.data.status === "complete") { whisperWorker.removeEventListener("message", messageHandler); resolve(event.data); } } whisperWorker.addEventListener("message", messageHandler); }); } // keep track of the audio URL let audioURL = null; function setAudio(src) { const audio = document.querySelector("#audio"); audio.src = src; audio.controls = true; audio.hidden = false; document.querySelector("#detect").disabled = false; audioURL = src; } // add event listener to audio buttons document.querySelectorAll("#audios-select > button").forEach((target) => { target.addEventListener("click", (e) => { const value = target.dataset.value; const href = AUDIO_BASE_URL + value; setAudio(href); }); }); //add event listener to file input 
document.querySelector("#file-upload").addEventListener("change", (e) => { const target = e.target; if (target.files.length > 0) { const href = URL.createObjectURL(target.files[0]); setAudio(href); } }); // add event listener to drop-area const dropArea = document.querySelector("#drop-area"); dropArea.addEventListener("dragenter", (e) => { e.preventDefault(); dropArea.classList.add("border-blue-700"); }); dropArea.addEventListener("dragleave", (e) => { e.preventDefault(); dropArea.classList.remove("border-blue-700"); }); dropArea.addEventListener("dragover", (e) => { e.preventDefault(); dropArea.classList.add("border-blue-700"); }); dropArea.addEventListener("drop", (e) => { e.preventDefault(); dropArea.classList.remove("border-blue-700"); const url = e.dataTransfer.getData("text/uri-list"); const files = e.dataTransfer.files; if (files.length > 0) { const href = URL.createObjectURL(files[0]); setAudio(href); } else if (url) { setAudio(url); } }); // add event listener to detect button document.querySelector("#detect").addEventListener("click", async () => { if (audioURL === null) { return; } const modelID = modelEl.value; const model = MODELS[modelID]; const modelURL = model.base_url + model.model; const tokenizerURL = model.base_url + model.tokenizer; const configURL = model.base_url + model.config; classifyAudio( modelURL, modelID, tokenizerURL, configURL, "mel_filters.safetensors", audioURL, updateStatus ) .then((result) => { console.log("RESULT", result); const { output } = result; const text = output.map((segment) => segment.dr.text).join(" "); console.log(text); document.querySelector("#output-status").hidden = true; document.querySelector("#output-generation").hidden = false; document.querySelector("#output-generation").textContent = text; }) .catch((error) => { console.error(error); }); }); function updateStatus(data) { const { status, message } = data; const button = document.querySelector("#detect"); if (status === "decoding" || status === "loading") { button.disabled = true; button.textContent = message; } else if (status === "complete") { button.disabled = false; button.textContent = "Transcribe Audio"; } } </script> </head> <body class="container max-w-4xl mx-auto p-4"> <main class="grid grid-cols-1 gap-8 relative"> <span class="absolute text-5xl -ml-[1em]"> 🕯️ </span> <div> <h1 class="text-5xl font-bold">Candle Whisper</h1> <h2 class="text-2xl font-bold">Rust/WASM Demo</h2> <p class="max-w-lg"> Transcribe audio in the browser using rust/wasm with an audio file. 
This demo uses the <a href="https://huggingface.co/openai/" target="_blank" class="underline hover:text-blue-500 hover:no-underline"> OpenAI Whisper models </a> and WASM runtime built with <a href="https://github.com/huggingface/candle/" target="_blank" class="underline hover:text-blue-500 hover:no-underline" >Candle </a> </p> </div> <div> <label for="model" class="font-medium">Models Options: </label> <select id="model" class="border-2 border-gray-500 rounded-md font-light"> </select> </div> <!-- drag and drop area --> <div class="relative"> <div id="drop-area" class="flex flex-col items-center justify-center border-2 border-gray-300 border-dashed rounded-xl relative h-48 w-full overflow-hidden"> <div class="flex flex-col items-center justify-center space-y-1 text-center"> <svg width="25" height="25" viewBox="0 0 25 25" fill="none" xmlns="http://www.w3.org/2000/svg"> <path d="M3.5 24.3a3 3 0 0 1-1.9-.8c-.5-.5-.8-1.2-.8-1.9V2.9c0-.7.3-1.3.8-1.9.6-.5 1.2-.7 2-.7h18.6c.7 0 1.3.2 1.9.7.5.6.7 1.2.7 2v18.6c0 .7-.2 1.4-.7 1.9a3 3 0 0 1-2 .8H3.6Zm0-2.7h18.7V2.9H3.5v18.7Zm2.7-2.7h13.3c.3 0 .5 0 .6-.3v-.7l-3.7-5a.6.6 0 0 0-.6-.2c-.2 0-.4 0-.5.3l-3.5 4.6-2.4-3.3a.6.6 0 0 0-.6-.3c-.2 0-.4.1-.5.3l-2.7 3.6c-.1.2-.2.4 0 .7.1.2.3.3.6.3Z" fill="#000" /> </svg> <div class="flex text-sm text-gray-600"> <label for="file-upload" class="relative cursor-pointer bg-white rounded-md font-medium text-blue-950 hover:text-blue-700"> <span>Drag and drop your audio here</span> <span class="block text-xs">or</span> <span class="block text-xs">Click to upload</span> </label> </div> <input id="file-upload" name="file-upload" type="file" accept="audio/*" class="sr-only" /> </div> <audio id="audio" hidden controls class="w-full p-2 select-none"></audio> </div> </div> <div> <div class="flex flex-wrap gap-3 items-center" id="audios-select"> <h3 class="font-medium">Examples:</h3> <button data-value="samples_jfk.wav" class="text-gray-500 border border-gray-500 rounded-md p-2 underline hover:no-underline"> <span>jfk.wav</span> <span class="text-xs block"> (352 kB)</span> </button> <button data-value="samples_a13.wav" class="text-gray-500 border border-gray-500 rounded-md p-2 underline hover:no-underline"> <span>a13.wav</span> <span class="text-xs block"> (960 kB)</span> </button> <button data-value="samples_mm0.wav" class="text-gray-500 border border-gray-500 rounded-md p-2 underline hover:no-underline"> <span>mm0.wav</span> <span class="text-xs block new"> (957 kB)</span> </button> <button data-value="samples_gb0.wav" class="text-gray-500 border border-gray-500 rounded-md p-2 underline hover:no-underline"> <span>gb0.wav </span> <span class="text-xs block">(4.08 MB)</span> </button> <button data-value="samples_gb1.wav" class="text-gray-500 border border-gray-500 rounded-md p-2 underline hover:no-underline"> <span>gb1.wav </span> <span class="text-xs block">(6.36 MB)</span> </button> <button data-value="samples_hp0.wav" class="text-gray-500 border border-gray-500 rounded-md p-2 underline hover:no-underline"> <span>hp0.wav </span> <span class="text-xs block">(8.75 MB)</span> </button> </div> </div> <div> <button id="detect" disabled class="bg-gray-700 hover:bg-gray-800 text-white font-normal py-2 px-4 rounded disabled:bg-gray-300 disabled:cursor-not-allowed"> Transcribe Audio </button> </div> <div> <h3 class="font-medium">Transcription:</h3> <div class="min-h-[250px] bg-slate-100 text-gray-500 p-4 rounded-md flex flex-col gap-2"> <p hidden id="output-generation" class="grid-rows-2"></p> <span id="output-status" class="m-auto 
font-light" >No transcription results yet</span > </div> </div> </main> </body> </html>
candle/candle-wasm-examples/whisper/lib-example.html/0
{ "file_path": "candle/candle-wasm-examples/whisper/lib-example.html", "repo_id": "candle", "token_count": 6488 }
41
use crate::console_log; use crate::worker::{ModelData, RunData, Worker, WorkerInput, WorkerOutput}; use wasm_bindgen::prelude::*; use wasm_bindgen_futures::JsFuture; use yew::{html, Component, Context, Html}; use yew_agent::{Bridge, Bridged}; async fn fetch_url(url: &str) -> Result<Vec<u8>, JsValue> { use web_sys::{Request, RequestCache, RequestInit, RequestMode, Response}; let window = web_sys::window().ok_or("window")?; let mut opts = RequestInit::new(); let opts = opts .method("GET") .mode(RequestMode::Cors) .cache(RequestCache::NoCache); let request = Request::new_with_str_and_init(url, opts)?; let resp_value = JsFuture::from(window.fetch_with_request(&request)).await?; // `resp_value` is a `Response` object. assert!(resp_value.is_instance_of::<Response>()); let resp: Response = resp_value.dyn_into()?; let data = JsFuture::from(resp.blob()?).await?; let blob = web_sys::Blob::from(data); let array_buffer = JsFuture::from(blob.array_buffer()).await?; let data = js_sys::Uint8Array::new(&array_buffer).to_vec(); Ok(data) } pub enum Msg { Refresh, Run, UpdateStatus(String), SetModel(ModelData), WorkerIn(WorkerInput), WorkerOut(Result<WorkerOutput, String>), } pub struct CurrentDecode { start_time: Option<f64>, } pub struct App { status: String, loaded: bool, generated: String, current_decode: Option<CurrentDecode>, worker: Box<dyn Bridge<Worker>>, } async fn model_data_load() -> Result<ModelData, JsValue> { let weights = fetch_url("yolov8s.safetensors").await?; let model_size = "s".to_string(); console_log!("loaded weights {}", weights.len()); Ok(ModelData { weights, model_size, }) } fn performance_now() -> Option<f64> { let window = web_sys::window()?; let performance = window.performance()?; Some(performance.now() / 1000.) } fn draw_bboxes(bboxes: Vec<Vec<crate::model::Bbox>>) -> Result<(), JsValue> { let document = web_sys::window().unwrap().document().unwrap(); let canvas = match document.get_element_by_id("canvas") { Some(canvas) => canvas, None => return Err("no canvas".into()), }; let canvas: web_sys::HtmlCanvasElement = canvas.dyn_into::<web_sys::HtmlCanvasElement>()?; let context = canvas .get_context("2d")? .ok_or("no 2d")? .dyn_into::<web_sys::CanvasRenderingContext2d>()?; let image_html_element = document.get_element_by_id("bike-img"); let image_html_element = match image_html_element { Some(data) => data, None => return Err("no bike-img".into()), }; let image_html_element = image_html_element.dyn_into::<web_sys::HtmlImageElement>()?; canvas.set_width(image_html_element.natural_width()); canvas.set_height(image_html_element.natural_height()); context.draw_image_with_html_image_element(&image_html_element, 0., 0.)?; context.set_stroke_style(&JsValue::from("#0dff9a")); for (class_index, bboxes_for_class) in bboxes.iter().enumerate() { for b in bboxes_for_class.iter() { let name = crate::coco_classes::NAMES[class_index]; context.stroke_rect( b.xmin as f64, b.ymin as f64, (b.xmax - b.xmin) as f64, (b.ymax - b.ymin) as f64, ); if let Ok(metrics) = context.measure_text(name) { let width = metrics.width(); context.set_fill_style(&"#3c8566".into()); context.fill_rect(b.xmin as f64 - 2., b.ymin as f64 - 12., width + 4., 14.); context.set_fill_style(&"#e3fff3".into()); context.fill_text(name, b.xmin as f64, b.ymin as f64 - 2.)? 
} } } Ok(()) } impl Component for App { type Message = Msg; type Properties = (); fn create(ctx: &Context<Self>) -> Self { let status = "loading weights".to_string(); let cb = { let link = ctx.link().clone(); move |e| link.send_message(Self::Message::WorkerOut(e)) }; let worker = Worker::bridge(std::rc::Rc::new(cb)); Self { status, generated: String::new(), current_decode: None, worker, loaded: false, } } fn rendered(&mut self, ctx: &Context<Self>, first_render: bool) { if first_render { ctx.link().send_future(async { match model_data_load().await { Err(err) => { let status = format!("{err:?}"); Msg::UpdateStatus(status) } Ok(model_data) => Msg::SetModel(model_data), } }); } } fn update(&mut self, ctx: &Context<Self>, msg: Self::Message) -> bool { match msg { Msg::SetModel(md) => { self.status = "weights loaded successfully!".to_string(); self.loaded = true; console_log!("loaded weights"); self.worker.send(WorkerInput::ModelData(md)); true } Msg::Run => { if self.current_decode.is_some() { self.status = "already processing some image at the moment".to_string() } else { let start_time = performance_now(); self.current_decode = Some(CurrentDecode { start_time }); self.status = "processing...".to_string(); self.generated.clear(); ctx.link().send_future(async { match fetch_url("bike.jpeg").await { Err(err) => { let status = format!("{err:?}"); Msg::UpdateStatus(status) } Ok(image_data) => Msg::WorkerIn(WorkerInput::RunData(RunData { image_data, conf_threshold: 0.5, iou_threshold: 0.5, })), } }); } true } Msg::WorkerOut(output) => { match output { Ok(WorkerOutput::WeightsLoaded) => self.status = "weights loaded!".to_string(), Ok(WorkerOutput::ProcessingDone(Err(err))) => { self.status = format!("error in worker process: {err}"); self.current_decode = None } Ok(WorkerOutput::ProcessingDone(Ok(bboxes))) => { let mut content = Vec::new(); for (class_index, bboxes_for_class) in bboxes.iter().enumerate() { for b in bboxes_for_class.iter() { content.push(format!( "bbox {}: xs {:.0}-{:.0} ys {:.0}-{:.0}", crate::coco_classes::NAMES[class_index], b.xmin, b.xmax, b.ymin, b.ymax )) } } self.generated = content.join("\n"); let dt = self.current_decode.as_ref().and_then(|current_decode| { current_decode.start_time.and_then(|start_time| { performance_now().map(|stop_time| stop_time - start_time) }) }); self.status = match dt { None => "processing succeeded!".to_string(), Some(dt) => format!("processing succeeded in {:.2}s", dt,), }; self.current_decode = None; if let Err(err) = draw_bboxes(bboxes) { self.status = format!("{err:?}") } } Err(err) => { self.status = format!("error in worker {err:?}"); } } true } Msg::WorkerIn(inp) => { self.worker.send(inp); true } Msg::UpdateStatus(status) => { self.status = status; true } Msg::Refresh => true, } } fn view(&self, ctx: &Context<Self>) -> Html { html! { <div style="margin: 2%;"> <div><p>{"Running an object detection model in the browser using rust/wasm with "} <a href="https://github.com/huggingface/candle" target="_blank">{"candle!"}</a> </p> <p>{"Once the weights have loaded, click on the run button to process an image."}</p> <p><img id="bike-img" src="bike.jpeg"/></p> <p>{"Source: "}<a href="https://commons.wikimedia.org/wiki/File:V%C3%A9lo_parade_-_V%C3%A9lorution_-_bike_critical_mass.JPG">{"wikimedia"}</a></p> </div> { if self.loaded{ html!(<button class="button" onclick={ctx.link().callback(move |_| Msg::Run)}> { "run" }</button>) }else{ html! 
{ <progress id="progress-bar" aria-label="Loading weights..."></progress> } } } <br/ > <h3> {&self.status} </h3> { if self.current_decode.is_some() { html! { <progress id="progress-bar" aria-label="generating…"></progress> } } else { html! {} } } <div> <canvas id="canvas" height="150" width="150"></canvas> </div> <blockquote> <p> { self.generated.chars().map(|c| if c == '\r' || c == '\n' { html! { <br/> } } else { html! { {c} } }).collect::<Html>() } </p> </blockquote> </div> } } }
candle/candle-wasm-examples/yolo/src/app.rs/0
{ "file_path": "candle/candle-wasm-examples/yolo/src/app.rs", "repo_id": "candle", "token_count": 5971 }
42
# Use .env.local to change these variables # DO NOT EDIT THIS FILE WITH SENSITIVE DATA MONGODB_URL=#your mongodb URL here MONGODB_DB_NAME=chat-ui MONGODB_DIRECT_CONNECTION=false COOKIE_NAME=hf-chat HF_TOKEN=#hf_<token> from from https://huggingface.co/settings/token HF_API_ROOT=https://api-inference.huggingface.co/models OPENAI_API_KEY=#your openai api key here HF_ACCESS_TOKEN=#LEGACY! Use HF_TOKEN instead # used to activate search with web functionality. disabled if none are defined. choose one of the following: YDC_API_KEY=#your docs.you.com api key here SERPER_API_KEY=#your serper.dev api key here SERPAPI_KEY=#your serpapi key here SERPSTACK_API_KEY=#your serpstack api key here USE_LOCAL_WEBSEARCH=#set to true to parse google results yourself, overrides other API keys WEBSEARCH_ALLOWLIST=`[]` # if it's defined, allow websites from only this list. WEBSEARCH_BLOCKLIST=`[]` # if it's defined, block websites from this list. # Parameters to enable open id login OPENID_CONFIG=`{ "PROVIDER_URL": "", "CLIENT_ID": "", "CLIENT_SECRET": "", "SCOPES": "" }` # /!\ legacy openid settings, prefer the config above OPENID_CLIENT_ID= OPENID_CLIENT_SECRET= OPENID_SCOPES="openid profile" # Add "email" for some providers like Google that do not provide preferred_username OPENID_PROVIDER_URL=https://huggingface.co # for Google, use https://accounts.google.com OPENID_TOLERANCE= OPENID_RESOURCE= # Parameters to enable a global mTLS context for client fetch requests USE_CLIENT_CERTIFICATE=false CERT_PATH=# KEY_PATH=# CA_PATH=# CLIENT_KEY_PASSWORD=# REJECT_UNAUTHORIZED=true TEXT_EMBEDDING_MODELS = `[ { "name": "Xenova/gte-small", "displayName": "Xenova/gte-small", "description": "Local embedding model running on the server.", "chunkCharLength": 512, "endpoints": [ { "type": "transformersjs" } ] } ]` # 'name', 'userMessageToken', 'assistantMessageToken' are required MODELS=`[ { "name": "mistralai/Mistral-7B-Instruct-v0.1", "displayName": "mistralai/Mistral-7B-Instruct-v0.1", "description": "Mistral 7B is a new Apache 2.0 model, released by Mistral AI that outperforms Llama2 13B in benchmarks.", "websiteUrl": "https://mistral.ai/news/announcing-mistral-7b/", "preprompt": "", "chatPromptTemplate" : "<s>{{#each messages}}{{#ifUser}}[INST] {{#if @first}}{{#if @root.preprompt}}{{@root.preprompt}}\n{{/if}}{{/if}}{{content}} [/INST]{{/ifUser}}{{#ifAssistant}}{{content}}</s>{{/ifAssistant}}{{/each}}", "parameters": { "temperature": 0.1, "top_p": 0.95, "repetition_penalty": 1.2, "top_k": 50, "truncate": 3072, "max_new_tokens": 1024, "stop": ["</s>"] }, "promptExamples": [ { "title": "Write an email from bullet list", "prompt": "As a restaurant owner, write a professional email to the supplier to get these products every week: \n\n- Wine (x10)\n- Eggs (x24)\n- Bread (x12)" }, { "title": "Code a snake game", "prompt": "Code a basic snake game in python, give explanations for each step." }, { "title": "Assist in a task", "prompt": "How do I make a delicious lemon cheesecake?" } ] } ]` OLD_MODELS=`[]`# any removed models, `{ name: string, displayName?: string, id?: string }` TASK_MODEL= # name of the model used for tasks such as summarizing title, creating query, etc. PUBLIC_ORIGIN=#https://huggingface.co PUBLIC_SHARE_PREFIX=#https://hf.co/chat PUBLIC_GOOGLE_ANALYTICS_ID=#G-XXXXXXXX / Leave empty to disable PUBLIC_ANNOUNCEMENT_BANNERS=`[ { "title": "Code Llama 70B is available! 
🦙", "linkTitle": "try it", "linkHref": "https://huggingface.co/chat?model=codellama/CodeLlama-70b-Instruct-hf" } ]` PARQUET_EXPORT_DATASET= PARQUET_EXPORT_HF_TOKEN= PARQUET_EXPORT_SECRET= RATE_LIMIT= # requests per minute MESSAGES_BEFORE_LOGIN=# how many messages a user can send in a conversation before having to login. set to 0 to force login right away APP_BASE="" # base path of the app, e.g. /chat, left blank as default PUBLIC_APP_NAME=ChatUI # name used as title throughout the app PUBLIC_APP_ASSETS=chatui # used to find logos & favicons in static/$PUBLIC_APP_ASSETS PUBLIC_APP_COLOR=blue # can be any of tailwind colors: https://tailwindcss.com/docs/customizing-colors#default-color-palette PUBLIC_APP_DESCRIPTION=# description used throughout the app (if not set, a default one will be used) PUBLIC_APP_DATA_SHARING=#set to 1 to enable options & text regarding data sharing PUBLIC_APP_DISCLAIMER=#set to 1 to show a disclaimer on login page LLM_SUMMERIZATION=true EXPOSE_API=true # PUBLIC_APP_NAME=HuggingChat # PUBLIC_APP_ASSETS=huggingchat # PUBLIC_APP_COLOR=yellow # PUBLIC_APP_DESCRIPTION="Making the community's best AI chat models available to everyone." # PUBLIC_APP_DATA_SHARING=1 # PUBLIC_APP_DISCLAIMER=1 ENABLE_ASSISTANTS=false #set to true to enable assistants feature
chat-ui/.env/0
{ "file_path": "chat-ui/.env", "repo_id": "chat-ui", "token_count": 1922 }
43
export default { plugins: { tailwindcss: {}, autoprefixer: {}, }, };
chat-ui/postcss.config.js/0
{ "file_path": "chat-ui/postcss.config.js", "repo_id": "chat-ui", "token_count": 34 }
44
<script lang="ts"> import { base } from "$app/paths"; import { page } from "$app/stores"; import { PUBLIC_APP_DESCRIPTION, PUBLIC_APP_NAME } from "$env/static/public"; import LogoHuggingFaceBorderless from "$lib/components/icons/LogoHuggingFaceBorderless.svelte"; import Modal from "$lib/components/Modal.svelte"; import { useSettingsStore } from "$lib/stores/settings"; import { cookiesAreEnabled } from "$lib/utils/cookiesAreEnabled"; import Logo from "./icons/Logo.svelte"; const settings = useSettingsStore(); </script> <Modal on:close> <div class="flex w-full flex-col items-center gap-6 bg-gradient-to-b from-primary-500/40 via-primary-500/10 to-primary-500/0 px-5 pb-8 pt-9 text-center" > <h2 class="flex items-center text-2xl font-semibold text-gray-800"> <Logo classNames="mr-1" /> {PUBLIC_APP_NAME} </h2> <p class="text-lg font-semibold leading-snug text-gray-800" style="text-wrap: balance;"> {PUBLIC_APP_DESCRIPTION} </p> <p class="rounded-xl border bg-white/80 p-2 text-base text-gray-800"> You have reached the guest message limit, please Sign In with your Hugging Face account to continue. </p> <form action="{base}/{$page.data.loginRequired ? 'login' : 'settings'}" target="_parent" method="POST" class="flex w-full flex-col items-center gap-2" > {#if $page.data.loginRequired} <button type="submit" class="flex w-full items-center justify-center whitespace-nowrap rounded-full bg-black px-5 py-2 text-center text-lg font-semibold text-gray-100 transition-colors hover:bg-gray-900" > Sign in {#if PUBLIC_APP_NAME === "HuggingChat"} with <LogoHuggingFaceBorderless classNames="text-xl mr-1 ml-1.5" /> Hugging Face {/if} </button> {:else} <button class="flex w-full items-center justify-center whitespace-nowrap rounded-full border-2 border-black bg-black px-5 py-2 text-lg font-semibold text-gray-100 transition-colors hover:bg-gray-900" on:click={(e) => { if (!cookiesAreEnabled()) { e.preventDefault(); window.open(window.location.href, "_blank"); } $settings.ethicsModalAccepted = true; }} > Start chatting </button> {/if} </form> </div> </Modal>
chat-ui/src/lib/components/LoginModal.svelte/0
{ "file_path": "chat-ui/src/lib/components/LoginModal.svelte", "repo_id": "chat-ui", "token_count": 917 }
45
<script lang="ts"> import { webSearchParameters } from "$lib/stores/webSearchParameters"; import CarbonInformation from "~icons/carbon/information"; import Switch from "./Switch.svelte"; const toggle = () => ($webSearchParameters.useSearch = !$webSearchParameters.useSearch); </script> <div class="flex h-8 cursor-pointer select-none items-center gap-2 rounded-lg border bg-white p-1.5 shadow-sm hover:shadow-none dark:border-gray-800 dark:bg-gray-900" on:click={toggle} on:keypress={toggle} aria-checked={$webSearchParameters.useSearch} aria-label="web search toggle" role="switch" tabindex="0" > <Switch name="useSearch" bind:checked={$webSearchParameters.useSearch} on:click on:keypress /> <div class="whitespace-nowrap text-sm text-gray-800 dark:text-gray-200">Search web</div> <div class="group relative w-max"> <CarbonInformation class="text-xs text-gray-500" /> <div class="pointer-events-none absolute -top-20 left-1/2 w-max -translate-x-1/2 rounded-md bg-gray-100 p-2 opacity-0 transition-opacity group-hover:opacity-100 dark:bg-gray-800" > <p class="max-w-sm text-sm text-gray-800 dark:text-gray-200"> When enabled, the model will try to complement its answer with information queried from the web. </p> </div> </div> </div>
chat-ui/src/lib/components/WebSearchToggle.svelte/0
{ "file_path": "chat-ui/src/lib/components/WebSearchToggle.svelte", "repo_id": "chat-ui", "token_count": 447 }
46
export const PUBLIC_SEP_TOKEN = "</s>";
chat-ui/src/lib/constants/publicSepToken.ts/0
{ "file_path": "chat-ui/src/lib/constants/publicSepToken.ts", "repo_id": "chat-ui", "token_count": 16 }
47
import { error } from "@sveltejs/kit"; import { collections } from "../database"; import type { Conversation } from "$lib/types/Conversation"; import type { SharedConversation } from "$lib/types/SharedConversation"; export async function downloadFile( sha256: string, convId: Conversation["_id"] | SharedConversation["_id"] ) { const fileId = collections.bucket.find({ filename: `${convId.toString()}-${sha256}` }); let mime = ""; const content = await fileId.next().then(async (file) => { if (!file) { throw error(404, "File not found"); } if (file.metadata?.conversation !== convId.toString()) { throw error(403, "You don't have access to this file."); } mime = file.metadata?.mime; const fileStream = collections.bucket.openDownloadStream(file._id); const fileBuffer = await new Promise<Buffer>((resolve, reject) => { const chunks: Uint8Array[] = []; fileStream.on("data", (chunk) => chunks.push(chunk)); fileStream.on("error", reject); fileStream.on("end", () => resolve(Buffer.concat(chunks))); }); return fileBuffer; }); return { content, mime }; }
chat-ui/src/lib/server/files/downloadFile.ts/0
{ "file_path": "chat-ui/src/lib/server/files/downloadFile.ts", "repo_id": "chat-ui", "token_count": 383 }
48
import { writable } from "svelte/store"; export interface TitleUpdate { convId: string; title: string; } export default writable<TitleUpdate | null>(null);
chat-ui/src/lib/stores/titleUpdate.ts/0
{ "file_path": "chat-ui/src/lib/stores/titleUpdate.ts", "repo_id": "chat-ui", "token_count": 50 }
49
export interface Timestamps { createdAt: Date; updatedAt: Date; }
chat-ui/src/lib/types/Timestamps.ts/0
{ "file_path": "chat-ui/src/lib/types/Timestamps.ts", "repo_id": "chat-ui", "token_count": 23 }
50
export async function sha256(input: string): Promise<string> { const utf8 = new TextEncoder().encode(input); const hashBuffer = await crypto.subtle.digest("SHA-256", utf8); const hashArray = Array.from(new Uint8Array(hashBuffer)); const hashHex = hashArray.map((bytes) => bytes.toString(16).padStart(2, "0")).join(""); return hashHex; }
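// Usage sketch: const hash = await sha256("hello"); resolves to the lowercase 64-character hex digest in any environment exposing the Web Crypto API (crypto.subtle).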
chat-ui/src/lib/utils/sha256.ts/0
{ "file_path": "chat-ui/src/lib/utils/sha256.ts", "repo_id": "chat-ui", "token_count": 119 }
51
<script lang="ts"> import { base } from "$app/paths"; import { clickOutside } from "$lib/actions/clickOutside"; import { afterNavigate, goto } from "$app/navigation"; import { useSettingsStore } from "$lib/stores/settings"; import type { PageData } from "./$types"; import { applyAction, enhance } from "$app/forms"; import { PUBLIC_APP_NAME, PUBLIC_ORIGIN } from "$env/static/public"; import { page } from "$app/stores"; export let data: PageData; let previousPage: string = base; afterNavigate(({ from }) => { if (!from?.url.pathname.includes("settings")) { previousPage = from?.url.pathname || previousPage; } }); const settings = useSettingsStore(); </script> <svelte:head> <meta property="og:title" content={data.assistant.name + " - " + PUBLIC_APP_NAME} /> <meta property="og:type" content="link" /> <meta property="og:description" content={`Use the ${data.assistant.name} assistant inside of ${PUBLIC_APP_NAME}`} /> <meta property="og:image" content="{PUBLIC_ORIGIN || $page.url.origin}{base}/assistant/{data.assistant._id}/thumbnail.png" /> <meta property="og:url" content={$page.url.href} /> <meta name="twitter:card" content="summary_large_image" /> </svelte:head> <div class="fixed inset-0 flex items-center justify-center bg-black/80 backdrop-blur-sm dark:bg-black/50" > <dialog open use:clickOutside={() => { goto(previousPage); }} class="z-10 flex flex-col content-center items-center gap-x-10 gap-y-3 overflow-hidden rounded-2xl bg-white p-4 pt-6 text-center shadow-2xl outline-none max-sm:w-[85dvw] max-sm:px-6 md:w-96 md:grid-cols-3 md:grid-rows-[auto,1fr] md:p-8" > {#if data.assistant.avatar} <img class="size-16 flex-none rounded-full object-cover sm:size-24" src="{base}/settings/assistants/{data.assistant._id}/avatar.jpg?hash={data.assistant .avatar}" alt="avatar" /> {:else} <div class="flex size-16 flex-none items-center justify-center rounded-full bg-gray-300 text-2xl font-bold uppercase text-gray-500 sm:size-24" > {data.assistant.name[0]} </div> {/if} <h1 class="text-balance text-xl font-bold"> {data.assistant.name} </h1> {#if data.assistant.description} <h3 class="line-clamp-6 text-balance text-sm text-gray-500"> {data.assistant.description} </h3> {/if} {#if data.assistant.createdByName} <p class="mt-2 text-sm text-gray-500"> Created by <a class="hover:underline" href="https://hf.co/{data.assistant.createdByName}" target="_blank" > {data.assistant.createdByName} </a> </p> {/if} <button class="mt-4 w-full rounded-full bg-gray-200 px-4 py-2 font-semibold text-gray-700" on:click={() => { goto(previousPage); }} > Cancel </button> <form method="POST" action="{base}/settings/assistants/{data.assistant._id}?/subscribe" class="w-full" use:enhance={() => { return async ({ result }) => { // `result` is an `ActionResult` object if (result.type === "success") { $settings.activeModel = data.assistant._id; goto(`${base}`); } else { await applyAction(result); } }; }} > <button type="submit" class=" w-full rounded-full bg-black px-4 py-3 font-semibold text-white" > Start chatting </button> </form> </dialog> </div>
chat-ui/src/routes/assistant/[assistantId]/+page.svelte/0
{ "file_path": "chat-ui/src/routes/assistant/[assistantId]/+page.svelte", "repo_id": "chat-ui", "token_count": 1417 }
52
import { redirect, error } from "@sveltejs/kit"; import { getOIDCUserData, validateAndParseCsrfToken } from "$lib/server/auth"; import { z } from "zod"; import { base } from "$app/paths"; import { updateUser } from "./updateUser"; export async function load({ url, locals, cookies, request, getClientAddress }) { const { error: errorName, error_description: errorDescription } = z .object({ error: z.string().optional(), error_description: z.string().optional(), }) .parse(Object.fromEntries(url.searchParams.entries())); if (errorName) { throw error(400, errorName + (errorDescription ? ": " + errorDescription : "")); } const { code, state } = z .object({ code: z.string(), state: z.string(), }) .parse(Object.fromEntries(url.searchParams.entries())); const csrfToken = Buffer.from(state, "base64").toString("utf-8"); const validatedToken = await validateAndParseCsrfToken(csrfToken, locals.sessionId); if (!validatedToken) { throw error(403, "Invalid or expired CSRF token"); } const { userData } = await getOIDCUserData({ redirectURI: validatedToken.redirectUrl }, code); await updateUser({ userData, locals, cookies, userAgent: request.headers.get("user-agent") ?? undefined, ip: getClientAddress(), }); throw redirect(302, `${base}/`); }
chat-ui/src/routes/login/callback/+page.server.ts/0
{ "file_path": "chat-ui/src/routes/login/callback/+page.server.ts", "repo_id": "chat-ui", "token_count": 449 }
53
import { base } from "$app/paths"; import { requiresUser } from "$lib/server/auth"; import { collections } from "$lib/server/database"; import { fail, type Actions, redirect } from "@sveltejs/kit"; import { ObjectId } from "mongodb"; import { z } from "zod"; import { sha256 } from "$lib/utils/sha256"; import sharp from "sharp"; const newAsssistantSchema = z.object({ name: z.string().min(1), modelId: z.string().min(1), preprompt: z.string().min(1), description: z.string().optional(), exampleInput1: z.string().optional(), exampleInput2: z.string().optional(), exampleInput3: z.string().optional(), exampleInput4: z.string().optional(), avatar: z.union([z.instanceof(File), z.literal("null")]).optional(), }); const uploadAvatar = async (avatar: File, assistantId: ObjectId): Promise<string> => { const hash = await sha256(await avatar.text()); const upload = collections.bucket.openUploadStream(`${assistantId.toString()}`, { metadata: { type: avatar.type, hash }, }); upload.write((await avatar.arrayBuffer()) as unknown as Buffer); upload.end(); // only return the filename when upload throws a finish event or a 10s time out occurs return new Promise((resolve, reject) => { upload.once("finish", () => resolve(hash)); upload.once("error", reject); setTimeout(() => reject(new Error("Upload timed out")), 10000); }); }; export const actions: Actions = { default: async ({ request, locals, params }) => { const assistant = await collections.assistants.findOne({ _id: new ObjectId(params.assistantId), }); if (!assistant) { throw Error("Assistant not found"); } if (assistant.createdById.toString() !== (locals.user?._id ?? locals.sessionId).toString()) { throw Error("You are not the author of this assistant"); } const formData = Object.fromEntries(await request.formData()); const parse = newAsssistantSchema.safeParse(formData); if (!parse.success) { // Loop through the errors array and create a custom errors array const errors = parse.error.errors.map((error) => { return { field: error.path[0], message: error.message, }; }); return fail(400, { error: true, errors }); } // can only create assistants when logged in, IF login is setup if (!locals.user && requiresUser) { const errors = [{ field: "preprompt", message: "Must be logged in. Unauthorized" }]; return fail(400, { error: true, errors }); } const exampleInputs: string[] = [ parse?.data?.exampleInput1 ?? "", parse?.data?.exampleInput2 ?? "", parse?.data?.exampleInput3 ?? "", parse?.data?.exampleInput4 ?? 
"", ].filter((input) => !!input); const deleteAvatar = parse.data.avatar === "null"; let hash; if (parse.data.avatar && parse.data.avatar !== "null" && parse.data.avatar.size > 0) { let image; try { image = await sharp(await parse.data.avatar.arrayBuffer()) .resize(512, 512, { fit: "inside" }) .jpeg({ quality: 80 }) .toBuffer(); } catch (e) { const errors = [{ field: "avatar", message: (e as Error).message }]; return fail(400, { error: true, errors }); } const fileCursor = collections.bucket.find({ filename: assistant._id.toString() }); // Step 2: Delete the existing file if it exists let fileId = await fileCursor.next(); while (fileId) { await collections.bucket.delete(fileId._id); fileId = await fileCursor.next(); } hash = await uploadAvatar(new File([image], "avatar.jpg"), assistant._id); } else if (deleteAvatar) { // delete the avatar const fileCursor = collections.bucket.find({ filename: assistant._id.toString() }); let fileId = await fileCursor.next(); while (fileId) { await collections.bucket.delete(fileId._id); fileId = await fileCursor.next(); } } const { acknowledged } = await collections.assistants.updateOne( { _id: assistant._id, }, { $set: { name: parse.data.name, description: parse.data.description, modelId: parse.data.modelId, preprompt: parse.data.preprompt, exampleInputs, avatar: deleteAvatar ? undefined : hash ?? assistant.avatar, updatedAt: new Date(), }, } ); if (acknowledged) { throw redirect(302, `${base}/settings/assistants/${assistant._id}`); } else { throw Error("Update failed"); } }, };
chat-ui/src/routes/settings/assistants/[assistantId]/edit/+page.server.ts/0
{ "file_path": "chat-ui/src/routes/settings/assistants/[assistantId]/edit/+page.server.ts", "repo_id": "chat-ui", "token_count": 1573 }
54
<svg xmlns="http://www.w3.org/2000/svg" width="32" height="32" fill="none"> <path fill="#FFD21E" d="M4 15.55C4 9.72 8.72 5 14.55 5h4.11a9.34 9.34 0 1 1 0 18.68H7.58l-2.89 2.8a.41.41 0 0 1-.69-.3V15.55Z" /> <path fill="#32343D" d="M19.63 12.48c.37.14.52.9.9.7.71-.38.98-1.27.6-1.98a1.46 1.46 0 0 0-1.98-.61 1.47 1.47 0 0 0-.6 1.99c.17.34.74-.21 1.08-.1ZM12.72 12.48c-.37.14-.52.9-.9.7a1.47 1.47 0 0 1-.6-1.98 1.46 1.46 0 0 1 1.98-.61c.71.38.98 1.27.6 1.99-.18.34-.74-.21-1.08-.1ZM16.24 19.55c2.89 0 3.82-2.58 3.82-3.9 0-1.33-1.71.7-3.82.7-2.1 0-3.8-2.03-3.8-.7 0 1.32.92 3.9 3.8 3.9Z" /> <path fill="#FF323D" d="M18.56 18.8c-.57.44-1.33.75-2.32.75-.92 0-1.65-.27-2.2-.68.3-.63.87-1.11 1.55-1.32.12-.03.24.17.36.38.12.2.24.4.37.4s.26-.2.39-.4.26-.4.38-.36a2.56 2.56 0 0 1 1.47 1.23Z" /> </svg>
chat-ui/static/huggingchat/logo.svg/0
{ "file_path": "chat-ui/static/huggingchat/logo.svg", "repo_id": "chat-ui", "token_count": 523 }
55
import json import os import tempfile import datasets from datasets.arrow_writer import ArrowWriter from datasets.features import Array2D from utils import generate_examples, get_duration SHAPE_TEST_1 = (30, 487) SHAPE_TEST_2 = (36, 1024) SPEED_TEST_SHAPE = (100, 100) SPEED_TEST_N_EXAMPLES = 100 DEFAULT_FEATURES = datasets.Features( {"text": Array2D(SHAPE_TEST_1, dtype="float32"), "image": Array2D(SHAPE_TEST_2, dtype="float32")} ) RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__) RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json")) @get_duration def write(my_features, dummy_data, tmp_dir): with ArrowWriter(features=my_features, path=os.path.join(tmp_dir, "beta.arrow")) as writer: for key, record in dummy_data: example = my_features.encode_example(record) writer.write(example) num_examples, num_bytes = writer.finalize() @get_duration def read_unformated(feats, tmp_dir): dataset = datasets.Dataset.from_file( filename=os.path.join(tmp_dir, "beta.arrow"), info=datasets.DatasetInfo(features=feats) ) for _ in dataset: pass @get_duration def read_formatted_as_numpy(feats, tmp_dir): dataset = datasets.Dataset.from_file( filename=os.path.join(tmp_dir, "beta.arrow"), info=datasets.DatasetInfo(features=feats) ) dataset.set_format("numpy") for _ in dataset: pass @get_duration def read_batch_unformated(feats, tmp_dir): batch_size = 10 dataset = datasets.Dataset.from_file( filename=os.path.join(tmp_dir, "beta.arrow"), info=datasets.DatasetInfo(features=feats) ) for i in range(0, len(dataset), batch_size): _ = dataset[i : i + batch_size] @get_duration def read_batch_formatted_as_numpy(feats, tmp_dir): batch_size = 10 dataset = datasets.Dataset.from_file( filename=os.path.join(tmp_dir, "beta.arrow"), info=datasets.DatasetInfo(features=feats) ) dataset.set_format("numpy") for i in range(0, len(dataset), batch_size): _ = dataset[i : i + batch_size] @get_duration def read_col_unformated(feats, tmp_dir): dataset = datasets.Dataset.from_file( filename=os.path.join(tmp_dir, "beta.arrow"), info=datasets.DatasetInfo(features=feats) ) for col in feats: _ = dataset[col] @get_duration def read_col_formatted_as_numpy(feats, tmp_dir): dataset = datasets.Dataset.from_file( filename=os.path.join(tmp_dir, "beta.arrow"), info=datasets.DatasetInfo(features=feats) ) dataset.set_format("numpy") for col in feats: _ = dataset[col] def benchmark_array_xd(): times = {} read_functions = ( read_unformated, read_formatted_as_numpy, read_batch_unformated, read_batch_formatted_as_numpy, read_col_unformated, read_col_formatted_as_numpy, ) with tempfile.TemporaryDirectory() as tmp_dir: feats = datasets.Features({"image": Array2D(SPEED_TEST_SHAPE, dtype="float32")}) data = generate_examples(features=feats, num_examples=SPEED_TEST_N_EXAMPLES) times["write_array2d"] = write(feats, data, tmp_dir) for read_func in read_functions: times[read_func.__name__ + " after write_array2d"] = read_func(feats, tmp_dir) with tempfile.TemporaryDirectory() as tmp_dir: # don't use fixed length for fair comparison # feats = datasets.Features( # {"image": datasets.Sequence(datasets.Sequence(datasets.Value("float32"), SPEED_TEST_SHAPE[1]), SPEED_TEST_SHAPE[0])} # ) feats = datasets.Features({"image": datasets.Sequence(datasets.Sequence(datasets.Value("float32")))}) data = generate_examples( features=feats, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={"image": SPEED_TEST_SHAPE} ) times["write_nested_sequence"] = write(feats, data, tmp_dir) for read_func in read_functions: times[read_func.__name__ 
+ " after write_nested_sequence"] = read_func(feats, tmp_dir) with tempfile.TemporaryDirectory() as tmp_dir: # don't use fixed length for fair comparison # feats = datasets.Features( # {"image": datasets.Sequence(datasets.Value("float32"), SPEED_TEST_SHAPE[0] * SPEED_TEST_SHAPE[1])} # ) feats = datasets.Features({"image": datasets.Sequence(datasets.Value("float32"))}) data = generate_examples( features=feats, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={"image": [SPEED_TEST_SHAPE[0] * SPEED_TEST_SHAPE[1]]}, ) times["write_flattened_sequence"] = write(feats, data, tmp_dir) for read_func in read_functions: times[read_func.__name__ + " after write_flattened_sequence"] = read_func(feats, tmp_dir) with open(RESULTS_FILE_PATH, "wb") as f: f.write(json.dumps(times).encode("utf-8")) if __name__ == "__main__": # useful to run the profiler benchmark_array_xd()
datasets/benchmarks/benchmark_array_xd.py/0
{ "file_path": "datasets/benchmarks/benchmark_array_xd.py", "repo_id": "datasets", "token_count": 2176 }
56
- sections: - local: index title: 🤗 Datasets - local: quickstart title: Quickstart - local: installation title: Installation title: Get started - sections: - local: tutorial title: Overview - local: load_hub title: Load a dataset from the Hub - local: access title: Know your dataset - local: use_dataset title: Preprocess - local: metrics title: Evaluate predictions - local: create_dataset title: Create a dataset - local: upload_dataset title: Share a dataset to the Hub title: "Tutorials" - sections: - local: how_to title: Overview - sections: - local: loading title: Load - local: process title: Process - local: stream title: Stream - local: use_with_tensorflow title: Use with TensorFlow - local: use_with_pytorch title: Use with PyTorch - local: use_with_jax title: Use with JAX - local: use_with_spark title: Use with Spark - local: cache title: Cache management - local: filesystems title: Cloud storage - local: faiss_es title: Search index - local: how_to_metrics title: Metrics - local: beam title: Beam Datasets - local: troubleshoot title: Troubleshooting title: "General usage" - sections: - local: audio_load title: Load audio data - local: audio_process title: Process audio data - local: audio_dataset title: Create an audio dataset title: "Audio" - sections: - local: image_load title: Load image data - local: image_process title: Process image data - local: image_dataset title: Create an image dataset - local: depth_estimation title: Depth estimation - local: image_classification title: Image classification - local: semantic_segmentation title: Semantic segmentation - local: object_detection title: Object detection title: "Vision" - sections: - local: nlp_load title: Load text data - local: nlp_process title: Process text data title: "Text" - sections: - local: tabular_load title: Load tabular data title: "Tabular" - sections: - local: share title: Share - local: dataset_card title: Create a dataset card - local: repository_structure title: Structure your repository - local: dataset_script title: Create a dataset loading script title: "Dataset repository" title: "How-to guides" - sections: - local: about_arrow title: Datasets 🤝 Arrow - local: about_cache title: The cache - local: about_mapstyle_vs_iterable title: Dataset or IterableDataset - local: about_dataset_features title: Dataset features - local: about_dataset_load title: Build and load - local: about_map_batch title: Batch mapping - local: about_metrics title: All about metrics title: "Conceptual guides" - sections: - local: package_reference/main_classes title: Main classes - local: package_reference/builder_classes title: Builder classes - local: package_reference/loading_methods title: Loading methods - local: package_reference/table_classes title: Table Classes - local: package_reference/utilities title: Utilities - local: package_reference/task_templates title: Task templates title: "Reference"
datasets/docs/source/_toctree.yml/0
{ "file_path": "datasets/docs/source/_toctree.yml", "repo_id": "datasets", "token_count": 1247 }
57
# Create a dataset loading script <Tip> The dataset loading script is likely not needed if your dataset is in one of the following formats: CSV, JSON, JSON lines, text, images, audio or Parquet. With those formats, you should be able to load your dataset automatically with [`~datasets.load_dataset`], as long as your dataset repository has a [required structure](./repository_structure). </Tip> <Tip warning=true> In the next major release, the new safety features of 🤗 Datasets will disable running dataset loading scripts by default, and you will have to pass `trust_remote_code=True` to load datasets that require running a dataset script. </Tip> Write a dataset script to load and share datasets that consist of data files in unsupported formats or require more complex data preparation. This is a more advanced way to define a dataset than using [YAML metadata in the dataset card](./repository_structure#define-your-splits-in-yaml). A dataset script is a Python file that defines the different configurations and splits of your dataset, as well as how to download and process the data. The script can download data files from any website, or from the same dataset repository. A dataset loading script should have the same name as a dataset repository or directory. For example, a repository named `my_dataset` should contain `my_dataset.py` script. This way it can be loaded with: ``` my_dataset/ ├── README.md └── my_dataset.py ``` ```py >>> from datasets import load_dataset >>> load_dataset("path/to/my_dataset") ``` The following guide includes instructions for dataset scripts for how to: - Add dataset metadata. - Download data files. - Generate samples. - Generate dataset metadata. - Upload a dataset to the Hub. Open the [SQuAD dataset loading script](https://huggingface.co/datasets/squad/blob/main/squad.py) template to follow along on how to share a dataset. <Tip> To help you get started, try beginning with the dataset loading script [template](https://github.com/huggingface/datasets/blob/main/templates/new_dataset_script.py)! </Tip> ## Add dataset attributes The first step is to add some information, or attributes, about your dataset in [`DatasetBuilder._info`]. The most important attributes you should specify are: 1. `DatasetInfo.description` provides a concise description of your dataset. The description informs the user what's in the dataset, how it was collected, and how it can be used for a NLP task. 2. `DatasetInfo.features` defines the name and type of each column in your dataset. This will also provide the structure for each example, so it is possible to create nested subfields in a column if you want. Take a look at [`Features`] for a full list of feature types you can use. ```py datasets.Features( { "id": datasets.Value("string"), "title": datasets.Value("string"), "context": datasets.Value("string"), "question": datasets.Value("string"), "answers": datasets.Sequence( { "text": datasets.Value("string"), "answer_start": datasets.Value("int32"), } ), } ) ``` 3. `DatasetInfo.homepage` contains the URL to the dataset homepage so users can find more details about the dataset. 4. `DatasetInfo.citation` contains a BibTeX citation for the dataset. 
After you've filled out all these fields in the template, it should look like the following example from the SQuAD loading script: ```py def _info(self): return datasets.DatasetInfo( description=_DESCRIPTION, features=datasets.Features( { "id": datasets.Value("string"), "title": datasets.Value("string"), "context": datasets.Value("string"), "question": datasets.Value("string"), "answers": datasets.features.Sequence( {"text": datasets.Value("string"), "answer_start": datasets.Value("int32"),} ), } ), # No default supervised_keys (as we have to pass both question # and context as input). supervised_keys=None, homepage="https://rajpurkar.github.io/SQuAD-explorer/", citation=_CITATION, ) ``` ### Multiple configurations In some cases, your dataset may have multiple configurations. For example, the [SuperGLUE](https://huggingface.co/datasets/super_glue) dataset is a collection of 5 datasets designed to evaluate language understanding tasks. 🤗 Datasets provides [`BuilderConfig`] which allows you to create different configurations for the user to select from. Let's study the [SuperGLUE loading script](https://huggingface.co/datasets/super_glue/blob/main/super_glue.py) to see how you can define several configurations. 1. Create a [`BuilderConfig`] subclass with attributes about your dataset. These attributes can be the features of your dataset, label classes, and a URL to the data files. ```py class SuperGlueConfig(datasets.BuilderConfig): """BuilderConfig for SuperGLUE.""" def __init__(self, features, data_url, citation, url, label_classes=("False", "True"), **kwargs): """BuilderConfig for SuperGLUE. Args: features: *list[string]*, list of the features that will appear in the feature dict. Should not include "label". data_url: *string*, url to download the zip file from. citation: *string*, citation for the data set. url: *string*, url for information about the data set. label_classes: *list[string]*, the list of classes for the label if the label is present as a string. Non-string labels will be cast to either 'False' or 'True'. **kwargs: keyword arguments forwarded to super. """ # Version history: # 1.0.2: Fixed non-nondeterminism in ReCoRD. # 1.0.1: Change from the pre-release trial version of SuperGLUE (v1.9) to # the full release (v2.0). # 1.0.0: S3 (new shuffling, sharding and slicing mechanism). # 0.0.2: Initial version. super().__init__(version=datasets.Version("1.0.2"), **kwargs) self.features = features self.label_classes = label_classes self.data_url = data_url self.citation = citation self.url = url ``` 2. Create instances of your config to specify the values of the attributes of each configuration. This gives you the flexibility to specify all the name and description of each configuration. These sub-class instances should be listed under `DatasetBuilder.BUILDER_CONFIGS`: ```py class SuperGlue(datasets.GeneratorBasedBuilder): """The SuperGLUE benchmark.""" BUILDER_CONFIG_CLASS = SuperGlueConfig BUILDER_CONFIGS = [ SuperGlueConfig( name="boolq", description=_BOOLQ_DESCRIPTION, features=["question", "passage"], data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/BoolQ.zip", citation=_BOOLQ_CITATION, url="https://github.com/google-research-datasets/boolean-questions", ), ... ... 
SuperGlueConfig( name="axg", description=_AXG_DESCRIPTION, features=["premise", "hypothesis"], label_classes=["entailment", "not_entailment"], data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/AX-g.zip", citation=_AXG_CITATION, url="https://github.com/rudinger/winogender-schemas", ), ``` 3. Now, users can load a specific configuration of the dataset with the configuration `name`: ```py >>> from datasets import load_dataset >>> dataset = load_dataset('super_glue', 'boolq') ``` Additionally, users can instantiate a custom builder configuration by passing the builder configuration arguments to [`load_dataset`]: ```py >>> from datasets import load_dataset >>> dataset = load_dataset('super_glue', data_url="https://custom_url") ``` ### Default configurations Users must specify a configuration name when they load a dataset with multiple configurations. Otherwise, 🤗 Datasets will raise a `ValueError`, and prompt the user to select a configuration name. You can avoid this by setting a default dataset configuration with the `DEFAULT_CONFIG_NAME` attribute: ```py class NewDataset(datasets.GeneratorBasedBuilder): VERSION = datasets.Version("1.1.0") BUILDER_CONFIGS = [ datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"), datasets.BuilderConfig(name="second_domain", version=VERSION, description="This part of my dataset covers a second domain"), ] DEFAULT_CONFIG_NAME = "first_domain" ``` <Tip warning={true}> Only use a default configuration when it makes sense. Don't set one because it may be more convenient for the user to not specify a configuration when they load your dataset. For example, multi-lingual datasets often have a separate configuration for each language. An appropriate default may be an aggregated configuration that loads all the languages of the dataset if the user doesn't request a particular one. </Tip> ## Download data files and organize splits After you've defined the attributes of your dataset, the next step is to download the data files and organize them according to their splits. 1. Create a dictionary of URLs in the loading script that point to the original SQuAD data files: ```py _URL = "https://rajpurkar.github.io/SQuAD-explorer/dataset/" _URLS = { "train": _URL + "train-v1.1.json", "dev": _URL + "dev-v1.1.json", } ``` <Tip> If the data files live in the same folder or repository of the dataset script, you can just pass the relative paths to the files instead of URLs. </Tip> 2. [`DownloadManager.download_and_extract`] takes this dictionary and downloads the data files. Once the files are downloaded, use [`SplitGenerator`] to organize each split in the dataset. This is a simple class that contains: - The `name` of each split. You should use the standard split names: `Split.TRAIN`, `Split.TEST`, and `Split.VALIDATION`. - `gen_kwargs` provides the file paths to the data files to load for each split. Your `DatasetBuilder._split_generator()` should look like this now: ```py def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]: urls_to_download = self._URLS downloaded_files = dl_manager.download_and_extract(urls_to_download) return [ datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}), datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}), ] ``` ## Generate samples At this point, you have: - Added the dataset attributes. 
- Provided instructions for how to download the data files. - Organized the splits. The next step is to actually generate the samples in each split. 1. `DatasetBuilder._generate_examples` takes the file path provided by `gen_kwargs` to read and parse the data files. You need to write a function that loads the data files and extracts the columns. 2. Your function should yield a tuple of an `id_`, and an example from the dataset. ```py def _generate_examples(self, filepath): """This function returns the examples in the raw (text) form.""" logger.info("generating examples from = %s", filepath) with open(filepath) as f: squad = json.load(f) for article in squad["data"]: title = article.get("title", "").strip() for paragraph in article["paragraphs"]: context = paragraph["context"].strip() for qa in paragraph["qas"]: question = qa["question"].strip() id_ = qa["id"] answer_starts = [answer["answer_start"] for answer in qa["answers"]] answers = [answer["text"].strip() for answer in qa["answers"]] # Features currently used are "context", "question", and "answers". # Others are extracted here for the ease of future expansions. yield id_, { "title": title, "context": context, "question": question, "id": id_, "answers": {"answer_start": answer_starts, "text": answers,}, } ``` ## (Optional) Generate dataset metadata Adding dataset metadata is a great way to include information about your dataset. The metadata is stored in the dataset card `README.md` in YAML. It includes information like the number of examples required to confirm the dataset was correctly generated, and information about the dataset like its `features`. Run the following command to generate your dataset metadata in `README.md` and make sure your new dataset loading script works correctly: ``` datasets-cli test path/to/<your-dataset-loading-script> --save_info --all_configs ``` If your dataset loading script passed the test, you should now have a `README.md` file in your dataset folder containing a `dataset_info` field with some metadata. ## Upload to the Hub Once your script is ready, [create a dataset card](dataset_card) and [upload it to the Hub](share). Congratulations, you can now load your dataset from the Hub! 🥳 ```py >>> from datasets import load_dataset >>> load_dataset("<username>/my_dataset") ``` ## Advanced features ### Sharding If your dataset is made of many big files, 🤗 Datasets automatically runs your script in parallel to make it super fast! It can help if you have hundreds or thousands of TAR archives, or JSONL files like [oscar](https://huggingface.co/datasets/oscar/blob/main/oscar.py) for example. To make it work, we consider lists of files in `gen_kwargs` to be shards. Therefore 🤗 Datasets can automatically spawn several workers to run `_generate_examples` in parallel, and each worker is given a subset of shards to process. ```python class MyShardedDataset(datasets.GeneratorBasedBuilder): def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]: downloaded_files = dl_manager.download([f"data/shard_{i}.jsonl" for i in range(1024)]) return [ datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": downloaded_files}), ] def _generate_examples(self, filepaths): # Each worker can be given a slice of the original `filepaths` list defined in the `gen_kwargs` # so that this code can run in parallel on several shards at the same time for filepath in filepaths: ... 
``` Users can also specify `num_proc=` in `load_dataset()` to specify the number of processes to use as workers. ### ArrowBasedBuilder For some datasets it can be much faster to yield batches of data rather than examples one by one. You can speed up the dataset generation by yielding Arrow tables directly, instead of examples. This is especially useful if your data comes from Pandas DataFrames for example, since the conversion from Pandas to Arrow is as simple as: ```python import pyarrow as pa pa_table = pa.Table.from_pandas(df) ``` To yield Arrow tables instead of single examples, make your dataset builder inherit from [`ArrowBasedBuilder`] instead of [`GeneratorBasedBuilder`], and use `_generate_tables` instead of `_generate_examples`: ```python class MySuperFastDataset(datasets.ArrowBasedBuilder): def _generate_tables(self, filepaths): idx = 0 for filepath in filepaths: ... yield idx, pa_table idx += 1 ``` Don't forget to keep your script memory efficient, in case users run them on machines with a low amount of RAM.
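As a closing illustration (not part of the original guide), the sharded layout above pairs naturally with the `num_proc=` option mentioned earlier; the repository name is the placeholder used throughout this guide: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("<username>/my_dataset", num_proc=8) ```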
datasets/docs/source/dataset_script.mdx/0
{ "file_path": "datasets/docs/source/dataset_script.mdx", "repo_id": "datasets", "token_count": 5380 }
58
# Evaluate predictions <Tip warning={true}> Metrics is deprecated in 🤗 Datasets. To learn more about how to use metrics, take a look at the library 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index)! In addition to metrics, you can find more tools for evaluating models and datasets. </Tip> 🤗 Datasets provides various common and NLP-specific [metrics](https://huggingface.co/metrics) for you to measure your models performance. In this section of the tutorials, you will load a metric and use it to evaluate your models predictions. You can see what metrics are available with [`list_metrics`]: ```py >>> from datasets import list_metrics >>> metrics_list = list_metrics() >>> len(metrics_list) 28 >>> print(metrics_list) ['accuracy', 'bertscore', 'bleu', 'bleurt', 'cer', 'comet', 'coval', 'cuad', 'f1', 'gleu', 'glue', 'indic_glue', 'matthews_correlation', 'meteor', 'pearsonr', 'precision', 'recall', 'rouge', 'sacrebleu', 'sari', 'seqeval', 'spearmanr', 'squad', 'squad_v2', 'super_glue', 'wer', 'wiki_split', 'xnli'] ``` ## Load metric It is very easy to load a metric with 🤗 Datasets. In fact, you will notice that it is very similar to loading a dataset! Load a metric from the Hub with [`load_metric`]: ```py >>> from datasets import load_metric >>> metric = load_metric('glue', 'mrpc') ``` This will load the metric associated with the MRPC dataset from the GLUE benchmark. ## Select a configuration If you are using a benchmark dataset, you need to select a metric that is associated with the configuration you are using. Select a metric configuration by providing the configuration name: ```py >>> metric = load_metric('glue', 'mrpc') ``` ## Metrics object Before you begin using a [`Metric`] object, you should get to know it a little better. As with a dataset, you can return some basic information about a metric. For example, access the `inputs_description` parameter in [`datasets.MetricInfo`] to get more information about a metrics expected input format and some usage examples: ```py >>> print(metric.inputs_description) Compute GLUE evaluation metric associated to each GLUE dataset. Args: predictions: list of predictions to score. Each translation should be tokenized into a list of tokens. references: list of lists of references for each translation. Each reference should be tokenized into a list of tokens. Returns: depending on the GLUE subset, one or several of: "accuracy": Accuracy "f1": F1 score "pearson": Pearson Correlation "spearmanr": Spearman Correlation "matthews_correlation": Matthew Correlation Examples: >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"] >>> references = [0, 1] >>> predictions = [0, 1] >>> results = glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0} ... >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp' >>> references = [0, 1] >>> predictions = [0, 1] >>> results = glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0, 'f1': 1.0} ... ``` Notice for the MRPC configuration, the metric expects the input format to be zero or one. For a complete list of attributes you can return with your metric, take a look at [`MetricInfo`]. ## Compute metric Once you have loaded a metric, you are ready to use it to evaluate a models predictions. 
Provide the model predictions and references to [`~datasets.Metric.compute`]: ```py >>> model_predictions = model(model_inputs) >>> final_score = metric.compute(predictions=model_predictions, references=gold_references) ```
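For a fully self-contained sketch, reuse the MRPC configuration loaded above with dummy predictions in place of real model outputs: ```py >>> from datasets import load_metric >>> metric = load_metric('glue', 'mrpc') >>> metric.compute(predictions=[0, 1], references=[0, 1]) {'accuracy': 1.0, 'f1': 1.0} ```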
datasets/docs/source/metrics.mdx/0
{ "file_path": "datasets/docs/source/metrics.mdx", "repo_id": "datasets", "token_count": 1193 }
59
# Load tabular data A tabular dataset is a generic dataset used to describe any data stored in rows and columns, where the rows represent an example and the columns represent a feature (can be continuous or categorical). These datasets are commonly stored in CSV files, Pandas DataFrames, and in database tables. This guide will show you how to load and create a tabular dataset from: - CSV files - Pandas DataFrames - Databases ## CSV files 🤗 Datasets can read CSV files by specifying the generic `csv` dataset builder name in the [`~datasets.load_dataset`] method. To load more than one CSV file, pass them as a list to the `data_files` parameter: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("csv", data_files="my_file.csv") # load multiple CSV files >>> dataset = load_dataset("csv", data_files=["my_file_1.csv", "my_file_2.csv", "my_file_3.csv"]) ``` You can also map specific CSV files to the train and test splits: ```py >>> dataset = load_dataset("csv", data_files={"train": ["my_train_file_1.csv", "my_train_file_2.csv"], "test": "my_test_file.csv"}) ``` To load remote CSV files, pass the URLs instead: ```py >>> base_url = "https://huggingface.co/datasets/lhoestq/demo1/resolve/main/data/" >>> dataset = load_dataset('csv', data_files={"train": base_url + "train.csv", "test": base_url + "test.csv"}) ``` To load zipped CSV files: ```py >>> url = "https://domain.org/train_data.zip" >>> data_files = {"train": url} >>> dataset = load_dataset("csv", data_files=data_files) ``` ## Pandas DataFrames 🤗 Datasets also supports loading datasets from [Pandas DataFrames](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html) with the [`~datasets.Dataset.from_pandas`] method: ```py >>> from datasets import Dataset >>> import pandas as pd # create a Pandas DataFrame >>> df = pd.read_csv("https://huggingface.co/datasets/imodels/credit-card/raw/main/train.csv") >>> df = pd.DataFrame(df) # load Dataset from Pandas DataFrame >>> dataset = Dataset.from_pandas(df) ``` Use the `splits` parameter to specify the name of the dataset split: ```py >>> train_ds = Dataset.from_pandas(train_df, split="train") >>> test_ds = Dataset.from_pandas(test_df, split="test") ``` If the dataset doesn't look as expected, you should explicitly [specify your dataset features](loading#specify-features). A [pandas.Series](https://pandas.pydata.org/docs/reference/api/pandas.Series.html) may not always carry enough information for Arrow to automatically infer a data type. For example, if a DataFrame is of length `0` or if the Series only contains `None/NaN` objects, the type is set to `null`. ## Databases Datasets stored in databases are typically accessed with SQL queries. With 🤗 Datasets, you can connect to a database, query for the data you need, and create a dataset out of it. Then you can use all the processing features of 🤗 Datasets to prepare your dataset for training. ### SQLite SQLite is a small, lightweight database that is fast and easy to set up. You can use an existing database if you'd like, or follow along and start from scratch. 
Start by creating a quick SQLite database with this [Covid-19 data](https://github.com/nytimes/covid-19-data/blob/master/us-states.csv) from the New York Times: ```py >>> import sqlite3 >>> import pandas as pd >>> conn = sqlite3.connect("us_covid_data.db") >>> df = pd.read_csv("https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-states.csv") >>> df.to_sql("states", conn, if_exists="replace") ``` This creates a `states` table in the `us_covid_data.db` database which you can now load into a dataset. To connect to the database, you'll need the [URI string](https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls) that identifies your database. Connecting to a database with a URI caches the returned dataset. The URI string differs for each database dialect, so be sure to check the [Database URLs](https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls) for whichever database you're using. For SQLite, it is: ```py >>> uri = "sqlite:///us_covid_data.db" ``` Load the table by passing the table name and URI to [`~datasets.Dataset.from_sql`]: ```py >>> from datasets import Dataset >>> ds = Dataset.from_sql("states", uri) >>> ds Dataset({ features: ['index', 'date', 'state', 'fips', 'cases', 'deaths'], num_rows: 54382 }) ``` Then you can use all of 🤗 Datasets process features like [`~datasets.Dataset.filter`] for example: ```py >>> ds.filter(lambda x: x["state"] == "California") ``` You can also load a dataset from a SQL query instead of an entire table, which is useful for querying and joining multiple tables. Load the dataset by passing your query and URI to [`~datasets.Dataset.from_sql`]: ```py >>> from datasets import Dataset >>> ds = Dataset.from_sql('SELECT * FROM states WHERE state="California";', uri) >>> ds Dataset({ features: ['index', 'date', 'state', 'fips', 'cases', 'deaths'], num_rows: 1019 }) ``` Then you can use all of 🤗 Datasets process features like [`~datasets.Dataset.filter`] for example: ```py >>> ds.filter(lambda x: x["cases"] > 10000) ``` ### PostgreSQL You can also connect and load a dataset from a PostgreSQL database, however we won't directly demonstrate how in the documentation because the example is only meant to be run in a notebook. Instead, take a look at how to install and setup a PostgreSQL server in this [notebook](https://colab.research.google.com/github/nateraw/huggingface-hub-examples/blob/main/sql_with_huggingface_datasets.ipynb#scrollTo=d83yGQMPHGFi)! After you've setup your PostgreSQL database, you can use the [`~datasets.Dataset.from_sql`] method to load a dataset from a table or query.
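The call itself mirrors the SQLite example, only the URI scheme changes; the connection details below are placeholders and a SQLAlchemy-compatible PostgreSQL driver (e.g. psycopg2) is assumed to be installed: ```py >>> from datasets import Dataset >>> uri = "postgresql://username:password@localhost:5432/us_covid_data" >>> ds = Dataset.from_sql("states", uri) ```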
datasets/docs/source/tabular_load.mdx/0
{ "file_path": "datasets/docs/source/tabular_load.mdx", "repo_id": "datasets", "token_count": 1868 }
60
# Copyright 2020 The HuggingFace Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ BLEURT metric. """ import os from bleurt import score # From: git+https://github.com/google-research/bleurt.git import datasets logger = datasets.logging.get_logger(__name__) _CITATION = """\ @inproceedings{bleurt, title={BLEURT: Learning Robust Metrics for Text Generation}, author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh}, booktitle={ACL}, year={2020}, url={https://arxiv.org/abs/2004.04696} } """ _DESCRIPTION = """\ BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018) and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune it for your specific application (the latter is expected to perform better). See the project's README at https://github.com/google-research/bleurt#readme for more information. """ _KWARGS_DESCRIPTION = """ BLEURT score. Args: `predictions` (list of str): prediction/candidate sentences `references` (list of str): reference sentences `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None. Returns: 'scores': List of scores. 
Examples: >>> predictions = ["hello there", "general kenobi"] >>> references = ["hello there", "general kenobi"] >>> bleurt = datasets.load_metric("bleurt") >>> results = bleurt.compute(predictions=predictions, references=references) >>> print([round(v, 2) for v in results["scores"]]) [1.03, 1.04] """ CHECKPOINT_URLS = { "bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip", "bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip", "bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip", "bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip", "bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip", "bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip", "BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip", "BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip", "BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip", "BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip", } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) class BLEURT(datasets.Metric): def _info(self): return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, homepage="https://github.com/google-research/bleurt", inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { "predictions": datasets.Value("string", id="sequence"), "references": datasets.Value("string", id="sequence"), } ), codebase_urls=["https://github.com/google-research/bleurt"], reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"], ) def _download_and_prepare(self, dl_manager): # check that config name specifies a valid BLEURT model if self.config_name == "default": logger.warning( "Using default BLEURT-Base checkpoint for sequence maximum length 128. " "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')." ) self.config_name = "bleurt-base-128" if self.config_name.lower() in CHECKPOINT_URLS: checkpoint_name = self.config_name.lower() elif self.config_name.upper() in CHECKPOINT_URLS: checkpoint_name = self.config_name.upper() else: raise KeyError( f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}" ) # download the model checkpoint specified by self.config_name and set up the scorer model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name]) self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name)) def _compute(self, predictions, references): scores = self.scorer.score(references=references, candidates=predictions) return {"scores": scores}
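# Illustrative usage sketch (an addition for this card, not part of the original
# module): it requires the `bleurt` pip package and a working TensorFlow install,
# and the exact scores depend on the chosen checkpoint, so none are asserted here.
if __name__ == "__main__":
    import datasets

    # Pick any key from CHECKPOINT_URLS; the "default" config falls back to bleurt-base-128.
    bleurt = datasets.load_metric("bleurt", "bleurt-tiny-128")
    predictions = ["hello there", "general kenobi"]
    references = ["hello there", "general kenobi"]
    results = bleurt.compute(predictions=predictions, references=references)
    print(results["scores"])  # one float per prediction/reference pair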
datasets/metrics/bleurt/bleurt.py/0
{ "file_path": "datasets/metrics/bleurt/bleurt.py", "repo_id": "datasets", "token_count": 1982 }
61
# Copyright 2020 The HuggingFace Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ CUAD metric. """ import datasets from .evaluate import evaluate _CITATION = """\ @article{hendrycks2021cuad, title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review}, author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball}, journal={arXiv preprint arXiv:2103.06268}, year={2021} } """ _DESCRIPTION = """ This metric wrap the official scoring script for version 1 of the Contract Understanding Atticus Dataset (CUAD). Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510 commercial legal contracts that have been manually labeled to identify 41 categories of important clauses that lawyers look for when reviewing contracts in connection with corporate transactions. """ _KWARGS_DESCRIPTION = """ Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall). Args: predictions: List of question-answers dictionaries with the following key-values: - 'id': id of the question-answer pair as given in the references (see below) - 'prediction_text': list of possible texts for the answer, as a list of strings depending on a threshold on the confidence probability of each prediction. references: List of question-answers dictionaries with the following key-values: - 'id': id of the question-answer pair (see above), - 'answers': a Dict in the CUAD dataset format { 'text': list of possible texts for the answer, as a list of strings 'answer_start': list of start positions for the answer, as a list of ints } Note that answer_start values are not taken into account to compute the metric. 
Returns: 'exact_match': Exact match (the normalized answer exactly match the gold answer) 'f1': The F-score of predicted tokens versus the gold answer 'aupr': Area Under the Precision-Recall curve 'prec_at_80_recall': Precision at 80% recall 'prec_at_90_recall': Precision at 90% recall Examples: >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}] >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}] >>> cuad_metric = datasets.load_metric("cuad") >>> results = cuad_metric.compute(predictions=predictions, references=references) >>> print(results) {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) class CUAD(datasets.Metric): def _info(self): return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { "predictions": { "id": datasets.Value("string"), "prediction_text": datasets.features.Sequence(datasets.Value("string")), }, "references": { "id": datasets.Value("string"), "answers": datasets.features.Sequence( { "text": datasets.Value("string"), "answer_start": datasets.Value("int32"), } ), }, } ), codebase_urls=["https://www.atticusprojectai.org/cuad"], reference_urls=["https://www.atticusprojectai.org/cuad"], ) def _compute(self, predictions, references): pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions} dataset = [ { "paragraphs": [ { "qas": [ { "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]], "id": ref["id"], } for ref in references ] } ] } ] score = evaluate(dataset=dataset, predictions=pred_dict) return score
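# Illustrative usage sketch (added here for clarity, not part of the original
# module). The id below is a placeholder; predictions and references are matched
# through it, and the `answer_start` values are ignored by the metric.
if __name__ == "__main__":
    import datasets

    cuad_metric = datasets.load_metric("cuad")
    predictions = [{"id": "example-contract__Parties", "prediction_text": ["The seller:"]}]
    references = [
        {"id": "example-contract__Parties", "answers": {"text": ["The seller:"], "answer_start": [0]}}
    ]
    results = cuad_metric.compute(predictions=predictions, references=references)
    # Keys: exact_match, f1, aupr, prec_at_80_recall, prec_at_90_recall
    print(results)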
datasets/metrics/cuad/cuad.py/0
{ "file_path": "datasets/metrics/cuad/cuad.py", "repo_id": "datasets", "token_count": 2242 }
62
# Metric Card for Mahalanobis Distance

## Metric Description
Mahalanobis distance is the distance between a point and a distribution (as opposed to the distance between two points), making it the multivariate equivalent of the Euclidean distance. It is often used in multivariate anomaly detection, classification on highly imbalanced datasets and one-class classification.

## How to Use
At minimum, this metric requires two `list`s of datapoints:

```python
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
```

### Inputs
- `X` (`list`): data points to be compared with the `reference_distribution`.
- `reference_distribution` (`list`): data points from the reference distribution that we want to compare to.

### Output Values
`mahalanobis` (`array`): the Mahalanobis distance for each data point in `X`.

```python
>>> print(results)
{'mahalanobis': array([0.5])}
```

#### Values from Popular Papers
*N/A*

### Example

```python
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{'mahalanobis': array([0.5])}
```

## Limitations and Bias
The Mahalanobis distance is only able to capture linear relationships between the variables, which means it cannot capture all types of outliers. Mahalanobis distance also fails to faithfully represent data that is highly skewed or multimodal.

## Citation
```bibtex
@inproceedings{mahalanobis1936generalized,
  title={On the generalized distance in statistics},
  author={Mahalanobis, Prasanta Chandra},
  year={1936},
  organization={National Institute of Science of India}
}
```

```bibtex
@article{de2000mahalanobis,
  title={The Mahalanobis distance},
  author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
  journal={Chemometrics and intelligent laboratory systems},
  volume={50},
  number={1},
  pages={1--18},
  year={2000},
  publisher={Elsevier}
}
```

## Further References
- [Wikipedia -- Mahalanobis Distance](https://en.wikipedia.org/wiki/Mahalanobis_distance)
- [Machine Learning Plus -- Mahalanobis Distance](https://www.machinelearningplus.com/statistics/mahalanobis-distance/)
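To see where that `0.5` comes from, here is a back-of-the-envelope NumPy sketch of the squared Mahalanobis distance, d²(x) = (x − μ)ᵀ Σ⁻¹ (x − μ), on the same toy inputs as the example above. It happens to reproduce the value shown, but it is only an illustration, not the metric's actual implementation (which may differ in numerical details):

```python
>>> import numpy as np

>>> reference_distribution = np.array([[0, 1], [1, 0]], dtype=float)
>>> X = np.array([[0, 1]], dtype=float)

>>> mu = reference_distribution.mean(axis=0)
>>> cov = np.cov(reference_distribution, rowvar=False)
>>> cov_inv = np.linalg.pinv(cov)  # pseudo-inverse, since two points give a singular covariance

>>> delta = X - mu
>>> print(np.einsum("ij,jk,ik->i", delta, cov_inv, delta))
[0.5]
```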
datasets/metrics/mahalanobis/README.md/0
{ "file_path": "datasets/metrics/mahalanobis/README.md", "repo_id": "datasets", "token_count": 738 }
63
# Metric Card for Precision ## Metric Description Precision is the fraction of correctly labeled positive examples out of all of the examples that were labeled as positive. It is computed via the equation: Precision = TP / (TP + FP) where TP is the True positives (i.e. the examples correctly labeled as positive) and FP is the False positive examples (i.e. the examples incorrectly labeled as positive). ## How to Use At minimum, precision takes as input a list of predicted labels, `predictions`, and a list of output labels, `references`. ```python >>> precision_metric = datasets.load_metric("precision") >>> results = precision_metric.compute(references=[0, 1], predictions=[0, 1]) >>> print(results) {'precision': 1.0} ``` ### Inputs - **predictions** (`list` of `int`): Predicted class labels. - **references** (`list` of `int`): Actual class labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`. If `average` is `None`, it should be the label order. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None. - **pos_label** (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary. - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives. - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall. - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights Defaults to None. - **zero_division** (): Sets the value to return when there is a zero division. Defaults to . - 0: Returns 0 when there is a zero division. - 1: Returns 1 when there is a zero division. - 'warn': Raises warnings and then returns 0 when there is a zero division. ### Output Values - **precision**(`float` or `array` of `float`): Precision score or list of precision scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate that fewer negative examples were incorrectly labeled as positive, which means that, generally, higher scores are better. 
Output Example(s): ```python {'precision': 0.2222222222222222} ``` ```python {'precision': array([0.66666667, 0.0, 0.0])} ``` #### Values from Popular Papers ### Examples Example 1-A simple binary example ```python >>> precision_metric = datasets.load_metric("precision") >>> results = precision_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0]) >>> print(results) {'precision': 0.5} ``` Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`. ```python >>> precision_metric = datasets.load_metric("precision") >>> results = precision_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0) >>> print(round(results['precision'], 2)) 0.67 ``` Example 3-The same simple binary example as in Example 1, but with `sample_weight` included. ```python >>> precision_metric = datasets.load_metric("precision") >>> results = precision_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3]) >>> print(results) {'precision': 0.23529411764705882} ``` Example 4-A multiclass example, with different values for the `average` input. ```python >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = precision_metric.compute(predictions=predictions, references=references, average='macro') >>> print(results) {'precision': 0.2222222222222222} >>> results = precision_metric.compute(predictions=predictions, references=references, average='micro') >>> print(results) {'precision': 0.3333333333333333} >>> results = precision_metric.compute(predictions=predictions, references=references, average='weighted') >>> print(results) {'precision': 0.2222222222222222} >>> results = precision_metric.compute(predictions=predictions, references=references, average=None) >>> print([round(res, 2) for res in results['precision']]) [0.67, 0.0, 0.0] ``` ## Limitations and Bias [Precision](https://huggingface.co/metrics/precision) and [recall](https://huggingface.co/metrics/recall) are complementary and can be used to measure different aspects of model performance -- using both of them (or an averaged measure like [F1 score](https://huggingface.co/metrics/F1) to better represent different aspects of performance. See [Wikipedia](https://en.wikipedia.org/wiki/Precision_and_recall) for more information. ## Citation(s) ```bibtex @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ``` ## Further References - [Wikipedia -- Precision and recall](https://en.wikipedia.org/wiki/Precision_and_recall)
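As a sanity check on the definition above (purely illustrative, not the metric's own code), binary precision can be computed by hand for the data of Example 1:

```python
>>> references = [0, 1, 0, 1, 0]
>>> predictions = [0, 0, 1, 1, 0]

>>> tp = sum(p == 1 and r == 1 for p, r in zip(predictions, references))
>>> fp = sum(p == 1 and r == 0 for p, r in zip(predictions, references))
>>> tp / (tp + fp)  # Precision = TP / (TP + FP)
0.5
```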
datasets/metrics/precision/README.md/0
{ "file_path": "datasets/metrics/precision/README.md", "repo_id": "datasets", "token_count": 1878 }
64
# Metric Card for SQuAD ## Metric description This metric wraps the official scoring script for version 1 of the [Stanford Question Answering Dataset (SQuAD)](https://huggingface.co/datasets/squad). SQuAD is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable. ## How to use The metric takes two files or two lists of question-answers dictionaries as inputs : one with the predictions of the model and the other with the references to be compared to: ```python from datasets import load_metric squad_metric = load_metric("squad") results = squad_metric.compute(predictions=predictions, references=references) ``` ## Output values This metric outputs a dictionary with two values: the average exact match score and the average [F1 score](https://huggingface.co/metrics/f1). ``` {'exact_match': 100.0, 'f1': 100.0} ``` The range of `exact_match` is 0-100, where 0.0 means no answers were matched and 100.0 means all answers were matched. The range of `f1` is 0-1 -- its lowest possible value is 0, if either the precision or the recall is 0, and its highest possible value is 1.0, which means perfect precision and recall. ### Values from popular papers The [original SQuAD paper](https://nlp.stanford.edu/pubs/rajpurkar2016squad.pdf) reported an F1 score of 51.0% and an Exact Match score of 40.0%. They also report that human performance on the dataset represents an F1 score of 90.5% and an Exact Match score of 80.3%. For more recent model performance, see the [dataset leaderboard](https://paperswithcode.com/dataset/squad). ## Examples Maximal values for both exact match and F1 (perfect match): ```python from datasets import load_metric squad_metric = load_metric("squad") predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}] references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}] results = squad_metric.compute(predictions=predictions, references=references) results {'exact_match': 100.0, 'f1': 100.0} ``` Minimal values for both exact match and F1 (no match): ```python from datasets import load_metric squad_metric = load_metric("squad") predictions = [{'prediction_text': '1999', 'id': '56e10a3be3433e1400422b22'}] references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}] results = squad_metric.compute(predictions=predictions, references=references) results {'exact_match': 0.0, 'f1': 0.0} ``` Partial match (2 out of 3 answers correct) : ```python from datasets import load_metric squad_metric = load_metric("squad") predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}, {'prediction_text': 'Beyonce', 'id': '56d2051ce7d4791d0090260b'}, {'prediction_text': 'climate change', 'id': '5733b5344776f419006610e1'}] references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}, {'answers': {'answer_start': [233], 'text': ['Beyoncé and Bruno Mars']}, 'id': '56d2051ce7d4791d0090260b'}, {'answers': {'answer_start': [891], 'text': ['climate change']}, 'id': '5733b5344776f419006610e1'}] results = squad_metric.compute(predictions=predictions, references=references) results {'exact_match': 66.66666666666667, 'f1': 66.66666666666667} ``` ## Limitations and bias This metric works only with datasets that have the same format as [SQuAD v.1 
dataset](https://huggingface.co/datasets/squad). The SQuAD dataset does contain a certain amount of noise, such as duplicate questions as well as missing answers, but these represent a minority of the 100,000 question-answer pairs. Also, neither exact match nor F1 score reflect whether models do better on certain types of questions (e.g. who questions) or those that cover a certain gender or geographical area -- carrying out more in-depth error analysis can complement these numbers. ## Citation @inproceedings{Rajpurkar2016SQuAD10, title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text}, author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang}, booktitle={EMNLP}, year={2016} } ## Further References - [The Stanford Question Answering Dataset: Background, Challenges, Progress (blog post)](https://rajpurkar.github.io/mlx/qa-and-squad/) - [Hugging Face Course -- Question Answering](https://huggingface.co/course/chapter7/7)
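As a rough illustration of how the exact match score tolerates surface differences, the official script normalizes answers before comparing them (lowercasing, stripping punctuation and the articles a/an/the, collapsing whitespace). The sketch below mimics that normalization; it is not the scoring script itself:

```python
import re, string

def normalize_answer(s):
    # lowercase, drop punctuation, drop the articles a/an/the, collapse whitespace
    s = "".join(ch for ch in s.lower() if ch not in set(string.punctuation))
    s = re.sub(r"\b(a|an|the)\b", " ", s)
    return " ".join(s.split())

normalize_answer("The 1976 Olympics!") == normalize_answer("1976 olympics")
# True
```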
datasets/metrics/squad/README.md/0
{ "file_path": "datasets/metrics/squad/README.md", "repo_id": "datasets", "token_count": 1494 }
65
# Copyright 2020 The HuggingFace Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ XNLI benchmark metric. """ import datasets _CITATION = """\ @InProceedings{conneau2018xnli, author = "Conneau, Alexis and Rinott, Ruty and Lample, Guillaume and Williams, Adina and Bowman, Samuel R. and Schwenk, Holger and Stoyanov, Veselin", title = "XNLI: Evaluating Cross-lingual Sentence Representations", booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", year = "2018", publisher = "Association for Computational Linguistics", location = "Brussels, Belgium", } """ _DESCRIPTION = """\ XNLI is a subset of a few thousand examples from MNLI which has been translated into a 14 different languages (some low-ish resource). As with MNLI, the goal is to predict textual entailment (does sentence A imply/contradict/neither sentence B) and is a classification task (given two sentences, predict one of three labels). """ _KWARGS_DESCRIPTION = """ Computes XNLI score which is just simple accuracy. Args: predictions: Predicted labels. references: Ground truth labels. Returns: 'accuracy': accuracy Examples: >>> predictions = [0, 1] >>> references = [0, 1] >>> xnli_metric = datasets.load_metric("xnli") >>> results = xnli_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0} """ def simple_accuracy(preds, labels): return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) class Xnli(datasets.Metric): def _info(self): return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"), "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"), } ), codebase_urls=[], reference_urls=[], format="numpy", ) def _compute(self, predictions, references): return {"accuracy": simple_accuracy(predictions, references)}
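# Quick sanity check of simple_accuracy (added for illustration; the label values
# below are arbitrary). The metric declares format="numpy" precisely so that the
# element-wise comparison used here works on the converted inputs.
if __name__ == "__main__":
    import numpy as np

    preds = np.array([0, 2, 1, 1])
    labels = np.array([0, 1, 1, 1])
    print(simple_accuracy(preds, labels))  # 0.75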
datasets/metrics/xnli/xnli.py/0
{ "file_path": "datasets/metrics/xnli/xnli.py", "repo_id": "datasets", "token_count": 1107 }
66
import fnmatch import json import os import shutil import tempfile import xml.etree.ElementTree as ET from argparse import ArgumentParser from pathlib import Path from typing import Optional from datasets import config from datasets.commands import BaseDatasetsCLICommand from datasets.download.download_config import DownloadConfig from datasets.download.download_manager import DownloadManager from datasets.download.mock_download_manager import MockDownloadManager from datasets.load import dataset_module_factory, import_main_class from datasets.utils.deprecation_utils import deprecated from datasets.utils.logging import get_logger, set_verbosity_warning from datasets.utils.py_utils import map_nested logger = get_logger(__name__) DEFAULT_ENCODING = "utf-8" def dummy_data_command_factory(args): return DummyDataCommand( args.path_to_dataset, args.auto_generate, args.n_lines, args.json_field, args.xml_tag, args.match_text_files, args.keep_uncompressed, args.cache_dir, args.encoding, ) class DummyDataGeneratorDownloadManager(DownloadManager): def __init__(self, mock_download_manager, *args, **kwargs): super().__init__(*args, **kwargs) self.mock_download_manager = mock_download_manager self.downloaded_dummy_paths = [] self.expected_dummy_paths = [] def download(self, url_or_urls): output = super().download(url_or_urls) dummy_output = self.mock_download_manager.download(url_or_urls) map_nested(self.downloaded_dummy_paths.append, output, map_tuple=True) map_nested(self.expected_dummy_paths.append, dummy_output, map_tuple=True) return output def download_and_extract(self, url_or_urls): output = super().extract(super().download(url_or_urls)) dummy_output = self.mock_download_manager.download(url_or_urls) map_nested(self.downloaded_dummy_paths.append, output, map_tuple=True) map_nested(self.expected_dummy_paths.append, dummy_output, map_tuple=True) return output def auto_generate_dummy_data_folder( self, n_lines: int = 5, json_field: Optional[str] = None, xml_tag: Optional[str] = None, match_text_files: Optional[str] = None, encoding: Optional[str] = None, ) -> bool: os.makedirs( os.path.join( self.mock_download_manager.datasets_scripts_dir, self.mock_download_manager.dataset_name, self.mock_download_manager.dummy_data_folder, "dummy_data", ), exist_ok=True, ) total = 0 self.mock_download_manager.load_existing_dummy_data = False for src_path, relative_dst_path in zip(self.downloaded_dummy_paths, self.expected_dummy_paths): dst_path = os.path.join( self.mock_download_manager.datasets_scripts_dir, self.mock_download_manager.dataset_name, self.mock_download_manager.dummy_data_folder, relative_dst_path, ) total += self._create_dummy_data( src_path, dst_path, n_lines=n_lines, json_field=json_field, xml_tag=xml_tag, match_text_files=match_text_files, encoding=encoding, ) if total == 0: logger.error( "Dummy data generation failed: no dummy files were created. " "Make sure the data files format is supported by the auto-generation." 
) return total > 0 def _create_dummy_data( self, src_path: str, dst_path: str, n_lines: int, json_field: Optional[str] = None, xml_tag: Optional[str] = None, match_text_files: Optional[str] = None, encoding: Optional[str] = None, ) -> int: encoding = encoding or DEFAULT_ENCODING if os.path.isfile(src_path): logger.debug(f"Trying to generate dummy data file {dst_path}") dst_path_extensions = Path(dst_path).suffixes line_by_line_extensions = [".txt", ".csv", ".jsonl", ".tsv"] is_line_by_line_text_file = any(extension in dst_path_extensions for extension in line_by_line_extensions) if match_text_files is not None: file_name = os.path.basename(dst_path) for pattern in match_text_files.split(","): is_line_by_line_text_file |= fnmatch.fnmatch(file_name, pattern) # Line by line text file (txt, csv etc.) if is_line_by_line_text_file: Path(dst_path).parent.mkdir(exist_ok=True, parents=True) with open(src_path, encoding=encoding) as src_file: with open(dst_path, "w", encoding=encoding) as dst_file: first_lines = [] for i, line in enumerate(src_file): if i >= n_lines: break first_lines.append(line) dst_file.write("".join(first_lines).strip()) return 1 # json file elif ".json" in dst_path_extensions: with open(src_path, encoding=encoding) as src_file: json_data = json.load(src_file) if json_field is not None: json_data = json_data[json_field] if isinstance(json_data, dict): if not all(isinstance(v, list) for v in json_data.values()): raise ValueError( f"Couldn't parse columns {list(json_data.keys())}. " "Maybe specify which json field must be used " "to read the data with --json_field <my_field>." ) first_json_data = {k: v[:n_lines] for k, v in json_data.items()} else: first_json_data = json_data[:n_lines] if json_field is not None: first_json_data = {json_field: first_json_data} Path(dst_path).parent.mkdir(exist_ok=True, parents=True) with open(dst_path, "w", encoding=encoding) as dst_file: json.dump(first_json_data, dst_file) return 1 # xml file elif any(extension in dst_path_extensions for extension in [".xml", ".txm"]): if xml_tag is None: logger.warning("Found xml file but 'xml_tag' is set to None. Please provide --xml_tag") else: self._create_xml_dummy_data(src_path, dst_path, xml_tag, n_lines=n_lines, encoding=encoding) return 1 logger.warning( f"Couldn't generate dummy file '{dst_path}'. " "Ignore that if this file is not useful for dummy data." ) return 0 # directory, iterate through all files elif os.path.isdir(src_path): total = 0 for path, _, files in os.walk(src_path): for name in files: if not name.startswith("."): # ignore files like .DS_Store etc. 
src_file_path = os.path.join(path, name) dst_file_path = os.path.join(dst_path, Path(src_file_path).relative_to(src_path)) total += self._create_dummy_data( src_file_path, dst_file_path, n_lines=n_lines, json_field=json_field, xml_tag=xml_tag, match_text_files=match_text_files, encoding=encoding, ) return total @staticmethod def _create_xml_dummy_data(src_path, dst_path, xml_tag, n_lines=5, encoding=DEFAULT_ENCODING): Path(dst_path).parent.mkdir(exist_ok=True, parents=True) with open(src_path, encoding=encoding) as src_file: n_line = 0 parents = [] for event, elem in ET.iterparse(src_file, events=("start", "end")): if event == "start": parents.append(elem) else: _ = parents.pop() if elem.tag == xml_tag: if n_line < n_lines: n_line += 1 else: if parents: parents[-1].remove(elem) ET.ElementTree(element=elem).write(dst_path, encoding=encoding) def compress_autogenerated_dummy_data(self, path_to_dataset): root_dir = os.path.join(path_to_dataset, self.mock_download_manager.dummy_data_folder) base_name = os.path.join(root_dir, "dummy_data") base_dir = "dummy_data" logger.info(f"Compressing dummy data folder to '{base_name}.zip'") shutil.make_archive(base_name, "zip", root_dir, base_dir) shutil.rmtree(base_name) @deprecated( "The `datasets` repository does not host the dataset scripts anymore. Therefore, dummy data is no longer needed to test their loading with CI." ) class DummyDataCommand(BaseDatasetsCLICommand): @staticmethod def register_subcommand(parser: ArgumentParser): test_parser = parser.add_parser("dummy_data", help="Generate dummy data.") test_parser.add_argument("--auto_generate", action="store_true", help="Automatically generate dummy data") test_parser.add_argument( "--n_lines", type=int, default=5, help="Number of lines or samples to keep when auto-generating dummy data" ) test_parser.add_argument( "--json_field", type=str, default=None, help="Optional, json field to read the data from when auto-generating dummy data. In the json data files, this field must point to a list of samples as json objects (ex: the 'data' field for squad-like files)", ) test_parser.add_argument( "--xml_tag", type=str, default=None, help="Optional, xml tag name of the samples inside the xml files when auto-generating dummy data.", ) test_parser.add_argument( "--match_text_files", type=str, default=None, help="Optional, a comma separated list of file patterns that looks for line-by-line text files other than *.txt or *.csv. Example: --match_text_files *.label", ) test_parser.add_argument( "--keep_uncompressed", action="store_true", help="Whether to leave the dummy data folders uncompressed when auto-generating dummy data. Useful for debugging for to do manual adjustements before compressing.", ) test_parser.add_argument( "--cache_dir", type=str, default=None, help="Cache directory to download and cache files when auto-generating dummy data", ) test_parser.add_argument( "--encoding", type=str, default=None, help=f"Encoding to use when auto-generating dummy data. 
Defaults to {DEFAULT_ENCODING}", ) test_parser.add_argument("path_to_dataset", type=str, help="Path to the dataset (example: ./datasets/squad)") test_parser.set_defaults(func=dummy_data_command_factory) def __init__( self, path_to_dataset: str, auto_generate: bool, n_lines: int, json_field: Optional[str], xml_tag: Optional[str], match_text_files: Optional[str], keep_uncompressed: bool, cache_dir: Optional[str], encoding: Optional[str], ): self._path_to_dataset = path_to_dataset if os.path.isdir(path_to_dataset): self._dataset_name = path_to_dataset.replace(os.sep, "/").split("/")[-1] else: self._dataset_name = path_to_dataset.replace(os.sep, "/").split("/")[-2] cache_dir = os.path.expanduser(cache_dir or config.HF_DATASETS_CACHE) self._auto_generate = auto_generate self._n_lines = n_lines self._json_field = json_field self._xml_tag = xml_tag self._match_text_files = match_text_files self._keep_uncompressed = keep_uncompressed self._cache_dir = cache_dir self._encoding = encoding def run(self): set_verbosity_warning() dataset_module = dataset_module_factory(self._path_to_dataset) builder_cls = import_main_class(dataset_module.module_path) # use `None` as config if no configs builder_configs = builder_cls.BUILDER_CONFIGS or [None] auto_generate_results = [] with tempfile.TemporaryDirectory() as tmp_dir: for builder_config in builder_configs: config_name = builder_config.name if builder_config else None dataset_builder = builder_cls(config_name=config_name, hash=dataset_module.hash, cache_dir=tmp_dir) version = builder_config.version if builder_config else dataset_builder.config.version mock_dl_manager = MockDownloadManager( dataset_name=self._dataset_name, config=builder_config, version=version, use_local_dummy_data=True, load_existing_dummy_data=False, ) if self._auto_generate: auto_generate_results.append( self._autogenerate_dummy_data( dataset_builder=dataset_builder, mock_dl_manager=mock_dl_manager, keep_uncompressed=self._keep_uncompressed, ) ) else: self._print_dummy_data_instructions( dataset_builder=dataset_builder, mock_dl_manager=mock_dl_manager ) if self._auto_generate and not self._keep_uncompressed: if all(auto_generate_results): print(f"Automatic dummy data generation succeeded for all configs of '{self._path_to_dataset}'") else: print(f"Automatic dummy data generation failed for some configs of '{self._path_to_dataset}'") def _autogenerate_dummy_data(self, dataset_builder, mock_dl_manager, keep_uncompressed) -> Optional[bool]: dl_cache_dir = ( os.path.join(self._cache_dir, config.DOWNLOADED_DATASETS_DIR) if self._cache_dir else config.DOWNLOADED_DATASETS_PATH ) download_config = DownloadConfig(cache_dir=dl_cache_dir) dl_manager = DummyDataGeneratorDownloadManager( dataset_name=self._dataset_name, mock_download_manager=mock_dl_manager, download_config=download_config ) dataset_builder._split_generators(dl_manager) mock_dl_manager.load_existing_dummy_data = False # don't use real dummy data dl_manager.auto_generate_dummy_data_folder( n_lines=self._n_lines, json_field=self._json_field, xml_tag=self._xml_tag, match_text_files=self._match_text_files, encoding=self._encoding, ) if not keep_uncompressed: path_do_dataset = os.path.join(mock_dl_manager.datasets_scripts_dir, mock_dl_manager.dataset_name) dl_manager.compress_autogenerated_dummy_data(path_do_dataset) # now test that the dummy_data.zip file actually works mock_dl_manager.load_existing_dummy_data = True # use real dummy data n_examples_per_split = {} os.makedirs(dataset_builder._cache_dir, exist_ok=True) try: 
split_generators = dataset_builder._split_generators(mock_dl_manager) for split_generator in split_generators: dataset_builder._prepare_split(split_generator, check_duplicate_keys=False) n_examples_per_split[split_generator.name] = split_generator.split_info.num_examples except OSError as e: logger.error( f"Failed to load dummy data for config '{dataset_builder.config.name}''.\nOriginal error:\n" + str(e) ) return False else: if all(n_examples > 0 for n_examples in n_examples_per_split.values()): logger.warning( f"Dummy data generation done and dummy data test succeeded for config '{dataset_builder.config.name}''." ) return True else: empty_splits = [ split_name for split_name in n_examples_per_split if n_examples_per_split[split_name] == 0 ] logger.warning( f"Dummy data generation done but dummy data test failed since splits {empty_splits} have 0 examples for config '{dataset_builder.config.name}''." ) return False else: generated_dummy_data_dir = os.path.join(self._path_to_dataset, mock_dl_manager.dummy_data_folder) logger.info( f"Dummy data generated in directory '{generated_dummy_data_dir}' but kept uncompressed. " "Please compress this directory into a zip file to use it for dummy data tests." ) def _print_dummy_data_instructions(self, dataset_builder, mock_dl_manager): dummy_data_folder = os.path.join(self._path_to_dataset, mock_dl_manager.dummy_data_folder) logger.info(f"Creating dummy folder structure for {dummy_data_folder}... ") os.makedirs(dummy_data_folder, exist_ok=True) try: generator_splits = dataset_builder._split_generators(mock_dl_manager) except FileNotFoundError as e: print( f"Dataset {self._dataset_name} with config {mock_dl_manager.config} seems to already open files in the method `_split_generators(...)`. You might consider to instead only open files in the method `_generate_examples(...)` instead. If this is not possible the dummy data has to be created with less guidance. Make sure you create the file {e.filename}." ) files_to_create = set() split_names = [] dummy_file_name = mock_dl_manager.dummy_file_name for split in generator_splits: logger.info(f"Collecting dummy data file paths to create for {split.name}") split_names.append(split.name) gen_kwargs = split.gen_kwargs generator = dataset_builder._generate_examples(**gen_kwargs) try: dummy_data_guidance_print = "\n" + 30 * "=" + "DUMMY DATA INSTRUCTIONS" + 30 * "=" + "\n" config_string = ( f"config {mock_dl_manager.config.name} of " if mock_dl_manager.config is not None else "" ) dummy_data_guidance_print += ( "- In order to create the dummy data for " + config_string + f"{self._dataset_name}, please go into the folder '{dummy_data_folder}' with `cd {dummy_data_folder}` . \n\n" ) # trigger generate function for key, record in generator: pass dummy_data_guidance_print += f"- It appears that the function `_generate_examples(...)` expects one or more files in the folder {dummy_file_name} using the function `glob.glob(...)`. In this case, please refer to the `_generate_examples(...)` method to see under which filename the dummy data files should be created. \n\n" except FileNotFoundError as e: files_to_create.add(e.filename) split_names = ", ".join(split_names) if len(files_to_create) > 0: # no glob.glob(...) in `_generate_examples(...)` if len(files_to_create) == 1 and next(iter(files_to_create)) == dummy_file_name: dummy_data_guidance_print += f"- Please create a single dummy data file called '{next(iter(files_to_create))}' from the folder '{dummy_data_folder}'. 
Make sure that the dummy data file provides at least one example for the split(s) '{split_names}' \n\n" files_string = dummy_file_name else: files_string = ", ".join(files_to_create) dummy_data_guidance_print += f"- Please create the following dummy data files '{files_string}' from the folder '{dummy_data_folder}'\n\n" dummy_data_guidance_print += f"- For each of the splits '{split_names}', make sure that one or more of the dummy data files provide at least one example \n\n" dummy_data_guidance_print += f"- If the method `_generate_examples(...)` includes multiple `open()` statements, you might have to create other files in addition to '{files_string}'. In this case please refer to the `_generate_examples(...)` method \n\n" if len(files_to_create) == 1 and next(iter(files_to_create)) == dummy_file_name: dummy_data_guidance_print += f"- After the dummy data file is created, it should be zipped to '{dummy_file_name}.zip' with the command `zip {dummy_file_name}.zip {dummy_file_name}` \n\n" dummy_data_guidance_print += ( f"- You can now delete the file '{dummy_file_name}' with the command `rm {dummy_file_name}` \n\n" ) dummy_data_guidance_print += f"- To get the file '{dummy_file_name}' back for further changes to the dummy data, simply unzip {dummy_file_name}.zip with the command `unzip {dummy_file_name}.zip` \n\n" else: dummy_data_guidance_print += f"- After all dummy data files are created, they should be zipped recursively to '{dummy_file_name}.zip' with the command `zip -r {dummy_file_name}.zip {dummy_file_name}/` \n\n" dummy_data_guidance_print += ( f"- You can now delete the folder '{dummy_file_name}' with the command `rm -r {dummy_file_name}` \n\n" ) dummy_data_guidance_print += f"- To get the folder '{dummy_file_name}' back for further changes to the dummy data, simply unzip {dummy_file_name}.zip with the command `unzip {dummy_file_name}.zip` \n\n" dummy_data_guidance_print += ( f"- Make sure you have created the file '{dummy_file_name}.zip' in '{dummy_data_folder}' \n" ) dummy_data_guidance_print += 83 * "=" + "\n" print(dummy_data_guidance_print)
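# Illustrative sketch (not part of the original module): this mirrors what an
# invocation like `datasets-cli dummy_data ./datasets/my_dataset --auto_generate`
# would set up, based on the argparse definitions above. The dataset path is a
# placeholder, and the command is deprecated (see the decorator above), so treat
# this as documentation of the argument order rather than a recommended workflow.
if __name__ == "__main__":
    command = DummyDataCommand(
        path_to_dataset="./datasets/my_dataset",
        auto_generate=True,
        n_lines=5,
        json_field=None,
        xml_tag=None,
        match_text_files=None,
        keep_uncompressed=False,
        cache_dir=None,
        encoding=None,
    )
    command.run()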
datasets/src/datasets/commands/dummy_data.py/0
{ "file_path": "datasets/src/datasets/commands/dummy_data.py", "repo_id": "datasets", "token_count": 11107 }
67
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """ This class handle features definition in datasets and some utilities to display table type.""" import copy import json import re import sys from collections.abc import Iterable, Mapping from collections.abc import Sequence as SequenceABC from dataclasses import InitVar, dataclass, field, fields from functools import reduce, wraps from operator import mul from typing import Any, Callable, ClassVar, Dict, List, Optional, Tuple, Union from typing import Sequence as Sequence_ import numpy as np import pandas as pd import pyarrow as pa import pyarrow.compute as pc import pyarrow.types import pyarrow_hotfix # noqa: F401 # to fix vulnerability on pyarrow<14.0.1 from pandas.api.extensions import ExtensionArray as PandasExtensionArray from pandas.api.extensions import ExtensionDtype as PandasExtensionDtype from .. import config from ..naming import camelcase_to_snakecase, snakecase_to_camelcase from ..table import array_cast from ..utils import logging from ..utils.py_utils import asdict, first_non_null_value, zip_dict from .audio import Audio from .image import Image, encode_pil_image from .translation import Translation, TranslationVariableLanguages logger = logging.get_logger(__name__) def _arrow_to_datasets_dtype(arrow_type: pa.DataType) -> str: """ _arrow_to_datasets_dtype takes a pyarrow.DataType and converts it to a datasets string dtype. 
In effect, `dt == string_to_arrow(_arrow_to_datasets_dtype(dt))` """ if pyarrow.types.is_null(arrow_type): return "null" elif pyarrow.types.is_boolean(arrow_type): return "bool" elif pyarrow.types.is_int8(arrow_type): return "int8" elif pyarrow.types.is_int16(arrow_type): return "int16" elif pyarrow.types.is_int32(arrow_type): return "int32" elif pyarrow.types.is_int64(arrow_type): return "int64" elif pyarrow.types.is_uint8(arrow_type): return "uint8" elif pyarrow.types.is_uint16(arrow_type): return "uint16" elif pyarrow.types.is_uint32(arrow_type): return "uint32" elif pyarrow.types.is_uint64(arrow_type): return "uint64" elif pyarrow.types.is_float16(arrow_type): return "float16" # pyarrow dtype is "halffloat" elif pyarrow.types.is_float32(arrow_type): return "float32" # pyarrow dtype is "float" elif pyarrow.types.is_float64(arrow_type): return "float64" # pyarrow dtype is "double" elif pyarrow.types.is_time32(arrow_type): return f"time32[{pa.type_for_alias(str(arrow_type)).unit}]" elif pyarrow.types.is_time64(arrow_type): return f"time64[{pa.type_for_alias(str(arrow_type)).unit}]" elif pyarrow.types.is_timestamp(arrow_type): if arrow_type.tz is None: return f"timestamp[{arrow_type.unit}]" elif arrow_type.tz: return f"timestamp[{arrow_type.unit}, tz={arrow_type.tz}]" else: raise ValueError(f"Unexpected timestamp object {arrow_type}.") elif pyarrow.types.is_date32(arrow_type): return "date32" # pyarrow dtype is "date32[day]" elif pyarrow.types.is_date64(arrow_type): return "date64" # pyarrow dtype is "date64[ms]" elif pyarrow.types.is_duration(arrow_type): return f"duration[{arrow_type.unit}]" elif pyarrow.types.is_decimal128(arrow_type): return f"decimal128({arrow_type.precision}, {arrow_type.scale})" elif pyarrow.types.is_decimal256(arrow_type): return f"decimal256({arrow_type.precision}, {arrow_type.scale})" elif pyarrow.types.is_binary(arrow_type): return "binary" elif pyarrow.types.is_large_binary(arrow_type): return "large_binary" elif pyarrow.types.is_string(arrow_type): return "string" elif pyarrow.types.is_large_string(arrow_type): return "large_string" else: raise ValueError(f"Arrow type {arrow_type} does not have a datasets dtype equivalent.") def string_to_arrow(datasets_dtype: str) -> pa.DataType: """ string_to_arrow takes a datasets string dtype and converts it to a pyarrow.DataType. In effect, `dt == string_to_arrow(_arrow_to_datasets_dtype(dt))` This is necessary because the datasets.Value() primitive type is constructed using a string dtype Value(dtype=str) But Features.type (via `get_nested_type()` expects to resolve Features into a pyarrow Schema, which means that each Value() must be able to resolve into a corresponding pyarrow.DataType, which is the purpose of this function. """ def _dtype_error_msg(dtype, pa_dtype, examples=None, urls=None): msg = f"{dtype} is not a validly formatted string representation of the pyarrow {pa_dtype} type." if examples: examples = ", ".join(examples[:-1]) + " or " + examples[-1] if len(examples) > 1 else examples[0] msg += f"\nValid examples include: {examples}." if urls: urls = ", ".join(urls[:-1]) + " and " + urls[-1] if len(urls) > 1 else urls[0] msg += f"\nFor more insformation, see: {urls}." 
return msg if datasets_dtype in pa.__dict__: return pa.__dict__[datasets_dtype]() if (datasets_dtype + "_") in pa.__dict__: return pa.__dict__[datasets_dtype + "_"]() timestamp_matches = re.search(r"^timestamp\[(.*)\]$", datasets_dtype) if timestamp_matches: timestamp_internals = timestamp_matches.group(1) internals_matches = re.search(r"^(s|ms|us|ns),\s*tz=([a-zA-Z0-9/_+\-:]*)$", timestamp_internals) if timestamp_internals in ["s", "ms", "us", "ns"]: return pa.timestamp(timestamp_internals) elif internals_matches: return pa.timestamp(internals_matches.group(1), internals_matches.group(2)) else: raise ValueError( _dtype_error_msg( datasets_dtype, "timestamp", examples=["timestamp[us]", "timestamp[us, tz=America/New_York"], urls=["https://arrow.apache.org/docs/python/generated/pyarrow.timestamp.html"], ) ) duration_matches = re.search(r"^duration\[(.*)\]$", datasets_dtype) if duration_matches: duration_internals = duration_matches.group(1) if duration_internals in ["s", "ms", "us", "ns"]: return pa.duration(duration_internals) else: raise ValueError( _dtype_error_msg( datasets_dtype, "duration", examples=["duration[s]", "duration[us]"], urls=["https://arrow.apache.org/docs/python/generated/pyarrow.duration.html"], ) ) time_matches = re.search(r"^time(.*)\[(.*)\]$", datasets_dtype) if time_matches: time_internals_bits = time_matches.group(1) if time_internals_bits == "32": time_internals_unit = time_matches.group(2) if time_internals_unit in ["s", "ms"]: return pa.time32(time_internals_unit) else: raise ValueError( f"{time_internals_unit} is not a valid unit for the pyarrow time32 type. Supported units: s (second) and ms (millisecond)." ) elif time_internals_bits == "64": time_internals_unit = time_matches.group(2) if time_internals_unit in ["us", "ns"]: return pa.time64(time_internals_unit) else: raise ValueError( f"{time_internals_unit} is not a valid unit for the pyarrow time64 type. Supported units: us (microsecond) and ns (nanosecond)." 
) else: raise ValueError( _dtype_error_msg( datasets_dtype, "time", examples=["time32[s]", "time64[us]"], urls=[ "https://arrow.apache.org/docs/python/generated/pyarrow.time32.html", "https://arrow.apache.org/docs/python/generated/pyarrow.time64.html", ], ) ) decimal_matches = re.search(r"^decimal(.*)\((.*)\)$", datasets_dtype) if decimal_matches: decimal_internals_bits = decimal_matches.group(1) if decimal_internals_bits == "128": decimal_internals_precision_and_scale = re.search(r"^(\d+),\s*(-?\d+)$", decimal_matches.group(2)) if decimal_internals_precision_and_scale: precision = decimal_internals_precision_and_scale.group(1) scale = decimal_internals_precision_and_scale.group(2) return pa.decimal128(int(precision), int(scale)) else: raise ValueError( _dtype_error_msg( datasets_dtype, "decimal128", examples=["decimal128(10, 2)", "decimal128(4, -2)"], urls=["https://arrow.apache.org/docs/python/generated/pyarrow.decimal128.html"], ) ) elif decimal_internals_bits == "256": decimal_internals_precision_and_scale = re.search(r"^(\d+),\s*(-?\d+)$", decimal_matches.group(2)) if decimal_internals_precision_and_scale: precision = decimal_internals_precision_and_scale.group(1) scale = decimal_internals_precision_and_scale.group(2) return pa.decimal256(int(precision), int(scale)) else: raise ValueError( _dtype_error_msg( datasets_dtype, "decimal256", examples=["decimal256(30, 2)", "decimal256(38, -4)"], urls=["https://arrow.apache.org/docs/python/generated/pyarrow.decimal256.html"], ) ) else: raise ValueError( _dtype_error_msg( datasets_dtype, "decimal", examples=["decimal128(12, 3)", "decimal256(40, 6)"], urls=[ "https://arrow.apache.org/docs/python/generated/pyarrow.decimal128.html", "https://arrow.apache.org/docs/python/generated/pyarrow.decimal256.html", ], ) ) raise ValueError( f"Neither {datasets_dtype} nor {datasets_dtype + '_'} seems to be a pyarrow data type. " f"Please make sure to use a correct data type, see: " f"https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions" ) def _cast_to_python_objects(obj: Any, only_1d_for_numpy: bool, optimize_list_casting: bool) -> Tuple[Any, bool]: """ Cast pytorch/tensorflow/pandas objects to python numpy array/lists. It works recursively. If `optimize_list_casting` is True, to avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be casted. If the first element needs to be casted, then all the elements of the list will be casted, otherwise they'll stay the same. This trick allows to cast objects that contain tokenizers outputs without iterating over every single token for example. Args: obj: the object (nested struct) to cast. only_1d_for_numpy (bool): whether to keep the full multi-dim tensors as multi-dim numpy arrays, or convert them to nested lists of 1-dimensional numpy arrays. This can be useful to keep only 1-d arrays to instantiate Arrow arrays. Indeed Arrow only support converting 1-dimensional array values. optimize_list_casting (bool): whether to optimize list casting by checking the first non-null element to see if it needs to be casted and if it doesn't, not checking the rest of the list elements. 
Returns: casted_obj: the casted object has_changed (bool): True if the object has been changed, False if it is identical """ if config.TF_AVAILABLE and "tensorflow" in sys.modules: import tensorflow as tf if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if config.JAX_AVAILABLE and "jax" in sys.modules: import jax.numpy as jnp if config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(obj, np.ndarray): if obj.ndim == 0: return obj[()], True elif not only_1d_for_numpy or obj.ndim == 1: return obj, False else: return ( [ _cast_to_python_objects( x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0] for x in obj ], True, ) elif config.TORCH_AVAILABLE and "torch" in sys.modules and isinstance(obj, torch.Tensor): if obj.ndim == 0: return obj.detach().cpu().numpy()[()], True elif not only_1d_for_numpy or obj.ndim == 1: return obj.detach().cpu().numpy(), True else: return ( [ _cast_to_python_objects( x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0] for x in obj.detach().cpu().numpy() ], True, ) elif config.TF_AVAILABLE and "tensorflow" in sys.modules and isinstance(obj, tf.Tensor): if obj.ndim == 0: return obj.numpy()[()], True elif not only_1d_for_numpy or obj.ndim == 1: return obj.numpy(), True else: return ( [ _cast_to_python_objects( x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0] for x in obj.numpy() ], True, ) elif config.JAX_AVAILABLE and "jax" in sys.modules and isinstance(obj, jnp.ndarray): if obj.ndim == 0: return np.asarray(obj)[()], True elif not only_1d_for_numpy or obj.ndim == 1: return np.asarray(obj), True else: return ( [ _cast_to_python_objects( x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0] for x in np.asarray(obj) ], True, ) elif config.PIL_AVAILABLE and "PIL" in sys.modules and isinstance(obj, PIL.Image.Image): return encode_pil_image(obj), True elif isinstance(obj, pd.Series): return ( _cast_to_python_objects( obj.tolist(), only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0], True, ) elif isinstance(obj, pd.DataFrame): return ( { key: _cast_to_python_objects( value, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0] for key, value in obj.to_dict("series").items() }, True, ) elif isinstance(obj, pd.Timestamp): return obj.to_pydatetime(), True elif isinstance(obj, pd.Timedelta): return obj.to_pytimedelta(), True elif isinstance(obj, Mapping): has_changed = not isinstance(obj, dict) output = {} for k, v in obj.items(): casted_v, has_changed_v = _cast_to_python_objects( v, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting ) has_changed |= has_changed_v output[k] = casted_v return output if has_changed else obj, has_changed elif hasattr(obj, "__array__"): return ( _cast_to_python_objects( obj.__array__(), only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0], True, ) elif isinstance(obj, (list, tuple)): if len(obj) > 0: for first_elmt in obj: if _check_non_null_non_empty_recursive(first_elmt): break casted_first_elmt, has_changed_first_elmt = _cast_to_python_objects( first_elmt, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting ) if has_changed_first_elmt or not optimize_list_casting: return ( [ _cast_to_python_objects( elmt, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0] for elmt in obj 
], True, ) else: if isinstance(obj, (list, tuple)): return obj, False else: return list(obj), True else: return obj, False else: return obj, False def cast_to_python_objects(obj: Any, only_1d_for_numpy=False, optimize_list_casting=True) -> Any: """ Cast numpy/pytorch/tensorflow/pandas objects to python lists. It works recursively. If `optimize_list_casting` is True, To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be casted. If the first element needs to be casted, then all the elements of the list will be casted, otherwise they'll stay the same. This trick allows to cast objects that contain tokenizers outputs without iterating over every single token for example. Args: obj: the object (nested struct) to cast only_1d_for_numpy (bool, default ``False``): whether to keep the full multi-dim tensors as multi-dim numpy arrays, or convert them to nested lists of 1-dimensional numpy arrays. This can be useful to keep only 1-d arrays to instantiate Arrow arrays. Indeed Arrow only support converting 1-dimensional array values. optimize_list_casting (bool, default ``True``): whether to optimize list casting by checking the first non-null element to see if it needs to be casted and if it doesn't, not checking the rest of the list elements. Returns: casted_obj: the casted object """ return _cast_to_python_objects( obj, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting )[0] @dataclass class Value: """ The `Value` dtypes are as follows: - `null` - `bool` - `int8` - `int16` - `int32` - `int64` - `uint8` - `uint16` - `uint32` - `uint64` - `float16` - `float32` (alias float) - `float64` (alias double) - `time32[(s|ms)]` - `time64[(us|ns)]` - `timestamp[(s|ms|us|ns)]` - `timestamp[(s|ms|us|ns), tz=(tzstring)]` - `date32` - `date64` - `duration[(s|ms|us|ns)]` - `decimal128(precision, scale)` - `decimal256(precision, scale)` - `binary` - `large_binary` - `string` - `large_string` Example: ```py >>> from datasets import Features >>> features = Features({'stars': Value(dtype='int32')}) >>> features {'stars': Value(dtype='int32', id=None)} ``` """ dtype: str id: Optional[str] = None # Automatically constructed pa_type: ClassVar[Any] = None _type: str = field(default="Value", init=False, repr=False) def __post_init__(self): if self.dtype == "double": # fix inferred type self.dtype = "float64" if self.dtype == "float": # fix inferred type self.dtype = "float32" self.pa_type = string_to_arrow(self.dtype) def __call__(self): return self.pa_type def encode_example(self, value): if pa.types.is_boolean(self.pa_type): return bool(value) elif pa.types.is_integer(self.pa_type): return int(value) elif pa.types.is_floating(self.pa_type): return float(value) elif pa.types.is_string(self.pa_type): return str(value) else: return value class _ArrayXD: def __post_init__(self): self.shape = tuple(self.shape) def __call__(self): pa_type = globals()[self.__class__.__name__ + "ExtensionType"](self.shape, self.dtype) return pa_type def encode_example(self, value): return value @dataclass class Array2D(_ArrayXD): """Create a two-dimensional array. Args: shape (`tuple`): The size of each dimension. dtype (`str`): The value of the data type. 
Example: ```py >>> from datasets import Features >>> features = Features({'x': Array2D(shape=(1, 3), dtype='int32')}) ``` """ shape: tuple dtype: str id: Optional[str] = None # Automatically constructed _type: str = field(default="Array2D", init=False, repr=False) @dataclass class Array3D(_ArrayXD): """Create a three-dimensional array. Args: shape (`tuple`): The size of each dimension. dtype (`str`): The value of the data type. Example: ```py >>> from datasets import Features >>> features = Features({'x': Array3D(shape=(1, 2, 3), dtype='int32')}) ``` """ shape: tuple dtype: str id: Optional[str] = None # Automatically constructed _type: str = field(default="Array3D", init=False, repr=False) @dataclass class Array4D(_ArrayXD): """Create a four-dimensional array. Args: shape (`tuple`): The size of each dimension. dtype (`str`): The value of the data type. Example: ```py >>> from datasets import Features >>> features = Features({'x': Array4D(shape=(1, 2, 2, 3), dtype='int32')}) ``` """ shape: tuple dtype: str id: Optional[str] = None # Automatically constructed _type: str = field(default="Array4D", init=False, repr=False) @dataclass class Array5D(_ArrayXD): """Create a five-dimensional array. Args: shape (`tuple`): The size of each dimension. dtype (`str`): The value of the data type. Example: ```py >>> from datasets import Features >>> features = Features({'x': Array5D(shape=(1, 2, 2, 3, 3), dtype='int32')}) ``` """ shape: tuple dtype: str id: Optional[str] = None # Automatically constructed _type: str = field(default="Array5D", init=False, repr=False) class _ArrayXDExtensionType(pa.ExtensionType): ndims: Optional[int] = None def __init__(self, shape: tuple, dtype: str): if self.ndims is None or self.ndims <= 1: raise ValueError("You must instantiate an array type with a value for dim that is > 1") if len(shape) != self.ndims: raise ValueError(f"shape={shape} and ndims={self.ndims} don't match") for dim in range(1, self.ndims): if shape[dim] is None: raise ValueError(f"Support only dynamic size on first dimension. 
Got: {shape}") self.shape = tuple(shape) self.value_type = dtype self.storage_dtype = self._generate_dtype(self.value_type) pa.ExtensionType.__init__(self, self.storage_dtype, f"{self.__class__.__module__}.{self.__class__.__name__}") def __arrow_ext_serialize__(self): return json.dumps((self.shape, self.value_type)).encode() @classmethod def __arrow_ext_deserialize__(cls, storage_type, serialized): args = json.loads(serialized) return cls(*args) # This was added to pa.ExtensionType in pyarrow >= 13.0.0 def __reduce__(self): return self.__arrow_ext_deserialize__, (self.storage_type, self.__arrow_ext_serialize__()) def __hash__(self): return hash((self.__class__, self.shape, self.value_type)) def __arrow_ext_class__(self): return ArrayExtensionArray def _generate_dtype(self, dtype): dtype = string_to_arrow(dtype) for d in reversed(self.shape): dtype = pa.list_(dtype) # Don't specify the size of the list, since fixed length list arrays have issues # being validated after slicing in pyarrow 0.17.1 return dtype def to_pandas_dtype(self): return PandasArrayExtensionDtype(self.value_type) class Array2DExtensionType(_ArrayXDExtensionType): ndims = 2 class Array3DExtensionType(_ArrayXDExtensionType): ndims = 3 class Array4DExtensionType(_ArrayXDExtensionType): ndims = 4 class Array5DExtensionType(_ArrayXDExtensionType): ndims = 5 # Register the extension types for deserialization pa.register_extension_type(Array2DExtensionType((1, 2), "int64")) pa.register_extension_type(Array3DExtensionType((1, 2, 3), "int64")) pa.register_extension_type(Array4DExtensionType((1, 2, 3, 4), "int64")) pa.register_extension_type(Array5DExtensionType((1, 2, 3, 4, 5), "int64")) def _is_zero_copy_only(pa_type: pa.DataType, unnest: bool = False) -> bool: """ When converting a pyarrow array to a numpy array, we must know whether this could be done in zero-copy or not. This function returns the value of the ``zero_copy_only`` parameter to pass to ``.to_numpy()``, given the type of the pyarrow array. 
# zero copy is available for all primitive types except booleans and temporal types (date, time, timestamp or duration) # primitive types are types for which the physical representation in arrow and in numpy # https://github.com/wesm/arrow/blob/c07b9b48cf3e0bbbab493992a492ae47e5b04cad/python/pyarrow/types.pxi#L821 # see https://arrow.apache.org/docs/python/generated/pyarrow.Array.html#pyarrow.Array.to_numpy # and https://issues.apache.org/jira/browse/ARROW-2871?jql=text%20~%20%22boolean%20to_numpy%22 """ def _unnest_pa_type(pa_type: pa.DataType) -> pa.DataType: if pa.types.is_list(pa_type): return _unnest_pa_type(pa_type.value_type) return pa_type if unnest: pa_type = _unnest_pa_type(pa_type) return pa.types.is_primitive(pa_type) and not (pa.types.is_boolean(pa_type) or pa.types.is_temporal(pa_type)) class ArrayExtensionArray(pa.ExtensionArray): def __array__(self): zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True) return self.to_numpy(zero_copy_only=zero_copy_only) def __getitem__(self, i): return self.storage[i] def to_numpy(self, zero_copy_only=True): storage: pa.ListArray = self.storage null_mask = storage.is_null().to_numpy(zero_copy_only=False) if self.type.shape[0] is not None: size = 1 null_indices = np.arange(len(storage))[null_mask] - np.arange(np.sum(null_mask)) for i in range(self.type.ndims): size *= self.type.shape[i] storage = storage.flatten() numpy_arr = storage.to_numpy(zero_copy_only=zero_copy_only) numpy_arr = numpy_arr.reshape(len(self) - len(null_indices), *self.type.shape) if len(null_indices): numpy_arr = np.insert(numpy_arr.astype(np.float64), null_indices, np.nan, axis=0) else: shape = self.type.shape ndims = self.type.ndims arrays = [] first_dim_offsets = np.array([off.as_py() for off in storage.offsets]) for i, is_null in enumerate(null_mask): if is_null: arrays.append(np.nan) else: storage_el = storage[i : i + 1] first_dim = first_dim_offsets[i + 1] - first_dim_offsets[i] # flatten storage for _ in range(ndims): storage_el = storage_el.flatten() numpy_arr = storage_el.to_numpy(zero_copy_only=zero_copy_only) arrays.append(numpy_arr.reshape(first_dim, *shape[1:])) if len(np.unique(np.diff(first_dim_offsets))) > 1: # ragged numpy_arr = np.empty(len(arrays), dtype=object) numpy_arr[:] = arrays else: numpy_arr = np.array(arrays) return numpy_arr def to_pylist(self): zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True) numpy_arr = self.to_numpy(zero_copy_only=zero_copy_only) if self.type.shape[0] is None and numpy_arr.dtype == object: return [arr.tolist() for arr in numpy_arr.tolist()] else: return numpy_arr.tolist() class PandasArrayExtensionDtype(PandasExtensionDtype): _metadata = "value_type" def __init__(self, value_type: Union["PandasArrayExtensionDtype", np.dtype]): self._value_type = value_type def __from_arrow__(self, array: Union[pa.Array, pa.ChunkedArray]): if isinstance(array, pa.ChunkedArray): array = array.type.wrap_array(pa.concat_arrays([chunk.storage for chunk in array.chunks])) zero_copy_only = _is_zero_copy_only(array.storage.type, unnest=True) numpy_arr = array.to_numpy(zero_copy_only=zero_copy_only) return PandasArrayExtensionArray(numpy_arr) @classmethod def construct_array_type(cls): return PandasArrayExtensionArray @property def type(self) -> type: return np.ndarray @property def kind(self) -> str: return "O" @property def name(self) -> str: return f"array[{self.value_type}]" @property def value_type(self) -> np.dtype: return self._value_type class PandasArrayExtensionArray(PandasExtensionArray): def 
__init__(self, data: np.ndarray, copy: bool = False): self._data = data if not copy else np.array(data) self._dtype = PandasArrayExtensionDtype(data.dtype) def __array__(self, dtype=None): """ Convert to NumPy Array. Note that Pandas expects a 1D array when dtype is set to object. But for other dtypes, the returned shape is the same as the one of ``data``. More info about pandas 1D requirement for PandasExtensionArray here: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.api.extensions.ExtensionArray.html#pandas.api.extensions.ExtensionArray """ if dtype == object: out = np.empty(len(self._data), dtype=object) for i in range(len(self._data)): out[i] = self._data[i] return out if dtype is None: return self._data else: return self._data.astype(dtype) def copy(self, deep: bool = False) -> "PandasArrayExtensionArray": return PandasArrayExtensionArray(self._data, copy=True) @classmethod def _from_sequence( cls, scalars, dtype: Optional[PandasArrayExtensionDtype] = None, copy: bool = False ) -> "PandasArrayExtensionArray": if len(scalars) > 1 and all( isinstance(x, np.ndarray) and x.shape == scalars[0].shape and x.dtype == scalars[0].dtype for x in scalars ): data = np.array(scalars, dtype=dtype if dtype is None else dtype.value_type, copy=copy) else: data = np.empty(len(scalars), dtype=object) data[:] = scalars return cls(data, copy=copy) @classmethod def _concat_same_type(cls, to_concat: Sequence_["PandasArrayExtensionArray"]) -> "PandasArrayExtensionArray": if len(to_concat) > 1 and all( va._data.shape == to_concat[0]._data.shape and va._data.dtype == to_concat[0]._data.dtype for va in to_concat ): data = np.vstack([va._data for va in to_concat]) else: data = np.empty(len(to_concat), dtype=object) data[:] = [va._data for va in to_concat] return cls(data, copy=False) @property def dtype(self) -> PandasArrayExtensionDtype: return self._dtype @property def nbytes(self) -> int: return self._data.nbytes def isna(self) -> np.ndarray: return np.array([pd.isna(arr).any() for arr in self._data]) def __setitem__(self, key: Union[int, slice, np.ndarray], value: Any) -> None: raise NotImplementedError() def __getitem__(self, item: Union[int, slice, np.ndarray]) -> Union[np.ndarray, "PandasArrayExtensionArray"]: if isinstance(item, int): return self._data[item] return PandasArrayExtensionArray(self._data[item], copy=False) def take( self, indices: Sequence_[int], allow_fill: bool = False, fill_value: bool = None ) -> "PandasArrayExtensionArray": indices: np.ndarray = np.asarray(indices, dtype=int) if allow_fill: fill_value = ( self.dtype.na_value if fill_value is None else np.asarray(fill_value, dtype=self.dtype.value_type) ) mask = indices == -1 if (indices < -1).any(): raise ValueError("Invalid value in `indices`, must be all >= -1 for `allow_fill` is True") elif len(self) > 0: pass elif not np.all(mask): raise IndexError("Invalid take for empty PandasArrayExtensionArray, must be all -1.") else: data = np.array([fill_value] * len(indices), dtype=self.dtype.value_type) return PandasArrayExtensionArray(data, copy=False) took = self._data.take(indices, axis=0) if allow_fill and mask.any(): took[mask] = [fill_value] * np.sum(mask) return PandasArrayExtensionArray(took, copy=False) def __len__(self) -> int: return len(self._data) def __eq__(self, other) -> np.ndarray: if not isinstance(other, PandasArrayExtensionArray): raise NotImplementedError(f"Invalid type to compare to: {type(other)}") return (self._data == other._data).all() def pandas_types_mapper(dtype): if isinstance(dtype, 
_ArrayXDExtensionType): return PandasArrayExtensionDtype(dtype.value_type) @dataclass class ClassLabel: """Feature type for integer class labels. There are 3 ways to define a `ClassLabel`, which correspond to the 3 arguments: * `num_classes`: Create 0 to (num_classes-1) labels. * `names`: List of label strings. * `names_file`: File containing the list of labels. Under the hood the labels are stored as integers. You can use negative integers to represent unknown/missing labels. Args: num_classes (`int`, *optional*): Number of classes. All labels must be < `num_classes`. names (`list` of `str`, *optional*): String names for the integer classes. The order in which the names are provided is kept. names_file (`str`, *optional*): Path to a file with names for the integer classes, one per line. Example: ```py >>> from datasets import Features >>> features = Features({'label': ClassLabel(num_classes=3, names=['bad', 'ok', 'good'])}) >>> features {'label': ClassLabel(num_classes=3, names=['bad', 'ok', 'good'], id=None)} ``` """ num_classes: InitVar[Optional[int]] = None # Pseudo-field: ignored by asdict/fields when converting to/from dict names: List[str] = None names_file: InitVar[Optional[str]] = None # Pseudo-field: ignored by asdict/fields when converting to/from dict id: Optional[str] = None # Automatically constructed dtype: ClassVar[str] = "int64" pa_type: ClassVar[Any] = pa.int64() _str2int: ClassVar[Dict[str, int]] = None _int2str: ClassVar[Dict[int, int]] = None _type: str = field(default="ClassLabel", init=False, repr=False) def __post_init__(self, num_classes, names_file): self.num_classes = num_classes self.names_file = names_file if self.names_file is not None and self.names is not None: raise ValueError("Please provide either names or names_file but not both.") # Set self.names if self.names is None: if self.names_file is not None: self.names = self._load_names_from_file(self.names_file) elif self.num_classes is not None: self.names = [str(i) for i in range(self.num_classes)] else: raise ValueError("Please provide either num_classes, names or names_file.") elif not isinstance(self.names, SequenceABC): raise TypeError(f"Please provide names as a list, is {type(self.names)}") # Set self.num_classes if self.num_classes is None: self.num_classes = len(self.names) elif self.num_classes != len(self.names): raise ValueError( "ClassLabel number of names do not match the defined num_classes. " f"Got {len(self.names)} names VS {self.num_classes} num_classes" ) # Prepare mappings self._int2str = [str(name) for name in self.names] self._str2int = {name: i for i, name in enumerate(self._int2str)} if len(self._int2str) != len(self._str2int): raise ValueError("Some label names are duplicated. Each label name should be unique.") def __call__(self): return self.pa_type def str2int(self, values: Union[str, Iterable]) -> Union[int, Iterable]: """Conversion class name `string` => `integer`. 
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train") >>> ds.features["label"].str2int('neg') 0 ``` """ if not isinstance(values, str) and not isinstance(values, Iterable): raise ValueError( f"Values {values} should be a string or an Iterable (list, numpy array, pytorch, tensorflow tensors)" ) return_list = True if isinstance(values, str): values = [values] return_list = False output = [self._strval2int(value) for value in values] return output if return_list else output[0] def _strval2int(self, value: str) -> int: failed_parse = False value = str(value) # first attempt - raw string value int_value = self._str2int.get(value) if int_value is None: # second attempt - strip whitespace int_value = self._str2int.get(value.strip()) if int_value is None: # third attempt - convert str to int try: int_value = int(value) except ValueError: failed_parse = True else: if int_value < -1 or int_value >= self.num_classes: failed_parse = True if failed_parse: raise ValueError(f"Invalid string class label {value}") return int_value def int2str(self, values: Union[int, Iterable]) -> Union[str, Iterable]: """Conversion `integer` => class name `string`. Regarding unknown/missing labels: passing negative integers raises `ValueError`. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train") >>> ds.features["label"].int2str(0) 'neg' ``` """ if not isinstance(values, int) and not isinstance(values, Iterable): raise ValueError( f"Values {values} should be an integer or an Iterable (list, numpy array, pytorch, tensorflow tensors)" ) return_list = True if isinstance(values, int): values = [values] return_list = False for v in values: if not 0 <= v < self.num_classes: raise ValueError(f"Invalid integer class label {v:d}") output = [self._int2str[int(v)] for v in values] return output if return_list else output[0] def encode_example(self, example_data): if self.num_classes is None: raise ValueError( "Trying to use ClassLabel feature with undefined number of class. " "Please set ClassLabel.names or num_classes." ) # If a string is given, convert to associated integer if isinstance(example_data, str): example_data = self.str2int(example_data) # Allowing -1 to mean no label. if not -1 <= example_data < self.num_classes: raise ValueError(f"Class label {example_data:d} greater than configured num_classes {self.num_classes}") return example_data def cast_storage(self, storage: Union[pa.StringArray, pa.IntegerArray]) -> pa.Int64Array: """Cast an Arrow array to the `ClassLabel` arrow storage type. The Arrow types that can be converted to the `ClassLabel` pyarrow storage type are: - `pa.string()` - `pa.int()` Args: storage (`Union[pa.StringArray, pa.IntegerArray]`): PyArrow array to cast. Returns: `pa.Int64Array`: Array in the `ClassLabel` arrow storage type. 
""" if isinstance(storage, pa.IntegerArray) and len(storage) > 0: min_max = pc.min_max(storage).as_py() if min_max["max"] is not None and min_max["max"] >= self.num_classes: raise ValueError( f"Class label {min_max['max']} greater than configured num_classes {self.num_classes}" ) elif isinstance(storage, pa.StringArray): storage = pa.array( [self._strval2int(label) if label is not None else None for label in storage.to_pylist()] ) return array_cast(storage, self.pa_type) @staticmethod def _load_names_from_file(names_filepath): with open(names_filepath, encoding="utf-8") as f: return [name.strip() for name in f.read().split("\n") if name.strip()] # Filter empty names @dataclass class Sequence: """Construct a list of feature from a single type or a dict of types. Mostly here for compatiblity with tfds. Args: feature: A list of features of a single type or a dictionary of types. length (`int`): Length of the sequence. Example: ```py >>> from datasets import Features, Sequence, Value, ClassLabel >>> features = Features({'post': Sequence(feature={'text': Value(dtype='string'), 'upvotes': Value(dtype='int32'), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'])})}) >>> features {'post': Sequence(feature={'text': Value(dtype='string', id=None), 'upvotes': Value(dtype='int32', id=None), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'], id=None)}, length=-1, id=None)} ``` """ feature: Any length: int = -1 id: Optional[str] = None # Automatically constructed dtype: ClassVar[str] = "list" pa_type: ClassVar[Any] = None _type: str = field(default="Sequence", init=False, repr=False) FeatureType = Union[ dict, list, tuple, Value, ClassLabel, Translation, TranslationVariableLanguages, Sequence, Array2D, Array3D, Array4D, Array5D, Audio, Image, ] def _check_non_null_non_empty_recursive(obj, schema: Optional[FeatureType] = None) -> bool: """ Check if the object is not None. If the object is a list or a tuple, recursively check the first element of the sequence and stop if at any point the first element is not a sequence or is an empty sequence. """ if obj is None: return False elif isinstance(obj, (list, tuple)) and (schema is None or isinstance(schema, (list, tuple, Sequence))): if len(obj) > 0: if schema is None: pass elif isinstance(schema, (list, tuple)): schema = schema[0] else: schema = schema.feature return _check_non_null_non_empty_recursive(obj[0], schema) else: return False else: return True def get_nested_type(schema: FeatureType) -> pa.DataType: """ get_nested_type() converts a datasets.FeatureType into a pyarrow.DataType, and acts as the inverse of generate_from_arrow_type(). 
It performs double-duty as the implementation of Features.type and handles the conversion of datasets.Feature->pa.struct """ # Nested structures: we allow dict, list/tuples, sequences if isinstance(schema, Features): return pa.struct( {key: get_nested_type(schema[key]) for key in schema} ) # Features is subclass of dict, and dict order is deterministic since Python 3.6 elif isinstance(schema, dict): return pa.struct( {key: get_nested_type(schema[key]) for key in schema} ) # however don't sort on struct types since the order matters elif isinstance(schema, (list, tuple)): if len(schema) != 1: raise ValueError("When defining list feature, you should just provide one example of the inner type") value_type = get_nested_type(schema[0]) return pa.list_(value_type) elif isinstance(schema, Sequence): value_type = get_nested_type(schema.feature) # We allow to reverse list of dict => dict of list for compatibility with tfds if isinstance(schema.feature, dict): return pa.struct({f.name: pa.list_(f.type, schema.length) for f in value_type}) return pa.list_(value_type, schema.length) # Other objects are callable which returns their data type (ClassLabel, Array2D, Translation, Arrow datatype creation methods) return schema() def encode_nested_example(schema, obj, level=0): """Encode a nested example. This is used since some features (in particular ClassLabel) have some logic during encoding. To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be encoded. If the first element needs to be encoded, then all the elements of the list will be encoded, otherwise they'll stay the same. """ # Nested structures: we allow dict, list/tuples, sequences if isinstance(schema, dict): if level == 0 and obj is None: raise ValueError("Got None but expected a dictionary instead") return ( {k: encode_nested_example(schema[k], obj.get(k), level=level + 1) for k in schema} if obj is not None else None ) elif isinstance(schema, (list, tuple)): sub_schema = schema[0] if obj is None: return None else: if len(obj) > 0: for first_elmt in obj: if _check_non_null_non_empty_recursive(first_elmt, sub_schema): break if encode_nested_example(sub_schema, first_elmt, level=level + 1) != first_elmt: return [encode_nested_example(sub_schema, o, level=level + 1) for o in obj] return list(obj) elif isinstance(schema, Sequence): if obj is None: return None # We allow to reverse list of dict => dict of list for compatiblity with tfds if isinstance(schema.feature, dict): # dict of list to fill list_dict = {} if isinstance(obj, (list, tuple)): # obj is a list of dict for k in schema.feature: list_dict[k] = [encode_nested_example(schema.feature[k], o.get(k), level=level + 1) for o in obj] return list_dict else: # obj is a single dict for k in schema.feature: list_dict[k] = ( [encode_nested_example(schema.feature[k], o, level=level + 1) for o in obj[k]] if k in obj else None ) return list_dict # schema.feature is not a dict if isinstance(obj, str): # don't interpret a string as a list raise ValueError(f"Got a string but expected a list instead: '{obj}'") else: if len(obj) > 0: for first_elmt in obj: if _check_non_null_non_empty_recursive(first_elmt, schema.feature): break # be careful when comparing tensors here if ( not isinstance(first_elmt, list) or encode_nested_example(schema.feature, first_elmt, level=level + 1) != first_elmt ): return [encode_nested_example(schema.feature, o, level=level + 1) for o in obj] return list(obj) # Object with special 
encoding: # ClassLabel will convert from string to int, TranslationVariableLanguages does some checks elif isinstance(schema, (Audio, Image, ClassLabel, TranslationVariableLanguages, Value, _ArrayXD)): return schema.encode_example(obj) if obj is not None else None # Other object should be directly convertible to a native Arrow type (like Translation and Translation) return obj def decode_nested_example(schema, obj, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None): """Decode a nested example. This is used since some features (in particular Audio and Image) have some logic during decoding. To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be decoded. If the first element needs to be decoded, then all the elements of the list will be decoded, otherwise they'll stay the same. """ # Nested structures: we allow dict, list/tuples, sequences if isinstance(schema, dict): return ( {k: decode_nested_example(sub_schema, sub_obj) for k, (sub_schema, sub_obj) in zip_dict(schema, obj)} if obj is not None else None ) elif isinstance(schema, (list, tuple)): sub_schema = schema[0] if obj is None: return None else: if len(obj) > 0: for first_elmt in obj: if _check_non_null_non_empty_recursive(first_elmt, sub_schema): break if decode_nested_example(sub_schema, first_elmt) != first_elmt: return [decode_nested_example(sub_schema, o) for o in obj] return list(obj) elif isinstance(schema, Sequence): # We allow to reverse list of dict => dict of list for compatiblity with tfds if isinstance(schema.feature, dict): return {k: decode_nested_example([schema.feature[k]], obj[k]) for k in schema.feature} else: return decode_nested_example([schema.feature], obj) # Object with special decoding: elif isinstance(schema, (Audio, Image)): # we pass the token to read and decode files from private repositories in streaming mode if obj is not None and schema.decode: return schema.decode_example(obj, token_per_repo_id=token_per_repo_id) return obj def generate_from_dict(obj: Any): """Regenerate the nested feature object from a deserialized dict. We use the '_type' fields to get the dataclass name to load. generate_from_dict is the recursive helper for Features.from_dict, and allows for a convenient constructor syntax to define features from deserialized JSON dictionaries. This function is used in particular when deserializing a :class:`DatasetInfo` that was dumped to a JSON object. This acts as an analogue to :meth:`Features.from_arrow_schema` and handles the recursive field-by-field instantiation, but doesn't require any mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive dtypes that :class:`Value` automatically performs. 
""" # Nested structures: we allow dict, list/tuples, sequences if isinstance(obj, list): return [generate_from_dict(value) for value in obj] # Otherwise we have a dict or a dataclass if "_type" not in obj or isinstance(obj["_type"], dict): return {key: generate_from_dict(value) for key, value in obj.items()} obj = dict(obj) class_type = globals()[obj.pop("_type")] if class_type == Sequence: return Sequence(feature=generate_from_dict(obj["feature"]), length=obj.get("length", -1)) field_names = {f.name for f in fields(class_type)} return class_type(**{k: v for k, v in obj.items() if k in field_names}) def generate_from_arrow_type(pa_type: pa.DataType) -> FeatureType: """ generate_from_arrow_type accepts an arrow DataType and returns a datasets FeatureType to be used as the type for a single field. This is the high-level arrow->datasets type conversion and is inverted by get_nested_type(). This operates at the individual *field* level, whereas Features.from_arrow_schema() operates at the full schema level and holds the methods that represent the bijection from Features<->pyarrow.Schema """ if isinstance(pa_type, pa.StructType): return {field.name: generate_from_arrow_type(field.type) for field in pa_type} elif isinstance(pa_type, pa.FixedSizeListType): return Sequence(feature=generate_from_arrow_type(pa_type.value_type), length=pa_type.list_size) elif isinstance(pa_type, pa.ListType): feature = generate_from_arrow_type(pa_type.value_type) if isinstance(feature, (dict, tuple, list)): return [feature] return Sequence(feature=feature) elif isinstance(pa_type, _ArrayXDExtensionType): array_feature = [None, None, Array2D, Array3D, Array4D, Array5D][pa_type.ndims] return array_feature(shape=pa_type.shape, dtype=pa_type.value_type) elif isinstance(pa_type, pa.DictionaryType): raise NotImplementedError # TODO(thom) this will need access to the dictionary as well (for labels). I.e. 
to the py_table elif isinstance(pa_type, pa.DataType): return Value(dtype=_arrow_to_datasets_dtype(pa_type)) else: raise ValueError(f"Cannot convert {pa_type} to a Feature type.") def numpy_to_pyarrow_listarray(arr: np.ndarray, type: pa.DataType = None) -> pa.ListArray: """Build a PyArrow ListArray from a multidimensional NumPy array""" arr = np.array(arr) values = pa.array(arr.flatten(), type=type) for i in range(arr.ndim - 1): n_offsets = reduce(mul, arr.shape[: arr.ndim - i - 1], 1) step_offsets = arr.shape[arr.ndim - i - 1] offsets = pa.array(np.arange(n_offsets + 1) * step_offsets, type=pa.int32()) values = pa.ListArray.from_arrays(offsets, values) return values def list_of_pa_arrays_to_pyarrow_listarray(l_arr: List[Optional[pa.Array]]) -> pa.ListArray: null_mask = np.array([arr is None for arr in l_arr]) null_indices = np.arange(len(null_mask))[null_mask] - np.arange(np.sum(null_mask)) l_arr = [arr for arr in l_arr if arr is not None] offsets = np.cumsum( [0] + [len(arr) for arr in l_arr], dtype=object ) # convert to dtype object to allow None insertion offsets = np.insert(offsets, null_indices, None) offsets = pa.array(offsets, type=pa.int32()) values = pa.concat_arrays(l_arr) return pa.ListArray.from_arrays(offsets, values) def list_of_np_array_to_pyarrow_listarray(l_arr: List[np.ndarray], type: pa.DataType = None) -> pa.ListArray: """Build a PyArrow ListArray from a possibly nested list of NumPy arrays""" if len(l_arr) > 0: return list_of_pa_arrays_to_pyarrow_listarray( [numpy_to_pyarrow_listarray(arr, type=type) if arr is not None else None for arr in l_arr] ) else: return pa.array([], type=type) def contains_any_np_array(data: Any): """Return `True` if data is a NumPy ndarray or (recursively) if first non-null value in list is a NumPy ndarray. Args: data (Any): Data. Returns: bool """ if isinstance(data, np.ndarray): return True elif isinstance(data, list): return contains_any_np_array(first_non_null_value(data)[1]) else: return False def any_np_array_to_pyarrow_listarray(data: Union[np.ndarray, List], type: pa.DataType = None) -> pa.ListArray: """Convert to PyArrow ListArray either a NumPy ndarray or (recursively) a list that may contain any NumPy ndarray. Args: data (Union[np.ndarray, List]): Data. type (pa.DataType): Explicit PyArrow DataType passed to coerce the ListArray data type. Returns: pa.ListArray """ if isinstance(data, np.ndarray): return numpy_to_pyarrow_listarray(data, type=type) elif isinstance(data, list): return list_of_pa_arrays_to_pyarrow_listarray([any_np_array_to_pyarrow_listarray(i, type=type) for i in data]) def to_pyarrow_listarray(data: Any, pa_type: _ArrayXDExtensionType) -> pa.Array: """Convert to PyArrow ListArray. Args: data (Any): Sequence, iterable, np.ndarray or pd.Series. pa_type (_ArrayXDExtensionType): Any of the ArrayNDExtensionType. Returns: pyarrow.Array """ if contains_any_np_array(data): return any_np_array_to_pyarrow_listarray(data, type=pa_type.value_type) else: return pa.array(data, pa_type.storage_dtype) def _visit(feature: FeatureType, func: Callable[[FeatureType], Optional[FeatureType]]) -> FeatureType: """Visit a (possibly nested) feature. 
Args: feature (FeatureType): the feature type to be checked Returns: visited feature (FeatureType) """ if isinstance(feature, dict): out = func({k: _visit(f, func) for k, f in feature.items()}) elif isinstance(feature, (list, tuple)): out = func([_visit(feature[0], func)]) elif isinstance(feature, Sequence): out = func(Sequence(_visit(feature.feature, func), length=feature.length)) else: out = func(feature) return feature if out is None else out def require_decoding(feature: FeatureType, ignore_decode_attribute: bool = False) -> bool: """Check if a (possibly nested) feature requires decoding. Args: feature (FeatureType): the feature type to be checked ignore_decode_attribute (:obj:`bool`, default ``False``): Whether to ignore the current value of the `decode` attribute of the decodable feature types. Returns: :obj:`bool` """ if isinstance(feature, dict): return any(require_decoding(f) for f in feature.values()) elif isinstance(feature, (list, tuple)): return require_decoding(feature[0]) elif isinstance(feature, Sequence): return require_decoding(feature.feature) else: return hasattr(feature, "decode_example") and (feature.decode if not ignore_decode_attribute else True) def require_storage_cast(feature: FeatureType) -> bool: """Check if a (possibly nested) feature requires storage casting. Args: feature (FeatureType): the feature type to be checked Returns: :obj:`bool` """ if isinstance(feature, dict): return any(require_storage_cast(f) for f in feature.values()) elif isinstance(feature, (list, tuple)): return require_storage_cast(feature[0]) elif isinstance(feature, Sequence): return require_storage_cast(feature.feature) else: return hasattr(feature, "cast_storage") def require_storage_embed(feature: FeatureType) -> bool: """Check if a (possibly nested) feature requires embedding data into storage. Args: feature (FeatureType): the feature type to be checked Returns: :obj:`bool` """ if isinstance(feature, dict): return any(require_storage_cast(f) for f in feature.values()) elif isinstance(feature, (list, tuple)): return require_storage_cast(feature[0]) elif isinstance(feature, Sequence): return require_storage_cast(feature.feature) else: return hasattr(feature, "embed_storage") def keep_features_dicts_synced(func): """ Wrapper to keep the secondary dictionary, which tracks whether keys are decodable, of the :class:`datasets.Features` object in sync with the main dictionary. """ @wraps(func) def wrapper(*args, **kwargs): if args: self: "Features" = args[0] args = args[1:] else: self: "Features" = kwargs.pop("self") out = func(self, *args, **kwargs) assert hasattr(self, "_column_requires_decoding") self._column_requires_decoding = {col: require_decoding(feature) for col, feature in self.items()} return out wrapper._decorator_name_ = "_keep_dicts_synced" return wrapper class Features(dict): """A special dictionary that defines the internal structure of a dataset. Instantiated with a dictionary of type `dict[str, FieldType]`, where keys are the desired column names, and values are the type of that column. `FieldType` can be one of the following: - a [`~datasets.Value`] feature specifies a single typed value, e.g. `int64` or `string`. - a [`~datasets.ClassLabel`] feature specifies a field with a predefined set of classes which can have labels associated to them and will be stored as integers in the dataset. - a python `dict` which specifies that the field is a nested field containing a mapping of sub-fields to sub-fields features. 
It's possible to have nested fields of nested fields in an arbitrary manner. - a python `list` or a [`~datasets.Sequence`] specifies that the field contains a list of objects. The python `list` or [`~datasets.Sequence`] should be provided with a single sub-feature as an example of the feature type hosted in this list. <Tip> A [`~datasets.Sequence`] with a internal dictionary feature will be automatically converted into a dictionary of lists. This behavior is implemented to have a compatilbity layer with the TensorFlow Datasets library but may be un-wanted in some cases. If you don't want this behavior, you can use a python `list` instead of the [`~datasets.Sequence`]. </Tip> - a [`Array2D`], [`Array3D`], [`Array4D`] or [`Array5D`] feature for multidimensional arrays. - an [`Audio`] feature to store the absolute path to an audio file or a dictionary with the relative path to an audio file ("path" key) and its bytes content ("bytes" key). This feature extracts the audio data. - an [`Image`] feature to store the absolute path to an image file, an `np.ndarray` object, a `PIL.Image.Image` object or a dictionary with the relative path to an image file ("path" key) and its bytes content ("bytes" key). This feature extracts the image data. - [`~datasets.Translation`] and [`~datasets.TranslationVariableLanguages`], the two features specific to Machine Translation. """ def __init__(*args, **kwargs): # self not in the signature to allow passing self as a kwarg if not args: raise TypeError("descriptor '__init__' of 'Features' object needs an argument") self, *args = args super(Features, self).__init__(*args, **kwargs) self._column_requires_decoding: Dict[str, bool] = { col: require_decoding(feature) for col, feature in self.items() } __setitem__ = keep_features_dicts_synced(dict.__setitem__) __delitem__ = keep_features_dicts_synced(dict.__delitem__) update = keep_features_dicts_synced(dict.update) setdefault = keep_features_dicts_synced(dict.setdefault) pop = keep_features_dicts_synced(dict.pop) popitem = keep_features_dicts_synced(dict.popitem) clear = keep_features_dicts_synced(dict.clear) def __reduce__(self): return Features, (dict(self),) @property def type(self): """ Features field types. Returns: :obj:`pyarrow.DataType` """ return get_nested_type(self) @property def arrow_schema(self): """ Features schema. Returns: :obj:`pyarrow.Schema` """ hf_metadata = {"info": {"features": self.to_dict()}} return pa.schema(self.type).with_metadata({"huggingface": json.dumps(hf_metadata)}) @classmethod def from_arrow_schema(cls, pa_schema: pa.Schema) -> "Features": """ Construct [`Features`] from Arrow Schema. It also checks the schema metadata for Hugging Face Datasets features. Non-nullable fields are not supported and set to nullable. Args: pa_schema (`pyarrow.Schema`): Arrow Schema. 
Returns: [`Features`] """ # try to load features from the arrow schema metadata metadata_features = Features() if pa_schema.metadata is not None and "huggingface".encode("utf-8") in pa_schema.metadata: metadata = json.loads(pa_schema.metadata["huggingface".encode("utf-8")].decode()) if "info" in metadata and "features" in metadata["info"] and metadata["info"]["features"] is not None: metadata_features = Features.from_dict(metadata["info"]["features"]) metadata_features_schema = metadata_features.arrow_schema obj = { field.name: ( metadata_features[field.name] if field.name in metadata_features and metadata_features_schema.field(field.name) == field else generate_from_arrow_type(field.type) ) for field in pa_schema } return cls(**obj) @classmethod def from_dict(cls, dic) -> "Features": """ Construct [`Features`] from dict. Regenerate the nested feature object from a deserialized dict. We use the `_type` key to infer the dataclass name of the feature `FieldType`. It allows for a convenient constructor syntax to define features from deserialized JSON dictionaries. This function is used in particular when deserializing a [`DatasetInfo`] that was dumped to a JSON object. This acts as an analogue to [`Features.from_arrow_schema`] and handles the recursive field-by-field instantiation, but doesn't require any mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive dtypes that [`Value`] automatically performs. Args: dic (`dict[str, Any]`): Python dictionary. Returns: `Features` Example:: >>> Features.from_dict({'_type': {'dtype': 'string', 'id': None, '_type': 'Value'}}) {'_type': Value(dtype='string', id=None)} """ obj = generate_from_dict(dic) return cls(**obj) def to_dict(self): return asdict(self) def _to_yaml_list(self) -> list: # we compute the YAML list from the dict representation that is used for JSON dump yaml_data = self.to_dict() def simplify(feature: dict) -> dict: if not isinstance(feature, dict): raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}") # # sequence: -> sequence: int32 # dtype: int32 -> # if isinstance(feature.get("sequence"), dict) and list(feature["sequence"]) == ["dtype"]: feature["sequence"] = feature["sequence"]["dtype"] # # sequence: -> sequence: # struct: -> - name: foo # - name: foo -> dtype: int32 # dtype: int32 -> # if isinstance(feature.get("sequence"), dict) and list(feature["sequence"]) == ["struct"]: feature["sequence"] = feature["sequence"]["struct"] # # list: -> list: int32 # dtype: int32 -> # if isinstance(feature.get("list"), dict) and list(feature["list"]) == ["dtype"]: feature["list"] = feature["list"]["dtype"] # # list: -> list: # struct: -> - name: foo # - name: foo -> dtype: int32 # dtype: int32 -> # if isinstance(feature.get("list"), dict) and list(feature["list"]) == ["struct"]: feature["list"] = feature["list"]["struct"] # # class_label: -> class_label: # names: -> names: # - negative -> '0': negative # - positive -> '1': positive # if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), list): # server-side requirement: keys must be strings feature["class_label"]["names"] = { str(label_id): label_name for label_id, label_name in enumerate(feature["class_label"]["names"]) } return feature def to_yaml_inner(obj: Union[dict, list]) -> dict: if isinstance(obj, dict): _type = obj.pop("_type", None) if _type == "Sequence": _feature = obj.pop("feature") return simplify({"sequence": to_yaml_inner(_feature), **obj}) elif _type == "Value": 
return obj elif _type and not obj: return {"dtype": camelcase_to_snakecase(_type)} elif _type: return {"dtype": simplify({camelcase_to_snakecase(_type): obj})} else: return {"struct": [{"name": name, **to_yaml_inner(_feature)} for name, _feature in obj.items()]} elif isinstance(obj, list): return simplify({"list": simplify(to_yaml_inner(obj[0]))}) elif isinstance(obj, tuple): return to_yaml_inner(list(obj)) else: raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}") def to_yaml_types(obj: dict) -> dict: if isinstance(obj, dict): return {k: to_yaml_types(v) for k, v in obj.items()} elif isinstance(obj, list): return [to_yaml_types(v) for v in obj] elif isinstance(obj, tuple): return to_yaml_types(list(obj)) else: return obj return to_yaml_types(to_yaml_inner(yaml_data)["struct"]) @classmethod def _from_yaml_list(cls, yaml_data: list) -> "Features": yaml_data = copy.deepcopy(yaml_data) # we convert the list obtained from YAML data into the dict representation that is used for JSON dump def unsimplify(feature: dict) -> dict: if not isinstance(feature, dict): raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}") # # sequence: int32 -> sequence: # -> dtype: int32 # if isinstance(feature.get("sequence"), str): feature["sequence"] = {"dtype": feature["sequence"]} # # list: int32 -> list: # -> dtype: int32 # if isinstance(feature.get("list"), str): feature["list"] = {"dtype": feature["list"]} # # class_label: -> class_label: # names: -> names: # '0': negative -> - negative # '1': positive -> - positive # if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), dict): label_ids = sorted(feature["class_label"]["names"], key=int) if label_ids and [int(label_id) for label_id in label_ids] != list(range(int(label_ids[-1]) + 1)): raise ValueError( f"ClassLabel expected a value for all label ids [0:{int(label_ids[-1]) + 1}] but some ids are missing." ) feature["class_label"]["names"] = [feature["class_label"]["names"][label_id] for label_id in label_ids] return feature def from_yaml_inner(obj: Union[dict, list]) -> Union[dict, list]: if isinstance(obj, dict): if not obj: return {} _type = next(iter(obj)) if _type == "sequence": _feature = unsimplify(obj).pop(_type) return {"feature": from_yaml_inner(_feature), **obj, "_type": "Sequence"} if _type == "list": return [from_yaml_inner(unsimplify(obj)[_type])] if _type == "struct": return from_yaml_inner(obj["struct"]) elif _type == "dtype": if isinstance(obj["dtype"], str): # e.g. int32, float64, string, audio, image try: Value(obj["dtype"]) return {**obj, "_type": "Value"} except ValueError: # e.g. Audio, Image, ArrayXD return {"_type": snakecase_to_camelcase(obj["dtype"])} else: return from_yaml_inner(obj["dtype"]) else: return {"_type": snakecase_to_camelcase(_type), **unsimplify(obj)[_type]} elif isinstance(obj, list): names = [_feature.pop("name") for _feature in obj] return {name: from_yaml_inner(_feature) for name, _feature in zip(names, obj)} else: raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}") return cls.from_dict(from_yaml_inner(yaml_data)) def encode_example(self, example): """ Encode example into a format for Arrow. Args: example (`dict[str, Any]`): Data in a Dataset row. Returns: `dict[str, Any]` """ example = cast_to_python_objects(example) return encode_nested_example(self, example) def encode_column(self, column, column_name: str): """ Encode column into a format for Arrow. Args: column (`list[Any]`): Data in a Dataset column. 
column_name (`str`): Dataset column name. Returns: `list[Any]` """ column = cast_to_python_objects(column) return [encode_nested_example(self[column_name], obj) for obj in column] def encode_batch(self, batch): """ Encode batch into a format for Arrow. Args: batch (`dict[str, list[Any]]`): Data in a Dataset batch. Returns: `dict[str, list[Any]]` """ encoded_batch = {} if set(batch) != set(self): raise ValueError(f"Column mismatch between batch {set(batch)} and features {set(self)}") for key, column in batch.items(): column = cast_to_python_objects(column) encoded_batch[key] = [encode_nested_example(self[key], obj) for obj in column] return encoded_batch def decode_example(self, example: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None): """Decode example with custom feature decoding. Args: example (`dict[str, Any]`): Dataset row data. token_per_repo_id (`dict`, *optional*): To access and decode audio or image files from private repositories on the Hub, you can pass a dictionary `repo_id (str) -> token (bool or str)`. Returns: `dict[str, Any]` """ return { column_name: decode_nested_example(feature, value, token_per_repo_id=token_per_repo_id) if self._column_requires_decoding[column_name] else value for column_name, (feature, value) in zip_dict( {key: value for key, value in self.items() if key in example}, example ) } def decode_column(self, column: list, column_name: str): """Decode column with custom feature decoding. Args: column (`list[Any]`): Dataset column data. column_name (`str`): Dataset column name. Returns: `list[Any]` """ return ( [decode_nested_example(self[column_name], value) if value is not None else None for value in column] if self._column_requires_decoding[column_name] else column ) def decode_batch(self, batch: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None): """Decode batch with custom feature decoding. Args: batch (`dict[str, list[Any]]`): Dataset batch data. token_per_repo_id (`dict`, *optional*): To access and decode audio or image files from private repositories on the Hub, you can pass a dictionary repo_id (str) -> token (bool or str) Returns: `dict[str, list[Any]]` """ decoded_batch = {} for column_name, column in batch.items(): decoded_batch[column_name] = ( [ decode_nested_example(self[column_name], value, token_per_repo_id=token_per_repo_id) if value is not None else None for value in column ] if self._column_requires_decoding[column_name] else column ) return decoded_batch def copy(self) -> "Features": """ Make a deep copy of [`Features`]. Returns: [`Features`] Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train") >>> copy_of_features = ds.features.copy() >>> copy_of_features {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None), 'text': Value(dtype='string', id=None)} ``` """ return copy.deepcopy(self) def reorder_fields_as(self, other: "Features") -> "Features": """ Reorder Features fields to match the field order of other [`Features`]. The order of the fields is important since it matters for the underlying arrow data. Re-ordering the fields allows to make the underlying arrow data type match. Args: other ([`Features`]): The other [`Features`] to align with. 
Returns: [`Features`] Example:: >>> from datasets import Features, Sequence, Value >>> # let's say we have to features with a different order of nested fields (for a and b for example) >>> f1 = Features({"root": Sequence({"a": Value("string"), "b": Value("string")})}) >>> f2 = Features({"root": {"b": Sequence(Value("string")), "a": Sequence(Value("string"))}}) >>> assert f1.type != f2.type >>> # re-ordering keeps the base structure (here Sequence is defined at the root level), but make the fields order match >>> f1.reorder_fields_as(f2) {'root': Sequence(feature={'b': Value(dtype='string', id=None), 'a': Value(dtype='string', id=None)}, length=-1, id=None)} >>> assert f1.reorder_fields_as(f2).type == f2.type """ def recursive_reorder(source, target, stack=""): stack_position = " at " + stack[1:] if stack else "" if isinstance(target, Sequence): target = target.feature if isinstance(target, dict): target = {k: [v] for k, v in target.items()} else: target = [target] if isinstance(source, Sequence): source, id_, length = source.feature, source.id, source.length if isinstance(source, dict): source = {k: [v] for k, v in source.items()} reordered = recursive_reorder(source, target, stack) return Sequence({k: v[0] for k, v in reordered.items()}, id=id_, length=length) else: source = [source] reordered = recursive_reorder(source, target, stack) return Sequence(reordered[0], id=id_, length=length) elif isinstance(source, dict): if not isinstance(target, dict): raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position) if sorted(source) != sorted(target): message = ( f"Keys mismatch: between {source} (source) and {target} (target).\n" f"{source.keys()-target.keys()} are missing from target " f"and {target.keys()-source.keys()} are missing from source" + stack_position ) raise ValueError(message) return {key: recursive_reorder(source[key], target[key], stack + f".{key}") for key in target} elif isinstance(source, list): if not isinstance(target, list): raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position) if len(source) != len(target): raise ValueError(f"Length mismatch: between {source} and {target}" + stack_position) return [recursive_reorder(source[i], target[i], stack + ".<list>") for i in range(len(target))] else: return source return Features(recursive_reorder(self, other)) def flatten(self, max_depth=16) -> "Features": """Flatten the features. Every dictionary column is removed and is replaced by all the subfields it contains. The new fields are named by concatenating the name of the original column and the subfield name like this: `<original>.<subfield>`. If a column contains nested dictionaries, then all the lower-level subfields names are also concatenated to form new columns: `<original>.<subfield>.<subsubfield>`, etc. Returns: [`Features`]: The flattened features. 
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("squad", split="train") >>> ds.features.flatten() {'answers.answer_start': Sequence(feature=Value(dtype='int32', id=None), length=-1, id=None), 'answers.text': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None), 'context': Value(dtype='string', id=None), 'id': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None), 'title': Value(dtype='string', id=None)} ``` """ for depth in range(1, max_depth): no_change = True flattened = self.copy() for column_name, subfeature in self.items(): if isinstance(subfeature, dict): no_change = False flattened.update({f"{column_name}.{k}": v for k, v in subfeature.items()}) del flattened[column_name] elif isinstance(subfeature, Sequence) and isinstance(subfeature.feature, dict): no_change = False flattened.update( { f"{column_name}.{k}": Sequence(v) if not isinstance(v, dict) else [v] for k, v in subfeature.feature.items() } ) del flattened[column_name] elif hasattr(subfeature, "flatten") and subfeature.flatten() != subfeature: no_change = False flattened.update({f"{column_name}.{k}": v for k, v in subfeature.flatten().items()}) del flattened[column_name] self = flattened if no_change: break return self def _align_features(features_list: List[Features]) -> List[Features]: """Align dictionaries of features so that the keys that are found in multiple dictionaries share the same feature.""" name2feature = {} for features in features_list: for k, v in features.items(): if k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == "null"): name2feature[k] = v return [Features({k: name2feature[k] for k in features.keys()}) for features in features_list] def _check_if_features_can_be_aligned(features_list: List[Features]): """Check if the dictionaries of features can be aligned. Two dictonaries of features can be aligned if the keys they share have the same type or some of them is of type `Value("null")`. """ name2feature = {} for features in features_list: for k, v in features.items(): if k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == "null"): name2feature[k] = v for features in features_list: for k, v in features.items(): if not (isinstance(v, Value) and v.dtype == "null") and name2feature[k] != v: raise ValueError( f'The features can\'t be aligned because the key {k} of features {features} has unexpected type - {v} (expected either {name2feature[k]} or Value("null").' )
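# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# A minimal, hedged example of how the pieces defined above fit together:
# declaring a `Features` schema, encoding an example (ClassLabel maps the string
# label to its integer id), and recovering the features from the Arrow schema.
# The column names "text", "label" and "scores" are made up for illustration.
if __name__ == "__main__":
    features = Features(
        {
            "text": Value("string"),
            "label": ClassLabel(names=["neg", "pos"]),
            "scores": Sequence(Value("float32")),
        }
    )
    example = {"text": "great movie", "label": "pos", "scores": [0.1, 0.9]}
    encoded = features.encode_example(example)
    print(encoded)  # expected: {'text': 'great movie', 'label': 1, 'scores': [0.1, 0.9]}
    # `arrow_schema` embeds the features as JSON metadata in the schema, so an
    # equivalent `Features` object can be rebuilt from it.
    print(Features.from_arrow_schema(features.arrow_schema))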
datasets/src/datasets/features/features.py/0
{ "file_path": "datasets/src/datasets/features/features.py", "repo_id": "datasets", "token_count": 39514 }
68
from abc import ABC, abstractmethod
from typing import Optional, Union

from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike


class AbstractDatasetReader(ABC):
    """Base class for dataset readers constructed from one or several data files (or URLs)."""

    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        # Default to the "train" split unless a split is given or a dict of splits to paths is passed.
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    """Base class for dataset readers whose source is not a set of file paths (no `path_or_paths` argument)."""

    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
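# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# A hedged sketch of the expected shape of a concrete reader. `ToyDictReader`
# and its hard-coded payload are made up for illustration; the real subclasses
# (csv, json, parquet, ...) live in the sibling io modules and build a proper
# dataset builder honoring cache_dir / keep_in_memory / streaming / num_proc.
class ToyDictReader(AbstractDatasetReader):
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        # Minimal behavior: ignore the path and return a small in-memory Dataset.
        # Note that `self.split` defaults to "train" when neither a split nor a
        # dict of paths is provided (see `AbstractDatasetReader.__init__`).
        return Dataset.from_dict({"text": ["hello", "world"]}, features=self.features)


# Assumed call pattern:
#   ToyDictReader().read()  # -> in-memory Dataset with a single "text" column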
datasets/src/datasets/io/abc.py/0
{ "file_path": "datasets/src/datasets/io/abc.py", "repo_id": "datasets", "token_count": 721 }
69
import copy
import os
import warnings
from functools import partial
from itertools import groupby
from typing import TYPE_CHECKING, Callable, Iterator, List, Optional, Tuple, TypeVar, Union

import numpy as np
import pyarrow as pa
import pyarrow.compute as pc

from . import config
from .utils.logging import get_logger


if TYPE_CHECKING:
    from .features.features import Features, FeatureType


logger = get_logger(__name__)


def inject_arrow_table_documentation(arrow_table_method):
    def wrapper(fn):
        fn.__doc__ = arrow_table_method.__doc__ + (fn.__doc__ if fn.__doc__ is not None else "")
        fn.__doc__ = fn.__doc__.replace("pyarrow.Table", "Table")
        if hasattr(arrow_table_method, "__annotations__"):
            fn.__annotations__ = arrow_table_method.__annotations__
        return fn

    return wrapper


def _in_memory_arrow_table_from_file(filename: str) -> pa.Table:
    in_memory_stream = pa.input_stream(filename)
    opened_stream = pa.ipc.open_stream(in_memory_stream)
    pa_table = opened_stream.read_all()
    return pa_table


def _in_memory_arrow_table_from_buffer(buffer: pa.Buffer) -> pa.Table:
    stream = pa.BufferReader(buffer)
    opened_stream = pa.ipc.open_stream(stream)
    table = opened_stream.read_all()
    return table


def _memory_mapped_record_batch_reader_from_file(filename: str) -> pa.RecordBatchStreamReader:
    memory_mapped_stream = pa.memory_map(filename)
    return pa.ipc.open_stream(memory_mapped_stream)


def read_schema_from_file(filename: str) -> pa.Schema:
    """
    Infer the arrow table schema from a file without loading the whole file into memory.
    Useful especially for very big files.
    """
    with pa.memory_map(filename) as memory_mapped_stream:
        schema = pa.ipc.open_stream(memory_mapped_stream).schema
    return schema


def _memory_mapped_arrow_table_from_file(filename: str) -> pa.Table:
    opened_stream = _memory_mapped_record_batch_reader_from_file(filename)
    pa_table = opened_stream.read_all()
    return pa_table


def _deepcopy(x, memo: dict):
    """deepcopy a regular class instance"""
    cls = x.__class__
    result = cls.__new__(cls)
    memo[id(x)] = result
    for k, v in x.__dict__.items():
        setattr(result, k, copy.deepcopy(v, memo))
    return result


def _interpolation_search(arr: List[int], x: int) -> int:
    """
    Return the position i of a sorted array so that arr[i] <= x < arr[i+1]

    Args:
        arr (`List[int]`): non-empty sorted list of integers
        x (`int`): query

    Returns:
        `int`: the position i so that arr[i] <= x < arr[i+1]

    Raises:
        `IndexError`: if the array is empty or if the query is outside the array values
    """
    i, j = 0, len(arr) - 1
    while i < j and arr[i] <= x < arr[j]:
        k = i + ((j - i) * (x - arr[i]) // (arr[j] - arr[i]))
        if arr[k] <= x < arr[k + 1]:
            return k
        elif arr[k] < x:
            i, j = k + 1, j
        else:
            i, j = i, k
    raise IndexError(f"Invalid query '{x}' for size {arr[-1] if len(arr) else 'none'}.")


class IndexedTableMixin:
    def __init__(self, table: pa.Table):
        self._schema: pa.Schema = table.schema
        self._batches: List[pa.RecordBatch] = [
            recordbatch for recordbatch in table.to_batches() if len(recordbatch) > 0
        ]
        self._offsets: np.ndarray = np.cumsum([0] + [len(b) for b in self._batches], dtype=np.int64)

    def fast_gather(self, indices: Union[List[int], np.ndarray]) -> pa.Table:
        """
        Create a pa.Table by gathering the records at the specified indices.
Should be faster than pa.concat_tables(table.fast_slice(int(i) % table.num_rows, 1) for i in indices) since NumPy can compute the binary searches in parallel, highly optimized C """ if not len(indices): raise ValueError("Indices must be non-empty") batch_indices = np.searchsorted(self._offsets, indices, side="right") - 1 return pa.Table.from_batches( [ self._batches[batch_idx].slice(i - self._offsets[batch_idx], 1) for batch_idx, i in zip(batch_indices, indices) ], schema=self._schema, ) def fast_slice(self, offset=0, length=None) -> pa.Table: """ Slice the Table using interpolation search. The behavior is the same as `pyarrow.Table.slice` but it's significantly faster. Interpolation search is used to find the start and end indexes of the batches we want to keep. The batches to keep are then concatenated to form the sliced Table. """ if offset < 0: raise IndexError("Offset must be non-negative") elif offset >= self._offsets[-1] or (length is not None and length <= 0): return pa.Table.from_batches([], schema=self._schema) i = _interpolation_search(self._offsets, offset) if length is None or length + offset >= self._offsets[-1]: batches = self._batches[i:] batches[0] = batches[0].slice(offset - self._offsets[i]) else: j = _interpolation_search(self._offsets, offset + length - 1) batches = self._batches[i : j + 1] batches[-1] = batches[-1].slice(0, offset + length - self._offsets[j]) batches[0] = batches[0].slice(offset - self._offsets[i]) return pa.Table.from_batches(batches, schema=self._schema) class Table(IndexedTableMixin): """ Wraps a pyarrow Table by using composition. This is the base class for `InMemoryTable`, `MemoryMappedTable` and `ConcatenationTable`. It implements all the basic attributes/methods of the pyarrow Table class except the Table transforms: `slice, filter, flatten, combine_chunks, cast, add_column, append_column, remove_column, set_column, rename_columns` and `drop`. The implementation of these methods differs for the subclasses. """ def __init__(self, table: pa.Table): super().__init__(table) self.table = table def __deepcopy__(self, memo: dict): # arrow tables are immutable, so there's no need to copy self.table # moreover calling deepcopy on a pyarrow table seems to make pa.total_allocated_bytes() decrease for some reason # by adding it to the memo, self.table won't be copied memo[id(self.table)] = self.table # same for the recordbatches used by the index memo[id(self._batches)] = list(self._batches) return _deepcopy(self, memo) def validate(self, *args, **kwargs): """ Perform validation checks. An exception is raised if validation fails. By default only cheap validation checks are run. Pass `full=True` for thorough validation checks (potentially `O(n)`). Args: full (`bool`, defaults to `False`): If `True`, run expensive checks, otherwise cheap checks only. Raises: `pa.lib.ArrowInvalid`: if validation fails """ return self.table.validate(*args, **kwargs) def equals(self, *args, **kwargs): """ Check if contents of two tables are equal. Args: other ([`~datasets.table.Table`]): Table to compare against. check_metadata `bool`, defaults to `False`): Whether schema metadata equality should be checked as well. Returns: `bool` """ args = tuple(arg.table if isinstance(arg, Table) else arg for arg in args) kwargs = {k: v.table if isinstance(v, Table) else v for k, v in kwargs} return self.table.equals(*args, **kwargs) def to_batches(self, *args, **kwargs): """ Convert Table to list of (contiguous) `RecordBatch` objects. 
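# --- Illustrative sketch (not part of the original module) ---
# How the cumulative batch offsets and `_interpolation_search` cooperate: `_offsets[i]`
# is the number of rows before batch i, so the batch holding global row x is the
# position i with offsets[i] <= x < offsets[i + 1]. Assumes `datasets` is installed so
# this module can be imported as `datasets.table` (`_interpolation_search` is private
# and imported here only for illustration).
from datasets.table import InMemoryTable, _interpolation_search

offsets = [0, 3, 7, 10]  # three non-empty batches of 3, 4 and 3 rows
assert _interpolation_search(offsets, 0) == 0  # row 0 lives in batch 0
assert _interpolation_search(offsets, 5) == 1  # row 5 lives in batch 1
assert _interpolation_search(offsets, 9) == 2  # row 9 lives in batch 2

# fast_slice / fast_gather are inherited by the Table subclasses defined below,
# e.g. InMemoryTable.
table = InMemoryTable.from_pydict({"x": list(range(10))})
assert table.fast_slice(2, 3).to_pydict() == {"x": [2, 3, 4]}
assert table.fast_gather([1, 4, 7]).to_pydict() == {"x": [1, 4, 7]}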
Args: max_chunksize (`int`, defaults to `None`): Maximum size for `RecordBatch` chunks. Individual chunks may be smaller depending on the chunk layout of individual columns. Returns: `List[pyarrow.RecordBatch]` """ return self.table.to_batches(*args, **kwargs) def to_pydict(self, *args, **kwargs): """ Convert the Table to a `dict` or `OrderedDict`. Returns: `dict` """ return self.table.to_pydict(*args, **kwargs) def to_pylist(self, *args, **kwargs): """ Convert the Table to a list Returns: `list` """ try: return self.table.to_pylist(*args, **kwargs) except AttributeError: # pyarrow <7 does not have to_pylist, so we use to_pydict pydict = self.table.to_pydict(*args, **kwargs) return [{k: pydict[k][i] for k in pydict} for i in range(len(self.table))] def to_pandas(self, *args, **kwargs): """ Convert to a pandas-compatible NumPy array or DataFrame, as appropriate. Args: memory_pool (`MemoryPool`, defaults to `None`): Arrow MemoryPool to use for allocations. Uses the default memory pool is not passed. strings_to_categorical (`bool`, defaults to `False`): Encode string (UTF8) and binary types to `pandas.Categorical`. categories (`list`, defaults to `empty`): List of fields that should be returned as `pandas.Categorical`. Only applies to table-like data structures. zero_copy_only (`bool`, defaults to `False`): Raise an `ArrowException` if this function call would require copying the underlying data. integer_object_nulls (`bool`, defaults to `False`): Cast integers with nulls to objects. date_as_object (`bool`, defaults to `True`): Cast dates to objects. If `False`, convert to `datetime64[ns]` dtype. timestamp_as_object (`bool`, defaults to `False`): Cast non-nanosecond timestamps (`np.datetime64`) to objects. This is useful if you have timestamps that don't fit in the normal date range of nanosecond timestamps (1678 CE-2262 CE). If `False`, all timestamps are converted to `datetime64[ns]` dtype. use_threads (`bool`, defaults to `True`): Whether to parallelize the conversion using multiple threads. deduplicate_objects (`bool`, defaults to `False`): Do not create multiple copies Python objects when created, to save on memory use. Conversion will be slower. ignore_metadata (`bool`, defaults to `False`): If `True`, do not use the 'pandas' metadata to reconstruct the DataFrame index, if present. safe (`bool`, defaults to `True`): For certain data types, a cast is needed in order to store the data in a pandas DataFrame or Series (e.g. timestamps are always stored as nanoseconds in pandas). This option controls whether it is a safe cast or not. split_blocks (`bool`, defaults to `False`): If `True`, generate one internal "block" for each column when creating a pandas.DataFrame from a `RecordBatch` or `Table`. While this can temporarily reduce memory note that various pandas operations can trigger "consolidation" which may balloon memory use. self_destruct (`bool`, defaults to `False`): EXPERIMENTAL: If `True`, attempt to deallocate the originating Arrow memory while converting the Arrow object to pandas. If you use the object after calling `to_pandas` with this option it will crash your program. types_mapper (`function`, defaults to `None`): A function mapping a pyarrow DataType to a pandas `ExtensionDtype`. This can be used to override the default pandas type for conversion of built-in pyarrow types or in absence of `pandas_metadata` in the Table schema. 
The function receives a pyarrow DataType and is expected to return a pandas `ExtensionDtype` or `None` if the default conversion should be used for that type. If you have a dictionary mapping, you can pass `dict.get` as function. Returns: `pandas.Series` or `pandas.DataFrame`: `pandas.Series` or `pandas.DataFrame` depending on type of object """ return self.table.to_pandas(*args, **kwargs) def to_string(self, *args, **kwargs): return self.table.to_string(*args, **kwargs) def to_reader(self, max_chunksize: Optional[int] = None): """ Convert the Table to a RecordBatchReader. Note that this method is zero-copy, it merely exposes the same data under a different API. Args: max_chunksize (`int`, defaults to `None`) Maximum size for RecordBatch chunks. Individual chunks may be smaller depending on the chunk layout of individual columns. Returns: `pyarrow.RecordBatchReader` """ return self.table.to_reader(max_chunksize=max_chunksize) def field(self, *args, **kwargs): """ Select a schema field by its column name or numeric index. Args: i (`Union[int, str]`): The index or name of the field to retrieve. Returns: `pyarrow.Field` """ return self.table.field(*args, **kwargs) def column(self, *args, **kwargs): """ Select a column by its column name, or numeric index. Args: i (`Union[int, str]`): The index or name of the column to retrieve. Returns: `pyarrow.ChunkedArray` """ return self.table.column(*args, **kwargs) def itercolumns(self, *args, **kwargs): """ Iterator over all columns in their numerical order. Yields: `pyarrow.ChunkedArray` """ return self.table.itercolumns(*args, **kwargs) @property def schema(self): """ Schema of the table and its columns. Returns: `pyarrow.Schema` """ return self.table.schema @property def columns(self): """ List of all columns in numerical order. Returns: `List[pa.ChunkedArray]` """ return self.table.columns @property def num_columns(self): """ Number of columns in this table. Returns: int """ return self.table.num_columns @property def num_rows(self): """ Number of rows in this table. Due to the definition of a table, all columns have the same number of rows. Returns: int """ return self.table.num_rows @property def shape(self): """ Dimensions of the table: (#rows, #columns). Returns: `(int, int)`: Number of rows and number of columns. """ return self.table.shape @property def nbytes(self): """ Total number of bytes consumed by the elements of the table. """ return self.table.nbytes @property def column_names(self): """ Names of the table's columns. """ return self.table.column_names def __eq__(self, other): return self.equals(other) def __getitem__(self, i): return self.table[i] def __len__(self): return len(self.table) def __repr__(self): return self.table.__repr__().replace("pyarrow.Table", self.__class__.__name__) def __str__(self): return self.table.__str__().replace("pyarrow.Table", self.__class__.__name__) def slice(self, *args, **kwargs): """ Compute zero-copy slice of this Table. Args: offset (`int`, defaults to `0`): Offset from start of table to slice. length (`int`, defaults to `None`): Length of slice (default is until end of table starting from offset). Returns: `datasets.table.Table` """ raise NotImplementedError() def filter(self, *args, **kwargs): """ Select records from a Table. See `pyarrow.compute.filter` for full usage. """ raise NotImplementedError() def flatten(self, *args, **kwargs): """ Flatten this Table. Each column with a struct type is flattened into one column per struct field. Other columns are left unchanged. 
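# --- Illustrative sketch (not part of the original module) ---
# What "flatten" means for a struct column, shown with plain pyarrow; the wrapper
# above only defines the interface, and the concrete subclasses below delegate to
# `table_flatten` / `pyarrow.Table.flatten`.
import pyarrow as pa

pa_table = pa.table({"info": [{"height": 180, "name": "ada"}, {"height": 170, "name": "bob"}]})
flat = pa_table.flatten()
# the struct column is replaced by one column per field
assert set(flat.column_names) == {"info.height", "info.name"}
assert flat.num_rows == pa_table.num_rows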
Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ raise NotImplementedError() def combine_chunks(self, *args, **kwargs): """ Make a new table by combining the chunks this table has. All the underlying chunks in the `ChunkedArray` of each column are concatenated into zero or one chunk. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ raise NotImplementedError() def cast(self, *args, **kwargs): """ Cast table values to another schema. Args: target_schema (`Schema`): Schema to cast to, the names and order of fields must match. safe (`bool`, defaults to `True`): Check for overflows or other unsafe conversions. Returns: `datasets.table.Table` """ raise NotImplementedError() def replace_schema_metadata(self, *args, **kwargs): """ EXPERIMENTAL: Create shallow copy of table by replacing schema key-value metadata with the indicated new metadata (which may be None, which deletes any existing metadata Args: metadata (`dict`, defaults to `None`): Returns: `datasets.table.Table`: shallow_copy """ raise NotImplementedError() def add_column(self, *args, **kwargs): """ Add column to Table at position. A new table is returned with the column added, the original table object is left unchanged. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ raise NotImplementedError() def append_column(self, *args, **kwargs): """ Append column at end of columns. Args: field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ raise NotImplementedError() def remove_column(self, *args, **kwargs): """ Create new Table with the indicated column removed. Args: i (`int`): Index of column to remove. Returns: `datasets.table.Table`: New table without the column. """ raise NotImplementedError() def set_column(self, *args, **kwargs): """ Replace column in Table at position. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column set. """ raise NotImplementedError() def rename_columns(self, *args, **kwargs): """ Create new table with columns renamed to provided names. """ raise NotImplementedError() def drop(self, *args, **kwargs): """ Drop one or more columns and return a new table. Args: columns (`List[str]`): List of field names referencing existing columns. Raises: `KeyError` : if any of the passed columns name are not existing. Returns: `datasets.table.Table`: New table without the columns. """ raise NotImplementedError() def select(self, *args, **kwargs): """ Select columns of the table. Returns a new table with the specified columns, and metadata preserved. Args: columns (:obj:`Union[List[str], List[int]]`): The column names or integer indices to select. 
Returns: `datasets.table.Table`: table with only a subset of the columns """ raise NotImplementedError() class TableBlock(Table): """ `TableBlock` is the allowed class inside a `ConcanetationTable`. Only `MemoryMappedTable` and `InMemoryTable` are `TableBlock`. This is because we don't want a `ConcanetationTable` made out of other `ConcanetationTables`. """ pass class InMemoryTable(TableBlock): """ The table is said in-memory when it is loaded into the user's RAM. Pickling it does copy all the data using memory. Its implementation is simple and uses the underlying pyarrow Table methods directly. This is different from the `MemoryMapped` table, for which pickling doesn't copy all the data in memory. For a `MemoryMapped`, unpickling instead reloads the table from the disk. `InMemoryTable` must be used when data fit in memory, while `MemoryMapped` are reserved for data bigger than memory or when you want the memory footprint of your application to stay low. """ @classmethod def from_file(cls, filename: str): table = _in_memory_arrow_table_from_file(filename) return cls(table) @classmethod def from_buffer(cls, buffer: pa.Buffer): table = _in_memory_arrow_table_from_buffer(buffer) return cls(table) @classmethod def from_pandas(cls, *args, **kwargs): """ Convert pandas.DataFrame to an Arrow Table. The column types in the resulting Arrow Table are inferred from the dtypes of the pandas.Series in the DataFrame. In the case of non-object Series, the NumPy dtype is translated to its Arrow equivalent. In the case of `object`, we need to guess the datatype by looking at the Python objects in this Series. Be aware that Series of the `object` dtype don't carry enough information to always lead to a meaningful Arrow type. In the case that we cannot infer a type, e.g. because the DataFrame is of length 0 or the Series only contains `None/nan` objects, the type is set to null. This behavior can be avoided by constructing an explicit schema and passing it to this function. Args: df (`pandas.DataFrame`): schema (`pyarrow.Schema`, *optional*): The expected schema of the Arrow Table. This can be used to indicate the type of columns if we cannot infer it automatically. If passed, the output will have exactly this schema. Columns specified in the schema that are not found in the DataFrame columns or its index will raise an error. Additional columns or index levels in the DataFrame which are not specified in the schema will be ignored. preserve_index (`bool`, *optional*): Whether to store the index as an additional column in the resulting `Table`. The default of None will store the index as a column, except for RangeIndex which is stored as metadata only. Use `preserve_index=True` to force it to be stored as a column. nthreads (`int`, defaults to `None` (may use up to system CPU count threads)) If greater than 1, convert columns to Arrow in parallel using indicated number of threads. columns (`List[str]`, *optional*): List of column to be converted. If `None`, use all columns. safe (`bool`, defaults to `True`): Check for overflows or other unsafe conversions, Returns: `datasets.table.Table`: Examples: ```python >>> import pandas as pd >>> import pyarrow as pa >>> df = pd.DataFrame({ ... 'int': [1, 2], ... 'str': ['a', 'b'] ... }) >>> pa.Table.from_pandas(df) <pyarrow.lib.Table object at 0x7f05d1fb1b40> ``` """ return cls(pa.Table.from_pandas(*args, **kwargs)) @classmethod def from_arrays(cls, *args, **kwargs): """ Construct a Table from Arrow arrays. 
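# --- Illustrative sketch (not part of the original module) ---
# Building an InMemoryTable and round-tripping it through an Arrow IPC stream buffer;
# the `from_*` constructors above are thin wrappers around the matching pyarrow ones.
import pyarrow as pa

from datasets.table import InMemoryTable

table = InMemoryTable.from_pydict({"id": [1, 2, 3], "text": ["a", "b", "c"]})
assert isinstance(table.table, pa.Table) and table.num_rows == 3

# serialize to an in-memory stream and load it back with from_buffer
sink = pa.BufferOutputStream()
with pa.ipc.new_stream(sink, table.table.schema) as writer:
    writer.write_table(table.table)
reloaded = InMemoryTable.from_buffer(sink.getvalue())
assert reloaded.to_pydict() == table.to_pydict()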
Args: arrays (`List[Union[pyarrow.Array, pyarrow.ChunkedArray]]`): Equal-length arrays that should form the table. names (`List[str]`, *optional*): Names for the table columns. If not passed, schema must be passed. schema (`Schema`, defaults to `None`): Schema for the created table. If not passed, names must be passed. metadata (`Union[dict, Mapping]`, defaults to `None`): Optional metadata for the schema (if inferred). Returns: `datasets.table.Table` """ return cls(pa.Table.from_arrays(*args, **kwargs)) @classmethod def from_pydict(cls, *args, **kwargs): """ Construct a Table from Arrow arrays or columns. Args: mapping (`Union[dict, Mapping]`): A mapping of strings to Arrays or Python lists. schema (`Schema`, defaults to `None`): If not passed, will be inferred from the Mapping values metadata (`Union[dict, Mapping]`, defaults to `None`): Optional metadata for the schema (if inferred). Returns: `datasets.table.Table` """ return cls(pa.Table.from_pydict(*args, **kwargs)) @classmethod def from_pylist(cls, mapping, *args, **kwargs): """ Construct a Table from list of rows / dictionaries. Args: mapping (`List[dict]`): A mapping of strings to row values. schema (`Schema`, defaults to `None`): If not passed, will be inferred from the Mapping values metadata (`Union[dict, Mapping]`, defaults to `None`): Optional metadata for the schema (if inferred). Returns: `datasets.table.Table` """ return cls(pa.Table.from_pylist(mapping, *args, **kwargs)) @classmethod def from_batches(cls, *args, **kwargs): """ Construct a Table from a sequence or iterator of Arrow `RecordBatches`. Args: batches (`Union[Sequence[pyarrow.RecordBatch], Iterator[pyarrow.RecordBatch]]`): Sequence of `RecordBatch` to be converted, all schemas must be equal. schema (`Schema`, defaults to `None`): If not passed, will be inferred from the first `RecordBatch`. Returns: `datasets.table.Table`: """ return cls(pa.Table.from_batches(*args, **kwargs)) def slice(self, offset=0, length=None): """ Compute zero-copy slice of this Table. Args: offset (`int`, defaults to `0`): Offset from start of table to slice. length (`int`, defaults to `None`): Length of slice (default is until end of table starting from offset). Returns: `datasets.table.Table` """ # Use fast slicing here return InMemoryTable(self.fast_slice(offset=offset, length=length)) def filter(self, *args, **kwargs): """ Select records from a Table. See `pyarrow.compute.filter` for full usage. """ return InMemoryTable(self.table.filter(*args, **kwargs)) def flatten(self, *args, **kwargs): """ Flatten this Table. Each column with a struct type is flattened into one column per struct field. Other columns are left unchanged. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ return InMemoryTable(table_flatten(self.table, *args, **kwargs)) def combine_chunks(self, *args, **kwargs): """ Make a new table by combining the chunks this table has. All the underlying chunks in the `ChunkedArray` of each column are concatenated into zero or one chunk. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ return InMemoryTable(self.table.combine_chunks(*args, **kwargs)) def cast(self, *args, **kwargs): """ Cast table values to another schema. Args: target_schema (`Schema`): Schema to cast to, the names and order of fields must match. 
safe (`bool`, defaults to `True`): Check for overflows or other unsafe conversions. Returns: `datasets.table.Table` """ return InMemoryTable(table_cast(self.table, *args, **kwargs)) def replace_schema_metadata(self, *args, **kwargs): """ EXPERIMENTAL: Create shallow copy of table by replacing schema key-value metadata with the indicated new metadata (which may be `None`, which deletes any existing metadata). Args: metadata (`dict`, defaults to `None`): Returns: `datasets.table.Table`: shallow_copy """ return InMemoryTable(self.table.replace_schema_metadata(*args, **kwargs)) def add_column(self, *args, **kwargs): """ Add column to Table at position. A new table is returned with the column added, the original table object is left unchanged. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ return InMemoryTable(self.table.add_column(*args, **kwargs)) def append_column(self, *args, **kwargs): """ Append column at end of columns. Args: field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ return InMemoryTable(self.table.append_column(*args, **kwargs)) def remove_column(self, *args, **kwargs): """ Create new Table with the indicated column removed. Args: i (`int`): Index of column to remove. Returns: `datasets.table.Table`: New table without the column. """ return InMemoryTable(self.table.remove_column(*args, **kwargs)) def set_column(self, *args, **kwargs): """ Replace column in Table at position. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column set. """ return InMemoryTable(self.table.set_column(*args, **kwargs)) def rename_columns(self, *args, **kwargs): """ Create new table with columns renamed to provided names. """ return InMemoryTable(self.table.rename_columns(*args, **kwargs)) def drop(self, *args, **kwargs): """ Drop one or more columns and return a new table. Args: columns (`List[str]`): List of field names referencing existing columns. Raises: `KeyError` : if any of the passed columns name are not existing. Returns: `datasets.table.Table`: New table without the columns. """ return InMemoryTable(self.table.drop(*args, **kwargs)) def select(self, *args, **kwargs): """ Select columns of the table. Returns a new table with the specified columns, and metadata preserved. Args: columns (:obj:`Union[List[str], List[int]]`): The column names or integer indices to select. Returns: :class:`datasets.table.Table`: New table with the specified columns, and metadata preserved. """ return InMemoryTable(self.table.select(*args, **kwargs)) # The MemoryMappedTable needs replays to properly reload tables from the disk Replay = Tuple[str, tuple, dict] class MemoryMappedTable(TableBlock): """ The table is said memory mapped when it doesn't use the user's RAM but loads the data from the disk instead. Pickling it doesn't copy the data into memory. 
Instead, only the path to the memory mapped arrow file is pickled, as well as the list of transforms to "replay" when reloading the table from the disk. Its implementation requires to store an history of all the transforms that were applied to the underlying pyarrow Table, so that they can be "replayed" when reloading the Table from the disk. This is different from the `InMemoryTable` table, for which pickling does copy all the data in memory. `InMemoryTable` must be used when data fit in memory, while `MemoryMapped` are reserved for data bigger than memory or when you want the memory footprint of your application to stay low. """ def __init__(self, table: pa.Table, path: str, replays: Optional[List[Replay]] = None): super().__init__(table) self.path = os.path.abspath(path) self.replays: List[Replay] = replays if replays is not None else [] @classmethod def from_file(cls, filename: str, replays=None): table = _memory_mapped_arrow_table_from_file(filename) table = cls._apply_replays(table, replays) return cls(table, filename, replays) def __getstate__(self): return {"path": self.path, "replays": self.replays} def __setstate__(self, state): path = state["path"] replays = state["replays"] table = _memory_mapped_arrow_table_from_file(path) table = self._apply_replays(table, replays) MemoryMappedTable.__init__(self, table, path=path, replays=replays) @staticmethod def _apply_replays(table: pa.Table, replays: Optional[List[Replay]] = None) -> pa.Table: if replays is not None: for name, args, kwargs in replays: if name == "cast": table = table_cast(table, *args, **kwargs) elif name == "flatten": table = table_flatten(table, *args, **kwargs) else: table = getattr(table, name)(*args, **kwargs) return table def _append_replay(self, replay: Replay) -> List[Replay]: replays = copy.deepcopy(self.replays) replays.append(replay) return replays def slice(self, offset=0, length=None): """ Compute zero-copy slice of this Table. Args: offset (`int`, defaults to `0`): Offset from start of table to slice. length (`int`, defaults to `None`): Length of slice (default is until end of table starting from offset). Returns: `datasets.table.Table` """ replay = ("slice", (offset, length), {}) replays = self._append_replay(replay) # Use fast slicing here return MemoryMappedTable(self.fast_slice(offset=offset, length=length), self.path, replays) def filter(self, *args, **kwargs): """ Select records from a Table. See `pyarrow.compute.filter` for full usage. """ replay = ("filter", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.filter(*args, **kwargs), self.path, replays) def flatten(self, *args, **kwargs): """ Flatten this Table. Each column with a struct type is flattened into one column per struct field. Other columns are left unchanged. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ replay = ("flatten", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(table_flatten(self.table, *args, **kwargs), self.path, replays) def combine_chunks(self, *args, **kwargs): """ Make a new table by combining the chunks this table has. All the underlying chunks in the ChunkedArray of each column are concatenated into zero or one chunk. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. 
Returns: `datasets.table.Table` """ replay = ("combine_chunks", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.combine_chunks(*args, **kwargs), self.path, replays) def cast(self, *args, **kwargs): """ Cast table values to another schema Args: target_schema (`Schema`): Schema to cast to, the names and order of fields must match. safe (`bool`, defaults to `True`): Check for overflows or other unsafe conversions. Returns: `datasets.table.Table` """ replay = ("cast", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(table_cast(self.table, *args, **kwargs), self.path, replays) def replace_schema_metadata(self, *args, **kwargs): """ EXPERIMENTAL: Create shallow copy of table by replacing schema key-value metadata with the indicated new metadata (which may be None, which deletes any existing metadata. Args: metadata (`dict`, defaults to `None`): Returns: `datasets.table.Table`: shallow_copy """ replay = ("replace_schema_metadata", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.replace_schema_metadata(*args, **kwargs), self.path, replays) def add_column(self, *args, **kwargs): """ Add column to Table at position. A new table is returned with the column added, the original table object is left unchanged. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ replay = ("add_column", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.add_column(*args, **kwargs), self.path, replays) def append_column(self, *args, **kwargs): """ Append column at end of columns. Args: field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ replay = ("append_column", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.append_column(*args, **kwargs), self.path, replays) def remove_column(self, *args, **kwargs): """ Create new Table with the indicated column removed. Args: i (`int`): Index of column to remove. Returns: `datasets.table.Table`: New table without the column. """ replay = ("remove_column", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.remove_column(*args, **kwargs), self.path, replays) def set_column(self, *args, **kwargs): """ Replace column in Table at position. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column set. """ replay = ("set_column", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.set_column(*args, **kwargs), self.path, replays) def rename_columns(self, *args, **kwargs): """ Create new table with columns renamed to provided names. 
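# --- Illustrative sketch (not part of the original module) ---
# Why replays matter: pickling a MemoryMappedTable keeps only the file path and the
# (name, args, kwargs) replays, so transforms are re-applied on unpickling instead of
# serializing the data. The file written below uses a made-up path for the example.
import pickle

import pyarrow as pa

from datasets.table import MemoryMappedTable

path = "tmp_replay_example.arrow"  # hypothetical scratch file
with pa.OSFile(path, "wb") as sink:
    with pa.ipc.new_stream(sink, pa.schema({"x": pa.int64()})) as writer:
        writer.write_table(pa.table({"x": list(range(10))}))

table = MemoryMappedTable.from_file(path).slice(2, 5)
assert table.replays == [("slice", (2, 5), {})]

restored = pickle.loads(pickle.dumps(table))  # reloads from disk, then replays "slice"
assert restored.to_pydict() == {"x": [2, 3, 4, 5, 6]}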
""" replay = ("rename_columns", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.rename_columns(*args, **kwargs), self.path, replays) def drop(self, *args, **kwargs): """ Drop one or more columns and return a new table. Args: columns (`List[str]`): List of field names referencing existing columns. Raises: `KeyError` : if any of the passed columns name are not existing. Returns: `datasets.table.Table`: New table without the columns. """ replay = ("drop", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.drop(*args, **kwargs), self.path, replays) def select(self, *args, **kwargs): """ Select columns of the table. Returns a new table with the specified columns, and metadata preserved. Args: columns (:obj:`Union[List[str], List[int]]`): The column names or integer indices to select. Returns: :class:`datasets.table.Table`: New table with the specified columns, and metadata preserved. """ replay = ("select", copy.deepcopy(args), copy.deepcopy(kwargs)) replays = self._append_replay(replay) return MemoryMappedTable(self.table.select(*args, **kwargs), self.path, replays) # A ConcatenationTable is the concatenation of several tables. # The ``blocks`` attributes stores a list of list of blocks. # The first axis concatenates the tables along the axis 0 (it appends rows), # while the second axis concatenates tables along the axis 1 (it appends columns). TableBlockContainer = TypeVar("TableBlockContainer", TableBlock, List[TableBlock], List[List[TableBlock]]) class ConcatenationTable(Table): """ The table comes from the concatenation of several tables called blocks. It enables concatenation on both axis 0 (append rows) and axis 1 (append columns). The underlying tables are called "blocks" and can be either `InMemoryTable` or `MemoryMappedTable` objects. This allows to combine tables that come from memory or that are memory mapped. When a `ConcatenationTable` is pickled, then each block is pickled: - the `InMemoryTable` objects are pickled by copying all the data in memory. - the MemoryMappedTable objects are pickled without copying the data into memory. Instead, only the path to the memory mapped arrow file is pickled, as well as the list of transforms to "replays" when reloading the table from the disk. Its implementation requires to store each block separately. The `blocks` attributes stores a list of list of blocks. The first axis concatenates the tables along the axis 0 (it appends rows), while the second axis concatenates tables along the axis 1 (it appends columns). If some columns are missing when concatenating on axis 0, they are filled with null values. This is done using `pyarrow.concat_tables(tables, promote=True)`. You can access the fully combined table by accessing the `ConcatenationTable.table` attribute, and the blocks by accessing the `ConcatenationTable.blocks` attribute. """ def __init__(self, table: pa.Table, blocks: List[List[TableBlock]]): super().__init__(table) self.blocks = blocks # Check that all the blocks have the right type. # Only InMemoryTable and MemoryMappedTable are allowed. for subtables in blocks: for subtable in subtables: if not isinstance(subtable, TableBlock): raise TypeError( "The blocks of a ConcatenationTable must be InMemoryTable or MemoryMappedTable objects" f", but got {subtable}." 
) def __getstate__(self): return {"blocks": self.blocks} def __setstate__(self, state): blocks = state["blocks"] table = self._concat_blocks_horizontally_and_vertically(blocks) ConcatenationTable.__init__(self, table, blocks=blocks) @staticmethod def _concat_blocks(blocks: List[Union[TableBlock, pa.Table]], axis: int = 0) -> pa.Table: pa_tables = [table.table if hasattr(table, "table") else table for table in blocks] if axis == 0: # we set promote=True to fill missing columns with null values if config.PYARROW_VERSION.major < 14: return pa.concat_tables(pa_tables, promote=True) else: return pa.concat_tables(pa_tables, promote_options="default") elif axis == 1: for i, table in enumerate(pa_tables): if i == 0: pa_table = table else: for name, col in zip(table.column_names, table.columns): pa_table = pa_table.append_column(name, col) return pa_table else: raise ValueError("'axis' must be either 0 or 1") @classmethod def _concat_blocks_horizontally_and_vertically(cls, blocks: List[List[TableBlock]]) -> pa.Table: pa_tables_to_concat_vertically = [] for i, tables in enumerate(blocks): if not tables: continue pa_table_horizontally_concatenated = cls._concat_blocks(tables, axis=1) pa_tables_to_concat_vertically.append(pa_table_horizontally_concatenated) return cls._concat_blocks(pa_tables_to_concat_vertically, axis=0) @classmethod def _merge_blocks(cls, blocks: TableBlockContainer, axis: Optional[int] = None) -> TableBlockContainer: if axis is not None: merged_blocks = [] for is_in_memory, block_group in groupby(blocks, key=lambda x: isinstance(x, InMemoryTable)): if is_in_memory: block_group = [InMemoryTable(cls._concat_blocks(list(block_group), axis=axis))] merged_blocks += list(block_group) else: # both merged_blocks = [cls._merge_blocks(row_block, axis=1) for row_block in blocks] if all(len(row_block) == 1 for row_block in merged_blocks): merged_blocks = cls._merge_blocks( [block for row_block in merged_blocks for block in row_block], axis=0 ) return merged_blocks @classmethod def _consolidate_blocks(cls, blocks: TableBlockContainer) -> TableBlockContainer: if isinstance(blocks, TableBlock): return blocks elif isinstance(blocks[0], TableBlock): return cls._merge_blocks(blocks, axis=0) else: return cls._merge_blocks(blocks) @classmethod def from_blocks(cls, blocks: TableBlockContainer) -> "ConcatenationTable": blocks = cls._consolidate_blocks(blocks) if isinstance(blocks, TableBlock): table = blocks return cls(table.table, [[table]]) elif isinstance(blocks[0], TableBlock): table = cls._concat_blocks(blocks, axis=0) blocks = [[t] for t in blocks] return cls(table, blocks) else: table = cls._concat_blocks_horizontally_and_vertically(blocks) return cls(table, blocks) @classmethod def from_tables(cls, tables: List[Union[pa.Table, Table]], axis: int = 0) -> "ConcatenationTable": """Create `ConcatenationTable` from list of tables. Args: tables (list of `Table` or list of `pyarrow.Table`): List of tables. axis (`{0, 1}`, defaults to `0`, meaning over rows): Axis to concatenate over, where `0` means over rows (vertically) and `1` means over columns (horizontally). 
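# --- Illustrative sketch (not part of the original module) ---
# `from_tables` with axis=0 stacks rows and with axis=1 appends columns; the block
# layout is preserved, so memory-mapped blocks stay memory mapped.
from datasets.table import ConcatenationTable, InMemoryTable

t1 = InMemoryTable.from_pydict({"a": [1, 2]})
t2 = InMemoryTable.from_pydict({"a": [3, 4]})
t3 = InMemoryTable.from_pydict({"b": ["x", "y", "z", "w"]})

rows = ConcatenationTable.from_tables([t1, t2], axis=0)  # 4 rows, 1 column
cols = ConcatenationTable.from_tables([rows, t3], axis=1)  # 4 rows, 2 columns
assert rows.shape == (4, 1)
assert cols.column_names == ["a", "b"]
assert cols.to_pydict() == {"a": [1, 2, 3, 4], "b": ["x", "y", "z", "w"]}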
<Added version="1.6.0"/> """ def to_blocks(table: Union[pa.Table, Table]) -> List[List[TableBlock]]: if isinstance(table, pa.Table): return [[InMemoryTable(table)]] elif isinstance(table, ConcatenationTable): return copy.deepcopy(table.blocks) else: return [[table]] def _slice_row_block(row_block: List[TableBlock], length: int) -> Tuple[List[TableBlock], List[TableBlock]]: sliced = [table.slice(0, length) for table in row_block] remainder = [table.slice(length, len(row_block[0]) - length) for table in row_block] return sliced, remainder def _split_both_like( result: List[List[TableBlock]], blocks: List[List[TableBlock]] ) -> Tuple[List[List[TableBlock]], List[List[TableBlock]]]: """ Make sure each row_block contain the same num_rows to be able to concatenate them on axis=1. To do so, we modify both blocks sets to have the same row_blocks boundaries. For example, if `result` has 2 row_blocks of 3 rows and `blocks` has 3 row_blocks of 2 rows, we modify both to have 4 row_blocks of size 2, 1, 1 and 2: [ x x x | x x x ] + [ y y | y y | y y ] ----------------------------- = [ x x | x | x | x x ] [ y y | y | y | y y ] """ result, blocks = list(result), list(blocks) new_result, new_blocks = [], [] while result and blocks: # we slice the longest row block to save two row blocks of same length # and we replace the long row block by its remainder if necessary if len(result[0][0]) > len(blocks[0][0]): new_blocks.append(blocks[0]) sliced, result[0] = _slice_row_block(result[0], len(blocks.pop(0)[0])) new_result.append(sliced) elif len(result[0][0]) < len(blocks[0][0]): new_result.append(result[0]) sliced, blocks[0] = _slice_row_block(blocks[0], len(result.pop(0)[0])) new_blocks.append(sliced) else: new_result.append(result.pop(0)) new_blocks.append(blocks.pop(0)) if result or blocks: raise ValueError("Failed to concatenate on axis=1 because tables don't have the same number of rows") return new_result, new_blocks def _extend_blocks( result: List[List[TableBlock]], blocks: List[List[TableBlock]], axis: int = 0 ) -> List[List[TableBlock]]: if axis == 0: result.extend(blocks) elif axis == 1: # We make sure each row_block have the same num_rows result, blocks = _split_both_like(result, blocks) for i, row_block in enumerate(blocks): result[i].extend(row_block) return result blocks = to_blocks(tables[0]) for table in tables[1:]: table_blocks = to_blocks(table) blocks = _extend_blocks(blocks, table_blocks, axis=axis) return cls.from_blocks(blocks) @property def _slices(self): offset = 0 for tables in self.blocks: length = len(tables[0]) yield (offset, length) offset += length def slice(self, offset=0, length=None): """ Compute zero-copy slice of this Table. Args: offset (`int`, defaults to `0`): Offset from start of table to slice. length (`int`, defaults to `None`): Length of slice (default is until end of table starting from offset). Returns: `datasets.table.Table` """ table = self.table.slice(offset, length=length) length = length if length is not None else self.num_rows - offset blocks = [] for tables in self.blocks: n_rows = len(tables[0]) if length == 0: break elif n_rows <= offset: offset = offset - n_rows elif n_rows <= offset + length: blocks.append([t.slice(offset) for t in tables]) length, offset = length + offset - n_rows, 0 else: blocks.append([t.slice(offset, length) for t in tables]) length, offset = 0, 0 return ConcatenationTable(table, blocks) def filter(self, mask, *args, **kwargs): """ Select records from a Table. See `pyarrow.compute.filter` for full usage. 
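# --- Illustrative sketch (not part of the original module) ---
# The idea behind `_split_both_like` above, re-implemented on plain row counts: two
# partitions of the same total number of rows are re-cut on the union of their
# boundaries so the pieces can be zipped for axis=1 concatenation. This is a
# simplified stand-in written for this example, not the library helper itself.
def align_partitions(a, b):
    out_a, out_b = [], []
    ia, ib = 0, 0
    ra, rb = a[0], b[0]
    while True:
        step = min(ra, rb)  # cut both partitions at the nearer boundary
        out_a.append(step)
        out_b.append(step)
        ra -= step
        rb -= step
        if ra == 0:
            ia += 1
            if ia == len(a):
                break
            ra = a[ia]
        if rb == 0:
            ib += 1
            rb = b[ib]
    return out_a, out_b


# [3, 3] rows vs [2, 2, 2] rows -> both become [2, 1, 1, 2], as in the diagram above
assert align_partitions([3, 3], [2, 2, 2]) == ([2, 1, 1, 2], [2, 1, 1, 2])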
""" table = self.table.filter(mask, *args, **kwargs) blocks = [] for (offset, length), tables in zip(self._slices, self.blocks): submask = mask.slice(offset, length) blocks.append([t.filter(submask, *args, **kwargs) for t in tables]) return ConcatenationTable(table, blocks) def flatten(self, *args, **kwargs): """ Flatten this Table. Each column with a struct type is flattened into one column per struct field. Other columns are left unchanged. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ table = table_flatten(self.table, *args, **kwargs) blocks = [] for tables in self.blocks: blocks.append([t.flatten(*args, **kwargs) for t in tables]) return ConcatenationTable(table, blocks) def combine_chunks(self, *args, **kwargs): """ Make a new table by combining the chunks this table has. All the underlying chunks in the `ChunkedArray` of each column are concatenated into zero or one chunk. Args: memory_pool (`MemoryPool`, defaults to `None`): For memory allocations, if required, otherwise use default pool. Returns: `datasets.table.Table` """ table = self.table.combine_chunks(*args, **kwargs) blocks = [] for tables in self.blocks: blocks.append([t.combine_chunks(*args, **kwargs) for t in tables]) return ConcatenationTable(table, blocks) def cast(self, target_schema, *args, **kwargs): """ Cast table values to another schema. Args: target_schema (`Schema`): Schema to cast to, the names and order of fields must match. safe (`bool`, defaults to `True`): Check for overflows or other unsafe conversions. Returns: `datasets.table.Table` """ from .features import Features table = table_cast(self.table, target_schema, *args, **kwargs) target_features = Features.from_arrow_schema(target_schema) blocks = [] for subtables in self.blocks: new_tables = [] fields = list(target_schema) for subtable in subtables: subfields = [] for name in subtable.column_names: subfields.append(fields.pop(next(i for i, field in enumerate(fields) if field.name == name))) subfeatures = Features({subfield.name: target_features[subfield.name] for subfield in subfields}) subschema = subfeatures.arrow_schema new_tables.append(subtable.cast(subschema, *args, **kwargs)) blocks.append(new_tables) return ConcatenationTable(table, blocks) def replace_schema_metadata(self, *args, **kwargs): """ EXPERIMENTAL: Create shallow copy of table by replacing schema key-value metadata with the indicated new metadata (which may be `None`, which deletes any existing metadata). Args: metadata (`dict`, defaults to `None`): Returns: `datasets.table.Table`: shallow_copy """ table = self.table.replace_schema_metadata(*args, **kwargs) blocks = [] for tables in self.blocks: blocks.append([t.replace_schema_metadata(*args, **kwargs) for t in tables]) return ConcatenationTable(table, self.blocks) def add_column(self, *args, **kwargs): """ Add column to Table at position. A new table is returned with the column added, the original table object is left unchanged. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ raise NotImplementedError() def append_column(self, *args, **kwargs): """ Append column at end of columns. 
Args: field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column added. """ raise NotImplementedError() def remove_column(self, i, *args, **kwargs): """ Create new Table with the indicated column removed. Args: i (`int`): Index of column to remove. Returns: `datasets.table.Table`: New table without the column. """ table = self.table.remove_column(i, *args, **kwargs) name = self.table.column_names[i] blocks = [] for tables in self.blocks: blocks.append( [ t.remove_column(t.column_names.index(name), *args, **kwargs) if name in t.column_names else t for t in tables ] ) return ConcatenationTable(table, blocks) def set_column(self, *args, **kwargs): """ Replace column in Table at position. Args: i (`int`): Index to place the column at. field_ (`Union[str, pyarrow.Field]`): If a string is passed then the type is deduced from the column data. column (`Union[pyarrow.Array, List[pyarrow.Array]]`): Column data. Returns: `datasets.table.Table`: New table with the passed column set. """ raise NotImplementedError() def rename_columns(self, names, *args, **kwargs): """ Create new table with columns renamed to provided names. """ table = self.table.rename_columns(names, *args, **kwargs) names = dict(zip(self.table.column_names, names)) blocks = [] for tables in self.blocks: blocks.append( [t.rename_columns([names[name] for name in t.column_names], *args, **kwargs) for t in tables] ) return ConcatenationTable(table, blocks) def drop(self, columns, *args, **kwargs): """ Drop one or more columns and return a new table. Args: columns (`List[str]`): List of field names referencing existing columns. Raises: `KeyError` : if any of the passed columns name are not existing. Returns: `datasets.table.Table`: New table without the columns. """ table = self.table.drop(columns, *args, **kwargs) blocks = [] for tables in self.blocks: blocks.append([t.drop([c for c in columns if c in t.column_names], *args, **kwargs) for t in tables]) return ConcatenationTable(table, blocks) def select(self, columns, *args, **kwargs): """ Select columns of the table. Returns a new table with the specified columns, and metadata preserved. Args: columns (:obj:`Union[List[str], List[int]]`): The column names or integer indices to select. Returns: :class:`datasets.table.Table`: New table with the specified columns, and metadata preserved. """ table = self.table.select(columns, *args, **kwargs) blocks = [] for tables in self.blocks: blocks.append([t.select([c for c in columns if c in t.column_names], *args, **kwargs) for t in tables]) return ConcatenationTable(table, blocks) def concat_tables(tables: List[Table], axis: int = 0) -> Table: """ Concatenate tables. Args: tables (list of `Table`): List of tables to be concatenated. axis (`{0, 1}`, defaults to `0`, meaning over rows): Axis to concatenate over, where `0` means over rows (vertically) and `1` means over columns (horizontally). <Added version="1.6.0"/> Returns: `datasets.table.Table`: If the number of input tables is > 1, then the returned table is a `datasets.table.ConcatenationTable`. Otherwise if there's only one table, it is returned as is. """ tables = list(tables) if len(tables) == 1: return tables[0] return ConcatenationTable.from_tables(tables, axis=axis) def list_table_cache_files(table: Table) -> List[str]: """ Get the cache files that are loaded by the table. 
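# --- Illustrative sketch (not part of the original module) ---
# `concat_tables` returns a single input table untouched and a ConcatenationTable
# otherwise; `list_table_cache_files` reports the memory-mapped arrow files backing a
# table (empty for purely in-memory tables).
from datasets.table import ConcatenationTable, InMemoryTable, concat_tables, list_table_cache_files

t1 = InMemoryTable.from_pydict({"a": [1, 2]})
t2 = InMemoryTable.from_pydict({"a": [3]})

assert concat_tables([t1]) is t1  # a single table is returned as-is
combined = concat_tables([t1, t2])
assert isinstance(combined, ConcatenationTable)
assert combined.num_rows == 3
assert list_table_cache_files(combined) == []  # nothing here comes from disk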
Cache file are used when parts of the table come from the disk via memory mapping. Returns: `List[str]`: A list of paths to the cache files loaded by the table. """ if isinstance(table, ConcatenationTable): cache_files = [] for subtables in table.blocks: for subtable in subtables: cache_files += list_table_cache_files(subtable) return cache_files elif isinstance(table, MemoryMappedTable): return [table.path] else: return [] def _wrap_for_chunked_arrays(func): """Apply the function on each chunk of a `pyarrow.ChunkedArray`, or on the array directly""" def wrapper(array, *args, **kwargs): if isinstance(array, pa.ChunkedArray): return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks]) else: return func(array, *args, **kwargs) return wrapper def _is_extension_type(pa_type: pa.DataType) -> bool: """ Check (recursively) if a pyarrow type is an extension type. """ if isinstance(pa_type, pa.StructType): return any(_is_extension_type(field.type) for field in pa_type) elif isinstance(pa_type, (pa.ListType, pa.FixedSizeListType, pa.LargeListType)): return _is_extension_type(pa_type.value_type) elif isinstance(pa_type, pa.ExtensionType): return True else: return False def array_concat(arrays: List[pa.Array]): """Improved version of pa.concat_arrays It supports concatenating pa.ExtensionArray objects by concatenating the underlying storages. Args: arrays (List[pa.Array]): List of arrays to contatenate Raises: pa.ArrowInvalid: if the arrow array concatenation fails ValueError: if the list of arrays is empty TypeError: if the arrays to be concatenated have different types Returns: array (:obj:`pyarrow.Array`): the concatenated array """ arrays = list(arrays) array_types = {array.type for array in arrays} if not array_types: raise ValueError("Couldn't concatenate empty list of arrays") if len(array_types) > 1: array_types = list(array_types) raise TypeError(f"Couldn't concatenate arrays with different types {array_types[0]} and {array_types[1]}") array_type = arrays[0].type arrays = [chunk for arr in arrays for chunk in (arr.chunks if isinstance(arr, pa.ChunkedArray) else (arr,))] if not _is_extension_type(array_type): return pa.concat_arrays(arrays) def _offsets_concat(offsets): offset = offsets[0] concatenated_offsets = offset for offset in offsets[1:]: offset = pc.subtract(offset, offset[0]) offset = pc.add(offset[1:], concatenated_offsets[-1]) concatenated_offsets = pa.concat_arrays([concatenated_offsets, offset]) return concatenated_offsets def _concat_arrays(arrays): array_type = arrays[0].type if isinstance(array_type, pa.ExtensionType): return array_type.wrap_array(_concat_arrays([array.storage for array in arrays])) elif pa.types.is_struct(array_type): return pa.StructArray.from_arrays( [_concat_arrays([array.field(field.name) for array in arrays]) for field in array_type], fields=list(array_type), mask=pa.concat_arrays([array.is_null() for array in arrays]), ) elif pa.types.is_list(array_type): if any(array.null_count > 0 for array in arrays): if config.PYARROW_VERSION.major < 10: warnings.warn( "None values are converted to empty lists in `pyarrow<10.0.0` when concatenating list arrays with None values. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676." 
) else: return pa.ListArray.from_arrays( _offsets_concat([array.offsets for array in arrays]), _concat_arrays([array.values for array in arrays]), mask=pa.concat_arrays([array.is_null() for array in arrays]), ) return pa.ListArray.from_arrays( _offsets_concat([array.offsets for array in arrays]), _concat_arrays([array.values for array in arrays]), ) elif pa.types.is_fixed_size_list(array_type): if config.PYARROW_VERSION.major < 15: # PyArrow bug: https://github.com/apache/arrow/issues/35360 return pa.FixedSizeListArray.from_arrays( _concat_arrays([array.values[array.offset * array.type.list_size :] for array in arrays]), array_type.list_size, ) else: return pa.FixedSizeListArray.from_arrays( _concat_arrays([array.values for array in arrays]), array_type.value_type, array_type.list_size, ) return pa.concat_arrays(arrays) return _concat_arrays(arrays) @_wrap_for_chunked_arrays def array_cast(array: pa.Array, pa_type: pa.DataType, allow_number_to_str=True): """Improved version of `pa.Array.cast` It supports casting `pa.StructArray` objects to re-order the fields. It also let you control certain aspects of the casting, e.g. whether to disable numbers (`floats` or `ints`) to strings. Args: array (`pa.Array`): PyArrow array to cast pa_type (`pa.DataType`): Target PyArrow type allow_number_to_str (`bool`, defaults to `True`): Whether to allow casting numbers to strings. Defaults to `True`. Raises: `pa.ArrowInvalidError`: if the arrow data casting fails `TypeError`: if the target type is not supported according, e.g. - if a field is missing - if casting from numbers to strings and `allow_number_to_str` is `False` Returns: `List[pyarrow.Array]`: the casted array """ _c = partial(array_cast, allow_number_to_str=allow_number_to_str) if isinstance(array, pa.ExtensionArray): array = array.storage if isinstance(pa_type, pa.ExtensionType): return pa_type.wrap_array(_c(array, pa_type.storage_type)) elif array.type == pa_type: return array elif pa.types.is_struct(array.type): if pa.types.is_struct(pa_type) and ({field.name for field in pa_type} == {field.name for field in array.type}): if array.type.num_fields == 0: return array arrays = [_c(array.field(field.name), field.type) for field in pa_type] return pa.StructArray.from_arrays(arrays, fields=list(pa_type), mask=array.is_null()) elif pa.types.is_list(array.type): if pa.types.is_fixed_size_list(pa_type): if pa_type.list_size * len(array) == len(array.values): return pa.FixedSizeListArray.from_arrays( _c(array.values, pa_type.value_type), pa_type.list_size, ) elif pa.types.is_list(pa_type): if array.null_count > 0: if config.PYARROW_VERSION.major < 10: warnings.warn( f"None values are converted to empty lists in `pyarrow<10.0.0` when converting array to {pa_type}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676." 
) else: return pa.ListArray.from_arrays( array.offsets, _c(array.values, pa_type.value_type), mask=array.is_null() ) return pa.ListArray.from_arrays(array.offsets, _c(array.values, pa_type.value_type)) elif pa.types.is_fixed_size_list(array.type): array_values = array.values if config.PYARROW_VERSION.major < 15: # PyArrow bug: https://github.com/apache/arrow/issues/35360 array_values = array.values[array.offset * array.type.list_size :] if pa.types.is_fixed_size_list(pa_type): return pa.FixedSizeListArray.from_arrays( _c(array_values, pa_type.value_type), pa_type.list_size, ) elif pa.types.is_list(pa_type): offsets_arr = pa.array(np.arange(len(array) + 1) * array.type.list_size, pa.int32()) if array.null_count > 0: if config.PYARROW_VERSION.major < 10: warnings.warn( f"None values are converted to empty lists in `pyarrow<10.0.0` when converting array to {pa_type}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676." ) else: return pa.ListArray.from_arrays( offsets_arr, _c(array_values, pa_type.value_type), mask=array.is_null() ) return pa.ListArray.from_arrays(offsets_arr, _c(array_values, pa_type.value_type)) else: if ( not allow_number_to_str and pa.types.is_string(pa_type) and (pa.types.is_floating(array.type) or pa.types.is_integer(array.type)) ): raise TypeError( f"Couldn't cast array of type {array.type} to {pa_type} since allow_number_to_str is set to {allow_number_to_str}" ) if pa.types.is_null(pa_type) and not pa.types.is_null(array.type): raise TypeError(f"Couldn't cast array of type {array.type} to {pa_type}") return array.cast(pa_type) raise TypeError(f"Couldn't cast array of type\n{array.type}\nto\n{pa_type}") @_wrap_for_chunked_arrays def cast_array_to_feature(array: pa.Array, feature: "FeatureType", allow_number_to_str=True): """Cast an array to the arrow type that corresponds to the requested feature type. For custom features like [`Audio`] or [`Image`], it takes into account the "cast_storage" methods they defined to enable casting from other arrow types. Args: array (`pa.Array`): The PyArrow array to cast. feature (`datasets.features.FeatureType`): The target feature type. allow_number_to_str (`bool`, defaults to `True`): Whether to allow casting numbers to strings. Defaults to `True`. Raises: `pa.ArrowInvalidError`: if the arrow data casting fails `TypeError`: if the target type is not supported according, e.g. 
- if a field is missing - if casting from numbers to strings and `allow_number_to_str` is `False` Returns: array (`pyarrow.Array`): the casted array """ from .features.features import Sequence, get_nested_type _c = partial(cast_array_to_feature, allow_number_to_str=allow_number_to_str) if isinstance(array, pa.ExtensionArray): array = array.storage if hasattr(feature, "cast_storage"): return feature.cast_storage(array) elif pa.types.is_struct(array.type): # feature must be a dict or Sequence(subfeatures_dict) if isinstance(feature, Sequence) and isinstance(feature.feature, dict): feature = { name: Sequence(subfeature, length=feature.length) for name, subfeature in feature.feature.items() } if isinstance(feature, dict) and {field.name for field in array.type} == set(feature): if array.type.num_fields == 0: return array arrays = [_c(array.field(name), subfeature) for name, subfeature in feature.items()] return pa.StructArray.from_arrays(arrays, names=list(feature), mask=array.is_null()) elif pa.types.is_list(array.type): # feature must be either [subfeature] or Sequence(subfeature) if isinstance(feature, list): casted_values = _c(array.values, feature[0]) if casted_values.type == array.values.type: return array else: if array.null_count > 0: if config.PYARROW_VERSION.major < 10: warnings.warn( f"None values are converted to empty lists in `pyarrow<10.0.0` when converting array to {feature}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676." ) else: return pa.ListArray.from_arrays(array.offsets, casted_values, mask=array.is_null()) return pa.ListArray.from_arrays(array.offsets, casted_values) elif isinstance(feature, Sequence): if feature.length > -1: if feature.length * len(array) == len(array.values): return pa.FixedSizeListArray.from_arrays(_c(array.values, feature.feature), feature.length) else: casted_values = _c(array.values, feature.feature) if casted_values.type == array.values.type: return array else: if array.null_count > 0: if config.PYARROW_VERSION.major < 10: warnings.warn( f"None values are converted to empty lists in `pyarrow<10.0.0` when converting array to {feature}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676." ) else: return pa.ListArray.from_arrays( array.offsets, _c(array.values, feature.feature), mask=array.is_null() ) return pa.ListArray.from_arrays(array.offsets, _c(array.values, feature.feature)) elif pa.types.is_fixed_size_list(array.type): # feature must be either [subfeature] or Sequence(subfeature) array_values = array.values if isinstance(feature, list): if array.null_count > 0: if config.PYARROW_VERSION.major < 10: warnings.warn( f"None values are converted to empty lists when converting array to {feature}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676. 
This will raise an error in a future major version of `datasets`" ) else: return pa.ListArray.from_arrays(array.offsets, _c(array_values, feature[0]), mask=array.is_null()) return pa.ListArray.from_arrays(array.offsets, _c(array_values, feature[0])) elif isinstance(feature, Sequence): if feature.length > -1: if array.offset and feature.length * len(array) != len(array_values): array_values = array.values[ array.offset * array.type.list_size : (array.offset + len(array)) * array.type.list_size ] if feature.length * len(array) == len(array_values): return pa.FixedSizeListArray.from_arrays(_c(array_values, feature.feature), feature.length) else: offsets_arr = pa.array(np.arange(len(array) + 1) * array.type.list_size, pa.int32()) if array.null_count > 0: if config.PYARROW_VERSION.major < 10: warnings.warn( f"None values are converted to empty lists when converting array to {feature}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676. This will raise an error in a future major version of `datasets`" ) else: return pa.ListArray.from_arrays( offsets_arr, _c(array_values, feature.feature), mask=array.is_null() ) return pa.ListArray.from_arrays(offsets_arr, _c(array_values, feature.feature)) if pa.types.is_null(array.type): return array_cast(array, get_nested_type(feature), allow_number_to_str=allow_number_to_str) elif not isinstance(feature, (Sequence, dict, list, tuple)): return array_cast(array, feature(), allow_number_to_str=allow_number_to_str) raise TypeError(f"Couldn't cast array of type\n{array.type}\nto\n{feature}") @_wrap_for_chunked_arrays def embed_array_storage(array: pa.Array, feature: "FeatureType"): """Embed data into an arrays's storage. For custom features like Audio or Image, it takes into account the "embed_storage" methods they defined to enable embedding external data (e.g. an image file) into an other arrow types. <Added version="2.4.0"/> Args: array (`pa.Array`): The PyArrow array in which to embed data. feature (`datasets.features.FeatureType`): Array features. Raises: `TypeError`: if the target type is not supported according, e.g. - if a field is missing Returns: array (`pyarrow.Array`): the casted array """ from .features import Sequence _e = embed_array_storage if isinstance(array, pa.ExtensionArray): array = array.storage if hasattr(feature, "embed_storage"): return feature.embed_storage(array) elif pa.types.is_struct(array.type): # feature must be a dict or Sequence(subfeatures_dict) if isinstance(feature, Sequence) and isinstance(feature.feature, dict): feature = { name: Sequence(subfeature, length=feature.length) for name, subfeature in feature.feature.items() } if isinstance(feature, dict): arrays = [_e(array.field(name), subfeature) for name, subfeature in feature.items()] return pa.StructArray.from_arrays(arrays, names=list(feature), mask=array.is_null()) elif pa.types.is_list(array.type): # feature must be either [subfeature] or Sequence(subfeature) if isinstance(feature, list): if array.null_count > 0: if config.PYARROW_VERSION.major < 10: warnings.warn( f"None values are converted to empty lists when embedding array storage with {feature}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676. 
This will raise an error in a future major version of `datasets`" ) else: return pa.ListArray.from_arrays(array.offsets, _e(array.values, feature[0]), mask=array.is_null()) return pa.ListArray.from_arrays(array.offsets, _e(array.values, feature[0])) elif isinstance(feature, Sequence): if feature.length > -1: if feature.length * len(array) == len(array.values): return pa.FixedSizeListArray.from_arrays(_e(array.values, feature.feature), feature.length) else: casted_values = _e(array.values, feature.feature) if casted_values.type == array.values.type: return array else: if array.null_count > 0: if config.PYARROW_VERSION.major < 10: warnings.warn( f"None values are converted to empty lists when embedding array storage with {feature}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676. This will raise an error in a future major version of `datasets`" ) else: return pa.ListArray.from_arrays( array.offsets, _e(array.values, feature.feature), mask=array.is_null() ) return pa.ListArray.from_arrays(array.offsets, _e(array.values, feature.feature)) elif pa.types.is_fixed_size_list(array.type): # feature must be either [subfeature] or Sequence(subfeature) array_values = array.values if config.PYARROW_VERSION.major < 15: # PyArrow bug: https://github.com/apache/arrow/issues/35360 array_values = array.values[array.offset * array.type.list_size :] if isinstance(feature, list): if array.null_count > 0: if config.PYARROW_VERSION.major < 10: warnings.warn( f"None values are converted to empty lists when embedding array storage with {feature}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676. This will raise an error in a future major version of `datasets`" ) else: return pa.ListArray.from_arrays(array.offsets, _e(array_values, feature[0]), mask=array.is_null()) return pa.ListArray.from_arrays(array.offsets, _e(array_values, feature[0])) elif isinstance(feature, Sequence): if feature.length > -1: if feature.length * len(array) == len(array_values): return pa.FixedSizeListArray.from_arrays(_e(array_values, feature.feature), feature.length) else: offsets_arr = pa.array(np.arange(len(array) + 1) * array.type.list_size, pa.int32()) if array.null_count > 0: if config.PYARROW_VERSION.major < 10: warnings.warn( f"None values are converted to empty lists when embedding array storage with {feature}. Install `pyarrow>=10.0.0` to avoid this behavior. More info: https://github.com/huggingface/datasets/issues/3676. 
This will raise an error in a future major version of `datasets`" ) else: return pa.ListArray.from_arrays( offsets_arr, _e(array_values, feature.feature), mask=array.is_null() ) return pa.ListArray.from_arrays(offsets_arr, _e(array_values, feature.feature)) if not isinstance(feature, (Sequence, dict, list, tuple)): return array raise TypeError(f"Couldn't embed array of type\n{array.type}\nwith\n{feature}") class CastError(ValueError): """When it's not possible to cast an Arrow table to a specific schema or set of features""" def __init__(self, *args, table_column_names: List[str], requested_column_names: List[str]) -> None: super().__init__(*args) self.table_column_names = table_column_names self.requested_column_names = requested_column_names def details(self): new_columns = set(self.table_column_names) - set(self.requested_column_names) missing_columns = set(self.requested_column_names) - set(self.table_column_names) if new_columns and missing_columns: return f"there are {len(new_columns)} new columns ({', '.join(new_columns)}) and {len(missing_columns)} missing columns ({', '.join(missing_columns)})." elif new_columns: return f"there are {len(new_columns)} new columns ({new_columns})" else: return f"there are {len(missing_columns)} missing columns ({missing_columns})" def cast_table_to_features(table: pa.Table, features: "Features"): """Cast a table to the arrow schema that corresponds to the requested features. Args: table (`pyarrow.Table`): PyArrow table to cast. features ([`Features`]): Target features. Returns: table (`pyarrow.Table`): the casted table """ if sorted(table.column_names) != sorted(features): raise CastError( f"Couldn't cast\n{table.schema}\nto\n{features}\nbecause column names don't match", table_column_names=table.column_names, requested_column_names=list(features), ) arrays = [cast_array_to_feature(table[name], feature) for name, feature in features.items()] return pa.Table.from_arrays(arrays, schema=features.arrow_schema) def cast_table_to_schema(table: pa.Table, schema: pa.Schema): """Cast a table to the arrow schema. Different from `cast_table_to_features`, this method can preserve nullability. Args: table (`pa.Table`): PyArrow table to cast. features ([`Features`]): Target features. Returns: `pa.Table`: the casted table """ from .features import Features features = Features.from_arrow_schema(schema) if sorted(table.column_names) != sorted(features): raise CastError( f"Couldn't cast\n{table.schema}\nto\n{features}\nbecause column names don't match", table_column_names=table.column_names, requested_column_names=list(features), ) arrays = [cast_array_to_feature(table[name], feature) for name, feature in features.items()] return pa.Table.from_arrays(arrays, schema=schema) def embed_table_storage(table: pa.Table): """Embed external data into a table's storage. <Added version="2.4.0"/> Args: table (`pyarrow.Table`): PyArrow table in which to embed data. Returns: table (`pyarrow.Table`): the table with embedded data """ from .features.features import Features, require_storage_embed features = Features.from_arrow_schema(table.schema) arrays = [ embed_array_storage(table[name], feature) if require_storage_embed(feature) else table[name] for name, feature in features.items() ] return pa.Table.from_arrays(arrays, schema=features.arrow_schema) def table_cast(table: pa.Table, schema: pa.Schema): """Improved version of `pa.Table.cast`. It supports casting to feature types stored in the schema metadata. Args: table (`pyarrow.Table`): PyArrow table to cast. 
schema (`pyarrow.Schema`): Target PyArrow schema. Returns: table (`pyarrow.Table`): the casted table """ if table.schema != schema: return cast_table_to_schema(table, schema) elif table.schema.metadata != schema.metadata: return table.replace_schema_metadata(schema.metadata) else: return table def table_flatten(table: pa.Table): """Improved version of `pa.Table.flatten`. It behaves as `pa.Table.flatten` in a sense it does 1-step flatten of the columns with a struct type into one column per struct field, but updates the metadata and skips decodable features unless the `decode` attribute of these features is set to False. Args: table (`pa.Table`): PyArrow table to flatten. Returns: `Table`: the flattened table """ from .features import Features features = Features.from_arrow_schema(table.schema) if any(hasattr(subfeature, "flatten") and subfeature.flatten() == subfeature for subfeature in features.values()): flat_arrays = [] flat_column_names = [] for field in table.schema: array = table.column(field.name) subfeature = features[field.name] if pa.types.is_struct(field.type) and ( not hasattr(subfeature, "flatten") or subfeature.flatten() != subfeature ): flat_arrays.extend(array.flatten()) flat_column_names.extend([f"{field.name}.{subfield.name}" for subfield in field.type]) else: flat_arrays.append(array) flat_column_names.append(field.name) flat_table = pa.Table.from_arrays( flat_arrays, names=flat_column_names, ) else: flat_table = table.flatten() # Preserve complex types in the metadata flat_features = features.flatten(max_depth=2) flat_features = Features({column_name: flat_features[column_name] for column_name in flat_table.column_names}) return flat_table.replace_schema_metadata(flat_features.arrow_schema.metadata) def table_visitor(table: pa.Table, function: Callable[[pa.Array], None]): """Visit all arrays in a table and apply a function to them. Args: table (`pyarrow.Table`): PyArrow table to visit. function (`Callable[[pa.Array], None]`): Function to apply to each array. """ from .features import Features, Sequence features = Features.from_arrow_schema(table.schema) def _visit(array, feature): if isinstance(array, pa.ChunkedArray): for chunk in array.chunks: _visit(chunk, feature) else: if isinstance(array, pa.ExtensionArray): array = array.storage function(array, feature) if pa.types.is_struct(array.type) and not hasattr(feature, "cast_storage"): if isinstance(feature, Sequence) and isinstance(feature.feature, dict): feature = { name: Sequence(subfeature, length=feature.length) for name, subfeature in feature.feature.items() } for name, subfeature in feature.items(): _visit(array.field(name), subfeature) elif pa.types.is_list(array.type): if isinstance(feature, list): _visit(array.values, feature[0]) elif isinstance(feature, Sequence): _visit(array.values, feature.feature) for name, feature in features.items(): _visit(table[name], feature) def table_iter(table: Table, batch_size: int, drop_last_batch=False) -> Iterator[pa.Table]: """Iterate over sub-tables of size `batch_size`. Args: table (`pyarrow.Table`): PyArrow table to iterate over. batch_size (`int`): Size of each sub-table to yield. drop_last_batch (`bool`, defaults to `False`): Drop the last batch if it is smaller than `batch_size`. 
""" chunks_buffer = [] chunks_buffer_size = 0 for chunk in table.to_reader(max_chunksize=batch_size): if len(chunk) == 0: continue elif chunks_buffer_size + len(chunk) < batch_size: chunks_buffer.append(chunk) chunks_buffer_size += len(chunk) continue elif chunks_buffer_size + len(chunk) == batch_size: chunks_buffer.append(chunk) yield pa.Table.from_batches(chunks_buffer) chunks_buffer = [] chunks_buffer_size = 0 else: cropped_chunk_length = batch_size - chunks_buffer_size chunks_buffer.append(chunk.slice(0, cropped_chunk_length)) yield pa.Table.from_batches(chunks_buffer) chunks_buffer = [chunk.slice(cropped_chunk_length, len(chunk) - cropped_chunk_length)] chunks_buffer_size = len(chunk) - cropped_chunk_length if not drop_last_batch and chunks_buffer: yield pa.Table.from_batches(chunks_buffer)
datasets/src/datasets/table.py/0
{ "file_path": "datasets/src/datasets/table.py", "repo_id": "datasets", "token_count": 42695 }
70
from typing import Callable


def is_documented_by(function_with_docstring: Callable):
    """Decorator to share docstrings across common functions.

    Args:
        function_with_docstring (`Callable`): The function whose docstring should be copied to the decorated function.
    """

    def wrapper(target_function):
        target_function.__doc__ = function_with_docstring.__doc__
        return target_function

    return wrapper
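
# --- Editor's usage sketch (illustrative; not part of the upstream module) ---
# `is_documented_by` copies the docstring of one callable onto another, which lets
# closely related functions share a single reference docstring. Both functions
# below are hypothetical.
def _example_usage_of_is_documented_by():  # pragma: no cover - illustration only
    def reference_function():
        """Load a split and return it."""

    @is_documented_by(reference_function)
    def aliased_function():
        ...

    assert aliased_function.__doc__ == "Load a split and return it."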
datasets/src/datasets/utils/doc_utils.py/0
{ "file_path": "datasets/src/datasets/utils/doc_utils.py", "repo_id": "datasets", "token_count": 137 }
71
{ "monolingual": "contains a single language", "multilingual": "contains multiple languages", "translation": "contains translated or aligned text", "other": "other type of language distribution" }
datasets/src/datasets/utils/resources/multilingualities.json/0
{ "file_path": "datasets/src/datasets/utils/resources/multilingualities.json", "repo_id": "datasets", "token_count": 55 }
72
# isort: skip_file # This is the module that test_patching.py uses to test patch_submodule() import os # noqa: F401 - this is just for tests import os as renamed_os # noqa: F401 - this is just for tests from os import path # noqa: F401 - this is just for tests from os import path as renamed_path # noqa: F401 - this is just for tests from os.path import join # noqa: F401 - this is just for tests from os.path import join as renamed_join # noqa: F401 - this is just for tests open = open # noqa we just need to have a builtin inside this module to test it properly
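
# --- Editor's usage sketch (illustrative; not part of the upstream module) ---
# The imports above exist only so that test_patching.py has attributes to swap out.
# The sketch below shows the kind of substitution patch_submodule() performs, but
# uses unittest.mock directly instead of restating the datasets helper's signature.
def _example_patching_of_this_module():  # pragma: no cover - illustration only
    import sys
    from unittest.mock import patch

    this_module = sys.modules[__name__]
    sentinel = object()
    with patch.object(this_module, "join", sentinel):
        assert this_module.join is sentinel  # temporarily replaced
    assert this_module.join is not sentinel  # restored when the patch exits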
datasets/tests/_test_patching.py/0
{ "file_path": "datasets/tests/_test_patching.py", "repo_id": "datasets", "token_count": 175 }
73
import datetime from unittest import TestCase from unittest.mock import patch import numpy as np import pandas as pd import pyarrow as pa import pytest from datasets import Array2D from datasets.arrow_dataset import Dataset from datasets.features import Audio, ClassLabel, Features, Image, Sequence, Value from datasets.features.features import ( _arrow_to_datasets_dtype, _cast_to_python_objects, cast_to_python_objects, encode_nested_example, generate_from_dict, string_to_arrow, ) from datasets.features.translation import Translation, TranslationVariableLanguages from datasets.info import DatasetInfo from datasets.utils.py_utils import asdict from ..utils import require_jax, require_tf, require_torch class FeaturesTest(TestCase): def test_from_arrow_schema_simple(self): data = {"a": [{"b": {"c": "text"}}] * 10, "foo": [1] * 10} original_features = Features({"a": {"b": {"c": Value("string")}}, "foo": Value("int64")}) dset = Dataset.from_dict(data, features=original_features) new_features = dset.features new_dset = Dataset.from_dict(data, features=new_features) self.assertEqual(original_features.type, new_features.type) self.assertDictEqual(dset[0], new_dset[0]) self.assertDictEqual(dset[:], new_dset[:]) def test_from_arrow_schema_with_sequence(self): data = {"a": [{"b": {"c": ["text"]}}] * 10, "foo": [1] * 10} original_features = Features({"a": {"b": Sequence({"c": Value("string")})}, "foo": Value("int64")}) dset = Dataset.from_dict(data, features=original_features) new_features = dset.features new_dset = Dataset.from_dict(data, features=new_features) self.assertEqual(original_features.type, new_features.type) self.assertDictEqual(dset[0], new_dset[0]) self.assertDictEqual(dset[:], new_dset[:]) def test_string_to_arrow_bijection_for_primitive_types(self): supported_pyarrow_datatypes = [ pa.time32("s"), pa.time64("us"), pa.timestamp("s"), pa.timestamp("ns", tz="America/New_York"), pa.date32(), pa.date64(), pa.duration("s"), pa.decimal128(10, 2), pa.decimal256(40, -3), pa.string(), pa.int32(), pa.float64(), pa.array([datetime.time(1, 1, 1)]).type, # arrow type: DataType(time64[us]) ] for dt in supported_pyarrow_datatypes: self.assertEqual(dt, string_to_arrow(_arrow_to_datasets_dtype(dt))) unsupported_pyarrow_datatypes = [pa.list_(pa.float64())] for dt in unsupported_pyarrow_datatypes: with self.assertRaises(ValueError): string_to_arrow(_arrow_to_datasets_dtype(dt)) supported_datasets_dtypes = [ "time32[s]", "timestamp[ns]", "timestamp[ns, tz=+07:30]", "duration[us]", "decimal128(30, -4)", "int32", "float64", ] for sdt in supported_datasets_dtypes: self.assertEqual(sdt, _arrow_to_datasets_dtype(string_to_arrow(sdt))) unsupported_datasets_dtypes = [ "time32[ns]", "timestamp[blob]", "timestamp[[ns]]", "timestamp[ns, tz=[ns]]", "duration[[us]]", "decimal20(30, -4)", "int", ] for sdt in unsupported_datasets_dtypes: with self.assertRaises(ValueError): string_to_arrow(sdt) def test_feature_named_type(self): """reference: issue #1110""" features = Features({"_type": Value("string")}) ds_info = DatasetInfo(features=features) reloaded_features = Features.from_dict(asdict(ds_info)["features"]) assert features == reloaded_features def test_feature_named_self_as_kwarg(self): """reference: issue #5641""" features = Features(self=Value("string")) ds_info = DatasetInfo(features=features) reloaded_features = Features.from_dict(asdict(ds_info)["features"]) assert features == reloaded_features def test_class_label_feature_with_no_labels(self): """reference: issue #4681""" features = Features({"label": 
ClassLabel(names=[])}) ds_info = DatasetInfo(features=features) reloaded_features = Features.from_dict(asdict(ds_info)["features"]) assert features == reloaded_features def test_reorder_fields_as(self): features = Features( { "id": Value("string"), "document": { "title": Value("string"), "url": Value("string"), "html": Value("string"), "tokens": Sequence({"token": Value("string"), "is_html": Value("bool")}), }, "question": { "text": Value("string"), "tokens": Sequence(Value("string")), }, "annotations": Sequence( { "id": Value("string"), "long_answer": { "start_token": Value("int64"), "end_token": Value("int64"), "start_byte": Value("int64"), "end_byte": Value("int64"), }, "short_answers": Sequence( { "start_token": Value("int64"), "end_token": Value("int64"), "start_byte": Value("int64"), "end_byte": Value("int64"), "text": Value("string"), } ), "yes_no_answer": ClassLabel(names=["NO", "YES"]), } ), } ) other = Features( # same but with [] instead of sequences, and with a shuffled fields order { "id": Value("string"), "document": { "tokens": Sequence({"token": Value("string"), "is_html": Value("bool")}), "title": Value("string"), "url": Value("string"), "html": Value("string"), }, "question": { "text": Value("string"), "tokens": [Value("string")], }, "annotations": { "yes_no_answer": [ClassLabel(names=["NO", "YES"])], "id": [Value("string")], "long_answer": [ { "end_byte": Value("int64"), "start_token": Value("int64"), "end_token": Value("int64"), "start_byte": Value("int64"), } ], "short_answers": [ Sequence( { "text": Value("string"), "start_token": Value("int64"), "end_token": Value("int64"), "start_byte": Value("int64"), "end_byte": Value("int64"), } ) ], }, } ) expected = Features( { "id": Value("string"), "document": { "tokens": Sequence({"token": Value("string"), "is_html": Value("bool")}), "title": Value("string"), "url": Value("string"), "html": Value("string"), }, "question": { "text": Value("string"), "tokens": Sequence(Value("string")), }, "annotations": Sequence( { "yes_no_answer": ClassLabel(names=["NO", "YES"]), "id": Value("string"), "long_answer": { "end_byte": Value("int64"), "start_token": Value("int64"), "end_token": Value("int64"), "start_byte": Value("int64"), }, "short_answers": Sequence( { "text": Value("string"), "start_token": Value("int64"), "end_token": Value("int64"), "start_byte": Value("int64"), "end_byte": Value("int64"), } ), } ), } ) reordered_features = features.reorder_fields_as(other) self.assertDictEqual(reordered_features, expected) self.assertEqual(reordered_features.type, other.type) self.assertEqual(reordered_features.type, expected.type) self.assertNotEqual(reordered_features.type, features.type) def test_flatten(self): features = Features({"foo": {"bar1": Value("int32"), "bar2": {"foobar": Value("string")}}}) _features = features.copy() flattened_features = features.flatten() assert flattened_features == {"foo.bar1": Value("int32"), "foo.bar2.foobar": Value("string")} assert features == _features, "calling flatten shouldn't alter the current features" def test_flatten_with_sequence(self): features = Features({"foo": Sequence({"bar": {"my_value": Value("int32")}})}) _features = features.copy() flattened_features = features.flatten() assert flattened_features == {"foo.bar": [{"my_value": Value("int32")}]} assert features == _features, "calling flatten shouldn't alter the current features" def test_features_dicts_are_synced(self): def assert_features_dicts_are_synced(features: Features): assert ( hasattr(features, "_column_requires_decoding") and 
features.keys() == features._column_requires_decoding.keys() ) features = Features({"foo": Sequence({"bar": {"my_value": Value("int32")}})}) assert_features_dicts_are_synced(features) features["barfoo"] = Image() assert_features_dicts_are_synced(features) del features["barfoo"] assert_features_dicts_are_synced(features) features.update({"foobar": Value("string")}) assert_features_dicts_are_synced(features) features.pop("foobar") assert_features_dicts_are_synced(features) features.popitem() assert_features_dicts_are_synced(features) features.setdefault("xyz", Value("bool")) assert_features_dicts_are_synced(features) features.clear() assert_features_dicts_are_synced(features) def test_classlabel_init(tmp_path_factory): names = ["negative", "positive"] names_file = str(tmp_path_factory.mktemp("features") / "labels.txt") with open(names_file, "w", encoding="utf-8") as f: f.write("\n".join(names)) classlabel = ClassLabel(names=names) assert classlabel.names == names and classlabel.num_classes == len(names) classlabel = ClassLabel(names_file=names_file) assert classlabel.names == names and classlabel.num_classes == len(names) classlabel = ClassLabel(num_classes=len(names), names=names) assert classlabel.names == names and classlabel.num_classes == len(names) classlabel = ClassLabel(num_classes=len(names)) assert classlabel.names == [str(i) for i in range(len(names))] and classlabel.num_classes == len(names) with pytest.raises(ValueError): classlabel = ClassLabel(num_classes=len(names) + 1, names=names) with pytest.raises(ValueError): classlabel = ClassLabel(names=names, names_file=names_file) with pytest.raises(ValueError): classlabel = ClassLabel() with pytest.raises(TypeError): classlabel = ClassLabel(names=np.array(names)) def test_classlabel_str2int(): names = ["negative", "positive"] classlabel = ClassLabel(names=names) for label in names: assert classlabel.str2int(label) == names.index(label) with pytest.raises(ValueError): classlabel.str2int("__bad_label_name__") with pytest.raises(ValueError): classlabel.str2int(1) with pytest.raises(ValueError): classlabel.str2int(None) def test_classlabel_int2str(): names = ["negative", "positive"] classlabel = ClassLabel(names=names) for i in range(len(names)): assert classlabel.int2str(i) == names[i] with pytest.raises(ValueError): classlabel.int2str(len(names)) with pytest.raises(ValueError): classlabel.int2str(-1) with pytest.raises(ValueError): classlabel.int2str(None) def test_classlabel_cast_storage(): names = ["negative", "positive"] classlabel = ClassLabel(names=names) # from integers arr = pa.array([0, 1, -1, -100], type=pa.int64()) result = classlabel.cast_storage(arr) assert result.type == pa.int64() assert result.to_pylist() == [0, 1, -1, -100] arr = pa.array([0, 1, -1, -100], type=pa.int32()) result = classlabel.cast_storage(arr) assert result.type == pa.int64() assert result.to_pylist() == [0, 1, -1, -100] arr = pa.array([3]) with pytest.raises(ValueError): classlabel.cast_storage(arr) # from strings arr = pa.array(["negative", "positive"]) result = classlabel.cast_storage(arr) assert result.type == pa.int64() assert result.to_pylist() == [0, 1] arr = pa.array(["__label_that_doesnt_exist__"]) with pytest.raises(ValueError): classlabel.cast_storage(arr) # from nulls arr = pa.array([None]) result = classlabel.cast_storage(arr) assert result.type == pa.int64() assert result.to_pylist() == [None] # from empty arr = pa.array([], pa.int64()) result = classlabel.cast_storage(arr) assert result.type == pa.int64() assert result.to_pylist() == [] 
arr = pa.array([], pa.string()) result = classlabel.cast_storage(arr) assert result.type == pa.int64() assert result.to_pylist() == [] @pytest.mark.parametrize("class_label_arg", ["names", "names_file"]) def test_class_label_to_and_from_dict(class_label_arg, tmp_path_factory): names = ["negative", "positive"] names_file = str(tmp_path_factory.mktemp("features") / "labels.txt") with open(names_file, "w", encoding="utf-8") as f: f.write("\n".join(names)) if class_label_arg == "names": class_label = ClassLabel(names=names) elif class_label_arg == "names_file": class_label = ClassLabel(names_file=names_file) generated_class_label = generate_from_dict(asdict(class_label)) assert generated_class_label == class_label @pytest.mark.parametrize("inner_type", [Value("int32"), {"subcolumn": Value("int32")}]) def test_encode_nested_example_sequence_with_none(inner_type): schema = Sequence(inner_type) obj = None result = encode_nested_example(schema, obj) assert result is None def test_encode_batch_with_example_with_empty_first_elem(): features = Features( { "x": Sequence(Sequence(ClassLabel(names=["a", "b"]))), } ) encoded_batch = features.encode_batch( { "x": [ [["a"], ["b"]], [[], ["b"]], ] } ) assert encoded_batch == {"x": [[[0], [1]], [[], [1]]]} @pytest.mark.parametrize( "feature", [ Value("int32"), ClassLabel(num_classes=2), Translation(languages=["en", "fr"]), TranslationVariableLanguages(languages=["en", "fr"]), ], ) def test_dataset_feature_with_none(feature): data = {"col": [None]} features = Features({"col": feature}) dset = Dataset.from_dict(data, features=features) item = dset[0] assert item.keys() == {"col"} assert item["col"] is None batch = dset[:1] assert len(batch) == 1 assert batch.keys() == {"col"} assert isinstance(batch["col"], list) and all(item is None for item in batch["col"]) column = dset["col"] assert len(column) == 1 assert isinstance(column, list) and all(item is None for item in column) # nested tests data = {"col": [[None]]} features = Features({"col": Sequence(feature)}) dset = Dataset.from_dict(data, features=features) item = dset[0] assert item.keys() == {"col"} assert all(i is None for i in item["col"]) data = {"nested": [{"col": None}]} features = Features({"nested": {"col": feature}}) dset = Dataset.from_dict(data, features=features) item = dset[0] assert item.keys() == {"nested"} assert item["nested"].keys() == {"col"} assert item["nested"]["col"] is None def iternumpy(key1, value1, value2): if value1.dtype != value2.dtype: # check only for dtype raise AssertionError( f"dtype of '{key1}' key for casted object: {value1.dtype} and expected object: {value2.dtype} not matching" ) def dict_diff(d1: dict, d2: dict): # check if 2 dictionaries are equal np.testing.assert_equal(d1, d2) # sanity check if dict values are equal or not for (k1, v1), (k2, v2) in zip(d1.items(), d2.items()): # check if their values have same dtype or not if isinstance(v1, dict): # nested dictionary case dict_diff(v1, v2) elif isinstance(v1, np.ndarray): # checks if dtype and value of np.ndarray is equal iternumpy(k1, v1, v2) elif isinstance(v1, list): for element1, element2 in zip(v1, v2): # iterates over all elements of list if isinstance(element1, dict): dict_diff(element1, element2) elif isinstance(element1, np.ndarray): iternumpy(k1, element1, element2) class CastToPythonObjectsTest(TestCase): def test_cast_to_python_objects_list(self): obj = {"col_1": [{"vec": [1, 2, 3], "txt": "foo"}] * 3, "col_2": [[1, 2], [3, 4], [5, 6]]} expected_obj = {"col_1": [{"vec": [1, 2, 3], "txt": "foo"}] * 3, 
"col_2": [[1, 2], [3, 4], [5, 6]]} casted_obj = cast_to_python_objects(obj) self.assertDictEqual(casted_obj, expected_obj) def test_cast_to_python_objects_tuple(self): obj = {"col_1": [{"vec": (1, 2, 3), "txt": "foo"}] * 3, "col_2": [(1, 2), (3, 4), (5, 6)]} expected_obj = {"col_1": [{"vec": (1, 2, 3), "txt": "foo"}] * 3, "col_2": [(1, 2), (3, 4), (5, 6)]} casted_obj = cast_to_python_objects(obj) self.assertDictEqual(casted_obj, expected_obj) def test_cast_to_python_or_numpy(self): obj = {"col_1": [{"vec": np.arange(1, 4), "txt": "foo"}] * 3, "col_2": np.arange(1, 7).reshape(3, 2)} expected_obj = { "col_1": [{"vec": np.array([1, 2, 3]), "txt": "foo"}] * 3, "col_2": np.array([[1, 2], [3, 4], [5, 6]]), } casted_obj = cast_to_python_objects(obj) dict_diff(casted_obj, expected_obj) def test_cast_to_python_objects_series(self): obj = { "col_1": pd.Series([{"vec": [1, 2, 3], "txt": "foo"}] * 3), "col_2": pd.Series([[1, 2], [3, 4], [5, 6]]), } expected_obj = {"col_1": [{"vec": [1, 2, 3], "txt": "foo"}] * 3, "col_2": [[1, 2], [3, 4], [5, 6]]} casted_obj = cast_to_python_objects(obj) self.assertDictEqual(casted_obj, expected_obj) def test_cast_to_python_objects_dataframe(self): obj = pd.DataFrame({"col_1": [{"vec": [1, 2, 3], "txt": "foo"}] * 3, "col_2": [[1, 2], [3, 4], [5, 6]]}) expected_obj = {"col_1": [{"vec": [1, 2, 3], "txt": "foo"}] * 3, "col_2": [[1, 2], [3, 4], [5, 6]]} casted_obj = cast_to_python_objects(obj) self.assertDictEqual(casted_obj, expected_obj) def test_cast_to_python_objects_pandas_timestamp(self): obj = pd.Timestamp(2020, 1, 1) expected_obj = obj.to_pydatetime() casted_obj = cast_to_python_objects(obj) self.assertEqual(casted_obj, expected_obj) casted_obj = cast_to_python_objects(pd.Series([obj])) self.assertListEqual(casted_obj, [expected_obj]) casted_obj = cast_to_python_objects(pd.DataFrame({"a": [obj]})) self.assertDictEqual(casted_obj, {"a": [expected_obj]}) def test_cast_to_python_objects_pandas_timedelta(self): obj = pd.Timedelta(seconds=1) expected_obj = obj.to_pytimedelta() casted_obj = cast_to_python_objects(obj) self.assertEqual(casted_obj, expected_obj) casted_obj = cast_to_python_objects(pd.Series([obj])) self.assertListEqual(casted_obj, [expected_obj]) casted_obj = cast_to_python_objects(pd.DataFrame({"a": [obj]})) self.assertDictEqual(casted_obj, {"a": [expected_obj]}) @require_torch def test_cast_to_python_objects_torch(self): import torch obj = { "col_1": [{"vec": torch.tensor(np.arange(1, 4)), "txt": "foo"}] * 3, "col_2": torch.tensor(np.arange(1, 7).reshape(3, 2)), } expected_obj = { "col_1": [{"vec": np.array([1, 2, 3]), "txt": "foo"}] * 3, "col_2": np.array([[1, 2], [3, 4], [5, 6]]), } casted_obj = cast_to_python_objects(obj) dict_diff(casted_obj, expected_obj) @require_tf def test_cast_to_python_objects_tf(self): import tensorflow as tf obj = { "col_1": [{"vec": tf.constant(np.arange(1, 4)), "txt": "foo"}] * 3, "col_2": tf.constant(np.arange(1, 7).reshape(3, 2)), } expected_obj = { "col_1": [{"vec": np.array([1, 2, 3]), "txt": "foo"}] * 3, "col_2": np.array([[1, 2], [3, 4], [5, 6]]), } casted_obj = cast_to_python_objects(obj) dict_diff(casted_obj, expected_obj) @require_jax def test_cast_to_python_objects_jax(self): import jax.numpy as jnp obj = { "col_1": [{"vec": jnp.array(np.arange(1, 4)), "txt": "foo"}] * 3, "col_2": jnp.array(np.arange(1, 7).reshape(3, 2)), } assert obj["col_2"].dtype == jnp.int32 expected_obj = { "col_1": [{"vec": np.array([1, 2, 3], dtype=np.int32), "txt": "foo"}] * 3, "col_2": np.array([[1, 2], [3, 4], [5, 6]], dtype=np.int32), } 
casted_obj = cast_to_python_objects(obj) dict_diff(casted_obj, expected_obj) @patch("datasets.features.features._cast_to_python_objects", side_effect=_cast_to_python_objects) def test_dont_iterate_over_each_element_in_a_list(self, mocked_cast): obj = {"col_1": [[1, 2], [3, 4], [5, 6]]} cast_to_python_objects(obj) self.assertEqual(mocked_cast.call_count, 4) # 4 = depth of obj SIMPLE_FEATURES = [ Features(), Features({"a": Value("int32")}), Features({"a": Value("int32", id="my feature")}), Features({"a": Value("int32"), "b": Value("float64"), "c": Value("string")}), ] CUSTOM_FEATURES = [ Features({"label": ClassLabel(names=["negative", "positive"])}), Features({"array": Array2D(dtype="float32", shape=(4, 4))}), Features({"image": Image()}), Features({"audio": Audio()}), Features({"image": Image(decode=False)}), Features({"audio": Audio(decode=False)}), Features({"translation": Translation(["en", "fr"])}), Features({"translation": TranslationVariableLanguages(["en", "fr"])}), ] NESTED_FEATURES = [ Features({"foo": {}}), Features({"foo": {"bar": Value("int32")}}), Features({"foo": {"bar1": Value("int32"), "bar2": Value("float64")}}), Features({"foo": Sequence(Value("int32"))}), Features({"foo": Sequence({})}), Features({"foo": Sequence({"bar": Value("int32")})}), Features({"foo": [Value("int32")]}), Features({"foo": [{"bar": Value("int32")}]}), ] NESTED_CUSTOM_FEATURES = [ Features({"foo": {"bar": ClassLabel(names=["negative", "positive"])}}), Features({"foo": Sequence(ClassLabel(names=["negative", "positive"]))}), Features({"foo": Sequence({"bar": ClassLabel(names=["negative", "positive"])})}), Features({"foo": [ClassLabel(names=["negative", "positive"])]}), Features({"foo": [{"bar": ClassLabel(names=["negative", "positive"])}]}), ] @pytest.mark.parametrize("features", SIMPLE_FEATURES + CUSTOM_FEATURES + NESTED_FEATURES + NESTED_CUSTOM_FEATURES) def test_features_to_dict(features: Features): features_dict = features.to_dict() assert isinstance(features_dict, dict) reloaded = Features.from_dict(features_dict) assert features == reloaded @pytest.mark.parametrize("features", SIMPLE_FEATURES + CUSTOM_FEATURES + NESTED_FEATURES + NESTED_CUSTOM_FEATURES) def test_features_to_yaml_list(features: Features): features_yaml_list = features._to_yaml_list() assert isinstance(features_yaml_list, list) reloaded = Features._from_yaml_list(features_yaml_list) assert features == reloaded @pytest.mark.parametrize("features", SIMPLE_FEATURES + CUSTOM_FEATURES + NESTED_FEATURES + NESTED_CUSTOM_FEATURES) def test_features_to_arrow_schema(features: Features): arrow_schema = features.arrow_schema assert isinstance(arrow_schema, pa.Schema) reloaded = Features.from_arrow_schema(arrow_schema) assert features == reloaded
datasets/tests/features/test_features.py/0
{ "file_path": "datasets/tests/features/test_features.py", "repo_id": "datasets", "token_count": 12404 }
74
import shutil import textwrap import librosa import numpy as np import pytest import soundfile as sf from datasets import Audio, ClassLabel, Features, Value from datasets.data_files import DataFilesDict, get_data_patterns from datasets.download.streaming_download_manager import StreamingDownloadManager from datasets.packaged_modules.audiofolder.audiofolder import AudioFolder from ..utils import require_sndfile @pytest.fixture def cache_dir(tmp_path): return str(tmp_path / "audiofolder_cache_dir") @pytest.fixture def data_files_with_labels_no_metadata(tmp_path, audio_file): data_dir = tmp_path / "data_files_with_labels_no_metadata" data_dir.mkdir(parents=True, exist_ok=True) subdir_class_0 = data_dir / "fr" subdir_class_0.mkdir(parents=True, exist_ok=True) subdir_class_1 = data_dir / "uk" subdir_class_1.mkdir(parents=True, exist_ok=True) audio_filename = subdir_class_0 / "audio_fr.wav" shutil.copyfile(audio_file, audio_filename) audio_filename2 = subdir_class_1 / "audio_uk.wav" shutil.copyfile(audio_file, audio_filename2) data_files_with_labels_no_metadata = DataFilesDict.from_patterns( get_data_patterns(str(data_dir)), data_dir.as_posix() ) return data_files_with_labels_no_metadata @pytest.fixture def audio_files_with_labels_and_duplicated_label_key_in_metadata(tmp_path, audio_file): data_dir = tmp_path / "audio_files_with_labels_and_label_key_in_metadata" data_dir.mkdir(parents=True, exist_ok=True) subdir_class_0 = data_dir / "fr" subdir_class_0.mkdir(parents=True, exist_ok=True) subdir_class_1 = data_dir / "uk" subdir_class_1.mkdir(parents=True, exist_ok=True) audio_filename = subdir_class_0 / "audio_fr.wav" shutil.copyfile(audio_file, audio_filename) audio_filename2 = subdir_class_1 / "audio_uk.wav" shutil.copyfile(audio_file, audio_filename2) audio_metadata_filename = tmp_path / data_dir / "metadata.jsonl" audio_metadata = textwrap.dedent( """\ {"file_name": "fr/audio_fr.wav", "text": "Audio in French", "label": "Fr"} {"file_name": "uk/audio_uk.wav", "text": "Audio in Ukrainian", "label": "Uk"} """ ) with open(audio_metadata_filename, "w", encoding="utf-8") as f: f.write(audio_metadata) return str(audio_filename), str(audio_filename2), str(audio_metadata_filename) @pytest.fixture def audio_file_with_metadata(tmp_path, audio_file): audio_filename = tmp_path / "audio_file.wav" shutil.copyfile(audio_file, audio_filename) audio_metadata_filename = tmp_path / "metadata.jsonl" audio_metadata = textwrap.dedent( """\ {"file_name": "audio_file.wav", "text": "Audio transcription"} """ ) with open(audio_metadata_filename, "w", encoding="utf-8") as f: f.write(audio_metadata) return str(audio_filename), str(audio_metadata_filename) @pytest.fixture def audio_files_with_metadata_that_misses_one_audio(tmp_path, audio_file): audio_filename = tmp_path / "audio_file.wav" shutil.copyfile(audio_file, audio_filename) audio_filename2 = tmp_path / "audio_file2.wav" shutil.copyfile(audio_file, audio_filename2) audio_metadata_filename = tmp_path / "metadata.jsonl" audio_metadata = textwrap.dedent( """\ {"file_name": "audio_file.wav", "text": "Audio transcription"} """ ) with open(audio_metadata_filename, "w", encoding="utf-8") as f: f.write(audio_metadata) return str(audio_filename), str(audio_filename2), str(audio_metadata_filename) @pytest.fixture def data_files_with_one_split_and_metadata(tmp_path, audio_file): data_dir = tmp_path / "audiofolder_data_dir_with_metadata" data_dir.mkdir(parents=True, exist_ok=True) subdir = data_dir / "subdir" subdir.mkdir(parents=True, exist_ok=True) audio_filename = data_dir 
/ "audio_file.wav" shutil.copyfile(audio_file, audio_filename) audio_filename2 = data_dir / "audio_file2.wav" shutil.copyfile(audio_file, audio_filename2) audio_filename3 = subdir / "audio_file3.wav" # in subdir shutil.copyfile(audio_file, audio_filename3) audio_metadata_filename = data_dir / "metadata.jsonl" audio_metadata = textwrap.dedent( """\ {"file_name": "audio_file.wav", "text": "First audio transcription"} {"file_name": "audio_file2.wav", "text": "Second audio transcription"} {"file_name": "subdir/audio_file3.wav", "text": "Third audio transcription (in subdir)"} """ ) with open(audio_metadata_filename, "w", encoding="utf-8") as f: f.write(audio_metadata) data_files_with_one_split_and_metadata = DataFilesDict.from_patterns( get_data_patterns(str(data_dir)), data_dir.as_posix() ) assert len(data_files_with_one_split_and_metadata) == 1 assert len(data_files_with_one_split_and_metadata["train"]) == 4 return data_files_with_one_split_and_metadata @pytest.fixture(params=["jsonl", "csv"]) def data_files_with_two_splits_and_metadata(request, tmp_path, audio_file): data_dir = tmp_path / "audiofolder_data_dir_with_metadata" data_dir.mkdir(parents=True, exist_ok=True) train_dir = data_dir / "train" train_dir.mkdir(parents=True, exist_ok=True) test_dir = data_dir / "test" test_dir.mkdir(parents=True, exist_ok=True) audio_filename = train_dir / "audio_file.wav" # train audio shutil.copyfile(audio_file, audio_filename) audio_filename2 = train_dir / "audio_file2.wav" # train audio shutil.copyfile(audio_file, audio_filename2) audio_filename3 = test_dir / "audio_file3.wav" # test audio shutil.copyfile(audio_file, audio_filename3) train_audio_metadata_filename = train_dir / f"metadata.{request.param}" audio_metadata = ( textwrap.dedent( """\ {"file_name": "audio_file.wav", "text": "First train audio transcription"} {"file_name": "audio_file2.wav", "text": "Second train audio transcription"} """ ) if request.param == "jsonl" else textwrap.dedent( """\ file_name,text audio_file.wav,First train audio transcription audio_file2.wav,Second train audio transcription """ ) ) with open(train_audio_metadata_filename, "w", encoding="utf-8") as f: f.write(audio_metadata) test_audio_metadata_filename = test_dir / f"metadata.{request.param}" audio_metadata = ( textwrap.dedent( """\ {"file_name": "audio_file3.wav", "text": "Test audio transcription"} """ ) if request.param == "jsonl" else textwrap.dedent( """\ file_name,text audio_file3.wav,Test audio transcription """ ) ) with open(test_audio_metadata_filename, "w", encoding="utf-8") as f: f.write(audio_metadata) data_files_with_two_splits_and_metadata = DataFilesDict.from_patterns( get_data_patterns(str(data_dir)), data_dir.as_posix() ) assert len(data_files_with_two_splits_and_metadata) == 2 assert len(data_files_with_two_splits_and_metadata["train"]) == 3 assert len(data_files_with_two_splits_and_metadata["test"]) == 2 return data_files_with_two_splits_and_metadata @pytest.fixture def data_files_with_zip_archives(tmp_path, audio_file): data_dir = tmp_path / "audiofolder_data_dir_with_zip_archives" data_dir.mkdir(parents=True, exist_ok=True) archive_dir = data_dir / "archive" archive_dir.mkdir(parents=True, exist_ok=True) subdir = archive_dir / "subdir" subdir.mkdir(parents=True, exist_ok=True) audio_filename = archive_dir / "audio_file.wav" shutil.copyfile(audio_file, audio_filename) audio_filename2 = subdir / "audio_file2.wav" # in subdir # make sure they're two different audios # Indeed we won't be able to compare the audio filenames, since the archive is 
not extracted in streaming mode array, sampling_rate = librosa.load(str(audio_filename), sr=16000) # original sampling rate is 44100 sf.write(str(audio_filename2), array, samplerate=16000) audio_metadata_filename = archive_dir / "metadata.jsonl" audio_metadata = textwrap.dedent( """\ {"file_name": "audio_file.wav", "text": "First audio transcription"} {"file_name": "subdir/audio_file2.wav", "text": "Second audio transcription (in subdir)"} """ ) with open(audio_metadata_filename, "w", encoding="utf-8") as f: f.write(audio_metadata) shutil.make_archive(str(archive_dir), "zip", archive_dir) shutil.rmtree(str(archive_dir)) data_files_with_zip_archives = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) assert len(data_files_with_zip_archives) == 1 assert len(data_files_with_zip_archives["train"]) == 1 return data_files_with_zip_archives @require_sndfile # check that labels are inferred correctly from dir names def test_generate_examples_with_labels(data_files_with_labels_no_metadata, cache_dir): # there are no metadata.jsonl files in this test case audiofolder = AudioFolder(data_files=data_files_with_labels_no_metadata, cache_dir=cache_dir, drop_labels=False) audiofolder.download_and_prepare() assert audiofolder.info.features == Features({"audio": Audio(), "label": ClassLabel(names=["fr", "uk"])}) dataset = list(audiofolder.as_dataset()["train"]) label_feature = audiofolder.info.features["label"] assert dataset[0]["label"] == label_feature._str2int["fr"] assert dataset[1]["label"] == label_feature._str2int["uk"] @require_sndfile @pytest.mark.parametrize("drop_metadata", [None, True, False]) @pytest.mark.parametrize("drop_labels", [None, True, False]) def test_generate_examples_duplicated_label_key( audio_files_with_labels_and_duplicated_label_key_in_metadata, drop_metadata, drop_labels, cache_dir, caplog ): fr_audio_file, uk_audio_file, audio_metadata_file = audio_files_with_labels_and_duplicated_label_key_in_metadata audiofolder = AudioFolder( drop_metadata=drop_metadata, drop_labels=drop_labels, data_files=[fr_audio_file, uk_audio_file, audio_metadata_file], cache_dir=cache_dir, ) if drop_labels is False: # infer labels from directories even if metadata files are found audiofolder.download_and_prepare() warning_in_logs = any("ignoring metadata columns" in record.msg.lower() for record in caplog.records) assert warning_in_logs if drop_metadata is not True else not warning_in_logs dataset = audiofolder.as_dataset()["train"] assert audiofolder.info.features["label"] == ClassLabel(names=["fr", "uk"]) assert all(example["label"] in audiofolder.info.features["label"]._str2int.values() for example in dataset) else: audiofolder.download_and_prepare() dataset = audiofolder.as_dataset()["train"] if drop_metadata is not True: # labels are from metadata assert audiofolder.info.features["label"] == Value("string") assert all(example["label"] in ["Fr", "Uk"] for example in dataset) else: # drop both labels and metadata assert audiofolder.info.features == Features({"audio": Audio()}) assert all(example.keys() == {"audio"} for example in dataset) @require_sndfile @pytest.mark.parametrize("drop_metadata", [None, True, False]) @pytest.mark.parametrize("drop_labels", [None, True, False]) def test_generate_examples_drop_labels(data_files_with_labels_no_metadata, drop_metadata, drop_labels): audiofolder = AudioFolder( drop_metadata=drop_metadata, drop_labels=drop_labels, data_files=data_files_with_labels_no_metadata ) gen_kwargs = 
audiofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs # removing the labels explicitly requires drop_labels=True assert gen_kwargs["add_labels"] is not bool(drop_labels) assert gen_kwargs["add_metadata"] is False # metadata files is not present in this case generator = audiofolder._generate_examples(**gen_kwargs) if not drop_labels: assert all( example.keys() == {"audio", "label"} and all(val is not None for val in example.values()) for _, example in generator ) else: assert all( example.keys() == {"audio"} and all(val is not None for val in example.values()) for _, example in generator ) @require_sndfile @pytest.mark.parametrize("drop_metadata", [None, True, False]) @pytest.mark.parametrize("drop_labels", [None, True, False]) def test_generate_examples_drop_metadata(audio_file_with_metadata, drop_metadata, drop_labels): audio_file, audio_metadata_file = audio_file_with_metadata audiofolder = AudioFolder( drop_metadata=drop_metadata, drop_labels=drop_labels, data_files={"train": [audio_file, audio_metadata_file]} ) gen_kwargs = audiofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs # since the dataset has metadata, removing the metadata explicitly requires drop_metadata=True assert gen_kwargs["add_metadata"] is not bool(drop_metadata) # since the dataset has metadata, adding the labels explicitly requires drop_labels=False assert gen_kwargs["add_labels"] is (drop_labels is False) generator = audiofolder._generate_examples(**gen_kwargs) expected_columns = {"audio"} if gen_kwargs["add_metadata"]: expected_columns.add("text") if gen_kwargs["add_labels"]: expected_columns.add("label") result = [example for _, example in generator] assert len(result) == 1 example = result[0] assert example.keys() == expected_columns for column in expected_columns: assert example[column] is not None @require_sndfile @pytest.mark.parametrize("drop_metadata", [None, True, False]) def test_generate_examples_with_metadata_in_wrong_location(audio_file, audio_file_with_metadata, drop_metadata): _, audio_metadata_file = audio_file_with_metadata audiofolder = AudioFolder(drop_metadata=drop_metadata, data_files={"train": [audio_file, audio_metadata_file]}) gen_kwargs = audiofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs generator = audiofolder._generate_examples(**gen_kwargs) if not drop_metadata: with pytest.raises(ValueError): list(generator) else: assert all( example.keys() == {"audio"} and all(val is not None for val in example.values()) for _, example in generator ) @require_sndfile @pytest.mark.parametrize("drop_metadata", [None, True, False]) def test_generate_examples_with_metadata_that_misses_one_audio( audio_files_with_metadata_that_misses_one_audio, drop_metadata ): audio_file, audio_file2, audio_metadata_file = audio_files_with_metadata_that_misses_one_audio if not drop_metadata: features = Features({"audio": Audio(), "text": Value("string")}) else: features = Features({"audio": Audio()}) audiofolder = AudioFolder( drop_metadata=drop_metadata, features=features, data_files={"train": [audio_file, audio_file2, audio_metadata_file]}, ) gen_kwargs = audiofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs generator = audiofolder._generate_examples(**gen_kwargs) if not drop_metadata: with pytest.raises(ValueError): _ = list(generator) else: assert all( example.keys() == {"audio"} and all(val is not None for val in example.values()) for _, example in generator ) @require_sndfile @pytest.mark.parametrize("streaming", [False, True]) def 
test_data_files_with_metadata_and_single_split(streaming, cache_dir, data_files_with_one_split_and_metadata): data_files = data_files_with_one_split_and_metadata audiofolder = AudioFolder(data_files=data_files, cache_dir=cache_dir) audiofolder.download_and_prepare() datasets = audiofolder.as_streaming_dataset() if streaming else audiofolder.as_dataset() for split, data_files in data_files.items(): expected_num_of_audios = len(data_files) - 1 # don't count the metadata file assert split in datasets dataset = list(datasets[split]) assert len(dataset) == expected_num_of_audios # make sure each sample has its own audio and metadata assert len({example["audio"]["path"] for example in dataset}) == expected_num_of_audios assert len({example["text"] for example in dataset}) == expected_num_of_audios assert all(example["text"] is not None for example in dataset) @require_sndfile @pytest.mark.parametrize("streaming", [False, True]) def test_data_files_with_metadata_and_multiple_splits(streaming, cache_dir, data_files_with_two_splits_and_metadata): data_files = data_files_with_two_splits_and_metadata audiofolder = AudioFolder(data_files=data_files, cache_dir=cache_dir) audiofolder.download_and_prepare() datasets = audiofolder.as_streaming_dataset() if streaming else audiofolder.as_dataset() for split, data_files in data_files.items(): expected_num_of_audios = len(data_files) - 1 # don't count the metadata file assert split in datasets dataset = list(datasets[split]) assert len(dataset) == expected_num_of_audios # make sure each sample has its own audio and metadata assert len({example["audio"]["path"] for example in dataset}) == expected_num_of_audios assert len({example["text"] for example in dataset}) == expected_num_of_audios assert all(example["text"] is not None for example in dataset) @require_sndfile @pytest.mark.parametrize("streaming", [False, True]) def test_data_files_with_metadata_and_archives(streaming, cache_dir, data_files_with_zip_archives): audiofolder = AudioFolder(data_files=data_files_with_zip_archives, cache_dir=cache_dir) audiofolder.download_and_prepare() datasets = audiofolder.as_streaming_dataset() if streaming else audiofolder.as_dataset() for split, data_files in data_files_with_zip_archives.items(): num_of_archives = len(data_files) # the metadata file is inside the archive expected_num_of_audios = 2 * num_of_archives assert split in datasets dataset = list(datasets[split]) assert len(dataset) == expected_num_of_audios # make sure each sample has its own audio (all arrays are different) and metadata assert ( sum(np.array_equal(dataset[0]["audio"]["array"], example["audio"]["array"]) for example in dataset[1:]) == 0 ) assert len({example["text"] for example in dataset}) == expected_num_of_audios assert all(example["text"] is not None for example in dataset) @require_sndfile def test_data_files_with_wrong_metadata_file_name(cache_dir, tmp_path, audio_file): data_dir = tmp_path / "data_dir_with_bad_metadata" data_dir.mkdir(parents=True, exist_ok=True) shutil.copyfile(audio_file, data_dir / "audio_file.wav") audio_metadata_filename = data_dir / "bad_metadata.jsonl" # bad file audio_metadata = textwrap.dedent( """\ {"file_name": "audio_file.wav", "text": "Audio transcription"} """ ) with open(audio_metadata_filename, "w", encoding="utf-8") as f: f.write(audio_metadata) data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) audiofolder = AudioFolder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir) 
audiofolder.download_and_prepare() dataset = audiofolder.as_dataset(split="train") # check that there are no metadata, since the metadata file name doesn't have the right name assert "text" not in dataset.column_names @require_sndfile def test_data_files_with_wrong_audio_file_name_column_in_metadata_file(cache_dir, tmp_path, audio_file): data_dir = tmp_path / "data_dir_with_bad_metadata" data_dir.mkdir(parents=True, exist_ok=True) shutil.copyfile(audio_file, data_dir / "audio_file.wav") audio_metadata_filename = data_dir / "metadata.jsonl" audio_metadata = textwrap.dedent( # with bad column "bad_file_name" instead of "file_name" """\ {"bad_file_name_column": "audio_file.wav", "text": "Audio transcription"} """ ) with open(audio_metadata_filename, "w", encoding="utf-8") as f: f.write(audio_metadata) data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) audiofolder = AudioFolder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir) with pytest.raises(ValueError) as exc_info: audiofolder.download_and_prepare() assert "`file_name` must be present" in str(exc_info.value) @require_sndfile def test_data_files_with_with_metadata_in_different_formats(cache_dir, tmp_path, audio_file): data_dir = tmp_path / "data_dir_with_metadata_in_different_format" data_dir.mkdir(parents=True, exist_ok=True) shutil.copyfile(audio_file, data_dir / "audio_file.wav") audio_metadata_filename_jsonl = data_dir / "metadata.jsonl" audio_metadata_jsonl = textwrap.dedent( """\ {"file_name": "audio_file.wav", "text": "Audio transcription"} """ ) with open(audio_metadata_filename_jsonl, "w", encoding="utf-8") as f: f.write(audio_metadata_jsonl) audio_metadata_filename_csv = data_dir / "metadata.csv" audio_metadata_csv = textwrap.dedent( """\ file_name,text audio_file.wav,Audio transcription """ ) with open(audio_metadata_filename_csv, "w", encoding="utf-8") as f: f.write(audio_metadata_csv) data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) audiofolder = AudioFolder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir) with pytest.raises(ValueError) as exc_info: audiofolder.download_and_prepare() assert "metadata files with different extensions" in str(exc_info.value)
datasets/tests/packaged_modules/test_audiofolder.py/0
{ "file_path": "datasets/tests/packaged_modules/test_audiofolder.py", "repo_id": "datasets", "token_count": 8594 }
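The audiofolder tests above all revolve around one layout: audio files stored next to a metadata.jsonl whose `file_name` column links each row to a file. As a rough sketch of that usage (not part of the test file above; the directory name is illustrative, and like the tests it assumes a soundfile backend is available for .wav decoding):

# Minimal sketch of the layout exercised above:
#
#   my_audio_dir/
#       audio_file.wav
#       metadata.jsonl   ->  {"file_name": "audio_file.wav", "text": "Audio transcription"}
#
from datasets import load_dataset

# "audiofolder" is the packaged loader the AudioFolder builder above implements;
# with a single flat folder, everything lands in the default "train" split.
ds = load_dataset("audiofolder", data_dir="my_audio_dir", split="train")
print(ds[0]["audio"]["path"], ds[0]["text"])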
from unittest import TestCase

from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset


class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
datasets/tests/test_dataset_list.py/0
{ "file_path": "datasets/tests/test_dataset_list.py", "repo_id": "datasets", "token_count": 875 }
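As a quick illustration of the equivalence that `test_list_dict_equivalent` above checks (a standalone sketch, not part of the test file): building a `Dataset` from a list of records and from a dict of columns should produce the same schema and info.

from datasets import Dataset

records = [{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}]
dset_from_list = Dataset.from_list(records)
dset_from_dict = Dataset.from_dict({"col_1": [3, 2], "col_2": ["a", "b"]})
assert dset_from_list.info == dset_from_dict.info  # same features and metadata either way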
import importlib import os import pickle import shutil import tempfile import time from hashlib import sha256 from multiprocessing import Pool from pathlib import Path from unittest import TestCase from unittest.mock import patch import dill import pyarrow as pa import pytest import requests import datasets from datasets import config, load_dataset, load_from_disk from datasets.arrow_dataset import Dataset from datasets.arrow_writer import ArrowWriter from datasets.builder import DatasetBuilder from datasets.config import METADATA_CONFIGS_FIELD from datasets.data_files import DataFilesDict, DataFilesPatternsDict from datasets.dataset_dict import DatasetDict, IterableDatasetDict from datasets.download.download_config import DownloadConfig from datasets.exceptions import DatasetNotFoundError from datasets.features import Features, Image, Value from datasets.iterable_dataset import IterableDataset from datasets.load import ( CachedDatasetModuleFactory, CachedMetricModuleFactory, GithubMetricModuleFactory, HubDatasetModuleFactoryWithoutScript, HubDatasetModuleFactoryWithParquetExport, HubDatasetModuleFactoryWithScript, LocalDatasetModuleFactoryWithoutScript, LocalDatasetModuleFactoryWithScript, LocalMetricModuleFactory, PackagedDatasetModuleFactory, infer_module_for_data_files_list, infer_module_for_data_files_list_in_archives, load_dataset_builder, resolve_trust_remote_code, ) from datasets.packaged_modules.audiofolder.audiofolder import AudioFolder, AudioFolderConfig from datasets.packaged_modules.imagefolder.imagefolder import ImageFolder, ImageFolderConfig from datasets.packaged_modules.parquet.parquet import ParquetConfig from datasets.utils import _datasets_server from datasets.utils.logging import INFO, get_logger from .utils import ( OfflineSimulationMode, assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, offline, require_pil, require_sndfile, set_current_working_directory_to_temp_dir, ) DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__" DATASET_LOADING_SCRIPT_CODE = """ import os import datasets from datasets import DatasetInfo, Features, Split, SplitGenerator, Value class __DummyDataset1__(datasets.GeneratorBasedBuilder): def _info(self) -> DatasetInfo: return DatasetInfo(features=Features({"text": Value("string")})) def _split_generators(self, dl_manager): return [ SplitGenerator(Split.TRAIN, gen_kwargs={"filepath": os.path.join(dl_manager.manual_dir, "train.txt")}), SplitGenerator(Split.TEST, gen_kwargs={"filepath": os.path.join(dl_manager.manual_dir, "test.txt")}), ] def _generate_examples(self, filepath, **kwargs): with open(filepath, "r", encoding="utf-8") as f: for i, line in enumerate(f): yield i, {"text": line.strip()} """ SAMPLE_DATASET_IDENTIFIER = "hf-internal-testing/dataset_with_script" # has dataset script and also a parquet export SAMPLE_DATASET_IDENTIFIER2 = "hf-internal-testing/dataset_with_data_files" # only has data files SAMPLE_DATASET_IDENTIFIER3 = "hf-internal-testing/multi_dir_dataset" # has multiple data directories SAMPLE_DATASET_IDENTIFIER4 = "hf-internal-testing/imagefolder_with_metadata" # imagefolder with a metadata file outside of the train/test directories SAMPLE_DATASET_IDENTIFIER5 = "hf-internal-testing/imagefolder_with_metadata_no_splits" # imagefolder with a metadata file and no default split names in data files SAMPLE_NOT_EXISTING_DATASET_IDENTIFIER = "hf-internal-testing/_dummy" SAMPLE_DATASET_NAME_THAT_DOESNT_EXIST = "_dummy" SAMPLE_DATASET_NO_CONFIGS_IN_METADATA = "hf-internal-testing/audiofolder_no_configs_in_metadata" 
SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA = "hf-internal-testing/audiofolder_single_config_in_metadata" SAMPLE_DATASET_TWO_CONFIG_IN_METADATA = "hf-internal-testing/audiofolder_two_configs_in_metadata" SAMPLE_DATASET_TWO_CONFIG_IN_METADATA_WITH_DEFAULT = ( "hf-internal-testing/audiofolder_two_configs_in_metadata_with_default" ) METRIC_LOADING_SCRIPT_NAME = "__dummy_metric1__" METRIC_LOADING_SCRIPT_CODE = """ import datasets from datasets import MetricInfo, Features, Value class __DummyMetric1__(datasets.Metric): def _info(self): return MetricInfo(features=Features({"predictions": Value("int"), "references": Value("int")})) def _compute(self, predictions, references): return {"__dummy_metric1__": sum(int(p == r) for p, r in zip(predictions, references))} """ @pytest.fixture def data_dir(tmp_path): data_dir = tmp_path / "data_dir" data_dir.mkdir() with open(data_dir / "train.txt", "w") as f: f.write("foo\n" * 10) with open(data_dir / "test.txt", "w") as f: f.write("bar\n" * 10) return str(data_dir) @pytest.fixture def data_dir_with_arrow(tmp_path): data_dir = tmp_path / "data_dir" data_dir.mkdir() output_train = os.path.join(data_dir, "train.arrow") with ArrowWriter(path=output_train) as writer: writer.write_table(pa.Table.from_pydict({"col_1": ["foo"] * 10})) num_examples, num_bytes = writer.finalize() assert num_examples == 10 assert num_bytes > 0 output_test = os.path.join(data_dir, "test.arrow") with ArrowWriter(path=output_test) as writer: writer.write_table(pa.Table.from_pydict({"col_1": ["bar"] * 10})) num_examples, num_bytes = writer.finalize() assert num_examples == 10 assert num_bytes > 0 return str(data_dir) @pytest.fixture def data_dir_with_metadata(tmp_path): data_dir = tmp_path / "data_dir_with_metadata" data_dir.mkdir() with open(data_dir / "train.jpg", "wb") as f: f.write(b"train_image_bytes") with open(data_dir / "test.jpg", "wb") as f: f.write(b"test_image_bytes") with open(data_dir / "metadata.jsonl", "w") as f: f.write( """\ {"file_name": "train.jpg", "caption": "Cool tran image"} {"file_name": "test.jpg", "caption": "Cool test image"} """ ) return str(data_dir) @pytest.fixture def data_dir_with_single_config_in_metadata(tmp_path): data_dir = tmp_path / "data_dir_with_one_default_config_in_metadata" cats_data_dir = data_dir / "cats" cats_data_dir.mkdir(parents=True) dogs_data_dir = data_dir / "dogs" dogs_data_dir.mkdir(parents=True) with open(cats_data_dir / "cat.jpg", "wb") as f: f.write(b"this_is_a_cat_image_bytes") with open(dogs_data_dir / "dog.jpg", "wb") as f: f.write(b"this_is_a_dog_image_bytes") with open(data_dir / "README.md", "w") as f: f.write( f"""\ --- {METADATA_CONFIGS_FIELD}: - config_name: custom drop_labels: true --- """ ) return str(data_dir) @pytest.fixture def data_dir_with_two_config_in_metadata(tmp_path): data_dir = tmp_path / "data_dir_with_two_configs_in_metadata" cats_data_dir = data_dir / "cats" cats_data_dir.mkdir(parents=True) dogs_data_dir = data_dir / "dogs" dogs_data_dir.mkdir(parents=True) with open(cats_data_dir / "cat.jpg", "wb") as f: f.write(b"this_is_a_cat_image_bytes") with open(dogs_data_dir / "dog.jpg", "wb") as f: f.write(b"this_is_a_dog_image_bytes") with open(data_dir / "README.md", "w") as f: f.write( f"""\ --- {METADATA_CONFIGS_FIELD}: - config_name: "v1" drop_labels: true default: true - config_name: "v2" drop_labels: false --- """ ) return str(data_dir) @pytest.fixture def data_dir_with_data_dir_configs_in_metadata(tmp_path): data_dir = tmp_path / "data_dir_with_two_configs_in_metadata" cats_data_dir = data_dir / "cats" 
cats_data_dir.mkdir(parents=True) dogs_data_dir = data_dir / "dogs" dogs_data_dir.mkdir(parents=True) with open(cats_data_dir / "cat.jpg", "wb") as f: f.write(b"this_is_a_cat_image_bytes") with open(dogs_data_dir / "dog.jpg", "wb") as f: f.write(b"this_is_a_dog_image_bytes") @pytest.fixture def sub_data_dirs(tmp_path): data_dir2 = tmp_path / "data_dir2" relative_subdir1 = "subdir1" sub_data_dir1 = data_dir2 / relative_subdir1 sub_data_dir1.mkdir(parents=True) with open(sub_data_dir1 / "train.txt", "w") as f: f.write("foo\n" * 10) with open(sub_data_dir1 / "test.txt", "w") as f: f.write("bar\n" * 10) relative_subdir2 = "subdir2" sub_data_dir2 = tmp_path / data_dir2 / relative_subdir2 sub_data_dir2.mkdir(parents=True) with open(sub_data_dir2 / "train.txt", "w") as f: f.write("foo\n" * 10) with open(sub_data_dir2 / "test.txt", "w") as f: f.write("bar\n" * 10) return str(data_dir2), relative_subdir1 @pytest.fixture def complex_data_dir(tmp_path): data_dir = tmp_path / "complex_data_dir" data_dir.mkdir() (data_dir / "data").mkdir() with open(data_dir / "data" / "train.txt", "w") as f: f.write("foo\n" * 10) with open(data_dir / "data" / "test.txt", "w") as f: f.write("bar\n" * 10) with open(data_dir / "README.md", "w") as f: f.write("This is a readme") with open(data_dir / ".dummy", "w") as f: f.write("this is a dummy file that is not a data file") return str(data_dir) @pytest.fixture def dataset_loading_script_dir(tmp_path): script_name = DATASET_LOADING_SCRIPT_NAME script_dir = tmp_path / script_name script_dir.mkdir() script_path = script_dir / f"{script_name}.py" with open(script_path, "w") as f: f.write(DATASET_LOADING_SCRIPT_CODE) return str(script_dir) @pytest.fixture def dataset_loading_script_dir_readonly(tmp_path): script_name = DATASET_LOADING_SCRIPT_NAME script_dir = tmp_path / "readonly" / script_name script_dir.mkdir(parents=True) script_path = script_dir / f"{script_name}.py" with open(script_path, "w") as f: f.write(DATASET_LOADING_SCRIPT_CODE) dataset_loading_script_dir = str(script_dir) # Make this directory readonly os.chmod(dataset_loading_script_dir, 0o555) os.chmod(os.path.join(dataset_loading_script_dir, f"{script_name}.py"), 0o555) return dataset_loading_script_dir @pytest.fixture def metric_loading_script_dir(tmp_path): script_name = METRIC_LOADING_SCRIPT_NAME script_dir = tmp_path / script_name script_dir.mkdir() script_path = script_dir / f"{script_name}.py" with open(script_path, "w") as f: f.write(METRIC_LOADING_SCRIPT_CODE) return str(script_dir) @pytest.mark.parametrize( "data_files, expected_module, expected_builder_kwargs", [ (["train.csv"], "csv", {}), (["train.tsv"], "csv", {"sep": "\t"}), (["train.json"], "json", {}), (["train.jsonl"], "json", {}), (["train.parquet"], "parquet", {}), (["train.geoparquet"], "parquet", {}), (["train.gpq"], "parquet", {}), (["train.arrow"], "arrow", {}), (["train.txt"], "text", {}), (["uppercase.TXT"], "text", {}), (["unsupported.ext"], None, {}), ([""], None, {}), ], ) def test_infer_module_for_data_files(data_files, expected_module, expected_builder_kwargs): module, builder_kwargs = infer_module_for_data_files_list(data_files) assert module == expected_module assert builder_kwargs == expected_builder_kwargs @pytest.mark.parametrize( "data_file, expected_module", [ ("zip_csv_path", "csv"), ("zip_csv_with_dir_path", "csv"), ("zip_uppercase_csv_path", "csv"), ("zip_unsupported_ext_path", None), ], ) def test_infer_module_for_data_files_in_archives( data_file, expected_module, zip_csv_path, zip_csv_with_dir_path, 
zip_uppercase_csv_path, zip_unsupported_ext_path ): data_file_paths = { "zip_csv_path": zip_csv_path, "zip_csv_with_dir_path": zip_csv_with_dir_path, "zip_uppercase_csv_path": zip_uppercase_csv_path, "zip_unsupported_ext_path": zip_unsupported_ext_path, } data_files = [str(data_file_paths[data_file])] inferred_module, _ = infer_module_for_data_files_list_in_archives(data_files) assert inferred_module == expected_module class ModuleFactoryTest(TestCase): @pytest.fixture(autouse=True) def inject_fixtures( self, jsonl_path, data_dir, data_dir_with_metadata, data_dir_with_single_config_in_metadata, data_dir_with_two_config_in_metadata, sub_data_dirs, dataset_loading_script_dir, metric_loading_script_dir, ): self._jsonl_path = jsonl_path self._data_dir = data_dir self._data_dir_with_metadata = data_dir_with_metadata self._data_dir_with_single_config_in_metadata = data_dir_with_single_config_in_metadata self._data_dir_with_two_config_in_metadata = data_dir_with_two_config_in_metadata self._data_dir2 = sub_data_dirs[0] self._sub_data_dir = sub_data_dirs[1] self._dataset_loading_script_dir = dataset_loading_script_dir self._metric_loading_script_dir = metric_loading_script_dir def setUp(self): self.hf_modules_cache = tempfile.mkdtemp() self.cache_dir = tempfile.mkdtemp() self.download_config = DownloadConfig(cache_dir=self.cache_dir) self.dynamic_modules_path = datasets.load.init_dynamic_modules( name="test_datasets_modules_" + os.path.basename(self.hf_modules_cache), hf_modules_cache=self.hf_modules_cache, ) def test_HubDatasetModuleFactoryWithScript_dont_trust_remote_code(self): # "lhoestq/test" has a dataset script factory = HubDatasetModuleFactoryWithScript( "lhoestq/test", download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path ) with patch.object(config, "HF_DATASETS_TRUST_REMOTE_CODE", None): # this will be the default soon self.assertRaises(ValueError, factory.get_module) factory = HubDatasetModuleFactoryWithScript( "lhoestq/test", download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path, trust_remote_code=False, ) self.assertRaises(ValueError, factory.get_module) def test_HubDatasetModuleFactoryWithScript_with_github_dataset(self): # "wmt_t2t" has additional imports (internal) factory = HubDatasetModuleFactoryWithScript( "wmt_t2t", download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT) def test_GithubMetricModuleFactory_with_internal_import(self): # "squad_v2" requires additional imports (internal) factory = GithubMetricModuleFactory( "squad_v2", download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None @pytest.mark.filterwarnings("ignore:GithubMetricModuleFactory is deprecated:FutureWarning") def test_GithubMetricModuleFactory_with_external_import(self): # "bleu" requires additional imports (external from github) factory = GithubMetricModuleFactory( "bleu", download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None def test_LocalMetricModuleFactory(self): path = 
os.path.join(self._metric_loading_script_dir, f"{METRIC_LOADING_SCRIPT_NAME}.py") factory = LocalMetricModuleFactory( path, download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None def test_LocalDatasetModuleFactoryWithScript(self): path = os.path.join(self._dataset_loading_script_dir, f"{DATASET_LOADING_SCRIPT_NAME}.py") factory = LocalDatasetModuleFactoryWithScript( path, download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None assert os.path.isdir(module_factory_result.builder_kwargs["base_path"]) def test_LocalDatasetModuleFactoryWithScript_dont_trust_remote_code(self): path = os.path.join(self._dataset_loading_script_dir, f"{DATASET_LOADING_SCRIPT_NAME}.py") factory = LocalDatasetModuleFactoryWithScript( path, download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path ) with patch.object(config, "HF_DATASETS_TRUST_REMOTE_CODE", None): # this will be the default soon self.assertRaises(ValueError, factory.get_module) factory = LocalDatasetModuleFactoryWithScript( path, download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path, trust_remote_code=False, ) self.assertRaises(ValueError, factory.get_module) def test_LocalDatasetModuleFactoryWithoutScript(self): factory = LocalDatasetModuleFactoryWithoutScript(self._data_dir) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None assert os.path.isdir(module_factory_result.builder_kwargs["base_path"]) def test_LocalDatasetModuleFactoryWithoutScript_with_data_dir(self): factory = LocalDatasetModuleFactoryWithoutScript(self._data_dir2, data_dir=self._sub_data_dir) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None builder_config = module_factory_result.builder_configs_parameters.builder_configs[0] assert ( builder_config.data_files is not None and len(builder_config.data_files["train"]) == 1 and len(builder_config.data_files["test"]) == 1 ) assert all( self._sub_data_dir in Path(data_file).parts for data_file in builder_config.data_files["train"] + builder_config.data_files["test"] ) def test_LocalDatasetModuleFactoryWithoutScript_with_metadata(self): factory = LocalDatasetModuleFactoryWithoutScript(self._data_dir_with_metadata) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None builder_config = module_factory_result.builder_configs_parameters.builder_configs[0] assert ( builder_config.data_files is not None and len(builder_config.data_files["train"]) > 0 and len(builder_config.data_files["test"]) > 0 ) assert any(Path(data_file).name == "metadata.jsonl" for data_file in builder_config.data_files["train"]) assert any(Path(data_file).name == "metadata.jsonl" for data_file in builder_config.data_files["test"]) def test_LocalDatasetModuleFactoryWithoutScript_with_single_config_in_metadata(self): factory = LocalDatasetModuleFactoryWithoutScript( self._data_dir_with_single_config_in_metadata, ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None module_metadata_configs = 
module_factory_result.builder_configs_parameters.metadata_configs assert module_metadata_configs is not None assert len(module_metadata_configs) == 1 assert next(iter(module_metadata_configs)) == "custom" assert "drop_labels" in next(iter(module_metadata_configs.values())) assert next(iter(module_metadata_configs.values()))["drop_labels"] is True module_builder_configs = module_factory_result.builder_configs_parameters.builder_configs assert module_builder_configs is not None assert len(module_builder_configs) == 1 assert isinstance(module_builder_configs[0], ImageFolderConfig) assert module_builder_configs[0].name == "custom" assert module_builder_configs[0].data_files is not None assert isinstance(module_builder_configs[0].data_files, DataFilesPatternsDict) module_builder_configs[0]._resolve_data_files(self._data_dir_with_single_config_in_metadata, DownloadConfig()) assert isinstance(module_builder_configs[0].data_files, DataFilesDict) assert len(module_builder_configs[0].data_files) == 1 # one train split assert len(module_builder_configs[0].data_files["train"]) == 2 # two files assert module_builder_configs[0].drop_labels is True # parameter is passed from metadata # config named "default" is automatically considered to be a default config assert module_factory_result.builder_configs_parameters.default_config_name is None # we don't pass config params to builder in builder_kwargs, they are stored in builder_configs directly assert "drop_labels" not in module_factory_result.builder_kwargs def test_LocalDatasetModuleFactoryWithoutScript_with_two_configs_in_metadata(self): factory = LocalDatasetModuleFactoryWithoutScript( self._data_dir_with_two_config_in_metadata, ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None module_metadata_configs = module_factory_result.builder_configs_parameters.metadata_configs assert module_metadata_configs is not None assert len(module_metadata_configs) == 2 assert list(module_metadata_configs) == ["v1", "v2"] assert "drop_labels" in module_metadata_configs["v1"] assert module_metadata_configs["v1"]["drop_labels"] is True assert "drop_labels" in module_metadata_configs["v2"] assert module_metadata_configs["v2"]["drop_labels"] is False module_builder_configs = module_factory_result.builder_configs_parameters.builder_configs assert module_builder_configs is not None assert len(module_builder_configs) == 2 module_builder_config_v1, module_builder_config_v2 = module_builder_configs assert module_builder_config_v1.name == "v1" assert module_builder_config_v2.name == "v2" assert isinstance(module_builder_config_v1, ImageFolderConfig) assert isinstance(module_builder_config_v2, ImageFolderConfig) assert isinstance(module_builder_config_v1.data_files, DataFilesPatternsDict) assert isinstance(module_builder_config_v2.data_files, DataFilesPatternsDict) module_builder_config_v1._resolve_data_files(self._data_dir_with_two_config_in_metadata, DownloadConfig()) module_builder_config_v2._resolve_data_files(self._data_dir_with_two_config_in_metadata, DownloadConfig()) assert isinstance(module_builder_config_v1.data_files, DataFilesDict) assert isinstance(module_builder_config_v2.data_files, DataFilesDict) assert sorted(module_builder_config_v1.data_files) == ["train"] assert len(module_builder_config_v1.data_files["train"]) == 2 assert sorted(module_builder_config_v2.data_files) == ["train"] assert len(module_builder_config_v2.data_files["train"]) == 2 assert module_builder_config_v1.drop_labels is 
True # parameter is passed from metadata assert module_builder_config_v2.drop_labels is False # parameter is passed from metadata assert ( module_factory_result.builder_configs_parameters.default_config_name == "v1" ) # it's marked as a default one in yaml # we don't pass config params to builder in builder_kwargs, they are stored in builder_configs directly assert "drop_labels" not in module_factory_result.builder_kwargs def test_PackagedDatasetModuleFactory(self): factory = PackagedDatasetModuleFactory( "json", data_files=self._jsonl_path, download_config=self.download_config ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None def test_PackagedDatasetModuleFactory_with_data_dir(self): factory = PackagedDatasetModuleFactory("json", data_dir=self._data_dir, download_config=self.download_config) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None data_files = module_factory_result.builder_kwargs.get("data_files") assert data_files is not None and len(data_files["train"]) > 0 and len(data_files["test"]) > 0 assert Path(data_files["train"][0]).parent.samefile(self._data_dir) assert Path(data_files["test"][0]).parent.samefile(self._data_dir) def test_PackagedDatasetModuleFactory_with_data_dir_and_metadata(self): factory = PackagedDatasetModuleFactory( "imagefolder", data_dir=self._data_dir_with_metadata, download_config=self.download_config ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None data_files = module_factory_result.builder_kwargs.get("data_files") assert data_files is not None and len(data_files["train"]) > 0 and len(data_files["test"]) > 0 assert Path(data_files["train"][0]).parent.samefile(self._data_dir_with_metadata) assert Path(data_files["test"][0]).parent.samefile(self._data_dir_with_metadata) assert any(Path(data_file).name == "metadata.jsonl" for data_file in data_files["train"]) assert any(Path(data_file).name == "metadata.jsonl" for data_file in data_files["test"]) @pytest.mark.integration def test_HubDatasetModuleFactoryWithoutScript(self): factory = HubDatasetModuleFactoryWithoutScript( SAMPLE_DATASET_IDENTIFIER2, download_config=self.download_config ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT) @pytest.mark.integration def test_HubDatasetModuleFactoryWithoutScript_with_data_dir(self): data_dir = "data2" factory = HubDatasetModuleFactoryWithoutScript( SAMPLE_DATASET_IDENTIFIER3, data_dir=data_dir, download_config=self.download_config ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None builder_config = module_factory_result.builder_configs_parameters.builder_configs[0] assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT) assert ( builder_config.data_files is not None and len(builder_config.data_files["train"]) == 1 and len(builder_config.data_files["test"]) == 1 ) assert all( data_dir in Path(data_file).parts for data_file in builder_config.data_files["train"] + builder_config.data_files["test"] ) @pytest.mark.integration def test_HubDatasetModuleFactoryWithoutScript_with_metadata(self): factory = HubDatasetModuleFactoryWithoutScript( SAMPLE_DATASET_IDENTIFIER4, 
download_config=self.download_config ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None builder_config = module_factory_result.builder_configs_parameters.builder_configs[0] assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT) assert ( builder_config.data_files is not None and len(builder_config.data_files["train"]) > 0 and len(builder_config.data_files["test"]) > 0 ) assert any(Path(data_file).name == "metadata.jsonl" for data_file in builder_config.data_files["train"]) assert any(Path(data_file).name == "metadata.jsonl" for data_file in builder_config.data_files["test"]) factory = HubDatasetModuleFactoryWithoutScript( SAMPLE_DATASET_IDENTIFIER5, download_config=self.download_config ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None builder_config = module_factory_result.builder_configs_parameters.builder_configs[0] assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT) assert ( builder_config.data_files is not None and len(builder_config.data_files) == 1 and len(builder_config.data_files["train"]) > 0 ) assert any(Path(data_file).name == "metadata.jsonl" for data_file in builder_config.data_files["train"]) @pytest.mark.integration def test_HubDatasetModuleFactoryWithoutScript_with_one_default_config_in_metadata(self): factory = HubDatasetModuleFactoryWithoutScript( SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA, download_config=self.download_config, ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT) module_metadata_configs = module_factory_result.builder_configs_parameters.metadata_configs assert module_metadata_configs is not None assert len(module_metadata_configs) == 1 assert next(iter(module_metadata_configs)) == "custom" assert "drop_labels" in next(iter(module_metadata_configs.values())) assert next(iter(module_metadata_configs.values()))["drop_labels"] is True module_builder_configs = module_factory_result.builder_configs_parameters.builder_configs assert module_builder_configs is not None assert len(module_builder_configs) == 1 assert isinstance(module_builder_configs[0], AudioFolderConfig) assert module_builder_configs[0].name == "custom" assert module_builder_configs[0].data_files is not None assert isinstance(module_builder_configs[0].data_files, DataFilesPatternsDict) module_builder_configs[0]._resolve_data_files( module_factory_result.builder_kwargs["base_path"], DownloadConfig() ) assert isinstance(module_builder_configs[0].data_files, DataFilesDict) assert sorted(module_builder_configs[0].data_files) == ["test", "train"] assert len(module_builder_configs[0].data_files["train"]) == 3 assert len(module_builder_configs[0].data_files["test"]) == 3 assert module_builder_configs[0].drop_labels is True # parameter is passed from metadata # config named "default" is automatically considered to be a default config assert module_factory_result.builder_configs_parameters.default_config_name is None # we don't pass config params to builder in builder_kwargs, they are stored in builder_configs directly assert "drop_labels" not in module_factory_result.builder_kwargs @pytest.mark.integration def test_HubDatasetModuleFactoryWithoutScript_with_two_configs_in_metadata(self): datasets_names = 
[SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, SAMPLE_DATASET_TWO_CONFIG_IN_METADATA_WITH_DEFAULT] for dataset_name in datasets_names: factory = HubDatasetModuleFactoryWithoutScript(dataset_name, download_config=self.download_config) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None module_metadata_configs = module_factory_result.builder_configs_parameters.metadata_configs assert module_metadata_configs is not None assert len(module_metadata_configs) == 2 assert list(module_metadata_configs) == ["v1", "v2"] assert "drop_labels" in module_metadata_configs["v1"] assert module_metadata_configs["v1"]["drop_labels"] is True assert "drop_labels" in module_metadata_configs["v2"] assert module_metadata_configs["v2"]["drop_labels"] is False module_builder_configs = module_factory_result.builder_configs_parameters.builder_configs assert module_builder_configs is not None assert len(module_builder_configs) == 2 module_builder_config_v1, module_builder_config_v2 = module_builder_configs assert module_builder_config_v1.name == "v1" assert module_builder_config_v2.name == "v2" assert isinstance(module_builder_config_v1, AudioFolderConfig) assert isinstance(module_builder_config_v2, AudioFolderConfig) assert isinstance(module_builder_config_v1.data_files, DataFilesPatternsDict) assert isinstance(module_builder_config_v2.data_files, DataFilesPatternsDict) module_builder_config_v1._resolve_data_files( module_factory_result.builder_kwargs["base_path"], DownloadConfig() ) module_builder_config_v2._resolve_data_files( module_factory_result.builder_kwargs["base_path"], DownloadConfig() ) assert isinstance(module_builder_config_v1.data_files, DataFilesDict) assert isinstance(module_builder_config_v2.data_files, DataFilesDict) assert sorted(module_builder_config_v1.data_files) == ["test", "train"] assert len(module_builder_config_v1.data_files["train"]) == 3 assert len(module_builder_config_v1.data_files["test"]) == 3 assert sorted(module_builder_config_v2.data_files) == ["test", "train"] assert len(module_builder_config_v2.data_files["train"]) == 2 assert len(module_builder_config_v2.data_files["test"]) == 1 assert module_builder_config_v1.drop_labels is True # parameter is passed from metadata assert module_builder_config_v2.drop_labels is False # parameter is passed from metadata # we don't pass config params to builder in builder_kwargs, they are stored in builder_configs directly assert "drop_labels" not in module_factory_result.builder_kwargs if dataset_name == SAMPLE_DATASET_TWO_CONFIG_IN_METADATA_WITH_DEFAULT: assert module_factory_result.builder_configs_parameters.default_config_name == "v1" else: assert module_factory_result.builder_configs_parameters.default_config_name is None @pytest.mark.integration def test_HubDatasetModuleFactoryWithScript(self): factory = HubDatasetModuleFactoryWithScript( SAMPLE_DATASET_IDENTIFIER, download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path, ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT) @pytest.mark.integration def test_HubDatasetModuleFactoryWithParquetExport(self): factory = HubDatasetModuleFactoryWithParquetExport( SAMPLE_DATASET_IDENTIFIER, download_config=self.download_config, ) module_factory_result = factory.get_module() assert module_factory_result.module_path == 
"datasets.packaged_modules.parquet.parquet" assert module_factory_result.builder_configs_parameters.builder_configs assert isinstance(module_factory_result.builder_configs_parameters.builder_configs[0], ParquetConfig) module_factory_result.builder_configs_parameters.builder_configs[0]._resolve_data_files( base_path="", download_config=self.download_config ) assert module_factory_result.builder_configs_parameters.builder_configs[0].data_files == { "train": [ "hf://datasets/hf-internal-testing/dataset_with_script@da4ed81df5a1bcd916043c827b75994de8ef7eda/default/train/0000.parquet" ], "validation": [ "hf://datasets/hf-internal-testing/dataset_with_script@da4ed81df5a1bcd916043c827b75994de8ef7eda/default/validation/0000.parquet" ], } @pytest.mark.integration def test_HubDatasetModuleFactoryWithParquetExport_errors_on_wrong_sha(self): factory = HubDatasetModuleFactoryWithParquetExport( SAMPLE_DATASET_IDENTIFIER, download_config=self.download_config, revision="1a21ac5846fc3f36ad5f128740c58932d3d7806f", ) factory.get_module() factory = HubDatasetModuleFactoryWithParquetExport( SAMPLE_DATASET_IDENTIFIER, download_config=self.download_config, revision="wrong_sha", ) with self.assertRaises(_datasets_server.DatasetsServerError): factory.get_module() @pytest.mark.integration def test_CachedDatasetModuleFactory(self): name = SAMPLE_DATASET_IDENTIFIER2 load_dataset_builder(name, cache_dir=self.cache_dir).download_and_prepare() for offline_mode in OfflineSimulationMode: with offline(offline_mode): factory = CachedDatasetModuleFactory( name, cache_dir=self.cache_dir, ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None def test_CachedDatasetModuleFactory_with_script(self): path = os.path.join(self._dataset_loading_script_dir, f"{DATASET_LOADING_SCRIPT_NAME}.py") factory = LocalDatasetModuleFactoryWithScript( path, download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path ) module_factory_result = factory.get_module() for offline_mode in OfflineSimulationMode: with offline(offline_mode): factory = CachedDatasetModuleFactory( DATASET_LOADING_SCRIPT_NAME, dynamic_modules_path=self.dynamic_modules_path, ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None @pytest.mark.filterwarnings("ignore:LocalMetricModuleFactory is deprecated:FutureWarning") @pytest.mark.filterwarnings("ignore:CachedMetricModuleFactory is deprecated:FutureWarning") def test_CachedMetricModuleFactory(self): path = os.path.join(self._metric_loading_script_dir, f"{METRIC_LOADING_SCRIPT_NAME}.py") factory = LocalMetricModuleFactory( path, download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path ) module_factory_result = factory.get_module() for offline_mode in OfflineSimulationMode: with offline(offline_mode): factory = CachedMetricModuleFactory( METRIC_LOADING_SCRIPT_NAME, dynamic_modules_path=self.dynamic_modules_path, ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None @pytest.mark.parametrize( "factory_class", [ CachedDatasetModuleFactory, CachedMetricModuleFactory, GithubMetricModuleFactory, HubDatasetModuleFactoryWithoutScript, HubDatasetModuleFactoryWithScript, LocalDatasetModuleFactoryWithoutScript, LocalDatasetModuleFactoryWithScript, LocalMetricModuleFactory, PackagedDatasetModuleFactory, ], ) def test_module_factories(factory_class): name = "dummy_name" factory = 
factory_class(name) assert factory.name == name @pytest.mark.integration class LoadTest(TestCase): @pytest.fixture(autouse=True) def inject_fixtures(self, caplog): self._caplog = caplog def setUp(self): self.hf_modules_cache = tempfile.mkdtemp() self.cache_dir = tempfile.mkdtemp() self.dynamic_modules_path = datasets.load.init_dynamic_modules( name="test_datasets_modules2", hf_modules_cache=self.hf_modules_cache ) def tearDown(self): shutil.rmtree(self.hf_modules_cache) shutil.rmtree(self.cache_dir) def _dummy_module_dir(self, modules_dir, dummy_module_name, dummy_code): assert dummy_module_name.startswith("__") module_dir = os.path.join(modules_dir, dummy_module_name) os.makedirs(module_dir, exist_ok=True) module_path = os.path.join(module_dir, dummy_module_name + ".py") with open(module_path, "w") as f: f.write(dummy_code) return module_dir def test_dataset_module_factory(self): with tempfile.TemporaryDirectory() as tmp_dir: # prepare module from directory path dummy_code = "MY_DUMMY_VARIABLE = 'hello there'" module_dir = self._dummy_module_dir(tmp_dir, "__dummy_module_name1__", dummy_code) dataset_module = datasets.load.dataset_module_factory( module_dir, dynamic_modules_path=self.dynamic_modules_path ) dummy_module = importlib.import_module(dataset_module.module_path) self.assertEqual(dummy_module.MY_DUMMY_VARIABLE, "hello there") self.assertEqual(dataset_module.hash, sha256(dummy_code.encode("utf-8")).hexdigest()) # prepare module from file path + check resolved_file_path dummy_code = "MY_DUMMY_VARIABLE = 'general kenobi'" module_dir = self._dummy_module_dir(tmp_dir, "__dummy_module_name1__", dummy_code) module_path = os.path.join(module_dir, "__dummy_module_name1__.py") dataset_module = datasets.load.dataset_module_factory( module_path, dynamic_modules_path=self.dynamic_modules_path ) dummy_module = importlib.import_module(dataset_module.module_path) self.assertEqual(dummy_module.MY_DUMMY_VARIABLE, "general kenobi") self.assertEqual(dataset_module.hash, sha256(dummy_code.encode("utf-8")).hexdigest()) # missing module for offline_simulation_mode in list(OfflineSimulationMode): with offline(offline_simulation_mode): with self.assertRaises( (DatasetNotFoundError, ConnectionError, requests.exceptions.ConnectionError) ): datasets.load.dataset_module_factory( "__missing_dummy_module_name__", dynamic_modules_path=self.dynamic_modules_path ) @pytest.mark.integration def test_offline_dataset_module_factory(self): repo_id = SAMPLE_DATASET_IDENTIFIER2 builder = load_dataset_builder(repo_id, cache_dir=self.cache_dir) builder.download_and_prepare() for offline_simulation_mode in list(OfflineSimulationMode): with offline(offline_simulation_mode): self._caplog.clear() # allow provide the repo id without an explicit path to remote or local actual file dataset_module = datasets.load.dataset_module_factory(repo_id, cache_dir=self.cache_dir) self.assertEqual(dataset_module.module_path, "datasets.packaged_modules.cache.cache") self.assertIn("Using the latest cached version of the dataset", self._caplog.text) def test_offline_dataset_module_factory_with_script(self): with tempfile.TemporaryDirectory() as tmp_dir: dummy_code = "MY_DUMMY_VARIABLE = 'hello there'" module_dir = self._dummy_module_dir(tmp_dir, "__dummy_module_name2__", dummy_code) dataset_module_1 = datasets.load.dataset_module_factory( module_dir, dynamic_modules_path=self.dynamic_modules_path ) time.sleep(0.1) # make sure there's a difference in the OS update time of the python file dummy_code = "MY_DUMMY_VARIABLE = 'general kenobi'" 
module_dir = self._dummy_module_dir(tmp_dir, "__dummy_module_name2__", dummy_code) dataset_module_2 = datasets.load.dataset_module_factory( module_dir, dynamic_modules_path=self.dynamic_modules_path ) for offline_simulation_mode in list(OfflineSimulationMode): with offline(offline_simulation_mode): self._caplog.clear() # allow provide the module name without an explicit path to remote or local actual file dataset_module_3 = datasets.load.dataset_module_factory( "__dummy_module_name2__", dynamic_modules_path=self.dynamic_modules_path ) # it loads the most recent version of the module self.assertEqual(dataset_module_2.module_path, dataset_module_3.module_path) self.assertNotEqual(dataset_module_1.module_path, dataset_module_3.module_path) self.assertIn("Using the latest cached version of the module", self._caplog.text) def test_load_dataset_from_hub(self): with self.assertRaises(DatasetNotFoundError) as context: datasets.load_dataset("_dummy") self.assertIn( "Dataset '_dummy' doesn't exist on the Hub", str(context.exception), ) with self.assertRaises(DatasetNotFoundError) as context: datasets.load_dataset("_dummy", revision="0.0.0") self.assertIn( "Dataset '_dummy' doesn't exist on the Hub", str(context.exception), ) self.assertIn( "at revision '0.0.0'", str(context.exception), ) for offline_simulation_mode in list(OfflineSimulationMode): with offline(offline_simulation_mode): with self.assertRaises(ConnectionError) as context: datasets.load_dataset("_dummy") if offline_simulation_mode != OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: self.assertIn( "Couldn't reach '_dummy' on the Hub", str(context.exception), ) def test_load_dataset_namespace(self): with self.assertRaises(DatasetNotFoundError) as context: datasets.load_dataset("hf-internal-testing/_dummy") self.assertIn( "hf-internal-testing/_dummy", str(context.exception), ) for offline_simulation_mode in list(OfflineSimulationMode): with offline(offline_simulation_mode): with self.assertRaises(ConnectionError) as context: datasets.load_dataset("hf-internal-testing/_dummy") self.assertIn("hf-internal-testing/_dummy", str(context.exception), msg=offline_simulation_mode) @pytest.mark.integration def test_load_dataset_builder_with_metadata(): builder = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER4) assert isinstance(builder, ImageFolder) assert builder.config.name == "default" assert builder.config.data_files is not None assert builder.config.drop_metadata is None with pytest.raises(ValueError): builder = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER4, "non-existing-config") @pytest.mark.integration def test_load_dataset_builder_config_kwargs_passed_as_arguments(): builder_default = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER4) builder_custom = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER4, drop_metadata=True) assert builder_custom.config.drop_metadata != builder_default.config.drop_metadata assert builder_custom.config.drop_metadata is True @pytest.mark.integration def test_load_dataset_builder_with_two_configs_in_metadata(): builder = datasets.load_dataset_builder(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "v1") assert isinstance(builder, AudioFolder) assert builder.config.name == "v1" assert builder.config.data_files is not None with pytest.raises(ValueError): datasets.load_dataset_builder(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA) with pytest.raises(ValueError): datasets.load_dataset_builder(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "non-existing-config") @pytest.mark.parametrize("serializer", 
[pickle, dill]) def test_load_dataset_builder_with_metadata_configs_pickable(serializer): builder = datasets.load_dataset_builder(SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA) builder_unpickled = serializer.loads(serializer.dumps(builder)) assert builder.BUILDER_CONFIGS == builder_unpickled.BUILDER_CONFIGS assert list(builder_unpickled.builder_configs) == ["custom"] assert isinstance(builder_unpickled.builder_configs["custom"], AudioFolderConfig) builder2 = datasets.load_dataset_builder(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "v1") builder2_unpickled = serializer.loads(serializer.dumps(builder2)) assert builder2.BUILDER_CONFIGS == builder2_unpickled.BUILDER_CONFIGS != builder_unpickled.BUILDER_CONFIGS assert list(builder2_unpickled.builder_configs) == ["v1", "v2"] assert isinstance(builder2_unpickled.builder_configs["v1"], AudioFolderConfig) assert isinstance(builder2_unpickled.builder_configs["v2"], AudioFolderConfig) def test_load_dataset_builder_for_absolute_script_dir(dataset_loading_script_dir, data_dir): builder = datasets.load_dataset_builder(dataset_loading_script_dir, data_dir=data_dir) assert isinstance(builder, DatasetBuilder) assert builder.name == DATASET_LOADING_SCRIPT_NAME assert builder.dataset_name == DATASET_LOADING_SCRIPT_NAME assert builder.info.features == Features({"text": Value("string")}) def test_load_dataset_builder_for_relative_script_dir(dataset_loading_script_dir, data_dir): with set_current_working_directory_to_temp_dir(): relative_script_dir = DATASET_LOADING_SCRIPT_NAME shutil.copytree(dataset_loading_script_dir, relative_script_dir) builder = datasets.load_dataset_builder(relative_script_dir, data_dir=data_dir) assert isinstance(builder, DatasetBuilder) assert builder.name == DATASET_LOADING_SCRIPT_NAME assert builder.dataset_name == DATASET_LOADING_SCRIPT_NAME assert builder.info.features == Features({"text": Value("string")}) def test_load_dataset_builder_for_script_path(dataset_loading_script_dir, data_dir): builder = datasets.load_dataset_builder( os.path.join(dataset_loading_script_dir, DATASET_LOADING_SCRIPT_NAME + ".py"), data_dir=data_dir ) assert isinstance(builder, DatasetBuilder) assert builder.name == DATASET_LOADING_SCRIPT_NAME assert builder.dataset_name == DATASET_LOADING_SCRIPT_NAME assert builder.info.features == Features({"text": Value("string")}) def test_load_dataset_builder_for_absolute_data_dir(complex_data_dir): builder = datasets.load_dataset_builder(complex_data_dir) assert isinstance(builder, DatasetBuilder) assert builder.name == "text" assert builder.dataset_name == Path(complex_data_dir).name assert builder.config.name == "default" assert isinstance(builder.config.data_files, DataFilesDict) assert len(builder.config.data_files["train"]) > 0 assert len(builder.config.data_files["test"]) > 0 def test_load_dataset_builder_for_relative_data_dir(complex_data_dir): with set_current_working_directory_to_temp_dir(): relative_data_dir = "relative_data_dir" shutil.copytree(complex_data_dir, relative_data_dir) builder = datasets.load_dataset_builder(relative_data_dir) assert isinstance(builder, DatasetBuilder) assert builder.name == "text" assert builder.dataset_name == relative_data_dir assert builder.config.name == "default" assert isinstance(builder.config.data_files, DataFilesDict) assert len(builder.config.data_files["train"]) > 0 assert len(builder.config.data_files["test"]) > 0 @pytest.mark.integration def test_load_dataset_builder_for_community_dataset_with_script(): builder = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER) 
assert isinstance(builder, DatasetBuilder) assert builder.name == "parquet" assert builder.dataset_name == SAMPLE_DATASET_IDENTIFIER.split("/")[-1] assert builder.config.name == "default" assert builder.info.features == Features({"text": Value("string")}) namespace = SAMPLE_DATASET_IDENTIFIER[: SAMPLE_DATASET_IDENTIFIER.index("/")] assert builder._relative_data_dir().startswith(namespace) assert builder.__module__.startswith("datasets.") @pytest.mark.integration def test_load_dataset_builder_for_community_dataset_with_script_no_parquet_export(): with patch.object(config, "USE_PARQUET_EXPORT", False): builder = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER) assert isinstance(builder, DatasetBuilder) assert builder.name == SAMPLE_DATASET_IDENTIFIER.split("/")[-1] assert builder.dataset_name == SAMPLE_DATASET_IDENTIFIER.split("/")[-1] assert builder.config.name == "default" assert builder.info.features == Features({"text": Value("string")}) namespace = SAMPLE_DATASET_IDENTIFIER[: SAMPLE_DATASET_IDENTIFIER.index("/")] assert builder._relative_data_dir().startswith(namespace) assert SAMPLE_DATASET_IDENTIFIER.replace("/", "--") in builder.__module__ @pytest.mark.integration def test_load_dataset_builder_use_parquet_export_if_dont_trust_remote_code_keeps_features(): dataset_name = "food101" builder = datasets.load_dataset_builder(dataset_name, trust_remote_code=False) assert isinstance(builder, DatasetBuilder) assert builder.name == "parquet" assert builder.dataset_name == dataset_name assert builder.config.name == "default" assert list(builder.info.features) == ["image", "label"] assert builder.info.features["image"] == Image() @pytest.mark.integration def test_load_dataset_builder_for_community_dataset_without_script(): builder = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER2) assert isinstance(builder, DatasetBuilder) assert builder.name == "text" assert builder.dataset_name == SAMPLE_DATASET_IDENTIFIER2.split("/")[-1] assert builder.config.name == "default" assert isinstance(builder.config.data_files, DataFilesDict) assert len(builder.config.data_files["train"]) > 0 assert len(builder.config.data_files["test"]) > 0 def test_load_dataset_builder_fail(): with pytest.raises(DatasetNotFoundError): datasets.load_dataset_builder("blabla") @pytest.mark.parametrize("keep_in_memory", [False, True]) def test_load_dataset_local_script(dataset_loading_script_dir, data_dir, keep_in_memory, caplog): with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): dataset = load_dataset(dataset_loading_script_dir, data_dir=data_dir, keep_in_memory=keep_in_memory) assert isinstance(dataset, DatasetDict) assert all(isinstance(d, Dataset) for d in dataset.values()) assert len(dataset) == 2 assert isinstance(next(iter(dataset["train"])), dict) def test_load_dataset_cached_local_script(dataset_loading_script_dir, data_dir, caplog): dataset = load_dataset(dataset_loading_script_dir, data_dir=data_dir) assert isinstance(dataset, DatasetDict) assert all(isinstance(d, Dataset) for d in dataset.values()) assert len(dataset) == 2 assert isinstance(next(iter(dataset["train"])), dict) for offline_simulation_mode in list(OfflineSimulationMode): with offline(offline_simulation_mode): caplog.clear() # Load dataset from cache dataset = datasets.load_dataset(DATASET_LOADING_SCRIPT_NAME, data_dir=data_dir) assert len(dataset) == 2 assert "Using the latest cached version of the module" in caplog.text assert isinstance(next(iter(dataset["train"])), dict) with 
pytest.raises(DatasetNotFoundError) as exc_info: datasets.load_dataset(SAMPLE_DATASET_NAME_THAT_DOESNT_EXIST) assert f"Dataset '{SAMPLE_DATASET_NAME_THAT_DOESNT_EXIST}' doesn't exist on the Hub" in str(exc_info.value) @pytest.mark.integration @pytest.mark.parametrize("stream_from_cache, ", [False, True]) def test_load_dataset_cached_from_hub(stream_from_cache, caplog): dataset = load_dataset(SAMPLE_DATASET_IDENTIFIER3) assert isinstance(dataset, DatasetDict) assert all(isinstance(d, Dataset) for d in dataset.values()) assert len(dataset) == 2 assert isinstance(next(iter(dataset["train"])), dict) for offline_simulation_mode in list(OfflineSimulationMode): with offline(offline_simulation_mode): caplog.clear() # Load dataset from cache dataset = datasets.load_dataset(SAMPLE_DATASET_IDENTIFIER3, streaming=stream_from_cache) assert len(dataset) == 2 assert "Using the latest cached version of the dataset" in caplog.text assert isinstance(next(iter(dataset["train"])), dict) with pytest.raises(DatasetNotFoundError) as exc_info: datasets.load_dataset(SAMPLE_DATASET_NAME_THAT_DOESNT_EXIST) assert f"Dataset '{SAMPLE_DATASET_NAME_THAT_DOESNT_EXIST}' doesn't exist on the Hub" in str(exc_info.value) def test_load_dataset_streaming(dataset_loading_script_dir, data_dir): dataset = load_dataset(dataset_loading_script_dir, streaming=True, data_dir=data_dir) assert isinstance(dataset, IterableDatasetDict) assert all(isinstance(d, IterableDataset) for d in dataset.values()) assert len(dataset) == 2 assert isinstance(next(iter(dataset["train"])), dict) def test_load_dataset_streaming_gz_json(jsonl_gz_path): data_files = jsonl_gz_path ds = load_dataset("json", split="train", data_files=data_files, streaming=True) assert isinstance(ds, IterableDataset) ds_item = next(iter(ds)) assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0} @pytest.mark.integration @pytest.mark.parametrize( "path", ["sample.jsonl", "sample.jsonl.gz", "sample.tar", "sample.jsonl.xz", "sample.zip", "sample.jsonl.zst"] ) def test_load_dataset_streaming_compressed_files(path): repo_id = "hf-internal-testing/compressed_files" data_files = f"https://huggingface.co/datasets/{repo_id}/resolve/main/{path}" if data_files[-3:] in ("zip", "tar"): # we need to glob "*" inside archives data_files = data_files[-3:] + "://*::" + data_files return # TODO(QL, albert): support re-add support for ZIP and TAR archives streaming ds = load_dataset("json", split="train", data_files=data_files, streaming=True) assert isinstance(ds, IterableDataset) ds_item = next(iter(ds)) assert ds_item == { "tokens": ["Ministeri", "de", "Justícia", "d'Espanya"], "ner_tags": [1, 2, 2, 2], "langs": ["ca", "ca", "ca", "ca"], "spans": ["PER: Ministeri de Justícia d'Espanya"], } @pytest.mark.parametrize("path_extension", ["csv", "csv.bz2"]) @pytest.mark.parametrize("streaming", [False, True]) def test_load_dataset_streaming_csv(path_extension, streaming, csv_path, bz2_csv_path): paths = {"csv": csv_path, "csv.bz2": bz2_csv_path} data_files = str(paths[path_extension]) features = Features({"col_1": Value("string"), "col_2": Value("int32"), "col_3": Value("float32")}) ds = load_dataset("csv", split="train", data_files=data_files, features=features, streaming=streaming) assert isinstance(ds, IterableDataset if streaming else Dataset) ds_item = next(iter(ds)) assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0} @pytest.mark.parametrize("streaming", [False, True]) @pytest.mark.parametrize("data_file", ["zip_csv_path", "zip_csv_with_dir_path", "csv_path"]) def 
test_load_dataset_zip_csv(data_file, streaming, zip_csv_path, zip_csv_with_dir_path, csv_path): data_file_paths = { "zip_csv_path": zip_csv_path, "zip_csv_with_dir_path": zip_csv_with_dir_path, "csv_path": csv_path, } data_files = str(data_file_paths[data_file]) expected_size = 8 if data_file.startswith("zip") else 4 features = Features({"col_1": Value("string"), "col_2": Value("int32"), "col_3": Value("float32")}) ds = load_dataset("csv", split="train", data_files=data_files, features=features, streaming=streaming) if streaming: ds_item_counter = 0 for ds_item in ds: if ds_item_counter == 0: assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0} ds_item_counter += 1 assert ds_item_counter == expected_size else: assert ds.shape[0] == expected_size ds_item = next(iter(ds)) assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0} @pytest.mark.parametrize("streaming", [False, True]) @pytest.mark.parametrize("data_file", ["zip_jsonl_path", "zip_jsonl_with_dir_path", "jsonl_path"]) def test_load_dataset_zip_jsonl(data_file, streaming, zip_jsonl_path, zip_jsonl_with_dir_path, jsonl_path): data_file_paths = { "zip_jsonl_path": zip_jsonl_path, "zip_jsonl_with_dir_path": zip_jsonl_with_dir_path, "jsonl_path": jsonl_path, } data_files = str(data_file_paths[data_file]) expected_size = 8 if data_file.startswith("zip") else 4 features = Features({"col_1": Value("string"), "col_2": Value("int32"), "col_3": Value("float32")}) ds = load_dataset("json", split="train", data_files=data_files, features=features, streaming=streaming) if streaming: ds_item_counter = 0 for ds_item in ds: if ds_item_counter == 0: assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0} ds_item_counter += 1 assert ds_item_counter == expected_size else: assert ds.shape[0] == expected_size ds_item = next(iter(ds)) assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0} @pytest.mark.parametrize("streaming", [False, True]) @pytest.mark.parametrize("data_file", ["zip_text_path", "zip_text_with_dir_path", "text_path"]) def test_load_dataset_zip_text(data_file, streaming, zip_text_path, zip_text_with_dir_path, text_path): data_file_paths = { "zip_text_path": zip_text_path, "zip_text_with_dir_path": zip_text_with_dir_path, "text_path": text_path, } data_files = str(data_file_paths[data_file]) expected_size = 8 if data_file.startswith("zip") else 4 ds = load_dataset("text", split="train", data_files=data_files, streaming=streaming) if streaming: ds_item_counter = 0 for ds_item in ds: if ds_item_counter == 0: assert ds_item == {"text": "0"} ds_item_counter += 1 assert ds_item_counter == expected_size else: assert ds.shape[0] == expected_size ds_item = next(iter(ds)) assert ds_item == {"text": "0"} @pytest.mark.parametrize("streaming", [False, True]) def test_load_dataset_arrow(streaming, data_dir_with_arrow): ds = load_dataset("arrow", split="train", data_dir=data_dir_with_arrow, streaming=streaming) expected_size = 10 if streaming: ds_item_counter = 0 for ds_item in ds: if ds_item_counter == 0: assert ds_item == {"col_1": "foo"} ds_item_counter += 1 assert ds_item_counter == 10 else: assert ds.num_rows == 10 assert ds.shape[0] == expected_size ds_item = next(iter(ds)) assert ds_item == {"col_1": "foo"} def test_load_dataset_text_with_unicode_new_lines(text_path_with_unicode_new_lines): data_files = str(text_path_with_unicode_new_lines) ds = load_dataset("text", split="train", data_files=data_files) assert ds.num_rows == 3 def test_load_dataset_with_unsupported_extensions(text_dir_with_unsupported_extension): data_files = 
str(text_dir_with_unsupported_extension) ds = load_dataset("text", split="train", data_files=data_files) assert ds.num_rows == 4 @pytest.mark.integration def test_loading_from_the_datasets_hub(): with tempfile.TemporaryDirectory() as tmp_dir: with load_dataset(SAMPLE_DATASET_IDENTIFIER, cache_dir=tmp_dir) as dataset: assert len(dataset["train"]) == 2 assert len(dataset["validation"]) == 3 @pytest.mark.integration def test_loading_from_the_datasets_hub_with_token(): true_request = requests.Session().request def assert_auth(method, url, *args, headers, **kwargs): assert headers["authorization"] == "Bearer foo" return true_request(method, url, *args, headers=headers, **kwargs) with patch("requests.Session.request") as mock_request: mock_request.side_effect = assert_auth with tempfile.TemporaryDirectory() as tmp_dir: with offline(): with pytest.raises((ConnectionError, requests.exceptions.ConnectionError)): load_dataset(SAMPLE_NOT_EXISTING_DATASET_IDENTIFIER, cache_dir=tmp_dir, token="foo") mock_request.assert_called() @pytest.mark.integration def test_load_streaming_private_dataset(hf_token, hf_private_dataset_repo_txt_data): ds = load_dataset(hf_private_dataset_repo_txt_data, streaming=True, token=hf_token) assert next(iter(ds)) is not None @pytest.mark.integration def test_load_dataset_builder_private_dataset(hf_token, hf_private_dataset_repo_txt_data): builder = load_dataset_builder(hf_private_dataset_repo_txt_data, token=hf_token) assert isinstance(builder, DatasetBuilder) @pytest.mark.integration def test_load_streaming_private_dataset_with_zipped_data(hf_token, hf_private_dataset_repo_zipped_txt_data): ds = load_dataset(hf_private_dataset_repo_zipped_txt_data, streaming=True, token=hf_token) assert next(iter(ds)) is not None @pytest.mark.integration def test_load_dataset_config_kwargs_passed_as_arguments(): ds_default = load_dataset(SAMPLE_DATASET_IDENTIFIER4) ds_custom = load_dataset(SAMPLE_DATASET_IDENTIFIER4, drop_metadata=True) assert list(ds_default["train"].features) == ["image", "caption"] assert list(ds_custom["train"].features) == ["image"] @require_sndfile @pytest.mark.integration def test_load_hub_dataset_without_script_with_single_config_in_metadata(): # load the same dataset but with no configurations (=with default parameters) ds = load_dataset(SAMPLE_DATASET_NO_CONFIGS_IN_METADATA) assert list(ds["train"].features) == ["audio", "label"] # assert label feature is here as expected by default assert len(ds["train"]) == 5 and len(ds["test"]) == 4 ds2 = load_dataset(SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA) # single config -> no need to specify it assert list(ds2["train"].features) == ["audio"] # assert param `drop_labels=True` from metadata is passed assert len(ds2["train"]) == 3 and len(ds2["test"]) == 3 ds3 = load_dataset(SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA, "custom") assert list(ds3["train"].features) == ["audio"] # assert param `drop_labels=True` from metadata is passed assert len(ds3["train"]) == 3 and len(ds3["test"]) == 3 with pytest.raises(ValueError): # no config named "default" _ = load_dataset(SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA, "default") @require_sndfile @pytest.mark.integration def test_load_hub_dataset_without_script_with_two_config_in_metadata(): ds = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "v1") assert list(ds["train"].features) == ["audio"] # assert param `drop_labels=True` from metadata is passed assert len(ds["train"]) == 3 and len(ds["test"]) == 3 ds2 = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "v2") assert 
list(ds2["train"].features) == [ "audio", "label", ] # assert param `drop_labels=False` from metadata is passed assert len(ds2["train"]) == 2 and len(ds2["test"]) == 1 with pytest.raises(ValueError): # config is required but not specified _ = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA) with pytest.raises(ValueError): # no config named "default" _ = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "default") ds_with_default = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA_WITH_DEFAULT) # it's a dataset with the same data but "v1" config is marked as a default one assert list(ds_with_default["train"].features) == list(ds["train"].features) assert len(ds_with_default["train"]) == len(ds["train"]) and len(ds_with_default["test"]) == len(ds["test"]) @require_sndfile @pytest.mark.integration def test_load_hub_dataset_without_script_with_metadata_config_in_parallel(): # assert it doesn't fail (pickling of dynamically created class works) ds = load_dataset(SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA, num_proc=2) assert "label" not in ds["train"].features # assert param `drop_labels=True` from metadata is passed assert len(ds["train"]) == 3 and len(ds["test"]) == 3 ds = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "v1", num_proc=2) assert "label" not in ds["train"].features # assert param `drop_labels=True` from metadata is passed assert len(ds["train"]) == 3 and len(ds["test"]) == 3 ds = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "v2", num_proc=2) assert "label" in ds["train"].features assert len(ds["train"]) == 2 and len(ds["test"]) == 1 @require_pil @pytest.mark.integration @pytest.mark.parametrize("streaming", [True]) def test_load_dataset_private_zipped_images(hf_private_dataset_repo_zipped_img_data, hf_token, streaming): ds = load_dataset(hf_private_dataset_repo_zipped_img_data, split="train", streaming=streaming, token=hf_token) assert isinstance(ds, IterableDataset if streaming else Dataset) ds_items = list(ds) assert len(ds_items) == 2 def test_load_dataset_then_move_then_reload(dataset_loading_script_dir, data_dir, tmp_path, caplog): cache_dir1 = tmp_path / "cache1" cache_dir2 = tmp_path / "cache2" dataset = load_dataset(dataset_loading_script_dir, data_dir=data_dir, split="train", cache_dir=cache_dir1) fingerprint1 = dataset._fingerprint del dataset os.rename(cache_dir1, cache_dir2) caplog.clear() with caplog.at_level(INFO, logger=get_logger().name): dataset = load_dataset(dataset_loading_script_dir, data_dir=data_dir, split="train", cache_dir=cache_dir2) assert "Found cached dataset" in caplog.text assert dataset._fingerprint == fingerprint1, "for the caching mechanism to work, fingerprint should stay the same" dataset = load_dataset(dataset_loading_script_dir, data_dir=data_dir, split="test", cache_dir=cache_dir2) assert dataset._fingerprint != fingerprint1 def test_load_dataset_builder_then_edit_then_load_again(tmp_path: Path): dataset_dir = tmp_path / "test_load_dataset_then_edit_then_load_again" dataset_dir.mkdir() with open(dataset_dir / "train.txt", "w") as f: f.write("Hello there") dataset_builder = load_dataset_builder(str(dataset_dir)) with open(dataset_dir / "train.txt", "w") as f: f.write("General Kenobi !") edited_dataset_builder = load_dataset_builder(str(dataset_dir)) assert dataset_builder.cache_dir != edited_dataset_builder.cache_dir def test_load_dataset_readonly(dataset_loading_script_dir, dataset_loading_script_dir_readonly, data_dir, tmp_path): cache_dir1 = tmp_path / "cache1" cache_dir2 = tmp_path / "cache2" dataset = 
load_dataset(dataset_loading_script_dir, data_dir=data_dir, split="train", cache_dir=cache_dir1) fingerprint1 = dataset._fingerprint del dataset # Load readonly dataset and check that the fingerprint is the same. dataset = load_dataset(dataset_loading_script_dir_readonly, data_dir=data_dir, split="train", cache_dir=cache_dir2) assert dataset._fingerprint == fingerprint1, "Cannot load a dataset in a readonly folder." @pytest.mark.parametrize("max_in_memory_dataset_size", ["default", 0, 50, 500]) def test_load_dataset_local_with_default_in_memory( max_in_memory_dataset_size, dataset_loading_script_dir, data_dir, monkeypatch ): current_dataset_size = 148 if max_in_memory_dataset_size == "default": max_in_memory_dataset_size = 0 # default else: monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", max_in_memory_dataset_size) if max_in_memory_dataset_size: expected_in_memory = current_dataset_size < max_in_memory_dataset_size else: expected_in_memory = False with assert_arrow_memory_increases() if expected_in_memory else assert_arrow_memory_doesnt_increase(): dataset = load_dataset(dataset_loading_script_dir, data_dir=data_dir) assert (dataset["train"].dataset_size < max_in_memory_dataset_size) is expected_in_memory @pytest.mark.parametrize("max_in_memory_dataset_size", ["default", 0, 100, 1000]) def test_load_from_disk_with_default_in_memory( max_in_memory_dataset_size, dataset_loading_script_dir, data_dir, tmp_path, monkeypatch ): current_dataset_size = 512 # arrow file size = 512, in-memory dataset size = 148 if max_in_memory_dataset_size == "default": max_in_memory_dataset_size = 0 # default else: monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", max_in_memory_dataset_size) if max_in_memory_dataset_size: expected_in_memory = current_dataset_size < max_in_memory_dataset_size else: expected_in_memory = False dset = load_dataset(dataset_loading_script_dir, data_dir=data_dir, keep_in_memory=True) dataset_path = os.path.join(tmp_path, "saved_dataset") dset.save_to_disk(dataset_path) with assert_arrow_memory_increases() if expected_in_memory else assert_arrow_memory_doesnt_increase(): _ = load_from_disk(dataset_path) @pytest.mark.integration def test_remote_data_files(): repo_id = "hf-internal-testing/raw_jsonl" filename = "wikiann-bn-validation.jsonl" data_files = f"https://huggingface.co/datasets/{repo_id}/resolve/main/{filename}" ds = load_dataset("json", split="train", data_files=data_files, streaming=True) assert isinstance(ds, IterableDataset) ds_item = next(iter(ds)) assert ds_item.keys() == {"langs", "ner_tags", "spans", "tokens"} @pytest.mark.parametrize("deleted", [False, True]) def test_load_dataset_deletes_extracted_files(deleted, jsonl_gz_path, tmp_path): data_files = jsonl_gz_path cache_dir = tmp_path / "cache" if deleted: download_config = DownloadConfig(delete_extracted=True, cache_dir=cache_dir / "downloads") ds = load_dataset( "json", split="train", data_files=data_files, cache_dir=cache_dir, download_config=download_config ) else: # default ds = load_dataset("json", split="train", data_files=data_files, cache_dir=cache_dir) assert ds[0] == {"col_1": "0", "col_2": 0, "col_3": 0.0} assert ( [path for path in (cache_dir / "downloads" / "extracted").iterdir() if path.suffix != ".lock"] == [] ) is deleted def distributed_load_dataset(args): data_name, tmp_dir, datafiles = args dataset = load_dataset(data_name, cache_dir=tmp_dir, data_files=datafiles) return dataset def test_load_dataset_distributed(tmp_path, csv_path): num_workers = 5 args = "csv", str(tmp_path), 
csv_path with Pool(processes=num_workers) as pool: # start num_workers processes datasets = pool.map(distributed_load_dataset, [args] * num_workers) assert len(datasets) == num_workers assert all(len(dataset) == len(datasets[0]) > 0 for dataset in datasets) assert len(datasets[0].cache_files) > 0 assert all(dataset.cache_files == datasets[0].cache_files for dataset in datasets) def test_load_dataset_with_storage_options(mockfs): with mockfs.open("data.txt", "w") as f: f.write("Hello there\n") f.write("General Kenobi !") data_files = {"train": ["mock://data.txt"]} ds = load_dataset("text", data_files=data_files, storage_options=mockfs.storage_options) assert list(ds["train"]) == [{"text": "Hello there"}, {"text": "General Kenobi !"}] @require_pil def test_load_dataset_with_storage_options_with_decoding(mockfs, image_file): import PIL.Image filename = os.path.basename(image_file) with mockfs.open(filename, "wb") as fout: with open(image_file, "rb") as fin: fout.write(fin.read()) data_files = {"train": ["mock://" + filename]} ds = load_dataset("imagefolder", data_files=data_files, storage_options=mockfs.storage_options) assert len(ds["train"]) == 1 assert isinstance(ds["train"][0]["image"], PIL.Image.Image) def test_load_dataset_without_script_with_zip(zip_csv_path): path = str(zip_csv_path.parent) ds = load_dataset(path) assert list(ds.keys()) == ["train"] assert ds["train"].column_names == ["col_1", "col_2", "col_3"] assert ds["train"].num_rows == 8 assert ds["train"][0] == {"col_1": 0, "col_2": 0, "col_3": 0.0} @pytest.mark.parametrize("trust_remote_code, expected", [(False, False), (True, True), (None, True)]) def test_resolve_trust_remote_code(trust_remote_code, expected): assert resolve_trust_remote_code(trust_remote_code, repo_id="dummy") is expected @pytest.mark.parametrize("trust_remote_code, expected", [(False, False), (True, True), (None, ValueError)]) def test_resolve_trust_remote_code_future(trust_remote_code, expected): with patch.object(config, "HF_DATASETS_TRUST_REMOTE_CODE", None): # this will be the default soon if isinstance(expected, bool): resolve_trust_remote_code(trust_remote_code, repo_id="dummy") is expected else: with pytest.raises(expected): resolve_trust_remote_code(trust_remote_code, repo_id="dummy") @pytest.mark.integration def test_reload_old_cache_from_2_15(tmp_path: Path): cache_dir = tmp_path / "test_reload_old_cache_from_2_15" builder_cache_dir = ( cache_dir / "polinaeterna___audiofolder_two_configs_in_metadata/v2-374bfde4f55442bc/0.0.0/7896925d64deea5d" ) builder_cache_dir.mkdir(parents=True) arrow_path = builder_cache_dir / "audiofolder_two_configs_in_metadata-train.arrow" dataset_info_path = builder_cache_dir / "dataset_info.json" with dataset_info_path.open("w") as f: f.write("{}") arrow_path.touch() builder = load_dataset_builder( "polinaeterna/audiofolder_two_configs_in_metadata", "v2", data_files="v2/train/*", cache_dir=cache_dir.as_posix(), ) assert builder.cache_dir == builder_cache_dir.as_posix() # old cache from 2.15 builder = load_dataset_builder( "polinaeterna/audiofolder_two_configs_in_metadata", "v2", cache_dir=cache_dir.as_posix() ) assert ( builder.cache_dir == ( cache_dir / "polinaeterna___audiofolder_two_configs_in_metadata" / "v2" / "0.0.0" / str(builder.hash) ).as_posix() ) # new cache @pytest.mark.integration def test_update_dataset_card_data_with_standalone_yaml(): # Labels defined in .huggingface.yml because they are too long to be in README.md from datasets.utils.metadata import MetadataConfigs with patch( 
"datasets.utils.metadata.MetadataConfigs.from_dataset_card_data", side_effect=MetadataConfigs.from_dataset_card_data, ) as card_data_read_mock: builder = load_dataset_builder("datasets-maintainers/dataset-with-standalone-yaml") assert card_data_read_mock.call_args.args[0]["license"] is not None # from README.md assert card_data_read_mock.call_args.args[0]["dataset_info"] is not None # from standalone yaml assert card_data_read_mock.call_args.args[0]["tags"] == ["test"] # standalone yaml has precedence assert isinstance( builder.info.features["label"], datasets.ClassLabel ) # correctly loaded from long labels list in standalone yaml
import fnmatch import gc import os import shutil import tempfile import textwrap import time import unittest from io import BytesIO from pathlib import Path from unittest.mock import patch import numpy as np import pytest from huggingface_hub import DatasetCard, HfApi from huggingface_hub.utils import RepositoryNotFoundError from datasets import ( Audio, ClassLabel, Dataset, DatasetDict, DownloadManager, Features, Image, Value, load_dataset, load_dataset_builder, ) from datasets.config import METADATA_CONFIGS_FIELD from datasets.data_files import get_data_patterns from datasets.packaged_modules.folder_based_builder.folder_based_builder import ( FolderBasedBuilder, FolderBasedBuilderConfig, ) from datasets.utils.file_utils import cached_path from datasets.utils.hub import hf_hub_url from tests.fixtures.hub import CI_HUB_ENDPOINT, CI_HUB_USER, CI_HUB_USER_TOKEN from tests.utils import for_all_test_methods, require_pil, require_sndfile, xfail_if_500_502_http_error pytestmark = pytest.mark.integration @for_all_test_methods(xfail_if_500_502_http_error) @pytest.mark.usefixtures("ci_hub_config", "ci_hfh_hf_hub_url") class TestPushToHub: _api = HfApi(endpoint=CI_HUB_ENDPOINT) _token = CI_HUB_USER_TOKEN def test_push_dataset_dict_to_hub_no_token(self, temporary_repo, set_ci_hub_access_token): ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) local_ds = DatasetDict({"train": ds}) with temporary_repo() as ds_name: local_ds.push_to_hub(ds_name) hub_ds = load_dataset(ds_name, download_mode="force_redownload") assert local_ds.column_names == hub_ds.column_names assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys()) assert local_ds["train"].features == hub_ds["train"].features # Ensure that there is a single file on the repository that has the correct name files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset")) assert files == [".gitattributes", "README.md", "data/train-00000-of-00001.parquet"] def test_push_dataset_dict_to_hub_name_without_namespace(self, temporary_repo): ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) local_ds = DatasetDict({"train": ds}) with temporary_repo() as ds_name: # cannot create a repo without namespace with pytest.raises(RepositoryNotFoundError): local_ds.push_to_hub(ds_name.split("/")[-1], token=self._token) def test_push_dataset_dict_to_hub_datasets_with_different_features(self, cleanup_repo): ds_train = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) ds_test = Dataset.from_dict({"x": [True, False, True], "y": ["a", "b", "c"]}) local_ds = DatasetDict({"train": ds_train, "test": ds_test}) ds_name = f"{CI_HUB_USER}/test-{int(time.time() * 10e6)}" try: with pytest.raises(ValueError): local_ds.push_to_hub(ds_name.split("/")[-1], token=self._token) except AssertionError: cleanup_repo(ds_name) raise def test_push_dataset_dict_to_hub_private(self, temporary_repo): ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) local_ds = DatasetDict({"train": ds}) with temporary_repo() as ds_name: local_ds.push_to_hub(ds_name, token=self._token, private=True) hub_ds = load_dataset(ds_name, download_mode="force_redownload", token=self._token) assert local_ds.column_names == hub_ds.column_names assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys()) assert local_ds["train"].features == hub_ds["train"].features # Ensure that there is a single file on the repository that has the correct name files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset", token=self._token)) assert 
files == [".gitattributes", "README.md", "data/train-00000-of-00001.parquet"] def test_push_dataset_dict_to_hub(self, temporary_repo): ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) local_ds = DatasetDict({"train": ds}) with temporary_repo() as ds_name: local_ds.push_to_hub(ds_name, token=self._token) hub_ds = load_dataset(ds_name, download_mode="force_redownload") assert local_ds.column_names == hub_ds.column_names assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys()) assert local_ds["train"].features == hub_ds["train"].features # Ensure that there is a single file on the repository that has the correct name files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset", token=self._token)) assert files == [".gitattributes", "README.md", "data/train-00000-of-00001.parquet"] def test_push_dataset_dict_to_hub_with_pull_request(self, temporary_repo): ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) local_ds = DatasetDict({"train": ds}) with temporary_repo() as ds_name: local_ds.push_to_hub(ds_name, token=self._token, create_pr=True) hub_ds = load_dataset(ds_name, revision="refs/pr/1", download_mode="force_redownload") assert local_ds["train"].features == hub_ds["train"].features assert list(local_ds.keys()) == list(hub_ds.keys()) assert local_ds["train"].features == hub_ds["train"].features # Ensure that there is a single file on the repository that has the correct name files = sorted( self._api.list_repo_files(ds_name, revision="refs/pr/1", repo_type="dataset", token=self._token) ) assert files == [".gitattributes", "README.md", "data/train-00000-of-00001.parquet"] def test_push_dataset_dict_to_hub_with_revision(self, temporary_repo): ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) local_ds = DatasetDict({"train": ds}) with temporary_repo() as ds_name: local_ds.push_to_hub(ds_name, token=self._token, revision="dev") hub_ds = load_dataset(ds_name, revision="dev", download_mode="force_redownload") assert local_ds["train"].features == hub_ds["train"].features assert list(local_ds.keys()) == list(hub_ds.keys()) assert local_ds["train"].features == hub_ds["train"].features # Ensure that there is a single file on the repository that has the correct name files = sorted(self._api.list_repo_files(ds_name, revision="dev", repo_type="dataset", token=self._token)) assert files == [".gitattributes", "README.md", "data/train-00000-of-00001.parquet"] def test_push_dataset_dict_to_hub_multiple_files(self, temporary_repo): ds = Dataset.from_dict({"x": list(range(1000)), "y": list(range(1000))}) local_ds = DatasetDict({"train": ds}) with temporary_repo() as ds_name: with patch("datasets.config.MAX_SHARD_SIZE", "16KB"): local_ds.push_to_hub(ds_name, token=self._token) hub_ds = load_dataset(ds_name, download_mode="force_redownload") assert local_ds.column_names == hub_ds.column_names assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys()) assert local_ds["train"].features == hub_ds["train"].features # Ensure that there are two files on the repository that have the correct name files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset", token=self._token)) assert files == [ ".gitattributes", "README.md", "data/train-00000-of-00002.parquet", "data/train-00001-of-00002.parquet", ] def test_push_dataset_dict_to_hub_multiple_files_with_max_shard_size(self, temporary_repo): ds = Dataset.from_dict({"x": list(range(1000)), "y": list(range(1000))}) local_ds = DatasetDict({"train": ds}) with temporary_repo() as 
ds_name: local_ds.push_to_hub(ds_name, token=self._token, max_shard_size="16KB") hub_ds = load_dataset(ds_name, download_mode="force_redownload") assert local_ds.column_names == hub_ds.column_names assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys()) assert local_ds["train"].features == hub_ds["train"].features # Ensure that there are two files on the repository that have the correct name files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset", token=self._token)) assert files == [ ".gitattributes", "README.md", "data/train-00000-of-00002.parquet", "data/train-00001-of-00002.parquet", ] def test_push_dataset_dict_to_hub_multiple_files_with_num_shards(self, temporary_repo): ds = Dataset.from_dict({"x": list(range(1000)), "y": list(range(1000))}) local_ds = DatasetDict({"train": ds}) with temporary_repo() as ds_name: local_ds.push_to_hub(ds_name, token=self._token, num_shards={"train": 2}) hub_ds = load_dataset(ds_name, download_mode="force_redownload") assert local_ds.column_names == hub_ds.column_names assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys()) assert local_ds["train"].features == hub_ds["train"].features # Ensure that there are two files on the repository that have the correct name files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset", token=self._token)) assert files == [ ".gitattributes", "README.md", "data/train-00000-of-00002.parquet", "data/train-00001-of-00002.parquet", ] def test_push_dataset_dict_to_hub_with_multiple_commits(self, temporary_repo): ds = Dataset.from_dict({"x": list(range(1000)), "y": list(range(1000))}) local_ds = DatasetDict({"train": ds}) with temporary_repo() as ds_name: self._api.create_repo(ds_name, token=self._token, repo_type="dataset") num_commits_before_push = len(self._api.list_repo_commits(ds_name, repo_type="dataset", token=self._token)) with patch("datasets.config.MAX_SHARD_SIZE", "16KB"), patch( "datasets.config.UPLOADS_MAX_NUMBER_PER_COMMIT", 1 ): local_ds.push_to_hub(ds_name, token=self._token) hub_ds = load_dataset(ds_name, download_mode="force_redownload") assert local_ds.column_names == hub_ds.column_names assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys()) assert local_ds["train"].features == hub_ds["train"].features # Ensure that there are two files on the repository that have the correct name files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset", token=self._token)) assert files == [ ".gitattributes", "README.md", "data/train-00000-of-00002.parquet", "data/train-00001-of-00002.parquet", ] num_commits_after_push = len(self._api.list_repo_commits(ds_name, repo_type="dataset", token=self._token)) assert num_commits_after_push - num_commits_before_push > 1 def test_push_dataset_dict_to_hub_overwrite_files(self, temporary_repo): ds = Dataset.from_dict({"x": list(range(1000)), "y": list(range(1000))}) ds2 = Dataset.from_dict({"x": list(range(100)), "y": list(range(100))}) local_ds = DatasetDict({"train": ds, "random": ds2}) # Push to hub two times, but the second time with a larger amount of files. # Verify that the new files contain the correct dataset. with temporary_repo() as ds_name: local_ds.push_to_hub(ds_name, token=self._token) with tempfile.TemporaryDirectory() as tmp: # Add a file starting with "data" to ensure it doesn't get deleted. 
path = Path(tmp) / "datafile.txt" with open(path, "w") as f: f.write("Bogus file") self._api.upload_file( path_or_fileobj=str(path), path_in_repo="datafile.txt", repo_id=ds_name, repo_type="dataset", token=self._token, ) local_ds.push_to_hub(ds_name, token=self._token, max_shard_size=500 << 5) # Ensure that there are two files on the repository that have the correct name files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset", token=self._token)) assert files == [ ".gitattributes", "README.md", "data/random-00000-of-00001.parquet", "data/train-00000-of-00002.parquet", "data/train-00001-of-00002.parquet", "datafile.txt", ] self._api.delete_file("datafile.txt", repo_id=ds_name, repo_type="dataset", token=self._token) hub_ds = load_dataset(ds_name, download_mode="force_redownload") assert local_ds.column_names == hub_ds.column_names assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys()) assert local_ds["train"].features == hub_ds["train"].features del hub_ds # To ensure the reference to the memory-mapped Arrow file is dropped to avoid the PermissionError on Windows gc.collect() # Push to hub two times, but the second time with fewer files. # Verify that the new files contain the correct dataset and that non-necessary files have been deleted. with temporary_repo(ds_name): local_ds.push_to_hub(ds_name, token=self._token, max_shard_size=500 << 5) with tempfile.TemporaryDirectory() as tmp: # Add a file starting with "data" to ensure it doesn't get deleted. path = Path(tmp) / "datafile.txt" with open(path, "w") as f: f.write("Bogus file") self._api.upload_file( path_or_fileobj=str(path), path_in_repo="datafile.txt", repo_id=ds_name, repo_type="dataset", token=self._token, ) local_ds.push_to_hub(ds_name, token=self._token) # Ensure that there are two files on the repository that have the correct name files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset", token=self._token)) assert files == [ ".gitattributes", "README.md", "data/random-00000-of-00001.parquet", "data/train-00000-of-00001.parquet", "datafile.txt", ] # Keeping the "datafile.txt" breaks the load_dataset to think it's a text-based dataset self._api.delete_file("datafile.txt", repo_id=ds_name, repo_type="dataset", token=self._token) hub_ds = load_dataset(ds_name, download_mode="force_redownload") assert local_ds.column_names == hub_ds.column_names assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys()) assert local_ds["train"].features == hub_ds["train"].features def test_push_dataset_to_hub(self, temporary_repo): local_ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) with temporary_repo() as ds_name: local_ds.push_to_hub(ds_name, split="train", token=self._token) local_ds_dict = {"train": local_ds} hub_ds_dict = load_dataset(ds_name, download_mode="force_redownload") assert list(local_ds_dict.keys()) == list(hub_ds_dict.keys()) for ds_split_name in local_ds_dict.keys(): local_ds = local_ds_dict[ds_split_name] hub_ds = hub_ds_dict[ds_split_name] assert local_ds.column_names == hub_ds.column_names assert list(local_ds.features.keys()) == list(hub_ds.features.keys()) assert local_ds.features == hub_ds.features def test_push_dataset_to_hub_custom_features(self, temporary_repo): features = Features({"x": Value("int64"), "y": ClassLabel(names=["neg", "pos"])}) ds = Dataset.from_dict({"x": [1, 2, 3], "y": [0, 0, 1]}, features=features) with temporary_repo() as ds_name: ds.push_to_hub(ds_name, token=self._token) hub_ds = load_dataset(ds_name, 
split="train", download_mode="force_redownload") assert ds.column_names == hub_ds.column_names assert list(ds.features.keys()) == list(hub_ds.features.keys()) assert ds.features == hub_ds.features assert ds[:] == hub_ds[:] @require_sndfile def test_push_dataset_to_hub_custom_features_audio(self, temporary_repo): audio_path = os.path.join(os.path.dirname(__file__), "features", "data", "test_audio_44100.wav") data = {"x": [audio_path, None], "y": [0, -1]} features = Features({"x": Audio(), "y": Value("int32")}) ds = Dataset.from_dict(data, features=features) for embed_external_files in [True, False]: with temporary_repo() as ds_name: ds.push_to_hub(ds_name, embed_external_files=embed_external_files, token=self._token) hub_ds = load_dataset(ds_name, split="train", download_mode="force_redownload") assert ds.column_names == hub_ds.column_names assert list(ds.features.keys()) == list(hub_ds.features.keys()) assert ds.features == hub_ds.features np.testing.assert_equal(ds[0]["x"]["array"], hub_ds[0]["x"]["array"]) assert ds[1] == hub_ds[1] # don't test hub_ds[0] since audio decoding might be slightly different hub_ds = hub_ds.cast_column("x", Audio(decode=False)) elem = hub_ds[0]["x"] path, bytes_ = elem["path"], elem["bytes"] assert isinstance(path, str) assert os.path.basename(path) == "test_audio_44100.wav" assert bool(bytes_) == embed_external_files @require_pil def test_push_dataset_to_hub_custom_features_image(self, temporary_repo): image_path = os.path.join(os.path.dirname(__file__), "features", "data", "test_image_rgb.jpg") data = {"x": [image_path, None], "y": [0, -1]} features = Features({"x": Image(), "y": Value("int32")}) ds = Dataset.from_dict(data, features=features) for embed_external_files in [True, False]: with temporary_repo() as ds_name: ds.push_to_hub(ds_name, embed_external_files=embed_external_files, token=self._token) hub_ds = load_dataset(ds_name, split="train", download_mode="force_redownload") assert ds.column_names == hub_ds.column_names assert list(ds.features.keys()) == list(hub_ds.features.keys()) assert ds.features == hub_ds.features assert ds[:] == hub_ds[:] hub_ds = hub_ds.cast_column("x", Image(decode=False)) elem = hub_ds[0]["x"] path, bytes_ = elem["path"], elem["bytes"] assert isinstance(path, str) assert bool(bytes_) == embed_external_files @require_pil def test_push_dataset_to_hub_custom_features_image_list(self, temporary_repo): image_path = os.path.join(os.path.dirname(__file__), "features", "data", "test_image_rgb.jpg") data = {"x": [[image_path], [image_path, image_path]], "y": [0, -1]} features = Features({"x": [Image()], "y": Value("int32")}) ds = Dataset.from_dict(data, features=features) for embed_external_files in [True, False]: with temporary_repo() as ds_name: ds.push_to_hub(ds_name, embed_external_files=embed_external_files, token=self._token) hub_ds = load_dataset(ds_name, split="train", download_mode="force_redownload") assert ds.column_names == hub_ds.column_names assert list(ds.features.keys()) == list(hub_ds.features.keys()) assert ds.features == hub_ds.features assert ds[:] == hub_ds[:] hub_ds = hub_ds.cast_column("x", [Image(decode=False)]) elem = hub_ds[0]["x"][0] path, bytes_ = elem["path"], elem["bytes"] assert isinstance(path, str) assert bool(bytes_) == embed_external_files def test_push_dataset_dict_to_hub_custom_features(self, temporary_repo): features = Features({"x": Value("int64"), "y": ClassLabel(names=["neg", "pos"])}) ds = Dataset.from_dict({"x": [1, 2, 3], "y": [0, 0, 1]}, features=features) local_ds = DatasetDict({"test": 
ds}) with temporary_repo() as ds_name: local_ds.push_to_hub(ds_name, token=self._token) hub_ds = load_dataset(ds_name, download_mode="force_redownload") assert local_ds.column_names == hub_ds.column_names assert list(local_ds["test"].features.keys()) == list(hub_ds["test"].features.keys()) assert local_ds["test"].features == hub_ds["test"].features def test_push_dataset_to_hub_custom_splits(self, temporary_repo): ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) with temporary_repo() as ds_name: ds.push_to_hub(ds_name, split="random", token=self._token) hub_ds = load_dataset(ds_name, download_mode="force_redownload") assert ds.column_names == hub_ds["random"].column_names assert list(ds.features.keys()) == list(hub_ds["random"].features.keys()) assert ds.features == hub_ds["random"].features def test_push_dataset_to_hub_multiple_splits_one_by_one(self, temporary_repo): ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) with temporary_repo() as ds_name: ds.push_to_hub(ds_name, split="train", token=self._token) ds.push_to_hub(ds_name, split="test", token=self._token) hub_ds = load_dataset(ds_name, download_mode="force_redownload") assert sorted(hub_ds) == ["test", "train"] assert ds.column_names == hub_ds["train"].column_names assert list(ds.features.keys()) == list(hub_ds["train"].features.keys()) assert ds.features == hub_ds["train"].features def test_push_dataset_dict_to_hub_custom_splits(self, temporary_repo): ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) local_ds = DatasetDict({"random": ds}) with temporary_repo() as ds_name: local_ds.push_to_hub(ds_name, token=self._token) hub_ds = load_dataset(ds_name, download_mode="force_redownload") assert local_ds.column_names == hub_ds.column_names assert list(local_ds["random"].features.keys()) == list(hub_ds["random"].features.keys()) assert local_ds["random"].features == hub_ds["random"].features @unittest.skip("This test cannot pass until iterable datasets have push to hub") def test_push_streaming_dataset_dict_to_hub(self, temporary_repo): ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) local_ds = DatasetDict({"train": ds}) with tempfile.TemporaryDirectory() as tmp: local_ds.save_to_disk(tmp) local_ds = load_dataset(tmp, streaming=True) with temporary_repo() as ds_name: local_ds.push_to_hub(ds_name, token=self._token) hub_ds = load_dataset(ds_name, download_mode="force_redownload") assert local_ds.column_names == hub_ds.column_names assert list(local_ds["train"].features.keys()) == list(hub_ds["train"].features.keys()) assert local_ds["train"].features == hub_ds["train"].features def test_push_multiple_dataset_configs_to_hub_load_dataset_builder(self, temporary_repo): ds_default = Dataset.from_dict({"a": [0], "b": [1]}) ds_config1 = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) ds_config2 = Dataset.from_dict({"foo": [1, 2], "bar": [4, 5]}) with temporary_repo() as ds_name: ds_default.push_to_hub(ds_name, token=self._token) ds_config1.push_to_hub(ds_name, "config1", token=self._token) ds_config2.push_to_hub(ds_name, "config2", token=self._token) ds_builder_default = load_dataset_builder(ds_name, download_mode="force_redownload") # default config assert len(ds_builder_default.BUILDER_CONFIGS) == 3 assert len(ds_builder_default.config.data_files["train"]) == 1 assert fnmatch.fnmatch( ds_builder_default.config.data_files["train"][0], "*/data/train-*", ) ds_builder_config1 = load_dataset_builder(ds_name, "config1", download_mode="force_redownload") assert len(ds_builder_config1.BUILDER_CONFIGS) == 3 assert 
len(ds_builder_config1.config.data_files["train"]) == 1 assert fnmatch.fnmatch( ds_builder_config1.config.data_files["train"][0], "*/config1/train-*", ) ds_builder_config2 = load_dataset_builder(ds_name, "config2", download_mode="force_redownload") assert len(ds_builder_config2.BUILDER_CONFIGS) == 3 assert len(ds_builder_config2.config.data_files["train"]) == 1 assert fnmatch.fnmatch( ds_builder_config2.config.data_files["train"][0], "*/config2/train-*", ) with pytest.raises(ValueError): # no config 'config3' load_dataset_builder(ds_name, "config3", download_mode="force_redownload") def test_push_multiple_dataset_configs_to_hub_load_dataset(self, temporary_repo): ds_default = Dataset.from_dict({"a": [0], "b": [1]}) ds_config1 = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) ds_config2 = Dataset.from_dict({"foo": [1, 2], "bar": [4, 5]}) with temporary_repo() as ds_name: ds_default.push_to_hub(ds_name, token=self._token) ds_config1.push_to_hub(ds_name, "config1", token=self._token) ds_config2.push_to_hub(ds_name, "config2", token=self._token) files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset")) assert files == [ ".gitattributes", "README.md", "config1/train-00000-of-00001.parquet", "config2/train-00000-of-00001.parquet", "data/train-00000-of-00001.parquet", ] hub_ds_default = load_dataset(ds_name, download_mode="force_redownload") hub_ds_config1 = load_dataset(ds_name, "config1", download_mode="force_redownload") hub_ds_config2 = load_dataset(ds_name, "config2", download_mode="force_redownload") # only "train" split assert len(hub_ds_default) == len(hub_ds_config1) == len(hub_ds_config2) == 1 assert ds_default.column_names == hub_ds_default["train"].column_names == ["a", "b"] assert ds_config1.column_names == hub_ds_config1["train"].column_names == ["x", "y"] assert ds_config2.column_names == hub_ds_config2["train"].column_names == ["foo", "bar"] assert ds_default.features == hub_ds_default["train"].features assert ds_config1.features == hub_ds_config1["train"].features assert ds_config2.features == hub_ds_config2["train"].features assert ds_default.num_rows == hub_ds_default["train"].num_rows == 1 assert ds_config1.num_rows == hub_ds_config1["train"].num_rows == 3 assert ds_config2.num_rows == hub_ds_config2["train"].num_rows == 2 with pytest.raises(ValueError): # no config 'config3' load_dataset(ds_name, "config3", download_mode="force_redownload") @pytest.mark.parametrize("specific_default_config_name", [False, True]) def test_push_multiple_dataset_configs_to_hub_readme_metadata_content( self, specific_default_config_name, temporary_repo ): ds_default = Dataset.from_dict({"a": [0], "b": [2]}) ds_config1 = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) ds_config2 = Dataset.from_dict({"foo": [1, 2], "bar": [4, 5]}) with temporary_repo() as ds_name: if specific_default_config_name: ds_default.push_to_hub(ds_name, config_name="config0", set_default=True, token=self._token) else: ds_default.push_to_hub(ds_name, token=self._token) ds_config1.push_to_hub(ds_name, "config1", token=self._token) ds_config2.push_to_hub(ds_name, "config2", token=self._token) # check that configs args was correctly pushed to README.md ds_readme_path = cached_path(hf_hub_url(ds_name, "README.md")) dataset_card_data = DatasetCard.load(ds_readme_path).data assert METADATA_CONFIGS_FIELD in dataset_card_data assert isinstance(dataset_card_data[METADATA_CONFIGS_FIELD], list) assert sorted(dataset_card_data[METADATA_CONFIGS_FIELD], key=lambda x: x["config_name"]) == ( [ { "config_name": 
"config0", "data_files": [ {"split": "train", "path": "config0/train-*"}, ], "default": True, }, ] if specific_default_config_name else [] ) + [ { "config_name": "config1", "data_files": [ {"split": "train", "path": "config1/train-*"}, ], }, { "config_name": "config2", "data_files": [ {"split": "train", "path": "config2/train-*"}, ], }, ] + ( [] if specific_default_config_name else [ { "config_name": "default", "data_files": [ {"split": "train", "path": "data/train-*"}, ], }, ] ) def test_push_multiple_dataset_dict_configs_to_hub_load_dataset_builder(self, temporary_repo): ds_default = Dataset.from_dict({"a": [0], "b": [1]}) ds_config1 = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) ds_config2 = Dataset.from_dict({"foo": [1, 2], "bar": [4, 5]}) ds_default = DatasetDict({"random": ds_default}) ds_config1 = DatasetDict({"random": ds_config1}) ds_config2 = DatasetDict({"random": ds_config2}) with temporary_repo() as ds_name: ds_default.push_to_hub(ds_name, token=self._token) ds_config1.push_to_hub(ds_name, "config1", token=self._token) ds_config2.push_to_hub(ds_name, "config2", token=self._token) ds_builder_default = load_dataset_builder(ds_name, download_mode="force_redownload") # default config assert len(ds_builder_default.BUILDER_CONFIGS) == 3 assert len(ds_builder_default.config.data_files["random"]) == 1 assert fnmatch.fnmatch( ds_builder_default.config.data_files["random"][0], "*/data/random-*", ) ds_builder_config1 = load_dataset_builder(ds_name, "config1", download_mode="force_redownload") assert len(ds_builder_config1.BUILDER_CONFIGS) == 3 assert len(ds_builder_config1.config.data_files["random"]) == 1 assert fnmatch.fnmatch( ds_builder_config1.config.data_files["random"][0], "*/config1/random-*", ) ds_builder_config2 = load_dataset_builder(ds_name, "config2", download_mode="force_redownload") assert len(ds_builder_config2.BUILDER_CONFIGS) == 3 assert len(ds_builder_config2.config.data_files["random"]) == 1 assert fnmatch.fnmatch( ds_builder_config2.config.data_files["random"][0], "*/config2/random-*", ) with pytest.raises(ValueError): # no config named 'config3' load_dataset_builder(ds_name, "config3", download_mode="force_redownload") def test_push_multiple_dataset_dict_configs_to_hub_load_dataset(self, temporary_repo): ds_default = Dataset.from_dict({"a": [0], "b": [1]}) ds_config1 = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) ds_config2 = Dataset.from_dict({"foo": [1, 2], "bar": [4, 5]}) ds_default = DatasetDict({"train": ds_default, "random": ds_default}) ds_config1 = DatasetDict({"train": ds_config1, "random": ds_config1}) ds_config2 = DatasetDict({"train": ds_config2, "random": ds_config2}) with temporary_repo() as ds_name: ds_default.push_to_hub(ds_name, token=self._token) ds_config1.push_to_hub(ds_name, "config1", token=self._token) ds_config2.push_to_hub(ds_name, "config2", token=self._token) files = sorted(self._api.list_repo_files(ds_name, repo_type="dataset")) assert files == [ ".gitattributes", "README.md", "config1/random-00000-of-00001.parquet", "config1/train-00000-of-00001.parquet", "config2/random-00000-of-00001.parquet", "config2/train-00000-of-00001.parquet", "data/random-00000-of-00001.parquet", "data/train-00000-of-00001.parquet", ] hub_ds_default = load_dataset(ds_name, download_mode="force_redownload") hub_ds_config1 = load_dataset(ds_name, "config1", download_mode="force_redownload") hub_ds_config2 = load_dataset(ds_name, "config2", download_mode="force_redownload") # two splits expected_splits = ["random", "train"] assert len(hub_ds_default) 
== len(hub_ds_config1) == len(hub_ds_config2) == 2 assert sorted(hub_ds_default) == sorted(hub_ds_config1) == sorted(hub_ds_config2) == expected_splits for split in expected_splits: assert ds_default[split].column_names == hub_ds_default[split].column_names == ["a", "b"] assert ds_config1[split].column_names == hub_ds_config1[split].column_names == ["x", "y"] assert ds_config2[split].column_names == hub_ds_config2[split].column_names == ["foo", "bar"] assert ds_default[split].features == hub_ds_default[split].features assert ds_config1[split].features == hub_ds_config1[split].features assert ds_config2[split].features == hub_ds_config2["train"].features assert ds_default[split].num_rows == hub_ds_default[split].num_rows == 1 assert ds_config1[split].num_rows == hub_ds_config1[split].num_rows == 3 assert ds_config2[split].num_rows == hub_ds_config2[split].num_rows == 2 with pytest.raises(ValueError): # no config 'config3' load_dataset(ds_name, "config3", download_mode="force_redownload") @pytest.mark.parametrize("specific_default_config_name", [False, True]) def test_push_multiple_dataset_dict_configs_to_hub_readme_metadata_content( self, specific_default_config_name, temporary_repo ): ds_default = Dataset.from_dict({"a": [0], "b": [1]}) ds_config1 = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) ds_config2 = Dataset.from_dict({"foo": [1, 2], "bar": [4, 5]}) ds_default = DatasetDict({"train": ds_default, "random": ds_default}) ds_config1 = DatasetDict({"train": ds_config1, "random": ds_config1}) ds_config2 = DatasetDict({"train": ds_config2, "random": ds_config2}) with temporary_repo() as ds_name: if specific_default_config_name: ds_default.push_to_hub(ds_name, config_name="config0", set_default=True, token=self._token) else: ds_default.push_to_hub(ds_name, token=self._token) ds_config1.push_to_hub(ds_name, "config1", token=self._token) ds_config2.push_to_hub(ds_name, "config2", token=self._token) # check that configs args was correctly pushed to README.md ds_readme_path = cached_path(hf_hub_url(ds_name, "README.md")) dataset_card_data = DatasetCard.load(ds_readme_path).data assert METADATA_CONFIGS_FIELD in dataset_card_data assert isinstance(dataset_card_data[METADATA_CONFIGS_FIELD], list) assert sorted(dataset_card_data[METADATA_CONFIGS_FIELD], key=lambda x: x["config_name"]) == ( [ { "config_name": "config0", "data_files": [ {"split": "train", "path": "config0/train-*"}, {"split": "random", "path": "config0/random-*"}, ], "default": True, }, ] if specific_default_config_name else [] ) + [ { "config_name": "config1", "data_files": [ {"split": "train", "path": "config1/train-*"}, {"split": "random", "path": "config1/random-*"}, ], }, { "config_name": "config2", "data_files": [ {"split": "train", "path": "config2/train-*"}, {"split": "random", "path": "config2/random-*"}, ], }, ] + ( [] if specific_default_config_name else [ { "config_name": "default", "data_files": [ {"split": "train", "path": "data/train-*"}, {"split": "random", "path": "data/random-*"}, ], }, ] ) def test_push_dataset_to_hub_with_config_no_metadata_configs(self, temporary_repo): ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) ds_another_config = Dataset.from_dict({"foo": [1, 2], "bar": [4, 5]}) parquet_buf = BytesIO() ds.to_parquet(parquet_buf) parquet_content = parquet_buf.getvalue() with temporary_repo() as ds_name: self._api.create_repo(ds_name, token=self._token, repo_type="dataset") # old push_to_hub was uploading the parquet files only - without metadata configs self._api.upload_file( 
path_or_fileobj=parquet_content, path_in_repo="data/train-00000-of-00001.parquet", repo_id=ds_name, repo_type="dataset", token=self._token, ) ds_another_config.push_to_hub(ds_name, "another_config", token=self._token) ds_builder = load_dataset_builder(ds_name, download_mode="force_redownload") assert len(ds_builder.config.data_files) == 1 assert len(ds_builder.config.data_files["train"]) == 1 assert fnmatch.fnmatch(ds_builder.config.data_files["train"][0], "*/data/train-00000-of-00001.parquet") ds_another_config_builder = load_dataset_builder( ds_name, "another_config", download_mode="force_redownload" ) assert len(ds_another_config_builder.config.data_files) == 1 assert len(ds_another_config_builder.config.data_files["train"]) == 1 assert fnmatch.fnmatch( ds_another_config_builder.config.data_files["train"][0], "*/another_config/train-00000-of-00001.parquet", ) def test_push_dataset_dict_to_hub_with_config_no_metadata_configs(self, temporary_repo): ds = Dataset.from_dict({"x": [1, 2, 3], "y": [4, 5, 6]}) ds_another_config = Dataset.from_dict({"foo": [1, 2], "bar": [4, 5]}) parquet_buf = BytesIO() ds.to_parquet(parquet_buf) parquet_content = parquet_buf.getvalue() local_ds_another_config = DatasetDict({"random": ds_another_config}) with temporary_repo() as ds_name: self._api.create_repo(ds_name, token=self._token, repo_type="dataset") # old push_to_hub was uploading the parquet files only - without metadata configs self._api.upload_file( path_or_fileobj=parquet_content, path_in_repo="data/random-00000-of-00001.parquet", repo_id=ds_name, repo_type="dataset", token=self._token, ) local_ds_another_config.push_to_hub(ds_name, "another_config", token=self._token) ds_builder = load_dataset_builder(ds_name, download_mode="force_redownload") assert len(ds_builder.config.data_files) == 1 assert len(ds_builder.config.data_files["random"]) == 1 assert fnmatch.fnmatch(ds_builder.config.data_files["random"][0], "*/data/random-00000-of-00001.parquet") ds_another_config_builder = load_dataset_builder( ds_name, "another_config", download_mode="force_redownload" ) assert len(ds_another_config_builder.config.data_files) == 1 assert len(ds_another_config_builder.config.data_files["random"]) == 1 assert fnmatch.fnmatch( ds_another_config_builder.config.data_files["random"][0], "*/another_config/random-00000-of-00001.parquet", ) class DummyFolderBasedBuilder(FolderBasedBuilder): BASE_FEATURE = dict BASE_COLUMN_NAME = "base" BUILDER_CONFIG_CLASS = FolderBasedBuilderConfig EXTENSIONS = [".txt"] # CLASSIFICATION_TASK = TextClassification(text_column="base", label_column="label") @pytest.fixture(params=[".jsonl", ".csv"]) def text_file_with_metadata(request, tmp_path, text_file): metadata_filename_extension = request.param data_dir = tmp_path / "data_dir" data_dir.mkdir() text_file_path = data_dir / "file.txt" shutil.copyfile(text_file, text_file_path) metadata_file_path = data_dir / f"metadata{metadata_filename_extension}" metadata = textwrap.dedent( """\ {"file_name": "file.txt", "additional_feature": "Dummy file"} """ if metadata_filename_extension == ".jsonl" else """\ file_name,additional_feature file.txt,Dummy file """ ) with open(metadata_file_path, "w", encoding="utf-8") as f: f.write(metadata) return text_file_path, metadata_file_path @for_all_test_methods(xfail_if_500_502_http_error) @pytest.mark.usefixtures("ci_hub_config", "ci_hfh_hf_hub_url") class TestLoadFromHub: _api = HfApi(endpoint=CI_HUB_ENDPOINT) _token = CI_HUB_USER_TOKEN def test_load_dataset_with_metadata_file(self, temporary_repo, 
text_file_with_metadata, tmp_path): text_file_path, metadata_file_path = text_file_with_metadata data_dir_path = text_file_path.parent cache_dir_path = tmp_path / ".cache" cache_dir_path.mkdir() with temporary_repo() as repo_id: self._api.create_repo(repo_id, token=self._token, repo_type="dataset") self._api.upload_folder( folder_path=str(data_dir_path), repo_id=repo_id, repo_type="dataset", token=self._token, ) data_files = [ f"hf://datasets/{repo_id}/{text_file_path.name}", f"hf://datasets/{repo_id}/{metadata_file_path.name}", ] builder = DummyFolderBasedBuilder( dataset_name=repo_id.split("/")[-1], data_files=data_files, cache_dir=str(cache_dir_path) ) download_manager = DownloadManager() gen_kwargs = builder._split_generators(download_manager)[0].gen_kwargs generator = builder._generate_examples(**gen_kwargs) result = [example for _, example in generator] assert len(result) == 1 def test_get_data_patterns(self, temporary_repo, tmp_path): repo_dir = tmp_path / "test_get_data_patterns" data_dir = repo_dir / "data" data_dir.mkdir(parents=True) data_file = data_dir / "train-00001-of-00009.parquet" data_file.touch() with temporary_repo() as repo_id: self._api.create_repo(repo_id, token=self._token, repo_type="dataset") self._api.upload_folder( folder_path=str(repo_dir), repo_id=repo_id, repo_type="dataset", token=self._token, ) data_file_patterns = get_data_patterns(f"hf://datasets/{repo_id}") assert data_file_patterns == { "train": ["data/train-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*"] }
<jupyter_start><jupyter_text>
Unit 4: Code your first Deep Reinforcement Learning Algorithm with PyTorch: Reinforce. And test its robustness 💪

In this notebook, you'll code your first Deep Reinforcement Learning algorithm from scratch: Reinforce (also called Monte Carlo Policy Gradient).

Reinforce is a *Policy-based method*: a Deep Reinforcement Learning algorithm that tries **to optimize the policy directly without using an action-value function**.

More precisely, Reinforce is a *Policy-gradient method*, a subclass of *Policy-based methods* that aims **to optimize the policy directly by estimating the weights of the optimal policy using gradient ascent**.

To test its robustness, we're going to train it in 2 different simple environments:
- CartPole-v1
- PixelcopterEnv

⬇️ Here is an example of what **you will achieve at the end of this notebook.** ⬇️

🎮 Environments:
- [CartPole-v1](https://www.gymlibrary.dev/environments/classic_control/cart_pole/)
- [PixelCopter](https://pygame-learning-environment.readthedocs.io/en/latest/user/games/pixelcopter.html)

📚 RL-Library:
- Python
- PyTorch

We're constantly trying to improve our tutorials, so **if you find some issues in this notebook**, please [open an issue on the GitHub Repo](https://github.com/huggingface/deep-rl-class/issues).

Objectives of this notebook 🏆

At the end of the notebook, you will:
- Be able to **code a Reinforce algorithm from scratch using PyTorch.**
- Be able to **test the robustness of your agent using simple environments.**
- Be able to **push your trained agent to the Hub** with a nice video replay and an evaluation score 🔥.

This notebook is from the Deep Reinforcement Learning Course

In this free course, you will:
- 📖 Study Deep Reinforcement Learning in **theory and practice**.
- 🧑‍💻 Learn to **use famous Deep RL libraries** such as Stable Baselines3, RL Baselines3 Zoo, CleanRL and Sample Factory 2.0.
- 🤖 Train **agents in unique environments**

And more! Check 📚 the syllabus 👉 https://simoninithomas.github.io/deep-rl-course

Don’t forget to **sign up to the course** (we are collecting your email to be able to **send you the links when each Unit is published and give you information about the challenges and updates**).

The best way to keep in touch is to join our discord server to exchange with the community and with us 👉🏻 https://discord.gg/ydHrjt3WP5

Prerequisites 🏗️

Before diving into the notebook, you need to:
🔲 📚 [Study Policy Gradients by reading Unit 4](https://huggingface.co/deep-rl-course/unit4/introduction)

Let's code the Reinforce algorithm from scratch 🔥

To validate this hands-on for the certification process, you need to push your trained models to the Hub:
- Get a result of >= 350 for `CartPole-v1`.
- Get a result of >= 5 for `PixelCopter`.

To find your result, go to the leaderboard and find your model: **the result = mean_reward - std of reward**. **If you don't see your model on the leaderboard, go to the bottom of the leaderboard page and click on the refresh button**.

For more information about the certification process, check this section 👉 https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process

Advice 💡

It's better to run this colab from a copy on your Google Drive, so that **if it times out** you still have the saved notebook on your Google Drive and don't need to redo everything from scratch.

To do that, you can either press `Ctrl + S` or go to `File > Save a copy in Google Drive`.

Set the GPU 💪
- To **accelerate the agent's training, we'll use a GPU**.
To do that, go to `Runtime > Change Runtime type` > `Hardware Accelerator > GPU`.

Create a virtual display 🖥

During the notebook, we'll need to generate a replay video. To do so, with colab, **we need to have a virtual screen to be able to render the environment** (and thus record the frames).

Hence the following cell will install the libraries and create and run a virtual screen 🖥
<jupyter_code>
%%capture
!apt install python-opengl
!apt install ffmpeg
!apt install xvfb
!pip install pyvirtualdisplay
!pip install pyglet==1.5.1

# Virtual display
from pyvirtualdisplay import Display

virtual_display = Display(visible=0, size=(1400, 900))
virtual_display.start()
<jupyter_output><empty_output>
<jupyter_text>
Install the dependencies 🔽

The first step is to install the dependencies. We’ll install multiple ones:
- `gym`
- `gym-games`: Extra gym environments made with PyGame.
- `huggingface_hub`: 🤗 works as a central place where anyone can share and explore models and datasets. It has versioning, metrics, visualizations, and other features that will allow you to easily collaborate with others.

You may be wondering why we install gym and not gymnasium, the more recent version of gym. **Because the gym-games we are using are not yet updated for gymnasium**.

The differences you'll encounter here are:
- In `gym` we don't have `terminated` and `truncated` but only `done`.
- In `gym`, `env.step()` returns `state, reward, done, info`.

You can learn more about the differences between Gym and Gymnasium here 👉 https://gymnasium.farama.org/content/migration-guide/

You can see all the available Reinforce models here 👉 https://huggingface.co/models?other=reinforce

And you can find all the Deep Reinforcement Learning models here 👉 https://huggingface.co/models?pipeline_tag=reinforcement-learning
<jupyter_code>
!pip install -r https://raw.githubusercontent.com/huggingface/deep-rl-class/main/notebooks/unit4/requirements-unit4.txt
<jupyter_output><empty_output>
<jupyter_text>
Import the packages 📦

In addition to importing the installed libraries, we also import:
- `imageio`: A library that will help us generate a replay video.
<jupyter_code>
import numpy as np
from collections import deque

import matplotlib.pyplot as plt
%matplotlib inline

# PyTorch
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical

# Gym
import gym
import gym_pygame

# Hugging Face Hub
from huggingface_hub import notebook_login  # To log to our Hugging Face account to be able to upload models to the Hub.

import imageio
<jupyter_output><empty_output>
<jupyter_text>
Check if we have a GPU
- Let's check if we have a GPU
- If it's the case, you should see `cuda:0`
<jupyter_code>
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
<jupyter_output><empty_output>
<jupyter_text>
We're now ready to implement our Reinforce algorithm 🔥

First agent: Playing CartPole-v1 🤖

Create the CartPole environment and understand how it works

[The environment 🎮](https://www.gymlibrary.dev/environments/classic_control/cart_pole/)

Why do we use a simple environment like CartPole-v1?

As explained in [Reinforcement Learning Tips and Tricks](https://stable-baselines3.readthedocs.io/en/master/guide/rl_tips.html), when you implement your agent from scratch, you need **to be sure that it works correctly and to find bugs with easy environments before going deeper**, since finding bugs will be much easier in simple environments.
Since finding bugs will be much easier in simple environments.> Try to have some “sign of life” on toy problems> Validate the implementation by making it run on harder and harder envs (you can compare results against the RL zoo). You usually need to run hyperparameter optimization for that step.___ The CartPole-v1 environment> A pole is attached by an un-actuated joint to a cart, which moves along a frictionless track. The pendulum is placed upright on the cart and the goal is to balance the pole by applying forces in the left and right direction on the cart.So, we start with CartPole-v1. The goal is to push the cart left or right **so that the pole stays in the equilibrium.**The episode ends if:- The pole Angle is greater than ±12°- Cart Position is greater than ±2.4- Episode length is greater than 500We get a reward 💰 of +1 every timestep the Pole stays in the equilibrium.<jupyter_code>env_id = "CartPole-v1" # Create the env env = gym.make(env_id) # Create the evaluation env eval_env = gym.make(env_id) # Get the state space and action space s_size = env.observation_space.shape[0] a_size = env.action_space.n print("_____OBSERVATION SPACE_____ \n") print("The State Space is: ", s_size) print("Sample observation", env.observation_space.sample()) # Get a random observation print("\n _____ACTION SPACE_____ \n") print("The Action Space is: ", a_size) print("Action Space Sample", env.action_space.sample()) # Take a random action<jupyter_output><empty_output><jupyter_text>Let's build the Reinforce ArchitectureThis implementation is based on two implementations:- [PyTorch official Reinforcement Learning example](https://github.com/pytorch/examples/blob/main/reinforcement_learning/reinforce.py)- [Udacity Reinforce](https://github.com/udacity/deep-reinforcement-learning/blob/master/reinforce/REINFORCE.ipynb)- [Improvement of the integration by Chris1nexus](https://github.com/huggingface/deep-rl-class/pull/95) So we want:- Two fully connected layers (fc1 and fc2).- Using ReLU as activation function of fc1- Using Softmax to output a probability distribution over actions<jupyter_code>class Policy(nn.Module): def __init__(self, s_size, a_size, h_size): super(Policy, self).__init__() # Create two fully connected layers def forward(self, x): # Define the forward pass # state goes to fc1 then we apply ReLU activation function # fc1 outputs goes to fc2 # We output the softmax def act(self, state): """ Given a state, take action """ state = torch.from_numpy(state).float().unsqueeze(0).to(device) probs = self.forward(state).cpu() m = Categorical(probs) action = np.argmax(m) return action.item(), m.log_prob(action)<jupyter_output><empty_output><jupyter_text>Solution<jupyter_code>class Policy(nn.Module): def __init__(self, s_size, a_size, h_size): super(Policy, self).__init__() self.fc1 = nn.Linear(s_size, h_size) self.fc2 = nn.Linear(h_size, a_size) def forward(self, x): x = F.relu(self.fc1(x)) x = self.fc2(x) return F.softmax(x, dim=1) def act(self, state): state = torch.from_numpy(state).float().unsqueeze(0).to(device) probs = self.forward(state).cpu() m = Categorical(probs) action = np.argmax(m) return action.item(), m.log_prob(action)<jupyter_output><empty_output><jupyter_text>I make a mistake, can you guess where?- To find out let's make a forward pass:<jupyter_code>debug_policy = Policy(s_size, a_size, 64).to(device) debug_policy.act(env.reset())<jupyter_output><empty_output><jupyter_text>- Here we see that the error says `ValueError: The value argument to log_prob must be a Tensor`- It means that 
`action` in `m.log_prob(action)` must be a Tensor **but it's not.**- Do you know why? Check the `act` function and try to see why it does not work. Advice 💡: Something is wrong in this implementation. Remember that in the `act` function **we want to sample an action from the probability distribution over actions**. (Real) Solution<jupyter_code>class Policy(nn.Module): def __init__(self, s_size, a_size, h_size): super(Policy, self).__init__() self.fc1 = nn.Linear(s_size, h_size) self.fc2 = nn.Linear(h_size, a_size) def forward(self, x): x = F.relu(self.fc1(x)) x = self.fc2(x) return F.softmax(x, dim=1) def act(self, state): state = torch.from_numpy(state).float().unsqueeze(0).to(device) probs = self.forward(state).cpu() m = Categorical(probs) action = m.sample() return action.item(), m.log_prob(action)<jupyter_output><empty_output><jupyter_text>By using CartPole, it was easier to debug since **we know that the bug comes from our integration and not from our simple environment**. - Since **we want to sample an action from the probability distribution over actions**, we can't use `action = np.argmax(m)`, since it will always output the action that has the highest probability.- We need to replace it with `action = m.sample()`, which will sample an action from the probability distribution P(.|s) Let's build the Reinforce Training AlgorithmThis is the Reinforce algorithm pseudocode: - When we calculate the return Gt (line 6), we see that we calculate the sum of discounted rewards **starting at timestep t**.- Why? Because our policy should only **reinforce actions on the basis of their consequences**: rewards obtained before taking an action are useless (since they were not caused by the action), **only the ones that come after the action matter**.- Before coding this, you should read the section [don't let the past distract you](https://spinningup.openai.com/en/latest/spinningup/rl_intro3.html#don-t-let-the-past-distract-you), which explains why we use the reward-to-go policy gradient.We use an interesting technique coded by [Chris1nexus](https://github.com/Chris1nexus) to **compute the return at each timestep efficiently**. The comments explain the procedure. Don't hesitate to also [check the PR explanation](https://github.com/huggingface/deep-rl-class/pull/95). But overall, the idea is to **compute the return at each timestep efficiently**. The second question you may ask is: **why do we minimize the loss**? Didn't we talk about Gradient Ascent, not Gradient Descent?- We want to maximize our utility function $J(\theta)$, but in PyTorch, as in TensorFlow, it's better to **minimize an objective function.** - So let's say we want to reinforce action 3 at a certain timestep. Before training, this action's probability P is 0.25. - So we want to modify $\theta$ such that $\pi_\theta(a_3|s; \theta) > 0.25$ - Because all probabilities must sum to 1, maximizing $\pi_\theta(a_3|s; \theta)$ will **minimize the other actions' probabilities.** - So we should tell PyTorch **to minimize $1 - \pi_\theta(a_3|s; \theta)$.** - This loss function approaches 0 as $\pi_\theta(a_3|s; \theta)$ nears 1.
- So we are encouraging the gradient to max $\pi_\theta(a_3|s; \theta)$<jupyter_code>def reinforce(policy, optimizer, n_training_episodes, max_t, gamma, print_every): # Help us to calculate the score during the training scores_deque = deque(maxlen=100) scores = [] # Line 3 of pseudocode for i_episode in range(1, n_training_episodes+1): saved_log_probs = [] rewards = [] state = # TODO: reset the environment # Line 4 of pseudocode for t in range(max_t): action, log_prob = # TODO get the action saved_log_probs.append(log_prob) state, reward, done, _ = # TODO: take an env step rewards.append(reward) if done: break scores_deque.append(sum(rewards)) scores.append(sum(rewards)) # Line 6 of pseudocode: calculate the return returns = deque(maxlen=max_t) n_steps = len(rewards) # Compute the discounted returns at each timestep, # as the sum of the gamma-discounted return at time t (G_t) + the reward at time t # In O(N) time, where N is the number of time steps # (this definition of the discounted return G_t follows the definition of this quantity # shown at page 44 of Sutton&Barto 2017 2nd draft) # G_t = r_(t+1) + r_(t+2) + ... # Given this formulation, the returns at each timestep t can be computed # by re-using the computed future returns G_(t+1) to compute the current return G_t # G_t = r_(t+1) + gamma*G_(t+1) # G_(t-1) = r_t + gamma* G_t # (this follows a dynamic programming approach, with which we memorize solutions in order # to avoid computing them multiple times) # This is correct since the above is equivalent to (see also page 46 of Sutton&Barto 2017 2nd draft) # G_(t-1) = r_t + gamma*r_(t+1) + gamma*gamma*r_(t+2) + ... ## Given the above, we calculate the returns at timestep t as: # gamma[t] * return[t] + reward[t] # ## We compute this starting from the last timestep to the first, in order ## to employ the formula presented above and avoid redundant computations that would be needed ## if we were to do it from first to last. ## Hence, the queue "returns" will hold the returns in chronological order, from t=0 to t=n_steps ## thanks to the appendleft() function which allows to append to the position 0 in constant time O(1) ## a normal python list would instead require O(N) to do this. 
for t in range(n_steps)[::-1]: disc_return_t = (returns[0] if len(returns)>0 else 0) returns.appendleft( ) # TODO: complete here ## standardization of the returns is employed to make training more stable eps = np.finfo(np.float32).eps.item() ## eps is the smallest representable float, which is # added to the standard deviation of the returns to avoid numerical instabilities returns = torch.tensor(returns) returns = (returns - returns.mean()) / (returns.std() + eps) # Line 7: policy_loss = [] for log_prob, disc_return in zip(saved_log_probs, returns): policy_loss.append(-log_prob * disc_return) policy_loss = torch.cat(policy_loss).sum() # Line 8: PyTorch prefers gradient descent optimizer.zero_grad() policy_loss.backward() optimizer.step() if i_episode % print_every == 0: print('Episode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque))) return scores<jupyter_output><empty_output><jupyter_text>Solution<jupyter_code>def reinforce(policy, optimizer, n_training_episodes, max_t, gamma, print_every): # Help us to calculate the score during the training scores_deque = deque(maxlen=100) scores = [] # Line 3 of pseudocode for i_episode in range(1, n_training_episodes+1): saved_log_probs = [] rewards = [] state = env.reset() # Line 4 of pseudocode for t in range(max_t): action, log_prob = policy.act(state) saved_log_probs.append(log_prob) state, reward, done, _ = env.step(action) rewards.append(reward) if done: break scores_deque.append(sum(rewards)) scores.append(sum(rewards)) # Line 6 of pseudocode: calculate the return returns = deque(maxlen=max_t) n_steps = len(rewards) # Compute the discounted returns at each timestep, # as # the sum of the gamma-discounted return at time t (G_t) + the reward at time t # # In O(N) time, where N is the number of time steps # (this definition of the discounted return G_t follows the definition of this quantity # shown at page 44 of Sutton&Barto 2017 2nd draft) # G_t = r_(t+1) + r_(t+2) + ... # Given this formulation, the returns at each timestep t can be computed # by re-using the computed future returns G_(t+1) to compute the current return G_t # G_t = r_(t+1) + gamma*G_(t+1) # G_(t-1) = r_t + gamma* G_t # (this follows a dynamic programming approach, with which we memorize solutions in order # to avoid computing them multiple times) # This is correct since the above is equivalent to (see also page 46 of Sutton&Barto 2017 2nd draft) # G_(t-1) = r_t + gamma*r_(t+1) + gamma*gamma*r_(t+2) + ... ## Given the above, we calculate the returns at timestep t as: # gamma[t] * return[t] + reward[t] # ## We compute this starting from the last timestep to the first, in order ## to employ the formula presented above and avoid redundant computations that would be needed ## if we were to do it from first to last. ## Hence, the queue "returns" will hold the returns in chronological order, from t=0 to t=n_steps ## thanks to the appendleft() function which allows to append to the position 0 in constant time O(1) ## a normal python list would instead require O(N) to do this. 
for t in range(n_steps)[::-1]: disc_return_t = (returns[0] if len(returns)>0 else 0) returns.appendleft( gamma*disc_return_t + rewards[t] ) ## standardization of the returns is employed to make training more stable eps = np.finfo(np.float32).eps.item() ## eps is the smallest representable float, which is # added to the standard deviation of the returns to avoid numerical instabilities returns = torch.tensor(returns) returns = (returns - returns.mean()) / (returns.std() + eps) # Line 7: policy_loss = [] for log_prob, disc_return in zip(saved_log_probs, returns): policy_loss.append(-log_prob * disc_return) policy_loss = torch.cat(policy_loss).sum() # Line 8: PyTorch prefers gradient descent optimizer.zero_grad() policy_loss.backward() optimizer.step() if i_episode % print_every == 0: print('Episode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque))) return scores<jupyter_output><empty_output><jupyter_text>Train it- We're now ready to train our agent.- But first, we define a variable containing all the training hyperparameters.- You can change the training parameters (and should 😉)<jupyter_code>cartpole_hyperparameters = { "h_size": 16, "n_training_episodes": 1000, "n_evaluation_episodes": 10, "max_t": 1000, "gamma": 1.0, "lr": 1e-2, "env_id": env_id, "state_space": s_size, "action_space": a_size, } # Create policy and place it to the device cartpole_policy = Policy(cartpole_hyperparameters["state_space"], cartpole_hyperparameters["action_space"], cartpole_hyperparameters["h_size"]).to(device) cartpole_optimizer = optim.Adam(cartpole_policy.parameters(), lr=cartpole_hyperparameters["lr"]) scores = reinforce(cartpole_policy, cartpole_optimizer, cartpole_hyperparameters["n_training_episodes"], cartpole_hyperparameters["max_t"], cartpole_hyperparameters["gamma"], 100)<jupyter_output><empty_output><jupyter_text>Define evaluation method 📝- Here we define the evaluation method that we're going to use to test our Reinforce agent.<jupyter_code>def evaluate_agent(env, max_steps, n_eval_episodes, policy): """ Evaluate the agent for ``n_eval_episodes`` episodes and returns average reward and std of reward. 
:param env: The evaluation environment :param n_eval_episodes: Number of episode to evaluate the agent :param policy: The Reinforce agent """ episode_rewards = [] for episode in range(n_eval_episodes): state = env.reset() step = 0 done = False total_rewards_ep = 0 for step in range(max_steps): action, _ = policy.act(state) new_state, reward, done, info = env.step(action) total_rewards_ep += reward if done: break state = new_state episode_rewards.append(total_rewards_ep) mean_reward = np.mean(episode_rewards) std_reward = np.std(episode_rewards) return mean_reward, std_reward<jupyter_output><empty_output><jupyter_text>Evaluate our agent 📈<jupyter_code>evaluate_agent(eval_env, cartpole_hyperparameters["max_t"], cartpole_hyperparameters["n_evaluation_episodes"], cartpole_policy)<jupyter_output><empty_output><jupyter_text>Publish our trained model on the Hub 🔥Now that we saw we got good results after the training, we can publish our trained model on the hub 🤗 with one line of code.Here's an example of a Model Card: Push to the Hub Do not modify this code<jupyter_code>from huggingface_hub import HfApi, snapshot_download from huggingface_hub.repocard import metadata_eval_result, metadata_save from pathlib import Path import datetime import json import imageio import tempfile import os def record_video(env, policy, out_directory, fps=30): """ Generate a replay video of the agent :param env :param Qtable: Qtable of our agent :param out_directory :param fps: how many frame per seconds (with taxi-v3 and frozenlake-v1 we use 1) """ images = [] done = False state = env.reset() img = env.render(mode='rgb_array') images.append(img) while not done: # Take the action (index) that have the maximum expected future reward given that state action, _ = policy.act(state) state, reward, done, info = env.step(action) # We directly put next_state = state for recording logic img = env.render(mode='rgb_array') images.append(img) imageio.mimsave(out_directory, [np.array(img) for i, img in enumerate(images)], fps=fps) def push_to_hub(repo_id, model, hyperparameters, eval_env, video_fps=30 ): """ Evaluate, Generate a video and Upload a model to Hugging Face Hub. 
This method does the complete pipeline: - It evaluates the model - It generates the model card - It generates a replay video of the agent - It pushes everything to the Hub :param repo_id: repo_id: id of the model repository from the Hugging Face Hub :param model: the pytorch model we want to save :param hyperparameters: training hyperparameters :param eval_env: evaluation environment :param video_fps: how many frame per seconds to record our video replay """ _, repo_name = repo_id.split("/") api = HfApi() # Step 1: Create the repo repo_url = api.create_repo( repo_id=repo_id, exist_ok=True, ) with tempfile.TemporaryDirectory() as tmpdirname: local_directory = Path(tmpdirname) # Step 2: Save the model torch.save(model, local_directory / "model.pt") # Step 3: Save the hyperparameters to JSON with open(local_directory / "hyperparameters.json", "w") as outfile: json.dump(hyperparameters, outfile) # Step 4: Evaluate the model and build JSON mean_reward, std_reward = evaluate_agent(eval_env, hyperparameters["max_t"], hyperparameters["n_evaluation_episodes"], model) # Get datetime eval_datetime = datetime.datetime.now() eval_form_datetime = eval_datetime.isoformat() evaluate_data = { "env_id": hyperparameters["env_id"], "mean_reward": mean_reward, "n_evaluation_episodes": hyperparameters["n_evaluation_episodes"], "eval_datetime": eval_form_datetime, } # Write a JSON file with open(local_directory / "results.json", "w") as outfile: json.dump(evaluate_data, outfile) # Step 5: Create the model card env_name = hyperparameters["env_id"] metadata = {} metadata["tags"] = [ env_name, "reinforce", "reinforcement-learning", "custom-implementation", "deep-rl-class" ] # Add metrics eval = metadata_eval_result( model_pretty_name=repo_name, task_pretty_name="reinforcement-learning", task_id="reinforcement-learning", metrics_pretty_name="mean_reward", metrics_id="mean_reward", metrics_value=f"{mean_reward:.2f} +/- {std_reward:.2f}", dataset_pretty_name=env_name, dataset_id=env_name, ) # Merges both dictionaries metadata = {**metadata, **eval} model_card = f""" # **Reinforce** Agent playing **{env_id}** This is a trained model of a **Reinforce** agent playing **{env_id}** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction """ readme_path = local_directory / "README.md" readme = "" if readme_path.exists(): with readme_path.open("r", encoding="utf8") as f: readme = f.read() else: readme = model_card with readme_path.open("w", encoding="utf-8") as f: f.write(readme) # Save our metrics to Readme metadata metadata_save(readme_path, metadata) # Step 6: Record a video video_path = local_directory / "replay.mp4" record_video(env, model, video_path, video_fps) # Step 7. Push everything to the Hub api.upload_folder( repo_id=repo_id, folder_path=local_directory, path_in_repo=".", ) print(f"Your model is pushed to the Hub. 
You can view your model here: {repo_url}")<jupyter_output><empty_output><jupyter_text>By using `push_to_hub` **you evaluate, record a replay, generate a model card of your agent and push it to the Hub**.This way:- You can **showcase your work** 🔥- You can **visualize your agent playing** 👀- You can **share with the community an agent that others can use** 💾- You can **access a leaderboard 🏆 to see how well your agent is performing compared to your classmates** 👉 https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard To be able to share your model with the community, there are three more steps to follow:1️⃣ (If it's not already done) create an account on HF ➡ https://huggingface.co/join2️⃣ Sign in and store your authentication token from the Hugging Face website.- Create a new token (https://huggingface.co/settings/tokens) **with write role**<jupyter_code>notebook_login()<jupyter_output><empty_output><jupyter_text>If you don't want to use a Google Colab or a Jupyter Notebook, you need to use this command instead: `huggingface-cli login` (or `login`) 3️⃣ We're now ready to push our trained agent to the 🤗 Hub 🔥 using the `push_to_hub()` function<jupyter_code>repo_id = "" #TODO Define your repo id {username/Reinforce-{model-id}} push_to_hub(repo_id, cartpole_policy, # The model we want to save cartpole_hyperparameters, # Hyperparameters eval_env, # Evaluation environment video_fps=30 )<jupyter_output><empty_output><jupyter_text>Now that we have tested the robustness of our implementation, let's try a more complex environment: PixelCopter 🚁 Second agent: PixelCopter 🚁 Study the PixelCopter environment 👀- [The Environment documentation](https://pygame-learning-environment.readthedocs.io/en/latest/user/games/pixelcopter.html)<jupyter_code>env_id = "Pixelcopter-PLE-v0" env = gym.make(env_id) eval_env = gym.make(env_id) s_size = env.observation_space.shape[0] a_size = env.action_space.n print("_____OBSERVATION SPACE_____ \n") print("The State Space is: ", s_size) print("Sample observation", env.observation_space.sample()) # Get a random observation print("\n _____ACTION SPACE_____ \n") print("The Action Space is: ", a_size) print("Action Space Sample", env.action_space.sample()) # Take a random action<jupyter_output><empty_output><jupyter_text>The observation space (7) 👀:- player y position- player velocity- player distance to floor- player distance to ceiling- next block x distance to player- next block's top y location- next block's bottom y locationThe action space (2) 🎮:- Up (press accelerator) - Do nothing (don't press accelerator) The reward function 💰: - For each vertical block it passes through, it gains a positive reward of +1. Each time a terminal state is reached, it receives a negative reward of -1.
Define the new Policy 🧠- We need to have a deeper neural network since the environment is more complex<jupyter_code>class Policy(nn.Module): def __init__(self, s_size, a_size, h_size): super(Policy, self).__init__() # Define the three layers here def forward(self, x): # Define the forward process here return F.softmax(x, dim=1) def act(self, state): state = torch.from_numpy(state).float().unsqueeze(0).to(device) probs = self.forward(state).cpu() m = Categorical(probs) action = m.sample() return action.item(), m.log_prob(action)<jupyter_output><empty_output><jupyter_text>Solution<jupyter_code>class Policy(nn.Module): def __init__(self, s_size, a_size, h_size): super(Policy, self).__init__() self.fc1 = nn.Linear(s_size, h_size) self.fc2 = nn.Linear(h_size, h_size*2) self.fc3 = nn.Linear(h_size*2, a_size) def forward(self, x): x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return F.softmax(x, dim=1) def act(self, state): state = torch.from_numpy(state).float().unsqueeze(0).to(device) probs = self.forward(state).cpu() m = Categorical(probs) action = m.sample() return action.item(), m.log_prob(action)<jupyter_output><empty_output><jupyter_text>Define the hyperparameters ⚙️- Because this environment is more complex.- Especially for the hidden size, we need more neurons.<jupyter_code>pixelcopter_hyperparameters = { "h_size": 64, "n_training_episodes": 50000, "n_evaluation_episodes": 10, "max_t": 10000, "gamma": 0.99, "lr": 1e-4, "env_id": env_id, "state_space": s_size, "action_space": a_size, }<jupyter_output><empty_output><jupyter_text>Train it- We're now ready to train our agent 🔥.<jupyter_code># Create policy and place it to the device # torch.manual_seed(50) pixelcopter_policy = Policy(pixelcopter_hyperparameters["state_space"], pixelcopter_hyperparameters["action_space"], pixelcopter_hyperparameters["h_size"]).to(device) pixelcopter_optimizer = optim.Adam(pixelcopter_policy.parameters(), lr=pixelcopter_hyperparameters["lr"]) scores = reinforce(pixelcopter_policy, pixelcopter_optimizer, pixelcopter_hyperparameters["n_training_episodes"], pixelcopter_hyperparameters["max_t"], pixelcopter_hyperparameters["gamma"], 1000)<jupyter_output><empty_output><jupyter_text>Publish our trained model on the Hub 🔥<jupyter_code>repo_id = "" #TODO Define your repo id {username/Reinforce-{model-id}} push_to_hub(repo_id, pixelcopter_policy, # The model we want to save pixelcopter_hyperparameters, # Hyperparameters eval_env, # Evaluation environment video_fps=30 )<jupyter_output><empty_output>
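<jupyter_text>(Optional) Evaluate your PixelCopter agent locally 📊- `push_to_hub` already evaluates your agent for you, but you can also call the `evaluate_agent` helper defined above directly, as a quick sanity check of your score before pushing:<jupyter_code>mean_reward, std_reward = evaluate_agent(eval_env, pixelcopter_hyperparameters["max_t"], pixelcopter_hyperparameters["n_evaluation_episodes"], pixelcopter_policy) print(f"Mean reward: {mean_reward:.2f} +/- {std_reward:.2f}")<jupyter_output><empty_output>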
deep-rl-class/notebooks/unit4/unit4.ipynb/0
{ "file_path": "deep-rl-class/notebooks/unit4/unit4.ipynb", "repo_id": "deep-rl-class", "token_count": 12740 }
79
# The Exploration/Exploitation trade-off [[exp-exp-tradeoff]] Finally, before looking at the different methods to solve Reinforcement Learning problems, we must cover one more very important topic: *the exploration/exploitation trade-off.* - *Exploration* is exploring the environment by trying random actions in order to **find more information about the environment.** - *Exploitation* is **exploiting known information to maximize the reward.** Remember, the goal of our RL agent is to maximize the expected cumulative reward. However, **we can fall into a common trap**. Let’s take an example: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/exp_1.jpg" alt="Exploration" width="100%"> In this game, our mouse can have an **infinite amount of small cheese** (+1 each). But at the top of the maze, there is a gigantic sum of cheese (+1000). However, if we only focus on exploitation, our agent will never reach the gigantic sum of cheese. Instead, it will only exploit **the nearest source of rewards,** even if this source is small (exploitation). But if our agent does a little bit of exploration, it can **discover the big reward** (the pile of big cheese). This is what we call the exploration/exploitation trade-off. We need to balance how much we **explore the environment** and how much we **exploit what we know about the environment.** Therefore, we must **define a rule that helps to handle this trade-off**. We’ll see the different ways to handle it in the future units. If it’s still confusing, **think of a real problem: the choice of picking a restaurant:** <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/exp_2.jpg" alt="Exploration"> <figcaption>Source: <a href="https://inst.eecs.berkeley.edu/~cs188/sp20/assets/lecture/lec15_6up.pdf"> Berkeley AI Course</a> </figcaption> </figure> - *Exploitation*: You go to the same restaurant that you know is good every day and **take the risk of missing another, better restaurant.** - *Exploration*: You try restaurants you have never been to before, with the risk of having a bad experience **but also the possibility of a fantastic experience.** To recap: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/expexpltradeoff.jpg" alt="Exploration Exploitation Tradeoff" width="100%">
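To make the trade-off concrete, here is a minimal sketch of one common rule for handling it, *epsilon-greedy* (we'll study such rules properly in the next units): with probability epsilon the agent explores by picking a random action, otherwise it exploits by picking the action with the highest estimated value.

```python
import random

def epsilon_greedy_action(q_values, epsilon=0.1):
    """Pick a random action with probability epsilon (exploration),
    otherwise the action with the highest estimated value (exploitation)."""
    if random.random() < epsilon:
        return random.randrange(len(q_values))                        # explore
    return max(range(len(q_values)), key=lambda a: q_values[a])       # exploit
```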
deep-rl-class/units/en/unit1/exp-exp-tradeoff.mdx/0
{ "file_path": "deep-rl-class/units/en/unit1/exp-exp-tradeoff.mdx", "repo_id": "deep-rl-class", "token_count": 699 }
80
# Monte Carlo vs Temporal Difference Learning [[mc-vs-td]] The last thing we need to discuss before diving into Q-Learning is the two learning strategies. Remember that an RL agent **learns by interacting with its environment.** The idea is that **given the experience and the received reward, the agent will update its value function or policy.** Monte Carlo and Temporal Difference Learning are two different **strategies on how to train our value function or our policy function.** Both of them **use experience to solve the RL problem.** On one hand, Monte Carlo uses **an entire episode of experience before learning.** On the other hand, Temporal Difference uses **only a step ( \\(S_t, A_t, R_{t+1}, S_{t+1}\\) ) to learn.** We'll explain both of them **using a value-based method example.** ## Monte Carlo: learning at the end of the episode [[monte-carlo]] Monte Carlo waits until the end of the episode, calculates \\(G_t\\) (return) and uses it as **a target for updating \\(V(S_t)\\).** So it requires a **complete episode of interaction before updating our value function.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/monte-carlo-approach.jpg" alt="Monte Carlo"/> If we take an example: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/MC-2.jpg" alt="Monte Carlo"/> - We always start the episode **at the same starting point.** - **The agent takes actions using the policy**. For instance, using an Epsilon Greedy Strategy, a policy that alternates between exploration (random actions) and exploitation. - We get **the reward and the next state.** - We terminate the episode if the cat eats the mouse or if the mouse moves > 10 steps. - At the end of the episode, **we have a list of State, Actions, Rewards, and Next States tuples** For instance [[State tile 3 bottom, Go Left, +1, State tile 2 bottom], [State tile 2 bottom, Go Left, +0, State tile 1 bottom]...] - **The agent will sum the total rewards \\(G_t\\)** (to see how well it did). - It will then **update \\(V(s_t)\\) based on the formula** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/MC-3.jpg" alt="Monte Carlo"/> - Then **start a new game with this new knowledge** By running more and more episodes, **the agent will learn to play better and better.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/MC-3p.jpg" alt="Monte Carlo"/> For instance, if we train a state-value function using Monte Carlo: - We initialize our value function **so that it returns 0 value for each state** - Our learning rate (lr) is 0.1 and our discount rate is 1 (= no discount) - Our mouse **explores the environment and takes random actions** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/MC-4.jpg" alt="Monte Carlo"/> - The mouse made more than 10 steps, so the episode ends . 
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/MC-4p.jpg" alt="Monte Carlo"/> - We have a list of state, action, rewards, next_state, **we need to calculate the return \\(G{t=0}\\)** \\(G_t = R_{t+1} + R_{t+2} + R_{t+3} ...\\) (for simplicity, we don't discount the rewards) \\(G_0 = R_{1} + R_{2} + R_{3}…\\) \\(G_0 = 1 + 0 + 0 + 0 + 0 + 0 + 1 + 1 + 0 + 0\\) \\(G_0 = 3\\) - We can now compute the **new** \\(V(S_0)\\): <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/MC-5.jpg" alt="Monte Carlo"/> \\(V(S_0) = V(S_0) + lr * [G_0 — V(S_0)]\\) \\(V(S_0) = 0 + 0.1 * [3 – 0]\\) \\(V(S_0) = 0.3\\) <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/MC-5p.jpg" alt="Monte Carlo"/> ## Temporal Difference Learning: learning at each step [[td-learning]] **Temporal Difference, on the other hand, waits for only one interaction (one step) \\(S_{t+1}\\)** to form a TD target and update \\(V(S_t)\\) using \\(R_{t+1}\\) and \\( \gamma * V(S_{t+1})\\). The idea with **TD is to update the \\(V(S_t)\\) at each step.** But because we didn't experience an entire episode, we don't have \\(G_t\\) (expected return). Instead, **we estimate \\(G_t\\) by adding \\(R_{t+1}\\) and the discounted value of the next state.** This is called bootstrapping. It's called this **because TD bases its update in part on an existing estimate \\(V(S_{t+1})\\) and not a complete sample \\(G_t\\).** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/TD-1.jpg" alt="Temporal Difference"/> This method is called TD(0) or **one-step TD (update the value function after any individual step).** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/TD-1p.jpg" alt="Temporal Difference"/> If we take the same example, <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/TD-2.jpg" alt="Temporal Difference"/> - We initialize our value function so that it returns 0 value for each state. - Our learning rate (lr) is 0.1, and our discount rate is 1 (no discount). - Our mouse begins to explore the environment and takes a random action: **going to the left** - It gets a reward \\(R_{t+1} = 1\\) since **it eats a piece of cheese** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/TD-2p.jpg" alt="Temporal Difference"/> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/TD-3.jpg" alt="Temporal Difference"/> We can now update \\(V(S_0)\\): New \\(V(S_0) = V(S_0) + lr * [R_1 + \gamma * V(S_1) - V(S_0)]\\) New \\(V(S_0) = 0 + 0.1 * [1 + 1 * 0–0]\\) New \\(V(S_0) = 0.1\\) So we just updated our value function for State 0. 
Now we **continue to interact with this environment with our updated value function.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/TD-3p.jpg" alt="Temporal Difference"/> To summarize: - With *Monte Carlo*, we update the value function from a complete episode, and so we **use the actual accurate discounted return of this episode.** - With *TD Learning*, we update the value function from a step, and we replace \\(G_t\\), which we don't know, with **an estimated return called the TD target.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Summary.jpg" alt="Summary"/>
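As a small illustrative sketch (using the same learning rate of 0.1 and no discounting as in the examples above; the function names are just for illustration), the two update rules look like this in Python:

```python
def mc_update(V, state, G_t, lr=0.1):
    # Monte Carlo: wait until the end of the episode, then move V(S_t) toward the actual return G_t
    V[state] = V[state] + lr * (G_t - V[state])

def td_update(V, state, reward, next_state, lr=0.1, gamma=1.0):
    # TD(0): after a single step, move V(S_t) toward the TD target R_{t+1} + gamma * V(S_{t+1})
    td_target = reward + gamma * V[next_state]
    V[state] = V[state] + lr * (td_target - V[state])
```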
deep-rl-class/units/en/unit2/mc-vs-td.mdx/0
{ "file_path": "deep-rl-class/units/en/unit2/mc-vs-td.mdx", "repo_id": "deep-rl-class", "token_count": 2316 }
81
# Deep Q-Learning [[deep-q-learning]] <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/thumbnail.jpg" alt="Unit 3 thumbnail" width="100%"> In the last unit, we learned our first reinforcement learning algorithm: Q-Learning, **implemented it from scratch**, and trained it in two environments, FrozenLake-v1 ☃️ and Taxi-v3 🚕. We got excellent results with this simple algorithm, but these environments were relatively simple because the **state space was discrete and small** (16 different states for FrozenLake-v1 and 500 for Taxi-v3). For comparison, the state space in Atari games can **contain \\(10^{9}\\) to \\(10^{11}\\) states**. But as we'll see, producing and updating a **Q-table can become ineffective in large state space environments.** So in this unit, **we'll study our first Deep Reinforcement Learning agent**: Deep Q-Learning. Instead of using a Q-table, Deep Q-Learning uses a Neural Network that takes a state and approximates Q-values for each action based on that state. And **we'll train it to play Space Invaders and other Atari environments using [RL-Zoo](https://github.com/DLR-RM/rl-baselines3-zoo)**, a training framework for RL using Stable-Baselines that provides scripts for training, evaluating agents, tuning hyperparameters, plotting results, and recording videos. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/atari-envs.gif" alt="Environments"/> So let’s get started! 🚀
deep-rl-class/units/en/unit3/introduction.mdx/0
{ "file_path": "deep-rl-class/units/en/unit3/introduction.mdx", "repo_id": "deep-rl-class", "token_count": 437 }
82
# How do Unity ML-Agents work? [[how-mlagents-works]] Before training our agent, we need to understand **what ML-Agents is and how it works**. ## What is Unity ML-Agents? [[what-is-mlagents]] [Unity ML-Agents](https://github.com/Unity-Technologies/ml-agents) is a toolkit for the game engine Unity that **allows us to create environments using Unity or use pre-made environments to train our agents**. It’s developed by [Unity Technologies](https://unity.com/), the developers of Unity, one of the most famous Game Engines used by the creators of Firewatch, Cuphead, and Cities: Skylines. <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit5/firewatch.jpeg" alt="Firewatch"/> <figcaption>Firewatch was made with Unity</figcaption> </figure> ## The six components [[six-components]] With Unity ML-Agents, you have six essential components: <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit5/mlagents-1.png" alt="MLAgents"/> <figcaption>Source: <a href="https://unity-technologies.github.io/ml-agents/">Unity ML-Agents Documentation</a> </figcaption> </figure> - The first is the *Learning Environment*, which contains **the Unity scene (the environment) and the environment elements** (game characters). - The second is the *Python Low-level API*, which contains **the low-level Python interface for interacting and manipulating the environment**. It’s the API we use to launch the training. - Then, we have the *External Communicator* that **connects the Learning Environment (made with C#) with the low level Python API (Python)**. - The *Python trainers*: the **Reinforcement algorithms made with PyTorch (PPO, SAC…)**. - The *Gym wrapper*: to encapsulate the RL environment in a gym wrapper. - The *PettingZoo wrapper*: PettingZoo is the multi-agents version of the gym wrapper. ## Inside the Learning Component [[inside-learning-component]] Inside the Learning Component, we have **two important elements**: - The first is the *agent component*, the actor of the scene. We’ll **train the agent by optimizing its policy** (which will tell us what action to take in each state). The policy is called the *Brain*. - Finally, there is the *Academy*. This component **orchestrates agents and their decision-making processes**. Think of this Academy as a teacher who handles Python API requests. To better understand its role, let’s remember the RL process. This can be modeled as a loop that works like this: <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/RL_process.jpg" alt="The RL process" width="100%"> <figcaption>The RL Process: a loop of state, action, reward and next state</figcaption> <figcaption>Source: <a href="http://incompleteideas.net/book/RLbook2020.pdf">Reinforcement Learning: An Introduction, Richard Sutton and Andrew G. Barto</a></figcaption> </figure> Now, let’s imagine an agent learning to play a platform game. The RL process looks like this: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/RL_process_game.jpg" alt="The RL process" width="100%"> - Our Agent receives **state \\(S_0\\)** from the **Environment** — we receive the first frame of our game (Environment). - Based on that **state \\(S_0\\),** the Agent takes **action \\(A_0\\)** — our Agent will move to the right. - The environment goes to a **new** **state \\(S_1\\)** — new frame. 
- The environment gives some **reward \\(R_1\\)** to the Agent — we’re not dead *(Positive Reward +1)*. This RL loop outputs a sequence of **state, action, reward and next state.** The goal of the agent is to **maximize the expected cumulative reward**. The Academy will be the one that will **send the order to our Agents and ensure that agents are in sync**: - Collect Observations - Select your action using your policy - Take the Action - Reset if you reached the max step or if you’re done. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit5/academy.png" alt="The MLAgents Academy" width="100%"> Now that we understand how ML-Agents works, **we’re ready to train our agents.**
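Before we do, here is a schematic version of that loop written as gym-style Python, just for intuition (it is not the actual ML-Agents C# or Python API — ML-Agents exposes Unity scenes behind a similar interface through its gym wrapper):

```python
import gym

env = gym.make("CartPole-v1")           # stand-in environment for a Unity scene
state = env.reset()                     # collect the first observation
done = False
while not done:
    action = env.action_space.sample()  # placeholder for "select your action using your policy"
    state, reward, done, info = env.step(action)  # take the action, receive reward and next state
env.close()
```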
deep-rl-class/units/en/unit5/how-mlagents-works.mdx/0
{ "file_path": "deep-rl-class/units/en/unit5/how-mlagents-works.mdx", "repo_id": "deep-rl-class", "token_count": 1276 }
83
# Introduction [[introduction]] <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit0/thumbnail.png" alt="Thumbnail"/> Since the beginning of this course, we have learned to train agents in a *single-agent system* where our agent was alone in its environment: it was **not cooperating or collaborating with other agents**. This worked great, and the single-agent system is useful for many applications. <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/patchwork.jpg" alt="Patchwork"/> <figcaption> A patchwork of all the environments you’ve trained your agents on since the beginning of the course </figcaption> </figure> But, as humans, **we live in a multi-agent world**. Our intelligence comes from interaction with other agents. And so, our **goal is to create agents that can interact with other humans and other agents**. Consequently, we must study how to train deep reinforcement learning agents in a *multi-agent system* to build robust agents that can adapt, collaborate, or compete. So today we’re going to **learn the basics of the fascinating topic of multi-agent reinforcement learning (MARL)**. And the most exciting part is that, during this unit, you’re going to train your first agents in a multi-agent system: **a 2vs2 soccer team that needs to beat the opponent team**. And you’re going to participate in the **AI vs. AI challenge**, where your trained agent will compete against other classmates’ agents every day and be ranked on a [new leaderboard](https://huggingface.co/spaces/huggingface-projects/AIvsAI-SoccerTwos). <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/soccertwos.gif" alt="SoccerTwos"/> <figcaption>This environment was made by the <a href="https://github.com/Unity-Technologies/ml-agents">Unity MLAgents Team</a></figcaption> </figure> So let’s get started!
deep-rl-class/units/en/unit7/introduction.mdx/0
{ "file_path": "deep-rl-class/units/en/unit7/introduction.mdx", "repo_id": "deep-rl-class", "token_count": 574 }
84
# Introduction [[introduction]] In this bonus unit, we'll reinforce what we learned in the first unit by teaching Huggy the Dog to fetch the stick and then [play with him directly in your browser](https://huggingface.co/spaces/ThomasSimonini/Huggy) 🐶 <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit2/thumbnail.png" alt="Unit bonus 1 thumbnail" width="100%"> So let's get started 🚀
deep-rl-class/units/en/unitbonus1/introduction.mdx/0
{ "file_path": "deep-rl-class/units/en/unitbonus1/introduction.mdx", "repo_id": "deep-rl-class", "token_count": 138 }
85
# Brief introduction to RL documentation In this advanced topic, we address the question: **how should we monitor and keep track of powerful reinforcement learning agents that we are training in the real world and interfacing with humans?** As machine learning systems have increasingly impacted modern life, the **call for the documentation of these systems has grown**. Such documentation can cover aspects such as the training data used — where it is stored, when it was collected, who was involved, etc. — or the model optimization framework — the architecture, evaluation metrics, relevant papers, etc. — and more. Today, model cards and datasheets are becoming increasingly available. For example, on the Hub (see documentation [here](https://huggingface.co/docs/hub/model-cards)). If you click on a [popular model on the Hub](https://huggingface.co/models), you can learn about its creation process. These model and data specific logs are designed to be completed when the model or dataset are created, leaving them to go un-updated when these models are built into evolving systems in the future. ​ ## Motivating Reward Reports Reinforcement learning systems are fundamentally designed to optimize based on measurements of reward and time. While the notion of a reward function can be mapped nicely to many well-understood fields of supervised learning (via a loss function), understanding of how machine learning systems evolve over time is limited. To that end, the authors introduce [*Reward Reports for Reinforcement Learning*](https://www.notion.so/Brief-introduction-to-RL-documentation-b8cbda5a6f5242338e0756e6bef72af4) (the pithy naming is designed to mirror the popular papers *Model Cards for Model Reporting* and *Datasheets for Datasets*). The goal is to propose a type of documentation focused on the **human factors of reward** and **time-varying feedback systems**. Building on the documentation frameworks for [model cards](https://arxiv.org/abs/1810.03993) and [datasheets](https://arxiv.org/abs/1803.09010) proposed by Mitchell et al. and Gebru et al., we argue the need for Reward Reports for AI systems. **Reward Reports** are living documents for proposed RL deployments that demarcate design choices. However, many questions remain about the applicability of this framework to different RL applications, roadblocks to system interpretability, and the resonances between deployed supervised machine learning systems and the sequential decision-making utilized in RL. At a minimum, Reward Reports are an opportunity for RL practitioners to deliberate on these questions and begin the work of deciding how to resolve them in practice. ​ ## Capturing temporal behavior with documentation The core piece specific to documentation designed for RL and feedback-driven ML systems is a *change-log*. The change-log updates information from the designer (changed training parameters, data, etc.) along with noticed changes from the user (harmful behavior, unexpected responses, etc.). The change log is accompanied by update triggers that encourage monitoring these effects. ## Contributing Some of the most impactful RL-driven systems are multi-stakeholder in nature and behind the closed doors of private corporations. These corporations are largely without regulation, so the burden of documentation falls on the public. If you are interested in contributing, we are building Reward Reports for popular machine learning systems on a public record on [GitHub](https://github.com/RewardReports/reward-reports). 
For further reading, you can visit the Reward Reports [paper](https://arxiv.org/abs/2204.10817) or look at [an example report](https://github.com/RewardReports/reward-reports/tree/main/examples). ## Author This section was written by <a href="https://twitter.com/natolambert"> Nathan Lambert </a>
deep-rl-class/units/en/unitbonus3/rl-documentation.mdx/0
{ "file_path": "deep-rl-class/units/en/unitbonus3/rl-documentation.mdx", "repo_id": "deep-rl-class", "token_count": 886 }
86
import os import sys import torch from diffusers import ( AutoPipelineForImage2Image, AutoPipelineForInpainting, AutoPipelineForText2Image, ControlNetModel, LCMScheduler, StableDiffusionAdapterPipeline, StableDiffusionControlNetPipeline, StableDiffusionXLAdapterPipeline, StableDiffusionXLControlNetPipeline, T2IAdapter, WuerstchenCombinedPipeline, ) from diffusers.utils import load_image sys.path.append(".") from utils import ( # noqa: E402 BASE_PATH, PROMPT, BenchmarkInfo, benchmark_fn, bytes_to_giga_bytes, flush, generate_csv_dict, write_to_csv, ) RESOLUTION_MAPPING = { "runwayml/stable-diffusion-v1-5": (512, 512), "lllyasviel/sd-controlnet-canny": (512, 512), "diffusers/controlnet-canny-sdxl-1.0": (1024, 1024), "TencentARC/t2iadapter_canny_sd14v1": (512, 512), "TencentARC/t2i-adapter-canny-sdxl-1.0": (1024, 1024), "stabilityai/stable-diffusion-2-1": (768, 768), "stabilityai/stable-diffusion-xl-base-1.0": (1024, 1024), "stabilityai/stable-diffusion-xl-refiner-1.0": (1024, 1024), "stabilityai/sdxl-turbo": (512, 512), } class BaseBenchmak: pipeline_class = None def __init__(self, args): super().__init__() def run_inference(self, args): raise NotImplementedError def benchmark(self, args): raise NotImplementedError def get_result_filepath(self, args): pipeline_class_name = str(self.pipe.__class__.__name__) name = ( args.ckpt.replace("/", "_") + "_" + pipeline_class_name + f"-bs@{args.batch_size}-steps@{args.num_inference_steps}-mco@{args.model_cpu_offload}-compile@{args.run_compile}.csv" ) filepath = os.path.join(BASE_PATH, name) return filepath class TextToImageBenchmark(BaseBenchmak): pipeline_class = AutoPipelineForText2Image def __init__(self, args): pipe = self.pipeline_class.from_pretrained(args.ckpt, torch_dtype=torch.float16) pipe = pipe.to("cuda") if args.run_compile: if not isinstance(pipe, WuerstchenCombinedPipeline): pipe.unet.to(memory_format=torch.channels_last) print("Run torch compile") pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) if hasattr(pipe, "movq") and getattr(pipe, "movq", None) is not None: pipe.movq.to(memory_format=torch.channels_last) pipe.movq = torch.compile(pipe.movq, mode="reduce-overhead", fullgraph=True) else: print("Run torch compile") pipe.decoder = torch.compile(pipe.decoder, mode="reduce-overhead", fullgraph=True) pipe.vqgan = torch.compile(pipe.vqgan, mode="reduce-overhead", fullgraph=True) pipe.set_progress_bar_config(disable=True) self.pipe = pipe def run_inference(self, pipe, args): _ = pipe( prompt=PROMPT, num_inference_steps=args.num_inference_steps, num_images_per_prompt=args.batch_size, ) def benchmark(self, args): flush() print(f"[INFO] {self.pipe.__class__.__name__}: Running benchmark with: {vars(args)}\n") time = benchmark_fn(self.run_inference, self.pipe, args) # in seconds. memory = bytes_to_giga_bytes(torch.cuda.max_memory_allocated()) # in GBs. 
benchmark_info = BenchmarkInfo(time=time, memory=memory) pipeline_class_name = str(self.pipe.__class__.__name__) flush() csv_dict = generate_csv_dict( pipeline_cls=pipeline_class_name, ckpt=args.ckpt, args=args, benchmark_info=benchmark_info ) filepath = self.get_result_filepath(args) write_to_csv(filepath, csv_dict) print(f"Logs written to: {filepath}") flush() class TurboTextToImageBenchmark(TextToImageBenchmark): def __init__(self, args): super().__init__(args) def run_inference(self, pipe, args): _ = pipe( prompt=PROMPT, num_inference_steps=args.num_inference_steps, num_images_per_prompt=args.batch_size, guidance_scale=0.0, ) class LCMLoRATextToImageBenchmark(TextToImageBenchmark): lora_id = "latent-consistency/lcm-lora-sdxl" def __init__(self, args): super().__init__(args) self.pipe.load_lora_weights(self.lora_id) self.pipe.fuse_lora() self.pipe.scheduler = LCMScheduler.from_config(self.pipe.scheduler.config) def get_result_filepath(self, args): pipeline_class_name = str(self.pipe.__class__.__name__) name = ( self.lora_id.replace("/", "_") + "_" + pipeline_class_name + f"-bs@{args.batch_size}-steps@{args.num_inference_steps}-mco@{args.model_cpu_offload}-compile@{args.run_compile}.csv" ) filepath = os.path.join(BASE_PATH, name) return filepath def run_inference(self, pipe, args): _ = pipe( prompt=PROMPT, num_inference_steps=args.num_inference_steps, num_images_per_prompt=args.batch_size, guidance_scale=1.0, ) def benchmark(self, args): flush() print(f"[INFO] {self.pipe.__class__.__name__}: Running benchmark with: {vars(args)}\n") time = benchmark_fn(self.run_inference, self.pipe, args) # in seconds. memory = bytes_to_giga_bytes(torch.cuda.max_memory_allocated()) # in GBs. benchmark_info = BenchmarkInfo(time=time, memory=memory) pipeline_class_name = str(self.pipe.__class__.__name__) flush() csv_dict = generate_csv_dict( pipeline_cls=pipeline_class_name, ckpt=self.lora_id, args=args, benchmark_info=benchmark_info ) filepath = self.get_result_filepath(args) write_to_csv(filepath, csv_dict) print(f"Logs written to: {filepath}") flush() class ImageToImageBenchmark(TextToImageBenchmark): pipeline_class = AutoPipelineForImage2Image url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/benchmarking/1665_Girl_with_a_Pearl_Earring.jpg" image = load_image(url).convert("RGB") def __init__(self, args): super().__init__(args) self.image = self.image.resize(RESOLUTION_MAPPING[args.ckpt]) def run_inference(self, pipe, args): _ = pipe( prompt=PROMPT, image=self.image, num_inference_steps=args.num_inference_steps, num_images_per_prompt=args.batch_size, ) class TurboImageToImageBenchmark(ImageToImageBenchmark): def __init__(self, args): super().__init__(args) def run_inference(self, pipe, args): _ = pipe( prompt=PROMPT, image=self.image, num_inference_steps=args.num_inference_steps, num_images_per_prompt=args.batch_size, guidance_scale=0.0, strength=0.5, ) class InpaintingBenchmark(ImageToImageBenchmark): pipeline_class = AutoPipelineForInpainting mask_url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/benchmarking/overture-creations-5sI6fQgYIuo_mask.png" mask = load_image(mask_url).convert("RGB") def __init__(self, args): super().__init__(args) self.image = self.image.resize(RESOLUTION_MAPPING[args.ckpt]) self.mask = self.mask.resize(RESOLUTION_MAPPING[args.ckpt]) def run_inference(self, pipe, args): _ = pipe( prompt=PROMPT, image=self.image, mask_image=self.mask, num_inference_steps=args.num_inference_steps, num_images_per_prompt=args.batch_size, ) class 
ControlNetBenchmark(TextToImageBenchmark): pipeline_class = StableDiffusionControlNetPipeline aux_network_class = ControlNetModel root_ckpt = "runwayml/stable-diffusion-v1-5" url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/benchmarking/canny_image_condition.png" image = load_image(url).convert("RGB") def __init__(self, args): aux_network = self.aux_network_class.from_pretrained(args.ckpt, torch_dtype=torch.float16) pipe = self.pipeline_class.from_pretrained(self.root_ckpt, controlnet=aux_network, torch_dtype=torch.float16) pipe = pipe.to("cuda") pipe.set_progress_bar_config(disable=True) self.pipe = pipe if args.run_compile: pipe.unet.to(memory_format=torch.channels_last) pipe.controlnet.to(memory_format=torch.channels_last) print("Run torch compile") pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) pipe.controlnet = torch.compile(pipe.controlnet, mode="reduce-overhead", fullgraph=True) self.image = self.image.resize(RESOLUTION_MAPPING[args.ckpt]) def run_inference(self, pipe, args): _ = pipe( prompt=PROMPT, image=self.image, num_inference_steps=args.num_inference_steps, num_images_per_prompt=args.batch_size, ) class ControlNetSDXLBenchmark(ControlNetBenchmark): pipeline_class = StableDiffusionXLControlNetPipeline root_ckpt = "stabilityai/stable-diffusion-xl-base-1.0" def __init__(self, args): super().__init__(args) class T2IAdapterBenchmark(ControlNetBenchmark): pipeline_class = StableDiffusionAdapterPipeline aux_network_class = T2IAdapter root_ckpt = "CompVis/stable-diffusion-v1-4" url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/benchmarking/canny_for_adapter.png" image = load_image(url).convert("L") def __init__(self, args): aux_network = self.aux_network_class.from_pretrained(args.ckpt, torch_dtype=torch.float16) pipe = self.pipeline_class.from_pretrained(self.root_ckpt, adapter=aux_network, torch_dtype=torch.float16) pipe = pipe.to("cuda") pipe.set_progress_bar_config(disable=True) self.pipe = pipe if args.run_compile: pipe.unet.to(memory_format=torch.channels_last) pipe.adapter.to(memory_format=torch.channels_last) print("Run torch compile") pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) pipe.adapter = torch.compile(pipe.adapter, mode="reduce-overhead", fullgraph=True) self.image = self.image.resize(RESOLUTION_MAPPING[args.ckpt]) class T2IAdapterSDXLBenchmark(T2IAdapterBenchmark): pipeline_class = StableDiffusionXLAdapterPipeline root_ckpt = "stabilityai/stable-diffusion-xl-base-1.0" url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/benchmarking/canny_for_adapter_sdxl.png" image = load_image(url) def __init__(self, args): super().__init__(args)
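# Hypothetical driver sketch for the classes above (the real entry points live in the sibling
# benchmark scripts of this folder; the CLI flag names below are assumptions that simply mirror
# the `args` attributes these classes read: ckpt, batch_size, num_inference_steps,
# model_cpu_offload, run_compile).
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--ckpt", type=str, default="runwayml/stable-diffusion-v1-5")
    parser.add_argument("--batch_size", type=int, default=1)
    parser.add_argument("--num_inference_steps", type=int, default=50)
    parser.add_argument("--model_cpu_offload", action="store_true")
    parser.add_argument("--run_compile", action="store_true")
    args = parser.parse_args()

    benchmark = TextToImageBenchmark(args)  # pick the class matching the pipeline to measure
    benchmark.benchmark(args)               # writes a CSV with latency (s) and peak memory (GB)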
diffusers/benchmarks/base_classes.py/0
{ "file_path": "diffusers/benchmarks/base_classes.py", "repo_id": "diffusers", "token_count": 5055 }
87
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # UNet Some training methods - like LoRA and Custom Diffusion - typically target the UNet's attention layers, but these training methods can also target other non-attention layers. Instead of training all of a model's parameters, only a subset of the parameters are trained, which is faster and more efficient. This class is useful if you're *only* loading weights into a UNet. If you need to load weights into the text encoder or a text encoder and UNet, try using the [`~loaders.LoraLoaderMixin.load_lora_weights`] function instead. The [`UNet2DConditionLoadersMixin`] class provides functions for loading and saving weights, fusing and unfusing LoRAs, disabling and enabling LoRAs, and setting and deleting adapters. <Tip> To learn more about how to load LoRA weights, see the [LoRA](../../using-diffusers/loading_adapters#lora) loading guide. </Tip> ## UNet2DConditionLoadersMixin [[autodoc]] loaders.unet.UNet2DConditionLoadersMixin
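As a brief sketch (the LoRA id below is a placeholder — substitute a LoRA trained for your base model), loading LoRA weights directly into the UNet with this mixin looks like:

```py
import torch
from diffusers import AutoPipelineForText2Image

pipeline = AutoPipelineForText2Image.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# load the LoRA weights into the UNet's attention processors
pipeline.unet.load_attn_procs("path/to/your-lora")  # placeholder path or Hub repo id
```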
diffusers/docs/source/en/api/loaders/unet.md/0
{ "file_path": "diffusers/docs/source/en/api/loaders/unet.md", "repo_id": "diffusers", "token_count": 403 }
88
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Speed up inference

There are several ways to optimize 🤗 Diffusers for inference speed. As a general rule of thumb, we recommend using either [xFormers](xformers) or `torch.nn.functional.scaled_dot_product_attention` in PyTorch 2.0 for their memory-efficient attention.

<Tip>

In many cases, optimizing for speed or memory leads to improved performance in the other, so you should try to optimize for both whenever you can. This guide focuses on inference speed, but you can learn more about preserving memory in the [Reduce memory usage](memory) guide.

</Tip>

The results below are obtained from generating a single 512x512 image from the prompt `a photo of an astronaut riding a horse on mars` with 50 DDIM steps on an Nvidia Titan RTX, demonstrating the speed-up you can expect.

|                            | latency | speed-up |
| -------------------------- | ------- | -------- |
| original                   | 9.50s   | x1       |
| fp16                       | 3.61s   | x2.63    |
| channels last              | 3.30s   | x2.88    |
| traced UNet                | 3.21s   | x2.96    |
| memory efficient attention | 2.63s   | x3.61    |

## Use TensorFloat-32

On Ampere and later CUDA devices, matrix multiplications and convolutions can use the [TensorFloat-32 (TF32)](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) mode for faster, but slightly less accurate computations. By default, PyTorch enables TF32 mode for convolutions but not matrix multiplications. Unless your network requires full float32 precision, we recommend enabling TF32 for matrix multiplications. It can significantly speed up computations with typically negligible loss in numerical accuracy.

```python
import torch

torch.backends.cuda.matmul.allow_tf32 = True
```

You can learn more about TF32 in the [Mixed precision training](https://huggingface.co/docs/transformers/en/perf_train_gpu_one#tf32) guide.

## Half-precision weights

To save GPU memory and get more speed, try loading and running the model weights directly in half-precision or float16:

```python
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float16,
    use_safetensors=True,
)
pipe = pipe.to("cuda")

prompt = "a photo of an astronaut riding a horse on mars"
image = pipe(prompt).images[0]
```

<Tip warning={true}>

Don't use [`torch.autocast`](https://pytorch.org/docs/stable/amp.html#torch.autocast) in any of the pipelines as it can lead to black images and is always slower than pure float16 precision.

</Tip>
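The memory-efficient attention recommendation at the top of this guide requires no code change on PyTorch 2.0, where `scaled_dot_product_attention` is used automatically. If you are on an older PyTorch release and have xFormers installed, you can opt in explicitly -- a minimal sketch:

```python
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True
).to("cuda")

# Only needed when relying on xFormers instead of PyTorch 2.0's built-in SDPA.
pipe.enable_xformers_memory_efficient_attention()

image = pipe("a photo of an astronaut riding a horse on mars").images[0]
```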
diffusers/docs/source/en/optimization/fp16.md/0
{ "file_path": "diffusers/docs/source/en/optimization/fp16.md", "repo_id": "diffusers", "token_count": 943 }
89
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Reinforcement learning training with DDPO You can fine-tune Stable Diffusion on a reward function via reinforcement learning with the 🤗 TRL library and 🤗 Diffusers. This is done with the Denoising Diffusion Policy Optimization (DDPO) algorithm introduced by Black et al. in [Training Diffusion Models with Reinforcement Learning](https://arxiv.org/abs/2305.13301), which is implemented in 🤗 TRL with the [`~trl.DDPOTrainer`]. For more information, check out the [`~trl.DDPOTrainer`] API reference and the [Finetune Stable Diffusion Models with DDPO via TRL](https://huggingface.co/blog/trl-ddpo) blog post.
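To give a rough idea of the moving parts, the sketch below follows the general shape of the TRL API: a config, a reward function, a prompt function, and a Stable Diffusion pipeline wrapper. The argument names and the reward logic here are illustrative assumptions -- check the [`~trl.DDPOTrainer`] reference and the blog post above for the exact, up-to-date signatures.

```python
# Illustrative sketch only -- verify class and argument names against the TRL docs.
import torch
from trl import DDPOConfig, DDPOTrainer, DefaultDDPOStableDiffusionPipeline


def prompt_fn():
    # Placeholder: return a prompt and optional metadata for logging.
    return "a photo of a corgi wearing a top hat", {}


def reward_fn(images, prompts, metadata):
    # Placeholder reward: a real setup would plug in e.g. an aesthetic scorer here.
    return torch.ones(len(images)), {}


config = DDPOConfig(num_epochs=10, sample_batch_size=4, train_batch_size=2)
pipeline = DefaultDDPOStableDiffusionPipeline("runwayml/stable-diffusion-v1-5")

trainer = DDPOTrainer(config, reward_fn, prompt_fn, pipeline)
trainer.train()
```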
diffusers/docs/source/en/training/ddpo.md/0
{ "file_path": "diffusers/docs/source/en/training/ddpo.md", "repo_id": "diffusers", "token_count": 322 }
90
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Accelerate inference of text-to-image diffusion models

Diffusion models are slower than their GAN counterparts because of the iterative and sequential reverse diffusion process. There are several techniques that can address this limitation such as progressive timestep distillation ([LCM LoRA](../using-diffusers/inference_with_lcm_lora)), model compression ([SSD-1B](https://huggingface.co/segmind/SSD-1B)), and reusing adjacent features of the denoiser ([DeepCache](../optimization/deepcache)).

However, you don't necessarily need to use these techniques to speed up inference. With PyTorch 2 alone, you can accelerate the inference latency of text-to-image diffusion pipelines by up to 3x. This tutorial will show you how to progressively apply the optimizations found in PyTorch 2 to reduce inference latency. You'll use the [Stable Diffusion XL (SDXL)](../using-diffusers/sdxl) pipeline in this tutorial, but these techniques are applicable to other text-to-image diffusion pipelines too.

Make sure you're using the latest version of Diffusers:

```bash
pip install -U diffusers
```

Then upgrade the other required libraries too:

```bash
pip install -U transformers accelerate peft
```

Install [PyTorch nightly](https://pytorch.org/) to benefit from the latest and fastest kernels:

```bash
pip3 install --pre torch --index-url https://download.pytorch.org/whl/nightly/cu121
```

<Tip>

The results reported below are from an 80GB 400W A100 with its clock rate set to the maximum.
<br>
If you're interested in the full benchmarking code, take a look at [huggingface/diffusion-fast](https://github.com/huggingface/diffusion-fast).

</Tip>

## Baseline

Let's start with a baseline. Disable reduced precision and the [`scaled_dot_product_attention` (SDPA)](../optimization/torch2.0#scaled-dot-product-attention) function, which is automatically used by Diffusers:

```python
from diffusers import StableDiffusionXLPipeline

# Load the pipeline in full-precision and place its model components on CUDA.
pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0"
).to("cuda")

# Run the attention ops without SDPA.
pipe.unet.set_default_attn_processor()
pipe.vae.set_default_attn_processor()

prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
image = pipe(prompt, num_inference_steps=30).images[0]
```

This default setup takes 7.36 seconds.

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/progressive-acceleration-sdxl/SDXL%2C_Batch_Size%3A_1%2C_Steps%3A_30_0.png" width=500>
</div>

## bfloat16

Enable the first optimization, reduced precision or, more specifically, bfloat16. There are several benefits of using reduced precision:

* Using a reduced numerical precision (such as float16 or bfloat16) for inference doesn't affect the generation quality but significantly improves latency.
* The benefits of using bfloat16 compared to float16 are hardware dependent, but modern GPUs tend to favor bfloat16. * bfloat16 is much more resilient when used with quantization compared to float16, but more recent versions of the quantization library ([torchao](https://github.com/pytorch-labs/ao)) we used don't have numerical issues with float16. ```python from diffusers import StableDiffusionXLPipeline import torch pipe = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.bfloat16 ).to("cuda") # Run the attention ops without SDPA. pipe.unet.set_default_attn_processor() pipe.vae.set_default_attn_processor() prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" image = pipe(prompt, num_inference_steps=30).images[0] ``` bfloat16 reduces the latency from 7.36 seconds to 4.63 seconds. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/progressive-acceleration-sdxl/SDXL%2C_Batch_Size%3A_1%2C_Steps%3A_30_1.png" width=500> </div> <Tip> In our later experiments with float16, recent versions of torchao do not incur numerical problems from float16. </Tip> Take a look at the [Speed up inference](../optimization/fp16) guide to learn more about running inference with reduced precision. ## SDPA Attention blocks are intensive to run. But with PyTorch's [`scaled_dot_product_attention`](../optimization/torch2.0#scaled-dot-product-attention) function, it is a lot more efficient. This function is used by default in Diffusers so you don't need to make any changes to the code. ```python from diffusers import StableDiffusionXLPipeline import torch pipe = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.bfloat16 ).to("cuda") prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" image = pipe(prompt, num_inference_steps=30).images[0] ``` Scaled dot product attention improves the latency from 4.63 seconds to 3.31 seconds. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/progressive-acceleration-sdxl/SDXL%2C_Batch_Size%3A_1%2C_Steps%3A_30_2.png" width=500> </div> ## torch.compile PyTorch 2 includes `torch.compile` which uses fast and optimized kernels. In Diffusers, the UNet and VAE are usually compiled because these are the most compute-intensive modules. First, configure a few compiler flags (refer to the [full list](https://github.com/pytorch/pytorch/blob/main/torch/_inductor/config.py) for more options): ```python from diffusers import StableDiffusionXLPipeline import torch torch._inductor.config.conv_1x1_as_mm = True torch._inductor.config.coordinate_descent_tuning = True torch._inductor.config.epilogue_fusion = False torch._inductor.config.coordinate_descent_check_all_directions = True ``` It is also important to change the UNet and VAE's memory layout to "channels_last" when compiling them to ensure maximum speed. ```python pipe.unet.to(memory_format=torch.channels_last) pipe.vae.to(memory_format=torch.channels_last) ``` Now compile and perform inference: ```python # Compile the UNet and VAE. pipe.unet = torch.compile(pipe.unet, mode="max-autotune", fullgraph=True) pipe.vae.decode = torch.compile(pipe.vae.decode, mode="max-autotune", fullgraph=True) prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" # First call to `pipe` is slow, subsequent ones are faster. 
image = pipe(prompt, num_inference_steps=30).images[0]
```

`torch.compile` offers different backends and modes. For maximum inference speed, use "max-autotune" for the inductor backend. "max-autotune" uses CUDA graphs and optimizes the compilation graph specifically for latency. CUDA graphs greatly reduce the overhead of launching GPU operations by using a mechanism to launch multiple GPU operations through a single CPU operation.

Using SDPA attention and compiling both the UNet and VAE cuts the latency from 3.31 seconds to 2.54 seconds.

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/progressive-acceleration-sdxl/SDXL%2C_Batch_Size%3A_1%2C_Steps%3A_30_3.png" width=500>
</div>

### Prevent graph breaks

Specifying `fullgraph=True` ensures there are no graph breaks in the underlying model to take full advantage of `torch.compile` without any performance degradation. For the UNet and VAE, this means changing how you access the return variables.

```diff
- latents = unet(
-     latents, timestep=timestep, encoder_hidden_states=prompt_embeds
-).sample

+ latents = unet(
+     latents, timestep=timestep, encoder_hidden_states=prompt_embeds, return_dict=False
+)[0]
```

### Remove GPU sync after compilation

During the iterative reverse diffusion process, the `step()` function is [called](https://github.com/huggingface/diffusers/blob/1d686bac8146037e97f3fd8c56e4063230f71751/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py#L1228) on the scheduler each time after the denoiser predicts the less noisy latent embeddings. Inside `step()`, the `sigmas` variable is [indexed](https://github.com/huggingface/diffusers/blob/1d686bac8146037e97f3fd8c56e4063230f71751/src/diffusers/schedulers/scheduling_euler_discrete.py#L476) which, when placed on the GPU, causes a communication sync between the CPU and GPU. This introduces latency and it becomes more evident when the denoiser has already been compiled.

But if the `sigmas` array always [stays on the CPU](https://github.com/huggingface/diffusers/blob/35a969d297cba69110d175ee79c59312b9f49e1e/src/diffusers/schedulers/scheduling_euler_discrete.py#L240), the CPU and GPU sync doesn't occur and you don't get any latency. In general, any CPU and GPU communication sync should be avoided or kept to a bare minimum because it can impact inference latency.

## Combine the attention block's projection matrices

The UNet and VAE in SDXL use Transformer-like blocks which consist of attention blocks and feed-forward blocks.

In an attention block, the input is projected into three sub-spaces using three different projection matrices – Q, K, and V. These projections are performed separately on the input. But we can horizontally combine the projection matrices into a single matrix and perform the projection in one step. This increases the size of the matrix multiplications of the input projections and improves the impact of quantization.

You can combine the projection matrices with just a single line of code:

```python
pipe.fuse_qkv_projections()
```

This provides a minor improvement from 2.54 seconds to 2.52 seconds.

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/progressive-acceleration-sdxl/SDXL%2C_Batch_Size%3A_1%2C_Steps%3A_30_4.png" width=500>
</div>

<Tip warning={true}>

Support for [`~StableDiffusionXLPipeline.fuse_qkv_projections`] is limited and experimental.
It's not available for many non-Stable Diffusion pipelines such as [Kandinsky](../using-diffusers/kandinsky). You can refer to this [PR](https://github.com/huggingface/diffusers/pull/6179) to get an idea about how to enable this for the other pipelines. </Tip> ## Dynamic quantization You can also use the ultra-lightweight PyTorch quantization library, [torchao](https://github.com/pytorch-labs/ao) (commit SHA `54bcd5a10d0abbe7b0c045052029257099f83fd9`), to apply [dynamic int8 quantization](https://pytorch.org/tutorials/recipes/recipes/dynamic_quantization.html) to the UNet and VAE. Quantization adds additional conversion overhead to the model that is hopefully made up for by faster matmuls (dynamic quantization). If the matmuls are too small, these techniques may degrade performance. First, configure all the compiler tags: ```python from diffusers import StableDiffusionXLPipeline import torch # Notice the two new flags at the end. torch._inductor.config.conv_1x1_as_mm = True torch._inductor.config.coordinate_descent_tuning = True torch._inductor.config.epilogue_fusion = False torch._inductor.config.coordinate_descent_check_all_directions = True torch._inductor.config.force_fuse_int_mm_with_mul = True torch._inductor.config.use_mixed_mm = True ``` Certain linear layers in the UNet and VAE don’t benefit from dynamic int8 quantization. You can filter out those layers with the [`dynamic_quant_filter_fn`](https://github.com/huggingface/diffusion-fast/blob/0f169640b1db106fe6a479f78c1ed3bfaeba3386/utils/pipeline_utils.py#L16) shown below. ```python def dynamic_quant_filter_fn(mod, *args): return ( isinstance(mod, torch.nn.Linear) and mod.in_features > 16 and (mod.in_features, mod.out_features) not in [ (1280, 640), (1920, 1280), (1920, 640), (2048, 1280), (2048, 2560), (2560, 1280), (256, 128), (2816, 1280), (320, 640), (512, 1536), (512, 256), (512, 512), (640, 1280), (640, 1920), (640, 320), (640, 5120), (640, 640), (960, 320), (960, 640), ] ) def conv_filter_fn(mod, *args): return ( isinstance(mod, torch.nn.Conv2d) and mod.kernel_size == (1, 1) and 128 in [mod.in_channels, mod.out_channels] ) ``` Finally, apply all the optimizations discussed so far: ```python # SDPA + bfloat16. pipe = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.bfloat16 ).to("cuda") # Combine attention projection matrices. pipe.fuse_qkv_projections() # Change the memory layout. pipe.unet.to(memory_format=torch.channels_last) pipe.vae.to(memory_format=torch.channels_last) ``` Since dynamic quantization is only limited to the linear layers, convert the appropriate pointwise convolution layers into linear layers to maximize its benefit. ```python from torchao import swap_conv2d_1x1_to_linear swap_conv2d_1x1_to_linear(pipe.unet, conv_filter_fn) swap_conv2d_1x1_to_linear(pipe.vae, conv_filter_fn) ``` Apply dynamic quantization: ```python from torchao import apply_dynamic_quant apply_dynamic_quant(pipe.unet, dynamic_quant_filter_fn) apply_dynamic_quant(pipe.vae, dynamic_quant_filter_fn) ``` Finally, compile and perform inference: ```python pipe.unet = torch.compile(pipe.unet, mode="max-autotune", fullgraph=True) pipe.vae.decode = torch.compile(pipe.vae.decode, mode="max-autotune", fullgraph=True) prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" image = pipe(prompt, num_inference_steps=30).images[0] ``` Applying dynamic quantization improves the latency from 2.52 seconds to 2.43 seconds. 
<div class="flex justify-center"> <img src="https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/progressive-acceleration-sdxl/SDXL%2C_Batch_Size%3A_1%2C_Steps%3A_30_5.png" width=500> </div>
diffusers/docs/source/en/tutorials/fast_diffusion.md/0
{ "file_path": "diffusers/docs/source/en/tutorials/fast_diffusion.md", "repo_id": "diffusers", "token_count": 4864 }
91
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

[[open-in-colab]]

# Latent Consistency Model

Latent Consistency Models (LCM) enable quality image generation in typically 2-4 steps, making it possible to use diffusion models in almost real-time settings.

From the [official website](https://latent-consistency-models.github.io/):

> LCMs can be distilled from any pre-trained Stable Diffusion (SD) in only 4,000 training steps (~32 A100 GPU Hours) for generating high quality 768 x 768 resolution images in 2~4 steps or even one step, significantly accelerating text-to-image generation. We employ LCM to distill the Dreamshaper-V7 version of SD in just 4,000 training iterations.

For a more technical overview of LCMs, refer to [the paper](https://huggingface.co/papers/2310.04378).

LCM distilled models are available for [stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5), [stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0), and the [SSD-1B](https://huggingface.co/segmind/SSD-1B) model. All the checkpoints can be found in this [collection](https://huggingface.co/collections/latent-consistency/latent-consistency-models-weights-654ce61a95edd6dffccef6a8).

This guide shows how to perform inference with LCMs for

- text-to-image
- image-to-image
- combined with style LoRAs
- ControlNet/T2I-Adapter

## Text-to-image

You'll use the [`StableDiffusionXLPipeline`] pipeline with the [`LCMScheduler`] and then load the LCM-distilled UNet. Together with the distilled UNet and the scheduler, the pipeline enables a fast inference workflow, overcoming the slow iterative nature of diffusion models.

```python
from diffusers import StableDiffusionXLPipeline, UNet2DConditionModel, LCMScheduler
import torch

unet = UNet2DConditionModel.from_pretrained(
    "latent-consistency/lcm-sdxl",
    torch_dtype=torch.float16,
    variant="fp16",
)
pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", unet=unet, torch_dtype=torch.float16, variant="fp16",
).to("cuda")
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

prompt = "Self-portrait oil painting, a beautiful cyborg with golden hair, 8k"

generator = torch.manual_seed(0)
image = pipe(
    prompt=prompt, num_inference_steps=4, generator=generator, guidance_scale=8.0
).images[0]
```

![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm/lcm_full_sdxl_t2i.png)

Notice that we use only 4 steps for generation, which is way less than what's typically used for standard SDXL.

Some details to keep in mind:

* To perform classifier-free guidance, batch size is usually doubled inside the pipeline. LCM, however, applies guidance using guidance embeddings, so the batch size does not have to be doubled in this case. This leads to a faster inference time, with the drawback that negative prompts don't have any effect on the denoising process.
* The UNet was trained using the [3., 13.] guidance scale range.
So, that is the ideal range for `guidance_scale`. However, disabling `guidance_scale` using a value of 1.0 is also effective in most cases. ## Image-to-image LCMs can be applied to image-to-image tasks too. For this example, we'll use the [LCM_Dreamshaper_v7](https://huggingface.co/SimianLuo/LCM_Dreamshaper_v7) model, but the same steps can be applied to other LCM models as well. ```python import torch from diffusers import AutoPipelineForImage2Image, UNet2DConditionModel, LCMScheduler from diffusers.utils import make_image_grid, load_image unet = UNet2DConditionModel.from_pretrained( "SimianLuo/LCM_Dreamshaper_v7", subfolder="unet", torch_dtype=torch.float16, ) pipe = AutoPipelineForImage2Image.from_pretrained( "Lykon/dreamshaper-7", unet=unet, torch_dtype=torch.float16, variant="fp16", ).to("cuda") pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) # prepare image url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png" init_image = load_image(url) prompt = "Astronauts in a jungle, cold color palette, muted colors, detailed, 8k" # pass prompt and image to pipeline generator = torch.manual_seed(0) image = pipe( prompt, image=init_image, num_inference_steps=4, guidance_scale=7.5, strength=0.5, generator=generator ).images[0] make_image_grid([init_image, image], rows=1, cols=2) ``` ![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm/lcm_full_sdv1-5_i2i.png) <Tip> You can get different results based on your prompt and the image you provide. To get the best results, we recommend trying different values for `num_inference_steps`, `strength`, and `guidance_scale` parameters and choose the best one. </Tip> ## Combine with style LoRAs LCMs can be used with other styled LoRAs to generate styled-images in very few steps (4-8). In the following example, we'll use the [papercut LoRA](TheLastBen/Papercut_SDXL). ```python from diffusers import StableDiffusionXLPipeline, UNet2DConditionModel, LCMScheduler import torch unet = UNet2DConditionModel.from_pretrained( "latent-consistency/lcm-sdxl", torch_dtype=torch.float16, variant="fp16", ) pipe = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", unet=unet, torch_dtype=torch.float16, variant="fp16", ).to("cuda") pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) pipe.load_lora_weights("TheLastBen/Papercut_SDXL", weight_name="papercut.safetensors", adapter_name="papercut") prompt = "papercut, a cute fox" generator = torch.manual_seed(0) image = pipe( prompt=prompt, num_inference_steps=4, generator=generator, guidance_scale=8.0 ).images[0] image ``` ![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm/lcm_full_sdx_lora_mix.png) ## ControlNet/T2I-Adapter Let's look at how we can perform inference with ControlNet/T2I-Adapter and a LCM. ### ControlNet For this example, we'll use the [LCM_Dreamshaper_v7](https://huggingface.co/SimianLuo/LCM_Dreamshaper_v7) model with canny ControlNet, but the same steps can be applied to other LCM models as well. 
```python import torch import cv2 import numpy as np from PIL import Image from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, LCMScheduler from diffusers.utils import load_image, make_image_grid image = load_image( "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png" ).resize((512, 512)) image = np.array(image) low_threshold = 100 high_threshold = 200 image = cv2.Canny(image, low_threshold, high_threshold) image = image[:, :, None] image = np.concatenate([image, image, image], axis=2) canny_image = Image.fromarray(image) controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16) pipe = StableDiffusionControlNetPipeline.from_pretrained( "SimianLuo/LCM_Dreamshaper_v7", controlnet=controlnet, torch_dtype=torch.float16, safety_checker=None, ).to("cuda") # set scheduler pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) generator = torch.manual_seed(0) image = pipe( "the mona lisa", image=canny_image, num_inference_steps=4, generator=generator, ).images[0] make_image_grid([canny_image, image], rows=1, cols=2) ``` ![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm/lcm_full_sdv1-5_controlnet.png) <Tip> The inference parameters in this example might not work for all examples, so we recommend trying different values for the `num_inference_steps`, `guidance_scale`, `controlnet_conditioning_scale`, and `cross_attention_kwargs` parameters and choosing the best one. </Tip> ### T2I-Adapter This example shows how to use the `lcm-sdxl` with the [Canny T2I-Adapter](TencentARC/t2i-adapter-canny-sdxl-1.0). ```python import torch import cv2 import numpy as np from PIL import Image from diffusers import StableDiffusionXLAdapterPipeline, UNet2DConditionModel, T2IAdapter, LCMScheduler from diffusers.utils import load_image, make_image_grid # Prepare image # Detect the canny map in low resolution to avoid high-frequency details image = load_image( "https://huggingface.co/Adapter/t2iadapter/resolve/main/figs_SDXLV1.0/org_canny.jpg" ).resize((384, 384)) image = np.array(image) low_threshold = 100 high_threshold = 200 image = cv2.Canny(image, low_threshold, high_threshold) image = image[:, :, None] image = np.concatenate([image, image, image], axis=2) canny_image = Image.fromarray(image).resize((1024, 1216)) # load adapter adapter = T2IAdapter.from_pretrained("TencentARC/t2i-adapter-canny-sdxl-1.0", torch_dtype=torch.float16, varient="fp16").to("cuda") unet = UNet2DConditionModel.from_pretrained( "latent-consistency/lcm-sdxl", torch_dtype=torch.float16, variant="fp16", ) pipe = StableDiffusionXLAdapterPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", unet=unet, adapter=adapter, torch_dtype=torch.float16, variant="fp16", ).to("cuda") pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) prompt = "Mystical fairy in real, magic, 4k picture, high quality" negative_prompt = "extra digit, fewer digits, cropped, worst quality, low quality, glitch, deformed, mutated, ugly, disfigured" generator = torch.manual_seed(0) image = pipe( prompt=prompt, negative_prompt=negative_prompt, image=canny_image, num_inference_steps=4, guidance_scale=5, adapter_conditioning_scale=0.8, adapter_conditioning_factor=1, generator=generator, ).images[0] grid = make_image_grid([canny_image, image], rows=1, cols=2) ``` 
![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lcm/lcm_full_sdxl_t2iadapter.png)
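For completeness, the same fast text-to-image workflow also works with the SD v1-5 sized LCM used in the image-to-image example above. The sketch below loads [LCM_Dreamshaper_v7](https://huggingface.co/SimianLuo/LCM_Dreamshaper_v7) as a full pipeline; the prompt and guidance value are only examples.

```python
import torch
from diffusers import AutoPipelineForText2Image, LCMScheduler

pipe = AutoPipelineForText2Image.from_pretrained(
    "SimianLuo/LCM_Dreamshaper_v7", torch_dtype=torch.float16
).to("cuda")
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

generator = torch.manual_seed(0)
image = pipe(
    "close-up photography of an old man standing in the rain at night, lit by street lamps",
    num_inference_steps=4,
    guidance_scale=8.0,
    generator=generator,
).images[0]
```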
diffusers/docs/source/en/using-diffusers/inference_with_lcm.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/inference_with_lcm.md", "repo_id": "diffusers", "token_count": 3678 }
92
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Shap-E [[open-in-colab]] Shap-E is a conditional model for generating 3D assets which could be used for video game development, interior design, and architecture. It is trained on a large dataset of 3D assets, and post-processed to render more views of each object and produce 16K instead of 4K point clouds. The Shap-E model is trained in two steps: 1. an encoder accepts the point clouds and rendered views of a 3D asset and outputs the parameters of implicit functions that represent the asset 2. a diffusion model is trained on the latents produced by the encoder to generate either neural radiance fields (NeRFs) or a textured 3D mesh, making it easier to render and use the 3D asset in downstream applications This guide will show you how to use Shap-E to start generating your own 3D assets! Before you begin, make sure you have the following libraries installed: ```py # uncomment to install the necessary libraries in Colab #!pip install -q diffusers transformers accelerate trimesh ``` ## Text-to-3D To generate a gif of a 3D object, pass a text prompt to the [`ShapEPipeline`]. The pipeline generates a list of image frames which are used to create the 3D object. ```py import torch from diffusers import ShapEPipeline device = torch.device("cuda" if torch.cuda.is_available() else "cpu") pipe = ShapEPipeline.from_pretrained("openai/shap-e", torch_dtype=torch.float16, variant="fp16") pipe = pipe.to(device) guidance_scale = 15.0 prompt = ["A firecracker", "A birthday cupcake"] images = pipe( prompt, guidance_scale=guidance_scale, num_inference_steps=64, frame_size=256, ).images ``` Now use the [`~utils.export_to_gif`] function to turn the list of image frames into a gif of the 3D object. ```py from diffusers.utils import export_to_gif export_to_gif(images[0], "firecracker_3d.gif") export_to_gif(images[1], "cake_3d.gif") ``` <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/shap_e/firecracker_out.gif"/> <figcaption class="mt-2 text-center text-sm text-gray-500">prompt = "A firecracker"</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/shap_e/cake_out.gif"/> <figcaption class="mt-2 text-center text-sm text-gray-500">prompt = "A birthday cupcake"</figcaption> </div> </div> ## Image-to-3D To generate a 3D object from another image, use the [`ShapEImg2ImgPipeline`]. You can use an existing image or generate an entirely new one. Let's use the [Kandinsky 2.1](../api/pipelines/kandinsky) model to generate a new image. 
```py from diffusers import DiffusionPipeline import torch prior_pipeline = DiffusionPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16, use_safetensors=True).to("cuda") pipeline = DiffusionPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16, use_safetensors=True).to("cuda") prompt = "A cheeseburger, white background" image_embeds, negative_image_embeds = prior_pipeline(prompt, guidance_scale=1.0).to_tuple() image = pipeline( prompt, image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, ).images[0] image.save("burger.png") ``` Pass the cheeseburger to the [`ShapEImg2ImgPipeline`] to generate a 3D representation of it. ```py from PIL import Image from diffusers import ShapEImg2ImgPipeline from diffusers.utils import export_to_gif pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img", torch_dtype=torch.float16, variant="fp16").to("cuda") guidance_scale = 3.0 image = Image.open("burger.png").resize((256, 256)) images = pipe( image, guidance_scale=guidance_scale, num_inference_steps=64, frame_size=256, ).images gif_path = export_to_gif(images[0], "burger_3d.gif") ``` <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/shap_e/burger_in.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">cheeseburger</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/shap_e/burger_out.gif"/> <figcaption class="mt-2 text-center text-sm text-gray-500">3D cheeseburger</figcaption> </div> </div> ## Generate mesh Shap-E is a flexible model that can also generate textured mesh outputs to be rendered for downstream applications. In this example, you'll convert the output into a `glb` file because the 🤗 Datasets library supports mesh visualization of `glb` files which can be rendered by the [Dataset viewer](https://huggingface.co/docs/hub/datasets-viewer#dataset-preview). You can generate mesh outputs for both the [`ShapEPipeline`] and [`ShapEImg2ImgPipeline`] by specifying the `output_type` parameter as `"mesh"`: ```py import torch from diffusers import ShapEPipeline device = torch.device("cuda" if torch.cuda.is_available() else "cpu") pipe = ShapEPipeline.from_pretrained("openai/shap-e", torch_dtype=torch.float16, variant="fp16") pipe = pipe.to(device) guidance_scale = 15.0 prompt = "A birthday cupcake" images = pipe(prompt, guidance_scale=guidance_scale, num_inference_steps=64, frame_size=256, output_type="mesh").images ``` Use the [`~utils.export_to_ply`] function to save the mesh output as a `ply` file: <Tip> You can optionally save the mesh output as an `obj` file with the [`~utils.export_to_obj`] function. The ability to save the mesh output in a variety of formats makes it more flexible for downstream usage! 
</Tip> ```py from diffusers.utils import export_to_ply ply_path = export_to_ply(images[0], "3d_cake.ply") print(f"Saved to folder: {ply_path}") ``` Then you can convert the `ply` file to a `glb` file with the trimesh library: ```py import trimesh mesh = trimesh.load("3d_cake.ply") mesh_export = mesh.export("3d_cake.glb", file_type="glb") ``` By default, the mesh output is focused from the bottom viewpoint but you can change the default viewpoint by applying a rotation transform: ```py import trimesh import numpy as np mesh = trimesh.load("3d_cake.ply") rot = trimesh.transformations.rotation_matrix(-np.pi / 2, [1, 0, 0]) mesh = mesh.apply_transform(rot) mesh_export = mesh.export("3d_cake.glb", file_type="glb") ``` Upload the mesh file to your dataset repository to visualize it with the Dataset viewer! <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/3D-cake.gif"/> </div>
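As mentioned in the tip above, you can also save the mesh output as an `obj` file; the call mirrors [`~utils.export_to_ply`] (a minimal sketch, reusing the `images` output from the mesh generation example):

```py
from diffusers.utils import export_to_obj

# Works on the same "mesh" output type as export_to_ply.
obj_path = export_to_obj(images[0], "3d_cake.obj")
print(f"Saved to: {obj_path}")
```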
diffusers/docs/source/en/using-diffusers/shap-e.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/shap-e.md", "repo_id": "diffusers", "token_count": 2476 }
93
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Stable diffusion XL Stable Diffusion XL은 Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, Robin Rombach에 의해 [SDXL: Improving Latent Diffusion Models for High-Resolution Image Synthesis](https://arxiv.org/abs/2307.01952)에서 제안되었습니다. 논문 초록은 다음을 따릅니다: *text-to-image의 latent diffusion 모델인 SDXL을 소개합니다. 이전 버전의 Stable Diffusion과 비교하면, SDXL은 세 배 더큰 규모의 UNet 백본을 포함합니다: 모델 파라미터의 증가는 많은 attention 블럭을 사용하고 더 큰 cross-attention context를 SDXL의 두 번째 텍스트 인코더에 사용하기 때문입니다. 다중 종횡비에 다수의 새로운 conditioning 방법을 구성했습니다. 또한 후에 수정하는 image-to-image 기술을 사용함으로써 SDXL에 의해 생성된 시각적 품질을 향상하기 위해 정제된 모델을 소개합니다. SDXL은 이전 버전의 Stable Diffusion보다 성능이 향상되었고, 이러한 black-box 최신 이미지 생성자와 경쟁력있는 결과를 달성했습니다.* ## 팁 - Stable Diffusion XL은 특히 786과 1024사이의 이미지에 잘 작동합니다. - Stable Diffusion XL은 아래와 같이 학습된 각 텍스트 인코더에 대해 서로 다른 프롬프트를 전달할 수 있습니다. 동일한 프롬프트의 다른 부분을 텍스트 인코더에 전달할 수도 있습니다. - Stable Diffusion XL 결과 이미지는 아래에 보여지듯이 정제기(refiner)를 사용함으로써 향상될 수 있습니다. ### 이용가능한 체크포인트: - *Text-to-Image (1024x1024 해상도)*: [`StableDiffusionXLPipeline`]을 사용한 [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) - *Image-to-Image / 정제기(refiner) (1024x1024 해상도)*: [`StableDiffusionXLImg2ImgPipeline`]를 사용한 [stabilityai/stable-diffusion-xl-refiner-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0) ## 사용 예시 SDXL을 사용하기 전에 `transformers`, `accelerate`, `safetensors` 와 `invisible_watermark`를 설치하세요. 다음과 같이 라이브러리를 설치할 수 있습니다: ``` pip install transformers pip install accelerate pip install safetensors pip install invisible-watermark>=0.2.0 ``` ### 워터마커 Stable Diffusion XL로 이미지를 생성할 때 워터마크가 보이지 않도록 추가하는 것을 권장하는데, 이는 다운스트림(downstream) 어플리케이션에서 기계에 합성되었는지를 식별하는데 도움을 줄 수 있습니다. 그렇게 하려면 [invisible_watermark 라이브러리](https://pypi.org/project/invisible-watermark/)를 통해 설치해주세요: ``` pip install invisible-watermark>=0.2.0 ``` `invisible-watermark` 라이브러리가 설치되면 워터마커가 **기본적으로** 사용될 것입니다. 
생성 또는 안전하게 이미지를 배포하기 위해 다른 규정이 있다면, 다음과 같이 워터마커를 비활성화할 수 있습니다: ```py pipe = StableDiffusionXLPipeline.from_pretrained(..., add_watermarker=False) ``` ### Text-to-Image *text-to-image*를 위해 다음과 같이 SDXL을 사용할 수 있습니다: ```py from diffusers import StableDiffusionXLPipeline import torch pipe = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ) pipe.to("cuda") prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" image = pipe(prompt=prompt).images[0] ``` ### Image-to-image *image-to-image*를 위해 다음과 같이 SDXL을 사용할 수 있습니다: ```py import torch from diffusers import StableDiffusionXLImg2ImgPipeline from diffusers.utils import load_image pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ) pipe = pipe.to("cuda") url = "https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/aa_xl/000000009.png" init_image = load_image(url).convert("RGB") prompt = "a photo of an astronaut riding a horse on mars" image = pipe(prompt, image=init_image).images[0] ``` ### 인페인팅 *inpainting*를 위해 다음과 같이 SDXL을 사용할 수 있습니다: ```py import torch from diffusers import StableDiffusionXLInpaintPipeline from diffusers.utils import load_image pipe = StableDiffusionXLInpaintPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ) pipe.to("cuda") img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" init_image = load_image(img_url).convert("RGB") mask_image = load_image(mask_url).convert("RGB") prompt = "A majestic tiger sitting on a bench" image = pipe(prompt=prompt, image=init_image, mask_image=mask_image, num_inference_steps=50, strength=0.80).images[0] ``` ### 이미지 결과물을 정제하기 [base 모델 체크포인트](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)에서, StableDiffusion-XL 또한 고주파 품질을 향상시키는 이미지를 생성하기 위해 낮은 노이즈 단계 이미지를 제거하는데 특화된 [refiner 체크포인트](huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0)를 포함하고 있습니다. 이 refiner 체크포인트는 이미지 품질을 향상시키기 위해 base 체크포인트를 실행한 후 "두 번째 단계" 파이프라인에 사용될 수 있습니다. refiner를 사용할 때, 쉽게 사용할 수 있습니다 - 1.) base 모델과 refiner을 사용하는데, 이는 *Denoisers의 앙상블*을 위한 첫 번째 제안된 [eDiff-I](https://research.nvidia.com/labs/dir/eDiff-I/)를 사용하거나 - 2.) base 모델을 거친 후 [SDEdit](https://arxiv.org/abs/2108.01073) 방법으로 단순하게 refiner를 실행시킬 수 있습니다. **참고**: SD-XL base와 refiner를 앙상블로 사용하는 아이디어는 커뮤니티 기여자들이 처음으로 제안했으며, 이는 다음과 같은 `diffusers`를 구현하는 데도 도움을 주셨습니다. - [SytanSD](https://github.com/SytanSD) - [bghira](https://github.com/bghira) - [Birch-san](https://github.com/Birch-san) - [AmericanPresidentJimmyCarter](https://github.com/AmericanPresidentJimmyCarter) #### 1.) Denoisers의 앙상블 base와 refiner 모델을 denoiser의 앙상블로 사용할 때, base 모델은 고주파 diffusion 단계를 위한 전문가의 역할을 해야하고, refiner는 낮은 노이즈 diffusion 단계를 위한 전문가의 역할을 해야 합니다. 2.)에 비해 1.)의 장점은 전체적으로 denoising 단계가 덜 필요하므로 속도가 훨씬 더 빨라집니다. 단점은 base 모델의 결과를 검사할 수 없다는 것입니다. 즉, 여전히 노이즈가 심하게 제거됩니다. base 모델과 refiner를 denoiser의 앙상블로 사용하기 위해 각각 고노이즈(high-nosise) (*즉* base 모델)와 저노이즈 (*즉* refiner 모델)의 노이즈를 제거하는 단계를 거쳐야하는 타임스텝의 기간을 정의해야 합니다. 
base 모델의 [`denoising_end`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/stable_diffusion_xl#diffusers.StableDiffusionXLPipeline.__call__.denoising_end)와 refiner 모델의 [`denoising_start`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/stable_diffusion_xl#diffusers.StableDiffusionXLImg2ImgPipeline.__call__.denoising_start)를 사용해 간격을 정합니다. `denoising_end`와 `denoising_start` 모두 0과 1사이의 실수 값으로 전달되어야 합니다. 전달되면 노이즈 제거의 끝과 시작은 모델 스케줄에 의해 정의된 이산적(discrete) 시간 간격의 비율로 정의됩니다. 노이즈 제거 단계의 수는 모델이 학습된 불연속적인 시간 간격과 선언된 fractional cutoff에 의해 결정되므로 '강도' 또한 선언된 경우 이 값이 '강도'를 재정의합니다. 예시를 들어보겠습니다. 우선, 두 개의 파이프라인을 가져옵니다. 텍스트 인코더와 variational autoencoder는 동일하므로 refiner를 위해 다시 불러오지 않아도 됩니다. ```py from diffusers import DiffusionPipeline import torch base = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ) pipe.to("cuda") refiner = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-refiner-1.0", text_encoder_2=base.text_encoder_2, vae=base.vae, torch_dtype=torch.float16, use_safetensors=True, variant="fp16", ) refiner.to("cuda") ``` 이제 추론 단계의 수와 고노이즈에서 노이즈를 제거하는 단계(*즉* base 모델)를 거쳐 실행되는 지점을 정의합니다. ```py n_steps = 40 high_noise_frac = 0.8 ``` Stable Diffusion XL base 모델은 타임스텝 0-999에 학습되며 Stable Diffusion XL refiner는 포괄적인 낮은 노이즈 타임스텝인 0-199에 base 모델로 부터 파인튜닝되어, 첫 800 타임스텝 (높은 노이즈)에 base 모델을 사용하고 마지막 200 타입스텝 (낮은 노이즈)에서 refiner가 사용됩니다. 따라서, `high_noise_frac`는 0.8로 설정하고, 모든 200-999 스텝(노이즈 제거 타임스텝의 첫 80%)은 base 모델에 의해 수행되며 0-199 스텝(노이즈 제거 타임스텝의 마지막 20%)은 refiner 모델에 의해 수행됩니다. 기억하세요, 노이즈 제거 절차는 **높은 값**(높은 노이즈) 타임스텝에서 시작되고, **낮은 값** (낮은 노이즈) 타임스텝에서 끝납니다. 이제 두 파이프라인을 실행해봅시다. `denoising_end`과 `denoising_start`를 같은 값으로 설정하고 `num_inference_steps`는 상수로 유지합니다. 또한 base 모델의 출력은 잠재 공간에 있어야 한다는 점을 기억하세요: ```py prompt = "A majestic lion jumping from a big stone at night" image = base( prompt=prompt, num_inference_steps=n_steps, denoising_end=high_noise_frac, output_type="latent", ).images image = refiner( prompt=prompt, num_inference_steps=n_steps, denoising_start=high_noise_frac, image=image, ).images[0] ``` 이미지를 살펴보겠습니다. | 원래의 이미지 | Denoiser들의 앙상블 | |---|---| | ![lion_base](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lion_base.png) | ![lion_ref](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/lion_refined.png) 동일한 40 단계에서 base 모델을 실행한다면, 이미지의 디테일(예: 사자의 눈과 코)이 떨어졌을 것입니다: <Tip> 앙상블 방식은 사용 가능한 모든 스케줄러에서 잘 작동합니다! </Tip> #### 2.) 노이즈가 완전히 제거된 기본 이미지에서 이미지 출력을 정제하기 일반적인 [`StableDiffusionImg2ImgPipeline`] 방식에서, 기본 모델에서 생성된 완전히 노이즈가 제거된 이미지는 [refiner checkpoint](huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0)를 사용해 더 향상시킬 수 있습니다. 이를 위해, 보통의 "base" text-to-image 파이프라인을 수행 후에 image-to-image 파이프라인으로써 refiner를 실행시킬 수 있습니다. base 모델의 출력을 잠재 공간에 남겨둘 수 있습니다. 
```py from diffusers import DiffusionPipeline import torch pipe = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ) pipe.to("cuda") refiner = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-refiner-1.0", text_encoder_2=pipe.text_encoder_2, vae=pipe.vae, torch_dtype=torch.float16, use_safetensors=True, variant="fp16", ) refiner.to("cuda") prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" image = pipe(prompt=prompt, output_type="latent" if use_refiner else "pil").images[0] image = refiner(prompt=prompt, image=image[None, :]).images[0] ``` | 원래의 이미지 | 정제된 이미지 | |---|---| | ![](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/sd_xl/init_image.png) | ![](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/sd_xl/refined_image.png) | <Tip> refiner는 또한 인페인팅 설정에 잘 사용될 수 있습니다. 아래에 보여지듯이 [`StableDiffusionXLInpaintPipeline`] 클래스를 사용해서 만들어보세요. </Tip> Denoiser 앙상블 설정에서 인페인팅에 refiner를 사용하려면 다음을 수행하면 됩니다: ```py from diffusers import StableDiffusionXLInpaintPipeline from diffusers.utils import load_image pipe = StableDiffusionXLInpaintPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ) pipe.to("cuda") refiner = StableDiffusionXLInpaintPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-refiner-1.0", text_encoder_2=pipe.text_encoder_2, vae=pipe.vae, torch_dtype=torch.float16, use_safetensors=True, variant="fp16", ) refiner.to("cuda") img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" init_image = load_image(img_url).convert("RGB") mask_image = load_image(mask_url).convert("RGB") prompt = "A majestic tiger sitting on a bench" num_inference_steps = 75 high_noise_frac = 0.7 image = pipe( prompt=prompt, image=init_image, mask_image=mask_image, num_inference_steps=num_inference_steps, denoising_start=high_noise_frac, output_type="latent", ).images image = refiner( prompt=prompt, image=image, mask_image=mask_image, num_inference_steps=num_inference_steps, denoising_start=high_noise_frac, ).images[0] ``` 일반적인 SDE 설정에서 인페인팅에 refiner를 사용하기 위해, `denoising_end`와 `denoising_start`를 제거하고 refiner의 추론 단계의 수를 적게 선택하세요. ### 단독 체크포인트 파일 / 원래의 파일 형식으로 불러오기 [`~diffusers.loaders.FromSingleFileMixin.from_single_file`]를 사용함으로써 원래의 파일 형식을 `diffusers` 형식으로 불러올 수 있습니다: ```py from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline import torch pipe = StableDiffusionXLPipeline.from_single_file( "./sd_xl_base_1.0.safetensors", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ) pipe.to("cuda") refiner = StableDiffusionXLImg2ImgPipeline.from_single_file( "./sd_xl_refiner_1.0.safetensors", torch_dtype=torch.float16, use_safetensors=True, variant="fp16" ) refiner.to("cuda") ``` ### 모델 offloading을 통해 메모리 최적화하기 out-of-memory 에러가 난다면, [`StableDiffusionXLPipeline.enable_model_cpu_offload`]을 사용하는 것을 권장합니다. ```diff - pipe.to("cuda") + pipe.enable_model_cpu_offload() ``` 그리고 ```diff - refiner.to("cuda") + refiner.enable_model_cpu_offload() ``` ### `torch.compile`로 추론 속도를 올리기 `torch.compile`를 사용함으로써 추론 속도를 올릴 수 있습니다. 이는 **ca.** 20% 속도 향상이 됩니다. 
```diff + pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) + refiner.unet = torch.compile(refiner.unet, mode="reduce-overhead", fullgraph=True) ``` ### `torch < 2.0`일 때 실행하기 **참고** Stable Diffusion XL을 `torch`가 2.0 버전 미만에서 실행시키고 싶을 때, xformers 어텐션을 사용해주세요: ``` pip install xformers ``` ```diff +pipe.enable_xformers_memory_efficient_attention() +refiner.enable_xformers_memory_efficient_attention() ``` ## StableDiffusionXLPipeline [[autodoc]] StableDiffusionXLPipeline - all - __call__ ## StableDiffusionXLImg2ImgPipeline [[autodoc]] StableDiffusionXLImg2ImgPipeline - all - __call__ ## StableDiffusionXLInpaintPipeline [[autodoc]] StableDiffusionXLInpaintPipeline - all - __call__ ### 각 텍스트 인코더에 다른 프롬프트를 전달하기 Stable Diffusion XL는 두 개의 텍스트 인코더에 학습되었습니다. 기본 동작은 각 프롬프트에 동일한 프롬프트를 전달하는 것입니다. 그러나 [일부 사용자](https://github.com/huggingface/diffusers/issues/4004#issuecomment-1627764201)가 품질을 향상시킬 수 있다고 지적한 것처럼 텍스트 인코더마다 다른 프롬프트를 전달할 수 있습니다. 그렇게 하려면, `prompt_2`와 `negative_prompt_2`를 `prompt`와 `negative_prompt`에 전달해야 합니다. 그렇게 함으로써, 원래의 프롬프트들(`prompt`)과 부정 프롬프트들(`negative_prompt`)를 `텍스트 인코더`에 전달할 것입니다.(공식 SDXL 0.9/1.0의 [OpenAI CLIP-ViT/L-14](https://huggingface.co/openai/clip-vit-large-patch14)에서 볼 수 있습니다.) 그리고 `prompt_2`와 `negative_prompt_2`는 `text_encoder_2`에 전달됩니다.(공식 SDXL 0.9/1.0의 [OpenCLIP-ViT/bigG-14](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)에서 볼 수 있습니다.) ```py from diffusers import StableDiffusionXLPipeline import torch pipe = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-0.9", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ) pipe.to("cuda") # OAI CLIP-ViT/L-14에 prompt가 전달됩니다 prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" # OpenCLIP-ViT/bigG-14에 prompt_2가 전달됩니다 prompt_2 = "monet painting" image = pipe(prompt=prompt, prompt_2=prompt_2).images[0] ```
diffusers/docs/source/ko/api/pipelines/stable_diffusion/stable_diffusion_xl.md/0
{ "file_path": "diffusers/docs/source/ko/api/pipelines/stable_diffusion/stable_diffusion_xl.md", "repo_id": "diffusers", "token_count": 10988 }
94
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # 새로운 작업에 대한 모델을 적용하기 많은 diffusion 시스템은 같은 구성 요소들을 공유하므로 한 작업에 대해 사전학습된 모델을 완전히 다른 작업에 적용할 수 있습니다. 이 인페인팅을 위한 가이드는 사전학습된 [`UNet2DConditionModel`]의 아키텍처를 초기화하고 수정하여 사전학습된 text-to-image 모델을 어떻게 인페인팅에 적용하는지를 알려줄 것입니다. ## UNet2DConditionModel 파라미터 구성 [`UNet2DConditionModel`]은 [input sample](https://huggingface.co/docs/diffusers/v0.16.0/en/api/models#diffusers.UNet2DConditionModel.in_channels)에서 4개의 채널을 기본적으로 허용합니다. 예를 들어, [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5)와 같은 사전학습된 text-to-image 모델을 불러오고 `in_channels`의 수를 확인합니다: ```py from diffusers import StableDiffusionPipeline pipeline = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") pipeline.unet.config["in_channels"] 4 ``` 인페인팅은 입력 샘플에 9개의 채널이 필요합니다. [`runwayml/stable-diffusion-inpainting`](https://huggingface.co/runwayml/stable-diffusion-inpainting)와 같은 사전학습된 인페인팅 모델에서 이 값을 확인할 수 있습니다: ```py from diffusers import StableDiffusionPipeline pipeline = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-inpainting") pipeline.unet.config["in_channels"] 9 ``` 인페인팅에 대한 text-to-image 모델을 적용하기 위해, `in_channels` 수를 4에서 9로 수정해야 할 것입니다. 사전학습된 text-to-image 모델의 가중치와 [`UNet2DConditionModel`]을 초기화하고 `in_channels`를 9로 수정해 주세요. `in_channels`의 수를 수정하면 크기가 달라지기 때문에 크기가 안 맞는 오류를 피하기 위해 `ignore_mismatched_sizes=True` 및 `low_cpu_mem_usage=False`를 설정해야 합니다. ```py from diffusers import UNet2DConditionModel model_id = "runwayml/stable-diffusion-v1-5" unet = UNet2DConditionModel.from_pretrained( model_id, subfolder="unet", in_channels=9, low_cpu_mem_usage=False, ignore_mismatched_sizes=True ) ``` Text-to-image 모델로부터 다른 구성 요소의 사전학습된 가중치는 체크포인트로부터 초기화되지만 `unet`의 입력 채널 가중치 (`conv_in.weight`)는 랜덤하게 초기화됩니다. 그렇지 않으면 모델이 노이즈를 리턴하기 때문에 인페인팅의 모델을 파인튜닝 할 때 중요합니다.
diffusers/docs/source/ko/training/adapt_a_model.md/0
{ "file_path": "diffusers/docs/source/ko/training/adapt_a_model.md", "repo_id": "diffusers", "token_count": 1827 }
95
# 이미지 밝기 조절하기 Stable Diffusion 파이프라인은 [일반적인 디퓨전 노이즈 스케줄과 샘플 단계에 결함이 있음](https://huggingface.co/papers/2305.08891) 논문에서 설명한 것처럼 매우 밝거나 어두운 이미지를 생성하는 데는 성능이 평범합니다. 이 논문에서 제안한 솔루션은 현재 [`DDIMScheduler`]에 구현되어 있으며 이미지의 밝기를 개선하는 데 사용할 수 있습니다. <Tip> 💡 제안된 솔루션에 대한 자세한 내용은 위에 링크된 논문을 참고하세요! </Tip> 해결책 중 하나는 *v 예측값*과 *v 로스*로 모델을 훈련하는 것입니다. 다음 flag를 [`train_text_to_image.py`](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py) 또는 [`train_text_to_image_lora.py`](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora.py) 스크립트에 추가하여 `v_prediction`을 활성화합니다: ```bash --prediction_type="v_prediction" ``` 예를 들어, `v_prediction`으로 미세 조정된 [`ptx0/pseudo-journey-v2`](https://huggingface.co/ptx0/pseudo-journey-v2) 체크포인트를 사용해 보겠습니다. 다음으로 [`DDIMScheduler`]에서 다음 파라미터를 설정합니다: 1. rescale_betas_zero_snr=True`, 노이즈 스케줄을 제로 터미널 신호 대 잡음비(SNR)로 재조정합니다. 2. `timestep_spacing="trailing"`, 마지막 타임스텝부터 샘플링 시작 ```py >>> from diffusers import DiffusionPipeline, DDIMScheduler >>> pipeline = DiffusionPipeline.from_pretrained("ptx0/pseudo-journey-v2") # switch the scheduler in the pipeline to use the DDIMScheduler >>> pipeline.scheduler = DDIMScheduler.from_config( ... pipeline.scheduler.config, rescale_betas_zero_snr=True, timestep_spacing="trailing" ... ) >>> pipeline.to("cuda") ``` 마지막으로 파이프라인에 대한 호출에서 `guidance_rescale`을 설정하여 과다 노출을 방지합니다: ```py prompt = "A lion in galaxies, spirals, nebulae, stars, smoke, iridescent, intricate detail, octane render, 8k" image = pipeline(prompt, guidance_rescale=0.7).images[0] ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/zero_snr.png"/> </div>
diffusers/docs/source/ko/using-diffusers/control_brightness.md/0
{ "file_path": "diffusers/docs/source/ko/using-diffusers/control_brightness.md", "repo_id": "diffusers", "token_count": 1435 }
96
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Unconditional 이미지 생성 [[open-in-colab]] Unconditional 이미지 생성은 비교적 간단한 작업입니다. 모델이 텍스트나 이미지와 같은 추가 조건 없이 이미 학습된 학습 데이터와 유사한 이미지만 생성합니다. ['DiffusionPipeline']은 추론을 위해 미리 학습된 diffusion 시스템을 사용하는 가장 쉬운 방법입니다. 먼저 ['DiffusionPipeline']의 인스턴스를 생성하고 다운로드할 파이프라인의 [체크포인트](https://huggingface.co/models?library=diffusers&sort=downloads)를 지정합니다. 허브의 🧨 diffusion 체크포인트 중 하나를 사용할 수 있습니다(사용할 체크포인트는 나비 이미지를 생성합니다). <Tip> 💡 나만의 unconditional 이미지 생성 모델을 학습시키고 싶으신가요? 학습 가이드를 살펴보고 나만의 이미지를 생성하는 방법을 알아보세요. </Tip> 이 가이드에서는 unconditional 이미지 생성에 ['DiffusionPipeline']과 [DDPM](https://arxiv.org/abs/2006.11239)을 사용합니다: ```python >>> from diffusers import DiffusionPipeline >>> generator = DiffusionPipeline.from_pretrained("anton-l/ddpm-butterflies-128") ``` [diffusion 파이프라인]은 모든 모델링, 토큰화, 스케줄링 구성 요소를 다운로드하고 캐시합니다. 이 모델은 약 14억 개의 파라미터로 구성되어 있기 때문에 GPU에서 실행할 것을 강력히 권장합니다. PyTorch에서와 마찬가지로 제너레이터 객체를 GPU로 옮길 수 있습니다: ```python >>> generator.to("cuda") ``` 이제 제너레이터를 사용하여 이미지를 생성할 수 있습니다: ```python >>> image = generator().images[0] ``` 출력은 기본적으로 [PIL.Image](https://pillow.readthedocs.io/en/stable/reference/Image.html?highlight=image#the-image-class) 객체로 감싸집니다. 다음을 호출하여 이미지를 저장할 수 있습니다: ```python >>> image.save("generated_image.png") ``` 아래 스페이스(데모 링크)를 이용해 보고, 추론 단계의 매개변수를 자유롭게 조절하여 이미지 품질에 어떤 영향을 미치는지 확인해 보세요! <iframe src="https://stevhliu-ddpm-butterflies-128.hf.space" frameborder="0" width="850" height="500"></iframe>
diffusers/docs/source/ko/using-diffusers/unconditional_image_generation.md/0
{ "file_path": "diffusers/docs/source/ko/using-diffusers/unconditional_image_generation.md", "repo_id": "diffusers", "token_count": 1742 }
97
## Amused training

Amused can be finetuned on simple datasets relatively cheaply and quickly. Using 8bit optimizers, lora, and gradient accumulation, amused can be finetuned with as little as 5.5 GB. Here is a set of examples for finetuning amused on some relatively simple datasets. These training recipes are aggressively oriented towards minimal resources and fast verification -- i.e. the batch sizes are quite low and the learning rates are quite high. For optimal quality, you will probably want to increase the batch sizes and decrease learning rates.

All training examples use fp16 mixed precision and gradient checkpointing. We don't show 8 bit adam + lora as it's about the same memory use as just using lora (bitsandbytes uses full precision optimizer states for weights below a minimum size).

### Finetuning the 256 checkpoint

These examples finetune on this [nouns](https://huggingface.co/datasets/m1guelpf/nouns) dataset.

Example results:

![noun1](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/noun1.png) ![noun2](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/noun2.png) ![noun3](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/noun3.png)

#### Full finetuning

Batch size: 8, Learning rate: 1e-4, Gives decent results in 750-1000 steps

| Batch Size | Gradient Accumulation Steps | Effective Total Batch Size | Memory Used |
|------------|-----------------------------|----------------------------|-------------|
| 8          | 1                           | 8                          | 19.7 GB     |
| 4          | 2                           | 8                          | 18.3 GB     |
| 1          | 8                           | 8                          | 17.9 GB     |

```sh
accelerate launch train_amused.py \
    --output_dir <output path> \
    --train_batch_size <batch size> \
    --gradient_accumulation_steps <gradient accumulation steps> \
    --learning_rate 1e-4 \
    --pretrained_model_name_or_path amused/amused-256 \
    --instance_data_dataset 'm1guelpf/nouns' \
    --image_key image \
    --prompt_key text \
    --resolution 256 \
    --mixed_precision fp16 \
    --lr_scheduler constant \
    --validation_prompts \
        'a pixel art character with square red glasses, a baseball-shaped head and a orange-colored body on a dark background' \
        'a pixel art character with square orange glasses, a lips-shaped head and a red-colored body on a light background' \
        'a pixel art character with square blue glasses, a microwave-shaped head and a purple-colored body on a sunny background' \
        'a pixel art character with square red glasses, a baseball-shaped head and a blue-colored body on an orange background' \
        'a pixel art character with square red glasses' \
        'a pixel art character' \
        'square red glasses on a pixel art character' \
        'square red glasses on a pixel art character with a baseball-shaped head' \
    --max_train_steps 10000 \
    --checkpointing_steps 500 \
    --validation_steps 250 \
    --gradient_checkpointing
```

#### Full finetuning + 8 bit adam

Note that this training config keeps the batch size low and the learning rate high to get results fast with low resources. However, due to 8 bit adam, it will diverge eventually. If you want to train for longer, you will have to up the batch size and lower the learning rate.
Batch size: 16, Learning rate: 2e-5, Gives decent results in ~750 steps | Batch Size | Gradient Accumulation Steps | Effective Total Batch Size | Memory Used | |------------|-----------------------------|------------------|-------------| | 16 | 1 | 16 | 20.1 GB | | 8 | 2 | 16 | 15.6 GB | | 1 | 16 | 16 | 10.7 GB | ```sh accelerate launch train_amused.py \ --output_dir <output path> \ --train_batch_size <batch size> \ --gradient_accumulation_steps <gradient accumulation steps> \ --learning_rate 2e-5 \ --use_8bit_adam \ --pretrained_model_name_or_path amused/amused-256 \ --instance_data_dataset 'm1guelpf/nouns' \ --image_key image \ --prompt_key text \ --resolution 256 \ --mixed_precision fp16 \ --lr_scheduler constant \ --validation_prompts \ 'a pixel art character with square red glasses, a baseball-shaped head and a orange-colored body on a dark background' \ 'a pixel art character with square orange glasses, a lips-shaped head and a red-colored body on a light background' \ 'a pixel art character with square blue glasses, a microwave-shaped head and a purple-colored body on a sunny background' \ 'a pixel art character with square red glasses, a baseball-shaped head and a blue-colored body on an orange background' \ 'a pixel art character with square red glasses' \ 'a pixel art character' \ 'square red glasses on a pixel art character' \ 'square red glasses on a pixel art character with a baseball-shaped head' \ --max_train_steps 10000 \ --checkpointing_steps 500 \ --validation_steps 250 \ --gradient_checkpointing ``` #### Full finetuning + lora Batch size: 16, Learning rate: 8e-4, Gives decent results in 1000-1250 steps | Batch Size | Gradient Accumulation Steps | Effective Total Batch Size | Memory Used | |------------|-----------------------------|------------------|-------------| | 16 | 1 | 16 | 14.1 GB | | 8 | 2 | 16 | 10.1 GB | | 1 | 16 | 16 | 6.5 GB | ```sh accelerate launch train_amused.py \ --output_dir <output path> \ --train_batch_size <batch size> \ --gradient_accumulation_steps <gradient accumulation steps> \ --learning_rate 8e-4 \ --use_lora \ --pretrained_model_name_or_path amused/amused-256 \ --instance_data_dataset 'm1guelpf/nouns' \ --image_key image \ --prompt_key text \ --resolution 256 \ --mixed_precision fp16 \ --lr_scheduler constant \ --validation_prompts \ 'a pixel art character with square red glasses, a baseball-shaped head and a orange-colored body on a dark background' \ 'a pixel art character with square orange glasses, a lips-shaped head and a red-colored body on a light background' \ 'a pixel art character with square blue glasses, a microwave-shaped head and a purple-colored body on a sunny background' \ 'a pixel art character with square red glasses, a baseball-shaped head and a blue-colored body on an orange background' \ 'a pixel art character with square red glasses' \ 'a pixel art character' \ 'square red glasses on a pixel art character' \ 'square red glasses on a pixel art character with a baseball-shaped head' \ --max_train_steps 10000 \ --checkpointing_steps 500 \ --validation_steps 250 \ --gradient_checkpointing ``` ### Finetuning the 512 checkpoint These examples finetune on this [minecraft](https://huggingface.co/monadical-labs/minecraft-preview) dataset. 
Example results: ![minecraft1](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/minecraft1.png) ![minecraft2](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/minecraft2.png) ![minecraft3](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/minecraft3.png) #### Full finetuning Batch size: 8, Learning rate: 8e-5, Gives decent results in 500-1000 steps | Batch Size | Gradient Accumulation Steps | Effective Total Batch Size | Memory Used | |------------|-----------------------------|------------------|-------------| | 8 | 1 | 8 | 24.2 GB | | 4 | 2 | 8 | 19.7 GB | | 1 | 8 | 8 | 16.99 GB | ```sh accelerate launch train_amused.py \ --output_dir <output path> \ --train_batch_size <batch size> \ --gradient_accumulation_steps <gradient accumulation steps> \ --learning_rate 8e-5 \ --pretrained_model_name_or_path amused/amused-512 \ --instance_data_dataset 'monadical-labs/minecraft-preview' \ --prompt_prefix 'minecraft ' \ --image_key image \ --prompt_key text \ --resolution 512 \ --mixed_precision fp16 \ --lr_scheduler constant \ --validation_prompts \ 'minecraft Avatar' \ 'minecraft character' \ 'minecraft' \ 'minecraft president' \ 'minecraft pig' \ --max_train_steps 10000 \ --checkpointing_steps 500 \ --validation_steps 250 \ --gradient_checkpointing ``` #### Full finetuning + 8 bit adam Batch size: 8, Learning rate: 5e-6, Gives decent results in 500-1000 steps | Batch Size | Gradient Accumulation Steps | Effective Total Batch Size | Memory Used | |------------|-----------------------------|------------------|-------------| | 8 | 1 | 8 | 21.2 GB | | 4 | 2 | 8 | 13.3 GB | | 1 | 8 | 8 | 9.9 GB | ```sh accelerate launch train_amused.py \ --output_dir <output path> \ --train_batch_size <batch size> \ --gradient_accumulation_steps <gradient accumulation steps> \ --learning_rate 5e-6 \ --pretrained_model_name_or_path amused/amused-512 \ --instance_data_dataset 'monadical-labs/minecraft-preview' \ --prompt_prefix 'minecraft ' \ --image_key image \ --prompt_key text \ --resolution 512 \ --mixed_precision fp16 \ --lr_scheduler constant \ --validation_prompts \ 'minecraft Avatar' \ 'minecraft character' \ 'minecraft' \ 'minecraft president' \ 'minecraft pig' \ --max_train_steps 10000 \ --checkpointing_steps 500 \ --validation_steps 250 \ --gradient_checkpointing ``` #### Full finetuning + lora Batch size: 8, Learning rate: 1e-4, Gives decent results in 500-1000 steps | Batch Size | Gradient Accumulation Steps | Effective Total Batch Size | Memory Used | |------------|-----------------------------|------------------|-------------| | 8 | 1 | 8 | 12.7 GB | | 4 | 2 | 8 | 9.0 GB | | 1 | 8 | 8 | 5.6 GB | ```sh accelerate launch train_amused.py \ --output_dir <output path> \ --train_batch_size <batch size> \ --gradient_accumulation_steps <gradient accumulation steps> \ --learning_rate 1e-4 \ --use_lora \ --pretrained_model_name_or_path amused/amused-512 \ --instance_data_dataset 'monadical-labs/minecraft-preview' \ --prompt_prefix 'minecraft ' \ --image_key image \ --prompt_key text \ --resolution 512 \ --mixed_precision fp16 \ --lr_scheduler constant \ --validation_prompts \ 'minecraft Avatar' \ 'minecraft character' \ 'minecraft' \ 'minecraft president' \ 'minecraft pig' \ --max_train_steps 10000 \ --checkpointing_steps 500 \ --validation_steps 250 \ --gradient_checkpointing ``` ### Styledrop [Styledrop](https://arxiv.org/abs/2306.00983) is an efficient finetuning method for learning a new style from just one or very few images. 
It has an optional first stage to generate human picked additional training samples. The additional training samples can be used to augment the initial images. Our examples exclude the optional additional image selection stage and instead we just finetune on a single image. This is our example style image: ![example](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/A%20mushroom%20in%20%5BV%5D%20style.png) Download it to your local directory with ```sh wget https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/A%20mushroom%20in%20%5BV%5D%20style.png ``` #### 256 Example results: ![glowing_256_1](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/glowing_256_1.png) ![glowing_256_2](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/glowing_256_2.png) ![glowing_256_3](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/glowing_256_3.png) Learning rate: 4e-4, Gives decent results in 1500-2000 steps Memory used: 6.5 GB ```sh accelerate launch train_amused.py \ --output_dir <output path> \ --mixed_precision fp16 \ --report_to wandb \ --use_lora \ --pretrained_model_name_or_path amused/amused-256 \ --train_batch_size 1 \ --lr_scheduler constant \ --learning_rate 4e-4 \ --validation_prompts \ 'A chihuahua walking on the street in [V] style' \ 'A banana on the table in [V] style' \ 'A church on the street in [V] style' \ 'A tabby cat walking in the forest in [V] style' \ --instance_data_image 'A mushroom in [V] style.png' \ --max_train_steps 10000 \ --checkpointing_steps 500 \ --validation_steps 100 \ --resolution 256 ``` #### 512 Example results: ![glowing_512_1](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/glowing_512_1.png) ![glowing_512_2](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/glowing_512_2.png) ![glowing_512_3](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/glowing_512_3.png) Learning rate: 1e-3, Lora alpha 1, Gives decent results in 1500-2000 steps Memory used: 5.6 GB ``` accelerate launch train_amused.py \ --output_dir <output path> \ --mixed_precision fp16 \ --report_to wandb \ --use_lora \ --pretrained_model_name_or_path amused/amused-512 \ --train_batch_size 1 \ --lr_scheduler constant \ --learning_rate 1e-3 \ --validation_prompts \ 'A chihuahua walking on the street in [V] style' \ 'A banana on the table in [V] style' \ 'A church on the street in [V] style' \ 'A tabby cat walking in the forest in [V] style' \ --instance_data_image 'A mushroom in [V] style.png' \ --max_train_steps 100000 \ --checkpointing_steps 500 \ --validation_steps 100 \ --resolution 512 \ --lora_alpha 1 ```
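Once a run has finished you will usually want to sample from it. The sketch below is only a rough illustration: it assumes the training run saved a complete pipeline to `<output path>` (the same placeholder used by the commands above; LoRA-only runs store adapter weights instead and need to be loaded accordingly), and the prompt simply mirrors one of the validation prompts used during training:

```py
import torch
from diffusers import AmusedPipeline

pipe = AmusedPipeline.from_pretrained("<output path>", torch_dtype=torch.float16)
pipe.to("cuda")

image = pipe("A chihuahua walking on the street in [V] style").images[0]
image.save("styledrop_sample.png")
```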
diffusers/examples/amused/README.md/0
{ "file_path": "diffusers/examples/amused/README.md", "repo_id": "diffusers", "token_count": 5921 }
98
#!/usr/bin/env python3
import torch

from diffusers import DiffusionPipeline


class UnetSchedulerOneForwardPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        # Start from pure Gaussian noise shaped like a single model sample.
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        # One UNet forward pass followed by a single scheduler step.
        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample

        # Equivalent to a tensor of ones with the same shape as the scheduler output,
        # which gives a deterministic result that is handy for smoke tests.
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)

        return result
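# Illustrative usage (an addition, not part of the original community file). Loading
# through `custom_pipeline="one_step_unet"` uses the standard community-pipeline
# mechanism; "google/ddpm-cifar10-32" is only an example checkpoint that ships a
# compatible `unet` and `scheduler`.
if __name__ == "__main__":
    pipe = DiffusionPipeline.from_pretrained("google/ddpm-cifar10-32", custom_pipeline="one_step_unet")
    out = pipe()  # one UNet forward pass followed by a single scheduler step
    print(out.shape)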
diffusers/examples/community/one_step_unet.py/0
{ "file_path": "diffusers/examples/community/one_step_unet.py", "repo_id": "diffusers", "token_count": 299 }
99
#!/usr/bin/env python # coding=utf-8 # Copyright 2023 Custom Diffusion authors and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and import argparse import itertools import json import logging import math import os import random import shutil import warnings from pathlib import Path import numpy as np import safetensors import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import ProjectConfiguration, set_seed from huggingface_hub import HfApi, create_repo from huggingface_hub.utils import insecure_hashlib from packaging import version from PIL import Image from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig import diffusers from diffusers import ( AutoencoderKL, DDPMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, UNet2DConditionModel, ) from diffusers.loaders import AttnProcsLayers from diffusers.models.attention_processor import ( CustomDiffusionAttnProcessor, CustomDiffusionAttnProcessor2_0, CustomDiffusionXFormersAttnProcessor, ) from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version, is_wandb_available from diffusers.utils.import_utils import is_xformers_available # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.26.0.dev0") logger = get_logger(__name__) def freeze_params(params): for param in params: param.requires_grad = False def save_model_card(repo_id: str, images=None, base_model=str, prompt=str, repo_folder=None): img_str = "" for i, image in enumerate(images): image.save(os.path.join(repo_folder, f"image_{i}.png")) img_str += f"![img_{i}](./image_{i}.png)\n" yaml = f""" --- license: creativeml-openrail-m base_model: {base_model} instance_prompt: {prompt} tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers - custom-diffusion inference: true --- """ model_card = f""" # Custom Diffusion - {repo_id} These are Custom Diffusion adaption weights for {base_model}. The weights were trained on {prompt} using [Custom Diffusion](https://www.cs.cmu.edu/~custom-diffusion). You can find some example images in the following. \n {img_str} \nFor more details on the training, please follow [this link](https://github.com/huggingface/diffusers/blob/main/examples/custom_diffusion). 
""" with open(os.path.join(repo_folder, "README.md"), "w") as f: f.write(yaml + model_card) def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str): text_encoder_config = PretrainedConfig.from_pretrained( pretrained_model_name_or_path, subfolder="text_encoder", revision=revision, ) model_class = text_encoder_config.architectures[0] if model_class == "CLIPTextModel": from transformers import CLIPTextModel return CLIPTextModel elif model_class == "RobertaSeriesModelWithTransformation": from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation return RobertaSeriesModelWithTransformation else: raise ValueError(f"{model_class} is not supported.") def collate_fn(examples, with_prior_preservation): input_ids = [example["instance_prompt_ids"] for example in examples] pixel_values = [example["instance_images"] for example in examples] mask = [example["mask"] for example in examples] # Concat class and instance examples for prior preservation. # We do this to avoid doing two forward passes. if with_prior_preservation: input_ids += [example["class_prompt_ids"] for example in examples] pixel_values += [example["class_images"] for example in examples] mask += [example["class_mask"] for example in examples] input_ids = torch.cat(input_ids, dim=0) pixel_values = torch.stack(pixel_values) mask = torch.stack(mask) pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() mask = mask.to(memory_format=torch.contiguous_format).float() batch = {"input_ids": input_ids, "pixel_values": pixel_values, "mask": mask.unsqueeze(1)} return batch class PromptDataset(Dataset): "A simple dataset to prepare the prompts to generate class images on multiple GPUs." def __init__(self, prompt, num_samples): self.prompt = prompt self.num_samples = num_samples def __len__(self): return self.num_samples def __getitem__(self, index): example = {} example["prompt"] = self.prompt example["index"] = index return example class CustomDiffusionDataset(Dataset): """ A dataset to prepare the instance and class images with the prompts for fine-tuning the model. It pre-processes the images and the tokenizes prompts. 
""" def __init__( self, concepts_list, tokenizer, size=512, mask_size=64, center_crop=False, with_prior_preservation=False, num_class_images=200, hflip=False, aug=True, ): self.size = size self.mask_size = mask_size self.center_crop = center_crop self.tokenizer = tokenizer self.interpolation = Image.BILINEAR self.aug = aug self.instance_images_path = [] self.class_images_path = [] self.with_prior_preservation = with_prior_preservation for concept in concepts_list: inst_img_path = [ (x, concept["instance_prompt"]) for x in Path(concept["instance_data_dir"]).iterdir() if x.is_file() ] self.instance_images_path.extend(inst_img_path) if with_prior_preservation: class_data_root = Path(concept["class_data_dir"]) if os.path.isdir(class_data_root): class_images_path = list(class_data_root.iterdir()) class_prompt = [concept["class_prompt"] for _ in range(len(class_images_path))] else: with open(class_data_root, "r") as f: class_images_path = f.read().splitlines() with open(concept["class_prompt"], "r") as f: class_prompt = f.read().splitlines() class_img_path = list(zip(class_images_path, class_prompt)) self.class_images_path.extend(class_img_path[:num_class_images]) random.shuffle(self.instance_images_path) self.num_instance_images = len(self.instance_images_path) self.num_class_images = len(self.class_images_path) self._length = max(self.num_class_images, self.num_instance_images) self.flip = transforms.RandomHorizontalFlip(0.5 * hflip) self.image_transforms = transforms.Compose( [ self.flip, transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def __len__(self): return self._length def preprocess(self, image, scale, resample): outer, inner = self.size, scale factor = self.size // self.mask_size if scale > self.size: outer, inner = scale, self.size top, left = np.random.randint(0, outer - inner + 1), np.random.randint(0, outer - inner + 1) image = image.resize((scale, scale), resample=resample) image = np.array(image).astype(np.uint8) image = (image / 127.5 - 1.0).astype(np.float32) instance_image = np.zeros((self.size, self.size, 3), dtype=np.float32) mask = np.zeros((self.size // factor, self.size // factor)) if scale > self.size: instance_image = image[top : top + inner, left : left + inner, :] mask = np.ones((self.size // factor, self.size // factor)) else: instance_image[top : top + inner, left : left + inner, :] = image mask[ top // factor + 1 : (top + scale) // factor - 1, left // factor + 1 : (left + scale) // factor - 1 ] = 1.0 return instance_image, mask def __getitem__(self, index): example = {} instance_image, instance_prompt = self.instance_images_path[index % self.num_instance_images] instance_image = Image.open(instance_image) if not instance_image.mode == "RGB": instance_image = instance_image.convert("RGB") instance_image = self.flip(instance_image) # apply resize augmentation and create a valid image region mask random_scale = self.size if self.aug: random_scale = ( np.random.randint(self.size // 3, self.size + 1) if np.random.uniform() < 0.66 else np.random.randint(int(1.2 * self.size), int(1.4 * self.size)) ) instance_image, mask = self.preprocess(instance_image, random_scale, self.interpolation) if random_scale < 0.6 * self.size: instance_prompt = np.random.choice(["a far away ", "very small "]) + instance_prompt elif random_scale > self.size: instance_prompt = np.random.choice(["zoomed in ", "close up "]) + 
instance_prompt example["instance_images"] = torch.from_numpy(instance_image).permute(2, 0, 1) example["mask"] = torch.from_numpy(mask) example["instance_prompt_ids"] = self.tokenizer( instance_prompt, truncation=True, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", ).input_ids if self.with_prior_preservation: class_image, class_prompt = self.class_images_path[index % self.num_class_images] class_image = Image.open(class_image) if not class_image.mode == "RGB": class_image = class_image.convert("RGB") example["class_images"] = self.image_transforms(class_image) example["class_mask"] = torch.ones_like(example["mask"]) example["class_prompt_ids"] = self.tokenizer( class_prompt, truncation=True, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", ).input_ids return example def save_new_embed(text_encoder, modifier_token_id, accelerator, args, output_dir, safe_serialization=True): """Saves the new token embeddings from the text encoder.""" logger.info("Saving embeddings") learned_embeds = accelerator.unwrap_model(text_encoder).get_input_embeddings().weight for x, y in zip(modifier_token_id, args.modifier_token): learned_embeds_dict = {} learned_embeds_dict[y] = learned_embeds[x] filename = f"{output_dir}/{y}.bin" if safe_serialization: safetensors.torch.save_file(learned_embeds_dict, filename, metadata={"format": "pt"}) else: torch.save(learned_embeds_dict, filename) def parse_args(input_args=None): parser = argparse.ArgumentParser(description="Custom Diffusion training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--variant", type=str, default=None, help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--instance_data_dir", type=str, default=None, help="A folder containing the training data of instance images.", ) parser.add_argument( "--class_data_dir", type=str, default=None, help="A folder containing the training data of class images.", ) parser.add_argument( "--instance_prompt", type=str, default=None, help="The prompt with identifier specifying the instance", ) parser.add_argument( "--class_prompt", type=str, default=None, help="The prompt to specify images in the same class as provided instance images.", ) parser.add_argument( "--validation_prompt", type=str, default=None, help="A prompt that is used during validation to verify that the model is learning.", ) parser.add_argument( "--num_validation_images", type=int, default=2, help="Number of images that should be generated during validation with `validation_prompt`.", ) parser.add_argument( "--validation_steps", type=int, default=50, help=( "Run dreambooth validation every X epochs. Dreambooth validation consists of running the prompt" " `args.validation_prompt` multiple times: `args.num_validation_images`." 
), ) parser.add_argument( "--with_prior_preservation", default=False, action="store_true", help="Flag to add prior preservation loss.", ) parser.add_argument( "--real_prior", default=False, action="store_true", help="real images as prior.", ) parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.") parser.add_argument( "--num_class_images", type=int, default=200, help=( "Minimal class images for prior preservation loss. If there are not enough images already present in" " class_data_dir, additional images will be sampled with class_prompt." ), ) parser.add_argument( "--output_dir", type=str, default="custom-diffusion-model", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument("--seed", type=int, default=42, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", default=False, action="store_true", help=( "Whether to center crop the input images to the resolution. If not set, the images will be randomly" " cropped. The images will be resized to the resolution first before cropping." ), ) parser.add_argument( "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." ) parser.add_argument( "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images." ) parser.add_argument("--num_train_epochs", type=int, default=1) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--checkpointing_steps", type=int, default=250, help=( "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final" " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--checkpoints_total_limit", type=int, default=None, help=("Max number of checkpoints to store."), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=1e-5, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--dataloader_num_workers", type=int, default=2, help=( "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." 
), ) parser.add_argument( "--freeze_model", type=str, default="crossattn_kv", choices=["crossattn_kv", "crossattn"], help="crossattn to enable fine-tuning of all params in the cross attention", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument( "--prior_generation_precision", type=str, default=None, choices=["no", "fp32", "fp16", "bf16"], help=( "Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to fp16 if a GPU is available else fp32." ), ) parser.add_argument( "--concepts_list", type=str, default=None, help="Path to json containing multiple concepts, will overwrite parameters like instance_prompt, class_prompt, etc.", ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." 
) parser.add_argument( "--set_grads_to_none", action="store_true", help=( "Save more memory by using setting grads to None instead of zero. Be aware, that this changes certain" " behaviors, so disable this argument if it causes any problems. More info:" " https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html" ), ) parser.add_argument( "--modifier_token", type=str, default=None, help="A token to use as a modifier for the concept.", ) parser.add_argument( "--initializer_token", type=str, default="ktn+pll+ucd", help="A token to use as initializer word." ) parser.add_argument("--hflip", action="store_true", help="Apply horizontal flip data augmentation.") parser.add_argument( "--noaug", action="store_true", help="Dont apply augmentation during data augmentation when this flag is enabled.", ) parser.add_argument( "--no_safe_serialization", action="store_true", help="If specified save the checkpoint not in `safetensors` format, but in original PyTorch format instead.", ) if input_args is not None: args = parser.parse_args(input_args) else: args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank if args.with_prior_preservation: if args.concepts_list is None: if args.class_data_dir is None: raise ValueError("You must specify a data directory for class images.") if args.class_prompt is None: raise ValueError("You must specify prompt for class images.") else: # logger is not available yet if args.class_data_dir is not None: warnings.warn("You need not use --class_data_dir without --with_prior_preservation.") if args.class_prompt is not None: warnings.warn("You need not use --class_prompt without --with_prior_preservation.") return args def main(args): logging_dir = Path(args.output_dir, args.logging_dir) accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config, ) if args.report_to == "wandb": if not is_wandb_available(): raise ImportError("Make sure to install wandb if you want to use it for logging during training.") import wandb # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models. # TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate. # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: accelerator.init_trackers("custom-diffusion", config=vars(args)) # If passed along, set the training seed now. 
if args.seed is not None: set_seed(args.seed) if args.concepts_list is None: args.concepts_list = [ { "instance_prompt": args.instance_prompt, "class_prompt": args.class_prompt, "instance_data_dir": args.instance_data_dir, "class_data_dir": args.class_data_dir, } ] else: with open(args.concepts_list, "r") as f: args.concepts_list = json.load(f) # Generate class images if prior preservation is enabled. if args.with_prior_preservation: for i, concept in enumerate(args.concepts_list): class_images_dir = Path(concept["class_data_dir"]) if not class_images_dir.exists(): class_images_dir.mkdir(parents=True, exist_ok=True) if args.real_prior: assert ( class_images_dir / "images" ).exists(), f"Please run: python retrieve.py --class_prompt \"{concept['class_prompt']}\" --class_data_dir {class_images_dir} --num_class_images {args.num_class_images}" assert ( len(list((class_images_dir / "images").iterdir())) == args.num_class_images ), f"Please run: python retrieve.py --class_prompt \"{concept['class_prompt']}\" --class_data_dir {class_images_dir} --num_class_images {args.num_class_images}" assert ( class_images_dir / "caption.txt" ).exists(), f"Please run: python retrieve.py --class_prompt \"{concept['class_prompt']}\" --class_data_dir {class_images_dir} --num_class_images {args.num_class_images}" assert ( class_images_dir / "images.txt" ).exists(), f"Please run: python retrieve.py --class_prompt \"{concept['class_prompt']}\" --class_data_dir {class_images_dir} --num_class_images {args.num_class_images}" concept["class_prompt"] = os.path.join(class_images_dir, "caption.txt") concept["class_data_dir"] = os.path.join(class_images_dir, "images.txt") args.concepts_list[i] = concept accelerator.wait_for_everyone() else: cur_class_images = len(list(class_images_dir.iterdir())) if cur_class_images < args.num_class_images: torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32 if args.prior_generation_precision == "fp32": torch_dtype = torch.float32 elif args.prior_generation_precision == "fp16": torch_dtype = torch.float16 elif args.prior_generation_precision == "bf16": torch_dtype = torch.bfloat16 pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, torch_dtype=torch_dtype, safety_checker=None, revision=args.revision, variant=args.variant, ) pipeline.set_progress_bar_config(disable=True) num_new_images = args.num_class_images - cur_class_images logger.info(f"Number of class images to sample: {num_new_images}.") sample_dataset = PromptDataset(concept["class_prompt"], num_new_images) sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size) sample_dataloader = accelerator.prepare(sample_dataloader) pipeline.to(accelerator.device) for example in tqdm( sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process, ): images = pipeline(example["prompt"]).images for i, image in enumerate(images): hash_image = insecure_hashlib.sha1(image.tobytes()).hexdigest() image_filename = ( class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg" ) image.save(image_filename) del pipeline if torch.cuda.is_available(): torch.cuda.empty_cache() # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # Load the tokenizer if args.tokenizer_name: 
tokenizer = AutoTokenizer.from_pretrained( args.tokenizer_name, revision=args.revision, use_fast=False, ) elif args.pretrained_model_name_or_path: tokenizer = AutoTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision, use_fast=False, ) # import correct text encoder class text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision) # Load scheduler and models noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") text_encoder = text_encoder_cls.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant ) vae = AutoencoderKL.from_pretrained( args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision, variant=args.variant ) unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, variant=args.variant ) # Adding a modifier token which is optimized #### # Code taken from https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion.py modifier_token_id = [] initializer_token_id = [] if args.modifier_token is not None: args.modifier_token = args.modifier_token.split("+") args.initializer_token = args.initializer_token.split("+") if len(args.modifier_token) > len(args.initializer_token): raise ValueError("You must specify + separated initializer token for each modifier token.") for modifier_token, initializer_token in zip( args.modifier_token, args.initializer_token[: len(args.modifier_token)] ): # Add the placeholder token in tokenizer num_added_tokens = tokenizer.add_tokens(modifier_token) if num_added_tokens == 0: raise ValueError( f"The tokenizer already contains the token {modifier_token}. Please pass a different" " `modifier_token` that is not already in the tokenizer." ) # Convert the initializer_token, placeholder_token to ids token_ids = tokenizer.encode([initializer_token], add_special_tokens=False) print(token_ids) # Check if initializer_token is a single token or a sequence of tokens if len(token_ids) > 1: raise ValueError("The initializer token must be a single token.") initializer_token_id.append(token_ids[0]) modifier_token_id.append(tokenizer.convert_tokens_to_ids(modifier_token)) # Resize the token embeddings as we are adding new special tokens to the tokenizer text_encoder.resize_token_embeddings(len(tokenizer)) # Initialise the newly added placeholder token with the embeddings of the initializer token token_embeds = text_encoder.get_input_embeddings().weight.data for x, y in zip(modifier_token_id, initializer_token_id): token_embeds[x] = token_embeds[y] # Freeze all parameters except for the token embeddings in text encoder params_to_freeze = itertools.chain( text_encoder.text_model.encoder.parameters(), text_encoder.text_model.final_layer_norm.parameters(), text_encoder.text_model.embeddings.position_embedding.parameters(), ) freeze_params(params_to_freeze) ######################################################## ######################################################## vae.requires_grad_(False) if args.modifier_token is None: text_encoder.requires_grad_(False) unet.requires_grad_(False) # For mixed precision training we cast the text_encoder and vae weights to half-precision # as these models are only used for inference, keeping weights in full precision is not required. 
weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move unet, vae and text_encoder to device and cast to weight_dtype if accelerator.mixed_precision != "fp16" and args.modifier_token is not None: text_encoder.to(accelerator.device, dtype=weight_dtype) unet.to(accelerator.device, dtype=weight_dtype) vae.to(accelerator.device, dtype=weight_dtype) attention_class = ( CustomDiffusionAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else CustomDiffusionAttnProcessor ) if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): import xformers xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): logger.warn( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) attention_class = CustomDiffusionXFormersAttnProcessor else: raise ValueError("xformers is not available. Make sure it is installed correctly") # now we will add new Custom Diffusion weights to the attention layers # It's important to realize here how many attention weights will be added and of which sizes # The sizes of the attention layers consist only of two different variables: # 1) - the "hidden_size", which is increased according to `unet.config.block_out_channels`. # 2) - the "cross attention size", which is set to `unet.config.cross_attention_dim`. # Let's first see how many attention processors we will have to set. # For Stable Diffusion, it should be equal to: # - down blocks (2x attention layers) * (2x transformer layers) * (3x down blocks) = 12 # - mid blocks (2x attention layers) * (1x transformer layers) * (1x mid blocks) = 2 # - up blocks (2x attention layers) * (3x transformer layers) * (3x down blocks) = 18 # => 32 layers # Only train key, value projection layers if freeze_model = 'crossattn_kv' else train all params in the cross attention layer train_kv = True train_q_out = False if args.freeze_model == "crossattn_kv" else True custom_diffusion_attn_procs = {} st = unet.state_dict() for name, _ in unet.attn_processors.items(): cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim if name.startswith("mid_block"): hidden_size = unet.config.block_out_channels[-1] elif name.startswith("up_blocks"): block_id = int(name[len("up_blocks.")]) hidden_size = list(reversed(unet.config.block_out_channels))[block_id] elif name.startswith("down_blocks"): block_id = int(name[len("down_blocks.")]) hidden_size = unet.config.block_out_channels[block_id] layer_name = name.split(".processor")[0] weights = { "to_k_custom_diffusion.weight": st[layer_name + ".to_k.weight"], "to_v_custom_diffusion.weight": st[layer_name + ".to_v.weight"], } if train_q_out: weights["to_q_custom_diffusion.weight"] = st[layer_name + ".to_q.weight"] weights["to_out_custom_diffusion.0.weight"] = st[layer_name + ".to_out.0.weight"] weights["to_out_custom_diffusion.0.bias"] = st[layer_name + ".to_out.0.bias"] if cross_attention_dim is not None: custom_diffusion_attn_procs[name] = attention_class( train_kv=train_kv, train_q_out=train_q_out, hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, ).to(unet.device) custom_diffusion_attn_procs[name].load_state_dict(weights) else: custom_diffusion_attn_procs[name] = attention_class( 
train_kv=False, train_q_out=False, hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, ) del st unet.set_attn_processor(custom_diffusion_attn_procs) custom_diffusion_layers = AttnProcsLayers(unet.attn_processors) accelerator.register_for_checkpointing(custom_diffusion_layers) if args.gradient_checkpointing: unet.enable_gradient_checkpointing() if args.modifier_token is not None: text_encoder.gradient_checkpointing_enable() # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) if args.with_prior_preservation: args.learning_rate = args.learning_rate * 2.0 # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." ) optimizer_class = bnb.optim.AdamW8bit else: optimizer_class = torch.optim.AdamW # Optimizer creation optimizer = optimizer_class( itertools.chain(text_encoder.get_input_embeddings().parameters(), custom_diffusion_layers.parameters()) if args.modifier_token is not None else custom_diffusion_layers.parameters(), lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) # Dataset and DataLoaders creation: train_dataset = CustomDiffusionDataset( concepts_list=args.concepts_list, tokenizer=tokenizer, with_prior_preservation=args.with_prior_preservation, size=args.resolution, mask_size=vae.encode( torch.randn(1, 3, args.resolution, args.resolution).to(dtype=weight_dtype).to(accelerator.device) ) .latent_dist.sample() .size()[-1], center_crop=args.center_crop, num_class_images=args.num_class_images, hflip=args.hflip, aug=not args.noaug, ) train_dataloader = torch.utils.data.DataLoader( train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=lambda examples: collate_fn(examples, args.with_prior_preservation), num_workers=args.dataloader_num_workers, ) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, num_training_steps=args.max_train_steps * accelerator.num_processes, ) # Prepare everything with our `accelerator`. if args.modifier_token is not None: custom_diffusion_layers, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( custom_diffusion_layers, text_encoder, optimizer, train_dataloader, lr_scheduler ) else: custom_diffusion_layers, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( custom_diffusion_layers, optimizer, train_dataloader, lr_scheduler ) # We need to recalculate our total training steps as the size of the training dataloader may have changed. 
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num batches each epoch = {len(train_dataloader)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." ) args.resume_from_checkpoint = None initial_global_step = 0 else: accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) initial_global_step = global_step first_epoch = global_step // num_update_steps_per_epoch else: initial_global_step = 0 progress_bar = tqdm( range(0, args.max_train_steps), initial=initial_global_step, desc="Steps", # Only show the progress bar once on each machine. 
disable=not accelerator.is_local_main_process, ) for epoch in range(first_epoch, args.num_train_epochs): unet.train() if args.modifier_token is not None: text_encoder.train() for step, batch in enumerate(train_dataloader): with accelerator.accumulate(unet), accelerator.accumulate(text_encoder): # Convert images to latent space latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample() latents = latents * vae.config.scaling_factor # Sample noise that we'll add to the latents noise = torch.randn_like(latents) bsz = latents.shape[0] # Sample a random timestep for each image timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device) timesteps = timesteps.long() # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) # Get the text embedding for conditioning encoder_hidden_states = text_encoder(batch["input_ids"])[0] # Predict the noise residual model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample # Get the target for loss depending on the prediction type if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(latents, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") if args.with_prior_preservation: # Chunk the noise and model_pred into two parts and compute the loss on each part separately. model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0) target, target_prior = torch.chunk(target, 2, dim=0) mask = torch.chunk(batch["mask"], 2, dim=0)[0] # Compute instance loss loss = F.mse_loss(model_pred.float(), target.float(), reduction="none") loss = ((loss * mask).sum([1, 2, 3]) / mask.sum([1, 2, 3])).mean() # Compute prior loss prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean") # Add the prior loss to the instance loss. 
loss = loss + args.prior_loss_weight * prior_loss else: mask = batch["mask"] loss = F.mse_loss(model_pred.float(), target.float(), reduction="none") loss = ((loss * mask).sum([1, 2, 3]) / mask.sum([1, 2, 3])).mean() accelerator.backward(loss) # Zero out the gradients for all token embeddings except the newly added # embeddings for the concept, as we only want to optimize the concept embeddings if args.modifier_token is not None: if accelerator.num_processes > 1: grads_text_encoder = text_encoder.module.get_input_embeddings().weight.grad else: grads_text_encoder = text_encoder.get_input_embeddings().weight.grad # Get the index for tokens that we want to zero the grads for index_grads_to_zero = torch.arange(len(tokenizer)) != modifier_token_id[0] for i in range(len(modifier_token_id[1:])): index_grads_to_zero = index_grads_to_zero & ( torch.arange(len(tokenizer)) != modifier_token_id[i] ) grads_text_encoder.data[index_grads_to_zero, :] = grads_text_encoder.data[ index_grads_to_zero, : ].fill_(0) if accelerator.sync_gradients: params_to_clip = ( itertools.chain(text_encoder.parameters(), custom_diffusion_layers.parameters()) if args.modifier_token is not None else custom_diffusion_layers.parameters() ) accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad(set_to_none=args.set_grads_to_none) # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) global_step += 1 if global_step % args.checkpointing_steps == 0: if accelerator.is_main_process: # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` if args.checkpoints_total_limit is not None: checkpoints = os.listdir(args.output_dir) checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints if len(checkpoints) >= args.checkpoints_total_limit: num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 removing_checkpoints = checkpoints[0:num_to_remove] logger.info( f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" ) logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") for removing_checkpoint in removing_checkpoints: removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) shutil.rmtree(removing_checkpoint) save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) accelerator.log(logs, step=global_step) if global_step >= args.max_train_steps: break if accelerator.is_main_process: images = [] if args.validation_prompt is not None and global_step % args.validation_steps == 0: logger.info( f"Running validation... \n Generating {args.num_validation_images} images with prompt:" f" {args.validation_prompt}." 
) # create pipeline pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, unet=accelerator.unwrap_model(unet), text_encoder=accelerator.unwrap_model(text_encoder), tokenizer=tokenizer, revision=args.revision, variant=args.variant, torch_dtype=weight_dtype, ) pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config) pipeline = pipeline.to(accelerator.device) pipeline.set_progress_bar_config(disable=True) # run inference generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) images = [ pipeline(args.validation_prompt, num_inference_steps=25, generator=generator, eta=1.0).images[ 0 ] for _ in range(args.num_validation_images) ] for tracker in accelerator.trackers: if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in images]) tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC") if tracker.name == "wandb": tracker.log( { "validation": [ wandb.Image(image, caption=f"{i}: {args.validation_prompt}") for i, image in enumerate(images) ] } ) del pipeline torch.cuda.empty_cache() # Save the custom diffusion layers accelerator.wait_for_everyone() if accelerator.is_main_process: unet = unet.to(torch.float32) unet.save_attn_procs(args.output_dir, safe_serialization=not args.no_safe_serialization) save_new_embed( text_encoder, modifier_token_id, accelerator, args, args.output_dir, safe_serialization=not args.no_safe_serialization, ) # Final inference # Load previous pipeline pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, revision=args.revision, variant=args.variant, torch_dtype=weight_dtype ) pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config) pipeline = pipeline.to(accelerator.device) # load attention processors weight_name = ( "pytorch_custom_diffusion_weights.safetensors" if not args.no_safe_serialization else "pytorch_custom_diffusion_weights.bin" ) pipeline.unet.load_attn_procs(args.output_dir, weight_name=weight_name) for token in args.modifier_token: token_weight_name = f"{token}.safetensors" if not args.no_safe_serialization else f"{token}.bin" pipeline.load_textual_inversion(args.output_dir, weight_name=token_weight_name) # run inference if args.validation_prompt and args.num_validation_images > 0: generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None images = [ pipeline(args.validation_prompt, num_inference_steps=25, generator=generator, eta=1.0).images[0] for _ in range(args.num_validation_images) ] for tracker in accelerator.trackers: if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in images]) tracker.writer.add_images("test", np_images, epoch, dataformats="NHWC") if tracker.name == "wandb": tracker.log( { "test": [ wandb.Image(image, caption=f"{i}: {args.validation_prompt}") for i, image in enumerate(images) ] } ) if args.push_to_hub: save_model_card( repo_id, images=images, base_model=args.pretrained_model_name_or_path, prompt=args.instance_prompt, repo_folder=args.output_dir, ) api = HfApi(token=args.hub_token) api.upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) accelerator.end_training() if __name__ == "__main__": args = parse_args() main(args)
diffusers/examples/custom_diffusion/train_custom_diffusion.py/0
{ "file_path": "diffusers/examples/custom_diffusion/train_custom_diffusion.py", "repo_id": "diffusers", "token_count": 26323 }
100
# InstructPix2Pix SDXL training example

***This is based on the original InstructPix2Pix training example.***

[Stable Diffusion XL](https://huggingface.co/papers/2307.01952) (or SDXL) is the latest image generation model that is tailored towards more photorealistic outputs with more detailed imagery and composition compared to previous SD models. It leverages a three times larger UNet backbone. The increase in model parameters is mainly due to more attention blocks and a larger cross-attention context, as SDXL uses a second text encoder.

The `train_instruct_pix2pix_sdxl.py` script shows how to implement the training procedure and adapt it for Stable Diffusion XL.

***Disclaimer: Even though `train_instruct_pix2pix_sdxl.py` implements the InstructPix2Pix training procedure while being faithful to the [original implementation](https://github.com/timothybrooks/instruct-pix2pix), we have only tested it on a [small-scale dataset](https://huggingface.co/datasets/fusing/instructpix2pix-1000-samples). This can impact the end results. For better results, we recommend longer training runs with a larger dataset. [Here](https://huggingface.co/datasets/timbrooks/instructpix2pix-clip-filtered) you can find a large dataset for InstructPix2Pix training.***

## Running locally with PyTorch

### Installing the dependencies

Refer to the original InstructPix2Pix training example for installing the dependencies.

You will also need to get access to SDXL by filling out the [form](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0).

### Toy example

As mentioned before, we'll use a [small toy dataset](https://huggingface.co/datasets/fusing/instructpix2pix-1000-samples) for training. The dataset is a smaller version of the [original dataset](https://huggingface.co/datasets/timbrooks/instructpix2pix-clip-filtered) used in the InstructPix2Pix paper.

Configure environment variables such as the dataset identifier and the Stable Diffusion checkpoint:

```bash
export MODEL_NAME="stabilityai/stable-diffusion-xl-base-1.0"
export DATASET_ID="fusing/instructpix2pix-1000-samples"
```

Now, we can launch training:

```bash
accelerate launch train_instruct_pix2pix_sdxl.py \
    --pretrained_model_name_or_path=$MODEL_NAME \
    --dataset_name=$DATASET_ID \
    --enable_xformers_memory_efficient_attention \
    --resolution=256 --random_flip \
    --train_batch_size=4 --gradient_accumulation_steps=4 --gradient_checkpointing \
    --max_train_steps=15000 \
    --checkpointing_steps=5000 --checkpoints_total_limit=1 \
    --learning_rate=5e-05 --max_grad_norm=1 --lr_warmup_steps=0 \
    --conditioning_dropout_prob=0.05 \
    --seed=42 \
    --push_to_hub
```

Additionally, we support performing validation inference to monitor training progress with Weights and Biases.
You can enable this feature with `report_to="wandb"`: ```bash accelerate launch train_instruct_pix2pix_sdxl.py \ --pretrained_model_name_or_path=stabilityai/stable-diffusion-xl-base-1.0 \ --dataset_name=$DATASET_ID \ --use_ema \ --enable_xformers_memory_efficient_attention \ --resolution=512 --random_flip \ --train_batch_size=4 --gradient_accumulation_steps=4 --gradient_checkpointing \ --max_train_steps=15000 \ --checkpointing_steps=5000 --checkpoints_total_limit=1 \ --learning_rate=5e-05 --lr_warmup_steps=0 \ --conditioning_dropout_prob=0.05 \ --seed=42 \ --val_image_url_or_path="https://datasets-server.huggingface.co/assets/fusing/instructpix2pix-1000-samples/--/fusing--instructpix2pix-1000-samples/train/23/input_image/image.jpg" \ --validation_prompt="make it in japan" \ --report_to=wandb \ --push_to_hub ``` We recommend this type of validation as it can be useful for model debugging. Note that you need `wandb` installed to use this. You can install `wandb` by running `pip install wandb`. [Here](https://wandb.ai/sayakpaul/instruct-pix2pix-sdxl-new/runs/sw53gxmc), you can find an example training run that includes some validation samples and the training hyperparameters. ***Note: In the original paper, the authors observed that even when the model is trained with an image resolution of 256x256, it generalizes well to bigger resolutions such as 512x512. This is likely because of the larger dataset they used during training.*** ## Training with multiple GPUs `accelerate` allows for seamless multi-GPU training. Follow the instructions [here](https://huggingface.co/docs/accelerate/basic_tutorials/launch) for running distributed training with `accelerate`. Here is an example command: ```bash accelerate launch --mixed_precision="fp16" --multi_gpu train_instruct_pix2pix_sdxl.py \ --pretrained_model_name_or_path=stabilityai/stable-diffusion-xl-base-1.0 \ --dataset_name=$DATASET_ID \ --use_ema \ --enable_xformers_memory_efficient_attention \ --resolution=512 --random_flip \ --train_batch_size=4 --gradient_accumulation_steps=4 --gradient_checkpointing \ --max_train_steps=15000 \ --checkpointing_steps=5000 --checkpoints_total_limit=1 \ --learning_rate=5e-05 --lr_warmup_steps=0 \ --conditioning_dropout_prob=0.05 \ --seed=42 \ --val_image_url_or_path="https://datasets-server.huggingface.co/assets/fusing/instructpix2pix-1000-samples/--/fusing--instructpix2pix-1000-samples/train/23/input_image/image.jpg" \ --validation_prompt="make it in japan" \ --report_to=wandb \ --push_to_hub ``` ## Inference Once training is complete, we can perform inference: ```python import PIL import requests import torch from diffusers import StableDiffusionXLInstructPix2PixPipeline model_id = "your_model_id" # <- replace this pipe = StableDiffusionXLInstructPix2PixPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda") generator = torch.Generator("cuda").manual_seed(0) url = "https://datasets-server.huggingface.co/assets/fusing/instructpix2pix-1000-samples/--/fusing--instructpix2pix-1000-samples/train/23/input_image/image.jpg" def download_image(url): image = PIL.Image.open(requests.get(url, stream=True).raw) image = PIL.ImageOps.exif_transpose(image) image = image.convert("RGB") return image image = download_image(url) prompt = "make it Japan" num_inference_steps = 20 image_guidance_scale = 1.5 guidance_scale = 10 edited_image = pipe(prompt, image=image, num_inference_steps=num_inference_steps, image_guidance_scale=image_guidance_scale, guidance_scale=guidance_scale, generator=generator, ).images[0] 
edited_image.save("edited_image.png")
```

We encourage you to play with the following three parameters to control the quality and speed of inference:

* `num_inference_steps`
* `image_guidance_scale`
* `guidance_scale`

In particular, `image_guidance_scale` and `guidance_scale` can have a profound impact on the generated ("edited") image (see [here](https://twitter.com/RisingSayak/status/1628392199196151808?s=20) for an example). A small sweep over these two parameters is sketched at the end of this document.

If you're looking for some interesting ways to use the InstructPix2Pix training methodology, we welcome you to check out this blog post: [Instruction-tuning Stable Diffusion with InstructPix2Pix](https://huggingface.co/blog/instruction-tuning-sd).

## Comparing SD and SDXL

We aim to understand the differences resulting from the use of SD-1.5 and SDXL-0.9 as pretrained models. To achieve this, we trained on the [small toy dataset](https://huggingface.co/datasets/fusing/instructpix2pix-1000-samples) using both of these pretrained models. The training script is as follows:

```bash
export MODEL_NAME="runwayml/stable-diffusion-v1-5" # or "stabilityai/stable-diffusion-xl-base-0.9"
export DATASET_ID="fusing/instructpix2pix-1000-samples"

accelerate launch train_instruct_pix2pix.py \
    --pretrained_model_name_or_path=$MODEL_NAME \
    --dataset_name=$DATASET_ID \
    --use_ema \
    --enable_xformers_memory_efficient_attention \
    --resolution=512 --random_flip \
    --train_batch_size=4 --gradient_accumulation_steps=4 --gradient_checkpointing \
    --max_train_steps=15000 \
    --checkpointing_steps=5000 --checkpoints_total_limit=1 \
    --learning_rate=5e-05 --lr_warmup_steps=0 \
    --conditioning_dropout_prob=0.05 \
    --seed=42 \
    --val_image_url="https://datasets-server.huggingface.co/assets/fusing/instructpix2pix-1000-samples/--/fusing--instructpix2pix-1000-samples/train/23/input_image/image.jpg" \
    --validation_prompt="make it in Japan" \
    --report_to=wandb \
    --push_to_hub
```

We found that, compared to training with SD-1.5 as the pretrained model, SDXL-0.9 results in a lower training loss value (SD-1.5 yields 0.0599, SDXL 0.0254). Moreover, from a visual perspective, the results obtained using SDXL showed fewer artifacts and richer detail. Notably, SDXL starts to preserve the structure of the original image earlier on.

The following two GIFs provide intuitive visual results. At each training step, we generated a validation sample from the image

<p align="center">
    <img src="https://datasets-server.huggingface.co/assets/fusing/instructpix2pix-1000-samples/--/fusing--instructpix2pix-1000-samples/train/23/input_image/image.jpg" alt="input for make it Japan" width=600/>
</p>

with "make it in Japan" as the prompt. It can be seen that SDXL starts preserving the details of the original image earlier, resulting in higher-fidelity outcomes sooner.

* SD-1.5: https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sd_ip2p_training_val_img_progress.gif

<p align="center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sd_ip2p_training_val_img_progress.gif" alt="input for make it Japan" width=600/>
</p>

* SDXL: https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdxl_ip2p_training_val_img_progress.gif

<p align="center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdxl_ip2p_training_val_img_progress.gif" alt="input for make it Japan" width=600/>
</p>
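As a complement to the guidance discussion above, here is a small, hypothetical sweep over `image_guidance_scale` and `guidance_scale` that reuses `pipe`, `image`, `prompt`, and `generator` from the inference example earlier in this document. The grid values are arbitrary and only meant for a quick visual comparison.

```python
# Hypothetical sweep over the two guidance parameters discussed above.
# Reuses `pipe`, `image`, `prompt`, and `generator` from the inference example.
for image_guidance_scale in (1.0, 1.5, 2.0):
    for guidance_scale in (5.0, 7.5, 10.0):
        edited = pipe(
            prompt,
            image=image,
            num_inference_steps=20,
            image_guidance_scale=image_guidance_scale,
            guidance_scale=guidance_scale,
            generator=generator,
        ).images[0]
        edited.save(f"edited_igs{image_guidance_scale}_gs{guidance_scale}.png")
```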
diffusers/examples/instruct_pix2pix/README_sdxl.md/0
{ "file_path": "diffusers/examples/instruct_pix2pix/README_sdxl.md", "repo_id": "diffusers", "token_count": 3490 }
101
# Stable Diffusion text-to-image fine-tuning

This extended LoRA training script was authored by [haofanwang](https://github.com/haofanwang). This is an experimental LoRA extension of [this example](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora.py). It additionally supports adding LoRA layers to the text encoder.

## Training with LoRA

Low-Rank Adaptation of Large Language Models was first introduced by Microsoft in [LoRA: Low-Rank Adaptation of Large Language Models](https://arxiv.org/abs/2106.09685) by *Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen*.

In a nutshell, LoRA allows adapting pretrained models by adding pairs of rank-decomposition matrices to existing weights and **only** training those newly added weights. This has a couple of advantages:

- The previous pretrained weights are kept frozen so that the model is not prone to [catastrophic forgetting](https://www.pnas.org/doi/10.1073/pnas.1611835114).
- Rank-decomposition matrices have significantly fewer parameters than the original model, which means that trained LoRA weights are easily portable (see the short parameter-count sketch after the training command below).
- LoRA attention layers allow you to control to what extent the model is adapted towards new training images via a `scale` parameter.

[cloneofsimo](https://github.com/cloneofsimo) was the first to try out LoRA training for Stable Diffusion in the popular [lora](https://github.com/cloneofsimo/lora) GitHub repository.

With LoRA, it's possible to fine-tune Stable Diffusion on a custom image-caption pair dataset on consumer GPUs like the Tesla T4 or Tesla V100.

### Training

First, you need to set up your development environment as explained in the [installation section](#installing-the-dependencies). Make sure to set the `MODEL_NAME` and `DATASET_NAME` environment variables. Here, we will use [Stable Diffusion v1-4](https://hf.co/CompVis/stable-diffusion-v1-4) and the [Pokémon dataset](https://huggingface.co/datasets/lambdalabs/pokemon-blip-captions).

**___Note: Change the `resolution` to 768 if you are using the [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 model.___**

**___Note: It is quite useful to monitor the training progress by regularly generating sample images during training. [Weights and Biases](https://docs.wandb.ai/quickstart) is a nice solution to easily see the generated images during training. All you need to do is to run `pip install wandb` before training to automatically log images.___**

```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export DATASET_NAME="lambdalabs/pokemon-blip-captions"
```

For this example we want to directly store the trained LoRA embeddings on the Hub, so we need to be logged in and add the `--push_to_hub` flag.

```bash
huggingface-cli login
```

Now we can start training!

```bash
accelerate launch --mixed_precision="fp16" train_text_to_image_lora.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --dataset_name=$DATASET_NAME --caption_column="text" \
  --resolution=512 --random_flip \
  --train_batch_size=1 \
  --num_train_epochs=100 --checkpointing_steps=5000 \
  --learning_rate=1e-04 --lr_scheduler="constant" --lr_warmup_steps=0 \
  --seed=42 \
  --output_dir="sd-pokemon-model-lora" \
  --validation_prompt="cute dragon creature" --report_to="wandb" --use_peft \
  --lora_r=4 --lora_alpha=32 \
  --lora_text_encoder_r=4 --lora_text_encoder_alpha=32
```

The above command will also run inference as fine-tuning progresses and log the results to Weights and Biases.
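As referenced above, a quick back-of-the-envelope calculation shows why LoRA weights are so compact: for a weight matrix of shape `d × k` adapted with rank `r`, LoRA only trains `r × (d + k)` parameters instead of `d × k`. The dimensions below are illustrative and are not taken from the actual model configuration.

```python
# Illustrative parameter-count comparison for a single projection matrix.
d, k, r = 768, 768, 4  # example dimensions and LoRA rank, not the real model config
full_params = d * k
lora_params = r * (d + k)
print(f"full: {full_params:,} parameters vs. LoRA: {lora_params:,} "
      f"({100 * lora_params / full_params:.2f}% of the original)")
```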
**___Note: When using LoRA we can use a much higher learning rate compared to non-LoRA fine-tuning. Here we use *1e-4* instead of the usual *1e-5*. Also, by using LoRA, it's possible to run `train_text_to_image_lora.py` on consumer GPUs like the T4 or V100.___**

The final LoRA embedding weights have been uploaded to [sayakpaul/sd-model-finetuned-lora-t4](https://huggingface.co/sayakpaul/sd-model-finetuned-lora-t4). **___Note: [The final weights](https://huggingface.co/sayakpaul/sd-model-finetuned-lora-t4/blob/main/pytorch_lora_weights.bin) are only 3 MB in size, which is orders of magnitude smaller than the original model.___**

You can check some inference samples that were logged during the course of the fine-tuning process [here](https://wandb.ai/sayakpaul/text2image-fine-tune/runs/q4lc0xsw).

### Inference

Once you have trained a model using the above command, inference can be done simply with the `StableDiffusionPipeline` after loading the trained LoRA weights. You need to pass the `output_dir` for loading the LoRA weights, which, in this case, is `sd-pokemon-model-lora`.

```python
from diffusers import StableDiffusionPipeline
import torch

model_path = "sayakpaul/sd-model-finetuned-lora-t4"
pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
pipe.unet.load_attn_procs(model_path)
pipe.to("cuda")

prompt = "A pokemon with green eyes and red legs."
image = pipe(prompt, num_inference_steps=30, guidance_scale=7.5).images[0]
image.save("pokemon.png")
```
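The `scale` parameter mentioned earlier can also be applied at inference time. With LoRA weights loaded through `load_attn_procs` as in the example above, diffusers lets you blend the LoRA layers with the base model by passing `cross_attention_kwargs` to the pipeline call. This is a minimal sketch; the scale value of 0.5 is an arbitrary choice, and the exact behavior may depend on how the weights were saved.

```python
# Blend the LoRA-adapted attention layers with the base model at inference time:
# scale=0.0 ignores the LoRA weights entirely, scale=1.0 applies them fully.
image = pipe(
    prompt,
    num_inference_steps=30,
    guidance_scale=7.5,
    cross_attention_kwargs={"scale": 0.5},
).images[0]
image.save("pokemon_scale_0.5.png")
```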
diffusers/examples/research_projects/lora/README.md/0
{ "file_path": "diffusers/examples/research_projects/lora/README.md", "repo_id": "diffusers", "token_count": 1628 }
102
# Stable Diffusion text-to-image fine-tuning

The `train_text_to_image.py` script shows how to fine-tune a Stable Diffusion model on your own dataset.

___Note___:

___This script is experimental. The script fine-tunes the whole model and oftentimes the model overfits and runs into issues like catastrophic forgetting. It's recommended to try different hyperparameters to get the best result on your dataset.___

## Running locally with PyTorch

### Installing the dependencies

Before running the scripts, make sure to install the library's training dependencies:

**Important**

To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:
```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install .
```

Then cd into the example folder and run
```bash
pip install -r requirements.txt
```

And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:

```bash
accelerate config
```

### Pokemon example

You need to accept the model license before downloading or using the weights. In this example we'll use model version `v1-4`, so you'll need to visit [its card](https://huggingface.co/CompVis/stable-diffusion-v1-4), read the license and tick the checkbox if you agree.

You have to be a registered user on the 🤗 Hugging Face Hub, and you'll also need to use an access token for the code to work. For more information on access tokens, please refer to [this section of the documentation](https://huggingface.co/docs/hub/security-tokens).

Run the following command to authenticate your token

```bash
huggingface-cli login
```

If you have already cloned the repo, then you won't need to go through these steps.

<br>

## Use ONNX Runtime to accelerate training

In order to leverage ONNX Runtime to accelerate training, please use `train_text_to_image.py`.

The command to train a DDPM UNetCondition model on the Pokemon dataset with ONNX Runtime:

```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export dataset_name="lambdalabs/pokemon-blip-captions"
accelerate launch --mixed_precision="fp16" train_text_to_image.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --dataset_name=$dataset_name \
  --use_ema \
  --resolution=512 --center_crop --random_flip \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --gradient_checkpointing \
  --max_train_steps=15000 \
  --learning_rate=1e-05 \
  --max_grad_norm=1 \
  --lr_scheduler="constant" --lr_warmup_steps=0 \
  --output_dir="sd-pokemon-model"
```

Please contact Prathik Rao (prathikr), Sunghoon Choi (hanbitmyths), Ashwini Khade (askhade), or Peng Wang (pengwa) on GitHub with any questions.
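Once training finishes, the fine-tuned pipeline can be loaded for inference with the standard diffusers API. A minimal sketch, assuming the script saves a full pipeline to the `--output_dir` used above (as the upstream `train_text_to_image.py` example does):

```python
import torch
from diffusers import StableDiffusionPipeline

# Load the fine-tuned pipeline from the training output directory.
pipe = StableDiffusionPipeline.from_pretrained("sd-pokemon-model", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

image = pipe(prompt="yoda").images[0]
image.save("yoda-pokemon.png")
```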
diffusers/examples/research_projects/onnxruntime/text_to_image/README.md/0
{ "file_path": "diffusers/examples/research_projects/onnxruntime/text_to_image/README.md", "repo_id": "diffusers", "token_count": 847 }
103
# Stable Diffusion XL for JAX + TPUv5e

[TPU v5e](https://cloud.google.com/blog/products/compute/how-cloud-tpu-v5e-accelerates-large-scale-ai-inference) is a new generation of TPUs from Google Cloud. It is the most cost-effective, versatile, and scalable Cloud TPU to date, which makes it ideal for serving and scaling large diffusion models.

[JAX](https://github.com/google/jax) is a high-performance numerical computation library that is well suited to developing and deploying diffusion models:

- **High performance**. All JAX operations are implemented in terms of operations in [XLA](https://www.tensorflow.org/xla/) - the Accelerated Linear Algebra compiler.

- **Compilation**. JAX uses just-in-time (jit) compilation of JAX Python functions so they can be executed efficiently in XLA. In order to get the best performance, we must use static shapes for jitted functions; this is because JAX transforms work by tracing a function to determine its effect on inputs of a specific shape and type. When a new shape is introduced to an already compiled function, it retriggers compilation on the new shape, which can greatly reduce performance. **Note**: JIT compilation is particularly well-suited for text-to-image generation because all inputs and outputs (image input / output sizes) are static.

- **Parallelization**. Workloads can be scaled across multiple devices using JAX's [pmap](https://jax.readthedocs.io/en/latest/_autosummary/jax.pmap.html), which expresses single-program multiple-data (SPMD) programs. Applying pmap to a function compiles it with XLA and then executes it in parallel on XLA devices. For text-to-image generation workloads this means that increasing the number of images rendered simultaneously is straightforward to implement and doesn't compromise performance.

👉 Try it out for yourself: [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/google/sdxl)

## Stable Diffusion XL pipeline in JAX

Once you have access to a TPU VM (TPUs newer than version 3), you should first install a TPU-compatible version of JAX:
```
pip install jax[tpu] -f https://storage.googleapis.com/jax-releases/libtpu_releases.html
```

Next, we can install [flax](https://github.com/google/flax) and the diffusers library:

```
pip install flax diffusers transformers
```

In [sdxl_single.py](./sdxl_single.py) we give a simple example of how to write a text-to-image generation pipeline in JAX using [StabilityAI's Stable Diffusion XL](stabilityai/stable-diffusion-xl-base-1.0).

Let's explain it step-by-step:

**Imports and Setup**

```python
import jax
import jax.numpy as jnp
import numpy as np
from flax.jax_utils import replicate
from diffusers import FlaxStableDiffusionXLPipeline
from jax.experimental.compilation_cache import compilation_cache as cc
cc.initialize_cache("/tmp/sdxl_cache")
import time

NUM_DEVICES = jax.device_count()
```

First, we import the necessary libraries:
- `jax` provides the primitives for TPU operations
- `flax.jax_utils` contains some useful utility functions for `Flax`, a neural network library built on top of JAX
- `diffusers` has all the code that is relevant for SDXL.
- We also initialize a cache to speed up the JAX model compilation.
- We automatically determine the number of available TPU devices.

**1.
Downloading Model and Loading Pipeline**

```python
pipeline, params = FlaxStableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", revision="refs/pr/95", split_head_dim=True
)
```
Here, a pre-trained model `stable-diffusion-xl-base-1.0` from the namespace `stabilityai` is loaded. It returns a pipeline for inference and its parameters.

**2. Casting Parameter Types**

```python
scheduler_state = params.pop("scheduler")
params = jax.tree_util.tree_map(lambda x: x.astype(jnp.bfloat16), params)
params["scheduler"] = scheduler_state
```
This section adjusts the data types of the model parameters. We convert all parameters to `bfloat16` to speed up the computation with model weights. **Note** that the scheduler parameters are **not** converted to `bfloat16` as the loss in precision degrades the pipeline's performance too significantly.

**3. Define Inputs to Pipeline**

```python
default_prompt = ...
default_neg_prompt = ...
default_seed = 33
default_guidance_scale = 5.0
default_num_steps = 25
```
Here, various default inputs for the pipeline are set, including the prompt, negative prompt, random seed, guidance scale, and the number of inference steps.

**4. Tokenizing Inputs**

```python
def tokenize_prompt(prompt, neg_prompt):
    prompt_ids = pipeline.prepare_inputs(prompt)
    neg_prompt_ids = pipeline.prepare_inputs(neg_prompt)
    return prompt_ids, neg_prompt_ids
```
This function tokenizes the given prompts. It's essential because the text encoders of SDXL don't understand raw text; they work with numbers. Tokenization converts text to numbers.

**5. Parallelization and Replication**

```python
p_params = replicate(params)

def replicate_all(prompt_ids, neg_prompt_ids, seed):
    ...
```
To utilize JAX's parallel capabilities, the parameters and input tensors are duplicated across devices. The `replicate_all` function also ensures that every device produces a different image by creating a unique random seed for each device.

**6. Putting Everything Together**

```python
def generate(...):
    ...
```
This function integrates all the steps to produce the desired outputs from the model. It takes in prompts, tokenizes them, replicates them across devices, runs them through the pipeline, and converts the images to a format that's more interpretable (PIL format).

**7. Compilation Step**

```python
start = time.time()
print(f"Compiling ...")
generate(default_prompt, default_neg_prompt)
print(f"Compiled in {time.time() - start}")
```
The initial run of the `generate` function will be slow because JAX compiles the function during this call. By running it once here, subsequent calls will be much faster. This section measures and prints the compilation time.

**8. Fast Inference**

```python
start = time.time()
prompt = ...
neg_prompt = ...
images = generate(prompt, neg_prompt)
print(f"Inference in {time.time() - start}")
```
Now that the function is compiled, this section shows how to use it for fast inference. It measures and prints the inference time.

In summary, the code demonstrates how to load a pre-trained model using Flax and JAX, prepare it for inference, and run it efficiently using JAX's capabilities.

## Ahead of Time (AOT) Compilation

FlaxStableDiffusionXLPipeline takes care of parallelization across multiple devices using jit. Now let's build parallelization ourselves.

For this we will be using a JAX feature called [Ahead of Time](https://jax.readthedocs.io/en/latest/aot.html) (AOT) lowering and compilation.
AOT allows us to fully compile prior to execution time and gives us control over different parts of the compilation process.

In [sdxl_single_aot.py](./sdxl_single_aot.py) we give a simple example of how to write our own parallelization logic for a text-to-image generation pipeline in JAX using [StabilityAI's Stable Diffusion XL](stabilityai/stable-diffusion-xl-base-1.0).

We add an `aot_compile` function that compiles the `pipeline._generate` function, telling JAX which input arguments are static, that is, arguments that are known at compile time and won't change. In our case, these are `num_inference_steps`, `height`, `width` and `return_latents`.

Once the function is compiled, these parameters are omitted from future calls and cannot be changed without modifying the code and recompiling.

```python
def aot_compile(
    prompt=default_prompt,
    negative_prompt=default_neg_prompt,
    seed=default_seed,
    guidance_scale=default_guidance_scale,
    num_inference_steps=default_num_steps
):
    prompt_ids, neg_prompt_ids = tokenize_prompt(prompt, negative_prompt)
    prompt_ids, neg_prompt_ids, rng = replicate_all(prompt_ids, neg_prompt_ids, seed)
    g = jnp.array([guidance_scale] * prompt_ids.shape[0], dtype=jnp.float32)
    g = g[:, None]

    return pmap(
        pipeline._generate, static_broadcasted_argnums=[3, 4, 5, 9]
    ).lower(
        prompt_ids,
        p_params,
        rng,
        num_inference_steps,  # num_inference_steps
        height,  # height
        width,  # width
        g,
        None,
        neg_prompt_ids,
        False  # return_latents
    ).compile()
```

Next we can compile the generate function by executing `aot_compile`.

```python
start = time.time()
print("Compiling ...")
p_generate = aot_compile()
print(f"Compiled in {time.time() - start}")
```

And again we put everything together in a `generate` function.

```python
def generate(
    prompt,
    negative_prompt,
    seed=default_seed,
    guidance_scale=default_guidance_scale
):
    prompt_ids, neg_prompt_ids = tokenize_prompt(prompt, negative_prompt)
    prompt_ids, neg_prompt_ids, rng = replicate_all(prompt_ids, neg_prompt_ids, seed)
    g = jnp.array([guidance_scale] * prompt_ids.shape[0], dtype=jnp.float32)
    g = g[:, None]
    images = p_generate(
        prompt_ids,
        p_params,
        rng,
        g,
        None,
        neg_prompt_ids)

    # convert the images to PIL
    images = images.reshape((images.shape[0] * images.shape[1], ) + images.shape[-3:])
    return pipeline.numpy_to_pil(np.array(images))
```

The first forward pass after AOT compilation still takes a while longer than subsequent passes; this is because on the first pass, JAX uses Python dispatch, which fills the C++ dispatch cache. When using jit, this extra step is done automatically, but when using AOT compilation, it doesn't happen until the function call is made.

```python
start = time.time()
prompt = "photo of a rhino dressed suit and tie sitting at a table in a bar with a bar stools, award winning photography, Elke vogelsang"
neg_prompt = "cartoon, illustration, animation. face. male, female"
images = generate(prompt, neg_prompt)
print(f"First inference in {time.time() - start}")
```

From this point forward, any call to `generate` should run at the faster, stable inference time.

```python
start = time.time()
prompt = "photo of a rhino dressed suit and tie sitting at a table in a bar with a bar stools, award winning photography, Elke vogelsang"
neg_prompt = "cartoon, illustration, animation. face. male, female"
images = generate(prompt, neg_prompt)
print(f"Inference in {time.time() - start}")
```
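For reference, the per-device seeding that `replicate_all` provides is typically implemented by splitting a single PRNG key into one key per device. The sketch below is only an illustration of that idea and is not necessarily the script's actual implementation.

```python
import jax

# Illustrative only: derive one PRNG key per device from a single seed so that
# each device generates a different image when the keys are fed to a pmapped function.
def per_device_keys(seed: int):
    key = jax.random.PRNGKey(seed)
    return jax.random.split(key, jax.device_count())
```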
diffusers/examples/research_projects/sdxl_flax/README.md/0
{ "file_path": "diffusers/examples/research_projects/sdxl_flax/README.md", "repo_id": "diffusers", "token_count": 3342 }
104
#!/usr/bin/env python # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and import argparse import logging import math import os import random import shutil from pathlib import Path import accelerate import datasets import numpy as np import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.state import AcceleratorState from accelerate.utils import ProjectConfiguration, set_seed from datasets import load_dataset from huggingface_hub import create_repo, upload_folder from packaging import version from torchvision import transforms from tqdm.auto import tqdm from transformers import CLIPTextModel, CLIPTokenizer from transformers.utils import ContextManagers import diffusers from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel from diffusers.optimization import get_scheduler from diffusers.training_utils import EMAModel, compute_snr from diffusers.utils import check_min_version, deprecate, is_wandb_available, make_image_grid from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.torch_utils import is_compiled_module if is_wandb_available(): import wandb # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.26.0.dev0") logger = get_logger(__name__, log_level="INFO") DATASET_NAME_MAPPING = { "lambdalabs/pokemon-blip-captions": ("image", "text"), } def save_model_card( args, repo_id: str, images=None, repo_folder=None, ): img_str = "" if len(images) > 0: image_grid = make_image_grid(images, 1, len(args.validation_prompts)) image_grid.save(os.path.join(repo_folder, "val_imgs_grid.png")) img_str += "![val_imgs_grid](./val_imgs_grid.png)\n" yaml = f""" --- license: creativeml-openrail-m base_model: {args.pretrained_model_name_or_path} datasets: - {args.dataset_name} tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers inference: true --- """ model_card = f""" # Text-to-image finetuning - {repo_id} This pipeline was finetuned from **{args.pretrained_model_name_or_path}** on the **{args.dataset_name}** dataset. 
Below are some example images generated with the finetuned pipeline using the following prompts: {args.validation_prompts}: \n {img_str} ## Pipeline usage You can use the pipeline like so: ```python from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained("{repo_id}", torch_dtype=torch.float16) prompt = "{args.validation_prompts[0]}" image = pipeline(prompt).images[0] image.save("my_image.png") ``` ## Training info These are the key hyperparameters used during training: * Epochs: {args.num_train_epochs} * Learning rate: {args.learning_rate} * Batch size: {args.train_batch_size} * Gradient accumulation steps: {args.gradient_accumulation_steps} * Image resolution: {args.resolution} * Mixed-precision: {args.mixed_precision} """ wandb_info = "" if is_wandb_available(): wandb_run_url = None if wandb.run is not None: wandb_run_url = wandb.run.url if wandb_run_url is not None: wandb_info = f""" More information on all the CLI arguments and the environment are available on your [`wandb` run page]({wandb_run_url}). """ model_card += wandb_info with open(os.path.join(repo_folder, "README.md"), "w") as f: f.write(yaml + model_card) def log_validation(vae, text_encoder, tokenizer, unet, args, accelerator, weight_dtype, epoch): logger.info("Running validation... ") pipeline = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, vae=accelerator.unwrap_model(vae), text_encoder=accelerator.unwrap_model(text_encoder), tokenizer=tokenizer, unet=accelerator.unwrap_model(unet), safety_checker=None, revision=args.revision, variant=args.variant, torch_dtype=weight_dtype, ) pipeline = pipeline.to(accelerator.device) pipeline.set_progress_bar_config(disable=True) if args.enable_xformers_memory_efficient_attention: pipeline.enable_xformers_memory_efficient_attention() if args.seed is None: generator = None else: generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) images = [] for i in range(len(args.validation_prompts)): with torch.autocast("cuda"): image = pipeline(args.validation_prompts[i], num_inference_steps=20, generator=generator).images[0] images.append(image) for tracker in accelerator.trackers: if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in images]) tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC") elif tracker.name == "wandb": tracker.log( { "validation": [ wandb.Image(image, caption=f"{i}: {args.validation_prompts[i]}") for i, image in enumerate(images) ] } ) else: logger.warn(f"image logging not implemented for {tracker.name}") del pipeline torch.cuda.empty_cache() return images def parse_args(): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--input_perturbation", type=float, default=0, help="The scale of input perturbation. Recommended 0.1." ) parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--variant", type=str, default=None, help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' 
fp16", ) parser.add_argument( "--dataset_name", type=str, default=None, help=( "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," " or to a folder containing files that 🤗 Datasets can understand." ), ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The config of the Dataset, leave as None if there's only one config.", ) parser.add_argument( "--train_data_dir", type=str, default=None, help=( "A folder containing the training data. Folder contents must follow the structure described in" " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file" " must exist to provide the captions for the images. Ignored if `dataset_name` is specified." ), ) parser.add_argument( "--image_column", type=str, default="image", help="The column of the dataset containing an image." ) parser.add_argument( "--caption_column", type=str, default="text", help="The column of the dataset containing a caption or a list of captions.", ) parser.add_argument( "--max_train_samples", type=int, default=None, help=( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ), ) parser.add_argument( "--validation_prompts", type=str, default=None, nargs="+", help=("A set of prompts evaluated every `--validation_epochs` and logged to `--report_to`."), ) parser.add_argument( "--output_dir", type=str, default="sd-model-finetuned", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--cache_dir", type=str, default=None, help="The directory where the downloaded models and datasets will be stored.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", default=False, action="store_true", help=( "Whether to center crop the input images to the resolution. If not set, the images will be randomly" " cropped. The images will be resized to the resolution first before cropping." ), ) parser.add_argument( "--random_flip", action="store_true", help="whether to randomly flip images horizontally", ) parser.add_argument( "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." ) parser.add_argument("--num_train_epochs", type=int, default=100) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. 
If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=1e-4, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--snr_gamma", type=float, default=None, help="SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. " "More details here: https://arxiv.org/abs/2303.09556.", ) parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.") parser.add_argument( "--non_ema_revision", type=str, default=None, required=False, help=( "Revision of pretrained non-ema model identifier. Must be a branch, tag or git identifier of the local or" " remote repository specified with --pretrained_model_name_or_path." ), ) parser.add_argument( "--dataloader_num_workers", type=int, default=0, help=( "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." ), ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--prediction_type", type=str, default=None, help="The prediction_type that shall be used for training. Choose between 'epsilon' or 'v_prediction' or leave `None`. If left to `None` the default prediction type of the scheduler: `noise_scheduler.config.prediciton_type` is chosen.", ) parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. 
Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--checkpoints_total_limit", type=int, default=None, help=("Max number of checkpoints to store."), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." ) parser.add_argument("--noise_offset", type=float, default=0, help="The scale of noise offset.") parser.add_argument( "--validation_epochs", type=int, default=5, help="Run validation every X epochs.", ) parser.add_argument( "--tracker_project_name", type=str, default="text2image-fine-tune", help=( "The `project_name` argument passed to Accelerator.init_trackers for" " more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator" ), ) args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank # Sanity checks if args.dataset_name is None and args.train_data_dir is None: raise ValueError("Need either a dataset name or a training folder.") # default to using the same revision for the non-ema model if not specified if args.non_ema_revision is None: args.non_ema_revision = args.revision return args def main(): args = parse_args() if args.non_ema_revision is not None: deprecate( "non_ema_revision!=None", "0.15.0", message=( "Downloading 'non_ema' weights from revision branches of the Hub is deprecated. Please make sure to" " use `--variant=non_ema` instead." ), ) logging_dir = os.path.join(args.output_dir, args.logging_dir) accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config, ) # Make one log on every process with the configuration for debugging. 
logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # Load scheduler, tokenizer and models. noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") tokenizer = CLIPTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision ) def deepspeed_zero_init_disabled_context_manager(): """ returns either a context list that includes one that will disable zero.Init or an empty context list """ deepspeed_plugin = AcceleratorState().deepspeed_plugin if accelerate.state.is_initialized() else None if deepspeed_plugin is None: return [] return [deepspeed_plugin.zero3_init_context_manager(enable=False)] # Currently Accelerate doesn't know how to handle multiple models under Deepspeed ZeRO stage 3. # For this to work properly all models must be run through `accelerate.prepare`. But accelerate # will try to assign the same optimizer with the same weights to all models during # `deepspeed.initialize`, which of course doesn't work. # # For now the following workaround will partially support Deepspeed ZeRO-3, by excluding the 2 # frozen models from being partitioned during `zero.Init` which gets called during # `from_pretrained` So CLIPTextModel and AutoencoderKL will not enjoy the parameter sharding # across multiple gpus and only UNet2DConditionModel will get ZeRO sharded. with ContextManagers(deepspeed_zero_init_disabled_context_manager()): text_encoder = CLIPTextModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant ) vae = AutoencoderKL.from_pretrained( args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision, variant=args.variant ) unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.non_ema_revision ) # Freeze vae and text_encoder and set unet to trainable vae.requires_grad_(False) text_encoder.requires_grad_(False) unet.train() # Create EMA for the unet. if args.use_ema: ema_unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, variant=args.variant ) ema_unet = EMAModel(ema_unet.parameters(), model_cls=UNet2DConditionModel, model_config=ema_unet.config) if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): import xformers xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): logger.warn( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. 
See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly") # `accelerate` 0.16.0 will have better support for customized saving if version.parse(accelerate.__version__) >= version.parse("0.16.0"): # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format def save_model_hook(models, weights, output_dir): if accelerator.is_main_process: if args.use_ema: ema_unet.save_pretrained(os.path.join(output_dir, "unet_ema")) for i, model in enumerate(models): model.save_pretrained(os.path.join(output_dir, "unet")) # make sure to pop weight so that corresponding model is not saved again weights.pop() def load_model_hook(models, input_dir): if args.use_ema: load_model = EMAModel.from_pretrained(os.path.join(input_dir, "unet_ema"), UNet2DConditionModel) ema_unet.load_state_dict(load_model.state_dict()) ema_unet.to(accelerator.device) del load_model for i in range(len(models)): # pop models so that they are not loaded again model = models.pop() # load diffusers style into model load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet") model.register_to_config(**load_model.config) model.load_state_dict(load_model.state_dict()) del load_model accelerator.register_save_state_pre_hook(save_model_hook) accelerator.register_load_state_pre_hook(load_model_hook) if args.gradient_checkpointing: unet.enable_gradient_checkpointing() # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Initialize the optimizer if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`" ) optimizer_cls = bnb.optim.AdamW8bit else: optimizer_cls = torch.optim.AdamW optimizer = optimizer_cls( unet.parameters(), lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) # Get the datasets: you can either provide your own training and evaluation files (see below) # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub). # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. if args.dataset_name is not None: # Downloading and loading a dataset from the hub. dataset = load_dataset( args.dataset_name, args.dataset_config_name, cache_dir=args.cache_dir, data_dir=args.train_data_dir, ) else: data_files = {} if args.train_data_dir is not None: data_files["train"] = os.path.join(args.train_data_dir, "**") dataset = load_dataset( "imagefolder", data_files=data_files, cache_dir=args.cache_dir, ) # See more about loading custom images at # https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder # Preprocessing the datasets. # We need to tokenize inputs and targets. column_names = dataset["train"].column_names # 6. Get the column names for input/target. 
dataset_columns = DATASET_NAME_MAPPING.get(args.dataset_name, None) if args.image_column is None: image_column = dataset_columns[0] if dataset_columns is not None else column_names[0] else: image_column = args.image_column if image_column not in column_names: raise ValueError( f"--image_column' value '{args.image_column}' needs to be one of: {', '.join(column_names)}" ) if args.caption_column is None: caption_column = dataset_columns[1] if dataset_columns is not None else column_names[1] else: caption_column = args.caption_column if caption_column not in column_names: raise ValueError( f"--caption_column' value '{args.caption_column}' needs to be one of: {', '.join(column_names)}" ) # Preprocessing the datasets. # We need to tokenize input captions and transform the images. def tokenize_captions(examples, is_train=True): captions = [] for caption in examples[caption_column]: if isinstance(caption, str): captions.append(caption) elif isinstance(caption, (list, np.ndarray)): # take a random caption if there are multiple captions.append(random.choice(caption) if is_train else caption[0]) else: raise ValueError( f"Caption column `{caption_column}` should contain either strings or lists of strings." ) inputs = tokenizer( captions, max_length=tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt" ) return inputs.input_ids # Preprocessing the datasets. train_transforms = transforms.Compose( [ transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution), transforms.RandomHorizontalFlip() if args.random_flip else transforms.Lambda(lambda x: x), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def preprocess_train(examples): images = [image.convert("RGB") for image in examples[image_column]] examples["pixel_values"] = [train_transforms(image) for image in images] examples["input_ids"] = tokenize_captions(examples) return examples with accelerator.main_process_first(): if args.max_train_samples is not None: dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples)) # Set the training transforms train_dataset = dataset["train"].with_transform(preprocess_train) def collate_fn(examples): pixel_values = torch.stack([example["pixel_values"] for example in examples]) pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() input_ids = torch.stack([example["input_ids"] for example in examples]) return {"pixel_values": pixel_values, "input_ids": input_ids} # DataLoaders creation: train_dataloader = torch.utils.data.DataLoader( train_dataset, shuffle=True, collate_fn=collate_fn, batch_size=args.train_batch_size, num_workers=args.dataloader_num_workers, ) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, num_training_steps=args.max_train_steps * accelerator.num_processes, ) # Prepare everything with our `accelerator`. 
unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, optimizer, train_dataloader, lr_scheduler ) if args.use_ema: ema_unet.to(accelerator.device) # For mixed precision training we cast all non-trainable weigths (vae, non-lora text_encoder and non-lora unet) to half-precision # as these weights are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 args.mixed_precision = accelerator.mixed_precision elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 args.mixed_precision = accelerator.mixed_precision # Move text_encode and vae to gpu and cast to weight_dtype text_encoder.to(accelerator.device, dtype=weight_dtype) vae.to(accelerator.device, dtype=weight_dtype) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: tracker_config = dict(vars(args)) tracker_config.pop("validation_prompts") accelerator.init_trackers(args.tracker_project_name, tracker_config) # Function for unwrapping if model was compiled with `torch.compile`. def unwrap_model(model): model = accelerator.unwrap_model(model) model = model._orig_mod if is_compiled_module(model) else model return model # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." ) args.resume_from_checkpoint = None initial_global_step = 0 else: accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) initial_global_step = global_step first_epoch = global_step // num_update_steps_per_epoch else: initial_global_step = 0 progress_bar = tqdm( range(0, args.max_train_steps), initial=initial_global_step, desc="Steps", # Only show the progress bar once on each machine. 
disable=not accelerator.is_local_main_process, ) for epoch in range(first_epoch, args.num_train_epochs): train_loss = 0.0 for step, batch in enumerate(train_dataloader): with accelerator.accumulate(unet): # Convert images to latent space latents = vae.encode(batch["pixel_values"].to(weight_dtype)).latent_dist.sample() latents = latents * vae.config.scaling_factor # Sample noise that we'll add to the latents noise = torch.randn_like(latents) if args.noise_offset: # https://www.crosslabs.org//blog/diffusion-with-offset-noise noise += args.noise_offset * torch.randn( (latents.shape[0], latents.shape[1], 1, 1), device=latents.device ) if args.input_perturbation: new_noise = noise + args.input_perturbation * torch.randn_like(noise) bsz = latents.shape[0] # Sample a random timestep for each image timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device) timesteps = timesteps.long() # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) if args.input_perturbation: noisy_latents = noise_scheduler.add_noise(latents, new_noise, timesteps) else: noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) # Get the text embedding for conditioning encoder_hidden_states = text_encoder(batch["input_ids"], return_dict=False)[0] # Get the target for loss depending on the prediction type if args.prediction_type is not None: # set prediction_type of scheduler if defined noise_scheduler.register_to_config(prediction_type=args.prediction_type) if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(latents, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") # Predict the noise residual and compute loss model_pred = unet(noisy_latents, timesteps, encoder_hidden_states, return_dict=False)[0] if args.snr_gamma is None: loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") else: # Compute loss-weights as per Section 3.4 of https://arxiv.org/abs/2303.09556. # Since we predict the noise instead of x_0, the original formulation is slightly changed. # This is discussed in Section 4.2 of the same paper. snr = compute_snr(noise_scheduler, timesteps) mse_loss_weights = torch.stack([snr, args.snr_gamma * torch.ones_like(timesteps)], dim=1).min( dim=1 )[0] if noise_scheduler.config.prediction_type == "epsilon": mse_loss_weights = mse_loss_weights / snr elif noise_scheduler.config.prediction_type == "v_prediction": mse_loss_weights = mse_loss_weights / (snr + 1) loss = F.mse_loss(model_pred.float(), target.float(), reduction="none") loss = loss.mean(dim=list(range(1, len(loss.shape)))) * mse_loss_weights loss = loss.mean() # Gather the losses across all processes for logging (if we use distributed training). 
avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean() train_loss += avg_loss.item() / args.gradient_accumulation_steps # Backpropagate accelerator.backward(loss) if accelerator.sync_gradients: accelerator.clip_grad_norm_(unet.parameters(), args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: if args.use_ema: ema_unet.step(unet.parameters()) progress_bar.update(1) global_step += 1 accelerator.log({"train_loss": train_loss}, step=global_step) train_loss = 0.0 if global_step % args.checkpointing_steps == 0: if accelerator.is_main_process: # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` if args.checkpoints_total_limit is not None: checkpoints = os.listdir(args.output_dir) checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints if len(checkpoints) >= args.checkpoints_total_limit: num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 removing_checkpoints = checkpoints[0:num_to_remove] logger.info( f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" ) logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") for removing_checkpoint in removing_checkpoints: removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) shutil.rmtree(removing_checkpoint) save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) if global_step >= args.max_train_steps: break if accelerator.is_main_process: if args.validation_prompts is not None and epoch % args.validation_epochs == 0: if args.use_ema: # Store the UNet parameters temporarily and load the EMA parameters to perform inference. ema_unet.store(unet.parameters()) ema_unet.copy_to(unet.parameters()) log_validation( vae, text_encoder, tokenizer, unet, args, accelerator, weight_dtype, global_step, ) if args.use_ema: # Switch back to the original UNet parameters. ema_unet.restore(unet.parameters()) # Create the pipeline using the trained modules and save it. accelerator.wait_for_everyone() if accelerator.is_main_process: unet = unwrap_model(unet) if args.use_ema: ema_unet.copy_to(unet.parameters()) pipeline = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, revision=args.revision, variant=args.variant, ) pipeline.save_pretrained(args.output_dir) # Run a final round of inference. 
images = [] if args.validation_prompts is not None: logger.info("Running inference for collecting generated images...") pipeline = pipeline.to(accelerator.device) pipeline.torch_dtype = weight_dtype pipeline.set_progress_bar_config(disable=True) if args.enable_xformers_memory_efficient_attention: pipeline.enable_xformers_memory_efficient_attention() if args.seed is None: generator = None else: generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) for i in range(len(args.validation_prompts)): with torch.autocast("cuda"): image = pipeline(args.validation_prompts[i], num_inference_steps=20, generator=generator).images[0] images.append(image) if args.push_to_hub: save_model_card(args, repo_id, images, repo_folder=args.output_dir) upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) accelerator.end_training() if __name__ == "__main__": main()
diffusers/examples/text_to_image/train_text_to_image.py/0
{ "file_path": "diffusers/examples/text_to_image/train_text_to_image.py", "repo_id": "diffusers", "token_count": 19182 }
105
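The Min-SNR weighting used in the training loop above (Section 3.4 of https://arxiv.org/abs/2303.09556) is easier to follow on concrete numbers. A minimal sketch with made-up SNR values and an illustrative gamma of 5.0, mirroring the `--snr_gamma` branch of the loss computation:

import torch

snr = torch.tensor([0.5, 4.0, 25.0])                        # made-up per-timestep signal-to-noise ratios
gamma = 5.0                                                 # illustrative --snr_gamma value
weights = torch.minimum(snr, gamma * torch.ones_like(snr))  # clamp so clean (high-SNR) timesteps do not dominate

eps_weights = weights / snr                                 # epsilon-prediction: tensor([1.0000, 1.0000, 0.2000])
v_weights = weights / (snr + 1)                             # v-prediction:       tensor([0.3333, 0.8000, 0.1923])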
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import sys import tempfile sys.path.append("..") from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402 logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class Unconditional(ExamplesTestsAccelerate): def test_train_unconditional(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/unconditional_image_generation/train_unconditional.py --dataset_name hf-internal-testing/dummy_image_class_data --model_config_name_or_path diffusers/ddpm_dummy --resolution 64 --output_dir {tmpdir} --train_batch_size 2 --num_epochs 1 --gradient_accumulation_steps 1 --ddpm_num_inference_steps 2 --learning_rate 1e-3 --lr_warmup_steps 5 """.split() run_command(self._launch_args + test_args, return_stdout=True) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "unet", "diffusion_pytorch_model.safetensors"))) self.assertTrue(os.path.isfile(os.path.join(tmpdir, "scheduler", "scheduler_config.json"))) def test_unconditional_checkpointing_checkpoints_total_limit(self): with tempfile.TemporaryDirectory() as tmpdir: initial_run_args = f""" examples/unconditional_image_generation/train_unconditional.py --dataset_name hf-internal-testing/dummy_image_class_data --model_config_name_or_path diffusers/ddpm_dummy --resolution 64 --output_dir {tmpdir} --train_batch_size 1 --num_epochs 1 --gradient_accumulation_steps 1 --ddpm_num_inference_steps 2 --learning_rate 1e-3 --lr_warmup_steps 5 --checkpointing_steps=2 --checkpoints_total_limit=2 """.split() run_command(self._launch_args + initial_run_args) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, # checkpoint-2 should have been deleted {"checkpoint-4", "checkpoint-6"}, ) def test_unconditional_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self): with tempfile.TemporaryDirectory() as tmpdir: initial_run_args = f""" examples/unconditional_image_generation/train_unconditional.py --dataset_name hf-internal-testing/dummy_image_class_data --model_config_name_or_path diffusers/ddpm_dummy --resolution 64 --output_dir {tmpdir} --train_batch_size 1 --num_epochs 1 --gradient_accumulation_steps 1 --ddpm_num_inference_steps 1 --learning_rate 1e-3 --lr_warmup_steps 5 --checkpointing_steps=2 """.split() run_command(self._launch_args + initial_run_args) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-4", "checkpoint-6"}, ) resume_run_args = f""" examples/unconditional_image_generation/train_unconditional.py --dataset_name hf-internal-testing/dummy_image_class_data --model_config_name_or_path diffusers/ddpm_dummy --resolution 64 --output_dir {tmpdir} --train_batch_size 1 --num_epochs 2 --gradient_accumulation_steps 1 
--ddpm_num_inference_steps 1 --learning_rate 1e-3 --lr_warmup_steps 5 --resume_from_checkpoint=checkpoint-6 --checkpointing_steps=2 --checkpoints_total_limit=2 """.split() run_command(self._launch_args + resume_run_args) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-10", "checkpoint-12"}, )
diffusers/examples/unconditional_image_generation/test_unconditional.py/0
{ "file_path": "diffusers/examples/unconditional_image_generation/test_unconditional.py", "repo_id": "diffusers", "token_count": 2493 }
106
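The checkpoint sets asserted in the tests above follow from the rotation rule used by the training scripts: before each save, once the number of existing checkpoints reaches `checkpoints_total_limit`, the oldest `len - limit + 1` directories are removed. A small simulation of that rule as I read it (the helper name is hypothetical, for illustration only):

def simulate_checkpoint_rotation(total_steps, checkpointing_steps, total_limit):
    # Hypothetical helper: which checkpoint directories survive a run.
    kept = []
    for step in range(checkpointing_steps, total_steps + 1, checkpointing_steps):
        if total_limit is not None and len(kept) >= total_limit:
            num_to_remove = len(kept) - total_limit + 1
            kept = kept[num_to_remove:]
        kept.append(f"checkpoint-{step}")
    return kept

assert simulate_checkpoint_rotation(6, 2, None) == ["checkpoint-2", "checkpoint-4", "checkpoint-6"]
assert simulate_checkpoint_rotation(6, 2, 2) == ["checkpoint-4", "checkpoint-6"]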
""" This script requires you to build `LAVIS` from source, since the pip version doesn't have BLIP Diffusion. Follow instructions here: https://github.com/salesforce/LAVIS/tree/main. """ import argparse import os import tempfile import torch from lavis.models import load_model_and_preprocess from transformers import CLIPTokenizer from transformers.models.blip_2.configuration_blip_2 import Blip2Config from diffusers import ( AutoencoderKL, PNDMScheduler, UNet2DConditionModel, ) from diffusers.pipelines import BlipDiffusionPipeline from diffusers.pipelines.blip_diffusion.blip_image_processing import BlipImageProcessor from diffusers.pipelines.blip_diffusion.modeling_blip2 import Blip2QFormerModel from diffusers.pipelines.blip_diffusion.modeling_ctx_clip import ContextCLIPTextModel BLIP2_CONFIG = { "vision_config": { "hidden_size": 1024, "num_hidden_layers": 23, "num_attention_heads": 16, "image_size": 224, "patch_size": 14, "intermediate_size": 4096, "hidden_act": "quick_gelu", }, "qformer_config": { "cross_attention_frequency": 1, "encoder_hidden_size": 1024, "vocab_size": 30523, }, "num_query_tokens": 16, } blip2config = Blip2Config(**BLIP2_CONFIG) def qformer_model_from_original_config(): qformer = Blip2QFormerModel(blip2config) return qformer def embeddings_from_original_checkpoint(model, diffuser_embeddings_prefix, original_embeddings_prefix): embeddings = {} embeddings.update( { f"{diffuser_embeddings_prefix}.word_embeddings.weight": model[ f"{original_embeddings_prefix}.word_embeddings.weight" ] } ) embeddings.update( { f"{diffuser_embeddings_prefix}.position_embeddings.weight": model[ f"{original_embeddings_prefix}.position_embeddings.weight" ] } ) embeddings.update( {f"{diffuser_embeddings_prefix}.LayerNorm.weight": model[f"{original_embeddings_prefix}.LayerNorm.weight"]} ) embeddings.update( {f"{diffuser_embeddings_prefix}.LayerNorm.bias": model[f"{original_embeddings_prefix}.LayerNorm.bias"]} ) return embeddings def proj_layer_from_original_checkpoint(model, diffuser_proj_prefix, original_proj_prefix): proj_layer = {} proj_layer.update({f"{diffuser_proj_prefix}.dense1.weight": model[f"{original_proj_prefix}.dense1.weight"]}) proj_layer.update({f"{diffuser_proj_prefix}.dense1.bias": model[f"{original_proj_prefix}.dense1.bias"]}) proj_layer.update({f"{diffuser_proj_prefix}.dense2.weight": model[f"{original_proj_prefix}.dense2.weight"]}) proj_layer.update({f"{diffuser_proj_prefix}.dense2.bias": model[f"{original_proj_prefix}.dense2.bias"]}) proj_layer.update({f"{diffuser_proj_prefix}.LayerNorm.weight": model[f"{original_proj_prefix}.LayerNorm.weight"]}) proj_layer.update({f"{diffuser_proj_prefix}.LayerNorm.bias": model[f"{original_proj_prefix}.LayerNorm.bias"]}) return proj_layer def attention_from_original_checkpoint(model, diffuser_attention_prefix, original_attention_prefix): attention = {} attention.update( { f"{diffuser_attention_prefix}.attention.query.weight": model[ f"{original_attention_prefix}.self.query.weight" ] } ) attention.update( {f"{diffuser_attention_prefix}.attention.query.bias": model[f"{original_attention_prefix}.self.query.bias"]} ) attention.update( {f"{diffuser_attention_prefix}.attention.key.weight": model[f"{original_attention_prefix}.self.key.weight"]} ) attention.update( {f"{diffuser_attention_prefix}.attention.key.bias": model[f"{original_attention_prefix}.self.key.bias"]} ) attention.update( { f"{diffuser_attention_prefix}.attention.value.weight": model[ f"{original_attention_prefix}.self.value.weight" ] } ) attention.update( 
{f"{diffuser_attention_prefix}.attention.value.bias": model[f"{original_attention_prefix}.self.value.bias"]} ) attention.update( {f"{diffuser_attention_prefix}.output.dense.weight": model[f"{original_attention_prefix}.output.dense.weight"]} ) attention.update( {f"{diffuser_attention_prefix}.output.dense.bias": model[f"{original_attention_prefix}.output.dense.bias"]} ) attention.update( { f"{diffuser_attention_prefix}.output.LayerNorm.weight": model[ f"{original_attention_prefix}.output.LayerNorm.weight" ] } ) attention.update( { f"{diffuser_attention_prefix}.output.LayerNorm.bias": model[ f"{original_attention_prefix}.output.LayerNorm.bias" ] } ) return attention def output_layers_from_original_checkpoint(model, diffuser_output_prefix, original_output_prefix): output_layers = {} output_layers.update({f"{diffuser_output_prefix}.dense.weight": model[f"{original_output_prefix}.dense.weight"]}) output_layers.update({f"{diffuser_output_prefix}.dense.bias": model[f"{original_output_prefix}.dense.bias"]}) output_layers.update( {f"{diffuser_output_prefix}.LayerNorm.weight": model[f"{original_output_prefix}.LayerNorm.weight"]} ) output_layers.update( {f"{diffuser_output_prefix}.LayerNorm.bias": model[f"{original_output_prefix}.LayerNorm.bias"]} ) return output_layers def encoder_from_original_checkpoint(model, diffuser_encoder_prefix, original_encoder_prefix): encoder = {} for i in range(blip2config.qformer_config.num_hidden_layers): encoder.update( attention_from_original_checkpoint( model, f"{diffuser_encoder_prefix}.{i}.attention", f"{original_encoder_prefix}.{i}.attention" ) ) encoder.update( attention_from_original_checkpoint( model, f"{diffuser_encoder_prefix}.{i}.crossattention", f"{original_encoder_prefix}.{i}.crossattention" ) ) encoder.update( { f"{diffuser_encoder_prefix}.{i}.intermediate.dense.weight": model[ f"{original_encoder_prefix}.{i}.intermediate.dense.weight" ] } ) encoder.update( { f"{diffuser_encoder_prefix}.{i}.intermediate.dense.bias": model[ f"{original_encoder_prefix}.{i}.intermediate.dense.bias" ] } ) encoder.update( { f"{diffuser_encoder_prefix}.{i}.intermediate_query.dense.weight": model[ f"{original_encoder_prefix}.{i}.intermediate_query.dense.weight" ] } ) encoder.update( { f"{diffuser_encoder_prefix}.{i}.intermediate_query.dense.bias": model[ f"{original_encoder_prefix}.{i}.intermediate_query.dense.bias" ] } ) encoder.update( output_layers_from_original_checkpoint( model, f"{diffuser_encoder_prefix}.{i}.output", f"{original_encoder_prefix}.{i}.output" ) ) encoder.update( output_layers_from_original_checkpoint( model, f"{diffuser_encoder_prefix}.{i}.output_query", f"{original_encoder_prefix}.{i}.output_query" ) ) return encoder def visual_encoder_layer_from_original_checkpoint(model, diffuser_prefix, original_prefix): visual_encoder_layer = {} visual_encoder_layer.update({f"{diffuser_prefix}.layer_norm1.weight": model[f"{original_prefix}.ln_1.weight"]}) visual_encoder_layer.update({f"{diffuser_prefix}.layer_norm1.bias": model[f"{original_prefix}.ln_1.bias"]}) visual_encoder_layer.update({f"{diffuser_prefix}.layer_norm2.weight": model[f"{original_prefix}.ln_2.weight"]}) visual_encoder_layer.update({f"{diffuser_prefix}.layer_norm2.bias": model[f"{original_prefix}.ln_2.bias"]}) visual_encoder_layer.update( {f"{diffuser_prefix}.self_attn.qkv.weight": model[f"{original_prefix}.attn.in_proj_weight"]} ) visual_encoder_layer.update( {f"{diffuser_prefix}.self_attn.qkv.bias": model[f"{original_prefix}.attn.in_proj_bias"]} ) visual_encoder_layer.update( 
{f"{diffuser_prefix}.self_attn.projection.weight": model[f"{original_prefix}.attn.out_proj.weight"]} ) visual_encoder_layer.update( {f"{diffuser_prefix}.self_attn.projection.bias": model[f"{original_prefix}.attn.out_proj.bias"]} ) visual_encoder_layer.update({f"{diffuser_prefix}.mlp.fc1.weight": model[f"{original_prefix}.mlp.c_fc.weight"]}) visual_encoder_layer.update({f"{diffuser_prefix}.mlp.fc1.bias": model[f"{original_prefix}.mlp.c_fc.bias"]}) visual_encoder_layer.update({f"{diffuser_prefix}.mlp.fc2.weight": model[f"{original_prefix}.mlp.c_proj.weight"]}) visual_encoder_layer.update({f"{diffuser_prefix}.mlp.fc2.bias": model[f"{original_prefix}.mlp.c_proj.bias"]}) return visual_encoder_layer def visual_encoder_from_original_checkpoint(model, diffuser_prefix, original_prefix): visual_encoder = {} visual_encoder.update( { f"{diffuser_prefix}.embeddings.class_embedding": model[f"{original_prefix}.class_embedding"] .unsqueeze(0) .unsqueeze(0) } ) visual_encoder.update( { f"{diffuser_prefix}.embeddings.position_embedding": model[ f"{original_prefix}.positional_embedding" ].unsqueeze(0) } ) visual_encoder.update( {f"{diffuser_prefix}.embeddings.patch_embedding.weight": model[f"{original_prefix}.conv1.weight"]} ) visual_encoder.update({f"{diffuser_prefix}.pre_layernorm.weight": model[f"{original_prefix}.ln_pre.weight"]}) visual_encoder.update({f"{diffuser_prefix}.pre_layernorm.bias": model[f"{original_prefix}.ln_pre.bias"]}) for i in range(blip2config.vision_config.num_hidden_layers): visual_encoder.update( visual_encoder_layer_from_original_checkpoint( model, f"{diffuser_prefix}.encoder.layers.{i}", f"{original_prefix}.transformer.resblocks.{i}" ) ) visual_encoder.update({f"{diffuser_prefix}.post_layernorm.weight": model["blip.ln_vision.weight"]}) visual_encoder.update({f"{diffuser_prefix}.post_layernorm.bias": model["blip.ln_vision.bias"]}) return visual_encoder def qformer_original_checkpoint_to_diffusers_checkpoint(model): qformer_checkpoint = {} qformer_checkpoint.update(embeddings_from_original_checkpoint(model, "embeddings", "blip.Qformer.bert.embeddings")) qformer_checkpoint.update({"query_tokens": model["blip.query_tokens"]}) qformer_checkpoint.update(proj_layer_from_original_checkpoint(model, "proj_layer", "proj_layer")) qformer_checkpoint.update( encoder_from_original_checkpoint(model, "encoder.layer", "blip.Qformer.bert.encoder.layer") ) qformer_checkpoint.update(visual_encoder_from_original_checkpoint(model, "visual_encoder", "blip.visual_encoder")) return qformer_checkpoint def get_qformer(model): print("loading qformer") qformer = qformer_model_from_original_config() qformer_diffusers_checkpoint = qformer_original_checkpoint_to_diffusers_checkpoint(model) load_checkpoint_to_model(qformer_diffusers_checkpoint, qformer) print("done loading qformer") return qformer def load_checkpoint_to_model(checkpoint, model): with tempfile.NamedTemporaryFile(delete=False) as file: torch.save(checkpoint, file.name) del checkpoint model.load_state_dict(torch.load(file.name), strict=False) os.remove(file.name) def save_blip_diffusion_model(model, args): qformer = get_qformer(model) qformer.eval() text_encoder = ContextCLIPTextModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="text_encoder") vae = AutoencoderKL.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="vae") unet = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet") vae.eval() text_encoder.eval() scheduler = PNDMScheduler( beta_start=0.00085, beta_end=0.012, 
beta_schedule="scaled_linear", set_alpha_to_one=False, skip_prk_steps=True, ) tokenizer = CLIPTokenizer.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="tokenizer") image_processor = BlipImageProcessor() blip_diffusion = BlipDiffusionPipeline( tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, unet=unet, scheduler=scheduler, qformer=qformer, image_processor=image_processor, ) blip_diffusion.save_pretrained(args.checkpoint_path) def main(args): model, _, _ = load_model_and_preprocess("blip_diffusion", "base", device="cpu", is_eval=True) save_blip_diffusion_model(model.state_dict(), args) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.") args = parser.parse_args() main(args)
diffusers/scripts/convert_blipdiffusion_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_blipdiffusion_to_diffusers.py", "repo_id": "diffusers", "token_count": 5920 }
107
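Once the conversion script above has written the pipeline to `--checkpoint_path`, the output directory can be reloaded like any other diffusers checkpoint. A minimal sketch, with a hypothetical output path:

import torch

from diffusers.pipelines import BlipDiffusionPipeline

# "./blip-diffusion-converted" is a placeholder for the --checkpoint_path used above.
pipe = BlipDiffusionPipeline.from_pretrained("./blip-diffusion-converted", torch_dtype=torch.float16)
pipe.to("cuda")
# The pipeline is then called with a reference image plus source/target subject prompts;
# see the BLIP Diffusion pipeline documentation for the exact call signature.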
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Conversion script for the LDM checkpoints. """ import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel def shave_segments(path, n_shave_prefix_segments=1): """ Removes segments. Positive values shave the first segments, negative shave the last segments. """ if n_shave_prefix_segments >= 0: return ".".join(path.split(".")[n_shave_prefix_segments:]) else: return ".".join(path.split(".")[:n_shave_prefix_segments]) def renew_resnet_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside resnets to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item.replace("in_layers.0", "norm1") new_item = new_item.replace("in_layers.2", "conv1") new_item = new_item.replace("out_layers.0", "norm2") new_item = new_item.replace("out_layers.3", "conv2") new_item = new_item.replace("emb_layers.1", "time_emb_proj") new_item = new_item.replace("skip_connection", "conv_shortcut") new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({"old": old_item, "new": new_item}) return mapping def renew_attention_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside attentions to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item new_item = new_item.replace("norm.weight", "group_norm.weight") new_item = new_item.replace("norm.bias", "group_norm.bias") new_item = new_item.replace("proj_out.weight", "proj_attn.weight") new_item = new_item.replace("proj_out.bias", "proj_attn.bias") new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({"old": old_item, "new": new_item}) return mapping def assign_to_checkpoint( paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None ): """ This does the final conversion step: take locally converted weights and apply a global renaming to them. It splits attention layers, and takes into account additional replacements that may arise. Assigns the weights to the new checkpoint. """ assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): old_tensor = old_checkpoint[path] channels = old_tensor.shape[0] // 3 target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1) num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3 old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:]) query, key, value = old_tensor.split(channels // num_heads, dim=1) checkpoint[path_map["query"]] = query.reshape(target_shape) checkpoint[path_map["key"]] = key.reshape(target_shape) checkpoint[path_map["value"]] = value.reshape(target_shape) for path in paths: new_path = path["new"] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here new_path = new_path.replace("middle_block.0", "mid_block.resnets.0") new_path = new_path.replace("middle_block.1", "mid_block.attentions.0") new_path = new_path.replace("middle_block.2", "mid_block.resnets.1") if additional_replacements is not None: for replacement in additional_replacements: new_path = new_path.replace(replacement["old"], replacement["new"]) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0] else: checkpoint[new_path] = old_checkpoint[path["old"]] def convert_ldm_checkpoint(checkpoint, config): """ Takes a state dict and a config, and returns a converted checkpoint. """ new_checkpoint = {} new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"] new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"] new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"] new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"] new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"] new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"] new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"] new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"] new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"] new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"] # Retrieves the keys for the input blocks only num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer}) input_blocks = { layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key] for layer_id in range(num_input_blocks) } # Retrieves the keys for the middle blocks only num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer}) middle_blocks = { layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key] for layer_id in range(num_middle_blocks) } # Retrieves the keys for the output blocks only num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer}) output_blocks = { layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key] for layer_id in range(num_output_blocks) } for i in range(1, num_input_blocks): block_id = (i - 1) // (config["num_res_blocks"] + 1) layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1) resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key] attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key] if f"input_blocks.{i}.0.op.weight" in checkpoint: 
new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[ f"input_blocks.{i}.0.op.weight" ] new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[ f"input_blocks.{i}.0.op.bias" ] continue paths = renew_resnet_paths(resnets) meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"} resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"} assign_to_checkpoint( paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config ) if len(attentions): paths = renew_attention_paths(attentions) meta_path = { "old": f"input_blocks.{i}.1", "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}", } to_split = { f"input_blocks.{i}.1.qkv.bias": { "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias", "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias", "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias", }, f"input_blocks.{i}.1.qkv.weight": { "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight", "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight", "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight", }, } assign_to_checkpoint( paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], attention_paths_to_split=to_split, config=config, ) resnet_0 = middle_blocks[0] attentions = middle_blocks[1] resnet_1 = middle_blocks[2] resnet_0_paths = renew_resnet_paths(resnet_0) assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config) resnet_1_paths = renew_resnet_paths(resnet_1) assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config) attentions_paths = renew_attention_paths(attentions) to_split = { "middle_block.1.qkv.bias": { "key": "mid_block.attentions.0.key.bias", "query": "mid_block.attentions.0.query.bias", "value": "mid_block.attentions.0.value.bias", }, "middle_block.1.qkv.weight": { "key": "mid_block.attentions.0.key.weight", "query": "mid_block.attentions.0.query.weight", "value": "mid_block.attentions.0.value.weight", }, } assign_to_checkpoint( attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config ) for i in range(num_output_blocks): block_id = i // (config["num_res_blocks"] + 1) layer_in_block_id = i % (config["num_res_blocks"] + 1) output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]] output_block_list = {} for layer in output_block_layers: layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1) if layer_id in output_block_list: output_block_list[layer_id].append(layer_name) else: output_block_list[layer_id] = [layer_name] if len(output_block_list) > 1: resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key] attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key] resnet_0_paths = renew_resnet_paths(resnets) paths = renew_resnet_paths(resnets) meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"} assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config) if ["conv.weight", "conv.bias"] in output_block_list.values(): index = list(output_block_list.values()).index(["conv.weight", "conv.bias"]) new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[ f"output_blocks.{i}.{index}.conv.weight" ] 
new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[ f"output_blocks.{i}.{index}.conv.bias" ] # Clear attentions as they have been attributed above. if len(attentions) == 2: attentions = [] if len(attentions): paths = renew_attention_paths(attentions) meta_path = { "old": f"output_blocks.{i}.1", "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}", } to_split = { f"output_blocks.{i}.1.qkv.bias": { "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias", "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias", "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias", }, f"output_blocks.{i}.1.qkv.weight": { "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight", "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight", "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight", }, } assign_to_checkpoint( paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None, config=config, ) else: resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1) for path in resnet_0_paths: old_path = ".".join(["output_blocks", str(i), path["old"]]) new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]]) new_checkpoint[new_path] = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The config json file corresponding to the architecture.", ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") args = parser.parse_args() checkpoint = torch.load(args.checkpoint_path) with open(args.config_file) as f: config = json.loads(f.read()) converted_checkpoint = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] model = UNet2DModel(**config) model.load_state_dict(converted_checkpoint) try: scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1])) vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1])) pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
diffusers/scripts/convert_ldm_original_checkpoint_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_ldm_original_checkpoint_to_diffusers.py", "repo_id": "diffusers", "token_count": 6854 }
108
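The renaming helpers in the script above are easiest to follow on a single key. A worked example with a made-up key, assuming `shave_segments` and `renew_resnet_paths` are available in the current session (e.g. pasted into a REPL):

shave_segments("input_blocks.3.0.in_layers.2.weight", n_shave_prefix_segments=2)
# -> "0.in_layers.2.weight"

renew_resnet_paths(["input_blocks.3.0.in_layers.2.weight"], n_shave_prefix_segments=2)
# -> [{"old": "input_blocks.3.0.in_layers.2.weight", "new": "0.conv1.weight"}]
# (local renames such as in_layers.2 -> conv1 are applied first, then the prefix is shaved)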
import argparse import sys import tensorrt as trt def convert_models(onnx_path: str, num_controlnet: int, output_path: str, fp16: bool = False, sd_xl: bool = False): """ Function to convert models in stable diffusion controlnet pipeline into TensorRT format Example: python convert_stable_diffusion_controlnet_to_tensorrt.py --onnx_path path-to-models-stable_diffusion/RevAnimated-v1-2-2/unet/model.onnx --output_path path-to-models-stable_diffusion/RevAnimated-v1-2-2/unet/model.engine --fp16 --num_controlnet 2 Example for SD XL: python convert_stable_diffusion_controlnet_to_tensorrt.py --onnx_path path-to-models-stable_diffusion/stable-diffusion-xl-base-1.0/unet/model.onnx --output_path path-to-models-stable_diffusion/stable-diffusion-xl-base-1.0/unet/model.engine --fp16 --num_controlnet 1 --sd_xl Returns: unet/model.engine run test script in diffusers/examples/community python test_onnx_controlnet.py --sd_model danbrown/RevAnimated-v1-2-2 --onnx_model_dir path-to-models-stable_diffusion/RevAnimated-v1-2-2 --unet_engine_path path-to-models-stable_diffusion/stable-diffusion-xl-base-1.0/unet/model.engine --qr_img_path path-to-qr-code-image """ # UNET if sd_xl: batch_size = 1 unet_in_channels = 4 unet_sample_size = 64 num_tokens = 77 text_hidden_size = 2048 img_size = 512 text_embeds_shape = (2 * batch_size, 1280) time_ids_shape = (2 * batch_size, 6) else: batch_size = 1 unet_in_channels = 4 unet_sample_size = 64 num_tokens = 77 text_hidden_size = 768 img_size = 512 batch_size = 1 latents_shape = (2 * batch_size, unet_in_channels, unet_sample_size, unet_sample_size) embed_shape = (2 * batch_size, num_tokens, text_hidden_size) controlnet_conds_shape = (num_controlnet, 2 * batch_size, 3, img_size, img_size) TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE) TRT_BUILDER = trt.Builder(TRT_LOGGER) TRT_RUNTIME = trt.Runtime(TRT_LOGGER) network = TRT_BUILDER.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) onnx_parser = trt.OnnxParser(network, TRT_LOGGER) parse_success = onnx_parser.parse_from_file(onnx_path) for idx in range(onnx_parser.num_errors): print(onnx_parser.get_error(idx)) if not parse_success: sys.exit("ONNX model parsing failed") print("Load Onnx model done") profile = TRT_BUILDER.create_optimization_profile() profile.set_shape("sample", latents_shape, latents_shape, latents_shape) profile.set_shape("encoder_hidden_states", embed_shape, embed_shape, embed_shape) profile.set_shape("controlnet_conds", controlnet_conds_shape, controlnet_conds_shape, controlnet_conds_shape) if sd_xl: profile.set_shape("text_embeds", text_embeds_shape, text_embeds_shape, text_embeds_shape) profile.set_shape("time_ids", time_ids_shape, time_ids_shape, time_ids_shape) config = TRT_BUILDER.create_builder_config() config.add_optimization_profile(profile) config.set_preview_feature(trt.PreviewFeature.DISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805, True) if fp16: config.set_flag(trt.BuilderFlag.FP16) plan = TRT_BUILDER.build_serialized_network(network, config) if plan is None: sys.exit("Failed building engine") print("Succeeded building engine") engine = TRT_RUNTIME.deserialize_cuda_engine(plan) ## save TRT engine with open(output_path, "wb") as f: f.write(engine.serialize()) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--sd_xl", action="store_true", default=False, help="SD XL pipeline") parser.add_argument( "--onnx_path", type=str, required=True, help="Path to the onnx checkpoint to convert", ) parser.add_argument("--num_controlnet", type=int) 
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.") parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode") args = parser.parse_args() convert_models(args.onnx_path, args.num_controlnet, args.output_path, args.fp16, args.sd_xl)
diffusers/scripts/convert_stable_diffusion_controlnet_to_tensorrt.py/0
{ "file_path": "diffusers/scripts/convert_stable_diffusion_controlnet_to_tensorrt.py", "repo_id": "diffusers", "token_count": 1860 }
109
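After the script above serializes the engine to `--output_path`, it can be reloaded with the TensorRT runtime. A minimal sketch (the path is a placeholder, and the same TensorRT version used for the build is assumed):

import tensorrt as trt

TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
runtime = trt.Runtime(TRT_LOGGER)

with open("unet/model.engine", "rb") as f:   # placeholder: whatever --output_path was
    engine = runtime.deserialize_cuda_engine(f.read())

context = engine.create_execution_context()  # ready to bind buffers and run inference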
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available from . import BaseDiffusersCLICommand def info_command_factory(_): return EnvironmentCommand() class EnvironmentCommand(BaseDiffusersCLICommand): @staticmethod def register_subcommand(parser: ArgumentParser): download_parser = parser.add_parser("env") download_parser.set_defaults(func=info_command_factory) def run(self): hub_version = huggingface_hub.__version__ pt_version = "not installed" pt_cuda_available = "NA" if is_torch_available(): import torch pt_version = torch.__version__ pt_cuda_available = torch.cuda.is_available() transformers_version = "not installed" if is_transformers_available(): import transformers transformers_version = transformers.__version__ accelerate_version = "not installed" if is_accelerate_available(): import accelerate accelerate_version = accelerate.__version__ xformers_version = "not installed" if is_xformers_available(): import xformers xformers_version = xformers.__version__ info = { "`diffusers` version": version, "Platform": platform.platform(), "Python version": platform.python_version(), "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})", "Huggingface_hub version": hub_version, "Transformers version": transformers_version, "Accelerate version": accelerate_version, "xFormers version": xformers_version, "Using GPU in script?": "<fill in>", "Using distributed or parallel set-up in script?": "<fill in>", } print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n") print(self.format_dict(info)) return info @staticmethod def format_dict(d): return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
diffusers/src/diffusers/commands/env.py/0
{ "file_path": "diffusers/src/diffusers/commands/env.py", "repo_id": "diffusers", "token_count": 1070 }
110
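For reference, the command registered above is typically invoked as `diffusers-cli env`; the same report can also be produced programmatically. A small sketch (the import path follows the file location above):

from diffusers.commands.env import EnvironmentCommand

info = EnvironmentCommand().run()   # prints the report and returns it as a dict
print(info["Platform"])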
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import List, Union from ..utils import MIN_PEFT_VERSION, check_peft_version, is_peft_available class PeftAdapterMixin: """ A class containing all functions for loading and using adapters weights that are supported in PEFT library. For more details about adapters and injecting them in a transformer-based model, check out the PEFT [documentation](https://huggingface.co/docs/peft/index). Install the latest version of PEFT, and use this mixin to: - Attach new adapters in the model. - Attach multiple adapters and iteratively activate/deactivate them. - Activate/deactivate all adapters from the model. - Get a list of the active adapters. """ _hf_peft_config_loaded = False def add_adapter(self, adapter_config, adapter_name: str = "default") -> None: r""" Adds a new adapter to the current model for training. If no adapter name is passed, a default name is assigned to the adapter to follow the convention of the PEFT library. If you are not familiar with adapters and PEFT methods, we invite you to read more about them in the PEFT [documentation](https://huggingface.co/docs/peft). Args: adapter_config (`[~peft.PeftConfig]`): The configuration of the adapter to add; supported adapters are non-prefix tuning and adaption prompt methods. adapter_name (`str`, *optional*, defaults to `"default"`): The name of the adapter to add. If no name is passed, a default name is assigned to the adapter. """ check_peft_version(min_version=MIN_PEFT_VERSION) if not is_peft_available(): raise ImportError("PEFT is not available. Please install PEFT to use this function: `pip install peft`.") from peft import PeftConfig, inject_adapter_in_model if not self._hf_peft_config_loaded: self._hf_peft_config_loaded = True elif adapter_name in self.peft_config: raise ValueError(f"Adapter with name {adapter_name} already exists. Please use a different name.") if not isinstance(adapter_config, PeftConfig): raise ValueError( f"adapter_config should be an instance of PeftConfig. Got {type(adapter_config)} instead." ) # Unlike transformers, here we don't need to retrieve the name_or_path of the unet as the loading logic is # handled by the `load_lora_layers` or `LoraLoaderMixin`. Therefore we set it to `None` here. adapter_config.base_model_name_or_path = None inject_adapter_in_model(adapter_config, self, adapter_name) self.set_adapter(adapter_name) def set_adapter(self, adapter_name: Union[str, List[str]]) -> None: """ Sets a specific adapter by forcing the model to only use that adapter and disables the other adapters. If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT [documentation](https://huggingface.co/docs/peft). Args: adapter_name (Union[str, List[str]])): The list of adapters to set or the adapter name in the case of a single adapter. """ check_peft_version(min_version=MIN_PEFT_VERSION) if not self._hf_peft_config_loaded: raise ValueError("No adapter loaded. 
Please load an adapter first.")

        if isinstance(adapter_name, str):
            adapter_name = [adapter_name]

        missing = set(adapter_name) - set(self.peft_config)
        if len(missing) > 0:
            raise ValueError(
                f"Following adapter(s) could not be found: {', '.join(missing)}. Make sure you are passing the correct adapter name(s)."
                f" currently loaded adapters are: {list(self.peft_config.keys())}"
            )

        from peft.tuners.tuners_utils import BaseTunerLayer

        _adapters_has_been_set = False

        for _, module in self.named_modules():
            if isinstance(module, BaseTunerLayer):
                if hasattr(module, "set_adapter"):
                    module.set_adapter(adapter_name)
                # Previous versions of PEFT do not support multi-adapter inference
                elif not hasattr(module, "set_adapter") and len(adapter_name) != 1:
                    raise ValueError(
                        "You are trying to set multiple adapters and you have a PEFT version that does not support multi-adapter inference. Please upgrade to the latest version of PEFT."
                        " `pip install -U peft` or `pip install -U git+https://github.com/huggingface/peft.git`"
                    )
                else:
                    module.active_adapter = adapter_name
                _adapters_has_been_set = True

        if not _adapters_has_been_set:
            raise ValueError(
                "Did not succeed in setting the adapter. Please make sure you are using a model that supports adapters."
            )

    def disable_adapters(self) -> None:
        r"""
        Disable all adapters attached to the model and fall back to inference with the base model only.

        If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
        [documentation](https://huggingface.co/docs/peft).
        """
        check_peft_version(min_version=MIN_PEFT_VERSION)

        if not self._hf_peft_config_loaded:
            raise ValueError("No adapter loaded. Please load an adapter first.")

        from peft.tuners.tuners_utils import BaseTunerLayer

        for _, module in self.named_modules():
            if isinstance(module, BaseTunerLayer):
                if hasattr(module, "enable_adapters"):
                    module.enable_adapters(enabled=False)
                else:
                    # support for older PEFT versions
                    module.disable_adapters = True

    def enable_adapters(self) -> None:
        """
        Enable adapters that are attached to the model. The model uses `self.active_adapters()` to retrieve the
        list of adapters to enable.

        If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
        [documentation](https://huggingface.co/docs/peft).
        """
        check_peft_version(min_version=MIN_PEFT_VERSION)

        if not self._hf_peft_config_loaded:
            raise ValueError("No adapter loaded. Please load an adapter first.")

        from peft.tuners.tuners_utils import BaseTunerLayer

        for _, module in self.named_modules():
            if isinstance(module, BaseTunerLayer):
                if hasattr(module, "enable_adapters"):
                    module.enable_adapters(enabled=True)
                else:
                    # support for older PEFT versions
                    module.disable_adapters = False

    def active_adapters(self) -> List[str]:
        """
        Gets the current list of active adapters of the model.

        If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
        [documentation](https://huggingface.co/docs/peft).
        """
        check_peft_version(min_version=MIN_PEFT_VERSION)

        if not is_peft_available():
            raise ImportError("PEFT is not available. Please install PEFT to use this function: `pip install peft`.")

        if not self._hf_peft_config_loaded:
            raise ValueError("No adapter loaded. Please load an adapter first.")

        from peft.tuners.tuners_utils import BaseTunerLayer

        for _, module in self.named_modules():
            if isinstance(module, BaseTunerLayer):
                return module.active_adapter
diffusers/src/diffusers/loaders/peft.py/0
{ "file_path": "diffusers/src/diffusers/loaders/peft.py", "repo_id": "diffusers", "token_count": 3290 }
111
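A minimal sketch of the adapter workflow this mixin describes, assuming `peft` is installed and that the model class mixes in `PeftAdapterMixin` (as the UNet in recent diffusers releases does); the rank and target modules are illustrative choices, not recommendations:

from diffusers import UNet2DConditionModel
from peft import LoraConfig

unet = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet")

# Illustrative LoRA config targeting the attention projections.
lora_config = LoraConfig(r=4, lora_alpha=4, target_modules=["to_q", "to_k", "to_v", "to_out.0"])
unet.add_adapter(lora_config, adapter_name="my_lora")   # also makes "my_lora" the active adapter

unet.disable_adapters()      # inference with the base weights only
unet.enable_adapters()       # re-enable the attached adapter(s)
unet.set_adapter("my_lora")  # explicitly select which adapter is active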
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...utils import is_torch_version from ...utils.accelerate_utils import apply_forward_hook from ..attention_processor import CROSS_ATTENTION_PROCESSORS, AttentionProcessor, AttnProcessor from ..modeling_outputs import AutoencoderKLOutput from ..modeling_utils import ModelMixin from ..unets.unet_3d_blocks import MidBlockTemporalDecoder, UpBlockTemporalDecoder from .vae import DecoderOutput, DiagonalGaussianDistribution, Encoder class TemporalDecoder(nn.Module): def __init__( self, in_channels: int = 4, out_channels: int = 3, block_out_channels: Tuple[int] = (128, 256, 512, 512), layers_per_block: int = 2, ): super().__init__() self.layers_per_block = layers_per_block self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1) self.mid_block = MidBlockTemporalDecoder( num_layers=self.layers_per_block, in_channels=block_out_channels[-1], out_channels=block_out_channels[-1], attention_head_dim=block_out_channels[-1], ) # up self.up_blocks = nn.ModuleList([]) reversed_block_out_channels = list(reversed(block_out_channels)) output_channel = reversed_block_out_channels[0] for i in range(len(block_out_channels)): prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 up_block = UpBlockTemporalDecoder( num_layers=self.layers_per_block + 1, in_channels=prev_output_channel, out_channels=output_channel, add_upsample=not is_final_block, ) self.up_blocks.append(up_block) prev_output_channel = output_channel self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=32, eps=1e-6) self.conv_act = nn.SiLU() self.conv_out = torch.nn.Conv2d( in_channels=block_out_channels[0], out_channels=out_channels, kernel_size=3, padding=1, ) conv_out_kernel_size = (3, 1, 1) padding = [int(k // 2) for k in conv_out_kernel_size] self.time_conv_out = torch.nn.Conv3d( in_channels=out_channels, out_channels=out_channels, kernel_size=conv_out_kernel_size, padding=padding, ) self.gradient_checkpointing = False def forward( self, sample: torch.FloatTensor, image_only_indicator: torch.FloatTensor, num_frames: int = 1, ) -> torch.FloatTensor: r"""The forward method of the `Decoder` class.""" sample = self.conv_in(sample) upscale_dtype = next(iter(self.up_blocks.parameters())).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward if is_torch_version(">=", "1.11.0"): # middle sample = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block), sample, image_only_indicator, use_reentrant=False, ) sample = sample.to(upscale_dtype) # up for up_block in self.up_blocks: sample = torch.utils.checkpoint.checkpoint( 
create_custom_forward(up_block), sample, image_only_indicator, use_reentrant=False, ) else: # middle sample = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block), sample, image_only_indicator, ) sample = sample.to(upscale_dtype) # up for up_block in self.up_blocks: sample = torch.utils.checkpoint.checkpoint( create_custom_forward(up_block), sample, image_only_indicator, ) else: # middle sample = self.mid_block(sample, image_only_indicator=image_only_indicator) sample = sample.to(upscale_dtype) # up for up_block in self.up_blocks: sample = up_block(sample, image_only_indicator=image_only_indicator) # post-process sample = self.conv_norm_out(sample) sample = self.conv_act(sample) sample = self.conv_out(sample) batch_frames, channels, height, width = sample.shape batch_size = batch_frames // num_frames sample = sample[None, :].reshape(batch_size, num_frames, channels, height, width).permute(0, 2, 1, 3, 4) sample = self.time_conv_out(sample) sample = sample.permute(0, 2, 1, 3, 4).reshape(batch_frames, channels, height, width) return sample class AutoencoderKLTemporalDecoder(ModelMixin, ConfigMixin): r""" A VAE model with KL loss for encoding images into latents and decoding latent representations into images. This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented for all models (such as downloading or saving). Parameters: in_channels (int, *optional*, defaults to 3): Number of channels in the input image. out_channels (int, *optional*, defaults to 3): Number of channels in the output. down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`): Tuple of downsample block types. block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`): Tuple of block output channels. layers_per_block: (`int`, *optional*, defaults to 1): Number of layers per block. latent_channels (`int`, *optional*, defaults to 4): Number of channels in the latent space. sample_size (`int`, *optional*, defaults to `32`): Sample input size. scaling_factor (`float`, *optional*, defaults to 0.18215): The component-wise standard deviation of the trained latent space computed using the first batch of the training set. This is used to scale the latent space to have unit variance when training the diffusion model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1 / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper. force_upcast (`bool`, *optional*, default to `True`): If enabled it will force the VAE to run in float32 for high image resolution pipelines, such as SD-XL. 
VAE can be fine-tuned / trained to a lower range without loosing too much precision in which case `force_upcast` can be set to `False` - see: https://huggingface.co/madebyollin/sdxl-vae-fp16-fix """ _supports_gradient_checkpointing = True @register_to_config def __init__( self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str] = ("DownEncoderBlock2D",), block_out_channels: Tuple[int] = (64,), layers_per_block: int = 1, latent_channels: int = 4, sample_size: int = 32, scaling_factor: float = 0.18215, force_upcast: float = True, ): super().__init__() # pass init params to Encoder self.encoder = Encoder( in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, double_z=True, ) # pass init params to Decoder self.decoder = TemporalDecoder( in_channels=latent_channels, out_channels=out_channels, block_out_channels=block_out_channels, layers_per_block=layers_per_block, ) self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1) sample_size = ( self.config.sample_size[0] if isinstance(self.config.sample_size, (list, tuple)) else self.config.sample_size ) self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1))) self.tile_overlap_factor = 0.25 def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, (Encoder, TemporalDecoder)): module.gradient_checkpointing = value @property # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. """ # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True) for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." 
) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ if all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnProcessor() else: raise ValueError( f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" ) self.set_attn_processor(processor) @apply_forward_hook def encode( self, x: torch.FloatTensor, return_dict: bool = True ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]: """ Encode a batch of images into latents. Args: x (`torch.FloatTensor`): Input batch of images. return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple. Returns: The latent representations of the encoded images. If `return_dict` is True, a [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned. """ h = self.encoder(x) moments = self.quant_conv(h) posterior = DiagonalGaussianDistribution(moments) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=posterior) @apply_forward_hook def decode( self, z: torch.FloatTensor, num_frames: int, return_dict: bool = True, ) -> Union[DecoderOutput, torch.FloatTensor]: """ Decode a batch of images. Args: z (`torch.FloatTensor`): Input batch of latent vectors. return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple. Returns: [`~models.vae.DecoderOutput`] or `tuple`: If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is returned. """ batch_size = z.shape[0] // num_frames image_only_indicator = torch.zeros(batch_size, num_frames, dtype=z.dtype, device=z.device) decoded = self.decoder(z, num_frames=num_frames, image_only_indicator=image_only_indicator) if not return_dict: return (decoded,) return DecoderOutput(sample=decoded) def forward( self, sample: torch.FloatTensor, sample_posterior: bool = False, return_dict: bool = True, generator: Optional[torch.Generator] = None, num_frames: int = 1, ) -> Union[DecoderOutput, torch.FloatTensor]: r""" Args: sample (`torch.FloatTensor`): Input sample. sample_posterior (`bool`, *optional*, defaults to `False`): Whether to sample from the posterior. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`DecoderOutput`] instead of a plain tuple. """ x = sample posterior = self.encode(x).latent_dist if sample_posterior: z = posterior.sample(generator=generator) else: z = posterior.mode() dec = self.decode(z, num_frames=num_frames).sample if not return_dict: return (dec,) return DecoderOutput(sample=dec)
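The part of `TemporalDecoder.forward` that is easiest to misread is the final fold from the frame-stacked batch layout into a 5D video tensor for `time_conv_out` and back. The snippet below reproduces just that step in plain PyTorch as a minimal sketch; the clip length, channel count, and resolution are illustrative assumptions, not the model's actual configuration.

```py
import torch
import torch.nn as nn

# Illustrative sizes only: 2 clips of 4 frames, 3 output channels, 16x16 resolution.
batch_size, num_frames, channels, height, width = 2, 4, 3, 16, 16

# The decoder receives frames stacked along the batch axis: (B * F, C, H, W).
sample = torch.randn(batch_size * num_frames, channels, height, width)

# Temporal-only convolution, mirroring kernel_size=(3, 1, 1) with padding (1, 0, 0).
time_conv_out = nn.Conv3d(channels, channels, kernel_size=(3, 1, 1), padding=(1, 0, 0))

# (B * F, C, H, W) -> (B, F, C, H, W) -> (B, C, F, H, W)
video = sample.reshape(batch_size, num_frames, channels, height, width).permute(0, 2, 1, 3, 4)
video = time_conv_out(video)  # mixes information across neighbouring frames only
# back to the frame-stacked layout the caller expects
out = video.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width)
assert out.shape == sample.shape
```

Because the kernel is `(3, 1, 1)` with padding `(1, 0, 0)`, the convolution exchanges information between neighbouring frames while leaving the spatial dimensions untouched.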
diffusers/src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py
{ "file_path": "diffusers/src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py", "repo_id": "diffusers", "token_count": 7180 }
112
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numbers from typing import Dict, Optional, Tuple import torch import torch.nn as nn import torch.nn.functional as F from ..utils import is_torch_version from .activations import get_activation from .embeddings import CombinedTimestepLabelEmbeddings, PixArtAlphaCombinedTimestepSizeEmbeddings class AdaLayerNorm(nn.Module): r""" Norm layer modified to incorporate timestep embeddings. Parameters: embedding_dim (`int`): The size of each embedding vector. num_embeddings (`int`): The size of the embeddings dictionary. """ def __init__(self, embedding_dim: int, num_embeddings: int): super().__init__() self.emb = nn.Embedding(num_embeddings, embedding_dim) self.silu = nn.SiLU() self.linear = nn.Linear(embedding_dim, embedding_dim * 2) self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False) def forward(self, x: torch.Tensor, timestep: torch.Tensor) -> torch.Tensor: emb = self.linear(self.silu(self.emb(timestep))) scale, shift = torch.chunk(emb, 2) x = self.norm(x) * (1 + scale) + shift return x class AdaLayerNormZero(nn.Module): r""" Norm layer adaptive layer norm zero (adaLN-Zero). Parameters: embedding_dim (`int`): The size of each embedding vector. num_embeddings (`int`): The size of the embeddings dictionary. """ def __init__(self, embedding_dim: int, num_embeddings: int): super().__init__() self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim) self.silu = nn.SiLU() self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True) self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6) def forward( self, x: torch.Tensor, timestep: torch.Tensor, class_labels: torch.LongTensor, hidden_dtype: Optional[torch.dtype] = None, ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype))) shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1) x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] return x, gate_msa, shift_mlp, scale_mlp, gate_mlp class AdaLayerNormSingle(nn.Module): r""" Norm layer adaptive layer norm single (adaLN-single). As proposed in PixArt-Alpha (see: https://arxiv.org/abs/2310.00426; Section 2.3). Parameters: embedding_dim (`int`): The size of each embedding vector. use_additional_conditions (`bool`): To use additional conditions for normalization or not. 
""" def __init__(self, embedding_dim: int, use_additional_conditions: bool = False): super().__init__() self.emb = PixArtAlphaCombinedTimestepSizeEmbeddings( embedding_dim, size_emb_dim=embedding_dim // 3, use_additional_conditions=use_additional_conditions ) self.silu = nn.SiLU() self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True) def forward( self, timestep: torch.Tensor, added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None, batch_size: Optional[int] = None, hidden_dtype: Optional[torch.dtype] = None, ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: # No modulation happening here. embedded_timestep = self.emb(timestep, **added_cond_kwargs, batch_size=batch_size, hidden_dtype=hidden_dtype) return self.linear(self.silu(embedded_timestep)), embedded_timestep class AdaGroupNorm(nn.Module): r""" GroupNorm layer modified to incorporate timestep embeddings. Parameters: embedding_dim (`int`): The size of each embedding vector. num_embeddings (`int`): The size of the embeddings dictionary. num_groups (`int`): The number of groups to separate the channels into. act_fn (`str`, *optional*, defaults to `None`): The activation function to use. eps (`float`, *optional*, defaults to `1e-5`): The epsilon value to use for numerical stability. """ def __init__( self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5 ): super().__init__() self.num_groups = num_groups self.eps = eps if act_fn is None: self.act = None else: self.act = get_activation(act_fn) self.linear = nn.Linear(embedding_dim, out_dim * 2) def forward(self, x: torch.Tensor, emb: torch.Tensor) -> torch.Tensor: if self.act: emb = self.act(emb) emb = self.linear(emb) emb = emb[:, :, None, None] scale, shift = emb.chunk(2, dim=1) x = F.group_norm(x, self.num_groups, eps=self.eps) x = x * (1 + scale) + shift return x class AdaLayerNormContinuous(nn.Module): def __init__( self, embedding_dim: int, conditioning_embedding_dim: int, # NOTE: It is a bit weird that the norm layer can be configured to have scale and shift parameters # because the output is immediately scaled and shifted by the projected conditioning embeddings. # Note that AdaLayerNorm does not let the norm layer have scale and shift parameters. # However, this is how it was implemented in the original code, and it's rather likely you should # set `elementwise_affine` to False. 
elementwise_affine=True, eps=1e-5, bias=True, norm_type="layer_norm", ): super().__init__() self.silu = nn.SiLU() self.linear = nn.Linear(conditioning_embedding_dim, embedding_dim * 2, bias=bias) if norm_type == "layer_norm": self.norm = LayerNorm(embedding_dim, eps, elementwise_affine, bias) elif norm_type == "rms_norm": self.norm = RMSNorm(embedding_dim, eps, elementwise_affine) else: raise ValueError(f"unknown norm_type {norm_type}") def forward(self, x: torch.Tensor, conditioning_embedding: torch.Tensor) -> torch.Tensor: emb = self.linear(self.silu(conditioning_embedding)) scale, shift = torch.chunk(emb, 2, dim=1) x = self.norm(x) * (1 + scale)[:, None, :] + shift[:, None, :] return x if is_torch_version(">=", "2.1.0"): LayerNorm = nn.LayerNorm else: # Has optional bias parameter compared to torch layer norm # TODO: replace with torch layernorm once min required torch version >= 2.1 class LayerNorm(nn.Module): def __init__(self, dim, eps: float = 1e-5, elementwise_affine: bool = True, bias: bool = True): super().__init__() self.eps = eps if isinstance(dim, numbers.Integral): dim = (dim,) self.dim = torch.Size(dim) if elementwise_affine: self.weight = nn.Parameter(torch.ones(dim)) self.bias = nn.Parameter(torch.zeros(dim)) if bias else None else: self.weight = None self.bias = None def forward(self, input): return F.layer_norm(input, self.dim, self.weight, self.bias, self.eps) class RMSNorm(nn.Module): def __init__(self, dim, eps: float, elementwise_affine: bool = True): super().__init__() self.eps = eps if isinstance(dim, numbers.Integral): dim = (dim,) self.dim = torch.Size(dim) if elementwise_affine: self.weight = nn.Parameter(torch.ones(dim)) else: self.weight = None def forward(self, hidden_states): input_dtype = hidden_states.dtype variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.eps) if self.weight is not None: # convert into half-precision if necessary if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) hidden_states = hidden_states * self.weight else: hidden_states = hidden_states.to(input_dtype) return hidden_states class GlobalResponseNorm(nn.Module): # Taken from https://github.com/facebookresearch/ConvNeXt-V2/blob/3608f67cc1dae164790c5d0aead7bf2d73d9719b/models/utils.py#L105 def __init__(self, dim): super().__init__() self.gamma = nn.Parameter(torch.zeros(1, 1, 1, dim)) self.beta = nn.Parameter(torch.zeros(1, 1, 1, dim)) def forward(self, x): gx = torch.norm(x, p=2, dim=(1, 2), keepdim=True) nx = gx / (gx.mean(dim=-1, keepdim=True) + 1e-6) return self.gamma * (x * nx) + self.beta + x
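All of the `Ada*Norm` variants above share one recipe: project a conditioning embedding to a `(scale, shift)` pair and use it to modulate a norm layer that carries no learnable affine parameters of its own. Here is a minimal, self-contained sketch of that pattern with made-up dimensions; it mirrors the broadcast used by `AdaLayerNormContinuous`, not any specific class's exact layout.

```py
import torch
import torch.nn as nn
import torch.nn.functional as F

embedding_dim, seq_len, batch = 8, 10, 2

norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)
proj = nn.Linear(embedding_dim, 2 * embedding_dim)  # -> concatenated (scale, shift)

x = torch.randn(batch, seq_len, embedding_dim)  # hidden states
cond = torch.randn(batch, embedding_dim)        # e.g. a pooled conditioning embedding

scale, shift = proj(F.silu(cond)).chunk(2, dim=1)
# broadcast each sample's (scale, shift) over the sequence dimension
out = norm(x) * (1 + scale[:, None, :]) + shift[:, None, :]
print(out.shape)  # torch.Size([2, 10, 8])
```

The `1 + scale` form used throughout the file means that a zero conditioning projection reduces the whole operation to a plain, unmodulated norm.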
diffusers/src/diffusers/models/normalization.py
{ "file_path": "diffusers/src/diffusers/models/normalization.py", "repo_id": "diffusers", "token_count": 4030 }
113
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional from ..utils import deprecate from .unets.unet_2d_blocks import ( AttnDownBlock2D, AttnDownEncoderBlock2D, AttnSkipDownBlock2D, AttnSkipUpBlock2D, AttnUpBlock2D, AttnUpDecoderBlock2D, AutoencoderTinyBlock, CrossAttnDownBlock2D, CrossAttnUpBlock2D, DownBlock2D, KAttentionBlock, KCrossAttnDownBlock2D, KCrossAttnUpBlock2D, KDownBlock2D, KUpBlock2D, ResnetDownsampleBlock2D, ResnetUpsampleBlock2D, SimpleCrossAttnDownBlock2D, SimpleCrossAttnUpBlock2D, SkipDownBlock2D, SkipUpBlock2D, UNetMidBlock2D, UNetMidBlock2DCrossAttn, UNetMidBlock2DSimpleCrossAttn, UpBlock2D, UpDecoderBlock2D, ) def get_down_block( down_block_type: str, num_layers: int, in_channels: int, out_channels: int, temb_channels: int, add_downsample: bool, resnet_eps: float, resnet_act_fn: str, transformer_layers_per_block: int = 1, num_attention_heads: Optional[int] = None, resnet_groups: Optional[int] = None, cross_attention_dim: Optional[int] = None, downsample_padding: Optional[int] = None, dual_cross_attention: bool = False, use_linear_projection: bool = False, only_cross_attention: bool = False, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", attention_type: str = "default", resnet_skip_time_act: bool = False, resnet_out_scale_factor: float = 1.0, cross_attention_norm: Optional[str] = None, attention_head_dim: Optional[int] = None, downsample_type: Optional[str] = None, dropout: float = 0.0, ): deprecation_message = "Importing `get_down_block` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import get_down_block`, instead." 
deprecate("get_down_block", "0.29", deprecation_message) from .unets.unet_2d_blocks import get_down_block return get_down_block( down_block_type=down_block_type, num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, transformer_layers_per_block=transformer_layers_per_block, num_attention_heads=num_attention_heads, resnet_groups=resnet_groups, cross_attention_dim=cross_attention_dim, downsample_padding=downsample_padding, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, attention_type=attention_type, resnet_skip_time_act=resnet_skip_time_act, resnet_out_scale_factor=resnet_out_scale_factor, cross_attention_norm=cross_attention_norm, attention_head_dim=attention_head_dim, downsample_type=downsample_type, dropout=dropout, ) def get_mid_block( mid_block_type: str, temb_channels: int, in_channels: int, resnet_eps: float, resnet_act_fn: str, resnet_groups: int, output_scale_factor: float = 1.0, transformer_layers_per_block: int = 1, num_attention_heads: Optional[int] = None, cross_attention_dim: Optional[int] = None, dual_cross_attention: bool = False, use_linear_projection: bool = False, mid_block_only_cross_attention: bool = False, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", attention_type: str = "default", resnet_skip_time_act: bool = False, cross_attention_norm: Optional[str] = None, attention_head_dim: Optional[int] = 1, dropout: float = 0.0, ): if mid_block_type == "UNetMidBlock2DCrossAttn": return UNetMidBlock2DCrossAttn( transformer_layers_per_block=transformer_layers_per_block, in_channels=in_channels, temb_channels=temb_channels, dropout=dropout, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, output_scale_factor=output_scale_factor, resnet_time_scale_shift=resnet_time_scale_shift, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, resnet_groups=resnet_groups, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, attention_type=attention_type, ) elif mid_block_type == "UNetMidBlock2DSimpleCrossAttn": return UNetMidBlock2DSimpleCrossAttn( in_channels=in_channels, temb_channels=temb_channels, dropout=dropout, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, output_scale_factor=output_scale_factor, cross_attention_dim=cross_attention_dim, attention_head_dim=attention_head_dim, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift, skip_time_act=resnet_skip_time_act, only_cross_attention=mid_block_only_cross_attention, cross_attention_norm=cross_attention_norm, ) elif mid_block_type == "UNetMidBlock2D": return UNetMidBlock2D( in_channels=in_channels, temb_channels=temb_channels, dropout=dropout, num_layers=0, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, output_scale_factor=output_scale_factor, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift, add_attention=False, ) elif mid_block_type is None: return None else: raise ValueError(f"unknown mid_block_type : {mid_block_type}") def get_up_block( up_block_type: str, num_layers: int, in_channels: int, out_channels: int, prev_output_channel: int, temb_channels: int, add_upsample: bool, resnet_eps: float, resnet_act_fn: str, resolution_idx: Optional[int] = None, 
transformer_layers_per_block: int = 1, num_attention_heads: Optional[int] = None, resnet_groups: Optional[int] = None, cross_attention_dim: Optional[int] = None, dual_cross_attention: bool = False, use_linear_projection: bool = False, only_cross_attention: bool = False, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", attention_type: str = "default", resnet_skip_time_act: bool = False, resnet_out_scale_factor: float = 1.0, cross_attention_norm: Optional[str] = None, attention_head_dim: Optional[int] = None, upsample_type: Optional[str] = None, dropout: float = 0.0, ): deprecation_message = "Importing `get_up_block` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import get_up_block`, instead." deprecate("get_up_block", "0.29", deprecation_message) from .unets.unet_2d_blocks import get_up_block return get_up_block( up_block_type=up_block_type, num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resolution_idx=resolution_idx, transformer_layers_per_block=transformer_layers_per_block, num_attention_heads=num_attention_heads, resnet_groups=resnet_groups, cross_attention_dim=cross_attention_dim, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, attention_type=attention_type, resnet_skip_time_act=resnet_skip_time_act, resnet_out_scale_factor=resnet_out_scale_factor, cross_attention_norm=cross_attention_norm, attention_head_dim=attention_head_dim, upsample_type=upsample_type, dropout=dropout, ) class AutoencoderTinyBlock(AutoencoderTinyBlock): deprecation_message = "Importing `AutoencoderTinyBlock` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import AutoencoderTinyBlock`, instead." deprecate("AutoencoderTinyBlock", "0.29", deprecation_message) class UNetMidBlock2D(UNetMidBlock2D): deprecation_message = "Importing `UNetMidBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import UNetMidBlock2D`, instead." deprecate("UNetMidBlock2D", "0.29", deprecation_message) class UNetMidBlock2DCrossAttn(UNetMidBlock2DCrossAttn): deprecation_message = "Importing `UNetMidBlock2DCrossAttn` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import UNetMidBlock2DCrossAttn`, instead." deprecate("UNetMidBlock2DCrossAttn", "0.29", deprecation_message) class UNetMidBlock2DSimpleCrossAttn(UNetMidBlock2DSimpleCrossAttn): deprecation_message = "Importing `UNetMidBlock2DSimpleCrossAttn` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import UNetMidBlock2DSimpleCrossAttn`, instead." deprecate("UNetMidBlock2DSimpleCrossAttn", "0.29", deprecation_message) class AttnDownBlock2D(AttnDownBlock2D): deprecation_message = "Importing `AttnDownBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. 
Please use `from diffusers.models.unets.unet_2d_blocks import AttnDownBlock2D`, instead." deprecate("AttnDownBlock2D", "0.29", deprecation_message) class CrossAttnDownBlock2D(CrossAttnDownBlock2D): deprecation_message = "Importing `AttnDownBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import CrossAttnDownBlock2D`, instead." deprecate("CrossAttnDownBlock2D", "0.29", deprecation_message) class DownBlock2D(DownBlock2D): deprecation_message = "Importing `DownBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import DownBlock2D`, instead." deprecate("DownBlock2D", "0.29", deprecation_message) class AttnDownEncoderBlock2D(AttnDownEncoderBlock2D): deprecation_message = "Importing `AttnDownEncoderBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import AttnDownEncoderBlock2D`, instead." deprecate("AttnDownEncoderBlock2D", "0.29", deprecation_message) class AttnSkipDownBlock2D(AttnSkipDownBlock2D): deprecation_message = "Importing `AttnSkipDownBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import AttnSkipDownBlock2D`, instead." deprecate("AttnSkipDownBlock2D", "0.29", deprecation_message) class SkipDownBlock2D(SkipDownBlock2D): deprecation_message = "Importing `SkipDownBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import SkipDownBlock2D`, instead." deprecate("SkipDownBlock2D", "0.29", deprecation_message) class ResnetDownsampleBlock2D(ResnetDownsampleBlock2D): deprecation_message = "Importing `ResnetDownsampleBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import ResnetDownsampleBlock2D`, instead." deprecate("ResnetDownsampleBlock2D", "0.29", deprecation_message) class SimpleCrossAttnDownBlock2D(SimpleCrossAttnDownBlock2D): deprecation_message = "Importing `SimpleCrossAttnDownBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import SimpleCrossAttnDownBlock2D`, instead." deprecate("SimpleCrossAttnDownBlock2D", "0.29", deprecation_message) class KDownBlock2D(KDownBlock2D): deprecation_message = "Importing `KDownBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import KDownBlock2D`, instead." deprecate("KDownBlock2D", "0.29", deprecation_message) class KCrossAttnDownBlock2D(KCrossAttnDownBlock2D): deprecation_message = "Importing `KCrossAttnDownBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import KCrossAttnDownBlock2D`, instead." deprecate("KCrossAttnDownBlock2D", "0.29", deprecation_message) class AttnUpBlock2D(AttnUpBlock2D): deprecation_message = "Importing `AttnUpBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. 
Please use `from diffusers.models.unets.unet_2d_blocks import AttnUpBlock2D`, instead." deprecate("AttnUpBlock2D", "0.29", deprecation_message) class CrossAttnUpBlock2D(CrossAttnUpBlock2D): deprecation_message = "Importing `CrossAttnUpBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import CrossAttnUpBlock2D`, instead." deprecate("CrossAttnUpBlock2D", "0.29", deprecation_message) class UpBlock2D(UpBlock2D): deprecation_message = "Importing `UpBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import UpBlock2D`, instead." deprecate("UpBlock2D", "0.29", deprecation_message) class UpDecoderBlock2D(UpDecoderBlock2D): deprecation_message = "Importing `UpDecoderBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import UpDecoderBlock2D`, instead." deprecate("UpDecoderBlock2D", "0.29", deprecation_message) class AttnUpDecoderBlock2D(AttnUpDecoderBlock2D): deprecation_message = "Importing `AttnUpDecoderBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import AttnUpDecoderBlock2D`, instead." deprecate("AttnUpDecoderBlock2D", "0.29", deprecation_message) class AttnSkipUpBlock2D(AttnSkipUpBlock2D): deprecation_message = "Importing `AttnSkipUpBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import AttnSkipUpBlock2D`, instead." deprecate("AttnSkipUpBlock2D", "0.29", deprecation_message) class SkipUpBlock2D(SkipUpBlock2D): deprecation_message = "Importing `SkipUpBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import SkipUpBlock2D`, instead." deprecate("SkipUpBlock2D", "0.29", deprecation_message) class ResnetUpsampleBlock2D(ResnetUpsampleBlock2D): deprecation_message = "Importing `ResnetUpsampleBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import ResnetUpsampleBlock2D`, instead." deprecate("ResnetUpsampleBlock2D", "0.29", deprecation_message) class SimpleCrossAttnUpBlock2D(SimpleCrossAttnUpBlock2D): deprecation_message = "Importing `SimpleCrossAttnUpBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import SimpleCrossAttnUpBlock2D`, instead." deprecate("SimpleCrossAttnUpBlock2D", "0.29", deprecation_message) class KUpBlock2D(KUpBlock2D): deprecation_message = "Importing `KUpBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import KUpBlock2D`, instead." deprecate("KUpBlock2D", "0.29", deprecation_message) class KCrossAttnUpBlock2D(KCrossAttnUpBlock2D): deprecation_message = "Importing `KCrossAttnUpBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import KCrossAttnUpBlock2D`, instead." 
deprecate("KCrossAttnUpBlock2D", "0.29", deprecation_message) # can potentially later be renamed to `No-feed-forward` attention class KAttentionBlock(KAttentionBlock): deprecation_message = "Importing `KAttentionBlock` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import KAttentionBlock`, instead." deprecate("KAttentionBlock", "0.29", deprecation_message)
diffusers/src/diffusers/models/unet_2d_blocks.py
{ "file_path": "diffusers/src/diffusers/models/unet_2d_blocks.py", "repo_id": "diffusers", "token_count": 7249 }
114
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Dict, Union import torch import torch.nn.functional as F from torch import nn from torch.utils.checkpoint import checkpoint from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import PeftAdapterMixin from ..attention import BasicTransformerBlock, SkipFFTransformerBlock from ..attention_processor import ( ADDED_KV_ATTENTION_PROCESSORS, CROSS_ATTENTION_PROCESSORS, AttentionProcessor, AttnAddedKVProcessor, AttnProcessor, ) from ..embeddings import TimestepEmbedding, get_timestep_embedding from ..modeling_utils import ModelMixin from ..normalization import GlobalResponseNorm, RMSNorm from ..resnet import Downsample2D, Upsample2D class UVit2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): _supports_gradient_checkpointing = True @register_to_config def __init__( self, # global config hidden_size: int = 1024, use_bias: bool = False, hidden_dropout: float = 0.0, # conditioning dimensions cond_embed_dim: int = 768, micro_cond_encode_dim: int = 256, micro_cond_embed_dim: int = 1280, encoder_hidden_size: int = 768, # num tokens vocab_size: int = 8256, # codebook_size + 1 (for the mask token) rounded codebook_size: int = 8192, # `UVit2DConvEmbed` in_channels: int = 768, block_out_channels: int = 768, num_res_blocks: int = 3, downsample: bool = False, upsample: bool = False, block_num_heads: int = 12, # `TransformerLayer` num_hidden_layers: int = 22, num_attention_heads: int = 16, # `Attention` attention_dropout: float = 0.0, # `FeedForward` intermediate_size: int = 2816, # `Norm` layer_norm_eps: float = 1e-6, ln_elementwise_affine: bool = True, sample_size: int = 64, ): super().__init__() self.encoder_proj = nn.Linear(encoder_hidden_size, hidden_size, bias=use_bias) self.encoder_proj_layer_norm = RMSNorm(hidden_size, layer_norm_eps, ln_elementwise_affine) self.embed = UVit2DConvEmbed( in_channels, block_out_channels, vocab_size, ln_elementwise_affine, layer_norm_eps, use_bias ) self.cond_embed = TimestepEmbedding( micro_cond_embed_dim + cond_embed_dim, hidden_size, sample_proj_bias=use_bias ) self.down_block = UVitBlock( block_out_channels, num_res_blocks, hidden_size, hidden_dropout, ln_elementwise_affine, layer_norm_eps, use_bias, block_num_heads, attention_dropout, downsample, False, ) self.project_to_hidden_norm = RMSNorm(block_out_channels, layer_norm_eps, ln_elementwise_affine) self.project_to_hidden = nn.Linear(block_out_channels, hidden_size, bias=use_bias) self.transformer_layers = nn.ModuleList( [ BasicTransformerBlock( dim=hidden_size, num_attention_heads=num_attention_heads, attention_head_dim=hidden_size // num_attention_heads, dropout=hidden_dropout, cross_attention_dim=hidden_size, attention_bias=use_bias, norm_type="ada_norm_continuous", ada_norm_continous_conditioning_embedding_dim=hidden_size, norm_elementwise_affine=ln_elementwise_affine, norm_eps=layer_norm_eps, ada_norm_bias=use_bias, ff_inner_dim=intermediate_size, ff_bias=use_bias, 
attention_out_bias=use_bias, ) for _ in range(num_hidden_layers) ] ) self.project_from_hidden_norm = RMSNorm(hidden_size, layer_norm_eps, ln_elementwise_affine) self.project_from_hidden = nn.Linear(hidden_size, block_out_channels, bias=use_bias) self.up_block = UVitBlock( block_out_channels, num_res_blocks, hidden_size, hidden_dropout, ln_elementwise_affine, layer_norm_eps, use_bias, block_num_heads, attention_dropout, downsample=False, upsample=upsample, ) self.mlm_layer = ConvMlmLayer( block_out_channels, in_channels, use_bias, ln_elementwise_affine, layer_norm_eps, codebook_size ) self.gradient_checkpointing = False def _set_gradient_checkpointing(self, module, value: bool = False) -> None: pass def forward(self, input_ids, encoder_hidden_states, pooled_text_emb, micro_conds, cross_attention_kwargs=None): encoder_hidden_states = self.encoder_proj(encoder_hidden_states) encoder_hidden_states = self.encoder_proj_layer_norm(encoder_hidden_states) micro_cond_embeds = get_timestep_embedding( micro_conds.flatten(), self.config.micro_cond_encode_dim, flip_sin_to_cos=True, downscale_freq_shift=0 ) micro_cond_embeds = micro_cond_embeds.reshape((input_ids.shape[0], -1)) pooled_text_emb = torch.cat([pooled_text_emb, micro_cond_embeds], dim=1) pooled_text_emb = pooled_text_emb.to(dtype=self.dtype) pooled_text_emb = self.cond_embed(pooled_text_emb).to(encoder_hidden_states.dtype) hidden_states = self.embed(input_ids) hidden_states = self.down_block( hidden_states, pooled_text_emb=pooled_text_emb, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, ) batch_size, channels, height, width = hidden_states.shape hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch_size, height * width, channels) hidden_states = self.project_to_hidden_norm(hidden_states) hidden_states = self.project_to_hidden(hidden_states) for layer in self.transformer_layers: if self.training and self.gradient_checkpointing: def layer_(*args): return checkpoint(layer, *args) else: layer_ = layer hidden_states = layer_( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, added_cond_kwargs={"pooled_text_emb": pooled_text_emb}, ) hidden_states = self.project_from_hidden_norm(hidden_states) hidden_states = self.project_from_hidden(hidden_states) hidden_states = hidden_states.reshape(batch_size, height, width, channels).permute(0, 3, 1, 2) hidden_states = self.up_block( hidden_states, pooled_text_emb=pooled_text_emb, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, ) logits = self.mlm_layer(hidden_states) return logits @property # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. 
""" # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True) for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. 
""" if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnAddedKVProcessor() elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnProcessor() else: raise ValueError( f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" ) self.set_attn_processor(processor) class UVit2DConvEmbed(nn.Module): def __init__(self, in_channels, block_out_channels, vocab_size, elementwise_affine, eps, bias): super().__init__() self.embeddings = nn.Embedding(vocab_size, in_channels) self.layer_norm = RMSNorm(in_channels, eps, elementwise_affine) self.conv = nn.Conv2d(in_channels, block_out_channels, kernel_size=1, bias=bias) def forward(self, input_ids): embeddings = self.embeddings(input_ids) embeddings = self.layer_norm(embeddings) embeddings = embeddings.permute(0, 3, 1, 2) embeddings = self.conv(embeddings) return embeddings class UVitBlock(nn.Module): def __init__( self, channels, num_res_blocks: int, hidden_size, hidden_dropout, ln_elementwise_affine, layer_norm_eps, use_bias, block_num_heads, attention_dropout, downsample: bool, upsample: bool, ): super().__init__() if downsample: self.downsample = Downsample2D( channels, use_conv=True, padding=0, name="Conv2d_0", kernel_size=2, norm_type="rms_norm", eps=layer_norm_eps, elementwise_affine=ln_elementwise_affine, bias=use_bias, ) else: self.downsample = None self.res_blocks = nn.ModuleList( [ ConvNextBlock( channels, layer_norm_eps, ln_elementwise_affine, use_bias, hidden_dropout, hidden_size, ) for i in range(num_res_blocks) ] ) self.attention_blocks = nn.ModuleList( [ SkipFFTransformerBlock( channels, block_num_heads, channels // block_num_heads, hidden_size, use_bias, attention_dropout, channels, attention_bias=use_bias, attention_out_bias=use_bias, ) for _ in range(num_res_blocks) ] ) if upsample: self.upsample = Upsample2D( channels, use_conv_transpose=True, kernel_size=2, padding=0, name="conv", norm_type="rms_norm", eps=layer_norm_eps, elementwise_affine=ln_elementwise_affine, bias=use_bias, interpolate=False, ) else: self.upsample = None def forward(self, x, pooled_text_emb, encoder_hidden_states, cross_attention_kwargs): if self.downsample is not None: x = self.downsample(x) for res_block, attention_block in zip(self.res_blocks, self.attention_blocks): x = res_block(x, pooled_text_emb) batch_size, channels, height, width = x.shape x = x.view(batch_size, channels, height * width).permute(0, 2, 1) x = attention_block( x, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs ) x = x.permute(0, 2, 1).view(batch_size, channels, height, width) if self.upsample is not None: x = self.upsample(x) return x class ConvNextBlock(nn.Module): def __init__( self, channels, layer_norm_eps, ln_elementwise_affine, use_bias, hidden_dropout, hidden_size, res_ffn_factor=4 ): super().__init__() self.depthwise = nn.Conv2d( channels, channels, kernel_size=3, padding=1, groups=channels, bias=use_bias, ) self.norm = RMSNorm(channels, layer_norm_eps, ln_elementwise_affine) self.channelwise_linear_1 = nn.Linear(channels, int(channels * res_ffn_factor), bias=use_bias) self.channelwise_act = nn.GELU() self.channelwise_norm = GlobalResponseNorm(int(channels * res_ffn_factor)) self.channelwise_linear_2 = nn.Linear(int(channels * res_ffn_factor), channels, bias=use_bias) self.channelwise_dropout = nn.Dropout(hidden_dropout) self.cond_embeds_mapper = 
nn.Linear(hidden_size, channels * 2, use_bias) def forward(self, x, cond_embeds): x_res = x x = self.depthwise(x) x = x.permute(0, 2, 3, 1) x = self.norm(x) x = self.channelwise_linear_1(x) x = self.channelwise_act(x) x = self.channelwise_norm(x) x = self.channelwise_linear_2(x) x = self.channelwise_dropout(x) x = x.permute(0, 3, 1, 2) x = x + x_res scale, shift = self.cond_embeds_mapper(F.silu(cond_embeds)).chunk(2, dim=1) x = x * (1 + scale[:, :, None, None]) + shift[:, :, None, None] return x class ConvMlmLayer(nn.Module): def __init__( self, block_out_channels: int, in_channels: int, use_bias: bool, ln_elementwise_affine: bool, layer_norm_eps: float, codebook_size: int, ): super().__init__() self.conv1 = nn.Conv2d(block_out_channels, in_channels, kernel_size=1, bias=use_bias) self.layer_norm = RMSNorm(in_channels, layer_norm_eps, ln_elementwise_affine) self.conv2 = nn.Conv2d(in_channels, codebook_size, kernel_size=1, bias=use_bias) def forward(self, hidden_states): hidden_states = self.conv1(hidden_states) hidden_states = self.layer_norm(hidden_states.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) logits = self.conv2(hidden_states) return logits
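`UVit2DModel.forward` repeatedly switches between the convolutional layout `(B, C, H, W)` used by the ConvNeXt-style and up/down blocks and the token layout `(B, H*W, C)` expected by the transformer layers. The round trip is a permute/reshape pair, sketched below with illustrative sizes.

```py
import torch

batch_size, channels, height, width = 2, 8, 4, 4
x = torch.randn(batch_size, channels, height, width)

# (B, C, H, W) -> (B, H*W, C): one token per spatial location
tokens = x.permute(0, 2, 3, 1).reshape(batch_size, height * width, channels)

# ... the transformer layers would operate on `tokens` here ...

# (B, H*W, C) -> (B, C, H, W): back to the convolutional layout
x_back = tokens.reshape(batch_size, height, width, channels).permute(0, 3, 1, 2)

assert torch.equal(x, x_back)  # the round trip is lossless
```

`UVitBlock`'s attention path performs the equivalent flattening with `view(batch_size, channels, height * width).permute(0, 2, 1)` before each `SkipFFTransformerBlock`.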
diffusers/src/diffusers/models/unets/uvit_2d.py
{ "file_path": "diffusers/src/diffusers/models/unets/uvit_2d.py", "repo_id": "diffusers", "token_count": 8291 }
115
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import torch import torch.nn.functional as F from transformers import ClapTextModelWithProjection, RobertaTokenizer, RobertaTokenizerFast, SpeechT5HifiGan from ...models import AutoencoderKL, UNet2DConditionModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> from diffusers import AudioLDMPipeline >>> import torch >>> import scipy >>> repo_id = "cvssp/audioldm-s-full-v2" >>> pipe = AudioLDMPipeline.from_pretrained(repo_id, torch_dtype=torch.float16) >>> pipe = pipe.to("cuda") >>> prompt = "Techno music with a strong, upbeat tempo and high melodic riffs" >>> audio = pipe(prompt, num_inference_steps=10, audio_length_in_s=5.0).audios[0] >>> # save the audio sample as a .wav file >>> scipy.io.wavfile.write("techno.wav", rate=16000, data=audio) ``` """ class AudioLDMPipeline(DiffusionPipeline): r""" Pipeline for text-to-audio generation using AudioLDM. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. text_encoder ([`~transformers.ClapTextModelWithProjection`]): Frozen text-encoder (`ClapTextModelWithProjection`, specifically the [laion/clap-htsat-unfused](https://huggingface.co/laion/clap-htsat-unfused) variant. tokenizer ([`PreTrainedTokenizer`]): A [`~transformers.RobertaTokenizer`] to tokenize text. unet ([`UNet2DConditionModel`]): A `UNet2DConditionModel` to denoise the encoded audio latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded audio latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. vocoder ([`~transformers.SpeechT5HifiGan`]): Vocoder of class `SpeechT5HifiGan`. 
""" model_cpu_offload_seq = "text_encoder->unet->vae" def __init__( self, vae: AutoencoderKL, text_encoder: ClapTextModelWithProjection, tokenizer: Union[RobertaTokenizer, RobertaTokenizerFast], unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, vocoder: SpeechT5HifiGan, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, vocoder=vocoder, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing def enable_vae_slicing(self): r""" Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ self.vae.enable_slicing() # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing def disable_vae_slicing(self): r""" Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ self.vae.disable_slicing() def _encode_prompt( self, prompt, device, num_waveforms_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device (`torch.device`): torch device num_waveforms_per_prompt (`int`): number of waveforms that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the audio generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. 
""" if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids attention_mask = text_inputs.attention_mask untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLAP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask.to(device), ) prompt_embeds = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state prompt_embeds = F.normalize(prompt_embeds, dim=-1) prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) ( bs_embed, seq_len, ) = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_waveforms_per_prompt) prompt_embeds = prompt_embeds.view(bs_embed * num_waveforms_per_prompt, seq_len) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) uncond_input_ids = uncond_input.input_ids.to(device) attention_mask = uncond_input.attention_mask.to(device) negative_prompt_embeds = self.text_encoder( uncond_input_ids, attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state negative_prompt_embeds = F.normalize(negative_prompt_embeds, dim=-1) if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_waveforms_per_prompt) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_waveforms_per_prompt, seq_len) # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) return prompt_embeds def decode_latents(self, latents): latents = 1 / self.vae.config.scaling_factor * latents mel_spectrogram = self.vae.decode(latents).sample return mel_spectrogram def mel_spectrogram_to_waveform(self, mel_spectrogram): if mel_spectrogram.dim() == 4: mel_spectrogram = mel_spectrogram.squeeze(1) waveform = self.vocoder(mel_spectrogram) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 waveform = waveform.cpu().float() return waveform # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, audio_length_in_s, vocoder_upsample_factor, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, ): min_audio_length_in_s = vocoder_upsample_factor * self.vae_scale_factor if audio_length_in_s < min_audio_length_in_s: raise ValueError( f"`audio_length_in_s` has to be a positive value greater than or equal to {min_audio_length_in_s}, but " f"is {audio_length_in_s}." ) if self.vocoder.config.model_in_dim % self.vae_scale_factor != 0: raise ValueError( f"The number of frequency bins in the vocoder's log-mel spectrogram has to be divisible by the " f"VAE scale factor, but got {self.vocoder.config.model_in_dim} bins and a scale factor of " f"{self.vae_scale_factor}." ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents with width->self.vocoder.config.model_in_dim def prepare_latents(self, batch_size, num_channels_latents, height, dtype, device, generator, latents=None): shape = ( batch_size, num_channels_latents, height // self.vae_scale_factor, self.vocoder.config.model_in_dim // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, audio_length_in_s: Optional[float] = None, num_inference_steps: int = 10, guidance_scale: float = 2.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_waveforms_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, output_type: Optional[str] = "np", ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide audio generation. If not defined, you need to pass `prompt_embeds`. audio_length_in_s (`int`, *optional*, defaults to 5.12): The length of the generated audio sample in seconds. num_inference_steps (`int`, *optional*, defaults to 10): The number of denoising steps. More denoising steps usually lead to a higher quality audio at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 2.5): A higher guidance scale value encourages the model to generate audio that is closely linked to the text `prompt` at the expense of lower sound quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in audio generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). num_waveforms_per_prompt (`int`, *optional*, defaults to 1): The number of waveforms to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. 
generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.AudioPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). output_type (`str`, *optional*, defaults to `"np"`): The output format of the generated image. Choose between `"np"` to return a NumPy `np.ndarray` or `"pt"` to return a PyTorch `torch.Tensor` object. Examples: Returns: [`~pipelines.AudioPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.AudioPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated audio. """ # 0. Convert audio input length from seconds to spectrogram height vocoder_upsample_factor = np.prod(self.vocoder.config.upsample_rates) / self.vocoder.config.sampling_rate if audio_length_in_s is None: audio_length_in_s = self.unet.config.sample_size * self.vae_scale_factor * vocoder_upsample_factor height = int(audio_length_in_s / vocoder_upsample_factor) original_waveform_length = int(audio_length_in_s * self.vocoder.config.sampling_rate) if height % self.vae_scale_factor != 0: height = int(np.ceil(height / self.vae_scale_factor)) * self.vae_scale_factor logger.info( f"Audio length in seconds {audio_length_in_s} is increased to {height * vocoder_upsample_factor} " f"so that it can be handled by the model. It will be cut to {audio_length_in_s} after the " f"denoising process." ) # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, audio_length_in_s, vocoder_upsample_factor, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, ) # 2. 
Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 3. Encode input prompt prompt_embeds = self._encode_prompt( prompt, device, num_waveforms_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, ) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_waveforms_per_prompt, num_channels_latents, height, prompt_embeds.dtype, device, generator, latents, ) # 6. Prepare extra step kwargs extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7. Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=None, class_labels=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, ).sample # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) # 8. Post-processing mel_spectrogram = self.decode_latents(latents) audio = self.mel_spectrogram_to_waveform(mel_spectrogram) audio = audio[:, :original_waveform_length] if output_type == "np": audio = audio.numpy() if not return_dict: return (audio,) return AudioPipelineOutput(audios=audio)
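The guidance step inside the denoising loop above is just a weighted extrapolation from the unconditional noise prediction towards the text-conditioned one. Below is a minimal sketch of that arithmetic with stand-in tensors; the variable names mirror the loop, but the shapes are purely illustrative and not the pipeline's actual latent shape.

```py
import torch

# Stand-ins for the two halves of the chunked UNet output (illustrative shape only).
noise_pred_uncond = torch.randn(1, 8, 64, 16)
noise_pred_text = torch.randn(1, 8, 64, 16)

guidance_scale = 2.5  # the guidance weight w; w == 1 disables classifier-free guidance

# Move from the unconditional estimate towards the text-conditioned estimate.
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

# Sanity check: with w == 1 the guided prediction collapses to the text-conditioned one.
assert torch.allclose(
    noise_pred_uncond + 1.0 * (noise_pred_text - noise_pred_uncond),
    noise_pred_text,
)
```

Values of `guidance_scale` above 1 push the sample further towards the prompt at the cost of audio quality, which is why the `__call__` docstring defaults it to a moderate 2.5.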
diffusers/src/diffusers/pipelines/audioldm/pipeline_audioldm.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/audioldm/pipeline_audioldm.py", "repo_id": "diffusers", "token_count": 11819 }
116
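A hedged end-to-end sketch of the text-to-audio pipeline defined in the record above. It assumes the class exported from `pipeline_audioldm.py` is `AudioLDMPipeline` and that a checkpoint such as `cvssp/audioldm-s-full-v2` is available; neither name is pinned by the excerpt itself, and writing the waveform with SciPy is just one convenient option.

```py
import torch
from scipy.io import wavfile

from diffusers import AudioLDMPipeline

# Assumed checkpoint; any AudioLDM checkpoint with the same component layout should work.
repo_id = "cvssp/audioldm-s-full-v2"
pipe = AudioLDMPipeline.from_pretrained(repo_id)
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")

prompt = "Techno music with a strong, upbeat tempo and high melodic riffs"
audio = pipe(prompt, num_inference_steps=10, audio_length_in_s=5.12).audios[0]

# With the default output_type="np", `audios` is a float32 NumPy array sampled at the
# vocoder's rate, so it can be written straight to disk.
wavfile.write("techno.wav", rate=pipe.vocoder.config.sampling_rate, data=audio)
```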
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ....models import AutoencoderKL, UNet2DConditionModel from ....schedulers import DDIMScheduler, DDPMScheduler from ....utils.torch_utils import randn_tensor from ...pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class AudioDiffusionPipeline(DiffusionPipeline): """ Pipeline for audio diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Parameters: vqae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. unet ([`UNet2DConditionModel`]): A `UNet2DConditionModel` to denoise the encoded image latents. mel ([`Mel`]): Transform audio into a spectrogram. scheduler ([`DDIMScheduler`] or [`DDPMScheduler`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`] or [`DDPMScheduler`]. """ _optional_components = ["vqvae"] def __init__( self, vqvae: AutoencoderKL, unet: UNet2DConditionModel, mel: Mel, scheduler: Union[DDIMScheduler, DDPMScheduler], ): super().__init__() self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae) def get_default_steps(self) -> int: """Returns default number of steps recommended for inference. Returns: `int`: The number of steps. """ return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000 @torch.no_grad() def __call__( self, batch_size: int = 1, audio_file: str = None, raw_audio: np.ndarray = None, slice: int = 0, start_step: int = 0, steps: int = None, generator: torch.Generator = None, mask_start_secs: float = 0, mask_end_secs: float = 0, step_generator: torch.Generator = None, eta: float = 0, noise: torch.Tensor = None, encoding: torch.Tensor = None, return_dict=True, ) -> Union[ Union[AudioPipelineOutput, ImagePipelineOutput], Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]], ]: """ The call function to the pipeline for generation. Args: batch_size (`int`): Number of samples to generate. audio_file (`str`): An audio file that must be on disk due to [Librosa](https://librosa.org/) limitation. raw_audio (`np.ndarray`): The raw audio file as a NumPy array. slice (`int`): Slice number of audio to convert. start_step (int): Step to start diffusion from. steps (`int`): Number of denoising steps (defaults to `50` for DDIM and `1000` for DDPM). generator (`torch.Generator`): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. mask_start_secs (`float`): Number of seconds of audio to mask (not generate) at start. mask_end_secs (`float`): Number of seconds of audio to mask (not generate) at end. 
step_generator (`torch.Generator`): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) used to denoise. None eta (`float`): Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. noise (`torch.Tensor`): A noise tensor of shape `(batch_size, 1, height, width)` or `None`. encoding (`torch.Tensor`): A tensor for [`UNet2DConditionModel`] of shape `(batch_size, seq_length, cross_attention_dim)`. return_dict (`bool`): Whether or not to return a [`AudioPipelineOutput`], [`ImagePipelineOutput`] or a plain tuple. Examples: For audio diffusion: ```py import torch from IPython.display import Audio from diffusers import DiffusionPipeline device = "cuda" if torch.cuda.is_available() else "cpu" pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-256").to(device) output = pipe() display(output.images[0]) display(Audio(output.audios[0], rate=mel.get_sample_rate())) ``` For latent audio diffusion: ```py import torch from IPython.display import Audio from diffusers import DiffusionPipeline device = "cuda" if torch.cuda.is_available() else "cpu" pipe = DiffusionPipeline.from_pretrained("teticio/latent-audio-diffusion-256").to(device) output = pipe() display(output.images[0]) display(Audio(output.audios[0], rate=pipe.mel.get_sample_rate())) ``` For other tasks like variation, inpainting, outpainting, etc: ```py output = pipe( raw_audio=output.audios[0, 0], start_step=int(pipe.get_default_steps() / 2), mask_start_secs=1, mask_end_secs=1, ) display(output.images[0]) display(Audio(output.audios[0], rate=pipe.mel.get_sample_rate())) ``` Returns: `List[PIL Image]`: A list of Mel spectrograms (`float`, `List[np.ndarray]`) with the sample rate and raw audio. 
""" steps = steps or self.get_default_steps() self.scheduler.set_timesteps(steps) step_generator = step_generator or generator # For backwards compatibility if isinstance(self.unet.config.sample_size, int): self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: noise = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ), generator=generator, device=self.device, ) images = noise mask = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(audio_file, raw_audio) input_image = self.mel.audio_slice_to_image(slice) input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape( (input_image.height, input_image.width) ) input_image = (input_image / 255) * 2 - 1 input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device) if self.vqvae is not None: input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample( generator=generator )[0] input_images = self.vqvae.config.scaling_factor * input_images if start_step > 0: images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1]) pixels_per_second = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) mask_start = int(mask_start_secs * pixels_per_second) mask_end = int(mask_end_secs * pixels_per_second) mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:])) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])): if isinstance(self.unet, UNet2DConditionModel): model_output = self.unet(images, t, encoding)["sample"] else: model_output = self.unet(images, t)["sample"] if isinstance(self.scheduler, DDIMScheduler): images = self.scheduler.step( model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator, )["prev_sample"] else: images = self.scheduler.step( model_output=model_output, timestep=t, sample=images, generator=step_generator, )["prev_sample"] if mask is not None: if mask_start > 0: images[:, :, :, :mask_start] = mask[:, step, :, :mask_start] if mask_end > 0: images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance images = 1 / self.vqvae.config.scaling_factor * images images = self.vqvae.decode(images)["sample"] images = (images / 2 + 0.5).clamp(0, 1) images = images.cpu().permute(0, 2, 3, 1).numpy() images = (images * 255).round().astype("uint8") images = list( (Image.fromarray(_[:, :, 0]) for _ in images) if images.shape[3] == 1 else (Image.fromarray(_, mode="RGB").convert("L") for _ in images) ) audios = [self.mel.image_to_audio(_) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images)) @torch.no_grad() def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray: """ Reverse the denoising step process to recover a noisy image from the generated image. Args: images (`List[PIL Image]`): List of images to encode. steps (`int`): Number of encoding steps to perform (defaults to `50`). Returns: `np.ndarray`: A noise tensor of shape `(batch_size, 1, height, width)`. 
""" # Only works with DDIM as this method is deterministic assert isinstance(self.scheduler, DDIMScheduler) self.scheduler.set_timesteps(steps) sample = np.array( [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images] ) sample = (sample / 255) * 2 - 1 sample = torch.Tensor(sample).to(self.device) for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))): prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps alpha_prod_t = self.scheduler.alphas_cumprod[t] alpha_prod_t_prev = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) beta_prod_t = 1 - alpha_prod_t model_output = self.unet(sample, t)["sample"] pred_sample_direction = (1 - alpha_prod_t_prev) ** (0.5) * model_output sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) sample = sample * alpha_prod_t ** (0.5) + beta_prod_t ** (0.5) * model_output return sample @staticmethod def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor: """Spherical Linear intERPolation. Args: x0 (`torch.Tensor`): The first tensor to interpolate between. x1 (`torch.Tensor`): Second tensor to interpolate between. alpha (`float`): Interpolation between 0 and 1 Returns: `torch.Tensor`: The interpolated tensor. """ theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1)) return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
diffusers/src/diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/audio_diffusion/pipeline_audio_diffusion.py", "repo_id": "diffusers", "token_count": 6241 }
117
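The `encode` method in the last record inverts the DDIM update, so an existing spectrogram can be pushed back to noise and then re-generated (or interpolated with `slerp`). A hedged sketch of that round trip, reusing `pipe` from the previous example: the audio path is a placeholder, and the scheduler swap assumes the loaded checkpoint shipped with a DDPM scheduler, since the `assert` at the top of `encode` requires a `DDIMScheduler`.

```py
from diffusers import DDIMScheduler

# `encode` only works with a DDIM scheduler; build one from the existing scheduler config.
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)

# Turn a local audio file into the mel spectrogram image the UNet was trained on.
pipe.mel.load_audio("clip_a.wav")  # placeholder path
image_a = pipe.mel.audio_slice_to_image(0)

# Deterministically invert the image back to noise, then regenerate from that noise.
noise_a = pipe.encode([image_a], steps=50)
roundtrip = pipe(batch_size=1, noise=noise_a)
```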