Dataset columns:
text: string (length 7 to 318k)
id: string (length 14 to 166)
metadata: dict
__index_level_0__: int64 (0 to 439)
[package]
name = "candle-nn"
version.workspace = true
edition.workspace = true
description.workspace = true
repository.workspace = true
keywords.workspace = true
categories.workspace = true
license.workspace = true
readme = "README.md"

[dependencies]
accelerate-src = { workspace = true, optional = true }
candle = { workspace = true }
half = { workspace = true }
thiserror = { workspace = true }
intel-mkl-src = { workspace = true, optional = true }
num-traits = { workspace = true }
rayon = { workspace = true }
safetensors = { workspace = true }
serde = { workspace = true }
metal = { workspace = true, optional = true }
candle-metal-kernels = { workspace = true, optional = true }

[dev-dependencies]
anyhow = { workspace = true }
clap = { workspace = true }

[features]
default = []
accelerate = ["dep:accelerate-src", "candle/accelerate"]
cuda = ["candle/cuda"]
mkl = ["dep:intel-mkl-src", "candle/mkl"]
metal = ["candle/metal", "dep:candle-metal-kernels", "dep:metal"]
candle/candle-nn/Cargo.toml/0
{ "file_path": "candle/candle-nn/Cargo.toml", "repo_id": "candle", "token_count": 325 }
26
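The optional dependencies in the manifest above are only pulled in when the matching cargo feature is enabled; crate code then gates backend-specific paths behind cfg attributes (the ops.rs row below does this for cuda and metal). A minimal sketch of that pattern, where gpu_only is a hypothetical helper and not part of candle-nn:

// Sketch only: `gpu_only` is illustrative. The first definition is compiled
// only when the crate is built with `--features cuda`; otherwise the stub is used.
#[cfg(feature = "cuda")]
fn gpu_only() -> &'static str {
    "built with the cuda feature"
}

#[cfg(not(feature = "cuda"))]
fn gpu_only() -> &'static str {
    "cuda feature disabled"
}

fn main() {
    println!("{}", gpu_only());
}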
use candle::{CpuStorage, Layout, Result, Shape, Tensor}; use rayon::prelude::*; /// Applies the softmax function to the input tensor, rescaling the element so that elements on /// a slice of fixed index on dimension `dim` are between 0 and 1 and sum to 1. /// /// ```rust /// use candle::{Tensor, Device, test_utils::to_vec2_round}; /// let a = Tensor::new(&[[0f32, 1., 0., 1.], [-2., 2., 3., -3.]], &Device::Cpu)?; /// let a = candle_nn::ops::softmax(&a, 1)?; /// assert_eq!( /// to_vec2_round(&a, 4)?, /// &[ /// [0.1345, 0.3655, 0.1345, 0.3655], /// [0.0049, 0.2671, 0.7262, 0.0018] /// ]); /// # Ok::<(), candle::Error>(()) /// ``` pub fn softmax<D: candle::shape::Dim>(xs: &Tensor, dim: D) -> Result<Tensor> { let dim = dim.to_index(xs.shape(), "softmax")?; let max = xs.max_keepdim(dim)?; let diff = xs.broadcast_sub(&max)?; let num = diff.exp()?; let den = num.sum_keepdim(dim)?; num.broadcast_div(&den) } pub fn log_softmax<D: candle::shape::Dim>(xs: &Tensor, d: D) -> Result<Tensor> { let d = d.to_index(xs.shape(), "log-softmax")?; let max = xs.max_keepdim(d)?; let diff = xs.broadcast_sub(&max)?; let sum_exp = diff.exp()?.sum_keepdim(d)?; let log_sm = diff.broadcast_sub(&sum_exp.log()?)?; Ok(log_sm) } pub fn silu(xs: &Tensor) -> Result<Tensor> { // TODO: Should we have a specialized op for this? xs / (xs.neg()?.exp()? + 1.0)? } pub fn swiglu(xs: &Tensor) -> Result<Tensor> { let xs = xs.chunk(2, candle::D::Minus1)?; crate::ops::silu(&xs[0])? * &xs[1] } pub fn sigmoid(xs: &Tensor) -> Result<Tensor> { // TODO: Should we have a specialized op for this? (xs.neg()?.exp()? + 1.0)?.recip() } pub fn hard_sigmoid(xs: &Tensor) -> Result<Tensor> { // TODO: Should we have a specialized op for this? ((xs + 3.0)? / 6.0)?.clamp(0f32, 1f32) } pub fn leaky_relu(xs: &Tensor, negative_slope: f64) -> Result<Tensor> { let zeros = xs.zeros_like()?; xs.maximum(&zeros)? + xs.minimum(&zeros)? * negative_slope } pub fn dropout(xs: &Tensor, drop_p: f32) -> Result<Tensor> { // This implementation is inefficient as it stores the full mask for the backward pass. // Instead we could just store the seed and have a specialized kernel that would both // generate the random mask and apply it. // Another easier optimization would be to be able to generate boolean mask using just a bit of // entropy per element rather than generating a full float per element. if !(0. ..1.).contains(&drop_p) { candle::bail!("dropout probability has to be in [0, 1), got {drop_p}") } let rand = Tensor::rand(0f32, 1f32, xs.shape(), xs.device())?; let scale = 1.0 / (1.0 - drop_p as f64); let drop_p = Tensor::new(drop_p, xs.device())?.broadcast_as(xs.shape())?; let mask = (rand.ge(&drop_p)? 
* scale)?.to_dtype(xs.dtype())?; xs * mask } #[derive(Debug)] pub struct Dropout { drop_p: f32, } impl Dropout { pub fn new(drop_p: f32) -> Dropout { Self { drop_p } } pub fn forward(&self, xs: &Tensor, train: bool) -> Result<Tensor> { if train { dropout(xs, self.drop_p) } else { Ok(xs.clone()) } } } impl candle::ModuleT for Dropout { fn forward_t(&self, xs: &Tensor, train: bool) -> Result<Tensor> { self.forward(xs, train) } } struct SoftmaxLastDim; impl candle::CustomOp1 for SoftmaxLastDim { fn name(&self) -> &'static str { "softmax-last-dim" } fn cpu_fwd(&self, storage: &CpuStorage, layout: &Layout) -> Result<(CpuStorage, Shape)> { fn softmax<T: candle::WithDType + num_traits::Float>( src: &[T], layout: &Layout, ) -> Result<(CpuStorage, Shape)> { let src = match layout.contiguous_offsets() { None => candle::bail!("input has to be contiguous"), Some((o1, o2)) => &src[o1..o2], }; let el_count = layout.shape().elem_count(); let dims = layout.shape().dims(); let dim_m1 = dims[dims.len() - 1]; let mut dst = vec![T::zero(); el_count]; src.par_chunks(dim_m1) .zip(dst.par_chunks_mut(dim_m1)) .for_each(|(src, dst)| { let mut max = T::neg_infinity(); unsafe { T::vec_reduce_max(src.as_ptr(), &mut max, dim_m1) }; for (s, d) in src.iter().zip(dst.iter_mut()) { *d = (*s - max).exp(); } let mut sum_exp = T::zero(); unsafe { T::vec_reduce_sum(dst.as_ptr(), &mut sum_exp, dim_m1) }; for d in dst.iter_mut() { *d /= sum_exp } }); let storage = candle::WithDType::to_cpu_storage_owned(dst); Ok((storage, Shape::from_dims(dims))) } match storage { CpuStorage::BF16(slice) => softmax::<half::bf16>(slice, layout), CpuStorage::F16(slice) => softmax::<half::f16>(slice, layout), CpuStorage::F32(slice) => softmax::<f32>(slice, layout), CpuStorage::F64(slice) => softmax::<f64>(slice, layout), _ => candle::bail!("unsupported dtype for softmax {:?}", storage), } } #[cfg(feature = "cuda")] fn cuda_fwd( &self, storage: &candle::CudaStorage, layout: &Layout, ) -> Result<(candle::CudaStorage, Shape)> { use candle::cuda_backend::cudarc::driver::{ CudaSlice, DeviceRepr, LaunchAsync, LaunchConfig, }; use candle::cuda_backend::{kernel_name, kernels, Map1, WrapErr}; use candle::{CudaDevice, WithDType}; struct S; impl Map1 for S { fn f<T: DeviceRepr + WithDType>( &self, src: &CudaSlice<T>, dev: &CudaDevice, layout: &Layout, ) -> Result<CudaSlice<T>> { let src = match layout.contiguous_offsets() { None => candle::bail!("input has to be contiguous"), Some((o1, o2)) => src.slice(o1..o2), }; let el = layout.shape().elem_count(); let dims = layout.shape().dims(); let dim_m1 = dims[dims.len() - 1]; let (n_rows, n_cols) = (el / dim_m1, dim_m1); let cfg = LaunchConfig { grid_dim: (n_rows as u32, 1, 1), block_dim: (1, 32, 1), shared_mem_bytes: 0, }; let src = &src.slice(layout.start_offset()..); let func = dev.get_or_load_func(&kernel_name::<T>("softmax"), kernels::REDUCE)?; // SAFETY: Set later by running the kernel. let dst = unsafe { dev.alloc::<T>(el) }.w()?; let params = (src, &dst, n_cols as i32); // SAFETY: ffi. 
unsafe { func.launch(cfg, params) }.w()?; Ok(dst) } } use candle::backend::BackendStorage; let dev = storage.device(); let slice = S.map(&storage.slice, dev, layout)?; let dst = candle::cuda_backend::CudaStorage { slice, device: dev.clone(), }; Ok((dst, layout.shape().clone())) } #[cfg(feature = "metal")] fn metal_fwd( &self, storage: &candle::MetalStorage, layout: &Layout, ) -> Result<(candle::MetalStorage, Shape)> { use candle::{backend::BackendStorage, DType}; let device = storage.device(); let command_buffer = device.command_buffer()?; let kernels = device.kernels(); let name = match storage.dtype() { DType::F32 => "softmax_f32", DType::F16 => "softmax_f16", DType::BF16 => "softmax_bf16", dtype => candle::bail!("softmax-last-dim is not implemented for {dtype:?}"), }; let n = layout.stride().len(); if !(layout.is_contiguous() && layout.stride()[n - 1] == 1) { candle::bail!("Non contiguous softmax-last-dim is not implemented"); } let last_dim = layout.dims()[layout.shape().rank() - 1]; let elem_count = layout.shape().elem_count(); let output = device.new_buffer(elem_count, storage.dtype(), "softmax")?; candle_metal_kernels::call_last_softmax( device.metal_device(), &command_buffer, kernels, name, elem_count, last_dim, storage.buffer(), layout.start_offset() * storage.dtype().size_in_bytes(), &output, ) .unwrap(); let newstorage = candle::MetalStorage::new(output, device.clone(), storage.dtype()); Ok((newstorage, layout.shape().clone())) } } pub fn softmax_last_dim(xs: &Tensor) -> Result<Tensor> { xs.apply_op1_no_bwd(&SoftmaxLastDim) } // https://pytorch.org/docs/stable/generated/torch.nn.PixelShuffle.html pub fn pixel_shuffle(xs: &Tensor, upscale_factor: usize) -> Result<Tensor> { let (b_size, c, h, w) = xs.dims4()?; let out_c = c / upscale_factor / upscale_factor; xs.reshape((b_size, out_c, upscale_factor, upscale_factor, h, w))? .permute((0, 1, 4, 2, 5, 3))? .reshape((b_size, out_c, h * upscale_factor, w * upscale_factor)) } pub fn pixel_unshuffle(xs: &Tensor, downscale_factor: usize) -> Result<Tensor> { let (b_size, c, h, w) = xs.dims4()?; let out_c = c * downscale_factor * downscale_factor; xs.reshape(( b_size, c, h / downscale_factor, downscale_factor, w / downscale_factor, downscale_factor, ))? .permute((0, 1, 3, 5, 2, 4))? .reshape((b_size, out_c, h / downscale_factor, w / downscale_factor)) } // https://pytorch.org/docs/stable/generated/torch.nn.ReplicationPad2d.html pub fn replication_pad2d(xs: &Tensor, pad: usize) -> Result<Tensor> { match pad { 0 => Ok(xs.clone()), 1 => { let (_b_size, _c, h, w) = xs.dims4()?; let (first, last) = (xs.narrow(3, 0, 1)?, xs.narrow(3, w - 1, 1)?); let xs = Tensor::cat(&[&first, xs, &last], 3)?; let (first, last) = (xs.narrow(2, 0, 1)?, xs.narrow(2, h - 1, 1)?); Tensor::cat(&[&first, &xs, &last], 2) } n => candle::bail!("replication-pad with a size of {n} is not supported"), } }
candle/candle-nn/src/ops.rs/0
{ "file_path": "candle/candle-nn/src/ops.rs", "repo_id": "candle", "token_count": 5237 }
27
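The doc-test embedded in softmax above already shows the expected output; the same functions can also be called from a small standalone program. A hedged sketch (CPU only, input values taken from that doc-test):

use candle::{Device, Tensor};

fn main() -> candle::Result<()> {
    let a = Tensor::new(&[[0f32, 1., 0., 1.], [-2., 2., 3., -3.]], &Device::Cpu)?;
    // Row-wise softmax over dimension 1; each row sums to 1.
    let sm = candle_nn::ops::softmax(&a, 1)?;
    // Custom-op variant specialized for the last dimension.
    let sm_last = candle_nn::ops::softmax_last_dim(&a)?;
    // SiLU activation: x * sigmoid(x).
    let silu = candle_nn::ops::silu(&a)?;
    println!("{sm}\n{sm_last}\n{silu}");
    Ok(())
}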
use std::io::Result;

fn main() -> Result<()> {
    prost_build::compile_protos(&["src/onnx.proto3"], &["src/"])?;
    Ok(())
}
candle/candle-onnx/build.rs/0
{ "file_path": "candle/candle-onnx/build.rs", "repo_id": "candle", "token_count": 60 }
28
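compile_protos writes the generated Rust types into OUT_DIR; the crate then pulls them in with include!. A hedged sketch of the usual prost-build consumption pattern, assuming the proto file declares `package onnx;` (the actual module layout in candle-onnx may differ):

// The generated file name follows the proto package name, assumed to be `onnx` here.
pub mod onnx_proto {
    include!(concat!(env!("OUT_DIR"), "/onnx.rs"));
}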
from dataclasses import dataclass from typing import Optional from candle.nn import Module, Embedding, LayerNorm, Linear, ModuleList from candle import Tensor import candle import candle.functional as F from typing import Tuple, Optional @dataclass class Config: vocab_size: int = 30522 hidden_size: int = 768 num_hidden_layers: int = 12 num_attention_heads: int = 12 intermediate_size: int = 3072 hidden_act: str = "gelu" hidden_dropout_prob: float = 0.1 max_position_embeddings: int = 512 type_vocab_size: int = 2 initializer_range: float = 0.02 layer_norm_eps: float = 1e-12 pad_token_id: int = 0 position_embedding_type: str = "absolute" use_cache: bool = True classifier_dropout: Optional[float] = None model_type: Optional[str] = "bert" class BertSelfAttention(Module): def __init__(self, config: Config) -> None: super().__init__() self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / self.num_attention_heads) all_head_size = int(config.num_attention_heads * self.attention_head_size) hidden_size = config.hidden_size self.query = Linear(hidden_size, all_head_size) self.key = Linear(hidden_size, all_head_size) self.value = Linear(hidden_size, all_head_size) def transpose_for_scores(self, x: Tensor) -> Tensor: new_x_shape = x.shape[:-1] + ( self.num_attention_heads, self.attention_head_size, ) x = x.reshape(new_x_shape).transpose(1, 2) return x.contiguous() def forward(self, hidden_states: Tensor, attention_mask=None) -> Tensor: query = self.query.forward(hidden_states) key = self.key.forward(hidden_states) value = self.value.forward(hidden_states) query = self.transpose_for_scores(query) key = self.transpose_for_scores(key) value = self.transpose_for_scores(value) attention_scores = query.matmul(key.t()) attention_scores = attention_scores / float(self.attention_head_size) ** 0.5 if attention_mask is not None: b_size, _, _, last_dim = attention_scores.shape attention_scores = attention_scores.broadcast_add(attention_mask.reshape((b_size, 1, 1, last_dim))) attention_probs = F.softmax(attention_scores, dim=-1) context_layer = attention_probs.matmul(value) context_layer = context_layer.transpose(1, 2).contiguous() context_layer = context_layer.flatten_from(-2) return context_layer class BertSelfOutput(Module): def __init__(self, config: Config) -> None: super().__init__() self.dense = Linear(config.hidden_size, config.hidden_size) self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: Tensor, input_tensor: Tensor) -> Tensor: hidden_states = self.dense.forward(hidden_states) return self.LayerNorm.forward(hidden_states + input_tensor) class BertAttention(Module): def __init__(self, config: Config) -> None: super().__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) def forward(self, hidden_states: Tensor, attention_mask: None) -> Tensor: self_outputs = self.self.forward(hidden_states, attention_mask=attention_mask) attention_output = self.output.forward(self_outputs, hidden_states) return attention_output class BertIntermediate(Module): def __init__(self, config: Config) -> None: super().__init__() self.dense = Linear(config.hidden_size, config.intermediate_size) self.act = F.gelu if config.hidden_act == "gelu" else F.relu def forward(self, hidden_states: Tensor) -> Tensor: hidden_states = self.dense.forward(hidden_states) return self.act(hidden_states) class BertOutput(Module): def __init__(self, config: Config) -> None: super().__init__() self.dense = 
Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: Tensor, input_tensor: Tensor) -> Tensor: hidden_states = self.dense.forward(hidden_states) return self.LayerNorm.forward(hidden_states + input_tensor) class BertLayer(Module): def __init__(self, config: Config) -> None: super().__init__() self.attention = BertAttention(config) self.intermediate = BertIntermediate(config) self.output = BertOutput(config) def forward(self, hidden_states: Tensor, attention_mask=None) -> Tensor: attention_output = self.attention.forward(hidden_states, attention_mask=attention_mask) # TODO: Support cross-attention? # https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L523 # TODO: Support something similar to `apply_chunking_to_forward`? intermediate_output = self.intermediate.forward(attention_output) layer_output = self.output.forward(intermediate_output, attention_output) return layer_output class BertEncoder(Module): def __init__(self, config: Config) -> None: super().__init__() self.layer = ModuleList() for _ in range(config.num_hidden_layers): self.layer.append(BertLayer(config)) def forward(self, hidden_states: Tensor, attention_mask=None) -> Tensor: for l in self.layer: hidden_states = l.forward(hidden_states, attention_mask=attention_mask) return hidden_states class BertEmbeddings(Module): def __init__(self, config: Config) -> None: super().__init__() self.word_embeddings = Embedding(config.vocab_size, config.hidden_size) self.position_embeddings = Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.position_ids = candle.Tensor(list(range(config.max_position_embeddings))).reshape( (1, config.max_position_embeddings) ) def forward(self, input_ids: Tensor, token_type_ids: Tensor) -> Tensor: (_batch_size, seq_len) = input_ids.shape input_embeddings = self.word_embeddings.forward(input_ids) token_type_embeddings = self.token_type_embeddings.forward(token_type_ids) embeddings: Tensor = input_embeddings + token_type_embeddings position_ids = list(range(seq_len)) position_ids = Tensor(position_ids).to_dtype(input_ids.dtype).to_device(input_ids.device) embeddings = embeddings.broadcast_add(self.position_embeddings.forward(position_ids)) embeddings = self.LayerNorm(embeddings) return embeddings class BertPooler(Module): def __init__(self, config: Config) -> None: super().__init__() self.dense = Linear(config.hidden_size, config.hidden_size) self.activation = F.tanh def forward(self, hidden_states: Tensor) -> Tensor: first_token_tensor = hidden_states[:, 0] pooled_output = self.dense.forward(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output def masked_fill(on_false: float, mask: Tensor, on_true: float): shape = mask.shape on_true = candle.tensor(on_true).broadcast_as(shape) on_false = candle.tensor(on_false).broadcast_as(shape) return mask.where_cond(on_true, on_false) # https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L874 class BertModel(Module): def __init__(self, config: Config, add_pooling_layer=True) -> None: super().__init__() self.config = config self.embeddings = BertEmbeddings(config) self.encoder = BertEncoder(config) 
self.pooler = BertPooler(config) if add_pooling_layer else None def forward( self, input_ids: Tensor, token_type_ids: Tensor, attention_mask=None ) -> Tuple[Tensor, Optional[Tensor]]: if attention_mask is not None: # Replace 0s with -inf, and 1s with 0s. attention_mask = masked_fill(float("-inf"), attention_mask, 1.0) embeddings = self.embeddings.forward(input_ids, token_type_ids) encoder_out = self.encoder.forward(embeddings, attention_mask=attention_mask) pooled_output = self.pooler(encoder_out) if self.pooler is not None else None return encoder_out, pooled_output
candle/candle-pyo3/py_src/candle/models/bert.py/0
{ "file_path": "candle/candle-pyo3/py_src/candle/models/bert.py", "repo_id": "candle", "token_count": 3528 }
29
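bert.py builds its additive attention mask with a masked_fill helper on top of where_cond; the same trick is available from candle's Rust API (where_cond, broadcast_as). A hedged Rust-side sketch of the idea; the helper name mirrors the Python one and is not itself a candle API:

use candle::{Device, Tensor};

// Positions where `mask` is non-zero take `on_true`, the rest take `on_false`.
fn masked_fill(on_false: f32, mask: &Tensor, on_true: f32) -> candle::Result<Tensor> {
    let on_true = Tensor::new(on_true, mask.device())?.broadcast_as(mask.shape())?;
    let on_false = Tensor::new(on_false, mask.device())?.broadcast_as(mask.shape())?;
    mask.where_cond(&on_true, &on_false)
}

fn main() -> candle::Result<()> {
    // 1s mark real tokens, 0s mark padding; the result is added to attention scores.
    let mask = Tensor::new(&[1u8, 1, 1, 0], &Device::Cpu)?;
    let additive = masked_fill(f32::NEG_INFINITY, &mask, 0.0)?;
    println!("{additive}");
    Ok(())
}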
#![allow(clippy::redundant_closure_call)] use pyo3::exceptions::{PyTypeError, PyValueError}; use pyo3::prelude::*; use pyo3::pyclass::CompareOp; use pyo3::types::{IntoPyDict, PyDict, PyTuple}; use pyo3::ToPyObject; use std::collections::hash_map::DefaultHasher; use std::hash::{Hash, Hasher}; use std::os::raw::c_long; use std::sync::Arc; use half::{bf16, f16}; #[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use ::candle::{quantized::QTensor, DType, Device, Module, Tensor, WithDType}; mod utils; use utils::wrap_err; mod shape; use shape::{PyShape, PyShapeWithHole}; #[cfg(feature = "onnx")] mod onnx; #[derive(Clone, Debug)] #[pyclass(name = "Tensor")] /// A `candle` tensor. struct PyTensor(Tensor); impl std::ops::Deref for PyTensor { type Target = Tensor; fn deref(&self) -> &Self::Target { &self.0 } } #[derive(Clone, Copy, Debug, PartialEq, Eq)] #[pyclass(name = "DType")] /// A `candle` dtype. struct PyDType(DType); #[pymethods] impl PyDType { fn __repr__(&self) -> String { format!("{:?}", self.0) } fn __str__(&self) -> String { self.__repr__() } } impl PyDType { fn from_pyobject(ob: PyObject, py: Python<'_>) -> PyResult<Self> { use std::str::FromStr; if let Ok(dtype) = ob.extract::<&str>(py) { let dtype = DType::from_str(dtype) .map_err(|_| PyTypeError::new_err(format!("invalid dtype '{dtype}'")))?; Ok(Self(dtype)) } else { ob.extract(py) } } } static CUDA_DEVICE: std::sync::Mutex<Option<Device>> = std::sync::Mutex::new(None); static METAL_DEVICE: std::sync::Mutex<Option<Device>> = std::sync::Mutex::new(None); #[derive(Clone, Copy, Debug, PartialEq, Eq)] enum PyDevice { Cpu, Cuda, Metal, } impl PyDevice { fn from_device(device: &Device) -> Self { match device { Device::Cpu => Self::Cpu, Device::Cuda(_) => Self::Cuda, Device::Metal(_) => Self::Metal, } } fn as_device(&self) -> PyResult<Device> { match self { Self::Cpu => Ok(Device::Cpu), Self::Cuda => { let mut device = CUDA_DEVICE.lock().unwrap(); if let Some(device) = device.as_ref() { return Ok(device.clone()); }; let d = Device::new_cuda(0).map_err(wrap_err)?; *device = Some(d.clone()); Ok(d) } Self::Metal => { let mut device = METAL_DEVICE.lock().unwrap(); if let Some(device) = device.as_ref() { return Ok(device.clone()); }; let d = Device::new_metal(0).map_err(wrap_err)?; *device = Some(d.clone()); Ok(d) } } } } impl<'source> FromPyObject<'source> for PyDevice { fn extract(ob: &'source PyAny) -> PyResult<Self> { let device: &str = ob.extract()?; let device = match device { "cpu" => PyDevice::Cpu, "cuda" => PyDevice::Cuda, _ => Err(PyTypeError::new_err(format!("invalid device '{device}'")))?, }; Ok(device) } } impl ToPyObject for PyDevice { fn to_object(&self, py: Python<'_>) -> PyObject { let str = match self { PyDevice::Cpu => "cpu", PyDevice::Cuda => "cuda", PyDevice::Metal => "metal", }; str.to_object(py) } } trait PyWithDType: WithDType { fn to_py(&self, py: Python<'_>) -> PyObject; } macro_rules! 
pydtype { ($ty:ty, $conv:expr) => { impl PyWithDType for $ty { fn to_py(&self, py: Python<'_>) -> PyObject { $conv(*self).to_object(py) } } }; } pydtype!(i64, |v| v); pydtype!(u8, |v| v); pydtype!(u32, |v| v); pydtype!(f16, f32::from); pydtype!(bf16, f32::from); pydtype!(f32, |v| v); pydtype!(f64, |v| v); fn actual_index(t: &Tensor, dim: usize, index: i64) -> ::candle::Result<usize> { let dim = t.dim(dim)?; if 0 <= index { let index = index as usize; if dim <= index { ::candle::bail!("index {index} is too large for tensor dimension {dim}") } Ok(index) } else { if (dim as i64) < -index { ::candle::bail!("index {index} is too low for tensor dimension {dim}") } Ok((dim as i64 + index) as usize) } } fn actual_dim(t: &Tensor, dim: i64) -> ::candle::Result<usize> { let rank = t.rank(); if 0 <= dim { let dim = dim as usize; if rank <= dim { ::candle::bail!("dimension index {dim} is too large for tensor rank {rank}") } Ok(dim) } else { if (rank as i64) < -dim { ::candle::bail!("dimension index {dim} is too low for tensor rank {rank}") } Ok((rank as i64 + dim) as usize) } } // TODO: Something similar to this should probably be a part of candle core. trait MapDType { type Output; fn f<T: PyWithDType>(&self, t: &Tensor) -> PyResult<Self::Output>; fn map(&self, t: &Tensor) -> PyResult<Self::Output> { match t.dtype() { DType::U8 => self.f::<u8>(t), DType::U32 => self.f::<u32>(t), DType::I64 => self.f::<i64>(t), DType::BF16 => self.f::<bf16>(t), DType::F16 => self.f::<f16>(t), DType::F32 => self.f::<f32>(t), DType::F64 => self.f::<f64>(t), } } } enum Indexer { Index(usize), Slice(usize, usize), Ellipsis, Expand, IndexSelect(Tensor), } #[derive(Clone, Debug)] struct TorchTensor(PyObject); impl<'source> pyo3::FromPyObject<'source> for TorchTensor { fn extract(ob: &'source PyAny) -> PyResult<Self> { let numpy_value: PyObject = ob.getattr("numpy")?.call0()?.extract()?; Ok(TorchTensor(numpy_value)) } } #[pymethods] impl PyTensor { #[new] #[pyo3(text_signature = "(self, data:_ArrayLike)")] // TODO: Handle arbitrary input dtype and shape. /// Creates a new tensor from a Python value. The value can be a scalar or array-like object. fn new(py: Python<'_>, data: PyObject) -> PyResult<Self> { use Device::Cpu; let tensor = if let Ok(vs) = data.extract::<u32>(py) { Tensor::new(vs, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<i64>(py) { Tensor::new(vs, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<f32>(py) { Tensor::new(vs, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<Vec<u32>>(py) { let len = vs.len(); Tensor::from_vec(vs, len, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<Vec<i64>>(py) { let len = vs.len(); Tensor::from_vec(vs, len, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<Vec<f32>>(py) { let len = vs.len(); Tensor::from_vec(vs, len, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<Vec<Vec<u32>>>(py) { Tensor::new(vs, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<Vec<Vec<i64>>>(py) { Tensor::new(vs, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<Vec<Vec<f32>>>(py) { Tensor::new(vs, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<Vec<Vec<Vec<u32>>>>(py) { Tensor::new(vs, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<Vec<Vec<Vec<i64>>>>(py) { Tensor::new(vs, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<Vec<Vec<Vec<f32>>>>(py) { Tensor::new(vs, &Cpu).map_err(wrap_err)? 
} else if let Ok(TorchTensor(numpy)) = data.extract::<TorchTensor>(py) { return PyTensor::new(py, numpy); } else { let ty = data.as_ref(py).get_type(); Err(PyTypeError::new_err(format!( "incorrect type {ty} for tensor" )))? }; Ok(Self(tensor)) } /// Gets the tensor's data as a Python scalar or array-like object. /// &RETURNS&: _ArrayLike fn values(&self, py: Python<'_>) -> PyResult<PyObject> { struct M<'a>(Python<'a>); impl<'a> MapDType for M<'a> { type Output = PyObject; fn f<T: PyWithDType>(&self, t: &Tensor) -> PyResult<Self::Output> { match t.rank() { 0 => Ok(t.to_scalar::<T>().map_err(wrap_err)?.to_py(self.0)), 1 => { let v = t.to_vec1::<T>().map_err(wrap_err)?; let v = v.iter().map(|v| v.to_py(self.0)).collect::<Vec<_>>(); Ok(v.to_object(self.0)) } 2 => { let v = t.to_vec2::<T>().map_err(wrap_err)?; let v = v .iter() .map(|v| v.iter().map(|v| v.to_py(self.0)).collect()) .collect::<Vec<Vec<_>>>(); Ok(v.to_object(self.0)) } 3 => { let v = t.to_vec3::<T>().map_err(wrap_err)?; let v = v .iter() .map(|v| { v.iter() .map(|v| v.iter().map(|v| v.to_py(self.0)).collect()) .collect() }) .collect::<Vec<Vec<Vec<_>>>>(); Ok(v.to_object(self.0)) } n => Err(PyTypeError::new_err(format!( "TODO: conversion to PyObject is not handled for rank {n}" )))?, } } } // TODO: Handle arbitrary shapes. M(py).map(self) } /// Converts candle's tensor to pytorch's tensor /// &RETURNS&: torch.Tensor fn to_torch(&self, py: Python<'_>) -> PyResult<PyObject> { let candle_values = self.values(py)?; let torch_tensor: PyObject = py .import("torch")? .getattr("tensor")? .call1((candle_values,))? .extract()?; Ok(torch_tensor) } #[getter] /// Gets the tensor's shape. /// &RETURNS&: Tuple[int] fn shape(&self, py: Python<'_>) -> PyObject { PyTuple::new(py, self.0.dims()).to_object(py) } #[getter] /// Gets the tensor's element count. /// &RETURNS&: int fn nelement(&self) -> usize { self.0.elem_count() } #[getter] /// Gets the tensor's strides. /// &RETURNS&: Tuple[int] fn stride(&self, py: Python<'_>) -> PyObject { PyTuple::new(py, self.0.stride()).to_object(py) } #[getter] /// Gets the tensor's dtype. /// &RETURNS&: DType fn dtype(&self) -> PyDType { PyDType(self.0.dtype()) } #[getter] /// Gets the tensor's device. /// &RETURNS&: Device fn device(&self, py: Python<'_>) -> PyObject { PyDevice::from_device(self.0.device()).to_object(py) } #[getter] /// Gets the tensor's rank. /// &RETURNS&: int fn rank(&self) -> usize { self.0.rank() } fn __repr__(&self) -> String { format!("{}", self.0) } fn __str__(&self) -> String { self.__repr__() } /// Performs the `abs` operation on the tensor. /// &RETURNS&: Tensor fn abs(&self) -> PyResult<Self> { Ok(PyTensor(self.0.abs().map_err(wrap_err)?)) } /// Performs the `sin` operation on the tensor. /// &RETURNS&: Tensor fn sin(&self) -> PyResult<Self> { Ok(PyTensor(self.0.sin().map_err(wrap_err)?)) } /// Performs the `cos` operation on the tensor. /// &RETURNS&: Tensor fn cos(&self) -> PyResult<Self> { Ok(PyTensor(self.0.cos().map_err(wrap_err)?)) } /// Performs the `log` operation on the tensor. /// &RETURNS&: Tensor fn log(&self) -> PyResult<Self> { Ok(PyTensor(self.0.log().map_err(wrap_err)?)) } /// Squares the tensor. /// &RETURNS&: Tensor fn sqr(&self) -> PyResult<Self> { Ok(PyTensor(self.0.sqr().map_err(wrap_err)?)) } /// Calculates the square root of the tensor. /// &RETURNS&: Tensor fn sqrt(&self) -> PyResult<Self> { Ok(PyTensor(self.0.sqrt().map_err(wrap_err)?)) } /// Get the `recip` of the tensor. 
/// &RETURNS&: Tensor fn recip(&self) -> PyResult<Self> { Ok(PyTensor(self.0.recip().map_err(wrap_err)?)) } /// Performs the `exp` operation on the tensor. /// &RETURNS&: Tensor fn exp(&self) -> PyResult<Self> { Ok(PyTensor(self.0.exp().map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, p:float)")] /// Performs the `pow` operation on the tensor with the given exponent. /// &RETURNS&: Tensor fn powf(&self, p: f64) -> PyResult<Self> { Ok(PyTensor(self.0.powf(p).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, rhs:Tensor, dim:int)")] /// Select values for the input tensor at the target indexes across the specified dimension. /// /// The `indexes` is argument is an int tensor with a single dimension. /// The output has the same number of dimension as the `self` input. The target dimension of /// the output has length the length of `indexes` and the values are taken from `self` using /// the index from `indexes`. Other dimensions have the same number of elements as the input /// tensor. /// &RETURNS&: Tensor fn index_select(&self, rhs: &Self, dim: i64) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; Ok(PyTensor(self.0.index_select(rhs, dim).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, rhs:Tensor)")] /// Performs a matrix multiplication between the two tensors. /// &RETURNS&: Tensor fn matmul(&self, rhs: &Self) -> PyResult<Self> { Ok(PyTensor(self.0.matmul(rhs).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, rhs:Tensor)")] /// Adds the two tensors, while broadcasting the right-hand-side tensor to match the shape of the left-hand-side tensor. /// &RETURNS&: Tensor fn broadcast_add(&self, rhs: &Self) -> PyResult<Self> { Ok(PyTensor(self.0.broadcast_add(rhs).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, rhs:Tensor)")] /// Subtracts the two tensors, while broadcasting the right-hand-side tensor to match the shape of the left-hand-side tensor. /// &RETURNS&: Tensor fn broadcast_sub(&self, rhs: &Self) -> PyResult<Self> { Ok(PyTensor(self.0.broadcast_sub(rhs).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, rhs:Tensor)")] /// Multiplies the two tensors, while broadcasting the right-hand-side tensor to match the shape of the left-hand-side tensor. /// &RETURNS&: Tensor fn broadcast_mul(&self, rhs: &Self) -> PyResult<Self> { Ok(PyTensor(self.0.broadcast_mul(rhs).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, rhs:Tensor)")] /// Divides the two tensors, while broadcasting the right-hand-side tensor to match the shape of the left-hand-side tensor. /// &RETURNS&: Tensor fn broadcast_div(&self, rhs: &Self) -> PyResult<Self> { Ok(PyTensor(self.0.broadcast_div(rhs).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, on_true:Tensor, on_false:Tensor)")] /// Returns a tensor with the same shape as the input tensor, the values are taken from /// `on_true` if the input tensor value is not zero, and `on_false` at the positions where the /// input tensor is equal to zero. /// &RETURNS&: Tensor fn where_cond(&self, on_true: &Self, on_false: &Self) -> PyResult<Self> { Ok(PyTensor( self.0.where_cond(on_true, on_false).map_err(wrap_err)?, )) } #[getter] /// Index a tensor. /// &RETURNS&: Tensor fn __getitem__(&self, py: Python, idx: PyObject) -> PyResult<Self> { let mut indexers: Vec<Indexer> = vec![]; let dims = self.0.shape().dims(); fn to_absolute_index(index: isize, current_dim: usize, dims: &[usize]) -> PyResult<usize> { // Convert a relative index to an absolute index e.g. 
tensor[-1] -> tensor[0] let actual_index = if index < 0 { dims[current_dim] as isize + index } else { index }; // Check that the index is in range if actual_index < 0 || actual_index >= dims[current_dim] as isize { return Err(PyValueError::new_err(format!( "index out of range for dimension '{i}' with indexer '{value}'", i = current_dim, value = index ))); } Ok(actual_index as usize) } fn extract_indexer( py_indexer: &PyAny, current_dim: usize, dims: &[usize], index_argument_count: usize, ) -> PyResult<(Indexer, usize)> { if let Ok(index) = py_indexer.extract() { // Handle a single index e.g. tensor[0] or tensor[-1] Ok(( Indexer::Index(to_absolute_index(index, current_dim, dims)?), current_dim + 1, )) } else if let Ok(slice) = py_indexer.downcast::<pyo3::types::PySlice>() { // Handle a single slice e.g. tensor[0:1] or tensor[0:-1] let index = slice.indices(dims[current_dim] as c_long)?; Ok(( Indexer::Slice(index.start as usize, index.stop as usize), current_dim + 1, )) } else if let Ok(tensor) = py_indexer.extract::<PyTensor>() { // Handle a tensor as indices e.g. tensor[tensor([0,1])] let t = tensor.0; if t.rank() != 1 { return Err(PyTypeError::new_err( "multi-dimensional tensor indexing is not supported", )); } Ok((Indexer::IndexSelect(t), current_dim + 1)) } else if let Ok(list) = py_indexer.downcast::<pyo3::types::PyList>() { // Handle a list of indices e.g. tensor[[0,1]] let mut indexes = vec![]; for item in list.iter() { let index = item.extract::<i64>()?; indexes.push(index); } Ok(( Indexer::IndexSelect( Tensor::from_vec(indexes, list.len(), &Device::Cpu).map_err(wrap_err)?, ), current_dim + 1, )) } else if py_indexer.is_ellipsis() { // Handle '...' e.g. tensor[..., 0] if current_dim > 0 { return Err(PyTypeError::new_err( "Ellipsis ('...') can only be used at the start of an indexing operation", )); } Ok((Indexer::Ellipsis, dims.len() - (index_argument_count - 1))) } else if py_indexer.is_none() { // Handle None e.g. tensor[None, 0] Ok((Indexer::Expand, current_dim)) } else { Err(PyTypeError::new_err(format!( "unsupported indexer {}", py_indexer ))) } } if let Ok(tuple) = idx.downcast::<pyo3::types::PyTuple>(py) { let not_none_count: usize = tuple.iter().filter(|x| !x.is_none()).count(); if not_none_count > dims.len() { return Err(PyValueError::new_err("provided too many indices")); } let mut current_dim = 0; for item in tuple.iter() { let (indexer, new_current_dim) = extract_indexer(item, current_dim, dims, not_none_count)?; current_dim = new_current_dim; indexers.push(indexer); } } else { let (indexer, _) = extract_indexer(idx.downcast::<PyAny>(py)?, 0, dims, 1)?; indexers.push(indexer); } let mut x = self.0.clone(); let mut current_dim = 0; // Apply the indexers for indexer in indexers.iter() { x = match indexer { Indexer::Index(n) => x .narrow(current_dim, *n, 1) .map_err(wrap_err)? 
.squeeze(current_dim) .map_err(wrap_err)?, Indexer::Slice(start, stop) => { let out = x .narrow(current_dim, *start, stop.saturating_sub(*start)) .map_err(wrap_err)?; current_dim += 1; out } Indexer::Ellipsis => { // Ellipsis is a special case, it means that all remaining dimensions should be // selected => advance the current_dim to the last dimension we have indexers for current_dim += dims.len() - (indexers.len() - 1); x } Indexer::Expand => { // Expand is a special case, it means that a new dimension should be added => unsqueeze and advance the current_dim let out = x.unsqueeze(current_dim).map_err(wrap_err)?; current_dim += 1; out } Indexer::IndexSelect(indexes) => { let out = x .index_select( &indexes.to_device(x.device()).map_err(wrap_err)?, current_dim, ) .map_err(wrap_err)?; current_dim += 1; out } } } Ok(Self(x)) } /// Add two tensors. /// &RETURNS&: Tensor fn __add__(&self, rhs: &PyAny) -> PyResult<Self> { let tensor = if let Ok(rhs) = rhs.extract::<Self>() { self.0.broadcast_add(&rhs.0).map_err(wrap_err)? } else if let Ok(rhs) = rhs.extract::<f64>() { (&self.0 + rhs).map_err(wrap_err)? } else { Err(PyTypeError::new_err("unsupported rhs for add"))? }; Ok(Self(tensor)) } fn __radd__(&self, rhs: &PyAny) -> PyResult<Self> { self.__add__(rhs) } /// Multiply two tensors. /// &RETURNS&: Tensor fn __mul__(&self, rhs: &PyAny) -> PyResult<Self> { let tensor = if let Ok(rhs) = rhs.extract::<Self>() { self.0.broadcast_mul(&rhs.0).map_err(wrap_err)? } else if let Ok(rhs) = rhs.extract::<f64>() { (&self.0 * rhs).map_err(wrap_err)? } else { Err(PyTypeError::new_err("unsupported rhs for mul"))? }; Ok(Self(tensor)) } fn __rmul__(&self, rhs: &PyAny) -> PyResult<Self> { self.__mul__(rhs) } /// Subtract two tensors. /// &RETURNS&: Tensor fn __sub__(&self, rhs: &PyAny) -> PyResult<Self> { let tensor = if let Ok(rhs) = rhs.extract::<Self>() { self.0.broadcast_sub(&rhs.0).map_err(wrap_err)? } else if let Ok(rhs) = rhs.extract::<f64>() { (&self.0 - rhs).map_err(wrap_err)? } else { Err(PyTypeError::new_err("unsupported rhs for sub"))? }; Ok(Self(tensor)) } /// Divide two tensors. /// &RETURNS&: Tensor fn __truediv__(&self, rhs: &PyAny) -> PyResult<Self> { let tensor = if let Ok(rhs) = rhs.extract::<Self>() { self.0.broadcast_div(&rhs.0).map_err(wrap_err)? } else if let Ok(rhs) = rhs.extract::<f64>() { (&self.0 / rhs).map_err(wrap_err)? } else { Err(PyTypeError::new_err("unsupported rhs for div"))? }; Ok(Self(tensor)) } /// Rich-compare two tensors. /// &RETURNS&: Tensor fn __richcmp__(&self, rhs: &PyAny, op: CompareOp) -> PyResult<Self> { let compare = |lhs: &Tensor, rhs: &Tensor| { let t = match op { CompareOp::Eq => lhs.eq(rhs), CompareOp::Ne => lhs.ne(rhs), CompareOp::Lt => lhs.lt(rhs), CompareOp::Le => lhs.le(rhs), CompareOp::Gt => lhs.gt(rhs), CompareOp::Ge => lhs.ge(rhs), }; Ok(PyTensor(t.map_err(wrap_err)?)) }; if let Ok(rhs) = rhs.extract::<PyTensor>() { if self.0.shape() == rhs.0.shape() { compare(&self.0, &rhs.0) } else { // We broadcast manually here because `candle.cmp` does not support automatic broadcasting let broadcast_shape = self .0 .shape() .broadcast_shape_binary_op(rhs.0.shape(), "cmp") .map_err(wrap_err)?; let broadcasted_lhs = self.0.broadcast_as(&broadcast_shape).map_err(wrap_err)?; let broadcasted_rhs = rhs.0.broadcast_as(&broadcast_shape).map_err(wrap_err)?; compare(&broadcasted_lhs, &broadcasted_rhs) } } else if let Ok(rhs) = rhs.extract::<f64>() { let scalar_tensor = Tensor::new(rhs, self.0.device()) .map_err(wrap_err)? .to_dtype(self.0.dtype()) .map_err(wrap_err)? 
.broadcast_as(self.0.shape()) .map_err(wrap_err)?; compare(&self.0, &scalar_tensor) } else { return Err(PyTypeError::new_err("unsupported rhs for __richcmp__")); } } fn __hash__(&self) -> u64 { // we have overridden __richcmp__ => py03 wants us to also override __hash__ // we simply hash the address of the tensor let mut hasher = DefaultHasher::new(); let pointer = &self.0 as *const Tensor; let address = pointer as usize; address.hash(&mut hasher); hasher.finish() } #[pyo3(signature=(*shape), text_signature = "(self, *shape:Shape)")] /// Reshapes the tensor to the given shape. /// &RETURNS&: Tensor fn reshape(&self, shape: PyShapeWithHole) -> PyResult<Self> { Ok(PyTensor( self.0 .reshape(shape.to_absolute(&self.0)?) .map_err(wrap_err)?, )) } #[pyo3(signature=(*shape), text_signature = "(self, *shape:Shape)")] /// Broadcasts the tensor to the given shape. /// &RETURNS&: Tensor fn broadcast_as(&self, shape: PyShapeWithHole) -> PyResult<Self> { Ok(PyTensor( self.0 .broadcast_as(shape.to_absolute(&self.0)?) .map_err(wrap_err)?, )) } #[pyo3(signature=(*shape), text_signature = "(self, *shape:Shape)")] /// Broadcasts the tensor to the given shape, adding new dimensions on the left. /// &RETURNS&: Tensor fn broadcast_left(&self, shape: PyShapeWithHole) -> PyResult<Self> { Ok(PyTensor( self.0 .broadcast_left(shape.to_absolute(&self.0)?) .map_err(wrap_err)?, )) } #[pyo3(text_signature = "(self, dim:int)")] /// Creates a new tensor with the specified dimension removed if its size was one. /// &RETURNS&: Tensor fn squeeze(&self, dim: i64) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; Ok(PyTensor(self.0.squeeze(dim).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, dim:int)")] /// Creates a new tensor with a dimension of size one inserted at the specified position. /// &RETURNS&: Tensor fn unsqueeze(&self, dim: usize) -> PyResult<Self> { Ok(PyTensor(self.0.unsqueeze(dim).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, index:int)")] /// Gets the value at the specified index. /// &RETURNS&: Tensor fn get(&self, index: i64) -> PyResult<Self> { let index = actual_index(self, 0, index).map_err(wrap_err)?; Ok(PyTensor(self.0.get(index).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, dim1:int, dim2:int)")] /// Returns a tensor that is a transposed version of the input, the given dimensions are swapped. /// &RETURNS&: Tensor fn transpose(&self, dim1: usize, dim2: usize) -> PyResult<Self> { Ok(PyTensor(self.0.transpose(dim1, dim2).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, dim:int, start:int, len:int)")] /// Returns a new tensor that is a narrowed version of the input, the dimension `dim` /// ranges from `start` to `start + len`. /// &RETURNS&: Tensor fn narrow(&self, dim: i64, start: i64, len: usize) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; let start = actual_index(self, dim, start).map_err(wrap_err)?; Ok(PyTensor(self.0.narrow(dim, start, len).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, dim:int)")] /// Returns the indices of the maximum value(s) across the selected dimension. /// &RETURNS&: Tensor fn argmax_keepdim(&self, dim: i64) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; Ok(PyTensor(self.0.argmax_keepdim(dim).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, dim:int)")] /// Returns the indices of the minimum value(s) across the selected dimension. 
/// &RETURNS&: Tensor fn argmin_keepdim(&self, dim: i64) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; Ok(PyTensor(self.0.argmin_keepdim(dim).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, dim:int)")] /// Gathers the maximum value across the selected dimension. /// &RETURNS&: Tensor fn max_keepdim(&self, dim: i64) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; Ok(PyTensor(self.0.max_keepdim(dim).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, dim:int)")] /// Gathers the minimum value across the selected dimension. /// &RETURNS&: Tensor fn min_keepdim(&self, dim: i64) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; Ok(PyTensor(self.0.min_keepdim(dim).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, dim:Union[int, List[int]])")] /// Returns the sum of all elements in the input tensor. The sum is performed over all the input dimensions. /// &RETURNS&: Tensor fn sum_keepdim(&self, dims: PyObject, py: Python<'_>) -> PyResult<Self> { let dims = if let Ok(dim) = dims.extract::<usize>(py) { vec![dim] } else { dims.extract::<Vec<usize>>(py)? }; Ok(PyTensor( self.0.sum_keepdim(dims.as_slice()).map_err(wrap_err)?, )) } /// Returns the sum of the tensor. /// &RETURNS&: Tensor fn sum_all(&self) -> PyResult<Self> { Ok(PyTensor(self.0.sum_all().map_err(wrap_err)?)) } /// Returns the mean of the tensor. /// &RETURNS&: Tensor fn mean_all(&self) -> PyResult<Self> { let elements = self.0.elem_count(); let sum = self.0.sum_all().map_err(wrap_err)?; let mean = (sum / elements as f64).map_err(wrap_err)?; Ok(PyTensor(mean)) } #[pyo3(text_signature = "(self, dim:int)")] /// Flattens the tensor on the dimension indexes from `dim` (inclusive) to the last dimension. /// &RETURNS&: Tensor fn flatten_from(&self, dim: i64) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; Ok(PyTensor(self.0.flatten_from(dim).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, dim:int)")] ///Flattens the tensor on the dimension indexes from `0` to `dim` (inclusive). /// &RETURNS&: Tensor fn flatten_to(&self, dim: i64) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; Ok(PyTensor(self.0.flatten_to(dim).map_err(wrap_err)?)) } /// Flattens the tensor into a 1D tensor. /// &RETURNS&: Tensor fn flatten_all(&self) -> PyResult<Self> { Ok(PyTensor(self.0.flatten_all().map_err(wrap_err)?)) } /// Transposes the tensor. /// &RETURNS&: Tensor fn t(&self) -> PyResult<Self> { Ok(PyTensor(self.0.t().map_err(wrap_err)?)) } /// Makes the tensor contiguous in memory. /// &RETURNS&: Tensor fn contiguous(&self) -> PyResult<Self> { Ok(PyTensor(self.0.contiguous().map_err(wrap_err)?)) } /// Returns true if the tensor is contiguous in C order. /// &RETURNS&: bool fn is_contiguous(&self) -> bool { self.0.is_contiguous() } /// Returns true if the tensor is contiguous in Fortran order. /// &RETURNS&: bool fn is_fortran_contiguous(&self) -> bool { self.0.is_fortran_contiguous() } /// Detach the tensor from the computation graph. /// &RETURNS&: Tensor fn detach(&self) -> PyResult<Self> { Ok(PyTensor(self.0.detach().map_err(wrap_err)?)) } /// Returns a copy of the tensor. /// &RETURNS&: Tensor fn copy(&self) -> PyResult<Self> { Ok(PyTensor(self.0.copy().map_err(wrap_err)?)) } #[pyo3(signature = (*args, **kwargs), text_signature = "(self, *args, **kwargs)")] /// Performs Tensor dtype and/or device conversion. 
/// &RETURNS&: Tensor fn to(&self, args: &PyTuple, kwargs: Option<&PyDict>) -> PyResult<Self> { let mut device: Option<PyDevice> = None; let mut dtype: Option<PyDType> = None; let mut other: Option<PyTensor> = None; fn handle_duplicates<T>( opt: &mut Option<T>, extraction_result: PyResult<T>, err_msg: &'static str, ) -> PyResult<()> { if let Ok(successful_extraction) = extraction_result { if opt.is_some() { return Err(PyValueError::new_err(err_msg)); } *opt = Some(successful_extraction); } Ok(()) } //handle args for arg in args.iter() { if arg.extract::<PyDevice>().is_ok() { handle_duplicates( &mut device, arg.extract::<PyDevice>(), "cannot specify multiple devices", )?; } else if arg.extract::<PyDType>().is_ok() { handle_duplicates( &mut dtype, arg.extract::<PyDType>(), "cannot specify multiple dtypes", )?; } else if arg.extract::<PyTensor>().is_ok() { handle_duplicates( &mut other, arg.extract::<PyTensor>(), "cannot specify multiple output tensors", )?; } else { return Err(PyTypeError::new_err(format!( "unsupported argument type `{:#?}`", arg.get_type().name() ))); } } if let Some(kwargs) = kwargs { if let Ok(Some(any)) = kwargs.get_item("dtype") { handle_duplicates( &mut dtype, any.extract::<PyDType>(), "cannot specify multiple dtypes", )?; } if let Ok(Some(any)) = kwargs.get_item("device") { handle_duplicates( &mut device, any.extract::<PyDevice>(), "cannot specify multiple devices", )?; } if let Ok(Some(any)) = kwargs.get_item("other") { handle_duplicates( &mut other, any.extract::<PyTensor>(), "cannot specify multiple output tensors", )?; } } if let Some(other) = other { if device.is_some() { return Err(PyValueError::new_err( "cannot specify both an output tensor and a device", )); } if dtype.is_some() { return Err(PyValueError::new_err( "cannot specify both an output tensor and a dtype", )); } dtype = Some(other.dtype()); device = Some(PyDevice::from_device(other.0.device())); } let result = match (device, dtype) { (Some(device), Some(dtype)) => self .0 .to_device(&device.as_device()?) .map_err(wrap_err)? .to_dtype(dtype.0) .map_err(wrap_err)?, (Some(device), None) => self.0.to_device(&device.as_device()?).map_err(wrap_err)?, (None, Some(dtype)) => self.0.to_dtype(dtype.0).map_err(wrap_err)?, (None, None) => return Err(PyTypeError::new_err("No valid dtype or device specified")), }; Ok(PyTensor(result)) } #[pyo3(text_signature = "(self, dtype:Union[str,DType])")] /// Convert the tensor to a new dtype. /// &RETURNS&: Tensor fn to_dtype(&self, dtype: PyObject, py: Python<'_>) -> PyResult<Self> { let dtype = PyDType::from_pyobject(dtype, py)?; Ok(PyTensor(self.0.to_dtype(dtype.0).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, device:Union[str,Device])")] /// Move the tensor to a new device. /// &RETURNS&: Tensor fn to_device(&self, device: PyDevice) -> PyResult<Self> { let device = device.as_device()?; Ok(PyTensor(self.0.to_device(&device).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, quantized_dtype:str)")] /// Quantize the tensor. 
/// &RETURNS&: QTensor fn quantize(&self, quantized_dtype: &str) -> PyResult<PyQTensor> { use ::candle::quantized; let res = match quantized_dtype.to_lowercase().as_str() { "q2k" => quantized::QTensor::quantize(self, quantized::GgmlDType::Q2K), "q3k" => quantized::QTensor::quantize(self, quantized::GgmlDType::Q3K), "q4_0" => quantized::QTensor::quantize(self, quantized::GgmlDType::Q4_0), "q4_1" => quantized::QTensor::quantize(self, quantized::GgmlDType::Q4_1), "q4k" => quantized::QTensor::quantize(self, quantized::GgmlDType::Q4K), "q5_0" => quantized::QTensor::quantize(self, quantized::GgmlDType::Q5_0), "q5_1" => quantized::QTensor::quantize(self, quantized::GgmlDType::Q5_1), "q5k" => quantized::QTensor::quantize(self, quantized::GgmlDType::Q5K), "q6k" => quantized::QTensor::quantize(self, quantized::GgmlDType::Q6K), "q8_0" => quantized::QTensor::quantize(self, quantized::GgmlDType::Q8_0), "q8_1" => quantized::QTensor::quantize(self, quantized::GgmlDType::Q8_1), "q8k" => quantized::QTensor::quantize(self, quantized::GgmlDType::Q8K), "f16" => quantized::QTensor::quantize(self, quantized::GgmlDType::F16), "f32" => quantized::QTensor::quantize(self, quantized::GgmlDType::F32), dt => { return Err(PyErr::new::<PyValueError, _>(format!( "unknown quantized-dtype {dt}" ))) } }; Ok(PyQTensor(Arc::new(res.map_err(wrap_err)?))) } } #[pyfunction] #[pyo3(text_signature = "(tensors:List[Tensor], dim:int )")] /// Concatenate the tensors across one axis. /// &RETURNS&: Tensor fn cat(tensors: Vec<PyTensor>, dim: i64) -> PyResult<PyTensor> { if tensors.is_empty() { return Err(PyErr::new::<PyValueError, _>("empty input to cat")); } let dim = actual_dim(&tensors[0], dim).map_err(wrap_err)?; let tensors = tensors.into_iter().map(|t| t.0).collect::<Vec<_>>(); let tensor = Tensor::cat(&tensors, dim).map_err(wrap_err)?; Ok(PyTensor(tensor)) } #[pyfunction] #[pyo3(text_signature = "(tensors:List[Tensor], dim:int)")] /// Stack the tensors along a new axis. /// &RETURNS&: Tensor fn stack(tensors: Vec<PyTensor>, dim: usize) -> PyResult<PyTensor> { let tensors = tensors.into_iter().map(|t| t.0).collect::<Vec<_>>(); let tensor = Tensor::stack(&tensors, dim).map_err(wrap_err)?; Ok(PyTensor(tensor)) } #[pyfunction] #[pyo3(text_signature = "(data:_ArrayLike)")] /// Creates a new tensor from a Python value. The value can be a scalar or array-like object. /// &RETURNS&: Tensor fn tensor(py: Python<'_>, data: PyObject) -> PyResult<PyTensor> { PyTensor::new(py, data) } #[pyfunction] #[pyo3(signature = (*shape,device=None), text_signature = "(*shape:Shape, device:Optional[Device]=None)")] /// Creates a new tensor with random values. /// &RETURNS&: Tensor fn rand(_py: Python<'_>, shape: PyShape, device: Option<PyDevice>) -> PyResult<PyTensor> { let device = device.unwrap_or(PyDevice::Cpu).as_device()?; let tensor = Tensor::rand(0f32, 1f32, shape, &device).map_err(wrap_err)?; Ok(PyTensor(tensor)) } #[pyfunction] #[pyo3(signature = (*shape,device=None), text_signature = "(*shape:Shape, device:Optional[Device]=None)")] /// Creates a new tensor with random values from a normal distribution. 
/// &RETURNS&: Tensor fn randn(_py: Python<'_>, shape: PyShape, device: Option<PyDevice>) -> PyResult<PyTensor> { let device = device.unwrap_or(PyDevice::Cpu).as_device()?; let tensor = Tensor::randn(0f32, 1f32, shape, &device).map_err(wrap_err)?; Ok(PyTensor(tensor)) } #[pyfunction] #[pyo3(signature = (*shape, dtype=None, device=None),text_signature = "(*shape:Shape, dtype:Optional[DType]=None, device:Optional[Device]=None)")] /// Creates a new tensor filled with ones. /// &RETURNS&: Tensor fn ones( py: Python<'_>, shape: PyShape, dtype: Option<PyObject>, device: Option<PyDevice>, ) -> PyResult<PyTensor> { let dtype = match dtype { None => DType::F32, Some(dtype) => PyDType::from_pyobject(dtype, py)?.0, }; let device = device.unwrap_or(PyDevice::Cpu).as_device()?; let tensor = Tensor::ones(shape, dtype, &device).map_err(wrap_err)?; Ok(PyTensor(tensor)) } #[pyfunction] #[pyo3(signature = (*shape, dtype=None, device=None), text_signature = "(*shape:Shape, dtype:Optional[DType]=None, device:Optional[Device]=None)")] /// Creates a new tensor filled with zeros. /// &RETURNS&: Tensor fn zeros( py: Python<'_>, shape: PyShape, dtype: Option<PyObject>, device: Option<PyDevice>, ) -> PyResult<PyTensor> { let dtype = match dtype { None => DType::F32, Some(dtype) => PyDType::from_pyobject(dtype, py)?.0, }; let device = device.unwrap_or(PyDevice::Cpu).as_device()?; let tensor = Tensor::zeros(shape, dtype, &device).map_err(wrap_err)?; Ok(PyTensor(tensor)) } #[derive(Debug, Clone)] #[pyclass(name = "QTensor")] /// A quantized tensor. struct PyQTensor(Arc<QTensor>); impl std::ops::Deref for PyQTensor { type Target = QTensor; fn deref(&self) -> &Self::Target { self.0.as_ref() } } #[pymethods] impl PyQTensor { #[getter] ///Gets the tensors quantized dtype. /// &RETURNS&: str fn ggml_dtype(&self) -> String { format!("{:?}", self.0.dtype()) } #[getter] ///Gets the rank of the tensor. /// &RETURNS&: int fn rank(&self) -> usize { self.0.rank() } #[getter] ///Gets the shape of the tensor. /// &RETURNS&: Tuple[int] fn shape(&self, py: Python<'_>) -> PyObject { PyTuple::new(py, self.0.shape().dims()).to_object(py) } fn __repr__(&self) -> String { format!("{:?}", self.0) } fn __str__(&self) -> String { self.__repr__() } /// Dequantizes the tensor. /// &RETURNS&: Tensor fn dequantize(&self) -> PyResult<PyTensor> { let tensor = self.0.dequantize(&Device::Cpu).map_err(wrap_err)?; Ok(PyTensor(tensor)) } #[pyo3(text_signature = "(self, lhs:Tensor)")] /// Performs a quantized matrix multiplication, with the quantized tensor as the right hand side. /// &RETURNS&: Tensor fn matmul_t(&self, lhs: &PyTensor) -> PyResult<PyTensor> { let qmatmul = ::candle::quantized::QMatMul::from_arc(self.0.clone()).map_err(wrap_err)?; let res = qmatmul.forward(lhs).map_err(wrap_err)?; Ok(PyTensor(res)) } } #[pyfunction] #[pyo3(text_signature = "(path:Union[str,PathLike])")] /// Loads a safetensors file. Returns a dictionary mapping tensor names to tensors. /// &RETURNS&: Dict[str,Tensor] fn load_safetensors(path: &str, py: Python<'_>) -> PyResult<PyObject> { let res = ::candle::safetensors::load(path, &Device::Cpu).map_err(wrap_err)?; let res = res .into_iter() .map(|(key, value)| (key, PyTensor(value).into_py(py))) .collect::<Vec<_>>(); Ok(res.into_py_dict(py).to_object(py)) } #[pyfunction] #[pyo3(text_signature = "(path:Union[str,PathLike], tensors:Dict[str,Tensor])")] /// Saves a dictionary of tensors to a safetensors file. 
/// &RETURNS&: None fn save_safetensors( path: &str, tensors: std::collections::HashMap<String, PyTensor>, ) -> PyResult<()> { let tensors = tensors .into_iter() .map(|(s, t)| (s, t.0)) .collect::<std::collections::HashMap<_, _>>(); ::candle::safetensors::save(&tensors, path).map_err(wrap_err) } #[pyfunction] #[pyo3(text_signature = "(path:Union[str,PathLike], device: Optional[Device] = None)")] /// Load a GGML file. Returns a tuple of three objects: a dictionary mapping tensor names to tensors, /// a dictionary mapping hyperparameter names to hyperparameter values, and a vocabulary. /// &RETURNS&: Tuple[Dict[str,QTensor], Dict[str,Any], List[str]] fn load_ggml( path: &str, device: Option<PyDevice>, py: Python<'_>, ) -> PyResult<(PyObject, PyObject, PyObject)> { let mut file = std::fs::File::open(path)?; let device = device.unwrap_or(PyDevice::Cpu).as_device()?; let ggml = ::candle::quantized::ggml_file::Content::read(&mut file, &device).map_err(wrap_err)?; let tensors = ggml .tensors .into_iter() .map(|(key, qtensor)| Ok((key, PyQTensor(Arc::new(qtensor)).into_py(py)))) .collect::<::candle::Result<Vec<_>>>() .map_err(wrap_err)?; let tensors = tensors.into_py_dict(py).to_object(py); let hparams = [ ("n_vocab", ggml.hparams.n_vocab), ("n_embd", ggml.hparams.n_embd), ("n_mult", ggml.hparams.n_mult), ("n_head", ggml.hparams.n_head), ("n_layer", ggml.hparams.n_layer), ("n_rot", ggml.hparams.n_rot), ("ftype", ggml.hparams.ftype), ]; let hparams = hparams.into_py_dict(py).to_object(py); let vocab = ggml .vocab .token_score_pairs .iter() .map(|(bytes, _)| String::from_utf8_lossy(bytes.as_slice()).to_string()) .collect::<Vec<String>>() .to_object(py); Ok((tensors, hparams, vocab)) } #[pyfunction] #[pyo3(text_signature = "(path:Union[str,PathLike], device: Optional[Device] = None)")] /// Loads a GGUF file. Returns a tuple of two dictionaries: the first maps tensor names to tensors, /// and the second maps metadata keys to metadata values. /// &RETURNS&: Tuple[Dict[str,QTensor], Dict[str,Any]] fn load_gguf( path: &str, device: Option<PyDevice>, py: Python<'_>, ) -> PyResult<(PyObject, PyObject)> { let device = device.unwrap_or(PyDevice::Cpu).as_device()?; use ::candle::quantized::gguf_file; fn gguf_value_to_pyobject(v: &gguf_file::Value, py: Python<'_>) -> PyResult<PyObject> { let v: PyObject = match v { gguf_file::Value::U8(x) => x.into_py(py), gguf_file::Value::I8(x) => x.into_py(py), gguf_file::Value::U16(x) => x.into_py(py), gguf_file::Value::I16(x) => x.into_py(py), gguf_file::Value::U32(x) => x.into_py(py), gguf_file::Value::I32(x) => x.into_py(py), gguf_file::Value::U64(x) => x.into_py(py), gguf_file::Value::I64(x) => x.into_py(py), gguf_file::Value::F32(x) => x.into_py(py), gguf_file::Value::F64(x) => x.into_py(py), gguf_file::Value::Bool(x) => x.into_py(py), gguf_file::Value::String(x) => x.into_py(py), gguf_file::Value::Array(x) => { let list = pyo3::types::PyList::empty(py); for elem in x.iter() { list.append(gguf_value_to_pyobject(elem, py)?)?; } list.into() } }; Ok(v) } let mut file = std::fs::File::open(path)?; let gguf = gguf_file::Content::read(&mut file).map_err(wrap_err)?; let tensors = gguf .tensor_infos .keys() .map(|key| { let qtensor = gguf.tensor(&mut file, key, &device)?; Ok((key, PyQTensor(Arc::new(qtensor)).into_py(py))) }) .collect::<::candle::Result<Vec<_>>>() .map_err(wrap_err)?; let tensors = tensors.into_py_dict(py).to_object(py); let metadata = gguf .metadata .iter() .map(|(key, value)| Ok((key, gguf_value_to_pyobject(value, py)?))) .collect::<PyResult<Vec<_>>>()? 
.into_py_dict(py) .to_object(py); Ok((tensors, metadata)) } #[pyfunction] #[pyo3( text_signature = "(path:Union[str,PathLike], tensors:Dict[str,QTensor], metadata:Dict[str,Any])" )] /// Save quantized tensors and metadata to a GGUF file. fn save_gguf(path: &str, tensors: PyObject, metadata: PyObject, py: Python<'_>) -> PyResult<()> { use ::candle::quantized::gguf_file; fn pyobject_to_gguf_value(v: &PyAny, py: Python<'_>) -> PyResult<gguf_file::Value> { let v: gguf_file::Value = if let Ok(x) = v.extract::<u8>() { gguf_file::Value::U8(x) } else if let Ok(x) = v.extract::<i8>() { gguf_file::Value::I8(x) } else if let Ok(x) = v.extract::<u16>() { gguf_file::Value::U16(x) } else if let Ok(x) = v.extract::<i16>() { gguf_file::Value::I16(x) } else if let Ok(x) = v.extract::<u32>() { gguf_file::Value::U32(x) } else if let Ok(x) = v.extract::<i32>() { gguf_file::Value::I32(x) } else if let Ok(x) = v.extract::<u64>() { gguf_file::Value::U64(x) } else if let Ok(x) = v.extract::<i64>() { gguf_file::Value::I64(x) } else if let Ok(x) = v.extract::<f32>() { gguf_file::Value::F32(x) } else if let Ok(x) = v.extract::<f64>() { gguf_file::Value::F64(x) } else if let Ok(x) = v.extract::<bool>() { gguf_file::Value::Bool(x) } else if let Ok(x) = v.extract::<String>() { gguf_file::Value::String(x) } else if let Ok(x) = v.extract::<Vec<PyObject>>() { let x = x .into_iter() .map(|f| pyobject_to_gguf_value(f.as_ref(py), py)) .collect::<PyResult<Vec<_>>>()?; gguf_file::Value::Array(x) } else { return Err(PyErr::new::<PyValueError, _>(format!( "unsupported type {:?}", v ))); }; Ok(v) } let tensors = tensors .extract::<&PyDict>(py) .map_err(|_| PyErr::new::<PyValueError, _>("expected a dict"))? .iter() .map(|(key, value)| { Ok(( key.extract::<String>() .map_err(|_| PyErr::new::<PyValueError, _>("keys must be strings"))?, value.extract::<PyQTensor>()?.0, )) }) .collect::<PyResult<Vec<_>>>()?; let metadata = metadata .extract::<&PyDict>(py) .map_err(|_| PyErr::new::<PyValueError, _>("expected a dict"))? .iter() .map(|(key, value)| { Ok(( key.extract::<String>() .map_err(|_| PyErr::new::<PyValueError, _>("keys must be strings"))?, pyobject_to_gguf_value(value, py)?, )) }) .collect::<PyResult<Vec<_>>>()?; let converted_metadata: Vec<_> = metadata .iter() .map(|(name, value)| (name.as_str(), value)) .collect(); let converted_tensors: Vec<_> = tensors .iter() .map(|(name, tensor)| (name.as_str(), tensor.as_ref())) .collect(); let mut file = std::fs::File::create(path)?; gguf_file::write(&mut file, &converted_metadata, &converted_tensors).map_err(wrap_err) } #[pyfunction] /// Returns true if the 'cuda' backend is available. /// &RETURNS&: bool fn cuda_is_available() -> bool { ::candle::utils::cuda_is_available() } #[pyfunction] /// Returns true if candle was compiled with 'accelerate' support. /// &RETURNS&: bool fn has_accelerate() -> bool { ::candle::utils::has_accelerate() } #[pyfunction] /// Returns true if candle was compiled with MKL support. /// &RETURNS&: bool fn has_mkl() -> bool { ::candle::utils::has_mkl() } #[pyfunction] /// Returns the number of threads used by candle. 
/// &RETURNS&: int fn get_num_threads() -> usize { ::candle::utils::get_num_threads() } fn candle_utils(_py: Python<'_>, m: &PyModule) -> PyResult<()> { m.add_function(wrap_pyfunction!(cuda_is_available, m)?)?; m.add_function(wrap_pyfunction!(get_num_threads, m)?)?; m.add_function(wrap_pyfunction!(has_accelerate, m)?)?; m.add_function(wrap_pyfunction!(has_mkl, m)?)?; m.add_function(wrap_pyfunction!(load_ggml, m)?)?; m.add_function(wrap_pyfunction!(load_gguf, m)?)?; m.add_function(wrap_pyfunction!(save_gguf, m)?)?; m.add_function(wrap_pyfunction!(load_safetensors, m)?)?; m.add_function(wrap_pyfunction!(save_safetensors, m)?)?; Ok(()) } #[pyfunction] #[pyo3(text_signature = "(tensor:Tensor, dim:int)")] /// Applies the Softmax function to a given tensor. /// &RETURNS&: Tensor fn softmax(tensor: PyTensor, dim: i64) -> PyResult<PyTensor> { let dim = actual_dim(&tensor, dim).map_err(wrap_err)?; let sm = candle_nn::ops::softmax(&tensor.0, dim).map_err(wrap_err)?; Ok(PyTensor(sm)) } #[pyfunction] #[pyo3(signature = (tensor, ksize, *, stride=1), text_signature = "(tensor:Tensor, ksize:int, stride:int=1)")] /// Applies the 2d avg-pool function to a given tensor. /// &RETURNS&: Tensor fn avg_pool2d(tensor: PyTensor, ksize: usize, stride: usize) -> PyResult<PyTensor> { let tensor = tensor .avg_pool2d_with_stride(ksize, stride) .map_err(wrap_err)?; Ok(PyTensor(tensor)) } #[pyfunction] #[pyo3(signature = (tensor, ksize, *, stride=1), text_signature = "(tensor:Tensor, ksize:int, stride:int=1)")] /// Applies the 2d max-pool function to a given tensor. /// &RETURNS&: Tensor fn max_pool2d(tensor: PyTensor, ksize: usize, stride: usize) -> PyResult<PyTensor> { let tensor = tensor .max_pool2d_with_stride(ksize, stride) .map_err(wrap_err)?; Ok(PyTensor(tensor)) } #[pyfunction] #[pyo3(text_signature = "(tensor:Tensor)")] /// Applies the Sigmoid Linear Unit (SiLU) function to a given tensor. /// &RETURNS&: Tensor fn silu(tensor: PyTensor) -> PyResult<PyTensor> { let s = candle_nn::ops::silu(&tensor.0).map_err(wrap_err)?; Ok(PyTensor(s)) } #[pyfunction] #[pyo3(text_signature = "(tensor:Tensor)")] /// Applies the Gaussian Error Linear Unit (GELU) function to a given tensor. /// &RETURNS&: Tensor fn gelu(tensor: PyTensor) -> PyResult<PyTensor> { let s = tensor.0.gelu_erf().map_err(wrap_err)?; Ok(PyTensor(s)) } #[pyfunction] #[pyo3(text_signature = "(tensor:Tensor)")] /// Applies the Rectified Linear Unit (ReLU) function to a given tensor. /// &RETURNS&: Tensor fn relu(tensor: PyTensor) -> PyResult<PyTensor> { let s = tensor.0.relu().map_err(wrap_err)?; Ok(PyTensor(s)) } #[pyfunction] #[pyo3(text_signature = "(tensor:Tensor)")] /// Applies the tanh function to a given tensor. 
/// &RETURNS&: Tensor fn tanh(tensor: PyTensor) -> PyResult<PyTensor> { let s = tensor.0.tanh().map_err(wrap_err)?; Ok(PyTensor(s)) } fn candle_functional_m(_py: Python<'_>, m: &PyModule) -> PyResult<()> { m.add_function(wrap_pyfunction!(silu, m)?)?; m.add_function(wrap_pyfunction!(softmax, m)?)?; m.add_function(wrap_pyfunction!(max_pool2d, m)?)?; m.add_function(wrap_pyfunction!(avg_pool2d, m)?)?; m.add_function(wrap_pyfunction!(gelu, m)?)?; m.add_function(wrap_pyfunction!(relu, m)?)?; m.add_function(wrap_pyfunction!(tanh, m)?)?; Ok(()) } #[cfg(feature = "onnx")] fn candle_onnx_m(_py: Python<'_>, m: &PyModule) -> PyResult<()> { use onnx::{PyONNXModel, PyONNXTensorDescriptor}; m.add_class::<PyONNXModel>()?; m.add_class::<PyONNXTensorDescriptor>()?; Ok(()) } #[pymodule] fn candle(py: Python<'_>, m: &PyModule) -> PyResult<()> { let utils = PyModule::new(py, "utils")?; candle_utils(py, utils)?; m.add_submodule(utils)?; let nn = PyModule::new(py, "functional")?; candle_functional_m(py, nn)?; m.add_submodule(nn)?; #[cfg(feature = "onnx")] { let onnx = PyModule::new(py, "onnx")?; candle_onnx_m(py, onnx)?; m.add_submodule(onnx)?; } m.add_class::<PyTensor>()?; m.add_class::<PyQTensor>()?; m.add_class::<PyDType>()?; m.add("u8", PyDType(DType::U8))?; m.add("u32", PyDType(DType::U32))?; m.add("i64", PyDType(DType::I64))?; m.add("bf16", PyDType(DType::BF16))?; m.add("f16", PyDType(DType::F16))?; m.add("f32", PyDType(DType::F32))?; m.add("f64", PyDType(DType::F64))?; m.add_function(wrap_pyfunction!(cat, m)?)?; m.add_function(wrap_pyfunction!(ones, m)?)?; m.add_function(wrap_pyfunction!(rand, m)?)?; m.add_function(wrap_pyfunction!(randn, m)?)?; m.add_function(wrap_pyfunction!(tensor, m)?)?; m.add_function(wrap_pyfunction!(stack, m)?)?; m.add_function(wrap_pyfunction!(zeros, m)?)?; Ok(()) }
candle/candle-pyo3/src/lib.rs/0
{ "file_path": "candle/candle-pyo3/src/lib.rs", "repo_id": "candle", "token_count": 29554 }
30
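The `matmul_t` binding above is a thin wrapper over candle's Rust quantized API, so the same flow can be sketched directly in Rust. This is only an illustration: the GGUF path, the tensor name `output.weight`, and the 2d `(out_dim, in_dim)` weight shape are hypothetical placeholders.

```rust
use std::sync::Arc;

use candle::quantized::{gguf_file, QMatMul};
use candle::{Device, Error, Result, Tensor};

// Rough Rust equivalent of load_gguf + QTensor.matmul_t from the bindings above.
fn quantized_matmul_sketch() -> Result<Tensor> {
    let device = Device::Cpu;
    // Hypothetical file and tensor name, purely for illustration.
    let mut file = std::fs::File::open("model-q4k.gguf").map_err(Error::wrap)?;
    let content = gguf_file::Content::read(&mut file)?;
    let qtensor = content.tensor(&mut file, "output.weight", &device)?;
    // Assume a 2d (out_dim, in_dim) weight; the lhs last dim must match in_dim.
    let in_dim = qtensor.shape().dims()[1];
    let qmatmul = QMatMul::from_arc(Arc::new(qtensor))?;
    let lhs = Tensor::randn(0f32, 1f32, (1, 4, in_dim), &device)?;
    qmatmul.forward(&lhs) // -> (1, 4, out_dim)
}
```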
use candle::{DType, Error, Result, Tensor}; use rand::{distributions::Distribution, SeedableRng}; pub struct LogitsProcessor { rng: rand::rngs::StdRng, temperature: Option<f64>, top_p: Option<f64>, } impl LogitsProcessor { pub fn new(seed: u64, temperature: Option<f64>, top_p: Option<f64>) -> Self { let temperature = if temperature.map_or(true, |v| v < 1e-7) { None } else { temperature }; Self { rng: rand::rngs::StdRng::seed_from_u64(seed), temperature, top_p, } } fn sample_argmax(&mut self, logits: Tensor) -> Result<u32> { let logits_v: Vec<f32> = logits.to_vec1()?; let next_token = logits_v .iter() .enumerate() .max_by(|(_, u), (_, v)| u.total_cmp(v)) .map(|(i, _)| i as u32) .unwrap(); Ok(next_token) } fn sample_multinomial(&mut self, prs: &Vec<f32>) -> Result<u32> { let distr = rand::distributions::WeightedIndex::new(prs).map_err(Error::wrap)?; let next_token = distr.sample(&mut self.rng) as u32; Ok(next_token) } fn sample_topp(&mut self, prs: &mut Vec<f32>, top_p: f32) -> Result<u32> { // top-p sampling (or "nucleus sampling") samples from the smallest set of // tokens that exceed probability top_p. This way we never sample tokens that // have very low probabilities and are less likely to go "off the rails". let mut argsort_indices = (0..prs.len()).collect::<Vec<_>>(); // Sort by descending probability. argsort_indices.sort_by(|&i, &j| prs[j].partial_cmp(&prs[i]).unwrap()); // Clamp smaller probabilities to zero. let mut cumsum = 0.; for index in &argsort_indices { if cumsum >= top_p { prs[*index] = 0.0; } else { cumsum += prs[*index]; } } // Sample with clamped probabilities. self.sample_multinomial(prs) } pub fn sample(&mut self, logits: &Tensor) -> Result<u32> { let logits = logits.to_dtype(DType::F32)?; let next_token = match self.temperature { None => self.sample_argmax(logits)?, Some(temperature) => { let logits = &(&logits / temperature)?; let prs = candle_nn::ops::softmax_last_dim(logits)?; let mut prs: Vec<f32> = prs.to_vec1()?; let top_p = self.top_p.unwrap_or(1.); if top_p <= 0.0 || top_p >= 1.0 { // simply sample from the predicted probability distribution self.sample_multinomial(&prs)? } else { // top-p (nucleus) sampling, clamping the least likely tokens to zero self.sample_topp(&mut prs, top_p as f32)? } } }; Ok(next_token) } }
candle/candle-transformers/src/generation/mod.rs/0
{ "file_path": "candle/candle-transformers/src/generation/mod.rs", "repo_id": "candle", "token_count": 1507 }
31
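A small usage sketch of the `LogitsProcessor` above; the seed, temperature, top-p and logits values are arbitrary. With a `None` (or near-zero) temperature the sampler falls back to plain argmax, otherwise it softmaxes the scaled logits and optionally applies the nucleus filter.

```rust
use candle::{Device, Result, Tensor};
use candle_transformers::generation::LogitsProcessor;

fn sample_next_token() -> Result<u32> {
    let device = Device::Cpu;
    // Toy logits over a 4-token vocabulary; a real model would produce these.
    let logits = Tensor::new(&[0.1f32, 2.5, 0.3, 1.0], &device)?;
    // Arguments: seed, temperature, top_p.
    let mut processor = LogitsProcessor::new(42, Some(0.8), Some(0.95));
    processor.sample(&logits)
}
```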
// T5 Text Model, quantized version // https://github.com/huggingface/transformers/blob/main/src/transformers/models/t5/modeling_t5.py use crate::models::t5::{deserialize_feed_forward_proj_activation, ActivationWithOptionalGating}; use crate::models::with_tracing::QMatMul; use crate::quantized_nn::Embedding; pub use crate::quantized_var_builder::VarBuilder; use candle::{DType, Device, Module, Result, Tensor, D}; use candle_nn::Activation; use serde::Deserialize; use std::sync::Arc; fn default_relative_attention_max_distance() -> usize { 128 } fn default_is_decoder() -> bool { false } fn default_use_cache() -> bool { true } fn default_tie_word_embeddings() -> bool { true } fn get_mask(size: usize, device: &Device) -> Result<Tensor> { let mask: Vec<_> = (0..size) .flat_map(|i| (0..size).map(move |j| u8::from(j > i))) .collect(); Tensor::from_slice(&mask, (size, size), device) } fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> { let shape = mask.shape(); let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?; let m = mask.where_cond(&on_true, on_false)?; Ok(m) } #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct Config { vocab_size: usize, d_model: usize, d_kv: usize, d_ff: usize, num_layers: usize, num_decoder_layers: Option<usize>, num_heads: usize, relative_attention_num_buckets: usize, #[serde(default = "default_relative_attention_max_distance")] relative_attention_max_distance: usize, dropout_rate: f64, layer_norm_epsilon: f64, initializer_factor: f64, #[serde(default, deserialize_with = "deserialize_feed_forward_proj_activation")] pub feed_forward_proj: ActivationWithOptionalGating, #[serde(default = "default_tie_word_embeddings")] tie_word_embeddings: bool, #[serde(default = "default_is_decoder")] is_decoder: bool, is_encoder_decoder: bool, #[serde(default = "default_use_cache")] pub use_cache: bool, pub pad_token_id: usize, pub eos_token_id: usize, pub decoder_start_token_id: Option<usize>, } impl Default for Config { fn default() -> Self { Self { vocab_size: 32128, d_model: 512, d_kv: 64, d_ff: 2048, num_layers: 6, num_decoder_layers: None, num_heads: 8, relative_attention_num_buckets: 32, relative_attention_max_distance: 128, dropout_rate: 0.1, layer_norm_epsilon: 1e-6, initializer_factor: 1.0, feed_forward_proj: ActivationWithOptionalGating { gated: false, activation: Activation::Relu, }, tie_word_embeddings: true, is_decoder: false, is_encoder_decoder: true, use_cache: true, pad_token_id: 0, eos_token_id: 1, decoder_start_token_id: Some(0), } } } #[derive(Debug, Clone)] struct T5LayerNorm { weight: Tensor, variance_epsilon: f64, span: tracing::Span, } impl T5LayerNorm { fn load(h: usize, eps: f64, vb: VarBuilder) -> Result<Self> { let weight = vb.get(h, "weight")?.dequantize(vb.device())?; Ok(Self { weight, variance_epsilon: eps, span: tracing::span!(tracing::Level::TRACE, "layer-norm"), }) } } impl Module for T5LayerNorm { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let dtype = xs.dtype(); let xs_f32 = xs.to_dtype(DType::F32)?; // variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) let variance = xs_f32.sqr()?.mean_keepdim(D::Minus1)?; let xs = xs.broadcast_div(&(variance + self.variance_epsilon)?.sqrt()?)?; let xs = xs.to_dtype(dtype)?; let xs = xs.broadcast_mul(&self.weight)?; Ok(xs) } } #[derive(Debug, Clone)] struct T5DenseActDense { wi: QMatMul, wo: QMatMul, act: Activation, span: tracing::Span, } impl T5DenseActDense { fn load(vb: VarBuilder, cfg: 
&Config) -> Result<Self> { let wi = QMatMul::new(cfg.d_model, cfg.d_ff, vb.pp("wi"))?; let wo = QMatMul::new(cfg.d_ff, cfg.d_model, vb.pp("wo"))?; Ok(Self { wi, wo, act: Activation::Relu, span: tracing::span!(tracing::Level::TRACE, "dense-act-dense"), }) } } impl Module for T5DenseActDense { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let xs = self.wi.forward(xs)?; let xs = self.act.forward(&xs)?; let xs = self.wo.forward(&xs)?; Ok(xs) } } #[derive(Debug, Clone)] struct T5DenseGatedActDense { wi_0: QMatMul, wi_1: QMatMul, wo: QMatMul, act: Activation, span: tracing::Span, } impl T5DenseGatedActDense { fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let wi_0 = QMatMul::new(cfg.d_model, cfg.d_ff, vb.pp("wi_0"))?; let wi_1 = QMatMul::new(cfg.d_model, cfg.d_ff, vb.pp("wi_1"))?; let wo = QMatMul::new(cfg.d_ff, cfg.d_model, vb.pp("wo"))?; Ok(Self { wi_0, wi_1, wo, act: cfg.feed_forward_proj.activation, span: tracing::span!(tracing::Level::TRACE, "dense-gated-act-dense"), }) } } impl Module for T5DenseGatedActDense { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let hidden_gelu = self.act.forward(&self.wi_0.forward(xs)?)?; let hidden_linear = self.wi_1.forward(xs)?; let xs = hidden_gelu.broadcast_mul(&hidden_linear)?; let xs = self.wo.forward(&xs)?; Ok(xs) } } #[derive(Debug, Clone)] struct T5LayerFF { dense_act: Option<T5DenseActDense>, gated_dense_act: Option<T5DenseGatedActDense>, layer_norm: T5LayerNorm, span: tracing::Span, } impl T5LayerFF { fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let layer_norm = T5LayerNorm::load(cfg.d_model, cfg.layer_norm_epsilon, vb.pp("layer_norm"))?; let (dense_act, gated_dense_act) = if cfg.feed_forward_proj.gated { ( None, Some(T5DenseGatedActDense::load(vb.pp("DenseReluDense"), cfg)?), ) } else { ( Some(T5DenseActDense::load(vb.pp("DenseReluDense"), cfg)?), None, ) }; Ok(Self { dense_act, gated_dense_act, layer_norm, span: tracing::span!(tracing::Level::TRACE, "layer-ff"), }) } } impl Module for T5LayerFF { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let ys = self.layer_norm.forward(xs)?; let ys = match &self.dense_act { Some(dense_act) => dense_act.forward(&ys)?, None => self.gated_dense_act.as_ref().unwrap().forward(&ys)?, }; let xs = (xs + ys)?; Ok(xs) } } #[derive(Debug, Clone)] struct T5Attention { q: QMatMul, k: QMatMul, v: QMatMul, o: QMatMul, n_heads: usize, d_kv: usize, relative_attention_bias: Option<Embedding>, relative_attention_num_buckets: usize, relative_attention_max_distance: usize, inner_dim: usize, use_cache: bool, kv_cache: Option<(Tensor, Tensor)>, span: tracing::Span, span_cache: tracing::Span, span_mm: tracing::Span, span_sm: tracing::Span, } impl T5Attention { fn load( has_relative_attention_bias: bool, decoder: bool, vb: VarBuilder, cfg: &Config, ) -> Result<Self> { let inner_dim = cfg.num_heads * cfg.d_kv; let q = QMatMul::new(cfg.d_model, inner_dim, vb.pp("q"))?; let k = QMatMul::new(cfg.d_model, inner_dim, vb.pp("k"))?; let v = QMatMul::new(cfg.d_model, inner_dim, vb.pp("v"))?; let o = QMatMul::new(inner_dim, cfg.d_model, vb.pp("o"))?; let relative_attention_bias = if has_relative_attention_bias { let emb = Embedding::new( cfg.relative_attention_num_buckets, cfg.num_heads, vb.pp("relative_attention_bias"), )?; Some(emb) } else { None }; Ok(Self { q, k, v, o, n_heads: cfg.num_heads, d_kv: cfg.d_kv, relative_attention_bias, relative_attention_num_buckets: cfg.relative_attention_num_buckets, 
relative_attention_max_distance: cfg.relative_attention_max_distance, inner_dim, use_cache: cfg.use_cache && decoder, kv_cache: None, span: tracing::span!(tracing::Level::TRACE, "attention"), span_cache: tracing::span!(tracing::Level::TRACE, "attention-cache"), span_mm: tracing::span!(tracing::Level::TRACE, "attention-mm"), span_sm: tracing::span!(tracing::Level::TRACE, "attention-sm"), }) } fn forward( &mut self, xs: &Tensor, position_bias: Option<&Tensor>, key_value_states: Option<&Tensor>, mask: Option<&Tensor>, ) -> Result<(Tensor, Option<Tensor>)> { // Performs Self-attention (if key_value_states is None) or attention // over source sentence (provided by key_value_states). let _enter = self.span.enter(); let kv_input = match key_value_states { None => xs, Some(key_value_states) => key_value_states, }; let (b_sz, q_len) = (xs.dim(0)?, xs.dim(1)?); let kv_len = kv_input.dim(1)?; let q = self.q.forward(xs)?; let k = self.k.forward(kv_input)?; let v = self.v.forward(kv_input)?; let q = q .reshape((b_sz, q_len, self.n_heads, self.d_kv))? .transpose(1, 2)? .contiguous()?; let mut k = k .reshape((b_sz, kv_len, self.n_heads, self.d_kv))? .transpose(1, 2)?; let mut v = v .reshape((b_sz, kv_len, self.n_heads, self.d_kv))? .transpose(1, 2)?; if self.use_cache && key_value_states.is_none() { let _enter = self.span_cache.enter(); if let Some((kv_cache_k, kv_cache_v)) = &self.kv_cache { k = Tensor::cat(&[kv_cache_k, &k], 2)?; v = Tensor::cat(&[kv_cache_v, &v], 2)?; }; self.kv_cache = Some((k.clone(), v.clone())); }; let k = k.contiguous()?; let v = v.contiguous()?; // TODO: Use flash_attn. let scores = { let _enter = self.span_mm.enter(); q.matmul(&k.t()?)? }; let scores = match mask { None => scores, Some(mask) => masked_fill( &scores, &mask .unsqueeze(0)? .unsqueeze(0)? .repeat((b_sz, self.n_heads))?, f32::NEG_INFINITY, )?, }; let (scores, position_bias) = match position_bias { Some(position_bias) => ( scores.broadcast_add(position_bias)?, Some(position_bias.clone()), ), None => match &self.relative_attention_bias { None => (scores, None), Some(relative_attention_bias) => { // This only handles the bidirectional case. let kv_len = k.dim(2)?; let (q_start, q_end) = match self.use_cache { true => ((kv_len - q_len) as u32, kv_len as u32), false => (0_u32, kv_len as u32), }; let num_buckets = self.relative_attention_num_buckets as u32 / 2; let max_exact = num_buckets / 2; let relative_position = (q_start..q_end) .map(|i| { (0..kv_len as u32) .map(|j| { if i < j { if j - i < max_exact { j - i + num_buckets } else { let b = f32::log( (j - i) as f32 / max_exact as f32, self.relative_attention_max_distance as f32 / max_exact as f32, ) * (num_buckets - max_exact) as f32; u32::min( max_exact + num_buckets + b as u32, self.relative_attention_num_buckets as u32 - 1, ) } } else if i - j < max_exact { i - j } else { let b = f32::log( (i - j) as f32 / max_exact as f32, self.relative_attention_max_distance as f32 / max_exact as f32, ) * (num_buckets - max_exact) as f32; max_exact + b as u32 } }) .collect::<Vec<u32>>() }) .collect::<Vec<Vec<_>>>(); let relative_buckets = Tensor::new(relative_position, q.device())?; let position_bias = relative_attention_bias .forward(&relative_buckets)? .permute((2, 0, 1))? .unsqueeze(0)?; (scores.broadcast_add(&position_bias)?, Some(position_bias)) // TODO: position_bias_masked? } }, }; let attn_weights = { let _enter = self.span_sm.enter(); candle_nn::ops::softmax_last_dim(&scores)? }; let attn_output = attn_weights.matmul(&v)?; let attn_output = attn_output .transpose(1, 2)? 
.reshape((b_sz, q_len, self.inner_dim))?; let attn_output = self.o.forward(&attn_output)?; Ok((attn_output, position_bias)) } fn clear_kv_cache(&mut self) { self.kv_cache = None } } #[derive(Debug, Clone)] struct T5LayerSelfAttention { self_attention: T5Attention, layer_norm: T5LayerNorm, span: tracing::Span, } impl T5LayerSelfAttention { fn load(h: bool, d: bool, vb: VarBuilder, cfg: &Config) -> Result<Self> { let self_attention = T5Attention::load(h, d, vb.pp("SelfAttention"), cfg)?; let layer_norm = T5LayerNorm::load(cfg.d_model, cfg.layer_norm_epsilon, vb.pp("layer_norm"))?; Ok(Self { self_attention, layer_norm, span: tracing::span!(tracing::Level::TRACE, "self-attn"), }) } fn forward( &mut self, xs: &Tensor, position_bias: Option<&Tensor>, mask: Option<&Tensor>, ) -> Result<(Tensor, Option<Tensor>)> { let _enter = self.span.enter(); let normed_xs = self.layer_norm.forward(xs)?; let (ys, position_bias) = self.self_attention .forward(&normed_xs, position_bias, None, mask)?; let ys = (xs + ys)?; Ok((ys, position_bias)) } fn clear_kv_cache(&mut self) { self.self_attention.clear_kv_cache() } } #[derive(Debug, Clone)] struct T5LayerCrossAttention { cross_attention: T5Attention, layer_norm: T5LayerNorm, span: tracing::Span, } impl T5LayerCrossAttention { fn load(decoder: bool, vb: VarBuilder, cfg: &Config) -> Result<Self> { let cross_attention = T5Attention::load(false, decoder, vb.pp("EncDecAttention"), cfg)?; let layer_norm = T5LayerNorm::load(cfg.d_model, cfg.layer_norm_epsilon, vb.pp("layer_norm"))?; Ok(Self { cross_attention, layer_norm, span: tracing::span!(tracing::Level::TRACE, "cross-attn"), }) } fn forward( &mut self, hidden_states: &Tensor, position_bias: Option<&Tensor>, key_value_states: &Tensor, ) -> Result<(Tensor, Option<Tensor>)> { let _enter = self.span.enter(); let normed_hidden_states = self.layer_norm.forward(hidden_states)?; let (ys, position_bias) = self.cross_attention.forward( &normed_hidden_states, position_bias, Some(key_value_states), None, )?; let ys = (hidden_states + ys)?; Ok((ys, position_bias)) } fn clear_kv_cache(&mut self) { self.cross_attention.clear_kv_cache() } } #[derive(Debug, Clone)] struct T5Block { self_attn: T5LayerSelfAttention, cross_attn: Option<T5LayerCrossAttention>, ff: T5LayerFF, span: tracing::Span, } impl T5Block { fn load( has_relative_attention_bias: bool, decoder: bool, vb: VarBuilder, cfg: &Config, ) -> Result<Self> { let vb = vb.pp("layer"); let self_attn = T5LayerSelfAttention::load(has_relative_attention_bias, decoder, vb.pp("0"), cfg)?; let cross_attn = if cfg.is_decoder { Some(T5LayerCrossAttention::load(decoder, vb.pp("1"), cfg)?) } else { None }; let ff_i = if cross_attn.is_some() { 2 } else { 1 }; let ff = T5LayerFF::load(vb.pp(ff_i), cfg)?; Ok(Self { self_attn, cross_attn, ff, span: tracing::span!(tracing::Level::TRACE, "block"), }) } fn forward( &mut self, xs: &Tensor, position_bias: Option<&Tensor>, encoder_hidden_states: Option<&Tensor>, ) -> Result<(Tensor, Option<Tensor>)> { let _enter = self.span.enter(); // TODO: Cache masks let mask = match self.cross_attn.is_some() { true => { let mask_len = xs.dim(1)?; // If the input seq length is 1, no need for a mask, this is also helpful to avoid shape // issues when using the KV cache in the decoder. if mask_len <= 1 { None } else { Some(get_mask(mask_len, xs.device())?) } } false => None, }; let (mut xs, position_bias) = self.self_attn.forward(xs, position_bias, mask.as_ref())?; // TODO: clamp for f16? 
if let Some(cross_attn) = &mut self.cross_attn { (xs, _) = cross_attn.forward(&xs, None, encoder_hidden_states.unwrap())?; // TODO: clamp for f16? } let xs = self.ff.forward(&xs)?; // TODO: clamp for f16? Ok((xs, position_bias)) } fn clear_kv_cache(&mut self) { self.self_attn.clear_kv_cache(); self.cross_attn.iter_mut().for_each(|c| c.clear_kv_cache()); } } #[derive(Debug, Clone)] struct T5Stack { block: Vec<T5Block>, shared: Arc<Embedding>, final_layer_norm: T5LayerNorm, span: tracing::Span, } impl T5Stack { fn load(decoder: bool, vb: VarBuilder, shared: &Arc<Embedding>, cfg: &Config) -> Result<Self> { let block = (0..cfg.num_layers) .map(|i| T5Block::load(i == 0, decoder, vb.pp(format!("block.{i}")), cfg)) .collect::<Result<Vec<_>>>()?; let final_layer_norm = T5LayerNorm::load( cfg.d_model, cfg.layer_norm_epsilon, vb.pp("final_layer_norm"), )?; Ok(Self { block, shared: shared.clone(), final_layer_norm, span: tracing::span!(tracing::Level::TRACE, "stack"), }) } fn forward( &mut self, input_ids: &Tensor, encoder_hidden_states: Option<&Tensor>, ) -> Result<Tensor> { let _enter = self.span.enter(); let input_embeds = self.shared.as_ref().forward(input_ids)?; let mut hidden_states = input_embeds; let mut position_bias = None; for block in self.block.iter_mut() { (hidden_states, position_bias) = block.forward( &hidden_states, position_bias.as_ref(), encoder_hidden_states, )? } self.final_layer_norm.forward(&hidden_states) } fn clear_kv_cache(&mut self) { self.block.iter_mut().for_each(|b| b.clear_kv_cache()) } } #[derive(Debug, Clone)] pub struct T5EncoderModel { encoder: T5Stack, device: Device, span: tracing::Span, } impl T5EncoderModel { pub fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let shared_vb = if vb.contains_key("shared.weight") { vb.pp("shared") } else { vb.pp("decoder").pp("embed_tokens") }; let shared = Embedding::new(cfg.vocab_size, cfg.d_model, shared_vb)?; let shared = Arc::new(shared); let encoder = T5Stack::load(false, vb.pp("encoder"), &shared, cfg)?; Ok(Self { encoder, device: vb.device().clone(), span: tracing::span!(tracing::Level::TRACE, "encoder"), }) } pub fn forward(&mut self, input_ids: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); self.encoder.forward(input_ids, None) } pub fn device(&self) -> &Device { &self.device } pub fn clear_kv_cache(&mut self) { self.encoder.clear_kv_cache() } } #[derive(Debug, Clone)] pub struct T5ForConditionalGeneration { encoder: T5Stack, decoder: T5Stack, d_model: usize, tie_word_embeddings: bool, lm_head: Option<QMatMul>, shared: Arc<Embedding>, device: Device, span_decode: tracing::Span, span_decode_head: tracing::Span, } impl T5ForConditionalGeneration { pub fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { assert!(cfg.is_encoder_decoder); let d_model = cfg.d_model; let shared_vb = if vb.contains_key("shared.weight") { vb.pp("shared") } else { vb.pp("decoder").pp("embed_tokens") }; let shared = Embedding::new(cfg.vocab_size, cfg.d_model, shared_vb)?; let shared = Arc::new(shared); let mut encoder_cfg = cfg.clone(); encoder_cfg.is_decoder = false; encoder_cfg.use_cache = false; encoder_cfg.is_encoder_decoder = false; let encoder = T5Stack::load(false, vb.pp("encoder"), &shared, &encoder_cfg)?; let mut decoder_cfg = cfg.clone(); decoder_cfg.is_decoder = true; decoder_cfg.is_encoder_decoder = false; decoder_cfg.num_layers = cfg.num_decoder_layers.unwrap_or(cfg.num_layers); let decoder = T5Stack::load(true, vb.pp("decoder"), &shared, &decoder_cfg)?; let tie_word_embeddings = cfg.tie_word_embeddings; let 
lm_head = if tie_word_embeddings { None } else { Some(QMatMul::new(cfg.d_model, cfg.vocab_size, vb.pp("lm_head"))?) }; Ok(Self { encoder, decoder, d_model, tie_word_embeddings, lm_head, shared, device: vb.device().clone(), span_decode: tracing::span!(tracing::Level::TRACE, "decode"), span_decode_head: tracing::span!(tracing::Level::TRACE, "decode-head"), }) } pub fn encode(&mut self, input_ids: &Tensor) -> Result<Tensor> { self.encoder.forward(input_ids, None) } pub fn decode( &mut self, decoder_input_ids: &Tensor, encoder_output: &Tensor, ) -> Result<Tensor> { let _enter = self.span_decode.enter(); let decoder_output = self .decoder .forward(decoder_input_ids, Some(encoder_output))?; let scaling_factor = if self.tie_word_embeddings { // Rescale output before projecting on vocab // See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 (self.d_model as f64).sqrt() } else { 1.0 }; let sequence_output = ((decoder_output .narrow(1, decoder_output.dim(1)? - 1, 1)? .squeeze(1)?) * scaling_factor)?; let output = { let _enter = self.span_decode_head.enter(); match self.lm_head { None => sequence_output.matmul(&self.shared.embeddings().t()?)?, Some(ref lm_head) => lm_head.forward(&sequence_output)?, } }; Ok(output) } pub fn forward(&mut self, input_ids: &Tensor, decoder_input_ids: &Tensor) -> Result<Tensor> { let encoder_output = self.encode(input_ids)?; self.decode(decoder_input_ids, &encoder_output) } pub fn device(&self) -> &Device { &self.device } pub fn clear_kv_cache(&mut self) { self.encoder.clear_kv_cache(); self.decoder.clear_kv_cache(); } }
candle/candle-transformers/src/models/quantized_t5.rs/0
{ "file_path": "candle/candle-transformers/src/models/quantized_t5.rs", "repo_id": "candle", "token_count": 13996 }
32
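A minimal encoder-only sketch for the quantized T5 model above. The file names and token ids are placeholders, `serde_json` is assumed to be available for parsing the config, and the exact `VarBuilder::from_gguf` signature (path plus target device) is an assumption about this revision.

```rust
use candle::{Device, Error, Result, Tensor};
use candle_transformers::models::quantized_t5 as t5;

fn encode_with_quantized_t5() -> Result<Tensor> {
    let device = Device::Cpu;
    // Assumption: from_gguf takes the gguf path and the target device here.
    let vb = t5::VarBuilder::from_gguf("model-q4k.gguf", &device)?;
    let config_json = std::fs::read_to_string("config.json").map_err(Error::wrap)?;
    let config: t5::Config = serde_json::from_str(&config_json).map_err(Error::wrap)?;
    let mut model = t5::T5EncoderModel::load(vb, &config)?;
    // Placeholder ids; a real pipeline would use the matching tokenizer.
    let input_ids = Tensor::new(&[[37u32, 1204, 5, 1]], &device)?;
    model.forward(&input_ids) // (1, seq_len, d_model)
}
```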
pub mod attention; pub mod clip; pub mod ddim; pub mod ddpm; pub mod embeddings; pub mod euler_ancestral_discrete; pub mod resnet; pub mod schedulers; pub mod unet_2d; pub mod unet_2d_blocks; pub mod utils; pub mod vae; use std::sync::Arc; use candle::{DType, Device, Result}; use candle_nn as nn; use self::schedulers::{Scheduler, SchedulerConfig}; #[derive(Clone, Debug)] pub struct StableDiffusionConfig { pub width: usize, pub height: usize, pub clip: clip::Config, pub clip2: Option<clip::Config>, autoencoder: vae::AutoEncoderKLConfig, unet: unet_2d::UNet2DConditionModelConfig, scheduler: Arc<dyn SchedulerConfig>, } impl StableDiffusionConfig { pub fn v1_5( sliced_attention_size: Option<usize>, height: Option<usize>, width: Option<usize>, ) -> Self { let bc = |out_channels, use_cross_attn, attention_head_dim| unet_2d::BlockConfig { out_channels, use_cross_attn, attention_head_dim, }; // https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/unet/config.json let unet = unet_2d::UNet2DConditionModelConfig { blocks: vec![ bc(320, Some(1), 8), bc(640, Some(1), 8), bc(1280, Some(1), 8), bc(1280, None, 8), ], center_input_sample: false, cross_attention_dim: 768, downsample_padding: 1, flip_sin_to_cos: true, freq_shift: 0., layers_per_block: 2, mid_block_scale_factor: 1., norm_eps: 1e-5, norm_num_groups: 32, sliced_attention_size, use_linear_projection: false, }; let autoencoder = vae::AutoEncoderKLConfig { block_out_channels: vec![128, 256, 512, 512], layers_per_block: 2, latent_channels: 4, norm_num_groups: 32, }; let height = if let Some(height) = height { assert_eq!(height % 8, 0, "height has to be divisible by 8"); height } else { 512 }; let width = if let Some(width) = width { assert_eq!(width % 8, 0, "width has to be divisible by 8"); width } else { 512 }; let scheduler = Arc::new(ddim::DDIMSchedulerConfig { prediction_type: schedulers::PredictionType::Epsilon, ..Default::default() }); StableDiffusionConfig { width, height, clip: clip::Config::v1_5(), clip2: None, autoencoder, scheduler, unet, } } fn v2_1_( sliced_attention_size: Option<usize>, height: Option<usize>, width: Option<usize>, prediction_type: schedulers::PredictionType, ) -> Self { let bc = |out_channels, use_cross_attn, attention_head_dim| unet_2d::BlockConfig { out_channels, use_cross_attn, attention_head_dim, }; // https://huggingface.co/stabilityai/stable-diffusion-2-1/blob/main/unet/config.json let unet = unet_2d::UNet2DConditionModelConfig { blocks: vec![ bc(320, Some(1), 5), bc(640, Some(1), 10), bc(1280, Some(1), 20), bc(1280, None, 20), ], center_input_sample: false, cross_attention_dim: 1024, downsample_padding: 1, flip_sin_to_cos: true, freq_shift: 0., layers_per_block: 2, mid_block_scale_factor: 1., norm_eps: 1e-5, norm_num_groups: 32, sliced_attention_size, use_linear_projection: true, }; // https://huggingface.co/stabilityai/stable-diffusion-2-1/blob/main/vae/config.json let autoencoder = vae::AutoEncoderKLConfig { block_out_channels: vec![128, 256, 512, 512], layers_per_block: 2, latent_channels: 4, norm_num_groups: 32, }; let scheduler = Arc::new(ddim::DDIMSchedulerConfig { prediction_type, ..Default::default() }); let height = if let Some(height) = height { assert_eq!(height % 8, 0, "height has to be divisible by 8"); height } else { 768 }; let width = if let Some(width) = width { assert_eq!(width % 8, 0, "width has to be divisible by 8"); width } else { 768 }; StableDiffusionConfig { width, height, clip: clip::Config::v2_1(), clip2: None, autoencoder, scheduler, unet, } } pub fn v2_1( 
sliced_attention_size: Option<usize>, height: Option<usize>, width: Option<usize>, ) -> Self { // https://huggingface.co/stabilityai/stable-diffusion-2-1/blob/main/scheduler/scheduler_config.json Self::v2_1_( sliced_attention_size, height, width, schedulers::PredictionType::VPrediction, ) } fn sdxl_( sliced_attention_size: Option<usize>, height: Option<usize>, width: Option<usize>, prediction_type: schedulers::PredictionType, ) -> Self { let bc = |out_channels, use_cross_attn, attention_head_dim| unet_2d::BlockConfig { out_channels, use_cross_attn, attention_head_dim, }; // https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/unet/config.json let unet = unet_2d::UNet2DConditionModelConfig { blocks: vec![ bc(320, None, 5), bc(640, Some(2), 10), bc(1280, Some(10), 20), ], center_input_sample: false, cross_attention_dim: 2048, downsample_padding: 1, flip_sin_to_cos: true, freq_shift: 0., layers_per_block: 2, mid_block_scale_factor: 1., norm_eps: 1e-5, norm_num_groups: 32, sliced_attention_size, use_linear_projection: true, }; // https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/vae/config.json let autoencoder = vae::AutoEncoderKLConfig { block_out_channels: vec![128, 256, 512, 512], layers_per_block: 2, latent_channels: 4, norm_num_groups: 32, }; let scheduler = Arc::new(ddim::DDIMSchedulerConfig { prediction_type, ..Default::default() }); let height = if let Some(height) = height { assert_eq!(height % 8, 0, "height has to be divisible by 8"); height } else { 1024 }; let width = if let Some(width) = width { assert_eq!(width % 8, 0, "width has to be divisible by 8"); width } else { 1024 }; StableDiffusionConfig { width, height, clip: clip::Config::sdxl(), clip2: Some(clip::Config::sdxl2()), autoencoder, scheduler, unet, } } fn sdxl_turbo_( sliced_attention_size: Option<usize>, height: Option<usize>, width: Option<usize>, prediction_type: schedulers::PredictionType, ) -> Self { let bc = |out_channels, use_cross_attn, attention_head_dim| unet_2d::BlockConfig { out_channels, use_cross_attn, attention_head_dim, }; // https://huggingface.co/stabilityai/sdxl-turbo/blob/main/unet/config.json let unet = unet_2d::UNet2DConditionModelConfig { blocks: vec![ bc(320, None, 5), bc(640, Some(2), 10), bc(1280, Some(10), 20), ], center_input_sample: false, cross_attention_dim: 2048, downsample_padding: 1, flip_sin_to_cos: true, freq_shift: 0., layers_per_block: 2, mid_block_scale_factor: 1., norm_eps: 1e-5, norm_num_groups: 32, sliced_attention_size, use_linear_projection: true, }; // https://huggingface.co/stabilityai/sdxl-turbo/blob/main/vae/config.json let autoencoder = vae::AutoEncoderKLConfig { block_out_channels: vec![128, 256, 512, 512], layers_per_block: 2, latent_channels: 4, norm_num_groups: 32, }; let scheduler = Arc::new( euler_ancestral_discrete::EulerAncestralDiscreteSchedulerConfig { prediction_type, timestep_spacing: schedulers::TimestepSpacing::Trailing, ..Default::default() }, ); let height = if let Some(height) = height { assert_eq!(height % 8, 0, "height has to be divisible by 8"); height } else { 512 }; let width = if let Some(width) = width { assert_eq!(width % 8, 0, "width has to be divisible by 8"); width } else { 512 }; Self { width, height, clip: clip::Config::sdxl(), clip2: Some(clip::Config::sdxl2()), autoencoder, scheduler, unet, } } pub fn sdxl( sliced_attention_size: Option<usize>, height: Option<usize>, width: Option<usize>, ) -> Self { Self::sdxl_( sliced_attention_size, height, width, // 
https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/scheduler/scheduler_config.json schedulers::PredictionType::Epsilon, ) } pub fn sdxl_turbo( sliced_attention_size: Option<usize>, height: Option<usize>, width: Option<usize>, ) -> Self { Self::sdxl_turbo_( sliced_attention_size, height, width, // https://huggingface.co/stabilityai/sdxl-turbo/blob/main/scheduler/scheduler_config.json schedulers::PredictionType::Epsilon, ) } pub fn ssd1b( sliced_attention_size: Option<usize>, height: Option<usize>, width: Option<usize>, ) -> Self { let bc = |out_channels, use_cross_attn, attention_head_dim| unet_2d::BlockConfig { out_channels, use_cross_attn, attention_head_dim, }; // https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/unet/config.json let unet = unet_2d::UNet2DConditionModelConfig { blocks: vec![ bc(320, None, 5), bc(640, Some(2), 10), bc(1280, Some(10), 20), ], center_input_sample: false, cross_attention_dim: 2048, downsample_padding: 1, flip_sin_to_cos: true, freq_shift: 0., layers_per_block: 2, mid_block_scale_factor: 1., norm_eps: 1e-5, norm_num_groups: 32, sliced_attention_size, use_linear_projection: true, }; // https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/vae/config.json let autoencoder = vae::AutoEncoderKLConfig { block_out_channels: vec![128, 256, 512, 512], layers_per_block: 2, latent_channels: 4, norm_num_groups: 32, }; let scheduler = Arc::new(ddim::DDIMSchedulerConfig { ..Default::default() }); let height = if let Some(height) = height { assert_eq!(height % 8, 0, "height has to be divisible by 8"); height } else { 1024 }; let width = if let Some(width) = width { assert_eq!(width % 8, 0, "width has to be divisible by 8"); width } else { 1024 }; Self { width, height, clip: clip::Config::ssd1b(), clip2: Some(clip::Config::ssd1b2()), autoencoder, scheduler, unet, } } pub fn build_vae<P: AsRef<std::path::Path>>( &self, vae_weights: P, device: &Device, dtype: DType, ) -> Result<vae::AutoEncoderKL> { let vs_ae = unsafe { nn::VarBuilder::from_mmaped_safetensors(&[vae_weights], dtype, device)? }; // https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/vae/config.json let autoencoder = vae::AutoEncoderKL::new(vs_ae, 3, 3, self.autoencoder.clone())?; Ok(autoencoder) } pub fn build_unet<P: AsRef<std::path::Path>>( &self, unet_weights: P, device: &Device, in_channels: usize, use_flash_attn: bool, dtype: DType, ) -> Result<unet_2d::UNet2DConditionModel> { let vs_unet = unsafe { nn::VarBuilder::from_mmaped_safetensors(&[unet_weights], dtype, device)? }; let unet = unet_2d::UNet2DConditionModel::new( vs_unet, in_channels, 4, use_flash_attn, self.unet.clone(), )?; Ok(unet) } pub fn build_scheduler(&self, n_steps: usize) -> Result<Box<dyn Scheduler>> { self.scheduler.build(n_steps) } } pub fn build_clip_transformer<P: AsRef<std::path::Path>>( clip: &clip::Config, clip_weights: P, device: &Device, dtype: DType, ) -> Result<clip::ClipTextTransformer> { let vs = unsafe { nn::VarBuilder::from_mmaped_safetensors(&[clip_weights], dtype, device)? }; let text_model = clip::ClipTextTransformer::new(vs, clip)?; Ok(text_model) }
candle/candle-transformers/src/models/stable_diffusion/mod.rs/0
{ "file_path": "candle/candle-transformers/src/models/stable_diffusion/mod.rs", "repo_id": "candle", "token_count": 7668 }
33
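Tying the pieces above together, the config is typically built first and then used to construct the scheduler, VAE and UNet. A sketch with placeholder weight paths (a real run would point at the corresponding safetensors files):

```rust
use candle::{DType, Device, Result};
use candle_transformers::models::stable_diffusion::StableDiffusionConfig;

fn build_sd_v1_5_components() -> Result<()> {
    let device = Device::Cpu;
    let dtype = DType::F32;
    // Defaults to 512x512 when no height/width overrides are given.
    let cfg = StableDiffusionConfig::v1_5(None, None, None);
    let _scheduler = cfg.build_scheduler(30)?;
    let _vae = cfg.build_vae("vae.safetensors", &device, dtype)?;
    // 4 latent channels in, no flash attention.
    let _unet = cfg.build_unet("unet.safetensors", &device, 4, false, dtype)?;
    Ok(())
}
```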
use candle::{Module, Result, Tensor}; use candle_nn::VarBuilder; #[derive(Debug, Clone)] pub struct Embedding { inner: candle_nn::Embedding, span: tracing::Span, } impl Embedding { pub fn new(d1: usize, d2: usize, vb: VarBuilder) -> Result<Self> { let inner = candle_nn::embedding(d1, d2, vb)?; let span = tracing::span!(tracing::Level::TRACE, "embedding"); Ok(Self { inner, span }) } pub fn from_weights(weights: Tensor) -> Result<Self> { let (_in_size, out_size) = weights.dims2()?; let inner = candle_nn::Embedding::new(weights, out_size); let span = tracing::span!(tracing::Level::TRACE, "embedding"); Ok(Self { inner, span }) } pub fn embeddings(&self) -> &Tensor { self.inner.embeddings() } } impl Module for Embedding { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); self.inner.forward(xs) } } #[derive(Debug, Clone)] pub struct Linear { inner: candle_nn::Linear, span: tracing::Span, } impl Linear { pub fn from_weights(weights: Tensor, bias: Option<Tensor>) -> Self { let inner = candle_nn::Linear::new(weights, bias); let span = tracing::span!(tracing::Level::TRACE, "linear"); Self { inner, span } } } pub fn linear(d1: usize, d2: usize, vb: VarBuilder) -> Result<Linear> { let inner = candle_nn::linear(d1, d2, vb)?; let span = tracing::span!(tracing::Level::TRACE, "linear"); Ok(Linear { inner, span }) } pub fn linear_no_bias(d1: usize, d2: usize, vb: VarBuilder) -> Result<Linear> { let inner = candle_nn::linear_no_bias(d1, d2, vb)?; let span = tracing::span!(tracing::Level::TRACE, "linear"); Ok(Linear { inner, span }) } impl Module for Linear { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); self.inner.forward(xs) } } // Wrap the conv2d op to provide some tracing. #[derive(Debug, Clone)] pub struct Conv2d { inner: candle_nn::Conv2d, span: tracing::Span, } impl Module for Conv2d { fn forward(&self, x: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); self.inner.forward(x) } } pub fn conv2d( in_channels: usize, out_channels: usize, kernel_size: usize, cfg: candle_nn::Conv2dConfig, vs: candle_nn::VarBuilder, ) -> Result<Conv2d> { let span = tracing::span!(tracing::Level::TRACE, "conv2d"); let inner = candle_nn::conv2d(in_channels, out_channels, kernel_size, cfg, vs)?; Ok(Conv2d { inner, span }) } // QMatMul wrapper adding some tracing. 
#[derive(Clone)] pub struct QMatMul { inner: candle::quantized::QMatMul, span: tracing::Span, } impl QMatMul { pub fn new( out_dim: usize, in_dim: usize, vb: crate::quantized_var_builder::VarBuilder, ) -> Result<Self> { let ws = vb.get((in_dim, out_dim), "weight")?; let inner = candle::quantized::QMatMul::from_arc(ws)?; let span = tracing::span!(tracing::Level::TRACE, "qmatmul"); Ok(Self { inner, span }) } } impl Module for QMatMul { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); self.inner.forward(xs) } } impl std::fmt::Debug for QMatMul { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "QMatMul") } } #[derive(Clone, Debug)] pub struct LayerNorm { inner: candle_nn::LayerNorm, span: tracing::Span, } impl LayerNorm { pub fn new(weight: Tensor, bias: Tensor, eps: f64) -> Self { let inner = candle_nn::LayerNorm::new(weight, bias, eps); let span = tracing::span!(tracing::Level::TRACE, "layer-norm"); Self { inner, span } } } impl Module for LayerNorm { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); self.inner.forward(xs) } } pub fn layer_norm<C: Into<candle_nn::LayerNormConfig>>( size: usize, c: C, vb: VarBuilder, ) -> Result<LayerNorm> { let inner = candle_nn::layer_norm(size, c, vb)?; let span = tracing::span!(tracing::Level::TRACE, "layer-norm"); Ok(LayerNorm { inner, span }) }
candle/candle-transformers/src/models/with_tracing.rs/0
{ "file_path": "candle/candle-transformers/src/models/with_tracing.rs", "repo_id": "candle", "token_count": 1863 }
34
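The wrappers above keep the `candle_nn` signatures and only add a TRACE span around each forward pass, so they drop into existing code unchanged. A minimal sketch, using a zero-initialized `VarBuilder` in place of real weights:

```rust
use candle::{DType, Device, Module, Result, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::models::with_tracing::linear;

fn traced_linear_demo() -> Result<Tensor> {
    let device = Device::Cpu;
    // Zero weights stand in for a real checkpoint here.
    let vb = VarBuilder::zeros(DType::F32, &device);
    let layer = linear(4, 2, vb.pp("proj"))?;
    let xs = Tensor::randn(0f32, 1f32, (3, 4), &device)?;
    // The span only shows up when a tracing subscriber is installed.
    layer.forward(&xs) // -> (3, 2)
}
```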
[package] name = "candle-wasm-example-llama2" version.workspace = true edition.workspace = true description.workspace = true repository.workspace = true keywords.workspace = true categories.workspace = true license.workspace = true [dependencies] candle = { workspace = true } candle-nn = { workspace = true } candle-transformers = { workspace = true } num-traits = { workspace = true } tokenizers = { workspace = true, features = ["unstable_wasm"] } # App crates. anyhow = { workspace = true } byteorder = { workspace = true } log = { workspace = true } rand = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } # Wasm specific crates. console_error_panic_hook = "0.1.7" getrandom = { version = "0.2", features = ["js"] } gloo = "0.11" js-sys = "0.3.64" wasm-bindgen = "0.2.87" wasm-bindgen-futures = "0.4.37" wasm-logger = "0.2" yew-agent = "0.2.0" yew = { version = "0.20.0", features = ["csr"] } [dependencies.web-sys] version = "0.3.64" features = [ 'Blob', 'Document', 'Element', 'HtmlElement', 'Node', 'Window', 'Request', 'RequestCache', 'RequestInit', 'RequestMode', 'Response', 'Performance', ]
candle/candle-wasm-examples/llama2-c/Cargo.toml/0
{ "file_path": "candle/candle-wasm-examples/llama2-c/Cargo.toml", "repo_id": "candle", "token_count": 434 }
35
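For context on the manifest above: the wasm-specific dependencies (`wasm-bindgen`, `console_error_panic_hook`, `getrandom` with the `js` feature) are what let the crate run in the browser. The snippet below is only an illustrative skeleton of a `wasm-bindgen` export, not the example's actual code; the `SamplerHandle` type is made up.

```rust
use wasm_bindgen::prelude::*;

#[wasm_bindgen]
pub struct SamplerHandle {
    temperature: f64,
}

#[wasm_bindgen]
impl SamplerHandle {
    #[wasm_bindgen(constructor)]
    pub fn new(temperature: f64) -> SamplerHandle {
        // Surface Rust panics in the browser console.
        console_error_panic_hook::set_once();
        SamplerHandle { temperature }
    }

    pub fn describe(&self) -> String {
        format!("sampling with temperature {}", self.temperature)
    }
}
```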
<html> <head> <meta content="text/html;charset=utf-8" http-equiv="Content-Type" /> <title>Candle Phi 1.5 / Phi 2.0 Rust/WASM</title> </head> <body></body> </html> <!DOCTYPE html> <html> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" /> <link rel="stylesheet" href="https://cdn.jsdelivr.net/gh/highlightjs/[email protected]/build/styles/default.min.css" /> <style> @import url("https://fonts.googleapis.com/css2?family=Source+Code+Pro:wght@200;300;400&family=Source+Sans+3:wght@100;200;300;400;500;600;700;800;900&display=swap"); html, body { font-family: "Source Sans 3", sans-serif; } code, output, select, pre { font-family: "Source Code Pro", monospace; } </style> <style type="text/tailwindcss"> .link { @apply underline hover:text-blue-500 hover:no-underline; } </style> <script src="https://cdn.tailwindcss.com"></script> <script type="module"> import snarkdown from "https://cdn.skypack.dev/snarkdown"; import hljs from "https://cdn.skypack.dev/highlight.js"; // models base url const MODELS = { phi_1_5_q4k: { base_url: "https://huggingface.co/lmz/candle-quantized-phi/resolve/main/", model: "model-q4k.gguf", tokenizer: "tokenizer.json", config: "phi-1_5.json", quantized: true, seq_len: 2048, size: "800 MB", }, phi_1_5_q80: { base_url: "https://huggingface.co/lmz/candle-quantized-phi/resolve/main/", model: "model-q80.gguf", tokenizer: "tokenizer.json", config: "phi-1_5.json", quantized: true, seq_len: 2048, size: "1.51 GB", }, phi_2_0_q4k: { base_url: "https://huggingface.co/radames/phi-2-quantized/resolve/main/", model: [ "model-v2-q4k.gguf_aa.part", "model-v2-q4k.gguf_ab.part", "model-v2-q4k.gguf_ac.part", ], tokenizer: "tokenizer.json", config: "config.json", quantized: true, seq_len: 2048, size: "1.57GB", }, puffin_phi_v2_q4k: { base_url: "https://huggingface.co/lmz/candle-quantized-phi/resolve/main/", model: "model-puffin-phi-v2-q4k.gguf", tokenizer: "tokenizer-puffin-phi-v2.json", config: "puffin-phi-v2.json", quantized: true, seq_len: 2048, size: "798 MB", }, puffin_phi_v2_q80: { base_url: "https://huggingface.co/lmz/candle-quantized-phi/resolve/main/", model: "model-puffin-phi-v2-q80.gguf", tokenizer: "tokenizer-puffin-phi-v2.json", config: "puffin-phi-v2.json", quantized: true, seq_len: 2048, size: "1.50 GB", }, }; const TEMPLATES = [ { title: "Simple prompt", prompt: `Sebastien is in London today, it’s the middle of July yet it’s raining, so Sebastien is feeling gloomy. He`, }, { title: "Think step by step", prompt: `Suppose Alice originally had 3 apples, then Bob gave Alice 7 apples, then Alice gave Cook 5 apples, and then Tim gave Alice 3x the amount of apples Alice had. How many apples does Alice have now? Let’s think step by step.`, }, { title: "Explaing a code snippet", prompt: `What does this script do? \`\`\`python s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind(('', 0)) s.listen(1) conn, addr = s.accept() print('Connected by', addr) return conn.getsockname()[1] \`\`\` Let’s think step by step.`, }, { title: "Question answering", prompt: `Instruct: What is the capital of France? Output:`, }, { title: "Chat mode", prompt: `Alice: Can you tell me how to create a python application to go through all the files in one directory where the file’s name DOES NOT end with '.json'? 
Bob:`, }, { title: "Python code completion", prompt: `"""write a python function called batch(function, list) which call function(x) for x in list in parallel""" Solution:`, }, { title: "Python Sample", prompt: `"""Can you make sure those histograms appear side by side on the same plot: \`\`\`python plt.hist(intreps_retrained[0][1].view(64,-1).norm(dim=1).detach().cpu().numpy(), bins = 20) plt.hist(intreps_pretrained[0][1].view(64,-1).norm(dim=1).detach().cpu().numpy(), bins = 20) \`\`\` """`, }, { title: "Write a Twitter post", prompt: `Write a twitter post for the discovery of gravitational wave. Twitter Post:`, }, { title: "Write a review", prompt: `Write a polite review complaining that the video game 'Random Game' was too badly optimized and it burned my laptop. Very polite review:`, }, ]; const phiWorker = new Worker("./phiWorker.js", { type: "module", }); async function generateSequence(controller) { const getValue = (id) => document.querySelector(`#${id}`).value; const modelID = getValue("model"); const model = MODELS[modelID]; const weightsURL = model.model instanceof Array ? model.model.map((m) => model.base_url + m) : model.base_url + model.model; const tokenizerURL = model.base_url + model.tokenizer; const configURL = model.base_url + model.config; const prompt = getValue("prompt").trim(); const temperature = getValue("temperature"); const topP = getValue("top-p"); const repeatPenalty = getValue("repeat_penalty"); const seed = getValue("seed"); const maxSeqLen = getValue("max-seq"); function updateStatus(data) { const outStatus = document.querySelector("#output-status"); const outGen = document.querySelector("#output-generation"); const outCounter = document.querySelector("#output-counter"); switch (data.status) { case "loading": outStatus.hidden = false; outStatus.textContent = data.message; outGen.hidden = true; outCounter.hidden = true; break; case "generating": const { message, prompt, sentence, tokensSec, totalTime } = data; outStatus.hidden = true; outCounter.hidden = false; outGen.hidden = false; outGen.innerHTML = snarkdown(prompt + sentence); outCounter.innerHTML = `${(totalTime / 1000).toFixed( 2 )}s (${tokensSec.toFixed(2)} tok/s)`; hljs.highlightAll(); break; case "complete": outStatus.hidden = true; outGen.hidden = false; break; } } return new Promise((resolve, reject) => { phiWorker.postMessage({ weightsURL, modelID, tokenizerURL, configURL, quantized: model.quantized, prompt, temp: temperature, top_p: topP, repeatPenalty, seed: seed, maxSeqLen, command: "start", }); const handleAbort = () => { phiWorker.postMessage({ command: "abort" }); }; const handleMessage = (event) => { const { status, error, message, prompt, sentence } = event.data; if (status) updateStatus(event.data); if (error) { phiWorker.removeEventListener("message", handleMessage); reject(new Error(error)); } if (status === "aborted") { phiWorker.removeEventListener("message", handleMessage); resolve(event.data); } if (status === "complete") { phiWorker.removeEventListener("message", handleMessage); resolve(event.data); } }; controller.signal.addEventListener("abort", handleAbort); phiWorker.addEventListener("message", handleMessage); }); } const form = document.querySelector("#form"); const prompt = document.querySelector("#prompt"); const clearBtn = document.querySelector("#clear-btn"); const runBtn = document.querySelector("#run"); const modelSelect = document.querySelector("#model"); const promptTemplates = document.querySelector("#prompt-templates"); let runController = new AbortController(); 
let isRunning = false; document.addEventListener("DOMContentLoaded", () => { for (const [id, model] of Object.entries(MODELS)) { const option = document.createElement("option"); option.value = id; option.innerText = `${id} (${model.size})`; modelSelect.appendChild(option); } const query = new URLSearchParams(window.location.search); const modelID = query.get("model"); if (modelID) { modelSelect.value = modelID; } else { modelSelect.value = "phi_1_5_q4k"; } for (const [i, { title, prompt }] of TEMPLATES.entries()) { const div = document.createElement("div"); const input = document.createElement("input"); input.type = "radio"; input.name = "task"; input.id = `templates-${i}`; input.classList.add("font-light", "cursor-pointer"); input.value = prompt; const label = document.createElement("label"); label.htmlFor = `templates-${i}`; label.classList.add("cursor-pointer"); label.innerText = title; div.appendChild(input); div.appendChild(label); promptTemplates.appendChild(div); } }); promptTemplates.addEventListener("change", (e) => { const template = e.target.value; prompt.value = template; prompt.style.height = "auto"; prompt.style.height = prompt.scrollHeight + "px"; runBtn.disabled = false; clearBtn.classList.remove("invisible"); }); modelSelect.addEventListener("change", (e) => { const query = new URLSearchParams(window.location.search); query.set("model", e.target.value); window.history.replaceState( {}, "", `${window.location.pathname}?${query}` ); window.parent.postMessage({ queryString: "?" + query }, "*"); const model = MODELS[e.target.value]; document.querySelector("#max-seq").max = model.seq_len; document.querySelector("#max-seq").nextElementSibling.value = 200; }); form.addEventListener("submit", async (e) => { e.preventDefault(); if (isRunning) { stopRunning(); } else { startRunning(); await generateSequence(runController); stopRunning(); } }); function startRunning() { isRunning = true; runBtn.textContent = "Stop"; } function stopRunning() { runController.abort(); runController = new AbortController(); runBtn.textContent = "Run"; isRunning = false; } clearBtn.addEventListener("click", (e) => { e.preventDefault(); prompt.value = ""; clearBtn.classList.add("invisible"); runBtn.disabled = true; stopRunning(); }); prompt.addEventListener("input", (e) => { runBtn.disabled = false; if (e.target.value.length > 0) { clearBtn.classList.remove("invisible"); } else { clearBtn.classList.add("invisible"); } }); </script> </head> <body class="container max-w-4xl mx-auto p-4 text-gray-800"> <main class="grid grid-cols-1 gap-8 relative"> <span class="absolute text-5xl -ml-[1em]"> 🕯️ </span> <div> <h1 class="text-5xl font-bold">Candle Phi 1.5 / Phi 2.0</h1> <h2 class="text-2xl font-bold">Rust/WASM Demo</h2> <p class="max-w-lg"> The <a href="https://huggingface.co/microsoft/phi-1_5" class="link" target="_blank" >Phi-1.5</a > and <a href="https://huggingface.co/microsoft/phi-2" class="link" target="_blank" >Phi-2</a > models achieve state-of-the-art performance with only 1.3 billion and 2.7 billion parameters, compared to larger models with up to 13 billion parameters. Here you can try the quantized versions. Additional prompt examples are available in the <a href="https://arxiv.org/pdf/2309.05463.pdf#page=8" class="link" target="_blank" > technical report </a >. 
</p> <p class="max-w-lg"> You can also try <a href="https://huggingface.co/teknium/Puffin-Phi-v2" class="link" target="_blank" >Puffin-Phi V2 </a> quantized version, a fine-tuned version of Phi-1.5 on the <a href="https://huggingface.co/datasets/LDJnr/Puffin" class="link" target="_blank" >Puffin dataset </a> </p> </div> <div> <p class="text-xs italic max-w-lg"> <b>Note:</b> When first run, the app will download and cache the model, which could take a few minutes. The models are <b>~800MB</b> or <b>~1.57GB</b> in size. </p> </div> <div> <label for="model" class="font-medium">Models Options: </label> <select id="model" class="border-2 border-gray-500 rounded-md font-light" ></select> </div> <div> <details> <summary class="font-medium cursor-pointer">Prompt Templates</summary> <form id="prompt-templates" class="grid grid-cols-1 sm:grid-cols-2 gap-1 my-2" ></form> </details> </div> <form id="form" class="flex text-normal px-1 py-1 border border-gray-700 rounded-md items-center" > <input type="submit" hidden /> <textarea type="text" id="prompt" class="font-light text-lg w-full px-3 py-2 mx-1 resize-none outline-none" oninput="this.style.height = 0;this.style.height = this.scrollHeight + 'px'" placeholder="Add your prompt here..." > Instruct: Write a detailed analogy between mathematics and a lighthouse. Output:</textarea > <button id="clear-btn"> <svg fill="none" xmlns="http://www.w3.org/2000/svg" width="40" viewBox="0 0 70 40" > <path opacity=".5" d="M39 .2v40.2" stroke="#1F2937" /> <path d="M1.5 11.5 19 29.1m0-17.6L1.5 29.1" opacity=".5" stroke="#1F2937" stroke-width="2" /> </svg> </button> <button id="run" class="bg-gray-700 hover:bg-gray-800 text-white font-normal py-2 w-16 rounded disabled:bg-gray-300 disabled:cursor-not-allowed" > Run </button> </form> <details> <summary class="font-medium cursor-pointer">Advanced Options</summary> <div class="grid grid-cols-3 max-w-md items-center gap-3 py-3"> <label class="text-sm font-medium" for="max-seq" >Maximum length </label> <input type="range" id="max-seq" name="max-seq" min="1" max="2048" step="1" value="200" oninput="this.nextElementSibling.value = Number(this.value)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md" > 200</output > <label class="text-sm font-medium" for="temperature" >Temperature</label > <input type="range" id="temperature" name="temperature" min="0" max="2" step="0.01" value="0.00" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md" > 0.00</output > <label class="text-sm font-medium" for="top-p">Top-p</label> <input type="range" id="top-p" name="top-p" min="0" max="1" step="0.01" value="1.00" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md" > 1.00</output > <label class="text-sm font-medium" for="repeat_penalty" >Repeat Penalty</label > <input type="range" id="repeat_penalty" name="repeat_penalty" min="1" max="2" step="0.01" value="1.10" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md" >1.10</output > <label class="text-sm font-medium" for="seed">Seed</label> <input type="number" id="seed" name="seed" value="299792458" class="font-light border border-gray-700 text-right rounded-md p-2" /> <button 
id="run" onclick="document.querySelector('#seed').value = Math.floor(Math.random() * Number.MAX_SAFE_INTEGER)" class="bg-gray-700 hover:bg-gray-800 text-white font-normal py-1 w-[50px] rounded disabled:bg-gray-300 disabled:cursor-not-allowed text-sm" > Rand </button> </div> </details> <div> <h3 class="font-medium">Generation:</h3> <div class="min-h-[250px] bg-slate-100 text-gray-500 p-4 rounded-md flex flex-col gap-2" > <div id="output-counter" hidden class="ml-auto font-semibold grid-rows-1" ></div> <p hidden id="output-generation" class="grid-rows-2 text-lg"></p> <span id="output-status" class="m-auto font-light" >No output yet</span > </div> </div> </main> </body> </html>
candle/candle-wasm-examples/phi/index.html/0
{ "file_path": "candle/candle-wasm-examples/phi/index.html", "repo_id": "candle", "token_count": 9818 }
36
<html> <head> <meta content="text/html;charset=utf-8" http-equiv="Content-Type" /> <title>Candle T5</title> </head> <body></body> </html> <!DOCTYPE html> <html> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" /> <style> @import url("https://fonts.googleapis.com/css2?family=Source+Code+Pro:wght@200;300;400&family=Source+Sans+3:wght@100;200;300;400;500;600;700;800;900&display=swap"); html, body { font-family: "Source Sans 3", sans-serif; } </style> <style type="text/tailwindcss"> .link { @apply underline hover:text-blue-500 hover:no-underline; } </style> <script src="https://cdn.tailwindcss.com"></script> <script type="module"> import { getModelInfo, MODELS, extractEmbeddings, generateText, } from "./utils.js"; const t5ModelEncoderWorker = new Worker("./T5ModelEncoderWorker.js", { type: "module", }); const t5ModelConditionalGeneration = new Worker( "./T5ModelConditionalGeneration.js", { type: "module" } ); const formEl = document.querySelector("#form"); const modelEl = document.querySelector("#model"); const promptEl = document.querySelector("#prompt"); const temperatureEl = document.querySelector("#temperature"); const toppEL = document.querySelector("#top-p"); const repeatPenaltyEl = document.querySelector("#repeat_penalty"); const seedEl = document.querySelector("#seed"); const outputEl = document.querySelector("#output-generation"); const tasksEl = document.querySelector("#tasks"); let selectedTaskID = ""; document.addEventListener("DOMContentLoaded", () => { for (const [id, model] of Object.entries(MODELS)) { const option = document.createElement("option"); option.value = id; option.innerText = `${id} (${model.size})`; modelEl.appendChild(option); } populateTasks(modelEl.value); modelEl.addEventListener("change", (e) => { populateTasks(e.target.value); }); tasksEl.addEventListener("change", (e) => { const task = e.target.value; const modelID = modelEl.value; promptEl.value = MODELS[modelID].tasks[task].prefix; selectedTaskID = task; }); }); function populateTasks(modelID) { const tasks = MODELS[modelID].tasks; tasksEl.innerHTML = ""; for (const [task, params] of Object.entries(tasks)) { const div = document.createElement("div"); div.innerHTML = ` <input type="radio" name="task" id="${task}" class="font-light cursor-pointer" value="${task}" /> <label for="${task}" class="cursor-pointer"> ${params.prefix} </label> `; tasksEl.appendChild(div); } selectedTaskID = Object.keys(tasks)[0]; tasksEl.querySelector(`#${selectedTaskID}`).checked = true; } form.addEventListener("submit", (e) => { e.preventDefault(); const promptText = promptEl.value; const modelID = modelEl.value; const { modelURL, configURL, tokenizerURL, maxLength } = getModelInfo( modelID, selectedTaskID ); const params = { temperature: Number(temperatureEl.value), top_p: Number(toppEL.value), repetition_penalty: Number(repeatPenaltyEl.value), seed: BigInt(seedEl.value), max_length: maxLength, }; generateText( t5ModelConditionalGeneration, modelURL, tokenizerURL, configURL, modelID, promptText, params, (status) => { if (status.status === "loading") { outputEl.innerText = "Loading model..."; } if (status.status === "decoding") { outputEl.innerText = "Generating..."; } } ).then(({ output }) => { outputEl.innerText = output.generation; }); }); </script> </head> <body class="container max-w-4xl mx-auto p-4"> <main class="grid grid-cols-1 gap-8 relative"> <span class="absolute text-5xl -ml-[1em]"> 🕯️ </span> <div> <h1 class="text-5xl font-bold">Candle T5 Transformer</h1> <h2 
class="text-2xl font-bold">Rust/WASM Demo</h2> <p class="max-w-lg"> This demo showcase Text-To-Text Transfer Transformer (<a href="https://blog.research.google/2020/02/exploring-transfer-learning-with-t5.html" target="_blank" class="link" >T5</a >) models right in your browser, thanks to <a href="https://github.com/huggingface/candle/" target="_blank" class="link"> Candle </a> ML framework and rust/wasm. You can choose from a range of available models, including <a href="https://huggingface.co/t5-small" target="_blank" class="link"> t5-small</a >, <a href="https://huggingface.co/t5-base" target="_blank" class="link" >t5-base</a >, <a href="https://huggingface.co/google/flan-t5-small" target="_blank" class="link" >flan-t5-small</a >, several <a href="https://huggingface.co/lmz/candle-quantized-t5/tree/main" target="_blank" class="link"> t5 quantized gguf models</a >, and also a quantized <a href="https://huggingface.co/jbochi/candle-coedit-quantized/tree/main" target="_blank" class="link"> CoEdIT model for text rewrite</a >. </p> </div> <div> <label for="model" class="font-medium">Models Options: </label> <select id="model" class="border-2 border-gray-500 rounded-md font-light"></select> </div> <div> <h3 class="font-medium">Task Prefix:</h3> <form id="tasks" class="flex flex-col gap-1 my-2"></form> </div> <form id="form" class="flex text-normal px-1 py-1 border border-gray-700 rounded-md items-center"> <input type="submit" hidden /> <input type="text" id="prompt" class="font-light w-full px-3 py-2 mx-1 resize-none outline-none" placeholder="Add prompt here, e.g. 'translate English to German: Today I'm going to eat Ice Cream'" value="translate English to German: Today I'm going to eat Ice Cream" /> <button class="bg-gray-700 hover:bg-gray-800 text-white font-normal py-2 w-16 rounded disabled:bg-gray-300 disabled:cursor-not-allowed"> Run </button> </form> <div class="grid grid-cols-3 max-w-md items-center gap-3"> <label class="text-sm font-medium" for="temperature">Temperature</label> <input type="range" id="temperature" name="temperature" min="0" max="2" step="0.01" value="0.00" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md"> 0.00</output > <label class="text-sm font-medium" for="top-p">Top-p</label> <input type="range" id="top-p" name="top-p" min="0" max="1" step="0.01" value="1.00" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md"> 1.00</output > <label class="text-sm font-medium" for="repeat_penalty" >Repeat Penalty</label > <input type="range" id="repeat_penalty" name="repeat_penalty" min="1" max="2" step="0.01" value="1.10" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md" >1.10</output > <label class="text-sm font-medium" for="seed">Seed</label> <input type="number" id="seed" name="seed" value="299792458" class="font-light border border-gray-700 text-right rounded-md p-2" /> <button id="run" onclick="document.querySelector('#seed').value = BigInt(Math.floor(Math.random() * 2**64-1))" class="bg-gray-700 hover:bg-gray-800 text-white font-normal py-1 w-[50px] rounded disabled:bg-gray-300 disabled:cursor-not-allowed text-sm"> Rand </button> </div> <div> <h3 class="font-medium">Generation:</h3> <div class="min-h-[250px] 
bg-slate-100 text-gray-500 p-4 rounded-md flex flex-col gap-2 text-lg"> <p id="output-generation" class="grid-rows-2">No output yet</p> </div> </div> </main> </body> </html>
candle/candle-wasm-examples/t5/index.html/0
{ "file_path": "candle/candle-wasm-examples/t5/index.html", "repo_id": "candle", "token_count": 4724 }
37
pub const LANGUAGES: [(&str, &str); 99] = [ ("en", "english"), ("zh", "chinese"), ("de", "german"), ("es", "spanish"), ("ru", "russian"), ("ko", "korean"), ("fr", "french"), ("ja", "japanese"), ("pt", "portuguese"), ("tr", "turkish"), ("pl", "polish"), ("ca", "catalan"), ("nl", "dutch"), ("ar", "arabic"), ("sv", "swedish"), ("it", "italian"), ("id", "indonesian"), ("hi", "hindi"), ("fi", "finnish"), ("vi", "vietnamese"), ("he", "hebrew"), ("uk", "ukrainian"), ("el", "greek"), ("ms", "malay"), ("cs", "czech"), ("ro", "romanian"), ("da", "danish"), ("hu", "hungarian"), ("ta", "tamil"), ("no", "norwegian"), ("th", "thai"), ("ur", "urdu"), ("hr", "croatian"), ("bg", "bulgarian"), ("lt", "lithuanian"), ("la", "latin"), ("mi", "maori"), ("ml", "malayalam"), ("cy", "welsh"), ("sk", "slovak"), ("te", "telugu"), ("fa", "persian"), ("lv", "latvian"), ("bn", "bengali"), ("sr", "serbian"), ("az", "azerbaijani"), ("sl", "slovenian"), ("kn", "kannada"), ("et", "estonian"), ("mk", "macedonian"), ("br", "breton"), ("eu", "basque"), ("is", "icelandic"), ("hy", "armenian"), ("ne", "nepali"), ("mn", "mongolian"), ("bs", "bosnian"), ("kk", "kazakh"), ("sq", "albanian"), ("sw", "swahili"), ("gl", "galician"), ("mr", "marathi"), ("pa", "punjabi"), ("si", "sinhala"), ("km", "khmer"), ("sn", "shona"), ("yo", "yoruba"), ("so", "somali"), ("af", "afrikaans"), ("oc", "occitan"), ("ka", "georgian"), ("be", "belarusian"), ("tg", "tajik"), ("sd", "sindhi"), ("gu", "gujarati"), ("am", "amharic"), ("yi", "yiddish"), ("lo", "lao"), ("uz", "uzbek"), ("fo", "faroese"), ("ht", "haitian creole"), ("ps", "pashto"), ("tk", "turkmen"), ("nn", "nynorsk"), ("mt", "maltese"), ("sa", "sanskrit"), ("lb", "luxembourgish"), ("my", "myanmar"), ("bo", "tibetan"), ("tl", "tagalog"), ("mg", "malagasy"), ("as", "assamese"), ("tt", "tatar"), ("haw", "hawaiian"), ("ln", "lingala"), ("ha", "hausa"), ("ba", "bashkir"), ("jw", "javanese"), ("su", "sundanese"), ];
candle/candle-wasm-examples/whisper/src/languages.rs/0
{ "file_path": "candle/candle-wasm-examples/whisper/src/languages.rs", "repo_id": "candle", "token_count": 1175 }
38
use crate::model::{report_detect, report_pose, Bbox, Multiples, YoloV8, YoloV8Pose}; use candle::{DType, Device, Result, Tensor}; use candle_nn::{Module, VarBuilder}; use serde::{Deserialize, Serialize}; use wasm_bindgen::prelude::*; use yew_agent::{HandlerId, Public, WorkerLink}; #[wasm_bindgen] extern "C" { // Use `js_namespace` here to bind `console.log(..)` instead of just // `log(..)` #[wasm_bindgen(js_namespace = console)] pub fn log(s: &str); } #[macro_export] macro_rules! console_log { // Note that this is using the `log` function imported above during // `bare_bones` ($($t:tt)*) => ($crate::worker::log(&format_args!($($t)*).to_string())) } // Communication to the worker happens through bincode, the model weights and configs are fetched // on the main thread and transferred via the following structure. #[derive(Serialize, Deserialize)] pub struct ModelData { pub weights: Vec<u8>, pub model_size: String, } #[derive(Serialize, Deserialize)] pub struct RunData { pub image_data: Vec<u8>, pub conf_threshold: f32, pub iou_threshold: f32, } pub struct Model { model: YoloV8, } impl Model { pub fn run( &self, image_data: Vec<u8>, conf_threshold: f32, iou_threshold: f32, ) -> Result<Vec<Vec<Bbox>>> { console_log!("image data: {}", image_data.len()); let image_data = std::io::Cursor::new(image_data); let original_image = image::io::Reader::new(image_data) .with_guessed_format()? .decode() .map_err(candle::Error::wrap)?; let (width, height) = { let w = original_image.width() as usize; let h = original_image.height() as usize; if w < h { let w = w * 640 / h; // Sizes have to be divisible by 32. (w / 32 * 32, 640) } else { let h = h * 640 / w; (640, h / 32 * 32) } }; let image_t = { let img = original_image.resize_exact( width as u32, height as u32, image::imageops::FilterType::CatmullRom, ); let data = img.to_rgb8().into_raw(); Tensor::from_vec( data, (img.height() as usize, img.width() as usize, 3), &Device::Cpu, )? .permute((2, 0, 1))? }; let image_t = (image_t.unsqueeze(0)?.to_dtype(DType::F32)? * (1. / 255.))?; let predictions = self.model.forward(&image_t)?.squeeze(0)?; console_log!("generated predictions {predictions:?}"); let bboxes = report_detect( &predictions, original_image, width, height, conf_threshold, iou_threshold, )?; Ok(bboxes) } pub fn load_(weights: Vec<u8>, model_size: &str) -> Result<Self> { let multiples = match model_size { "n" => Multiples::n(), "s" => Multiples::s(), "m" => Multiples::m(), "l" => Multiples::l(), "x" => Multiples::x(), _ => Err(candle::Error::Msg( "invalid model size: must be n, s, m, l or x".to_string(), ))?, }; let dev = &Device::Cpu; let vb = VarBuilder::from_buffered_safetensors(weights, DType::F32, dev)?; let model = YoloV8::load(vb, multiples, 80)?; Ok(Self { model }) } pub fn load(md: ModelData) -> Result<Self> { Self::load_(md.weights, &md.model_size.to_string()) } } pub struct ModelPose { model: YoloV8Pose, } impl ModelPose { pub fn run( &self, image_data: Vec<u8>, conf_threshold: f32, iou_threshold: f32, ) -> Result<Vec<Bbox>> { console_log!("image data: {}", image_data.len()); let image_data = std::io::Cursor::new(image_data); let original_image = image::io::Reader::new(image_data) .with_guessed_format()? .decode() .map_err(candle::Error::wrap)?; let (width, height) = { let w = original_image.width() as usize; let h = original_image.height() as usize; if w < h { let w = w * 640 / h; // Sizes have to be divisible by 32. 
(w / 32 * 32, 640) } else { let h = h * 640 / w; (640, h / 32 * 32) } }; let image_t = { let img = original_image.resize_exact( width as u32, height as u32, image::imageops::FilterType::CatmullRom, ); let data = img.to_rgb8().into_raw(); Tensor::from_vec( data, (img.height() as usize, img.width() as usize, 3), &Device::Cpu, )? .permute((2, 0, 1))? }; let image_t = (image_t.unsqueeze(0)?.to_dtype(DType::F32)? * (1. / 255.))?; let predictions = self.model.forward(&image_t)?.squeeze(0)?; console_log!("generated predictions {predictions:?}"); let bboxes = report_pose( &predictions, original_image, width, height, conf_threshold, iou_threshold, )?; Ok(bboxes) } pub fn load_(weights: Vec<u8>, model_size: &str) -> Result<Self> { let multiples = match model_size { "n" => Multiples::n(), "s" => Multiples::s(), "m" => Multiples::m(), "l" => Multiples::l(), "x" => Multiples::x(), _ => Err(candle::Error::Msg( "invalid model size: must be n, s, m, l or x".to_string(), ))?, }; let dev = &Device::Cpu; let vb = VarBuilder::from_buffered_safetensors(weights, DType::F32, dev)?; let model = YoloV8Pose::load(vb, multiples, 1, (17, 3))?; Ok(Self { model }) } pub fn load(md: ModelData) -> Result<Self> { Self::load_(md.weights, &md.model_size.to_string()) } } pub struct Worker { link: WorkerLink<Self>, model: Option<Model>, } #[derive(Serialize, Deserialize)] pub enum WorkerInput { ModelData(ModelData), RunData(RunData), } #[derive(Serialize, Deserialize)] pub enum WorkerOutput { ProcessingDone(std::result::Result<Vec<Vec<Bbox>>, String>), WeightsLoaded, } impl yew_agent::Worker for Worker { type Input = WorkerInput; type Message = (); type Output = std::result::Result<WorkerOutput, String>; type Reach = Public<Self>; fn create(link: WorkerLink<Self>) -> Self { Self { link, model: None } } fn update(&mut self, _msg: Self::Message) { // no messaging } fn handle_input(&mut self, msg: Self::Input, id: HandlerId) { let output = match msg { WorkerInput::ModelData(md) => match Model::load(md) { Ok(model) => { self.model = Some(model); Ok(WorkerOutput::WeightsLoaded) } Err(err) => Err(format!("model creation error {err:?}")), }, WorkerInput::RunData(rd) => match &mut self.model { None => Err("model has not been set yet".to_string()), Some(model) => { let result = model .run(rd.image_data, rd.conf_threshold, rd.iou_threshold) .map_err(|e| e.to_string()); Ok(WorkerOutput::ProcessingDone(result)) } }, }; self.link.respond(id, output); } fn name_of_resource() -> &'static str { "worker.js" } fn resource_path_is_relative() -> bool { true } }
candle/candle-wasm-examples/yolo/src/worker.rs/0
{ "file_path": "candle/candle-wasm-examples/yolo/src/worker.rs", "repo_id": "candle", "token_count": 4077 }
39
# syntax=docker/dockerfile:1 # read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker # you will also find guides on how best to write your Dockerfile FROM node:20 as builder-production WORKDIR /app COPY --link --chown=1000 package-lock.json package.json ./ RUN --mount=type=cache,target=/app/.npm \ npm set cache /app/.npm && \ npm ci --omit=dev FROM builder-production as builder RUN --mount=type=cache,target=/app/.npm \ npm set cache /app/.npm && \ npm ci COPY --link --chown=1000 . . RUN --mount=type=secret,id=DOTENV_LOCAL,dst=.env.local \ npm run build FROM node:20-slim RUN npm install -g pm2 COPY --from=builder-production /app/node_modules /app/node_modules COPY --link --chown=1000 package.json /app/package.json COPY --from=builder /app/build /app/build CMD pm2 start /app/build/index.js -i $CPU_CORES --no-daemon
chat-ui/Dockerfile/0
{ "file_path": "chat-ui/Dockerfile", "repo_id": "chat-ui", "token_count": 351 }
40
export function clickOutside(element: HTMLDialogElement, callbackFunction: () => void) { function onClick(event: MouseEvent) { if (!element.contains(event.target as Node)) { callbackFunction(); } } document.body.addEventListener("click", onClick); return { update(newCallbackFunction: () => void) { callbackFunction = newCallbackFunction; }, destroy() { document.body.removeEventListener("click", onClick); }, }; }
chat-ui/src/lib/actions/clickOutside.ts/0
{ "file_path": "chat-ui/src/lib/actions/clickOutside.ts", "repo_id": "chat-ui", "token_count": 143 }
41
<script lang="ts"> import { onMount, onDestroy } from "svelte"; let el: HTMLElement; onMount(() => { el.ownerDocument.body.appendChild(el); }); onDestroy(() => { if (el?.parentNode) { el.parentNode.removeChild(el); } }); </script> <div bind:this={el} class="contents" hidden> <slot /> </div>
chat-ui/src/lib/components/Portal.svelte/0
{ "file_path": "chat-ui/src/lib/components/Portal.svelte", "repo_id": "chat-ui", "token_count": 130 }
42
<script lang="ts"> import { onDestroy } from "svelte"; import CarbonImage from "~icons/carbon/image"; // import EosIconsLoading from "~icons/eos-icons/loading"; export let files: File[]; let file_error_message = ""; let errorTimeout: ReturnType<typeof setTimeout>; export let onDrag = false; async function dropHandle(event: DragEvent) { event.preventDefault(); if (event.dataTransfer && event.dataTransfer.items) { // Use DataTransferItemList interface to access the file(s) if (files.length > 0) { files = []; } // get only the first file // optionally: we need to handle multiple files, if we want to support document upload for example // for multimodal we only support one image at a time but we could support multiple PDFs if (event.dataTransfer.items[0].kind === "file") { const file = event.dataTransfer.items[0].getAsFile(); if (file) { if (!event.dataTransfer.items[0].type.startsWith("image")) { setErrorMsg("Only images are supported"); files = []; return; } // if image is bigger than 2MB abort if (file.size > 2 * 1024 * 1024) { setErrorMsg("Image is too big. (2MB max)"); files = []; return; } files = [file]; onDrag = false; } } } } function setErrorMsg(errorMsg: string) { if (errorTimeout) { clearTimeout(errorTimeout); } file_error_message = errorMsg; errorTimeout = setTimeout(() => { file_error_message = ""; onDrag = false; }, 2000); } onDestroy(() => { if (errorTimeout) { clearTimeout(errorTimeout); } }); </script> <div id="dropzone" role="form" on:drop={dropHandle} class="relative flex w-full max-w-4xl flex-col items-center rounded-xl border border-dashed bg-gray-100 focus-within:border-gray-300 dark:border-gray-500 dark:bg-gray-700 dark:focus-within:border-gray-500" > <div class="object-center"> {#if file_error_message} <div class="absolute bottom-0 left-0 right-0 top-0 flex flex-col items-center justify-center gap-2 rounded-xl bg-gray-100 bg-opacity-50 dark:bg-gray-700 dark:bg-opacity-50" > <p class="text-red-500 dark:text-red-400">{file_error_message}</p> <div class="h-2.5 w-1/2 rounded-full bg-gray-200 dark:bg-gray-700"> <div class="animate-progress-bar h-2.5 rounded-full bg-red-500 dark:text-red-400 " /> </div> </div> {/if} <div class="mt-3 flex justify-center" class:opacity-0={file_error_message}> <CarbonImage class="text-xl text-gray-500 dark:text-gray-400" /> </div> <p class="mb-3 mt-1.5 text-sm text-gray-500 dark:text-gray-400" class:opacity-0={file_error_message} > Drag and drop <span class="font-semibold">one image</span> here </p> </div> </div> <style> @keyframes slideInFromLeft { 0% { width: 0; } 100% { width: 100%; } } .animate-progress-bar { /* This section calls the slideInFromLeft animation we defined above */ animation: 2s linear 0s 1 slideInFromLeft; } </style>
chat-ui/src/lib/components/chat/FileDropzone.svelte/0
{ "file_path": "chat-ui/src/lib/components/chat/FileDropzone.svelte", "repo_id": "chat-ui", "token_count": 1232 }
43
import { TEXT_EMBEDDING_MODELS } from "$env/static/private"; import { z } from "zod"; import { sum } from "$lib/utils/sum"; import { embeddingEndpoints, embeddingEndpointSchema, type EmbeddingEndpoint, } from "$lib/server/embeddingEndpoints/embeddingEndpoints"; import { embeddingEndpointTransformersJS } from "$lib/server/embeddingEndpoints/transformersjs/embeddingEndpoints"; import JSON5 from "json5"; const modelConfig = z.object({ /** Used as an identifier in DB */ id: z.string().optional(), /** Used to link to the model page, and for inference */ name: z.string().min(1), displayName: z.string().min(1).optional(), description: z.string().min(1).optional(), websiteUrl: z.string().url().optional(), modelUrl: z.string().url().optional(), endpoints: z.array(embeddingEndpointSchema).nonempty(), chunkCharLength: z.number().positive(), preQuery: z.string().default(""), prePassage: z.string().default(""), }); // Default embedding model for backward compatibility const rawEmbeddingModelJSON = TEXT_EMBEDDING_MODELS || `[ { "name": "Xenova/gte-small", "chunkCharLength": 512, "endpoints": [ { "type": "transformersjs" } ] } ]`; const embeddingModelsRaw = z.array(modelConfig).parse(JSON5.parse(rawEmbeddingModelJSON)); const processEmbeddingModel = async (m: z.infer<typeof modelConfig>) => ({ ...m, id: m.id || m.name, }); const addEndpoint = (m: Awaited<ReturnType<typeof processEmbeddingModel>>) => ({ ...m, getEndpoint: async (): Promise<EmbeddingEndpoint> => { if (!m.endpoints) { return embeddingEndpointTransformersJS({ type: "transformersjs", weight: 1, model: m, }); } const totalWeight = sum(m.endpoints.map((e) => e.weight)); let random = Math.random() * totalWeight; for (const endpoint of m.endpoints) { if (random < endpoint.weight) { const args = { ...endpoint, model: m }; switch (args.type) { case "tei": return embeddingEndpoints.tei(args); case "transformersjs": return embeddingEndpoints.transformersjs(args); } } random -= endpoint.weight; } throw new Error(`Failed to select embedding endpoint`); }, }); export const embeddingModels = await Promise.all( embeddingModelsRaw.map((e) => processEmbeddingModel(e).then(addEndpoint)) ); export const defaultEmbeddingModel = embeddingModels[0]; const validateEmbeddingModel = (_models: EmbeddingBackendModel[], key: "id" | "name") => { return z.enum([_models[0][key], ..._models.slice(1).map((m) => m[key])]); }; export const validateEmbeddingModelById = (_models: EmbeddingBackendModel[]) => { return validateEmbeddingModel(_models, "id"); }; export const validateEmbeddingModelByName = (_models: EmbeddingBackendModel[]) => { return validateEmbeddingModel(_models, "name"); }; export type EmbeddingBackendModel = typeof defaultEmbeddingModel;
chat-ui/src/lib/server/embeddingModels.ts/0
{ "file_path": "chat-ui/src/lib/server/embeddingModels.ts", "repo_id": "chat-ui", "token_count": 1017 }
44
import { JSDOM, VirtualConsole } from "jsdom"; export async function parseWeb(url: string) { const abortController = new AbortController(); setTimeout(() => abortController.abort(), 10000); const htmlString = await fetch(url, { signal: abortController.signal }) .then((response) => response.text()) .catch(); const virtualConsole = new VirtualConsole(); virtualConsole.on("error", () => { // No-op to skip console errors. }); // put the html string into a DOM const dom = new JSDOM(htmlString ?? "", { virtualConsole, }); const { document } = dom.window; const textElTags = "p"; const paragraphs = document.querySelectorAll(textElTags); if (!paragraphs.length) { throw new Error(`webpage doesn't have any "${textElTags}" element`); } const paragraphTexts = Array.from(paragraphs).map((p) => p.textContent); // combine text contents from paragraphs and then remove newlines and multiple spaces const text = paragraphTexts.join(" ").replace(/ {2}|\r\n|\n|\r/gm, ""); return text; }
chat-ui/src/lib/server/websearch/parseWeb.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/parseWeb.ts", "repo_id": "chat-ui", "token_count": 320 }
45
import type { MessageUpdate } from "./MessageUpdate"; import type { Timestamps } from "./Timestamps"; import type { WebSearch } from "./WebSearch"; export type Message = Partial<Timestamps> & { from: "user" | "assistant"; id: ReturnType<typeof crypto.randomUUID>; content: string; updates?: MessageUpdate[]; webSearchId?: WebSearch["_id"]; // legacy version webSearch?: WebSearch; score?: -1 | 0 | 1; files?: string[]; // can contain either the hash of the file or the b64 encoded image data on the client side when uploading interrupted?: boolean; };
chat-ui/src/lib/types/Message.ts/0
{ "file_path": "chat-ui/src/lib/types/Message.ts", "repo_id": "chat-ui", "token_count": 170 }
46
import { browser } from "$app/environment"; export function cookiesAreEnabled(): boolean { if (!browser) return false; if (navigator.cookieEnabled) return navigator.cookieEnabled; // Create cookie document.cookie = "cookietest=1"; const ret = document.cookie.indexOf("cookietest=") != -1; // Delete cookie document.cookie = "cookietest=1; expires=Thu, 01-Jan-1970 00:00:01 GMT"; return ret; }
chat-ui/src/lib/utils/cookiesAreEnabled.ts/0
{ "file_path": "chat-ui/src/lib/utils/cookiesAreEnabled.ts", "repo_id": "chat-ui", "token_count": 127 }
47
import type { LayoutServerLoad } from "./$types"; import { collections } from "$lib/server/database"; import type { Conversation } from "$lib/types/Conversation"; import { UrlDependency } from "$lib/types/UrlDependency"; import { defaultModel, models, oldModels, validateModel } from "$lib/server/models"; import { authCondition, requiresUser } from "$lib/server/auth"; import { DEFAULT_SETTINGS } from "$lib/types/Settings"; import { SERPAPI_KEY, SERPER_API_KEY, SERPSTACK_API_KEY, MESSAGES_BEFORE_LOGIN, YDC_API_KEY, USE_LOCAL_WEBSEARCH, ENABLE_ASSISTANTS, } from "$env/static/private"; import { ObjectId } from "mongodb"; import type { ConvSidebar } from "$lib/types/ConvSidebar"; export const load: LayoutServerLoad = async ({ locals, depends }) => { depends(UrlDependency.ConversationList); const settings = await collections.settings.findOne(authCondition(locals)); // If the active model in settings is not valid, set it to the default model. This can happen if model was disabled. if ( settings && !validateModel(models).safeParse(settings?.activeModel).success && !settings.assistants?.map((el) => el.toString())?.includes(settings?.activeModel) ) { settings.activeModel = defaultModel.id; await collections.settings.updateOne(authCondition(locals), { $set: { activeModel: defaultModel.id }, }); } // if the model is unlisted, set the active model to the default model if ( settings?.activeModel && models.find((m) => m.id === settings?.activeModel)?.unlisted === true ) { settings.activeModel = defaultModel.id; await collections.settings.updateOne(authCondition(locals), { $set: { activeModel: defaultModel.id }, }); } // get the number of messages where `from === "assistant"` across all conversations. const totalMessages = ( await collections.conversations .aggregate([ { $match: authCondition(locals) }, { $project: { messages: 1 } }, { $unwind: "$messages" }, { $match: { "messages.from": "assistant" } }, { $count: "messages" }, ]) .toArray() )[0]?.messages ?? 0; const messagesBeforeLogin = MESSAGES_BEFORE_LOGIN ? parseInt(MESSAGES_BEFORE_LOGIN) : 0; const userHasExceededMessages = messagesBeforeLogin > 0 && totalMessages > messagesBeforeLogin; const loginRequired = requiresUser && !locals.user && userHasExceededMessages; const enableAssistants = ENABLE_ASSISTANTS === "true"; const assistantActive = !models.map(({ id }) => id).includes(settings?.activeModel ?? ""); const assistant = assistantActive ? JSON.parse( JSON.stringify( await collections.assistants.findOne({ _id: new ObjectId(settings?.activeModel), }) ) ) : null; const conversations = await collections.conversations .find(authCondition(locals)) .sort({ updatedAt: -1 }) .project< Pick<Conversation, "title" | "model" | "_id" | "updatedAt" | "createdAt" | "assistantId"> >({ title: 1, model: 1, _id: 1, updatedAt: 1, createdAt: 1, assistantId: 1, }) .toArray(); const assistantIds = conversations .map((conv) => conv.assistantId) .filter((el) => !!el) as ObjectId[]; const assistants = await collections.assistants.find({ _id: { $in: assistantIds } }).toArray(); return { conversations: conversations.map((conv) => { if (settings?.hideEmojiOnSidebar) { conv.title = conv.title.replace(/\p{Emoji}/gu, ""); } // remove invalid unicode and trim whitespaces conv.title = conv.title.replace(/\uFFFD/gu, "").trimStart(); return { id: conv._id.toString(), title: conv.title, model: conv.model ?? 
defaultModel, updatedAt: conv.updatedAt, assistantId: conv.assistantId?.toString(), avatarHash: conv.assistantId && assistants.find((a) => a._id.toString() === conv.assistantId?.toString())?.avatar, }; }) satisfies ConvSidebar[], settings: { searchEnabled: !!( SERPAPI_KEY || SERPER_API_KEY || SERPSTACK_API_KEY || YDC_API_KEY || USE_LOCAL_WEBSEARCH ), ethicsModalAccepted: !!settings?.ethicsModalAcceptedAt, ethicsModalAcceptedAt: settings?.ethicsModalAcceptedAt ?? null, activeModel: settings?.activeModel ?? DEFAULT_SETTINGS.activeModel, hideEmojiOnSidebar: settings?.hideEmojiOnSidebar ?? false, shareConversationsWithModelAuthors: settings?.shareConversationsWithModelAuthors ?? DEFAULT_SETTINGS.shareConversationsWithModelAuthors, customPrompts: settings?.customPrompts ?? {}, assistants: settings?.assistants?.map((el) => el.toString()) ?? [], }, models: models.map((model) => ({ id: model.id, name: model.name, websiteUrl: model.websiteUrl, modelUrl: model.modelUrl, datasetName: model.datasetName, datasetUrl: model.datasetUrl, displayName: model.displayName, description: model.description, promptExamples: model.promptExamples, parameters: model.parameters, preprompt: model.preprompt, multimodal: model.multimodal, unlisted: model.unlisted, })), oldModels, user: locals.user && { id: locals.user._id.toString(), username: locals.user.username, avatarUrl: locals.user.avatarUrl, email: locals.user.email, }, assistant, enableAssistants, loginRequired, loginEnabled: requiresUser, guestMode: requiresUser && messagesBeforeLogin > 0, }; };
chat-ui/src/routes/+layout.server.ts/0
{ "file_path": "chat-ui/src/routes/+layout.server.ts", "repo_id": "chat-ui", "token_count": 2009 }
48
<script lang="ts"> import ChatWindow from "$lib/components/chat/ChatWindow.svelte"; import { pendingMessage } from "$lib/stores/pendingMessage"; import { isAborted } from "$lib/stores/isAborted"; import { onMount } from "svelte"; import { page } from "$app/stores"; import { goto, invalidate } from "$app/navigation"; import { base } from "$app/paths"; import { shareConversation } from "$lib/shareConversation"; import { UrlDependency } from "$lib/types/UrlDependency"; import { ERROR_MESSAGES, error } from "$lib/stores/errors"; import { randomUUID } from "$lib/utils/randomUuid"; import { findCurrentModel } from "$lib/utils/models"; import { webSearchParameters } from "$lib/stores/webSearchParameters"; import type { Message } from "$lib/types/Message"; import type { MessageUpdate, WebSearchUpdate } from "$lib/types/MessageUpdate"; import titleUpdate from "$lib/stores/titleUpdate"; import file2base64 from "$lib/utils/file2base64"; export let data; let messages = data.messages; let lastLoadedMessages = data.messages; let webSearchMessages: WebSearchUpdate[] = []; // Since we modify the messages array locally, we don't want to reset it if an old version is passed $: if (data.messages !== lastLoadedMessages) { messages = data.messages; lastLoadedMessages = data.messages; } let loading = false; let pending = false; let files: File[] = []; async function convFromShared() { try { loading = true; const res = await fetch(`${base}/conversation`, { method: "POST", headers: { "Content-Type": "application/json", }, body: JSON.stringify({ fromShare: $page.params.id, model: data.model, }), }); if (!res.ok) { error.set("Error while creating conversation, try again."); console.error("Error while creating conversation: " + (await res.text())); return; } const { conversationId } = await res.json(); return conversationId; } catch (err) { error.set(ERROR_MESSAGES.default); console.error(String(err)); throw err; } } // this function is used to send new message to the backends async function writeMessage({ prompt, messageId = randomUUID(), isRetry = false, isContinue = false, }: { prompt?: string; messageId?: ReturnType<typeof randomUUID>; isRetry?: boolean; isContinue?: boolean; }): Promise<void> { try { $isAborted = false; loading = true; pending = true; // first we check if the messageId already exists, indicating a retry let msgIndex = messages.findIndex((msg) => msg.id === messageId); if (msgIndex === -1) { msgIndex = messages.length - 1; } if (isRetry && messages[msgIndex].from === "assistant") { throw new Error("Trying to retry a message that is not from user"); } if (isContinue && messages[msgIndex].from === "user") { throw new Error("Trying to continue a message that is not from assistant"); } // const isNewMessage = !isRetry && !isContinue; const module = await import("browser-image-resizer"); // currently, only IDEFICS is supported by TGI // the size of images is hardcoded to 224x224 in TGI // this will need to be configurable when support for more models is added const resizedImages = await Promise.all( files.map(async (file) => { return await module .readAndCompressImage(file, { maxHeight: 224, maxWidth: 224, quality: 1, }) .then(async (el) => await file2base64(el as File)); }) ); // slice up to the point of the retry if (isRetry) { messages = [ ...messages.slice(0, msgIndex), { from: "user", content: messages[msgIndex].content, id: messageId, files: messages[msgIndex].files, }, ]; } else if (!isContinue) { // or add a new message if its not a continue request if (!prompt) { throw new Error("Prompt is 
undefined"); } messages = [ ...messages, { from: "user", content: prompt ?? "", id: messageId, files: resizedImages, }, ]; } files = []; const response = await fetch(`${base}/conversation/${$page.params.id}`, { method: "POST", headers: { "Content-Type": "application/json" }, body: JSON.stringify({ inputs: prompt, id: messageId, is_retry: isRetry, is_continue: isContinue, web_search: $webSearchParameters.useSearch, files: isRetry ? undefined : resizedImages, }), }); files = []; if (!response.body) { throw new Error("Body not defined"); } if (!response.ok) { error.set((await response.json())?.message); return; } // eslint-disable-next-line no-undef const encoder = new TextDecoderStream(); const reader = response?.body?.pipeThrough(encoder).getReader(); let finalAnswer = ""; const messageUpdates: MessageUpdate[] = []; // set str queue // ex) if the last response is => {"type": "stream", "token": // It should be => {"type": "stream", "token": "Hello"} = prev_input_chunk + "Hello"} let prev_input_chunk = [""]; // this is a bit ugly // we read the stream until we get the final answer while (finalAnswer === "") { // check for abort if ($isAborted) { reader?.cancel(); break; } // if there is something to read await reader?.read().then(async ({ done, value }) => { // we read, if it's done we cancel if (done) { reader.cancel(); return; } if (!value) { return; } value = prev_input_chunk.pop() + value; // if it's not done we parse the value, which contains all messages const inputs = value.split("\n"); inputs.forEach(async (el: string) => { try { const update = JSON.parse(el) as MessageUpdate; if (update.type !== "stream") { messageUpdates.push(update); } if (update.type === "finalAnswer") { finalAnswer = update.text; reader.cancel(); loading = false; pending = false; invalidate(UrlDependency.Conversation); } else if (update.type === "stream") { pending = false; let lastMessage = messages[messages.length - 1]; if (lastMessage.from !== "assistant") { messages = [ ...messages, { from: "assistant", id: randomUUID(), content: update.token }, ]; } else { lastMessage.content += update.token; messages = [...messages]; } } else if (update.type === "webSearch") { webSearchMessages = [...webSearchMessages, update]; } else if (update.type === "status") { if (update.status === "title" && update.message) { const conv = data.conversations.find(({ id }) => id === $page.params.id); if (conv) { conv.title = update.message; $titleUpdate = { title: update.message, convId: $page.params.id, }; } } else if (update.status === "error") { $error = update.message ?? 
"An error has occurred"; } } else if (update.type === "error") { error.set(update.message); reader.cancel(); } } catch (parseError) { // in case of parsing error we wait for the next message if (el === inputs[inputs.length - 1]) { prev_input_chunk.push(el); } return; } }); }); } webSearchMessages = []; const lastMessage = messages[messages.length - 1]; lastMessage.updates = messageUpdates; await invalidate(UrlDependency.ConversationList); } catch (err) { if (err instanceof Error && err.message.includes("overloaded")) { $error = "Too much traffic, please try again."; } else if (err instanceof Error && err.message.includes("429")) { $error = ERROR_MESSAGES.rateLimited; } else if (err instanceof Error) { $error = err.message; } else { $error = ERROR_MESSAGES.default; } console.error(err); } finally { loading = false; pending = false; } } async function voteMessage(score: Message["score"], messageId: string) { let conversationId = $page.params.id; let oldScore: Message["score"] | undefined; // optimistic update to avoid waiting for the server messages = messages.map((message) => { if (message.id === messageId) { oldScore = message.score; return { ...message, score }; } return message; }); try { await fetch(`${base}/conversation/${conversationId}/message/${messageId}/vote`, { method: "POST", body: JSON.stringify({ score }), }); } catch { // revert score on any error messages = messages.map((message) => { return message.id !== messageId ? message : { ...message, score: oldScore }; }); } } onMount(async () => { // only used in case of creating new conversations (from the parent POST endpoint) if ($pendingMessage) { files = $pendingMessage.files; await writeMessage({ prompt: $pendingMessage.content }); $pendingMessage = undefined; } }); async function onMessage(event: CustomEvent<string>) { if (!data.shared) { await writeMessage({ prompt: event.detail }); } else { await convFromShared() .then(async (convId) => { await goto(`${base}/conversation/${convId}`, { invalidateAll: true }); }) .then(async () => await writeMessage({ prompt: event.detail })) .finally(() => (loading = false)); } } async function onRetry(event: CustomEvent<{ id: Message["id"]; content: string }>) { if (!data.shared) { await writeMessage({ prompt: event.detail.content, messageId: event.detail.id, isRetry: true, }); } else { await convFromShared() .then(async (convId) => { await goto(`${base}/conversation/${convId}`, { invalidateAll: true }); }) .then( async () => await writeMessage({ prompt: event.detail.content, messageId: event.detail.id, isRetry: true, }) ) .finally(() => (loading = false)); } } async function onContinue(event: CustomEvent<{ id: Message["id"] }>) { if (!data.shared) { writeMessage({ messageId: event.detail.id, isContinue: true }); } } $: $page.params.id, (($isAborted = true), (loading = false)); $: title = data.conversations.find((conv) => conv.id === $page.params.id)?.title ?? 
data.title; </script> <svelte:head> <title>{title}</title> <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/[email protected]/dist/katex.min.css" integrity="sha384-GvrOXuhMATgEsSwCs4smul74iXGOixntILdUW9XmUC6+HX0sLNAK3q71HotJqlAn" crossorigin="anonymous" /> </svelte:head> <ChatWindow {loading} {pending} {messages} shared={data.shared} preprompt={data.preprompt} bind:webSearchMessages bind:files on:message={onMessage} on:retry={onRetry} on:continue={onContinue} on:vote={(event) => voteMessage(event.detail.score, event.detail.id)} on:share={() => shareConversation($page.params.id, data.title)} on:stop={() => (($isAborted = true), (loading = false))} models={data.models} currentModel={findCurrentModel([...data.models, ...data.oldModels], data.model)} assistant={data.assistant} />
chat-ui/src/routes/conversation/[id]/+page.svelte/0
{ "file_path": "chat-ui/src/routes/conversation/[id]/+page.svelte", "repo_id": "chat-ui", "token_count": 4566 }
49
<script lang="ts"> import { onMount } from "svelte"; import { base } from "$app/paths"; import { clickOutside } from "$lib/actions/clickOutside"; import { afterNavigate, goto } from "$app/navigation"; import { page } from "$app/stores"; import { useSettingsStore } from "$lib/stores/settings"; import CarbonClose from "~icons/carbon/close"; import CarbonArrowUpRight from "~icons/carbon/ArrowUpRight"; import CarbonCheckmark from "~icons/carbon/checkmark"; import CarbonAdd from "~icons/carbon/add"; import UserIcon from "~icons/carbon/user"; import { fade, fly } from "svelte/transition"; export let data; let previousPage: string = base; let assistantsSection: HTMLHeadingElement; onMount(() => { if ($page.params?.assistantId) { assistantsSection.scrollIntoView(); } }); afterNavigate(({ from }) => { if (!from?.url.pathname.includes("settings")) { previousPage = from?.url.pathname || previousPage; } }); const settings = useSettingsStore(); </script> <div class="fixed inset-0 flex items-center justify-center bg-black/80 backdrop-blur-sm dark:bg-black/50" in:fade > <dialog in:fly={{ y: 100 }} open use:clickOutside={() => { goto(previousPage); }} class="xl: z-10 grid h-[95dvh] w-[90dvw] grid-cols-1 content-start gap-x-8 overflow-hidden rounded-2xl bg-white p-4 shadow-2xl outline-none sm:h-[80dvh] md:grid-cols-3 md:grid-rows-[auto,1fr] md:p-8 xl:w-[1200px] 2xl:h-[70dvh]" > <div class="col-span-1 mb-4 flex items-center justify-between md:col-span-3"> <h2 class="text-xl font-bold">Settings</h2> <button class="btn rounded-lg" on:click={() => { goto(previousPage); }} > <CarbonClose class="text-xl text-gray-900 hover:text-black" /> </button> </div> <div class="col-span-1 flex flex-col overflow-y-auto whitespace-nowrap max-md:-mx-4 max-md:h-[245px] max-md:border max-md:border-b-2 md:pr-6" > <h3 class="pb-3 pl-3 pt-2 text-[.8rem] text-gray-800 sm:pl-1">Models</h3> {#each data.models.filter((el) => !el.unlisted) as model} <a href="{base}/settings/{model.id}" class="group flex h-10 flex-none items-center gap-2 pl-3 pr-2 text-sm text-gray-500 hover:bg-gray-100 md:rounded-xl {model.id === $page.params.model ? '!bg-gray-100 !text-gray-800' : ''}" > <div class="truncate">{model.displayName}</div> {#if model.id === $settings.activeModel} <div class="ml-auto rounded-lg bg-black px-2 py-1.5 text-xs font-semibold leading-none text-white" > Active </div> {/if} </a> {/each} <!-- if its huggingchat, the number of assistants owned by the user must be non-zero to show the UI --> {#if data.enableAssistants} <h3 bind:this={assistantsSection} class="pb-3 pl-3 pt-5 text-[.8rem] text-gray-800 sm:pl-1"> Assistants </h3> {#if !data.loginEnabled || (data.loginEnabled && !!data.user)} <a href="{base}/settings/assistants/new" class="group flex h-10 flex-none items-center gap-2 pl-3 pr-2 text-sm text-gray-500 hover:bg-gray-100 md:rounded-xl {$page.url.pathname === `${base}/settings/assistants/new` ? '!bg-gray-100 !text-gray-800' : ''}" > <CarbonAdd /> <div class="truncate">Create new assistant</div> </a> {/if} {#each data.assistants as assistant} <a href="{base}/settings/assistants/{assistant._id.toString()}" class="group flex h-10 flex-none items-center gap-2 pl-2 pr-2 text-sm text-gray-500 hover:bg-gray-100 md:rounded-xl {assistant._id.toString() === $page.params.assistantId ? 
'!bg-gray-100 !text-gray-800' : ''}" > {#if assistant.avatar} <img src="{base}/settings/assistants/{assistant._id.toString()}/avatar.jpg?hash={assistant.avatar}" alt="Avatar" class="h-6 w-6 rounded-full object-cover" /> {:else} <div class="flex size-6 items-center justify-center rounded-full bg-gray-300 font-bold uppercase text-gray-500" > {assistant.name[0]} </div> {/if} <div class="truncate">{assistant.name}</div> {#if assistant._id.toString() === $settings.activeModel} <div class="ml-auto rounded-lg bg-black px-2 py-1.5 text-xs font-semibold leading-none text-white" > Active </div> {/if} </a> {/each} <a href="{base}/assistants" class="group flex h-10 flex-none items-center gap-2 pl-3 pr-2 text-sm text-gray-500 hover:bg-gray-100 md:rounded-xl" ><CarbonArrowUpRight class="mr-1.5 shrink-0 text-xs " /> <div class="truncate">Browse Assistants</div> </a> {/if} <a href="{base}/settings" class="group mt-auto flex h-10 flex-none items-center gap-2 pl-3 pr-2 text-sm text-gray-500 hover:bg-gray-100 max-md:order-first md:rounded-xl {$page.url.pathname === `${base}/settings` ? '!bg-gray-100 !text-gray-800' : ''}" > <UserIcon class="text-sm" /> Application Settings </a> </div> <div class="col-span-1 overflow-y-auto px-4 max-md:-mx-4 max-md:pt-6 md:col-span-2"> <slot /> </div> {#if $settings.recentlySaved} <div class="absolute bottom-4 right-4 m-2 flex items-center gap-1.5 rounded-full border border-gray-300 bg-gray-200 px-3 py-1 text-black" > <CarbonCheckmark class="text-green-500" /> Saved </div> {/if} </dialog> </div>
chat-ui/src/routes/settings/+layout.svelte/0
{ "file_path": "chat-ui/src/routes/settings/+layout.svelte", "repo_id": "chat-ui", "token_count": 2424 }
50
# This is the list of HuggingFace Datasets authors for copyright purposes. # # This does not necessarily list everyone who has contributed code, since in # some cases, their employer may be the copyright holder. To see the full list # of contributors, see the revision history in source control. Google Inc. HuggingFace Inc.
datasets/AUTHORS/0
{ "file_path": "datasets/AUTHORS", "repo_id": "datasets", "token_count": 78 }
51
# Create an image dataset There are two methods for creating and sharing an image dataset. This guide will show you how to: * Create an image dataset with `ImageFolder` and some metadata. This is a no-code solution for quickly creating an image dataset with several thousand images. * Create an image dataset by writing a loading script. This method is a bit more involved, but you have greater flexibility over how a dataset is defined, downloaded, and generated which can be useful for more complex or large scale image datasets. <Tip> You can control access to your dataset by requiring users to share their contact information first. Check out the [Gated datasets](https://huggingface.co/docs/hub/datasets-gated) guide for more information about how to enable this feature on the Hub. </Tip> ## ImageFolder The `ImageFolder` is a dataset builder designed to quickly load an image dataset with several thousand images without requiring you to write any code. <Tip> 💡 Take a look at the [Split pattern hierarchy](repository_structure#split-pattern-hierarchy) to learn more about how `ImageFolder` creates dataset splits based on your dataset repository structure. </Tip> `ImageFolder` automatically infers the class labels of your dataset based on the directory name. Store your dataset in a directory structure like: ``` folder/train/dog/golden_retriever.png folder/train/dog/german_shepherd.png folder/train/dog/chihuahua.png folder/train/cat/maine_coon.png folder/train/cat/bengal.png folder/train/cat/birman.png ``` Then users can load your dataset by specifying `imagefolder` in [`load_dataset`] and the directory in `data_dir`: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("imagefolder", data_dir="/path/to/folder") ``` You can also use `imagefolder` to load datasets involving multiple splits. To do so, your dataset directory should have the following structure: ``` folder/train/dog/golden_retriever.png folder/train/cat/maine_coon.png folder/test/dog/german_shepherd.png folder/test/cat/bengal.png ``` <Tip warning={true}> If all image files are contained in a single directory or if they are not on the same level of directory structure, `label` column won't be added automatically. If you need it, set `drop_labels=False` explicitly. </Tip> If there is additional information you'd like to include about your dataset, like text captions or bounding boxes, add it as a `metadata.csv` file in your folder. This lets you quickly create datasets for different computer vision tasks like text captioning or object detection. You can also use a JSONL file `metadata.jsonl`. 
``` folder/train/metadata.csv folder/train/0001.png folder/train/0002.png folder/train/0003.png ``` You can also zip your images: ``` folder/metadata.csv folder/train.zip folder/test.zip folder/valid.zip ``` Your `metadata.csv` file must have a `file_name` column which links image files with their metadata: ```csv file_name,additional_feature 0001.png,This is a first value of a text feature you added to your images 0002.png,This is a second value of a text feature you added to your images 0003.png,This is a third value of a text feature you added to your images ``` or using `metadata.jsonl`: ```jsonl {"file_name": "0001.png", "additional_feature": "This is a first value of a text feature you added to your images"} {"file_name": "0002.png", "additional_feature": "This is a second value of a text feature you added to your images"} {"file_name": "0003.png", "additional_feature": "This is a third value of a text feature you added to your images"} ``` <Tip> If metadata files are present, the inferred labels based on the directory name are dropped by default. To include those labels, set `drop_labels=False` in `load_dataset`. </Tip> ### Image captioning Image captioning datasets have text describing an image. An example `metadata.csv` may look like: ```csv file_name,text 0001.png,This is a golden retriever playing with a ball 0002.png,A german shepherd 0003.png,One chihuahua ``` Load the dataset with `ImageFolder`, and it will create a `text` column for the image captions: ```py >>> dataset = load_dataset("imagefolder", data_dir="/path/to/folder", split="train") >>> dataset[0]["text"] "This is a golden retriever playing with a ball" ``` ### Object detection Object detection datasets have bounding boxes and categories identifying objects in an image. An example `metadata.jsonl` may look like: ```jsonl {"file_name": "0001.png", "objects": {"bbox": [[302.0, 109.0, 73.0, 52.0]], "categories": [0]}} {"file_name": "0002.png", "objects": {"bbox": [[810.0, 100.0, 57.0, 28.0]], "categories": [1]}} {"file_name": "0003.png", "objects": {"bbox": [[160.0, 31.0, 248.0, 616.0], [741.0, 68.0, 202.0, 401.0]], "categories": [2, 2]}} ``` Load the dataset with `ImageFolder`, and it will create a `objects` column with the bounding boxes and the categories: ```py >>> dataset = load_dataset("imagefolder", data_dir="/path/to/folder", split="train") >>> dataset[0]["objects"] {"bbox": [[302.0, 109.0, 73.0, 52.0]], "categories": [0]} ``` ### Upload dataset to the Hub Once you've created a dataset, you can share it to the Hub with the [`~datasets.DatasetDict.push_to_hub`] method. Make sure you have the [huggingface_hub](https://huggingface.co/docs/huggingface_hub/index) library installed and you're logged in to your Hugging Face account (see the [Upload with Python tutorial](upload_dataset#upload-with-python) for more details). Upload your dataset with [`~datasets.DatasetDict.push_to_hub`]: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("imagefolder", data_dir="/path/to/folder", split="train") >>> dataset.push_to_hub("stevhliu/my-image-captioning-dataset") ``` ## WebDataset The [WebDataset](https://github.com/webdataset/webdataset) format is based on TAR archives and is suitable for big image datasets. Indeed you can group your images in TAR archives (e.g. 1GB of images per TAR archive) and have thousands of TAR archives: ``` folder/train/00000.tar folder/train/00001.tar folder/train/00002.tar ... 
```

In the archives, each example is made of files sharing the same prefix:

```
e39871fd9fd74f55.jpg
e39871fd9fd74f55.json
f18b91585c4d3f3e.jpg
f18b91585c4d3f3e.json
ede6e66b2fb59aab.jpg
ede6e66b2fb59aab.json
ed600d57fcee4f94.jpg
ed600d57fcee4f94.json
...
```

You can store your image labels/captions/bounding boxes in JSON or text files, for example. For more details on the WebDataset format and the Python library, please check the [WebDataset documentation](https://webdataset.github.io/webdataset).

Load your WebDataset and it will create one column per file suffix (here "jpg" and "json"):

```python
>>> from datasets import load_dataset

>>> dataset = load_dataset("webdataset", data_dir="/path/to/folder", split="train")
>>> dataset[0]["json"]
{"bbox": [[302.0, 109.0, 73.0, 52.0]], "categories": [0]}
```

## Loading script

Write a dataset loading script to share a dataset. It defines a dataset's splits and configurations, and handles downloading and generating a dataset. The script is located in the same folder or repository as the dataset and should have the same name.

```
my_dataset/
├── README.md
├── my_dataset.py
└── data/  # optional, may contain your images or TAR archives
```

This structure allows your dataset to be loaded in one line:

```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("path/to/my_dataset")
```

This guide will show you how to create a dataset loading script for image datasets, which is a bit different from <a class="underline decoration-green-400 decoration-2 font-semibold" href="./dataset_script">creating a loading script for text datasets</a>. You'll learn how to:

* Create a dataset builder class.
* Create dataset configurations.
* Add dataset metadata.
* Download and define the dataset splits.
* Generate the dataset.
* Generate the dataset metadata (optional).
* Upload the dataset to the Hub.

The best way to learn is to open up an existing image dataset loading script, like [Food-101](https://huggingface.co/datasets/food101/blob/main/food101.py), and follow along!

<Tip>

To help you get started, we created a loading script [template](https://github.com/huggingface/datasets/blob/main/templates/new_dataset_script.py) you can copy and use as a starting point!

</Tip>

### Create a dataset builder class

[`GeneratorBasedBuilder`] is the base class for datasets generated from a dictionary generator. Within this class, there are three methods to help create your dataset:

* `info` stores information about your dataset like its description, license, and features.
* `split_generators` downloads the dataset and defines its splits.
* `generate_examples` generates the images and labels for each split.

Start by creating your dataset class as a subclass of [`GeneratorBasedBuilder`] and add the three methods. Don't worry about filling in each of these methods yet, you'll develop those over the next few sections:

```py
class Food101(datasets.GeneratorBasedBuilder):
    """Food-101 Images dataset"""

    def _info(self):

    def _split_generators(self, dl_manager):

    def _generate_examples(self, images, metadata_path):
```

#### Multiple configurations

In some cases, a dataset may have more than one configuration. For example, if you check out the [Imagenette dataset](https://huggingface.co/datasets/frgfm/imagenette), you'll notice there are three subsets.

To create different configurations, use the [`BuilderConfig`] class to create a subclass for your dataset.
Provide the links to download the images and labels in `data_url` and `metadata_urls`:

```py
class Food101Config(datasets.BuilderConfig):
    """Builder Config for Food-101"""

    def __init__(self, data_url, metadata_urls, **kwargs):
        """BuilderConfig for Food-101.

        Args:
          data_url: `string`, url to download the zip file from.
          metadata_urls: dictionary with keys 'train' and 'validation' containing the archive metadata URLs
          **kwargs: keyword arguments forwarded to super.
        """
        super(Food101Config, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.data_url = data_url
        self.metadata_urls = metadata_urls
```

Now you can define your subsets at the top of [`GeneratorBasedBuilder`]. Imagine you want to create two subsets in the Food-101 dataset based on whether it is a breakfast or dinner food.

1. Define your subsets with `Food101Config` in a list in `BUILDER_CONFIGS`.
2. For each configuration, provide a name, description, and where to download the images and labels from.

```py
class Food101(datasets.GeneratorBasedBuilder):
    """Food-101 Images dataset"""

    BUILDER_CONFIGS = [
        Food101Config(
            name="breakfast",
            description="Food types commonly eaten during breakfast.",
            data_url="https://link-to-breakfast-foods.zip",
            metadata_urls={
                "train": "https://link-to-breakfast-foods-train.txt",
                "validation": "https://link-to-breakfast-foods-validation.txt"
            },
        ),
        Food101Config(
            name="dinner",
            description="Food types commonly eaten during dinner.",
            data_url="https://link-to-dinner-foods.zip",
            metadata_urls={
                "train": "https://link-to-dinner-foods-train.txt",
                "validation": "https://link-to-dinner-foods-validation.txt"
            },
        ),
        ...
    ]
```

Now if users want to load the `breakfast` configuration, they can use the configuration name:

```py
>>> from datasets import load_dataset
>>> ds = load_dataset("food101", "breakfast", split="train")
```

### Add dataset metadata

Adding information about your dataset is useful for users to learn more about it. This information is stored in the [`DatasetInfo`] class which is returned by the `info` method. Users can access this information by:

```py
>>> from datasets import load_dataset_builder
>>> ds_builder = load_dataset_builder("food101")
>>> ds_builder.info
```

There is a lot of information you can specify about your dataset, but some important ones to include are:

1. `description` provides a concise description of the dataset.
2. `features` specify the dataset column types. Since you're creating an image loading script, you'll need to include the [`Image`] feature.
3. `supervised_keys` specify the input feature and label.
4. `homepage` provides a link to the dataset homepage.
5. `citation` is a BibTeX citation of the dataset.
6. `license` states the dataset's license.

<Tip>

You'll notice a lot of the dataset information is defined earlier in the loading script which makes it easier to read. There are also other [`~Datasets.Features`] you can input, so be sure to check out the full list for more details.

</Tip>

```py
def _info(self):
    return datasets.DatasetInfo(
        description=_DESCRIPTION,
        features=datasets.Features(
            {
                "image": datasets.Image(),
                "label": datasets.ClassLabel(names=_NAMES),
            }
        ),
        supervised_keys=("image", "label"),
        homepage=_HOMEPAGE,
        citation=_CITATION,
        license=_LICENSE,
        task_templates=[ImageClassification(image_column="image", label_column="label")],
    )
```

### Download and define the dataset splits

Now that you've added some information about your dataset, the next step is to download the dataset and generate the splits.
Use the [`DownloadManager.download`] method to download the dataset and any other metadata you'd like to associate with it. This method accepts:

    * a name of a file inside a Hub dataset repository (in other words, the `data/` folder)
    * a URL to a file hosted somewhere else
    * a list or dictionary of file names or URLs

    In the Food-101 loading script, you'll notice again the URLs are defined earlier in the script.

2. After you've downloaded the dataset, use the [`SplitGenerator`] to organize the images and labels in each split. Name each split with a standard name like: `Split.TRAIN`, `Split.TEST`, and `Split.VALIDATION`.

    In the `gen_kwargs` parameter, specify the file paths to the `images` to iterate over and load. If necessary, you can use [`DownloadManager.iter_archive`] to iterate over images in TAR archives. You can also specify the associated labels in the `metadata_path`. The `images` and `metadata_path` are passed on to the next step, where you'll actually generate the dataset.

<Tip warning={true}>

To stream a TAR archive file, you need to use [`DownloadManager.iter_archive`]! The [`DownloadManager.download_and_extract`] function does not support TAR archives in streaming mode.

</Tip>

```py
def _split_generators(self, dl_manager):
    archive_path = dl_manager.download(_BASE_URL)
    split_metadata_paths = dl_manager.download(_METADATA_URLS)
    return [
        datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={
                "images": dl_manager.iter_archive(archive_path),
                "metadata_path": split_metadata_paths["train"],
            },
        ),
        datasets.SplitGenerator(
            name=datasets.Split.VALIDATION,
            gen_kwargs={
                "images": dl_manager.iter_archive(archive_path),
                "metadata_path": split_metadata_paths["test"],
            },
        ),
    ]
```

### Generate the dataset

The last method in the [`GeneratorBasedBuilder`] class actually generates the images and labels in the dataset. It yields a dataset according to the structure specified in `features` from the `info` method. As you can see, `generate_examples` accepts the `images` and `metadata_path` from the previous method as arguments.

<Tip warning={true}>

To stream a TAR archive file, the `metadata_path` needs to be opened and read first. TAR files are accessed and yielded sequentially. This means you need to have the metadata information in hand first so you can yield it with its corresponding image.

</Tip>

Now you can write a function for opening and loading examples from the dataset:

```py
def _generate_examples(self, images, metadata_path):
    """Generate images and labels for splits."""
    with open(metadata_path, encoding="utf-8") as f:
        files_to_keep = set(f.read().split("\n"))
    for file_path, file_obj in images:
        if file_path.startswith(_IMAGES_DIR):
            if file_path[len(_IMAGES_DIR) : -len(".jpg")] in files_to_keep:
                label = file_path.split("/")[2]
                yield file_path, {
                    "image": {"path": file_path, "bytes": file_obj.read()},
                    "label": label,
                }
```

### Generate the dataset metadata (optional)

The dataset metadata can be generated and stored in the dataset card (`README.md` file).

Run the following command to generate your dataset metadata in `README.md` and make sure your new loading script works correctly:

```bash
datasets-cli test path/to/<your-dataset-loading-script> --save_info --all_configs
```

If your loading script passed the test, you should now have the `dataset_info` YAML fields in the header of the `README.md` file in your dataset folder.

### Upload the dataset to the Hub

Once your script is ready, [create a dataset card](./dataset_card) and [upload it to the Hub](./share).
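As a rough sketch of that last step (the repository name below is hypothetical, and it assumes `huggingface_hub` is installed and you are logged in):

```py
>>> from huggingface_hub import HfApi

>>> api = HfApi()
>>> api.create_repo("my-username/my_dataset", repo_type="dataset", exist_ok=True)
>>> api.upload_folder(folder_path="my_dataset", repo_id="my-username/my_dataset", repo_type="dataset")
```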
Congratulations, you can now load your dataset from the Hub! 🥳 ```py >>> from datasets import load_dataset >>> load_dataset("<username>/my_dataset") ```
datasets/docs/source/image_dataset.mdx/0
{ "file_path": "datasets/docs/source/image_dataset.mdx", "repo_id": "datasets", "token_count": 5724 }
52
# Table Classes Each `Dataset` object is backed by a PyArrow Table. A Table can be loaded from either the disk (memory mapped) or in memory. Several Table types are available, and they all inherit from [`table.Table`]. ## Table [[autodoc]] datasets.table.Table - validate - equals - to_batches - to_pydict - to_pandas - to_string - field - column - itercolumns - schema - columns - num_columns - num_rows - shape - nbytes ## InMemoryTable [[autodoc]] datasets.table.InMemoryTable - validate - equals - to_batches - to_pydict - to_pandas - to_string - field - column - itercolumns - schema - columns - num_columns - num_rows - shape - nbytes - column_names - slice - filter - flatten - combine_chunks - cast - replace_schema_metadata - add_column - append_column - remove_column - set_column - rename_columns - select - drop - from_file - from_buffer - from_pandas - from_arrays - from_pydict - from_batches ## MemoryMappedTable [[autodoc]] datasets.table.MemoryMappedTable - validate - equals - to_batches - to_pydict - to_pandas - to_string - field - column - itercolumns - schema - columns - num_columns - num_rows - shape - nbytes - column_names - slice - filter - flatten - combine_chunks - cast - replace_schema_metadata - add_column - append_column - remove_column - set_column - rename_columns - select - drop - from_file ## ConcatenationTable [[autodoc]] datasets.table.ConcatenationTable - validate - equals - to_batches - to_pydict - to_pandas - to_string - field - column - itercolumns - schema - columns - num_columns - num_rows - shape - nbytes - column_names - slice - filter - flatten - combine_chunks - cast - replace_schema_metadata - add_column - append_column - remove_column - set_column - rename_columns - select - drop - from_blocks - from_tables ## Utils [[autodoc]] datasets.table.concat_tables [[autodoc]] datasets.table.list_table_cache_files
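As an illustrative sketch (the toy columns below are made up) of how the table backing a [`Dataset`] can be reached and inspected through these classes:

```py
>>> from datasets import Dataset

>>> ds = Dataset.from_dict({"idx": [1, 2, 3], "text": ["a", "b", "c"]})
>>> table = ds.data  # the backing table, an InMemoryTable in this case
>>> table.num_rows, table.column_names
(3, ['idx', 'text'])
>>> df = table.to_pandas()  # hand the underlying Arrow data to pandas
```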
datasets/docs/source/package_reference/table_classes.mdx/0
{ "file_path": "datasets/docs/source/package_reference/table_classes.mdx", "repo_id": "datasets", "token_count": 1029 }
53
# Use with Spark This document is a quick introduction to using 🤗 Datasets with Spark, with a particular focus on how to load a Spark DataFrame into a [`Dataset`] object. From there, you have fast access to any element and you can use it as a data loader to train models. ## Load from Spark A [`Dataset`] object is a wrapper of an Arrow table, which allows fast reads from arrays in the dataset to PyTorch, TensorFlow and JAX tensors. The Arrow table is memory mapped from disk, which can load datasets bigger than your available RAM. You can get a [`Dataset`] from a Spark DataFrame using [`Dataset.from_spark`]: ```py >>> from datasets import Dataset >>> df = spark.createDataFrame( ... data=[[1, "Elia"], [2, "Teo"], [3, "Fang"]], ... columns=["id", "name"], ... ) >>> ds = Dataset.from_spark(df) ``` The Spark workers write the dataset on disk in a cache directory as Arrow files, and the [`Dataset`] is loaded from there. Alternatively, you can skip materialization by using [`IterableDataset.from_spark`], which returns an [`IterableDataset`]: ```py >>> from datasets import IterableDataset >>> df = spark.createDataFrame( ... data=[[1, "Elia"], [2, "Teo"], [3, "Fang"]], ... columns=["id", "name"], ... ) >>> ds = IterableDataset.from_spark(df) >>> print(next(iter(ds))) {"id": 1, "name": "Elia"} ``` ### Caching When using [`Dataset.from_spark`], the resulting [`Dataset`] is cached; if you call [`Dataset.from_spark`] multiple times on the same DataFrame it won't re-run the Spark job that writes the dataset as Arrow files on disk. You can set the cache location by passing `cache_dir=` to [`Dataset.from_spark`]. Make sure to use a disk that is available to both your workers and your current machine (the driver). <Tip warning={true}> In a different session, a Spark DataFrame doesn't have the same [semantic hash](https://spark.apache.org/docs/3.2.0/api/python/reference/api/pyspark.sql.DataFrame.semanticHash.html), and it will rerun a Spark job and store it in a new cache. </Tip> ### Feature types If your dataset is made of images, audio data or N-dimensional arrays, you can specify the `features=` argument in [`Dataset.from_spark`] (or [`IterableDataset.from_spark`]): ```py >>> from datasets import Dataset, Features, Image, Value >>> data = [(0, open("image.png", "rb").read())] >>> df = spark.createDataFrame(data, "idx: int, image: binary") >>> # Also works if you have arrays >>> # data = [(0, np.zeros(shape=(32, 32, 3), dtype=np.int32).tolist())] >>> # df = spark.createDataFrame(data, "idx: int, image: array<array<array<int>>>") >>> features = Features({"idx": Value("int64"), "image": Image()}) >>> dataset = Dataset.from_spark(df, features=features) >>> dataset[0] {'idx': 0, 'image': <PIL.PngImagePlugin.PngImageFile image mode=RGB size=32x32>} ``` You can check the [`Features`] documentation to know about all the feature types available.
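To tie the caching note above to code, here is a minimal sketch of pointing the cache to storage visible to both the Spark workers and the driver (the path below is hypothetical):

```py
>>> ds = Dataset.from_spark(df, cache_dir="/mnt/shared/datasets_cache")
```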
datasets/docs/source/use_with_spark.mdx/0
{ "file_path": "datasets/docs/source/use_with_spark.mdx", "repo_id": "datasets", "token_count": 962 }
54
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """The CodeEval metric estimates the pass@k metric for code synthesis. This is an evaluation harness for the HumanEval problem solving dataset described in the paper "Evaluating Large Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).""" import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness _CITATION = """\ @misc{chen2021evaluating, title={Evaluating Large Language Models Trained on Code}, author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \ and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \ and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \ and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \ and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \ and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \ and Mohammad Bavarian and Clemens Winter and Philippe Tillet \ and Felipe Petroski Such and Dave Cummings and Matthias Plappert \ and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \ and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \ and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \ and William Saunders and Christopher Hesse and Andrew N. Carr \ and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \ and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \ and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \ and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba}, year={2021}, eprint={2107.03374}, archivePrefix={arXiv}, primaryClass={cs.LG} } """ _DESCRIPTION = """\ This metric implements the evaluation harness for the HumanEval problem solving dataset described in the paper "Evaluating Large Language Models Trained on Code" (https://arxiv.org/abs/2107.03374). """ _KWARGS_DESCRIPTION = """ Calculates how good are predictions given some references, using certain scores Args: predictions: list of candidates to evaluate. Each candidates should be a list of strings with several code candidates to solve the problem. references: a list with a test for each prediction. Each test should evaluate the correctness of a code candidate. k: number of code candidates to consider in the evaluation (Default: [1, 10, 100]) num_workers: number of workers used to evaluate the canidate programs (Default: 4). 
    timeout: evaluation timeout in seconds for each candidate program (Default: 3.0).
Returns:
    pass_at_k: dict with pass rates for each k
    results: dict with granular results of each unittest
Examples:
    >>> code_eval = datasets.load_metric("code_eval")
    >>> test_cases = ["assert add(2,3)==5"]
    >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
    >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
    >>> print(pass_at_k)
    {'pass@1': 0.5, 'pass@2': 1.0}
"""


_WARNING = """
################################################################################
                                  !!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).

Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:

>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"

################################################################################\
"""

_LICENSE = """The MIT License

Copyright (c) OpenAI (https://openai.com)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, # This defines the format of each prediction and reference features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string")), "references": datasets.Value("string"), } ), homepage="https://github.com/openai/human-eval", codebase_urls=["https://github.com/openai/human-eval"], reference_urls=["https://github.com/openai/human-eval"], license=_LICENSE, ) def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0): """Returns the scores""" if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1": raise ValueError(_WARNING) if os.name == "nt": raise NotImplementedError("This metric is currently not supported on Windows.") with ThreadPoolExecutor(max_workers=num_workers) as executor: futures = [] completion_id = Counter() n_samples = 0 results = defaultdict(list) for task_id, (candidates, test_case) in enumerate(zip(predictions, references)): for candidate in candidates: test_program = candidate + "\n" + test_case args = (test_program, timeout, task_id, completion_id[task_id]) future = executor.submit(check_correctness, *args) futures.append(future) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(futures): result = future.result() results[result["task_id"]].append((result["completion_id"], result)) total, correct = [], [] for result in results.values(): result.sort() passed = [r[1]["passed"] for r in result] total.append(len(passed)) correct.append(sum(passed)) total = np.array(total) correct = np.array(correct) ks = k pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()} return pass_at_k, results def estimate_pass_at_k(num_samples, num_correct, k): """Estimates pass@k of each problem and returns them in an array.""" def estimator(n: int, c: int, k: int) -> float: """Calculates 1 - comb(n - c, k) / comb(n, k).""" if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1)) if isinstance(num_samples, int): num_samples_it = itertools.repeat(num_samples, len(num_correct)) else: assert len(num_samples) == len(num_correct) num_samples_it = iter(num_samples) return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
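For reference, `estimate_pass_at_k` above implements the unbiased pass@k estimator from the HumanEval paper: with $n$ candidates per task of which $c$ pass,

$$\text{pass@}k \;=\; 1 - \frac{\binom{n-c}{k}}{\binom{n}{k}} \;=\; 1 - \prod_{i=n-c+1}^{n}\left(1 - \frac{k}{i}\right),$$

which is exactly the product form evaluated by `np.prod(1.0 - k / np.arange(n - c + 1, n + 1))` and avoids computing large binomial coefficients directly.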
datasets/metrics/code_eval/code_eval.py/0
{ "file_path": "datasets/metrics/code_eval/code_eval.py", "repo_id": "datasets", "token_count": 3175 }
55
# Copyright 2022 The HuggingFace Datasets Authors and the current metric script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FrugalScore metric."""

import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, TrainingArguments

import datasets


_CITATION = """\
@article{eddine2021frugalscore,
  title={FrugalScore: Learning Cheaper, Lighter and Faster Evaluation Metrics for Automatic Text Generation},
  author={Eddine, Moussa Kamal and Shang, Guokan and Tixier, Antoine J-P and Vazirgiannis, Michalis},
  journal={arXiv preprint arXiv:2110.08559},
  year={2021}
}
"""

_DESCRIPTION = """\
FrugalScore is a reference-based metric for NLG model evaluation. It is based on a distillation approach that allows learning
a fixed, low-cost version of any expensive NLG metric, while retaining most of its original performance.
"""

_KWARGS_DESCRIPTION = """
Calculates how good the predictions are given some references, using certain scores.
Args:
    predictions (list of str): list of predictions to score. Each prediction
        should be a string.
    references (list of str): list of references, one for each prediction. Each
        reference should be a string.
    batch_size (int): the batch size for predictions.
    max_length (int): maximum sequence length.
    device (str): either gpu or cpu
Returns:
    scores (list of float): list of scores.
Examples:
    >>> frugalscore = datasets.load_metric("frugalscore")
    >>> results = frugalscore.compute(predictions=['hello there', 'huggingface'], references=['hello world', 'hugging face'])
    >>> print([round(s, 3) for s in results["scores"]])
    [0.631, 0.645]
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class FRUGALSCORE(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/moussaKam/FrugalScore",
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            checkpoint = "moussaKam/frugalscore_tiny_bert-base_bert-score"
        else:
            checkpoint = self.config_name
        self.model = AutoModelForSequenceClassification.from_pretrained(checkpoint)
        self.tokenizer = AutoTokenizer.from_pretrained(checkpoint)

    def _compute(
        self,
        predictions,
        references,
        batch_size=32,
        max_length=128,
        device=None,
    ):
        """Returns the scores"""
        assert len(predictions) == len(
            references
        ), "predictions and references should have the same number of sentences."
        if device is not None:
            assert device in ["gpu", "cpu"], "device should be either gpu or cpu."
        else:
            device = "gpu" if torch.cuda.is_available() else "cpu"
        training_args = TrainingArguments(
            "trainer",
            fp16=(device == "gpu"),
            per_device_eval_batch_size=batch_size,
            report_to="all",
            no_cuda=(device == "cpu"),
            log_level="warning",
        )
        dataset = {"sentence1": predictions, "sentence2": references}
        raw_datasets = datasets.Dataset.from_dict(dataset)

        def tokenize_function(data):
            return self.tokenizer(
                data["sentence1"], data["sentence2"], max_length=max_length, truncation=True, padding=True
            )

        tokenized_datasets = raw_datasets.map(tokenize_function, batched=True)
        # `remove_columns` returns a new dataset, so the result must be reassigned
        tokenized_datasets = tokenized_datasets.remove_columns(["sentence1", "sentence2"])
        trainer = Trainer(self.model, training_args, tokenizer=self.tokenizer)
        predictions = trainer.predict(tokenized_datasets)
        return {"scores": list(predictions.predictions.squeeze(-1))}
datasets/metrics/frugalscore/frugalscore.py/0
{ "file_path": "datasets/metrics/frugalscore/frugalscore.py", "repo_id": "datasets", "token_count": 1754 }
56
# Copyright 2022 The HuggingFace Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Mean IoU (Intersection-over-Union) metric.""" from typing import Dict, Optional import numpy as np import datasets _DESCRIPTION = """ IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation, the mean IoU of the image is calculated by taking the IoU of each class and averaging them. """ _KWARGS_DESCRIPTION = """ Args: predictions (`List[ndarray]`): List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size. references (`List[ndarray]`): List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size. num_labels (`int`): Number of classes (categories). ignore_index (`int`): Index that will be ignored during evaluation. nan_to_num (`int`, *optional*): If specified, NaN values will be replaced by the number defined by the user. label_map (`dict`, *optional*): If specified, dictionary mapping old label indices to new label indices. reduce_labels (`bool`, *optional*, defaults to `False`): Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255. Returns: `Dict[str, float | ndarray]` comprising various elements: - *mean_iou* (`float`): Mean Intersection-over-Union (IoU averaged over all categories). - *mean_accuracy* (`float`): Mean accuracy (averaged over all categories). - *overall_accuracy* (`float`): Overall accuracy on all images. - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`): Per category accuracy. - *per_category_iou* (`ndarray` of shape `(num_labels,)`): Per category IoU. Examples: >>> import numpy as np >>> mean_iou = datasets.load_metric("mean_iou") >>> # suppose one has 3 different segmentation maps predicted >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]]) >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]]) >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]]) >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]]) >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]]) >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]]) >>> predicted = [predicted_1, predicted_2, predicted_3] >>> ground_truth = [actual_1, actual_2, actual_3] >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False) >>> print(results) # doctest: +NORMALIZE_WHITESPACE {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. 
])} """ _CITATION = """\ @software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020, author = {{MMSegmentation Contributors}}, license = {Apache-2.0}, month = {7}, title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}}, url = {https://github.com/open-mmlab/mmsegmentation}, year = {2020} }""" def intersect_and_union( pred_label, label, num_labels, ignore_index: bool, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False, ): """Calculate intersection and Union. Args: pred_label (`ndarray`): Prediction segmentation map of shape (height, width). label (`ndarray`): Ground truth segmentation map of shape (height, width). num_labels (`int`): Number of categories. ignore_index (`int`): Index that will be ignored during evaluation. label_map (`dict`, *optional*): Mapping old labels to new labels. The parameter will work only when label is str. reduce_labels (`bool`, *optional*, defaults to `False`): Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255. Returns: area_intersect (`ndarray`): The intersection of prediction and ground truth histogram on all classes. area_union (`ndarray`): The union of prediction and ground truth histogram on all classes. area_pred_label (`ndarray`): The prediction histogram on all classes. area_label (`ndarray`): The ground truth histogram on all classes. """ if label_map is not None: for old_id, new_id in label_map.items(): label[label == old_id] = new_id # turn into Numpy arrays pred_label = np.array(pred_label) label = np.array(label) if reduce_labels: label[label == 0] = 255 label = label - 1 label[label == 254] = 255 mask = label != ignore_index mask = np.not_equal(label, ignore_index) pred_label = pred_label[mask] label = np.array(label)[mask] intersect = pred_label[pred_label == label] area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0] area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0] area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0] area_union = area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label def total_intersect_and_union( results, gt_seg_maps, num_labels, ignore_index: bool, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False, ): """Calculate Total Intersection and Union, by calculating `intersect_and_union` for each (predicted, ground truth) pair. Args: results (`ndarray`): List of prediction segmentation maps, each of shape (height, width). gt_seg_maps (`ndarray`): List of ground truth segmentation maps, each of shape (height, width). num_labels (`int`): Number of categories. ignore_index (`int`): Index that will be ignored during evaluation. label_map (`dict`, *optional*): Mapping old labels to new labels. The parameter will work only when label is str. reduce_labels (`bool`, *optional*, defaults to `False`): Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255. Returns: total_area_intersect (`ndarray`): The intersection of prediction and ground truth histogram on all classes. 
total_area_union (`ndarray`): The union of prediction and ground truth histogram on all classes. total_area_pred_label (`ndarray`): The prediction histogram on all classes. total_area_label (`ndarray`): The ground truth histogram on all classes. """ total_area_intersect = np.zeros((num_labels,), dtype=np.float64) total_area_union = np.zeros((num_labels,), dtype=np.float64) total_area_pred_label = np.zeros((num_labels,), dtype=np.float64) total_area_label = np.zeros((num_labels,), dtype=np.float64) for result, gt_seg_map in zip(results, gt_seg_maps): area_intersect, area_union, area_pred_label, area_label = intersect_and_union( result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels ) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label def mean_iou( results, gt_seg_maps, num_labels, ignore_index: bool, nan_to_num: Optional[int] = None, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False, ): """Calculate Mean Intersection and Union (mIoU). Args: results (`ndarray`): List of prediction segmentation maps, each of shape (height, width). gt_seg_maps (`ndarray`): List of ground truth segmentation maps, each of shape (height, width). num_labels (`int`): Number of categories. ignore_index (`int`): Index that will be ignored during evaluation. nan_to_num (`int`, *optional*): If specified, NaN values will be replaced by the number defined by the user. label_map (`dict`, *optional*): Mapping old labels to new labels. The parameter will work only when label is str. reduce_labels (`bool`, *optional*, defaults to `False`): Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255. Returns: `Dict[str, float | ndarray]` comprising various elements: - *mean_iou* (`float`): Mean Intersection-over-Union (IoU averaged over all categories). - *mean_accuracy* (`float`): Mean accuracy (averaged over all categories). - *overall_accuracy* (`float`): Overall accuracy on all images. - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`): Per category accuracy. - *per_category_iou* (`ndarray` of shape `(num_labels,)`): Per category IoU. 
""" total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union( results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels ) # compute metrics metrics = {} all_acc = total_area_intersect.sum() / total_area_label.sum() iou = total_area_intersect / total_area_union acc = total_area_intersect / total_area_label metrics["mean_iou"] = np.nanmean(iou) metrics["mean_accuracy"] = np.nanmean(acc) metrics["overall_accuracy"] = all_acc metrics["per_category_iou"] = iou metrics["per_category_accuracy"] = acc if nan_to_num is not None: metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()} return metrics @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) class MeanIoU(datasets.Metric): def _info(self): return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( # 1st Seq - height dim, 2nd - width dim { "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))), "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))), } ), reference_urls=[ "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py" ], ) def _compute( self, predictions, references, num_labels: int, ignore_index: bool, nan_to_num: Optional[int] = None, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False, ): iou_result = mean_iou( results=predictions, gt_seg_maps=references, num_labels=num_labels, ignore_index=ignore_index, nan_to_num=nan_to_num, label_map=label_map, reduce_labels=reduce_labels, ) return iou_result
datasets/metrics/mean_iou/mean_iou.py/0
{ "file_path": "datasets/metrics/mean_iou/mean_iou.py", "repo_id": "datasets", "token_count": 5236 }
57
# Copyright 2020 The HuggingFace Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ ROUGE metric from Google Research github repo. """ # The dependencies in https://github.com/google-research/google-research/blob/master/rouge/requirements.txt import absl # noqa: F401 # Here to have a nice missing dependency error message early on import nltk # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import six # noqa: F401 # Here to have a nice missing dependency error message early on from rouge_score import rouge_scorer, scoring import datasets _CITATION = """\ @inproceedings{lin-2004-rouge, title = "{ROUGE}: A Package for Automatic Evaluation of Summaries", author = "Lin, Chin-Yew", booktitle = "Text Summarization Branches Out", month = jul, year = "2004", address = "Barcelona, Spain", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W04-1013", pages = "74--81", } """ _DESCRIPTION = """\ ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for evaluating automatic summarization and machine translation software in natural language processing. The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation. Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters. This metrics is a wrapper around Google Research reimplementation of ROUGE: https://github.com/google-research/google-research/tree/master/rouge """ _KWARGS_DESCRIPTION = """ Calculates average rouge scores for a list of hypotheses and references Args: predictions: list of predictions to score. Each prediction should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. rouge_types: A list of rouge types to calculate. Valid names: `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring, `"rougeL"`: Longest common subsequence based scoring. `"rougeLSum"`: rougeLsum splits text using `"\n"`. See details in https://github.com/huggingface/datasets/issues/617 use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes. 
use_aggregator: Return aggregates if this is set to True Returns: rouge1: rouge_1 (precision, recall, f1), rouge2: rouge_2 (precision, recall, f1), rougeL: rouge_l (precision, recall, f1), rougeLsum: rouge_lsum (precision, recall, f1) Examples: >>> rouge = datasets.load_metric('rouge') >>> predictions = ["hello there", "general kenobi"] >>> references = ["hello there", "general kenobi"] >>> results = rouge.compute(predictions=predictions, references=references) >>> print(list(results.keys())) ['rouge1', 'rouge2', 'rougeL', 'rougeLsum'] >>> print(results["rouge1"]) AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0)) >>> print(results["rouge1"].mid.fmeasure) 1.0 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) class Rouge(datasets.Metric): def _info(self): return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { "predictions": datasets.Value("string", id="sequence"), "references": datasets.Value("string", id="sequence"), } ), codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"], reference_urls=[ "https://en.wikipedia.org/wiki/ROUGE_(metric)", "https://github.com/google-research/google-research/tree/master/rouge", ], ) def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False): if rouge_types is None: rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"] scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer) if use_aggregator: aggregator = scoring.BootstrapAggregator() else: scores = [] for ref, pred in zip(references, predictions): score = scorer.score(ref, pred) if use_aggregator: aggregator.add_scores(score) else: scores.append(score) if use_aggregator: result = aggregator.aggregate() else: result = {} for key in scores[0]: result[key] = [score[key] for score in scores] return result
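As a usage sketch of the non-aggregated path in `_compute` above: with `use_aggregator=False`, each ROUGE type maps to a list of per-example `Score` tuples rather than an `AggregateScore`:

```python
>>> rouge = datasets.load_metric("rouge")
>>> results = rouge.compute(
...     predictions=["hello there", "general kenobi"],
...     references=["hello there", "general kenobi"],
...     use_aggregator=False,
... )
>>> [score.fmeasure for score in results["rouge1"]]
[1.0, 1.0]
```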
datasets/metrics/rouge/rouge.py/0
{ "file_path": "datasets/metrics/rouge/rouge.py", "repo_id": "datasets", "token_count": 2100 }
58
""" Official evaluation script for ReCoRD v1.0. (Some functions are adopted from the SQuAD evaluation script.) """ import argparse import json import re import string import sys from collections import Counter def normalize_answer(s): """Lower text and remove punctuation, articles and extra whitespace.""" def remove_articles(text): return re.sub(r"\b(a|an|the)\b", " ", text) def white_space_fix(text): return " ".join(text.split()) def remove_punc(text): exclude = set(string.punctuation) return "".join(ch for ch in text if ch not in exclude) def lower(text): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(s)))) def f1_score(prediction, ground_truth): prediction_tokens = normalize_answer(prediction).split() ground_truth_tokens = normalize_answer(ground_truth).split() common = Counter(prediction_tokens) & Counter(ground_truth_tokens) num_same = sum(common.values()) if num_same == 0: return 0 precision = 1.0 * num_same / len(prediction_tokens) recall = 1.0 * num_same / len(ground_truth_tokens) f1 = (2 * precision * recall) / (precision + recall) return f1 def exact_match_score(prediction, ground_truth): return normalize_answer(prediction) == normalize_answer(ground_truth) def metric_max_over_ground_truths(metric_fn, prediction, ground_truths): scores_for_ground_truths = [] for ground_truth in ground_truths: score = metric_fn(prediction, ground_truth) scores_for_ground_truths.append(score) return max(scores_for_ground_truths) def evaluate(dataset, predictions): f1 = exact_match = total = 0 correct_ids = [] for passage in dataset: for qa in passage["qas"]: total += 1 if qa["id"] not in predictions: message = f'Unanswered question {qa["id"]} will receive score 0.' print(message, file=sys.stderr) continue ground_truths = [x["text"] for x in qa["answers"]] prediction = predictions[qa["id"]] _exact_match = metric_max_over_ground_truths(exact_match_score, prediction, ground_truths) if int(_exact_match) == 1: correct_ids.append(qa["id"]) exact_match += _exact_match f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths) exact_match = exact_match / total f1 = f1 / total return {"exact_match": exact_match, "f1": f1}, correct_ids if __name__ == "__main__": expected_version = "1.0" parser = argparse.ArgumentParser("Official evaluation script for ReCoRD v1.0.") parser.add_argument("data_file", help="The dataset file in JSON format.") parser.add_argument("pred_file", help="The model prediction file in JSON format.") parser.add_argument("--output_correct_ids", action="store_true", help="Output the correctly answered query IDs.") args = parser.parse_args() with open(args.data_file) as data_file: dataset_json = json.load(data_file) if dataset_json["version"] != expected_version: print( f'Evaluation expects v-{expected_version}, but got dataset with v-{dataset_json["version"]}', file=sys.stderr, ) dataset = dataset_json["data"] with open(args.pred_file) as pred_file: predictions = json.load(pred_file) metrics, correct_ids = evaluate(dataset, predictions) if args.output_correct_ids: print(f"Output {len(correct_ids)} correctly answered question IDs.") with open("correct_ids.json", "w") as f: json.dump(correct_ids, f)
datasets/metrics/super_glue/record_evaluation.py/0
{ "file_path": "datasets/metrics/super_glue/record_evaluation.py", "repo_id": "datasets", "token_count": 1480 }
59
# ruff: noqa # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __version__ = "2.16.2.dev0" from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_default_config_name, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # isort:skip from datasets.utils import download_manager as _deprecated_download_manager # isort:skip _arrow_dataset.concatenate_datasets = concatenate_datasets _utils.DownloadConfig = DownloadConfig _utils.DownloadManager = DownloadManager _utils.DownloadMode = DownloadMode _deprecated_download_manager.DownloadConfig = DownloadConfig _deprecated_download_manager.DownloadMode = DownloadMode _deprecated_download_manager.DownloadManager = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
datasets/src/datasets/__init__.py/0
{ "file_path": "datasets/src/datasets/__init__.py", "repo_id": "datasets", "token_count": 772 }
60
from typing import TypeVar from .arrow_dataset import Dataset, _split_by_node_map_style_dataset from .iterable_dataset import IterableDataset, _split_by_node_iterable_dataset DatasetType = TypeVar("DatasetType", Dataset, IterableDataset) def split_dataset_by_node(dataset: DatasetType, rank: int, world_size: int) -> DatasetType: """ Split a dataset for the node at rank `rank` in a pool of nodes of size `world_size`. For map-style datasets: Each node is assigned a chunk of data, e.g. rank 0 is given the first chunk of the dataset. To maximize data loading throughput, chunks are made of contiguous data on disk if possible. For iterable datasets: If the dataset has a number of shards that is a factor of `world_size` (i.e. if `dataset.n_shards % world_size == 0`), then the shards are evenly assigned across the nodes, which is the most optimized. Otherwise, each node keeps 1 example out of `world_size`, skipping the other examples. Args: dataset ([`Dataset`] or [`IterableDataset`]): The dataset to split by node. rank (`int`): Rank of the current node. world_size (`int`): Total number of nodes. Returns: [`Dataset`] or [`IterableDataset`]: The dataset to be used on the node at rank `rank`. """ if isinstance(dataset, Dataset): return _split_by_node_map_style_dataset(dataset, rank=rank, world_size=world_size) else: return _split_by_node_iterable_dataset(dataset, rank=rank, world_size=world_size)
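A minimal usage sketch (the dataset name is a placeholder, and `rank`/`world_size` would normally come from your distributed launcher):

```python
from datasets import load_dataset
from datasets.distributed import split_dataset_by_node

ds = load_dataset("c4", "en", split="train", streaming=True)  # placeholder dataset
ds_for_this_node = split_dataset_by_node(ds, rank=0, world_size=8)
```

If the streaming dataset's shard count is not a multiple of `world_size`, each node instead keeps 1 example out of every `world_size`, as described in the docstring.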
datasets/src/datasets/distributed.py/0
{ "file_path": "datasets/src/datasets/distributed.py", "repo_id": "datasets", "token_count": 582 }
61
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ruff: noqa from typing import Dict, List, Optional, Type from .. import config from ..utils import logging from .formatting import ( ArrowFormatter, CustomFormatter, Formatter, PandasFormatter, PythonFormatter, TensorFormatter, format_table, query_table, ) from .np_formatter import NumpyFormatter logger = logging.get_logger(__name__) _FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {} _FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {} _FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {} def _register_formatter( formatter_cls: type, format_type: Optional[str], aliases: Optional[List[str]] = None, ): """ Register a Formatter object using a name and optional aliases. This function must be used on a Formatter class. """ aliases = aliases if aliases is not None else [] if format_type in _FORMAT_TYPES: logger.warning( f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" ) _FORMAT_TYPES[format_type] = formatter_cls for alias in set(aliases + [format_type]): if alias in _FORMAT_TYPES_ALIASES: logger.warning( f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" ) _FORMAT_TYPES_ALIASES[alias] = format_type def _register_unavailable_formatter( unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None ): """ Register an unavailable Formatter object using a name and optional aliases. This function must be used on an Exception object that is raised when trying to get the unavailable formatter. 
""" aliases = aliases if aliases is not None else [] for alias in set(aliases + [format_type]): _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error # Here we define all the available formatting functions that can be used by `Dataset.set_format` _register_formatter(PythonFormatter, None, aliases=["python"]) _register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"]) _register_formatter(NumpyFormatter, "numpy", aliases=["np"]) _register_formatter(PandasFormatter, "pandas", aliases=["pd"]) _register_formatter(CustomFormatter, "custom") if config.TORCH_AVAILABLE: from .torch_formatter import TorchFormatter _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"]) else: _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.") _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"]) if config.TF_AVAILABLE: from .tf_formatter import TFFormatter _register_formatter(TFFormatter, "tensorflow", aliases=["tf"]) else: _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.") _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"]) if config.JAX_AVAILABLE: from .jax_formatter import JaxFormatter _register_formatter(JaxFormatter, "jax", aliases=[]) else: _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.") _register_unavailable_formatter(_jax_error, "jax", aliases=[]) def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]: """If the given format type is a known alias, then return its main type name. Otherwise return the type with no change.""" if format_type in _FORMAT_TYPES_ALIASES: return _FORMAT_TYPES_ALIASES[format_type] else: return format_type def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter: """ Factory function to get a Formatter given its type name and keyword arguments. A formatter is an object that extracts and formats data from pyarrow table. It defines the formatting for rows, colums and batches. If the formatter for a given type name doesn't exist or is not available, an error is raised. """ format_type = get_format_type_from_alias(format_type) if format_type in _FORMAT_TYPES: return _FORMAT_TYPES[format_type](**format_kwargs) if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] else: raise ValueError( f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'" )
datasets/src/datasets/formatting/__init__.py/0
{ "file_path": "datasets/src/datasets/formatting/__init__.py", "repo_id": "datasets", "token_count": 1818 }
62
from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class TextDatasetReader(AbstractDatasetReader): def __init__( self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs, ): super().__init__( path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, ) path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths} self.builder = Text( cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs, ) def read(self): # Build iterable dataset if self.streaming: dataset = self.builder.as_streaming_dataset(split=self.split) # Build regular (map-style) dataset else: download_config = None download_mode = None verification_mode = None base_path = None self.builder.download_and_prepare( download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, # try_from_hf_gcs=try_from_hf_gcs, base_path=base_path, num_proc=self.num_proc, ) dataset = self.builder.as_dataset( split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory ) return dataset
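For orientation, a rough usage sketch of this reader (the file name is a placeholder); in practice most users reach it indirectly through `Dataset.from_text` or `load_dataset("text", ...)`, which rely on the same packaged `Text` builder:

```python
from datasets.io.text import TextDatasetReader

# builds a dataset from a local text file: one "text" column, one line per row
ds = TextDatasetReader("my_corpus.txt", split="train").read()
print(ds[0]["text"])
```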
datasets/src/datasets/io/text.py/0
{ "file_path": "datasets/src/datasets/io/text.py", "repo_id": "datasets", "token_count": 998 }
63
import collections import itertools import os from dataclasses import dataclass from typing import List, Optional, Tuple, Type import pandas as pd import pyarrow as pa import pyarrow.json as paj import datasets from datasets.features.features import FeatureType from datasets.tasks.base import TaskTemplate logger = datasets.utils.logging.get_logger(__name__) def count_path_segments(path): return path.replace("\\", "/").count("/") @dataclass class FolderBasedBuilderConfig(datasets.BuilderConfig): """BuilderConfig for AutoFolder.""" features: Optional[datasets.Features] = None drop_labels: bool = None drop_metadata: bool = None class FolderBasedBuilder(datasets.GeneratorBasedBuilder): """ Base class for generic data loaders for vision and image data. Abstract class attributes to be overridden by a child class: BASE_FEATURE: feature object to decode data (i.e. datasets.Image, datasets.Audio, ...) BASE_COLUMN_NAME: string key name of a base feature (i.e. "image", "audio", ...) BUILDER_CONFIG_CLASS: builder config inherited from `folder_based_builder.FolderBasedBuilderConfig` EXTENSIONS: list of allowed extensions (only files with these extensions and METADATA_FILENAME files will be included in a dataset) CLASSIFICATION_TASK: classification task to use if labels are obtained from the folder structure """ BASE_FEATURE: Type[FeatureType] BASE_COLUMN_NAME: str BUILDER_CONFIG_CLASS: FolderBasedBuilderConfig EXTENSIONS: List[str] CLASSIFICATION_TASK: TaskTemplate METADATA_FILENAMES: List[str] = ["metadata.csv", "metadata.jsonl"] def _info(self): return datasets.DatasetInfo(features=self.config.features) def _split_generators(self, dl_manager): if not self.config.data_files: raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") # Do an early pass if: # * `drop_labels` is None (default) or False, to infer the class labels # * `drop_metadata` is None (default) or False, to find the metadata files do_analyze = not self.config.drop_labels or not self.config.drop_metadata labels, path_depths = set(), set() metadata_files = collections.defaultdict(set) def analyze(files_or_archives, downloaded_files_or_dirs, split): if len(downloaded_files_or_dirs) == 0: return # The files are separated from the archives at this point, so check the first sample # to see if it's a file or a directory and iterate accordingly if os.path.isfile(downloaded_files_or_dirs[0]): original_files, downloaded_files = files_or_archives, downloaded_files_or_dirs for original_file, downloaded_file in zip(original_files, downloaded_files): original_file, downloaded_file = str(original_file), str(downloaded_file) _, original_file_ext = os.path.splitext(original_file) if original_file_ext.lower() in self.EXTENSIONS: if not self.config.drop_labels: labels.add(os.path.basename(os.path.dirname(original_file))) path_depths.add(count_path_segments(original_file)) elif os.path.basename(original_file) in self.METADATA_FILENAMES: metadata_files[split].add((original_file, downloaded_file)) else: original_file_name = os.path.basename(original_file) logger.debug( f"The file '{original_file_name}' was ignored: it is not an image, and is not {self.METADATA_FILENAMES} either." 
) else: archives, downloaded_dirs = files_or_archives, downloaded_files_or_dirs for archive, downloaded_dir in zip(archives, downloaded_dirs): archive, downloaded_dir = str(archive), str(downloaded_dir) for downloaded_dir_file in dl_manager.iter_files(downloaded_dir): _, downloaded_dir_file_ext = os.path.splitext(downloaded_dir_file) if downloaded_dir_file_ext in self.EXTENSIONS: if not self.config.drop_labels: labels.add(os.path.basename(os.path.dirname(downloaded_dir_file))) path_depths.add(count_path_segments(downloaded_dir_file)) elif os.path.basename(downloaded_dir_file) in self.METADATA_FILENAMES: metadata_files[split].add((None, downloaded_dir_file)) else: archive_file_name = os.path.basename(archive) original_file_name = os.path.basename(downloaded_dir_file) logger.debug( f"The file '{original_file_name}' from the archive '{archive_file_name}' was ignored: it is not an {self.BASE_COLUMN_NAME}, and is not {self.METADATA_FILENAMES} either." ) data_files = self.config.data_files splits = [] for split_name, files in data_files.items(): if isinstance(files, str): files = [files] files, archives = self._split_files_and_archives(files) downloaded_files = dl_manager.download(files) downloaded_dirs = dl_manager.download_and_extract(archives) if do_analyze: # drop_metadata is None or False, drop_labels is None or False logger.info(f"Searching for labels and/or metadata files in {split_name} data files...") analyze(files, downloaded_files, split_name) analyze(archives, downloaded_dirs, split_name) if metadata_files: # add metadata if `metadata_files` are found and `drop_metadata` is None (default) or False add_metadata = not self.config.drop_metadata # if `metadata_files` are found, add labels only if # `drop_labels` is set up to False explicitly (not-default behavior) add_labels = self.config.drop_labels is False else: # if `metadata_files` are not found, don't add metadata add_metadata = False # if `metadata_files` are not found and `drop_labels` is None (default) - # add labels if files are on the same level in directory hierarchy and there is more than one label add_labels = ( (len(labels) > 1 and len(path_depths) == 1) if self.config.drop_labels is None else not self.config.drop_labels ) if add_labels: logger.info("Adding the labels inferred from data directories to the dataset's features...") if add_metadata: logger.info("Adding metadata to the dataset...") else: add_labels, add_metadata, metadata_files = False, False, {} splits.append( datasets.SplitGenerator( name=split_name, gen_kwargs={ "files": list(zip(files, downloaded_files)) + [(None, dl_manager.iter_files(downloaded_dir)) for downloaded_dir in downloaded_dirs], "metadata_files": metadata_files, "split_name": split_name, "add_labels": add_labels, "add_metadata": add_metadata, }, ) ) if add_metadata: # Verify that: # * all metadata files have the same set of features # * the `file_name` key is one of the metadata keys and is of type string features_per_metadata_file: List[Tuple[str, datasets.Features]] = [] # Check that all metadata files share the same format metadata_ext = { os.path.splitext(original_metadata_file)[-1] for original_metadata_file, _ in itertools.chain.from_iterable(metadata_files.values()) } if len(metadata_ext) > 1: raise ValueError(f"Found metadata files with different extensions: {list(metadata_ext)}") metadata_ext = metadata_ext.pop() for _, downloaded_metadata_file in itertools.chain.from_iterable(metadata_files.values()): pa_metadata_table = self._read_metadata(downloaded_metadata_file, 
metadata_ext=metadata_ext) features_per_metadata_file.append( (downloaded_metadata_file, datasets.Features.from_arrow_schema(pa_metadata_table.schema)) ) for downloaded_metadata_file, metadata_features in features_per_metadata_file: if metadata_features != features_per_metadata_file[0][1]: raise ValueError( f"Metadata files {downloaded_metadata_file} and {features_per_metadata_file[0][0]} have different features: {features_per_metadata_file[0]} != {metadata_features}" ) metadata_features = features_per_metadata_file[0][1] if "file_name" not in metadata_features: raise ValueError("`file_name` must be present as dictionary key in metadata files") if metadata_features["file_name"] != datasets.Value("string"): raise ValueError("`file_name` key must be a string") del metadata_features["file_name"] else: metadata_features = None # Normally, we would do this in _info, but we need to know the labels and/or metadata # before building the features if self.config.features is None: if add_labels: self.info.features = datasets.Features( { self.BASE_COLUMN_NAME: self.BASE_FEATURE(), "label": datasets.ClassLabel(names=sorted(labels)), } ) self.info.task_templates = [self.CLASSIFICATION_TASK.align_with_features(self.info.features)] else: self.info.features = datasets.Features({self.BASE_COLUMN_NAME: self.BASE_FEATURE()}) if add_metadata: # Warn if there are duplicated keys in metadata compared to the existing features # (`BASE_COLUMN_NAME`, optionally "label") duplicated_keys = set(self.info.features) & set(metadata_features) if duplicated_keys: logger.warning( f"Ignoring metadata columns {list(duplicated_keys)} as they are already present in " f"the features dictionary." ) # skip metadata duplicated keys self.info.features.update( { feature: metadata_features[feature] for feature in metadata_features if feature not in duplicated_keys } ) return splits def _split_files_and_archives(self, data_files): files, archives = [], [] for data_file in data_files: _, data_file_ext = os.path.splitext(data_file) if data_file_ext.lower() in self.EXTENSIONS: files.append(data_file) elif os.path.basename(data_file) in self.METADATA_FILENAMES: files.append(data_file) else: archives.append(data_file) return files, archives def _read_metadata(self, metadata_file, metadata_ext: str = ""): if metadata_ext == ".csv": # Use `pd.read_csv` (although slower) instead of `pyarrow.csv.read_csv` for reading CSV files for consistency with the CSV packaged module return pa.Table.from_pandas(pd.read_csv(metadata_file)) else: with open(metadata_file, "rb") as f: return paj.read_json(f) def _generate_examples(self, files, metadata_files, split_name, add_metadata, add_labels): split_metadata_files = metadata_files.get(split_name, []) sample_empty_metadata = ( {k: None for k in self.info.features if k != self.BASE_COLUMN_NAME} if self.info.features else {} ) last_checked_dir = None metadata_dir = None metadata_dict = None downloaded_metadata_file = None metadata_ext = "" if split_metadata_files: metadata_ext = { os.path.splitext(original_metadata_file)[-1] for original_metadata_file, _ in split_metadata_files } metadata_ext = metadata_ext.pop() file_idx = 0 for original_file, downloaded_file_or_dir in files: if original_file is not None: _, original_file_ext = os.path.splitext(original_file) if original_file_ext.lower() in self.EXTENSIONS: if add_metadata: # If the file is a file of a needed type, and we've just entered a new directory, # find the nereast metadata file (by counting path segments) for the directory current_dir = 
os.path.dirname(original_file) if last_checked_dir is None or last_checked_dir != current_dir: last_checked_dir = current_dir metadata_file_candidates = [ ( os.path.relpath(original_file, os.path.dirname(metadata_file_candidate)), metadata_file_candidate, downloaded_metadata_file, ) for metadata_file_candidate, downloaded_metadata_file in split_metadata_files if metadata_file_candidate is not None # ignore metadata_files that are inside archives and not os.path.relpath( original_file, os.path.dirname(metadata_file_candidate) ).startswith("..") ] if metadata_file_candidates: _, metadata_file, downloaded_metadata_file = min( metadata_file_candidates, key=lambda x: count_path_segments(x[0]) ) pa_metadata_table = self._read_metadata( downloaded_metadata_file, metadata_ext=metadata_ext ) pa_file_name_array = pa_metadata_table["file_name"] pa_metadata_table = pa_metadata_table.drop(["file_name"]) metadata_dir = os.path.dirname(metadata_file) metadata_dict = { os.path.normpath(file_name).replace("\\", "/"): sample_metadata for file_name, sample_metadata in zip( pa_file_name_array.to_pylist(), pa_metadata_table.to_pylist() ) } else: raise ValueError( f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}." ) if metadata_dir is not None and downloaded_metadata_file is not None: file_relpath = os.path.relpath(original_file, metadata_dir) file_relpath = file_relpath.replace("\\", "/") if file_relpath not in metadata_dict: raise ValueError( f"{self.BASE_COLUMN_NAME} at {file_relpath} doesn't have metadata in {downloaded_metadata_file}." ) sample_metadata = metadata_dict[file_relpath] else: raise ValueError( f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}." 
) else: sample_metadata = {} if add_labels: sample_label = {"label": os.path.basename(os.path.dirname(original_file))} else: sample_label = {} yield ( file_idx, { **sample_empty_metadata, self.BASE_COLUMN_NAME: downloaded_file_or_dir, **sample_metadata, **sample_label, }, ) file_idx += 1 else: for downloaded_dir_file in downloaded_file_or_dir: _, downloaded_dir_file_ext = os.path.splitext(downloaded_dir_file) if downloaded_dir_file_ext.lower() in self.EXTENSIONS: if add_metadata: current_dir = os.path.dirname(downloaded_dir_file) if last_checked_dir is None or last_checked_dir != current_dir: last_checked_dir = current_dir metadata_file_candidates = [ ( os.path.relpath( downloaded_dir_file, os.path.dirname(downloaded_metadata_file) ), metadata_file_candidate, downloaded_metadata_file, ) for metadata_file_candidate, downloaded_metadata_file in split_metadata_files if metadata_file_candidate is None # ignore metadata_files that are not inside archives and not os.path.relpath( downloaded_dir_file, os.path.dirname(downloaded_metadata_file) ).startswith("..") ] if metadata_file_candidates: _, metadata_file, downloaded_metadata_file = min( metadata_file_candidates, key=lambda x: count_path_segments(x[0]) ) pa_metadata_table = self._read_metadata( downloaded_metadata_file, metadata_ext=metadata_ext ) pa_file_name_array = pa_metadata_table["file_name"] pa_metadata_table = pa_metadata_table.drop(["file_name"]) metadata_dir = os.path.dirname(downloaded_metadata_file) metadata_dict = { os.path.normpath(file_name).replace("\\", "/"): sample_metadata for file_name, sample_metadata in zip( pa_file_name_array.to_pylist(), pa_metadata_table.to_pylist() ) } else: raise ValueError( f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_dir_file}." ) if metadata_dir is not None and downloaded_metadata_file is not None: downloaded_dir_file_relpath = os.path.relpath(downloaded_dir_file, metadata_dir) downloaded_dir_file_relpath = downloaded_dir_file_relpath.replace("\\", "/") if downloaded_dir_file_relpath not in metadata_dict: raise ValueError( f"{self.BASE_COLUMN_NAME} at {downloaded_dir_file_relpath} doesn't have metadata in {downloaded_metadata_file}." ) sample_metadata = metadata_dict[downloaded_dir_file_relpath] else: raise ValueError( f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_dir_file}." ) else: sample_metadata = {} if add_labels: sample_label = {"label": os.path.basename(os.path.dirname(downloaded_dir_file))} else: sample_label = {} yield ( file_idx, { **sample_empty_metadata, self.BASE_COLUMN_NAME: downloaded_dir_file, **sample_metadata, **sample_label, }, ) file_idx += 1
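To make the abstract class attributes documented above concrete, here is a hedged sketch of how a child builder might be declared, loosely modeled on the packaged `imagefolder` module; the class names and the truncated extension list are illustrative, not the library's exact definitions.

```python
import datasets
from datasets.packaged_modules.folder_based_builder.folder_based_builder import (
    FolderBasedBuilder,
    FolderBasedBuilderConfig,
)
from datasets.tasks import ImageClassification


class ImageFolderConfig(FolderBasedBuilderConfig):
    """BuilderConfig for an image-folder style loader (illustrative sketch)."""


class ImageFolder(FolderBasedBuilder):
    # Each abstract attribute described in the docstring gets a concrete value here.
    BASE_FEATURE = datasets.Image            # feature used to decode the data
    BASE_COLUMN_NAME = "image"               # column name holding the decoded feature
    BUILDER_CONFIG_CLASS = ImageFolderConfig
    EXTENSIONS = [".jpg", ".jpeg", ".png"]   # truncated list, for illustration only
    CLASSIFICATION_TASK = ImageClassification(image_column="image", label_column="label")
```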
datasets/src/datasets/packaged_modules/folder_based_builder/folder_based_builder.py/0
{ "file_path": "datasets/src/datasets/packaged_modules/folder_based_builder/folder_based_builder.py", "repo_id": "datasets", "token_count": 11960 }
64
import itertools import warnings from dataclasses import InitVar, dataclass from io import StringIO from typing import Optional import pyarrow as pa import datasets from datasets.features.features import require_storage_cast from datasets.table import table_cast logger = datasets.utils.logging.get_logger(__name__) @dataclass class TextConfig(datasets.BuilderConfig): """BuilderConfig for text files.""" features: Optional[datasets.Features] = None encoding: str = "utf-8" errors: InitVar[Optional[str]] = "deprecated" encoding_errors: Optional[str] = None chunksize: int = 10 << 20 # 10MB keep_linebreaks: bool = False sample_by: str = "line" def __post_init__(self, errors): if errors != "deprecated": warnings.warn( "'errors' was deprecated in favor of 'encoding_errors' in version 2.14.0 and will be removed in 3.0.0.\n" f"You can remove this warning by passing 'encoding_errors={errors}' instead.", FutureWarning, ) self.encoding_errors = errors class Text(datasets.ArrowBasedBuilder): BUILDER_CONFIG_CLASS = TextConfig def _info(self): return datasets.DatasetInfo(features=self.config.features) def _split_generators(self, dl_manager): """The `data_files` kwarg in load_dataset() can be a str, List[str], Dict[str,str], or Dict[str,List[str]]. If str or List[str], then the dataset returns only the 'train' split. If dict, then keys should be from the `datasets.Split` enum. """ if not self.config.data_files: raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") data_files = dl_manager.download_and_extract(self.config.data_files) if isinstance(data_files, (str, list, tuple)): files = data_files if isinstance(files, str): files = [files] files = [dl_manager.iter_files(file) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})] splits = [] for split_name, files in data_files.items(): if isinstance(files, str): files = [files] files = [dl_manager.iter_files(file) for file in files] splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files})) return splits def _cast_table(self, pa_table: pa.Table) -> pa.Table: if self.config.features is not None: schema = self.config.features.arrow_schema if all(not require_storage_cast(feature) for feature in self.config.features.values()): # cheaper cast pa_table = pa_table.cast(schema) else: # more expensive cast; allows str <-> int/float or str to Audio for example pa_table = table_cast(pa_table, schema) return pa_table else: return pa_table.cast(pa.schema({"text": pa.string()})) def _generate_tables(self, files): pa_table_names = list(self.config.features) if self.config.features is not None else ["text"] for file_idx, file in enumerate(itertools.chain.from_iterable(files)): # open in text mode, by default translates universal newlines ("\n", "\r\n" and "\r") into "\n" with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f: if self.config.sample_by == "line": batch_idx = 0 while True: batch = f.read(self.config.chunksize) if not batch: break batch += f.readline() # finish current line # StringIO.readlines, by default splits only on "\n" (and keeps line breaks) batch = StringIO(batch).readlines() if not self.config.keep_linebreaks: batch = [line.rstrip("\n") for line in batch] pa_table = pa.Table.from_arrays([pa.array(batch)], names=pa_table_names) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # 
logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(pa_table) batch_idx += 1 elif self.config.sample_by == "paragraph": batch_idx = 0 batch = "" while True: new_batch = f.read(self.config.chunksize) if not new_batch: break batch += new_batch batch += f.readline() # finish current line batch = batch.split("\n\n") pa_table = pa.Table.from_arrays( [pa.array([example for example in batch[:-1] if example])], names=pa_table_names ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(pa_table) batch_idx += 1 batch = batch[-1] if batch: pa_table = pa.Table.from_arrays([pa.array([batch])], names=pa_table_names) yield (file_idx, batch_idx), self._cast_table(pa_table) elif self.config.sample_by == "document": text = f.read() pa_table = pa.Table.from_arrays([pa.array([text])], names=pa_table_names) yield file_idx, self._cast_table(pa_table)
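A hedged usage sketch of the `text` builder above through `load_dataset`; the file name is hypothetical, and `sample_by` is forwarded to `TextConfig` as a config keyword argument.

```python
from datasets import load_dataset

# "my_corpus.txt" is a hypothetical local file; any text file(s) work.
# With sample_by="paragraph", each "\n\n"-separated paragraph becomes one example;
# the default ("line") yields one example per line, "document" one example per file.
ds = load_dataset("text", data_files={"train": "my_corpus.txt"}, sample_by="paragraph")
print(ds["train"][0]["text"])
```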
datasets/src/datasets/packaged_modules/text/text.py/0
{ "file_path": "datasets/src/datasets/packaged_modules/text/text.py", "repo_id": "datasets", "token_count": 3042 }
65
from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Sequence, Value from .base import TaskTemplate @dataclass(frozen=True) class QuestionAnsweringExtractive(TaskTemplate): # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True}) input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")}) label_schema: ClassVar[Features] = Features( { "answers": Sequence( { "text": Value("string"), "answer_start": Value("int32"), } ) } ) question_column: str = "question" context_column: str = "context" answers_column: str = "answers" @property def column_mapping(self) -> Dict[str, str]: return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
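A small, hedged illustration of how the template's `column_mapping` relates dataset columns to the schema above; the column names passed in are made up.

```python
from datasets.tasks import QuestionAnsweringExtractive

# Hypothetical column names from some dataset; the mapping says how to rename
# them so they match the template's input/label schema.
task = QuestionAnsweringExtractive(question_column="q", context_column="ctx", answers_column="ans")
print(task.column_mapping)  # {'q': 'question', 'ctx': 'context', 'ans': 'answers'}
```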
datasets/src/datasets/tasks/question_answering.py/0
{ "file_path": "datasets/src/datasets/tasks/question_answering.py", "repo_id": "datasets", "token_count": 437 }
66
import enum import os from typing import Optional from huggingface_hub.utils import insecure_hashlib from .. import config from .logging import get_logger logger = get_logger(__name__) class VerificationMode(enum.Enum): """`Enum` that specifies which verification checks to run. The default mode is `BASIC_CHECKS`, which will perform only rudimentary checks to avoid slowdowns when generating/downloading a dataset for the first time. The verification modes: | | Verification checks | |---------------------------|------------------------------------------------------------------------------ | | `ALL_CHECKS` | Split checks, uniqueness of the keys yielded in case of the GeneratorBuilder | | | and the validity (number of files, checksums, etc.) of downloaded files | | `BASIC_CHECKS` (default) | Same as `ALL_CHECKS` but without checking downloaded files | | `NO_CHECKS` | None | """ ALL_CHECKS = "all_checks" BASIC_CHECKS = "basic_checks" NO_CHECKS = "no_checks" class ChecksumVerificationException(Exception): """Exceptions during checksums verifications of downloaded files.""" class UnexpectedDownloadedFile(ChecksumVerificationException): """Some downloaded files were not expected.""" class ExpectedMoreDownloadedFiles(ChecksumVerificationException): """Some files were supposed to be downloaded but were not.""" class NonMatchingChecksumError(ChecksumVerificationException): """The downloaded file checksum don't match the expected checksum.""" def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None): if expected_checksums is None: logger.info("Unable to verify checksums.") return if len(set(expected_checksums) - set(recorded_checksums)) > 0: raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums))) if len(set(recorded_checksums) - set(expected_checksums)) > 0: raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums))) bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]] for_verification_name = " for " + verification_name if verification_name is not None else "" if len(bad_urls) > 0: raise NonMatchingChecksumError( f"Checksums didn't match{for_verification_name}:\n" f"{bad_urls}\n" "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error" ) logger.info("All the checksums matched successfully" + for_verification_name) class SplitsVerificationException(Exception): """Exceptions during splis verifications""" class UnexpectedSplits(SplitsVerificationException): """The expected splits of the downloaded file is missing.""" class ExpectedMoreSplits(SplitsVerificationException): """Some recorded splits are missing.""" class NonMatchingSplitsSizesError(SplitsVerificationException): """The splits sizes don't match the expected splits sizes.""" def verify_splits(expected_splits: Optional[dict], recorded_splits: dict): if expected_splits is None: logger.info("Unable to verify splits sizes.") return if len(set(expected_splits) - set(recorded_splits)) > 0: raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits))) if len(set(recorded_splits) - set(expected_splits)) > 0: raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits))) bad_splits = [ {"expected": expected_splits[name], "recorded": recorded_splits[name]} for name in expected_splits if expected_splits[name].num_examples != recorded_splits[name].num_examples ] if len(bad_splits) > 0: raise NonMatchingSplitsSizesError(str(bad_splits)) 
logger.info("All the splits matched successfully.") def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict: """Compute the file size and the sha256 checksum of a file""" if record_checksum: m = insecure_hashlib.sha256() with open(path, "rb") as f: for chunk in iter(lambda: f.read(1 << 20), b""): m.update(chunk) checksum = m.hexdigest() else: checksum = None return {"num_bytes": os.path.getsize(path), "checksum": checksum} def is_small_dataset(dataset_size): """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`. Args: dataset_size (int): Dataset size in bytes. Returns: bool: Whether `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`. """ if dataset_size and config.IN_MEMORY_MAX_SIZE: return dataset_size < config.IN_MEMORY_MAX_SIZE else: return False
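A brief, hedged sketch exercising the helpers defined above; the file path is hypothetical.

```python
from datasets.utils.info_utils import (
    VerificationMode,
    get_size_checksum_dict,
    is_small_dataset,
)

# Size + sha256 of a local file ("data/train.csv" is a hypothetical path).
info = get_size_checksum_dict("data/train.csv")
print(info["num_bytes"], info["checksum"])

# Parse a verification mode from a string, e.g. when forwarding a user option.
mode = VerificationMode("basic_checks")
assert mode is VerificationMode.BASIC_CHECKS

# Whether a ~10 MB dataset fits under config.IN_MEMORY_MAX_SIZE (False if the limit is unset/0).
print(is_small_dataset(10_000_000))
```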
datasets/src/datasets/utils/info_utils.py/0
{ "file_path": "datasets/src/datasets/utils/info_utils.py", "repo_id": "datasets", "token_count": 1893 }
67
from collections.abc import Iterator from typing import Iterable class tracked_str(str): origins = {} def set_origin(self, origin: str): if super().__repr__() not in self.origins: self.origins[super().__repr__()] = origin def get_origin(self): return self.origins.get(super().__repr__(), str(self)) def __repr__(self) -> str: if super().__repr__() not in self.origins or self.origins[super().__repr__()] == self: return super().__repr__() else: return f"{str(self)} (origin={self.origins[super().__repr__()]})" class tracked_list(list): def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) self.last_item = None def __iter__(self) -> Iterator: for x in super().__iter__(): self.last_item = x yield x self.last_item = None def __repr__(self) -> str: if self.last_item is None: return super().__repr__() else: return f"{self.__class__.__name__}(current={self.last_item})" class TrackedIterable(Iterable): def __init__(self) -> None: super().__init__() self.last_item = None def __repr__(self) -> str: if self.last_item is None: super().__repr__() else: return f"{self.__class__.__name__}(current={self.last_item})"
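A hedged example of how these tracking helpers behave; the paths and URLs are placeholders.

```python
from datasets.utils.track import tracked_list, tracked_str

# Remember where a resolved path came from; repr() then reports the origin.
path = tracked_str("/tmp/downloaded/abc123")        # placeholder local path
path.set_origin("https://example.com/data.csv")     # placeholder origin URL
print(repr(path))         # /tmp/downloaded/abc123 (origin=https://example.com/data.csv)
print(path.get_origin())  # https://example.com/data.csv

# While iterating, repr() of the list reports the item currently being processed.
files = tracked_list(["a.txt", "b.txt"])
for _ in files:
    print(repr(files))    # e.g. tracked_list(current=a.txt)
```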
datasets/src/datasets/utils/track.py/0
{ "file_path": "datasets/src/datasets/utils/track.py", "repo_id": "datasets", "token_count": 648 }
68
import textwrap import pyarrow as pa import pytest from datasets import Features, Image from datasets.packaged_modules.text.text import Text from ..utils import require_pil @pytest.fixture def text_file(tmp_path): filename = tmp_path / "text.txt" data = textwrap.dedent( """\ Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. Second paragraph: Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. """ ) with open(filename, "w", encoding="utf-8") as f: f.write(data) return str(filename) @pytest.fixture def text_file_with_image(tmp_path, image_file): filename = tmp_path / "text_with_image.txt" with open(filename, "w", encoding="utf-8") as f: f.write(image_file) return str(filename) @pytest.mark.parametrize("keep_linebreaks", [True, False]) def test_text_linebreaks(text_file, keep_linebreaks): with open(text_file, encoding="utf-8") as f: expected_content = f.read().splitlines(keepends=keep_linebreaks) text = Text(keep_linebreaks=keep_linebreaks, encoding="utf-8") generator = text._generate_tables([[text_file]]) generated_content = pa.concat_tables([table for _, table in generator]).to_pydict()["text"] assert generated_content == expected_content @require_pil def test_text_cast_image(text_file_with_image): with open(text_file_with_image, encoding="utf-8") as f: image_file = f.read().splitlines()[0] text = Text(encoding="utf-8", features=Features({"image": Image()})) generator = text._generate_tables([[text_file_with_image]]) pa_table = pa.concat_tables([table for _, table in generator]) assert pa_table.schema.field("image").type == Image()() generated_content = pa_table.to_pydict()["image"] assert generated_content == [{"path": image_file, "bytes": None}] @pytest.mark.parametrize("sample_by", ["line", "paragraph", "document"]) def test_text_sample_by(sample_by, text_file): with open(text_file, encoding="utf-8") as f: expected_content = f.read() if sample_by == "line": expected_content = expected_content.splitlines() elif sample_by == "paragraph": expected_content = expected_content.split("\n\n") elif sample_by == "document": expected_content = [expected_content] text = Text(sample_by=sample_by, encoding="utf-8", chunksize=100) generator = text._generate_tables([[text_file]]) generated_content = pa.concat_tables([table for _, table in generator]).to_pydict()["text"] assert generated_content == expected_content
datasets/tests/packaged_modules/test_text.py/0
{ "file_path": "datasets/tests/packaged_modules/test_text.py", "repo_id": "datasets", "token_count": 1289 }
69
import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, extract_path_from_uri, is_remote_filesystem from .utils import require_lz4, require_zstandard def test_mockfs(mockfs): assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def test_non_mockfs(): assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def test_extract_path_from_uri(): mock_bucket = "mock-s3-bucket" dataset_path = f"s3://{mock_bucket}" dataset_path = extract_path_from_uri(dataset_path) assert dataset_path.startswith("s3://") is False dataset_path = "./local/path" new_dataset_path = extract_path_from_uri(dataset_path) assert dataset_path == new_dataset_path def test_is_remote_filesystem(mockfs): is_remote = is_remote_filesystem(mockfs) assert is_remote is True fs = fsspec.filesystem("file") is_remote = is_remote_filesystem(fs) assert is_remote is False @pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS) def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file): input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file} input_path = input_paths[compression_fs_class.protocol] if input_path is None: reason = f"for '{compression_fs_class.protocol}' compression protocol, " if compression_fs_class.protocol == "lz4": reason += require_lz4.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(reason) fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path) assert isinstance(fs, compression_fs_class) expected_filename = os.path.basename(input_path) expected_filename = expected_filename[: expected_filename.rindex(".")] assert fs.glob("*") == [expected_filename] with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize("protocol", ["zip", "gzip"]) def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path): compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path} compressed_file_path = compressed_file_paths[protocol] member_file_path = "dataset.jsonl" path = f"{protocol}://{member_file_path}::{compressed_file_path}" fs, *_ = fsspec.get_fs_token_paths(path) assert fs.isfile(member_file_path) assert not fs.isfile("non_existing_" + member_file_path) def test_fs_overwrites(): protocol = "bz2" # Import module import datasets.filesystems # Overwrite protocol and reload register_implementation(protocol, None, clobber=True) with pytest.warns(UserWarning) as warning_info: importlib.reload(datasets.filesystems) assert len(warning_info) == 1 assert ( str(warning_info[0].message) == f"A filesystem protocol was already set for {protocol} and will be overwritten." )
datasets/tests/test_filesystem.py/0
{ "file_path": "datasets/tests/test_filesystem.py", "repo_id": "datasets", "token_count": 1297 }
70
import time from dataclasses import dataclass from multiprocessing import Pool from unittest import TestCase from unittest.mock import patch import multiprocess import numpy as np import pytest from datasets.utils.py_utils import ( NestedDataStructure, asdict, iflatmap_unordered, map_nested, temp_seed, temporary_assignment, zip_dict, ) from .utils import require_tf, require_torch def np_sum(x): # picklable for multiprocessing return x.sum() def add_one(i): # picklable for multiprocessing return i + 1 @dataclass class A: x: int y: str class PyUtilsTest(TestCase): def test_map_nested(self): s1 = {} s2 = [] s3 = 1 s4 = [1, 2] s5 = {"a": 1, "b": 2} s6 = {"a": [1, 2], "b": [3, 4]} s7 = {"a": {"1": 1}, "b": 2} s8 = {"a": 1, "b": 2, "c": 3, "d": 4} expected_map_nested_s1 = {} expected_map_nested_s2 = [] expected_map_nested_s3 = 2 expected_map_nested_s4 = [2, 3] expected_map_nested_s5 = {"a": 2, "b": 3} expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]} expected_map_nested_s7 = {"a": {"1": 2}, "b": 3} expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5} self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1) self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2) self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3) self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4) self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5) self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6) self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7) self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8) num_proc = 2 self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1) self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2) self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3) self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4) self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5) self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6) self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7) self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8) sn1 = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)} expected_map_nested_sn1_sum = {"a": 2, "b": 0, "c": 2} expected_map_nested_sn1_int = { "a": np.eye(2).astype(int), "b": np.zeros(3).astype(int), "c": np.ones(2).astype(int), } self.assertEqual(map_nested(np_sum, sn1, map_numpy=False), expected_map_nested_sn1_sum) self.assertEqual( {k: v.tolist() for k, v in map_nested(int, sn1, map_numpy=True).items()}, {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()}, ) self.assertEqual(map_nested(np_sum, sn1, map_numpy=False, num_proc=num_proc), expected_map_nested_sn1_sum) self.assertEqual( {k: v.tolist() for k, v in map_nested(int, sn1, map_numpy=True, num_proc=num_proc).items()}, {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()}, ) with self.assertRaises(AttributeError): # can't pickle a local lambda map_nested(lambda x: x + 1, sn1, num_proc=num_proc) def test_zip_dict(self): d1 = {"a": 1, "b": 2} d2 = {"a": 3, "b": 4} d3 = {"a": 5, "b": 6} expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))]) self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result) def test_temporary_assignment(self): class Foo: my_attr = "bar" foo = Foo() self.assertEqual(foo.my_attr, "bar") with 
temporary_assignment(foo, "my_attr", "BAR"): self.assertEqual(foo.my_attr, "BAR") self.assertEqual(foo.my_attr, "bar") @pytest.mark.parametrize( "iterable_length, num_proc, expected_num_proc", [ (1, None, 1), (1, 1, 1), (2, None, 1), (2, 1, 1), (2, 2, 1), (2, 3, 1), (3, 2, 1), (16, 16, 16), (16, 17, 16), (17, 16, 16), ], ) def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc): with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch( "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool: data_struct = {f"{i}": i for i in range(iterable_length)} _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16) if expected_num_proc == 1: assert mock_single_map_nested.called assert not mock_multiprocessing_pool.called else: assert not mock_single_map_nested.called assert mock_multiprocessing_pool.called assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc class TempSeedTest(TestCase): @require_tf def test_tensorflow(self): import tensorflow as tf from tensorflow.keras import layers model = layers.Dense(2) def gen_random_output(): x = tf.random.uniform((1, 3)) return model(x).numpy() with temp_seed(42, set_tensorflow=True): out1 = gen_random_output() with temp_seed(42, set_tensorflow=True): out2 = gen_random_output() out3 = gen_random_output() np.testing.assert_equal(out1, out2) self.assertGreater(np.abs(out1 - out3).sum(), 0) @require_torch def test_torch(self): import torch def gen_random_output(): model = torch.nn.Linear(3, 2) x = torch.rand(1, 3) return model(x).detach().numpy() with temp_seed(42, set_pytorch=True): out1 = gen_random_output() with temp_seed(42, set_pytorch=True): out2 = gen_random_output() out3 = gen_random_output() np.testing.assert_equal(out1, out2) self.assertGreater(np.abs(out1 - out3).sum(), 0) def test_numpy(self): def gen_random_output(): return np.random.rand(1, 3) with temp_seed(42): out1 = gen_random_output() with temp_seed(42): out2 = gen_random_output() out3 = gen_random_output() np.testing.assert_equal(out1, out2) self.assertGreater(np.abs(out1 - out3).sum(), 0) @pytest.mark.parametrize("input_data", [{}]) def test_nested_data_structure_data(input_data): output_data = NestedDataStructure(input_data).data assert output_data == input_data @pytest.mark.parametrize( "data, expected_output", [ ({}, []), ([], []), ("foo", ["foo"]), (["foo", "bar"], ["foo", "bar"]), ([["foo", "bar"]], ["foo", "bar"]), ([[["foo"], ["bar"]]], ["foo", "bar"]), ([[["foo"], "bar"]], ["foo", "bar"]), ({"a": 1, "b": 2}, [1, 2]), ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]), ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]), ({"a": {"1": 1}, "b": 2}, [1, 2]), ({"a": {"1": [1]}, "b": 2}, [1, 2]), ({"a": {"1": [1]}, "b": [2]}, [1, 2]), ], ) def test_flatten(data, expected_output): output = NestedDataStructure(data).flatten() assert output == expected_output def test_asdict(): input = A(x=1, y="foobar") expected_output = {"x": 1, "y": "foobar"} assert asdict(input) == expected_output input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]} expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]} assert asdict(input) == expected_output with pytest.raises(TypeError): asdict([1, A(x=10, y="foo")]) def 
_split_text(text: str): return text.split() def _2seconds_generator_of_2items_with_timing(content): yield (time.time(), content) time.sleep(2) yield (time.time(), content) def test_iflatmap_unordered(): with Pool(2) as pool: out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10)) assert out.count("hello") == 10 assert out.count("there") == 10 assert len(out) == 20 # check multiprocess from pathos (uses dill for pickling) with multiprocess.Pool(2) as pool: out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10)) assert out.count("hello") == 10 assert out.count("there") == 10 assert len(out) == 20 # check that we get items as fast as possible with Pool(2) as pool: out = [] for yield_time, content in iflatmap_unordered( pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}] ): assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded" out.append(content) assert out.count("a") == 2 assert out.count("b") == 2 assert len(out) == 4
datasets/tests/test_py_utils.py/0
{ "file_path": "datasets/tests/test_py_utils.py", "repo_id": "datasets", "token_count": 4821 }
71
# [The Hugging Face Deep Reinforcement Learning Course 🤗 (v2.0)](https://huggingface.co/deep-rl-course/unit0/introduction) <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit0/thumbnail.jpg" alt="Thumbnail"/> If you like the course, don't hesitate to **⭐ star this repository. This helps us 🤗**. This repository contains the Deep Reinforcement Learning Course mdx files and notebooks. **The website is here**: https://huggingface.co/deep-rl-course/unit0/introduction?fw=pt - The syllabus 📚: https://simoninithomas.github.io/deep-rl-course - The course 📚: https://huggingface.co/deep-rl-course/unit0/introduction?fw=pt - **Sign up here** ➡️➡️➡️ http://eepurl.com/ic5ZUD ## Citing the project To cite this repository in publications: ```bibtex @misc{deep-rl-course, author = {Simonini, Thomas and Sanseviero, Omar}, title = {The Hugging Face Deep Reinforcement Learning Class}, year = {2023}, publisher = {GitHub}, journal = {GitHub repository}, howpublished = {\url{https://github.com/huggingface/deep-rl-class}}, } ```
deep-rl-class/README.md/0
{ "file_path": "deep-rl-class/README.md", "repo_id": "deep-rl-class", "token_count": 388 }
72
# The certification process

The certification process is **completely free**:

- To get a *certificate of completion*: you need **to pass 80% of the assignments**.
- To get a *certificate of excellence*: you need **to pass 100% of the assignments**.

There are **no deadlines; the course is self-paced**.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit0/certification.jpg" alt="Course certification" width="100%"/>

When we say pass, **we mean that your model must be pushed to the Hub and get a result equal to or above the minimal requirement**.

To check your progression and which units you have passed/not passed: https://huggingface.co/spaces/ThomasSimonini/Check-my-progress-Deep-RL-Course

Now that you're ready for the certification process, you need to:

1. Go here: https://huggingface.co/spaces/huggingface-projects/Deep-RL-Course-Certification/
2. Type your *Hugging Face username*, your *first name*, and *last name*
3. Click on "Generate my certificate".
    - If you passed 80% of the assignments, **congratulations**, you've just got the certificate of completion.
    - If you passed 100% of the assignments, **congratulations**, you've just got the certificate of excellence.
    - If you are below 80%, don't be discouraged! Check which units you need to do again to get your certificate.
4. You can download your certificate in PDF and PNG format.

Don't hesitate to share your certificate on Twitter (tag me @ThomasSimonini and @huggingface) and on LinkedIn.

deep-rl-class/units/en/communication/certification.mdx/0
{ "file_path": "deep-rl-class/units/en/communication/certification.mdx", "repo_id": "deep-rl-class", "token_count": 418 }
73
# Type of tasks [[tasks]]

A task is an **instance** of a Reinforcement Learning problem. We can have two types of tasks: **episodic** and **continuing**.

## Episodic task [[episodic-task]]

In this case, we have a starting point and an ending point **(a terminal state). This creates an episode**: a list of States, Actions, Rewards, and new States.

For instance, think about Super Mario Bros: an episode begins at the launch of a new Mario Level and ends **when you’re killed or you reach the end of the level.**

<figure>
  <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/mario.jpg" alt="Mario">
  <figcaption>Beginning of a new episode.</figcaption>
</figure>

## Continuing tasks [[continuing-tasks]]

These are tasks that continue forever (**no terminal state**). In this case, the agent must **learn how to choose the best actions and simultaneously interact with the environment.**

For instance, an agent that does automated stock trading. For this task, there is no starting point or terminal state. **The agent keeps running until we decide to stop it.**

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/stock.jpg" alt="Stock Market" width="100%">

To recap:

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/tasks.jpg" alt="Tasks recap" width="100%">
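As a hedged illustration of an episodic task (not part of the original unit), here is a minimal Gymnasium rollout that collects one episode — a list of states, actions, and rewards that ends when a terminal state is reached:

```python
import gymnasium as gym

env = gym.make("CartPole-v1")
state, info = env.reset()

episode = []          # the (state, action, reward) list that defines one episode
done = False
while not done:
    action = env.action_space.sample()  # random policy, purely for illustration
    next_state, reward, terminated, truncated, info = env.step(action)
    episode.append((state, action, reward))
    state = next_state
    done = terminated or truncated      # terminal state reached -> episode ends

env.close()
print(f"Episode finished after {len(episode)} steps")
```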
deep-rl-class/units/en/unit1/tasks.mdx/0
{ "file_path": "deep-rl-class/units/en/unit1/tasks.mdx", "repo_id": "deep-rl-class", "token_count": 436 }
74
# Two types of value-based methods [[two-types-value-based-methods]] In value-based methods, **we learn a value function** that **maps a state to the expected value of being at that state.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/vbm-1.jpg" alt="Value Based Methods"/> The value of a state is the **expected discounted return** the agent can get if it **starts at that state and then acts according to our policy.** <Tip> But what does it mean to act according to our policy? After all, we don't have a policy in value-based methods since we train a value function and not a policy. </Tip> Remember that the goal of an **RL agent is to have an optimal policy π\*.** To find the optimal policy, we learned about two different methods: - *Policy-based methods:* **Directly train the policy** to select what action to take given a state (or a probability distribution over actions at that state). In this case, we **don't have a value function.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/two-approaches-2.jpg" alt="Two RL approaches"/> The policy takes a state as input and outputs what action to take at that state (deterministic policy: a policy that output one action given a state, contrary to stochastic policy that output a probability distribution over actions). And consequently, **we don't define by hand the behavior of our policy; it's the training that will define it.** - *Value-based methods:* **Indirectly, by training a value function** that outputs the value of a state or a state-action pair. Given this value function, our policy **will take an action.** Since the policy is not trained/learned, **we need to specify its behavior.** For instance, if we want a policy that, given the value function, will take actions that always lead to the biggest reward, **we'll create a Greedy Policy.** <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/two-approaches-3.jpg" alt="Two RL approaches"/> <figcaption>Given a state, our action-value function (that we train) outputs the value of each action at that state. Then, our pre-defined Greedy Policy selects the action that will yield the highest value given a state or a state action pair.</figcaption> </figure> Consequently, whatever method you use to solve your problem, **you will have a policy**. In the case of value-based methods, you don't train the policy: your policy **is just a simple pre-specified function** (for instance, the Greedy Policy) that uses the values given by the value-function to select its actions. So the difference is: - In policy-based training, **the optimal policy (denoted π\*) is found by training the policy directly.** - In value-based training, **finding an optimal value function (denoted Q\* or V\*, we'll study the difference below) leads to having an optimal policy.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/link-value-policy.jpg" alt="Link between value and policy"/> In fact, most of the time, in value-based methods, you'll use **an Epsilon-Greedy Policy** that handles the exploration/exploitation trade-off; we'll talk about this when we talk about Q-Learning in the second part of this unit. 
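To make the pre-specified policies mentioned above concrete, here is a hedged sketch of a Greedy and an Epsilon-Greedy Policy acting on tabular Q-values; the function names and the `q_table` layout are assumptions for illustration, not the course's code.

```python
import numpy as np

def greedy_policy(q_table, state):
    # Exploit: take the action with the highest estimated value at this state.
    return int(np.argmax(q_table[state]))

def epsilon_greedy_policy(q_table, state, epsilon, rng):
    # With probability epsilon explore (random action), otherwise exploit.
    if rng.random() < epsilon:
        return int(rng.integers(q_table.shape[1]))
    return greedy_policy(q_table, state)

# Example: 5 states x 3 actions, all values initialized to zero.
rng = np.random.default_rng(0)
q_table = np.zeros((5, 3))
action = epsilon_greedy_policy(q_table, state=2, epsilon=0.1, rng=rng)
```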
As we mentioned above, we have two types of value-based functions: ## The state-value function [[state-value-function]] We write the state value function under a policy π like this: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/state-value-function-1.jpg" alt="State value function"/> For each state, the state-value function outputs the expected return if the agent **starts at that state** and then follows the policy forever afterward (for all future timesteps, if you prefer). <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/state-value-function-2.jpg" alt="State value function"/> <figcaption>If we take the state with value -7: it's the expected return starting at that state and taking actions according to our policy (greedy policy), so right, right, right, down, down, right, right.</figcaption> </figure> ## The action-value function [[action-value-function]] In the action-value function, for each state and action pair, the action-value function **outputs the expected return** if the agent starts in that state, takes that action, and then follows the policy forever after. The value of taking action \\(a\\) in state \\(s\\) under a policy \\(π\\) is: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/action-state-value-function-1.jpg" alt="Action State value function"/> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/action-state-value-function-2.jpg" alt="Action State value function"/> We see that the difference is: - For the state-value function, we calculate **the value of a state \\(S_t\\)** - For the action-value function, we calculate **the value of the state-action pair ( \\(S_t, A_t\\) ) hence the value of taking that action at that state.** <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/two-types.jpg" alt="Two types of value function"/> <figcaption> Note: We didn't fill all the state-action pairs for the example of Action-value function</figcaption> </figure> In either case, whichever value function we choose (state-value or action-value function), **the returned value is the expected return.** However, the problem is that **to calculate EACH value of a state or a state-action pair, we need to sum all the rewards an agent can get if it starts at that state.** This can be a computationally expensive process, and that's **where the Bellman equation comes in to help us.**
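For reference, the formulas shown in the figures above can be written out as follows (standard notation; \\(\gamma\\) is the discount rate and \\(G_t\\) the discounted return):

- State-value function: \\(V_{\pi}(s) = \mathbb{E}_{\pi}[G_t \mid S_t = s]\\)
- Action-value function: \\(Q_{\pi}(s, a) = \mathbb{E}_{\pi}[G_t \mid S_t = s, A_t = a]\\)
- Discounted return: \\(G_t = R_{t+1} + \gamma R_{t+2} + \gamma^2 R_{t+3} + \dots\\)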
deep-rl-class/units/en/unit2/two-types-value-based-methods.mdx/0
{ "file_path": "deep-rl-class/units/en/unit2/two-types-value-based-methods.mdx", "repo_id": "deep-rl-class", "token_count": 1727 }
75
# Introduction [[introduction]]

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/thumbnail.png" alt="thumbnail"/>

In the last unit, we learned about Deep Q-Learning. In this value-based deep reinforcement learning algorithm, we **used a deep neural network to approximate the different Q-values for each possible action at a state.**

Since the beginning of the course, we have only studied value-based methods, **where we estimate a value function as an intermediate step towards finding an optimal policy.**

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/link-value-policy.jpg" alt="Link value policy" />

In value-based methods, the policy **\\(π\\) only exists because of the action value estimates since the policy is just a function** (for instance, greedy policy) that will select the action with the highest value given a state.

With policy-based methods, we want to optimize the policy directly **without having an intermediate step of learning a value function.**

So today, **we'll learn about policy-based methods and study a subset of these methods called policy gradient**. Then we'll implement our first policy gradient algorithm called Monte Carlo **Reinforce** from scratch using PyTorch.

Then, we'll test its robustness using the CartPole-v1 and PixelCopter environments.

You'll then be able to iterate and improve this implementation for more advanced environments.

<figure class="image table text-center m-0 w-full">
  <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/envs.gif" alt="Environments"/>
</figure>

Let's get started!
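As a hedged preview of what the Reinforce implementation boils down to (a sketch under my own naming, not the unit's notebook code), the core of the Monte Carlo policy-gradient update is a loss of the following form:

```python
import torch

def reinforce_loss(log_probs, returns):
    """Monte Carlo policy-gradient ("Reinforce") objective for a batch of episode steps.

    log_probs: 1-D tensor of log pi(a_t | s_t) for the actions actually taken.
    returns:   1-D tensor of the discounted returns G_t from each of those steps.
    """
    # Normalizing returns is a common variance-reduction trick (optional).
    returns = (returns - returns.mean()) / (returns.std() + 1e-8)
    # Gradient ascent on E[log pi * G] is gradient descent on its negative.
    return -(log_probs * returns).sum()
```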
deep-rl-class/units/en/unit4/introduction.mdx/0
{ "file_path": "deep-rl-class/units/en/unit4/introduction.mdx", "repo_id": "deep-rl-class", "token_count": 462 }
76
# Conclusion [[conclusion]]

Congrats on finishing this unit and the tutorial. You've just trained your first virtual robots 🥳.

**Take time to grasp the material before continuing**. You can also look at the additional reading materials we provided in the *additional reading* section.

Finally, we would love **to hear what you think of the course and how we can improve it**. If you have any feedback, please 👉 [fill out this form](https://forms.gle/BzKXWzLAGZESGNaE9)

See you in the next unit!

### Keep learning, stay awesome 🤗
deep-rl-class/units/en/unit6/conclusion.mdx/0
{ "file_path": "deep-rl-class/units/en/unit6/conclusion.mdx", "repo_id": "deep-rl-class", "token_count": 145 }
77
# Conclusion [[Conclusion]]

That’s all for today. Congrats on finishing this unit and the tutorial!

The best way to learn is to practice and try stuff. **Why not improve the implementation to handle frames as input?**

See you in the second part of this Unit 🔥

## Keep Learning, Stay awesome 🤗
deep-rl-class/units/en/unit8/conclusion.mdx/0
{ "file_path": "deep-rl-class/units/en/unit8/conclusion.mdx", "repo_id": "deep-rl-class", "token_count": 78 }
78
# Decision Transformers The Decision Transformer model was introduced by ["Decision Transformer: Reinforcement Learning via Sequence Modeling” by Chen L. et al](https://arxiv.org/abs/2106.01345). It abstracts Reinforcement Learning as a conditional-sequence modeling problem. The main idea is that instead of training a policy using RL methods, such as fitting a value function, that will tell us what action to take to maximize the return (cumulative reward), **we use a sequence modeling algorithm (Transformer) that, given a desired return, past states, and actions, will generate future actions to achieve this desired return**. It’s an autoregressive model conditioned on the desired return, past states, and actions to generate future actions that achieve the desired return. This is a complete shift in the Reinforcement Learning paradigm since we use generative trajectory modeling (modeling the joint distribution of the sequence of states, actions, and rewards) to replace conventional RL algorithms. This means that in Decision Transformers, we don’t maximize the return but rather generate a series of future actions that achieve the desired return. The 🤗 Transformers team integrated the Decision Transformer, an Offline Reinforcement Learning method, into the library as well as the Hugging Face Hub. ## Learn about Decision Transformers To learn more about Decision Transformers, you should read the blogpost we wrote about it [Introducing Decision Transformers on Hugging Face](https://huggingface.co/blog/decision-transformers) ## Train your first Decision Transformers Now that you understand how Decision Transformers work thanks to [Introducing Decision Transformers on Hugging Face](https://huggingface.co/blog/decision-transformers), you’re ready to learn to train your first Offline Decision Transformer model from scratch to make a half-cheetah run. Start the tutorial here 👉 https://huggingface.co/blog/train-decision-transformers ## Further reading For more information, we recommend that you check out the following resources: - [Decision Transformer: Reinforcement Learning via Sequence Modeling](https://arxiv.org/abs/2106.01345) - [Online Decision Transformer](https://arxiv.org/abs/2202.05607) ## Author This section was written by <a href="https://twitter.com/edwardbeeching">Edward Beeching</a>
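To make the conditioning described above concrete, here is a minimal, hedged sketch using the 🤗 Transformers integration; the tensor shapes and dimensions are made-up illustrations, and the blog posts above cover the full training/inference recipe.

```python
import torch
from transformers import DecisionTransformerConfig, DecisionTransformerModel

batch, seq_len, state_dim, act_dim = 1, 20, 17, 6   # made-up dimensions

config = DecisionTransformerConfig(state_dim=state_dim, act_dim=act_dim)
model = DecisionTransformerModel(config)

outputs = model(
    states=torch.randn(batch, seq_len, state_dim),
    actions=torch.randn(batch, seq_len, act_dim),
    rewards=torch.randn(batch, seq_len, 1),
    returns_to_go=torch.randn(batch, seq_len, 1),   # the desired (remaining) return we condition on
    timesteps=torch.arange(seq_len).unsqueeze(0),
    attention_mask=torch.ones(batch, seq_len),
)
# The model predicts the next actions that should achieve the desired return.
action_preds = outputs.action_preds   # shape: (batch, seq_len, act_dim)
```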
deep-rl-class/units/en/unitbonus3/decision-transformers.mdx/0
{ "file_path": "deep-rl-class/units/en/unitbonus3/decision-transformers.mdx", "repo_id": "deep-rl-class", "token_count": 543 }
79
cff-version: 1.2.0 title: 'Diffusers: State-of-the-art diffusion models' message: >- If you use this software, please cite it using the metadata from this file. type: software authors: - given-names: Patrick family-names: von Platen - given-names: Suraj family-names: Patil - given-names: Anton family-names: Lozhkov - given-names: Pedro family-names: Cuenca - given-names: Nathan family-names: Lambert - given-names: Kashif family-names: Rasul - given-names: Mishig family-names: Davaadorj - given-names: Thomas family-names: Wolf repository-code: 'https://github.com/huggingface/diffusers' abstract: >- Diffusers provides pretrained diffusion models across multiple modalities, such as vision and audio, and serves as a modular toolbox for inference and training of diffusion models. keywords: - deep-learning - pytorch - image-generation - hacktoberfest - diffusion - text2image - image2image - score-based-generative-modeling - stable-diffusion - stable-diffusion-diffusers license: Apache-2.0 version: 0.12.1
diffusers/CITATION.cff/0
{ "file_path": "diffusers/CITATION.cff", "repo_id": "diffusers", "token_count": 369 }
80
import glob import sys import pandas as pd from huggingface_hub import hf_hub_download, upload_file from huggingface_hub.utils._errors import EntryNotFoundError sys.path.append(".") from utils import BASE_PATH, FINAL_CSV_FILE, GITHUB_SHA, REPO_ID, collate_csv # noqa: E402 def has_previous_benchmark() -> str: csv_path = None try: csv_path = hf_hub_download(repo_id=REPO_ID, repo_type="dataset", filename=FINAL_CSV_FILE) except EntryNotFoundError: csv_path = None return csv_path def filter_float(value): if isinstance(value, str): return float(value.split()[0]) return value def push_to_hf_dataset(): all_csvs = sorted(glob.glob(f"{BASE_PATH}/*.csv")) collate_csv(all_csvs, FINAL_CSV_FILE) # If there's an existing benchmark file, we should report the changes. csv_path = has_previous_benchmark() if csv_path is not None: current_results = pd.read_csv(FINAL_CSV_FILE) previous_results = pd.read_csv(csv_path) numeric_columns = current_results.select_dtypes(include=["float64", "int64"]).columns numeric_columns = [ c for c in numeric_columns if c not in ["batch_size", "num_inference_steps", "actual_gpu_memory (gbs)"] ] for column in numeric_columns: previous_results[column] = previous_results[column].map(lambda x: filter_float(x)) # Calculate the percentage change current_results[column] = current_results[column].astype(float) previous_results[column] = previous_results[column].astype(float) percent_change = ((current_results[column] - previous_results[column]) / previous_results[column]) * 100 # Format the values with '+' or '-' sign and append to original values current_results[column] = current_results[column].map(str) + percent_change.map( lambda x: f" ({'+' if x > 0 else ''}{x:.2f}%)" ) # There might be newly added rows. So, filter out the NaNs. current_results[column] = current_results[column].map(lambda x: x.replace(" (nan%)", "")) # Overwrite the current result file. current_results.to_csv(FINAL_CSV_FILE, index=False) commit_message = f"upload from sha: {GITHUB_SHA}" if GITHUB_SHA is not None else "upload benchmark results" upload_file( repo_id=REPO_ID, path_in_repo=FINAL_CSV_FILE, path_or_fileobj=FINAL_CSV_FILE, repo_type="dataset", commit_message=commit_message, ) if __name__ == "__main__": push_to_hf_dataset()
diffusers/benchmarks/push_results.py/0
{ "file_path": "diffusers/benchmarks/push_results.py", "repo_id": "diffusers", "token_count": 1089 }
81
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Models 🤗 Diffusers provides pretrained models for popular algorithms and modules to create custom diffusion systems. The primary function of models is to denoise an input sample as modeled by the distribution \\(p_{\theta}(x_{t-1}|x_{t})\\). All models are built from the base [`ModelMixin`] class which is a [`torch.nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html) providing basic functionality for saving and loading models, locally and from the Hugging Face Hub. ## ModelMixin [[autodoc]] ModelMixin ## FlaxModelMixin [[autodoc]] FlaxModelMixin ## PushToHubMixin [[autodoc]] utils.PushToHubMixin
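As a hedged illustration of the shared `ModelMixin` API (the checkpoint and local path below are just examples):

```python
from diffusers import UNet2DModel

# Every ModelMixin subclass inherits `from_pretrained`, `save_pretrained`, and `push_to_hub`.
model = UNet2DModel.from_pretrained("google/ddpm-cat-256", subfolder="unet")  # example checkpoint
model.save_pretrained("./my-local-unet")
reloaded = UNet2DModel.from_pretrained("./my-local-unet")
```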
diffusers/docs/source/en/api/models/overview.md/0
{ "file_path": "diffusers/docs/source/en/api/models/overview.md", "repo_id": "diffusers", "token_count": 337 }
82
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Kandinsky 2.2 Kandinsky 2.2 is created by [Arseniy Shakhmatov](https://github.com/cene555), [Anton Razzhigaev](https://github.com/razzant), [Aleksandr Nikolich](https://github.com/AlexWortega), [Vladimir Arkhipkin](https://github.com/oriBetelgeuse), [Igor Pavlov](https://github.com/boomb0om), [Andrey Kuznetsov](https://github.com/kuznetsoffandrey), and [Denis Dimitrov](https://github.com/denndimitrov). The description from it's GitHub page is: *Kandinsky 2.2 brings substantial improvements upon its predecessor, Kandinsky 2.1, by introducing a new, more powerful image encoder - CLIP-ViT-G and the ControlNet support. The switch to CLIP-ViT-G as the image encoder significantly increases the model's capability to generate more aesthetic pictures and better understand text, thus enhancing the model's overall performance. The addition of the ControlNet mechanism allows the model to effectively control the process of generating images. This leads to more accurate and visually appealing outputs and opens new possibilities for text-guided image manipulation.* The original codebase can be found at [ai-forever/Kandinsky-2](https://github.com/ai-forever/Kandinsky-2). <Tip> Check out the [Kandinsky Community](https://huggingface.co/kandinsky-community) organization on the Hub for the official model checkpoints for tasks like text-to-image, image-to-image, and inpainting. </Tip> <Tip> Make sure to check out the schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines. </Tip> ## KandinskyV22PriorPipeline [[autodoc]] KandinskyV22PriorPipeline - all - __call__ - interpolate ## KandinskyV22Pipeline [[autodoc]] KandinskyV22Pipeline - all - __call__ ## KandinskyV22CombinedPipeline [[autodoc]] KandinskyV22CombinedPipeline - all - __call__ ## KandinskyV22ControlnetPipeline [[autodoc]] KandinskyV22ControlnetPipeline - all - __call__ ## KandinskyV22PriorEmb2EmbPipeline [[autodoc]] KandinskyV22PriorEmb2EmbPipeline - all - __call__ - interpolate ## KandinskyV22Img2ImgPipeline [[autodoc]] KandinskyV22Img2ImgPipeline - all - __call__ ## KandinskyV22Img2ImgCombinedPipeline [[autodoc]] KandinskyV22Img2ImgCombinedPipeline - all - __call__ ## KandinskyV22ControlnetImg2ImgPipeline [[autodoc]] KandinskyV22ControlnetImg2ImgPipeline - all - __call__ ## KandinskyV22InpaintPipeline [[autodoc]] KandinskyV22InpaintPipeline - all - __call__ ## KandinskyV22InpaintCombinedPipeline [[autodoc]] KandinskyV22InpaintCombinedPipeline - all - __call__
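A hedged text-to-image sketch with the prior + decoder checkpoints from the Kandinsky Community organization mentioned above; the prompt and generation settings are arbitrary examples.

```python
import torch
from diffusers import KandinskyV22PriorPipeline, KandinskyV22Pipeline

prior = KandinskyV22PriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
).to("cuda")
decoder = KandinskyV22Pipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
).to("cuda")

prompt = "a portrait of a robot reading a book, 4k photo"   # arbitrary example prompt
image_embeds, negative_image_embeds = prior(prompt).to_tuple()

image = decoder(
    image_embeds=image_embeds,
    negative_image_embeds=negative_image_embeds,
    height=768,
    width=768,
).images[0]
image.save("robot_portrait.png")
```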
diffusers/docs/source/en/api/pipelines/kandinsky_v22.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/kandinsky_v22.md", "repo_id": "diffusers", "token_count": 1051 }
83
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Image variation The Stable Diffusion model can also generate variations from an input image. It uses a fine-tuned version of a Stable Diffusion model by [Justin Pinkney](https://www.justinpinkney.com/) from [Lambda](https://lambdalabs.com/). The original codebase can be found at [LambdaLabsML/lambda-diffusers](https://github.com/LambdaLabsML/lambda-diffusers#stable-diffusion-image-variations) and additional official checkpoints for image variation can be found at [lambdalabs/sd-image-variations-diffusers](https://huggingface.co/lambdalabs/sd-image-variations-diffusers). <Tip> Make sure to check out the Stable Diffusion [Tips](./overview#tips) section to learn how to explore the tradeoff between scheduler speed and quality, and how to reuse pipeline components efficiently! </Tip> ## StableDiffusionImageVariationPipeline [[autodoc]] StableDiffusionImageVariationPipeline - all - __call__ - enable_attention_slicing - disable_attention_slicing - enable_xformers_memory_efficient_attention - disable_xformers_memory_efficient_attention ## StableDiffusionPipelineOutput [[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput
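For a quick start, a minimal usage sketch looks roughly like the following. It assumes the [lambdalabs/sd-image-variations-diffusers](https://huggingface.co/lambdalabs/sd-image-variations-diffusers) checkpoint linked above; the input path, guidance scale, and step count are placeholder values you should adapt to your own setup:

```py
import torch
from PIL import Image
from diffusers import StableDiffusionImageVariationPipeline

pipe = StableDiffusionImageVariationPipeline.from_pretrained(
    "lambdalabs/sd-image-variations-diffusers", torch_dtype=torch.float16
).to("cuda")

# Load any RGB image to generate variations of (path is a placeholder).
init_image = Image.open("path/to/input.jpg").convert("RGB")

# The pipeline conditions on the input image instead of a text prompt.
images = pipe(image=init_image, guidance_scale=3.0, num_inference_steps=25).images
images[0].save("variation.png")
```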
diffusers/docs/source/en/api/pipelines/stable_diffusion/image_variation.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/stable_diffusion/image_variation.md", "repo_id": "diffusers", "token_count": 495 }
84
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # 🧨 Diffusers’ Ethical Guidelines ## Preamble [Diffusers](https://huggingface.co/docs/diffusers/index) provides pre-trained diffusion models and serves as a modular toolbox for inference and training. Given its real case applications in the world and potential negative impacts on society, we think it is important to provide the project with ethical guidelines to guide the development, users’ contributions, and usage of the Diffusers library. The risks associated with using this technology are still being examined, but to name a few: copyrights issues for artists; deep-fake exploitation; sexual content generation in inappropriate contexts; non-consensual impersonation; harmful social biases perpetuating the oppression of marginalized groups. We will keep tracking risks and adapt the following guidelines based on the community's responsiveness and valuable feedback. ## Scope The Diffusers community will apply the following ethical guidelines to the project’s development and help coordinate how the community will integrate the contributions, especially concerning sensitive topics related to ethical concerns. ## Ethical guidelines The following ethical guidelines apply generally, but we will primarily implement them when dealing with ethically sensitive issues while making a technical choice. Furthermore, we commit to adapting those ethical principles over time following emerging harms related to the state of the art of the technology in question. - **Transparency**: we are committed to being transparent in managing PRs, explaining our choices to users, and making technical decisions. - **Consistency**: we are committed to guaranteeing our users the same level of attention in project management, keeping it technically stable and consistent. - **Simplicity**: with a desire to make it easy to use and exploit the Diffusers library, we are committed to keeping the project’s goals lean and coherent. - **Accessibility**: the Diffusers project helps lower the entry bar for contributors who can help run it even without technical expertise. Doing so makes research artifacts more accessible to the community. - **Reproducibility**: we aim to be transparent about the reproducibility of upstream code, models, and datasets when made available through the Diffusers library. - **Responsibility**: as a community and through teamwork, we hold a collective responsibility to our users by anticipating and mitigating this technology's potential risks and dangers. ## Examples of implementations: Safety features and Mechanisms The team works daily to make the technical and non-technical tools available to deal with the potential ethical and social risks associated with diffusion technology. Moreover, the community's input is invaluable in ensuring these features' implementation and raising awareness with us. 
- [**Community tab**](https://huggingface.co/docs/hub/repositories-pull-requests-discussions): it enables the community to discuss and better collaborate on a project. - **Bias exploration and evaluation**: the Hugging Face team provides a [space](https://huggingface.co/spaces/society-ethics/DiffusionBiasExplorer) to demonstrate the biases in Stable Diffusion interactively. In this sense, we support and encourage bias explorers and evaluations. - **Encouraging safety in deployment** - [**Safe Stable Diffusion**](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/stable_diffusion_safe): It mitigates the well-known issue that models, like Stable Diffusion, that are trained on unfiltered, web-crawled datasets tend to suffer from inappropriate degeneration. Related paper: [Safe Latent Diffusion: Mitigating Inappropriate Degeneration in Diffusion Models](https://arxiv.org/abs/2211.05105). - [**Safety Checker**](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py): It checks and compares the class probability of a set of hard-coded harmful concepts in the embedding space against an image after it has been generated. The harmful concepts are intentionally hidden to prevent reverse engineering of the checker. - **Staged released on the Hub**: in particularly sensitive situations, access to some repositories should be restricted. This staged release is an intermediary step that allows the repository’s authors to have more control over its use. - **Licensing**: [OpenRAILs](https://huggingface.co/blog/open_rail), a new type of licensing, allow us to ensure free access while having a set of restrictions that ensure more responsible use.
diffusers/docs/source/en/conceptual/ethical_guidelines.md/0
{ "file_path": "diffusers/docs/source/en/conceptual/ethical_guidelines.md", "repo_id": "diffusers", "token_count": 1156 }
85
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Token merging [Token merging](https://huggingface.co/papers/2303.17604) (ToMe) merges redundant tokens/patches progressively in the forward pass of a Transformer-based network which can speed-up the inference latency of [`StableDiffusionPipeline`]. Install ToMe from `pip`: ```bash pip install tomesd ``` You can use ToMe from the [`tomesd`](https://github.com/dbolya/tomesd) library with the [`apply_patch`](https://github.com/dbolya/tomesd?tab=readme-ov-file#usage) function: ```diff from diffusers import StableDiffusionPipeline import torch import tomesd pipeline = StableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True, ).to("cuda") + tomesd.apply_patch(pipeline, ratio=0.5) image = pipeline("a photo of an astronaut riding a horse on mars").images[0] ``` The `apply_patch` function exposes a number of [arguments](https://github.com/dbolya/tomesd#usage) to help strike a balance between pipeline inference speed and the quality of the generated tokens. The most important argument is `ratio` which controls the number of tokens that are merged during the forward pass. As reported in the [paper](https://huggingface.co/papers/2303.17604), ToMe can greatly preserve the quality of the generated images while boosting inference speed. By increasing the `ratio`, you can speed-up inference even further, but at the cost of some degraded image quality. To test the quality of the generated images, we sampled a few prompts from [Parti Prompts](https://parti.research.google/) and performed inference with the [`StableDiffusionPipeline`] with the following settings: <div class="flex justify-center"> <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/tome/tome_samples.png"> </div> We didn’t notice any significant decrease in the quality of the generated samples, and you can check out the generated samples in this [WandB report](https://wandb.ai/sayakpaul/tomesd-results/runs/23j4bj3i?workspace=). If you're interested in reproducing this experiment, use this [script](https://gist.github.com/sayakpaul/8cac98d7f22399085a060992f411ecbd). ## Benchmarks We also benchmarked the impact of `tomesd` on the [`StableDiffusionPipeline`] with [xFormers](https://huggingface.co/docs/diffusers/optimization/xformers) enabled across several image resolutions. The results are obtained from A100 and V100 GPUs in the following development environment: ```bash - `diffusers` version: 0.15.1 - Python version: 3.8.16 - PyTorch version (GPU?): 1.13.1+cu116 (True) - Huggingface_hub version: 0.13.2 - Transformers version: 4.27.2 - Accelerate version: 0.18.0 - xFormers version: 0.0.16 - tomesd version: 0.1.2 ``` To reproduce this benchmark, feel free to use this [script](https://gist.github.com/sayakpaul/27aec6bca7eb7b0e0aa4112205850335). 
The results are reported in seconds, and where applicable we report the speed-up percentage over the vanilla pipeline when using ToMe and ToMe + xFormers. | **GPU** | **Resolution** | **Batch size** | **Vanilla** | **ToMe** | **ToMe + xFormers** | |----------|----------------|----------------|-------------|----------------|---------------------| | **A100** | 512 | 10 | 6.88 | 5.26 (+23.55%) | 4.69 (+31.83%) | | | 768 | 10 | OOM | 14.71 | 11 | | | | 8 | OOM | 11.56 | 8.84 | | | | 4 | OOM | 5.98 | 4.66 | | | | 2 | 4.99 | 3.24 (+35.07%) | 2.1 (+37.88%) | | | | 1 | 3.29 | 2.24 (+31.91%) | 2.03 (+38.3%) | | | 1024 | 10 | OOM | OOM | OOM | | | | 8 | OOM | OOM | OOM | | | | 4 | OOM | 12.51 | 9.09 | | | | 2 | OOM | 6.52 | 4.96 | | | | 1 | 6.4 | 3.61 (+43.59%) | 2.81 (+56.09%) | | **V100** | 512 | 10 | OOM | 10.03 | 9.29 | | | | 8 | OOM | 8.05 | 7.47 | | | | 4 | 5.7 | 4.3 (+24.56%) | 3.98 (+30.18%) | | | | 2 | 3.14 | 2.43 (+22.61%) | 2.27 (+27.71%) | | | | 1 | 1.88 | 1.57 (+16.49%) | 1.57 (+16.49%) | | | 768 | 10 | OOM | OOM | 23.67 | | | | 8 | OOM | OOM | 18.81 | | | | 4 | OOM | 11.81 | 9.7 | | | | 2 | OOM | 6.27 | 5.2 | | | | 1 | 5.43 | 3.38 (+37.75%) | 2.82 (+48.07%) | | | 1024 | 10 | OOM | OOM | OOM | | | | 8 | OOM | OOM | OOM | | | | 4 | OOM | OOM | 19.35 | | | | 2 | OOM | 13 | 10.78 | | | | 1 | OOM | 6.66 | 5.54 | As seen in the tables above, the speed-up from `tomesd` becomes more pronounced for larger image resolutions. It is also interesting to note that with `tomesd`, it is possible to run the pipeline on a higher resolution like 1024x1024. You may be able to speed-up inference even more with [`torch.compile`](torch2.0).
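As a rough, untested sketch of combining the two optimizations, you can apply the ToMe patch first and then compile the UNet so the compiled graph already reflects the patched attention blocks. Whether this compiles cleanly and how much it helps depends on your PyTorch version, GPU, and resolution:

```py
import torch
import tomesd
from diffusers import StableDiffusionPipeline

pipeline = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True
).to("cuda")

# Merge tokens first, then compile the patched UNet.
tomesd.apply_patch(pipeline, ratio=0.5)
pipeline.unet = torch.compile(pipeline.unet, mode="reduce-overhead", fullgraph=True)

# The first call is slow because of compilation; subsequent calls benefit from both optimizations.
image = pipeline("a photo of an astronaut riding a horse on mars").images[0]
```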
diffusers/docs/source/en/optimization/tome.md/0
{ "file_path": "diffusers/docs/source/en/optimization/tome.md", "repo_id": "diffusers", "token_count": 3380 }
86
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Overview 🤗 Diffusers provides a collection of training scripts for you to train your own diffusion models. You can find all of our training scripts in [diffusers/examples](https://github.com/huggingface/diffusers/tree/main/examples). Each training script is: - **Self-contained**: the training script does not depend on any local files, and all packages required to run the script are installed from the `requirements.txt` file. - **Easy-to-tweak**: the training scripts are an example of how to train a diffusion model for a specific task and won't work out-of-the-box for every training scenario. You'll likely need to adapt the training script for your specific use-case. To help you with that, we've fully exposed the data preprocessing code and the training loop so you can modify it for your own use. - **Beginner-friendly**: the training scripts are designed to be beginner-friendly and easy to understand, rather than including the latest state-of-the-art methods to get the best and most competitive results. Any training methods we consider too complex are purposefully left out. - **Single-purpose**: each training script is expressly designed for only one task to keep it readable and understandable. 
Our current collection of training scripts includes:

| Training | SDXL-support | LoRA-support | Flax-support |
|---|---|---|---|
| [unconditional image generation](https://github.com/huggingface/diffusers/tree/main/examples/unconditional_image_generation) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb) | | | |
| [text-to-image](https://github.com/huggingface/diffusers/tree/main/examples/text_to_image) | 👍 | 👍 | 👍 |
| [textual inversion](https://github.com/huggingface/diffusers/tree/main/examples/textual_inversion) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb) | | | 👍 |
| [DreamBooth](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb) | 👍 | 👍 | 👍 |
| [ControlNet](https://github.com/huggingface/diffusers/tree/main/examples/controlnet) | 👍 | | 👍 |
| [InstructPix2Pix](https://github.com/huggingface/diffusers/tree/main/examples/instruct_pix2pix) | 👍 | | |
| [Custom Diffusion](https://github.com/huggingface/diffusers/tree/main/examples/custom_diffusion) | | | |
| [T2I-Adapters](https://github.com/huggingface/diffusers/tree/main/examples/t2i_adapter) | 👍 | | |
| [Kandinsky 2.2](https://github.com/huggingface/diffusers/tree/main/examples/kandinsky2_2/text_to_image) | | 👍 | |
| [Wuerstchen](https://github.com/huggingface/diffusers/tree/main/examples/wuerstchen/text_to_image) | | 👍 | |

These examples are **actively** maintained, so please feel free to open an issue if they aren't working as expected. If you feel like another training example should be included, you're more than welcome to start a [Feature Request](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feature_request.md&title=) to discuss your feature idea with us and whether it meets our criteria of being self-contained, easy-to-tweak, beginner-friendly, and single-purpose.

## Install

Make sure you can successfully run the latest versions of the example scripts by installing the library from source in a new virtual environment:

```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install .
```

Then navigate to the folder of the training script (for example, [DreamBooth](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth)) and install the `requirements.txt` file. Some training scripts have a specific requirement file for SDXL, LoRA or Flax. If you're using one of these scripts, make sure you install its corresponding requirements file.

```bash
cd examples/dreambooth
pip install -r requirements.txt
# to train SDXL with DreamBooth
pip install -r requirements_sdxl.txt
```

To speed up training and reduce memory usage, we recommend:

- using PyTorch 2.0 or higher to automatically use [scaled dot product attention](../optimization/torch2.0#scaled-dot-product-attention) during training (you don't need to make any changes to the training code)
- installing [xFormers](../optimization/xformers) to enable memory-efficient attention
diffusers/docs/source/en/training/overview.md/0
{ "file_path": "diffusers/docs/source/en/training/overview.md", "repo_id": "diffusers", "token_count": 1546 }
87
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Controlled generation Controlling outputs generated by diffusion models has been long pursued by the community and is now an active research topic. In many popular diffusion models, subtle changes in inputs, both images and text prompts, can drastically change outputs. In an ideal world we want to be able to control how semantics are preserved and changed. Most examples of preserving semantics reduce to being able to accurately map a change in input to a change in output. I.e. adding an adjective to a subject in a prompt preserves the entire image, only modifying the changed subject. Or, image variation of a particular subject preserves the subject's pose. Additionally, there are qualities of generated images that we would like to influence beyond semantic preservation. I.e. in general, we would like our outputs to be of good quality, adhere to a particular style, or be realistic. We will document some of the techniques `diffusers` supports to control generation of diffusion models. Much is cutting edge research and can be quite nuanced. If something needs clarifying or you have a suggestion, don't hesitate to open a discussion on the [forum](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63) or a [GitHub issue](https://github.com/huggingface/diffusers/issues). We provide a high level explanation of how the generation can be controlled as well as a snippet of the technicals. For more in depth explanations on the technicals, the original papers which are linked from the pipelines are always the best resources. Depending on the use case, one should choose a technique accordingly. In many cases, these techniques can be combined. For example, one can combine Textual Inversion with SEGA to provide more semantic guidance to the outputs generated using Textual Inversion. Unless otherwise mentioned, these are techniques that work with existing models and don't require their own weights. 1. [InstructPix2Pix](#instruct-pix2pix) 2. [Pix2Pix Zero](#pix2pix-zero) 3. [Attend and Excite](#attend-and-excite) 4. [Semantic Guidance](#semantic-guidance-sega) 5. [Self-attention Guidance](#self-attention-guidance-sag) 6. [Depth2Image](#depth2image) 7. [MultiDiffusion Panorama](#multidiffusion-panorama) 8. [DreamBooth](#dreambooth) 9. [Textual Inversion](#textual-inversion) 10. [ControlNet](#controlnet) 11. [Prompt Weighting](#prompt-weighting) 12. [Custom Diffusion](#custom-diffusion) 13. [Model Editing](#model-editing) 14. [DiffEdit](#diffedit) 15. [T2I-Adapter](#t2i-adapter) 16. [FABRIC](#fabric) For convenience, we provide a table to denote which methods are inference-only and which require fine-tuning/training. 
| **Method** | **Inference only** | **Requires training /<br> fine-tuning** | **Comments** | | :-------------------------------------------------: | :----------------: | :-------------------------------------: | :---------------------------------------------------------------------------------------------: | | [InstructPix2Pix](#instruct-pix2pix) | ✅ | ❌ | Can additionally be<br>fine-tuned for better <br>performance on specific <br>edit instructions. | | [Pix2Pix Zero](#pix2pix-zero) | ✅ | ❌ | | | [Attend and Excite](#attend-and-excite) | ✅ | ❌ | | | [Semantic Guidance](#semantic-guidance-sega) | ✅ | ❌ | | | [Self-attention Guidance](#self-attention-guidance-sag) | ✅ | ❌ | | | [Depth2Image](#depth2image) | ✅ | ❌ | | | [MultiDiffusion Panorama](#multidiffusion-panorama) | ✅ | ❌ | | | [DreamBooth](#dreambooth) | ❌ | ✅ | | | [Textual Inversion](#textual-inversion) | ❌ | ✅ | | | [ControlNet](#controlnet) | ✅ | ❌ | A ControlNet can be <br>trained/fine-tuned on<br>a custom conditioning. | | [Prompt Weighting](#prompt-weighting) | ✅ | ❌ | | | [Custom Diffusion](#custom-diffusion) | ❌ | ✅ | | | [Model Editing](#model-editing) | ✅ | ❌ | | | [DiffEdit](#diffedit) | ✅ | ❌ | | | [T2I-Adapter](#t2i-adapter) | ✅ | ❌ | | | [Fabric](#fabric) | ✅ | ❌ | | ## InstructPix2Pix [Paper](https://arxiv.org/abs/2211.09800) [InstructPix2Pix](../api/pipelines/pix2pix) is fine-tuned from Stable Diffusion to support editing input images. It takes as inputs an image and a prompt describing an edit, and it outputs the edited image. InstructPix2Pix has been explicitly trained to work well with [InstructGPT](https://openai.com/blog/instruction-following/)-like prompts. ## Pix2Pix Zero [Paper](https://arxiv.org/abs/2302.03027) [Pix2Pix Zero](../api/pipelines/pix2pix_zero) allows modifying an image so that one concept or subject is translated to another one while preserving general image semantics. The denoising process is guided from one conceptual embedding towards another conceptual embedding. The intermediate latents are optimized during the denoising process to push the attention maps towards reference attention maps. The reference attention maps are from the denoising process of the input image and are used to encourage semantic preservation. Pix2Pix Zero can be used both to edit synthetic images as well as real images. - To edit synthetic images, one first generates an image given a caption. Next, we generate image captions for the concept that shall be edited and for the new target concept. We can use a model like [Flan-T5](https://huggingface.co/docs/transformers/model_doc/flan-t5) for this purpose. Then, "mean" prompt embeddings for both the source and target concepts are created via the text encoder. Finally, the pix2pix-zero algorithm is used to edit the synthetic image. - To edit a real image, one first generates an image caption using a model like [BLIP](https://huggingface.co/docs/transformers/model_doc/blip). Then one applies DDIM inversion on the prompt and image to generate "inverse" latents. Similar to before, "mean" prompt embeddings for both source and target concepts are created and finally the pix2pix-zero algorithm in combination with the "inverse" latents is used to edit the image. <Tip> Pix2Pix Zero is the first model that allows "zero-shot" image editing. This means that the model can edit an image in less than a minute on a consumer GPU as shown [here](../api/pipelines/pix2pix_zero#usage-example). 
</Tip> As mentioned above, Pix2Pix Zero includes optimizing the latents (and not any of the UNet, VAE, or the text encoder) to steer the generation toward a specific concept. This means that the overall pipeline might require more memory than a standard [StableDiffusionPipeline](../api/pipelines/stable_diffusion/text2img). <Tip> An important distinction between methods like InstructPix2Pix and Pix2Pix Zero is that the former involves fine-tuning the pre-trained weights while the latter does not. This means that you can apply Pix2Pix Zero to any of the available Stable Diffusion models. </Tip> ## Attend and Excite [Paper](https://arxiv.org/abs/2301.13826) [Attend and Excite](../api/pipelines/attend_and_excite) allows subjects in the prompt to be faithfully represented in the final image. A set of token indices are given as input, corresponding to the subjects in the prompt that need to be present in the image. During denoising, each token index is guaranteed to have a minimum attention threshold for at least one patch of the image. The intermediate latents are iteratively optimized during the denoising process to strengthen the attention of the most neglected subject token until the attention threshold is passed for all subject tokens. Like Pix2Pix Zero, Attend and Excite also involves a mini optimization loop (leaving the pre-trained weights untouched) in its pipeline and can require more memory than the usual [StableDiffusionPipeline](../api/pipelines/stable_diffusion/text2img). ## Semantic Guidance (SEGA) [Paper](https://arxiv.org/abs/2301.12247) [SEGA](../api/pipelines/semantic_stable_diffusion) allows applying or removing one or more concepts from an image. The strength of the concept can also be controlled. I.e. the smile concept can be used to incrementally increase or decrease the smile of a portrait. Similar to how classifier free guidance provides guidance via empty prompt inputs, SEGA provides guidance on conceptual prompts. Multiple of these conceptual prompts can be applied simultaneously. Each conceptual prompt can either add or remove their concept depending on if the guidance is applied positively or negatively. Unlike Pix2Pix Zero or Attend and Excite, SEGA directly interacts with the diffusion process instead of performing any explicit gradient-based optimization. ## Self-attention Guidance (SAG) [Paper](https://arxiv.org/abs/2210.00939) [Self-attention Guidance](../api/pipelines/self_attention_guidance) improves the general quality of images. SAG provides guidance from predictions not conditioned on high-frequency details to fully conditioned images. The high frequency details are extracted out of the UNet self-attention maps. ## Depth2Image [Project](https://huggingface.co/stabilityai/stable-diffusion-2-depth) [Depth2Image](../api/pipelines/stable_diffusion/depth2img) is fine-tuned from Stable Diffusion to better preserve semantics for text guided image variation. It conditions on a monocular depth estimate of the original image. ## MultiDiffusion Panorama [Paper](https://arxiv.org/abs/2302.08113) [MultiDiffusion Panorama](../api/pipelines/panorama) defines a new generation process over a pre-trained diffusion model. This process binds together multiple diffusion generation methods that can be readily applied to generate high quality and diverse images. Results adhere to user-provided controls, such as desired aspect ratio (e.g., panorama), and spatial guiding signals, ranging from tight segmentation masks to bounding boxes. 
MultiDiffusion Panorama allows to generate high-quality images at arbitrary aspect ratios (e.g., panoramas). ## Fine-tuning your own models In addition to pre-trained models, Diffusers has training scripts for fine-tuning models on user-provided data. ## DreamBooth [Project](https://dreambooth.github.io/) [DreamBooth](../training/dreambooth) fine-tunes a model to teach it about a new subject. I.e. a few pictures of a person can be used to generate images of that person in different styles. ## Textual Inversion [Paper](https://arxiv.org/abs/2208.01618) [Textual Inversion](../training/text_inversion) fine-tunes a model to teach it about a new concept. I.e. a few pictures of a style of artwork can be used to generate images in that style. ## ControlNet [Paper](https://arxiv.org/abs/2302.05543) [ControlNet](../api/pipelines/controlnet) is an auxiliary network which adds an extra condition. There are 8 canonical pre-trained ControlNets trained on different conditionings such as edge detection, scribbles, depth maps, and semantic segmentations. ## Prompt Weighting [Prompt weighting](../using-diffusers/weighted_prompts) is a simple technique that puts more attention weight on certain parts of the text input. ## Custom Diffusion [Paper](https://arxiv.org/abs/2212.04488) [Custom Diffusion](../training/custom_diffusion) only fine-tunes the cross-attention maps of a pre-trained text-to-image diffusion model. It also allows for additionally performing Textual Inversion. It supports multi-concept training by design. Like DreamBooth and Textual Inversion, Custom Diffusion is also used to teach a pre-trained text-to-image diffusion model about new concepts to generate outputs involving the concept(s) of interest. ## Model Editing [Paper](https://arxiv.org/abs/2303.08084) The [text-to-image model editing pipeline](../api/pipelines/model_editing) helps you mitigate some of the incorrect implicit assumptions a pre-trained text-to-image diffusion model might make about the subjects present in the input prompt. For example, if you prompt Stable Diffusion to generate images for "A pack of roses", the roses in the generated images are more likely to be red. This pipeline helps you change that assumption. ## DiffEdit [Paper](https://arxiv.org/abs/2210.11427) [DiffEdit](../api/pipelines/diffedit) allows for semantic editing of input images along with input prompts while preserving the original input images as much as possible. ## T2I-Adapter [Paper](https://arxiv.org/abs/2302.08453) [T2I-Adapter](../api/pipelines/stable_diffusion/adapter) is an auxiliary network which adds an extra condition. There are 8 canonical pre-trained adapters trained on different conditionings such as edge detection, sketch, depth maps, and semantic segmentations. ## Fabric [Paper](https://arxiv.org/abs/2307.10159) [Fabric](https://github.com/huggingface/diffusers/tree/442017ccc877279bcf24fbe92f92d3d0def191b6/examples/community#stable-diffusion-fabric-pipeline) is a training-free approach applicable to a wide range of popular diffusion models, which exploits the self-attention layer present in the most widely used architectures to condition the diffusion process on a set of feedback images.
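To make one of the lighter-weight techniques above concrete, the sketch below shows prompt weighting with the third-party [compel](https://github.com/damian0815/compel) library, which the [prompt weighting guide](../using-diffusers/weighted_prompts) builds on. The `++` emphasis syntax is a compel convention, and the checkpoint and step count are only example choices:

```py
# pip install compel
import torch
from compel import Compel
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True
).to("cuda")

# Build prompt embeddings with extra weight on "red".
compel_proc = Compel(tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder)
prompt_embeds = compel_proc("a photo of a red++ cat playing with a ball")

# Pass the weighted embeddings instead of a plain string prompt.
image = pipe(prompt_embeds=prompt_embeds, num_inference_steps=30).images[0]
```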
diffusers/docs/source/en/using-diffusers/controlling_generation.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/controlling_generation.md", "repo_id": "diffusers", "token_count": 6312 }
88
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Load different Stable Diffusion formats [[open-in-colab]] Stable Diffusion models are available in different formats depending on the framework they're trained and saved with, and where you download them from. Converting these formats for use in 🤗 Diffusers allows you to use all the features supported by the library, such as [using different schedulers](schedulers) for inference, [building your custom pipeline](write_own_pipeline), and a variety of techniques and methods for [optimizing inference speed](../optimization/opt_overview). <Tip> We highly recommend using the `.safetensors` format because it is more secure than traditional pickled files which are vulnerable and can be exploited to execute any code on your machine (learn more in the [Load safetensors](using_safetensors) guide). </Tip> This guide will show you how to convert other Stable Diffusion formats to be compatible with 🤗 Diffusers. ## PyTorch .ckpt The checkpoint - or `.ckpt` - format is commonly used to store and save models. The `.ckpt` file contains the entire model and is typically several GBs in size. While you can load and use a `.ckpt` file directly with the [`~StableDiffusionPipeline.from_single_file`] method, it is generally better to convert the `.ckpt` file to 🤗 Diffusers so both formats are available. There are two options for converting a `.ckpt` file: use a Space to convert the checkpoint or convert the `.ckpt` file with a script. ### Convert with a Space The easiest and most convenient way to convert a `.ckpt` file is to use the [SD to Diffusers](https://huggingface.co/spaces/diffusers/sd-to-diffusers) Space. You can follow the instructions on the Space to convert the `.ckpt` file. This approach works well for basic models, but it may struggle with more customized models. You'll know the Space failed if it returns an empty pull request or error. In this case, you can try converting the `.ckpt` file with a script. ### Convert with a script 🤗 Diffusers provides a [conversion script](https://github.com/huggingface/diffusers/blob/main/scripts/convert_original_stable_diffusion_to_diffusers.py) for converting `.ckpt` files. This approach is more reliable than the Space above. Before you start, make sure you have a local clone of 🤗 Diffusers to run the script and log in to your Hugging Face account so you can open pull requests and push your converted model to the Hub. ```bash huggingface-cli login ``` To use the script: 1. Git clone the repository containing the `.ckpt` file you want to convert. For this example, let's convert this [TemporalNet](https://huggingface.co/CiaraRowles/TemporalNet) `.ckpt` file: ```bash git lfs install git clone https://huggingface.co/CiaraRowles/TemporalNet ``` 2. Open a pull request on the repository where you're converting the checkpoint from: ```bash cd TemporalNet && git fetch origin refs/pr/13:pr/13 git checkout pr/13 ``` 3. 
There are several input arguments to configure in the conversion script, but the most important ones are: - `checkpoint_path`: the path to the `.ckpt` file to convert. - `original_config_file`: a YAML file defining the configuration of the original architecture. If you can't find this file, try searching for the YAML file in the GitHub repository where you found the `.ckpt` file. - `dump_path`: the path to the converted model. For example, you can take the `cldm_v15.yaml` file from the [ControlNet](https://github.com/lllyasviel/ControlNet/tree/main/models) repository because the TemporalNet model is a Stable Diffusion v1.5 and ControlNet model. 4. Now you can run the script to convert the `.ckpt` file: ```bash python ../diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py --checkpoint_path temporalnetv3.ckpt --original_config_file cldm_v15.yaml --dump_path ./ --controlnet ``` 5. Once the conversion is done, upload your converted model and test out the resulting [pull request](https://huggingface.co/CiaraRowles/TemporalNet/discussions/13)! ```bash git push origin pr/13:refs/pr/13 ``` ## Keras .pb or .h5 <Tip warning={true}> 🧪 This is an experimental feature. Only Stable Diffusion v1 checkpoints are supported by the Convert KerasCV Space at the moment. </Tip> [KerasCV](https://keras.io/keras_cv/) supports training for [Stable Diffusion](https://github.com/keras-team/keras-cv/blob/master/keras_cv/models/stable_diffusion) v1 and v2. However, it offers limited support for experimenting with Stable Diffusion models for inference and deployment whereas 🤗 Diffusers has a more complete set of features for this purpose, such as different [noise schedulers](https://huggingface.co/docs/diffusers/using-diffusers/schedulers), [flash attention](https://huggingface.co/docs/diffusers/optimization/xformers), and [other optimization techniques](https://huggingface.co/docs/diffusers/optimization/fp16). The [Convert KerasCV](https://huggingface.co/spaces/sayakpaul/convert-kerascv-sd-diffusers) Space converts `.pb` or `.h5` files to PyTorch, and then wraps them in a [`StableDiffusionPipeline`] so it is ready for inference. The converted checkpoint is stored in a repository on the Hugging Face Hub. For this example, let's convert the [`sayakpaul/textual-inversion-kerasio`](https://huggingface.co/sayakpaul/textual-inversion-kerasio/tree/main) checkpoint which was trained with Textual Inversion. It uses the special token `<my-funny-cat>` to personalize images with cats. The Convert KerasCV Space allows you to input the following: * Your Hugging Face token. * Paths to download the UNet and text encoder weights from. Depending on how the model was trained, you don't necessarily need to provide the paths to both the UNet and text encoder. For example, Textual Inversion only requires the embeddings from the text encoder and a text-to-image model only requires the UNet weights. * Placeholder token is only applicable for textual inversion models. * The `output_repo_prefix` is the name of the repository where the converted model is stored. Click the **Submit** button to automatically convert the KerasCV checkpoint! Once the checkpoint is successfully converted, you'll see a link to the new repository containing the converted checkpoint. Follow the link to the new repository, and you'll see the Convert KerasCV Space generated a model card with an inference widget to try out the converted model. 
If you prefer to run inference with code, click on the **Use in Diffusers** button in the upper right corner of the model card to copy and paste the code snippet: ```py from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained( "sayakpaul/textual-inversion-cat-kerascv_sd_diffusers_pipeline", use_safetensors=True ) ``` Then, you can generate an image like: ```py from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained( "sayakpaul/textual-inversion-cat-kerascv_sd_diffusers_pipeline", use_safetensors=True ) pipeline.to("cuda") placeholder_token = "<my-funny-cat-token>" prompt = f"two {placeholder_token} getting married, photorealistic, high quality" image = pipeline(prompt, num_inference_steps=50).images[0] ``` ## A1111 LoRA files [Automatic1111](https://github.com/AUTOMATIC1111/stable-diffusion-webui) (A1111) is a popular web UI for Stable Diffusion that supports model sharing platforms like [Civitai](https://civitai.com/). Models trained with the Low-Rank Adaptation (LoRA) technique are especially popular because they're fast to train and have a much smaller file size than a fully finetuned model. 🤗 Diffusers supports loading A1111 LoRA checkpoints with [`~loaders.LoraLoaderMixin.load_lora_weights`]: ```py from diffusers import StableDiffusionXLPipeline import torch pipeline = StableDiffusionXLPipeline.from_pretrained( "Lykon/dreamshaper-xl-1-0", torch_dtype=torch.float16, variant="fp16" ).to("cuda") ``` Download a LoRA checkpoint from Civitai; this example uses the [Blueprintify SD XL 1.0](https://civitai.com/models/150986/blueprintify-sd-xl-10) checkpoint, but feel free to try out any LoRA checkpoint! ```py # uncomment to download the safetensor weights #!wget https://civitai.com/api/download/models/168776 -O blueprintify.safetensors ``` Load the LoRA checkpoint into the pipeline with the [`~loaders.LoraLoaderMixin.load_lora_weights`] method: ```py pipeline.load_lora_weights(".", weight_name="blueprintify.safetensors") ``` Now you can use the pipeline to generate images: ```py prompt = "bl3uprint, a highly detailed blueprint of the empire state building, explaining how to build all parts, many txt, blueprint grid backdrop" negative_prompt = "lowres, cropped, worst quality, low quality, normal quality, artifacts, signature, watermark, username, blurry, more than one bridge, bad architecture" image = pipeline( prompt=prompt, negative_prompt=negative_prompt, generator=torch.manual_seed(0), ).images[0] image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/blueprint-lora.png"/> </div>
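If you want to dial the LoRA's influence up or down, or remove it entirely, a sketch along the following lines should work, continuing from the variables defined above. The `scale` value is arbitrary, and `unload_lora_weights` assumes a recent enough version of 🤗 Diffusers:

```py
# Lower the LoRA influence for a single generation; 1.0 corresponds to the full effect.
image = pipeline(
    prompt=prompt,
    negative_prompt=negative_prompt,
    cross_attention_kwargs={"scale": 0.5},
    generator=torch.manual_seed(0),
).images[0]

# Remove the LoRA weights to get the base model behavior back.
pipeline.unload_lora_weights()
```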
diffusers/docs/source/en/using-diffusers/other-formats.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/other-formats.md", "repo_id": "diffusers", "token_count": 2825 }
89
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Understanding pipelines, models and schedulers [[open-in-colab]] 🧨 Diffusers is designed to be a user-friendly and flexible toolbox for building diffusion systems tailored to your use-case. At the core of the toolbox are models and schedulers. While the [`DiffusionPipeline`] bundles these components together for convenience, you can also unbundle the pipeline and use the models and schedulers separately to create new diffusion systems. In this tutorial, you'll learn how to use models and schedulers to assemble a diffusion system for inference, starting with a basic pipeline and then progressing to the Stable Diffusion pipeline. ## Deconstruct a basic pipeline A pipeline is a quick and easy way to run a model for inference, requiring no more than four lines of code to generate an image: ```py >>> from diffusers import DDPMPipeline >>> ddpm = DDPMPipeline.from_pretrained("google/ddpm-cat-256", use_safetensors=True).to("cuda") >>> image = ddpm(num_inference_steps=25).images[0] >>> image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ddpm-cat.png" alt="Image of cat created from DDPMPipeline"/> </div> That was super easy, but how did the pipeline do that? Let's breakdown the pipeline and take a look at what's happening under the hood. In the example above, the pipeline contains a [`UNet2DModel`] model and a [`DDPMScheduler`]. The pipeline denoises an image by taking random noise the size of the desired output and passing it through the model several times. At each timestep, the model predicts the *noise residual* and the scheduler uses it to predict a less noisy image. The pipeline repeats this process until it reaches the end of the specified number of inference steps. To recreate the pipeline with the model and scheduler separately, let's write our own denoising process. 1. Load the model and scheduler: ```py >>> from diffusers import DDPMScheduler, UNet2DModel >>> scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256") >>> model = UNet2DModel.from_pretrained("google/ddpm-cat-256", use_safetensors=True).to("cuda") ``` 2. Set the number of timesteps to run the denoising process for: ```py >>> scheduler.set_timesteps(50) ``` 3. Setting the scheduler timesteps creates a tensor with evenly spaced elements in it, 50 in this example. Each element corresponds to a timestep at which the model denoises an image. When you create the denoising loop later, you'll iterate over this tensor to denoise an image: ```py >>> scheduler.timesteps tensor([980, 960, 940, 920, 900, 880, 860, 840, 820, 800, 780, 760, 740, 720, 700, 680, 660, 640, 620, 600, 580, 560, 540, 520, 500, 480, 460, 440, 420, 400, 380, 360, 340, 320, 300, 280, 260, 240, 220, 200, 180, 160, 140, 120, 100, 80, 60, 40, 20, 0]) ``` 4. 
Create some random noise with the same shape as the desired output: ```py >>> import torch >>> sample_size = model.config.sample_size >>> noise = torch.randn((1, 3, sample_size, sample_size), device="cuda") ``` 5. Now write a loop to iterate over the timesteps. At each timestep, the model does a [`UNet2DModel.forward`] pass and returns the noisy residual. The scheduler's [`~DDPMScheduler.step`] method takes the noisy residual, timestep, and input and it predicts the image at the previous timestep. This output becomes the next input to the model in the denoising loop, and it'll repeat until it reaches the end of the `timesteps` array. ```py >>> input = noise >>> for t in scheduler.timesteps: ... with torch.no_grad(): ... noisy_residual = model(input, t).sample ... previous_noisy_sample = scheduler.step(noisy_residual, t, input).prev_sample ... input = previous_noisy_sample ``` This is the entire denoising process, and you can use this same pattern to write any diffusion system. 6. The last step is to convert the denoised output into an image: ```py >>> from PIL import Image >>> import numpy as np >>> image = (input / 2 + 0.5).clamp(0, 1).squeeze() >>> image = (image.permute(1, 2, 0) * 255).round().to(torch.uint8).cpu().numpy() >>> image = Image.fromarray(image) >>> image ``` In the next section, you'll put your skills to the test and breakdown the more complex Stable Diffusion pipeline. The steps are more or less the same. You'll initialize the necessary components, and set the number of timesteps to create a `timestep` array. The `timestep` array is used in the denoising loop, and for each element in this array, the model predicts a less noisy image. The denoising loop iterates over the `timestep`'s, and at each timestep, it outputs a noisy residual and the scheduler uses it to predict a less noisy image at the previous timestep. This process is repeated until you reach the end of the `timestep` array. Let's try it out! ## Deconstruct the Stable Diffusion pipeline Stable Diffusion is a text-to-image *latent diffusion* model. It is called a latent diffusion model because it works with a lower-dimensional representation of the image instead of the actual pixel space, which makes it more memory efficient. The encoder compresses the image into a smaller representation, and a decoder to convert the compressed representation back into an image. For text-to-image models, you'll need a tokenizer and an encoder to generate text embeddings. From the previous example, you already know you need a UNet model and a scheduler. As you can see, this is already more complex than the DDPM pipeline which only contains a UNet model. The Stable Diffusion model has three separate pretrained models. <Tip> 💡 Read the [How does Stable Diffusion work?](https://huggingface.co/blog/stable_diffusion#how-does-stable-diffusion-work) blog for more details about how the VAE, UNet, and text encoder models work. </Tip> Now that you know what you need for the Stable Diffusion pipeline, load all these components with the [`~ModelMixin.from_pretrained`] method. 
You can find them in the pretrained [`CompVis/stable-diffusion-v1-4`](https://huggingface.co/CompVis/stable-diffusion-v1-4) checkpoint, and each component is stored in a separate subfolder:

```py
>>> from PIL import Image
>>> import torch
>>> from transformers import CLIPTextModel, CLIPTokenizer
>>> from diffusers import AutoencoderKL, UNet2DConditionModel, PNDMScheduler

>>> vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae", use_safetensors=True)
>>> tokenizer = CLIPTokenizer.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="tokenizer")
>>> text_encoder = CLIPTextModel.from_pretrained(
...     "CompVis/stable-diffusion-v1-4", subfolder="text_encoder", use_safetensors=True
... )
>>> unet = UNet2DConditionModel.from_pretrained(
...     "CompVis/stable-diffusion-v1-4", subfolder="unet", use_safetensors=True
... )
```

Instead of the default [`PNDMScheduler`], exchange it for the [`UniPCMultistepScheduler`] to see how easy it is to plug a different scheduler in:

```py
>>> from diffusers import UniPCMultistepScheduler

>>> scheduler = UniPCMultistepScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
```

To speed up inference, move the models to a GPU since, unlike the scheduler, they have trainable weights:

```py
>>> torch_device = "cuda"
>>> vae.to(torch_device)
>>> text_encoder.to(torch_device)
>>> unet.to(torch_device)
```

### Create text embeddings

The next step is to tokenize the text to generate embeddings. The text is used to condition the UNet model and steer the diffusion process towards something that resembles the input prompt.

<Tip>

💡 The `guidance_scale` parameter determines how much weight should be given to the prompt when generating an image.

</Tip>

Feel free to choose any prompt you like if you want to generate something else!

```py
>>> prompt = ["a photograph of an astronaut riding a horse"]
>>> height = 512  # default height of Stable Diffusion
>>> width = 512  # default width of Stable Diffusion
>>> num_inference_steps = 25  # Number of denoising steps
>>> guidance_scale = 7.5  # Scale for classifier-free guidance
>>> generator = torch.manual_seed(0)  # Seed generator to create the initial latent noise
>>> batch_size = len(prompt)
```

Tokenize the text and generate the embeddings from the prompt:

```py
>>> text_input = tokenizer(
...     prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt"
... )

>>> with torch.no_grad():
...     text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]
```

You'll also need to generate the *unconditional text embeddings* which are the embeddings for the padding token. These need to have the same shape (`batch_size` and `seq_length`) as the conditional `text_embeddings`:

```py
>>> max_length = text_input.input_ids.shape[-1]
>>> uncond_input = tokenizer([""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt")
>>> uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
```

Let's concatenate the conditional and unconditional embeddings into a batch to avoid doing two forward passes:

```py
>>> text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
```

### Create random noise

Next, generate some initial random noise as a starting point for the diffusion process. This is the latent representation of the image, and it'll be gradually denoised.
At this point, the `latent` image is smaller than the final image size, but that's okay because the model will transform it into the final 512x512 image dimensions later.

<Tip>

💡 The height and width are divided by 8 because the `vae` model has 3 down-sampling layers. You can check by running the following:

```py
2 ** (len(vae.config.block_out_channels) - 1) == 8
```

</Tip>

```py
>>> latents = torch.randn(
...     (batch_size, unet.config.in_channels, height // 8, width // 8),
...     generator=generator,
...     device=torch_device,
... )
```

### Denoise the image

Start by scaling the input with the initial noise distribution, *sigma*, the noise scale value, which is required for improved schedulers like [`UniPCMultistepScheduler`]:

```py
>>> latents = latents * scheduler.init_noise_sigma
```

The last step is to create the denoising loop that'll progressively transform the pure noise in `latents` to an image described by your prompt. Remember, the denoising loop needs to do three things:

1. Set the scheduler's timesteps to use during denoising.
2. Iterate over the timesteps.
3. At each timestep, call the UNet model to predict the noise residual and pass it to the scheduler to compute the previous noisy sample.

```py
>>> from tqdm.auto import tqdm

>>> scheduler.set_timesteps(num_inference_steps)

>>> for t in tqdm(scheduler.timesteps):
...     # expand the latents if we are doing classifier-free guidance to avoid doing two forward passes.
...     latent_model_input = torch.cat([latents] * 2)

...     latent_model_input = scheduler.scale_model_input(latent_model_input, timestep=t)

...     # predict the noise residual
...     with torch.no_grad():
...         noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

...     # perform guidance
...     noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
...     noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

...     # compute the previous noisy sample x_t -> x_t-1
...     latents = scheduler.step(noise_pred, t, latents).prev_sample
```

### Decode the image

The final step is to use the `vae` to decode the latent representation into an image and get the decoded output with `sample`:

```py
# scale and decode the image latents with vae
latents = 1 / 0.18215 * latents
with torch.no_grad():
    image = vae.decode(latents).sample
```

Lastly, convert the image to a `PIL.Image` to see your generated image!

```py
>>> image = (image / 2 + 0.5).clamp(0, 1).squeeze()
>>> image = (image.permute(1, 2, 0) * 255).round().to(torch.uint8).cpu().numpy()
>>> image = Image.fromarray(image)
>>> image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/blog/assets/98_stable_diffusion/stable_diffusion_k_lms.png"/>
</div>

## Next steps

From basic to complex pipelines, you've seen that all you really need to write your own diffusion system is a denoising loop. The loop should set the scheduler's timesteps, iterate over them, and alternate between calling the UNet model to predict the noise residual and passing it to the scheduler to compute the previous noisy sample.

This is really what 🧨 Diffusers is designed for: to make it intuitive and easy to write your own diffusion system using models and schedulers.

For your next steps, feel free to:

* Learn how to [build and contribute a pipeline](../using-diffusers/contribute_pipeline) to 🧨 Diffusers. We can't wait to see what you'll come up with!
* Explore [existing pipelines](../api/pipelines/overview) in the library, and see if you can deconstruct and build a pipeline from scratch using the models and schedulers separately.
diffusers/docs/source/en/using-diffusers/write_own_pipeline.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/write_own_pipeline.md", "repo_id": "diffusers", "token_count": 4163 }
90
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# How to use Stable Diffusion on Apple Silicon (M1/M2)

Diffusers is compatible with Apple silicon for Stable Diffusion inference using the PyTorch `mps` backend. These are the steps to follow to use an M1 or M2 computer with Stable Diffusion.

## Requirements

- Mac computer with Apple silicon (M1/M2) hardware.
- macOS 12.6 or later (13.0 or later recommended).
- arm64 version of Python.
- PyTorch 2.0 (recommended) or 1.13 (the minimum version that supports `mps`). You can install it with `pip` or `conda` following the instructions at https://pytorch.org/get-started/locally/.

## Inference Pipeline

The code below shows how to move a Stable Diffusion pipeline to an M1 or M2 device with the `mps` backend using the familiar `to()` interface.

<Tip warning={true}>

**If you are using PyTorch 1.13,** we recommend "priming" the pipeline with one additional one-time pass through it. This is a temporary workaround for an odd issue we have observed: the first inference pass produces results slightly different from subsequent ones. This pass only needs to be done once, and you can use just a single inference step and discard the result.

</Tip>

We recommend using PyTorch 2 or later, as it resolves a number of problems, including the one described in the previous tip.

```python
# make sure you're logged in with `huggingface-cli login`
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe = pipe.to("mps")

# Recommended if your computer has 64 GB of RAM or less
pipe.enable_attention_slicing()

prompt = "a photo of an astronaut riding a horse on mars"

# First-time "warmup" pass (see the explanation above)
_ = pipe(prompt, num_inference_steps=1)

# Results match those from the CPU device after the warmup pass.
image = pipe(prompt).images[0]
```

## Performance Recommendations

M1/M2 performance is very sensitive to memory pressure. The system will automatically swap if it needs to, but performance degrades significantly when it does.

We recommend using *attention slicing* to reduce memory pressure during inference and prevent swapping, particularly if your computer's system RAM is less than 64 GB, or if you generate images at non-standard resolutions larger than 512 × 512 pixels. Attention slicing performs the costly attention operation in multiple steps instead of all at once. It typically has a performance impact of ~20% on computers without universal memory, but we have observed *better performance* on most Apple silicon computers unless you have 64 GB or more.

```python
pipeline.enable_attention_slicing()
```

## Known Issues

- Generating multiple prompts in a batch [crashes or doesn't work reliably](https://github.com/huggingface/diffusers/issues/363). We believe this is related to the [`mps` backend in PyTorch](https://github.com/pytorch/pytorch/issues/84039). This is being resolved, but for now we recommend iterating instead of batching.
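If generation fails outright rather than just being slow, it is worth confirming that the `mps` backend is actually available in your environment before digging further. A quick check using standard PyTorch APIs:

```python
import torch

# Both should print True on a correctly configured Apple silicon setup.
print(torch.backends.mps.is_available())  # the mps device can be used right now
print(torch.backends.mps.is_built())      # this PyTorch build was compiled with MPS support
```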
diffusers/docs/source/ko/optimization/mps.md/0
{ "file_path": "diffusers/docs/source/ko/optimization/mps.md", "repo_id": "diffusers", "token_count": 2533 }
91
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Low-Rank Adaptation of Large Language Models (LoRA) [[open-in-colab]] <Tip warning={true}> 현재 LoRA는 [`UNet2DConditionalModel`]의 어텐션 레이어에서만 지원됩니다. </Tip> [LoRA(Low-Rank Adaptation of Large Language Models)](https://arxiv.org/abs/2106.09685)는 메모리를 적게 사용하면서 대규모 모델의 학습을 가속화하는 학습 방법입니다. 이는 rank-decomposition weight 행렬 쌍(**업데이트 행렬**이라고 함)을 추가하고 새로 추가된 가중치**만** 학습합니다. 여기에는 몇 가지 장점이 있습니다. - 이전에 미리 학습된 가중치는 고정된 상태로 유지되므로 모델이 [치명적인 망각](https://www.pnas.org/doi/10.1073/pnas.1611835114) 경향이 없습니다. - Rank-decomposition 행렬은 원래 모델보다 파라메터 수가 훨씬 적으므로 학습된 LoRA 가중치를 쉽게 끼워넣을 수 있습니다. - LoRA 매트릭스는 일반적으로 원본 모델의 어텐션 레이어에 추가됩니다. 🧨 Diffusers는 [`~diffusers.loaders.UNet2DConditionLoadersMixin.load_attn_procs`] 메서드를 제공하여 LoRA 가중치를 모델의 어텐션 레이어로 불러옵니다. `scale` 매개변수를 통해 모델이 새로운 학습 이미지에 맞게 조정되는 범위를 제어할 수 있습니다. - 메모리 효율성이 향상되어 Tesla T4, RTX 3080 또는 RTX 2080 Ti와 같은 소비자용 GPU에서 파인튜닝을 실행할 수 있습니다! T4와 같은 GPU는 무료이며 Kaggle 또는 Google Colab 노트북에서 쉽게 액세스할 수 있습니다. <Tip> 💡 LoRA는 어텐션 레이어에만 한정되지는 않습니다. 저자는 언어 모델의 어텐션 레이어를 수정하는 것이 매우 효율적으로 죻은 성능을 얻기에 충분하다는 것을 발견했습니다. 이것이 LoRA 가중치를 모델의 어텐션 레이어에 추가하는 것이 일반적인 이유입니다. LoRA 작동 방식에 대한 자세한 내용은 [Using LoRA for effective Stable Diffusion fine-tuning](https://huggingface.co/blog/lora) 블로그를 확인하세요! </Tip> [cloneofsimo](https://github.com/cloneofsimo)는 인기 있는 [lora](https://github.com/cloneofsimo/lora) GitHub 리포지토리에서 Stable Diffusion을 위한 LoRA 학습을 최초로 시도했습니다. 🧨 Diffusers는 [text-to-image 생성](https://github.com/huggingface/diffusers/tree/main/examples/text_to_image#training-with-lora) 및 [DreamBooth](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth#training-with-low-rank-adaptation-of-large-language-models-lora)을 지원합니다. 이 가이드는 두 가지를 모두 수행하는 방법을 보여줍니다. 모델을 저장하거나 커뮤니티와 공유하려면 Hugging Face 계정에 로그인하세요(아직 계정이 없는 경우 [생성](hf.co/join)하세요): ```bash huggingface-cli login ``` ## Text-to-image 수십억 개의 파라메터들이 있는 Stable Diffusion과 같은 모델을 파인튜닝하는 것은 느리고 어려울 수 있습니다. LoRA를 사용하면 diffusion 모델을 파인튜닝하는 것이 훨씬 쉽고 빠릅니다. 8비트 옵티마이저와 같은 트릭에 의존하지 않고도 11GB의 GPU RAM으로 하드웨어에서 실행할 수 있습니다. ### 학습[[dreambooth-training]] [Pokémon BLIP 캡션](https://huggingface.co/datasets/lambdalabs/pokemon-blip-captions) 데이터셋으로 [`stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5)를 파인튜닝해 나만의 포켓몬을 생성해 보겠습니다. 시작하려면 `MODEL_NAME` 및 `DATASET_NAME` 환경 변수가 설정되어 있는지 확인하십시오. `OUTPUT_DIR` 및 `HUB_MODEL_ID` 변수는 선택 사항이며 허브에서 모델을 저장할 위치를 지정합니다. ```bash export MODEL_NAME="runwayml/stable-diffusion-v1-5" export OUTPUT_DIR="/sddata/finetune/lora/pokemon" export HUB_MODEL_ID="pokemon-lora" export DATASET_NAME="lambdalabs/pokemon-blip-captions" ``` 학습을 시작하기 전에 알아야 할 몇 가지 플래그가 있습니다. * `--push_to_hub`를 명시하면 학습된 LoRA 임베딩을 허브에 저장합니다. * `--report_to=wandb`는 학습 결과를 가중치 및 편향 대시보드에 보고하고 기록합니다(예를 들어, 이 [보고서](https://wandb.ai/pcuenq/text2image-fine-tune/run/b4k1w0tn?workspace=user-pcuenq)를 참조하세요). * `--learning_rate=1e-04`, 일반적으로 LoRA에서 사용하는 것보다 더 높은 학습률을 사용할 수 있습니다. 
이제 학습을 시작할 준비가 되었습니다 (전체 학습 스크립트는 [여기](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora.py)에서 찾을 수 있습니다). ```bash accelerate launch train_dreambooth_lora.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir=$INSTANCE_DIR \ --output_dir=$OUTPUT_DIR \ --instance_prompt="a photo of sks dog" \ --resolution=512 \ --train_batch_size=1 \ --gradient_accumulation_steps=1 \ --checkpointing_steps=100 \ --learning_rate=1e-4 \ --report_to="wandb" \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --max_train_steps=500 \ --validation_prompt="A photo of sks dog in a bucket" \ --validation_epochs=50 \ --seed="0" \ --push_to_hub ``` ### 추론[[dreambooth-inference]] 이제 [`StableDiffusionPipeline`]에서 기본 모델을 불러와 추론을 위해 모델을 사용할 수 있습니다: ```py >>> import torch >>> from diffusers import StableDiffusionPipeline >>> model_base = "runwayml/stable-diffusion-v1-5" >>> pipe = StableDiffusionPipeline.from_pretrained(model_base, torch_dtype=torch.float16) ``` *기본 모델의 가중치 위에* 파인튜닝된 DreamBooth 모델에서 LoRA 가중치를 불러온 다음, 더 빠른 추론을 위해 파이프라인을 GPU로 이동합니다. LoRA 가중치를 프리징된 사전 훈련된 모델 가중치와 병합할 때, 선택적으로 'scale' 매개변수로 어느 정도의 가중치를 병합할 지 조절할 수 있습니다: <Tip> 💡 `0`의 `scale` 값은 LoRA 가중치를 사용하지 않아 원래 모델의 가중치만 사용한 것과 같고, `1`의 `scale` 값은 파인튜닝된 LoRA 가중치만 사용함을 의미합니다. 0과 1 사이의 값들은 두 결과들 사이로 보간됩니다. </Tip> ```py >>> pipe.unet.load_attn_procs(model_path) >>> pipe.to("cuda") # LoRA 파인튜닝된 모델의 가중치 절반과 기본 모델의 가중치 절반 사용 >>> image = pipe( ... "A picture of a sks dog in a bucket.", ... num_inference_steps=25, ... guidance_scale=7.5, ... cross_attention_kwargs={"scale": 0.5}, ... ).images[0] # 완전히 파인튜닝된 LoRA 모델의 가중치 사용 >>> image = pipe("A picture of a sks dog in a bucket.", num_inference_steps=25, guidance_scale=7.5).images[0] >>> image.save("bucket-dog.png") ```
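
`scale` 값이 기본 가중치와 LoRA 가중치 사이를 어떻게 보간하는지 확인하고 싶다면, 여러 `scale` 값을 반복하며 결과를 비교해볼 수 있습니다. 아래는 위에서 만든 `pipe` 객체가 있다고 가정한 간단한 스케치이며, 사용한 값과 파일 이름은 예시일 뿐입니다:

```py
import torch

prompt = "A picture of a sks dog in a bucket."

for scale in (0.0, 0.5, 1.0):
    # 동일한 시드를 사용해 `scale` 값에 따른 차이만 비교합니다.
    generator = torch.Generator("cuda").manual_seed(0)
    image = pipe(
        prompt,
        num_inference_steps=25,
        guidance_scale=7.5,
        generator=generator,
        cross_attention_kwargs={"scale": scale},
    ).images[0]
    image.save(f"bucket-dog-scale-{scale}.png")
```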
diffusers/docs/source/ko/training/lora.md/0
{ "file_path": "diffusers/docs/source/ko/training/lora.md", "repo_id": "diffusers", "token_count": 4734 }
92
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # 파이프라인, 모델, 스케줄러 불러오기 기본적으로 diffusion 모델은 다양한 컴포넌트들(모델, 토크나이저, 스케줄러) 간의 복잡한 상호작용을 기반으로 동작합니다. 디퓨저스(Diffusers)는 이러한 diffusion 모델을 보다 쉽고 간편한 API로 제공하는 것을 목표로 설계되었습니다. [`DiffusionPipeline`]은 diffusion 모델이 갖는 복잡성을 하나의 파이프라인 API로 통합하고, 동시에 이를 구성하는 각각의 컴포넌트들을 태스크에 맞춰 유연하게 커스터마이징할 수 있도록 지원하고 있습니다. diffusion 모델의 훈련과 추론에 필요한 모든 것은 [`DiffusionPipeline.from_pretrained`] 메서드를 통해 접근할 수 있습니다. (이 말의 의미는 다음 단락에서 보다 자세하게 다뤄보도록 하겠습니다.) 이 문서에서는 설명할 내용은 다음과 같습니다. * 허브를 통해 혹은 로컬로 파이프라인을 불러오는 법 * 파이프라인에 다른 컴포넌트들을 적용하는 법 * 오리지널 체크포인트가 아닌 variant를 불러오는 법 (variant란 기본으로 설정된 `fp32`가 아닌 다른 부동 소수점 타입(예: `fp16`)을 사용하거나 Non-EMA 가중치를 사용하는 체크포인트들을 의미합니다.) * 모델과 스케줄러를 불러오는 법 ## Diffusion 파이프라인 <Tip> 💡 [`DiffusionPipeline`] 클래스가 동작하는 방식에 보다 자세한 내용이 궁금하다면, [DiffusionPipeline explained](#diffusionpipeline에-대해-알아보기) 섹션을 확인해보세요. </Tip> [`DiffusionPipeline`] 클래스는 diffusion 모델을 [허브](https://huggingface.co/models?library=diffusers)로부터 불러오는 가장 심플하면서 보편적인 방식입니다. [`DiffusionPipeline.from_pretrained`] 메서드는 적합한 파이프라인 클래스를 자동으로 탐지하고, 필요한 구성요소(configuration)와 가중치(weight) 파일들을 다운로드하고 캐싱한 다음, 해당 파이프라인 인스턴스를 반환합니다. ```python from diffusers import DiffusionPipeline repo_id = "runwayml/stable-diffusion-v1-5" pipe = DiffusionPipeline.from_pretrained(repo_id) ``` 물론 [`DiffusionPipeline`] 클래스를 사용하지 않고, 명시적으로 직접 해당 파이프라인 클래스를 불러오는 것도 가능합니다. 아래 예시 코드는 위 예시와 동일한 인스턴스를 반환합니다. ```python from diffusers import StableDiffusionPipeline repo_id = "runwayml/stable-diffusion-v1-5" pipe = StableDiffusionPipeline.from_pretrained(repo_id) ``` [CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4)이나 [runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5) 같은 체크포인트들의 경우, 하나 이상의 다양한 태스크에 활용될 수 있습니다. (예를 들어 위의 두 체크포인트의 경우, text-to-image와 image-to-image에 모두 활용될 수 있습니다.) 만약 이러한 체크포인트들을 기본 설정 태스크가 아닌 다른 태스크에 활용하고자 한다면, 해당 태스크에 대응되는 파이프라인(task-specific pipeline)을 사용해야 합니다. ```python from diffusers import StableDiffusionImg2ImgPipeline repo_id = "runwayml/stable-diffusion-v1-5" pipe = StableDiffusionImg2ImgPipeline.from_pretrained(repo_id) ``` ### 로컬 파이프라인 파이프라인을 로컬로 불러오고자 한다면, `git-lfs`를 사용하여 직접 체크포인트를 로컬 디스크에 다운로드 받아야 합니다. 아래의 명령어를 실행하면 `./stable-diffusion-v1-5`란 이름으로 폴더가 로컬디스크에 생성됩니다. ```bash git lfs install git clone https://huggingface.co/runwayml/stable-diffusion-v1-5 ``` 그런 다음 해당 로컬 경로를 [`~DiffusionPipeline.from_pretrained`] 메서드에 전달합니다. ```python from diffusers import DiffusionPipeline repo_id = "./stable-diffusion-v1-5" stable_diffusion = DiffusionPipeline.from_pretrained(repo_id) ``` 위의 예시코드처럼 만약 `repo_id`가 로컬 패스(local path)라면, [`~DiffusionPipeline.from_pretrained`] 메서드는 이를 자동으로 감지하여 허브에서 파일을 다운로드하지 않습니다. 만약 로컬 디스크에 저장된 파이프라인 체크포인트가 최신 버전이 아닐 경우에도, 최신 버전을 다운로드하지 않고 기존 로컬 디스크에 저장된 체크포인트를 사용한다는 것을 의미합니다. ### 파이프라인 내부의 컴포넌트 교체하기 파이프라인 내부의 컴포넌트들은 호환 가능한 다른 컴포넌트로 교체될 수 있습니다. 이와 같은 컴포넌트 교체가 중요한 이유는 다음과 같습니다. - 어떤 스케줄러를 사용할 것인가는 생성속도와 생성품질 간의 트레이드오프를 정의하는 중요한 요소입니다. 
- diffusion 모델 내부의 컴포넌트들은 일반적으로 각각 독립적으로 훈련되기 때문에, 더 좋은 성능을 보여주는 컴포넌트가 있다면 그걸로 교체하는 식으로 성능을 향상시킬 수 있습니다. - 파인 튜닝 단계에서는 일반적으로 UNet 혹은 텍스트 인코더와 같은 일부 컴포넌트들만 훈련하게 됩니다. 어떤 스케줄러들이 호환가능한지는 `compatibles` 속성을 통해 확인할 수 있습니다. ```python from diffusers import DiffusionPipeline repo_id = "runwayml/stable-diffusion-v1-5" stable_diffusion = DiffusionPipeline.from_pretrained(repo_id) stable_diffusion.scheduler.compatibles ``` 이번에는 [`SchedulerMixin.from_pretrained`] 메서드를 사용해서, 기존 기본 스케줄러였던 [`PNDMScheduler`]를 보다 우수한 성능의 [`EulerDiscreteScheduler`]로 바꿔봅시다. 스케줄러를 로드할 때는 `subfolder` 인자를 통해, 해당 파이프라인의 리포지토리에서 [스케줄러에 관한 하위폴더](https://huggingface.co/runwayml/stable-diffusion-v1-5/tree/main/scheduler)를 명시해주어야 합니다. 그 다음 새롭게 생성한 [`EulerDiscreteScheduler`] 인스턴스를 [`DiffusionPipeline`]의 `scheduler` 인자에 전달합니다. ```python from diffusers import DiffusionPipeline, EulerDiscreteScheduler, DPMSolverMultistepScheduler repo_id = "runwayml/stable-diffusion-v1-5" scheduler = EulerDiscreteScheduler.from_pretrained(repo_id, subfolder="scheduler") stable_diffusion = DiffusionPipeline.from_pretrained(repo_id, scheduler=scheduler) ``` ### 세이프티 체커 스테이블 diffusion과 같은 diffusion 모델들은 유해한 이미지를 생성할 수도 있습니다. 이를 예방하기 위해 디퓨저스는 생성된 이미지의 유해성을 판단하는 [세이프티 체커(safety checker)](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py) 기능을 지원하고 있습니다. 만약 세이프티 체커의 사용을 원하지 않는다면, `safety_checker` 인자에 `None`을 전달해주시면 됩니다. ```python from diffusers import DiffusionPipeline repo_id = "runwayml/stable-diffusion-v1-5" stable_diffusion = DiffusionPipeline.from_pretrained(repo_id, safety_checker=None) ``` ### 컴포넌트 재사용 복수의 파이프라인에 동일한 모델이 반복적으로 사용한다면, 굳이 해당 모델의 동일한 가중치를 중복으로 RAM에 불러올 필요는 없을 것입니다. [`~DiffusionPipeline.components`] 속성을 통해 파이프라인 내부의 컴포넌트들을 참조할 수 있는데, 이번 단락에서는 이를 통해 동일한 모델 가중치를 RAM에 중복으로 불러오는 것을 방지하는 법에 대해 알아보겠습니다. ```python from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline model_id = "runwayml/stable-diffusion-v1-5" stable_diffusion_txt2img = StableDiffusionPipeline.from_pretrained(model_id) components = stable_diffusion_txt2img.components ``` 그 다음 위 예시 코드에서 선언한 `components` 변수를 다른 파이프라인에 전달함으로써, 모델의 가중치를 중복으로 RAM에 로딩하지 않고, 동일한 컴포넌트를 재사용할 수 있습니다. ```python stable_diffusion_img2img = StableDiffusionImg2ImgPipeline(**components) ``` 물론 각각의 컴포넌트들을 따로 따로 파이프라인에 전달할 수도 있습니다. 예를 들어 `stable_diffusion_txt2img` 파이프라인 안의 컴포넌트들 가운데서 세이프티 체커(`safety_checker`)와 피쳐 익스트랙터(`feature_extractor`)를 제외한 컴포넌트들만 `stable_diffusion_img2img` 파이프라인에서 재사용하는 방식 역시 가능합니다. ```python from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline model_id = "runwayml/stable-diffusion-v1-5" stable_diffusion_txt2img = StableDiffusionPipeline.from_pretrained(model_id) stable_diffusion_img2img = StableDiffusionImg2ImgPipeline( vae=stable_diffusion_txt2img.vae, text_encoder=stable_diffusion_txt2img.text_encoder, tokenizer=stable_diffusion_txt2img.tokenizer, unet=stable_diffusion_txt2img.unet, scheduler=stable_diffusion_txt2img.scheduler, safety_checker=None, feature_extractor=None, requires_safety_checker=False, ) ``` ## Checkpoint variants Variant란 일반적으로 다음과 같은 체크포인트들을 의미합니다. - `torch.float16`과 같이 정밀도는 더 낮지만, 용량 역시 더 작은 부동소수점 타입의 가중치를 사용하는 체크포인트. *(다만 이와 같은 variant의 경우, 추가적인 훈련과 CPU환경에서의 구동이 불가능합니다.)* - Non-EMA 가중치를 사용하는 체크포인트. *(Non-EMA 가중치의 경우, 파인 튜닝 단계에서 사용하는 것이 권장되는데, 추론 단계에선 사용하지 않는 것이 권장됩니다.)* <Tip> 💡 모델 구조는 동일하지만 서로 다른 학습 환경에서 서로 다른 데이터셋으로 학습된 체크포인트들이 있을 경우, 해당 체크포인트들은 variant 단계가 아닌 리포지토리 단계에서 분리되어 관리되어야 합니다. (즉, 해당 체크포인트들은 서로 다른 리포지토리에서 따로 관리되어야 합니다. 
예시: [`stable-diffusion-v1-4`], [`stable-diffusion-v1-5`]). </Tip> | **checkpoint type** | **weight name** | **argument for loading weights** | | ------------------- | ----------------------------------- | -------------------------------- | | original | diffusion_pytorch_model.bin | | | floating point | diffusion_pytorch_model.fp16.bin | `variant`, `torch_dtype` | | non-EMA | diffusion_pytorch_model.non_ema.bin | `variant` | variant를 로드할 때 2개의 중요한 argument가 있습니다. * `torch_dtype`은 불러올 체크포인트의 부동소수점을 정의합니다. 예를 들어 `torch_dtype=torch.float16`을 명시함으로써 가중치의 부동소수점 타입을 `fl16`으로 변환할 수 있습니다. (만약 따로 설정하지 않을 경우, 기본값으로 `fp32` 타입의 가중치가 로딩됩니다.) 또한 `variant` 인자를 명시하지 않은 채로 체크포인트를 불러온 다음, 해당 체크포인트를 `torch_dtype=torch.float16` 인자를 통해 `fp16` 타입으로 변환하는 것 역시 가능합니다. 이 경우 기본으로 설정된 `fp32` 가중치가 먼저 다운로드되고, 해당 가중치들을 불러온 다음 `fp16` 타입으로 변환하게 됩니다. * `variant` 인자는 리포지토리에서 어떤 variant를 불러올 것인가를 정의합니다. 가령 [`diffusers/stable-diffusion-variants`](https://huggingface.co/diffusers/stable-diffusion-variants/tree/main/unet) 리포지토리로부터 `non_ema` 체크포인트를 불러오고자 한다면, `variant="non_ema"` 인자를 전달해야 합니다. ```python from diffusers import DiffusionPipeline # load fp16 variant stable_diffusion = DiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", variant="fp16", torch_dtype=torch.float16 ) # load non_ema variant stable_diffusion = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", variant="non_ema") ``` 다른 부동소수점 타입의 가중치 혹은 non-EMA 가중치를 사용하는 체크포인트를 저장하기 위해서는, [`DiffusionPipeline.save_pretrained`] 메서드를 사용해야 하며, 이 때 `variant` 인자를 명시해줘야 합니다. 원래의 체크포인트와 동일한 폴더에 variant를 저장해야 하며, 이렇게 하면 동일한 폴더에서 오리지널 체크포인트과 variant를 모두 불러올 수 있습니다. ```python from diffusers import DiffusionPipeline # save as fp16 variant stable_diffusion.save_pretrained("runwayml/stable-diffusion-v1-5", variant="fp16") # save as non-ema variant stable_diffusion.save_pretrained("runwayml/stable-diffusion-v1-5", variant="non_ema") ``` 만약 variant를 기존 폴더에 저장하지 않을 경우, `variant` 인자를 반드시 명시해야 합니다. 그렇게 하지 않을 경우 원래의 오리지널 체크포인트를 찾을 수 없게 되기 때문에 에러가 발생합니다. ```python # 👎 this won't work stable_diffusion = DiffusionPipeline.from_pretrained("./stable-diffusion-v1-5", torch_dtype=torch.float16) # 👍 this works stable_diffusion = DiffusionPipeline.from_pretrained( "./stable-diffusion-v1-5", variant="fp16", torch_dtype=torch.float16 ) ``` ### 모델 불러오기 모델들은 [`ModelMixin.from_pretrained`] 메서드를 통해 불러올 수 있습니다. 해당 메서드는 최신 버전의 모델 가중치 파일과 설정 파일(configurations)을 다운로드하고 캐싱합니다. 만약 이러한 파일들이 최신 버전으로 로컬 캐시에 저장되어 있다면, [`ModelMixin.from_pretrained`]는 굳이 해당 파일들을 다시 다운로드하지 않으며, 그저 캐시에 있는 최신 파일들을 재사용합니다. 모델은 `subfolder` 인자에 명시된 하위 폴더로부터 로드됩니다. 예를 들어 `runwayml/stable-diffusion-v1-5`의 UNet 모델의 가중치는 [`unet`](https://huggingface.co/runwayml/stable-diffusion-v1-5/tree/main/unet) 폴더에 저장되어 있습니다. ```python from diffusers import UNet2DConditionModel repo_id = "runwayml/stable-diffusion-v1-5" model = UNet2DConditionModel.from_pretrained(repo_id, subfolder="unet") ``` 혹은 [해당 모델의 리포지토리](https://huggingface.co/google/ddpm-cifar10-32/tree/main)로부터 다이렉트로 가져오는 것 역시 가능합니다. ```python from diffusers import UNet2DModel repo_id = "google/ddpm-cifar10-32" model = UNet2DModel.from_pretrained(repo_id) ``` 또한 앞서 봤던 `variant` 인자를 명시함으로써, Non-EMA나 `fp16`의 가중치를 가져오는 것 역시 가능합니다. ```python from diffusers import UNet2DConditionModel model = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet", variant="non-ema") model.save_pretrained("./local-unet", variant="non-ema") ``` ### 스케줄러 스케줄러들은 [`SchedulerMixin.from_pretrained`] 메서드를 통해 불러올 수 있습니다. 
모델과 달리 스케줄러는 별도의 가중치를 갖지 않으며, 따라서 당연히 별도의 학습과정을 요구하지 않습니다. 이러한 스케줄러들은 (해당 스케줄러 하위폴더의) configration 파일을 통해 정의됩니다. 여러개의 스케줄러를 불러온다고 해서 많은 메모리를 소모하는 것은 아니며, 다양한 스케줄러들에 동일한 스케줄러 configration을 적용하는 것 역시 가능합니다. 다음 예시 코드에서 불러오는 스케줄러들은 모두 [`StableDiffusionPipeline`]과 호환되는데, 이는 곧 해당 스케줄러들에 동일한 스케줄러 configration 파일을 적용할 수 있음을 의미합니다. ```python from diffusers import StableDiffusionPipeline from diffusers import ( DDPMScheduler, DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ) repo_id = "runwayml/stable-diffusion-v1-5" ddpm = DDPMScheduler.from_pretrained(repo_id, subfolder="scheduler") ddim = DDIMScheduler.from_pretrained(repo_id, subfolder="scheduler") pndm = PNDMScheduler.from_pretrained(repo_id, subfolder="scheduler") lms = LMSDiscreteScheduler.from_pretrained(repo_id, subfolder="scheduler") euler_anc = EulerAncestralDiscreteScheduler.from_pretrained(repo_id, subfolder="scheduler") euler = EulerDiscreteScheduler.from_pretrained(repo_id, subfolder="scheduler") dpm = DPMSolverMultistepScheduler.from_pretrained(repo_id, subfolder="scheduler") # replace `dpm` with any of `ddpm`, `ddim`, `pndm`, `lms`, `euler_anc`, `euler` pipeline = StableDiffusionPipeline.from_pretrained(repo_id, scheduler=dpm) ``` ### DiffusionPipeline에 대해 알아보기 클래스 메서드로서 [`DiffusionPipeline.from_pretrained`]은 2가지를 담당합니다. - 첫째로, `from_pretrained` 메서드는 최신 버전의 파이프라인을 다운로드하고, 캐시에 저장합니다. 이미 로컬 캐시에 최신 버전의 파이프라인이 저장되어 있다면, [`DiffusionPipeline.from_pretrained`]은 해당 파일들을 다시 다운로드하지 않고, 로컬 캐시에 저장되어 있는 파이프라인을 불러옵니다. - `model_index.json` 파일을 통해 체크포인트에 대응되는 적합한 파이프라인 클래스로 불러옵니다. 파이프라인의 폴더 구조는 해당 파이프라인 클래스의 구조와 직접적으로 일치합니다. 예를 들어 [`StableDiffusionPipeline`] 클래스는 [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) 리포지토리와 대응되는 구조를 갖습니다. ```python from diffusers import DiffusionPipeline repo_id = "runwayml/stable-diffusion-v1-5" pipeline = DiffusionPipeline.from_pretrained(repo_id) print(pipeline) ``` 위의 코드 출력 결과를 확인해보면, `pipeline`은 [`StableDiffusionPipeline`]의 인스턴스이며, 다음과 같이 총 7개의 컴포넌트로 구성된다는 것을 알 수 있습니다. - `"feature_extractor"`: [`~transformers.CLIPFeatureExtractor`]의 인스턴스 - `"safety_checker"`: 유해한 컨텐츠를 스크리닝하기 위한 [컴포넌트](https://github.com/huggingface/diffusers/blob/e55687e1e15407f60f32242027b7bb8170e58266/src/diffusers/pipelines/stable_diffusion/safety_checker.py#L32) - `"scheduler"`: [`PNDMScheduler`]의 인스턴스 - `"text_encoder"`: [`~transformers.CLIPTextModel`]의 인스턴스 - `"tokenizer"`: a [`~transformers.CLIPTokenizer`]의 인스턴스 - `"unet"`: [`UNet2DConditionModel`]의 인스턴스 - `"vae"` [`AutoencoderKL`]의 인스턴스 ```json StableDiffusionPipeline { "feature_extractor": [ "transformers", "CLIPImageProcessor" ], "safety_checker": [ "stable_diffusion", "StableDiffusionSafetyChecker" ], "scheduler": [ "diffusers", "PNDMScheduler" ], "text_encoder": [ "transformers", "CLIPTextModel" ], "tokenizer": [ "transformers", "CLIPTokenizer" ], "unet": [ "diffusers", "UNet2DConditionModel" ], "vae": [ "diffusers", "AutoencoderKL" ] } ``` 파이프라인 인스턴스의 컴포넌트들을 [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5)의 폴더 구조와 비교해볼 경우, 각각의 컴포넌트마다 별도의 폴더가 있음을 확인할 수 있습니다. ``` . 
├── feature_extractor │ └── preprocessor_config.json ├── model_index.json ├── safety_checker │ ├── config.json │ └── pytorch_model.bin ├── scheduler │ └── scheduler_config.json ├── text_encoder │ ├── config.json │ └── pytorch_model.bin ├── tokenizer │ ├── merges.txt │ ├── special_tokens_map.json │ ├── tokenizer_config.json │ └── vocab.json ├── unet │ ├── config.json │ ├── diffusion_pytorch_model.bin └── vae ├── config.json ├── diffusion_pytorch_model.bin ``` 또한 각각의 컴포넌트들을 파이프라인 인스턴스의 속성으로써 참조할 수 있습니다. ```py pipeline.tokenizer ``` ```python CLIPTokenizer( name_or_path="/root/.cache/huggingface/hub/models--runwayml--stable-diffusion-v1-5/snapshots/39593d5650112b4cc580433f6b0435385882d819/tokenizer", vocab_size=49408, model_max_length=77, is_fast=False, padding_side="right", truncation_side="right", special_tokens={ "bos_token": AddedToken("<|startoftext|>", rstrip=False, lstrip=False, single_word=False, normalized=True), "eos_token": AddedToken("<|endoftext|>", rstrip=False, lstrip=False, single_word=False, normalized=True), "unk_token": AddedToken("<|endoftext|>", rstrip=False, lstrip=False, single_word=False, normalized=True), "pad_token": "<|endoftext|>", }, ) ``` 모든 파이프라인은 `model_index.json` 파일을 통해 [`DiffusionPipeline`]에 다음과 같은 정보를 전달합니다. - `_class_name` 는 어떤 파이프라인 클래스를 사용해야 하는지에 대해 알려줍니다. - `_diffusers_version`는 어떤 버전의 디퓨저스로 파이프라인 안의 모델들이 만들어졌는지를 알려줍니다. - 그 다음은 각각의 컴포넌트들이 어떤 라이브러리의 어떤 클래스로 만들어졌는지에 대해 알려줍니다. (아래 예시에서 `"feature_extractor" : ["transformers", "CLIPImageProcessor"]`의 경우, `feature_extractor` 컴포넌트는 `transformers` 라이브러리의 `CLIPImageProcessor` 클래스를 통해 만들어졌다는 것을 의미합니다.) ```json { "_class_name": "StableDiffusionPipeline", "_diffusers_version": "0.6.0", "feature_extractor": [ "transformers", "CLIPImageProcessor" ], "safety_checker": [ "stable_diffusion", "StableDiffusionSafetyChecker" ], "scheduler": [ "diffusers", "PNDMScheduler" ], "text_encoder": [ "transformers", "CLIPTextModel" ], "tokenizer": [ "transformers", "CLIPTokenizer" ], "unet": [ "diffusers", "UNet2DConditionModel" ], "vae": [ "diffusers", "AutoencoderKL" ] } ```
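
`model_index.json`에 기록된 정보를 코드에서 직접 확인하고 싶다면, 아래와 같이 파이프라인의 각 컴포넌트 이름과 해당 클래스를 출력해볼 수 있습니다. 위에서 불러온 `pipeline` 객체가 있다고 가정한 간단한 예시입니다:

```python
# 각 컴포넌트 이름과 이를 구현하는 클래스를 출력합니다.
for name, component in pipeline.components.items():
    cls_name = type(component).__name__ if component is not None else None
    print(f"{name}: {cls_name}")
```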
diffusers/docs/source/ko/using-diffusers/loading.md/0
{ "file_path": "diffusers/docs/source/ko/using-diffusers/loading.md", "repo_id": "diffusers", "token_count": 14651 }
93
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> [[open-in-colab]] # Tour rápido Modelos de difusão são treinados para remover o ruído Gaussiano aleatório passo a passo para gerar uma amostra de interesse, como uma imagem ou áudio. Isso despertou um tremendo interesse em IA generativa, e você provavelmente já viu exemplos de imagens geradas por difusão na internet. 🧨 Diffusers é uma biblioteca que visa tornar os modelos de difusão amplamente acessíveis a todos. Seja você um desenvolvedor ou um usuário, esse tour rápido irá introduzir você ao 🧨 Diffusers e ajudar você a começar a gerar rapidamente! Há três componentes principais da biblioteca para conhecer: - O [`DiffusionPipeline`] é uma classe de alto nível de ponta a ponta desenhada para gerar rapidamente amostras de modelos de difusão pré-treinados para inferência. - [Modelos](./api/models) pré-treinados populares e módulos que podem ser usados como blocos de construção para criar sistemas de difusão. - Vários [Agendadores](./api/schedulers/overview) diferentes - algoritmos que controlam como o ruído é adicionado para treinamento, e como gerar imagens sem o ruído durante a inferência. Esse tour rápido mostrará como usar o [`DiffusionPipeline`] para inferência, e então mostrará como combinar um modelo e um agendador para replicar o que está acontecendo dentro do [`DiffusionPipeline`]. <Tip> Esse tour rápido é uma versão simplificada da introdução 🧨 Diffusers [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/diffusers_intro.ipynb) para ajudar você a começar rápido. Se você quer aprender mais sobre o objetivo do 🧨 Diffusers, filosofia de design, e detalhes adicionais sobre a API principal, veja o notebook! </Tip> Antes de começar, certifique-se de ter todas as bibliotecas necessárias instaladas: ```py # uncomment to install the necessary libraries in Colab #!pip install --upgrade diffusers accelerate transformers ``` - [🤗 Accelerate](https://huggingface.co/docs/accelerate/index) acelera o carregamento do modelo para geração e treinamento. - [🤗 Transformers](https://huggingface.co/docs/transformers/index) é necessário para executar os modelos mais populares de difusão, como o [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/overview). ## DiffusionPipeline O [`DiffusionPipeline`] é a forma mais fácil de usar um sistema de difusão pré-treinado para geração. É um sistema de ponta a ponta contendo o modelo e o agendador. Você pode usar o [`DiffusionPipeline`] pronto para muitas tarefas. Dê uma olhada na tabela abaixo para algumas tarefas suportadas, e para uma lista completa de tarefas suportadas, veja a tabela [Resumo do 🧨 Diffusers](./api/pipelines/overview#diffusers-summary). 
| **Tarefa** | **Descrição** | **Pipeline** | | -------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | | Unconditional Image Generation | gera uma imagem a partir do ruído Gaussiano | [unconditional_image_generation](./using-diffusers/unconditional_image_generation) | | Text-Guided Image Generation | gera uma imagem a partir de um prompt de texto | [conditional_image_generation](./using-diffusers/conditional_image_generation) | | Text-Guided Image-to-Image Translation | adapta uma imagem guiada por um prompt de texto | [img2img](./using-diffusers/img2img) | | Text-Guided Image-Inpainting | preenche a parte da máscara da imagem, dado a imagem, a máscara e o prompt de texto | [inpaint](./using-diffusers/inpaint) | | Text-Guided Depth-to-Image Translation | adapta as partes de uma imagem guiada por um prompt de texto enquanto preserva a estrutura por estimativa de profundidade | [depth2img](./using-diffusers/depth2img) | Comece criando uma instância do [`DiffusionPipeline`] e especifique qual checkpoint do pipeline você gostaria de baixar. Você pode usar o [`DiffusionPipeline`] para qualquer [checkpoint](https://huggingface.co/models?library=diffusers&sort=downloads) armazenado no Hugging Face Hub. Nesse quicktour, você carregará o checkpoint [`stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) para geração de texto para imagem. <Tip warning={true}> Para os modelos de [Stable Diffusion](https://huggingface.co/CompVis/stable-diffusion), por favor leia cuidadosamente a [licença](https://huggingface.co/spaces/CompVis/stable-diffusion-license) primeiro antes de rodar o modelo. 🧨 Diffusers implementa uma verificação de segurança: [`safety_checker`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py) para prevenir conteúdo ofensivo ou nocivo, mas as capacidades de geração de imagem aprimorada do modelo podem ainda produzir conteúdo potencialmente nocivo. </Tip> Para carregar o modelo com o método [`~DiffusionPipeline.from_pretrained`]: ```python >>> from diffusers import DiffusionPipeline >>> pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", use_safetensors=True) ``` O [`DiffusionPipeline`] baixa e armazena em cache todos os componentes de modelagem, tokenização, e agendamento. Você verá que o pipeline do Stable Diffusion é composto pelo [`UNet2DConditionModel`] e [`PNDMScheduler`] entre outras coisas: ```py >>> pipeline StableDiffusionPipeline { "_class_name": "StableDiffusionPipeline", "_diffusers_version": "0.13.1", ..., "scheduler": [ "diffusers", "PNDMScheduler" ], ..., "unet": [ "diffusers", "UNet2DConditionModel" ], "vae": [ "diffusers", "AutoencoderKL" ] } ``` Nós fortemente recomendamos rodar o pipeline em uma placa de vídeo, pois o modelo consiste em aproximadamente 1.4 bilhões de parâmetros. Você pode mover o objeto gerador para uma placa de vídeo, assim como você faria no PyTorch: ```python >>> pipeline.to("cuda") ``` Agora você pode passar o prompt de texto para o `pipeline` para gerar uma imagem, e então acessar a imagem sem ruído. Por padrão, a saída da imagem é embrulhada em um objeto [`PIL.Image`](https://pillow.readthedocs.io/en/stable/reference/Image.html?highlight=image#the-image-class). 
```python >>> image = pipeline("An image of a squirrel in Picasso style").images[0] >>> image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/image_of_squirrel_painting.png"/> </div> Salve a imagem chamando o `save`: ```python >>> image.save("image_of_squirrel_painting.png") ``` ### Pipeline local Você também pode utilizar o pipeline localmente. A única diferença é que você precisa baixar os pesos primeiro: ```bash !git lfs install !git clone https://huggingface.co/runwayml/stable-diffusion-v1-5 ``` Assim carregue os pesos salvos no pipeline: ```python >>> pipeline = DiffusionPipeline.from_pretrained("./stable-diffusion-v1-5", use_safetensors=True) ``` Agora você pode rodar o pipeline como você faria na seção acima. ### Troca dos agendadores Agendadores diferentes tem diferentes velocidades de retirar o ruído e compensações de qualidade. A melhor forma de descobrir qual funciona melhor para você é testar eles! Uma das principais características do 🧨 Diffusers é permitir que você troque facilmente entre agendadores. Por exemplo, para substituir o [`PNDMScheduler`] padrão com o [`EulerDiscreteScheduler`], carregue ele com o método [`~diffusers.ConfigMixin.from_config`]: ```py >>> from diffusers import EulerDiscreteScheduler >>> pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", use_safetensors=True) >>> pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config) ``` Tente gerar uma imagem com o novo agendador e veja se você nota alguma diferença! Na próxima seção, você irá dar uma olhada mais de perto nos componentes - o modelo e o agendador - que compõe o [`DiffusionPipeline`] e aprender como usar esses componentes para gerar uma imagem de um gato. ## Modelos A maioria dos modelos recebe uma amostra de ruído, e em cada _timestep_ ele prevê o _noise residual_ (outros modelos aprendem a prever a amostra anterior diretamente ou a velocidade ou [`v-prediction`](https://github.com/huggingface/diffusers/blob/5e5ce13e2f89ac45a0066cb3f369462a3cf1d9ef/src/diffusers/schedulers/scheduling_ddim.py#L110)), a diferença entre uma imagem menos com ruído e a imagem de entrada. Você pode misturar e combinar modelos para criar outros sistemas de difusão. Modelos são inicializados com o método [`~ModelMixin.from_pretrained`] que também armazena em cache localmente os pesos do modelo para que seja mais rápido na próxima vez que você carregar o modelo. Para o tour rápido, você irá carregar o [`UNet2DModel`], um modelo básico de geração de imagem incondicional com um checkpoint treinado em imagens de gato: ```py >>> from diffusers import UNet2DModel >>> repo_id = "google/ddpm-cat-256" >>> model = UNet2DModel.from_pretrained(repo_id, use_safetensors=True) ``` Para acessar os parâmetros do modelo, chame `model.config`: ```py >>> model.config ``` A configuração do modelo é um dicionário 🧊 congelado 🧊, o que significa que esses parâmetros não podem ser mudados depois que o modelo é criado. Isso é intencional e garante que os parâmetros usados para definir a arquitetura do modelo no início permaneçam os mesmos, enquanto outros parâmetros ainda podem ser ajustados durante a geração. Um dos parâmetros mais importantes são: - `sample_size`: a dimensão da altura e largura da amostra de entrada. - `in_channels`: o número de canais de entrada da amostra de entrada. - `down_block_types` e `up_block_types`: o tipo de blocos de downsampling e upsampling usados para criar a arquitetura UNet. 
- `block_out_channels`: o número de canais de saída dos blocos de downsampling; também utilizado como uma order reversa do número de canais de entrada dos blocos de upsampling. - `layers_per_block`: o número de blocks ResNet presentes em cada block UNet. Para usar o modelo para geração, crie a forma da imagem com ruído Gaussiano aleatório. Deve ter um eixo `batch` porque o modelo pode receber múltiplos ruídos aleatórios, um eixo `channel` correspondente ao número de canais de entrada, e um eixo `sample_size` para a altura e largura da imagem: ```py >>> import torch >>> torch.manual_seed(0) >>> noisy_sample = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) >>> noisy_sample.shape torch.Size([1, 3, 256, 256]) ``` Para geração, passe a imagem com ruído para o modelo e um `timestep`. O `timestep` indica o quão ruidosa a imagem de entrada é, com mais ruído no início e menos no final. Isso ajuda o modelo a determinar sua posição no processo de difusão, se está mais perto do início ou do final. Use o método `sample` para obter a saída do modelo: ```py >>> with torch.no_grad(): ... noisy_residual = model(sample=noisy_sample, timestep=2).sample ``` Para geração de exemplos reais, você precisará de um agendador para guiar o processo de retirada do ruído. Na próxima seção, você irá aprender como acoplar um modelo com um agendador. ## Agendadores Agendadores gerenciam a retirada do ruído de uma amostra ruidosa para uma amostra menos ruidosa dado a saída do modelo - nesse caso, é o `noisy_residual`. <Tip> 🧨 Diffusers é uma caixa de ferramentas para construir sistemas de difusão. Enquanto o [`DiffusionPipeline`] é uma forma conveniente de começar com um sistema de difusão pré-construído, você também pode escolher seus próprios modelos e agendadores separadamente para construir um sistema de difusão personalizado. </Tip> Para o tour rápido, você irá instanciar o [`DDPMScheduler`] com o método [`~diffusers.ConfigMixin.from_config`]: ```py >>> from diffusers import DDPMScheduler >>> scheduler = DDPMScheduler.from_config(repo_id) >>> scheduler DDPMScheduler { "_class_name": "DDPMScheduler", "_diffusers_version": "0.13.1", "beta_end": 0.02, "beta_schedule": "linear", "beta_start": 0.0001, "clip_sample": true, "clip_sample_range": 1.0, "num_train_timesteps": 1000, "prediction_type": "epsilon", "trained_betas": null, "variance_type": "fixed_small" } ``` <Tip> 💡 Perceba como o agendador é instanciado de uma configuração. Diferentemente de um modelo, um agendador não tem pesos treináveis e é livre de parâmetros! </Tip> Um dos parâmetros mais importante são: - `num_train_timesteps`: o tamanho do processo de retirar ruído ou em outras palavras, o número de _timesteps_ necessários para o processo de ruídos Gausianos aleatórios dentro de uma amostra de dados. - `beta_schedule`: o tipo de agendados de ruído para o uso de geração e treinamento. - `beta_start` e `beta_end`: para começar e terminar os valores de ruído para o agendador de ruído. Para predizer uma imagem com um pouco menos de ruído, passe o seguinte para o método do agendador [`~diffusers.DDPMScheduler.step`]: saída do modelo, `timestep`, e a atual `amostra`. ```py >>> less_noisy_sample = scheduler.step(model_output=noisy_residual, timestep=2, sample=noisy_sample).prev_sample >>> less_noisy_sample.shape ``` O `less_noisy_sample` pode ser passado para o próximo `timestep` onde ele ficará ainda com menos ruído! Vamos juntar tudo agora e visualizar o processo inteiro de retirada de ruído. 
Comece, criando a função que faça o pós-processamento e mostre a imagem sem ruído como uma `PIL.Image`: ```py >>> import PIL.Image >>> import numpy as np >>> def display_sample(sample, i): ... image_processed = sample.cpu().permute(0, 2, 3, 1) ... image_processed = (image_processed + 1.0) * 127.5 ... image_processed = image_processed.numpy().astype(np.uint8) ... image_pil = PIL.Image.fromarray(image_processed[0]) ... display(f"Image at step {i}") ... display(image_pil) ``` Para acelerar o processo de retirada de ruído, mova a entrada e o modelo para uma GPU: ```py >>> model.to("cuda") >>> noisy_sample = noisy_sample.to("cuda") ``` Agora, crie um loop de retirada de ruído que prediz o residual da amostra menos ruidosa, e computa a amostra menos ruidosa com o agendador: ```py >>> import tqdm >>> sample = noisy_sample >>> for i, t in enumerate(tqdm.tqdm(scheduler.timesteps)): ... # 1. predict noise residual ... with torch.no_grad(): ... residual = model(sample, t).sample ... # 2. compute less noisy image and set x_t -> x_t-1 ... sample = scheduler.step(residual, t, sample).prev_sample ... # 3. optionally look at image ... if (i + 1) % 50 == 0: ... display_sample(sample, i + 1) ``` Sente-se e assista o gato ser gerado do nada além de ruído! 😻 <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/diffusion-quicktour.png"/> </div> ## Próximos passos Esperamos que você tenha gerado algumas imagens legais com o 🧨 Diffusers neste tour rápido! Para suas próximas etapas, você pode - Treine ou faça a configuração fina de um modelo para gerar suas próprias imagens no tutorial de [treinamento](./tutorials/basic_training). - Veja exemplos oficiais e da comunidade de [scripts de treinamento ou configuração fina](https://github.com/huggingface/diffusers/tree/main/examples#-diffusers-examples) para os mais variados casos de uso. - Aprenda sobre como carregar, acessar, mudar e comparar agendadores no guia [Usando diferentes agendadores](./using-diffusers/schedulers). - Explore engenharia de prompt, otimizações de velocidade e memória, e dicas e truques para gerar imagens de maior qualidade com o guia [Stable Diffusion](./stable_diffusion). - Se aprofunde em acelerar 🧨 Diffusers com guias sobre [PyTorch otimizado em uma GPU](./optimization/fp16), e guias de inferência para rodar [Stable Diffusion em Apple Silicon (M1/M2)](./optimization/mps) e [ONNX Runtime](./optimization/onnx).
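
Se você estiver executando fora de um notebook, onde a função `display` não está disponível, um esboço mínimo como o abaixo salva a amostra final em disco em vez de exibi-la. Ele assume que o tensor `sample` do loop acima ainda está em escopo; o nome do arquivo é apenas ilustrativo.

```py
import numpy as np
import PIL.Image

# Converte o tensor final para uint8 e salva como PNG.
image_processed = sample.cpu().permute(0, 2, 3, 1)
image_processed = ((image_processed + 1.0) * 127.5).clamp(0, 255)
image_processed = image_processed.numpy().astype(np.uint8)
PIL.Image.fromarray(image_processed[0]).save("ddpm_cat.png")
```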
diffusers/docs/source/pt/quicktour.md/0
{ "file_path": "diffusers/docs/source/pt/quicktour.md", "repo_id": "diffusers", "token_count": 6767 }
94
import inspect from typing import List, Optional, Union import numpy as np import PIL.Image import torch from torch import nn from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import PIL_INTERPOLATION, deprecate from diffusers.utils.torch_utils import randn_tensor EXAMPLE_DOC_STRING = """ Examples: ``` from io import BytesIO import requests import torch from diffusers import DiffusionPipeline from PIL import Image from transformers import CLIPFeatureExtractor, CLIPModel feature_extractor = CLIPFeatureExtractor.from_pretrained( "laion/CLIP-ViT-B-32-laion2B-s34B-b79K" ) clip_model = CLIPModel.from_pretrained( "laion/CLIP-ViT-B-32-laion2B-s34B-b79K", torch_dtype=torch.float16 ) guided_pipeline = DiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", # custom_pipeline="clip_guided_stable_diffusion", custom_pipeline="/home/njindal/diffusers/examples/community/clip_guided_stable_diffusion.py", clip_model=clip_model, feature_extractor=feature_extractor, torch_dtype=torch.float16, ) guided_pipeline.enable_attention_slicing() guided_pipeline = guided_pipeline.to("cuda") prompt = "fantasy book cover, full moon, fantasy forest landscape, golden vector elements, fantasy magic, dark light night, intricate, elegant, sharp focus, illustration, highly detailed, digital painting, concept art, matte, art by WLOP and Artgerm and Albert Bierstadt, masterpiece" url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" response = requests.get(url) init_image = Image.open(BytesIO(response.content)).convert("RGB") image = guided_pipeline( prompt=prompt, num_inference_steps=30, image=init_image, strength=0.75, guidance_scale=7.5, clip_guidance_scale=100, num_cutouts=4, use_cutouts=False, ).images[0] display(image) ``` """ def preprocess(image, w, h): if isinstance(image, torch.Tensor): return image elif isinstance(image, PIL.Image.Image): image = [image] if isinstance(image[0], PIL.Image.Image): image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] image = np.concatenate(image, axis=0) image = np.array(image).astype(np.float32) / 255.0 image = image.transpose(0, 3, 1, 2) image = 2.0 * image - 1.0 image = torch.from_numpy(image) elif isinstance(image[0], torch.Tensor): image = torch.cat(image, dim=0) return image class MakeCutouts(nn.Module): def __init__(self, cut_size, cut_power=1.0): super().__init__() self.cut_size = cut_size self.cut_power = cut_power def forward(self, pixel_values, num_cutouts): sideY, sideX = pixel_values.shape[2:4] max_size = min(sideX, sideY) min_size = min(sideX, sideY, self.cut_size) cutouts = [] for _ in range(num_cutouts): size = int(torch.rand([]) ** self.cut_power * (max_size - min_size) + min_size) offsetx = torch.randint(0, sideX - size + 1, ()) offsety = torch.randint(0, sideY - size + 1, ()) cutout = pixel_values[:, :, offsety : offsety + size, offsetx : offsetx + size] cutouts.append(F.adaptive_avg_pool2d(cutout, self.cut_size)) return torch.cat(cutouts) def spherical_dist_loss(x, y): x = F.normalize(x, dim=-1) y = F.normalize(y, dim=-1) return (x - 
y).norm(dim=-1).div(2).arcsin().pow(2).mul(2) def set_requires_grad(model, value): for param in model.parameters(): param.requires_grad = value class CLIPGuidedStableDiffusion(DiffusionPipeline): """CLIP guided stable diffusion based on the amazing repo by @crowsonkb and @Jack000 - https://github.com/Jack000/glid-3-xl - https://github.dev/crowsonkb/k-diffusion """ def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, clip_model: CLIPModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler], feature_extractor: CLIPFeatureExtractor, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, clip_model=clip_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, ) self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std) self.cut_out_size = ( feature_extractor.size if isinstance(feature_extractor.size, int) else feature_extractor.size["shortest_edge"] ) self.make_cutouts = MakeCutouts(self.cut_out_size) set_requires_grad(self.text_encoder, False) set_requires_grad(self.clip_model, False) def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory slice_size = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(slice_size) def disable_attention_slicing(self): self.enable_attention_slicing(None) def freeze_vae(self): set_requires_grad(self.vae, False) def unfreeze_vae(self): set_requires_grad(self.vae, True) def freeze_unet(self): set_requires_grad(self.unet, False) def unfreeze_unet(self): set_requires_grad(self.unet, True) def get_timesteps(self, num_inference_steps, strength, device): # get the original timestep using init_timestep init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) image = image.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if isinstance(generator, list): init_latents = [ self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size) ] init_latents = torch.cat(init_latents, dim=0) else: init_latents = self.vae.encode(image).latent_dist.sample(generator) init_latents = self.vae.config.scaling_factor * init_latents if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: # expand init_latents for batch_size deprecation_message = ( f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" " images (`image`). Initial images are now duplicating to match the number of text prompts. 
Note" " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" " your script to pass as many initial images as text prompts to suppress this warning." ) deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) additional_image_per_prompt = batch_size // init_latents.shape[0] init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: raise ValueError( f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." ) else: init_latents = torch.cat([init_latents], dim=0) shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) # get latents init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents @torch.enable_grad() def cond_fn( self, latents, timestep, index, text_embeddings, noise_pred_original, text_embeddings_clip, clip_guidance_scale, num_cutouts, use_cutouts=True, ): latents = latents.detach().requires_grad_() latent_model_input = self.scheduler.scale_model_input(latents, timestep) # predict the noise residual noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)): alpha_prod_t = self.scheduler.alphas_cumprod[timestep] beta_prod_t = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5) fac = torch.sqrt(beta_prod_t) sample = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler, LMSDiscreteScheduler): sigma = self.scheduler.sigmas[index] sample = latents - sigma * noise_pred else: raise ValueError(f"scheduler type {type(self.scheduler)} not supported") sample = 1 / self.vae.config.scaling_factor * sample image = self.vae.decode(sample).sample image = (image / 2 + 0.5).clamp(0, 1) if use_cutouts: image = self.make_cutouts(image, num_cutouts) else: image = transforms.Resize(self.cut_out_size)(image) image = self.normalize(image).to(latents.dtype) image_embeddings_clip = self.clip_model.get_image_features(image) image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True) if use_cutouts: dists = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip) dists = dists.view([num_cutouts, sample.shape[0], -1]) loss = dists.sum(2).mean(0).sum() * clip_guidance_scale else: loss = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip).mean() * clip_guidance_scale grads = -torch.autograd.grad(loss, latents)[0] if isinstance(self.scheduler, LMSDiscreteScheduler): latents = latents.detach() + grads * (sigma**2) noise_pred = noise_pred_original else: noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads return noise_pred, latents @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], height: Optional[int] = 512, width: Optional[int] = 512, image: Union[torch.FloatTensor, PIL.Image.Image] = None, strength: float = 0.8, num_inference_steps: Optional[int] = 50, guidance_scale: Optional[float] = 7.5, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, clip_guidance_scale: Optional[float] = 100, clip_prompt: Optional[Union[str, List[str]]] = None, 
num_cutouts: Optional[int] = 4, use_cutouts: Optional[bool] = True, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, ): if isinstance(prompt, str): batch_size = 1 elif isinstance(prompt, list): batch_size = len(prompt) else: raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") # get prompt text embeddings text_input = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0] # duplicate text embeddings for each generation per prompt text_embeddings = text_embeddings.repeat_interleave(num_images_per_prompt, dim=0) # set timesteps accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys()) extra_set_kwargs = {} if accepts_offset: extra_set_kwargs["offset"] = 1 self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device) timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) # Preprocess image image = preprocess(image, width, height) latents = self.prepare_latents( image, latent_timestep, batch_size, num_images_per_prompt, text_embeddings.dtype, self.device, generator ) if clip_guidance_scale > 0: if clip_prompt is not None: clip_text_input = self.tokenizer( clip_prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ).input_ids.to(self.device) else: clip_text_input = text_input.input_ids.to(self.device) text_embeddings_clip = self.clip_model.get_text_features(clip_text_input) text_embeddings_clip = text_embeddings_clip / text_embeddings_clip.norm(p=2, dim=-1, keepdim=True) # duplicate text embeddings clip for each generation per prompt text_embeddings_clip = text_embeddings_clip.repeat_interleave(num_images_per_prompt, dim=0) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: max_length = text_input.input_ids.shape[-1] uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt") uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0] # duplicate unconditional embeddings for each generation per prompt uncond_embeddings = uncond_embeddings.repeat_interleave(num_images_per_prompt, dim=0) # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) latents_dtype = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to( self.device ) else: latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype) else: if latents.shape != latents_shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") latents = latents.to(self.device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator with self.progress_bar(total=num_inference_steps): for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample # perform classifier free guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: text_embeddings_for_guidance = ( text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings ) noise_pred, latents = self.cond_fn( latents, t, i, text_embeddings_for_guidance, noise_pred, text_embeddings_clip, clip_guidance_scale, num_cutouts, use_cutouts, ) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # scale and decode the image latents with vae latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents).sample image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).numpy() if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
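

if __name__ == "__main__":
    # Minimal usage sketch (an assumption, not part of the upstream example file):
    # it mirrors EXAMPLE_DOC_STRING above and only runs when this module is executed
    # directly, so importing CLIPGuidedStableDiffusion stays side-effect free.
    from io import BytesIO

    import requests
    from PIL import Image
    from transformers import CLIPFeatureExtractor, CLIPModel

    feature_extractor = CLIPFeatureExtractor.from_pretrained("laion/CLIP-ViT-B-32-laion2B-s34B-b79K")
    clip_model = CLIPModel.from_pretrained("laion/CLIP-ViT-B-32-laion2B-s34B-b79K", torch_dtype=torch.float16)

    guided_pipeline = DiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4",
        custom_pipeline="clip_guided_stable_diffusion_img2img",  # assumed community pipeline name for this file
        clip_model=clip_model,
        feature_extractor=feature_extractor,
        torch_dtype=torch.float16,
    )
    guided_pipeline.enable_attention_slicing()
    guided_pipeline = guided_pipeline.to("cuda")

    url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
    init_image = Image.open(BytesIO(requests.get(url).content)).convert("RGB")

    result = guided_pipeline(
        prompt="fantasy forest landscape, golden light, intricate, sharp focus, highly detailed, digital painting",
        image=init_image,
        strength=0.75,
        num_inference_steps=30,
        guidance_scale=7.5,
        clip_guidance_scale=100,
        num_cutouts=4,
        use_cutouts=False,
    ).images[0]
    result.save("clip_guided_img2img.png")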
diffusers/examples/community/clip_guided_stable_diffusion_img2img.py/0
{ "file_path": "diffusers/examples/community/clip_guided_stable_diffusion_img2img.py", "repo_id": "diffusers", "token_count": 9555 }
95
import inspect import re from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import PIL.Image import torch from packaging import version from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.configuration_utils import FrozenDict from diffusers.image_processor import VaeImageProcessor from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin from diffusers.models import AutoencoderKL, UNet2DConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import ( PIL_INTERPOLATION, deprecate, is_accelerate_available, is_accelerate_version, logging, ) from diffusers.utils.torch_utils import randn_tensor # ------------------------------------------------------------------------------ logger = logging.get_logger(__name__) # pylint: disable=invalid-name re_attention = re.compile( r""" \\\(| \\\)| \\\[| \\]| \\\\| \\| \(| \[| :([+-]?[.\d]+)\)| \)| ]| [^\\()\[\]:]+| : """, re.X, ) def parse_prompt_attention(text): """ Parses a string with attention tokens and returns a list of pairs: text and its associated weight. Accepted tokens are: (abc) - increases attention to abc by a multiplier of 1.1 (abc:3.12) - increases attention to abc by a multiplier of 3.12 [abc] - decreases attention to abc by a multiplier of 1.1 \\( - literal character '(' \\[ - literal character '[' \\) - literal character ')' \\] - literal character ']' \\ - literal character '\' anything else - just text >>> parse_prompt_attention('normal text') [['normal text', 1.0]] >>> parse_prompt_attention('an (important) word') [['an ', 1.0], ['important', 1.1], [' word', 1.0]] >>> parse_prompt_attention('(unbalanced') [['unbalanced', 1.1]] >>> parse_prompt_attention('\\(literal\\]') [['(literal]', 1.0]] >>> parse_prompt_attention('(unnecessary)(parens)') [['unnecessaryparens', 1.1]] >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).') [['a ', 1.0], ['house', 1.5730000000000004], [' ', 1.1], ['on', 1.0], [' a ', 1.1], ['hill', 0.55], [', sun, ', 1.1], ['sky', 1.4641000000000006], ['.', 1.1]] """ res = [] round_brackets = [] square_brackets = [] round_bracket_multiplier = 1.1 square_bracket_multiplier = 1 / 1.1 def multiply_range(start_position, multiplier): for p in range(start_position, len(res)): res[p][1] *= multiplier for m in re_attention.finditer(text): text = m.group(0) weight = m.group(1) if text.startswith("\\"): res.append([text[1:], 1.0]) elif text == "(": round_brackets.append(len(res)) elif text == "[": square_brackets.append(len(res)) elif weight is not None and len(round_brackets) > 0: multiply_range(round_brackets.pop(), float(weight)) elif text == ")" and len(round_brackets) > 0: multiply_range(round_brackets.pop(), round_bracket_multiplier) elif text == "]" and len(square_brackets) > 0: multiply_range(square_brackets.pop(), square_bracket_multiplier) else: res.append([text, 1.0]) for pos in round_brackets: multiply_range(pos, round_bracket_multiplier) for pos in square_brackets: multiply_range(pos, square_bracket_multiplier) if len(res) == 0: res = [["", 1.0]] # merge runs of identical weights i = 0 while i + 1 < len(res): if res[i][1] == res[i + 1][1]: res[i][0] += res[i + 1][0] res.pop(i + 1) else: i += 1 return res def get_prompts_with_weights(pipe: DiffusionPipeline, prompt: List[str], max_length: int): r""" 
Tokenize a list of prompts and return its tokens with weights of each token. No padding, starting or ending token is included. """ tokens = [] weights = [] truncated = False for text in prompt: texts_and_weights = parse_prompt_attention(text) text_token = [] text_weight = [] for word, weight in texts_and_weights: # tokenize and discard the starting and the ending token token = pipe.tokenizer(word).input_ids[1:-1] text_token += token # copy the weight by length of token text_weight += [weight] * len(token) # stop if the text is too long (longer than truncation limit) if len(text_token) > max_length: truncated = True break # truncate if len(text_token) > max_length: truncated = True text_token = text_token[:max_length] text_weight = text_weight[:max_length] tokens.append(text_token) weights.append(text_weight) if truncated: logger.warning("Prompt was truncated. Try to shorten the prompt or increase max_embeddings_multiples") return tokens, weights def pad_tokens_and_weights(tokens, weights, max_length, bos, eos, pad, no_boseos_middle=True, chunk_length=77): r""" Pad the tokens (with starting and ending tokens) and weights (with 1.0) to max_length. """ max_embeddings_multiples = (max_length - 2) // (chunk_length - 2) weights_length = max_length if no_boseos_middle else max_embeddings_multiples * chunk_length for i in range(len(tokens)): tokens[i] = [bos] + tokens[i] + [pad] * (max_length - 1 - len(tokens[i]) - 1) + [eos] if no_boseos_middle: weights[i] = [1.0] + weights[i] + [1.0] * (max_length - 1 - len(weights[i])) else: w = [] if len(weights[i]) == 0: w = [1.0] * weights_length else: for j in range(max_embeddings_multiples): w.append(1.0) # weight for starting token in this chunk w += weights[i][j * (chunk_length - 2) : min(len(weights[i]), (j + 1) * (chunk_length - 2))] w.append(1.0) # weight for ending token in this chunk w += [1.0] * (weights_length - len(w)) weights[i] = w[:] return tokens, weights def get_unweighted_text_embeddings( pipe: DiffusionPipeline, text_input: torch.Tensor, chunk_length: int, no_boseos_middle: Optional[bool] = True, ): """ When the length of tokens is a multiple of the capacity of the text encoder, it should be split into chunks and sent to the text encoder individually. 
""" max_embeddings_multiples = (text_input.shape[1] - 2) // (chunk_length - 2) if max_embeddings_multiples > 1: text_embeddings = [] for i in range(max_embeddings_multiples): # extract the i-th chunk text_input_chunk = text_input[:, i * (chunk_length - 2) : (i + 1) * (chunk_length - 2) + 2].clone() # cover the head and the tail by the starting and the ending tokens text_input_chunk[:, 0] = text_input[0, 0] text_input_chunk[:, -1] = text_input[0, -1] text_embedding = pipe.text_encoder(text_input_chunk)[0] if no_boseos_middle: if i == 0: # discard the ending token text_embedding = text_embedding[:, :-1] elif i == max_embeddings_multiples - 1: # discard the starting token text_embedding = text_embedding[:, 1:] else: # discard both starting and ending tokens text_embedding = text_embedding[:, 1:-1] text_embeddings.append(text_embedding) text_embeddings = torch.concat(text_embeddings, axis=1) else: text_embeddings = pipe.text_encoder(text_input)[0] return text_embeddings def get_weighted_text_embeddings( pipe: DiffusionPipeline, prompt: Union[str, List[str]], uncond_prompt: Optional[Union[str, List[str]]] = None, max_embeddings_multiples: Optional[int] = 3, no_boseos_middle: Optional[bool] = False, skip_parsing: Optional[bool] = False, skip_weighting: Optional[bool] = False, ): r""" Prompts can be assigned with local weights using brackets. For example, prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful', and the embedding tokens corresponding to the words get multiplied by a constant, 1.1. Also, to regularize of the embedding, the weighted embedding would be scaled to preserve the original mean. Args: pipe (`DiffusionPipeline`): Pipe to provide access to the tokenizer and the text encoder. prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. uncond_prompt (`str` or `List[str]`): The unconditional prompt or prompts for guide the image generation. If unconditional prompt is provided, the embeddings of prompt and uncond_prompt are concatenated. max_embeddings_multiples (`int`, *optional*, defaults to `3`): The max multiple length of prompt embeddings compared to the max output length of text encoder. no_boseos_middle (`bool`, *optional*, defaults to `False`): If the length of text token is multiples of the capacity of text encoder, whether reserve the starting and ending token in each of the chunk in the middle. skip_parsing (`bool`, *optional*, defaults to `False`): Skip the parsing of brackets. skip_weighting (`bool`, *optional*, defaults to `False`): Skip the weighting. When the parsing is skipped, it is forced True. 
""" max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2 if isinstance(prompt, str): prompt = [prompt] if not skip_parsing: prompt_tokens, prompt_weights = get_prompts_with_weights(pipe, prompt, max_length - 2) if uncond_prompt is not None: if isinstance(uncond_prompt, str): uncond_prompt = [uncond_prompt] uncond_tokens, uncond_weights = get_prompts_with_weights(pipe, uncond_prompt, max_length - 2) else: prompt_tokens = [ token[1:-1] for token in pipe.tokenizer(prompt, max_length=max_length, truncation=True).input_ids ] prompt_weights = [[1.0] * len(token) for token in prompt_tokens] if uncond_prompt is not None: if isinstance(uncond_prompt, str): uncond_prompt = [uncond_prompt] uncond_tokens = [ token[1:-1] for token in pipe.tokenizer(uncond_prompt, max_length=max_length, truncation=True).input_ids ] uncond_weights = [[1.0] * len(token) for token in uncond_tokens] # round up the longest length of tokens to a multiple of (model_max_length - 2) max_length = max([len(token) for token in prompt_tokens]) if uncond_prompt is not None: max_length = max(max_length, max([len(token) for token in uncond_tokens])) max_embeddings_multiples = min( max_embeddings_multiples, (max_length - 1) // (pipe.tokenizer.model_max_length - 2) + 1, ) max_embeddings_multiples = max(1, max_embeddings_multiples) max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2 # pad the length of tokens and weights bos = pipe.tokenizer.bos_token_id eos = pipe.tokenizer.eos_token_id pad = getattr(pipe.tokenizer, "pad_token_id", eos) prompt_tokens, prompt_weights = pad_tokens_and_weights( prompt_tokens, prompt_weights, max_length, bos, eos, pad, no_boseos_middle=no_boseos_middle, chunk_length=pipe.tokenizer.model_max_length, ) prompt_tokens = torch.tensor(prompt_tokens, dtype=torch.long, device=pipe.device) if uncond_prompt is not None: uncond_tokens, uncond_weights = pad_tokens_and_weights( uncond_tokens, uncond_weights, max_length, bos, eos, pad, no_boseos_middle=no_boseos_middle, chunk_length=pipe.tokenizer.model_max_length, ) uncond_tokens = torch.tensor(uncond_tokens, dtype=torch.long, device=pipe.device) # get the embeddings text_embeddings = get_unweighted_text_embeddings( pipe, prompt_tokens, pipe.tokenizer.model_max_length, no_boseos_middle=no_boseos_middle, ) prompt_weights = torch.tensor(prompt_weights, dtype=text_embeddings.dtype, device=text_embeddings.device) if uncond_prompt is not None: uncond_embeddings = get_unweighted_text_embeddings( pipe, uncond_tokens, pipe.tokenizer.model_max_length, no_boseos_middle=no_boseos_middle, ) uncond_weights = torch.tensor(uncond_weights, dtype=uncond_embeddings.dtype, device=uncond_embeddings.device) # assign weights to the prompts and normalize in the sense of mean # TODO: should we normalize by chunk or in a whole (current implementation)? 
if (not skip_parsing) and (not skip_weighting): previous_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype) text_embeddings *= prompt_weights.unsqueeze(-1) current_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype) text_embeddings *= (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1) if uncond_prompt is not None: previous_mean = uncond_embeddings.float().mean(axis=[-2, -1]).to(uncond_embeddings.dtype) uncond_embeddings *= uncond_weights.unsqueeze(-1) current_mean = uncond_embeddings.float().mean(axis=[-2, -1]).to(uncond_embeddings.dtype) uncond_embeddings *= (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1) if uncond_prompt is not None: return text_embeddings, uncond_embeddings return text_embeddings, None def preprocess_image(image, batch_size): w, h = image.size w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]) image = np.array(image).astype(np.float32) / 255.0 image = np.vstack([image[None].transpose(0, 3, 1, 2)] * batch_size) image = torch.from_numpy(image) return 2.0 * image - 1.0 def preprocess_mask(mask, batch_size, scale_factor=8): if not isinstance(mask, torch.FloatTensor): mask = mask.convert("L") w, h = mask.size w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL_INTERPOLATION["nearest"]) mask = np.array(mask).astype(np.float32) / 255.0 mask = np.tile(mask, (4, 1, 1)) mask = np.vstack([mask[None]] * batch_size) mask = 1 - mask # repaint white, keep black mask = torch.from_numpy(mask) return mask else: valid_mask_channel_sizes = [1, 3] # if mask channel is fourth tensor dimension, permute dimensions to pytorch standard (B, C, H, W) if mask.shape[3] in valid_mask_channel_sizes: mask = mask.permute(0, 3, 1, 2) elif mask.shape[1] not in valid_mask_channel_sizes: raise ValueError( f"Mask channel dimension of size in {valid_mask_channel_sizes} should be second or fourth dimension," f" but received mask of shape {tuple(mask.shape)}" ) # (potentially) reduce mask channel dimension from 3 to 1 for broadcasting to latent shape mask = mask.mean(dim=1, keepdim=True) h, w = mask.shape[-2:] h, w = (x - x % 8 for x in (h, w)) # resize to integer multiple of 8 mask = torch.nn.functional.interpolate(mask, (h // scale_factor, w // scale_factor)) return mask class StableDiffusionLongPromptWeightingPipeline( DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin ): r""" Pipeline for text-to-image generation using Stable Diffusion without tokens length limit, and support parsing weighting in prompt. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. Stable Diffusion uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). 
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details. feature_extractor ([`CLIPImageProcessor`]): Model that extracts features from generated images to be used as inputs for the `safety_checker`. """ _optional_components = ["safety_checker", "feature_extractor"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might lead to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend keeping the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) if safety_checker is not None and feature_extractor is None: raise ValueError( f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker.
If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." ) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config( requires_safety_checker=requires_safety_checker, ) def enable_vae_slicing(self): r""" Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ self.vae.enable_slicing() def disable_vae_slicing(self): r""" Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to computing decoding in one step. """ self.vae.disable_slicing() def enable_vae_tiling(self): r""" Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in several steps. This is useful to save a large amount of memory and to allow the processing of larger images. """ self.vae.enable_tiling() def disable_vae_tiling(self): r""" Disable tiled VAE decoding. If `enable_vae_tiling` was previously invoked, this method will go back to computing decoding in one step. """ self.vae.disable_tiling() # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_sequential_cpu_offload def enable_sequential_cpu_offload(self, gpu_id=0): r""" Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet, text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. Note that offloading happens on a submodule basis. Memory savings are higher than with `enable_model_cpu_offload`, but performance is lower. 
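Example (illustrative; the checkpoint id is an assumption, any Stable Diffusion checkpoint should work):

```py
>>> from diffusers import DiffusionPipeline

>>> pipe = DiffusionPipeline.from_pretrained(
...     "runwayml/stable-diffusion-v1-5", custom_pipeline="lpw_stable_diffusion"
... )
>>> pipe.enable_sequential_cpu_offload()
>>> # Submodules are now loaded onto the GPU one at a time when their forward method is called.
```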
""" if is_accelerate_available() and is_accelerate_version(">=", "0.14.0"): from accelerate import cpu_offload else: raise ImportError("`enable_sequential_cpu_offload` requires `accelerate v0.14.0` or higher") device = torch.device(f"cuda:{gpu_id}") if self.device.type != "cpu": self.to("cpu", silence_dtype_warnings=True) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]: cpu_offload(cpu_offloaded_model, device) if self.safety_checker is not None: cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_model_cpu_offload def enable_model_cpu_offload(self, gpu_id=0): r""" Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. """ if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.") device = torch.device(f"cuda:{gpu_id}") if self.device.type != "cpu": self.to("cpu", silence_dtype_warnings=True) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) hook = None for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]: _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook) if self.safety_checker is not None: _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook) # We'll offload the last model manually. self.final_offload_hook = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def _execution_device(self): r""" Returns the device on which the pipeline's models will be executed. After calling `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module hooks. """ if not hasattr(self.unet, "_hf_hook"): return self.device for module in self.unet.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, max_embeddings_multiples=3, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `list(int)`): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). 
max_embeddings_multiples (`int`, *optional*, defaults to `3`): The max multiple length of prompt embeddings compared to the max output length of text encoder. """ if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if negative_prompt_embeds is None: if negative_prompt is None: negative_prompt = [""] * batch_size elif isinstance(negative_prompt, str): negative_prompt = [negative_prompt] * batch_size if batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) if prompt_embeds is None or negative_prompt_embeds is None: if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) if do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = self.maybe_convert_prompt(negative_prompt, self.tokenizer) prompt_embeds1, negative_prompt_embeds1 = get_weighted_text_embeddings( pipe=self, prompt=prompt, uncond_prompt=negative_prompt if do_classifier_free_guidance else None, max_embeddings_multiples=max_embeddings_multiples, ) if prompt_embeds is None: prompt_embeds = prompt_embeds1 if negative_prompt_embeds is None: negative_prompt_embeds = negative_prompt_embeds1 bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: bs_embed, seq_len, _ = negative_prompt_embeds.shape negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) return prompt_embeds def check_inputs( self, prompt, height, width, strength, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if strength < 0 or strength > 1: raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. 
Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) def get_timesteps(self, num_inference_steps, strength, device, is_text2img): if is_text2img: return self.scheduler.timesteps.to(device), num_inference_steps else: # get the original timestep using init_timestep init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] return timesteps, num_inference_steps - t_start def run_safety_checker(self, image, device, dtype): if self.safety_checker is not None: safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) else: has_nsfw_concept = None return image, has_nsfw_concept def decode_latents(self, latents): latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents).sample image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def prepare_latents( self, image, timestep, num_images_per_prompt, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None, ): if image is None: batch_size = batch_size * num_images_per_prompt shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents, None, None else: image = image.to(device=self.device, dtype=dtype) init_latent_dist = self.vae.encode(image).latent_dist init_latents = init_latent_dist.sample(generator=generator) init_latents = self.vae.config.scaling_factor * init_latents # Expand init_latents for batch_size and num_images_per_prompt init_latents = torch.cat([init_latents] * num_images_per_prompt, dim=0) init_latents_orig = init_latents # add noise to latents using the timesteps noise = randn_tensor(init_latents.shape, generator=generator, device=self.device, dtype=dtype) init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents, init_latents_orig, noise @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]] = None, image: Union[torch.FloatTensor, PIL.Image.Image] = None, mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None, height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, strength: float = 0.8, num_images_per_prompt: Optional[int] = 1, add_predicted_noise: Optional[bool] = False, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, max_embeddings_multiples: Optional[int] = 3, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, is_cancelled_callback: Optional[Callable[[], bool]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). image (`torch.FloatTensor` or `PIL.Image.Image`): `Image`, or tensor representing an image batch, that will be used as the starting point for the process. mask_image (`torch.FloatTensor` or `PIL.Image.Image`): `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`. height (`int`, *optional*, defaults to 512): The height in pixels of the generated image. width (`int`, *optional*, defaults to 512): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. 
of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. strength (`float`, *optional*, defaults to 0.8): Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the `strength`. The number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will be maximum and the denoising process will run for the full number of iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. add_predicted_noise (`bool`, *optional*, defaults to False): Use predicted noise instead of random noise when constructing noisy versions of the original image in the reverse diffusion process. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will be generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. max_embeddings_multiples (`int`, *optional*, defaults to `3`): The max multiple length of prompt embeddings compared to the max output length of text encoder. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. is_cancelled_callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. If the function returns `True`, the inference will be cancelled. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step.
cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). Returns: `None` if cancelled by `is_cancelled_callback`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. """ # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, height, width, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds ) # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 3. Encode input prompt prompt_embeds = self._encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, max_embeddings_multiples, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, ) dtype = prompt_embeds.dtype # 4. Preprocess image and mask if isinstance(image, PIL.Image.Image): image = preprocess_image(image, batch_size) if image is not None: image = image.to(device=self.device, dtype=dtype) if isinstance(mask_image, PIL.Image.Image): mask_image = preprocess_mask(mask_image, batch_size, self.vae_scale_factor) if mask_image is not None: mask = mask_image.to(device=self.device, dtype=dtype) mask = torch.cat([mask] * num_images_per_prompt) else: mask = None # 5. set timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device, image is None) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) # 6. Prepare latent variables latents, init_latents_orig, noise = self.prepare_latents( image, latent_timestep, num_images_per_prompt, batch_size, self.unet.config.in_channels, height, width, dtype, device, generator, latents, ) # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 8. 
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, ).sample # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample if mask is not None: # masking if add_predicted_noise: init_latents_proper = self.scheduler.add_noise( init_latents_orig, noise_pred_uncond, torch.tensor([t]) ) else: init_latents_proper = self.scheduler.add_noise(init_latents_orig, noise, torch.tensor([t])) latents = (init_latents_proper * mask) + (latents * (1 - mask)) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if i % callback_steps == 0: if callback is not None: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if is_cancelled_callback is not None and is_cancelled_callback(): return None if output_type == "latent": image = latents has_nsfw_concept = None elif output_type == "pil": # 9. Post-processing image = self.decode_latents(latents) # 10. Run safety checker image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) # 11. Convert to PIL image = self.numpy_to_pil(image) else: # 9. Post-processing image = self.decode_latents(latents) # 10. Run safety checker image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) # Offload last model to CPU if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return image, has_nsfw_concept return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) def text2img( self, prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]] = None, height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, max_embeddings_multiples: Optional[int] = 3, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, is_cancelled_callback: Optional[Callable[[], bool]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, ): r""" Function for text-to-image generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). 
height (`int`, *optional*, defaults to 512): The height in pixels of the generated image. width (`int`, *optional*, defaults to 512): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. max_embeddings_multiples (`int`, *optional*, defaults to `3`): The max multiple length of prompt embeddings compared to the max output length of text encoder. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. is_cancelled_callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. If the function returns `True`, the inference will be cancelled. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. 
cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). Returns: `None` if cancelled by `is_cancelled_callback`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. """ return self.__call__( prompt=prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, max_embeddings_multiples=max_embeddings_multiples, output_type=output_type, return_dict=return_dict, callback=callback, is_cancelled_callback=is_cancelled_callback, callback_steps=callback_steps, cross_attention_kwargs=cross_attention_kwargs, ) def img2img( self, image: Union[torch.FloatTensor, PIL.Image.Image], prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]] = None, strength: float = 0.8, num_inference_steps: Optional[int] = 50, guidance_scale: Optional[float] = 7.5, num_images_per_prompt: Optional[int] = 1, eta: Optional[float] = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, max_embeddings_multiples: Optional[int] = 3, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, is_cancelled_callback: Optional[Callable[[], bool]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, ): r""" Function for image-to-image generation. Args: image (`torch.FloatTensor` or `PIL.Image.Image`): `Image`, or tensor representing an image batch, that will be used as the starting point for the process. prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). strength (`float`, *optional*, defaults to 0.8): Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the `strength`. The number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will be maximum and the denoising process will run for the full number of iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. This parameter will be modulated by `strength`. 
guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. max_embeddings_multiples (`int`, *optional*, defaults to `3`): The max multiple length of prompt embeddings compared to the max output length of text encoder. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. is_cancelled_callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. If the function returns `True`, the inference will be cancelled. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). Returns: `None` if cancelled by `is_cancelled_callback`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. 
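Example (illustrative sketch; the checkpoint id and image URL are placeholders):

```py
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import load_image

>>> pipe = DiffusionPipeline.from_pretrained(
...     "runwayml/stable-diffusion-v1-5", custom_pipeline="lpw_stable_diffusion", torch_dtype=torch.float16
... ).to("cuda")
>>> init_image = load_image("https://example.com/sketch.png")  # placeholder URL
>>> image = pipe.img2img(
...     image=init_image, prompt="a (detailed:1.2) watercolor painting, [photorealistic]", strength=0.7
... ).images[0]
```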
""" return self.__call__( prompt=prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, strength=strength, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, max_embeddings_multiples=max_embeddings_multiples, output_type=output_type, return_dict=return_dict, callback=callback, is_cancelled_callback=is_cancelled_callback, callback_steps=callback_steps, cross_attention_kwargs=cross_attention_kwargs, ) def inpaint( self, image: Union[torch.FloatTensor, PIL.Image.Image], mask_image: Union[torch.FloatTensor, PIL.Image.Image], prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]] = None, strength: float = 0.8, num_inference_steps: Optional[int] = 50, guidance_scale: Optional[float] = 7.5, num_images_per_prompt: Optional[int] = 1, add_predicted_noise: Optional[bool] = False, eta: Optional[float] = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, max_embeddings_multiples: Optional[int] = 3, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, is_cancelled_callback: Optional[Callable[[], bool]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, ): r""" Function for inpaint. Args: image (`torch.FloatTensor` or `PIL.Image.Image`): `Image`, or tensor representing an image batch, that will be used as the starting point for the process. This is the image whose masked region will be inpainted. mask_image (`torch.FloatTensor` or `PIL.Image.Image`): `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`. prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). strength (`float`, *optional*, defaults to 0.8): Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. When `strength` is 1, the denoising process will be run on the masked area for the full number of iterations specified in `num_inference_steps`. `image` will be used as a reference for the masked area, adding more noise to that region the larger the `strength`. If `strength` is 0, no inpainting will occur. num_inference_steps (`int`, *optional*, defaults to 50): The reference number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. This parameter will be modulated by `strength`, as explained above. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. 
Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. add_predicted_noise (`bool`, *optional*, defaults to True): Use predicted noise instead of random noise when constructing noisy versions of the original image in the reverse diffusion process eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. max_embeddings_multiples (`int`, *optional*, defaults to `3`): The max multiple length of prompt embeddings compared to the max output length of text encoder. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. is_cancelled_callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. If the function returns `True`, the inference will be cancelled. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). Returns: `None` if cancelled by `is_cancelled_callback`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. 
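Example (illustrative sketch; the checkpoint id and image URLs are placeholders):

```py
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import load_image

>>> pipe = DiffusionPipeline.from_pretrained(
...     "runwayml/stable-diffusion-v1-5", custom_pipeline="lpw_stable_diffusion"
... ).to("cuda")
>>> init_image = load_image("https://example.com/photo.png")  # placeholder URL
>>> mask = load_image("https://example.com/mask.png")  # white regions are repainted, black regions are kept
>>> image = pipe.inpaint(
...     image=init_image, mask_image=mask, prompt="a (red:1.3) vintage car", strength=0.75
... ).images[0]
```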
""" return self.__call__( prompt=prompt, negative_prompt=negative_prompt, image=image, mask_image=mask_image, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, strength=strength, num_images_per_prompt=num_images_per_prompt, add_predicted_noise=add_predicted_noise, eta=eta, generator=generator, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, max_embeddings_multiples=max_embeddings_multiples, output_type=output_type, return_dict=return_dict, callback=callback, is_cancelled_callback=is_cancelled_callback, callback_steps=callback_steps, cross_attention_kwargs=cross_attention_kwargs, )
diffusers/examples/community/lpw_stable_diffusion.py/0
{ "file_path": "diffusers/examples/community/lpw_stable_diffusion.py", "repo_id": "diffusers", "token_count": 32894 }
96
# Inspired by: https://github.com/haofanwang/ControlNet-for-Diffusers/ import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, ControlNetModel, DiffusionPipeline, UNet2DConditionModel, logging from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import ( PIL_INTERPOLATION, is_accelerate_available, is_accelerate_version, replace_example_docstring, ) from diffusers.utils.torch_utils import randn_tensor logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import numpy as np >>> import torch >>> from PIL import Image >>> from diffusers import ControlNetModel, UniPCMultistepScheduler >>> from diffusers.utils import load_image >>> input_image = load_image("https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png") >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16) >>> pipe_controlnet = StableDiffusionControlNetImg2ImgPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", controlnet=controlnet, safety_checker=None, torch_dtype=torch.float16 ) >>> pipe_controlnet.scheduler = UniPCMultistepScheduler.from_config(pipe_controlnet.scheduler.config) >>> pipe_controlnet.enable_xformers_memory_efficient_attention() >>> pipe_controlnet.enable_model_cpu_offload() # using image with edges for our canny controlnet >>> control_image = load_image( "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/vermeer_canny_edged.png") >>> result_img = pipe_controlnet(controlnet_conditioning_image=control_image, image=input_image, prompt="an android robot, cyberpank, digitl art masterpiece", num_inference_steps=20).images[0] >>> result_img.show() ``` """ def prepare_image(image): if isinstance(image, torch.Tensor): # Batch single image if image.ndim == 3: image = image.unsqueeze(0) image = image.to(dtype=torch.float32) else: # preprocess image if isinstance(image, (PIL.Image.Image, np.ndarray)): image = [image] if isinstance(image, list) and isinstance(image[0], PIL.Image.Image): image = [np.array(i.convert("RGB"))[None, :] for i in image] image = np.concatenate(image, axis=0) elif isinstance(image, list) and isinstance(image[0], np.ndarray): image = np.concatenate([i[None, :] for i in image], axis=0) image = image.transpose(0, 3, 1, 2) image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 return image def prepare_controlnet_conditioning_image( controlnet_conditioning_image, width, height, batch_size, num_images_per_prompt, device, dtype, do_classifier_free_guidance, ): if not isinstance(controlnet_conditioning_image, torch.Tensor): if isinstance(controlnet_conditioning_image, PIL.Image.Image): controlnet_conditioning_image = [controlnet_conditioning_image] if isinstance(controlnet_conditioning_image[0], PIL.Image.Image): controlnet_conditioning_image = [ np.array(i.resize((width, height), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in controlnet_conditioning_image ] controlnet_conditioning_image = np.concatenate(controlnet_conditioning_image, axis=0) controlnet_conditioning_image = 
np.array(controlnet_conditioning_image).astype(np.float32) / 255.0 controlnet_conditioning_image = controlnet_conditioning_image.transpose(0, 3, 1, 2) controlnet_conditioning_image = torch.from_numpy(controlnet_conditioning_image) elif isinstance(controlnet_conditioning_image[0], torch.Tensor): controlnet_conditioning_image = torch.cat(controlnet_conditioning_image, dim=0) image_batch_size = controlnet_conditioning_image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: # image batch size is the same as prompt batch size repeat_by = num_images_per_prompt controlnet_conditioning_image = controlnet_conditioning_image.repeat_interleave(repeat_by, dim=0) controlnet_conditioning_image = controlnet_conditioning_image.to(device=device, dtype=dtype) if do_classifier_free_guidance: controlnet_conditioning_image = torch.cat([controlnet_conditioning_image] * 2) return controlnet_conditioning_image class StableDiffusionControlNetImg2ImgPipeline(DiffusionPipeline): """ Inspired by: https://github.com/haofanwang/ControlNet-for-Diffusers/ """ _optional_components = ["safety_checker", "feature_extractor"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, ): super().__init__() if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) if safety_checker is not None and feature_extractor is None: raise ValueError( "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." ) if isinstance(controlnet, (list, tuple)): controlnet = MultiControlNetModel(controlnet) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.register_to_config(requires_safety_checker=requires_safety_checker) def enable_vae_slicing(self): r""" Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ self.vae.enable_slicing() def disable_vae_slicing(self): r""" Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to computing decoding in one step. """ self.vae.disable_slicing() def enable_sequential_cpu_offload(self, gpu_id=0): r""" Offloads all models to CPU using accelerate, significantly reducing memory usage. 
When called, unet, text_encoder, vae, controlnet, and safety checker have their state dicts saved to CPU and then are moved to a `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. Note that offloading happens on a submodule basis. Memory savings are higher than with `enable_model_cpu_offload`, but performance is lower. """ if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`") device = torch.device(f"cuda:{gpu_id}") for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.controlnet]: cpu_offload(cpu_offloaded_model, device) if self.safety_checker is not None: cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True) def enable_model_cpu_offload(self, gpu_id=0): r""" Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. """ if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.") device = torch.device(f"cuda:{gpu_id}") hook = None for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]: _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook) if self.safety_checker is not None: # the safety checker can offload the vae again _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook) # control net hook has be manually offloaded as it alternates with unet cpu_offload_with_hook(self.controlnet, device) # We'll offload the last model manually. self.final_offload_hook = hook @property def _execution_device(self): r""" Returns the device on which the pipeline's models will be executed. After calling `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module hooks. """ if not hasattr(self.unet, "_hf_hook"): return self.device for module in self.unet.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. 
Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. """ if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) prompt_embeds = prompt_embeds[0] prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." 
) else: uncond_tokens = negative_prompt max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) return prompt_embeds def run_safety_checker(self, image, device, dtype): if self.safety_checker is not None: safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) else: has_nsfw_concept = None return image, has_nsfw_concept def decode_latents(self, latents): latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents).sample image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_controlnet_conditioning_image(self, image, prompt, prompt_embeds): image_is_pil = isinstance(image, PIL.Image.Image) image_is_tensor = isinstance(image, torch.Tensor) image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) if not image_is_pil and not image_is_tensor and not image_is_pil_list and not image_is_tensor_list: raise TypeError( "image must be passed and be one of PIL image, torch tensor, list of PIL images, or list of torch tensors" ) if image_is_pil: image_batch_size = 1 elif image_is_tensor: image_batch_size = image.shape[0] elif image_is_pil_list: image_batch_size = len(image) elif image_is_tensor_list: image_batch_size = len(image) else: raise ValueError("controlnet condition image is not valid") if prompt is not None and isinstance(prompt, str): prompt_batch_size = 1 elif prompt is not None and isinstance(prompt, list): prompt_batch_size = len(prompt) elif prompt_embeds is not None: prompt_batch_size = prompt_embeds.shape[0] else: raise ValueError("prompt or prompt_embeds are not valid") if image_batch_size != 1 and image_batch_size != prompt_batch_size: raise ValueError( f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" ) def check_inputs( self, prompt, image, controlnet_conditioning_image, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, strength=None, controlnet_guidance_start=None, controlnet_guidance_end=None, controlnet_conditioning_scale=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) # check controlnet condition image if isinstance(self.controlnet, ControlNetModel): self.check_controlnet_conditioning_image(controlnet_conditioning_image, prompt, prompt_embeds) elif isinstance(self.controlnet, MultiControlNetModel): if not isinstance(controlnet_conditioning_image, list): raise TypeError("For multiple controlnets: `image` must be type `list`") if len(controlnet_conditioning_image) != len(self.controlnet.nets): raise ValueError( "For multiple controlnets: `image` must have the same length as the number of controlnets." ) for image_ in controlnet_conditioning_image: self.check_controlnet_conditioning_image(image_, prompt, prompt_embeds) else: assert False # Check `controlnet_conditioning_scale` if isinstance(self.controlnet, ControlNetModel): if not isinstance(controlnet_conditioning_scale, float): raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") elif isinstance(self.controlnet, MultiControlNetModel): if isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( self.controlnet.nets ): raise ValueError( "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" " the same length as the number of controlnets" ) else: assert False if isinstance(image, torch.Tensor): if image.ndim != 3 and image.ndim != 4: raise ValueError("`image` must have 3 or 4 dimensions") if image.ndim == 3: image_batch_size = 1 image_channels, image_height, image_width = image.shape elif image.ndim == 4: image_batch_size, image_channels, image_height, image_width = image.shape else: assert False if image_channels != 3: raise ValueError("`image` must have 3 channels") if image.min() < -1 or image.max() > 1: raise ValueError("`image` should be in range [-1, 1]") if self.vae.config.latent_channels != self.unet.config.in_channels: raise ValueError( f"The config of `pipeline.unet` expects {self.unet.config.in_channels} but received" f" latent channels: {self.vae.config.latent_channels}," f" Please verify the config of `pipeline.unet` and the `pipeline.vae`" ) if strength < 0 or strength > 1: raise ValueError(f"The value of `strength` should in [0.0, 1.0] but is {strength}") if controlnet_guidance_start < 0 or controlnet_guidance_start > 1: raise ValueError( f"The value of `controlnet_guidance_start` should in [0.0, 1.0] but is {controlnet_guidance_start}" ) if controlnet_guidance_end < 0 or controlnet_guidance_end > 1: raise ValueError( f"The value of `controlnet_guidance_end` should in [0.0, 1.0] but is {controlnet_guidance_end}" ) if controlnet_guidance_start > controlnet_guidance_end: raise ValueError( "The value of `controlnet_guidance_start` should be less than `controlnet_guidance_end`, but got" f" `controlnet_guidance_start` {controlnet_guidance_start} >= `controlnet_guidance_end` {controlnet_guidance_end}" ) def get_timesteps(self, num_inference_steps, strength, device): # get the original timestep using init_timestep init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start:] return timesteps, 
num_inference_steps - t_start def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) image = image.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if isinstance(generator, list): init_latents = [ self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size) ] init_latents = torch.cat(init_latents, dim=0) else: init_latents = self.vae.encode(image).latent_dist.sample(generator) init_latents = self.vae.config.scaling_factor * init_latents if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: raise ValueError( f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." ) else: init_latents = torch.cat([init_latents], dim=0) shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) # get latents init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents def _default_height_width(self, height, width, image): if isinstance(image, list): image = image[0] if height is None: if isinstance(image, PIL.Image.Image): height = image.height elif isinstance(image, torch.Tensor): height = image.shape[3] height = (height // 8) * 8 # round down to nearest multiple of 8 if width is None: if isinstance(image, PIL.Image.Image): width = image.width elif isinstance(image, torch.Tensor): width = image.shape[2] width = (width // 8) * 8 # round down to nearest multiple of 8 return height, width @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, image: Union[torch.Tensor, PIL.Image.Image] = None, controlnet_conditioning_image: Union[ torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image] ] = None, strength: float = 0.8, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, controlnet_conditioning_scale: Union[float, List[float]] = 1.0, controlnet_guidance_start: float = 0.0, controlnet_guidance_end: float = 1.0, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. 
image (`torch.Tensor` or `PIL.Image.Image`): `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will be masked out with `mask_image` and repainted according to `prompt`. controlnet_conditioning_image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]` or `List[PIL.Image.Image]`): The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If the type is specified as `Torch.FloatTensor`, it is passed to ControlNet as is. PIL.Image.Image` can also be accepted as an image. The control image is automatically resized to fit the output image. strength (`float`, *optional*): Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the `strength`. The number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will be maximum and the denoising process will run for the full number of iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. 
Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). controlnet_conditioning_scale (`float`, *optional*, defaults to 1.0): The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added to the residual in the original unet. controlnet_guidance_start ('float', *optional*, defaults to 0.0): The percentage of total steps the controlnet starts applying. Must be between 0 and 1. controlnet_guidance_end ('float', *optional*, defaults to 1.0): The percentage of total steps the controlnet ends applying. Must be between 0 and 1. Must be greater than `controlnet_guidance_start`. Examples: Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. """ # 0. Default height and width to unet height, width = self._default_height_width(height, width, controlnet_conditioning_image) # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, image, controlnet_conditioning_image, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, strength, controlnet_guidance_start, controlnet_guidance_end, controlnet_conditioning_scale, ) # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 if isinstance(self.controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(self.controlnet.nets) # 3. 
Encode input prompt prompt_embeds = self._encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, ) # 4. Prepare image, and controlnet_conditioning_image image = prepare_image(image) # condition image(s) if isinstance(self.controlnet, ControlNetModel): controlnet_conditioning_image = prepare_controlnet_conditioning_image( controlnet_conditioning_image=controlnet_conditioning_image, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=self.controlnet.dtype, do_classifier_free_guidance=do_classifier_free_guidance, ) elif isinstance(self.controlnet, MultiControlNetModel): controlnet_conditioning_images = [] for image_ in controlnet_conditioning_image: image_ = prepare_controlnet_conditioning_image( controlnet_conditioning_image=image_, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=self.controlnet.dtype, do_classifier_free_guidance=do_classifier_free_guidance, ) controlnet_conditioning_images.append(image_) controlnet_conditioning_image = controlnet_conditioning_images else: assert False # 5. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) # 6. Prepare latent variables latents = self.prepare_latents( image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator, ) # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 8. 
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # compute the percentage of total steps we are at current_sampling_percent = i / len(timesteps) if ( current_sampling_percent < controlnet_guidance_start or current_sampling_percent > controlnet_guidance_end ): # do not apply the controlnet down_block_res_samples = None mid_block_res_sample = None else: # apply the controlnet down_block_res_samples, mid_block_res_sample = self.controlnet( latent_model_input, t, encoder_hidden_states=prompt_embeds, controlnet_cond=controlnet_conditioning_image, conditioning_scale=controlnet_conditioning_scale, return_dict=False, ) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, ).sample # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) # If we do sequential model offloading, let's offload unet and controlnet # manually for max memory savings if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.unet.to("cpu") self.controlnet.to("cpu") torch.cuda.empty_cache() if output_type == "latent": image = latents has_nsfw_concept = None elif output_type == "pil": # 8. Post-processing image = self.decode_latents(latents) # 9. Run safety checker image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) # 10. Convert to PIL image = self.numpy_to_pil(image) else: # 8. Post-processing image = self.decode_latents(latents) # 9. Run safety checker image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) # Offload last model to CPU if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
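To complement the `__call__` documentation above, here is a hedged usage sketch of this img2img ControlNet pipeline. It assumes loading through `custom_pipeline="stable_diffusion_controlnet_img2img"`; the checkpoint ids, image URLs, and guidance-window values are illustrative only.

```py
# Hypothetical usage of the img2img ControlNet community pipeline defined above.
# Checkpoint ids, image URLs and the guidance window are illustrative placeholders.
import torch
from diffusers import ControlNetModel, DiffusionPipeline, UniPCMultistepScheduler
from diffusers.utils import load_image

controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    controlnet=controlnet,
    custom_pipeline="stable_diffusion_controlnet_img2img",
    torch_dtype=torch.float16,
)
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()

input_image = load_image("https://example.com/input.png")
canny_image = load_image("https://example.com/input_canny.png")

result = pipe(
    prompt="an android robot, cyberpunk, digital art masterpiece",
    image=input_image,
    controlnet_conditioning_image=canny_image,
    strength=0.8,
    num_inference_steps=20,
    controlnet_conditioning_scale=1.0,
    # only apply the ControlNet for the first 80% of the denoising steps
    controlnet_guidance_start=0.0,
    controlnet_guidance_end=0.8,
).images[0]
result.save("controlnet_img2img.png")
```

Restricting `controlnet_guidance_start`/`controlnet_guidance_end` to a sub-range applies the ControlNet residuals only during that portion of the denoising loop, as implemented in the loop above.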
diffusers/examples/community/stable_diffusion_controlnet_img2img.py/0
{ "file_path": "diffusers/examples/community/stable_diffusion_controlnet_img2img.py", "repo_id": "diffusers", "token_count": 20854 }
97
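As a side note on the `prepare_controlnet_conditioning_image` helper in the file above, the sketch below replays its PIL branch on a dummy input (sizes and batch numbers are arbitrary): resize, scale to [0, 1], convert to NCHW, repeat for the effective batch, and duplicate once more for classifier-free guidance.

```py
# Standalone replay of the PIL branch of prepare_controlnet_conditioning_image (above).
# The input image and batch numbers are arbitrary placeholders.
import numpy as np
import torch
from PIL import Image

image = Image.new("RGB", (768, 512))              # dummy conditioning image
width, height = 512, 512
batch_size, num_images_per_prompt = 1, 2
effective_batch = batch_size * num_images_per_prompt

array = np.array(image.resize((width, height), resample=Image.LANCZOS))[None, :]
array = array.astype(np.float32) / 255.0          # values in [0, 1]
tensor = torch.from_numpy(array.transpose(0, 3, 1, 2))  # NHWC -> NCHW

tensor = tensor.repeat_interleave(effective_batch, dim=0)
tensor = torch.cat([tensor] * 2)                  # unconditional + conditional copies
print(tensor.shape)                               # torch.Size([4, 3, 512, 512])
```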
import inspect from typing import List, Optional, Tuple, Union import torch from torch.nn import functional as F from transformers import CLIPTextModelWithProjection, CLIPTokenizer from transformers.models.clip.modeling_clip import CLIPTextModelOutput from diffusers import ( DiffusionPipeline, ImagePipelineOutput, PriorTransformer, UnCLIPScheduler, UNet2DConditionModel, UNet2DModel, ) from diffusers.pipelines.unclip import UnCLIPTextProjModel from diffusers.utils import is_accelerate_available, logging from diffusers.utils.torch_utils import randn_tensor logger = logging.get_logger(__name__) # pylint: disable=invalid-name def slerp(val, low, high): """ Find the interpolation point between the 'low' and 'high' values for the given 'val'. See https://en.wikipedia.org/wiki/Slerp for more details on the topic. """ low_norm = low / torch.norm(low) high_norm = high / torch.norm(high) omega = torch.acos((low_norm * high_norm)) so = torch.sin(omega) res = (torch.sin((1.0 - val) * omega) / so) * low + (torch.sin(val * omega) / so) * high return res class UnCLIPTextInterpolationPipeline(DiffusionPipeline): """ Pipeline for prompt-to-prompt interpolation on CLIP text embeddings and using the UnCLIP / Dall-E to decode them to images. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: text_encoder ([`CLIPTextModelWithProjection`]): Frozen text-encoder. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). prior ([`PriorTransformer`]): The canonincal unCLIP prior to approximate the image embedding from the text embedding. text_proj ([`UnCLIPTextProjModel`]): Utility class to prepare and combine the embeddings before they are passed to the decoder. decoder ([`UNet2DConditionModel`]): The decoder to invert the image embedding into an image. super_res_first ([`UNet2DModel`]): Super resolution unet. Used in all but the last step of the super resolution diffusion process. super_res_last ([`UNet2DModel`]): Super resolution unet. Used in the last step of the super resolution diffusion process. prior_scheduler ([`UnCLIPScheduler`]): Scheduler used in the prior denoising process. Just a modified DDPMScheduler. decoder_scheduler ([`UnCLIPScheduler`]): Scheduler used in the decoder denoising process. Just a modified DDPMScheduler. super_res_scheduler ([`UnCLIPScheduler`]): Scheduler used in the super resolution denoising process. Just a modified DDPMScheduler. 
""" prior: PriorTransformer decoder: UNet2DConditionModel text_proj: UnCLIPTextProjModel text_encoder: CLIPTextModelWithProjection tokenizer: CLIPTokenizer super_res_first: UNet2DModel super_res_last: UNet2DModel prior_scheduler: UnCLIPScheduler decoder_scheduler: UnCLIPScheduler super_res_scheduler: UnCLIPScheduler # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.__init__ def __init__( self, prior: PriorTransformer, decoder: UNet2DConditionModel, text_encoder: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, text_proj: UnCLIPTextProjModel, super_res_first: UNet2DModel, super_res_last: UNet2DModel, prior_scheduler: UnCLIPScheduler, decoder_scheduler: UnCLIPScheduler, super_res_scheduler: UnCLIPScheduler, ): super().__init__() self.register_modules( prior=prior, decoder=decoder, text_encoder=text_encoder, tokenizer=tokenizer, text_proj=text_proj, super_res_first=super_res_first, super_res_last=super_res_last, prior_scheduler=prior_scheduler, decoder_scheduler=decoder_scheduler, super_res_scheduler=super_res_scheduler, ) # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline._encode_prompt def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]] = None, text_attention_mask: Optional[torch.Tensor] = None, ): if text_model_output is None: batch_size = len(prompt) if isinstance(prompt, list) else 1 # get prompt text embeddings text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids text_mask = text_inputs.attention_mask.bool().to(device) untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] text_encoder_output = self.text_encoder(text_input_ids.to(device)) prompt_embeds = text_encoder_output.text_embeds text_encoder_hidden_states = text_encoder_output.last_hidden_state else: batch_size = text_model_output[0].shape[0] prompt_embeds, text_encoder_hidden_states = text_model_output[0], text_model_output[1] text_mask = text_attention_mask prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) if do_classifier_free_guidance: uncond_tokens = [""] * batch_size uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=self.tokenizer.model_max_length, 
truncation=True, return_tensors="pt", ) uncond_text_mask = uncond_input.attention_mask.bool().to(device) negative_prompt_embeds_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device)) negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.text_embeds uncond_text_encoder_hidden_states = negative_prompt_embeds_text_encoder_output.last_hidden_state # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) seq_len = uncond_text_encoder_hidden_states.shape[1] uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view( batch_size * num_images_per_prompt, seq_len, -1 ) uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) # done duplicates # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states]) text_mask = torch.cat([uncond_text_mask, text_mask]) return prompt_embeds, text_encoder_hidden_states, text_mask # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.enable_sequential_cpu_offload def enable_sequential_cpu_offload(self, gpu_id=0): r""" Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, the pipeline's models have their state dicts saved to CPU and then are moved to a `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. """ if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`") device = torch.device(f"cuda:{gpu_id}") # TODO: self.prior.post_process_latents is not covered by the offload hooks, so it fails if added to the list models = [ self.decoder, self.text_proj, self.text_encoder, self.super_res_first, self.super_res_last, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(cpu_offloaded_model, device) @property # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline._execution_device def _execution_device(self): r""" Returns the device on which the pipeline's models will be executed. After calling `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module hooks. 
""" if self.device != torch.device("meta") or not hasattr(self.decoder, "_hf_hook"): return self.device for module in self.decoder.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device @torch.no_grad() def __call__( self, start_prompt: str, end_prompt: str, steps: int = 5, prior_num_inference_steps: int = 25, decoder_num_inference_steps: int = 25, super_res_num_inference_steps: int = 7, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, prior_guidance_scale: float = 4.0, decoder_guidance_scale: float = 8.0, enable_sequential_cpu_offload=True, gpu_id=0, output_type: Optional[str] = "pil", return_dict: bool = True, ): """ Function invoked when calling the pipeline for generation. Args: start_prompt (`str`): The prompt to start the image generation interpolation from. end_prompt (`str`): The prompt to end the image generation interpolation at. steps (`int`, *optional*, defaults to 5): The number of steps over which to interpolate from start_prompt to end_prompt. The pipeline returns the same number of images as this value. prior_num_inference_steps (`int`, *optional*, defaults to 25): The number of denoising steps for the prior. More denoising steps usually lead to a higher quality image at the expense of slower inference. decoder_num_inference_steps (`int`, *optional*, defaults to 25): The number of denoising steps for the decoder. More denoising steps usually lead to a higher quality image at the expense of slower inference. super_res_num_inference_steps (`int`, *optional*, defaults to 7): The number of denoising steps for super resolution. More denoising steps usually lead to a higher quality image at the expense of slower inference. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. prior_guidance_scale (`float`, *optional*, defaults to 4.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. decoder_guidance_scale (`float`, *optional*, defaults to 4.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. enable_sequential_cpu_offload (`bool`, *optional*, defaults to `True`): If True, offloads all models to CPU using accelerate, significantly reducing memory usage. 
When called, the pipeline's models have their state dicts saved to CPU and then are moved to a `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. gpu_id (`int`, *optional*, defaults to `0`): The gpu_id to be passed to enable_sequential_cpu_offload. Only works when enable_sequential_cpu_offload is set to True. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. """ if not isinstance(start_prompt, str) or not isinstance(end_prompt, str): raise ValueError( f"`start_prompt` and `end_prompt` should be of type `str` but got {type(start_prompt)} and" f" {type(end_prompt)} instead" ) if enable_sequential_cpu_offload: self.enable_sequential_cpu_offload(gpu_id=gpu_id) device = self._execution_device # Turn the prompts into embeddings. inputs = self.tokenizer( [start_prompt, end_prompt], padding="max_length", truncation=True, max_length=self.tokenizer.model_max_length, return_tensors="pt", ) inputs.to(device) text_model_output = self.text_encoder(**inputs) text_attention_mask = torch.max(inputs.attention_mask[0], inputs.attention_mask[1]) text_attention_mask = torch.cat([text_attention_mask.unsqueeze(0)] * steps).to(device) # Interpolate from the start to end prompt using slerp and add the generated images to an image output pipeline batch_text_embeds = [] batch_last_hidden_state = [] for interp_val in torch.linspace(0, 1, steps): text_embeds = slerp(interp_val, text_model_output.text_embeds[0], text_model_output.text_embeds[1]) last_hidden_state = slerp( interp_val, text_model_output.last_hidden_state[0], text_model_output.last_hidden_state[1] ) batch_text_embeds.append(text_embeds.unsqueeze(0)) batch_last_hidden_state.append(last_hidden_state.unsqueeze(0)) batch_text_embeds = torch.cat(batch_text_embeds) batch_last_hidden_state = torch.cat(batch_last_hidden_state) text_model_output = CLIPTextModelOutput( text_embeds=batch_text_embeds, last_hidden_state=batch_last_hidden_state ) batch_size = text_model_output[0].shape[0] do_classifier_free_guidance = prior_guidance_scale > 1.0 or decoder_guidance_scale > 1.0 prompt_embeds, text_encoder_hidden_states, text_mask = self._encode_prompt( prompt=None, device=device, num_images_per_prompt=1, do_classifier_free_guidance=do_classifier_free_guidance, text_model_output=text_model_output, text_attention_mask=text_attention_mask, ) # prior self.prior_scheduler.set_timesteps(prior_num_inference_steps, device=device) prior_timesteps_tensor = self.prior_scheduler.timesteps embedding_dim = self.prior.config.embedding_dim prior_latents = self.prepare_latents( (batch_size, embedding_dim), prompt_embeds.dtype, device, generator, None, self.prior_scheduler, ) for i, t in enumerate(self.progress_bar(prior_timesteps_tensor)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([prior_latents] * 2) if do_classifier_free_guidance else prior_latents predicted_image_embedding = self.prior( latent_model_input, timestep=t, proj_embedding=prompt_embeds, encoder_hidden_states=text_encoder_hidden_states, attention_mask=text_mask, ).predicted_image_embedding if do_classifier_free_guidance: predicted_image_embedding_uncond, predicted_image_embedding_text = predicted_image_embedding.chunk(2) predicted_image_embedding = predicted_image_embedding_uncond + prior_guidance_scale * ( predicted_image_embedding_text - predicted_image_embedding_uncond ) if i + 1 == prior_timesteps_tensor.shape[0]: 
prev_timestep = None else: prev_timestep = prior_timesteps_tensor[i + 1] prior_latents = self.prior_scheduler.step( predicted_image_embedding, timestep=t, sample=prior_latents, generator=generator, prev_timestep=prev_timestep, ).prev_sample prior_latents = self.prior.post_process_latents(prior_latents) image_embeddings = prior_latents # done prior # decoder text_encoder_hidden_states, additive_clip_time_embeddings = self.text_proj( image_embeddings=image_embeddings, prompt_embeds=prompt_embeds, text_encoder_hidden_states=text_encoder_hidden_states, do_classifier_free_guidance=do_classifier_free_guidance, ) if device.type == "mps": # HACK: MPS: There is a panic when padding bool tensors, # so cast to int tensor for the pad and back to bool afterwards text_mask = text_mask.type(torch.int) decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=1) decoder_text_mask = decoder_text_mask.type(torch.bool) else: decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=True) self.decoder_scheduler.set_timesteps(decoder_num_inference_steps, device=device) decoder_timesteps_tensor = self.decoder_scheduler.timesteps num_channels_latents = self.decoder.config.in_channels height = self.decoder.config.sample_size width = self.decoder.config.sample_size decoder_latents = self.prepare_latents( (batch_size, num_channels_latents, height, width), text_encoder_hidden_states.dtype, device, generator, None, self.decoder_scheduler, ) for i, t in enumerate(self.progress_bar(decoder_timesteps_tensor)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([decoder_latents] * 2) if do_classifier_free_guidance else decoder_latents noise_pred = self.decoder( sample=latent_model_input, timestep=t, encoder_hidden_states=text_encoder_hidden_states, class_labels=additive_clip_time_embeddings, attention_mask=decoder_text_mask, ).sample if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred_uncond, _ = noise_pred_uncond.split(latent_model_input.shape[1], dim=1) noise_pred_text, predicted_variance = noise_pred_text.split(latent_model_input.shape[1], dim=1) noise_pred = noise_pred_uncond + decoder_guidance_scale * (noise_pred_text - noise_pred_uncond) noise_pred = torch.cat([noise_pred, predicted_variance], dim=1) if i + 1 == decoder_timesteps_tensor.shape[0]: prev_timestep = None else: prev_timestep = decoder_timesteps_tensor[i + 1] # compute the previous noisy sample x_t -> x_t-1 decoder_latents = self.decoder_scheduler.step( noise_pred, t, decoder_latents, prev_timestep=prev_timestep, generator=generator ).prev_sample decoder_latents = decoder_latents.clamp(-1, 1) image_small = decoder_latents # done decoder # super res self.super_res_scheduler.set_timesteps(super_res_num_inference_steps, device=device) super_res_timesteps_tensor = self.super_res_scheduler.timesteps channels = self.super_res_first.config.in_channels // 2 height = self.super_res_first.config.sample_size width = self.super_res_first.config.sample_size super_res_latents = self.prepare_latents( (batch_size, channels, height, width), image_small.dtype, device, generator, None, self.super_res_scheduler, ) if device.type == "mps": # MPS does not support many interpolations image_upscaled = F.interpolate(image_small, size=[height, width]) else: interpolate_antialias = {} if "antialias" in inspect.signature(F.interpolate).parameters: interpolate_antialias["antialias"] = True image_upscaled = F.interpolate( 
image_small, size=[height, width], mode="bicubic", align_corners=False, **interpolate_antialias ) for i, t in enumerate(self.progress_bar(super_res_timesteps_tensor)): # no classifier free guidance if i == super_res_timesteps_tensor.shape[0] - 1: unet = self.super_res_last else: unet = self.super_res_first latent_model_input = torch.cat([super_res_latents, image_upscaled], dim=1) noise_pred = unet( sample=latent_model_input, timestep=t, ).sample if i + 1 == super_res_timesteps_tensor.shape[0]: prev_timestep = None else: prev_timestep = super_res_timesteps_tensor[i + 1] # compute the previous noisy sample x_t -> x_t-1 super_res_latents = self.super_res_scheduler.step( noise_pred, t, super_res_latents, prev_timestep=prev_timestep, generator=generator ).prev_sample image = super_res_latents # done super res # post processing image = image * 0.5 + 0.5 image = image.clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image)
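As a usage reference for the interpolation pipeline defined above, here is a hedged sketch of how a community pipeline like this is typically loaded and called. The checkpoint id and the `custom_pipeline` loading path are assumptions based on the usual diffusers community-pipeline workflow rather than something this file specifies; only `start_prompt`, `end_prompt`, and `steps` come from the call signature shown above.

```python
# Illustrative sketch only: load the community pipeline and interpolate between two prompts.
# The checkpoint id below is an assumption; an unCLIP-style checkpoint with the expected
# components (prior, decoder, super-res UNets, CLIP text encoder) is required.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "kakaobrain/karlo-v1-alpha",                  # assumed unCLIP checkpoint
    custom_pipeline="unclip_text_interpolation",  # this community pipeline
    torch_dtype=torch.float16,
)
pipe.to("cuda")

output = pipe(
    start_prompt="a photo of a cat",
    end_prompt="a photo of a dog",
    steps=6,  # number of slerp interpolation steps between the two prompt embeddings
)
for i, image in enumerate(output.images):
    image.save(f"interp_{i}.png")
```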
diffusers/examples/community/unclip_text_interpolation.py/0
{ "file_path": "diffusers/examples/community/unclip_text_interpolation.py", "repo_id": "diffusers", "token_count": 11575 }
98
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import sys import tempfile import safetensors sys.path.append("..") from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402 from diffusers import DiffusionPipeline # noqa: E402 logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class DreamBoothLoRA(ExamplesTestsAccelerate): def test_dreambooth_lora(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth_lora.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe --instance_data_dir docs/source/en/imgs --instance_prompt photo --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))) # make sure the state_dict has the correct naming in the parameters. lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")) is_lora = all("lora" in k for k in lora_state_dict.keys()) self.assertTrue(is_lora) # when not training the text encoder, all the parameters in the state dict should start # with `"unet"` in their names. starts_with_unet = all(key.startswith("unet") for key in lora_state_dict.keys()) self.assertTrue(starts_with_unet) def test_dreambooth_lora_with_text_encoder(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth_lora.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe --instance_data_dir docs/source/en/imgs --instance_prompt photo --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --train_text_encoder --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))) # check `text_encoder` is present at all. lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")) keys = lora_state_dict.keys() is_text_encoder_present = any(k.startswith("text_encoder") for k in keys) self.assertTrue(is_text_encoder_present) # the names of the keys of the state dict should either start with `unet` # or `text_encoder`. 
is_correct_naming = all(k.startswith("unet") or k.startswith("text_encoder") for k in keys) self.assertTrue(is_correct_naming) def test_dreambooth_lora_checkpointing_checkpoints_total_limit(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth_lora.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --instance_data_dir=docs/source/en/imgs --output_dir={tmpdir} --instance_prompt=prompt --resolution=64 --train_batch_size=1 --gradient_accumulation_steps=1 --max_train_steps=6 --checkpoints_total_limit=2 --checkpointing_steps=2 """.split() run_command(self._launch_args + test_args) self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"}, ) def test_dreambooth_lora_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth_lora.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --instance_data_dir=docs/source/en/imgs --output_dir={tmpdir} --instance_prompt=prompt --resolution=64 --train_batch_size=1 --gradient_accumulation_steps=1 --max_train_steps=4 --checkpointing_steps=2 """.split() run_command(self._launch_args + test_args) self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-4"}) resume_run_args = f""" examples/dreambooth/train_dreambooth_lora.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --instance_data_dir=docs/source/en/imgs --output_dir={tmpdir} --instance_prompt=prompt --resolution=64 --train_batch_size=1 --gradient_accumulation_steps=1 --max_train_steps=8 --checkpointing_steps=2 --resume_from_checkpoint=checkpoint-4 --checkpoints_total_limit=2 """.split() run_command(self._launch_args + resume_run_args) self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-6", "checkpoint-8"}) def test_dreambooth_lora_if_model(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth_lora.py --pretrained_model_name_or_path hf-internal-testing/tiny-if-pipe --instance_data_dir docs/source/en/imgs --instance_prompt photo --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --pre_compute_text_embeddings --tokenizer_max_length=77 --text_encoder_use_attention_mask """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))) # make sure the state_dict has the correct naming in the parameters. lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")) is_lora = all("lora" in k for k in lora_state_dict.keys()) self.assertTrue(is_lora) # when not training the text encoder, all the parameters in the state dict should start # with `"unet"` in their names. 
starts_with_unet = all(key.startswith("unet") for key in lora_state_dict.keys()) self.assertTrue(starts_with_unet) class DreamBoothLoRASDXL(ExamplesTestsAccelerate): def test_dreambooth_lora_sdxl(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth_lora_sdxl.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-xl-pipe --instance_data_dir docs/source/en/imgs --instance_prompt photo --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))) # make sure the state_dict has the correct naming in the parameters. lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")) is_lora = all("lora" in k for k in lora_state_dict.keys()) self.assertTrue(is_lora) # when not training the text encoder, all the parameters in the state dict should start # with `"unet"` in their names. starts_with_unet = all(key.startswith("unet") for key in lora_state_dict.keys()) self.assertTrue(starts_with_unet) def test_dreambooth_lora_sdxl_with_text_encoder(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth_lora_sdxl.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-xl-pipe --instance_data_dir docs/source/en/imgs --instance_prompt photo --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --train_text_encoder """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))) # make sure the state_dict has the correct naming in the parameters. lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")) is_lora = all("lora" in k for k in lora_state_dict.keys()) self.assertTrue(is_lora) # when not training the text encoder, all the parameters in the state dict should start # with `"unet"` or `"text_encoder"` or `"text_encoder_2"` in their names. 
keys = lora_state_dict.keys() starts_with_unet = all( k.startswith("unet") or k.startswith("text_encoder") or k.startswith("text_encoder_2") for k in keys ) self.assertTrue(starts_with_unet) def test_dreambooth_lora_sdxl_custom_captions(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth_lora_sdxl.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-xl-pipe --dataset_name hf-internal-testing/dummy_image_text_data --caption_column text --instance_prompt photo --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) def test_dreambooth_lora_sdxl_text_encoder_custom_captions(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth_lora_sdxl.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-xl-pipe --dataset_name hf-internal-testing/dummy_image_text_data --caption_column text --instance_prompt photo --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --train_text_encoder """.split() run_command(self._launch_args + test_args) def test_dreambooth_lora_sdxl_checkpointing_checkpoints_total_limit(self): pipeline_path = "hf-internal-testing/tiny-stable-diffusion-xl-pipe" with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth_lora_sdxl.py --pretrained_model_name_or_path {pipeline_path} --instance_data_dir docs/source/en/imgs --instance_prompt photo --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 6 --checkpointing_steps=2 --checkpoints_total_limit=2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) pipe = DiffusionPipeline.from_pretrained(pipeline_path) pipe.load_lora_weights(tmpdir) pipe("a prompt", num_inference_steps=1) # check checkpoint directories exist # checkpoint-2 should have been deleted self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"}) def test_dreambooth_lora_sdxl_text_encoder_checkpointing_checkpoints_total_limit(self): pipeline_path = "hf-internal-testing/tiny-stable-diffusion-xl-pipe" with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth_lora_sdxl.py --pretrained_model_name_or_path {pipeline_path} --instance_data_dir docs/source/en/imgs --instance_prompt photo --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 7 --checkpointing_steps=2 --checkpoints_total_limit=2 --train_text_encoder --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) pipe = DiffusionPipeline.from_pretrained(pipeline_path) pipe.load_lora_weights(tmpdir) pipe("a prompt", num_inference_steps=2) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, # checkpoint-2 should have been deleted {"checkpoint-4", "checkpoint-6"}, )
diffusers/examples/dreambooth/test_dreambooth_lora.py/0
{ "file_path": "diffusers/examples/dreambooth/test_dreambooth_lora.py", "repo_id": "diffusers", "token_count": 8108 }
99
#!/usr/bin/env python # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and import argparse import logging import math import os import shutil from pathlib import Path import accelerate import datasets import numpy as np import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.state import AcceleratorState from accelerate.utils import ProjectConfiguration, set_seed from datasets import load_dataset from huggingface_hub import create_repo, upload_folder from packaging import version from PIL import Image from tqdm import tqdm from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from transformers.utils import ContextManagers import diffusers from diffusers import AutoPipelineForText2Image, DDPMScheduler, UNet2DConditionModel, VQModel from diffusers.optimization import get_scheduler from diffusers.training_utils import EMAModel, compute_snr from diffusers.utils import check_min_version, is_wandb_available, make_image_grid from diffusers.utils.import_utils import is_xformers_available if is_wandb_available(): import wandb # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.26.0.dev0") logger = get_logger(__name__, log_level="INFO") def save_model_card( args, repo_id: str, images=None, repo_folder=None, ): img_str = "" if len(images) > 0: image_grid = make_image_grid(images, 1, len(args.validation_prompts)) image_grid.save(os.path.join(repo_folder, "val_imgs_grid.png")) img_str += "![val_imgs_grid](./val_imgs_grid.png)\n" yaml = f""" --- license: creativeml-openrail-m base_model: {args.pretrained_decoder_model_name_or_path} datasets: - {args.dataset_name} prior: - {args.pretrained_prior_model_name_or_path} tags: - kandinsky - text-to-image - diffusers inference: true --- """ model_card = f""" # Finetuning - {repo_id} This pipeline was finetuned from **{args.pretrained_decoder_model_name_or_path}** on the **{args.dataset_name}** dataset. 
Below are some example images generated with the finetuned pipeline using the following prompts: {args.validation_prompts}: \n {img_str} ## Pipeline usage You can use the pipeline like so: ```python from diffusers import DiffusionPipeline import torch pipeline = AutoPipelineForText2Image.from_pretrained("{repo_id}", torch_dtype=torch.float16) prompt = "{args.validation_prompts[0]}" image = pipeline(prompt).images[0] image.save("my_image.png") ``` ## Training info These are the key hyperparameters used during training: * Epochs: {args.num_train_epochs} * Learning rate: {args.learning_rate} * Batch size: {args.train_batch_size} * Gradient accumulation steps: {args.gradient_accumulation_steps} * Image resolution: {args.resolution} * Mixed-precision: {args.mixed_precision} """ wandb_info = "" if is_wandb_available(): wandb_run_url = None if wandb.run is not None: wandb_run_url = wandb.run.url if wandb_run_url is not None: wandb_info = f""" More information on all the CLI arguments and the environment are available on your [`wandb` run page]({wandb_run_url}). """ model_card += wandb_info with open(os.path.join(repo_folder, "README.md"), "w") as f: f.write(yaml + model_card) def log_validation(vae, image_encoder, image_processor, unet, args, accelerator, weight_dtype, epoch): logger.info("Running validation... ") pipeline = AutoPipelineForText2Image.from_pretrained( args.pretrained_decoder_model_name_or_path, vae=accelerator.unwrap_model(vae), prior_image_encoder=accelerator.unwrap_model(image_encoder), prior_image_processor=image_processor, unet=accelerator.unwrap_model(unet), torch_dtype=weight_dtype, ) pipeline = pipeline.to(accelerator.device) pipeline.set_progress_bar_config(disable=True) if args.enable_xformers_memory_efficient_attention: pipeline.enable_xformers_memory_efficient_attention() if args.seed is None: generator = None else: generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) images = [] for i in range(len(args.validation_prompts)): with torch.autocast("cuda"): image = pipeline(args.validation_prompts[i], num_inference_steps=20, generator=generator).images[0] images.append(image) for tracker in accelerator.trackers: if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in images]) tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC") elif tracker.name == "wandb": tracker.log( { "validation": [ wandb.Image(image, caption=f"{i}: {args.validation_prompts[i]}") for i, image in enumerate(images) ] } ) else: logger.warn(f"image logging not implemented for {tracker.name}") del pipeline torch.cuda.empty_cache() return images def parse_args(): parser = argparse.ArgumentParser(description="Simple example of finetuning Kandinsky 2.2.") parser.add_argument( "--pretrained_decoder_model_name_or_path", type=str, default="kandinsky-community/kandinsky-2-2-decoder", required=False, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--pretrained_prior_model_name_or_path", type=str, default="kandinsky-community/kandinsky-2-2-prior", required=False, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--dataset_name", type=str, default=None, help=( "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," " or to a folder containing files that 🤗 Datasets can understand." 
), ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The config of the Dataset, leave as None if there's only one config.", ) parser.add_argument( "--train_data_dir", type=str, default=None, help=( "A folder containing the training data. Folder contents must follow the structure described in" " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file" " must exist to provide the captions for the images. Ignored if `dataset_name` is specified." ), ) parser.add_argument( "--image_column", type=str, default="image", help="The column of the dataset containing an image." ) parser.add_argument( "--max_train_samples", type=int, default=None, help=( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ), ) parser.add_argument( "--validation_prompts", type=str, default=None, nargs="+", help=("A set of prompts evaluated every `--validation_epochs` and logged to `--report_to`."), ) parser.add_argument( "--output_dir", type=str, default="kandi_2_2-model-finetuned", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--cache_dir", type=str, default=None, help="The directory where the downloaded models and datasets will be stored.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--train_batch_size", type=int, default=1, help="Batch size (per device) for the training dataloader." ) parser.add_argument("--num_train_epochs", type=int, default=100) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=1e-4, help="learning rate", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--snr_gamma", type=float, default=None, help="SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. " "More details here: https://arxiv.org/abs/2303.09556.", ) parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. 
For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.") parser.add_argument( "--dataloader_num_workers", type=int, default=0, help=( "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." ), ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument( "--adam_weight_decay", type=float, default=0.0, required=False, help="weight decay_to_use", ) parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--checkpoints_total_limit", type=int, default=None, help=("Max number of checkpoints to store."), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." 
) parser.add_argument( "--validation_epochs", type=int, default=5, help="Run validation every X epochs.", ) parser.add_argument( "--tracker_project_name", type=str, default="text2image-fine-tune", help=( "The `project_name` argument passed to Accelerator.init_trackers for" " more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator" ), ) args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank # Sanity checks if args.dataset_name is None and args.train_data_dir is None: raise ValueError("Need either a dataset name or a training folder.") return args def main(): args = parse_args() logging_dir = os.path.join(args.output_dir, args.logging_dir) accelerator_project_config = ProjectConfiguration( total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir ) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config, ) # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_decoder_model_name_or_path, subfolder="scheduler") image_processor = CLIPImageProcessor.from_pretrained( args.pretrained_prior_model_name_or_path, subfolder="image_processor" ) def deepspeed_zero_init_disabled_context_manager(): """ returns either a context list that includes one that will disable zero.Init or an empty context list """ deepspeed_plugin = AcceleratorState().deepspeed_plugin if accelerate.state.is_initialized() else None if deepspeed_plugin is None: return [] return [deepspeed_plugin.zero3_init_context_manager(enable=False)] weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 with ContextManagers(deepspeed_zero_init_disabled_context_manager()): vae = VQModel.from_pretrained( args.pretrained_decoder_model_name_or_path, subfolder="movq", torch_dtype=weight_dtype ).eval() image_encoder = CLIPVisionModelWithProjection.from_pretrained( args.pretrained_prior_model_name_or_path, subfolder="image_encoder", torch_dtype=weight_dtype ).eval() unet = UNet2DConditionModel.from_pretrained(args.pretrained_decoder_model_name_or_path, subfolder="unet") # Freeze vae and image_encoder vae.requires_grad_(False) image_encoder.requires_grad_(False) # Set unet to trainable. 
unet.train() # Create EMA for the unet. if args.use_ema: ema_unet = UNet2DConditionModel.from_pretrained(args.pretrained_decoder_model_name_or_path, subfolder="unet") ema_unet = EMAModel(ema_unet.parameters(), model_cls=UNet2DConditionModel, model_config=ema_unet.config) ema_unet.to(accelerator.device) if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): import xformers xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): logger.warn( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly") # `accelerate` 0.16.0 will have better support for customized saving if version.parse(accelerate.__version__) >= version.parse("0.16.0"): # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format def save_model_hook(models, weights, output_dir): if args.use_ema: ema_unet.save_pretrained(os.path.join(output_dir, "unet_ema")) for i, model in enumerate(models): model.save_pretrained(os.path.join(output_dir, "unet")) # make sure to pop weight so that corresponding model is not saved again weights.pop() def load_model_hook(models, input_dir): if args.use_ema: load_model = EMAModel.from_pretrained(os.path.join(input_dir, "unet_ema"), UNet2DConditionModel) ema_unet.load_state_dict(load_model.state_dict()) ema_unet.to(accelerator.device) del load_model for i in range(len(models)): # pop models so that they are not loaded again model = models.pop() # load diffusers style into model load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet") model.register_to_config(**load_model.config) model.load_state_dict(load_model.state_dict()) del load_model accelerator.register_save_state_pre_hook(save_model_hook) accelerator.register_load_state_pre_hook(load_model_hook) if args.gradient_checkpointing: unet.enable_gradient_checkpointing() # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`" ) optimizer_cls = bnb.optim.AdamW8bit else: optimizer_cls = torch.optim.AdamW optimizer = optimizer_cls( unet.parameters(), lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) # Get the datasets: you can either provide your own training and evaluation files (see below) # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub). # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. if args.dataset_name is not None: # Downloading and loading a dataset from the hub. 
dataset = load_dataset( args.dataset_name, args.dataset_config_name, cache_dir=args.cache_dir, ) else: data_files = {} if args.train_data_dir is not None: data_files["train"] = os.path.join(args.train_data_dir, "**") dataset = load_dataset( "imagefolder", data_files=data_files, cache_dir=args.cache_dir, ) # See more about loading custom images at # https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder # Preprocessing the datasets. # We need to tokenize inputs and targets. column_names = dataset["train"].column_names image_column = args.image_column if image_column not in column_names: raise ValueError(f"--image_column' value '{args.image_column}' needs to be one of: {', '.join(column_names)}") def center_crop(image): width, height = image.size new_size = min(width, height) left = (width - new_size) / 2 top = (height - new_size) / 2 right = (width + new_size) / 2 bottom = (height + new_size) / 2 return image.crop((left, top, right, bottom)) def train_transforms(img): img = center_crop(img) img = img.resize((args.resolution, args.resolution), resample=Image.BICUBIC, reducing_gap=1) img = np.array(img).astype(np.float32) / 127.5 - 1 img = torch.from_numpy(np.transpose(img, [2, 0, 1])) return img def preprocess_train(examples): images = [image.convert("RGB") for image in examples[image_column]] examples["pixel_values"] = [train_transforms(image) for image in images] examples["clip_pixel_values"] = image_processor(images, return_tensors="pt").pixel_values return examples with accelerator.main_process_first(): if args.max_train_samples is not None: dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples)) # Set the training transforms train_dataset = dataset["train"].with_transform(preprocess_train) def collate_fn(examples): pixel_values = torch.stack([example["pixel_values"] for example in examples]) pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() clip_pixel_values = torch.stack([example["clip_pixel_values"] for example in examples]) clip_pixel_values = clip_pixel_values.to(memory_format=torch.contiguous_format).float() return {"pixel_values": pixel_values, "clip_pixel_values": clip_pixel_values} train_dataloader = torch.utils.data.DataLoader( train_dataset, shuffle=True, collate_fn=collate_fn, batch_size=args.train_batch_size, num_workers=args.dataloader_num_workers, ) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, ) unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, optimizer, train_dataloader, lr_scheduler ) # Move image_encode and vae to gpu and cast to weight_dtype image_encoder.to(accelerator.device, dtype=weight_dtype) vae.to(accelerator.device, dtype=weight_dtype) # We need to recalculate our total training steps as the size of the training dataloader may have changed. 
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: tracker_config = dict(vars(args)) tracker_config.pop("validation_prompts") accelerator.init_trackers(args.tracker_project_name, tracker_config) # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." ) args.resume_from_checkpoint = None initial_global_step = 0 else: accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) initial_global_step = global_step first_epoch = global_step // num_update_steps_per_epoch else: initial_global_step = 0 progress_bar = tqdm( range(0, args.max_train_steps), initial=initial_global_step, desc="Steps", # Only show the progress bar once on each machine. disable=not accelerator.is_local_main_process, ) for epoch in range(first_epoch, args.num_train_epochs): train_loss = 0.0 for step, batch in enumerate(train_dataloader): with accelerator.accumulate(unet): # Convert images to latent space images = batch["pixel_values"].to(weight_dtype) clip_images = batch["clip_pixel_values"].to(weight_dtype) latents = vae.encode(images).latents image_embeds = image_encoder(clip_images).image_embeds # Sample noise that we'll add to the latents noise = torch.randn_like(latents) bsz = latents.shape[0] # Sample a random timestep for each image timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device) timesteps = timesteps.long() noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) target = noise # Predict the noise residual and compute loss added_cond_kwargs = {"image_embeds": image_embeds} model_pred = unet(noisy_latents, timesteps, None, added_cond_kwargs=added_cond_kwargs).sample[:, :4] if args.snr_gamma is None: loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") else: # Compute loss-weights as per Section 3.4 of https://arxiv.org/abs/2303.09556. # Since we predict the noise instead of x_0, the original formulation is slightly changed. 
# This is discussed in Section 4.2 of the same paper. snr = compute_snr(noise_scheduler, timesteps) mse_loss_weights = torch.stack([snr, args.snr_gamma * torch.ones_like(timesteps)], dim=1).min( dim=1 )[0] if noise_scheduler.config.prediction_type == "epsilon": mse_loss_weights = mse_loss_weights / snr elif noise_scheduler.config.prediction_type == "v_prediction": mse_loss_weights = mse_loss_weights / (snr + 1) loss = F.mse_loss(model_pred.float(), target.float(), reduction="none") loss = loss.mean(dim=list(range(1, len(loss.shape)))) * mse_loss_weights loss = loss.mean() # Gather the losses across all processes for logging (if we use distributed training). avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean() train_loss += avg_loss.item() / args.gradient_accumulation_steps # Backpropagate accelerator.backward(loss) if accelerator.sync_gradients: accelerator.clip_grad_norm_(unet.parameters(), args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: if args.use_ema: ema_unet.step(unet.parameters()) progress_bar.update(1) global_step += 1 accelerator.log({"train_loss": train_loss}, step=global_step) train_loss = 0.0 if global_step % args.checkpointing_steps == 0: if accelerator.is_main_process: # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` if args.checkpoints_total_limit is not None: checkpoints = os.listdir(args.output_dir) checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints if len(checkpoints) >= args.checkpoints_total_limit: num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 removing_checkpoints = checkpoints[0:num_to_remove] logger.info( f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" ) logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") for removing_checkpoint in removing_checkpoints: removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) shutil.rmtree(removing_checkpoint) save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) if global_step >= args.max_train_steps: break if accelerator.is_main_process: if args.validation_prompts is not None and epoch % args.validation_epochs == 0: if args.use_ema: # Store the UNet parameters temporarily and load the EMA parameters to perform inference. ema_unet.store(unet.parameters()) ema_unet.copy_to(unet.parameters()) log_validation( vae, image_encoder, image_processor, unet, args, accelerator, weight_dtype, global_step, ) if args.use_ema: # Switch back to the original UNet parameters. ema_unet.restore(unet.parameters()) # Create the pipeline using the trained modules and save it. accelerator.wait_for_everyone() if accelerator.is_main_process: unet = accelerator.unwrap_model(unet) if args.use_ema: ema_unet.copy_to(unet.parameters()) pipeline = AutoPipelineForText2Image.from_pretrained( args.pretrained_decoder_model_name_or_path, vae=vae, unet=unet, ) pipeline.decoder_pipe.save_pretrained(args.output_dir) # Run a final round of inference. 
images = [] if args.validation_prompts is not None: logger.info("Running inference for collecting generated images...") pipeline = pipeline.to(accelerator.device) pipeline.torch_dtype = weight_dtype pipeline.set_progress_bar_config(disable=True) pipeline.enable_model_cpu_offload() if args.enable_xformers_memory_efficient_attention: pipeline.enable_xformers_memory_efficient_attention() if args.seed is None: generator = None else: generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) for i in range(len(args.validation_prompts)): with torch.autocast("cuda"): image = pipeline(args.validation_prompts[i], num_inference_steps=20, generator=generator).images[0] images.append(image) if args.push_to_hub: save_model_card(args, repo_id, images, repo_folder=args.output_dir) upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) accelerator.end_training() if __name__ == "__main__": main()
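For reference, the `--snr_gamma` branch in the training loop above rebalances the MSE loss with the Min-SNR weighting from the paper cited in the code. Written out from the tensor operations used there, the per-timestep weight is:

$$
w(t) \;=\; \frac{\min\!\big(\mathrm{SNR}(t),\,\gamma\big)}{\mathrm{SNR}(t)} \quad \text{(epsilon-prediction)},
\qquad
w(t) \;=\; \frac{\min\!\big(\mathrm{SNR}(t),\,\gamma\big)}{\mathrm{SNR}(t)+1} \quad \text{(v-prediction)},
$$

where $\gamma$ is `--snr_gamma` (5.0 is the value recommended in the argument help) and $\mathrm{SNR}(t)$ is computed by `compute_snr` from the noise scheduler.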
diffusers/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py/0
{ "file_path": "diffusers/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py", "repo_id": "diffusers", "token_count": 16245 }
100
## Diffusers examples with Intel optimizations

**This research project is not actively maintained by the diffusers team. For any questions or comments, please make sure to tag @hshen14 .**

This project aims to provide diffusers examples with Intel optimizations such as Bfloat16 for training/fine-tuning acceleration and 8-bit integer (INT8) for inference acceleration on Intel platforms.

## Accelerating the fine-tuning for textual inversion

We accelerate the fine-tuning for textual inversion with Intel Extension for PyTorch. The [examples](textual_inversion) enable both single-node and multi-node distributed training with Bfloat16 support on Intel Xeon Scalable Processors.

## Accelerating the inference for Stable Diffusion using Bfloat16

We start the inference acceleration with Bfloat16 using Intel Extension for PyTorch. The [script](inference_bf16.py) is designed to run standard Stable Diffusion models with Bfloat16. An illustrative sketch of this workflow is shown below.

```bash
pip install diffusers transformers accelerate scipy safetensors

export KMP_BLOCKTIME=1
export KMP_SETTINGS=1
export KMP_AFFINITY=granularity=fine,compact,1,0

# Intel OpenMP
export OMP_NUM_THREADS=< Cores to use >
export LD_PRELOAD=${LD_PRELOAD}:/path/to/lib/libiomp5.so

# Jemalloc is a recommended malloc implementation that emphasizes fragmentation avoidance and scalable concurrency support.
export LD_PRELOAD=${LD_PRELOAD}:/path/to/lib/libjemalloc.so
export MALLOC_CONF="oversize_threshold:1,background_thread:true,metadata_thp:auto,dirty_decay_ms:-1,muzzy_decay_ms:9000000000"

# Launch with default DDIM
numactl --membind <node N> -C <cpu list> python inference_bf16.py
# Launch with DPMSolverMultistepScheduler
numactl --membind <node N> -C <cpu list> python inference_bf16.py --dpm
```

## Accelerating the inference for Stable Diffusion using INT8

Coming soon ...
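To make the Bfloat16 workflow described above more concrete, the following is a minimal, illustrative sketch of CPU Bfloat16 inference with Intel Extension for PyTorch. It reflects the typical IPEX workflow under stated assumptions, not a copy of `inference_bf16.py`; the model id, prompt, and step count are placeholders.

```python
# Illustrative BF16 inference sketch with Intel Extension for PyTorch (IPEX); the bundled
# inference_bf16.py script may differ in details such as scheduler selection and benchmarking.
import torch
import intel_extension_for_pytorch as ipex
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")  # placeholder model id
pipe.to("cpu")

# Optimize the heavy submodules for Bfloat16 execution on CPU.
pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)

prompt = "a photo of an astronaut riding a horse on mars"
with torch.cpu.amp.autocast(dtype=torch.bfloat16), torch.no_grad():
    image = pipe(prompt, num_inference_steps=20).images[0]
image.save("astronaut.png")
```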
diffusers/examples/research_projects/intel_opts/README.md/0
{ "file_path": "diffusers/examples/research_projects/intel_opts/README.md", "repo_id": "diffusers", "token_count": 528 }
101
# Script for converting a HF Diffusers saved pipeline to a Stable Diffusion checkpoint. # *Only* converts the UNet, VAE, and Text Encoder. # Does not convert optimizer state or any other thing. import argparse import os.path as osp import re import torch from safetensors.torch import load_file, save_file # =================# # UNet Conversion # # =================# unet_conversion_map = [ # (stable-diffusion, HF Diffusers) ("time_embed.0.weight", "time_embedding.linear_1.weight"), ("time_embed.0.bias", "time_embedding.linear_1.bias"), ("time_embed.2.weight", "time_embedding.linear_2.weight"), ("time_embed.2.bias", "time_embedding.linear_2.bias"), ("input_blocks.0.0.weight", "conv_in.weight"), ("input_blocks.0.0.bias", "conv_in.bias"), ("out.0.weight", "conv_norm_out.weight"), ("out.0.bias", "conv_norm_out.bias"), ("out.2.weight", "conv_out.weight"), ("out.2.bias", "conv_out.bias"), ] unet_conversion_map_resnet = [ # (stable-diffusion, HF Diffusers) ("in_layers.0", "norm1"), ("in_layers.2", "conv1"), ("out_layers.0", "norm2"), ("out_layers.3", "conv2"), ("emb_layers.1", "time_emb_proj"), ("skip_connection", "conv_shortcut"), ] unet_conversion_map_layer = [] # hardcoded number of downblocks and resnets/attentions... # would need smarter logic for other networks. for i in range(4): # loop over downblocks/upblocks for j in range(2): # loop over resnets/attentions for downblocks hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}." sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0." unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix)) if i < 3: # no attention layers in down_blocks.3 hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}." sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1." unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix)) for j in range(3): # loop over resnets/attentions for upblocks hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}." sd_up_res_prefix = f"output_blocks.{3*i + j}.0." unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix)) if i > 0: # no attention layers in up_blocks.0 hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}." sd_up_atn_prefix = f"output_blocks.{3*i + j}.1." unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix)) if i < 3: # no downsample in down_blocks.3 hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv." sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op." unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix)) # no upsample in up_blocks.3 hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0." sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}." unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix)) hf_mid_atn_prefix = "mid_block.attentions.0." sd_mid_atn_prefix = "middle_block.1." unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix)) for j in range(2): hf_mid_res_prefix = f"mid_block.resnets.{j}." sd_mid_res_prefix = f"middle_block.{2*j}." unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix)) def convert_unet_state_dict(unet_state_dict): # buyer beware: this is a *brittle* function, # and correct output requires that all of these pieces interact in # the exact order in which I have arranged them. 
mapping = {k: k for k in unet_state_dict.keys()} for sd_name, hf_name in unet_conversion_map: mapping[hf_name] = sd_name for k, v in mapping.items(): if "resnets" in k: for sd_part, hf_part in unet_conversion_map_resnet: v = v.replace(hf_part, sd_part) mapping[k] = v for k, v in mapping.items(): for sd_part, hf_part in unet_conversion_map_layer: v = v.replace(hf_part, sd_part) mapping[k] = v new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()} return new_state_dict # ================# # VAE Conversion # # ================# vae_conversion_map = [ # (stable-diffusion, HF Diffusers) ("nin_shortcut", "conv_shortcut"), ("norm_out", "conv_norm_out"), ("mid.attn_1.", "mid_block.attentions.0."), ] for i in range(4): # down_blocks have two resnets for j in range(2): hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}." sd_down_prefix = f"encoder.down.{i}.block.{j}." vae_conversion_map.append((sd_down_prefix, hf_down_prefix)) if i < 3: hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0." sd_downsample_prefix = f"down.{i}.downsample." vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix)) hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0." sd_upsample_prefix = f"up.{3-i}.upsample." vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix)) # up_blocks have three resnets # also, up blocks in hf are numbered in reverse from sd for j in range(3): hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}." sd_up_prefix = f"decoder.up.{3-i}.block.{j}." vae_conversion_map.append((sd_up_prefix, hf_up_prefix)) # this part accounts for mid blocks in both the encoder and the decoder for i in range(2): hf_mid_res_prefix = f"mid_block.resnets.{i}." sd_mid_res_prefix = f"mid.block_{i+1}." vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix)) vae_conversion_map_attn = [ # (stable-diffusion, HF Diffusers) ("norm.", "group_norm."), ("q.", "query."), ("k.", "key."), ("v.", "value."), ("proj_out.", "proj_attn."), ] # This is probably not the most ideal solution, but it does work. 
vae_extra_conversion_map = [ ("to_q", "q"), ("to_k", "k"), ("to_v", "v"), ("to_out.0", "proj_out"), ] def reshape_weight_for_sd(w): # convert HF linear weights to SD conv2d weights return w.reshape(*w.shape, 1, 1) def convert_vae_state_dict(vae_state_dict): mapping = {k: k for k in vae_state_dict.keys()} for k, v in mapping.items(): for sd_part, hf_part in vae_conversion_map: v = v.replace(hf_part, sd_part) mapping[k] = v for k, v in mapping.items(): if "attentions" in k: for sd_part, hf_part in vae_conversion_map_attn: v = v.replace(hf_part, sd_part) mapping[k] = v new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()} weights_to_convert = ["q", "k", "v", "proj_out"] keys_to_rename = {} for k, v in new_state_dict.items(): for weight_name in weights_to_convert: if f"mid.attn_1.{weight_name}.weight" in k: print(f"Reshaping {k} for SD format") new_state_dict[k] = reshape_weight_for_sd(v) for weight_name, real_weight_name in vae_extra_conversion_map: if f"mid.attn_1.{weight_name}.weight" in k or f"mid.attn_1.{weight_name}.bias" in k: keys_to_rename[k] = k.replace(weight_name, real_weight_name) for k, v in keys_to_rename.items(): if k in new_state_dict: print(f"Renaming {k} to {v}") new_state_dict[v] = reshape_weight_for_sd(new_state_dict[k]) del new_state_dict[k] return new_state_dict # =========================# # Text Encoder Conversion # # =========================# textenc_conversion_lst = [ # (stable-diffusion, HF Diffusers) ("resblocks.", "text_model.encoder.layers."), ("ln_1", "layer_norm1"), ("ln_2", "layer_norm2"), (".c_fc.", ".fc1."), (".c_proj.", ".fc2."), (".attn", ".self_attn"), ("ln_final.", "transformer.text_model.final_layer_norm."), ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"), ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"), ] protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst} textenc_pattern = re.compile("|".join(protected.keys())) # Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp code2idx = {"q": 0, "k": 1, "v": 2} def convert_text_enc_state_dict_v20(text_enc_dict): new_state_dict = {} capture_qkv_weight = {} capture_qkv_bias = {} for k, v in text_enc_dict.items(): if ( k.endswith(".self_attn.q_proj.weight") or k.endswith(".self_attn.k_proj.weight") or k.endswith(".self_attn.v_proj.weight") ): k_pre = k[: -len(".q_proj.weight")] k_code = k[-len("q_proj.weight")] if k_pre not in capture_qkv_weight: capture_qkv_weight[k_pre] = [None, None, None] capture_qkv_weight[k_pre][code2idx[k_code]] = v continue if ( k.endswith(".self_attn.q_proj.bias") or k.endswith(".self_attn.k_proj.bias") or k.endswith(".self_attn.v_proj.bias") ): k_pre = k[: -len(".q_proj.bias")] k_code = k[-len("q_proj.bias")] if k_pre not in capture_qkv_bias: capture_qkv_bias[k_pre] = [None, None, None] capture_qkv_bias[k_pre][code2idx[k_code]] = v continue relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k) new_state_dict[relabelled_key] = v for k_pre, tensors in capture_qkv_weight.items(): if None in tensors: raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing") relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre) new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors) for k_pre, tensors in capture_qkv_bias.items(): if None in tensors: raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing") 
relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre) new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors) return new_state_dict def convert_text_enc_state_dict(text_enc_dict): return text_enc_dict if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.") parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.") parser.add_argument("--half", action="store_true", help="Save weights in half precision.") parser.add_argument( "--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt." ) args = parser.parse_args() assert args.model_path is not None, "Must provide a model path!" assert args.checkpoint_path is not None, "Must provide a checkpoint path!" # Path for safetensors unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors") vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors") text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors") # Load models from safetensors if it exists, if it doesn't pytorch if osp.exists(unet_path): unet_state_dict = load_file(unet_path, device="cpu") else: unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin") unet_state_dict = torch.load(unet_path, map_location="cpu") if osp.exists(vae_path): vae_state_dict = load_file(vae_path, device="cpu") else: vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin") vae_state_dict = torch.load(vae_path, map_location="cpu") if osp.exists(text_enc_path): text_enc_dict = load_file(text_enc_path, device="cpu") else: text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin") text_enc_dict = torch.load(text_enc_path, map_location="cpu") # Convert the UNet model unet_state_dict = convert_unet_state_dict(unet_state_dict) unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()} # Convert the VAE model vae_state_dict = convert_vae_state_dict(vae_state_dict) vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()} # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict if is_v20_model: # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()} text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict) text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()} else: text_enc_dict = convert_text_enc_state_dict(text_enc_dict) text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()} # Put together new checkpoint state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict} if args.half: state_dict = {k: v.half() for k, v in state_dict.items()} if args.use_safetensors: save_file(state_dict, args.checkpoint_path) else: state_dict = {"state_dict": state_dict} torch.save(state_dict, args.checkpoint_path)
diffusers/scripts/convert_diffusers_to_original_stable_diffusion.py/0
{ "file_path": "diffusers/scripts/convert_diffusers_to_original_stable_diffusion.py", "repo_id": "diffusers", "token_count": 6226 }
102
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Conversion script for the Versatile Stable Diffusion checkpoints. """ import argparse from argparse import Namespace import torch from transformers import ( CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel, VersatileDiffusionPipeline, ) from diffusers.pipelines.versatile_diffusion.modeling_text_unet import UNetFlatConditionModel SCHEDULER_CONFIG = Namespace( **{ "beta_linear_start": 0.00085, "beta_linear_end": 0.012, "timesteps": 1000, "scale_factor": 0.18215, } ) IMAGE_UNET_CONFIG = Namespace( **{ "input_channels": 4, "model_channels": 320, "output_channels": 4, "num_noattn_blocks": [2, 2, 2, 2], "channel_mult": [1, 2, 4, 4], "with_attn": [True, True, True, False], "num_heads": 8, "context_dim": 768, "use_checkpoint": True, } ) TEXT_UNET_CONFIG = Namespace( **{ "input_channels": 768, "model_channels": 320, "output_channels": 768, "num_noattn_blocks": [2, 2, 2, 2], "channel_mult": [1, 2, 4, 4], "second_dim": [4, 4, 4, 4], "with_attn": [True, True, True, False], "num_heads": 8, "context_dim": 768, "use_checkpoint": True, } ) AUTOENCODER_CONFIG = Namespace( **{ "double_z": True, "z_channels": 4, "resolution": 256, "in_channels": 3, "out_ch": 3, "ch": 128, "ch_mult": [1, 2, 4, 4], "num_res_blocks": 2, "attn_resolutions": [], "dropout": 0.0, } ) def shave_segments(path, n_shave_prefix_segments=1): """ Removes segments. Positive values shave the first segments, negative shave the last segments. 
""" if n_shave_prefix_segments >= 0: return ".".join(path.split(".")[n_shave_prefix_segments:]) else: return ".".join(path.split(".")[:n_shave_prefix_segments]) def renew_resnet_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside resnets to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item.replace("in_layers.0", "norm1") new_item = new_item.replace("in_layers.2", "conv1") new_item = new_item.replace("out_layers.0", "norm2") new_item = new_item.replace("out_layers.3", "conv2") new_item = new_item.replace("emb_layers.1", "time_emb_proj") new_item = new_item.replace("skip_connection", "conv_shortcut") new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({"old": old_item, "new": new_item}) return mapping def renew_vae_resnet_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside resnets to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item new_item = new_item.replace("nin_shortcut", "conv_shortcut") new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({"old": old_item, "new": new_item}) return mapping def renew_attention_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside attentions to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item # new_item = new_item.replace('norm.weight', 'group_norm.weight') # new_item = new_item.replace('norm.bias', 'group_norm.bias') # new_item = new_item.replace('proj_out.weight', 'proj_attn.weight') # new_item = new_item.replace('proj_out.bias', 'proj_attn.bias') # new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({"old": old_item, "new": new_item}) return mapping def renew_vae_attention_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside attentions to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item new_item = new_item.replace("norm.weight", "group_norm.weight") new_item = new_item.replace("norm.bias", "group_norm.bias") new_item = new_item.replace("q.weight", "query.weight") new_item = new_item.replace("q.bias", "query.bias") new_item = new_item.replace("k.weight", "key.weight") new_item = new_item.replace("k.bias", "key.bias") new_item = new_item.replace("v.weight", "value.weight") new_item = new_item.replace("v.bias", "value.bias") new_item = new_item.replace("proj_out.weight", "proj_attn.weight") new_item = new_item.replace("proj_out.bias", "proj_attn.bias") new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({"old": old_item, "new": new_item}) return mapping def assign_to_checkpoint( paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None ): """ This does the final conversion step: take locally converted weights and apply a global renaming to them. It splits attention layers, and takes into account additional replacements that may arise. Assigns the weights to the new checkpoint. """ assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): old_tensor = old_checkpoint[path] channels = old_tensor.shape[0] // 3 target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1) num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3 old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:]) query, key, value = old_tensor.split(channels // num_heads, dim=1) checkpoint[path_map["query"]] = query.reshape(target_shape) checkpoint[path_map["key"]] = key.reshape(target_shape) checkpoint[path_map["value"]] = value.reshape(target_shape) for path in paths: new_path = path["new"] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here new_path = new_path.replace("middle_block.0", "mid_block.resnets.0") new_path = new_path.replace("middle_block.1", "mid_block.attentions.0") new_path = new_path.replace("middle_block.2", "mid_block.resnets.1") if additional_replacements is not None: for replacement in additional_replacements: new_path = new_path.replace(replacement["old"], replacement["new"]) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0] elif path["old"] in old_checkpoint: checkpoint[new_path] = old_checkpoint[path["old"]] def conv_attn_to_linear(checkpoint): keys = list(checkpoint.keys()) attn_keys = ["query.weight", "key.weight", "value.weight"] for key in keys: if ".".join(key.split(".")[-2:]) in attn_keys: if checkpoint[key].ndim > 2: checkpoint[key] = checkpoint[key][:, :, 0, 0] elif "proj_attn.weight" in key: if checkpoint[key].ndim > 2: checkpoint[key] = checkpoint[key][:, :, 0] def create_image_unet_diffusers_config(unet_params): """ Creates a config for the diffusers based on the config of the VD model. """ block_out_channels = [unet_params.model_channels * mult for mult in unet_params.channel_mult] down_block_types = [] resolution = 1 for i in range(len(block_out_channels)): block_type = "CrossAttnDownBlock2D" if unet_params.with_attn[i] else "DownBlock2D" down_block_types.append(block_type) if i != len(block_out_channels) - 1: resolution *= 2 up_block_types = [] for i in range(len(block_out_channels)): block_type = "CrossAttnUpBlock2D" if unet_params.with_attn[-i - 1] else "UpBlock2D" up_block_types.append(block_type) resolution //= 2 if not all(n == unet_params.num_noattn_blocks[0] for n in unet_params.num_noattn_blocks): raise ValueError("Not all num_res_blocks are equal, which is not supported in this script.") config = { "sample_size": None, "in_channels": unet_params.input_channels, "out_channels": unet_params.output_channels, "down_block_types": tuple(down_block_types), "up_block_types": tuple(up_block_types), "block_out_channels": tuple(block_out_channels), "layers_per_block": unet_params.num_noattn_blocks[0], "cross_attention_dim": unet_params.context_dim, "attention_head_dim": unet_params.num_heads, } return config def create_text_unet_diffusers_config(unet_params): """ Creates a config for the diffusers based on the config of the VD model. 
""" block_out_channels = [unet_params.model_channels * mult for mult in unet_params.channel_mult] down_block_types = [] resolution = 1 for i in range(len(block_out_channels)): block_type = "CrossAttnDownBlockFlat" if unet_params.with_attn[i] else "DownBlockFlat" down_block_types.append(block_type) if i != len(block_out_channels) - 1: resolution *= 2 up_block_types = [] for i in range(len(block_out_channels)): block_type = "CrossAttnUpBlockFlat" if unet_params.with_attn[-i - 1] else "UpBlockFlat" up_block_types.append(block_type) resolution //= 2 if not all(n == unet_params.num_noattn_blocks[0] for n in unet_params.num_noattn_blocks): raise ValueError("Not all num_res_blocks are equal, which is not supported in this script.") config = { "sample_size": None, "in_channels": (unet_params.input_channels, 1, 1), "out_channels": (unet_params.output_channels, 1, 1), "down_block_types": tuple(down_block_types), "up_block_types": tuple(up_block_types), "block_out_channels": tuple(block_out_channels), "layers_per_block": unet_params.num_noattn_blocks[0], "cross_attention_dim": unet_params.context_dim, "attention_head_dim": unet_params.num_heads, } return config def create_vae_diffusers_config(vae_params): """ Creates a config for the diffusers based on the config of the VD model. """ block_out_channels = [vae_params.ch * mult for mult in vae_params.ch_mult] down_block_types = ["DownEncoderBlock2D"] * len(block_out_channels) up_block_types = ["UpDecoderBlock2D"] * len(block_out_channels) config = { "sample_size": vae_params.resolution, "in_channels": vae_params.in_channels, "out_channels": vae_params.out_ch, "down_block_types": tuple(down_block_types), "up_block_types": tuple(up_block_types), "block_out_channels": tuple(block_out_channels), "latent_channels": vae_params.z_channels, "layers_per_block": vae_params.num_res_blocks, } return config def create_diffusers_scheduler(original_config): schedular = DDIMScheduler( num_train_timesteps=original_config.model.params.timesteps, beta_start=original_config.model.params.linear_start, beta_end=original_config.model.params.linear_end, beta_schedule="scaled_linear", ) return schedular def convert_vd_unet_checkpoint(checkpoint, config, unet_key, extract_ema=False): """ Takes a state dict and a config, and returns a converted checkpoint. """ # extract state_dict for UNet unet_state_dict = {} keys = list(checkpoint.keys()) # at least a 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA if sum(k.startswith("model_ema") for k in keys) > 100: print("Checkpoint has both EMA and non-EMA weights.") if extract_ema: print( "In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA" " weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag." ) for key in keys: if key.startswith("model.diffusion_model"): flat_ema_key = "model_ema." + "".join(key.split(".")[1:]) unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(flat_ema_key) else: print( "In this conversion only the non-EMA weights are extracted. If you want to instead extract the EMA" " weights (usually better for inference), please make sure to add the `--extract_ema` flag." 
) for key in keys: if key.startswith(unet_key): unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(key) new_checkpoint = {} new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["model.diffusion_model.time_embed.0.weight"] new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["model.diffusion_model.time_embed.0.bias"] new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["model.diffusion_model.time_embed.2.weight"] new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["model.diffusion_model.time_embed.2.bias"] new_checkpoint["conv_in.weight"] = unet_state_dict["input_blocks.0.0.weight"] new_checkpoint["conv_in.bias"] = unet_state_dict["input_blocks.0.0.bias"] new_checkpoint["conv_norm_out.weight"] = unet_state_dict["out.0.weight"] new_checkpoint["conv_norm_out.bias"] = unet_state_dict["out.0.bias"] new_checkpoint["conv_out.weight"] = unet_state_dict["out.2.weight"] new_checkpoint["conv_out.bias"] = unet_state_dict["out.2.bias"] # Retrieves the keys for the input blocks only num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "input_blocks" in layer}) input_blocks = { layer_id: [key for key in unet_state_dict if f"input_blocks.{layer_id}" in key] for layer_id in range(num_input_blocks) } # Retrieves the keys for the middle blocks only num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "middle_block" in layer}) middle_blocks = { layer_id: [key for key in unet_state_dict if f"middle_block.{layer_id}" in key] for layer_id in range(num_middle_blocks) } # Retrieves the keys for the output blocks only num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "output_blocks" in layer}) output_blocks = { layer_id: [key for key in unet_state_dict if f"output_blocks.{layer_id}" in key] for layer_id in range(num_output_blocks) } for i in range(1, num_input_blocks): block_id = (i - 1) // (config["layers_per_block"] + 1) layer_in_block_id = (i - 1) % (config["layers_per_block"] + 1) resnets = [ key for key in input_blocks[i] if f"input_blocks.{i}.0" in key and f"input_blocks.{i}.0.op" not in key ] attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key] if f"input_blocks.{i}.0.op.weight" in unet_state_dict: new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = unet_state_dict.pop( f"input_blocks.{i}.0.op.weight" ) new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = unet_state_dict.pop( f"input_blocks.{i}.0.op.bias" ) elif f"input_blocks.{i}.0.weight" in unet_state_dict: # text_unet uses linear layers in place of downsamplers shape = unet_state_dict[f"input_blocks.{i}.0.weight"].shape if shape[0] != shape[1]: continue new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.weight"] = unet_state_dict.pop( f"input_blocks.{i}.0.weight" ) new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.bias"] = unet_state_dict.pop( f"input_blocks.{i}.0.bias" ) paths = renew_resnet_paths(resnets) meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"} assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) if len(attentions): paths = renew_attention_paths(attentions) meta_path = {"old": f"input_blocks.{i}.1", "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}"} assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) resnet_0 = middle_blocks[0] attentions 
= middle_blocks[1] resnet_1 = middle_blocks[2] resnet_0_paths = renew_resnet_paths(resnet_0) assign_to_checkpoint(resnet_0_paths, new_checkpoint, unet_state_dict, config=config) resnet_1_paths = renew_resnet_paths(resnet_1) assign_to_checkpoint(resnet_1_paths, new_checkpoint, unet_state_dict, config=config) attentions_paths = renew_attention_paths(attentions) meta_path = {"old": "middle_block.1", "new": "mid_block.attentions.0"} assign_to_checkpoint( attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) for i in range(num_output_blocks): block_id = i // (config["layers_per_block"] + 1) layer_in_block_id = i % (config["layers_per_block"] + 1) output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]] output_block_list = {} for layer in output_block_layers: layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1) if layer_id in output_block_list: output_block_list[layer_id].append(layer_name) else: output_block_list[layer_id] = [layer_name] if len(output_block_list) > 1: resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key] attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key] paths = renew_resnet_paths(resnets) meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"} assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) if ["conv.weight", "conv.bias"] in output_block_list.values(): index = list(output_block_list.values()).index(["conv.weight", "conv.bias"]) new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = unet_state_dict[ f"output_blocks.{i}.{index}.conv.weight" ] new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = unet_state_dict[ f"output_blocks.{i}.{index}.conv.bias" ] # Clear attentions as they have been attributed above. if len(attentions) == 2: attentions = [] elif f"output_blocks.{i}.1.weight" in unet_state_dict: # text_unet uses linear layers in place of upsamplers shape = unet_state_dict[f"output_blocks.{i}.1.weight"].shape if shape[0] != shape[1]: continue new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.weight"] = unet_state_dict.pop( f"output_blocks.{i}.1.weight" ) new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.bias"] = unet_state_dict.pop( f"output_blocks.{i}.1.bias" ) # Clear attentions as they have been attributed above. 
if len(attentions) == 2: attentions = [] elif f"output_blocks.{i}.2.weight" in unet_state_dict: # text_unet uses linear layers in place of upsamplers shape = unet_state_dict[f"output_blocks.{i}.2.weight"].shape if shape[0] != shape[1]: continue new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.weight"] = unet_state_dict.pop( f"output_blocks.{i}.2.weight" ) new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.bias"] = unet_state_dict.pop( f"output_blocks.{i}.2.bias" ) if len(attentions): paths = renew_attention_paths(attentions) meta_path = { "old": f"output_blocks.{i}.1", "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}", } assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) else: resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1) for path in resnet_0_paths: old_path = ".".join(["output_blocks", str(i), path["old"]]) new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]]) new_checkpoint[new_path] = unet_state_dict[old_path] return new_checkpoint def convert_vd_vae_checkpoint(checkpoint, config): # extract state dict for VAE vae_state_dict = {} keys = list(checkpoint.keys()) for key in keys: vae_state_dict[key] = checkpoint.get(key) new_checkpoint = {} new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"] new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"] new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"] new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"] new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"] new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"] new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"] new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"] new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"] new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"] new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"] new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"] new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"] new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"] new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"] new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"] # Retrieves the keys for the encoder down blocks only num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer}) down_blocks = { layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks) } # Retrieves the keys for the decoder up blocks only num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer}) up_blocks = { layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks) } for i in range(num_down_blocks): resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key] if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict: new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop( 
f"encoder.down.{i}.downsample.conv.weight" ) new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop( f"encoder.down.{i}.downsample.conv.bias" ) paths = renew_vae_resnet_paths(resnets) meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key] num_mid_res_blocks = 2 for i in range(1, num_mid_res_blocks + 1): resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key] paths = renew_vae_resnet_paths(resnets) meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key] paths = renew_vae_attention_paths(mid_attentions) meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) conv_attn_to_linear(new_checkpoint) for i in range(num_up_blocks): block_id = num_up_blocks - 1 - i resnets = [ key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key ] if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict: new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[ f"decoder.up.{block_id}.upsample.conv.weight" ] new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[ f"decoder.up.{block_id}.upsample.conv.bias" ] paths = renew_vae_resnet_paths(resnets) meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key] num_mid_res_blocks = 2 for i in range(1, num_mid_res_blocks + 1): resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key] paths = renew_vae_resnet_paths(resnets) meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key] paths = renew_vae_attention_paths(mid_attentions) meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) conv_attn_to_linear(new_checkpoint) return new_checkpoint if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--unet_checkpoint_path", default=None, type=str, required=False, help="Path to the checkpoint to convert." ) parser.add_argument( "--vae_checkpoint_path", default=None, type=str, required=False, help="Path to the checkpoint to convert." ) parser.add_argument( "--optimus_checkpoint_path", default=None, type=str, required=False, help="Path to the checkpoint to convert." ) parser.add_argument( "--scheduler_type", default="pndm", type=str, help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']", ) parser.add_argument( "--extract_ema", action="store_true", help=( "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights" " or not. 
Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield" " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning." ), ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") args = parser.parse_args() scheduler_config = SCHEDULER_CONFIG num_train_timesteps = scheduler_config.timesteps beta_start = scheduler_config.beta_linear_start beta_end = scheduler_config.beta_linear_end if args.scheduler_type == "pndm": scheduler = PNDMScheduler( beta_end=beta_end, beta_schedule="scaled_linear", beta_start=beta_start, num_train_timesteps=num_train_timesteps, skip_prk_steps=True, steps_offset=1, ) elif args.scheduler_type == "lms": scheduler = LMSDiscreteScheduler(beta_start=beta_start, beta_end=beta_end, beta_schedule="scaled_linear") elif args.scheduler_type == "euler": scheduler = EulerDiscreteScheduler(beta_start=beta_start, beta_end=beta_end, beta_schedule="scaled_linear") elif args.scheduler_type == "euler-ancestral": scheduler = EulerAncestralDiscreteScheduler( beta_start=beta_start, beta_end=beta_end, beta_schedule="scaled_linear" ) elif args.scheduler_type == "dpm": scheduler = DPMSolverMultistepScheduler( beta_start=beta_start, beta_end=beta_end, beta_schedule="scaled_linear" ) elif args.scheduler_type == "ddim": scheduler = DDIMScheduler( beta_start=beta_start, beta_end=beta_end, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, steps_offset=1, ) else: raise ValueError(f"Scheduler of type {args.scheduler_type} doesn't exist!") # Convert the UNet2DConditionModel models. if args.unet_checkpoint_path is not None: # image UNet image_unet_config = create_image_unet_diffusers_config(IMAGE_UNET_CONFIG) checkpoint = torch.load(args.unet_checkpoint_path) converted_image_unet_checkpoint = convert_vd_unet_checkpoint( checkpoint, image_unet_config, unet_key="model.diffusion_model.unet_image.", extract_ema=args.extract_ema ) image_unet = UNet2DConditionModel(**image_unet_config) image_unet.load_state_dict(converted_image_unet_checkpoint) # text UNet text_unet_config = create_text_unet_diffusers_config(TEXT_UNET_CONFIG) converted_text_unet_checkpoint = convert_vd_unet_checkpoint( checkpoint, text_unet_config, unet_key="model.diffusion_model.unet_text.", extract_ema=args.extract_ema ) text_unet = UNetFlatConditionModel(**text_unet_config) text_unet.load_state_dict(converted_text_unet_checkpoint) # Convert the VAE model. if args.vae_checkpoint_path is not None: vae_config = create_vae_diffusers_config(AUTOENCODER_CONFIG) checkpoint = torch.load(args.vae_checkpoint_path) converted_vae_checkpoint = convert_vd_vae_checkpoint(checkpoint, vae_config) vae = AutoencoderKL(**vae_config) vae.load_state_dict(converted_vae_checkpoint) tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14") image_feature_extractor = CLIPImageProcessor.from_pretrained("openai/clip-vit-large-patch14") text_encoder = CLIPTextModelWithProjection.from_pretrained("openai/clip-vit-large-patch14") image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14") pipe = VersatileDiffusionPipeline( scheduler=scheduler, tokenizer=tokenizer, image_feature_extractor=image_feature_extractor, text_encoder=text_encoder, image_encoder=image_encoder, image_unet=image_unet, text_unet=text_unet, vae=vae, ) pipe.save_pretrained(args.dump_path)
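The key-renaming helpers defined earlier in this script (shave_segments and the renew_*_paths functions) only do string surgery on dotted parameter paths. A quick illustration with a hypothetical key, showing what positive and negative values of n_shave_prefix_segments would produce:

path = "output_blocks.3.1.norm.weight"      # hypothetical checkpoint key
print(".".join(path.split(".")[2:]))        # -> 1.norm.weight            (n_shave_prefix_segments=2)
print(".".join(path.split(".")[:-1]))       # -> output_blocks.3.1.norm   (n_shave_prefix_segments=-1)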
diffusers/scripts/convert_versatile_diffusion_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_versatile_diffusion_to_diffusers.py", "repo_id": "diffusers", "token_count": 14926 }
103
from .value_guided_sampling import ValueGuidedRLPipeline
diffusers/src/diffusers/experimental/rl/__init__.py/0
{ "file_path": "diffusers/src/diffusers/experimental/rl/__init__.py", "repo_id": "diffusers", "token_count": 17 }
104
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ..utils import ( DIFFUSERS_SLOW_IMPORT, _LazyModule, is_flax_available, is_torch_available, ) _import_structure = {} if is_torch_available(): _import_structure["adapter"] = ["MultiAdapter", "T2IAdapter"] _import_structure["autoencoders.autoencoder_asym_kl"] = ["AsymmetricAutoencoderKL"] _import_structure["autoencoders.autoencoder_kl"] = ["AutoencoderKL"] _import_structure["autoencoders.autoencoder_kl_temporal_decoder"] = ["AutoencoderKLTemporalDecoder"] _import_structure["autoencoders.autoencoder_tiny"] = ["AutoencoderTiny"] _import_structure["autoencoders.consistency_decoder_vae"] = ["ConsistencyDecoderVAE"] _import_structure["controlnet"] = ["ControlNetModel"] _import_structure["dual_transformer_2d"] = ["DualTransformer2DModel"] _import_structure["embeddings"] = ["ImageProjection"] _import_structure["modeling_utils"] = ["ModelMixin"] _import_structure["transformers.prior_transformer"] = ["PriorTransformer"] _import_structure["transformers.t5_film_transformer"] = ["T5FilmDecoder"] _import_structure["transformers.transformer_2d"] = ["Transformer2DModel"] _import_structure["transformers.transformer_temporal"] = ["TransformerTemporalModel"] _import_structure["unets.unet_1d"] = ["UNet1DModel"] _import_structure["unets.unet_2d"] = ["UNet2DModel"] _import_structure["unets.unet_2d_condition"] = ["UNet2DConditionModel"] _import_structure["unets.unet_3d_condition"] = ["UNet3DConditionModel"] _import_structure["unets.unet_i2vgen_xl"] = ["I2VGenXLUNet"] _import_structure["unets.unet_kandinsky3"] = ["Kandinsky3UNet"] _import_structure["unets.unet_motion_model"] = ["MotionAdapter", "UNetMotionModel"] _import_structure["unets.unet_spatio_temporal_condition"] = ["UNetSpatioTemporalConditionModel"] _import_structure["unets.uvit_2d"] = ["UVit2DModel"] _import_structure["vq_model"] = ["VQModel"] if is_flax_available(): _import_structure["controlnet_flax"] = ["FlaxControlNetModel"] _import_structure["unets.unet_2d_condition_flax"] = ["FlaxUNet2DConditionModel"] _import_structure["vae_flax"] = ["FlaxAutoencoderKL"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: if is_torch_available(): from .adapter import MultiAdapter, T2IAdapter from .autoencoders import ( AsymmetricAutoencoderKL, AutoencoderKL, AutoencoderKLTemporalDecoder, AutoencoderTiny, ConsistencyDecoderVAE, ) from .controlnet import ControlNetModel from .embeddings import ImageProjection from .modeling_utils import ModelMixin from .transformers import ( DualTransformer2DModel, PriorTransformer, T5FilmDecoder, Transformer2DModel, TransformerTemporalModel, ) from .unets import ( I2VGenXLUNet, Kandinsky3UNet, MotionAdapter, UNet1DModel, UNet2DConditionModel, UNet2DModel, UNet3DConditionModel, UNetMotionModel, UNetSpatioTemporalConditionModel, UVit2DModel, ) from .vq_model import VQModel if is_flax_available(): from .controlnet_flax import FlaxControlNetModel from .unets import FlaxUNet2DConditionModel from .vae_flax import 
FlaxAutoencoderKL else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
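The _import_structure / _LazyModule pattern above defers the heavy torch and flax imports until an attribute is actually requested. The following is a generic, simplified sketch of that idea for illustration only; it is not the diffusers _LazyModule implementation, and the class name is made up.

import importlib
import types


class LazyModule(types.ModuleType):
    """Generic sketch of a lazy module: real imports happen on first attribute access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute name to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # import the defining submodule lazily, then forward the attribute
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)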
diffusers/src/diffusers/models/__init__.py/0
{ "file_path": "diffusers/src/diffusers/models/__init__.py", "repo_id": "diffusers", "token_count": 1724 }
105
from ...utils import is_torch_available if is_torch_available(): from .dual_transformer_2d import DualTransformer2DModel from .prior_transformer import PriorTransformer from .t5_film_transformer import T5FilmDecoder from .transformer_2d import Transformer2DModel from .transformer_temporal import TransformerTemporalModel
diffusers/src/diffusers/models/transformers/__init__.py/0
{ "file_path": "diffusers/src/diffusers/models/transformers/__init__.py", "repo_id": "diffusers", "token_count": 110 }
106
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import flax.linen as nn import jax.numpy as jnp from ..attention_flax import FlaxTransformer2DModel from ..resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D class FlaxCrossAttnDownBlock2D(nn.Module): r""" Cross Attention 2D Downsizing block - original architecture from Unet transformers: https://arxiv.org/abs/2103.06104 Parameters: in_channels (:obj:`int`): Input channels out_channels (:obj:`int`): Output channels dropout (:obj:`float`, *optional*, defaults to 0.0): Dropout rate num_layers (:obj:`int`, *optional*, defaults to 1): Number of attention blocks layers num_attention_heads (:obj:`int`, *optional*, defaults to 1): Number of attention heads of each spatial transformer block add_downsample (:obj:`bool`, *optional*, defaults to `True`): Whether to add downsampling layer before each final output use_memory_efficient_attention (`bool`, *optional*, defaults to `False`): enable memory efficient attention https://arxiv.org/abs/2112.05682 split_head_dim (`bool`, *optional*, defaults to `False`): Whether to split the head dimension into a new axis for the self-attention computation. In most cases, enabling this flag should speed up the computation for Stable Diffusion 2.x and Stable Diffusion XL. 
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): Parameters `dtype` """ in_channels: int out_channels: int dropout: float = 0.0 num_layers: int = 1 num_attention_heads: int = 1 add_downsample: bool = True use_linear_projection: bool = False only_cross_attention: bool = False use_memory_efficient_attention: bool = False split_head_dim: bool = False dtype: jnp.dtype = jnp.float32 transformer_layers_per_block: int = 1 def setup(self): resnets = [] attentions = [] for i in range(self.num_layers): in_channels = self.in_channels if i == 0 else self.out_channels res_block = FlaxResnetBlock2D( in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, ) resnets.append(res_block) attn_block = FlaxTransformer2DModel( in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=self.transformer_layers_per_block, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, split_head_dim=self.split_head_dim, dtype=self.dtype, ) attentions.append(attn_block) self.resnets = resnets self.attentions = attentions if self.add_downsample: self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype) def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True): output_states = () for resnet, attn in zip(self.resnets, self.attentions): hidden_states = resnet(hidden_states, temb, deterministic=deterministic) hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic) output_states += (hidden_states,) if self.add_downsample: hidden_states = self.downsamplers_0(hidden_states) output_states += (hidden_states,) return hidden_states, output_states class FlaxDownBlock2D(nn.Module): r""" Flax 2D downsizing block Parameters: in_channels (:obj:`int`): Input channels out_channels (:obj:`int`): Output channels dropout (:obj:`float`, *optional*, defaults to 0.0): Dropout rate num_layers (:obj:`int`, *optional*, defaults to 1): Number of attention blocks layers add_downsample (:obj:`bool`, *optional*, defaults to `True`): Whether to add downsampling layer before each final output dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): Parameters `dtype` """ in_channels: int out_channels: int dropout: float = 0.0 num_layers: int = 1 add_downsample: bool = True dtype: jnp.dtype = jnp.float32 def setup(self): resnets = [] for i in range(self.num_layers): in_channels = self.in_channels if i == 0 else self.out_channels res_block = FlaxResnetBlock2D( in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, ) resnets.append(res_block) self.resnets = resnets if self.add_downsample: self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype) def __call__(self, hidden_states, temb, deterministic=True): output_states = () for resnet in self.resnets: hidden_states = resnet(hidden_states, temb, deterministic=deterministic) output_states += (hidden_states,) if self.add_downsample: hidden_states = self.downsamplers_0(hidden_states) output_states += (hidden_states,) return hidden_states, output_states class FlaxCrossAttnUpBlock2D(nn.Module): r""" Cross Attention 2D Upsampling block - original architecture from Unet transformers: https://arxiv.org/abs/2103.06104 Parameters: in_channels (:obj:`int`): Input channels out_channels (:obj:`int`): Output channels dropout (:obj:`float`, 
*optional*, defaults to 0.0): Dropout rate num_layers (:obj:`int`, *optional*, defaults to 1): Number of attention blocks layers num_attention_heads (:obj:`int`, *optional*, defaults to 1): Number of attention heads of each spatial transformer block add_upsample (:obj:`bool`, *optional*, defaults to `True`): Whether to add upsampling layer before each final output use_memory_efficient_attention (`bool`, *optional*, defaults to `False`): enable memory efficient attention https://arxiv.org/abs/2112.05682 split_head_dim (`bool`, *optional*, defaults to `False`): Whether to split the head dimension into a new axis for the self-attention computation. In most cases, enabling this flag should speed up the computation for Stable Diffusion 2.x and Stable Diffusion XL. dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): Parameters `dtype` """ in_channels: int out_channels: int prev_output_channel: int dropout: float = 0.0 num_layers: int = 1 num_attention_heads: int = 1 add_upsample: bool = True use_linear_projection: bool = False only_cross_attention: bool = False use_memory_efficient_attention: bool = False split_head_dim: bool = False dtype: jnp.dtype = jnp.float32 transformer_layers_per_block: int = 1 def setup(self): resnets = [] attentions = [] for i in range(self.num_layers): res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels res_block = FlaxResnetBlock2D( in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, ) resnets.append(res_block) attn_block = FlaxTransformer2DModel( in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=self.transformer_layers_per_block, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, split_head_dim=self.split_head_dim, dtype=self.dtype, ) attentions.append(attn_block) self.resnets = resnets self.attentions = attentions if self.add_upsample: self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype) def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True): for resnet, attn in zip(self.resnets, self.attentions): # pop res hidden states res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1) hidden_states = resnet(hidden_states, temb, deterministic=deterministic) hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic) if self.add_upsample: hidden_states = self.upsamplers_0(hidden_states) return hidden_states class FlaxUpBlock2D(nn.Module): r""" Flax 2D upsampling block Parameters: in_channels (:obj:`int`): Input channels out_channels (:obj:`int`): Output channels prev_output_channel (:obj:`int`): Output channels from the previous block dropout (:obj:`float`, *optional*, defaults to 0.0): Dropout rate num_layers (:obj:`int`, *optional*, defaults to 1): Number of attention blocks layers add_downsample (:obj:`bool`, *optional*, defaults to `True`): Whether to add downsampling layer before each final output dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): Parameters `dtype` """ in_channels: int out_channels: int prev_output_channel: int dropout: float = 0.0 
num_layers: int = 1 add_upsample: bool = True dtype: jnp.dtype = jnp.float32 def setup(self): resnets = [] for i in range(self.num_layers): res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels res_block = FlaxResnetBlock2D( in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, ) resnets.append(res_block) self.resnets = resnets if self.add_upsample: self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype) def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True): for resnet in self.resnets: # pop res hidden states res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1) hidden_states = resnet(hidden_states, temb, deterministic=deterministic) if self.add_upsample: hidden_states = self.upsamplers_0(hidden_states) return hidden_states class FlaxUNetMidBlock2DCrossAttn(nn.Module): r""" Cross Attention 2D Mid-level block - original architecture from Unet transformers: https://arxiv.org/abs/2103.06104 Parameters: in_channels (:obj:`int`): Input channels dropout (:obj:`float`, *optional*, defaults to 0.0): Dropout rate num_layers (:obj:`int`, *optional*, defaults to 1): Number of attention blocks layers num_attention_heads (:obj:`int`, *optional*, defaults to 1): Number of attention heads of each spatial transformer block use_memory_efficient_attention (`bool`, *optional*, defaults to `False`): enable memory efficient attention https://arxiv.org/abs/2112.05682 split_head_dim (`bool`, *optional*, defaults to `False`): Whether to split the head dimension into a new axis for the self-attention computation. In most cases, enabling this flag should speed up the computation for Stable Diffusion 2.x and Stable Diffusion XL. 
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): Parameters `dtype` """ in_channels: int dropout: float = 0.0 num_layers: int = 1 num_attention_heads: int = 1 use_linear_projection: bool = False use_memory_efficient_attention: bool = False split_head_dim: bool = False dtype: jnp.dtype = jnp.float32 transformer_layers_per_block: int = 1 def setup(self): # there is always at least one resnet resnets = [ FlaxResnetBlock2D( in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype, ) ] attentions = [] for _ in range(self.num_layers): attn_block = FlaxTransformer2DModel( in_channels=self.in_channels, n_heads=self.num_attention_heads, d_head=self.in_channels // self.num_attention_heads, depth=self.transformer_layers_per_block, use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, split_head_dim=self.split_head_dim, dtype=self.dtype, ) attentions.append(attn_block) res_block = FlaxResnetBlock2D( in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype, ) resnets.append(res_block) self.resnets = resnets self.attentions = attentions def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True): hidden_states = self.resnets[0](hidden_states, temb) for attn, resnet in zip(self.attentions, self.resnets[1:]): hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic) hidden_states = resnet(hidden_states, temb, deterministic=deterministic) return hidden_states
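An untested usage sketch for the FlaxCrossAttnDownBlock2D defined above. The argument names follow the dataclass fields in this file; the channel count, spatial size, time-embedding width, and text-context shape are made-up placeholders, and the default deterministic=True means no dropout RNG is needed.

import jax
import jax.numpy as jnp

block = FlaxCrossAttnDownBlock2D(in_channels=320, out_channels=320, num_attention_heads=8)

sample = jnp.zeros((1, 32, 32, 320))             # channels-last feature map (illustrative size)
temb = jnp.zeros((1, 1280))                      # timestep embedding (illustrative width)
encoder_hidden_states = jnp.zeros((1, 77, 768))  # text conditioning (illustrative shape)

variables = block.init(jax.random.PRNGKey(0), sample, temb, encoder_hidden_states)
hidden_states, skip_states = block.apply(variables, sample, temb, encoder_hidden_states)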
diffusers/src/diffusers/models/unets/unet_2d_blocks_flax.py/0
{ "file_path": "diffusers/src/diffusers/models/unets/unet_2d_blocks_flax.py", "repo_id": "diffusers", "token_count": 6962 }
107
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( AmusedImg2ImgPipeline, AmusedInpaintPipeline, AmusedPipeline, ) _dummy_objects.update( { "AmusedPipeline": AmusedPipeline, "AmusedImg2ImgPipeline": AmusedImg2ImgPipeline, "AmusedInpaintPipeline": AmusedInpaintPipeline, } ) else: _import_structure["pipeline_amused"] = ["AmusedPipeline"] _import_structure["pipeline_amused_img2img"] = ["AmusedImg2ImgPipeline"] _import_structure["pipeline_amused_inpaint"] = ["AmusedInpaintPipeline"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( AmusedPipeline, ) else: from .pipeline_amused import AmusedPipeline from .pipeline_amused_img2img import AmusedImg2ImgPipeline from .pipeline_amused_inpaint import AmusedInpaintPipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
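The try/except around OptionalDependencyNotAvailable above swaps in placeholder ("dummy") classes when torch or transformers is missing, so the import itself still succeeds and only fails with a helpful message once the pipeline is actually used. A hand-rolled sketch of that idea follows; the class below is illustrative and is not the real dummy-object implementation.

class _DummyAmusedPipeline:
    """Stand-in exposed when the required backends are not installed (illustrative only)."""

    _required_backends = ["torch", "transformers"]

    def __init__(self, *args, **kwargs):
        raise ImportError(
            f"AmusedPipeline requires the following backends: {self._required_backends}"
        )

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        raise ImportError(
            f"AmusedPipeline requires the following backends: {cls._required_backends}"
        )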
diffusers/src/diffusers/pipelines/amused/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/amused/__init__.py", "repo_id": "diffusers", "token_count": 796 }
108
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from transformers import BertTokenizer from transformers.activations import QuickGELUActivation as QuickGELU from transformers.modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions, ) from transformers.models.blip_2.configuration_blip_2 import Blip2Config, Blip2VisionConfig from transformers.models.blip_2.modeling_blip_2 import ( Blip2Encoder, Blip2PreTrainedModel, Blip2QFormerAttention, Blip2QFormerIntermediate, Blip2QFormerOutput, ) from transformers.pytorch_utils import apply_chunking_to_forward from transformers.utils import ( logging, replace_return_docstrings, ) logger = logging.get_logger(__name__) # There is an implementation of Blip2 in `transformers` : https://github.com/huggingface/transformers/blob/main/src/transformers/models/blip_2/modeling_blip_2.py. # But it doesn't support getting multimodal embeddings. So, this module can be # replaced with a future `transformers` version supports that. class Blip2TextEmbeddings(nn.Module): """Construct the embeddings from word and position embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.config = config def forward( self, input_ids=None, position_ids=None, query_embeds=None, past_key_values_length=0, ): if input_ids is not None: seq_length = input_ids.size()[1] else: seq_length = 0 if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length].clone() if input_ids is not None: embeddings = self.word_embeddings(input_ids) if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings = embeddings + position_embeddings if query_embeds is not None: batch_size = embeddings.shape[0] # repeat the query embeddings for batch size query_embeds = query_embeds.repeat(batch_size, 1, 1) embeddings = torch.cat((query_embeds, embeddings), dim=1) else: embeddings = query_embeds embeddings = embeddings.to(query_embeds.dtype) embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return 
embeddings # Copy-pasted from transformers.models.blip.modeling_blip.BlipVisionEmbeddings with Blip->Blip2 class Blip2VisionEmbeddings(nn.Module): def __init__(self, config: Blip2VisionConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter(torch.randn(1, 1, self.embed_dim)) self.patch_embedding = nn.Conv2d( in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False ) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim)) def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: batch_size = pixel_values.shape[0] target_dtype = self.patch_embedding.weight.dtype patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid] patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) embeddings = embeddings + self.position_embedding[:, : embeddings.size(1), :].to(target_dtype) return embeddings # The Qformer encoder, which takes the visual embeddings, and the text input, to get multimodal embeddings class Blip2QFormerEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList( [Blip2QFormerLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True, query_length=0, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions else None next_decoder_cache = () if use_cache else None for i in range(self.config.num_hidden_layers): layer_module = self.layer[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if getattr(self.config, "gradient_checkpointing", False) and self.training: if use_cache: logger.warning( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, past_key_value, output_attentions, query_length) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, query_length, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if layer_module.has_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) # The layers making up the Qformer encoder class Blip2QFormerLayer(nn.Module): def __init__(self, config, layer_idx): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = Blip2QFormerAttention(config) self.layer_idx = layer_idx if layer_idx % config.cross_attention_frequency == 0: self.crossattention = Blip2QFormerAttention(config, is_cross_attention=True) self.has_cross_attention = True else: self.has_cross_attention = False self.intermediate = Blip2QFormerIntermediate(config) self.intermediate_query = Blip2QFormerIntermediate(config) self.output_query = Blip2QFormerOutput(config) self.output = Blip2QFormerOutput(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, query_length=0, ): # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] if query_length > 0: query_attention_output = attention_output[:, :query_length, :] if self.has_cross_attention: if encoder_hidden_states is None: raise ValueError("encoder_hidden_states must be given for cross-attention layers") cross_attention_outputs = self.crossattention( query_attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, output_attentions=output_attentions, ) query_attention_output = cross_attention_outputs[0] # add cross attentions if we output attention weights outputs = outputs + cross_attention_outputs[1:-1] layer_output = apply_chunking_to_forward( self.feed_forward_chunk_query, self.chunk_size_feed_forward, self.seq_len_dim, query_attention_output, ) if attention_output.shape[1] > query_length: layer_output_text = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output[:, 
query_length:, :], ) layer_output = torch.cat([layer_output, layer_output_text], dim=1) else: layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output, ) outputs = (layer_output,) + outputs outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output def feed_forward_chunk_query(self, attention_output): intermediate_output = self.intermediate_query(attention_output) layer_output = self.output_query(intermediate_output, attention_output) return layer_output # ProjLayer used to project the multimodal Blip2 embeddings to be used in the text encoder class ProjLayer(nn.Module): def __init__(self, in_dim, out_dim, hidden_dim, drop_p=0.1, eps=1e-12): super().__init__() # Dense1 -> Act -> Dense2 -> Drop -> Res -> Norm self.dense1 = nn.Linear(in_dim, hidden_dim) self.act_fn = QuickGELU() self.dense2 = nn.Linear(hidden_dim, out_dim) self.dropout = nn.Dropout(drop_p) self.LayerNorm = nn.LayerNorm(out_dim, eps=eps) def forward(self, x): x_in = x x = self.LayerNorm(x) x = self.dropout(self.dense2(self.act_fn(self.dense1(x)))) + x_in return x # Copy-pasted from transformers.models.blip.modeling_blip.BlipVisionModel with Blip->Blip2, BLIP->BLIP_2 class Blip2VisionModel(Blip2PreTrainedModel): main_input_name = "pixel_values" config_class = Blip2VisionConfig def __init__(self, config: Blip2VisionConfig): super().__init__(config) self.config = config embed_dim = config.hidden_size self.embeddings = Blip2VisionEmbeddings(config) self.pre_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.encoder = Blip2Encoder(config) self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.post_init() @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=Blip2VisionConfig) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") hidden_states = self.embeddings(pixel_values) hidden_states = self.pre_layernorm(hidden_states) encoder_outputs = self.encoder( inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = encoder_outputs[0] last_hidden_state = self.post_layernorm(last_hidden_state) pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) def get_input_embeddings(self): return self.embeddings # Qformer model, used to get multimodal embeddings from the text and image inputs class Blip2QFormerModel(Blip2PreTrainedModel): 
""" Querying Transformer (Q-Former), used in BLIP-2. """ def __init__(self, config: Blip2Config): super().__init__(config) self.config = config self.embeddings = Blip2TextEmbeddings(config.qformer_config) self.visual_encoder = Blip2VisionModel(config.vision_config) self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size)) if not hasattr(config, "tokenizer") or config.tokenizer is None: self.tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", truncation_side="right") else: self.tokenizer = BertTokenizer.from_pretrained(config.tokenizer, truncation_side="right") self.tokenizer.add_special_tokens({"bos_token": "[DEC]"}) self.proj_layer = ProjLayer( in_dim=config.qformer_config.hidden_size, out_dim=config.qformer_config.hidden_size, hidden_dim=config.qformer_config.hidden_size * 4, drop_p=0.1, eps=1e-12, ) self.encoder = Blip2QFormerEncoder(config.qformer_config) self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def get_extended_attention_mask( self, attention_mask: torch.Tensor, input_shape: Tuple[int], device: torch.device, has_query: bool = False, ) -> torch.Tensor: """ Makes broadcastable attention and causal masks so that future and masked tokens are ignored. Arguments: attention_mask (`torch.Tensor`): Mask with ones indicating tokens to attend to, zeros for tokens to ignore. input_shape (`Tuple[int]`): The shape of the input to the model. device (`torch.device`): The device of the input to the model. Returns: `torch.Tensor` The extended attention mask, with a the same dtype as `attention_mask.dtype`. """ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. if attention_mask.dim() == 3: extended_attention_mask = attention_mask[:, None, :, :] elif attention_mask.dim() == 2: # Provided a padding mask of dimensions [batch_size, seq_length] # - the model is an encoder, so make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] extended_attention_mask = attention_mask[:, None, None, :] else: raise ValueError( "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format( input_shape, attention_mask.shape ) ) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 return extended_attention_mask def forward( self, text_input=None, image_input=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of: shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, `optional`): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). """ text = self.tokenizer(text_input, return_tensors="pt", padding=True) text = text.to(self.device) input_ids = text.input_ids batch_size = input_ids.shape[0] query_atts = torch.ones((batch_size, self.query_tokens.size()[1]), dtype=torch.long).to(self.device) attention_mask = torch.cat([query_atts, text.attention_mask], dim=1) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # past_key_values_length past_key_values_length = ( past_key_values[0][0].shape[2] - self.config.query_length if past_key_values is not None else 0 ) query_length = self.query_tokens.shape[1] embedding_output = self.embeddings( input_ids=input_ids, query_embeds=self.query_tokens, past_key_values_length=past_key_values_length, ) # embedding_output = self.layernorm(query_embeds) # embedding_output = self.dropout(embedding_output) input_shape = embedding_output.size()[:-1] batch_size, seq_length = input_shape device = embedding_output.device image_embeds_frozen = self.visual_encoder(image_input).last_hidden_state # image_embeds_frozen = torch.ones_like(image_embeds_frozen) encoder_hidden_states = image_embeds_frozen if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_hidden_states is not None: if isinstance(encoder_hidden_states, list): encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size() else: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if isinstance(encoder_attention_mask, list): encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask] elif encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.qformer_config.num_hidden_layers) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, query_length=query_length, ) sequence_output = encoder_outputs[0] pooled_output = sequence_output[:, 0, :] if not return_dict: return self.proj_layer(sequence_output[:, :query_length, :]) return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, )
diffusers/src/diffusers/pipelines/blip_diffusion/modeling_blip2.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/blip_diffusion/modeling_blip2.py", "repo_id": "diffusers", "token_count": 12076 }
109
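The embedding arithmetic in `Blip2VisionEmbeddings` above (one class token prepended to `(image_size // patch_size) ** 2` patch tokens) is easy to verify in isolation. Below is a minimal sketch using plain `torch` rather than the Blip2 classes; the 224/14/1408 sizes are illustrative assumptions, not values read from any released config.

```py
# Shape walkthrough of the patch-embedding step (assumed sizes, standalone torch).
import torch
import torch.nn as nn

image_size, patch_size, embed_dim = 224, 14, 1408  # assumptions for illustration
patch_embedding = nn.Conv2d(3, embed_dim, kernel_size=patch_size, stride=patch_size, bias=False)

pixel_values = torch.randn(2, 3, image_size, image_size)      # batch of 2 images
patch_embeds = patch_embedding(pixel_values)                  # (2, embed_dim, 16, 16)
patch_embeds = patch_embeds.flatten(2).transpose(1, 2)        # (2, 256, embed_dim)

class_embeds = torch.randn(1, 1, embed_dim).expand(2, 1, -1)  # prepended class token
embeddings = torch.cat([class_embeds, patch_embeds], dim=1)   # (2, 257, embed_dim)
assert embeddings.shape == (2, (image_size // patch_size) ** 2 + 1, embed_dim)
```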
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import List, Optional, Tuple, Union import torch from ...utils import logging from ...utils.torch_utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline logger = logging.get_logger(__name__) # pylint: disable=invalid-name class DanceDiffusionPipeline(DiffusionPipeline): r""" Pipeline for audio generation. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Parameters: unet ([`UNet1DModel`]): A `UNet1DModel` to denoise the encoded audio. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded audio latents. Can be one of [`IPNDMScheduler`]. """ model_cpu_offload_seq = "unet" def __init__(self, unet, scheduler): super().__init__() self.register_modules(unet=unet, scheduler=scheduler) @torch.no_grad() def __call__( self, batch_size: int = 1, num_inference_steps: int = 100, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, audio_length_in_s: Optional[float] = None, return_dict: bool = True, ) -> Union[AudioPipelineOutput, Tuple]: r""" The call function to the pipeline for generation. Args: batch_size (`int`, *optional*, defaults to 1): The number of audio samples to generate. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher-quality audio sample at the expense of slower inference. generator (`torch.Generator`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. audio_length_in_s (`float`, *optional*, defaults to `self.unet.config.sample_size/self.unet.config.sample_rate`): The length of the generated audio sample in seconds. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.AudioPipelineOutput`] instead of a plain tuple. Example: ```py from diffusers import DiffusionPipeline from scipy.io.wavfile import write model_id = "harmonai/maestro-150k" pipe = DiffusionPipeline.from_pretrained(model_id) pipe = pipe.to("cuda") audios = pipe(audio_length_in_s=4.0).audios # To save locally for i, audio in enumerate(audios): write(f"maestro_test_{i}.wav", pipe.unet.sample_rate, audio.transpose()) # To dislay in google colab import IPython.display as ipd for audio in audios: display(ipd.Audio(audio, rate=pipe.unet.sample_rate)) ``` Returns: [`~pipelines.AudioPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.AudioPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated audio. 
""" if audio_length_in_s is None: audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate sample_size = audio_length_in_s * self.unet.config.sample_rate down_scale_factor = 2 ** len(self.unet.up_blocks) if sample_size < 3 * down_scale_factor: raise ValueError( f"{audio_length_in_s} is too small. Make sure it's bigger or equal to" f" {3 * down_scale_factor / self.unet.config.sample_rate}." ) original_sample_size = int(sample_size) if sample_size % down_scale_factor != 0: sample_size = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled" f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising" " process." ) sample_size = int(sample_size) dtype = next(self.unet.parameters()).dtype shape = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) audio = randn_tensor(shape, generator=generator, device=self._execution_device, dtype=dtype) # set step values self.scheduler.set_timesteps(num_inference_steps, device=audio.device) self.scheduler.timesteps = self.scheduler.timesteps.to(dtype) for t in self.progress_bar(self.scheduler.timesteps): # 1. predict noise model_output model_output = self.unet(audio, t).sample # 2. compute previous audio sample: x_t -> t_t-1 audio = self.scheduler.step(model_output, t, audio).prev_sample audio = audio.clamp(-1, 1).float().cpu().numpy() audio = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=audio)
diffusers/src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py", "repo_id": "diffusers", "token_count": 2623 }
110
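The length handling in `DanceDiffusionPipeline.__call__` above rounds the requested sample count up to a multiple of `2 ** len(unet.up_blocks)` and trims the audio back after denoising. Here is a small worked example of just that rounding step, with an assumed sample rate and U-Net depth (not values taken from a real checkpoint).

```py
# Assumed values for illustration only.
sample_rate = 44100        # assumption: audio sample rate of the model
num_up_blocks = 5          # assumption: depth of the 1D U-Net
audio_length_in_s = 4.0

down_scale_factor = 2 ** num_up_blocks                    # 32
sample_size = audio_length_in_s * sample_rate             # 176400.0
original_sample_size = int(sample_size)

if sample_size % down_scale_factor != 0:
    # same rounding as the pipeline: up to the next multiple of the factor
    sample_size = (sample_size // down_scale_factor + 1) * down_scale_factor
sample_size = int(sample_size)

print(original_sample_size, sample_size)  # 176400 176416 -> generated at 176416, cut back to 176400
```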
# Deprecated Pipelines

This folder contains pipelines that have very low usage as measured by model downloads, issues and PRs. While you can still use the pipelines just as before, we will stop testing the pipelines and will not accept any changes to existing files.
diffusers/src/diffusers/pipelines/deprecated/README.md/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/README.md", "repo_id": "diffusers", "token_count": 54 }
111
from typing import TYPE_CHECKING from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule _import_structure = {"pipeline_score_sde_ve": ["ScoreSdeVePipeline"]} if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: from .pipeline_score_sde_ve import ScoreSdeVePipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, )
diffusers/src/diffusers/pipelines/deprecated/score_sde_ve/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/score_sde_ve/__init__.py", "repo_id": "diffusers", "token_count": 193 }
112
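The `__init__` above defers the real import until an attribute is first requested via `_LazyModule`. Below is a rough sketch of the idea; it is a simplification for illustration, not the actual diffusers implementation.

```py
import importlib
import types


class LazyModule(types.ModuleType):
    """Simplified stand-in: resolve names to submodules only on first attribute access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # e.g. {"pipeline_score_sde_ve": ["ScoreSdeVePipeline"]} -> {"ScoreSdeVePipeline": "pipeline_score_sde_ve"}
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, name):
        if name not in self._class_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        submodule = importlib.import_module("." + self._class_to_module[name], self.__name__)
        value = getattr(submodule, name)
        setattr(self, name, value)  # cache so __getattr__ is not hit again
        return value
```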
from typing import TYPE_CHECKING from ....utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_transformers_available, is_transformers_version, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ....utils.dummy_torch_and_transformers_objects import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, ) _dummy_objects.update( { "VersatileDiffusionDualGuidedPipeline": VersatileDiffusionDualGuidedPipeline, "VersatileDiffusionImageVariationPipeline": VersatileDiffusionImageVariationPipeline, "VersatileDiffusionPipeline": VersatileDiffusionPipeline, "VersatileDiffusionTextToImagePipeline": VersatileDiffusionTextToImagePipeline, } ) else: _import_structure["modeling_text_unet"] = ["UNetFlatConditionModel"] _import_structure["pipeline_versatile_diffusion"] = ["VersatileDiffusionPipeline"] _import_structure["pipeline_versatile_diffusion_dual_guided"] = ["VersatileDiffusionDualGuidedPipeline"] _import_structure["pipeline_versatile_diffusion_image_variation"] = ["VersatileDiffusionImageVariationPipeline"] _import_structure["pipeline_versatile_diffusion_text_to_image"] = ["VersatileDiffusionTextToImagePipeline"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ....utils.dummy_torch_and_transformers_objects import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, ) else: from .pipeline_versatile_diffusion import VersatileDiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
diffusers/src/diffusers/pipelines/deprecated/versatile_diffusion/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/versatile_diffusion/__init__.py", "repo_id": "diffusers", "token_count": 1112 }
113
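When the optional-dependency check above fails, the public names are replaced by placeholder objects, so `from diffusers import VersatileDiffusionPipeline` still succeeds but fails with a clear message at use time. Here is a simplified sketch of that dummy-object trick; the real placeholders live in the auto-generated `dummy_torch_and_transformers_objects` module.

```py
# Simplified illustration; not the generated diffusers dummy objects.
class DummyObject(type):
    """Metaclass that turns instantiation into an informative ImportError."""

    def __call__(cls, *args, **kwargs):
        raise ImportError(
            f"{cls.__name__} requires `torch` and `transformers>=4.25.0`, "
            "which were not found in your environment."
        )


class VersatileDiffusionPipeline(metaclass=DummyObject):
    pass


# Importing the name succeeds; using it fails loudly:
# VersatileDiffusionPipeline()  -> ImportError: VersatileDiffusionPipeline requires ...
```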
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from copy import deepcopy from typing import Callable, List, Optional, Union import numpy as np import PIL.Image import torch import torch.nn.functional as F from packaging import version from PIL import Image from transformers import ( XLMRobertaTokenizer, ) from ... import __version__ from ...models import UNet2DConditionModel, VQModel from ...schedulers import DDIMScheduler from ...utils import ( logging, replace_example_docstring, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput from .text_encoder import MultilingualCLIP logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> from diffusers import KandinskyInpaintPipeline, KandinskyPriorPipeline >>> from diffusers.utils import load_image >>> import torch >>> import numpy as np >>> pipe_prior = KandinskyPriorPipeline.from_pretrained( ... "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16 ... ) >>> pipe_prior.to("cuda") >>> prompt = "a hat" >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False) >>> pipe = KandinskyInpaintPipeline.from_pretrained( ... "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16 ... ) >>> pipe.to("cuda") >>> init_image = load_image( ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" ... "/kandinsky/cat.png" ... ) >>> mask = np.zeros((768, 768), dtype=np.float32) >>> mask[:250, 250:-250] = 1 >>> out = pipe( ... prompt, ... image=init_image, ... mask_image=mask, ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... height=768, ... width=768, ... num_inference_steps=50, ... ) >>> image = out.images[0] >>> image.save("cat_with_hat.png") ``` """ def get_new_h_w(h, w, scale_factor=8): new_h = h // scale_factor**2 if h % scale_factor**2 != 0: new_h += 1 new_w = w // scale_factor**2 if w % scale_factor**2 != 0: new_w += 1 return new_h * scale_factor, new_w * scale_factor def prepare_mask(masks): prepared_masks = [] for mask in masks: old_mask = deepcopy(mask) for i in range(mask.shape[1]): for j in range(mask.shape[2]): if old_mask[0][i][j] == 1: continue if i != 0: mask[:, i - 1, j] = 0 if j != 0: mask[:, i, j - 1] = 0 if i != 0 and j != 0: mask[:, i - 1, j - 1] = 0 if i != mask.shape[1] - 1: mask[:, i + 1, j] = 0 if j != mask.shape[2] - 1: mask[:, i, j + 1] = 0 if i != mask.shape[1] - 1 and j != mask.shape[2] - 1: mask[:, i + 1, j + 1] = 0 prepared_masks.append(mask) return torch.stack(prepared_masks, dim=0) def prepare_mask_and_masked_image(image, mask, height, width): r""" Prepares a pair (mask, image) to be consumed by the Kandinsky inpaint pipeline. This means that those inputs will be converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the ``image`` and ``1`` for the ``mask``. 
The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be binarized (``mask > 0.5``) and cast to ``torch.float32`` too. Args: image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint. It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width`` ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``. mask (_type_): The mask to apply to the image, i.e. regions to inpaint. It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width`` ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``. height (`int`, *optional*, defaults to 512): The height in pixels of the generated image. width (`int`, *optional*, defaults to 512): The width in pixels of the generated image. Raises: ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions. TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not (ot the other way around). Returns: tuple[torch.Tensor]: The pair (mask, image) as ``torch.Tensor`` with 4 dimensions: ``batch x channels x height x width``. """ if image is None: raise ValueError("`image` input cannot be undefined.") if mask is None: raise ValueError("`mask_image` input cannot be undefined.") if isinstance(image, torch.Tensor): if not isinstance(mask, torch.Tensor): raise TypeError(f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not") # Batch single image if image.ndim == 3: assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)" image = image.unsqueeze(0) # Batch and add channel dim for single mask if mask.ndim == 2: mask = mask.unsqueeze(0).unsqueeze(0) # Batch single mask or add channel dim if mask.ndim == 3: # Single batched mask, no channel dim or single mask not batched but channel dim if mask.shape[0] == 1: mask = mask.unsqueeze(0) # Batched masks no channel dim else: mask = mask.unsqueeze(1) assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions" assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions" assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size" # Check image is in [-1, 1] if image.min() < -1 or image.max() > 1: raise ValueError("Image should be in [-1, 1] range") # Check mask is in [0, 1] if mask.min() < 0 or mask.max() > 1: raise ValueError("Mask should be in [0, 1] range") # Binarize mask mask[mask < 0.5] = 0 mask[mask >= 0.5] = 1 # Image as float32 image = image.to(dtype=torch.float32) elif isinstance(mask, torch.Tensor): raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not") else: # preprocess image if isinstance(image, (PIL.Image.Image, np.ndarray)): image = [image] if isinstance(image, list) and isinstance(image[0], PIL.Image.Image): # resize all images w.r.t passed height an width image = [i.resize((width, height), resample=Image.BICUBIC, reducing_gap=1) for i in image] image = [np.array(i.convert("RGB"))[None, :] for i in image] image = np.concatenate(image, axis=0) elif isinstance(image, list) and isinstance(image[0], np.ndarray): image = np.concatenate([i[None, :] for i in image], axis=0) image = image.transpose(0, 3, 1, 2) image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 # preprocess mask if isinstance(mask, (PIL.Image.Image, np.ndarray)): 
mask = [mask] if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image): mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask] mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0) mask = mask.astype(np.float32) / 255.0 elif isinstance(mask, list) and isinstance(mask[0], np.ndarray): mask = np.concatenate([m[None, None, :] for m in mask], axis=0) mask[mask < 0.5] = 0 mask[mask >= 0.5] = 1 mask = torch.from_numpy(mask) mask = 1 - mask return mask, image class KandinskyInpaintPipeline(DiffusionPipeline): """ Pipeline for text-guided image inpainting using Kandinsky2.1 This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: text_encoder ([`MultilingualCLIP`]): Frozen text-encoder. tokenizer ([`XLMRobertaTokenizer`]): Tokenizer of class scheduler ([`DDIMScheduler`]): A scheduler to be used in combination with `unet` to generate image latents. unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the image embedding. movq ([`VQModel`]): MoVQ image encoder and decoder """ model_cpu_offload_seq = "text_encoder->unet->movq" def __init__( self, text_encoder: MultilingualCLIP, movq: VQModel, tokenizer: XLMRobertaTokenizer, unet: UNet2DConditionModel, scheduler: DDIMScheduler, ): super().__init__() self.register_modules( text_encoder=text_encoder, movq=movq, tokenizer=tokenizer, unet=unet, scheduler=scheduler, ) self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) self._warn_has_been_called = False # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, ): batch_size = len(prompt) if isinstance(prompt, list) else 1 # get prompt text embeddings text_inputs = self.tokenizer( prompt, padding="max_length", max_length=77, truncation=True, return_attention_mask=True, add_special_tokens=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) text_input_ids = text_input_ids.to(device) text_mask = text_inputs.attention_mask.to(device) prompt_embeds, text_encoder_hidden_states = self.text_encoder( input_ids=text_input_ids, attention_mask=text_mask ) prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) if 
do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=77, truncation=True, return_attention_mask=True, add_special_tokens=True, return_tensors="pt", ) uncond_text_input_ids = uncond_input.input_ids.to(device) uncond_text_mask = uncond_input.attention_mask.to(device) negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder( input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) seq_len = uncond_text_encoder_hidden_states.shape[1] uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view( batch_size * num_images_per_prompt, seq_len, -1 ) uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) # done duplicates # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states]) text_mask = torch.cat([uncond_text_mask, text_mask]) return prompt_embeds, text_encoder_hidden_states, text_mask @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]], image: Union[torch.FloatTensor, PIL.Image.Image], mask_image: Union[torch.FloatTensor, PIL.Image.Image, np.ndarray], image_embeds: torch.FloatTensor, negative_image_embeds: torch.FloatTensor, negative_prompt: Optional[Union[str, List[str]]] = None, height: int = 512, width: int = 512, num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, return_dict: bool = True, ): """ Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. image (`torch.FloatTensor`, `PIL.Image.Image` or `np.ndarray`): `Image`, or tensor representing an image batch, that will be used as the starting point for the process. mask_image (`PIL.Image.Image`,`torch.FloatTensor` or `np.ndarray`): `Image`, or a tensor representing an image batch, to mask `image`. 
White pixels in the mask will be repainted, while black pixels will be preserved. You can pass a pytorch tensor as mask only if the image you passed is a pytorch tensor, and it should contain one color channel (L) instead of 3, so the expected shape would be either `(B, 1, H, W)`, `(B, H, W)`, `(1, H, W)` or `(H, W)`. If image is a PIL image or numpy array, mask should also be either a PIL image or a numpy array. If it is a PIL image, it will be converted to a single channel (luminance) before use. If it is a numpy array, the expected shape is `(H, W)`. image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`): The clip image embeddings for the text prompt, which will be used to condition the image generation. negative_image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`): The clip image embeddings for the negative text prompt, which will be used to condition the image generation. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). height (`int`, *optional*, defaults to 512): The height in pixels of the generated image. width (`int`, *optional*, defaults to 512): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 100): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 4.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will be generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). callback (`Callable`, *optional*): A function that is called every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
Examples: Returns: [`~pipelines.ImagePipelineOutput`] or `tuple` """ if not self._warn_has_been_called and version.parse(version.parse(__version__).base_version) < version.parse( "0.23.0.dev0" ): logger.warn( "Please note that the expected format of `mask_image` has recently been changed. " "Before diffusers == 0.19.0, Kandinsky Inpainting pipelines repainted black pixels and preserved black pixels. " "As of diffusers==0.19.0 this behavior has been inverted. Now white pixels are repainted and black pixels are preserved. " "This way, Kandinsky's masking behavior is aligned with Stable Diffusion. " "THIS means that you HAVE to invert the input mask to have the same behavior as before as explained in https://github.com/huggingface/diffusers/pull/4207. " "This warning will be surpressed after the first inference call and will be removed in diffusers>0.23.0" ) self._warn_has_been_called = True # Define call parameters if isinstance(prompt, str): batch_size = 1 elif isinstance(prompt, list): batch_size = len(prompt) else: raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") device = self._execution_device batch_size = batch_size * num_images_per_prompt do_classifier_free_guidance = guidance_scale > 1.0 prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt ) if isinstance(image_embeds, list): image_embeds = torch.cat(image_embeds, dim=0) if isinstance(negative_image_embeds, list): negative_image_embeds = torch.cat(negative_image_embeds, dim=0) if do_classifier_free_guidance: image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0) image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to( dtype=prompt_embeds.dtype, device=device ) # preprocess image and mask mask_image, image = prepare_mask_and_masked_image(image, mask_image, height, width) image = image.to(dtype=prompt_embeds.dtype, device=device) image = self.movq.encode(image)["latents"] mask_image = mask_image.to(dtype=prompt_embeds.dtype, device=device) image_shape = tuple(image.shape[-2:]) mask_image = F.interpolate( mask_image, image_shape, mode="nearest", ) mask_image = prepare_mask(mask_image) masked_image = image * mask_image mask_image = mask_image.repeat_interleave(num_images_per_prompt, dim=0) masked_image = masked_image.repeat_interleave(num_images_per_prompt, dim=0) if do_classifier_free_guidance: mask_image = mask_image.repeat(2, 1, 1, 1) masked_image = masked_image.repeat(2, 1, 1, 1) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps_tensor = self.scheduler.timesteps num_channels_latents = self.movq.config.latent_channels # get h, w for latents sample_height, sample_width = get_new_h_w(height, width, self.movq_scale_factor) # create initial latent latents = self.prepare_latents( (batch_size, num_channels_latents, sample_height, sample_width), text_encoder_hidden_states.dtype, device, generator, latents, self.scheduler, ) # Check that sizes of mask, masked image and latents match with expected num_channels_mask = mask_image.shape[1] num_channels_masked_image = masked_image.shape[1] if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels: raise ValueError( f"Incorrect configuration settings! 
The config of `pipeline.unet`: {self.unet.config} expects" f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}" f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of" " `pipeline.unet` or your `mask_image` or `image` input." ) for i, t in enumerate(self.progress_bar(timesteps_tensor)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = torch.cat([latent_model_input, masked_image, mask_image], dim=1) added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds} noise_pred = self.unet( sample=latent_model_input, timestep=t, encoder_hidden_states=text_encoder_hidden_states, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0] if do_classifier_free_guidance: noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1) noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) _, variance_pred_text = variance_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1) if not ( hasattr(self.scheduler.config, "variance_type") and self.scheduler.config.variance_type in ["learned", "learned_range"] ): noise_pred, _ = noise_pred.split(latents.shape[1], dim=1) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step( noise_pred, t, latents, generator=generator, ).prev_sample if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) # post-processing image = self.movq.decode(latents, force_not_quantize=True)["sample"] self.maybe_free_model_hooks() if output_type not in ["pt", "np", "pil"]: raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}") if output_type in ["np", "pil"]: image = image * 0.5 + 0.5 image = image.clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).float().numpy() if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image)
diffusers/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py", "repo_id": "diffusers", "token_count": 12787 }
114
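The latent resolution used by `KandinskyInpaintPipeline` comes from `get_new_h_w`, which rounds the pixel size up to the next block the MoVQ/U-Net stack can handle. A small worked example follows; the function is repeated so the snippet runs standalone, and the 768x768 size and scale factor 8 are taken from the docstring example and the default argument above.

```py
def get_new_h_w(h, w, scale_factor=8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor


print(get_new_h_w(768, 768))  # (96, 96): latents are sampled at 96x96 for a 768x768 image
print(get_new_h_w(770, 768))  # (104, 96): heights that are not a multiple of 64 are rounded up
```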
# Copyright 2023 MultiDiffusion Authors and The HuggingFace Team. All rights reserved." # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import inspect from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import DDIMScheduler from ...utils import ( USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from ..stable_diffusion import StableDiffusionPipelineOutput from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import StableDiffusionPanoramaPipeline, DDIMScheduler >>> model_ckpt = "stabilityai/stable-diffusion-2-base" >>> scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler") >>> pipe = StableDiffusionPanoramaPipeline.from_pretrained( ... model_ckpt, scheduler=scheduler, torch_dtype=torch.float16 ... ) >>> pipe = pipe.to("cuda") >>> prompt = "a photo of the dolomites" >>> image = pipe(prompt).images[0] ``` """ class StableDiffusionPanoramaPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, IPAdapterMixin): r""" Pipeline for text-to-image generation using MultiDiffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). The pipeline also inherits the following loading methods: - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. text_encoder ([`~transformers.CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. unet ([`UNet2DConditionModel`]): A `UNet2DConditionModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. 
safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details about a model's potential harms. feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. """ model_cpu_offload_seq = "text_encoder->unet->vae" _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] _exclude_from_cpu_offload = ["safety_checker"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: DDIMScheduler, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, image_encoder: Optional[CLIPVisionModelWithProjection] = None, requires_safety_checker: bool = True, ): super().__init__() if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) if safety_checker is not None and feature_extractor is None: raise ValueError( "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." ) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, image_encoder=image_encoder, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing def enable_vae_slicing(self): r""" Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ self.vae.enable_slicing() # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing def disable_vae_slicing(self): r""" Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. 
""" self.vae.disable_slicing() # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, **kwargs, ): deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt( prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs, ) # concatenate for backwards comp prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. 
""" # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, LoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: procecss multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." 
) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt # textual inversion: procecss multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, negative_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors="pt").pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder( torch.zeros_like(image), output_hidden_states=True ).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( num_images_per_prompt, dim=0 ) return image_enc_hidden_states, uncond_image_enc_hidden_states else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return image_embeds, uncond_image_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds def prepare_ip_adapter_image_embeds(self, ip_adapter_image, device, num_images_per_prompt): if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError( f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." 
) image_embeds = [] for single_ip_adapter_image, image_proj_layer in zip( ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers ): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) single_image_embeds, single_negative_image_embeds = self.encode_image( single_ip_adapter_image, device, 1, output_hidden_state ) single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0) single_negative_image_embeds = torch.stack([single_negative_image_embeds] * num_images_per_prompt, dim=0) if self.do_classifier_free_guidance: single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) single_image_embeds = single_image_embeds.to(device) image_embeds.append(single_image_embeds) return image_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) return image, has_nsfw_concept # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents def decode_latents(self, latents): deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image def decode_latents_with_padding(self, latents, padding=8): # Add padding to latents for circular inference # padding is the number of latents to add on each side # it would slightly increase the memory usage, but remove the boundary artifacts latents = 1 / self.vae.config.scaling_factor * latents latents_left = latents[..., :padding] latents_right = latents[..., -padding:] latents = torch.cat((latents_right, latents, latents_left), axis=-1) image = self.vae.decode(latents, return_dict=False)[0] padding_pix = self.vae_scale_factor * padding image = image[..., padding_pix:-padding_pix] return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs def check_inputs( self, prompt, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def get_views(self, panorama_height, panorama_width, window_size=64, stride=8, circular_padding=False): # Here, we define the mappings F_i (see Eq. 7 in the MultiDiffusion paper https://arxiv.org/abs/2302.08113) # if panorama's height/width < window_size, num_blocks of height/width should return 1 panorama_height /= 8 panorama_width /= 8 num_blocks_height = (panorama_height - window_size) // stride + 1 if panorama_height > window_size else 1 if circular_padding: num_blocks_width = panorama_width // stride if panorama_width > window_size else 1 else: num_blocks_width = (panorama_width - window_size) // stride + 1 if panorama_width > window_size else 1 total_num_blocks = int(num_blocks_height * num_blocks_width) views = [] for i in range(total_num_blocks): h_start = int((i // num_blocks_width) * stride) h_end = h_start + window_size w_start = int((i % num_blocks_width) * stride) w_end = w_start + window_size views.append((h_start, h_end, w_start, w_end)) return views @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, height: Optional[int] = 512, width: Optional[int] = 2048, num_inference_steps: int = 50, guidance_scale: float = 7.5, view_batch_size: int = 1, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, ip_adapter_image: Optional[PipelineImageInput] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, circular_padding: bool = False, clip_skip: Optional[int] = None, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. height (`int`, *optional*, defaults to 512): The height in pixels of the generated image. width (`int`, *optional*, defaults to 2048): The width in pixels of the generated image. The width is kept high because the pipeline is supposed generate panorama-like images. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. view_batch_size (`int`, *optional*, defaults to 1): The batch size to denoise split views. For some GPUs with high performance, higher view batch size can speedup the generation and increase the VRAM usage. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. 
Ignored when not using guidance (`guidance_scale < 1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). circular_padding (`bool`, *optional*, defaults to `False`): If set to `True`, circular padding is applied to ensure there are no stitching artifacts. Circular padding allows the model to seamlessly generate a transition from the rightmost part of the image to the leftmost part, maintaining consistency in a 360-degree sense. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. Examples: Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. """ # 0. 
Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds ) # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 if ip_adapter_image is not None: image_embeds = self.prepare_ip_adapter_image_embeds( ip_adapter_image, device, batch_size * num_images_per_prompt ) # 3. Encode input prompt text_encoder_lora_scale = ( cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None ) prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=clip_skip, ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) # 6. Define panorama grid and initialize views for synthesis. # prepare batch grid views = self.get_views(height, width, circular_padding=circular_padding) views_batch = [views[i : i + view_batch_size] for i in range(0, len(views), view_batch_size)] views_scheduler_status = [copy.deepcopy(self.scheduler.__dict__)] * len(views_batch) count = torch.zeros_like(latents) value = torch.zeros_like(latents) # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7.1 Add image embeds for IP-Adapter added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None # 8. Denoising loop # Each denoising step also includes refinement of the latents with respect to the # views. num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): count.zero_() value.zero_() # generate views # Here, we iterate through different spatial crops of the latents and denoise them. These # denoised (latent) crops are then averaged to produce the final latent # for the current timestep via MultiDiffusion. Please see Sec. 
4.1 in the # MultiDiffusion paper for more details: https://arxiv.org/abs/2302.08113 # Batch views denoise for j, batch_view in enumerate(views_batch): vb_size = len(batch_view) # get the latents corresponding to the current view coordinates if circular_padding: latents_for_view = [] for h_start, h_end, w_start, w_end in batch_view: if w_end > latents.shape[3]: # Add circular horizontal padding latent_view = torch.cat( ( latents[:, :, h_start:h_end, w_start:], latents[:, :, h_start:h_end, : w_end - latents.shape[3]], ), axis=-1, ) else: latent_view = latents[:, :, h_start:h_end, w_start:w_end] latents_for_view.append(latent_view) latents_for_view = torch.cat(latents_for_view) else: latents_for_view = torch.cat( [ latents[:, :, h_start:h_end, w_start:w_end] for h_start, h_end, w_start, w_end in batch_view ] ) # rematch block's scheduler status self.scheduler.__dict__.update(views_scheduler_status[j]) # expand the latents if we are doing classifier free guidance latent_model_input = ( latents_for_view.repeat_interleave(2, dim=0) if do_classifier_free_guidance else latents_for_view ) latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # repeat prompt_embeds for batch prompt_embeds_input = torch.cat([prompt_embeds] * vb_size) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds_input, cross_attention_kwargs=cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, ).sample # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2] noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents_denoised_batch = self.scheduler.step( noise_pred, t, latents_for_view, **extra_step_kwargs ).prev_sample # save views scheduler status after sample views_scheduler_status[j] = copy.deepcopy(self.scheduler.__dict__) # extract value from batch for latents_view_denoised, (h_start, h_end, w_start, w_end) in zip( latents_denoised_batch.chunk(vb_size), batch_view ): if circular_padding and w_end > latents.shape[3]: # Case for circular padding value[:, :, h_start:h_end, w_start:] += latents_view_denoised[ :, :, h_start:h_end, : latents.shape[3] - w_start ] value[:, :, h_start:h_end, : w_end - latents.shape[3]] += latents_view_denoised[ :, :, h_start:h_end, latents.shape[3] - w_start : ] count[:, :, h_start:h_end, w_start:] += 1 count[:, :, h_start:h_end, : w_end - latents.shape[3]] += 1 else: value[:, :, h_start:h_end, w_start:w_end] += latents_view_denoised count[:, :, h_start:h_end, w_start:w_end] += 1 # take the MultiDiffusion step. Eq. 
5 in MultiDiffusion paper: https://arxiv.org/abs/2302.08113 latents = torch.where(count > 0, value / count, value) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if not output_type == "latent": if circular_padding: image = self.decode_latents_with_padding(latents) else: image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
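The denoising loop above averages per-view predictions produced over a sliding window grid; the window arithmetic lives in `get_views`. A small standalone sketch of that arithmetic (re-implemented here for illustration only; the defaults `window_size=64`, `stride=8` match the method above):

```py
# Standalone sketch of the sliding-window tiling used by get_views above.
# Integer division is used here for clarity; the counts match the pipeline's behavior.
def get_views(panorama_height, panorama_width, window_size=64, stride=8, circular_padding=False):
    panorama_height //= 8  # image resolution -> latent resolution
    panorama_width //= 8
    num_blocks_height = (panorama_height - window_size) // stride + 1 if panorama_height > window_size else 1
    if circular_padding:
        num_blocks_width = panorama_width // stride if panorama_width > window_size else 1
    else:
        num_blocks_width = (panorama_width - window_size) // stride + 1 if panorama_width > window_size else 1
    views = []
    for i in range(int(num_blocks_height * num_blocks_width)):
        h_start = (i // num_blocks_width) * stride
        w_start = (i % num_blocks_width) * stride
        views.append((h_start, h_start + window_size, w_start, w_start + window_size))
    return views

# A 512x2048 panorama maps to a 64x256 latent: a single row of windows,
# 25 views without circular padding and 32 when circular_padding=True
# (extra windows wrap past the right edge and are stitched onto the left).
assert len(get_views(512, 2048)) == 25
assert len(get_views(512, 2048, circular_padding=True)) == 32
```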
diffusers/src/diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/stable_diffusion_panorama/pipeline_stable_diffusion_panorama.py", "repo_id": "diffusers", "token_count": 20626 }
115
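A minimal end-to-end usage sketch for the panorama pipeline defined above. The checkpoint id and the choice of `DDIMScheduler` are assumptions for illustration, not requirements imposed by the pipeline code; the call arguments mirror the `__call__` signature shown above.

```py
# Hedged usage sketch: "stabilityai/stable-diffusion-2-base" is an assumed checkpoint,
# and DDIMScheduler is one reasonable scheduler choice, not the only valid one.
import torch
from diffusers import DDIMScheduler, StableDiffusionPanoramaPipeline

model_id = "stabilityai/stable-diffusion-2-base"  # assumed checkpoint
scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = StableDiffusionPanoramaPipeline.from_pretrained(
    model_id, scheduler=scheduler, torch_dtype=torch.float16
).to("cuda")

# height=512, width=2048 match the __call__ defaults; circular_padding=True removes
# the seam between the left and right edges for 360-degree panoramas.
image = pipe(
    prompt="a photo of the dolomites",
    height=512,
    width=2048,
    num_inference_steps=50,
    guidance_scale=7.5,
    view_batch_size=4,
    circular_padding=True,
).images[0]
image.save("panorama.png")
```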
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from dataclasses import dataclass from typing import Callable, Dict, List, Optional, Union import numpy as np import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from ...image_processor import VaeImageProcessor from ...models import AutoencoderKLTemporalDecoder, UNetSpatioTemporalConditionModel from ...schedulers import EulerDiscreteScheduler from ...utils import BaseOutput, logging from ...utils.torch_utils import is_compiled_module, randn_tensor from ..pipeline_utils import DiffusionPipeline logger = logging.get_logger(__name__) # pylint: disable=invalid-name def _append_dims(x, target_dims): """Appends dimensions to the end of a tensor until it has target_dims dimensions.""" dims_to_append = target_dims - x.ndim if dims_to_append < 0: raise ValueError(f"input has {x.ndim} dims but target_dims is {target_dims}, which is less") return x[(...,) + (None,) * dims_to_append] # Copied from diffusers.pipelines.animatediff.pipeline_animatediff.tensor2vid def tensor2vid(video: torch.Tensor, processor: "VaeImageProcessor", output_type: str = "np"): batch_size, channels, num_frames, height, width = video.shape outputs = [] for batch_idx in range(batch_size): batch_vid = video[batch_idx].permute(1, 0, 2, 3) batch_output = processor.postprocess(batch_vid, output_type) outputs.append(batch_output) if output_type == "np": outputs = np.stack(outputs) elif output_type == "pt": outputs = torch.stack(outputs) elif not output_type == "pil": raise ValueError(f"{output_type} does not exist. Please choose one of ['np', 'pt', 'pil]") return outputs @dataclass class StableVideoDiffusionPipelineOutput(BaseOutput): r""" Output class for zero-shot text-to-video pipeline. Args: frames (`[List[PIL.Image.Image]`, `np.ndarray`]): List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, num_channels)`. """ frames: Union[List[PIL.Image.Image], np.ndarray] class StableVideoDiffusionPipeline(DiffusionPipeline): r""" Pipeline to generate video from an input image using Stable Video Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: vae ([`AutoencoderKLTemporalDecoder`]): Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. image_encoder ([`~transformers.CLIPVisionModelWithProjection`]): Frozen CLIP image-encoder ([laion/CLIP-ViT-H-14-laion2B-s32B-b79K](https://huggingface.co/laion/CLIP-ViT-H-14-laion2B-s32B-b79K)). unet ([`UNetSpatioTemporalConditionModel`]): A `UNetSpatioTemporalConditionModel` to denoise the encoded image latents. scheduler ([`EulerDiscreteScheduler`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. 
feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images. """ model_cpu_offload_seq = "image_encoder->unet->vae" _callback_tensor_inputs = ["latents"] def __init__( self, vae: AutoencoderKLTemporalDecoder, image_encoder: CLIPVisionModelWithProjection, unet: UNetSpatioTemporalConditionModel, scheduler: EulerDiscreteScheduler, feature_extractor: CLIPImageProcessor, ): super().__init__() self.register_modules( vae=vae, image_encoder=image_encoder, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) def _encode_image(self, image, device, num_videos_per_prompt, do_classifier_free_guidance): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.image_processor.pil_to_numpy(image) image = self.image_processor.numpy_to_pt(image) # We normalize the image before resizing to match with the original implementation. # Then we unnormalize it after resizing. image = image * 2.0 - 1.0 image = _resize_with_antialiasing(image, (224, 224)) image = (image + 1.0) / 2.0 # Normalize the image with for CLIP input image = self.feature_extractor( images=image, do_normalize=True, do_center_crop=False, do_resize=False, do_rescale=False, return_tensors="pt", ).pixel_values image = image.to(device=device, dtype=dtype) image_embeddings = self.image_encoder(image).image_embeds image_embeddings = image_embeddings.unsqueeze(1) # duplicate image embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = image_embeddings.shape image_embeddings = image_embeddings.repeat(1, num_videos_per_prompt, 1) image_embeddings = image_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1) if do_classifier_free_guidance: negative_image_embeddings = torch.zeros_like(image_embeddings) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes image_embeddings = torch.cat([negative_image_embeddings, image_embeddings]) return image_embeddings def _encode_vae_image( self, image: torch.Tensor, device, num_videos_per_prompt, do_classifier_free_guidance, ): image = image.to(device=device) image_latents = self.vae.encode(image).latent_dist.mode() if do_classifier_free_guidance: negative_image_latents = torch.zeros_like(image_latents) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes image_latents = torch.cat([negative_image_latents, image_latents]) # duplicate image_latents for each generation per prompt, using mps friendly method image_latents = image_latents.repeat(num_videos_per_prompt, 1, 1, 1) return image_latents def _get_add_time_ids( self, fps, motion_bucket_id, noise_aug_strength, dtype, batch_size, num_videos_per_prompt, do_classifier_free_guidance, ): add_time_ids = [fps, motion_bucket_id, noise_aug_strength] passed_add_embed_dim = self.unet.config.addition_time_embed_dim * len(add_time_ids) expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim != passed_add_embed_dim: raise ValueError( f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. 
The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." ) add_time_ids = torch.tensor([add_time_ids], dtype=dtype) add_time_ids = add_time_ids.repeat(batch_size * num_videos_per_prompt, 1) if do_classifier_free_guidance: add_time_ids = torch.cat([add_time_ids, add_time_ids]) return add_time_ids def decode_latents(self, latents, num_frames, decode_chunk_size=14): # [batch, frames, channels, height, width] -> [batch*frames, channels, height, width] latents = latents.flatten(0, 1) latents = 1 / self.vae.config.scaling_factor * latents forward_vae_fn = self.vae._orig_mod.forward if is_compiled_module(self.vae) else self.vae.forward accepts_num_frames = "num_frames" in set(inspect.signature(forward_vae_fn).parameters.keys()) # decode decode_chunk_size frames at a time to avoid OOM frames = [] for i in range(0, latents.shape[0], decode_chunk_size): num_frames_in = latents[i : i + decode_chunk_size].shape[0] decode_kwargs = {} if accepts_num_frames: # we only pass num_frames_in if it's expected decode_kwargs["num_frames"] = num_frames_in frame = self.vae.decode(latents[i : i + decode_chunk_size], **decode_kwargs).sample frames.append(frame) frames = torch.cat(frames, dim=0) # [batch*frames, channels, height, width] -> [batch, channels, frames, height, width] frames = frames.reshape(-1, num_frames, *frames.shape[1:]).permute(0, 2, 1, 3, 4) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 frames = frames.float() return frames def check_inputs(self, image, height, width): if ( not isinstance(image, torch.Tensor) and not isinstance(image, PIL.Image.Image) and not isinstance(image, list) ): raise ValueError( "`image` has to be of type `torch.FloatTensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is" f" {type(image)}" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") def prepare_latents( self, batch_size, num_frames, num_channels_latents, height, width, dtype, device, generator, latents=None, ): shape = ( batch_size, num_frames, num_channels_latents // 2, height // self.vae_scale_factor, width // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents @property def guidance_scale(self): return self._guidance_scale # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
@property def do_classifier_free_guidance(self): if isinstance(self.guidance_scale, (int, float)): return self.guidance_scale return self.guidance_scale.max() > 1 @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() def __call__( self, image: Union[PIL.Image.Image, List[PIL.Image.Image], torch.FloatTensor], height: int = 576, width: int = 1024, num_frames: Optional[int] = None, num_inference_steps: int = 25, min_guidance_scale: float = 1.0, max_guidance_scale: float = 3.0, fps: int = 7, motion_bucket_id: int = 127, noise_aug_strength: float = 0.02, decode_chunk_size: Optional[int] = None, num_videos_per_prompt: Optional[int] = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], return_dict: bool = True, ): r""" The call function to the pipeline for generation. Args: image (`PIL.Image.Image` or `List[PIL.Image.Image]` or `torch.FloatTensor`): Image or images to guide image generation. If you provide a tensor, it needs to be compatible with [`CLIPImageProcessor`](https://huggingface.co/lambdalabs/sd-image-variations-diffusers/blob/main/feature_extractor/preprocessor_config.json). height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. num_frames (`int`, *optional*): The number of video frames to generate. Defaults to 14 for `stable-video-diffusion-img2vid` and to 25 for `stable-video-diffusion-img2vid-xt` num_inference_steps (`int`, *optional*, defaults to 25): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. This parameter is modulated by `strength`. min_guidance_scale (`float`, *optional*, defaults to 1.0): The minimum guidance scale. Used for the classifier free guidance with first frame. max_guidance_scale (`float`, *optional*, defaults to 3.0): The maximum guidance scale. Used for the classifier free guidance with last frame. fps (`int`, *optional*, defaults to 7): Frames per second. The rate at which the generated images shall be exported to a video after generation. Note that Stable Diffusion Video's UNet was micro-conditioned on fps-1 during training. motion_bucket_id (`int`, *optional*, defaults to 127): The motion bucket ID. Used as conditioning for the generation. The higher the number the more motion will be in the video. noise_aug_strength (`float`, *optional*, defaults to 0.02): The amount of noise added to the init image, the higher it is the less the video will look like the init image. Increase it for more motion. decode_chunk_size (`int`, *optional*): The number of frames to decode at a time. The higher the chunk size, the higher the temporal consistency between frames, but also the higher the memory consumption. By default, the decoder will decode all frames at once for maximal quality. Reduce `decode_chunk_size` to reduce memory usage. num_videos_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. 
generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. Returns: [`~pipelines.stable_diffusion.StableVideoDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableVideoDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list of list with the generated frames. Examples: ```py from diffusers import StableVideoDiffusionPipeline from diffusers.utils import load_image, export_to_video pipe = StableVideoDiffusionPipeline.from_pretrained("stabilityai/stable-video-diffusion-img2vid-xt", torch_dtype=torch.float16, variant="fp16") pipe.to("cuda") image = load_image("https://lh3.googleusercontent.com/y-iFOHfLTwkuQSUegpwDdgKmOjRSTvPxat63dQLB25xkTs4lhIbRUFeNBWZzYf370g=s1200") image = image.resize((1024, 576)) frames = pipe(image, num_frames=25, decode_chunk_size=8).frames[0] export_to_video(frames, "generated.mp4", fps=7) ``` """ # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor num_frames = num_frames if num_frames is not None else self.unet.config.num_frames decode_chunk_size = decode_chunk_size if decode_chunk_size is not None else num_frames # 1. Check inputs. Raise error if not correct self.check_inputs(image, height, width) # 2. Define call parameters if isinstance(image, PIL.Image.Image): batch_size = 1 elif isinstance(image, list): batch_size = len(image) else: batch_size = image.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. self._guidance_scale = max_guidance_scale # 3. Encode input image image_embeddings = self._encode_image(image, device, num_videos_per_prompt, self.do_classifier_free_guidance) # NOTE: Stable Diffusion Video was conditioned on fps - 1, which # is why it is reduced here. 
# See: https://github.com/Stability-AI/generative-models/blob/ed0997173f98eaf8f4edf7ba5fe8f15c6b877fd3/scripts/sampling/simple_video_sample.py#L188 fps = fps - 1 # 4. Encode input image using VAE image = self.image_processor.preprocess(image, height=height, width=width).to(device) noise = randn_tensor(image.shape, generator=generator, device=device, dtype=image.dtype) image = image + noise_aug_strength * noise needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.vae.to(dtype=torch.float32) image_latents = self._encode_vae_image( image, device=device, num_videos_per_prompt=num_videos_per_prompt, do_classifier_free_guidance=self.do_classifier_free_guidance, ) image_latents = image_latents.to(image_embeddings.dtype) # cast back to fp16 if needed if needs_upcasting: self.vae.to(dtype=torch.float16) # Repeat the image latents for each frame so we can concatenate them with the noise # image_latents [batch, channels, height, width] ->[batch, num_frames, channels, height, width] image_latents = image_latents.unsqueeze(1).repeat(1, num_frames, 1, 1, 1) # 5. Get Added Time IDs added_time_ids = self._get_add_time_ids( fps, motion_bucket_id, noise_aug_strength, image_embeddings.dtype, batch_size, num_videos_per_prompt, self.do_classifier_free_guidance, ) added_time_ids = added_time_ids.to(device) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_videos_per_prompt, num_frames, num_channels_latents, height, width, image_embeddings.dtype, device, generator, latents, ) # 7. Prepare guidance scale guidance_scale = torch.linspace(min_guidance_scale, max_guidance_scale, num_frames).unsqueeze(0) guidance_scale = guidance_scale.to(device, latents.dtype) guidance_scale = guidance_scale.repeat(batch_size * num_videos_per_prompt, 1) guidance_scale = _append_dims(guidance_scale, latents.ndim) self._guidance_scale = guidance_scale # 8. 
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # Concatenate image_latents over channels dimention latent_model_input = torch.cat([latent_model_input, image_latents], dim=2) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=image_embeddings, added_time_ids=added_time_ids, return_dict=False, )[0] # perform guidance if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_cond - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents).prev_sample if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if not output_type == "latent": # cast back to fp16 if needed if needs_upcasting: self.vae.to(dtype=torch.float16) frames = self.decode_latents(latents, num_frames, decode_chunk_size) frames = tensor2vid(frames, self.image_processor, output_type=output_type) else: frames = latents self.maybe_free_model_hooks() if not return_dict: return frames return StableVideoDiffusionPipelineOutput(frames=frames) # resizing utils # TODO: clean up later def _resize_with_antialiasing(input, size, interpolation="bicubic", align_corners=True): h, w = input.shape[-2:] factors = (h / size[0], w / size[1]) # First, we have to determine sigma # Taken from skimage: https://github.com/scikit-image/scikit-image/blob/v0.19.2/skimage/transform/_warps.py#L171 sigmas = ( max((factors[0] - 1.0) / 2.0, 0.001), max((factors[1] - 1.0) / 2.0, 0.001), ) # Now kernel size. Good results are for 3 sigma, but that is kind of slow. Pillow uses 1 sigma # https://github.com/python-pillow/Pillow/blob/master/src/libImaging/Resample.c#L206 # But they do it in the 2 passes, which gives better results. 
Let's try 2 sigmas for now ks = int(max(2.0 * 2 * sigmas[0], 3)), int(max(2.0 * 2 * sigmas[1], 3)) # Make sure it is odd if (ks[0] % 2) == 0: ks = ks[0] + 1, ks[1] if (ks[1] % 2) == 0: ks = ks[0], ks[1] + 1 input = _gaussian_blur2d(input, ks, sigmas) output = torch.nn.functional.interpolate(input, size=size, mode=interpolation, align_corners=align_corners) return output def _compute_padding(kernel_size): """Compute padding tuple.""" # 4 or 6 ints: (padding_left, padding_right,padding_top,padding_bottom) # https://pytorch.org/docs/stable/nn.html#torch.nn.functional.pad if len(kernel_size) < 2: raise AssertionError(kernel_size) computed = [k - 1 for k in kernel_size] # for even kernels we need to do asymmetric padding :( out_padding = 2 * len(kernel_size) * [0] for i in range(len(kernel_size)): computed_tmp = computed[-(i + 1)] pad_front = computed_tmp // 2 pad_rear = computed_tmp - pad_front out_padding[2 * i + 0] = pad_front out_padding[2 * i + 1] = pad_rear return out_padding def _filter2d(input, kernel): # prepare kernel b, c, h, w = input.shape tmp_kernel = kernel[:, None, ...].to(device=input.device, dtype=input.dtype) tmp_kernel = tmp_kernel.expand(-1, c, -1, -1) height, width = tmp_kernel.shape[-2:] padding_shape: list[int] = _compute_padding([height, width]) input = torch.nn.functional.pad(input, padding_shape, mode="reflect") # kernel and input tensor reshape to align element-wise or batch-wise params tmp_kernel = tmp_kernel.reshape(-1, 1, height, width) input = input.view(-1, tmp_kernel.size(0), input.size(-2), input.size(-1)) # convolve the tensor with the kernel. output = torch.nn.functional.conv2d(input, tmp_kernel, groups=tmp_kernel.size(0), padding=0, stride=1) out = output.view(b, c, h, w) return out def _gaussian(window_size: int, sigma): if isinstance(sigma, float): sigma = torch.tensor([[sigma]]) batch_size = sigma.shape[0] x = (torch.arange(window_size, device=sigma.device, dtype=sigma.dtype) - window_size // 2).expand(batch_size, -1) if window_size % 2 == 0: x = x + 0.5 gauss = torch.exp(-x.pow(2.0) / (2 * sigma.pow(2.0))) return gauss / gauss.sum(-1, keepdim=True) def _gaussian_blur2d(input, kernel_size, sigma): if isinstance(sigma, tuple): sigma = torch.tensor([sigma], dtype=input.dtype) else: sigma = sigma.to(dtype=input.dtype) ky, kx = int(kernel_size[0]), int(kernel_size[1]) bs = sigma.shape[0] kernel_x = _gaussian(kx, sigma[:, 1].view(bs, 1)) kernel_y = _gaussian(ky, sigma[:, 0].view(bs, 1)) out_x = _filter2d(input, kernel_x[..., None, :]) out = _filter2d(out_x, kernel_y[..., None]) return out
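The frame-wise guidance schedule built in `__call__` above is easy to inspect in isolation. The sketch below re-implements `_append_dims` for illustration and uses an illustrative latent shape (batch 1, 14 frames, 4 channels, 72x128 latent, i.e. 576x1024 with a scale factor of 8 — the shapes are assumptions, not fixed by the code):

```py
# Sketch of the per-frame guidance ramp: guidance grows linearly from
# min_guidance_scale (first frame) to max_guidance_scale (last frame) and is
# broadcast against the 5D latents via trailing singleton dimensions.
import torch

def append_dims(x, target_dims):
    # same idea as _append_dims above: pad trailing singleton dimensions
    return x[(...,) + (None,) * (target_dims - x.ndim)]

num_frames, batch_size = 14, 1
min_guidance_scale, max_guidance_scale = 1.0, 3.0

guidance = torch.linspace(min_guidance_scale, max_guidance_scale, num_frames).unsqueeze(0)
guidance = guidance.repeat(batch_size, 1)               # (batch, frames)
latents = torch.zeros(batch_size, num_frames, 4, 72, 128)  # illustrative shape
guidance = append_dims(guidance, latents.ndim)           # (batch, frames, 1, 1, 1)

print(guidance.shape)                                    # torch.Size([1, 14, 1, 1, 1])
print(guidance[0, 0].item(), guidance[0, -1].item())     # 1.0 3.0
```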
diffusers/src/diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py", "repo_id": "diffusers", "token_count": 12421 }
116
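The `decode_latents` method of the video pipeline above bounds peak memory by decoding only `decode_chunk_size` frames per VAE call. A generic sketch of that chunking pattern, with a placeholder in place of the real `self.vae.decode(...).sample` call (the helper names here are illustrative, not part of the library):

```py
# Chunked decoding sketch: process `decode_chunk_size` items per call, then concatenate.
import torch

def decode_in_chunks(latents, decode_fn, decode_chunk_size=14):
    frames = []
    for i in range(0, latents.shape[0], decode_chunk_size):
        frames.append(decode_fn(latents[i : i + decode_chunk_size]))
    return torch.cat(frames, dim=0)

# Toy example: 25 "frames" of 4x9x16 latents decoded 8 at a time.
latents = torch.randn(25, 4, 9, 16)
fake_decode = lambda x: x.repeat_interleave(8, dim=-1)  # stand-in for the VAE decoder
frames = decode_in_chunks(latents, fake_decode, decode_chunk_size=8)
print(frames.shape)  # torch.Size([25, 4, 9, 128])
```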
import math from typing import Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin from ...models.attention import FeedForward from ...models.attention_processor import Attention from ...models.embeddings import TimestepEmbedding, Timesteps, get_2d_sincos_pos_embed from ...models.normalization import AdaLayerNorm from ...models.transformers.transformer_2d import Transformer2DModelOutput from ...utils import logging logger = logging.get_logger(__name__) # pylint: disable=invalid-name def _no_grad_trunc_normal_(tensor, mean, std, a, b): # Cut & paste from PyTorch official master until it's in a few official releases - RW # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf def norm_cdf(x): # Computes standard normal cumulative distribution function return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0 if (mean < a - 2 * std) or (mean > b + 2 * std): logger.warning( "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. " "The distribution of values may be incorrect." ) with torch.no_grad(): # Values are generated by using a truncated uniform distribution and # then using the inverse CDF for the normal distribution. # Get upper and lower cdf values l = norm_cdf((a - mean) / std) u = norm_cdf((b - mean) / std) # Uniformly fill tensor with values from [l, u], then translate to # [2l-1, 2u-1]. tensor.uniform_(2 * l - 1, 2 * u - 1) # Use inverse cdf transform for normal distribution to get truncated # standard normal tensor.erfinv_() # Transform to proper mean, std tensor.mul_(std * math.sqrt(2.0)) tensor.add_(mean) # Clamp to ensure it's in the proper range tensor.clamp_(min=a, max=b) return tensor def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0): # type: (torch.Tensor, float, float, float, float) -> torch.Tensor r"""Fills the input Tensor with values drawn from a truncated normal distribution. The values are effectively drawn from the normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` with values outside :math:`[a, b]` redrawn until they are within the bounds. The method used for generating the random values works best when :math:`a \leq \text{mean} \leq b`. 
Args: tensor: an n-dimensional `torch.Tensor` mean: the mean of the normal distribution std: the standard deviation of the normal distribution a: the minimum cutoff value b: the maximum cutoff value Examples: >>> w = torch.empty(3, 5) >>> nn.init.trunc_normal_(w) """ return _no_grad_trunc_normal_(tensor, mean, std, a, b) class PatchEmbed(nn.Module): """2D Image to Patch Embedding""" def __init__( self, height=224, width=224, patch_size=16, in_channels=3, embed_dim=768, layer_norm=False, flatten=True, bias=True, use_pos_embed=True, ): super().__init__() num_patches = (height // patch_size) * (width // patch_size) self.flatten = flatten self.layer_norm = layer_norm self.proj = nn.Conv2d( in_channels, embed_dim, kernel_size=(patch_size, patch_size), stride=patch_size, bias=bias ) if layer_norm: self.norm = nn.LayerNorm(embed_dim, elementwise_affine=False, eps=1e-6) else: self.norm = None self.use_pos_embed = use_pos_embed if self.use_pos_embed: pos_embed = get_2d_sincos_pos_embed(embed_dim, int(num_patches**0.5)) self.register_buffer("pos_embed", torch.from_numpy(pos_embed).float().unsqueeze(0), persistent=False) def forward(self, latent): latent = self.proj(latent) if self.flatten: latent = latent.flatten(2).transpose(1, 2) # BCHW -> BNC if self.layer_norm: latent = self.norm(latent) if self.use_pos_embed: return latent + self.pos_embed else: return latent class SkipBlock(nn.Module): def __init__(self, dim: int): super().__init__() self.skip_linear = nn.Linear(2 * dim, dim) # Use torch.nn.LayerNorm for now, following the original code self.norm = nn.LayerNorm(dim) def forward(self, x, skip): x = self.skip_linear(torch.cat([x, skip], dim=-1)) x = self.norm(x) return x # Modified to support both pre-LayerNorm and post-LayerNorm configurations # Don't support AdaLayerNormZero for now # Modified from diffusers.models.attention.BasicTransformerBlock class UTransformerBlock(nn.Module): r""" A modification of BasicTransformerBlock which supports pre-LayerNorm and post-LayerNorm configurations. Parameters: dim (`int`): The number of channels in the input and output. num_attention_heads (`int`): The number of heads to use for multi-head attention. attention_head_dim (`int`): The number of channels in each head. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention. activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. num_embeds_ada_norm (:obj: `int`, *optional*): The number of diffusion steps used during training. See `Transformer2DModel`. attention_bias (:obj: `bool`, *optional*, defaults to `False`): Configure if the attentions should contain a bias parameter. only_cross_attention (`bool`, *optional*): Whether to use only cross-attention layers. In this case two cross attention layers are used. double_self_attention (`bool`, *optional*): Whether to use two self-attention layers. In this case no cross attention layers are used. upcast_attention (`bool`, *optional*): Whether to upcast the query and key to float32 when performing the attention calculation. norm_elementwise_affine (`bool`, *optional*): Whether to use learnable per-element affine parameters during layer normalization. norm_type (`str`, defaults to `"layer_norm"`): The layer norm implementation to use. 
pre_layer_norm (`bool`, *optional*): Whether to perform layer normalization before the attention and feedforward operations ("pre-LayerNorm"), as opposed to after ("post-LayerNorm"). Note that `BasicTransformerBlock` uses pre-LayerNorm, e.g. `pre_layer_norm = True`. final_dropout (`bool`, *optional*): Whether to use a final Dropout layer after the feedforward network. """ def __init__( self, dim: int, num_attention_heads: int, attention_head_dim: int, dropout=0.0, cross_attention_dim: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, attention_bias: bool = False, only_cross_attention: bool = False, double_self_attention: bool = False, upcast_attention: bool = False, norm_elementwise_affine: bool = True, norm_type: str = "layer_norm", pre_layer_norm: bool = True, final_dropout: bool = False, ): super().__init__() self.only_cross_attention = only_cross_attention self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm" self.pre_layer_norm = pre_layer_norm if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to" f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}." ) # 1. Self-Attn self.attn1 = Attention( query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, cross_attention_dim=cross_attention_dim if only_cross_attention else None, upcast_attention=upcast_attention, ) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: self.attn2 = Attention( query_dim=dim, cross_attention_dim=cross_attention_dim if not double_self_attention else None, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, upcast_attention=upcast_attention, ) # is self-attn if encoder_hidden_states is none else: self.attn2 = None if self.use_ada_layer_norm: self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm) else: self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. self.norm2 = ( AdaLayerNorm(dim, num_embeds_ada_norm) if self.use_ada_layer_norm else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) ) else: self.norm2 = None # 3. Feed-forward self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout) def forward( self, hidden_states, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, timestep=None, cross_attention_kwargs=None, class_labels=None, ): # Pre-LayerNorm if self.pre_layer_norm: if self.use_ada_layer_norm: norm_hidden_states = self.norm1(hidden_states, timestep) else: norm_hidden_states = self.norm1(hidden_states) else: norm_hidden_states = hidden_states # 1. 
Self-Attention cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} attn_output = self.attn1( norm_hidden_states, encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, attention_mask=attention_mask, **cross_attention_kwargs, ) # Post-LayerNorm if not self.pre_layer_norm: if self.use_ada_layer_norm: attn_output = self.norm1(attn_output, timestep) else: attn_output = self.norm1(attn_output) hidden_states = attn_output + hidden_states if self.attn2 is not None: # Pre-LayerNorm if self.pre_layer_norm: norm_hidden_states = ( self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states) ) else: norm_hidden_states = hidden_states # TODO (Birch-San): Here we should prepare the encoder_attention mask correctly # prepare attention mask here # 2. Cross-Attention attn_output = self.attn2( norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=encoder_attention_mask, **cross_attention_kwargs, ) # Post-LayerNorm if not self.pre_layer_norm: attn_output = self.norm2(attn_output, timestep) if self.use_ada_layer_norm else self.norm2(attn_output) hidden_states = attn_output + hidden_states # 3. Feed-forward # Pre-LayerNorm if self.pre_layer_norm: norm_hidden_states = self.norm3(hidden_states) else: norm_hidden_states = hidden_states ff_output = self.ff(norm_hidden_states) # Post-LayerNorm if not self.pre_layer_norm: ff_output = self.norm3(ff_output) hidden_states = ff_output + hidden_states return hidden_states # Like UTransformerBlock except with LayerNorms on the residual backbone of the block # Modified from diffusers.models.attention.BasicTransformerBlock class UniDiffuserBlock(nn.Module): r""" A modification of BasicTransformerBlock which supports pre-LayerNorm and post-LayerNorm configurations and puts the LayerNorms on the residual backbone of the block. This matches the transformer block in the [original UniDiffuser implementation](https://github.com/thu-ml/unidiffuser/blob/main/libs/uvit_multi_post_ln_v1.py#L104). Parameters: dim (`int`): The number of channels in the input and output. num_attention_heads (`int`): The number of heads to use for multi-head attention. attention_head_dim (`int`): The number of channels in each head. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention. activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. num_embeds_ada_norm (:obj: `int`, *optional*): The number of diffusion steps used during training. See `Transformer2DModel`. attention_bias (:obj: `bool`, *optional*, defaults to `False`): Configure if the attentions should contain a bias parameter. only_cross_attention (`bool`, *optional*): Whether to use only cross-attention layers. In this case two cross attention layers are used. double_self_attention (`bool`, *optional*): Whether to use two self-attention layers. In this case no cross attention layers are used. upcast_attention (`bool`, *optional*): Whether to upcast the query and key to float() when performing the attention calculation. norm_elementwise_affine (`bool`, *optional*): Whether to use learnable per-element affine parameters during layer normalization. norm_type (`str`, defaults to `"layer_norm"`): The layer norm implementation to use. 
pre_layer_norm (`bool`, *optional*): Whether to perform layer normalization before the attention and feedforward operations ("pre-LayerNorm"), as opposed to after ("post-LayerNorm"). The original UniDiffuser implementation is post-LayerNorm (`pre_layer_norm = False`). final_dropout (`bool`, *optional*): Whether to use a final Dropout layer after the feedforward network. """ def __init__( self, dim: int, num_attention_heads: int, attention_head_dim: int, dropout=0.0, cross_attention_dim: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, attention_bias: bool = False, only_cross_attention: bool = False, double_self_attention: bool = False, upcast_attention: bool = False, norm_elementwise_affine: bool = True, norm_type: str = "layer_norm", pre_layer_norm: bool = False, final_dropout: bool = True, ): super().__init__() self.only_cross_attention = only_cross_attention self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm" self.pre_layer_norm = pre_layer_norm if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to" f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}." ) # 1. Self-Attn self.attn1 = Attention( query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, cross_attention_dim=cross_attention_dim if only_cross_attention else None, upcast_attention=upcast_attention, ) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: self.attn2 = Attention( query_dim=dim, cross_attention_dim=cross_attention_dim if not double_self_attention else None, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, upcast_attention=upcast_attention, ) # is self-attn if encoder_hidden_states is none else: self.attn2 = None if self.use_ada_layer_norm: self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm) else: self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. self.norm2 = ( AdaLayerNorm(dim, num_embeds_ada_norm) if self.use_ada_layer_norm else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) ) else: self.norm2 = None # 3. Feed-forward self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout) def forward( self, hidden_states, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, timestep=None, cross_attention_kwargs=None, class_labels=None, ): # Following the diffusers transformer block implementation, put the LayerNorm on the # residual backbone # Pre-LayerNorm if self.pre_layer_norm: if self.use_ada_layer_norm: hidden_states = self.norm1(hidden_states, timestep) else: hidden_states = self.norm1(hidden_states) # 1. 
Self-Attention cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} attn_output = self.attn1( hidden_states, encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, attention_mask=attention_mask, **cross_attention_kwargs, ) hidden_states = attn_output + hidden_states # Following the diffusers transformer block implementation, put the LayerNorm on the # residual backbone # Post-LayerNorm if not self.pre_layer_norm: if self.use_ada_layer_norm: hidden_states = self.norm1(hidden_states, timestep) else: hidden_states = self.norm1(hidden_states) if self.attn2 is not None: # Pre-LayerNorm if self.pre_layer_norm: hidden_states = ( self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states) ) # TODO (Birch-San): Here we should prepare the encoder_attention mask correctly # prepare attention mask here # 2. Cross-Attention attn_output = self.attn2( hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=encoder_attention_mask, **cross_attention_kwargs, ) hidden_states = attn_output + hidden_states # Post-LayerNorm if not self.pre_layer_norm: hidden_states = ( self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states) ) # 3. Feed-forward # Pre-LayerNorm if self.pre_layer_norm: hidden_states = self.norm3(hidden_states) ff_output = self.ff(hidden_states) hidden_states = ff_output + hidden_states # Post-LayerNorm if not self.pre_layer_norm: hidden_states = self.norm3(hidden_states) return hidden_states # Modified from diffusers.models.transformer_2d.Transformer2DModel # Modify the transformer block structure to be U-Net like following U-ViT # Only supports patch-style input and torch.nn.LayerNorm currently # https://github.com/baofff/U-ViT class UTransformer2DModel(ModelMixin, ConfigMixin): """ Transformer model based on the [U-ViT](https://github.com/baofff/U-ViT) architecture for image-like data. Compared to [`Transformer2DModel`], this model has skip connections between transformer blocks in a "U"-shaped fashion, similar to a U-Net. Supports only continuous (actual embeddings) inputs, which are embedded via a [`PatchEmbed`] layer and then reshaped to (b, t, d). Parameters: num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention. attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head. in_channels (`int`, *optional*): Pass if the input is continuous. The number of channels in the input. out_channels (`int`, *optional*): The number of output channels; if `None`, defaults to `in_channels`. num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. norm_num_groups (`int`, *optional*, defaults to `32`): The number of groups to use when performing Group Normalization. cross_attention_dim (`int`, *optional*): The number of encoder_hidden_states dimensions to use. attention_bias (`bool`, *optional*): Configure if the TransformerBlocks' attention should contain a bias parameter. sample_size (`int`, *optional*): Pass if the input is discrete. The width of the latent images. Note that this is fixed at training time as it is used for learning a number of position embeddings. See `ImagePositionalEmbeddings`. num_vector_embeds (`int`, *optional*): Pass if the input is discrete. The number of classes of the vector embeddings of the latent pixels. 
Includes the class for the masked latent pixel. patch_size (`int`, *optional*, defaults to 2): The patch size to use in the patch embedding. activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. num_embeds_ada_norm ( `int`, *optional*): Pass if at least one of the norm_layers is `AdaLayerNorm`. The number of diffusion steps used during training. Note that this is fixed at training time as it is used to learn a number of embeddings that are added to the hidden states. During inference, you can denoise for up to but not more than steps than `num_embeds_ada_norm`. use_linear_projection (int, *optional*): TODO: Not used only_cross_attention (`bool`, *optional*): Whether to use only cross-attention layers. In this case two cross attention layers are used in each transformer block. upcast_attention (`bool`, *optional*): Whether to upcast the query and key to float() when performing the attention calculation. norm_type (`str`, *optional*, defaults to `"layer_norm"`): The Layer Normalization implementation to use. Defaults to `torch.nn.LayerNorm`. block_type (`str`, *optional*, defaults to `"unidiffuser"`): The transformer block implementation to use. If `"unidiffuser"`, has the LayerNorms on the residual backbone of each transformer block; otherwise has them in the attention/feedforward branches (the standard behavior in `diffusers`.) pre_layer_norm (`bool`, *optional*): Whether to perform layer normalization before the attention and feedforward operations ("pre-LayerNorm"), as opposed to after ("post-LayerNorm"). The original UniDiffuser implementation is post-LayerNorm (`pre_layer_norm = False`). norm_elementwise_affine (`bool`, *optional*): Whether to use learnable per-element affine parameters during layer normalization. use_patch_pos_embed (`bool`, *optional*): Whether to use position embeddings inside the patch embedding layer (`PatchEmbed`). final_dropout (`bool`, *optional*): Whether to use a final Dropout layer after the feedforward network. """ @register_to_config def __init__( self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, patch_size: Optional[int] = 2, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, use_linear_projection: bool = False, only_cross_attention: bool = False, upcast_attention: bool = False, norm_type: str = "layer_norm", block_type: str = "unidiffuser", pre_layer_norm: bool = False, norm_elementwise_affine: bool = True, use_patch_pos_embed=False, ff_final_dropout: bool = False, ): super().__init__() self.use_linear_projection = use_linear_projection self.num_attention_heads = num_attention_heads self.attention_head_dim = attention_head_dim inner_dim = num_attention_heads * attention_head_dim # 1. Input # Only support patch input of shape (batch_size, num_channels, height, width) for now assert in_channels is not None and patch_size is not None, "Patch input requires in_channels and patch_size." assert sample_size is not None, "UTransformer2DModel over patched input must provide sample_size" # 2. 
Define input layers self.height = sample_size self.width = sample_size self.patch_size = patch_size self.pos_embed = PatchEmbed( height=sample_size, width=sample_size, patch_size=patch_size, in_channels=in_channels, embed_dim=inner_dim, use_pos_embed=use_patch_pos_embed, ) # 3. Define transformers blocks # Modify this to have in_blocks ("downsample" blocks, even though we don't actually downsample), a mid_block, # and out_blocks ("upsample" blocks). Like a U-Net, there are skip connections from in_blocks to out_blocks in # a "U"-shaped fashion (e.g. first in_block to last out_block, etc.). # Quick hack to make the transformer block type configurable if block_type == "unidiffuser": block_cls = UniDiffuserBlock else: block_cls = UTransformerBlock self.transformer_in_blocks = nn.ModuleList( [ block_cls( inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, attention_bias=attention_bias, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, norm_type=norm_type, pre_layer_norm=pre_layer_norm, norm_elementwise_affine=norm_elementwise_affine, final_dropout=ff_final_dropout, ) for d in range(num_layers // 2) ] ) self.transformer_mid_block = block_cls( inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, attention_bias=attention_bias, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, norm_type=norm_type, pre_layer_norm=pre_layer_norm, norm_elementwise_affine=norm_elementwise_affine, final_dropout=ff_final_dropout, ) # For each skip connection, we use a SkipBlock (concatenation + Linear + LayerNorm) to process the inputs # before each transformer out_block. self.transformer_out_blocks = nn.ModuleList( [ nn.ModuleDict( { "skip": SkipBlock( inner_dim, ), "block": block_cls( inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, attention_bias=attention_bias, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, norm_type=norm_type, pre_layer_norm=pre_layer_norm, norm_elementwise_affine=norm_elementwise_affine, final_dropout=ff_final_dropout, ), } ) for d in range(num_layers // 2) ] ) # 4. Define output layers self.out_channels = in_channels if out_channels is None else out_channels # Following the UniDiffuser U-ViT implementation, we process the transformer output with # a LayerNorm layer with per-element affine params self.norm_out = nn.LayerNorm(inner_dim) def forward( self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, cross_attention_kwargs=None, return_dict: bool = True, hidden_states_is_embedding: bool = False, unpatchify: bool = True, ): """ Args: hidden_states ( When discrete, `torch.LongTensor` of shape `(batch size, num latent pixels)`. When continuous, `torch.FloatTensor` of shape `(batch size, channel, height, width)`): Input hidden_states encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*): Conditional embeddings for cross attention layer. If not given, cross-attention defaults to self-attention. timestep ( `torch.long`, *optional*): Optional timestep to be applied as an embedding in AdaLayerNorm's. Used to indicate denoising step. 
class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*): Optional class labels to be applied as an embedding in AdaLayerZeroNorm. Used to indicate class labels conditioning. cross_attention_kwargs (*optional*): Keyword arguments to supply to the cross attention layers, if used. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple. hidden_states_is_embedding (`bool`, *optional*, defaults to `False`): Whether or not hidden_states is an embedding directly usable by the transformer. In this case we will ignore input handling (e.g. continuous, vectorized, etc.) and directly feed hidden_states into the transformer blocks. unpatchify (`bool`, *optional*, defaults to `True`): Whether to unpatchify the transformer output. Returns: [`~models.transformer_2d.Transformer2DModelOutput`] or `tuple`: [`~models.transformer_2d.Transformer2DModelOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. """ # 0. Check inputs if not unpatchify and return_dict: raise ValueError( f"Cannot both define `unpatchify`: {unpatchify} and `return_dict`: {return_dict} since when" f" `unpatchify` is {unpatchify} the returned output is of shape (batch_size, seq_len, hidden_dim)" " rather than (batch_size, num_channels, height, width)." ) # 1. Input if not hidden_states_is_embedding: hidden_states = self.pos_embed(hidden_states) # 2. Blocks # In ("downsample") blocks skips = [] for in_block in self.transformer_in_blocks: hidden_states = in_block( hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels, ) skips.append(hidden_states) # Mid block hidden_states = self.transformer_mid_block(hidden_states) # Out ("upsample") blocks for out_block in self.transformer_out_blocks: hidden_states = out_block["skip"](hidden_states, skips.pop()) hidden_states = out_block["block"]( hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels, ) # 3. Output # Don't support AdaLayerNorm for now, so no conditioning/scale/shift logic hidden_states = self.norm_out(hidden_states) # hidden_states = self.proj_out(hidden_states) if unpatchify: # unpatchify height = width = int(hidden_states.shape[1] ** 0.5) hidden_states = hidden_states.reshape( shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels) ) hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states) output = hidden_states.reshape( shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size) ) else: output = hidden_states if not return_dict: return (output,) return Transformer2DModelOutput(sample=output) class UniDiffuserModel(ModelMixin, ConfigMixin): """ Transformer model for a image-text [UniDiffuser](https://arxiv.org/pdf/2303.06555.pdf) model. This is a modification of [`UTransformer2DModel`] with input and output heads for the VAE-embedded latent image, the CLIP-embedded image, and the CLIP-embedded prompt (see paper for more details). Parameters: text_dim (`int`): The hidden dimension of the CLIP text model used to embed images. clip_img_dim (`int`): The hidden dimension of the CLIP vision model used to embed prompts. num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention. 
attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head. in_channels (`int`, *optional*): Pass if the input is continuous. The number of channels in the input. out_channels (`int`, *optional*): The number of output channels; if `None`, defaults to `in_channels`. num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. norm_num_groups (`int`, *optional*, defaults to `32`): The number of groups to use when performing Group Normalization. cross_attention_dim (`int`, *optional*): The number of encoder_hidden_states dimensions to use. attention_bias (`bool`, *optional*): Configure if the TransformerBlocks' attention should contain a bias parameter. sample_size (`int`, *optional*): Pass if the input is discrete. The width of the latent images. Note that this is fixed at training time as it is used for learning a number of position embeddings. See `ImagePositionalEmbeddings`. num_vector_embeds (`int`, *optional*): Pass if the input is discrete. The number of classes of the vector embeddings of the latent pixels. Includes the class for the masked latent pixel. patch_size (`int`, *optional*, defaults to 2): The patch size to use in the patch embedding. activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. num_embeds_ada_norm ( `int`, *optional*): Pass if at least one of the norm_layers is `AdaLayerNorm`. The number of diffusion steps used during training. Note that this is fixed at training time as it is used to learn a number of embeddings that are added to the hidden states. During inference, you can denoise for up to but not more than steps than `num_embeds_ada_norm`. use_linear_projection (int, *optional*): TODO: Not used only_cross_attention (`bool`, *optional*): Whether to use only cross-attention layers. In this case two cross attention layers are used in each transformer block. upcast_attention (`bool`, *optional*): Whether to upcast the query and key to float32 when performing the attention calculation. norm_type (`str`, *optional*, defaults to `"layer_norm"`): The Layer Normalization implementation to use. Defaults to `torch.nn.LayerNorm`. block_type (`str`, *optional*, defaults to `"unidiffuser"`): The transformer block implementation to use. If `"unidiffuser"`, has the LayerNorms on the residual backbone of each transformer block; otherwise has them in the attention/feedforward branches (the standard behavior in `diffusers`.) pre_layer_norm (`bool`, *optional*): Whether to perform layer normalization before the attention and feedforward operations ("pre-LayerNorm"), as opposed to after ("post-LayerNorm"). The original UniDiffuser implementation is post-LayerNorm (`pre_layer_norm = False`). norm_elementwise_affine (`bool`, *optional*): Whether to use learnable per-element affine parameters during layer normalization. use_patch_pos_embed (`bool`, *optional*): Whether to use position embeddings inside the patch embedding layer (`PatchEmbed`). ff_final_dropout (`bool`, *optional*): Whether to use a final Dropout layer after the feedforward network. use_data_type_embedding (`bool`, *optional*): Whether to use a data type embedding. 
This is only relevant for UniDiffuser-v1 style models; UniDiffuser-v1 is continue-trained from UniDiffuser-v0 on non-publically-available data and accepts a `data_type` argument, which can either be `1` to use the weights trained on non-publically-available data or `0` otherwise. This argument is subsequently embedded by the data type embedding, if used. """ @register_to_config def __init__( self, text_dim: int = 768, clip_img_dim: int = 512, num_text_tokens: int = 77, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, patch_size: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, use_linear_projection: bool = False, only_cross_attention: bool = False, upcast_attention: bool = False, norm_type: str = "layer_norm", block_type: str = "unidiffuser", pre_layer_norm: bool = False, use_timestep_embedding=False, norm_elementwise_affine: bool = True, use_patch_pos_embed=False, ff_final_dropout: bool = True, use_data_type_embedding: bool = False, ): super().__init__() # 0. Handle dimensions self.inner_dim = num_attention_heads * attention_head_dim assert sample_size is not None, "UniDiffuserModel over patched input must provide sample_size" self.sample_size = sample_size self.in_channels = in_channels self.out_channels = in_channels if out_channels is None else out_channels self.patch_size = patch_size # Assume image is square... self.num_patches = (self.sample_size // patch_size) * (self.sample_size // patch_size) # 1. Define input layers # 1.1 Input layers for text and image input # For now, only support patch input for VAE latent image input self.vae_img_in = PatchEmbed( height=sample_size, width=sample_size, patch_size=patch_size, in_channels=in_channels, embed_dim=self.inner_dim, use_pos_embed=use_patch_pos_embed, ) self.clip_img_in = nn.Linear(clip_img_dim, self.inner_dim) self.text_in = nn.Linear(text_dim, self.inner_dim) # 1.2. Timestep embeddings for t_img, t_text self.timestep_img_proj = Timesteps( self.inner_dim, flip_sin_to_cos=True, downscale_freq_shift=0, ) self.timestep_img_embed = ( TimestepEmbedding( self.inner_dim, 4 * self.inner_dim, out_dim=self.inner_dim, ) if use_timestep_embedding else nn.Identity() ) self.timestep_text_proj = Timesteps( self.inner_dim, flip_sin_to_cos=True, downscale_freq_shift=0, ) self.timestep_text_embed = ( TimestepEmbedding( self.inner_dim, 4 * self.inner_dim, out_dim=self.inner_dim, ) if use_timestep_embedding else nn.Identity() ) # 1.3. Positional embedding self.num_text_tokens = num_text_tokens self.num_tokens = 1 + 1 + num_text_tokens + 1 + self.num_patches self.pos_embed = nn.Parameter(torch.zeros(1, self.num_tokens, self.inner_dim)) self.pos_embed_drop = nn.Dropout(p=dropout) trunc_normal_(self.pos_embed, std=0.02) # 1.4. Handle data type token embeddings for UniDiffuser-V1, if necessary self.use_data_type_embedding = use_data_type_embedding if self.use_data_type_embedding: self.data_type_token_embedding = nn.Embedding(2, self.inner_dim) self.data_type_pos_embed_token = nn.Parameter(torch.zeros(1, 1, self.inner_dim)) # 2. 
Define transformer blocks self.transformer = UTransformer2DModel( num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, out_channels=out_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, patch_size=patch_size, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, norm_type=norm_type, block_type=block_type, pre_layer_norm=pre_layer_norm, norm_elementwise_affine=norm_elementwise_affine, use_patch_pos_embed=use_patch_pos_embed, ff_final_dropout=ff_final_dropout, ) # 3. Define output layers patch_dim = (patch_size**2) * out_channels self.vae_img_out = nn.Linear(self.inner_dim, patch_dim) self.clip_img_out = nn.Linear(self.inner_dim, clip_img_dim) self.text_out = nn.Linear(self.inner_dim, text_dim) @torch.jit.ignore def no_weight_decay(self): return {"pos_embed"} def forward( self, latent_image_embeds: torch.FloatTensor, image_embeds: torch.FloatTensor, prompt_embeds: torch.FloatTensor, timestep_img: Union[torch.Tensor, float, int], timestep_text: Union[torch.Tensor, float, int], data_type: Optional[Union[torch.Tensor, float, int]] = 1, encoder_hidden_states=None, cross_attention_kwargs=None, ): """ Args: latent_image_embeds (`torch.FloatTensor` of shape `(batch size, latent channels, height, width)`): Latent image representation from the VAE encoder. image_embeds (`torch.FloatTensor` of shape `(batch size, 1, clip_img_dim)`): CLIP-embedded image representation (unsqueezed in the first dimension). prompt_embeds (`torch.FloatTensor` of shape `(batch size, seq_len, text_dim)`): CLIP-embedded text representation. timestep_img (`torch.long` or `float` or `int`): Current denoising step for the image. timestep_text (`torch.long` or `float` or `int`): Current denoising step for the text. data_type: (`torch.int` or `float` or `int`, *optional*, defaults to `1`): Only used in UniDiffuser-v1-style models. Can be either `1`, to use weights trained on nonpublic data, or `0` otherwise. encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*): Conditional embeddings for cross attention layer. If not given, cross-attention defaults to self-attention. cross_attention_kwargs (*optional*): Keyword arguments to supply to the cross attention layers, if used. Returns: `tuple`: Returns relevant parts of the model's noise prediction: the first element of the tuple is tbe VAE image embedding, the second element is the CLIP image embedding, and the third element is the CLIP text embedding. """ batch_size = latent_image_embeds.shape[0] # 1. Input # 1.1. Map inputs to shape (B, N, inner_dim) vae_hidden_states = self.vae_img_in(latent_image_embeds) clip_hidden_states = self.clip_img_in(image_embeds) text_hidden_states = self.text_in(prompt_embeds) num_text_tokens, num_img_tokens = text_hidden_states.size(1), vae_hidden_states.size(1) # 1.2. 
Encode image timesteps to single token (B, 1, inner_dim) if not torch.is_tensor(timestep_img): timestep_img = torch.tensor([timestep_img], dtype=torch.long, device=vae_hidden_states.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timestep_img = timestep_img * torch.ones(batch_size, dtype=timestep_img.dtype, device=timestep_img.device) timestep_img_token = self.timestep_img_proj(timestep_img) # t_img_token does not contain any weights and will always return f32 tensors # but time_embedding might be fp16, so we need to cast here. timestep_img_token = timestep_img_token.to(dtype=self.dtype) timestep_img_token = self.timestep_img_embed(timestep_img_token) timestep_img_token = timestep_img_token.unsqueeze(dim=1) # 1.3. Encode text timesteps to single token (B, 1, inner_dim) if not torch.is_tensor(timestep_text): timestep_text = torch.tensor([timestep_text], dtype=torch.long, device=vae_hidden_states.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timestep_text = timestep_text * torch.ones(batch_size, dtype=timestep_text.dtype, device=timestep_text.device) timestep_text_token = self.timestep_text_proj(timestep_text) # t_text_token does not contain any weights and will always return f32 tensors # but time_embedding might be fp16, so we need to cast here. timestep_text_token = timestep_text_token.to(dtype=self.dtype) timestep_text_token = self.timestep_text_embed(timestep_text_token) timestep_text_token = timestep_text_token.unsqueeze(dim=1) # 1.4. Concatenate all of the embeddings together. if self.use_data_type_embedding: assert data_type is not None, "data_type must be supplied if the model uses a data type embedding" if not torch.is_tensor(data_type): data_type = torch.tensor([data_type], dtype=torch.int, device=vae_hidden_states.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML data_type = data_type * torch.ones(batch_size, dtype=data_type.dtype, device=data_type.device) data_type_token = self.data_type_token_embedding(data_type).unsqueeze(dim=1) hidden_states = torch.cat( [ timestep_img_token, timestep_text_token, data_type_token, text_hidden_states, clip_hidden_states, vae_hidden_states, ], dim=1, ) else: hidden_states = torch.cat( [timestep_img_token, timestep_text_token, text_hidden_states, clip_hidden_states, vae_hidden_states], dim=1, ) # 1.5. Prepare the positional embeddings and add to hidden states # Note: I think img_vae should always have the proper shape, so there's no need to interpolate # the position embeddings. if self.use_data_type_embedding: pos_embed = torch.cat( [self.pos_embed[:, : 1 + 1, :], self.data_type_pos_embed_token, self.pos_embed[:, 1 + 1 :, :]], dim=1 ) else: pos_embed = self.pos_embed hidden_states = hidden_states + pos_embed hidden_states = self.pos_embed_drop(hidden_states) # 2. Blocks hidden_states = self.transformer( hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=None, class_labels=None, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, hidden_states_is_embedding=True, unpatchify=False, )[0] # 3. Output # Split out the predicted noise representation. 
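        # The split below mirrors the packing order from step 1.4: [t_img token,
        # t_text token, optional data_type token, text tokens, CLIP image token,
        # VAE patch tokens], so the chunk sizes must match that concatenation.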
if self.use_data_type_embedding: ( t_img_token_out, t_text_token_out, data_type_token_out, text_out, img_clip_out, img_vae_out, ) = hidden_states.split((1, 1, 1, num_text_tokens, 1, num_img_tokens), dim=1) else: t_img_token_out, t_text_token_out, text_out, img_clip_out, img_vae_out = hidden_states.split( (1, 1, num_text_tokens, 1, num_img_tokens), dim=1 ) img_vae_out = self.vae_img_out(img_vae_out) # unpatchify height = width = int(img_vae_out.shape[1] ** 0.5) img_vae_out = img_vae_out.reshape( shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels) ) img_vae_out = torch.einsum("nhwpqc->nchpwq", img_vae_out) img_vae_out = img_vae_out.reshape( shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size) ) img_clip_out = self.clip_img_out(img_clip_out) text_out = self.text_out(text_out) return img_vae_out, img_clip_out, text_out
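
# Minimal usage sketch for UniDiffuserModel. The tiny hyperparameters below are
# illustrative assumptions chosen only to keep the forward pass cheap; they do
# not correspond to any released UniDiffuser checkpoint, and the helper name is
# purely for demonstration.
def _unidiffuser_forward_sketch():
    model = UniDiffuserModel(
        text_dim=32,
        clip_img_dim=32,
        num_text_tokens=77,
        num_attention_heads=2,
        attention_head_dim=8,
        in_channels=4,
        num_layers=2,
        sample_size=16,
        patch_size=2,
    )
    batch_size = 1
    latent_image_embeds = torch.randn(batch_size, 4, 16, 16)  # VAE latent
    image_embeds = torch.randn(batch_size, 1, 32)  # CLIP image embedding
    prompt_embeds = torch.randn(batch_size, 77, 32)  # CLIP text embedding
    img_vae_out, img_clip_out, text_out = model(
        latent_image_embeds,
        image_embeds,
        prompt_embeds,
        timestep_img=1,
        timestep_text=1,
    )
    # The three outputs mirror the three input modalities and keep their shapes.
    return img_vae_out.shape, img_clip_out.shape, text_out.shape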
diffusers/src/diffusers/pipelines/unidiffuser/modeling_uvit.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/unidiffuser/modeling_uvit.py", "repo_id": "diffusers", "token_count": 24180 }
117
import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils import SchedulerMixin def gumbel_noise(t, generator=None): device = generator.device if generator is not None else t.device noise = torch.zeros_like(t, device=device).uniform_(0, 1, generator=generator).to(t.device) return -torch.log((-torch.log(noise.clamp(1e-20))).clamp(1e-20)) def mask_by_random_topk(mask_len, probs, temperature=1.0, generator=None): confidence = torch.log(probs.clamp(1e-20)) + temperature * gumbel_noise(probs, generator=generator) sorted_confidence = torch.sort(confidence, dim=-1).values cut_off = torch.gather(sorted_confidence, 1, mask_len.long()) masking = confidence < cut_off return masking @dataclass class AmusedSchedulerOutput(BaseOutput): """ Output class for the scheduler's `step` function output. Args: prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the denoising loop. pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): The predicted denoised sample `(x_{0})` based on the model output from the current timestep. `pred_original_sample` can be used to preview progress or for guidance. """ prev_sample: torch.FloatTensor pred_original_sample: torch.FloatTensor = None class AmusedScheduler(SchedulerMixin, ConfigMixin): order = 1 temperatures: torch.Tensor @register_to_config def __init__( self, mask_token_id: int, masking_schedule: str = "cosine", ): self.temperatures = None self.timesteps = None def set_timesteps( self, num_inference_steps: int, temperature: Union[int, Tuple[int, int], List[int]] = (2, 0), device: Union[str, torch.device] = None, ): self.timesteps = torch.arange(num_inference_steps, device=device).flip(0) if isinstance(temperature, (tuple, list)): self.temperatures = torch.linspace(temperature[0], temperature[1], num_inference_steps, device=device) else: self.temperatures = torch.linspace(temperature, 0.01, num_inference_steps, device=device) def step( self, model_output: torch.FloatTensor, timestep: torch.long, sample: torch.LongTensor, starting_mask_ratio: int = 1, generator: Optional[torch.Generator] = None, return_dict: bool = True, ) -> Union[AmusedSchedulerOutput, Tuple]: two_dim_input = sample.ndim == 3 and model_output.ndim == 4 if two_dim_input: batch_size, codebook_size, height, width = model_output.shape sample = sample.reshape(batch_size, height * width) model_output = model_output.reshape(batch_size, codebook_size, height * width).permute(0, 2, 1) unknown_map = sample == self.config.mask_token_id probs = model_output.softmax(dim=-1) device = probs.device probs_ = probs.to(generator.device) if generator is not None else probs # handles when generator is on CPU if probs_.device.type == "cpu" and probs_.dtype != torch.float32: probs_ = probs_.float() # multinomial is not implemented for cpu half precision probs_ = probs_.reshape(-1, probs.size(-1)) pred_original_sample = torch.multinomial(probs_, 1, generator=generator).to(device=device) pred_original_sample = pred_original_sample[:, 0].view(*probs.shape[:-1]) pred_original_sample = torch.where(unknown_map, pred_original_sample, sample) if timestep == 0: prev_sample = pred_original_sample else: seq_len = sample.shape[1] step_idx = (self.timesteps == 
timestep).nonzero() ratio = (step_idx + 1) / len(self.timesteps) if self.config.masking_schedule == "cosine": mask_ratio = torch.cos(ratio * math.pi / 2) elif self.config.masking_schedule == "linear": mask_ratio = 1 - ratio else: raise ValueError(f"unknown masking schedule {self.config.masking_schedule}") mask_ratio = starting_mask_ratio * mask_ratio mask_len = (seq_len * mask_ratio).floor() # do not mask more than amount previously masked mask_len = torch.min(unknown_map.sum(dim=-1, keepdim=True) - 1, mask_len) # mask at least one mask_len = torch.max(torch.tensor([1], device=model_output.device), mask_len) selected_probs = torch.gather(probs, -1, pred_original_sample[:, :, None])[:, :, 0] # Ignores the tokens given in the input by overwriting their confidence. selected_probs = torch.where(unknown_map, selected_probs, torch.finfo(selected_probs.dtype).max) masking = mask_by_random_topk(mask_len, selected_probs, self.temperatures[step_idx], generator) # Masks tokens with lower confidence. prev_sample = torch.where(masking, self.config.mask_token_id, pred_original_sample) if two_dim_input: prev_sample = prev_sample.reshape(batch_size, height, width) pred_original_sample = pred_original_sample.reshape(batch_size, height, width) if not return_dict: return (prev_sample, pred_original_sample) return AmusedSchedulerOutput(prev_sample, pred_original_sample) def add_noise(self, sample, timesteps, generator=None): step_idx = (self.timesteps == timesteps).nonzero() ratio = (step_idx + 1) / len(self.timesteps) if self.config.masking_schedule == "cosine": mask_ratio = torch.cos(ratio * math.pi / 2) elif self.config.masking_schedule == "linear": mask_ratio = 1 - ratio else: raise ValueError(f"unknown masking schedule {self.config.masking_schedule}") mask_indices = ( torch.rand( sample.shape, device=generator.device if generator is not None else sample.device, generator=generator ).to(sample.device) < mask_ratio ) masked_sample = sample.clone() masked_sample[mask_indices] = self.config.mask_token_id return masked_sample
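
# Minimal usage sketch for AmusedScheduler. The codebook size, sequence length
# and mask token id are illustrative assumptions (not values from a released
# aMUSEd checkpoint); random logits stand in for the transformer output.
def _amused_scheduler_sketch(num_inference_steps: int = 4):
    codebook_size, seq_len, mask_token_id = 16, 9, 16
    scheduler = AmusedScheduler(mask_token_id=mask_token_id)
    scheduler.set_timesteps(num_inference_steps)
    generator = torch.Generator().manual_seed(0)
    # Start from a fully masked sample, as the sampling pipeline would.
    sample = torch.full((1, seq_len), mask_token_id, dtype=torch.long)
    for t in scheduler.timesteps:
        model_output = torch.randn(1, seq_len, codebook_size)  # stand-in for model logits
        sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample
    return sample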
diffusers/src/diffusers/schedulers/scheduling_amused.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_amused.py", "repo_id": "diffusers", "token_count": 2780 }
118
# Copyright 2023 Google Brain and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from ..utils.torch_utils import randn_tensor from .scheduling_utils import SchedulerMixin, SchedulerOutput @dataclass class SdeVeOutput(BaseOutput): """ Output class for the scheduler's `step` function output. Args: prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the denoising loop. prev_sample_mean (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): Mean averaged `prev_sample` over previous timesteps. """ prev_sample: torch.FloatTensor prev_sample_mean: torch.FloatTensor class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin): """ `ScoreSdeVeScheduler` is a variance exploding stochastic differential equation (SDE) scheduler. This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic methods the library implements for all schedulers such as loading and saving. Args: num_train_timesteps (`int`, defaults to 1000): The number of diffusion steps to train the model. snr (`float`, defaults to 0.15): A coefficient weighting the step from the `model_output` sample (from the network) to the random noise. sigma_min (`float`, defaults to 0.01): The initial noise scale for the sigma sequence in the sampling procedure. The minimum sigma should mirror the distribution of the data. sigma_max (`float`, defaults to 1348.0): The maximum value used for the range of continuous timesteps passed into the model. sampling_eps (`float`, defaults to 1e-5): The end value of sampling where timesteps decrease progressively from 1 to epsilon. correct_steps (`int`, defaults to 1): The number of correction steps performed on a produced sample. """ order = 1 @register_to_config def __init__( self, num_train_timesteps: int = 2000, snr: float = 0.15, sigma_min: float = 0.01, sigma_max: float = 1348.0, sampling_eps: float = 1e-5, correct_steps: int = 1, ): # standard deviation of the initial noise distribution self.init_noise_sigma = sigma_max # setable values self.timesteps = None self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps) def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor: """ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep. Args: sample (`torch.FloatTensor`): The input sample. timestep (`int`, *optional*): The current timestep in the diffusion chain. Returns: `torch.FloatTensor`: A scaled input sample. 
""" return sample def set_timesteps( self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None ): """ Sets the continuous timesteps used for the diffusion chain (to be run before inference). Args: num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. sampling_eps (`float`, *optional*): The final timestep value (overrides value given during scheduler instantiation). device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. """ sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device) def set_sigmas( self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None ): """ Sets the noise scales used for the diffusion chain (to be run before inference). The sigmas control the weight of the `drift` and `diffusion` components of the sample update. Args: num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. sigma_min (`float`, optional): The initial noise scale value (overrides value given during scheduler instantiation). sigma_max (`float`, optional): The final noise scale value (overrides value given during scheduler instantiation). sampling_eps (`float`, optional): The final timestep value (overrides value given during scheduler instantiation). """ sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps if self.timesteps is None: self.set_timesteps(num_inference_steps, sampling_eps) self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps) self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps)) self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps]) def get_adjacent_sigma(self, timesteps, t): return torch.where( timesteps == 0, torch.zeros_like(t.to(timesteps.device)), self.discrete_sigmas[timesteps - 1].to(timesteps.device), ) def step_pred( self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, generator: Optional[torch.Generator] = None, return_dict: bool = True, ) -> Union[SdeVeOutput, Tuple]: """ Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion process from the learned model outputs (most often the predicted noise). Args: model_output (`torch.FloatTensor`): The direct output from learned diffusion model. timestep (`int`): The current discrete timestep in the diffusion chain. sample (`torch.FloatTensor`): A current instance of a sample created by the diffusion process. generator (`torch.Generator`, *optional*): A random number generator. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~schedulers.scheduling_sde_ve.SdeVeOutput`] or `tuple`. Returns: [`~schedulers.scheduling_sde_ve.SdeVeOutput`] or `tuple`: If return_dict is `True`, [`~schedulers.scheduling_sde_ve.SdeVeOutput`] is returned, otherwise a tuple is returned where the first element is the sample tensor. 
""" if self.timesteps is None: raise ValueError( "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" ) timestep = timestep * torch.ones( sample.shape[0], device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0]) timesteps = (timestep * (len(self.timesteps) - 1)).long() # mps requires indices to be in the same device, so we use cpu as is the default with cuda timesteps = timesteps.to(self.discrete_sigmas.device) sigma = self.discrete_sigmas[timesteps].to(sample.device) adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device) drift = torch.zeros_like(sample) diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5 # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x) # also equation 47 shows the analog from SDE models to ancestral sampling methods diffusion = diffusion.flatten() while len(diffusion.shape) < len(sample.shape): diffusion = diffusion.unsqueeze(-1) drift = drift - diffusion**2 * model_output # equation 6: sample noise for the diffusion term of noise = randn_tensor( sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype ) prev_sample_mean = sample - drift # subtract because `dt` is a small negative timestep # TODO is the variable diffusion the correct scaling term for the noise? prev_sample = prev_sample_mean + diffusion * noise # add impact of diffusion field g if not return_dict: return (prev_sample, prev_sample_mean) return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean) def step_correct( self, model_output: torch.FloatTensor, sample: torch.FloatTensor, generator: Optional[torch.Generator] = None, return_dict: bool = True, ) -> Union[SchedulerOutput, Tuple]: """ Correct the predicted sample based on the `model_output` of the network. This is often run repeatedly after making the prediction for the previous timestep. Args: model_output (`torch.FloatTensor`): The direct output from learned diffusion model. sample (`torch.FloatTensor`): A current instance of a sample created by the diffusion process. generator (`torch.Generator`, *optional*): A random number generator. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~schedulers.scheduling_sde_ve.SdeVeOutput`] or `tuple`. Returns: [`~schedulers.scheduling_sde_ve.SdeVeOutput`] or `tuple`: If return_dict is `True`, [`~schedulers.scheduling_sde_ve.SdeVeOutput`] is returned, otherwise a tuple is returned where the first element is the sample tensor. """ if self.timesteps is None: raise ValueError( "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" ) # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. 
of z" # sample noise for correction noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device) # compute step size from the model_output, the noise, and the snr grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean() noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean() step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2 step_size = step_size * torch.ones(sample.shape[0]).to(sample.device) # self.repeat_scalar(step_size, sample.shape[0]) # compute corrected sample: model_output term and noise term step_size = step_size.flatten() while len(step_size.shape) < len(sample.shape): step_size = step_size.unsqueeze(-1) prev_sample_mean = sample + step_size * model_output prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=prev_sample) def add_noise( self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.FloatTensor, ) -> torch.FloatTensor: # Make sure sigmas and timesteps have the same device and dtype as original_samples timesteps = timesteps.to(original_samples.device) sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps] noise = ( noise * sigmas[:, None, None, None] if noise is not None else torch.randn_like(original_samples) * sigmas[:, None, None, None] ) noisy_samples = noise + original_samples return noisy_samples def __len__(self): return self.config.num_train_timesteps
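
# Minimal predictor-corrector sampling sketch for ScoreSdeVeScheduler. Random
# noise stands in for the score network, and the shapes and step count are
# illustrative assumptions, so this only demonstrates the call pattern.
def _sde_ve_sampling_sketch(num_inference_steps: int = 10):
    scheduler = ScoreSdeVeScheduler()
    scheduler.set_timesteps(num_inference_steps)
    scheduler.set_sigmas(num_inference_steps)
    generator = torch.Generator().manual_seed(0)
    sample = torch.randn(1, 3, 32, 32, generator=generator) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        # corrector step(s)
        for _ in range(scheduler.config.correct_steps):
            score = torch.randn_like(sample)  # stand-in for the score model output
            sample = scheduler.step_correct(score, sample, generator=generator).prev_sample
        # predictor step
        score = torch.randn_like(sample)
        sample = scheduler.step_pred(score, t, sample, generator=generator).prev_sample
    return sample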
diffusers/src/diffusers/schedulers/scheduling_sde_ve.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_sde_ve.py", "repo_id": "diffusers", "token_count": 5400 }
119
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class OnnxRuntimeModel(metaclass=DummyObject): _backends = ["onnx"] def __init__(self, *args, **kwargs): requires_backends(self, ["onnx"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["onnx"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["onnx"])
diffusers/src/diffusers/utils/dummy_onnx_objects.py/0
{ "file_path": "diffusers/src/diffusers/utils/dummy_onnx_objects.py", "repo_id": "diffusers", "token_count": 202 }
120
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PEFT utilities: Utilities related to peft library """ import collections import importlib from typing import Optional from packaging import version from .import_utils import is_peft_available, is_torch_available if is_torch_available(): import torch def recurse_remove_peft_layers(model): r""" Recursively replace all instances of `LoraLayer` with corresponding new layers in `model`. """ from peft.tuners.tuners_utils import BaseTunerLayer has_base_layer_pattern = False for module in model.modules(): if isinstance(module, BaseTunerLayer): has_base_layer_pattern = hasattr(module, "base_layer") break if has_base_layer_pattern: from peft.utils import _get_submodules key_list = [key for key, _ in model.named_modules() if "lora" not in key] for key in key_list: try: parent, target, target_name = _get_submodules(model, key) except AttributeError: continue if hasattr(target, "base_layer"): setattr(parent, target_name, target.get_base_layer()) else: # This is for backwards compatibility with PEFT <= 0.6.2. # TODO can be removed once that PEFT version is no longer supported. from peft.tuners.lora import LoraLayer for name, module in model.named_children(): if len(list(module.children())) > 0: ## compound module, go inside it recurse_remove_peft_layers(module) module_replaced = False if isinstance(module, LoraLayer) and isinstance(module, torch.nn.Linear): new_module = torch.nn.Linear(module.in_features, module.out_features, bias=module.bias is not None).to( module.weight.device ) new_module.weight = module.weight if module.bias is not None: new_module.bias = module.bias module_replaced = True elif isinstance(module, LoraLayer) and isinstance(module, torch.nn.Conv2d): new_module = torch.nn.Conv2d( module.in_channels, module.out_channels, module.kernel_size, module.stride, module.padding, module.dilation, module.groups, ).to(module.weight.device) new_module.weight = module.weight if module.bias is not None: new_module.bias = module.bias module_replaced = True if module_replaced: setattr(model, name, new_module) del module if torch.cuda.is_available(): torch.cuda.empty_cache() return model def scale_lora_layers(model, weight): """ Adjust the weightage given to the LoRA layers of the model. Args: model (`torch.nn.Module`): The model to scale. weight (`float`): The weight to be given to the LoRA layers. """ from peft.tuners.tuners_utils import BaseTunerLayer for module in model.modules(): if isinstance(module, BaseTunerLayer): module.scale_layer(weight) def unscale_lora_layers(model, weight: Optional[float] = None): """ Removes the previously passed weight given to the LoRA layers of the model. Args: model (`torch.nn.Module`): The model to scale. weight (`float`, *optional*): The weight to be given to the LoRA layers. If no scale is passed the scale of the lora layer will be re-initialized to the correct value. If 0.0 is passed, we will re-initialize the scale with the correct value. 
""" from peft.tuners.tuners_utils import BaseTunerLayer for module in model.modules(): if isinstance(module, BaseTunerLayer): if weight is not None and weight != 0: module.unscale_layer(weight) elif weight is not None and weight == 0: for adapter_name in module.active_adapters: # if weight == 0 unscale should re-set the scale to the original value. module.set_scale(adapter_name, 1.0) def get_peft_kwargs(rank_dict, network_alpha_dict, peft_state_dict, is_unet=True): rank_pattern = {} alpha_pattern = {} r = lora_alpha = list(rank_dict.values())[0] if len(set(rank_dict.values())) > 1: # get the rank occuring the most number of times r = collections.Counter(rank_dict.values()).most_common()[0][0] # for modules with rank different from the most occuring rank, add it to the `rank_pattern` rank_pattern = dict(filter(lambda x: x[1] != r, rank_dict.items())) rank_pattern = {k.split(".lora_B.")[0]: v for k, v in rank_pattern.items()} if network_alpha_dict is not None and len(network_alpha_dict) > 0: if len(set(network_alpha_dict.values())) > 1: # get the alpha occuring the most number of times lora_alpha = collections.Counter(network_alpha_dict.values()).most_common()[0][0] # for modules with alpha different from the most occuring alpha, add it to the `alpha_pattern` alpha_pattern = dict(filter(lambda x: x[1] != lora_alpha, network_alpha_dict.items())) if is_unet: alpha_pattern = { ".".join(k.split(".lora_A.")[0].split(".")).replace(".alpha", ""): v for k, v in alpha_pattern.items() } else: alpha_pattern = {".".join(k.split(".down.")[0].split(".")[:-1]): v for k, v in alpha_pattern.items()} else: lora_alpha = set(network_alpha_dict.values()).pop() # layer names without the Diffusers specific target_modules = list({name.split(".lora")[0] for name in peft_state_dict.keys()}) lora_config_kwargs = { "r": r, "lora_alpha": lora_alpha, "rank_pattern": rank_pattern, "alpha_pattern": alpha_pattern, "target_modules": target_modules, } return lora_config_kwargs def get_adapter_name(model): from peft.tuners.tuners_utils import BaseTunerLayer for module in model.modules(): if isinstance(module, BaseTunerLayer): return f"default_{len(module.r)}" return "default_0" def set_adapter_layers(model, enabled=True): from peft.tuners.tuners_utils import BaseTunerLayer for module in model.modules(): if isinstance(module, BaseTunerLayer): # The recent version of PEFT needs to call `enable_adapters` instead if hasattr(module, "enable_adapters"): module.enable_adapters(enabled=enabled) else: module.disable_adapters = not enabled def delete_adapter_layers(model, adapter_name): from peft.tuners.tuners_utils import BaseTunerLayer for module in model.modules(): if isinstance(module, BaseTunerLayer): if hasattr(module, "delete_adapter"): module.delete_adapter(adapter_name) else: raise ValueError( "The version of PEFT you are using is not compatible, please use a version that is greater than 0.6.1" ) # For transformers integration - we need to pop the adapter from the config if getattr(model, "_hf_peft_config_loaded", False) and hasattr(model, "peft_config"): model.peft_config.pop(adapter_name, None) # In case all adapters are deleted, we need to delete the config # and make sure to set the flag to False if len(model.peft_config) == 0: del model.peft_config model._hf_peft_config_loaded = None def set_weights_and_activate_adapters(model, adapter_names, weights): from peft.tuners.tuners_utils import BaseTunerLayer # iterate over each adapter, make it active and set the corresponding scaling weight for adapter_name, weight in 
zip(adapter_names, weights): for module in model.modules(): if isinstance(module, BaseTunerLayer): # For backward compatbility with previous PEFT versions if hasattr(module, "set_adapter"): module.set_adapter(adapter_name) else: module.active_adapter = adapter_name module.set_scale(adapter_name, weight) # set multiple active adapters for module in model.modules(): if isinstance(module, BaseTunerLayer): # For backward compatbility with previous PEFT versions if hasattr(module, "set_adapter"): module.set_adapter(adapter_names) else: module.active_adapter = adapter_names def check_peft_version(min_version: str) -> None: r""" Checks if the version of PEFT is compatible. Args: version (`str`): The version of PEFT to check against. """ if not is_peft_available(): raise ValueError("PEFT is not installed. Please install it with `pip install peft`") is_peft_version_compatible = version.parse(importlib.metadata.version("peft")) > version.parse(min_version) if not is_peft_version_compatible: raise ValueError( f"The version of PEFT you are using is not compatible, please use a version that is greater" f" than {min_version}" )
diffusers/src/diffusers/utils/peft_utils.py/0
{ "file_path": "diffusers/src/diffusers/utils/peft_utils.py", "repo_id": "diffusers", "token_count": 4356 }
121
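To make the role of these helpers concrete, here is a minimal, illustrative sketch of how they could be combined when wiring LoRA weights into a diffusers model with PEFT. It is not taken from the library: the loader function and its inputs (`lora_state_dict`, `rank_dict`, `network_alpha_dict`) are hypothetical, and it assumes that `LoraConfig`, `inject_adapter_in_model` and `set_peft_model_state_dict` are available from a recent `peft` release.

import torch

from peft import LoraConfig, inject_adapter_in_model, set_peft_model_state_dict

from diffusers.utils.peft_utils import (
    get_adapter_name,
    get_peft_kwargs,
    scale_lora_layers,
    unscale_lora_layers,
)


def load_and_scale_lora(model, lora_state_dict, rank_dict, network_alpha_dict, weight=1.0):
    # Derive a LoraConfig from the per-module ranks/alphas parsed out of a checkpoint
    # (rank_dict / network_alpha_dict are hypothetical outputs of a state-dict parser).
    lora_config = LoraConfig(**get_peft_kwargs(rank_dict, network_alpha_dict, lora_state_dict, is_unet=True))

    # Pick a fresh adapter name, inject the adapter layers, and load their weights via PEFT.
    adapter_name = get_adapter_name(model)
    inject_adapter_in_model(lora_config, model, adapter_name=adapter_name)
    set_peft_model_state_dict(model, lora_state_dict, adapter_name=adapter_name)

    # Temporarily strengthen/weaken the LoRA contribution around inference, then undo it.
    scale_lora_layers(model, weight)
    # ... run the model here ...
    unscale_lora_layers(model, weight)
    return model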
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from parameterized import parameterized from diffusers import ( AsymmetricAutoencoderKL, AutoencoderKL, AutoencoderKLTemporalDecoder, AutoencoderTiny, ConsistencyDecoderVAE, StableDiffusionPipeline, ) from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.loading_utils import load_image from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, floats_tensor, load_hf_numpy, require_torch_accelerator, require_torch_accelerator_with_fp16, require_torch_accelerator_with_training, require_torch_gpu, skip_mps, slow, torch_all_close, torch_device, ) from diffusers.utils.torch_utils import randn_tensor from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() def get_autoencoder_kl_config(block_out_channels=None, norm_num_groups=None): block_out_channels = block_out_channels or [32, 64] norm_num_groups = norm_num_groups or 32 init_dict = { "block_out_channels": block_out_channels, "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D"] * len(block_out_channels), "up_block_types": ["UpDecoderBlock2D"] * len(block_out_channels), "latent_channels": 4, "norm_num_groups": norm_num_groups, } return init_dict def get_asym_autoencoder_kl_config(block_out_channels=None, norm_num_groups=None): block_out_channels = block_out_channels or [32, 64] norm_num_groups = norm_num_groups or 32 init_dict = { "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D"] * len(block_out_channels), "down_block_out_channels": block_out_channels, "layers_per_down_block": 1, "up_block_types": ["UpDecoderBlock2D"] * len(block_out_channels), "up_block_out_channels": block_out_channels, "layers_per_up_block": 1, "act_fn": "silu", "latent_channels": 4, "norm_num_groups": norm_num_groups, "sample_size": 32, "scaling_factor": 0.18215, } return init_dict def get_autoencoder_tiny_config(block_out_channels=None): block_out_channels = (len(block_out_channels) * [32]) if block_out_channels is not None else [32, 32] init_dict = { "in_channels": 3, "out_channels": 3, "encoder_block_out_channels": block_out_channels, "decoder_block_out_channels": block_out_channels, "num_encoder_blocks": [b // min(block_out_channels) for b in block_out_channels], "num_decoder_blocks": [b // min(block_out_channels) for b in reversed(block_out_channels)], } return init_dict def get_consistency_vae_config(block_out_channels=None, norm_num_groups=None): block_out_channels = block_out_channels or [32, 64] norm_num_groups = norm_num_groups or 32 return { "encoder_block_out_channels": block_out_channels, "encoder_in_channels": 3, "encoder_out_channels": 4, "encoder_down_block_types": ["DownEncoderBlock2D"] * len(block_out_channels), "decoder_add_attention": False, "decoder_block_out_channels": block_out_channels, "decoder_down_block_types": ["ResnetDownsampleBlock2D"] * 
len(block_out_channels), "decoder_downsample_padding": 1, "decoder_in_channels": 7, "decoder_layers_per_block": 1, "decoder_norm_eps": 1e-05, "decoder_norm_num_groups": norm_num_groups, "encoder_norm_num_groups": norm_num_groups, "decoder_num_train_timesteps": 1024, "decoder_out_channels": 6, "decoder_resnet_time_scale_shift": "scale_shift", "decoder_time_embedding_type": "learned", "decoder_up_block_types": ["ResnetUpsampleBlock2D"] * len(block_out_channels), "scaling_factor": 1, "latent_channels": 4, } class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): model_class = AutoencoderKL main_input_name = "sample" base_precision = 1e-2 @property def dummy_input(self): batch_size = 4 num_channels = 3 sizes = (32, 32) image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) return {"sample": image} @property def input_shape(self): return (3, 32, 32) @property def output_shape(self): return (3, 32, 32) def prepare_init_args_and_inputs_for_common(self): init_dict = get_autoencoder_kl_config() inputs_dict = self.dummy_input return init_dict, inputs_dict def test_forward_signature(self): pass def test_training(self): pass @require_torch_accelerator_with_training def test_gradient_checkpointing(self): # enable deterministic behavior for gradient checkpointing init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) assert not model.is_gradient_checkpointing and model.training out = model(**inputs_dict).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() labels = torch.randn_like(out) loss = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing model_2 = self.model_class(**init_dict) # clone model model_2.load_state_dict(model.state_dict()) model_2.to(torch_device) model_2.enable_gradient_checkpointing() assert model_2.is_gradient_checkpointing and model_2.training out_2 = model_2(**inputs_dict).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_2.zero_grad() loss_2 = (out_2 - labels).mean() loss_2.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_2).abs() < 1e-5) named_params = dict(model.named_parameters()) named_params_2 = dict(model_2.named_parameters()) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5)) def test_from_pretrained_hub(self): model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True) self.assertIsNotNone(model) self.assertEqual(len(loading_info["missing_keys"]), 0) model.to(torch_device) image = model(**self.dummy_input) assert image is not None, "Make sure output is not None" def test_output_pretrained(self): model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy") model = model.to(torch_device) model.eval() # Keep generator on CPU for non-CUDA devices to compare outputs with CPU result tensors generator_device = "cpu" if not torch_device.startswith("cuda") else "cuda" if torch_device != "mps": generator = torch.Generator(device=generator_device).manual_seed(0) else: generator = torch.manual_seed(0) image = torch.randn( 1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0), ) image = image.to(torch_device) with torch.no_grad(): output = model(image, sample_posterior=True, generator=generator).sample output_slice = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. if torch_device == "mps": expected_output_slice = torch.tensor( [ -4.0078e-01, -3.8323e-04, -1.2681e-01, -1.1462e-01, 2.0095e-01, 1.0893e-01, -8.8247e-02, -3.0361e-01, -9.8644e-03, ] ) elif generator_device == "cpu": expected_output_slice = torch.tensor( [ -0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026, ] ) else: expected_output_slice = torch.tensor( [ -0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485, ] ) self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2)) class AsymmetricAutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): model_class = AsymmetricAutoencoderKL main_input_name = "sample" base_precision = 1e-2 @property def dummy_input(self): batch_size = 4 num_channels = 3 sizes = (32, 32) image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) mask = torch.ones((batch_size, 1) + sizes).to(torch_device) return {"sample": image, "mask": mask} @property def input_shape(self): return (3, 32, 32) @property def output_shape(self): return (3, 32, 32) def prepare_init_args_and_inputs_for_common(self): init_dict = get_asym_autoencoder_kl_config() inputs_dict = self.dummy_input return init_dict, inputs_dict def test_forward_signature(self): pass def test_forward_with_norm_groups(self): pass class AutoencoderTinyTests(ModelTesterMixin, unittest.TestCase): model_class = AutoencoderTiny main_input_name = "sample" base_precision = 1e-2 @property def dummy_input(self): batch_size = 4 num_channels = 3 sizes = (32, 32) image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) return {"sample": image} @property def input_shape(self): return (3, 32, 32) @property def output_shape(self): return (3, 32, 32) def prepare_init_args_and_inputs_for_common(self): 
init_dict = get_autoencoder_tiny_config() inputs_dict = self.dummy_input return init_dict, inputs_dict def test_outputs_equivalence(self): pass class ConsistencyDecoderVAETests(ModelTesterMixin, unittest.TestCase): model_class = ConsistencyDecoderVAE main_input_name = "sample" base_precision = 1e-2 forward_requires_fresh_args = True def inputs_dict(self, seed=None): generator = torch.Generator("cpu") if seed is not None: generator.manual_seed(0) image = randn_tensor((4, 3, 32, 32), generator=generator, device=torch.device(torch_device)) return {"sample": image, "generator": generator} @property def input_shape(self): return (3, 32, 32) @property def output_shape(self): return (3, 32, 32) @property def init_dict(self): return get_consistency_vae_config() def prepare_init_args_and_inputs_for_common(self): return self.init_dict, self.inputs_dict() @unittest.skip def test_training(self): ... @unittest.skip def test_ema_training(self): ... class AutoncoderKLTemporalDecoderFastTests(ModelTesterMixin, unittest.TestCase): model_class = AutoencoderKLTemporalDecoder main_input_name = "sample" base_precision = 1e-2 @property def dummy_input(self): batch_size = 3 num_channels = 3 sizes = (32, 32) image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) num_frames = 3 return {"sample": image, "num_frames": num_frames} @property def input_shape(self): return (3, 32, 32) @property def output_shape(self): return (3, 32, 32) def prepare_init_args_and_inputs_for_common(self): init_dict = { "block_out_channels": [32, 64], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "latent_channels": 4, "layers_per_block": 2, } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_forward_signature(self): pass def test_training(self): pass @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS") def test_gradient_checkpointing(self): # enable deterministic behavior for gradient checkpointing init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) assert not model.is_gradient_checkpointing and model.training out = model(**inputs_dict).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() labels = torch.randn_like(out) loss = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing model_2 = self.model_class(**init_dict) # clone model model_2.load_state_dict(model.state_dict()) model_2.to(torch_device) model_2.enable_gradient_checkpointing() assert model_2.is_gradient_checkpointing and model_2.training out_2 = model_2(**inputs_dict).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_2.zero_grad() loss_2 = (out_2 - labels).mean() loss_2.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_2).abs() < 1e-5) named_params = dict(model.named_parameters()) named_params_2 = dict(model_2.named_parameters()) for name, param in named_params.items(): if "post_quant_conv" in name: continue self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5)) @slow class AutoencoderTinyIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() backend_empty_cache(torch_device) def get_file_format(self, seed, shape): return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy" def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False): dtype = torch.float16 if fp16 else torch.float32 image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype) return image def get_sd_vae_model(self, model_id="hf-internal-testing/taesd-diffusers", fp16=False): torch_dtype = torch.float16 if fp16 else torch.float32 model = AutoencoderTiny.from_pretrained(model_id, torch_dtype=torch_dtype) model.to(torch_device).eval() return model @parameterized.expand( [ [(1, 4, 73, 97), (1, 3, 584, 776)], [(1, 4, 97, 73), (1, 3, 776, 584)], [(1, 4, 49, 65), (1, 3, 392, 520)], [(1, 4, 65, 49), (1, 3, 520, 392)], [(1, 4, 49, 49), (1, 3, 392, 392)], ] ) def test_tae_tiling(self, in_shape, out_shape): model = self.get_sd_vae_model() model.enable_tiling() with torch.no_grad(): zeros = torch.zeros(in_shape).to(torch_device) dec = model.decode(zeros).sample assert dec.shape == out_shape def test_stable_diffusion(self): model = self.get_sd_vae_model() image = self.get_sd_image(seed=33) with torch.no_grad(): sample = model(image).sample assert sample.shape == image.shape output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() expected_output_slice = torch.tensor([0.0093, 0.6385, -0.1274, 0.1631, -0.1762, 0.5232, -0.3108, -0.0382]) assert torch_all_close(output_slice, expected_output_slice, atol=3e-3) @parameterized.expand([(True,), (False,)]) def test_tae_roundtrip(self, enable_tiling): # load the autoencoder model = self.get_sd_vae_model() if enable_tiling: model.enable_tiling() # make a black image with a white square in the middle, # which is large enough to split across multiple tiles image = -torch.ones(1, 3, 1024, 1024, device=torch_device) image[..., 256:768, 256:768] = 1.0 # round-trip the image through the autoencoder with torch.no_grad(): sample = model(image).sample # the autoencoder reconstruction should match original image, sorta def downscale(x): return torch.nn.functional.avg_pool2d(x, model.spatial_scale_factor) assert torch_all_close(downscale(sample), downscale(image), atol=0.125) @slow class AutoencoderKLIntegrationTests(unittest.TestCase): def get_file_format(self, seed, shape): return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy" def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() backend_empty_cache(torch_device) def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False): dtype = torch.float16 if fp16 else torch.float32 image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype) return image def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False): revision = 
"fp16" if fp16 else None torch_dtype = torch.float16 if fp16 else torch.float32 model = AutoencoderKL.from_pretrained( model_id, subfolder="vae", torch_dtype=torch_dtype, revision=revision, ) model.to(torch_device) return model def get_generator(self, seed=0): generator_device = "cpu" if not torch_device.startswith("cuda") else "cuda" if torch_device != "mps": return torch.Generator(device=generator_device).manual_seed(seed) return torch.manual_seed(seed) @parameterized.expand( [ # fmt: off [ 33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824], ], [ 47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131], ], # fmt: on ] ) def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps): model = self.get_sd_vae_model() image = self.get_sd_image(seed) generator = self.get_generator(seed) with torch.no_grad(): sample = model(image, generator=generator, sample_posterior=True).sample assert sample.shape == image.shape output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=3e-3) @parameterized.expand( [ # fmt: off [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]], [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]], # fmt: on ] ) @require_torch_accelerator_with_fp16 def test_stable_diffusion_fp16(self, seed, expected_slice): model = self.get_sd_vae_model(fp16=True) image = self.get_sd_image(seed, fp16=True) generator = self.get_generator(seed) with torch.no_grad(): sample = model(image, generator=generator, sample_posterior=True).sample assert sample.shape == image.shape output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu() expected_output_slice = torch.tensor(expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=1e-2) @parameterized.expand( [ # fmt: off [ 33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824], ], [ 47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131], ], # fmt: on ] ) def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps): model = self.get_sd_vae_model() image = self.get_sd_image(seed) with torch.no_grad(): sample = model(image).sample assert sample.shape == image.shape output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=3e-3) @parameterized.expand( [ # fmt: off [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]], [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]], # fmt: on ] ) @require_torch_accelerator @skip_mps def test_stable_diffusion_decode(self, seed, expected_slice): model = self.get_sd_vae_model() encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64)) with torch.no_grad(): sample = model.decode(encoding).sample assert list(sample.shape) == [3, 3, 512, 512] output_slice = sample[-1, -2:, :2, -2:].flatten().cpu() expected_output_slice = torch.tensor(expected_slice) assert 
torch_all_close(output_slice, expected_output_slice, atol=1e-3) @parameterized.expand( [ # fmt: off [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]], [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]], # fmt: on ] ) @require_torch_accelerator_with_fp16 def test_stable_diffusion_decode_fp16(self, seed, expected_slice): model = self.get_sd_vae_model(fp16=True) encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True) with torch.no_grad(): sample = model.decode(encoding).sample assert list(sample.shape) == [3, 3, 512, 512] output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu() expected_output_slice = torch.tensor(expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=5e-3) @parameterized.expand([(13,), (16,), (27,)]) @require_torch_gpu @unittest.skipIf( not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.", ) def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed): model = self.get_sd_vae_model(fp16=True) encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True) with torch.no_grad(): sample = model.decode(encoding).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): sample_2 = model.decode(encoding).sample assert list(sample.shape) == [3, 3, 512, 512] assert torch_all_close(sample, sample_2, atol=1e-1) @parameterized.expand([(13,), (16,), (37,)]) @require_torch_gpu @unittest.skipIf( not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.", ) def test_stable_diffusion_decode_xformers_vs_2_0(self, seed): model = self.get_sd_vae_model() encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64)) with torch.no_grad(): sample = model.decode(encoding).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): sample_2 = model.decode(encoding).sample assert list(sample.shape) == [3, 3, 512, 512] assert torch_all_close(sample, sample_2, atol=1e-2) @parameterized.expand( [ # fmt: off [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]], [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]], # fmt: on ] ) def test_stable_diffusion_encode_sample(self, seed, expected_slice): model = self.get_sd_vae_model() image = self.get_sd_image(seed) generator = self.get_generator(seed) with torch.no_grad(): dist = model.encode(image).latent_dist sample = dist.sample(generator=generator) assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] output_slice = sample[0, -1, -3:, -3:].flatten().cpu() expected_output_slice = torch.tensor(expected_slice) tolerance = 3e-3 if torch_device != "mps" else 1e-2 assert torch_all_close(output_slice, expected_output_slice, atol=tolerance) def test_stable_diffusion_model_local(self): model_id = "stabilityai/sd-vae-ft-mse" model_1 = AutoencoderKL.from_pretrained(model_id).to(torch_device) url = "https://huggingface.co/stabilityai/sd-vae-ft-mse-original/blob/main/vae-ft-mse-840000-ema-pruned.safetensors" model_2 = AutoencoderKL.from_single_file(url).to(torch_device) image = self.get_sd_image(33) with torch.no_grad(): sample_1 = model_1(image).sample sample_2 = model_2(image).sample assert sample_1.shape == sample_2.shape output_slice_1 = sample_1[-1, -2:, -2:, :2].flatten().float().cpu() output_slice_2 = sample_2[-1, -2:, -2:, :2].flatten().float().cpu() assert torch_all_close(output_slice_1, output_slice_2, atol=3e-3) @slow class 
AsymmetricAutoencoderKLIntegrationTests(unittest.TestCase): def get_file_format(self, seed, shape): return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy" def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() backend_empty_cache(torch_device) def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False): dtype = torch.float16 if fp16 else torch.float32 image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype) return image def get_sd_vae_model(self, model_id="cross-attention/asymmetric-autoencoder-kl-x-1-5", fp16=False): revision = "main" torch_dtype = torch.float32 model = AsymmetricAutoencoderKL.from_pretrained( model_id, torch_dtype=torch_dtype, revision=revision, ) model.to(torch_device).eval() return model def get_generator(self, seed=0): generator_device = "cpu" if not torch_device.startswith("cuda") else "cuda" if torch_device != "mps": return torch.Generator(device=generator_device).manual_seed(seed) return torch.manual_seed(seed) @parameterized.expand( [ # fmt: off [ 33, [-0.0344, 0.2912, 0.1687, -0.0137, -0.3462, 0.3552, -0.1337, 0.1078], [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], ], [ 47, [0.4400, 0.0543, 0.2873, 0.2946, 0.0553, 0.0839, -0.1585, 0.2529], [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], ], # fmt: on ] ) def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps): model = self.get_sd_vae_model() image = self.get_sd_image(seed) generator = self.get_generator(seed) with torch.no_grad(): sample = model(image, generator=generator, sample_posterior=True).sample assert sample.shape == image.shape output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=5e-3) @parameterized.expand( [ # fmt: off [ 33, [-0.0340, 0.2870, 0.1698, -0.0105, -0.3448, 0.3529, -0.1321, 0.1097], [-0.0344, 0.2912, 0.1687, -0.0137, -0.3462, 0.3552, -0.1337, 0.1078], ], [ 47, [0.4397, 0.0550, 0.2873, 0.2946, 0.0567, 0.0855, -0.1580, 0.2531], [0.4397, 0.0550, 0.2873, 0.2946, 0.0567, 0.0855, -0.1580, 0.2531], ], # fmt: on ] ) def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps): model = self.get_sd_vae_model() image = self.get_sd_image(seed) with torch.no_grad(): sample = model(image).sample assert sample.shape == image.shape output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=3e-3) @parameterized.expand( [ # fmt: off [13, [-0.0521, -0.2939, 0.1540, -0.1855, -0.5936, -0.3138, -0.4579, -0.2275]], [37, [-0.1820, -0.4345, -0.0455, -0.2923, -0.8035, -0.5089, -0.4795, -0.3106]], # fmt: on ] ) @require_torch_accelerator @skip_mps def test_stable_diffusion_decode(self, seed, expected_slice): model = self.get_sd_vae_model() encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64)) with torch.no_grad(): sample = model.decode(encoding).sample assert list(sample.shape) == [3, 3, 512, 512] output_slice = sample[-1, -2:, :2, -2:].flatten().cpu() expected_output_slice = torch.tensor(expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=2e-3) @parameterized.expand([(13,), (16,), (37,)]) @require_torch_gpu @unittest.skipIf( not 
is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.", ) def test_stable_diffusion_decode_xformers_vs_2_0(self, seed): model = self.get_sd_vae_model() encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64)) with torch.no_grad(): sample = model.decode(encoding).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): sample_2 = model.decode(encoding).sample assert list(sample.shape) == [3, 3, 512, 512] assert torch_all_close(sample, sample_2, atol=5e-2) @parameterized.expand( [ # fmt: off [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]], [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]], # fmt: on ] ) def test_stable_diffusion_encode_sample(self, seed, expected_slice): model = self.get_sd_vae_model() image = self.get_sd_image(seed) generator = self.get_generator(seed) with torch.no_grad(): dist = model.encode(image).latent_dist sample = dist.sample(generator=generator) assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] output_slice = sample[0, -1, -3:, -3:].flatten().cpu() expected_output_slice = torch.tensor(expected_slice) tolerance = 3e-3 if torch_device != "mps" else 1e-2 assert torch_all_close(output_slice, expected_output_slice, atol=tolerance) @slow class ConsistencyDecoderVAEIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @torch.no_grad() def test_encode_decode(self): vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder") # TODO - update vae.to(torch_device) image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ).resize((256, 256)) image = torch.from_numpy(np.array(image).transpose(2, 0, 1).astype(np.float32) / 127.5 - 1)[ None, :, :, : ].cuda() latent = vae.encode(image).latent_dist.mean sample = vae.decode(latent, generator=torch.Generator("cpu").manual_seed(0)).sample actual_output = sample[0, :2, :2, :2].flatten().cpu() expected_output = torch.tensor([-0.0141, -0.0014, 0.0115, 0.0086, 0.1051, 0.1053, 0.1031, 0.1024]) assert torch_all_close(actual_output, expected_output, atol=5e-3) def test_sd(self): vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder") # TODO - update pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", vae=vae, safety_checker=None) pipe.to(torch_device) out = pipe( "horse", num_inference_steps=2, output_type="pt", generator=torch.Generator("cpu").manual_seed(0), ).images[0] actual_output = out[:2, :2, :2].flatten().cpu() expected_output = torch.tensor([0.7686, 0.8228, 0.6489, 0.7455, 0.8661, 0.8797, 0.8241, 0.8759]) assert torch_all_close(actual_output, expected_output, atol=5e-3) def test_encode_decode_f16(self): vae = ConsistencyDecoderVAE.from_pretrained( "openai/consistency-decoder", torch_dtype=torch.float16 ) # TODO - update vae.to(torch_device) image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ).resize((256, 256)) image = ( torch.from_numpy(np.array(image).transpose(2, 0, 1).astype(np.float32) / 127.5 - 1)[None, :, :, :] .half() .cuda() ) latent = vae.encode(image).latent_dist.mean sample = vae.decode(latent, generator=torch.Generator("cpu").manual_seed(0)).sample actual_output = sample[0, :2, :2, :2].flatten().cpu() expected_output = torch.tensor( 
[-0.0111, -0.0125, -0.0017, -0.0007, 0.1257, 0.1465, 0.1450, 0.1471], dtype=torch.float16, ) assert torch_all_close(actual_output, expected_output, atol=5e-3) def test_sd_f16(self): vae = ConsistencyDecoderVAE.from_pretrained( "openai/consistency-decoder", torch_dtype=torch.float16 ) # TODO - update pipe = StableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, vae=vae, safety_checker=None, ) pipe.to(torch_device) out = pipe( "horse", num_inference_steps=2, output_type="pt", generator=torch.Generator("cpu").manual_seed(0), ).images[0] actual_output = out[:2, :2, :2].flatten().cpu() expected_output = torch.tensor( [0.0000, 0.0249, 0.0000, 0.0000, 0.1709, 0.2773, 0.0471, 0.1035], dtype=torch.float16, ) assert torch_all_close(actual_output, expected_output, atol=5e-3)
diffusers/tests/models/autoencoders/test_models_vae.py/0
{ "file_path": "diffusers/tests/models/autoencoders/test_models_vae.py", "repo_id": "diffusers", "token_count": 18283 }
122
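For readers who want to poke at these models outside the test harness, the standalone sketch below instantiates an AutoencoderKL with the same kind of tiny configuration the fast tests build and round-trips a random image through encode/decode. The shapes noted in the comments follow from this particular two-block config and are my assumptions, not values asserted by the test file.

import torch

from diffusers import AutoencoderKL

vae = AutoencoderKL(
    block_out_channels=[32, 64],
    in_channels=3,
    out_channels=3,
    down_block_types=["DownEncoderBlock2D"] * 2,
    up_block_types=["UpDecoderBlock2D"] * 2,
    latent_channels=4,
    norm_num_groups=32,
).eval()

image = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    # encode() returns a posterior distribution; sample from it with a seeded generator
    posterior = vae.encode(image).latent_dist
    latents = posterior.sample(generator=torch.Generator().manual_seed(0))
    # decode back to image space
    reconstruction = vae.decode(latents).sample

print(latents.shape)         # with two blocks the encoder downsamples once: (1, 4, 16, 16)
print(reconstruction.shape)  # matches the input: (1, 3, 32, 32)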
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import os import tempfile import unittest import numpy as np import torch from diffusers import MotionAdapter, UNet2DConditionModel, UNetMotionModel from diffusers.utils import logging from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, torch_device, ) from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin logger = logging.get_logger(__name__) enable_full_determinism() class UNetMotionModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): model_class = UNetMotionModel main_input_name = "sample" @property def dummy_input(self): batch_size = 4 num_channels = 4 num_frames = 8 sizes = (32, 32) noise = floats_tensor((batch_size, num_channels, num_frames) + sizes).to(torch_device) time_step = torch.tensor([10]).to(torch_device) encoder_hidden_states = floats_tensor((batch_size, 4, 32)).to(torch_device) return {"sample": noise, "timestep": time_step, "encoder_hidden_states": encoder_hidden_states} @property def input_shape(self): return (4, 8, 32, 32) @property def output_shape(self): return (4, 8, 32, 32) def prepare_init_args_and_inputs_for_common(self): init_dict = { "block_out_channels": (32, 64), "down_block_types": ("CrossAttnDownBlockMotion", "DownBlockMotion"), "up_block_types": ("UpBlockMotion", "CrossAttnUpBlockMotion"), "cross_attention_dim": 32, "num_attention_heads": 4, "out_channels": 4, "in_channels": 4, "layers_per_block": 1, "sample_size": 32, } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_from_unet2d(self): torch.manual_seed(0) unet2d = UNet2DConditionModel() torch.manual_seed(1) model = self.model_class.from_unet2d(unet2d) model_state_dict = model.state_dict() for param_name, param_value in unet2d.named_parameters(): self.assertTrue(torch.equal(model_state_dict[param_name], param_value)) def test_freeze_unet2d(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.freeze_unet2d_params() for param_name, param_value in model.named_parameters(): if "motion_modules" not in param_name: self.assertFalse(param_value.requires_grad) else: self.assertTrue(param_value.requires_grad) def test_loading_motion_adapter(self): model = self.model_class() adapter = MotionAdapter() model.load_motion_modules(adapter) for idx, down_block in enumerate(model.down_blocks): adapter_state_dict = adapter.down_blocks[idx].motion_modules.state_dict() for param_name, param_value in down_block.motion_modules.named_parameters(): self.assertTrue(torch.equal(adapter_state_dict[param_name], param_value)) for idx, up_block in enumerate(model.up_blocks): adapter_state_dict = adapter.up_blocks[idx].motion_modules.state_dict() for param_name, param_value in up_block.motion_modules.named_parameters(): self.assertTrue(torch.equal(adapter_state_dict[param_name], param_value)) mid_block_adapter_state_dict = 
adapter.mid_block.motion_modules.state_dict() for param_name, param_value in model.mid_block.motion_modules.named_parameters(): self.assertTrue(torch.equal(mid_block_adapter_state_dict[param_name], param_value)) def test_saving_motion_modules(self): torch.manual_seed(0) init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) with tempfile.TemporaryDirectory() as tmpdirname: model.save_motion_modules(tmpdirname) self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "diffusion_pytorch_model.safetensors"))) adapter_loaded = MotionAdapter.from_pretrained(tmpdirname) torch.manual_seed(0) model_loaded = self.model_class(**init_dict) model_loaded.load_motion_modules(adapter_loaded) model_loaded.to(torch_device) with torch.no_grad(): output = model(**inputs_dict)[0] output_loaded = model_loaded(**inputs_dict)[0] max_diff = (output - output_loaded).abs().max().item() self.assertLessEqual(max_diff, 1e-4, "Models give different forward passes") @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_enable_works(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.enable_xformers_memory_efficient_attention() assert ( model.mid_block.attentions[0].transformer_blocks[0].attn1.processor.__class__.__name__ == "XFormersAttnProcessor" ), "xformers is not enabled" def test_gradient_checkpointing_is_applied(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model_class_copy = copy.copy(self.model_class) modules_with_gc_enabled = {} # now monkey patch the following function: # def _set_gradient_checkpointing(self, module, value=False): # if hasattr(module, "gradient_checkpointing"): # module.gradient_checkpointing = value def _set_gradient_checkpointing_new(self, module, value=False): if hasattr(module, "gradient_checkpointing"): module.gradient_checkpointing = value modules_with_gc_enabled[module.__class__.__name__] = True model_class_copy._set_gradient_checkpointing = _set_gradient_checkpointing_new model = model_class_copy(**init_dict) model.enable_gradient_checkpointing() EXPECTED_SET = { "CrossAttnUpBlockMotion", "CrossAttnDownBlockMotion", "UNetMidBlockCrossAttnMotion", "UpBlockMotion", "Transformer2DModel", "DownBlockMotion", } assert set(modules_with_gc_enabled.keys()) == EXPECTED_SET assert all(modules_with_gc_enabled.values()), "All modules should be enabled" def test_feed_forward_chunking(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["norm_num_groups"] = 32 model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): output = model(**inputs_dict)[0] model.enable_forward_chunking() with torch.no_grad(): output_2 = model(**inputs_dict)[0] self.assertEqual(output.shape, output_2.shape, "Shape doesn't match") assert np.abs(output.cpu() - output_2.cpu()).max() < 1e-2 def test_pickle(self): # enable deterministic behavior for gradient checkpointing init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) with torch.no_grad(): sample = model(**inputs_dict).sample sample_copy = copy.copy(sample) assert (sample - sample_copy).abs().max() < 1e-4 def test_from_save_pretrained(self, expected_max_diff=5e-5): init_dict, inputs_dict = 
self.prepare_init_args_and_inputs_for_common() torch.manual_seed(0) model = self.model_class(**init_dict) model.to(torch_device) model.eval() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, safe_serialization=False) torch.manual_seed(0) new_model = self.model_class.from_pretrained(tmpdirname) new_model.to(torch_device) with torch.no_grad(): image = model(**inputs_dict) if isinstance(image, dict): image = image.to_tuple()[0] new_image = new_model(**inputs_dict) if isinstance(new_image, dict): new_image = new_image.to_tuple()[0] max_diff = (image - new_image).abs().max().item() self.assertLessEqual(max_diff, expected_max_diff, "Models give different forward passes") def test_from_save_pretrained_variant(self, expected_max_diff=5e-5): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() torch.manual_seed(0) model = self.model_class(**init_dict) model.to(torch_device) model.eval() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, variant="fp16", safe_serialization=False) torch.manual_seed(0) new_model = self.model_class.from_pretrained(tmpdirname, variant="fp16") # non-variant cannot be loaded with self.assertRaises(OSError) as error_context: self.model_class.from_pretrained(tmpdirname) # make sure that error message states what keys are missing assert "Error no file named diffusion_pytorch_model.bin found in directory" in str(error_context.exception) new_model.to(torch_device) with torch.no_grad(): image = model(**inputs_dict) if isinstance(image, dict): image = image.to_tuple()[0] new_image = new_model(**inputs_dict) if isinstance(new_image, dict): new_image = new_image.to_tuple()[0] max_diff = (image - new_image).abs().max().item() self.assertLessEqual(max_diff, expected_max_diff, "Models give different forward passes") def test_forward_with_norm_groups(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["norm_num_groups"] = 16 init_dict["block_out_channels"] = (16, 32) model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): output = model(**inputs_dict) if isinstance(output, dict): output = output.to_tuple()[0] self.assertIsNotNone(output) expected_shape = inputs_dict["sample"].shape self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match")
diffusers/tests/models/unets/test_models_unet_motion.py/0
{ "file_path": "diffusers/tests/models/unets/test_models_unet_motion.py", "repo_id": "diffusers", "token_count": 4947 }
123
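As a complement to the tests, here is an illustrative small-scale sketch of the conversion they exercise: building a 2D conditional UNet, lifting it into a UNetMotionModel with from_unet2d, and running one forward pass on a dummy video batch. The tiny hyperparameters and tensor sizes are arbitrary choices for the sketch, not values mandated by the library.

import torch

from diffusers import UNet2DConditionModel, UNetMotionModel

unet2d = UNet2DConditionModel(
    block_out_channels=(32, 64),
    down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
    up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
    cross_attention_dim=32,
    in_channels=4,
    out_channels=4,
    layers_per_block=1,
    sample_size=32,
)

# A pretrained MotionAdapter could be passed via `motion_adapter=...`; it is omitted here so
# the sketch stays self-contained and simply uses randomly initialized motion modules.
motion_unet = UNetMotionModel.from_unet2d(unet2d)

sample = torch.randn(1, 4, 8, 32, 32)          # (batch, channels, frames, height, width)
encoder_hidden_states = torch.randn(1, 4, 32)  # dummy text conditioning, dim = cross_attention_dim
with torch.no_grad():
    out = motion_unet(sample, timestep=torch.tensor([10]), encoder_hidden_states=encoder_hidden_states).sample

print(out.shape)  # same shape as `sample`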
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import AmusedPipeline, AmusedScheduler, UVit2DModel, VQModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class AmusedPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = AmusedPipeline params = TEXT_TO_IMAGE_PARAMS | {"encoder_hidden_states", "negative_encoder_hidden_states"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS def get_dummy_components(self): torch.manual_seed(0) transformer = UVit2DModel( hidden_size=32, use_bias=False, hidden_dropout=0.0, cond_embed_dim=32, micro_cond_encode_dim=2, micro_cond_embed_dim=10, encoder_hidden_size=32, vocab_size=32, codebook_size=32, in_channels=32, block_out_channels=32, num_res_blocks=1, downsample=True, upsample=True, block_num_heads=1, num_hidden_layers=1, num_attention_heads=1, attention_dropout=0.0, intermediate_size=32, layer_norm_eps=1e-06, ln_elementwise_affine=True, ) scheduler = AmusedScheduler(mask_token_id=31) torch.manual_seed(0) vqvae = VQModel( act_fn="silu", block_out_channels=[32], down_block_types=[ "DownEncoderBlock2D", ], in_channels=3, latent_channels=32, layers_per_block=2, norm_num_groups=32, num_vq_embeddings=32, out_channels=3, sample_size=32, up_block_types=[ "UpDecoderBlock2D", ], mid_block_add_attention=False, lookup_from_codebook=True, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=64, layer_norm_eps=1e-05, num_attention_heads=8, num_hidden_layers=3, pad_token_id=1, vocab_size=1000, projection_dim=32, ) text_encoder = CLIPTextModelWithProjection(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "transformer": transformer, "scheduler": scheduler, "vqvae": vqvae, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "output_type": "np", "height": 4, "width": 4, } return inputs def test_inference_batch_consistent(self, batch_sizes=[2]): self._test_inference_batch_consistent(batch_sizes=batch_sizes, batch_generator=False) @unittest.skip("aMUSEd does not support lists of generators") def test_inference_batch_single_identical(self): ... 
@slow @require_torch_gpu class AmusedPipelineSlowTests(unittest.TestCase): def test_amused_256(self): pipe = AmusedPipeline.from_pretrained("amused/amused-256") pipe.to(torch_device) image = pipe("dog", generator=torch.Generator().manual_seed(0), num_inference_steps=2, output_type="np").images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) expected_slice = np.array([0.4011, 0.3992, 0.3790, 0.3856, 0.3772, 0.3711, 0.3919, 0.3850, 0.3625]) assert np.abs(image_slice - expected_slice).max() < 3e-3 def test_amused_256_fp16(self): pipe = AmusedPipeline.from_pretrained("amused/amused-256", variant="fp16", torch_dtype=torch.float16) pipe.to(torch_device) image = pipe("dog", generator=torch.Generator().manual_seed(0), num_inference_steps=2, output_type="np").images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) expected_slice = np.array([0.0554, 0.05129, 0.0344, 0.0452, 0.0476, 0.0271, 0.0495, 0.0527, 0.0158]) assert np.abs(image_slice - expected_slice).max() < 7e-3 def test_amused_512(self): pipe = AmusedPipeline.from_pretrained("amused/amused-512") pipe.to(torch_device) image = pipe("dog", generator=torch.Generator().manual_seed(0), num_inference_steps=2, output_type="np").images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.9960, 0.9960, 0.9946, 0.9980, 0.9947, 0.9932, 0.9960, 0.9961, 0.9947]) assert np.abs(image_slice - expected_slice).max() < 3e-3 def test_amused_512_fp16(self): pipe = AmusedPipeline.from_pretrained("amused/amused-512", variant="fp16", torch_dtype=torch.float16) pipe.to(torch_device) image = pipe("dog", generator=torch.Generator().manual_seed(0), num_inference_steps=2, output_type="np").images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.9983, 1.0, 1.0, 1.0, 1.0, 0.9989, 0.9994, 0.9976, 0.9977]) assert np.abs(image_slice - expected_slice).max() < 3e-3
diffusers/tests/pipelines/amused/test_amused.py/0
{ "file_path": "diffusers/tests/pipelines/amused/test_amused.py", "repo_id": "diffusers", "token_count": 3131 }
124
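For context on what the slow tests above measure, this is roughly how the public aMUSEd checkpoint is used directly. The checkpoint id matches the tests, while the prompt, precision, step count, and output filename are example choices, and a CUDA device is assumed.

import torch

from diffusers import AmusedPipeline

pipe = AmusedPipeline.from_pretrained("amused/amused-256", variant="fp16", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

image = pipe(
    "A painting of a squirrel eating a burger",
    generator=torch.Generator().manual_seed(0),
    num_inference_steps=12,
).images[0]
image.save("amused_example.png")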
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_numpy, require_torch_gpu, slow, torch_device, ) from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class Dummies: @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def cross_attention_dim(self): return 32 @property def dummy_unet(self): torch.manual_seed(0) model_kwargs = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } model = UNet2DConditionModel(**model_kwargs) return model @property def dummy_movq_kwargs(self): return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def dummy_movq(self): torch.manual_seed(0) model = VQModel(**self.dummy_movq_kwargs) return model def get_dummy_components(self): unet = self.dummy_unet movq = self.dummy_movq scheduler = DDIMScheduler( num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False, ) components = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def get_dummy_inputs(self, device, seed=0): image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device) negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to( device ) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, 
"width": 64, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyV22Pipeline params = [ "image_embeds", "negative_image_embeds", ] batch_params = ["image_embeds", "negative_image_embeds"] required_optional_params = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] callback_cfg_params = ["image_embds"] test_xformers_attention = False def get_dummy_inputs(self, device, seed=0): dummies = Dummies() return dummies.get_dummy_inputs(device=device, seed=seed) def get_dummy_components(self): dummies = Dummies() return dummies.get_dummy_components() def test_kandinsky(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.3420, 0.9505, 0.3919, 1.0000, 0.5188, 0.3109, 0.6139, 0.5624, 0.6811]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" def test_float16_inference(self): super().test_float16_inference(expected_max_diff=1e-1) @slow @require_torch_gpu class KandinskyV22PipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_kandinsky_text2img(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy" ) pipe_prior = KandinskyV22PriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 ) pipe_prior.to(torch_device) pipeline = KandinskyV22Pipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 ) pipeline = pipeline.to(torch_device) pipeline.set_progress_bar_config(disable=None) prompt = "red cat, 4k photo" generator = torch.Generator(device="cuda").manual_seed(0) image_emb, zero_image_emb = pipe_prior( prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple() generator = torch.Generator(device="cuda").manual_seed(0) output = pipeline( image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, output_type="np", ) image = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(image, expected_image)
diffusers/tests/pipelines/kandinsky2_2/test_kandinsky.py/0
{ "file_path": "diffusers/tests/pipelines/kandinsky2_2/test_kandinsky.py", "repo_id": "diffusers", "token_count": 3974 }
125
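The integration test above compresses the usual two-stage Kandinsky 2.2 workflow; spelled out as a plain script it looks roughly like the sketch below. The checkpoint ids match the test, while the step counts, resolution, and output filename are arbitrary example values, and a CUDA device is assumed.

import torch

from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline

pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
).to("cuda")
pipe = KandinskyV22Pipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
).to("cuda")

prompt = "red cat, 4k photo"
generator = torch.Generator(device="cuda").manual_seed(0)

# Stage 1: the prior maps the text prompt to CLIP image embeddings.
image_emb, negative_image_emb = pipe_prior(prompt, generator=generator, num_inference_steps=25).to_tuple()

# Stage 2: the decoder turns the embeddings into an image.
image = pipe(
    image_embeds=image_emb,
    negative_image_embeds=negative_image_emb,
    generator=generator,
    height=512,
    width=512,
    num_inference_steps=50,
).images[0]
image.save("kandinsky_red_cat.png")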