text: stringlengths 7 to 328k
id: stringlengths 14 to 166
metadata: dict
__index_level_0__: int64, 0 to 459
cargo build --target wasm32-unknown-unknown --release
wasm-bindgen ../../target/wasm32-unknown-unknown/release/m.wasm --out-dir build --target web
wasm-bindgen ../../target/wasm32-unknown-unknown/release/m-quantized.wasm --out-dir build --target web
candle/candle-wasm-examples/t5/build-lib.sh/0
{ "file_path": "candle/candle-wasm-examples/t5/build-lib.sh", "repo_id": "candle", "token_count": 84 }
38
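For reference, a minimal sketch of how the `--target web` output produced by the build script above might be loaded in a browser, assuming the generated module ends up at `./build/m.js` (wasm-bindgen derives the name from the `m.wasm` input) and the page is served as an ES module; the actual exports depend on the `#[wasm_bindgen]` items defined in the crate:

// Minimal sketch: consuming wasm-bindgen's "web" target output.
// Assumes ./build/m.js and ./build/m_bg.wasm exist after running the script above.
import init from "./build/m.js";

async function run(): Promise<void> {
  // init() fetches and instantiates m_bg.wasm; afterwards the crate's exports are callable.
  await init();
}

run();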
use yew_agent::PublicWorker;

fn main() {
    candle_wasm_example_whisper::Worker::register();
}
candle/candle-wasm-examples/whisper/src/bin/worker.rs/0
{ "file_path": "candle/candle-wasm-examples/whisper/src/bin/worker.rs", "repo_id": "candle", "token_count": 38 }
39
use candle::{DType, IndexOp, Result, Tensor, D}; use candle_nn::{ batch_norm, conv2d, conv2d_no_bias, BatchNorm, Conv2d, Conv2dConfig, Module, VarBuilder, }; use image::DynamicImage; // Model architecture from https://github.com/ultralytics/ultralytics/issues/189 // https://github.com/tinygrad/tinygrad/blob/master/examples/yolov8.py #[derive(Clone, Copy, PartialEq, Debug)] pub struct Multiples { depth: f64, width: f64, ratio: f64, } impl Multiples { pub fn n() -> Self { Self { depth: 0.33, width: 0.25, ratio: 2.0, } } pub fn s() -> Self { Self { depth: 0.33, width: 0.50, ratio: 2.0, } } pub fn m() -> Self { Self { depth: 0.67, width: 0.75, ratio: 1.5, } } pub fn l() -> Self { Self { depth: 1.00, width: 1.00, ratio: 1.0, } } pub fn x() -> Self { Self { depth: 1.00, width: 1.25, ratio: 1.0, } } fn filters(&self) -> (usize, usize, usize) { let f1 = (256. * self.width) as usize; let f2 = (512. * self.width) as usize; let f3 = (512. * self.width * self.ratio) as usize; (f1, f2, f3) } } #[derive(Debug)] struct Upsample { scale_factor: usize, } impl Upsample { fn new(scale_factor: usize) -> Result<Self> { Ok(Upsample { scale_factor }) } } impl Module for Upsample { fn forward(&self, xs: &Tensor) -> candle::Result<Tensor> { let (_b_size, _channels, h, w) = xs.dims4()?; xs.upsample_nearest2d(self.scale_factor * h, self.scale_factor * w) } } #[derive(Debug)] struct ConvBlock { conv: Conv2d, bn: BatchNorm, } impl ConvBlock { fn load( vb: VarBuilder, c1: usize, c2: usize, k: usize, stride: usize, padding: Option<usize>, ) -> Result<Self> { let padding = padding.unwrap_or(k / 2); let cfg = Conv2dConfig { padding, stride, groups: 1, dilation: 1, }; let conv = conv2d_no_bias(c1, c2, k, cfg, vb.pp("conv"))?; let bn = batch_norm(c2, 1e-3, vb.pp("bn"))?; Ok(Self { conv, bn }) } } impl Module for ConvBlock { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = self.conv.forward(xs)?.apply_t(&self.bn, false)?; candle_nn::ops::silu(&xs) } } #[derive(Debug)] struct Bottleneck { cv1: ConvBlock, cv2: ConvBlock, residual: bool, } impl Bottleneck { fn load(vb: VarBuilder, c1: usize, c2: usize, shortcut: bool) -> Result<Self> { let channel_factor = 1.; let c_ = (c2 as f64 * channel_factor) as usize; let cv1 = ConvBlock::load(vb.pp("cv1"), c1, c_, 3, 1, None)?; let cv2 = ConvBlock::load(vb.pp("cv2"), c_, c2, 3, 1, None)?; let residual = c1 == c2 && shortcut; Ok(Self { cv1, cv2, residual }) } } impl Module for Bottleneck { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let ys = self.cv2.forward(&self.cv1.forward(xs)?)?; if self.residual { xs + ys } else { Ok(ys) } } } #[derive(Debug)] struct C2f { cv1: ConvBlock, cv2: ConvBlock, bottleneck: Vec<Bottleneck>, } impl C2f { fn load(vb: VarBuilder, c1: usize, c2: usize, n: usize, shortcut: bool) -> Result<Self> { let c = (c2 as f64 * 0.5) as usize; let cv1 = ConvBlock::load(vb.pp("cv1"), c1, 2 * c, 1, 1, None)?; let cv2 = ConvBlock::load(vb.pp("cv2"), (2 + n) * c, c2, 1, 1, None)?; let mut bottleneck = Vec::with_capacity(n); for idx in 0..n { let b = Bottleneck::load(vb.pp(&format!("bottleneck.{idx}")), c, c, shortcut)?; bottleneck.push(b) } Ok(Self { cv1, cv2, bottleneck, }) } } impl Module for C2f { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let ys = self.cv1.forward(xs)?; let mut ys = ys.chunk(2, 1)?; for m in self.bottleneck.iter() { ys.push(m.forward(ys.last().unwrap())?) 
} let zs = Tensor::cat(ys.as_slice(), 1)?; self.cv2.forward(&zs) } } #[derive(Debug)] struct Sppf { cv1: ConvBlock, cv2: ConvBlock, k: usize, } impl Sppf { fn load(vb: VarBuilder, c1: usize, c2: usize, k: usize) -> Result<Self> { let c_ = c1 / 2; let cv1 = ConvBlock::load(vb.pp("cv1"), c1, c_, 1, 1, None)?; let cv2 = ConvBlock::load(vb.pp("cv2"), c_ * 4, c2, 1, 1, None)?; Ok(Self { cv1, cv2, k }) } } impl Module for Sppf { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let (_, _, _, _) = xs.dims4()?; let xs = self.cv1.forward(xs)?; let xs2 = xs .pad_with_zeros(2, self.k / 2, self.k / 2)? .pad_with_zeros(3, self.k / 2, self.k / 2)? .max_pool2d_with_stride(self.k, 1)?; let xs3 = xs2 .pad_with_zeros(2, self.k / 2, self.k / 2)? .pad_with_zeros(3, self.k / 2, self.k / 2)? .max_pool2d_with_stride(self.k, 1)?; let xs4 = xs3 .pad_with_zeros(2, self.k / 2, self.k / 2)? .pad_with_zeros(3, self.k / 2, self.k / 2)? .max_pool2d_with_stride(self.k, 1)?; self.cv2.forward(&Tensor::cat(&[&xs, &xs2, &xs3, &xs4], 1)?) } } #[derive(Debug)] struct Dfl { conv: Conv2d, num_classes: usize, } impl Dfl { fn load(vb: VarBuilder, num_classes: usize) -> Result<Self> { let conv = conv2d_no_bias(num_classes, 1, 1, Default::default(), vb.pp("conv"))?; Ok(Self { conv, num_classes }) } } impl Module for Dfl { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let (b_sz, _channels, anchors) = xs.dims3()?; let xs = xs .reshape((b_sz, 4, self.num_classes, anchors))? .transpose(2, 1)?; let xs = candle_nn::ops::softmax(&xs, 1)?; self.conv.forward(&xs)?.reshape((b_sz, 4, anchors)) } } #[derive(Debug)] struct DarkNet { b1_0: ConvBlock, b1_1: ConvBlock, b2_0: C2f, b2_1: ConvBlock, b2_2: C2f, b3_0: ConvBlock, b3_1: C2f, b4_0: ConvBlock, b4_1: C2f, b5: Sppf, } impl DarkNet { fn load(vb: VarBuilder, m: Multiples) -> Result<Self> { let (w, r, d) = (m.width, m.ratio, m.depth); let b1_0 = ConvBlock::load(vb.pp("b1.0"), 3, (64. * w) as usize, 3, 2, Some(1))?; let b1_1 = ConvBlock::load( vb.pp("b1.1"), (64. * w) as usize, (128. * w) as usize, 3, 2, Some(1), )?; let b2_0 = C2f::load( vb.pp("b2.0"), (128. * w) as usize, (128. * w) as usize, (3. * d).round() as usize, true, )?; let b2_1 = ConvBlock::load( vb.pp("b2.1"), (128. * w) as usize, (256. * w) as usize, 3, 2, Some(1), )?; let b2_2 = C2f::load( vb.pp("b2.2"), (256. * w) as usize, (256. * w) as usize, (6. * d).round() as usize, true, )?; let b3_0 = ConvBlock::load( vb.pp("b3.0"), (256. * w) as usize, (512. * w) as usize, 3, 2, Some(1), )?; let b3_1 = C2f::load( vb.pp("b3.1"), (512. * w) as usize, (512. * w) as usize, (6. * d).round() as usize, true, )?; let b4_0 = ConvBlock::load( vb.pp("b4.0"), (512. * w) as usize, (512. * w * r) as usize, 3, 2, Some(1), )?; let b4_1 = C2f::load( vb.pp("b4.1"), (512. * w * r) as usize, (512. * w * r) as usize, (3. * d).round() as usize, true, )?; let b5 = Sppf::load( vb.pp("b5.0"), (512. * w * r) as usize, (512. 
* w * r) as usize, 5, )?; Ok(Self { b1_0, b1_1, b2_0, b2_1, b2_2, b3_0, b3_1, b4_0, b4_1, b5, }) } fn forward(&self, xs: &Tensor) -> Result<(Tensor, Tensor, Tensor)> { let x1 = self.b1_1.forward(&self.b1_0.forward(xs)?)?; let x2 = self .b2_2 .forward(&self.b2_1.forward(&self.b2_0.forward(&x1)?)?)?; let x3 = self.b3_1.forward(&self.b3_0.forward(&x2)?)?; let x4 = self.b4_1.forward(&self.b4_0.forward(&x3)?)?; let x5 = self.b5.forward(&x4)?; Ok((x2, x3, x5)) } } #[derive(Debug)] struct YoloV8Neck { up: Upsample, n1: C2f, n2: C2f, n3: ConvBlock, n4: C2f, n5: ConvBlock, n6: C2f, } impl YoloV8Neck { fn load(vb: VarBuilder, m: Multiples) -> Result<Self> { let up = Upsample::new(2)?; let (w, r, d) = (m.width, m.ratio, m.depth); let n = (3. * d).round() as usize; let n1 = C2f::load( vb.pp("n1"), (512. * w * (1. + r)) as usize, (512. * w) as usize, n, false, )?; let n2 = C2f::load( vb.pp("n2"), (768. * w) as usize, (256. * w) as usize, n, false, )?; let n3 = ConvBlock::load( vb.pp("n3"), (256. * w) as usize, (256. * w) as usize, 3, 2, Some(1), )?; let n4 = C2f::load( vb.pp("n4"), (768. * w) as usize, (512. * w) as usize, n, false, )?; let n5 = ConvBlock::load( vb.pp("n5"), (512. * w) as usize, (512. * w) as usize, 3, 2, Some(1), )?; let n6 = C2f::load( vb.pp("n6"), (512. * w * (1. + r)) as usize, (512. * w * r) as usize, n, false, )?; Ok(Self { up, n1, n2, n3, n4, n5, n6, }) } fn forward(&self, p3: &Tensor, p4: &Tensor, p5: &Tensor) -> Result<(Tensor, Tensor, Tensor)> { let x = self .n1 .forward(&Tensor::cat(&[&self.up.forward(p5)?, p4], 1)?)?; let head_1 = self .n2 .forward(&Tensor::cat(&[&self.up.forward(&x)?, p3], 1)?)?; let head_2 = self .n4 .forward(&Tensor::cat(&[&self.n3.forward(&head_1)?, &x], 1)?)?; let head_3 = self .n6 .forward(&Tensor::cat(&[&self.n5.forward(&head_2)?, p5], 1)?)?; Ok((head_1, head_2, head_3)) } } #[derive(Debug)] struct DetectionHead { dfl: Dfl, cv2: [(ConvBlock, ConvBlock, Conv2d); 3], cv3: [(ConvBlock, ConvBlock, Conv2d); 3], ch: usize, no: usize, } #[derive(Debug)] struct PoseHead { detect: DetectionHead, cv4: [(ConvBlock, ConvBlock, Conv2d); 3], kpt: (usize, usize), } fn make_anchors( xs0: &Tensor, xs1: &Tensor, xs2: &Tensor, (s0, s1, s2): (usize, usize, usize), grid_cell_offset: f64, ) -> Result<(Tensor, Tensor)> { let dev = xs0.device(); let mut anchor_points = vec![]; let mut stride_tensor = vec![]; for (xs, stride) in [(xs0, s0), (xs1, s1), (xs2, s2)] { // xs is only used to extract the h and w dimensions. let (_, _, h, w) = xs.dims4()?; let sx = (Tensor::arange(0, w as u32, dev)?.to_dtype(DType::F32)? + grid_cell_offset)?; let sy = (Tensor::arange(0, h as u32, dev)?.to_dtype(DType::F32)? + grid_cell_offset)?; let sx = sx .reshape((1, sx.elem_count()))? .repeat((h, 1))? .flatten_all()?; let sy = sy .reshape((sy.elem_count(), 1))? .repeat((1, w))? .flatten_all()?; anchor_points.push(Tensor::stack(&[&sx, &sy], D::Minus1)?); stride_tensor.push((Tensor::ones(h * w, DType::F32, dev)? * stride as f64)?); } let anchor_points = Tensor::cat(anchor_points.as_slice(), 0)?; let stride_tensor = Tensor::cat(stride_tensor.as_slice(), 0)?.unsqueeze(1)?; Ok((anchor_points, stride_tensor)) } struct DetectionHeadOut { pred: Tensor, anchors: Tensor, strides: Tensor, } fn dist2bbox(distance: &Tensor, anchor_points: &Tensor) -> Result<Tensor> { let chunks = distance.chunk(2, 1)?; let lt = &chunks[0]; let rb = &chunks[1]; let x1y1 = anchor_points.sub(lt)?; let x2y2 = anchor_points.add(rb)?; let c_xy = ((&x1y1 + &x2y2)? 
* 0.5)?; let wh = (&x2y2 - &x1y1)?; Tensor::cat(&[c_xy, wh], 1) } impl DetectionHead { fn load(vb: VarBuilder, nc: usize, filters: (usize, usize, usize)) -> Result<Self> { let ch = 16; let dfl = Dfl::load(vb.pp("dfl"), ch)?; let c1 = usize::max(filters.0, nc); let c2 = usize::max(filters.0 / 4, ch * 4); let cv3 = [ Self::load_cv3(vb.pp("cv3.0"), c1, nc, filters.0)?, Self::load_cv3(vb.pp("cv3.1"), c1, nc, filters.1)?, Self::load_cv3(vb.pp("cv3.2"), c1, nc, filters.2)?, ]; let cv2 = [ Self::load_cv2(vb.pp("cv2.0"), c2, ch, filters.0)?, Self::load_cv2(vb.pp("cv2.1"), c2, ch, filters.1)?, Self::load_cv2(vb.pp("cv2.2"), c2, ch, filters.2)?, ]; let no = nc + ch * 4; Ok(Self { dfl, cv2, cv3, ch, no, }) } fn load_cv3( vb: VarBuilder, c1: usize, nc: usize, filter: usize, ) -> Result<(ConvBlock, ConvBlock, Conv2d)> { let block0 = ConvBlock::load(vb.pp("0"), filter, c1, 3, 1, None)?; let block1 = ConvBlock::load(vb.pp("1"), c1, c1, 3, 1, None)?; let conv = conv2d(c1, nc, 1, Default::default(), vb.pp("2"))?; Ok((block0, block1, conv)) } fn load_cv2( vb: VarBuilder, c2: usize, ch: usize, filter: usize, ) -> Result<(ConvBlock, ConvBlock, Conv2d)> { let block0 = ConvBlock::load(vb.pp("0"), filter, c2, 3, 1, None)?; let block1 = ConvBlock::load(vb.pp("1"), c2, c2, 3, 1, None)?; let conv = conv2d(c2, 4 * ch, 1, Default::default(), vb.pp("2"))?; Ok((block0, block1, conv)) } fn forward(&self, xs0: &Tensor, xs1: &Tensor, xs2: &Tensor) -> Result<DetectionHeadOut> { let forward_cv = |xs, i: usize| { let xs_2 = self.cv2[i].0.forward(xs)?; let xs_2 = self.cv2[i].1.forward(&xs_2)?; let xs_2 = self.cv2[i].2.forward(&xs_2)?; let xs_3 = self.cv3[i].0.forward(xs)?; let xs_3 = self.cv3[i].1.forward(&xs_3)?; let xs_3 = self.cv3[i].2.forward(&xs_3)?; Tensor::cat(&[&xs_2, &xs_3], 1) }; let xs0 = forward_cv(xs0, 0)?; let xs1 = forward_cv(xs1, 1)?; let xs2 = forward_cv(xs2, 2)?; let (anchors, strides) = make_anchors(&xs0, &xs1, &xs2, (8, 16, 32), 0.5)?; let anchors = anchors.transpose(0, 1)?.unsqueeze(0)?; let strides = strides.transpose(0, 1)?; let reshape = |xs: &Tensor| { let d = xs.dim(0)?; let el = xs.elem_count(); xs.reshape((d, self.no, el / (d * self.no))) }; let ys0 = reshape(&xs0)?; let ys1 = reshape(&xs1)?; let ys2 = reshape(&xs2)?; let x_cat = Tensor::cat(&[ys0, ys1, ys2], 2)?; let box_ = x_cat.i((.., ..self.ch * 4))?; let cls = x_cat.i((.., self.ch * 4..))?; let dbox = dist2bbox(&self.dfl.forward(&box_)?, &anchors)?; let dbox = dbox.broadcast_mul(&strides)?; let pred = Tensor::cat(&[dbox, candle_nn::ops::sigmoid(&cls)?], 1)?; Ok(DetectionHeadOut { pred, anchors, strides, }) } } impl PoseHead { // kpt: keypoints, (17, 3) // nc: num-classes, 80 fn load( vb: VarBuilder, nc: usize, kpt: (usize, usize), filters: (usize, usize, usize), ) -> Result<Self> { let detect = DetectionHead::load(vb.clone(), nc, filters)?; let nk = kpt.0 * kpt.1; let c4 = usize::max(filters.0 / 4, nk); let cv4 = [ Self::load_cv4(vb.pp("cv4.0"), c4, nk, filters.0)?, Self::load_cv4(vb.pp("cv4.1"), c4, nk, filters.1)?, Self::load_cv4(vb.pp("cv4.2"), c4, nk, filters.2)?, ]; Ok(Self { detect, cv4, kpt }) } fn load_cv4( vb: VarBuilder, c1: usize, nc: usize, filter: usize, ) -> Result<(ConvBlock, ConvBlock, Conv2d)> { let block0 = ConvBlock::load(vb.pp("0"), filter, c1, 3, 1, None)?; let block1 = ConvBlock::load(vb.pp("1"), c1, c1, 3, 1, None)?; let conv = conv2d(c1, nc, 1, Default::default(), vb.pp("2"))?; Ok((block0, block1, conv)) } fn forward(&self, xs0: &Tensor, xs1: &Tensor, xs2: &Tensor) -> Result<Tensor> { let d = self.detect.forward(xs0, 
xs1, xs2)?; let forward_cv = |xs: &Tensor, i: usize| { let (b_sz, _, h, w) = xs.dims4()?; let xs = self.cv4[i].0.forward(xs)?; let xs = self.cv4[i].1.forward(&xs)?; let xs = self.cv4[i].2.forward(&xs)?; xs.reshape((b_sz, self.kpt.0 * self.kpt.1, h * w)) }; let xs0 = forward_cv(xs0, 0)?; let xs1 = forward_cv(xs1, 1)?; let xs2 = forward_cv(xs2, 2)?; let xs = Tensor::cat(&[xs0, xs1, xs2], D::Minus1)?; let (b_sz, _nk, hw) = xs.dims3()?; let xs = xs.reshape((b_sz, self.kpt.0, self.kpt.1, hw))?; let ys01 = ((xs.i((.., .., 0..2))? * 2.)?.broadcast_add(&d.anchors)? - 0.5)? .broadcast_mul(&d.strides)?; let ys2 = candle_nn::ops::sigmoid(&xs.i((.., .., 2..3))?)?; let ys = Tensor::cat(&[ys01, ys2], 2)?.flatten(1, 2)?; Tensor::cat(&[d.pred, ys], 1) } } #[derive(Debug)] pub struct YoloV8 { net: DarkNet, fpn: YoloV8Neck, head: DetectionHead, } impl YoloV8 { pub fn load(vb: VarBuilder, m: Multiples, num_classes: usize) -> Result<Self> { let net = DarkNet::load(vb.pp("net"), m)?; let fpn = YoloV8Neck::load(vb.pp("fpn"), m)?; let head = DetectionHead::load(vb.pp("head"), num_classes, m.filters())?; Ok(Self { net, fpn, head }) } } impl Module for YoloV8 { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let (xs1, xs2, xs3) = self.net.forward(xs)?; let (xs1, xs2, xs3) = self.fpn.forward(&xs1, &xs2, &xs3)?; Ok(self.head.forward(&xs1, &xs2, &xs3)?.pred) } } #[derive(Debug)] pub struct YoloV8Pose { net: DarkNet, fpn: YoloV8Neck, head: PoseHead, } impl YoloV8Pose { pub fn load( vb: VarBuilder, m: Multiples, num_classes: usize, kpt: (usize, usize), ) -> Result<Self> { let net = DarkNet::load(vb.pp("net"), m)?; let fpn = YoloV8Neck::load(vb.pp("fpn"), m)?; let head = PoseHead::load(vb.pp("head"), num_classes, kpt, m.filters())?; Ok(Self { net, fpn, head }) } } impl Module for YoloV8Pose { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let (xs1, xs2, xs3) = self.net.forward(xs)?; let (xs1, xs2, xs3) = self.fpn.forward(&xs1, &xs2, &xs3)?; self.head.forward(&xs1, &xs2, &xs3) } } #[derive(Debug, Clone, Copy, PartialEq, serde::Serialize, serde::Deserialize)] pub struct KeyPoint { pub x: f32, pub y: f32, pub mask: f32, } #[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] pub struct Bbox { pub xmin: f32, pub ymin: f32, pub xmax: f32, pub ymax: f32, pub confidence: f32, pub keypoints: Vec<KeyPoint>, } // Intersection over union of two bounding boxes. fn iou(b1: &Bbox, b2: &Bbox) -> f32 { let b1_area = (b1.xmax - b1.xmin + 1.) * (b1.ymax - b1.ymin + 1.); let b2_area = (b2.xmax - b2.xmin + 1.) * (b2.ymax - b2.ymin + 1.); let i_xmin = b1.xmin.max(b2.xmin); let i_xmax = b1.xmax.min(b2.xmax); let i_ymin = b1.ymin.max(b2.ymin); let i_ymax = b1.ymax.min(b2.ymax); let i_area = (i_xmax - i_xmin + 1.).max(0.) * (i_ymax - i_ymin + 1.).max(0.); i_area / (b1_area + b2_area - i_area) } pub fn report_detect( pred: &Tensor, img: DynamicImage, w: usize, h: usize, conf_threshold: f32, iou_threshold: f32, ) -> Result<Vec<Vec<Bbox>>> { let (pred_size, npreds) = pred.dims2()?; let nclasses = pred_size - 4; let conf_threshold = conf_threshold.clamp(0.0, 1.0); let iou_threshold = iou_threshold.clamp(0.0, 1.0); // The bounding boxes grouped by (maximum) class index. let mut bboxes: Vec<Vec<Bbox>> = (0..nclasses).map(|_| vec![]).collect(); // Extract the bounding boxes for which confidence is above the threshold. 
for index in 0..npreds { let pred = Vec::<f32>::try_from(pred.i((.., index))?)?; let confidence = *pred[4..].iter().max_by(|x, y| x.total_cmp(y)).unwrap(); if confidence > conf_threshold { let mut class_index = 0; for i in 0..nclasses { if pred[4 + i] > pred[4 + class_index] { class_index = i } } if pred[class_index + 4] > 0. { let bbox = Bbox { xmin: pred[0] - pred[2] / 2., ymin: pred[1] - pred[3] / 2., xmax: pred[0] + pred[2] / 2., ymax: pred[1] + pred[3] / 2., confidence, keypoints: vec![], }; bboxes[class_index].push(bbox) } } } non_maximum_suppression(&mut bboxes, iou_threshold); // Annotate the original image and print boxes information. let (initial_h, initial_w) = (img.height() as f32, img.width() as f32); let w_ratio = initial_w / w as f32; let h_ratio = initial_h / h as f32; for (class_index, bboxes_for_class) in bboxes.iter_mut().enumerate() { for b in bboxes_for_class.iter_mut() { crate::console_log!("{}: {:?}", crate::coco_classes::NAMES[class_index], b); b.xmin = (b.xmin * w_ratio).clamp(0., initial_w - 1.); b.ymin = (b.ymin * h_ratio).clamp(0., initial_h - 1.); b.xmax = (b.xmax * w_ratio).clamp(0., initial_w - 1.); b.ymax = (b.ymax * h_ratio).clamp(0., initial_h - 1.); } } Ok(bboxes) } fn non_maximum_suppression(bboxes: &mut [Vec<Bbox>], threshold: f32) { // Perform non-maximum suppression. for bboxes_for_class in bboxes.iter_mut() { bboxes_for_class.sort_by(|b1, b2| b2.confidence.partial_cmp(&b1.confidence).unwrap()); let mut current_index = 0; for index in 0..bboxes_for_class.len() { let mut drop = false; for prev_index in 0..current_index { let iou = iou(&bboxes_for_class[prev_index], &bboxes_for_class[index]); if iou > threshold { drop = true; break; } } if !drop { bboxes_for_class.swap(current_index, index); current_index += 1; } } bboxes_for_class.truncate(current_index); } } pub fn report_pose( pred: &Tensor, img: DynamicImage, w: usize, h: usize, confidence_threshold: f32, nms_threshold: f32, ) -> Result<Vec<Bbox>> { let (pred_size, npreds) = pred.dims2()?; if pred_size != 17 * 3 + 4 + 1 { candle::bail!("unexpected pred-size {pred_size}"); } let mut bboxes = vec![]; // Extract the bounding boxes for which confidence is above the threshold. for index in 0..npreds { let pred = Vec::<f32>::try_from(pred.i((.., index))?)?; let confidence = pred[4]; if confidence > confidence_threshold { let keypoints = (0..17) .map(|i| KeyPoint { x: pred[3 * i + 5], y: pred[3 * i + 6], mask: pred[3 * i + 7], }) .collect::<Vec<_>>(); let bbox = Bbox { xmin: pred[0] - pred[2] / 2., ymin: pred[1] - pred[3] / 2., xmax: pred[0] + pred[2] / 2., ymax: pred[1] + pred[3] / 2., confidence, keypoints, }; bboxes.push(bbox) } } let mut bboxes = vec![bboxes]; non_maximum_suppression(&mut bboxes, nms_threshold); let mut bboxes = bboxes.into_iter().next().unwrap(); let (initial_h, initial_w) = (img.height() as f32, img.width() as f32); let w_ratio = initial_w / w as f32; let h_ratio = initial_h / h as f32; for b in bboxes.iter_mut() { crate::console_log!("detected {b:?}"); b.xmin = (b.xmin * w_ratio).clamp(0., initial_w - 1.); b.ymin = (b.ymin * h_ratio).clamp(0., initial_h - 1.); b.xmax = (b.xmax * w_ratio).clamp(0., initial_w - 1.); b.ymax = (b.ymax * h_ratio).clamp(0., initial_h - 1.); for kp in b.keypoints.iter_mut() { kp.x = (kp.x * w_ratio).clamp(0., initial_w - 1.); kp.y = (kp.y * h_ratio).clamp(0., initial_h - 1.); } } Ok(bboxes) }
candle/candle-wasm-examples/yolo/src/model.rs/0
{ "file_path": "candle/candle-wasm-examples/yolo/src/model.rs", "repo_id": "candle", "token_count": 14731 }
40
{ "useTabs": true, "trailingComma": "es5", "printWidth": 100, "plugins": ["prettier-plugin-svelte", "prettier-plugin-tailwindcss"], "pluginSearchDirs": ["."], "overrides": [{ "files": "*.svelte", "options": { "parser": "svelte" } }] }
chat-ui/.prettierrc/0
{ "file_path": "chat-ui/.prettierrc", "repo_id": "chat-ui", "token_count": 104 }
41
/// <reference types="@sveltejs/kit" />
/// <reference types="unplugin-icons/types/svelte" />

import type { User } from "$lib/types/User";

// See https://kit.svelte.dev/docs/types#app
// for information about these interfaces
declare global {
	namespace App {
		// interface Error {}
		interface Locals {
			sessionId: string;
			user?: User;
		}
		// interface PageData {}
		// interface Platform {}
	}
}

export {};
chat-ui/src/app.d.ts/0
{ "file_path": "chat-ui/src/app.d.ts", "repo_id": "chat-ui", "token_count": 145 }
42
<script lang="ts"> import CarbonEarth from "~icons/carbon/earth"; import CarbonArrowUpRight from "~icons/carbon/arrow-up-right"; import type { Model } from "$lib/types/Model"; export let model: Pick<Model, "name" | "datasetName" | "websiteUrl" | "modelUrl" | "datasetUrl">; export let variant: "light" | "dark" = "light"; </script> <div class="flex items-center gap-5 rounded-xl bg-gray-100 px-3 py-2 text-xs sm:text-sm {variant === 'dark' ? 'text-gray-600 dark:bg-gray-800 dark:text-gray-300' : 'text-gray-800 dark:bg-gray-100 dark:text-gray-600'}" > <a href={model.modelUrl || "https://huggingface.co/" + model.name} target="_blank" rel="noreferrer" class="flex items-center hover:underline" ><CarbonArrowUpRight class="mr-1.5 shrink-0 text-xs text-gray-400" /> Model <div class="max-sm:hidden">&nbsp;page</div></a > {#if model.datasetName || model.datasetUrl} <a href={model.datasetUrl || "https://huggingface.co/datasets/" + model.datasetName} target="_blank" rel="noreferrer" class="flex items-center hover:underline" ><CarbonArrowUpRight class="mr-1.5 shrink-0 text-xs text-gray-400" /> Dataset <div class="max-sm:hidden">&nbsp;page</div></a > {/if} {#if model.websiteUrl} <a href={model.websiteUrl} target="_blank" class="ml-auto flex items-center hover:underline" rel="noreferrer" > <CarbonEarth class="mr-1.5 shrink-0 text-xs text-gray-400" /> Website </a> {/if} </div>
chat-ui/src/lib/components/ModelCardMetadata.svelte/0
{ "file_path": "chat-ui/src/lib/components/ModelCardMetadata.svelte", "repo_id": "chat-ui", "token_count": 623 }
43
<script lang="ts"> import { createEventDispatcher } from "svelte"; import IconGear from "~icons/bi/gear-fill"; import { base } from "$app/paths"; import type { Assistant } from "$lib/types/Assistant"; import IconInternet from "../icons/IconInternet.svelte"; export let assistant: Pick< Assistant, | "avatar" | "name" | "rag" | "modelId" | "createdByName" | "exampleInputs" | "_id" | "description" >; const dispatch = createEventDispatcher<{ message: string }>(); $: hasRag = assistant?.rag?.allowAllDomains || (assistant?.rag?.allowedDomains?.length ?? 0) > 0 || (assistant?.rag?.allowedLinks?.length ?? 0) > 0; </script> <div class="flex h-full w-full flex-col content-center items-center justify-center pb-52"> <div class="relative mt-auto rounded-2xl bg-gray-100 text-gray-600 dark:border-gray-800 dark:bg-gray-800/60 dark:text-gray-300" > <div class="mt-3 flex min-w-[80dvw] items-center gap-4 p-4 pr-1 sm:min-w-[440px] md:p-8 md:pt-10 xl:gap-8" > {#if assistant.avatar} <img src={`${base}/settings/assistants/${assistant._id.toString()}/avatar.jpg?hash=${ assistant.avatar }`} alt="avatar" class="size-16 flex-none rounded-full object-cover max-sm:self-start md:size-32" /> {:else} <div class="flex size-12 flex-none items-center justify-center rounded-full bg-gray-300 object-cover text-xl font-bold uppercase text-gray-500 max-sm:self-start sm:text-4xl md:size-32 dark:bg-gray-600" > {assistant?.name[0]} </div> {/if} <div class="flex h-full flex-col gap-2 text-balance"> <p class="-mb-1">Assistant</p> <p class="text-xl font-bold sm:text-2xl">{assistant.name}</p> {#if assistant.description} <p class="line-clamp-6 text-sm text-gray-500 dark:text-gray-400"> {assistant.description} </p> {/if} {#if hasRag} <div class="flex h-5 w-fit items-center gap-1 rounded-full bg-blue-500/10 pl-1 pr-2 text-xs" title="This assistant uses the websearch." > <IconInternet classNames="text-sm text-blue-600" /> Has internet access </div> {/if} {#if assistant.createdByName} <p class="pt-2 text-sm text-gray-400 dark:text-gray-500"> Created by <a class="hover:underline" href="{base}/assistants?user={assistant.createdByName}" > {assistant.createdByName} </a> </p> {/if} </div> </div> <div class="absolute right-3 top-3 md:right-4 md:top-4"> <a href="{base}/settings/assistants/{assistant._id.toString()}" class="flex items-center gap-1.5 rounded-full border bg-white py-1 pl-3 pr-2.5 text-xs text-gray-800 shadow-sm hover:shadow-inner md:text-sm dark:border-gray-700 dark:bg-gray-700 dark:text-gray-300/90 dark:hover:bg-gray-800" ><IconGear class="text-xxs" />Settings</a > </div> </div> {#if assistant.exampleInputs} <div class="mx-auto mt-auto w-full gap-8 sm:-mb-8"> <div class="md:col-span-2 md:mt-6"> <div class="grid grid-cols-1 gap-3 {assistant.exampleInputs.length > 1 ? 'md:grid-cols-2' : ''}" > {#each assistant.exampleInputs as example} <button type="button" class="truncate whitespace-nowrap rounded-xl border bg-gray-50 px-3 py-2 text-left text-smd text-gray-600 hover:bg-gray-100 dark:border-gray-800 dark:bg-gray-800 dark:text-gray-300 dark:hover:bg-gray-700" on:click={() => dispatch("message", example)} > {example} </button> {/each} </div> </div> </div> {/if} </div>
chat-ui/src/lib/components/chat/AssistantIntroduction.svelte/0
{ "file_path": "chat-ui/src/lib/components/chat/AssistantIntroduction.svelte", "repo_id": "chat-ui", "token_count": 1629 }
44
import { afterEach, describe, expect, it } from "vitest";
import { migrations } from "./routines";
import { acquireLock, isDBLocked, refreshLock, releaseLock } from "./lock";
import { collections } from "$lib/server/database";

describe("migrations", () => {
	it("should not have duplicates guid", async () => {
		const guids = migrations.map((m) => m._id.toString());
		const uniqueGuids = [...new Set(guids)];
		expect(uniqueGuids.length).toBe(guids.length);
	});

	it("should acquire only one lock on DB", async () => {
		const results = await Promise.all(new Array(1000).fill(0).map(() => acquireLock()));
		const locks = results.filter((r) => r);

		const semaphores = await collections.semaphores.find({}).toArray();

		expect(locks.length).toBe(1);
		expect(semaphores).toBeDefined();
		expect(semaphores.length).toBe(1);
		expect(semaphores?.[0].key).toBe("migrations");
	});

	it("should read the lock correctly", async () => {
		expect(await acquireLock()).toBe(true);
		expect(await isDBLocked()).toBe(true);
		expect(await acquireLock()).toBe(false);
		await releaseLock();
		expect(await isDBLocked()).toBe(false);
	});

	it("should refresh the lock", async () => {
		await acquireLock();

		// get the updatedAt time
		const updatedAtInitially = (await collections.semaphores.findOne({}))?.updatedAt;

		await refreshLock();

		const updatedAtAfterRefresh = (await collections.semaphores.findOne({}))?.updatedAt;

		expect(updatedAtInitially).toBeDefined();
		expect(updatedAtAfterRefresh).toBeDefined();
		expect(updatedAtInitially).not.toBe(updatedAtAfterRefresh);
	});
});

afterEach(async () => {
	await collections.semaphores.deleteMany({});
	await collections.migrationResults.deleteMany({});
});
chat-ui/src/lib/migrations/migrations.spec.ts/0
{ "file_path": "chat-ui/src/lib/migrations/migrations.spec.ts", "repo_id": "chat-ui", "token_count": 597 }
45
import { buildPrompt } from "$lib/buildPrompt"; import type { TextGenerationStreamOutput } from "@huggingface/inference"; import type { Endpoint } from "../endpoints"; import { z } from "zod"; export const endpointOllamaParametersSchema = z.object({ weight: z.number().int().positive().default(1), model: z.any(), type: z.literal("ollama"), url: z.string().url().default("http://127.0.0.1:11434"), ollamaName: z.string().min(1).optional(), }); export function endpointOllama(input: z.input<typeof endpointOllamaParametersSchema>): Endpoint { const { url, model, ollamaName } = endpointOllamaParametersSchema.parse(input); return async ({ messages, preprompt, continueMessage }) => { const prompt = await buildPrompt({ messages, continueMessage, preprompt, model, }); const r = await fetch(`${url}/api/generate`, { method: "POST", headers: { "Content-Type": "application/json", }, body: JSON.stringify({ prompt, model: ollamaName ?? model.name, raw: true, options: { top_p: model.parameters.top_p, top_k: model.parameters.top_k, temperature: model.parameters.temperature, repeat_penalty: model.parameters.repetition_penalty, stop: model.parameters.stop, num_predict: model.parameters.max_new_tokens, }, }), }); if (!r.ok) { throw new Error(`Failed to generate text: ${await r.text()}`); } const encoder = new TextDecoderStream(); const reader = r.body?.pipeThrough(encoder).getReader(); return (async function* () { let generatedText = ""; let tokenId = 0; let stop = false; while (!stop) { // read the stream and log the outputs to console const out = (await reader?.read()) ?? { done: false, value: undefined }; // we read, if it's done we cancel if (out.done) { reader?.cancel(); return; } if (!out.value) { return; } let data = null; try { data = JSON.parse(out.value); } catch (e) { return; } if (!data.done) { generatedText += data.response; yield { token: { id: tokenId++, text: data.response ?? "", logprob: 0, special: false, }, generated_text: null, details: null, } satisfies TextGenerationStreamOutput; } else { stop = true; yield { token: { id: tokenId++, text: data.response ?? "", logprob: 0, special: true, }, generated_text: generatedText, details: null, } satisfies TextGenerationStreamOutput; } } })(); }; } export default endpointOllama;
chat-ui/src/lib/server/endpoints/ollama/endpointOllama.ts/0
{ "file_path": "chat-ui/src/lib/server/endpoints/ollama/endpointOllama.ts", "repo_id": "chat-ui", "token_count": 1123 }
46
import { JSDOM, VirtualConsole } from "jsdom";

export async function parseWeb(url: string) {
	const abortController = new AbortController();
	setTimeout(() => abortController.abort(), 10000);
	const r = await fetch(url, { signal: abortController.signal, credentials: "omit" }).catch();

	if (r.headers.get("content-type")?.includes("text/html")) {
		const virtualConsole = new VirtualConsole();
		virtualConsole.on("error", () => {
			// No-op to skip console errors.
		});

		// put the html string into a DOM
		const dom = new JSDOM((await r.text()) ?? "", {
			virtualConsole,
		});

		const { document } = dom.window;
		const paragraphs = document.querySelectorAll("p, table, pre, ul, ol");

		if (!paragraphs.length) {
			throw new Error(`webpage doesn't have any parseable element`);
		}
		const paragraphTexts = Array.from(paragraphs).map((p) => p.textContent);

		// combine text contents from paragraphs and then remove newlines and multiple spaces
		const text = paragraphTexts.join(" ").replace(/ {2}|\r\n|\n|\r/gm, "");

		return text;
	} else if (
		r.headers.get("content-type")?.includes("text/plain") ||
		r.headers.get("content-type")?.includes("text/markdown")
	) {
		return r.text();
	} else {
		throw new Error("Unsupported content type");
	}
}
chat-ui/src/lib/server/websearch/parseWeb.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/parseWeb.ts", "repo_id": "chat-ui", "token_count": 428 }
47
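A hedged usage sketch for the `parseWeb` helper shown above; the import path mirrors the record's `file_path`, while the URL and the surrounding server context are assumptions:

import { parseWeb } from "$lib/server/websearch/parseWeb";

// Hypothetical call site: returns flattened text for HTML pages, raw text for
// text/plain or text/markdown responses, and throws for other content types.
async function preview(): Promise<void> {
  const text = await parseWeb("https://example.com");
  console.log(text.slice(0, 200));
}

preview().catch(console.error);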
export interface ConvSidebar {
	id: string;
	title: string;
	updatedAt: Date;
	model?: string;
	assistantId?: string;
	avatarHash?: string;
}
chat-ui/src/lib/types/ConvSidebar.ts/0
{ "file_path": "chat-ui/src/lib/types/ConvSidebar.ts", "repo_id": "chat-ui", "token_count": 50 }
48
import type { ObjectId } from "mongodb";
import type { Timestamps } from "./Timestamps";

export interface User extends Timestamps {
	_id: ObjectId;

	username?: string;
	name: string;
	email?: string;
	avatarUrl: string | undefined;
	hfUserId: string;
}
chat-ui/src/lib/types/User.ts/0
{ "file_path": "chat-ui/src/lib/types/User.ts", "repo_id": "chat-ui", "token_count": 85 }
49
import type { Model } from "$lib/types/Model";

export const findCurrentModel = (models: Model[], id?: string): Model =>
	models.find((m) => m.id === id) ?? models[0];
chat-ui/src/lib/utils/models.ts/0
{ "file_path": "chat-ui/src/lib/utils/models.ts", "repo_id": "chat-ui", "token_count": 54 }
50
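A small illustrative sketch of how `findCurrentModel` behaves; the ids are hypothetical and the objects are cast because a real `Model` carries many more fields than shown here:

import type { Model } from "$lib/types/Model";
import { findCurrentModel } from "$lib/utils/models";

// Trimmed-down, hypothetical model list (cast for brevity).
const models = [{ id: "model-a" }, { id: "model-b" }] as Model[];

// Returns the entry whose id matches...
const current = findCurrentModel(models, "model-b"); // -> { id: "model-b" }
// ...and falls back to the first entry when the id is missing or unknown.
const fallback = findCurrentModel(models, "does-not-exist"); // -> { id: "model-a" }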
import { collections } from "$lib/server/database";
import { ObjectId } from "mongodb";
import { describe, expect, it } from "vitest";

import { convertLegacyConversation } from "./convertLegacyConversation";
import { insertLegacyConversation } from "./treeHelpers.spec";

describe("convertLegacyConversation", () => {
	it("should convert a legacy conversation", async () => {
		const convId = await insertLegacyConversation();
		const conv = await collections.conversations.findOne({ _id: new ObjectId(convId) });
		if (!conv) throw new Error("Conversation not found");

		const newConv = convertLegacyConversation(conv);

		expect(newConv.rootMessageId).toBe(newConv.messages[0].id);
		expect(newConv.messages[0].ancestors).toEqual([]);
		expect(newConv.messages[1].ancestors).toEqual([newConv.messages[0].id]);
		expect(newConv.messages[0].children).toEqual([newConv.messages[1].id]);
	});
	it("should work on empty conversations", async () => {
		const conv = {
			_id: new ObjectId(),
			rootMessageId: undefined,
			messages: [],
		};
		const newConv = convertLegacyConversation(conv);
		expect(newConv.rootMessageId).toBe(undefined);
		expect(newConv.messages).toEqual([]);
	});
});
chat-ui/src/lib/utils/tree/convertLegacyConversation.spec.ts/0
{ "file_path": "chat-ui/src/lib/utils/tree/convertLegacyConversation.spec.ts", "repo_id": "chat-ui", "token_count": 425 }
51
<script lang="ts"> import { base } from "$app/paths"; import { clickOutside } from "$lib/actions/clickOutside"; import { afterNavigate, goto } from "$app/navigation"; import { useSettingsStore } from "$lib/stores/settings"; import type { PageData } from "./$types"; import { applyAction, enhance } from "$app/forms"; import { PUBLIC_APP_NAME, PUBLIC_ORIGIN } from "$env/static/public"; import { page } from "$app/stores"; import IconGear from "~icons/bi/gear-fill"; export let data: PageData; let previousPage: string = base; afterNavigate(({ from }) => { if (!from?.url.pathname.includes("settings")) { previousPage = from?.url.toString() || previousPage; } }); const settings = useSettingsStore(); </script> <svelte:head> <meta property="og:title" content={data.assistant.name + " - " + PUBLIC_APP_NAME} /> <meta property="og:type" content="link" /> <meta property="og:description" content={`Use the ${data.assistant.name} assistant inside of ${PUBLIC_APP_NAME}`} /> <meta property="og:image" content="{PUBLIC_ORIGIN || $page.url.origin}{base}/assistant/{data.assistant._id}/thumbnail.png" /> <meta property="og:url" content={$page.url.href} /> <meta name="twitter:card" content="summary_large_image" /> </svelte:head> <div class="fixed inset-0 flex items-center justify-center bg-black/80 backdrop-blur-sm dark:bg-black/50" > <dialog open use:clickOutside={() => { goto(previousPage); }} class="z-10 flex flex-col content-center items-center gap-x-10 gap-y-3 overflow-hidden rounded-2xl bg-white p-4 pt-6 text-center shadow-2xl outline-none max-sm:w-[85dvw] max-sm:px-6 md:w-96 md:grid-cols-3 md:grid-rows-[auto,1fr] md:p-8" > <div class="absolute right-0 top-0 m-6"> <form method="POST" action="{base}/settings/assistants/{data.assistant._id}?/subscribe" class="w-full" use:enhance={() => { return async ({ result }) => { // `result` is an `ActionResult` object if (result.type === "success") { $settings.activeModel = data.assistant._id; await goto(`${base}/settings/assistants/${data.assistant._id}`, { invalidateAll: true, }); } else { await applyAction(result); } }; }} > <button class="flex items-center rounded-full border border-gray-200 px-2.5 py-1 text-sm text-gray-900 hover:bg-gray-100" name="Settings" type="submit" > <IconGear class="mr-1.5 text-xxs" /> Settings </button> </form> </div> {#if data.assistant.avatar} <img class="size-16 flex-none rounded-full object-cover sm:size-24" src="{base}/settings/assistants/{data.assistant._id}/avatar.jpg?hash={data.assistant .avatar}" alt="avatar" /> {:else} <div class="flex size-16 flex-none items-center justify-center rounded-full bg-gray-300 text-2xl font-bold uppercase text-gray-500 sm:size-24" > {data.assistant.name[0]} </div> {/if} <h1 class="text-balance text-xl font-bold"> {data.assistant.name} </h1> {#if data.assistant.description} <h3 class="line-clamp-6 text-balance text-sm text-gray-500"> {data.assistant.description} </h3> {/if} {#if data.assistant.createdByName} <p class="mt-2 text-sm text-gray-500"> Created by <a class="hover:underline" href="{base}/assistants?user={data.assistant.createdByName}" > {data.assistant.createdByName} </a> </p> {/if} <button class="mt-4 w-full rounded-full bg-gray-200 px-4 py-2 font-semibold text-gray-700" on:click={() => { goto(previousPage); }} > Cancel </button> <form method="POST" action="{base}/settings/assistants/{data.assistant._id}?/subscribe" class="w-full" use:enhance={() => { return async ({ result }) => { // `result` is an `ActionResult` object if (result.type === "success") { $settings.activeModel = data.assistant._id; 
goto(`${base}` || "/"); } else { await applyAction(result); } }; }} > <button type="submit" class=" w-full rounded-full bg-black px-4 py-3 font-semibold text-white" > Start chatting </button> </form> </dialog> </div>
chat-ui/src/routes/assistant/[assistantId]/+page.svelte/0
{ "file_path": "chat-ui/src/routes/assistant/[assistantId]/+page.svelte", "repo_id": "chat-ui", "token_count": 1821 }
52
import { redirect, error } from "@sveltejs/kit"; import { getOIDCUserData, validateAndParseCsrfToken } from "$lib/server/auth"; import { z } from "zod"; import { base } from "$app/paths"; import { updateUser } from "./updateUser"; import { ALLOWED_USER_EMAILS } from "$env/static/private"; import JSON5 from "json5"; const allowedUserEmails = z .array(z.string().email()) .optional() .default([]) .parse(JSON5.parse(ALLOWED_USER_EMAILS)); export async function load({ url, locals, cookies, request, getClientAddress }) { const { error: errorName, error_description: errorDescription } = z .object({ error: z.string().optional(), error_description: z.string().optional(), }) .parse(Object.fromEntries(url.searchParams.entries())); if (errorName) { throw error(400, errorName + (errorDescription ? ": " + errorDescription : "")); } const { code, state } = z .object({ code: z.string(), state: z.string(), }) .parse(Object.fromEntries(url.searchParams.entries())); const csrfToken = Buffer.from(state, "base64").toString("utf-8"); const validatedToken = await validateAndParseCsrfToken(csrfToken, locals.sessionId); if (!validatedToken) { throw error(403, "Invalid or expired CSRF token"); } const { userData } = await getOIDCUserData({ redirectURI: validatedToken.redirectUrl }, code); // Filter by allowed user emails if (allowedUserEmails.length > 0) { if (!userData.email) { throw error(403, "User not allowed: email not returned"); } const emailVerified = userData.email_verified ?? true; if (!emailVerified) { throw error(403, "User not allowed: email not verified"); } if (!allowedUserEmails.includes(userData.email)) { throw error(403, "User not allowed"); } } await updateUser({ userData, locals, cookies, userAgent: request.headers.get("user-agent") ?? undefined, ip: getClientAddress(), }); throw redirect(302, `${base}/`); }
chat-ui/src/routes/login/callback/+page.server.ts/0
{ "file_path": "chat-ui/src/routes/login/callback/+page.server.ts", "repo_id": "chat-ui", "token_count": 671 }
53
import { collections } from "$lib/server/database"; import { type Actions, fail, redirect } from "@sveltejs/kit"; import { ObjectId } from "mongodb"; import { authCondition } from "$lib/server/auth"; import { base } from "$app/paths"; import { PUBLIC_ORIGIN, PUBLIC_SHARE_PREFIX } from "$env/static/public"; import { WEBHOOK_URL_REPORT_ASSISTANT } from "$env/static/private"; import { z } from "zod"; import type { Assistant } from "$lib/types/Assistant"; async function assistantOnlyIfAuthor(locals: App.Locals, assistantId?: string) { const assistant = await collections.assistants.findOne({ _id: new ObjectId(assistantId) }); if (!assistant) { throw Error("Assistant not found"); } if (assistant.createdById.toString() !== (locals.user?._id ?? locals.sessionId).toString()) { throw Error("You are not the author of this assistant"); } return assistant; } export const actions: Actions = { delete: async ({ params, locals }) => { let assistant; try { assistant = await assistantOnlyIfAuthor(locals, params.assistantId); } catch (e) { return fail(400, { error: true, message: (e as Error).message }); } await collections.assistants.deleteOne({ _id: assistant._id }); // and remove it from all users settings await collections.settings.updateMany( { assistants: { $in: [assistant._id] }, }, { $pull: { assistants: assistant._id }, } ); // and delete all avatars const fileCursor = collections.bucket.find({ filename: assistant._id.toString() }); // Step 2: Delete the existing file if it exists let fileId = await fileCursor.next(); while (fileId) { await collections.bucket.delete(fileId._id); fileId = await fileCursor.next(); } throw redirect(302, `${base}/settings`); }, report: async ({ request, params, locals, url }) => { // is there already a report from this user for this model ? const report = await collections.reports.findOne({ createdBy: locals.user?._id ?? locals.sessionId, assistantId: new ObjectId(params.assistantId), }); if (report) { return fail(400, { error: true, message: "Already reported" }); } const formData = await request.formData(); const result = z.string().min(1).max(128).safeParse(formData?.get("reportReason")); if (!result.success) { return fail(400, { error: true, message: "Invalid report reason" }); } const { acknowledged } = await collections.reports.insertOne({ _id: new ObjectId(), assistantId: new ObjectId(params.assistantId), createdBy: locals.user?._id ?? locals.sessionId, createdAt: new Date(), updatedAt: new Date(), reason: result.data, }); if (!acknowledged) { return fail(500, { error: true, message: "Failed to report assistant" }); } if (WEBHOOK_URL_REPORT_ASSISTANT) { const prefixUrl = PUBLIC_SHARE_PREFIX || `${PUBLIC_ORIGIN || url.origin}${base}`; const assistantUrl = `${prefixUrl}/assistant/${params.assistantId}`; const assistant = await collections.assistants.findOne<Pick<Assistant, "name">>( { _id: new ObjectId(params.assistantId) }, { projection: { name: 1 } } ); const username = locals.user?.username; const res = await fetch(WEBHOOK_URL_REPORT_ASSISTANT, { method: "POST", headers: { "Content-type": "application/json", }, body: JSON.stringify({ text: `Assistant <${assistantUrl}|${assistant?.name}> reported by ${ username ? `<http://hf.co/${username}|${username}>` : "non-logged in user" }.\n\n> ${result.data}`, }), }); if (!res.ok) { console.error(`Webhook assistant report failed. 
${res.statusText} ${res.text}`); } } return { from: "report", ok: true, message: "Assistant reported" }; }, subscribe: async ({ params, locals }) => { const assistant = await collections.assistants.findOne({ _id: new ObjectId(params.assistantId), }); if (!assistant) { return fail(404, { error: true, message: "Assistant not found" }); } // don't push if it's already there const settings = await collections.settings.findOne(authCondition(locals)); if (settings?.assistants?.includes(assistant._id)) { return fail(400, { error: true, message: "Already subscribed" }); } const result = await collections.settings.updateOne(authCondition(locals), { $addToSet: { assistants: assistant._id }, }); // reduce count only if push succeeded if (result.modifiedCount > 0) { await collections.assistants.updateOne({ _id: assistant._id }, { $inc: { userCount: 1 } }); } return { from: "subscribe", ok: true, message: "Assistant added" }; }, unsubscribe: async ({ params, locals }) => { const assistant = await collections.assistants.findOne({ _id: new ObjectId(params.assistantId), }); if (!assistant) { return fail(404, { error: true, message: "Assistant not found" }); } const result = await collections.settings.updateOne(authCondition(locals), { $pull: { assistants: assistant._id }, }); // reduce count only if pull succeeded if (result.modifiedCount > 0) { await collections.assistants.updateOne({ _id: assistant._id }, { $inc: { userCount: -1 } }); } throw redirect(302, `${base}/settings`); }, };
chat-ui/src/routes/settings/(nav)/assistants/[assistantId]/+page.server.ts/0
{ "file_path": "chat-ui/src/routes/settings/(nav)/assistants/[assistantId]/+page.server.ts", "repo_id": "chat-ui", "token_count": 1828 }
54
{ "$schema": "https://vega.github.io/schema/vega-lite/v4.json", "data": { "values": "<DVC_METRIC_DATA>" }, "title": "<DVC_METRIC_TITLE>", "mark": "rect", "encoding": { "x": { "field": "<DVC_METRIC_X>", "type": "nominal", "sort": "ascending", "title": "<DVC_METRIC_X_LABEL>" }, "y": { "field": "<DVC_METRIC_Y>", "type": "nominal", "sort": "ascending", "title": "<DVC_METRIC_Y_LABEL>" }, "color": { "aggregate": "count", "type": "quantitative" }, "facet": { "field": "rev", "type": "nominal" } } }
datasets/.dvc/plots/confusion.json/0
{ "file_path": "datasets/.dvc/plots/confusion.json", "repo_id": "datasets", "token_count": 450 }
55
cff-version: 1.2.0 message: "If you use this software, please cite it as below." title: "huggingface/datasets" authors: - family-names: Lhoest given-names: Quentin - family-names: Villanova del Moral given-names: Albert orcid: "https://orcid.org/0000-0003-1727-1045" - family-names: von Platen given-names: Patrick - family-names: Wolf given-names: Thomas - family-names: Šaško given-names: Mario - family-names: Jernite given-names: Yacine - family-names: Thakur given-names: Abhishek - family-names: Tunstall given-names: Lewis - family-names: Patil given-names: Suraj - family-names: Drame given-names: Mariama - family-names: Chaumond given-names: Julien - family-names: Plu given-names: Julien - family-names: Davison given-names: Joe - family-names: Brandeis given-names: Simon - family-names: Sanh given-names: Victor - family-names: Le Scao given-names: Teven - family-names: Canwen Xu given-names: Kevin - family-names: Patry given-names: Nicolas - family-names: Liu given-names: Steven - family-names: McMillan-Major given-names: Angelina - family-names: Schmid given-names: Philipp - family-names: Gugger given-names: Sylvain - family-names: Raw given-names: Nathan - family-names: Lesage given-names: Sylvain - family-names: Lozhkov given-names: Anton - family-names: Carrigan given-names: Matthew - family-names: Matussière given-names: Théo - family-names: von Werra given-names: Leandro - family-names: Debut given-names: Lysandre - family-names: Bekman given-names: Stas - family-names: Delangue given-names: Clément doi: 10.5281/zenodo.4817768 repository-code: "https://github.com/huggingface/datasets" license: Apache-2.0 preferred-citation: type: conference-paper title: "Datasets: A Community Library for Natural Language Processing" authors: - family-names: Lhoest given-names: Quentin - family-names: Villanova del Moral given-names: Albert orcid: "https://orcid.org/0000-0003-1727-1045" - family-names: von Platen given-names: Patrick - family-names: Wolf given-names: Thomas - family-names: Šaško given-names: Mario - family-names: Jernite given-names: Yacine - family-names: Thakur given-names: Abhishek - family-names: Tunstall given-names: Lewis - family-names: Patil given-names: Suraj - family-names: Drame given-names: Mariama - family-names: Chaumond given-names: Julien - family-names: Plu given-names: Julien - family-names: Davison given-names: Joe - family-names: Brandeis given-names: Simon - family-names: Sanh given-names: Victor - family-names: Le Scao given-names: Teven - family-names: Canwen Xu given-names: Kevin - family-names: Patry given-names: Nicolas - family-names: Liu given-names: Steven - family-names: McMillan-Major given-names: Angelina - family-names: Schmid given-names: Philipp - family-names: Gugger given-names: Sylvain - family-names: Raw given-names: Nathan - family-names: Lesage given-names: Sylvain - family-names: Lozhkov given-names: Anton - family-names: Carrigan given-names: Matthew - family-names: Matussière given-names: Théo - family-names: von Werra given-names: Leandro - family-names: Debut given-names: Lysandre - family-names: Bekman given-names: Stas - family-names: Delangue given-names: Clément collection-title: "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing: System Demonstrations" collection-type: proceedings month: 11 year: 2021 publisher: name: "Association for Computational Linguistics" url: "https://aclanthology.org/2021.emnlp-demo.21" start: 175 end: 184 identifiers: - type: other value: "arXiv:2109.02846" description: 
"The arXiv preprint of the paper"
datasets/CITATION.cff/0
{ "file_path": "datasets/CITATION.cff", "repo_id": "datasets", "token_count": 1428 }
56
# Know your dataset There are two types of dataset objects, a regular [`Dataset`] and then an ✨ [`IterableDataset`] ✨. A [`Dataset`] provides fast random access to the rows, and memory-mapping so that loading even large datasets only uses a relatively small amount of device memory. But for really, really big datasets that won't even fit on disk or in memory, an [`IterableDataset`] allows you to access and use the dataset without waiting for it to download completely! This tutorial will show you how to load and access a [`Dataset`] and an [`IterableDataset`]. ## Dataset When you load a dataset split, you'll get a [`Dataset`] object. You can do many things with a [`Dataset`] object, which is why it's important to learn how to manipulate and interact with the data stored inside. This tutorial uses the [rotten_tomatoes](https://huggingface.co/datasets/rotten_tomatoes) dataset, but feel free to load any dataset you'd like and follow along! ```py >>> from datasets import load_dataset >>> dataset = load_dataset("rotten_tomatoes", split="train") ``` ### Indexing A [`Dataset`] contains columns of data, and each column can be a different type of data. The *index*, or axis label, is used to access examples from the dataset. For example, indexing by the row returns a dictionary of an example from the dataset: ```py # Get the first row in the dataset >>> dataset[0] {'label': 1, 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} ``` Use the `-` operator to start from the end of the dataset: ```py # Get the last row in the dataset >>> dataset[-1] {'label': 0, 'text': 'things really get weird , though not particularly scary : the movie is all portent and no content .'} ``` Indexing by the column name returns a list of all the values in the column: ```py >>> dataset["text"] ['the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .', 'effective but too-tepid biopic', ..., 'things really get weird , though not particularly scary : the movie is all portent and no content .'] ``` You can combine row and column name indexing to return a specific value at a position: ```py >>> dataset[0]["text"] 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .' ``` But it is important to remember that indexing order matters, especially when working with large audio and image datasets. Indexing by the column name returns all the values in the column first, then loads the value at that position. For large datasets, it may be slower to index by the column name first. 
```py >>> import time >>> start_time = time.time() >>> text = dataset[0]["text"] >>> end_time = time.time() >>> print(f"Elapsed time: {end_time - start_time:.4f} seconds") Elapsed time: 0.0031 seconds >>> start_time = time.time() >>> text = dataset["text"][0] >>> end_time = time.time() >>> print(f"Elapsed time: {end_time - start_time:.4f} seconds") Elapsed time: 0.0094 seconds ``` ### Slicing Slicing returns a slice - or subset - of the dataset, which is useful for viewing several rows at once. To slice a dataset, use the `:` operator to specify a range of positions. ```py # Get the first three rows >>> dataset[:3] {'label': [1, 1, 1], 'text': ['the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .', 'effective but too-tepid biopic']} # Get rows between three and six >>> dataset[3:6] {'label': [1, 1, 1], 'text': ['if you sometimes like to go to the movies to have fun , wasabi is a good place to start .', "emerges as something rare , an issue movie that's so honest and keenly observed that it doesn't feel like one .", 'the film provides some great insight into the neurotic mindset of all comics -- even those who have reached the absolute top of the game .']} ``` ## IterableDataset An [`IterableDataset`] is loaded when you set the `streaming` parameter to `True` in [`~datasets.load_dataset`]: ```py >>> from datasets import load_dataset >>> iterable_dataset = load_dataset("food101", split="train", streaming=True) >>> for example in iterable_dataset: ... print(example) ... break {'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=384x512 at 0x7F0681F5C520>, 'label': 6} ``` You can also create an [`IterableDataset`] from an *existing* [`Dataset`], but it is faster than streaming mode because the dataset is streamed from local files: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("rotten_tomatoes", split="train") >>> iterable_dataset = dataset.to_iterable_dataset() ``` An [`IterableDataset`] progressively iterates over a dataset one example at a time, so you don't have to wait for the whole dataset to download before you can use it. As you can imagine, this is quite useful for large datasets you want to use immediately! However, this means an [`IterableDataset`]'s behavior is different from a regular [`Dataset`]. You don't get random access to examples in an [`IterableDataset`]. Instead, you should iterate over its elements, for example, by calling `next(iter())` or with a `for` loop to return the next item from the [`IterableDataset`]: ```py >>> next(iter(iterable_dataset)) {'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=384x512 at 0x7F0681F59B50>, 'label': 6} >>> for example in iterable_dataset: ... print(example) ... 
break {'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=384x512 at 0x7F7479DE82B0>, 'label': 6} ``` You can return a subset of the dataset with a specific number of examples in it with [`IterableDataset.take`]: ```py # Get first three examples >>> list(iterable_dataset.take(3)) [{'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=384x512 at 0x7F7479DEE9D0>, 'label': 6}, {'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=512x512 at 0x7F7479DE8190>, 'label': 6}, {'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=512x383 at 0x7F7479DE8310>, 'label': 6}] ``` But unlike [slicing](access/#slicing), [`IterableDataset.take`] creates a new [`IterableDataset`]. ## Next steps Interested in learning more about the differences between these two types of datasets? Learn more about them in the [Differences between `Dataset` and `IterableDataset`](about_mapstyle_vs_iterable) conceptual guide. To get more hands-on with these datasets types, check out the [Process](process) guide to learn how to preprocess a [`Dataset`] or the [Stream](stream) guide to learn how to preprocess an [`IterableDataset`].
datasets/docs/source/access.mdx/0
{ "file_path": "datasets/docs/source/access.mdx", "repo_id": "datasets", "token_count": 2274 }
57
# Load image data Image datasets have [`Image`] type columns, which contain PIL objects. <Tip> To work with image datasets, you need to have the `vision` dependency installed. Check out the [installation](./installation#vision) guide to learn how to install it. </Tip> When you load an image dataset and call the image column, the images are decoded as PIL Images: ```py >>> from datasets import load_dataset, Image >>> dataset = load_dataset("beans", split="train") >>> dataset[0]["image"] ``` <Tip warning={true}> Index into an image dataset using the row index first and then the `image` column - `dataset[0]["image"]` - to avoid decoding and resampling all the image objects in the dataset. Otherwise, this can be a slow and time-consuming process if you have a large dataset. </Tip> For a guide on how to load any type of dataset, take a look at the <a class="underline decoration-sky-400 decoration-2 font-semibold" href="./loading">general loading guide</a>. ## Local files You can load a dataset from the image path. Use the [`~Dataset.cast_column`] function to accept a column of image file paths, and decode it into a PIL image with the [`Image`] feature: ```py >>> from datasets import Dataset, Image >>> dataset = Dataset.from_dict({"image": ["path/to/image_1", "path/to/image_2", ..., "path/to/image_n"]}).cast_column("image", Image()) >>> dataset[0]["image"] <PIL.PngImagePlugin.PngImageFile image mode=RGBA size=1200x215 at 0x15E6D7160>] ``` If you only want to load the underlying path to the image dataset without decoding the image object, set `decode=False` in the [`Image`] feature: ```py >>> dataset = load_dataset("beans", split="train").cast_column("image", Image(decode=False)) >>> dataset[0]["image"] {'bytes': None, 'path': '/root/.cache/huggingface/datasets/downloads/extracted/b0a21163f78769a2cf11f58dfc767fb458fc7cea5c05dccc0144a2c0f0bc1292/train/bean_rust/bean_rust_train.29.jpg'} ``` ## ImageFolder You can also load a dataset with an `ImageFolder` dataset builder which does not require writing a custom dataloader. This makes `ImageFolder` ideal for quickly creating and loading image datasets with several thousand images for different vision tasks. Your image dataset structure should look like this: ``` folder/train/dog/golden_retriever.png folder/train/dog/german_shepherd.png folder/train/dog/chihuahua.png folder/train/cat/maine_coon.png folder/train/cat/bengal.png folder/train/cat/birman.png ``` Load your dataset by specifying `imagefolder` and the directory of your dataset in `data_dir`: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("imagefolder", data_dir="/path/to/folder") >>> dataset["train"][0] {"image": <PIL.PngImagePlugin.PngImageFile image mode=RGBA size=1200x215 at 0x15E6D7160>, "label": 0} >>> dataset["train"][-1] {"image": <PIL.PngImagePlugin.PngImageFile image mode=RGBA size=1200x215 at 0x15E8DAD30>, "label": 1} ``` Load remote datasets from their URLs with the `data_files` parameter: ```py >>> dataset = load_dataset("imagefolder", data_files="https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_5340.zip", split="train") ``` Some datasets have a metadata file (`metadata.csv`/`metadata.jsonl`) associated with it, containing other information about the data like bounding boxes, text captions, and labels. The metadata is automatically loaded when you call [`load_dataset`] and specify `imagefolder`. 
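For example, here is a minimal sketch of what that looks like for image captions. The folder layout and the `text` column below are assumptions for illustration; the one requirement is that `metadata.csv` contains a `file_name` column pointing to the images:

```py
# assumed layout:
# folder/train/metadata.csv   <- columns: file_name, text
# folder/train/0001.png
# folder/train/0002.png
>>> from datasets import load_dataset
>>> dataset = load_dataset("imagefolder", data_dir="/path/to/folder", split="train")
>>> dataset[0]["text"]  # the caption read from metadata.csv
```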
To ignore the information in the metadata file, set `drop_labels=False` in [`load_dataset`], and allow `ImageFolder` to automatically infer the label name from the directory name: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("imagefolder", data_dir="/path/to/folder", drop_labels=False) ``` <Tip> For more information about creating your own `ImageFolder` dataset, take a look at the [Create an image dataset](./image_dataset) guide. </Tip> ## WebDataset The [WebDataset](https://github.com/webdataset/webdataset) format is based on a folder of TAR archives and is suitable for big image datasets. Because of their size, WebDatasets are generally loaded in streaming mode (using `streaming=True`). You can load a WebDataset like this: ```python >>> from datasets import load_dataset >>> dataset = load_dataset("webdataset", data_dir="/path/to/folder", streaming=True) ```
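Since the result is streamed, you can then iterate over it directly. Here is a minimal sketch; the exact field names depend on the file extensions inside the TAR archives, so `jpg` and `cls` below are assumptions:

```python
>>> example = next(iter(dataset["train"]))
>>> list(example)  # field names depend on the archive contents
['__key__', '__url__', 'jpg', 'cls']
```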
datasets/docs/source/image_load.mdx/0
{ "file_path": "datasets/docs/source/image_load.mdx", "repo_id": "datasets", "token_count": 1388 }
58
# Task templates <Tip warning={true}> The Task API is deprecated in favor of [`train-eval-index`](https://github.com/huggingface/hub-docs/blob/9ab2555e1c146122056aba6f89af404a8bc9a6f1/datasetcard.md?plain=1#L90-L106) and will be removed in the next major release. </Tip> The tasks supported by [`Dataset.prepare_for_task`] and [`DatasetDict.prepare_for_task`]. [[autodoc]] datasets.tasks.AutomaticSpeechRecognition [[autodoc]] datasets.tasks.AudioClassification [[autodoc]] datasets.tasks.ImageClassification - align_with_features [[autodoc]] datasets.tasks.LanguageModeling [[autodoc]] datasets.tasks.QuestionAnsweringExtractive [[autodoc]] datasets.tasks.Summarization [[autodoc]] datasets.tasks.TextClassification - align_with_features
datasets/docs/source/package_reference/task_templates.mdx/0
{ "file_path": "datasets/docs/source/package_reference/task_templates.mdx", "repo_id": "datasets", "token_count": 277 }
59
# Using Datasets with TensorFlow

This document is a quick introduction to using `datasets` with TensorFlow, with a particular focus on how to get
`tf.Tensor` objects out of our datasets, and how to stream data from Hugging Face `Dataset` objects to Keras methods
like `model.fit()`.

## Dataset format

By default, datasets return regular Python objects: integers, floats, strings, lists, etc.

To get TensorFlow tensors instead, you can set the format of the dataset to `tf`:

```py
>>> from datasets import Dataset
>>> data = [[1, 2],[3, 4]]
>>> ds = Dataset.from_dict({"data": data})
>>> ds = ds.with_format("tf")
>>> ds[0]
{'data': <tf.Tensor: shape=(2,), dtype=int64, numpy=array([1, 2])>}
>>> ds[:2]
{'data': <tf.Tensor: shape=(2, 2), dtype=int64, numpy=
array([[1, 2],
       [3, 4]])>}
```

<Tip>

A [`Dataset`] object is a wrapper of an Arrow table, which allows fast reads from arrays in the dataset to TensorFlow tensors.

</Tip>

This can be useful for converting your dataset to a dict of `Tensor` objects, or for writing a generator to load TF samples from it. If you wish to convert the entire dataset to `Tensor`, simply query the full dataset:

```py
>>> ds[:]
{'data': <tf.Tensor: shape=(2, 2), dtype=int64, numpy=
array([[1, 2],
       [3, 4]])>}
```

## N-dimensional arrays

If your dataset consists of N-dimensional arrays, you will see that by default they are considered as nested lists.
In particular, a TensorFlow formatted dataset outputs a `RaggedTensor` instead of a single tensor:

```py
>>> from datasets import Dataset
>>> data = [[[1, 2],[3, 4]],[[5, 6],[7, 8]]]
>>> ds = Dataset.from_dict({"data": data})
>>> ds = ds.with_format("tf")
>>> ds[0]
{'data': <tf.RaggedTensor [[1, 2], [3, 4]]>}
```

To get a single tensor, you must explicitly use the [`Array`] feature type and specify the shape of your tensors:

```py
>>> from datasets import Dataset, Features, Array2D
>>> data = [[[1, 2],[3, 4]],[[5, 6],[7, 8]]]
>>> features = Features({"data": Array2D(shape=(2, 2), dtype='int32')})
>>> ds = Dataset.from_dict({"data": data}, features=features)
>>> ds = ds.with_format("tf")
>>> ds[0]
{'data': <tf.Tensor: shape=(2, 2), dtype=int64, numpy=
array([[1, 2],
       [3, 4]])>}
>>> ds[:2]
{'data': <tf.Tensor: shape=(2, 2, 2), dtype=int64, numpy=
array([[[1, 2],
        [3, 4]],

       [[5, 6],
        [7, 8]]])>}
```

## Other feature types

[`ClassLabel`] data are properly converted to tensors:

```py
>>> from datasets import Dataset, Features, ClassLabel
>>> labels = [0, 0, 1]
>>> features = Features({"label": ClassLabel(names=["negative", "positive"])})
>>> ds = Dataset.from_dict({"label": labels}, features=features)
>>> ds = ds.with_format("tf")
>>> ds[:3]
{'label': <tf.Tensor: shape=(3,), dtype=int64, numpy=array([0, 0, 1])>}
```

Strings and binary objects are also supported:

```py
>>> from datasets import Dataset, Features
>>> text = ["foo", "bar"]
>>> data = [0, 1]
>>> ds = Dataset.from_dict({"text": text, "data": data})
>>> ds = ds.with_format("tf")
>>> ds[:2]
{'text': <tf.Tensor: shape=(2,), dtype=string, numpy=array([b'foo', b'bar'], dtype=object)>,
 'data': <tf.Tensor: shape=(2,), dtype=int64, numpy=array([0, 1])>}
```

You can also explicitly format certain columns and leave the other columns unformatted:

```py
>>> ds = ds.with_format("tf", columns=["data"], output_all_columns=True)
>>> ds[:2]
{'data': <tf.Tensor: shape=(2,), dtype=int64, numpy=array([0, 1])>,
 'text': ['foo', 'bar']}
```

Columns left unformatted (like `text` above) are returned unchanged, as regular Python objects.

The [`Image`] and [`Audio`] feature types are also supported.
<Tip> To use the [`Image`] feature type, you'll need to install the `vision` extra as `pip install datasets[vision]`. </Tip> ```py >>> from datasets import Dataset, Features, Audio, Image >>> images = ["path/to/image.png"] * 10 >>> features = Features({"image": Image()}) >>> ds = Dataset.from_dict({"image": images}, features=features) >>> ds = ds.with_format("tf") >>> ds[0] {'image': <tf.Tensor: shape=(512, 512, 4), dtype=uint8, numpy= array([[[255, 215, 106, 255], [255, 215, 106, 255], ..., [255, 255, 255, 255], [255, 255, 255, 255]]], dtype=uint8)>} >>> ds[:2] {'image': <tf.Tensor: shape=(2, 512, 512, 4), dtype=uint8, numpy= array([[[[255, 215, 106, 255], [255, 215, 106, 255], ..., [255, 255, 255, 255], [255, 255, 255, 255]]]], dtype=uint8)>} ``` <Tip> To use the [`Audio`] feature type, you'll need to install the `audio` extra as `pip install datasets[audio]`. </Tip> ```py >>> from datasets import Dataset, Features, Audio, Image >>> audio = ["path/to/audio.wav"] * 10 >>> features = Features({"audio": Audio()}) >>> ds = Dataset.from_dict({"audio": audio}, features=features) >>> ds = ds.with_format("tf") >>> ds[0]["audio"]["array"] <tf.Tensor: shape=(202311,), dtype=float32, numpy= array([ 6.1035156e-05, 1.5258789e-05, 1.6784668e-04, ..., -1.5258789e-05, -1.5258789e-05, 1.5258789e-05], dtype=float32)> >>> ds[0]["audio"]["sampling_rate"] <tf.Tensor: shape=(), dtype=int32, numpy=44100> ``` ## Data loading Although you can load individual samples and batches just by indexing into your dataset, this won't work if you want to use Keras methods like `fit()` and `predict()`. You could write a generator function that shuffles and loads batches from your dataset and `fit()` on that, but that sounds like a lot of unnecessary work. Instead, if you want to stream data from your dataset on-the-fly, we recommend converting your dataset to a `tf.data.Dataset` using the `to_tf_dataset()` method. The `tf.data.Dataset` class covers a wide range of use-cases - it is often created from Tensors in memory, or using a load function to read files on disc or external storage. The dataset can be transformed arbitrarily with the `map()` method, or methods like `batch()` and `shuffle()` can be used to create a dataset that's ready for training. These methods do not modify the stored data in any way - instead, the methods build a data pipeline graph that will be executed when the dataset is iterated over, usually during model training or inference. This is different from the `map()` method of Hugging Face `Dataset` objects, which runs the map function immediately and saves the new or changed columns. Since the entire data preprocessing pipeline can be compiled in a `tf.data.Dataset`, this approach allows for massively parallel, asynchronous data loading and training. However, the requirement for graph compilation can be a limitation, particularly for Hugging Face tokenizers, which are usually not (yet!) compilable as part of a TF graph. As a result, we usually advise pre-processing the dataset as a Hugging Face dataset, where arbitrary Python functions can be used, and then converting to `tf.data.Dataset` afterwards using `to_tf_dataset()` to get a batched dataset ready for training. To see examples of this approach, please see the [examples](https://github.com/huggingface/transformers/tree/main/examples) or [notebooks](https://huggingface.co/docs/transformers/notebooks) for `transformers`. ### Using `to_tf_dataset()` Using `to_tf_dataset()` is straightforward. 
Once your dataset is preprocessed and ready, simply call it like so: ```py >>> from datasets import Dataset >>> data = {"inputs": [[1, 2],[3, 4]], "labels": [0, 1]} >>> ds = Dataset.from_dict(data) >>> tf_ds = ds.to_tf_dataset( columns=["inputs"], label_cols=["labels"], batch_size=2, shuffle=True ) ``` The returned `tf_ds` object here is now fully ready to train on, and can be passed directly to `model.fit()`. Note that you set the batch size when creating the dataset, and so you don't need to specify it when calling `fit()`: ```py >>> model.fit(tf_ds, epochs=2) ``` For a full description of the arguments, please see the [`~Dataset.to_tf_dataset`] documentation. In many cases, you will also need to add a `collate_fn` to your call. This is a function that takes multiple elements of the dataset and combines them into a single batch. When all elements have the same length, the built-in default collator will suffice, but for more complex tasks a custom collator may be necessary. In particular, many tasks have samples with varying sequence lengths which will require a [data collator](https://huggingface.co/docs/transformers/main/en/main_classes/data_collator) that can pad batches correctly. You can see examples of this in the `transformers` NLP [examples](https://github.com/huggingface/transformers/tree/main/examples) and [notebooks](https://huggingface.co/docs/transformers/notebooks), where variable sequence lengths are very common. If you find that loading with `to_tf_dataset` is slow, you can also use the `num_workers` argument. This spins up multiple subprocesses to load data in parallel. This feature is recent and still somewhat experimental - please file an issue if you encounter any bugs while using it! ### When to use to_tf_dataset The astute reader may have noticed at this point that we have offered two approaches to achieve the same goal - if you want to pass your dataset to a TensorFlow model, you can either convert the dataset to a `Tensor` or `dict` of `Tensors` using `.with_format('tf')`, or you can convert the dataset to a `tf.data.Dataset` with `to_tf_dataset()`. Either of these can be passed to `model.fit()`, so which should you choose? The key thing to recognize is that when you convert the whole dataset to `Tensor`s, it is static and fully loaded into RAM. This is simple and convenient, but if any of the following apply, you should probably use `to_tf_dataset()` instead: - Your dataset is too large to fit in RAM. `to_tf_dataset()` streams only one batch at a time, so even very large datasets can be handled with this method. - You want to apply random transformations using `dataset.with_transform()` or the `collate_fn`. This is common in several modalities, such as image augmentations when training vision models, or random masking when training masked language models. Using `to_tf_dataset()` will apply those transformations at the moment when a batch is loaded, which means the same samples will get different augmentations each time they are loaded. This is usually what you want. - Your data has a variable dimension, such as input texts in NLP that consist of varying numbers of tokens. When you create a batch with samples with a variable dimension, the standard solution is to pad the shorter samples to the length of the longest one. When you stream samples from a dataset with `to_tf_dataset`, you can apply this padding to each batch via your `collate_fn`. 
However, if you want to convert such a dataset to dense `Tensor`s, then you will have to pad samples to the length of the longest sample in *the entire dataset!* This can result in huge amounts of padding, which wastes memory and reduces your model's speed. ### Caveats and limitations Right now, `to_tf_dataset()` always returns a batched dataset - we will add support for unbatched datasets soon!
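As a concrete illustration of the padding discussion above, here is a minimal, hypothetical sketch of a custom `collate_fn` that pads variable-length samples batch by batch; the toy dataset and the `input_ids`/`labels` column names are assumptions, not part of this guide:

```py
>>> import numpy as np
>>> from datasets import Dataset

>>> ds = Dataset.from_dict({
...     "input_ids": [[1, 2, 3], [4, 5], [6]],
...     "labels": [0, 1, 0],
... })

>>> def pad_batch(features):
...     # `features` is a list of example dicts; pad "input_ids" to the longest sample in this batch only
...     max_len = max(len(f["input_ids"]) for f in features)
...     return {
...         "input_ids": np.array(
...             [f["input_ids"] + [0] * (max_len - len(f["input_ids"])) for f in features]
...         ),
...         "labels": np.array([f["labels"] for f in features]),
...     }

>>> tf_ds = ds.to_tf_dataset(
...     columns=["input_ids"],
...     label_cols=["labels"],
...     batch_size=2,
...     shuffle=True,
...     collate_fn=pad_batch,
... )
```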
datasets/docs/source/use_with_tensorflow.mdx/0
{ "file_path": "datasets/docs/source/use_with_tensorflow.mdx", "repo_id": "datasets", "token_count": 3671 }
60
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This code is adapted from OpenAI's release # https://github.com/openai/human-eval/blob/master/human_eval/execution.py import contextlib import faulthandler import io import multiprocessing import os import platform import signal import tempfile def check_correctness(check_program, timeout, task_id, completion_id): """ Evaluates the functional correctness of a completion by running the test suite provided in the problem. :param completion_id: an optional completion ID so we can match the results later even if execution finishes asynchronously. """ manager = multiprocessing.Manager() result = manager.list() p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout)) p.start() p.join(timeout=timeout + 1) if p.is_alive(): p.kill() if not result: result.append("timed out") return { "task_id": task_id, "passed": result[0] == "passed", "result": result[0], "completion_id": completion_id, } def unsafe_execute(check_program, result, timeout): with create_tempdir(): # These system calls are needed when cleaning up tempdir. import os import shutil rmtree = shutil.rmtree rmdir = os.rmdir chdir = os.chdir # Disable functionalities that can make destructive changes to the test. reliability_guard() # Run program. try: exec_globals = {} with swallow_io(): with time_limit(timeout): exec(check_program, exec_globals) result.append("passed") except TimeoutException: result.append("timed out") except BaseException as e: result.append(f"failed: {e}") # Needed for cleaning up. 
shutil.rmtree = rmtree os.rmdir = rmdir os.chdir = chdir @contextlib.contextmanager def time_limit(seconds): def signal_handler(signum, frame): raise TimeoutException("Timed out!") signal.setitimer(signal.ITIMER_REAL, seconds) signal.signal(signal.SIGALRM, signal_handler) try: yield finally: signal.setitimer(signal.ITIMER_REAL, 0) @contextlib.contextmanager def swallow_io(): stream = WriteOnlyStringIO() with contextlib.redirect_stdout(stream): with contextlib.redirect_stderr(stream): with redirect_stdin(stream): yield @contextlib.contextmanager def create_tempdir(): with tempfile.TemporaryDirectory() as dirname: with chdir(dirname): yield dirname class TimeoutException(Exception): pass class WriteOnlyStringIO(io.StringIO): """StringIO that throws an exception when it's read from""" def read(self, *args, **kwargs): raise OSError def readline(self, *args, **kwargs): raise OSError def readlines(self, *args, **kwargs): raise OSError def readable(self, *args, **kwargs): """Returns True if the IO object can be read.""" return False class redirect_stdin(contextlib._RedirectStream): # type: ignore _stream = "stdin" @contextlib.contextmanager def chdir(root): if root == ".": yield return cwd = os.getcwd() os.chdir(root) try: yield except BaseException as exc: raise exc finally: os.chdir(cwd) def reliability_guard(maximum_memory_bytes=None): """ This disables various destructive functions and prevents the generated code from interfering with the test (e.g. fork bomb, killing other processes, removing filesystem files, etc.) WARNING This function is NOT a security sandbox. Untrusted code, including, model- generated code, should not be blindly executed outside of one. See the Codex paper for more information about OpenAI's code sandbox, and proceed with caution. """ if maximum_memory_bytes is not None: import resource resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes)) resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes)) if not platform.uname().system == "Darwin": resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes)) faulthandler.disable() import builtins builtins.exit = None builtins.quit = None import os os.environ["OMP_NUM_THREADS"] = "1" os.kill = None os.system = None os.putenv = None os.remove = None os.removedirs = None os.rmdir = None os.fchdir = None os.setuid = None os.fork = None os.forkpty = None os.killpg = None os.rename = None os.renames = None os.truncate = None os.replace = None os.unlink = None os.fchmod = None os.fchown = None os.chmod = None os.chown = None os.chroot = None os.fchdir = None os.lchflags = None os.lchmod = None os.lchown = None os.getcwd = None os.chdir = None import shutil shutil.rmtree = None shutil.move = None shutil.chown = None import subprocess subprocess.Popen = None # type: ignore __builtins__["help"] = None import sys sys.modules["ipdb"] = None sys.modules["joblib"] = None sys.modules["resource"] = None sys.modules["psutil"] = None sys.modules["tkinter"] = None
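# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hypothetical example of calling `check_correctness` directly: it executes a
# candidate program (solution plus its test code) in a separate process, with the module's
# reliability guard applied, and reports a verdict dict. The program text, task_id and
# completion_id below are made up for illustration.
if __name__ == "__main__":
    candidate_program = (
        "def add(a, b):\n"
        "    return a + b\n"
        "\n"
        "assert add(1, 2) == 3\n"
    )
    verdict = check_correctness(candidate_program, timeout=3.0, task_id="demo/0", completion_id=0)
    print(verdict)  # expected: {'task_id': 'demo/0', 'passed': True, 'result': 'passed', 'completion_id': 0}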
datasets/metrics/code_eval/execute.py/0
{ "file_path": "datasets/metrics/code_eval/execute.py", "repo_id": "datasets", "token_count": 2368 }
61
# Metric Card for GLUE ## Metric description This metric is used to compute the GLUE evaluation metric associated to each [GLUE dataset](https://huggingface.co/datasets/glue). GLUE, the General Language Understanding Evaluation benchmark is a collection of resources for training, evaluating, and analyzing natural language understanding systems. ## How to use There are two steps: (1) loading the GLUE metric relevant to the subset of the GLUE dataset being used for evaluation; and (2) calculating the metric. 1. **Loading the relevant GLUE metric** : the subsets of GLUE are the following: `sst2`, `mnli`, `mnli_mismatched`, `mnli_matched`, `qnli`, `rte`, `wnli`, `cola`,`stsb`, `mrpc`, `qqp`, and `hans`. More information about the different subsets of the GLUE dataset can be found on the [GLUE dataset page](https://huggingface.co/datasets/glue). 2. **Calculating the metric**: the metric takes two inputs : one list with the predictions of the model to score and one lists of references for each translation. ```python from datasets import load_metric glue_metric = load_metric('glue', 'sst2') references = [0, 1] predictions = [0, 1] results = glue_metric.compute(predictions=predictions, references=references) ``` ## Output values The output of the metric depends on the GLUE subset chosen, consisting of a dictionary that contains one or several of the following metrics: `accuracy`: the proportion of correct predictions among the total number of cases processed, with a range between 0 and 1 (see [accuracy](https://huggingface.co/metrics/accuracy) for more information). `f1`: the harmonic mean of the precision and recall (see [F1 score](https://huggingface.co/metrics/f1) for more information). Its range is 0-1 -- its lowest possible value is 0, if either the precision or the recall is 0, and its highest possible value is 1.0, which means perfect precision and recall. `pearson`: a measure of the linear relationship between two datasets (see [Pearson correlation](https://huggingface.co/metrics/pearsonr) for more information). Its range is between -1 and +1, with 0 implying no correlation, and -1/+1 implying an exact linear relationship. Positive correlations imply that as x increases, so does y, whereas negative correlations imply that as x increases, y decreases. `spearmanr`: a nonparametric measure of the monotonicity of the relationship between two datasets(see [Spearman Correlation](https://huggingface.co/metrics/spearmanr) for more information). `spearmanr` has the same range as `pearson`. `matthews_correlation`: a measure of the quality of binary and multiclass classifications (see [Matthews Correlation](https://huggingface.co/metrics/matthews_correlation) for more information). Its range of values is between -1 and +1, where a coefficient of +1 represents a perfect prediction, 0 an average random prediction and -1 an inverse prediction. The `cola` subset returns `matthews_correlation`, the `stsb` subset returns `pearson` and `spearmanr`, the `mrpc` and `qqp` subsets return both `accuracy` and `f1`, and all other subsets of GLUE return only accuracy. ### Values from popular papers The [original GLUE paper](https://huggingface.co/datasets/glue) reported average scores ranging from 58 to 64%, depending on the model used (with all evaluation values scaled by 100 to make computing the average possible). For more recent model performance, see the [dataset leaderboard](https://paperswithcode.com/dataset/glue). 
## Examples Maximal values for the MRPC subset (which outputs `accuracy` and `f1`): ```python from datasets import load_metric glue_metric = load_metric('glue', 'mrpc') # 'mrpc' or 'qqp' references = [0, 1] predictions = [0, 1] results = glue_metric.compute(predictions=predictions, references=references) print(results) {'accuracy': 1.0, 'f1': 1.0} ``` Minimal values for the STSB subset (which outputs `pearson` and `spearmanr`): ```python from datasets import load_metric glue_metric = load_metric('glue', 'stsb') references = [0., 1., 2., 3., 4., 5.] predictions = [-10., -11., -12., -13., -14., -15.] results = glue_metric.compute(predictions=predictions, references=references) print(results) {'pearson': -1.0, 'spearmanr': -1.0} ``` Partial match for the COLA subset (which outputs `matthews_correlation`) ```python from datasets import load_metric glue_metric = load_metric('glue', 'cola') references = [0, 1] predictions = [1, 1] results = glue_metric.compute(predictions=predictions, references=references) results {'matthews_correlation': 0.0} ``` ## Limitations and bias This metric works only with datasets that have the same format as the [GLUE dataset](https://huggingface.co/datasets/glue). While the GLUE dataset is meant to represent "General Language Understanding", the tasks represented in it are not necessarily representative of language understanding, and should not be interpreted as such. Also, while the GLUE subtasks were considered challenging during its creation in 2019, they are no longer considered as such given the impressive progress made since then. A more complex (or "stickier") version of it, called [SuperGLUE](https://huggingface.co/datasets/super_glue), was subsequently created. ## Citation ```bibtex @inproceedings{wang2019glue, title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding}, author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.}, note={In the Proceedings of ICLR.}, year={2019} } ``` ## Further References - [GLUE benchmark homepage](https://gluebenchmark.com/) - [Fine-tuning a model with the Trainer API](https://huggingface.co/course/chapter3/3?)
datasets/metrics/glue/README.md/0
{ "file_path": "datasets/metrics/glue/README.md", "repo_id": "datasets", "token_count": 1659 }
62
# Metric Card for METEOR ## Metric description METEOR (Metric for Evaluation of Translation with Explicit ORdering) is a machine translation evaluation metric, which is calculated based on the harmonic mean of precision and recall, with recall weighted more than precision. METEOR is based on a generalized concept of unigram matching between the machine-produced translation and human-produced reference translations. Unigrams can be matched based on their surface forms, stemmed forms, and meanings. Once all generalized unigram matches between the two strings have been found, METEOR computes a score for this matching using a combination of unigram-precision, unigram-recall, and a measure of fragmentation that is designed to directly capture how well-ordered the matched words in the machine translation are in relation to the reference. ## How to use METEOR has two mandatory arguments: `predictions`: a list of predictions to score. Each prediction should be a string with tokens separated by spaces. `references`: a list of references for each prediction. Each reference should be a string with tokens separated by spaces. It also has several optional parameters: `alpha`: Parameter for controlling relative weights of precision and recall. The default value is `0.9`. `beta`: Parameter for controlling shape of penalty as a function of fragmentation. The default value is `3`. `gamma`: The relative weight assigned to fragmentation penalty. The default is `0.5`. Refer to the [METEOR paper](https://aclanthology.org/W05-0909.pdf) for more information about parameter values and ranges. ```python >>> from datasets import load_metric >>> meteor = load_metric('meteor') >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"] >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"] >>> results = meteor.compute(predictions=predictions, references=references) ``` ## Output values The metric outputs a dictionary containing the METEOR score. Its values range from 0 to 1. ### Values from popular papers The [METEOR paper](https://aclanthology.org/W05-0909.pdf) does not report METEOR score values for different models, but it does report that METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic data and 0.331 on the Chinese data. 
## Examples Maximal values : ```python >>> from datasets import load_metric >>> meteor = load_metric('meteor') >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"] >>> references = ["It is a guide to action which ensures that the military always obeys the commands of the party"] >>> results = meteor.compute(predictions=predictions, references=references) >>> print(round(results['meteor'], 2)) 1.0 ``` Minimal values: ```python >>> from datasets import load_metric >>> meteor = load_metric('meteor') >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"] >>> references = ["Hello world"] >>> results = meteor.compute(predictions=predictions, references=references) >>> print(round(results['meteor'], 2)) 0.0 ``` Partial match: ```python >>> from datasets import load_metric >>> meteor = load_metric('meteor') >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"] >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"] >>> results = meteor.compute(predictions=predictions, references=references) >>> print(round(results['meteor'], 2)) 0.69 ``` ## Limitations and bias While the correlation between METEOR and human judgments was measured for Chinese and Arabic and found to be significant, further experimentation is needed to check its correlation for other languages. Furthermore, while the alignment and matching done in METEOR is based on unigrams, using multiple word entities (e.g. bigrams) could contribute to improving its accuracy -- this has been proposed in [more recent publications](https://www.cs.cmu.edu/~alavie/METEOR/pdf/meteor-naacl-2010.pdf) on the subject. ## Citation ```bibtex @inproceedings{banarjee2005, title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments}, author = {Banerjee, Satanjeev and Lavie, Alon}, booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization}, month = jun, year = {2005}, address = {Ann Arbor, Michigan}, publisher = {Association for Computational Linguistics}, url = {https://www.aclweb.org/anthology/W05-0909}, pages = {65--72}, } ``` ## Further References - [METEOR -- Wikipedia](https://en.wikipedia.org/wiki/METEOR) - [METEOR score -- NLTK](https://www.nltk.org/_modules/nltk/translate/meteor_score.html)
datasets/metrics/meteor/README.md/0
{ "file_path": "datasets/metrics/meteor/README.md", "repo_id": "datasets", "token_count": 1340 }
63
# Metric Card for SacreBLEU ## Metric Description SacreBLEU provides hassle-free computation of shareable, comparable, and reproducible BLEU scores. Inspired by Rico Sennrich's `multi-bleu-detok.perl`, it produces the official Workshop on Machine Translation (WMT) scores but works with plain text. It also knows all the standard test sets and handles downloading, processing, and tokenization. See the [README.md] file at https://github.com/mjpost/sacreBLEU for more information. ## How to Use This metric takes a set of predictions and a set of references as input, along with various optional parameters. ```python >>> predictions = ["hello there general kenobi", "foo bar foobar"] >>> references = [["hello there general kenobi", "hello there !"], ... ["foo bar foobar", "foo bar foobar"]] >>> sacrebleu = datasets.load_metric("sacrebleu") >>> results = sacrebleu.compute(predictions=predictions, ... references=references) >>> print(list(results.keys())) ['score', 'counts', 'totals', 'precisions', 'bp', 'sys_len', 'ref_len'] >>> print(round(results["score"], 1)) 100.0 ``` ### Inputs - **`predictions`** (`list` of `str`): list of translations to score. Each translation should be tokenized into a list of tokens. - **`references`** (`list` of `list` of `str`): A list of lists of references. The contents of the first sub-list are the references for the first prediction, the contents of the second sub-list are for the second prediction, etc. Note that there must be the same number of references for each prediction (i.e. all sub-lists must be of the same length). - **`smooth_method`** (`str`): The smoothing method to use, defaults to `'exp'`. Possible values are: - `'none'`: no smoothing - `'floor'`: increment zero counts - `'add-k'`: increment num/denom by k for n>1 - `'exp'`: exponential decay - **`smooth_value`** (`float`): The smoothing value. Only valid when `smooth_method='floor'` (in which case `smooth_value` defaults to `0.1`) or `smooth_method='add-k'` (in which case `smooth_value` defaults to `1`). - **`tokenize`** (`str`): Tokenization method to use for BLEU. If not provided, defaults to `'zh'` for Chinese, `'ja-mecab'` for Japanese and `'13a'` (mteval) otherwise. Possible values are: - `'none'`: No tokenization. - `'zh'`: Chinese tokenization. - `'13a'`: mimics the `mteval-v13a` script from Moses. - `'intl'`: International tokenization, mimics the `mteval-v14` script from Moses - `'char'`: Language-agnostic character-level tokenization. - `'ja-mecab'`: Japanese tokenization. Uses the [MeCab tokenizer](https://pypi.org/project/mecab-python3). - **`lowercase`** (`bool`): If `True`, lowercases the input, enabling case-insensitivity. Defaults to `False`. - **`force`** (`bool`): If `True`, insists that your tokenized input is actually detokenized. Defaults to `False`. - **`use_effective_order`** (`bool`): If `True`, stops including n-gram orders for which precision is 0. This should be `True`, if sentence-level BLEU will be computed. Defaults to `False`. ### Output Values - `score`: BLEU score - `counts`: Counts - `totals`: Totals - `precisions`: Precisions - `bp`: Brevity penalty - `sys_len`: predictions length - `ref_len`: reference length The output is in the following format: ```python {'score': 39.76353643835252, 'counts': [6, 4, 2, 1], 'totals': [10, 8, 6, 4], 'precisions': [60.0, 50.0, 33.333333333333336, 25.0], 'bp': 1.0, 'sys_len': 10, 'ref_len': 7} ``` The score can take any value between `0.0` and `100.0`, inclusive. 
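For instance, here is a short sketch of scoring a single sentence, which is the situation where `use_effective_order=True` (described above) applies; the toy sentences are only for illustration:

```python
>>> sacrebleu = datasets.load_metric("sacrebleu")
>>> results = sacrebleu.compute(
...     predictions=["the cat sat on the mat"],
...     references=[["the cat sat on the mat"]],
...     use_effective_order=True,
... )
>>> print(round(results["score"], 1))
100.0
```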
#### Values from Popular Papers ### Examples ```python >>> predictions = ["hello there general kenobi", ... "on our way to ankh morpork"] >>> references = [["hello there general kenobi", "hello there !"], ... ["goodbye ankh morpork", "ankh morpork"]] >>> sacrebleu = datasets.load_metric("sacrebleu") >>> results = sacrebleu.compute(predictions=predictions, ... references=references) >>> print(list(results.keys())) ['score', 'counts', 'totals', 'precisions', 'bp', 'sys_len', 'ref_len'] >>> print(round(results["score"], 1)) 39.8 ``` ## Limitations and Bias Because what this metric calculates is BLEU scores, it has the same limitations as that metric, except that sacreBLEU is more easily reproducible. ## Citation ```bibtex @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ``` ## Further References - See the [sacreBLEU README.md file](https://github.com/mjpost/sacreBLEU) for more information.
datasets/metrics/sacrebleu/README.md/0
{ "file_path": "datasets/metrics/sacrebleu/README.md", "repo_id": "datasets", "token_count": 1692 }
64
# Copyright 2020 The HuggingFace Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """The SuperGLUE benchmark metric.""" from sklearn.metrics import f1_score, matthews_corrcoef import datasets from .record_evaluation import evaluate as evaluate_record _CITATION = """\ @article{wang2019superglue, title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems}, author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R}, journal={arXiv preprint arXiv:1905.00537}, year={2019} } """ _DESCRIPTION = """\ SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after GLUE with a new set of more difficult language understanding tasks, improved resources, and a new public leaderboard. """ _KWARGS_DESCRIPTION = """ Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset. Args: predictions: list of predictions to score. Depending on the SuperGlUE subset: - for 'record': list of question-answer dictionaries with the following keys: - 'idx': index of the question as specified by the dataset - 'prediction_text': the predicted answer text - for 'multirc': list of question-answer dictionaries with the following keys: - 'idx': index of the question-answer pair as specified by the dataset - 'prediction': the predicted answer label - otherwise: list of predicted labels references: list of reference labels. 
Depending on the SuperGLUE subset: - for 'record': list of question-answers dictionaries with the following keys: - 'idx': index of the question as specified by the dataset - 'answers': list of possible answers - otherwise: list of reference labels Returns: depending on the SuperGLUE subset: - for 'record': - 'exact_match': Exact match between answer and gold answer - 'f1': F1 score - for 'multirc': - 'exact_match': Exact match between answer and gold answer - 'f1_m': Per-question macro-F1 score - 'f1_a': Average F1 score over all answers - for 'axb': 'matthews_correlation': Matthew Correlation - for 'cb': - 'accuracy': Accuracy - 'f1': F1 score - for all others: - 'accuracy': Accuracy Examples: >>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"] >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0} >>> super_glue_metric = datasets.load_metric('super_glue', 'cb') >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0, 'f1': 1.0} >>> super_glue_metric = datasets.load_metric('super_glue', 'record') >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}] >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'exact_match': 1.0, 'f1': 1.0} >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc') >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0} >>> super_glue_metric = datasets.load_metric('super_glue', 'axb') >>> references = [0, 1] >>> predictions = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'matthews_correlation': 1.0} """ def simple_accuracy(preds, labels): return float((preds == labels).mean()) def acc_and_f1(preds, labels, f1_avg="binary"): acc = simple_accuracy(preds, labels) f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg)) return { "accuracy": acc, "f1": f1, } def evaluate_multirc(ids_preds, labels): """ Computes F1 score and Exact Match for MultiRC predictions. 
""" question_map = {} for id_pred, label in zip(ids_preds, labels): question_id = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}' pred = id_pred["prediction"] if question_id in question_map: question_map[question_id].append((pred, label)) else: question_map[question_id] = [(pred, label)] f1s, ems = [], [] for question, preds_labels in question_map.items(): question_preds, question_labels = zip(*preds_labels) f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro") f1s.append(f1) em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels)) ems.append(em) f1_m = float(sum(f1s) / len(f1s)) em = sum(ems) / len(ems) f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds])) return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a} @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) class SuperGlue(datasets.Metric): def _info(self): if self.config_name not in [ "boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg", ]: raise KeyError( "You should supply a configuration name selected in " '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' ) return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(self._get_feature_types()), codebase_urls=[], reference_urls=[], format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None, ) def _get_feature_types(self): if self.config_name == "record": return { "predictions": { "idx": { "passage": datasets.Value("int64"), "query": datasets.Value("int64"), }, "prediction_text": datasets.Value("string"), }, "references": { "idx": { "passage": datasets.Value("int64"), "query": datasets.Value("int64"), }, "answers": datasets.Sequence(datasets.Value("string")), }, } elif self.config_name == "multirc": return { "predictions": { "idx": { "answer": datasets.Value("int64"), "paragraph": datasets.Value("int64"), "question": datasets.Value("int64"), }, "prediction": datasets.Value("int64"), }, "references": datasets.Value("int64"), } else: return { "predictions": datasets.Value("int64"), "references": datasets.Value("int64"), } def _compute(self, predictions, references): if self.config_name == "axb": return {"matthews_correlation": matthews_corrcoef(references, predictions)} elif self.config_name == "cb": return acc_and_f1(predictions, references, f1_avg="macro") elif self.config_name == "record": dataset = [ { "qas": [ {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]} for ref in references ] } ] predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions} return evaluate_record(dataset, predictions)[0] elif self.config_name == "multirc": return evaluate_multirc(predictions, references) elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]: return {"accuracy": simple_accuracy(predictions, references)} else: raise KeyError( "You should supply a configuration name selected in " '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
datasets/metrics/super_glue/super_glue.py/0
{ "file_path": "datasets/metrics/super_glue/super_glue.py", "repo_id": "datasets", "token_count": 4296 }
65
__all__ = [ "DownloadConfig", "DownloadManager", "DownloadMode", "StreamingDownloadManager", ] from .download_config import DownloadConfig from .download_manager import DownloadManager, DownloadMode from .streaming_download_manager import StreamingDownloadManager
datasets/src/datasets/download/__init__.py/0
{ "file_path": "datasets/src/datasets/download/__init__.py", "repo_id": "datasets", "token_count": 77 }
66
# Copyright 2020 The HuggingFace Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections.abc import Mapping, MutableMapping from functools import partial # Lint as: python3 from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union import numpy as np import pandas as pd import pyarrow as pa from packaging import version from .. import config from ..features import Features from ..features.features import _ArrayXDExtensionType, _is_zero_copy_only, decode_nested_example, pandas_types_mapper from ..table import Table from ..utils.py_utils import no_op_if_value_is_null T = TypeVar("T") RowFormat = TypeVar("RowFormat") ColumnFormat = TypeVar("ColumnFormat") BatchFormat = TypeVar("BatchFormat") def _is_range_contiguous(key: range) -> bool: return key.step == 1 and key.stop >= key.start def _raise_bad_key_type(key: Any): raise TypeError( f"Wrong key type: '{key}' of type '{type(key)}'. Expected one of int, slice, range, str or Iterable." ) def _query_table_with_indices_mapping( table: Table, key: Union[int, slice, range, str, Iterable], indices: Table ) -> pa.Table: """ Query a pyarrow Table to extract the subtable that correspond to the given key. The :obj:`indices` parameter corresponds to the indices mapping in case we cant to take into account a shuffling or an indices selection for example. The indices table must contain one column named "indices" of type uint64. """ if isinstance(key, int): key = indices.fast_slice(key % indices.num_rows, 1).column(0)[0].as_py() return _query_table(table, key) if isinstance(key, slice): key = range(*key.indices(indices.num_rows)) if isinstance(key, range): if _is_range_contiguous(key) and key.start >= 0: return _query_table( table, [i.as_py() for i in indices.fast_slice(key.start, key.stop - key.start).column(0)] ) else: pass # treat as an iterable if isinstance(key, str): table = table.select([key]) return _query_table(table, indices.column(0).to_pylist()) if isinstance(key, Iterable): return _query_table(table, [indices.fast_slice(i, 1).column(0)[0].as_py() for i in key]) _raise_bad_key_type(key) def _query_table(table: Table, key: Union[int, slice, range, str, Iterable]) -> pa.Table: """ Query a pyarrow Table to extract the subtable that correspond to the given key. 
""" if isinstance(key, int): return table.fast_slice(key % table.num_rows, 1) if isinstance(key, slice): key = range(*key.indices(table.num_rows)) if isinstance(key, range): if _is_range_contiguous(key) and key.start >= 0: return table.fast_slice(key.start, key.stop - key.start) else: pass # treat as an iterable if isinstance(key, str): return table.table.drop([column for column in table.column_names if column != key]) if isinstance(key, Iterable): key = np.fromiter(key, np.int64) if len(key) == 0: return table.table.slice(0, 0) # don't use pyarrow.Table.take even for pyarrow >=1.0 (see https://issues.apache.org/jira/browse/ARROW-9773) return table.fast_gather(key % table.num_rows) _raise_bad_key_type(key) def _is_array_with_nulls(pa_array: pa.Array) -> bool: return pa_array.null_count > 0 class BaseArrowExtractor(Generic[RowFormat, ColumnFormat, BatchFormat]): """ Arrow extractor are used to extract data from pyarrow tables. It makes it possible to extract rows, columns and batches. These three extractions types have to be implemented. """ def extract_row(self, pa_table: pa.Table) -> RowFormat: raise NotImplementedError def extract_column(self, pa_table: pa.Table) -> ColumnFormat: raise NotImplementedError def extract_batch(self, pa_table: pa.Table) -> BatchFormat: raise NotImplementedError def _unnest(py_dict: Dict[str, List[T]]) -> Dict[str, T]: """Return the first element of a batch (dict) as a row (dict)""" return {key: array[0] for key, array in py_dict.items()} class SimpleArrowExtractor(BaseArrowExtractor[pa.Table, pa.Array, pa.Table]): def extract_row(self, pa_table: pa.Table) -> pa.Table: return pa_table def extract_column(self, pa_table: pa.Table) -> pa.Array: return pa_table.column(0) def extract_batch(self, pa_table: pa.Table) -> pa.Table: return pa_table class PythonArrowExtractor(BaseArrowExtractor[dict, list, dict]): def extract_row(self, pa_table: pa.Table) -> dict: return _unnest(pa_table.to_pydict()) def extract_column(self, pa_table: pa.Table) -> list: return pa_table.column(0).to_pylist() def extract_batch(self, pa_table: pa.Table) -> dict: return pa_table.to_pydict() class NumpyArrowExtractor(BaseArrowExtractor[dict, np.ndarray, dict]): def __init__(self, **np_array_kwargs): self.np_array_kwargs = np_array_kwargs def extract_row(self, pa_table: pa.Table) -> dict: return _unnest(self.extract_batch(pa_table)) def extract_column(self, pa_table: pa.Table) -> np.ndarray: return self._arrow_array_to_numpy(pa_table[pa_table.column_names[0]]) def extract_batch(self, pa_table: pa.Table) -> dict: return {col: self._arrow_array_to_numpy(pa_table[col]) for col in pa_table.column_names} def _arrow_array_to_numpy(self, pa_array: pa.Array) -> np.ndarray: if isinstance(pa_array, pa.ChunkedArray): if isinstance(pa_array.type, _ArrayXDExtensionType): # don't call to_pylist() to preserve dtype of the fixed-size array zero_copy_only = _is_zero_copy_only(pa_array.type.storage_dtype, unnest=True) array: List = [ row for chunk in pa_array.chunks for row in chunk.to_numpy(zero_copy_only=zero_copy_only) ] else: zero_copy_only = _is_zero_copy_only(pa_array.type) and all( not _is_array_with_nulls(chunk) for chunk in pa_array.chunks ) array: List = [ row for chunk in pa_array.chunks for row in chunk.to_numpy(zero_copy_only=zero_copy_only) ] else: if isinstance(pa_array.type, _ArrayXDExtensionType): # don't call to_pylist() to preserve dtype of the fixed-size array zero_copy_only = _is_zero_copy_only(pa_array.type.storage_dtype, unnest=True) array: List = 
pa_array.to_numpy(zero_copy_only=zero_copy_only) else: zero_copy_only = _is_zero_copy_only(pa_array.type) and not _is_array_with_nulls(pa_array) array: List = pa_array.to_numpy(zero_copy_only=zero_copy_only).tolist() if len(array) > 0: if any( (isinstance(x, np.ndarray) and (x.dtype == object or x.shape != array[0].shape)) or (isinstance(x, float) and np.isnan(x)) for x in array ): return np.array(array, copy=False, dtype=object) return np.array(array, copy=False) class PandasArrowExtractor(BaseArrowExtractor[pd.DataFrame, pd.Series, pd.DataFrame]): def extract_row(self, pa_table: pa.Table) -> pd.DataFrame: return pa_table.slice(length=1).to_pandas(types_mapper=pandas_types_mapper) def extract_column(self, pa_table: pa.Table) -> pd.Series: return pa_table.select([0]).to_pandas(types_mapper=pandas_types_mapper)[pa_table.column_names[0]] def extract_batch(self, pa_table: pa.Table) -> pd.DataFrame: return pa_table.to_pandas(types_mapper=pandas_types_mapper) class PythonFeaturesDecoder: def __init__(self, features: Optional[Features]): self.features = features def decode_row(self, row: dict) -> dict: return self.features.decode_example(row) if self.features else row def decode_column(self, column: list, column_name: str) -> list: return self.features.decode_column(column, column_name) if self.features else column def decode_batch(self, batch: dict) -> dict: return self.features.decode_batch(batch) if self.features else batch class PandasFeaturesDecoder: def __init__(self, features: Optional[Features]): self.features = features def decode_row(self, row: pd.DataFrame) -> pd.DataFrame: decode = ( { column_name: no_op_if_value_is_null(partial(decode_nested_example, feature)) for column_name, feature in self.features.items() if self.features._column_requires_decoding[column_name] } if self.features else {} ) if decode: row[list(decode.keys())] = row.transform(decode) return row def decode_column(self, column: pd.Series, column_name: str) -> pd.Series: decode = ( no_op_if_value_is_null(partial(decode_nested_example, self.features[column_name])) if self.features and column_name in self.features and self.features._column_requires_decoding[column_name] else None ) if decode: column = column.transform(decode) return column def decode_batch(self, batch: pd.DataFrame) -> pd.DataFrame: return self.decode_row(batch) class LazyDict(MutableMapping): """A dictionary backed by Arrow data. 
The values are formatted on-the-fly when accessing the dictionary.""" def __init__(self, pa_table: pa.Table, formatter: "Formatter"): self.pa_table = pa_table self.formatter = formatter self.data = {key: None for key in pa_table.column_names} self.keys_to_format = set(self.data.keys()) def __len__(self): return len(self.data) def __getitem__(self, key): value = self.data[key] if key in self.keys_to_format: value = self.format(key) self.data[key] = value self.keys_to_format.remove(key) return value def __setitem__(self, key, value): if key in self.keys_to_format: self.keys_to_format.remove(key) self.data[key] = value def __delitem__(self, key) -> None: if key in self.keys_to_format: self.keys_to_format.remove(key) del self.data[key] def __iter__(self): return iter(self.data) def __contains__(self, key): return key in self.data def __repr__(self): self._format_all() return repr(self.data) if config.PY_VERSION >= version.parse("3.9"): # merging with the union ("|") operator is supported in Python 3.9+ def __or__(self, other): if isinstance(other, LazyDict): inst = self.copy() other = other.copy() other._format_all() inst.keys_to_format -= other.data.keys() inst.data = inst.data | other.data return inst if isinstance(other, dict): inst = self.copy() inst.keys_to_format -= other.keys() inst.data = inst.data | other return inst return NotImplemented def __ror__(self, other): if isinstance(other, LazyDict): inst = self.copy() other = other.copy() other._format_all() inst.keys_to_format -= other.data.keys() inst.data = other.data | inst.data return inst if isinstance(other, dict): inst = self.copy() inst.keys_to_format -= other.keys() inst.data = other | inst.data return inst return NotImplemented def __ior__(self, other): if isinstance(other, LazyDict): other = other.copy() other._format_all() self.keys_to_format -= other.data.keys() self.data |= other.data else: self.keys_to_format -= other.keys() self.data |= other return self def __copy__(self): # Identical to `UserDict.__copy__` inst = self.__class__.__new__(self.__class__) inst.__dict__.update(self.__dict__) # Create a copy and avoid triggering descriptors inst.__dict__["data"] = self.__dict__["data"].copy() inst.__dict__["keys_to_format"] = self.__dict__["keys_to_format"].copy() return inst def copy(self): import copy return copy.copy(self) @classmethod def fromkeys(cls, iterable, value=None): raise NotImplementedError def format(self, key): raise NotImplementedError def _format_all(self): for key in self.keys_to_format: self.data[key] = self.format(key) self.keys_to_format.clear() class LazyRow(LazyDict): def format(self, key): return self.formatter.format_column(self.pa_table.select([key]))[0] class LazyBatch(LazyDict): def format(self, key): return self.formatter.format_column(self.pa_table.select([key])) class Formatter(Generic[RowFormat, ColumnFormat, BatchFormat]): """ A formatter is an object that extracts and formats data from pyarrow tables. It defines the formatting for rows, columns and batches. 
""" simple_arrow_extractor = SimpleArrowExtractor python_arrow_extractor = PythonArrowExtractor numpy_arrow_extractor = NumpyArrowExtractor pandas_arrow_extractor = PandasArrowExtractor def __init__(self, features: Optional[Features] = None): self.features = features self.python_features_decoder = PythonFeaturesDecoder(self.features) self.pandas_features_decoder = PandasFeaturesDecoder(self.features) def __call__(self, pa_table: pa.Table, query_type: str) -> Union[RowFormat, ColumnFormat, BatchFormat]: if query_type == "row": return self.format_row(pa_table) elif query_type == "column": return self.format_column(pa_table) elif query_type == "batch": return self.format_batch(pa_table) def format_row(self, pa_table: pa.Table) -> RowFormat: raise NotImplementedError def format_column(self, pa_table: pa.Table) -> ColumnFormat: raise NotImplementedError def format_batch(self, pa_table: pa.Table) -> BatchFormat: raise NotImplementedError class TensorFormatter(Formatter[RowFormat, ColumnFormat, BatchFormat]): def recursive_tensorize(self, data_struct: dict): raise NotImplementedError class ArrowFormatter(Formatter[pa.Table, pa.Array, pa.Table]): def format_row(self, pa_table: pa.Table) -> pa.Table: return self.simple_arrow_extractor().extract_row(pa_table) def format_column(self, pa_table: pa.Table) -> pa.Array: return self.simple_arrow_extractor().extract_column(pa_table) def format_batch(self, pa_table: pa.Table) -> pa.Table: return self.simple_arrow_extractor().extract_batch(pa_table) class PythonFormatter(Formatter[Mapping, list, Mapping]): def __init__(self, features=None, lazy=False): super().__init__(features) self.lazy = lazy def format_row(self, pa_table: pa.Table) -> Mapping: if self.lazy: return LazyRow(pa_table, self) row = self.python_arrow_extractor().extract_row(pa_table) row = self.python_features_decoder.decode_row(row) return row def format_column(self, pa_table: pa.Table) -> list: column = self.python_arrow_extractor().extract_column(pa_table) column = self.python_features_decoder.decode_column(column, pa_table.column_names[0]) return column def format_batch(self, pa_table: pa.Table) -> Mapping: if self.lazy: return LazyBatch(pa_table, self) batch = self.python_arrow_extractor().extract_batch(pa_table) batch = self.python_features_decoder.decode_batch(batch) return batch class PandasFormatter(Formatter[pd.DataFrame, pd.Series, pd.DataFrame]): def format_row(self, pa_table: pa.Table) -> pd.DataFrame: row = self.pandas_arrow_extractor().extract_row(pa_table) row = self.pandas_features_decoder.decode_row(row) return row def format_column(self, pa_table: pa.Table) -> pd.Series: column = self.pandas_arrow_extractor().extract_column(pa_table) column = self.pandas_features_decoder.decode_column(column, pa_table.column_names[0]) return column def format_batch(self, pa_table: pa.Table) -> pd.DataFrame: row = self.pandas_arrow_extractor().extract_batch(pa_table) row = self.pandas_features_decoder.decode_batch(row) return row class CustomFormatter(Formatter[dict, ColumnFormat, dict]): """ A user-defined custom formatter function defined by a ``transform``. The transform must take as input a batch of data extracted for an arrow table using the python extractor, and return a batch. If the output batch is not a dict, then output_all_columns won't work. If the ouput batch has several fields, then querying a single column won't work since we don't know which field to return. 
""" def __init__(self, transform: Callable[[dict], dict], features=None, **kwargs): super().__init__(features=features) self.transform = transform def format_row(self, pa_table: pa.Table) -> dict: formatted_batch = self.format_batch(pa_table) try: return _unnest(formatted_batch) except Exception as exc: raise TypeError( f"Custom formatting function must return a dict of sequences to be able to pick a row, but got {formatted_batch}" ) from exc def format_column(self, pa_table: pa.Table) -> ColumnFormat: formatted_batch = self.format_batch(pa_table) if hasattr(formatted_batch, "keys"): if len(formatted_batch.keys()) > 1: raise TypeError( "Tried to query a column but the custom formatting function returns too many columns. " f"Only one column was expected but got columns {list(formatted_batch.keys())}." ) else: raise TypeError( f"Custom formatting function must return a dict to be able to pick a row, but got {formatted_batch}" ) try: return formatted_batch[pa_table.column_names[0]] except Exception as exc: raise TypeError( f"Custom formatting function must return a dict to be able to pick a row, but got {formatted_batch}" ) from exc def format_batch(self, pa_table: pa.Table) -> dict: batch = self.python_arrow_extractor().extract_batch(pa_table) batch = self.python_features_decoder.decode_batch(batch) return self.transform(batch) def _check_valid_column_key(key: str, columns: List[str]) -> None: if key not in columns: raise KeyError(f"Column {key} not in the dataset. Current columns in the dataset: {columns}") def _check_valid_index_key(key: Union[int, slice, range, Iterable], size: int) -> None: if isinstance(key, int): if (key < 0 and key + size < 0) or (key >= size): raise IndexError(f"Invalid key: {key} is out of bounds for size {size}") return elif isinstance(key, slice): pass elif isinstance(key, range): if len(key) > 0: _check_valid_index_key(max(key), size=size) _check_valid_index_key(min(key), size=size) elif isinstance(key, Iterable): if len(key) > 0: _check_valid_index_key(int(max(key)), size=size) _check_valid_index_key(int(min(key)), size=size) else: _raise_bad_key_type(key) def key_to_query_type(key: Union[int, slice, range, str, Iterable]) -> str: if isinstance(key, int): return "row" elif isinstance(key, str): return "column" elif isinstance(key, (slice, range, Iterable)): return "batch" _raise_bad_key_type(key) def query_table( table: Table, key: Union[int, slice, range, str, Iterable], indices: Optional[Table] = None, ) -> pa.Table: """ Query a Table to extract the subtable that correspond to the given key. Args: table (``datasets.table.Table``): The input Table to query from key (``Union[int, slice, range, str, Iterable]``): The key can be of different types: - an integer i: the subtable containing only the i-th row - a slice [i:j:k]: the subtable containing the rows that correspond to this slice - a range(i, j, k): the subtable containing the rows that correspond to this range - a string c: the subtable containing all the rows but only the column c - an iterable l: the subtable that is the concatenation of all the i-th rows for all i in the iterable indices (Optional ``datasets.table.Table``): If not None, it is used to re-map the given key to the table rows. The indices table must contain one column named "indices" of type uint64. This is used in case of shuffling or rows selection. 
Returns: ``pyarrow.Table``: the result of the query on the input table """ # Check if key is valid if not isinstance(key, (int, slice, range, str, Iterable)): _raise_bad_key_type(key) if isinstance(key, str): _check_valid_column_key(key, table.column_names) else: size = indices.num_rows if indices is not None else table.num_rows _check_valid_index_key(key, size) # Query the main table if indices is None: pa_subtable = _query_table(table, key) else: pa_subtable = _query_table_with_indices_mapping(table, key, indices=indices) return pa_subtable def format_table( table: Table, key: Union[int, slice, range, str, Iterable], formatter: Formatter, format_columns: Optional[list] = None, output_all_columns=False, ): """ Format a Table depending on the key that was used and a Formatter object. Args: table (``datasets.table.Table``): The input Table to format key (``Union[int, slice, range, str, Iterable]``): Depending on the key that was used, the formatter formats the table as either a row, a column or a batch. formatter (``datasets.formatting.formatting.Formatter``): Any subclass of a Formatter such as PythonFormatter, NumpyFormatter, etc. format_columns (:obj:`List[str]`, optional): if not None, it defines the columns that will be formatted using the given formatter. Other columns are discarded (unless ``output_all_columns`` is True) output_all_columns (:obj:`bool`, defaults to False). If True, the formatted output is completed using the columns that are not in the ``format_columns`` list. For these columns, the PythonFormatter is used. Returns: A row, column or batch formatted object defined by the Formatter: - the PythonFormatter returns a dictionary for a row or a batch, and a list for a column. - the NumpyFormatter returns a dictionary for a row or a batch, and a np.array for a column. - the PandasFormatter returns a pd.DataFrame for a row or a batch, and a pd.Series for a column. - the TorchFormatter returns a dictionary for a row or a batch, and a torch.Tensor for a column. - the TFFormatter returns a dictionary for a row or a batch, and a tf.Tensor for a column. """ if isinstance(table, Table): pa_table = table.table else: pa_table = table query_type = key_to_query_type(key) python_formatter = PythonFormatter(features=formatter.features) if format_columns is None: return formatter(pa_table, query_type=query_type) elif query_type == "column": if key in format_columns: return formatter(pa_table, query_type) else: return python_formatter(pa_table, query_type=query_type) else: pa_table_to_format = pa_table.drop(col for col in pa_table.column_names if col not in format_columns) formatted_output = formatter(pa_table_to_format, query_type=query_type) if output_all_columns: if isinstance(formatted_output, MutableMapping): pa_table_with_remaining_columns = pa_table.drop( col for col in pa_table.column_names if col in format_columns ) remaining_columns_dict = python_formatter(pa_table_with_remaining_columns, query_type=query_type) formatted_output.update(remaining_columns_dict) else: raise TypeError( f"Custom formatting function must return a dict to work with output_all_columns=True, but got {formatted_output}" ) return formatted_output
datasets/src/datasets/formatting/formatting.py/0
{ "file_path": "datasets/src/datasets/formatting/formatting.py", "repo_id": "datasets", "token_count": 10729 }
67
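The `formatting.py` row above is easiest to follow with a small end-to-end sketch. The snippet below is illustrative only: the import path and the toy table are assumptions (not part of the original file), and in practice these formatters are reached indirectly through methods like `Dataset.set_format` rather than called by hand.

```python
import pyarrow as pa

# Hypothetical, minimal usage of the formatters defined in the file above.
from datasets.formatting.formatting import PythonFormatter, PandasFormatter

pa_table = pa.table({"text": ["a", "b", "c"], "label": [0, 1, 0]})  # toy data

py_fmt = PythonFormatter()                             # features=None, lazy=False
row = py_fmt(pa_table.slice(0, 1), query_type="row")   # dict for one row, e.g. {"text": "a", "label": 0}
col = py_fmt(pa_table, query_type="column")            # list for the first column
batch = py_fmt(pa_table, query_type="batch")           # dict of lists

pd_fmt = PandasFormatter()
df = pd_fmt(pa_table, query_type="batch")              # pandas.DataFrame
```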
from typing import Optional

from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
datasets/src/datasets/io/text.py/0
{ "file_path": "datasets/src/datasets/io/text.py", "repo_id": "datasets", "token_count": 961 }
68
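For context, `TextDatasetReader` above is an internal helper that users normally reach through `load_dataset("text", ...)` or `Dataset.from_text(...)`. A hedged sketch of direct use (the file name is made up):

```python
# Illustrative only: "my_corpus.txt" is a hypothetical local file.
from datasets import Split
from datasets.io.text import TextDatasetReader

ds = TextDatasetReader("my_corpus.txt", split=Split.TRAIN, keep_in_memory=True).read()
print(ds)      # Dataset({features: ['text'], num_rows: ...})
print(ds[0])   # {'text': '<first line of my_corpus.txt>'}
```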
import collections import itertools import os from dataclasses import dataclass from typing import List, Optional, Tuple, Type import pandas as pd import pyarrow as pa import pyarrow.json as paj import datasets from datasets.features.features import FeatureType from datasets.tasks.base import TaskTemplate logger = datasets.utils.logging.get_logger(__name__) def count_path_segments(path): return path.replace("\\", "/").count("/") @dataclass class FolderBasedBuilderConfig(datasets.BuilderConfig): """BuilderConfig for AutoFolder.""" features: Optional[datasets.Features] = None drop_labels: bool = None drop_metadata: bool = None class FolderBasedBuilder(datasets.GeneratorBasedBuilder): """ Base class for generic data loaders for vision and image data. Abstract class attributes to be overridden by a child class: BASE_FEATURE: feature object to decode data (i.e. datasets.Image, datasets.Audio, ...) BASE_COLUMN_NAME: string key name of a base feature (i.e. "image", "audio", ...) BUILDER_CONFIG_CLASS: builder config inherited from `folder_based_builder.FolderBasedBuilderConfig` EXTENSIONS: list of allowed extensions (only files with these extensions and METADATA_FILENAME files will be included in a dataset) CLASSIFICATION_TASK: classification task to use if labels are obtained from the folder structure """ BASE_FEATURE: Type[FeatureType] BASE_COLUMN_NAME: str BUILDER_CONFIG_CLASS: FolderBasedBuilderConfig EXTENSIONS: List[str] CLASSIFICATION_TASK: TaskTemplate METADATA_FILENAMES: List[str] = ["metadata.csv", "metadata.jsonl"] def _info(self): return datasets.DatasetInfo(features=self.config.features) def _split_generators(self, dl_manager): if not self.config.data_files: raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") # Do an early pass if: # * `drop_labels` is None (default) or False, to infer the class labels # * `drop_metadata` is None (default) or False, to find the metadata files do_analyze = not self.config.drop_labels or not self.config.drop_metadata labels, path_depths = set(), set() metadata_files = collections.defaultdict(set) def analyze(files_or_archives, downloaded_files_or_dirs, split): if len(downloaded_files_or_dirs) == 0: return # The files are separated from the archives at this point, so check the first sample # to see if it's a file or a directory and iterate accordingly if os.path.isfile(downloaded_files_or_dirs[0]): original_files, downloaded_files = files_or_archives, downloaded_files_or_dirs for original_file, downloaded_file in zip(original_files, downloaded_files): original_file, downloaded_file = str(original_file), str(downloaded_file) _, original_file_ext = os.path.splitext(original_file) if original_file_ext.lower() in self.EXTENSIONS: if not self.config.drop_labels: labels.add(os.path.basename(os.path.dirname(original_file))) path_depths.add(count_path_segments(original_file)) elif os.path.basename(original_file) in self.METADATA_FILENAMES: metadata_files[split].add((original_file, downloaded_file)) else: original_file_name = os.path.basename(original_file) logger.debug( f"The file '{original_file_name}' was ignored: it is not an image, and is not {self.METADATA_FILENAMES} either." 
) else: archives, downloaded_dirs = files_or_archives, downloaded_files_or_dirs for archive, downloaded_dir in zip(archives, downloaded_dirs): archive, downloaded_dir = str(archive), str(downloaded_dir) for downloaded_dir_file in dl_manager.iter_files(downloaded_dir): _, downloaded_dir_file_ext = os.path.splitext(downloaded_dir_file) if downloaded_dir_file_ext in self.EXTENSIONS: if not self.config.drop_labels: labels.add(os.path.basename(os.path.dirname(downloaded_dir_file))) path_depths.add(count_path_segments(downloaded_dir_file)) elif os.path.basename(downloaded_dir_file) in self.METADATA_FILENAMES: metadata_files[split].add((None, downloaded_dir_file)) else: archive_file_name = os.path.basename(archive) original_file_name = os.path.basename(downloaded_dir_file) logger.debug( f"The file '{original_file_name}' from the archive '{archive_file_name}' was ignored: it is not an {self.BASE_COLUMN_NAME}, and is not {self.METADATA_FILENAMES} either." ) data_files = self.config.data_files splits = [] for split_name, files in data_files.items(): if isinstance(files, str): files = [files] files, archives = self._split_files_and_archives(files) downloaded_files = dl_manager.download(files) downloaded_dirs = dl_manager.download_and_extract(archives) if do_analyze: # drop_metadata is None or False, drop_labels is None or False logger.info(f"Searching for labels and/or metadata files in {split_name} data files...") analyze(files, downloaded_files, split_name) analyze(archives, downloaded_dirs, split_name) if metadata_files: # add metadata if `metadata_files` are found and `drop_metadata` is None (default) or False add_metadata = not self.config.drop_metadata # if `metadata_files` are found, add labels only if # `drop_labels` is set up to False explicitly (not-default behavior) add_labels = self.config.drop_labels is False else: # if `metadata_files` are not found, don't add metadata add_metadata = False # if `metadata_files` are not found and `drop_labels` is None (default) - # add labels if files are on the same level in directory hierarchy and there is more than one label add_labels = ( (len(labels) > 1 and len(path_depths) == 1) if self.config.drop_labels is None else not self.config.drop_labels ) if add_labels: logger.info("Adding the labels inferred from data directories to the dataset's features...") if add_metadata: logger.info("Adding metadata to the dataset...") else: add_labels, add_metadata, metadata_files = False, False, {} splits.append( datasets.SplitGenerator( name=split_name, gen_kwargs={ "files": list(zip(files, downloaded_files)) + [(None, dl_manager.iter_files(downloaded_dir)) for downloaded_dir in downloaded_dirs], "metadata_files": metadata_files, "split_name": split_name, "add_labels": add_labels, "add_metadata": add_metadata, }, ) ) if add_metadata: # Verify that: # * all metadata files have the same set of features # * the `file_name` key is one of the metadata keys and is of type string features_per_metadata_file: List[Tuple[str, datasets.Features]] = [] # Check that all metadata files share the same format metadata_ext = { os.path.splitext(original_metadata_file)[-1] for original_metadata_file, _ in itertools.chain.from_iterable(metadata_files.values()) } if len(metadata_ext) > 1: raise ValueError(f"Found metadata files with different extensions: {list(metadata_ext)}") metadata_ext = metadata_ext.pop() for _, downloaded_metadata_file in itertools.chain.from_iterable(metadata_files.values()): pa_metadata_table = self._read_metadata(downloaded_metadata_file, 
metadata_ext=metadata_ext) features_per_metadata_file.append( (downloaded_metadata_file, datasets.Features.from_arrow_schema(pa_metadata_table.schema)) ) for downloaded_metadata_file, metadata_features in features_per_metadata_file: if metadata_features != features_per_metadata_file[0][1]: raise ValueError( f"Metadata files {downloaded_metadata_file} and {features_per_metadata_file[0][0]} have different features: {features_per_metadata_file[0]} != {metadata_features}" ) metadata_features = features_per_metadata_file[0][1] if "file_name" not in metadata_features: raise ValueError("`file_name` must be present as dictionary key in metadata files") if metadata_features["file_name"] != datasets.Value("string"): raise ValueError("`file_name` key must be a string") del metadata_features["file_name"] else: metadata_features = None # Normally, we would do this in _info, but we need to know the labels and/or metadata # before building the features if self.config.features is None: if add_labels: self.info.features = datasets.Features( { self.BASE_COLUMN_NAME: self.BASE_FEATURE(), "label": datasets.ClassLabel(names=sorted(labels)), } ) self.info.task_templates = [self.CLASSIFICATION_TASK.align_with_features(self.info.features)] else: self.info.features = datasets.Features({self.BASE_COLUMN_NAME: self.BASE_FEATURE()}) if add_metadata: # Warn if there are duplicated keys in metadata compared to the existing features # (`BASE_COLUMN_NAME`, optionally "label") duplicated_keys = set(self.info.features) & set(metadata_features) if duplicated_keys: logger.warning( f"Ignoring metadata columns {list(duplicated_keys)} as they are already present in " f"the features dictionary." ) # skip metadata duplicated keys self.info.features.update( { feature: metadata_features[feature] for feature in metadata_features if feature not in duplicated_keys } ) return splits def _split_files_and_archives(self, data_files): files, archives = [], [] for data_file in data_files: _, data_file_ext = os.path.splitext(data_file) if data_file_ext.lower() in self.EXTENSIONS: files.append(data_file) elif os.path.basename(data_file) in self.METADATA_FILENAMES: files.append(data_file) else: archives.append(data_file) return files, archives def _read_metadata(self, metadata_file, metadata_ext: str = ""): if metadata_ext == ".csv": # Use `pd.read_csv` (although slower) instead of `pyarrow.csv.read_csv` for reading CSV files for consistency with the CSV packaged module return pa.Table.from_pandas(pd.read_csv(metadata_file)) else: with open(metadata_file, "rb") as f: return paj.read_json(f) def _generate_examples(self, files, metadata_files, split_name, add_metadata, add_labels): split_metadata_files = metadata_files.get(split_name, []) sample_empty_metadata = ( {k: None for k in self.info.features if k != self.BASE_COLUMN_NAME} if self.info.features else {} ) last_checked_dir = None metadata_dir = None metadata_dict = None downloaded_metadata_file = None metadata_ext = "" if split_metadata_files: metadata_ext = { os.path.splitext(original_metadata_file)[-1] for original_metadata_file, _ in split_metadata_files } metadata_ext = metadata_ext.pop() file_idx = 0 for original_file, downloaded_file_or_dir in files: if original_file is not None: _, original_file_ext = os.path.splitext(original_file) if original_file_ext.lower() in self.EXTENSIONS: if add_metadata: # If the file is a file of a needed type, and we've just entered a new directory, # find the nereast metadata file (by counting path segments) for the directory current_dir = 
os.path.dirname(original_file) if last_checked_dir is None or last_checked_dir != current_dir: last_checked_dir = current_dir metadata_file_candidates = [ ( os.path.relpath(original_file, os.path.dirname(metadata_file_candidate)), metadata_file_candidate, downloaded_metadata_file, ) for metadata_file_candidate, downloaded_metadata_file in split_metadata_files if metadata_file_candidate is not None # ignore metadata_files that are inside archives and not os.path.relpath( original_file, os.path.dirname(metadata_file_candidate) ).startswith("..") ] if metadata_file_candidates: _, metadata_file, downloaded_metadata_file = min( metadata_file_candidates, key=lambda x: count_path_segments(x[0]) ) pa_metadata_table = self._read_metadata( downloaded_metadata_file, metadata_ext=metadata_ext ) pa_file_name_array = pa_metadata_table["file_name"] pa_metadata_table = pa_metadata_table.drop(["file_name"]) metadata_dir = os.path.dirname(metadata_file) metadata_dict = { os.path.normpath(file_name).replace("\\", "/"): sample_metadata for file_name, sample_metadata in zip( pa_file_name_array.to_pylist(), pa_metadata_table.to_pylist() ) } else: raise ValueError( f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}." ) if metadata_dir is not None and downloaded_metadata_file is not None: file_relpath = os.path.relpath(original_file, metadata_dir) file_relpath = file_relpath.replace("\\", "/") if file_relpath not in metadata_dict: raise ValueError( f"{self.BASE_COLUMN_NAME} at {file_relpath} doesn't have metadata in {downloaded_metadata_file}." ) sample_metadata = metadata_dict[file_relpath] else: raise ValueError( f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}." 
) else: sample_metadata = {} if add_labels: sample_label = {"label": os.path.basename(os.path.dirname(original_file))} else: sample_label = {} yield ( file_idx, { **sample_empty_metadata, self.BASE_COLUMN_NAME: downloaded_file_or_dir, **sample_metadata, **sample_label, }, ) file_idx += 1 else: for downloaded_dir_file in downloaded_file_or_dir: _, downloaded_dir_file_ext = os.path.splitext(downloaded_dir_file) if downloaded_dir_file_ext.lower() in self.EXTENSIONS: if add_metadata: current_dir = os.path.dirname(downloaded_dir_file) if last_checked_dir is None or last_checked_dir != current_dir: last_checked_dir = current_dir metadata_file_candidates = [ ( os.path.relpath( downloaded_dir_file, os.path.dirname(downloaded_metadata_file) ), metadata_file_candidate, downloaded_metadata_file, ) for metadata_file_candidate, downloaded_metadata_file in split_metadata_files if metadata_file_candidate is None # ignore metadata_files that are not inside archives and not os.path.relpath( downloaded_dir_file, os.path.dirname(downloaded_metadata_file) ).startswith("..") ] if metadata_file_candidates: _, metadata_file, downloaded_metadata_file = min( metadata_file_candidates, key=lambda x: count_path_segments(x[0]) ) pa_metadata_table = self._read_metadata( downloaded_metadata_file, metadata_ext=metadata_ext ) pa_file_name_array = pa_metadata_table["file_name"] pa_metadata_table = pa_metadata_table.drop(["file_name"]) metadata_dir = os.path.dirname(downloaded_metadata_file) metadata_dict = { os.path.normpath(file_name).replace("\\", "/"): sample_metadata for file_name, sample_metadata in zip( pa_file_name_array.to_pylist(), pa_metadata_table.to_pylist() ) } else: raise ValueError( f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_dir_file}." ) if metadata_dir is not None and downloaded_metadata_file is not None: downloaded_dir_file_relpath = os.path.relpath(downloaded_dir_file, metadata_dir) downloaded_dir_file_relpath = downloaded_dir_file_relpath.replace("\\", "/") if downloaded_dir_file_relpath not in metadata_dict: raise ValueError( f"{self.BASE_COLUMN_NAME} at {downloaded_dir_file_relpath} doesn't have metadata in {downloaded_metadata_file}." ) sample_metadata = metadata_dict[downloaded_dir_file_relpath] else: raise ValueError( f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_dir_file}." ) else: sample_metadata = {} if add_labels: sample_label = {"label": os.path.basename(os.path.dirname(downloaded_dir_file))} else: sample_label = {} yield ( file_idx, { **sample_empty_metadata, self.BASE_COLUMN_NAME: downloaded_dir_file, **sample_metadata, **sample_label, }, ) file_idx += 1
datasets/src/datasets/packaged_modules/folder_based_builder/folder_based_builder.py/0
{ "file_path": "datasets/src/datasets/packaged_modules/folder_based_builder/folder_based_builder.py", "repo_id": "datasets", "token_count": 11960 }
69
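As a rough illustration of how the abstract attributes of `FolderBasedBuilder` above get filled in, here is a hedged sketch of a concrete subclass in the spirit of the built-in `imagefolder` loader. The class names, extension list, and task template below are assumptions for illustration; the real module differs in details.

```python
import datasets
from datasets.packaged_modules.folder_based_builder.folder_based_builder import (
    FolderBasedBuilder,
    FolderBasedBuilderConfig,
)
from datasets.tasks import ImageClassification


class ImageFolderConfig(FolderBasedBuilderConfig):
    """Config for the sketched image loader."""


class ImageFolder(FolderBasedBuilder):
    BASE_FEATURE = datasets.Image                      # feature used to decode each sample
    BASE_COLUMN_NAME = "image"                         # column name for the base feature
    BUILDER_CONFIG_CLASS = ImageFolderConfig
    EXTENSIONS = [".png", ".jpg", ".jpeg"]             # subset of extensions, for illustration
    CLASSIFICATION_TASK = ImageClassification(image_column="image", label_column="label")
```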
import itertools import warnings from dataclasses import InitVar, dataclass from io import StringIO from typing import Optional import pyarrow as pa import datasets from datasets.features.features import require_storage_cast from datasets.table import table_cast logger = datasets.utils.logging.get_logger(__name__) @dataclass class TextConfig(datasets.BuilderConfig): """BuilderConfig for text files.""" features: Optional[datasets.Features] = None encoding: str = "utf-8" errors: InitVar[Optional[str]] = "deprecated" encoding_errors: Optional[str] = None chunksize: int = 10 << 20 # 10MB keep_linebreaks: bool = False sample_by: str = "line" def __post_init__(self, errors): if errors != "deprecated": warnings.warn( "'errors' was deprecated in favor of 'encoding_errors' in version 2.14.0 and will be removed in 3.0.0.\n" f"You can remove this warning by passing 'encoding_errors={errors}' instead.", FutureWarning, ) self.encoding_errors = errors class Text(datasets.ArrowBasedBuilder): BUILDER_CONFIG_CLASS = TextConfig def _info(self): return datasets.DatasetInfo(features=self.config.features) def _split_generators(self, dl_manager): """The `data_files` kwarg in load_dataset() can be a str, List[str], Dict[str,str], or Dict[str,List[str]]. If str or List[str], then the dataset returns only the 'train' split. If dict, then keys should be from the `datasets.Split` enum. """ if not self.config.data_files: raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") data_files = dl_manager.download_and_extract(self.config.data_files) if isinstance(data_files, (str, list, tuple)): files = data_files if isinstance(files, str): files = [files] files = [dl_manager.iter_files(file) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})] splits = [] for split_name, files in data_files.items(): if isinstance(files, str): files = [files] files = [dl_manager.iter_files(file) for file in files] splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files})) return splits def _cast_table(self, pa_table: pa.Table) -> pa.Table: if self.config.features is not None: schema = self.config.features.arrow_schema if all(not require_storage_cast(feature) for feature in self.config.features.values()): # cheaper cast pa_table = pa_table.cast(schema) else: # more expensive cast; allows str <-> int/float or str to Audio for example pa_table = table_cast(pa_table, schema) return pa_table else: return pa_table.cast(pa.schema({"text": pa.string()})) def _generate_tables(self, files): pa_table_names = list(self.config.features) if self.config.features is not None else ["text"] for file_idx, file in enumerate(itertools.chain.from_iterable(files)): # open in text mode, by default translates universal newlines ("\n", "\r\n" and "\r") into "\n" with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f: if self.config.sample_by == "line": batch_idx = 0 while True: batch = f.read(self.config.chunksize) if not batch: break batch += f.readline() # finish current line # StringIO.readlines, by default splits only on "\n" (and keeps line breaks) batch = StringIO(batch).readlines() if not self.config.keep_linebreaks: batch = [line.rstrip("\n") for line in batch] pa_table = pa.Table.from_arrays([pa.array(batch)], names=pa_table_names) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # 
logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(pa_table) batch_idx += 1 elif self.config.sample_by == "paragraph": batch_idx = 0 batch = "" while True: new_batch = f.read(self.config.chunksize) if not new_batch: break batch += new_batch batch += f.readline() # finish current line batch = batch.split("\n\n") pa_table = pa.Table.from_arrays( [pa.array([example for example in batch[:-1] if example])], names=pa_table_names ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(pa_table) batch_idx += 1 batch = batch[-1] if batch: pa_table = pa.Table.from_arrays([pa.array([batch])], names=pa_table_names) yield (file_idx, batch_idx), self._cast_table(pa_table) elif self.config.sample_by == "document": text = f.read() pa_table = pa.Table.from_arrays([pa.array([text])], names=pa_table_names) yield file_idx, self._cast_table(pa_table)
datasets/src/datasets/packaged_modules/text/text.py/0
{ "file_path": "datasets/src/datasets/packaged_modules/text/text.py", "repo_id": "datasets", "token_count": 3042 }
70
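The `Text` builder above is the packaged module behind `load_dataset("text", ...)`, so its config fields map directly to `load_dataset` keyword arguments. A hedged example (file names are made up):

```python
from datasets import load_dataset

# One example per line (default), keeping the trailing "\n" on each line:
lines = load_dataset("text", data_files={"train": "train.txt"}, keep_linebreaks=True)

# One example per blank-line-separated paragraph, or one per whole file:
paragraphs = load_dataset("text", data_files="train.txt", sample_by="paragraph")
documents = load_dataset("text", data_files=["a.txt", "b.txt"], sample_by="document")
```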
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Sequence, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
datasets/src/datasets/tasks/question_answering.py/0
{ "file_path": "datasets/src/datasets/tasks/question_answering.py", "repo_id": "datasets", "token_count": 437 }
71
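A quick, hedged illustration of what the task template above is for: `column_mapping` tells dataset-preparation code how to rename a dataset's columns to the canonical extractive-QA schema. The source column names below are invented.

```python
from datasets.tasks import QuestionAnsweringExtractive

task = QuestionAnsweringExtractive(
    question_column="query",      # hypothetical source column names
    context_column="passage",
    answers_column="answers",
)
print(task.column_mapping)
# {'query': 'question', 'passage': 'context', 'answers': 'answers'}
```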
import enum import os from typing import Optional from huggingface_hub.utils import insecure_hashlib from .. import config from .logging import get_logger logger = get_logger(__name__) class VerificationMode(enum.Enum): """`Enum` that specifies which verification checks to run. The default mode is `BASIC_CHECKS`, which will perform only rudimentary checks to avoid slowdowns when generating/downloading a dataset for the first time. The verification modes: | | Verification checks | |---------------------------|------------------------------------------------------------------------------ | | `ALL_CHECKS` | Split checks, uniqueness of the keys yielded in case of the GeneratorBuilder | | | and the validity (number of files, checksums, etc.) of downloaded files | | `BASIC_CHECKS` (default) | Same as `ALL_CHECKS` but without checking downloaded files | | `NO_CHECKS` | None | """ ALL_CHECKS = "all_checks" BASIC_CHECKS = "basic_checks" NO_CHECKS = "no_checks" class ChecksumVerificationException(Exception): """Exceptions during checksums verifications of downloaded files.""" class UnexpectedDownloadedFile(ChecksumVerificationException): """Some downloaded files were not expected.""" class ExpectedMoreDownloadedFiles(ChecksumVerificationException): """Some files were supposed to be downloaded but were not.""" class NonMatchingChecksumError(ChecksumVerificationException): """The downloaded file checksum don't match the expected checksum.""" def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None): if expected_checksums is None: logger.info("Unable to verify checksums.") return if len(set(expected_checksums) - set(recorded_checksums)) > 0: raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums))) if len(set(recorded_checksums) - set(expected_checksums)) > 0: raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums))) bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]] for_verification_name = " for " + verification_name if verification_name is not None else "" if len(bad_urls) > 0: raise NonMatchingChecksumError( f"Checksums didn't match{for_verification_name}:\n" f"{bad_urls}\n" "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error" ) logger.info("All the checksums matched successfully" + for_verification_name) class SplitsVerificationException(Exception): """Exceptions during splis verifications""" class UnexpectedSplits(SplitsVerificationException): """The expected splits of the downloaded file is missing.""" class ExpectedMoreSplits(SplitsVerificationException): """Some recorded splits are missing.""" class NonMatchingSplitsSizesError(SplitsVerificationException): """The splits sizes don't match the expected splits sizes.""" def verify_splits(expected_splits: Optional[dict], recorded_splits: dict): if expected_splits is None: logger.info("Unable to verify splits sizes.") return if len(set(expected_splits) - set(recorded_splits)) > 0: raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits))) if len(set(recorded_splits) - set(expected_splits)) > 0: raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits))) bad_splits = [ {"expected": expected_splits[name], "recorded": recorded_splits[name]} for name in expected_splits if expected_splits[name].num_examples != recorded_splits[name].num_examples ] if len(bad_splits) > 0: raise NonMatchingSplitsSizesError(str(bad_splits)) 
logger.info("All the splits matched successfully.") def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict: """Compute the file size and the sha256 checksum of a file""" if record_checksum: m = insecure_hashlib.sha256() with open(path, "rb") as f: for chunk in iter(lambda: f.read(1 << 20), b""): m.update(chunk) checksum = m.hexdigest() else: checksum = None return {"num_bytes": os.path.getsize(path), "checksum": checksum} def is_small_dataset(dataset_size): """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`. Args: dataset_size (int): Dataset size in bytes. Returns: bool: Whether `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`. """ if dataset_size and config.IN_MEMORY_MAX_SIZE: return dataset_size < config.IN_MEMORY_MAX_SIZE else: return False
datasets/src/datasets/utils/info_utils.py/0
{ "file_path": "datasets/src/datasets/utils/info_utils.py", "repo_id": "datasets", "token_count": 1893 }
72
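A small hedged example of the verification helpers defined above; the URL, path, and sizes are fabricated placeholders.

```python
from datasets.utils.info_utils import get_size_checksum_dict, is_small_dataset, verify_checksums

# Record size + sha256 for a downloaded file (path is hypothetical):
recorded = {"https://example.org/train.csv": get_size_checksum_dict("/tmp/train.csv")}

# Comparing against expected checksums raises NonMatchingChecksumError on a mismatch,
# ExpectedMoreDownloadedFiles / UnexpectedDownloadedFile on missing or extra URLs:
expected = {"https://example.org/train.csv": recorded["https://example.org/train.csv"]}
verify_checksums(expected, recorded, verification_name="train split")  # logs success here

print(is_small_dataset(10 << 20))  # True only if config.IN_MEMORY_MAX_SIZE is set and larger
```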
from collections.abc import Iterator
from typing import Iterable


class tracked_str(str):
    origins = {}

    def set_origin(self, origin: str):
        if super().__repr__() not in self.origins:
            self.origins[super().__repr__()] = origin

    def get_origin(self):
        return self.origins.get(super().__repr__(), str(self))

    def __repr__(self) -> str:
        if super().__repr__() not in self.origins or self.origins[super().__repr__()] == self:
            return super().__repr__()
        else:
            return f"{str(self)} (origin={self.origins[super().__repr__()]})"


class tracked_list(list):
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.last_item = None

    def __iter__(self) -> Iterator:
        for x in super().__iter__():
            self.last_item = x
            yield x
        self.last_item = None

    def __repr__(self) -> str:
        if self.last_item is None:
            return super().__repr__()
        else:
            return f"{self.__class__.__name__}(current={self.last_item})"


class TrackedIterable(Iterable):
    def __init__(self) -> None:
        super().__init__()
        self.last_item = None

    def __repr__(self) -> str:
        if self.last_item is None:
            return super().__repr__()
        else:
            return f"{self.__class__.__name__}(current={self.last_item})"
datasets/src/datasets/utils/track.py/0
{ "file_path": "datasets/src/datasets/utils/track.py", "repo_id": "datasets", "token_count": 648 }
73
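Usage sketch for the tracking helpers above (the cache path and origin URL are made up); these are used internally, e.g. to report which source file or URL a piece of data came from.

```python
from datasets.utils.track import tracked_list, tracked_str

path = tracked_str("/tmp/cache/abc123")            # hypothetical local cache path
path.set_origin("https://example.org/train.csv")   # hypothetical origin URL
print(repr(path))   # /tmp/cache/abc123 (origin=https://example.org/train.csv)

items = tracked_list(["a", "b", "c"])
for item in items:
    print(repr(items))   # tracked_list(current=a), tracked_list(current=b), ...
```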
import textwrap import pyarrow as pa import pytest from datasets import Features, Image from datasets.packaged_modules.text.text import Text from ..utils import require_pil @pytest.fixture def text_file(tmp_path): filename = tmp_path / "text.txt" data = textwrap.dedent( """\ Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. Second paragraph: Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. """ ) with open(filename, "w", encoding="utf-8") as f: f.write(data) return str(filename) @pytest.fixture def text_file_with_image(tmp_path, image_file): filename = tmp_path / "text_with_image.txt" with open(filename, "w", encoding="utf-8") as f: f.write(image_file) return str(filename) @pytest.mark.parametrize("keep_linebreaks", [True, False]) def test_text_linebreaks(text_file, keep_linebreaks): with open(text_file, encoding="utf-8") as f: expected_content = f.read().splitlines(keepends=keep_linebreaks) text = Text(keep_linebreaks=keep_linebreaks, encoding="utf-8") generator = text._generate_tables([[text_file]]) generated_content = pa.concat_tables([table for _, table in generator]).to_pydict()["text"] assert generated_content == expected_content @require_pil def test_text_cast_image(text_file_with_image): with open(text_file_with_image, encoding="utf-8") as f: image_file = f.read().splitlines()[0] text = Text(encoding="utf-8", features=Features({"image": Image()})) generator = text._generate_tables([[text_file_with_image]]) pa_table = pa.concat_tables([table for _, table in generator]) assert pa_table.schema.field("image").type == Image()() generated_content = pa_table.to_pydict()["image"] assert generated_content == [{"path": image_file, "bytes": None}] @pytest.mark.parametrize("sample_by", ["line", "paragraph", "document"]) def test_text_sample_by(sample_by, text_file): with open(text_file, encoding="utf-8") as f: expected_content = f.read() if sample_by == "line": expected_content = expected_content.splitlines() elif sample_by == "paragraph": expected_content = expected_content.split("\n\n") elif sample_by == "document": expected_content = [expected_content] text = Text(sample_by=sample_by, encoding="utf-8", chunksize=100) generator = text._generate_tables([[text_file]]) generated_content = pa.concat_tables([table for _, table in generator]).to_pydict()["text"] assert generated_content == expected_content
datasets/tests/packaged_modules/test_text.py/0
{ "file_path": "datasets/tests/packaged_modules/test_text.py", "repo_id": "datasets", "token_count": 1289 }
74
import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.core import url_to_fs from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, extract_path_from_uri, is_remote_filesystem from .utils import require_lz4, require_zstandard def test_mockfs(mockfs): assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def test_non_mockfs(): assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def test_extract_path_from_uri(): mock_bucket = "mock-s3-bucket" dataset_path = f"s3://{mock_bucket}" dataset_path = extract_path_from_uri(dataset_path) assert dataset_path.startswith("s3://") is False dataset_path = "./local/path" new_dataset_path = extract_path_from_uri(dataset_path) assert dataset_path == new_dataset_path def test_is_remote_filesystem(mockfs): is_remote = is_remote_filesystem(mockfs) assert is_remote is True fs = fsspec.filesystem("file") is_remote = is_remote_filesystem(fs) assert is_remote is False @pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS) def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file): input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file} input_path = input_paths[compression_fs_class.protocol] if input_path is None: reason = f"for '{compression_fs_class.protocol}' compression protocol, " if compression_fs_class.protocol == "lz4": reason += require_lz4.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(reason) fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path) assert isinstance(fs, compression_fs_class) expected_filename = os.path.basename(input_path) expected_filename = expected_filename[: expected_filename.rindex(".")] assert fs.glob("*") == [expected_filename] with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize("protocol", ["zip", "gzip"]) def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path): compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path} compressed_file_path = compressed_file_paths[protocol] member_file_path = "dataset.jsonl" path = f"{protocol}://{member_file_path}::{compressed_file_path}" fs, *_ = url_to_fs(path) assert fs.isfile(member_file_path) assert not fs.isfile("non_existing_" + member_file_path) def test_fs_overwrites(): protocol = "bz2" # Import module import datasets.filesystems # Overwrite protocol and reload register_implementation(protocol, None, clobber=True) with pytest.warns(UserWarning) as warning_info: importlib.reload(datasets.filesystems) assert len(warning_info) == 1 assert ( str(warning_info[0].message) == f"A filesystem protocol was already set for {protocol} and will be overwritten." )
datasets/tests/test_filesystem.py/0
{ "file_path": "datasets/tests/test_filesystem.py", "repo_id": "datasets", "token_count": 1303 }
75
import time from dataclasses import dataclass from multiprocessing import Pool from unittest import TestCase from unittest.mock import patch import multiprocess import numpy as np import pytest from datasets.utils.py_utils import ( NestedDataStructure, asdict, iflatmap_unordered, map_nested, temp_seed, temporary_assignment, zip_dict, ) from .utils import require_tf, require_torch def np_sum(x): # picklable for multiprocessing return x.sum() def add_one(i): # picklable for multiprocessing return i + 1 @dataclass class A: x: int y: str class PyUtilsTest(TestCase): def test_map_nested(self): s1 = {} s2 = [] s3 = 1 s4 = [1, 2] s5 = {"a": 1, "b": 2} s6 = {"a": [1, 2], "b": [3, 4]} s7 = {"a": {"1": 1}, "b": 2} s8 = {"a": 1, "b": 2, "c": 3, "d": 4} expected_map_nested_s1 = {} expected_map_nested_s2 = [] expected_map_nested_s3 = 2 expected_map_nested_s4 = [2, 3] expected_map_nested_s5 = {"a": 2, "b": 3} expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]} expected_map_nested_s7 = {"a": {"1": 2}, "b": 3} expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5} self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1) self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2) self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3) self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4) self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5) self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6) self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7) self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8) num_proc = 2 self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1) self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2) self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3) self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4) self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5) self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6) self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7) self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8) sn1 = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)} expected_map_nested_sn1_sum = {"a": 2, "b": 0, "c": 2} expected_map_nested_sn1_int = { "a": np.eye(2).astype(int), "b": np.zeros(3).astype(int), "c": np.ones(2).astype(int), } self.assertEqual(map_nested(np_sum, sn1, map_numpy=False), expected_map_nested_sn1_sum) self.assertEqual( {k: v.tolist() for k, v in map_nested(int, sn1, map_numpy=True).items()}, {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()}, ) self.assertEqual(map_nested(np_sum, sn1, map_numpy=False, num_proc=num_proc), expected_map_nested_sn1_sum) self.assertEqual( {k: v.tolist() for k, v in map_nested(int, sn1, map_numpy=True, num_proc=num_proc).items()}, {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()}, ) with self.assertRaises(AttributeError): # can't pickle a local lambda map_nested(lambda x: x + 1, sn1, num_proc=num_proc) def test_zip_dict(self): d1 = {"a": 1, "b": 2} d2 = {"a": 3, "b": 4} d3 = {"a": 5, "b": 6} expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))]) self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result) def test_temporary_assignment(self): class Foo: my_attr = "bar" foo = Foo() self.assertEqual(foo.my_attr, "bar") with 
temporary_assignment(foo, "my_attr", "BAR"): self.assertEqual(foo.my_attr, "BAR") self.assertEqual(foo.my_attr, "bar") @pytest.mark.parametrize( "iterable_length, num_proc, expected_num_proc", [ (1, None, 1), (1, 1, 1), (2, None, 1), (2, 1, 1), (2, 2, 1), (2, 3, 1), (3, 2, 1), (16, 16, 16), (16, 17, 16), (17, 16, 16), ], ) def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc): with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch( "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool: data_struct = {f"{i}": i for i in range(iterable_length)} _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16) if expected_num_proc == 1: assert mock_single_map_nested.called assert not mock_multiprocessing_pool.called else: assert not mock_single_map_nested.called assert mock_multiprocessing_pool.called assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc class TempSeedTest(TestCase): @require_tf def test_tensorflow(self): import tensorflow as tf from tensorflow.keras import layers model = layers.Dense(2) def gen_random_output(): x = tf.random.uniform((1, 3)) return model(x).numpy() with temp_seed(42, set_tensorflow=True): out1 = gen_random_output() with temp_seed(42, set_tensorflow=True): out2 = gen_random_output() out3 = gen_random_output() np.testing.assert_equal(out1, out2) self.assertGreater(np.abs(out1 - out3).sum(), 0) @require_torch def test_torch(self): import torch def gen_random_output(): model = torch.nn.Linear(3, 2) x = torch.rand(1, 3) return model(x).detach().numpy() with temp_seed(42, set_pytorch=True): out1 = gen_random_output() with temp_seed(42, set_pytorch=True): out2 = gen_random_output() out3 = gen_random_output() np.testing.assert_equal(out1, out2) self.assertGreater(np.abs(out1 - out3).sum(), 0) def test_numpy(self): def gen_random_output(): return np.random.rand(1, 3) with temp_seed(42): out1 = gen_random_output() with temp_seed(42): out2 = gen_random_output() out3 = gen_random_output() np.testing.assert_equal(out1, out2) self.assertGreater(np.abs(out1 - out3).sum(), 0) @pytest.mark.parametrize("input_data", [{}]) def test_nested_data_structure_data(input_data): output_data = NestedDataStructure(input_data).data assert output_data == input_data @pytest.mark.parametrize( "data, expected_output", [ ({}, []), ([], []), ("foo", ["foo"]), (["foo", "bar"], ["foo", "bar"]), ([["foo", "bar"]], ["foo", "bar"]), ([[["foo"], ["bar"]]], ["foo", "bar"]), ([[["foo"], "bar"]], ["foo", "bar"]), ({"a": 1, "b": 2}, [1, 2]), ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]), ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]), ({"a": {"1": 1}, "b": 2}, [1, 2]), ({"a": {"1": [1]}, "b": 2}, [1, 2]), ({"a": {"1": [1]}, "b": [2]}, [1, 2]), ], ) def test_flatten(data, expected_output): output = NestedDataStructure(data).flatten() assert output == expected_output def test_asdict(): input = A(x=1, y="foobar") expected_output = {"x": 1, "y": "foobar"} assert asdict(input) == expected_output input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]} expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]} assert asdict(input) == expected_output with pytest.raises(TypeError): asdict([1, A(x=10, y="foo")]) def 
_split_text(text: str): return text.split() def _2seconds_generator_of_2items_with_timing(content): yield (time.time(), content) time.sleep(2) yield (time.time(), content) def test_iflatmap_unordered(): with Pool(2) as pool: out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10)) assert out.count("hello") == 10 assert out.count("there") == 10 assert len(out) == 20 # check multiprocess from pathos (uses dill for pickling) with multiprocess.Pool(2) as pool: out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10)) assert out.count("hello") == 10 assert out.count("there") == 10 assert len(out) == 20 # check that we get items as fast as possible with Pool(2) as pool: out = [] for yield_time, content in iflatmap_unordered( pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}] ): assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded" out.append(content) assert out.count("a") == 2 assert out.count("b") == 2 assert len(out) == 4
datasets/tests/test_py_utils.py/0
{ "file_path": "datasets/tests/test_py_utils.py", "repo_id": "datasets", "token_count": 4821 }
76
# Congratulations

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/communication/thumbnail.png" alt="Thumbnail"/>

**Congratulations on finishing this course!** With perseverance, hard work, and determination, **you've acquired a solid background in Deep Reinforcement Learning**.

But finishing this course is **not the end of your journey**. It's just the beginning: don't hesitate to explore Bonus Unit 3, where we show you topics you may be interested in studying. And don't hesitate to **share what you're doing and ask questions in the Discord server**.

**Thank you** for being part of this course. **I hope you liked this course as much as I loved writing it**.

Don't hesitate **to give us feedback on how we can improve the course** using [this form](https://forms.gle/BzKXWzLAGZESGNaE9).

And don't forget **to check the next section to see how you can get your certificate of completion 🎓 (if you pass)**.

One last thing, to keep in touch with the Reinforcement Learning Team and with me:

- [Follow me on Twitter](https://twitter.com/thomassimonini)
- [Follow Hugging Face Twitter account](https://twitter.com/huggingface)
- [Join the Hugging Face Discord](https://www.hf.co/join/discord)

## Keep Learning, Stay Awesome 🤗

Thomas Simonini,
deep-rl-class/units/en/communication/conclusion.mdx/0
{ "file_path": "deep-rl-class/units/en/communication/conclusion.mdx", "repo_id": "deep-rl-class", "token_count": 364 }
77
# Two main approaches for solving RL problems [[two-methods]] <Tip> Now that we learned the RL framework, how do we solve the RL problem? </Tip> In other words, how do we build an RL agent that can **select the actions that maximize its expected cumulative reward?** ## The Policy π: the agent’s brain [[policy]] The Policy **π** is the **brain of our Agent**, it’s the function that tells us what **action to take given the state we are in.** So it **defines the agent’s behavior** at a given time. <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/policy_1.jpg" alt="Policy" /> <figcaption>Think of policy as the brain of our agent, the function that will tell us the action to take given a state</figcaption> </figure> This Policy **is the function we want to learn**, our goal is to find the optimal policy π\*, the policy that **maximizes expected return** when the agent acts according to it. We find this π\* **through training.** There are two approaches to train our agent to find this optimal policy π\*: - **Directly,** by teaching the agent to learn which **action to take,** given the current state: **Policy-Based Methods.** - Indirectly, **teach the agent to learn which state is more valuable** and then take the action that **leads to the more valuable states**: Value-Based Methods. ## Policy-Based Methods [[policy-based]] In Policy-Based methods, **we learn a policy function directly.** This function will define a mapping from each state to the best corresponding action. Alternatively, it could define **a probability distribution over the set of possible actions at that state.** <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/policy_2.jpg" alt="Policy" /> <figcaption>As we can see here, the policy (deterministic) <b>directly indicates the action to take for each step.</b></figcaption> </figure> We have two types of policies: - *Deterministic*: a policy at a given state **will always return the same action.** <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/policy_3.jpg" alt="Policy"/> <figcaption>action = policy(state)</figcaption> </figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/policy_4.jpg" alt="Policy" width="100%"/> - *Stochastic*: outputs **a probability distribution over actions.** <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/policy_5.jpg" alt="Policy"/> <figcaption>policy(actions | state) = probability distribution over the set of actions given the current state</figcaption> </figure> <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/policy-based.png" alt="Policy Based"/> <figcaption>Given an initial state, our stochastic policy will output probability distributions over the possible actions at that state.</figcaption> </figure> If we recap: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/pbm_1.jpg" alt="Pbm recap" width="100%" /> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/pbm_2.jpg" alt="Pbm recap" width="100%" /> ## Value-based methods [[value-based]] In value-based methods, instead of learning a policy function, we **learn a value function** that maps a state to the expected 
value **of being at that state.**

The value of a state is the **expected discounted return** the agent can get if it **starts in that state, and then acts according to our policy.**

“Act according to our policy” just means that our policy is **“going to the state with the highest value”.**

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/value_1.jpg" alt="Value based RL" width="100%" />

Here we see that our value function **defines a value for each possible state.**

<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/value_2.jpg" alt="Value based RL"/>
<figcaption>Thanks to our value function, at each step our policy will select the state with the biggest value defined by the value function: -7, then -6, then -5 (and so on) to attain the goal.</figcaption>
</figure>

If we recap:

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/vbm_1.jpg" alt="Vbm recap" width="100%" />
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/vbm_2.jpg" alt="Vbm recap" width="100%" />
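To make the idea concrete, here is a minimal illustrative sketch of a value-based agent (not part of the course code; the corridor layout and state values below are made-up numbers): the policy simply moves greedily toward the neighboring state with the highest value.

```python
# A hand-crafted value function for a tiny 1D corridor: states 0..7, goal at state 7.
# These values are illustrative, not learned.
V = {0: -7, 1: -6, 2: -5, 3: -4, 4: -3, 5: -2, 6: -1, 7: 0}

def neighbors(state):
    # In this toy corridor the agent can only step left or right.
    return [s for s in (state - 1, state + 1) if s in V]

def greedy_policy(state):
    # "Act according to our policy" = move to the reachable state with the highest value.
    return max(neighbors(state), key=lambda s: V[s])

state = 0
while state != 7:
    state = greedy_policy(state)
    print(state)  # prints 1, 2, 3, ..., 7
```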
deep-rl-class/units/en/unit1/two-methods.mdx/0
{ "file_path": "deep-rl-class/units/en/unit1/two-methods.mdx", "repo_id": "deep-rl-class", "token_count": 1565 }
78
# What is RL? A short recap [[what-is-rl]]

In RL, we build an agent that can **make smart decisions**. For instance, an agent that **learns to play a video game.** Or a trading agent that **learns to maximize its profits** by deciding on **what stocks to buy and when to sell.**

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/rl-process.jpg" alt="RL process"/>

To make intelligent decisions, our agent will learn from the environment by **interacting with it through trial and error** and receiving rewards (positive or negative) **as its only feedback.**

Its goal **is to maximize its expected cumulative reward** (because of the reward hypothesis).

**The agent's decision-making process is called the policy π:** given a state, a policy will output an action or a probability distribution over actions. That is, given an observation of the environment, a policy will provide an action (or several probabilities, one per action) that the agent should take.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/policy.jpg" alt="Policy"/>

**Our goal is to find an optimal policy π* **, i.e., a policy that leads to the best expected cumulative reward.

And to find this optimal policy (hence solving the RL problem), there **are two main types of RL methods**:

- *Policy-based methods*: **Train the policy directly** to learn which action to take given a state.
- *Value-based methods*: **Train a value function** to learn **which state is more valuable** and use this value function **to take the action that leads to it.**

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/two-approaches.jpg" alt="Two RL approaches"/>

And in this unit, **we'll dive deeper into the value-based methods.**
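Before we do, here is what the interaction loop described above looks like in code. This is a minimal Gymnasium sketch with a random policy; the environment name is just an illustrative choice, and this is not part of the unit's notebooks.

```python
import gymnasium as gym

env = gym.make("FrozenLake-v1")  # example environment; any Gymnasium env works
state, info = env.reset()

done = False
while not done:
    action = env.action_space.sample()  # a real agent would use its policy π here
    state, reward, terminated, truncated, info = env.step(action)
    done = terminated or truncated

env.close()
```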
deep-rl-class/units/en/unit2/what-is-rl.mdx/0
{ "file_path": "deep-rl-class/units/en/unit2/what-is-rl.mdx", "repo_id": "deep-rl-class", "token_count": 525 }
79
# (Optional) the Policy Gradient Theorem

In this optional section, we're **going to study how we differentiate the objective function that we will use to approximate the policy gradient**.

Let's first recap our different formulas:

1. The Objective function

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/expected_reward.png" alt="Return"/>

2. The probability of a trajectory (given that action comes from \\(\pi_\theta\\)):

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/probability.png" alt="Probability"/>

So we have:

\\(\nabla_\theta J(\theta) = \nabla_\theta \sum_{\tau}P(\tau;\theta)R(\tau)\\)

We can rewrite the gradient of the sum as the sum of the gradients:

\\( = \sum_{\tau} \nabla_\theta P(\tau;\theta)R(\tau) \\)

We then multiply every term in the sum by \\(\frac{P(\tau;\theta)}{P(\tau;\theta)}\\) (which is possible since it equals 1)

\\( = \sum_{\tau} \frac{P(\tau;\theta)}{P(\tau;\theta)}\nabla_\theta P(\tau;\theta)R(\tau) \\)

We can simplify this further since \\( \frac{P(\tau;\theta)}{P(\tau;\theta)}\nabla_\theta P(\tau;\theta) = P(\tau;\theta)\frac{\nabla_\theta P(\tau;\theta)}{P(\tau;\theta)} \\)

So we can rewrite the sum as:

\\( \nabla_\theta J(\theta) = \sum_{\tau} P(\tau;\theta) \frac{\nabla_\theta P(\tau;\theta)}{P(\tau;\theta)}R(\tau) \\)

We can then use the *derivative log trick* (also called *likelihood ratio trick* or *REINFORCE trick*), a simple rule in calculus that implies that \\( \nabla_x log f(x) = \frac{\nabla_x f(x)}{f(x)} \\)

So, given that we have \\(\frac{\nabla_\theta P(\tau;\theta)}{P(\tau;\theta)} \\), we can rewrite it as \\(\nabla_\theta log P(\tau;\theta) \\)

So this is our likelihood ratio policy gradient:

\\( \nabla_\theta J(\theta) = \sum_{\tau} P(\tau;\theta) \nabla_\theta log P(\tau;\theta) R(\tau) \\)

Thanks to this new formula, we can estimate the gradient using trajectory samples (we approximate the likelihood ratio policy gradient with a sample-based estimate, if you prefer):

\\(\nabla_\theta J(\theta) = \frac{1}{m} \sum^{m}_{i=1} \nabla_\theta log P(\tau^{(i)};\theta)R(\tau^{(i)})\\)

where each \\(\tau^{(i)}\\) is a sampled trajectory.

But we still have some mathematical work to do: we need to simplify \\( \nabla_\theta log P(\tau;\theta) \\)

We know that:

\\(\nabla_\theta log P(\tau^{(i)};\theta)= \nabla_\theta log[ \mu(s_0) \prod_{t=0}^{H} P(s_{t+1}^{(i)}|s_{t}^{(i)}, a_{t}^{(i)}) \pi_\theta(a_{t}^{(i)}|s_{t}^{(i)})]\\)

Where \\(\mu(s_0)\\) is the initial state distribution and \\( P(s_{t+1}^{(i)}|s_{t}^{(i)}, a_{t}^{(i)}) \\) is the state transition dynamics of the MDP.

We know that the log of a product is equal to the sum of the logs:

\\(\nabla_\theta log P(\tau^{(i)};\theta)= \nabla_\theta \left[log \mu(s_0) + \sum\limits_{t=0}^{H}log P(s_{t+1}^{(i)}|s_{t}^{(i)} a_{t}^{(i)}) + \sum\limits_{t=0}^{H}log \pi_\theta(a_{t}^{(i)}|s_{t}^{(i)})\right] \\)

We also know that the gradient of the sum is equal to the sum of the gradients:

\\( \nabla_\theta log P(\tau^{(i)};\theta)=\nabla_\theta log\mu(s_0) + \nabla_\theta \sum\limits_{t=0}^{H} log P(s_{t+1}^{(i)}|s_{t}^{(i)} a_{t}^{(i)}) + \nabla_\theta \sum\limits_{t=0}^{H} log \pi_\theta(a_{t}^{(i)}|s_{t}^{(i)}) \\)

Since neither the initial state distribution nor the state transition dynamics of the MDP depend on \\(\theta\\), the derivative of both terms is 0.
So we can remove them: Since: \\(\nabla_\theta \sum_{t=0}^{H} log P(s_{t+1}^{(i)}|s_{t}^{(i)} a_{t}^{(i)}) = 0 \\) and \\( \nabla_\theta \mu(s_0) = 0\\) \\(\nabla_\theta log P(\tau^{(i)};\theta) = \nabla_\theta \sum_{t=0}^{H} log \pi_\theta(a_{t}^{(i)}|s_{t}^{(i)})\\) We can rewrite the gradient of the sum as the sum of gradients: \\( \nabla_\theta log P(\tau^{(i)};\theta)= \sum_{t=0}^{H} \nabla_\theta log \pi_\theta(a_{t}^{(i)}|s_{t}^{(i)}) \\) So, the final formula for estimating the policy gradient is: \\( \nabla_{\theta} J(\theta) = \hat{g} = \frac{1}{m} \sum^{m}_{i=1} \sum^{H}_{t=0} \nabla_\theta \log \pi_\theta(a^{(i)}_{t} | s_{t}^{(i)})R(\tau^{(i)}) \\)
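In code, this estimator is what a REINFORCE loss computes. Here is a minimal PyTorch sketch that illustrates the formula above (it is not the exact implementation used in the unit's notebook): we average, over sampled trajectories, the sum of log-probabilities weighted by the trajectory return, and negate it so that gradient descent performs gradient ascent on \\(J(\theta)\\).

```python
import torch

def reinforce_loss(trajectories):
    # trajectories: list of (log_probs, R) pairs, one per sampled trajectory tau^(i),
    # where log_probs is a list of log pi_theta(a_t | s_t) tensors and R is R(tau^(i)).
    losses = []
    for log_probs, R in trajectories:
        # sum_t grad log pi_theta(a_t | s_t) * R(tau), with a minus sign for gradient ascent
        losses.append(-torch.stack(log_probs).sum() * R)
    # (1/m) sum over the m sampled trajectories
    return torch.stack(losses).mean()
```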
deep-rl-class/units/en/unit4/pg-theorem.mdx/0
{ "file_path": "deep-rl-class/units/en/unit4/pg-theorem.mdx", "repo_id": "deep-rl-class", "token_count": 1854 }
80
# Advantage Actor Critic (A2C) using Robotics Simulations with Panda-Gym 🤖 [[hands-on]]

<CourseFloatingBanner classNames="absolute z-10 right-0 top-0"
notebooks={[
{label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/deep-rl-class/blob/main/notebooks/unit6/unit6.ipynb"}
]}
askForHelpUrl="http://hf.co/join/discord" />

Now that you've studied the theory behind Advantage Actor Critic (A2C), **you're ready to train your A2C agent** using Stable-Baselines3 in a robotic environment. You'll train:

- A robotic arm 🦾 to move to the correct position.

We're going to use
- [panda-gym](https://github.com/qgallouedec/panda-gym)

To validate this hands-on for the certification process, you need to push your trained models to the Hub and get the following results:

- `PandaReachDense-v3` gets a result of >= -3.5.

To find your result, [go to the leaderboard](https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard) and find your model; **the result = mean_reward - std of reward**

For more information about the certification process, check this section 👉 https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process

**To start the hands-on, click on the Open In Colab button** 👇 :

[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/deep-rl-class/blob/master/notebooks/unit6/unit6.ipynb)

# Unit 6: Advantage Actor Critic (A2C) using Robotics Simulations with Panda-Gym 🤖

### 🎮 Environments:

- [Panda-Gym](https://github.com/qgallouedec/panda-gym)

### 📚 RL-Library:

- [Stable-Baselines3](https://stable-baselines3.readthedocs.io/)

We're constantly trying to improve our tutorials, so **if you find any issues in this notebook**, please [open an issue on the GitHub Repo](https://github.com/huggingface/deep-rl-class/issues).

## Objectives of this notebook 🏆

At the end of the notebook, you will:

- Be able to use **Panda-Gym**, the environment library.
- Be able to **train robots using A2C**.
- Understand why **we need to normalize the input**.
- Be able to **push your trained agent and the code to the Hub** with a nice video replay and an evaluation score 🔥.

## Prerequisites 🏗️

Before diving into the notebook, you need to:

🔲 📚 Study [Actor-Critic methods by reading Unit 6](https://huggingface.co/deep-rl-course/unit6/introduction) 🤗

# Let's train our first robots 🤖

## Set the GPU 💪

- To **accelerate the agent's training, we'll use a GPU**. To do that, go to `Runtime > Change Runtime type`

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/gpu-step1.jpg" alt="GPU Step 1">

- `Hardware Accelerator > GPU`

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/gpu-step2.jpg" alt="GPU Step 2">

## Create a virtual display 🔽

During the notebook, we'll need to generate a replay video. To do so, with Colab, **we need a virtual screen to be able to render the environment** (and thus record the frames).
The following cell will install the libraries and create and run a virtual screen 🖥

```python
%%capture
!apt install python-opengl
!apt install ffmpeg
!apt install xvfb
!pip3 install pyvirtualdisplay
```

```python
# Virtual display
from pyvirtualdisplay import Display

virtual_display = Display(visible=0, size=(1400, 900))
virtual_display.start()
```

### Install dependencies 🔽

We'll install several packages:

- `gymnasium`
- `panda-gym`: Contains the robotic arm environments.
- `stable-baselines3`: The SB3 deep reinforcement learning library.
- `huggingface_sb3`: Additional code for Stable-baselines3 to load and upload models from the Hugging Face 🤗 Hub.
- `huggingface_hub`: Library allowing anyone to work with the Hub repositories.

```bash
!pip install stable-baselines3[extra]
!pip install gymnasium
!pip install huggingface_sb3
!pip install huggingface_hub
!pip install panda_gym
```

## Import the packages 📦

```python
import os

import gymnasium as gym
import panda_gym

from huggingface_sb3 import load_from_hub, package_to_hub

from stable_baselines3 import A2C
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize
from stable_baselines3.common.env_util import make_vec_env

from huggingface_hub import notebook_login
```

## PandaReachDense-v3 🦾

The agent we're going to train is a robotic arm that needs to perform control tasks (moving the arm and using the end-effector).

In robotics, the *end-effector* is the device at the end of a robotic arm designed to interact with the environment.

In `PandaReach`, the robot must place its end-effector at a target position (green ball).

We're going to use the dense version of this environment. This means we'll get a *dense reward function* that **provides a reward at each timestep** (the closer the agent is to completing the task, the higher the reward), contrary to a *sparse reward function*, where the environment **returns a reward if and only if the task is completed**.

Also, we're going to use *end-effector displacement control*, which means the **action corresponds to the displacement of the end-effector**. We don't control the individual motion of each joint (joint control).

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit8/robotics.jpg" alt="Robotics"/>

This way **the training will be easier**.

### Create the environment

#### The environment 🎮

In `PandaReachDense-v3` the robotic arm must place its end-effector at a target position (green ball).

```python
env_id = "PandaReachDense-v3"

# Create the env
env = gym.make(env_id)

# Get the state space and action space
s_size = env.observation_space.shape
a_size = env.action_space
```

```python
print("_____OBSERVATION SPACE_____ \n")
print("The State Space is: ", s_size)
print("Sample observation", env.observation_space.sample())  # Get a random observation
```

The observation space **is a dictionary with 3 different elements**:

- `achieved_goal`: (x,y,z) position of the goal.
- `desired_goal`: (x,y,z) distance between the goal position and the current object position.
- `observation`: position (x,y,z) and velocity of the end-effector (vx, vy, vz).

Since the observation is a dictionary, **we will need to use a MultiInputPolicy policy instead of MlpPolicy**.
```python
print("\n _____ACTION SPACE_____ \n")
print("The Action Space is: ", a_size)
print("Action Space Sample", env.action_space.sample())  # Take a random action
```

The action space is a vector with 3 values:
- Control x, y, z movement

### Normalize observation and rewards

A good practice in reinforcement learning is to [normalize input features](https://stable-baselines3.readthedocs.io/en/master/guide/rl_tips.html).

For that purpose, there is a wrapper that will compute a running average and standard deviation of the input features.

We also normalize rewards with this same wrapper by adding `norm_reward = True`.

[You should check the documentation to fill this cell](https://stable-baselines3.readthedocs.io/en/master/guide/vec_envs.html#vecnormalize)

```python
env = make_vec_env(env_id, n_envs=4)

# Adding this wrapper to normalize the observation and the reward
env = # TODO: Add the wrapper
```

#### Solution

```python
env = make_vec_env(env_id, n_envs=4)

env = VecNormalize(env, norm_obs=True, norm_reward=True, clip_obs=10.)
```

### Create the A2C Model 🤖

For more information about the A2C implementation with StableBaselines3, check: https://stable-baselines3.readthedocs.io/en/master/modules/a2c.html#notes

To find the best parameters, I checked the [official trained agents by the Stable-Baselines3 team](https://huggingface.co/sb3).

```python
model = # Create the A2C model and try to find the best parameters
```

#### Solution

```python
model = A2C(policy = "MultiInputPolicy", env = env, verbose=1)
```

### Train the A2C agent 🏃

- Let's train our agent for 1,000,000 timesteps. Don't forget to use the GPU on Colab. It will take approximately ~25-40min

```python
model.learn(1_000_000)
```

```python
# Save the model and VecNormalize statistics when saving the agent
model.save("a2c-PandaReachDense-v3")
env.save("vec_normalize.pkl")
```

### Evaluate the agent 📈

- Now that our agent is trained, we need to **check its performance**.
- Stable-Baselines3 provides a method to do that: `evaluate_policy`

```python
from stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize

# Load the saved statistics
eval_env = DummyVecEnv([lambda: gym.make("PandaReachDense-v3")])
eval_env = VecNormalize.load("vec_normalize.pkl", eval_env)

# We need to override the render_mode
eval_env.render_mode = "rgb_array"

# do not update them at test time
eval_env.training = False
# reward normalization is not needed at test time
eval_env.norm_reward = False

# Load the agent
model = A2C.load("a2c-PandaReachDense-v3")

mean_reward, std_reward = evaluate_policy(model, eval_env)

print(f"Mean reward = {mean_reward:.2f} +/- {std_reward:.2f}")
```

### Publish your trained model on the Hub 🔥

Now that we saw we got good results after the training, we can publish our trained model on the Hub with one line of code.

📚 The libraries documentation 👉 https://github.com/huggingface/huggingface_sb3/tree/main#hugging-face--x-stable-baselines3-v20

By using `package_to_hub`, as we already mentioned in the previous units, **you evaluate, record a replay, generate a model card of your agent and push it to the hub**.
This way:

- You can **showcase your work** 🔥
- You can **visualize your agent playing** 👀
- You can **share with the community an agent that others can use** 💾
- You can **access a leaderboard 🏆 to see how well your agent is performing compared to your classmates** 👉 https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard

To be able to share your model with the community, there are three more steps to follow:

1️⃣ (If it's not already done) create an account on HF ➡ https://huggingface.co/join

2️⃣ Sign in and then store your authentication token from the Hugging Face website.

- Create a new token (https://huggingface.co/settings/tokens) **with write role**

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/create-token.jpg" alt="Create HF Token">

- Copy the token
- Run the cell below and paste the token

```python
notebook_login()
!git config --global credential.helper store
```

If you don't want to use Google Colab or a Jupyter Notebook, you need to use this command instead: `huggingface-cli login`

3️⃣ We're now ready to push our trained agent to the 🤗 Hub 🔥 using the `package_to_hub()` function.

For this environment, **running this cell can take approximately 10min**

```python
from huggingface_sb3 import package_to_hub

package_to_hub(
    model=model,
    model_name=f"a2c-{env_id}",
    model_architecture="A2C",
    env_id=env_id,
    eval_env=eval_env,
    repo_id=f"ThomasSimonini/a2c-{env_id}", # Change the username
    commit_message="Initial commit",
)
```

## Some additional challenges 🏆

The best way to learn **is to try things on your own**! Why not try `PandaPickAndPlace-v3`?

If you want to try more advanced tasks for panda-gym, you need to check what was done using **TQC or SAC** (a more sample-efficient algorithm suited for robotics tasks). In real robotics, you'll use a more sample-efficient algorithm for a simple reason: contrary to a simulation, **if you move your robotic arm too much, you risk breaking it**.

PandaPickAndPlace-v1 (this model uses the v1 version of the environment): https://huggingface.co/sb3/tqc-PandaPickAndPlace-v1

And don't hesitate to check the panda-gym documentation here: https://panda-gym.readthedocs.io/en/latest/usage/train_with_sb3.html

We provide you with the steps to train another agent (optional):

1. Define the environment called "PandaPickAndPlace-v3"
2. Make a vectorized environment
3. Add a wrapper to normalize the observations and rewards. [Check the documentation](https://stable-baselines3.readthedocs.io/en/master/guide/vec_envs.html#vecnormalize)
4. Create the A2C Model (don't forget verbose=1 to print the training logs).
5. Train it for 1M Timesteps
6. Save the model and VecNormalize statistics when saving the agent
7. Evaluate your agent
8. Publish your trained model on the Hub 🔥 with `package_to_hub`

### Solution (optional)

```python
# 1 - 2
env_id = "PandaPickAndPlace-v3"
env = make_vec_env(env_id, n_envs=4)

# 3
env = VecNormalize(env, norm_obs=True, norm_reward=True, clip_obs=10.)
# 4 model = A2C(policy = "MultiInputPolicy", env = env, verbose=1) # 5 model.learn(1_000_000) ``` ```python # 6 model_name = "a2c-PandaPickAndPlace-v3"; model.save(model_name) env.save("vec_normalize.pkl") # 7 from stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize # Load the saved statistics eval_env = DummyVecEnv([lambda: gym.make("PandaPickAndPlace-v3")]) eval_env = VecNormalize.load("vec_normalize.pkl", eval_env) # do not update them at test time eval_env.training = False # reward normalization is not needed at test time eval_env.norm_reward = False # Load the agent model = A2C.load(model_name) mean_reward, std_reward = evaluate_policy(model, eval_env) print(f"Mean reward = {mean_reward:.2f} +/- {std_reward:.2f}") # 8 package_to_hub( model=model, model_name=f"a2c-{env_id}", model_architecture="A2C", env_id=env_id, eval_env=eval_env, repo_id=f"ThomasSimonini/a2c-{env_id}", # TODO: Change the username commit_message="Initial commit", ) ``` See you on Unit 7! 🔥 ## Keep learning, stay awesome 🤗
deep-rl-class/units/en/unit6/hands-on.mdx/0
{ "file_path": "deep-rl-class/units/en/unit6/hands-on.mdx", "repo_id": "deep-rl-class", "token_count": 4616 }
81
# Interesting Environments to try

Here we provide a list of interesting environments you can try to train your agents on:

## DIAMBRA Arena

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit12/diambraarena.png" alt="diambraArena"/>

DIAMBRA Arena is a software package featuring a collection of high-quality environments for Reinforcement Learning research and experimentation. It provides a standard interface to popular arcade emulated video games, offering a Python API fully compliant with the OpenAI Gym/Gymnasium format, which makes its adoption smooth and straightforward.

It supports all major Operating Systems (Linux, Windows and MacOS) and can be easily installed via [Python PIP](https://pypi.org/project/diambra-arena/). It is completely free to use; the user only needs to register on the [official website](https://diambra.ai/register/).

In addition, its [GitHub repository](https://github.com/diambra/) provides a collection of examples covering the main use cases of interest that can be run in just a few steps.

#### Main Features

All environments are episodic Reinforcement Learning tasks, with discrete actions (gamepad buttons) and observations composed of screen pixels plus additional numerical data (RAM values such as characters' health bars or characters' stage side).

They all support both single player (1P) and two players (2P) modes, making them the perfect resource to explore Standard RL, Competitive Multi-Agent, Competitive Human-Agent, Self-Play, Imitation Learning and Human-in-the-Loop.

[Interfaced games](https://docs.diambra.ai/envs/games/) have been selected among the most popular fighting retro-games. While sharing the same fundamental mechanics, they provide different challenges, with specific features such as different type and number of characters, how to perform combos, health bars recharging, etc.

DIAMBRA Arena is built to maximize compatibility with all major Reinforcement Learning libraries. It natively provides interfaces with the two most important packages: [Stable Baselines 3](https://stable-baselines3.readthedocs.io/en/master/) and [Ray RLlib](https://docs.ray.io/en/latest/rllib/index.html), while Stable Baselines is also available but deprecated. Their usage is illustrated in the [official documentation](https://docs.diambra.ai/) and in the [DIAMBRA Agents examples repository](https://github.com/diambra/agents). It can easily be interfaced with any other package in a similar way.

### Competition Platform

DIAMBRA also provides a competition platform fully integrated with the Hugging Face Hub, on which you can submit your trained agents and compete with other coders around the globe in epic video games tournaments!

It features a public leaderboard where users are ranked by the best score achieved by their agents in our different environments.

It also offers the possibility to unlock cool achievements depending on the performance of your agent.

Submitted agents are evaluated and their episodes are streamed on [DIAMBRA Twitch channel](https://www.twitch.tv/diambra_ai).
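To give a rough idea of what the Gym/Gymnasium-compliant interface looks like, here is a random-agent sketch. The game id and the `diambra.arena.make` entry point are assumptions based on the official documentation, so double-check them there; note that DIAMBRA scripts are normally launched through the `diambra` CLI.

```python
import diambra.arena

# "doapp" is one of the interfaced fighting games; see the docs for the full list.
env = diambra.arena.make("doapp", render_mode="human")

observation, info = env.reset()
terminated, truncated = False, False
while not (terminated or truncated):
    action = env.action_space.sample()  # random policy, just to exercise the interface
    observation, reward, terminated, truncated, info = env.step(action)

env.close()
```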
#### References To start using this environment, check these resources: - [Official Docs](https://docs.diambra.ai/) - [Competition Platform](https://diambra.ai) - [GitHub](https://github.com/diambra/) - [Discord](https://diambra.ai/discord) ## MineRL <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit12/minerl.jpg" alt="MineRL"/> MineRL is a Python library that provides a Gym interface for interacting with the video game Minecraft, accompanied by datasets of human gameplay. Every year there are challenges with this library. Check the [website](https://minerl.io/) To start using this environment, check these resources: - [What is MineRL?](https://www.youtube.com/watch?v=z6PTrGifupU) - [First steps in MineRL](https://www.youtube.com/watch?v=8yIrWcyWGek) - [MineRL documentation and tutorials](https://minerl.readthedocs.io/en/latest/) ## DonkeyCar Simulator <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit12/donkeycar.jpg" alt="Donkey Car"/> Donkey is a Self Driving Car Platform for hobby remote control cars. This simulator version is built on the Unity game platform. It uses their internal physics and graphics and connects to a donkey Python process to use our trained model to control the simulated Donkey (car). To start using this environment, check these resources: - [DonkeyCar Simulator documentation](https://docs.donkeycar.com/guide/deep_learning/simulator/) - [Learn to Drive Smoothly (Antonin Raffin's tutorial) Part 1](https://www.youtube.com/watch?v=ngK33h00iBE) - [Learn to Drive Smoothly (Antonin Raffin's tutorial) Part 2](https://www.youtube.com/watch?v=DUqssFvcSOY) - [Learn to Drive Smoothly (Antonin Raffin's tutorial) Part 3](https://www.youtube.com/watch?v=v8j2bpcE4Rg) - Pretrained agents: - https://huggingface.co/araffin/tqc-donkey-mountain-track-v0 - https://huggingface.co/araffin/tqc-donkey-avc-sparkfun-v0 - https://huggingface.co/araffin/tqc-donkey-minimonaco-track-v0 ## Starcraft II <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit12/alphastar.jpg" alt="Alphastar"/> Starcraft II is a famous *real-time strategy game*. DeepMind has used this game for their Deep Reinforcement Learning research with [Alphastar](https://www.deepmind.com/blog/alphastar-mastering-the-real-time-strategy-game-starcraft-ii) To start using this environment, check these resources: - [Starcraft gym](http://starcraftgym.com/) - [A. I. Learns to Play Starcraft 2 (Reinforcement Learning) tutorial](https://www.youtube.com/watch?v=q59wap1ELQ4) ## Author This section was written by <a href="https://twitter.com/ThomasSimonini"> Thomas Simonini</a>
deep-rl-class/units/en/unitbonus3/envs-to-try.mdx/0
{ "file_path": "deep-rl-class/units/en/unitbonus3/envs-to-try.mdx", "repo_id": "deep-rl-class", "token_count": 1642 }
82
import argparse import sys sys.path.append(".") from base_classes import LCMLoRATextToImageBenchmark # noqa: E402 if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--ckpt", type=str, default="stabilityai/stable-diffusion-xl-base-1.0", ) parser.add_argument("--batch_size", type=int, default=1) parser.add_argument("--num_inference_steps", type=int, default=4) parser.add_argument("--model_cpu_offload", action="store_true") parser.add_argument("--run_compile", action="store_true") args = parser.parse_args() benchmark_pipe = LCMLoRATextToImageBenchmark(args) benchmark_pipe.benchmark(args)
diffusers/benchmarks/benchmark_t2i_lcm_lora.py/0
{ "file_path": "diffusers/benchmarks/benchmark_t2i_lcm_lora.py", "repo_id": "diffusers", "token_count": 273 }
83
- sections: - local: index title: 🧨 Diffusers - local: quicktour title: Quicktour - local: stable_diffusion title: Effective and efficient diffusion - local: installation title: Installation title: Get started - sections: - local: tutorials/tutorial_overview title: Overview - local: using-diffusers/write_own_pipeline title: Understanding pipelines, models and schedulers - local: tutorials/autopipeline title: AutoPipeline - local: tutorials/basic_training title: Train a diffusion model - local: tutorials/using_peft_for_inference title: Load LoRAs for inference - local: tutorials/fast_diffusion title: Accelerate inference of text-to-image diffusion models title: Tutorials - sections: - sections: - local: using-diffusers/loading_overview title: Overview - local: using-diffusers/loading title: Load pipelines, models, and schedulers - local: using-diffusers/schedulers title: Load and compare different schedulers - local: using-diffusers/custom_pipeline_overview title: Load community pipelines and components - local: using-diffusers/using_safetensors title: Load safetensors - local: using-diffusers/other-formats title: Load different Stable Diffusion formats - local: using-diffusers/loading_adapters title: Load adapters - local: using-diffusers/push_to_hub title: Push files to the Hub title: Loading & Hub - sections: - local: using-diffusers/pipeline_overview title: Overview - local: using-diffusers/unconditional_image_generation title: Unconditional image generation - local: using-diffusers/conditional_image_generation title: Text-to-image - local: using-diffusers/img2img title: Image-to-image - local: using-diffusers/inpaint title: Inpainting - local: using-diffusers/text-img2vid title: Text or image-to-video - local: using-diffusers/depth2img title: Depth-to-image title: Tasks - sections: - local: using-diffusers/textual_inversion_inference title: Textual inversion - local: using-diffusers/ip_adapter title: IP-Adapter - local: using-diffusers/merge_loras title: Merge LoRAs - local: training/distributed_inference title: Distributed inference with multiple GPUs - local: using-diffusers/reusing_seeds title: Improve image quality with deterministic generation - local: using-diffusers/control_brightness title: Control image brightness - local: using-diffusers/weighted_prompts title: Prompt weighting - local: using-diffusers/freeu title: Improve generation quality with FreeU title: Techniques - sections: - local: using-diffusers/pipeline_overview title: Overview - local: using-diffusers/sdxl title: Stable Diffusion XL - local: using-diffusers/sdxl_turbo title: SDXL Turbo - local: using-diffusers/kandinsky title: Kandinsky - local: using-diffusers/controlnet title: ControlNet - local: using-diffusers/shap-e title: Shap-E - local: using-diffusers/diffedit title: DiffEdit - local: using-diffusers/distilled_sd title: Distilled Stable Diffusion inference - local: using-diffusers/callback title: Pipeline callbacks - local: using-diffusers/reproducibility title: Create reproducible pipelines - local: using-diffusers/custom_pipeline_examples title: Community pipelines - local: using-diffusers/contribute_pipeline title: Contribute a community pipeline - local: using-diffusers/inference_with_lcm_lora title: Latent Consistency Model-LoRA - local: using-diffusers/inference_with_lcm title: Latent Consistency Model - local: using-diffusers/inference_with_tcd_lora title: Trajectory Consistency Distillation-LoRA - local: using-diffusers/svd title: Stable Video Diffusion title: Specific pipeline examples - 
sections: - local: training/overview title: Overview - local: training/create_dataset title: Create a dataset for training - local: training/adapt_a_model title: Adapt a model to a new task - sections: - local: training/unconditional_training title: Unconditional image generation - local: training/text2image title: Text-to-image - local: training/sdxl title: Stable Diffusion XL - local: training/kandinsky title: Kandinsky 2.2 - local: training/wuerstchen title: Wuerstchen - local: training/controlnet title: ControlNet - local: training/t2i_adapters title: T2I-Adapters - local: training/instructpix2pix title: InstructPix2Pix title: Models - sections: - local: training/text_inversion title: Textual Inversion - local: training/dreambooth title: DreamBooth - local: training/lora title: LoRA - local: training/custom_diffusion title: Custom Diffusion - local: training/lcm_distill title: Latent Consistency Distillation - local: training/ddpo title: Reinforcement learning training with DDPO title: Methods title: Training - sections: - local: using-diffusers/other-modalities title: Other Modalities title: Taking Diffusers Beyond Images title: Using Diffusers - sections: - local: optimization/opt_overview title: Overview - sections: - local: optimization/fp16 title: Speed up inference - local: optimization/memory title: Reduce memory usage - local: optimization/torch2.0 title: PyTorch 2.0 - local: optimization/xformers title: xFormers - local: optimization/tome title: Token merging - local: optimization/deepcache title: DeepCache title: General optimizations - sections: - local: using-diffusers/stable_diffusion_jax_how_to title: JAX/Flax - local: optimization/onnx title: ONNX - local: optimization/open_vino title: OpenVINO - local: optimization/coreml title: Core ML title: Optimized model types - sections: - local: optimization/mps title: Metal Performance Shaders (MPS) - local: optimization/habana title: Habana Gaudi title: Optimized hardware title: Optimization - sections: - local: conceptual/philosophy title: Philosophy - local: using-diffusers/controlling_generation title: Controlled generation - local: conceptual/contribution title: How to contribute? 
- local: conceptual/ethical_guidelines title: Diffusers' Ethical Guidelines - local: conceptual/evaluation title: Evaluating Diffusion Models title: Conceptual Guides - sections: - sections: - local: api/configuration title: Configuration - local: api/logging title: Logging - local: api/outputs title: Outputs title: Main Classes - sections: - local: api/loaders/ip_adapter title: IP-Adapter - local: api/loaders/lora title: LoRA - local: api/loaders/single_file title: Single files - local: api/loaders/textual_inversion title: Textual Inversion - local: api/loaders/unet title: UNet - local: api/loaders/peft title: PEFT title: Loaders - sections: - local: api/models/overview title: Overview - local: api/models/unet title: UNet1DModel - local: api/models/unet2d title: UNet2DModel - local: api/models/unet2d-cond title: UNet2DConditionModel - local: api/models/unet3d-cond title: UNet3DConditionModel - local: api/models/unet-motion title: UNetMotionModel - local: api/models/uvit2d title: UViT2DModel - local: api/models/vq title: VQModel - local: api/models/autoencoderkl title: AutoencoderKL - local: api/models/asymmetricautoencoderkl title: AsymmetricAutoencoderKL - local: api/models/autoencoder_tiny title: Tiny AutoEncoder - local: api/models/consistency_decoder_vae title: ConsistencyDecoderVAE - local: api/models/transformer2d title: Transformer2D - local: api/models/transformer_temporal title: Transformer Temporal - local: api/models/prior_transformer title: Prior Transformer - local: api/models/controlnet title: ControlNet title: Models - sections: - local: api/pipelines/overview title: Overview - local: api/pipelines/amused title: aMUSEd - local: api/pipelines/animatediff title: AnimateDiff - local: api/pipelines/attend_and_excite title: Attend-and-Excite - local: api/pipelines/audioldm title: AudioLDM - local: api/pipelines/audioldm2 title: AudioLDM 2 - local: api/pipelines/auto_pipeline title: AutoPipeline - local: api/pipelines/blip_diffusion title: BLIP-Diffusion - local: api/pipelines/consistency_models title: Consistency Models - local: api/pipelines/controlnet title: ControlNet - local: api/pipelines/controlnet_sdxl title: ControlNet with Stable Diffusion XL - local: api/pipelines/dance_diffusion title: Dance Diffusion - local: api/pipelines/ddim title: DDIM - local: api/pipelines/ddpm title: DDPM - local: api/pipelines/deepfloyd_if title: DeepFloyd IF - local: api/pipelines/diffedit title: DiffEdit - local: api/pipelines/dit title: DiT - local: api/pipelines/i2vgenxl title: I2VGen-XL - local: api/pipelines/pix2pix title: InstructPix2Pix - local: api/pipelines/kandinsky title: Kandinsky 2.1 - local: api/pipelines/kandinsky_v22 title: Kandinsky 2.2 - local: api/pipelines/kandinsky3 title: Kandinsky 3 - local: api/pipelines/latent_consistency_models title: Latent Consistency Models - local: api/pipelines/latent_diffusion title: Latent Diffusion - local: api/pipelines/ledits_pp title: LEDITS++ - local: api/pipelines/panorama title: MultiDiffusion - local: api/pipelines/musicldm title: MusicLDM - local: api/pipelines/paint_by_example title: Paint by Example - local: api/pipelines/pia title: Personalized Image Animator (PIA) - local: api/pipelines/pixart title: PixArt-α - local: api/pipelines/self_attention_guidance title: Self-Attention Guidance - local: api/pipelines/semantic_stable_diffusion title: Semantic Guidance - local: api/pipelines/shap_e title: Shap-E - local: api/pipelines/stable_cascade title: Stable Cascade - sections: - local: api/pipelines/stable_diffusion/overview title: 
Overview - local: api/pipelines/stable_diffusion/text2img title: Text-to-image - local: api/pipelines/stable_diffusion/img2img title: Image-to-image - local: api/pipelines/stable_diffusion/svd title: Image-to-video - local: api/pipelines/stable_diffusion/inpaint title: Inpainting - local: api/pipelines/stable_diffusion/depth2img title: Depth-to-image - local: api/pipelines/stable_diffusion/image_variation title: Image variation - local: api/pipelines/stable_diffusion/stable_diffusion_safe title: Safe Stable Diffusion - local: api/pipelines/stable_diffusion/stable_diffusion_2 title: Stable Diffusion 2 - local: api/pipelines/stable_diffusion/stable_diffusion_xl title: Stable Diffusion XL - local: api/pipelines/stable_diffusion/sdxl_turbo title: SDXL Turbo - local: api/pipelines/stable_diffusion/latent_upscale title: Latent upscaler - local: api/pipelines/stable_diffusion/upscale title: Super-resolution - local: api/pipelines/stable_diffusion/k_diffusion title: K-Diffusion - local: api/pipelines/stable_diffusion/ldm3d_diffusion title: LDM3D Text-to-(RGB, Depth), Text-to-(RGB-pano, Depth-pano), LDM3D Upscaler - local: api/pipelines/stable_diffusion/adapter title: Stable Diffusion T2I-Adapter - local: api/pipelines/stable_diffusion/gligen title: GLIGEN (Grounded Language-to-Image Generation) title: Stable Diffusion - local: api/pipelines/stable_unclip title: Stable unCLIP - local: api/pipelines/text_to_video title: Text-to-video - local: api/pipelines/text_to_video_zero title: Text2Video-Zero - local: api/pipelines/unclip title: unCLIP - local: api/pipelines/unidiffuser title: UniDiffuser - local: api/pipelines/value_guided_sampling title: Value-guided sampling - local: api/pipelines/wuerstchen title: Wuerstchen title: Pipelines - sections: - local: api/schedulers/overview title: Overview - local: api/schedulers/cm_stochastic_iterative title: CMStochasticIterativeScheduler - local: api/schedulers/consistency_decoder title: ConsistencyDecoderScheduler - local: api/schedulers/ddim_inverse title: DDIMInverseScheduler - local: api/schedulers/ddim title: DDIMScheduler - local: api/schedulers/ddpm title: DDPMScheduler - local: api/schedulers/deis title: DEISMultistepScheduler - local: api/schedulers/multistep_dpm_solver_inverse title: DPMSolverMultistepInverse - local: api/schedulers/multistep_dpm_solver title: DPMSolverMultistepScheduler - local: api/schedulers/dpm_sde title: DPMSolverSDEScheduler - local: api/schedulers/singlestep_dpm_solver title: DPMSolverSinglestepScheduler - local: api/schedulers/edm_multistep_dpm_solver title: EDMDPMSolverMultistepScheduler - local: api/schedulers/edm_euler title: EDMEulerScheduler - local: api/schedulers/euler_ancestral title: EulerAncestralDiscreteScheduler - local: api/schedulers/euler title: EulerDiscreteScheduler - local: api/schedulers/heun title: HeunDiscreteScheduler - local: api/schedulers/ipndm title: IPNDMScheduler - local: api/schedulers/stochastic_karras_ve title: KarrasVeScheduler - local: api/schedulers/dpm_discrete_ancestral title: KDPM2AncestralDiscreteScheduler - local: api/schedulers/dpm_discrete title: KDPM2DiscreteScheduler - local: api/schedulers/lcm title: LCMScheduler - local: api/schedulers/lms_discrete title: LMSDiscreteScheduler - local: api/schedulers/pndm title: PNDMScheduler - local: api/schedulers/repaint title: RePaintScheduler - local: api/schedulers/score_sde_ve title: ScoreSdeVeScheduler - local: api/schedulers/score_sde_vp title: ScoreSdeVpScheduler - local: api/schedulers/tcd title: TCDScheduler - local: 
api/schedulers/unipc title: UniPCMultistepScheduler - local: api/schedulers/vq_diffusion title: VQDiffusionScheduler title: Schedulers - sections: - local: api/internal_classes_overview title: Overview - local: api/attnprocessor title: Attention Processor - local: api/activations title: Custom activation functions - local: api/normalization title: Custom normalization layers - local: api/utilities title: Utilities - local: api/image_processor title: VAE Image Processor title: Internal classes title: API
diffusers/docs/source/en/_toctree.yml/0
{ "file_path": "diffusers/docs/source/en/_toctree.yml", "repo_id": "diffusers", "token_count": 6273 }
84
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Consistency Decoder Consistency decoder can be used to decode the latents from the denoising UNet in the [`StableDiffusionPipeline`]. This decoder was introduced in the [DALL-E 3 technical report](https://openai.com/dall-e-3). The original codebase can be found at [openai/consistencydecoder](https://github.com/openai/consistencydecoder). <Tip warning={true}> Inference is only supported for 2 iterations as of now. </Tip> The pipeline could not have been contributed without the help of [madebyollin](https://github.com/madebyollin) and [mrsteyk](https://github.com/mrsteyk) from [this issue](https://github.com/openai/consistencydecoder/issues/1). ## ConsistencyDecoderVAE [[autodoc]] ConsistencyDecoderVAE - all - decode
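For reference, plugging the consistency decoder into a Stable Diffusion pipeline typically looks like the sketch below. The `openai/consistency-decoder` checkpoint id is assumed here; double-check it on the Hub before use.

```python
import torch
from diffusers import ConsistencyDecoderVAE, StableDiffusionPipeline

# Replace the default VAE decoder with the consistency decoder
vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder", torch_dtype=torch.float16)
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", vae=vae, torch_dtype=torch.float16
).to("cuda")

image = pipe("horse", generator=torch.manual_seed(0)).images[0]
image.save("horse.png")
```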
diffusers/docs/source/en/api/models/consistency_decoder_vae.md/0
{ "file_path": "diffusers/docs/source/en/api/models/consistency_decoder_vae.md", "repo_id": "diffusers", "token_count": 383 }
85
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Kandinsky 2.1

Kandinsky 2.1 was created by [Arseniy Shakhmatov](https://github.com/cene555), [Anton Razzhigaev](https://github.com/razzant), [Aleksandr Nikolich](https://github.com/AlexWortega), [Vladimir Arkhipkin](https://github.com/oriBetelgeuse), [Igor Pavlov](https://github.com/boomb0om), [Andrey Kuznetsov](https://github.com/kuznetsoffandrey), and [Denis Dimitrov](https://github.com/denndimitrov).

The description from its GitHub page is:

*Kandinsky 2.1 inherits best practicies from Dall-E 2 and Latent diffusion, while introducing some new ideas. As text and image encoder it uses CLIP model and diffusion image prior (mapping) between latent spaces of CLIP modalities. This approach increases the visual performance of the model and unveils new horizons in blending images and text-guided image manipulation.*

The original codebase can be found at [ai-forever/Kandinsky-2](https://github.com/ai-forever/Kandinsky-2).

<Tip>

Check out the [Kandinsky Community](https://huggingface.co/kandinsky-community) organization on the Hub for the official model checkpoints for tasks like text-to-image, image-to-image, and inpainting.

</Tip>

<Tip>

Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines.

</Tip>

## KandinskyPriorPipeline

[[autodoc]] KandinskyPriorPipeline
	- all
	- __call__
	- interpolate

## KandinskyPipeline

[[autodoc]] KandinskyPipeline
	- all
	- __call__

## KandinskyCombinedPipeline

[[autodoc]] KandinskyCombinedPipeline
	- all
	- __call__

## KandinskyImg2ImgPipeline

[[autodoc]] KandinskyImg2ImgPipeline
	- all
	- __call__

## KandinskyImg2ImgCombinedPipeline

[[autodoc]] KandinskyImg2ImgCombinedPipeline
	- all
	- __call__

## KandinskyInpaintPipeline

[[autodoc]] KandinskyInpaintPipeline
	- all
	- __call__

## KandinskyInpaintCombinedPipeline

[[autodoc]] KandinskyInpaintCombinedPipeline
	- all
	- __call__
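For a quick end-to-end example, the combined text-to-image pipeline can be loaded through the usual `from_pretrained` API. The sketch below assumes the `kandinsky-community/kandinsky-2-1` checkpoint from the Kandinsky Community organization mentioned above.

```python
import torch
from diffusers import AutoPipelineForText2Image

# AutoPipelineForText2Image resolves to the Kandinsky combined pipeline for this checkpoint
pipe = AutoPipelineForText2Image.from_pretrained(
    "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
).to("cuda")

image = pipe("A portrait of a cat wearing a wizard hat, 4k photo").images[0]
image.save("cat_wizard.png")
```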
diffusers/docs/source/en/api/pipelines/kandinsky.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/kandinsky.md", "repo_id": "diffusers", "token_count": 850 }
86
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Stable Cascade

This model is built upon the [Würstchen](https://openreview.net/forum?id=gU58d5QeGv) architecture and its main difference from other models like Stable Diffusion is that it works in a much smaller latent space. Why is this important? The smaller the latent space, the **faster** you can run inference and the **cheaper** the training becomes.

How small is the latent space? Stable Diffusion uses a compression factor of 8, resulting in a 1024x1024 image being encoded to 128x128. Stable Cascade achieves a compression factor of 42, meaning that it is possible to encode a 1024x1024 image to 24x24, while maintaining crisp reconstructions. The text-conditional model is then trained in the highly compressed latent space. Previous versions of this architecture achieved a 16x cost reduction over Stable Diffusion 1.5.

Therefore, this kind of model is well suited for use cases where efficiency is important. Furthermore, all known extensions like finetuning, LoRA, ControlNet, IP-Adapter, LCM etc. are possible with this method as well.

The original codebase can be found at [Stability-AI/StableCascade](https://github.com/Stability-AI/StableCascade).

## Model Overview

Stable Cascade consists of three models: Stage A, Stage B and Stage C, representing a cascade to generate images, hence the name "Stable Cascade".

Stage A & B are used to compress images, similar to the job of the VAE in Stable Diffusion. However, with this setup, a much higher compression of images can be achieved. While the Stable Diffusion models use a spatial compression factor of 8, encoding an image with resolution of 1024 x 1024 to 128 x 128, Stable Cascade achieves a compression factor of 42. This encodes a 1024 x 1024 image to 24 x 24, while being able to accurately decode the image. This comes with the great benefit of cheaper training and inference. Furthermore, Stage C is responsible for generating the small 24 x 24 latents given a text prompt.

The Stage C model operates on the small 24 x 24 latents and denoises the latents conditioned on text prompts. The model is also the largest component in the Cascade pipeline and is meant to be used with the `StableCascadePriorPipeline`.

The Stage B and Stage A models are used with the `StableCascadeDecoderPipeline` and are responsible for generating the final image given the small 24 x 24 latents.

<Tip warning={true}>

There are some restrictions on data types that can be used with the Stable Cascade models. The official checkpoints for the `StableCascadePriorPipeline` do not support the `torch.float16` data type. Please use `torch.bfloat16` instead.

In order to use the `torch.bfloat16` data type with the `StableCascadeDecoderPipeline` you need to have PyTorch 2.2.0 or higher installed. This also means that using the `StableCascadeCombinedPipeline` with `torch.bfloat16` requires PyTorch 2.2.0 or higher, since it calls the `StableCascadeDecoderPipeline` internally.
If it is not possible to install PyTorch 2.2.0 or higher in your environment, the `StableCascadeDecoderPipeline` can be used on its own with the `torch.float16` data type. You can download the full precision or `bf16` variant weights for the pipeline and cast the weights to `torch.float16`. </Tip> ## Usage example ```python import torch from diffusers import StableCascadeDecoderPipeline, StableCascadePriorPipeline prompt = "an image of a shiba inu, donning a spacesuit and helmet" negative_prompt = "" prior = StableCascadePriorPipeline.from_pretrained("stabilityai/stable-cascade-prior", variant="bf16", torch_dtype=torch.bfloat16) decoder = StableCascadeDecoderPipeline.from_pretrained("stabilityai/stable-cascade", variant="bf16", torch_dtype=torch.float16) prior.enable_model_cpu_offload() prior_output = prior( prompt=prompt, height=1024, width=1024, negative_prompt=negative_prompt, guidance_scale=4.0, num_images_per_prompt=1, num_inference_steps=20 ) decoder.enable_model_cpu_offload() decoder_output = decoder( image_embeddings=prior_output.image_embeddings.to(torch.float16), prompt=prompt, negative_prompt=negative_prompt, guidance_scale=0.0, output_type="pil", num_inference_steps=10 ).images[0] decoder_output.save("cascade.png") ``` ## Using the Lite Versions of the Stage B and Stage C models ```python import torch from diffusers import ( StableCascadeDecoderPipeline, StableCascadePriorPipeline, StableCascadeUNet, ) prompt = "an image of a shiba inu, donning a spacesuit and helmet" negative_prompt = "" prior_unet = StableCascadeUNet.from_pretrained("stabilityai/stable-cascade-prior", subfolder="prior_lite") decoder_unet = StableCascadeUNet.from_pretrained("stabilityai/stable-cascade", subfolder="decoder_lite") prior = StableCascadePriorPipeline.from_pretrained("stabilityai/stable-cascade-prior", prior=prior_unet) decoder = StableCascadeDecoderPipeline.from_pretrained("stabilityai/stable-cascade", decoder=decoder_unet) prior.enable_model_cpu_offload() prior_output = prior( prompt=prompt, height=1024, width=1024, negative_prompt=negative_prompt, guidance_scale=4.0, num_images_per_prompt=1, num_inference_steps=20 ) decoder.enable_model_cpu_offload() decoder_output = decoder( image_embeddings=prior_output.image_embeddings, prompt=prompt, negative_prompt=negative_prompt, guidance_scale=0.0, output_type="pil", num_inference_steps=10 ).images[0] decoder_output.save("cascade.png") ``` ## Loading original checkpoints with `from_single_file` Loading the original format checkpoints is supported via `from_single_file` method in the StableCascadeUNet. 
```python import torch from diffusers import ( StableCascadeDecoderPipeline, StableCascadePriorPipeline, StableCascadeUNet, ) prompt = "an image of a shiba inu, donning a spacesuit and helmet" negative_prompt = "" prior_unet = StableCascadeUNet.from_single_file( "https://huggingface.co/stabilityai/stable-cascade/resolve/main/stage_c_bf16.safetensors", torch_dtype=torch.bfloat16 ) decoder_unet = StableCascadeUNet.from_single_file( "https://huggingface.co/stabilityai/stable-cascade/blob/main/stage_b_bf16.safetensors", torch_dtype=torch.bfloat16 ) prior = StableCascadePriorPipeline.from_pretrained("stabilityai/stable-cascade-prior", prior=prior_unet, torch_dtype=torch.bfloat16) decoder = StableCascadeDecoderPipeline.from_pretrained("stabilityai/stable-cascade", decoder=decoder_unet, torch_dtype=torch.bfloat16) prior.enable_model_cpu_offload() prior_output = prior( prompt=prompt, height=1024, width=1024, negative_prompt=negative_prompt, guidance_scale=4.0, num_images_per_prompt=1, num_inference_steps=20 ) decoder.enable_model_cpu_offload() decoder_output = decoder( image_embeddings=prior_output.image_embeddings, prompt=prompt, negative_prompt=negative_prompt, guidance_scale=0.0, output_type="pil", num_inference_steps=10 ).images[0] decoder_output.save("cascade-single-file.png") ``` ## Uses ### Direct Use The model is intended for research purposes for now. Possible research areas and tasks include - Research on generative models. - Safe deployment of models which have the potential to generate harmful content. - Probing and understanding the limitations and biases of generative models. - Generation of artworks and use in design and other artistic processes. - Applications in educational or creative tools. Excluded uses are described below. ### Out-of-Scope Use The model was not trained to be factual or true representations of people or events, and therefore using the model to generate such content is out-of-scope for the abilities of this model. The model should not be used in any way that violates Stability AI's [Acceptable Use Policy](https://stability.ai/use-policy). ## Limitations and Bias ### Limitations - Faces and people in general may not be generated properly. - The autoencoding part of the model is lossy. ## StableCascadeCombinedPipeline [[autodoc]] StableCascadeCombinedPipeline - all - __call__ ## StableCascadePriorPipeline [[autodoc]] StableCascadePriorPipeline - all - __call__ ## StableCascadePriorPipelineOutput [[autodoc]] pipelines.stable_cascade.pipeline_stable_cascade_prior.StableCascadePriorPipelineOutput ## StableCascadeDecoderPipeline [[autodoc]] StableCascadeDecoderPipeline - all - __call__
diffusers/docs/source/en/api/pipelines/stable_cascade.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/stable_cascade.md", "repo_id": "diffusers", "token_count": 2836 }
87
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # DeepCache [DeepCache](https://huggingface.co/papers/2312.00858) accelerates [`StableDiffusionPipeline`] and [`StableDiffusionXLPipeline`] by strategically caching and reusing high-level features while efficiently updating low-level features by taking advantage of the U-Net architecture. Start by installing [DeepCache](https://github.com/horseee/DeepCache): ```bash pip install DeepCache ``` Then load and enable the [`DeepCacheSDHelper`](https://github.com/horseee/DeepCache#usage): ```diff import torch from diffusers import StableDiffusionPipeline pipe = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5', torch_dtype=torch.float16).to("cuda") + from DeepCache import DeepCacheSDHelper + helper = DeepCacheSDHelper(pipe=pipe) + helper.set_params( + cache_interval=3, + cache_branch_id=0, + ) + helper.enable() image = pipe("a photo of an astronaut on a moon").images[0] ``` The `set_params` method accepts two arguments: `cache_interval` and `cache_branch_id`. `cache_interval` means the frequency of feature caching, specified as the number of steps between each cache operation. `cache_branch_id` identifies which branch of the network (ordered from the shallowest to the deepest layer) is responsible for executing the caching processes. Opting for a lower `cache_branch_id` or a larger `cache_interval` can lead to faster inference speed at the expense of reduced image quality (ablation experiments of these two hyperparameters can be found in the [paper](https://arxiv.org/abs/2312.00858)). Once those arguments are set, use the `enable` or `disable` methods to activate or deactivate the `DeepCacheSDHelper`. <div class="flex justify-center"> <img src="https://github.com/horseee/Diffusion_DeepCache/raw/master/static/images/example.png"> </div> You can find more generated samples (original pipeline vs DeepCache) and the corresponding inference latency in the [WandB report](https://wandb.ai/horseee/DeepCache/runs/jwlsqqgt?workspace=user-horseee). The prompts are randomly selected from the [MS-COCO 2017](https://cocodataset.org/#home) dataset. ## Benchmark We tested how much faster DeepCache accelerates [Stable Diffusion v2.1](https://huggingface.co/stabilityai/stable-diffusion-2-1) with 50 inference steps on an NVIDIA RTX A5000, using different configurations for resolution, batch size, cache interval (I), and cache branch (B). 
Each entry is the inference time in seconds, with the speedup over the original pipeline in parentheses.

| **Resolution** | **Batch size** | **Original** | **DeepCache(I=3, B=0)** | **DeepCache(I=5, B=0)** | **DeepCache(I=5, B=1)** |
|----------------|----------------|--------------|-------------------------|-------------------------|-------------------------|
| 512 | 8 | 15.96 | 6.88(2.32x) | 5.03(3.18x) | 7.27(2.20x) |
| | 4 | 8.39 | 3.60(2.33x) | 2.62(3.21x) | 3.75(2.24x) |
| | 1 | 2.61 | 1.12(2.33x) | 0.81(3.24x) | 1.11(2.35x) |
| 768 | 8 | 43.58 | 18.99(2.29x) | 13.96(3.12x) | 21.27(2.05x) |
| | 4 | 22.24 | 9.67(2.30x) | 7.10(3.13x) | 10.74(2.07x) |
| | 1 | 6.33 | 2.72(2.33x) | 1.97(3.21x) | 2.98(2.12x) |
| 1024 | 8 | 101.95 | 45.57(2.24x) | 33.72(3.02x) | 53.00(1.92x) |
| | 4 | 49.25 | 21.86(2.25x) | 16.19(3.04x) | 25.78(1.91x) |
| | 1 | 13.83 | 6.07(2.28x) | 4.43(3.12x) | 7.15(1.93x) |
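The exact numbers depend on the benchmarking setup, but a single configuration is straightforward to sanity-check on your own hardware. The sketch below times the 512x512, batch-size-1 case with and without DeepCache (I=5, B=0) using only the helper calls shown above; it is an illustration rather than the script used for the table, so expect somewhat different absolute latencies.

```py
import time

import torch
from DeepCache import DeepCacheSDHelper
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16
).to("cuda")

def timed_run(prompt, steps=50):
    torch.cuda.synchronize()
    start = time.perf_counter()
    pipe(prompt, height=512, width=512, num_inference_steps=steps)
    torch.cuda.synchronize()
    return time.perf_counter() - start

prompt = "a photo of an astronaut on a moon"
timed_run(prompt)  # warmup run so one-time setup costs don't skew the comparison
baseline = timed_run(prompt)

helper = DeepCacheSDHelper(pipe=pipe)
helper.set_params(cache_interval=5, cache_branch_id=0)  # the I=5, B=0 column
helper.enable()
cached = timed_run(prompt)
helper.disable()

print(f"original: {baseline:.2f}s | DeepCache(I=5, B=0): {cached:.2f}s | speedup: {baseline / cached:.2f}x")
```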
diffusers/docs/source/en/optimization/deepcache.md/0
{ "file_path": "diffusers/docs/source/en/optimization/deepcache.md", "repo_id": "diffusers", "token_count": 1912 }
88
<!--Copyright 2024 Custom Diffusion authors The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Custom Diffusion

[Custom Diffusion](https://huggingface.co/papers/2212.04488) is a training technique for personalizing image generation models. Like Textual Inversion, DreamBooth, and LoRA, Custom Diffusion only requires a few (~4-5) example images. This technique works by only training weights in the cross-attention layers, and it uses a special word to represent the newly learned concept. Custom Diffusion is unique because it can also learn multiple concepts at the same time.

If you're training on a GPU with limited vRAM, you should try enabling xFormers with `--enable_xformers_memory_efficient_attention` for faster training with lower vRAM requirements (16GB). To save even more memory, add `--set_grads_to_none` in the training argument to set the gradients to `None` instead of zero (this option can cause some issues, so if you experience any, try removing this parameter).

This guide will explore the [train_custom_diffusion.py](https://github.com/huggingface/diffusers/blob/main/examples/custom_diffusion/train_custom_diffusion.py) script to help you become more familiar with it, and how you can adapt it for your own use-case.

Before running the script, make sure you install the library from source:

```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install .
```

Navigate to the example folder with the training script and install the required dependencies:

```bash
cd examples/custom_diffusion
pip install -r requirements.txt
pip install clip-retrieval
```

<Tip>

🤗 Accelerate is a library for helping you train on multiple GPUs/TPUs or with mixed-precision. It'll automatically configure your training setup based on your hardware and environment. Take a look at the 🤗 Accelerate [Quick tour](https://huggingface.co/docs/accelerate/quicktour) to learn more.

</Tip>

Initialize an 🤗 Accelerate environment:

```bash
accelerate config
```

To set up a default 🤗 Accelerate environment without choosing any configurations:

```bash
accelerate config default
```

Or if your environment doesn't support an interactive shell, like a notebook, you can use:

```py
from accelerate.utils import write_basic_config

write_basic_config()
```

Lastly, if you want to train a model on your own dataset, take a look at the [Create a dataset for training](create_dataset) guide to learn how to create a dataset that works with the training script.

<Tip>

The following sections highlight parts of the training script that are important for understanding how to modify it, but it doesn't cover every aspect of the script in detail. If you're interested in learning more, feel free to read through the [script](https://github.com/huggingface/diffusers/blob/main/examples/custom_diffusion/train_custom_diffusion.py) and let us know if you have any questions or concerns.

</Tip>

## Script parameters

The training script contains all the parameters to help you customize your training run.
These are found in the [`parse_args()`](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/custom_diffusion/train_custom_diffusion.py#L319) function. The function comes with default values, but you can also set your own values in the training command if you'd like. For example, to change the resolution of the input image:

```bash
accelerate launch train_custom_diffusion.py \
  --resolution=256
```

Many of the basic parameters are described in the [DreamBooth](dreambooth#script-parameters) training guide, so this guide focuses on the parameters unique to Custom Diffusion:

- `--freeze_model`: freezes the key and value parameters in the cross-attention layer; the default is `crossattn_kv`, but you can set it to `crossattn` to train all the parameters in the cross-attention layer
- `--concepts_list`: to learn multiple concepts, provide a path to a JSON file containing the concepts
- `--modifier_token`: a special word used to represent the learned concept
- `--initializer_token`: a word whose embedding is used to initialize the embedding of the `modifier_token`

### Prior preservation loss

Prior preservation loss is a method that uses a model's own generated samples to help it learn how to generate more diverse images. Because these generated sample images belong to the same class as the images you provided, they help the model retain what it has learned about the class and how it can use what it already knows about the class to make new compositions.

Many of the parameters for prior preservation loss are described in the [DreamBooth](dreambooth#prior-preservation-loss) training guide.

### Regularization

Custom Diffusion trains on the target images together with a small set of real images to prevent overfitting. As you can imagine, this can be easy to do when you're only training on a few images! Download 200 real images with `clip_retrieval`. The `class_prompt` should be the same category as the target images. These images are stored in `class_data_dir`.

```bash
python retrieve.py --class_prompt cat --class_data_dir real_reg/samples_cat --num_class_images 200
```

To enable regularization, add the following parameters:

- `--with_prior_preservation`: whether to use prior preservation loss
- `--prior_loss_weight`: controls the influence of the prior preservation loss on the model
- `--real_prior`: whether to use a small set of real images to prevent overfitting

```bash
accelerate launch train_custom_diffusion.py \
  --with_prior_preservation \
  --prior_loss_weight=1.0 \
  --class_data_dir="./real_reg/samples_cat" \
  --class_prompt="cat" \
  --real_prior=True
```

## Training script

<Tip>

A lot of the code in the Custom Diffusion training script is similar to the [DreamBooth](dreambooth#training-script) script. This guide instead focuses on the code that is relevant to Custom Diffusion.
</Tip> The Custom Diffusion training script has two dataset classes: - [`CustomDiffusionDataset`](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/custom_diffusion/train_custom_diffusion.py#L165): preprocesses the images, class images, and prompts for training - [`PromptDataset`](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/custom_diffusion/train_custom_diffusion.py#L148): prepares the prompts for generating class images Next, the `modifier_token` is [added to the tokenizer](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/custom_diffusion/train_custom_diffusion.py#L811), converted to token ids, and the token embeddings are resized to account for the new `modifier_token`. Then the `modifier_token` embeddings are initialized with the embeddings of the `initializer_token`. All parameters in the text encoder are frozen, except for the token embeddings since this is what the model is trying to learn to associate with the concepts. ```py params_to_freeze = itertools.chain( text_encoder.text_model.encoder.parameters(), text_encoder.text_model.final_layer_norm.parameters(), text_encoder.text_model.embeddings.position_embedding.parameters(), ) freeze_params(params_to_freeze) ``` Now you'll need to add the [Custom Diffusion weights](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/custom_diffusion/train_custom_diffusion.py#L911C3-L911C3) to the attention layers. This is a really important step for getting the shape and size of the attention weights correct, and for setting the appropriate number of attention processors in each UNet block. ```py st = unet.state_dict() for name, _ in unet.attn_processors.items(): cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim if name.startswith("mid_block"): hidden_size = unet.config.block_out_channels[-1] elif name.startswith("up_blocks"): block_id = int(name[len("up_blocks.")]) hidden_size = list(reversed(unet.config.block_out_channels))[block_id] elif name.startswith("down_blocks"): block_id = int(name[len("down_blocks.")]) hidden_size = unet.config.block_out_channels[block_id] layer_name = name.split(".processor")[0] weights = { "to_k_custom_diffusion.weight": st[layer_name + ".to_k.weight"], "to_v_custom_diffusion.weight": st[layer_name + ".to_v.weight"], } if train_q_out: weights["to_q_custom_diffusion.weight"] = st[layer_name + ".to_q.weight"] weights["to_out_custom_diffusion.0.weight"] = st[layer_name + ".to_out.0.weight"] weights["to_out_custom_diffusion.0.bias"] = st[layer_name + ".to_out.0.bias"] if cross_attention_dim is not None: custom_diffusion_attn_procs[name] = attention_class( train_kv=train_kv, train_q_out=train_q_out, hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, ).to(unet.device) custom_diffusion_attn_procs[name].load_state_dict(weights) else: custom_diffusion_attn_procs[name] = attention_class( train_kv=False, train_q_out=False, hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, ) del st unet.set_attn_processor(custom_diffusion_attn_procs) custom_diffusion_layers = AttnProcsLayers(unet.attn_processors) ``` The [optimizer](https://github.com/huggingface/diffusers/blob/84cd9e8d01adb47f046b1ee449fc76a0c32dc4e2/examples/custom_diffusion/train_custom_diffusion.py#L982) is initialized to update the cross-attention layer parameters: ```py optimizer = optimizer_class( 
itertools.chain(text_encoder.get_input_embeddings().parameters(), custom_diffusion_layers.parameters()) if args.modifier_token is not None else custom_diffusion_layers.parameters(), lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) ``` In the [training loop](https://github.com/huggingface/diffusers/blob/84cd9e8d01adb47f046b1ee449fc76a0c32dc4e2/examples/custom_diffusion/train_custom_diffusion.py#L1048), it is important to only update the embeddings for the concept you're trying to learn. This means setting the gradients of all the other token embeddings to zero: ```py if args.modifier_token is not None: if accelerator.num_processes > 1: grads_text_encoder = text_encoder.module.get_input_embeddings().weight.grad else: grads_text_encoder = text_encoder.get_input_embeddings().weight.grad index_grads_to_zero = torch.arange(len(tokenizer)) != modifier_token_id[0] for i in range(len(modifier_token_id[1:])): index_grads_to_zero = index_grads_to_zero & ( torch.arange(len(tokenizer)) != modifier_token_id[i] ) grads_text_encoder.data[index_grads_to_zero, :] = grads_text_encoder.data[ index_grads_to_zero, : ].fill_(0) ``` ## Launch the script Once you’ve made all your changes or you’re okay with the default configuration, you’re ready to launch the training script! 🚀 In this guide, you'll download and use these example [cat images](https://www.cs.cmu.edu/~custom-diffusion/assets/data.zip). You can also create and use your own dataset if you want (see the [Create a dataset for training](create_dataset) guide). Set the environment variable `MODEL_NAME` to a model id on the Hub or a path to a local model, `INSTANCE_DIR` to the path where you just downloaded the cat images to, and `OUTPUT_DIR` to where you want to save the model. You'll use `<new1>` as the special word to tie the newly learned embeddings to. The script creates and saves model checkpoints and a pytorch_custom_diffusion_weights.bin file to your repository. To monitor training progress with Weights and Biases, add the `--report_to=wandb` parameter to the training command and specify a validation prompt with `--validation_prompt`. This is useful for debugging and saving intermediate results. 
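The example cat images mentioned above need to be on disk before launching. A minimal sketch for fetching and unpacking them is shown below; the assumption that the archive contains a `cat/` folder that ends up under `./data/` is not verified here, so point `INSTANCE_DIR` at wherever the images actually land.

```py
import urllib.request
import zipfile

# download the example images used throughout this guide
url = "https://www.cs.cmu.edu/~custom-diffusion/assets/data.zip"
urllib.request.urlretrieve(url, "data.zip")

# unpack into ./data so the launch commands below can use INSTANCE_DIR="./data/cat"
with zipfile.ZipFile("data.zip") as archive:
    archive.extractall("./data")
```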
<Tip> If you're training on human faces, the Custom Diffusion team has found the following parameters to work well: - `--learning_rate=5e-6` - `--max_train_steps` can be anywhere between 1000 and 2000 - `--freeze_model=crossattn` - use at least 15-20 images to train with </Tip> <hfoptions id="training-inference"> <hfoption id="single concept"> ```bash export MODEL_NAME="CompVis/stable-diffusion-v1-4" export OUTPUT_DIR="path-to-save-model" export INSTANCE_DIR="./data/cat" accelerate launch train_custom_diffusion.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir=$INSTANCE_DIR \ --output_dir=$OUTPUT_DIR \ --class_data_dir=./real_reg/samples_cat/ \ --with_prior_preservation \ --real_prior \ --prior_loss_weight=1.0 \ --class_prompt="cat" \ --num_class_images=200 \ --instance_prompt="photo of a <new1> cat" \ --resolution=512 \ --train_batch_size=2 \ --learning_rate=1e-5 \ --lr_warmup_steps=0 \ --max_train_steps=250 \ --scale_lr \ --hflip \ --modifier_token "<new1>" \ --validation_prompt="<new1> cat sitting in a bucket" \ --report_to="wandb" \ --push_to_hub ``` </hfoption> <hfoption id="multiple concepts"> Custom Diffusion can also learn multiple concepts if you provide a [JSON](https://github.com/adobe-research/custom-diffusion/blob/main/assets/concept_list.json) file with some details about each concept it should learn. Run clip-retrieval to collect some real images to use for regularization: ```bash pip install clip-retrieval python retrieve.py --class_prompt {} --class_data_dir {} --num_class_images 200 ``` Then you can launch the script: ```bash export MODEL_NAME="CompVis/stable-diffusion-v1-4" export OUTPUT_DIR="path-to-save-model" accelerate launch train_custom_diffusion.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --output_dir=$OUTPUT_DIR \ --concepts_list=./concept_list.json \ --with_prior_preservation \ --real_prior \ --prior_loss_weight=1.0 \ --resolution=512 \ --train_batch_size=2 \ --learning_rate=1e-5 \ --lr_warmup_steps=0 \ --max_train_steps=500 \ --num_class_images=200 \ --scale_lr \ --hflip \ --modifier_token "<new1>+<new2>" \ --push_to_hub ``` </hfoption> </hfoptions> Once training is finished, you can use your new Custom Diffusion model for inference. 
<hfoptions id="training-inference"> <hfoption id="single concept"> ```py import torch from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16, ).to("cuda") pipeline.unet.load_attn_procs("path-to-save-model", weight_name="pytorch_custom_diffusion_weights.bin") pipeline.load_textual_inversion("path-to-save-model", weight_name="<new1>.bin") image = pipeline( "<new1> cat sitting in a bucket", num_inference_steps=100, guidance_scale=6.0, eta=1.0, ).images[0] image.save("cat.png") ``` </hfoption> <hfoption id="multiple concepts"> ```py import torch from huggingface_hub.repocard import RepoCard from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained("sayakpaul/custom-diffusion-cat-wooden-pot", torch_dtype=torch.float16).to("cuda") pipeline.unet.load_attn_procs(model_id, weight_name="pytorch_custom_diffusion_weights.bin") pipeline.load_textual_inversion(model_id, weight_name="<new1>.bin") pipeline.load_textual_inversion(model_id, weight_name="<new2>.bin") image = pipeline( "the <new1> cat sculpture in the style of a <new2> wooden pot", num_inference_steps=100, guidance_scale=6.0, eta=1.0, ).images[0] image.save("multi-subject.png") ``` </hfoption> </hfoptions> ## Next steps Congratulations on training a model with Custom Diffusion! 🎉 To learn more: - Read the [Multi-Concept Customization of Text-to-Image Diffusion](https://www.cs.cmu.edu/~custom-diffusion/) blog post to learn more details about the experimental results from the Custom Diffusion team.
diffusers/docs/source/en/training/custom_diffusion.md/0
{ "file_path": "diffusers/docs/source/en/training/custom_diffusion.md", "repo_id": "diffusers", "token_count": 5470 }
89
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> [[open-in-colab]] # Train a diffusion model Unconditional image generation is a popular application of diffusion models that generates images that look like those in the dataset used for training. Typically, the best results are obtained from finetuning a pretrained model on a specific dataset. You can find many of these checkpoints on the [Hub](https://huggingface.co/search/full-text?q=unconditional-image-generation&type=model), but if you can't find one you like, you can always train your own! This tutorial will teach you how to train a [`UNet2DModel`] from scratch on a subset of the [Smithsonian Butterflies](https://huggingface.co/datasets/huggan/smithsonian_butterflies_subset) dataset to generate your own 🦋 butterflies 🦋. <Tip> 💡 This training tutorial is based on the [Training with 🧨 Diffusers](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb) notebook. For additional details and context about diffusion models like how they work, check out the notebook! </Tip> Before you begin, make sure you have 🤗 Datasets installed to load and preprocess image datasets, and 🤗 Accelerate, to simplify training on any number of GPUs. The following command will also install [TensorBoard](https://www.tensorflow.org/tensorboard) to visualize training metrics (you can also use [Weights & Biases](https://docs.wandb.ai/) to track your training). ```py # uncomment to install the necessary libraries in Colab #!pip install diffusers[training] ``` We encourage you to share your model with the community, and in order to do that, you'll need to login to your Hugging Face account (create one [here](https://hf.co/join) if you don't already have one!). You can login from a notebook and enter your token when prompted. Make sure your token has the write role. ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` Or login in from the terminal: ```bash huggingface-cli login ``` Since the model checkpoints are quite large, install [Git-LFS](https://git-lfs.com/) to version these large files: ```bash !sudo apt -qq install git-lfs !git config --global credential.helper store ``` ## Training configuration For convenience, create a `TrainingConfig` class containing the training hyperparameters (feel free to adjust them): ```py >>> from dataclasses import dataclass >>> @dataclass ... class TrainingConfig: ... image_size = 128 # the generated image resolution ... train_batch_size = 16 ... eval_batch_size = 16 # how many images to sample during evaluation ... num_epochs = 50 ... gradient_accumulation_steps = 1 ... learning_rate = 1e-4 ... lr_warmup_steps = 500 ... save_image_epochs = 10 ... save_model_epochs = 30 ... mixed_precision = "fp16" # `no` for float32, `fp16` for automatic mixed precision ... output_dir = "ddpm-butterflies-128" # the model name locally and on the HF Hub ... push_to_hub = True # whether to upload the saved model to the HF Hub ... 
hub_model_id = "<your-username>/<my-awesome-model>" # the name of the repository to create on the HF Hub ... hub_private_repo = False ... overwrite_output_dir = True # overwrite the old model when re-running the notebook ... seed = 0 >>> config = TrainingConfig() ``` ## Load the dataset You can easily load the [Smithsonian Butterflies](https://huggingface.co/datasets/huggan/smithsonian_butterflies_subset) dataset with the 🤗 Datasets library: ```py >>> from datasets import load_dataset >>> config.dataset_name = "huggan/smithsonian_butterflies_subset" >>> dataset = load_dataset(config.dataset_name, split="train") ``` <Tip> 💡 You can find additional datasets from the [HugGan Community Event](https://huggingface.co/huggan) or you can use your own dataset by creating a local [`ImageFolder`](https://huggingface.co/docs/datasets/image_dataset#imagefolder). Set `config.dataset_name` to the repository id of the dataset if it is from the HugGan Community Event, or `imagefolder` if you're using your own images. </Tip> 🤗 Datasets uses the [`~datasets.Image`] feature to automatically decode the image data and load it as a [`PIL.Image`](https://pillow.readthedocs.io/en/stable/reference/Image.html) which we can visualize: ```py >>> import matplotlib.pyplot as plt >>> fig, axs = plt.subplots(1, 4, figsize=(16, 4)) >>> for i, image in enumerate(dataset[:4]["image"]): ... axs[i].imshow(image) ... axs[i].set_axis_off() >>> fig.show() ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/butterflies_ds.png"/> </div> The images are all different sizes though, so you'll need to preprocess them first: * `Resize` changes the image size to the one defined in `config.image_size`. * `RandomHorizontalFlip` augments the dataset by randomly mirroring the images. * `Normalize` is important to rescale the pixel values into a [-1, 1] range, which is what the model expects. ```py >>> from torchvision import transforms >>> preprocess = transforms.Compose( ... [ ... transforms.Resize((config.image_size, config.image_size)), ... transforms.RandomHorizontalFlip(), ... transforms.ToTensor(), ... transforms.Normalize([0.5], [0.5]), ... ] ... ) ``` Use 🤗 Datasets' [`~datasets.Dataset.set_transform`] method to apply the `preprocess` function on the fly during training: ```py >>> def transform(examples): ... images = [preprocess(image.convert("RGB")) for image in examples["image"]] ... return {"images": images} >>> dataset.set_transform(transform) ``` Feel free to visualize the images again to confirm that they've been resized. Now you're ready to wrap the dataset in a [DataLoader](https://pytorch.org/docs/stable/data#torch.utils.data.DataLoader) for training! ```py >>> import torch >>> train_dataloader = torch.utils.data.DataLoader(dataset, batch_size=config.train_batch_size, shuffle=True) ``` ## Create a UNet2DModel Pretrained models in 🧨 Diffusers are easily created from their model class with the parameters you want. For example, to create a [`UNet2DModel`]: ```py >>> from diffusers import UNet2DModel >>> model = UNet2DModel( ... sample_size=config.image_size, # the target image resolution ... in_channels=3, # the number of input channels, 3 for RGB images ... out_channels=3, # the number of output channels ... layers_per_block=2, # how many ResNet layers to use per UNet block ... block_out_channels=(128, 128, 256, 256, 512, 512), # the number of output channels for each UNet block ... down_block_types=( ... 
"DownBlock2D", # a regular ResNet downsampling block ... "DownBlock2D", ... "DownBlock2D", ... "DownBlock2D", ... "AttnDownBlock2D", # a ResNet downsampling block with spatial self-attention ... "DownBlock2D", ... ), ... up_block_types=( ... "UpBlock2D", # a regular ResNet upsampling block ... "AttnUpBlock2D", # a ResNet upsampling block with spatial self-attention ... "UpBlock2D", ... "UpBlock2D", ... "UpBlock2D", ... "UpBlock2D", ... ), ... ) ``` It is often a good idea to quickly check the sample image shape matches the model output shape: ```py >>> sample_image = dataset[0]["images"].unsqueeze(0) >>> print("Input shape:", sample_image.shape) Input shape: torch.Size([1, 3, 128, 128]) >>> print("Output shape:", model(sample_image, timestep=0).sample.shape) Output shape: torch.Size([1, 3, 128, 128]) ``` Great! Next, you'll need a scheduler to add some noise to the image. ## Create a scheduler The scheduler behaves differently depending on whether you're using the model for training or inference. During inference, the scheduler generates image from the noise. During training, the scheduler takes a model output - or a sample - from a specific point in the diffusion process and applies noise to the image according to a *noise schedule* and an *update rule*. Let's take a look at the [`DDPMScheduler`] and use the `add_noise` method to add some random noise to the `sample_image` from before: ```py >>> import torch >>> from PIL import Image >>> from diffusers import DDPMScheduler >>> noise_scheduler = DDPMScheduler(num_train_timesteps=1000) >>> noise = torch.randn(sample_image.shape) >>> timesteps = torch.LongTensor([50]) >>> noisy_image = noise_scheduler.add_noise(sample_image, noise, timesteps) >>> Image.fromarray(((noisy_image.permute(0, 2, 3, 1) + 1.0) * 127.5).type(torch.uint8).numpy()[0]) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/noisy_butterfly.png"/> </div> The training objective of the model is to predict the noise added to the image. The loss at this step can be calculated by: ```py >>> import torch.nn.functional as F >>> noise_pred = model(noisy_image, timesteps).sample >>> loss = F.mse_loss(noise_pred, noise) ``` ## Train the model By now, you have most of the pieces to start training the model and all that's left is putting everything together. First, you'll need an optimizer and a learning rate scheduler: ```py >>> from diffusers.optimization import get_cosine_schedule_with_warmup >>> optimizer = torch.optim.AdamW(model.parameters(), lr=config.learning_rate) >>> lr_scheduler = get_cosine_schedule_with_warmup( ... optimizer=optimizer, ... num_warmup_steps=config.lr_warmup_steps, ... num_training_steps=(len(train_dataloader) * config.num_epochs), ... ) ``` Then, you'll need a way to evaluate the model. For evaluation, you can use the [`DDPMPipeline`] to generate a batch of sample images and save it as a grid: ```py >>> from diffusers import DDPMPipeline >>> from diffusers.utils import make_image_grid >>> import os >>> def evaluate(config, epoch, pipeline): ... # Sample some images from random noise (this is the backward diffusion process). ... # The default pipeline output type is `List[PIL.Image]` ... images = pipeline( ... batch_size=config.eval_batch_size, ... generator=torch.manual_seed(config.seed), ... ).images ... # Make a grid out of the images ... image_grid = make_image_grid(images, rows=4, cols=4) ... # Save the images ... test_dir = os.path.join(config.output_dir, "samples") ... 
os.makedirs(test_dir, exist_ok=True) ... image_grid.save(f"{test_dir}/{epoch:04d}.png") ``` Now you can wrap all these components together in a training loop with 🤗 Accelerate for easy TensorBoard logging, gradient accumulation, and mixed precision training. To upload the model to the Hub, write a function to get your repository name and information and then push it to the Hub. <Tip> 💡 The training loop below may look intimidating and long, but it'll be worth it later when you launch your training in just one line of code! If you can't wait and want to start generating images, feel free to copy and run the code below. You can always come back and examine the training loop more closely later, like when you're waiting for your model to finish training. 🤗 </Tip> ```py >>> from accelerate import Accelerator >>> from huggingface_hub import create_repo, upload_folder >>> from tqdm.auto import tqdm >>> from pathlib import Path >>> import os >>> def train_loop(config, model, noise_scheduler, optimizer, train_dataloader, lr_scheduler): ... # Initialize accelerator and tensorboard logging ... accelerator = Accelerator( ... mixed_precision=config.mixed_precision, ... gradient_accumulation_steps=config.gradient_accumulation_steps, ... log_with="tensorboard", ... project_dir=os.path.join(config.output_dir, "logs"), ... ) ... if accelerator.is_main_process: ... if config.output_dir is not None: ... os.makedirs(config.output_dir, exist_ok=True) ... if config.push_to_hub: ... repo_id = create_repo( ... repo_id=config.hub_model_id or Path(config.output_dir).name, exist_ok=True ... ).repo_id ... accelerator.init_trackers("train_example") ... # Prepare everything ... # There is no specific order to remember, you just need to unpack the ... # objects in the same order you gave them to the prepare method. ... model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( ... model, optimizer, train_dataloader, lr_scheduler ... ) ... global_step = 0 ... # Now you train the model ... for epoch in range(config.num_epochs): ... progress_bar = tqdm(total=len(train_dataloader), disable=not accelerator.is_local_main_process) ... progress_bar.set_description(f"Epoch {epoch}") ... for step, batch in enumerate(train_dataloader): ... clean_images = batch["images"] ... # Sample noise to add to the images ... noise = torch.randn(clean_images.shape, device=clean_images.device) ... bs = clean_images.shape[0] ... # Sample a random timestep for each image ... timesteps = torch.randint( ... 0, noise_scheduler.config.num_train_timesteps, (bs,), device=clean_images.device, ... dtype=torch.int64 ... ) ... # Add noise to the clean images according to the noise magnitude at each timestep ... # (this is the forward diffusion process) ... noisy_images = noise_scheduler.add_noise(clean_images, noise, timesteps) ... with accelerator.accumulate(model): ... # Predict the noise residual ... noise_pred = model(noisy_images, timesteps, return_dict=False)[0] ... loss = F.mse_loss(noise_pred, noise) ... accelerator.backward(loss) ... accelerator.clip_grad_norm_(model.parameters(), 1.0) ... optimizer.step() ... lr_scheduler.step() ... optimizer.zero_grad() ... progress_bar.update(1) ... logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0], "step": global_step} ... progress_bar.set_postfix(**logs) ... accelerator.log(logs, step=global_step) ... global_step += 1 ... # After each epoch you optionally sample some demo images with evaluate() and save the model ... if accelerator.is_main_process: ... 
pipeline = DDPMPipeline(unet=accelerator.unwrap_model(model), scheduler=noise_scheduler) ... if (epoch + 1) % config.save_image_epochs == 0 or epoch == config.num_epochs - 1: ... evaluate(config, epoch, pipeline) ... if (epoch + 1) % config.save_model_epochs == 0 or epoch == config.num_epochs - 1: ... if config.push_to_hub: ... upload_folder( ... repo_id=repo_id, ... folder_path=config.output_dir, ... commit_message=f"Epoch {epoch}", ... ignore_patterns=["step_*", "epoch_*"], ... ) ... else: ... pipeline.save_pretrained(config.output_dir) ``` Phew, that was quite a bit of code! But you're finally ready to launch the training with 🤗 Accelerate's [`~accelerate.notebook_launcher`] function. Pass the function the training loop, all the training arguments, and the number of processes (you can change this value to the number of GPUs available to you) to use for training: ```py >>> from accelerate import notebook_launcher >>> args = (config, model, noise_scheduler, optimizer, train_dataloader, lr_scheduler) >>> notebook_launcher(train_loop, args, num_processes=1) ``` Once training is complete, take a look at the final 🦋 images 🦋 generated by your diffusion model! ```py >>> import glob >>> sample_images = sorted(glob.glob(f"{config.output_dir}/samples/*.png")) >>> Image.open(sample_images[-1]) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/butterflies_final.png"/> </div> ## Next steps Unconditional image generation is one example of a task that can be trained. You can explore other tasks and training techniques by visiting the [🧨 Diffusers Training Examples](../training/overview) page. Here are some examples of what you can learn: * [Textual Inversion](../training/text_inversion), an algorithm that teaches a model a specific visual concept and integrates it into the generated image. * [DreamBooth](../training/dreambooth), a technique for generating personalized images of a subject given several input images of the subject. * [Guide](../training/text2image) to finetuning a Stable Diffusion model on your own dataset. * [Guide](../training/lora) to using LoRA, a memory-efficient technique for finetuning really large models faster.
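Beyond browsing the saved sample grids, the trained pipeline itself can be reloaded for fresh generations. A short sketch, assuming the pipeline was saved to `config.output_dir` (or that you downloaded your Hub repository to that folder):

```py
>>> from diffusers import DDPMPipeline
>>> from diffusers.utils import make_image_grid

>>> # reload the pipeline written by save_pretrained() during training
>>> pipeline = DDPMPipeline.from_pretrained(config.output_dir).to("cuda")

>>> # sample a new batch of butterflies from pure noise
>>> images = pipeline(batch_size=4, generator=torch.manual_seed(config.seed)).images
>>> make_image_grid(images, rows=1, cols=4)
```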
diffusers/docs/source/en/tutorials/basic_training.md/0
{ "file_path": "diffusers/docs/source/en/tutorials/basic_training.md", "repo_id": "diffusers", "token_count": 6186 }
90
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Image-to-image [[open-in-colab]] Image-to-image is similar to [text-to-image](conditional_image_generation), but in addition to a prompt, you can also pass an initial image as a starting point for the diffusion process. The initial image is encoded to latent space and noise is added to it. Then the latent diffusion model takes a prompt and the noisy latent image, predicts the added noise, and removes the predicted noise from the initial latent image to get the new latent image. Lastly, a decoder decodes the new latent image back into an image. With 🤗 Diffusers, this is as easy as 1-2-3: 1. Load a checkpoint into the [`AutoPipelineForImage2Image`] class; this pipeline automatically handles loading the correct pipeline class based on the checkpoint: ```py import torch from diffusers import AutoPipelineForImage2Image from diffusers.utils import load_image, make_image_grid pipeline = AutoPipelineForImage2Image.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16, use_safetensors=True ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() ``` <Tip> You'll notice throughout the guide, we use [`~DiffusionPipeline.enable_model_cpu_offload`] and [`~DiffusionPipeline.enable_xformers_memory_efficient_attention`], to save memory and increase inference speed. If you're using PyTorch 2.0, then you don't need to call [`~DiffusionPipeline.enable_xformers_memory_efficient_attention`] on your pipeline because it'll already be using PyTorch 2.0's native [scaled-dot product attention](../optimization/torch2.0#scaled-dot-product-attention). </Tip> 2. Load an image to pass to the pipeline: ```py init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png") ``` 3. 
Pass a prompt and image to the pipeline to generate an image: ```py prompt = "cat wizard, gandalf, lord of the rings, detailed, fantasy, cute, adorable, Pixar, Disney, 8k" image = pipeline(prompt, image=init_image).images[0] make_image_grid([init_image, image], rows=1, cols=2) ``` <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">initial image</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">generated image</figcaption> </div> </div> ## Popular models The most popular image-to-image models are [Stable Diffusion v1.5](https://huggingface.co/runwayml/stable-diffusion-v1-5), [Stable Diffusion XL (SDXL)](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0), and [Kandinsky 2.2](https://huggingface.co/kandinsky-community/kandinsky-2-2-decoder). The results from the Stable Diffusion and Kandinsky models vary due to their architecture differences and training process; you can generally expect SDXL to produce higher quality images than Stable Diffusion v1.5. Let's take a quick look at how to use each of these models and compare their results. ### Stable Diffusion v1.5 Stable Diffusion v1.5 is a latent diffusion model initialized from an earlier checkpoint, and further finetuned for 595K steps on 512x512 images. To use this pipeline for image-to-image, you'll need to prepare an initial image to pass to the pipeline. Then you can pass a prompt and the image to the pipeline to generate a new image: ```py import torch from diffusers import AutoPipelineForImage2Image from diffusers.utils import make_image_grid, load_image pipeline = AutoPipelineForImage2Image.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # prepare image url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png" init_image = load_image(url) prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" # pass prompt and image to pipeline image = pipeline(prompt, image=init_image).images[0] make_image_grid([init_image, image], rows=1, cols=2) ``` <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">initial image</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-sdv1.5.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">generated image</figcaption> </div> </div> ### Stable Diffusion XL (SDXL) SDXL is a more powerful version of the Stable Diffusion model. It uses a larger base model, and an additional refiner model to increase the quality of the base model's output. Read the [SDXL](sdxl) guide for a more detailed walkthrough of how to use this model, and other techniques it uses to produce high quality images. 
```py import torch from diffusers import AutoPipelineForImage2Image from diffusers.utils import make_image_grid, load_image pipeline = AutoPipelineForImage2Image.from_pretrained( "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # prepare image url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-sdxl-init.png" init_image = load_image(url) prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" # pass prompt and image to pipeline image = pipeline(prompt, image=init_image, strength=0.5).images[0] make_image_grid([init_image, image], rows=1, cols=2) ``` <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-sdxl-init.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">initial image</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-sdxl.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">generated image</figcaption> </div> </div> ### Kandinsky 2.2 The Kandinsky model is different from the Stable Diffusion models because it uses an image prior model to create image embeddings. The embeddings help create a better alignment between text and images, allowing the latent diffusion model to generate better images. The simplest way to use Kandinsky 2.2 is: ```py import torch from diffusers import AutoPipelineForImage2Image from diffusers.utils import make_image_grid, load_image pipeline = AutoPipelineForImage2Image.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16, use_safetensors=True ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # prepare image url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png" init_image = load_image(url) prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" # pass prompt and image to pipeline image = pipeline(prompt, image=init_image).images[0] make_image_grid([init_image, image], rows=1, cols=2) ``` <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">initial image</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-kandinsky.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">generated image</figcaption> </div> </div> ## Configure pipeline parameters There are several important parameters you can configure in the pipeline that'll affect the image generation process and image quality. Let's take a closer look at what these parameters do and how changing them affects the output. ### Strength `strength` is one of the most important parameters to consider and it'll have a huge impact on your generated image. It determines how much the generated image resembles the initial image. 
In other words: - 📈 a higher `strength` value gives the model more "creativity" to generate an image that's different from the initial image; a `strength` value of 1.0 means the initial image is more or less ignored - 📉 a lower `strength` value means the generated image is more similar to the initial image The `strength` and `num_inference_steps` parameters are related because `strength` determines the number of noise steps to add. For example, if the `num_inference_steps` is 50 and `strength` is 0.8, then this means adding 40 (50 * 0.8) steps of noise to the initial image and then denoising for 40 steps to get the newly generated image. ```py import torch from diffusers import AutoPipelineForImage2Image from diffusers.utils import make_image_grid, load_image pipeline = AutoPipelineForImage2Image.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # prepare image url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png" init_image = load_image(url) prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" # pass prompt and image to pipeline image = pipeline(prompt, image=init_image, strength=0.8).images[0] make_image_grid([init_image, image], rows=1, cols=2) ``` <div class="flex flex-row gap-4"> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-strength-0.4.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">strength = 0.4</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-strength-0.6.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">strength = 0.6</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-strength-1.0.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">strength = 1.0</figcaption> </div> </div> ### Guidance scale The `guidance_scale` parameter is used to control how closely aligned the generated image and text prompt are. A higher `guidance_scale` value means your generated image is more aligned with the prompt, while a lower `guidance_scale` value means your generated image has more space to deviate from the prompt. You can combine `guidance_scale` with `strength` for even more precise control over how expressive the model is. For example, combine a high `strength + guidance_scale` for maximum creativity or use a combination of low `strength` and low `guidance_scale` to generate an image that resembles the initial image but is not as strictly bound to the prompt. 
```py import torch from diffusers import AutoPipelineForImage2Image from diffusers.utils import make_image_grid, load_image pipeline = AutoPipelineForImage2Image.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # prepare image url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png" init_image = load_image(url) prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" # pass prompt and image to pipeline image = pipeline(prompt, image=init_image, guidance_scale=8.0).images[0] make_image_grid([init_image, image], rows=1, cols=2) ``` <div class="flex flex-row gap-4"> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-guidance-0.1.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">guidance_scale = 0.1</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-guidance-3.0.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">guidance_scale = 5.0</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-guidance-7.5.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">guidance_scale = 10.0</figcaption> </div> </div> ### Negative prompt A negative prompt conditions the model to *not* include things in an image, and it can be used to improve image quality or modify an image. For example, you can improve image quality by including negative prompts like "poor details" or "blurry" to encourage the model to generate a higher quality image. Or you can modify an image by specifying things to exclude from an image. 
```py import torch from diffusers import AutoPipelineForImage2Image from diffusers.utils import make_image_grid, load_image pipeline = AutoPipelineForImage2Image.from_pretrained( "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # prepare image url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png" init_image = load_image(url) prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" negative_prompt = "ugly, deformed, disfigured, poor details, bad anatomy" # pass prompt and image to pipeline image = pipeline(prompt, negative_prompt=negative_prompt, image=init_image).images[0] make_image_grid([init_image, image], rows=1, cols=2) ``` <div class="flex flex-row gap-4"> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-negative-1.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">negative_prompt = "ugly, deformed, disfigured, poor details, bad anatomy"</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-negative-2.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">negative_prompt = "jungle"</figcaption> </div> </div> ## Chained image-to-image pipelines There are some other interesting ways you can use an image-to-image pipeline aside from just generating an image (although that is pretty cool too). You can take it a step further and chain it with other pipelines. ### Text-to-image-to-image Chaining a text-to-image and image-to-image pipeline allows you to generate an image from text and use the generated image as the initial image for the image-to-image pipeline. This is useful if you want to generate an image entirely from scratch. For example, let's chain a Stable Diffusion and a Kandinsky model. 
Start by generating an image with the text-to-image pipeline: ```py from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image import torch from diffusers.utils import make_image_grid pipeline = AutoPipelineForText2Image.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() text2image = pipeline("Astronaut in a jungle, cold color palette, muted colors, detailed, 8k").images[0] text2image ``` Now you can pass this generated image to the image-to-image pipeline: ```py pipeline = AutoPipelineForImage2Image.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16, use_safetensors=True ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() image2image = pipeline("Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", image=text2image).images[0] make_image_grid([text2image, image2image], rows=1, cols=2) ``` ### Image-to-image-to-image You can also chain multiple image-to-image pipelines together to create more interesting images. This can be useful for iteratively performing style transfer on an image, generating short GIFs, restoring color to an image, or restoring missing areas of an image. Start by generating an image: ```py import torch from diffusers import AutoPipelineForImage2Image from diffusers.utils import make_image_grid, load_image pipeline = AutoPipelineForImage2Image.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # prepare image url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png" init_image = load_image(url) prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" # pass prompt and image to pipeline image = pipeline(prompt, image=init_image, output_type="latent").images[0] ``` <Tip> It is important to specify `output_type="latent"` in the pipeline to keep all the outputs in latent space to avoid an unnecessary decode-encode step. This only works if the chained pipelines are using the same VAE. 
</Tip> Pass the latent output from this pipeline to the next pipeline to generate an image in a [comic book art style](https://huggingface.co/ogkalu/Comic-Diffusion): ```py pipeline = AutoPipelineForImage2Image.from_pretrained( "ogkalu/Comic-Diffusion", torch_dtype=torch.float16 ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # need to include the token "charliebo artstyle" in the prompt to use this checkpoint image = pipeline("Astronaut in a jungle, charliebo artstyle", image=image, output_type="latent").images[0] ``` Repeat one more time to generate the final image in a [pixel art style](https://huggingface.co/kohbanye/pixel-art-style): ```py pipeline = AutoPipelineForImage2Image.from_pretrained( "kohbanye/pixel-art-style", torch_dtype=torch.float16 ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # need to include the token "pixelartstyle" in the prompt to use this checkpoint image = pipeline("Astronaut in a jungle, pixelartstyle", image=image).images[0] make_image_grid([init_image, image], rows=1, cols=2) ``` ### Image-to-upscaler-to-super-resolution Another way you can chain your image-to-image pipeline is with an upscaler and super-resolution pipeline to really increase the level of details in an image. Start with an image-to-image pipeline: ```py import torch from diffusers import AutoPipelineForImage2Image from diffusers.utils import make_image_grid, load_image pipeline = AutoPipelineForImage2Image.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() # prepare image url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png" init_image = load_image(url) prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" # pass prompt and image to pipeline image_1 = pipeline(prompt, image=init_image, output_type="latent").images[0] ``` <Tip> It is important to specify `output_type="latent"` in the pipeline to keep all the outputs in *latent* space to avoid an unnecessary decode-encode step. This only works if the chained pipelines are using the same VAE. 
</Tip> Chain it to an upscaler pipeline to increase the image resolution: ```py from diffusers import StableDiffusionLatentUpscalePipeline upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained( "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ) upscaler.enable_model_cpu_offload() upscaler.enable_xformers_memory_efficient_attention() image_2 = upscaler(prompt, image=image_1, output_type="latent").images[0] ``` Finally, chain it to a super-resolution pipeline to further enhance the resolution: ```py from diffusers import StableDiffusionUpscalePipeline super_res = StableDiffusionUpscalePipeline.from_pretrained( "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ) super_res.enable_model_cpu_offload() super_res.enable_xformers_memory_efficient_attention() image_3 = super_res(prompt, image=image_2).images[0] make_image_grid([init_image, image_3.resize((512, 512))], rows=1, cols=2) ``` ## Control image generation Trying to generate an image that looks exactly the way you want can be difficult, which is why controlled generation techniques and models are so useful. While you can use the `negative_prompt` to partially control image generation, there are more robust methods like prompt weighting and ControlNets. ### Prompt weighting Prompt weighting allows you to scale the representation of each concept in a prompt. For example, in a prompt like "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", you can choose to increase or decrease the embeddings of "astronaut" and "jungle". The [Compel](https://github.com/damian0815/compel) library provides a simple syntax for adjusting prompt weights and generating the embeddings. You can learn how to create the embeddings in the [Prompt weighting](weighted_prompts) guide. [`AutoPipelineForImage2Image`] has a `prompt_embeds` (and `negative_prompt_embeds` if you're using a negative prompt) parameter where you can pass the embeddings which replaces the `prompt` parameter. ```py from diffusers import AutoPipelineForImage2Image import torch pipeline = AutoPipelineForImage2Image.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() image = pipeline(prompt_embeds=prompt_embeds, # generated from Compel negative_prompt_embeds=negative_prompt_embeds, # generated from Compel image=init_image, ).images[0] ``` ### ControlNet ControlNets provide a more flexible and accurate way to control image generation because you can use an additional conditioning image. The conditioning image can be a canny image, depth map, image segmentation, and even scribbles! Whatever type of conditioning image you choose, the ControlNet generates an image that preserves the information in it. For example, let's condition an image with a depth map to keep the spatial information in the image. 
```py from diffusers.utils import load_image, make_image_grid # prepare image url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png" init_image = load_image(url) init_image = init_image.resize((958, 960)) # resize to depth image dimensions depth_image = load_image("https://huggingface.co/lllyasviel/control_v11f1p_sd15_depth/resolve/main/images/control.png") make_image_grid([init_image, depth_image], rows=1, cols=2) ``` Load a ControlNet model conditioned on depth maps and the [`AutoPipelineForImage2Image`]: ```py from diffusers import ControlNetModel, AutoPipelineForImage2Image import torch controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11f1p_sd15_depth", torch_dtype=torch.float16, variant="fp16", use_safetensors=True) pipeline = AutoPipelineForImage2Image.from_pretrained( "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16, variant="fp16", use_safetensors=True ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() ``` Now generate a new image conditioned on the depth map, initial image, and prompt: ```py prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k" image_control_net = pipeline(prompt, image=init_image, control_image=depth_image).images[0] make_image_grid([init_image, depth_image, image_control_net], rows=1, cols=3) ``` <div class="flex flex-row gap-4"> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">initial image</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/lllyasviel/control_v11f1p_sd15_depth/resolve/main/images/control.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">depth image</figcaption> </div> <div class="flex-1"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-controlnet.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">ControlNet image</figcaption> </div> </div> Let's apply a new [style](https://huggingface.co/nitrosocke/elden-ring-diffusion) to the image generated from the ControlNet by chaining it with an image-to-image pipeline: ```py pipeline = AutoPipelineForImage2Image.from_pretrained( "nitrosocke/elden-ring-diffusion", torch_dtype=torch.float16, ) pipeline.enable_model_cpu_offload() # remove following line if xFormers is not installed or you have PyTorch 2.0 or higher installed pipeline.enable_xformers_memory_efficient_attention() prompt = "elden ring style astronaut in a jungle" # include the token "elden ring style" in the prompt negative_prompt = "ugly, deformed, disfigured, poor details, bad anatomy" image_elden_ring = pipeline(prompt, negative_prompt=negative_prompt, image=image_control_net, strength=0.45, guidance_scale=10.5).images[0] make_image_grid([init_image, depth_image, image_control_net, image_elden_ring], rows=2, cols=2) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-elden-ring.png"> </div> ## Optimize Running diffusion models is computationally expensive and intensive, but with a few optimization tricks, it is entirely possible to run them on consumer and free-tier 
GPUs. For example, you can use a more memory-efficient form of attention such as PyTorch 2.0's [scaled-dot product attention](../optimization/torch2.0#scaled-dot-product-attention) or [xFormers](../optimization/xformers) (use one or the other; there's no need to enable both). You can also offload each model to the GPU only while it is needed, letting the other pipeline components wait on the CPU:

```diff
+ pipeline.enable_model_cpu_offload()
+ pipeline.enable_xformers_memory_efficient_attention()
```

With [`torch.compile`](../optimization/torch2.0#torchcompile), you can boost your inference speed even more by wrapping your UNet with it:

```py
pipeline.unet = torch.compile(pipeline.unet, mode="reduce-overhead", fullgraph=True)
```

To learn more, take a look at the [Reduce memory usage](../optimization/memory) and [Torch 2.0](../optimization/torch2.0) guides.
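To see how these pieces fit together, here is a minimal sketch that applies the offloading, memory-efficient attention, and `torch.compile` tips above to the image-to-image pipeline used throughout this guide. Treat it as a starting point rather than a definitive recipe: depending on your PyTorch version and hardware you may only want a subset of these calls (for example, skip the xFormers call if you rely on PyTorch 2.0's scaled-dot product attention, and note that CPU offloading can reduce the benefit of `torch.compile`).

```py
import torch
from diffusers import AutoPipelineForImage2Image
from diffusers.utils import load_image

pipeline = AutoPipelineForImage2Image.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)

# keep submodules on the CPU until they are needed and enable memory-efficient attention
pipeline.enable_model_cpu_offload()
pipeline.enable_xformers_memory_efficient_attention()

# optionally compile the UNet; the first call is slower while the graph is captured
pipeline.unet = torch.compile(pipeline.unet, mode="reduce-overhead", fullgraph=True)

init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png")
image = pipeline("Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", image=init_image).images[0]
```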
diffusers/docs/source/en/using-diffusers/img2img.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/img2img.md", "repo_id": "diffusers", "token_count": 9649 }
91
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
-->

# Improve image quality with deterministic generation

[[open-in-colab]]

A common way to improve the quality of generated images is *deterministic batch generation*: generate a batch of images and select one image to improve with a more detailed prompt in a second round of inference. The key is to pass a list of [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html#generator)s to the pipeline for batched image generation, and to tie each `Generator` to a seed so you can reuse it for a specific image.

Let's use [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) as an example, and generate several versions of the following prompt:

```py
prompt = "Labrador in the style of Vermeer"
```

Instantiate a pipeline with [`DiffusionPipeline.from_pretrained`] and place it on a GPU (if available):

```python
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import make_image_grid

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True)
pipe = pipe.to("cuda")
```

Now, define four different `Generator`s and assign each `Generator` a seed (`0` to `3`) so you can reuse a `Generator` later for a specific image:

```python
generator = [torch.Generator(device="cuda").manual_seed(i) for i in range(4)]
```

<Tip warning={true}>

To create a batched seed, you should use a list comprehension that iterates over the length specified in `range()`. This creates a unique `Generator` object for each image in the batch. If you only multiply the `Generator` by the batch size, only one `Generator` object is created and it is used sequentially for each image in the batch.

For example, if you want to use the same seed to create 4 identical images:

```py
❌ [torch.Generator().manual_seed(seed)] * 4

✅ [torch.Generator().manual_seed(seed) for _ in range(4)]
```

</Tip>

Generate the images and have a look:

```python
images = pipe(prompt, generator=generator, num_images_per_prompt=4).images
make_image_grid(images, rows=2, cols=2)
```

![img](https://huggingface.co/datasets/diffusers/diffusers-images-docs/resolve/main/reusabe_seeds.jpg)

In this example, you'll improve upon the first image, but in reality, you can use any image you want (even the image with double sets of eyes!). The first image used the `Generator` with seed `0`, so you'll reuse that `Generator` for the second round of inference. To improve the quality of the image, add some additional text to the prompt:

```python
prompt = [prompt + t for t in [", highly realistic", ", artsy", ", trending", ", colorful"]]
generator = [torch.Generator(device="cuda").manual_seed(0) for i in range(4)]
```

Create four generators with seed `0`, and generate another batch of images, all of which should look like the first image from the previous round!
```python images = pipe(prompt, generator=generator).images make_image_grid(images, rows=2, cols=2) ``` ![img](https://huggingface.co/datasets/diffusers/diffusers-images-docs/resolve/main/reusabe_seeds_2.jpg)
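If you repeat this workflow often, a small helper along the following lines can keep the seed bookkeeping in one place. This is only a sketch: the function name and arguments are illustrative choices rather than part of the Diffusers API, and it assumes the `pipe` object created earlier in this guide.

```python
import torch

def refine_with_seed(pipe, base_prompt, seed, suffixes, device="cuda"):
    """Reuse a single seed to generate variations of a refined prompt."""
    prompts = [base_prompt + suffix for suffix in suffixes]
    # one Generator per image, all tied to the same seed so every variation
    # starts from the same initial noise as the image you picked
    generators = [torch.Generator(device=device).manual_seed(seed) for _ in prompts]
    return pipe(prompts, generator=generators).images

# reuse seed 0 (the first image of the previous batch) with more detailed prompts
images = refine_with_seed(pipe, "Labrador in the style of Vermeer", 0, [", highly realistic", ", artsy", ", trending", ", colorful"])
make_image_grid(images, rows=2, cols=2)
```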
diffusers/docs/source/en/using-diffusers/reusing_seeds.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/reusing_seeds.md", "repo_id": "diffusers", "token_count": 1099 }
92
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> [[open-in-colab]] # 簡単な案内 拡散モデル(Diffusion Model)は、ランダムな正規分布から段階的にノイズ除去するように学習され、画像や音声などの目的のものを生成できます。これは生成AIに多大な関心を呼び起こしました。インターネット上で拡散によって生成された画像の例を見たことがあるでしょう。🧨 Diffusersは、誰もが拡散モデルに広くアクセスできるようにすることを目的としたライブラリです。 この案内では、開発者または日常的なユーザーに関わらず、🧨 Diffusers を紹介し、素早く目的のものを生成できるようにします!このライブラリには3つの主要コンポーネントがあります: * [`DiffusionPipeline`]は事前に学習された拡散モデルからサンプルを迅速に生成するために設計された高レベルのエンドツーエンドクラス。 * 拡散システムを作成するためのビルディングブロックとして使用できる、人気のある事前学習された[モデル](./api/models)アーキテクチャとモジュール。 * 多くの異なる[スケジューラ](./api/schedulers/overview) - ノイズがどのようにトレーニングのために加えられるか、そして生成中にどのようにノイズ除去された画像を生成するかを制御するアルゴリズム。 この案内では、[`DiffusionPipeline`]を生成に使用する方法を紹介し、モデルとスケジューラを組み合わせて[`DiffusionPipeline`]の内部で起こっていることを再現する方法を説明します。 <Tip> この案内は🧨 Diffusers [ノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/diffusers_intro.ipynb)を簡略化したもので、すぐに使い始めることができます。Diffusers 🧨のゴール、設計哲学、コアAPIの詳細についてもっと知りたい方は、ノートブックをご覧ください! </Tip> 始める前に必要なライブラリーがすべてインストールされていることを確認してください: ```py # uncomment to install the necessary libraries in Colab #!pip install --upgrade diffusers accelerate transformers ``` - [🤗 Accelerate](https://huggingface.co/docs/accelerate/index)生成とトレーニングのためのモデルのロードを高速化します - [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/overview)ような最も一般的な拡散モデルを実行するには、[🤗 Transformers](https://huggingface.co/docs/transformers/index)が必要です。 ## 拡散パイプライン [`DiffusionPipeline`]は事前学習された拡散システムを生成に使用する最も簡単な方法です。これはモデルとスケジューラを含むエンドツーエンドのシステムです。[`DiffusionPipeline`]は多くの作業/タスクにすぐに使用することができます。また、サポートされているタスクの完全なリストについては[🧨Diffusersの概要](./api/pipelines/overview#diffusers-summary)の表を参照してください。 | **タスク** | **説明** | **パイプライン** |------------------------------|--------------------------------------------------------------------------------------------------------------|-----------------| | Unconditional Image Generation | 正規分布から画像生成 | [unconditional_image_generation](./using-diffusers/unconditional_image_generation) | | Text-Guided Image Generation | 文章から画像生成 | [conditional_image_generation](./using-diffusers/conditional_image_generation) | | Text-Guided Image-to-Image Translation | 画像と文章から新たな画像生成 | [img2img](./using-diffusers/img2img) | | Text-Guided Image-Inpainting | 画像、マスク、および文章が指定された場合に、画像のマスクされた部分を文章をもとに修復 | [inpaint](./using-diffusers/inpaint) | | Text-Guided Depth-to-Image Translation | 文章と深度推定によって構造を保持しながら画像生成 | [depth2img](./using-diffusers/depth2img) | まず、[`DiffusionPipeline`]のインスタンスを作成し、ダウンロードしたいパイプラインのチェックポイントを指定します。 この[`DiffusionPipeline`]はHugging Face Hubに保存されている任意の[チェックポイント](https://huggingface.co/models?library=diffusers&sort=downloads)を使用することができます。 この案内では、[`stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5)チェックポイントでテキストから画像へ生成します。 <Tip warning={true}> [Stable Diffusion]モデルについては、モデルを実行する前にまず[ライセンス](https://huggingface.co/spaces/CompVis/stable-diffusion-license)を注意深くお読みください。🧨 Diffusers は、攻撃的または有害なコンテンツを防ぐために 
[`safety_checker`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py) を実装していますが、モデルの改良された画像生成機能により、潜在的に有害なコンテンツが生成される可能性があります。 </Tip> モデルを[`~DiffusionPipeline.from_pretrained`]メソッドでロードします: ```python >>> from diffusers import DiffusionPipeline >>> pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", use_safetensors=True) ``` [`DiffusionPipeline`]は全てのモデリング、トークン化、スケジューリングコンポーネントをダウンロードしてキャッシュします。Stable Diffusionパイプラインは[`UNet2DConditionModel`]と[`PNDMScheduler`]などで構成されています: ```py >>> pipeline StableDiffusionPipeline { "_class_name": "StableDiffusionPipeline", "_diffusers_version": "0.13.1", ..., "scheduler": [ "diffusers", "PNDMScheduler" ], ..., "unet": [ "diffusers", "UNet2DConditionModel" ], "vae": [ "diffusers", "AutoencoderKL" ] } ``` このモデルはおよそ14億個のパラメータで構成されているため、GPU上でパイプラインを実行することを強く推奨します。 PyTorchと同じように、ジェネレータオブジェクトをGPUに移すことができます: ```python >>> pipeline.to("cuda") ``` これで、文章を `pipeline` に渡して画像を生成し、ノイズ除去された画像にアクセスできるようになりました。デフォルトでは、画像出力は[`PIL.Image`](https://pillow.readthedocs.io/en/stable/reference/Image.html?highlight=image#the-image-class)オブジェクトでラップされます。 ```python >>> image = pipeline("An image of a squirrel in Picasso style").images[0] >>> image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/image_of_squirrel_painting.png"/> </div> `save`関数で画像を保存できます: ```python >>> image.save("image_of_squirrel_painting.png") ``` ### ローカルパイプライン ローカルでパイプラインを使用することもできます。唯一の違いは、最初にウェイトをダウンロードする必要があることです: ```bash !git lfs install !git clone https://huggingface.co/runwayml/stable-diffusion-v1-5 ``` 保存したウェイトをパイプラインにロードします: ```python >>> pipeline = DiffusionPipeline.from_pretrained("./stable-diffusion-v1-5", use_safetensors=True) ``` これで、上のセクションと同じようにパイプラインを動かすことができます。 ### スケジューラの交換 スケジューラーによって、ノイズ除去のスピードや品質のトレードオフが異なります。どれが自分に最適かを知る最善の方法は、実際に試してみることです!Diffusers 🧨の主な機能の1つは、スケジューラを簡単に切り替えることができることです。例えば、デフォルトの[`PNDMScheduler`]を[`EulerDiscreteScheduler`]に置き換えるには、[`~diffusers.ConfigMixin.from_config`]メソッドでロードできます: ```py >>> from diffusers import EulerDiscreteScheduler >>> pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", use_safetensors=True) >>> pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config) ``` 新しいスケジューラを使って画像を生成し、その違いに気づくかどうか試してみてください! 
次のセクションでは、[`DiffusionPipeline`]を構成するコンポーネント(モデルとスケジューラ)を詳しく見て、これらのコンポーネントを使って猫の画像を生成する方法を学びます。 ## モデル ほとんどのモデルはノイズの多いサンプルを取り、各タイムステップで*残りのノイズ*を予測します(他のモデルは前のサンプルを直接予測するか、速度または[`v-prediction`](https://github.com/huggingface/diffusers/blob/5e5ce13e2f89ac45a0066cb3f369462a3cf1d9ef/src/diffusers/schedulers/scheduling_ddim.py#L110)を予測するように学習します)。モデルを混ぜて他の拡散システムを作ることもできます。 モデルは[`~ModelMixin.from_pretrained`]メソッドで開始されます。このメソッドはモデルをローカルにキャッシュするので、次にモデルをロードするときに高速になります。この案内では、[`UNet2DModel`]をロードします。これは基本的な画像生成モデルであり、猫画像で学習されたチェックポイントを使います: ```py >>> from diffusers import UNet2DModel >>> repo_id = "google/ddpm-cat-256" >>> model = UNet2DModel.from_pretrained(repo_id, use_safetensors=True) ``` モデルのパラメータにアクセスするには、`model.config` を呼び出せます: ```py >>> model.config ``` モデル構成は🧊凍結🧊されたディクショナリであり、モデル作成後にこれらのパラメー タを変更することはできません。これは意図的なもので、最初にモデル・アーキテクチャを定義するために使用されるパラメータが同じままであることを保証します。他のパラメータは生成中に調整することができます。 最も重要なパラメータは以下の通りです: * sample_size`: 入力サンプルの高さと幅。 * `in_channels`: 入力サンプルの入力チャンネル数。 * down_block_types` と `up_block_types`: UNet アーキテクチャを作成するために使用されるダウンサンプリングブロックとアップサンプリングブロックのタイプ。 * block_out_channels`: ダウンサンプリングブロックの出力チャンネル数。逆順でアップサンプリングブロックの入力チャンネル数にも使用されます。 * layer_per_block`: 各 UNet ブロックに含まれる ResNet ブロックの数。 このモデルを生成に使用するには、ランダムな画像の形の正規分布を作成します。このモデルは複数のランダムな正規分布を受け取ることができるため`batch`軸を入れます。入力チャンネル数に対応する`channel`軸も必要です。画像の高さと幅に対応する`sample_size`軸を持つ必要があります: ```py >>> import torch >>> torch.manual_seed(0) >>> noisy_sample = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) >>> noisy_sample.shape torch.Size([1, 3, 256, 256]) ``` 画像生成には、ノイズの多い画像と `timestep` をモデルに渡します。`timestep`は入力画像がどの程度ノイズが多いかを示します。これは、モデルが拡散プロセスにおける自分の位置を決定するのに役立ちます。モデルの出力を得るには `sample` メソッドを使用します: ```py >>> with torch.no_grad(): ... noisy_residual = model(sample=noisy_sample, timestep=2).sample ``` しかし、実際の例を生成するには、ノイズ除去プロセスをガイドするスケジューラが必要です。次のセクションでは、モデルをスケジューラと組み合わせる方法を学びます。 ## スケジューラ スケジューラは、モデルの出力(この場合は `noisy_residual` )が与えられたときに、ノイズの多いサンプルからノイズの少ないサンプルへの移行を管理します。 <Tip> 🧨 Diffusersは拡散システムを構築するためのツールボックスです。[`DiffusionPipeline`]は事前に構築された拡散システムを使い始めるのに便利な方法ですが、独自のモデルとスケジューラコンポーネントを個別に選択してカスタム拡散システムを構築することもできます。 </Tip> この案内では、[`DDPMScheduler`]を[`~diffusers.ConfigMixin.from_config`]メソッドでインスタンス化します: ```py >>> from diffusers import DDPMScheduler >>> scheduler = DDPMScheduler.from_config(repo_id) >>> scheduler DDPMScheduler { "_class_name": "DDPMScheduler", "_diffusers_version": "0.13.1", "beta_end": 0.02, "beta_schedule": "linear", "beta_start": 0.0001, "clip_sample": true, "clip_sample_range": 1.0, "num_train_timesteps": 1000, "prediction_type": "epsilon", "trained_betas": null, "variance_type": "fixed_small" } ``` <Tip> 💡 スケジューラがどのようにコンフィギュレーションからインスタンス化されるかに注目してください。モデルとは異なり、スケジューラは学習可能な重みを持たず、パラメーターを持ちません! </Tip> 最も重要なパラメータは以下の通りです: * num_train_timesteps`: ノイズ除去処理の長さ、言い換えれば、ランダムな正規分布をデータサンプルに処理するのに必要なタイムステップ数です。 * `beta_schedule`: 生成とトレーニングに使用するノイズスケジュールのタイプ。 * `beta_start` と `beta_end`: ノイズスケジュールの開始値と終了値。 少しノイズの少ない画像を予測するには、スケジューラの [`~diffusers.DDPMScheduler.step`] メソッドに以下を渡します: モデルの出力、`timestep`、現在の `sample`。 ```py >>> less_noisy_sample = scheduler.step(model_output=noisy_residual, timestep=2, sample=noisy_sample).prev_sample >>> less_noisy_sample.shape ``` `less_noisy_sample`は次の`timestep`に渡すことができ、そこでさらにノイズが少なくなります! では、すべてをまとめて、ノイズ除去プロセス全体を視覚化してみましょう。 まず、ノイズ除去された画像を後処理して `PIL.Image` として表示する関数を作成します: ```py >>> import PIL.Image >>> import numpy as np >>> def display_sample(sample, i): ... image_processed = sample.cpu().permute(0, 2, 3, 1) ... 
image_processed = (image_processed + 1.0) * 127.5 ... image_processed = image_processed.numpy().astype(np.uint8) ... image_pil = PIL.Image.fromarray(image_processed[0]) ... display(f"Image at step {i}") ... display(image_pil) ``` ノイズ除去処理を高速化するために入力とモデルをGPUに移します: ```py >>> model.to("cuda") >>> noisy_sample = noisy_sample.to("cuda") ``` ここで、ノイズが少なくなったサンプルの残りのノイズを予測するノイズ除去ループを作成し、スケジューラを使ってさらにノイズの少ないサンプルを計算します: ```py >>> import tqdm >>> sample = noisy_sample >>> for i, t in enumerate(tqdm.tqdm(scheduler.timesteps)): ... # 1. predict noise residual ... with torch.no_grad(): ... residual = model(sample, t).sample ... # 2. compute less noisy image and set x_t -> x_t-1 ... sample = scheduler.step(residual, t, sample).prev_sample ... # 3. optionally look at image ... if (i + 1) % 50 == 0: ... display_sample(sample, i + 1) ``` 何もないところから猫が生成されるのを、座って見てください!😻 <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/diffusion-quicktour.png"/> </div> ## 次のステップ このクイックツアーで、🧨ディフューザーを使ったクールな画像をいくつか作成できたと思います!次のステップとして * モデルをトレーニングまたは微調整については、[training](./tutorials/basic_training)チュートリアルを参照してください。 * 様々な使用例については、公式およびコミュニティの[training or finetuning scripts](https://github.com/huggingface/diffusers/tree/main/examples#-diffusers-examples)の例を参照してください。 * スケジューラのロード、アクセス、変更、比較については[Using different Schedulers](./using-diffusers/schedulers)ガイドを参照してください。 * プロンプトエンジニアリング、スピードとメモリの最適化、より高品質な画像を生成するためのヒントやトリックについては、[Stable Diffusion](./stable_diffusion)ガイドを参照してください。 * 🧨 Diffusers の高速化については、最適化された [PyTorch on a GPU](./optimization/fp16)のガイド、[Stable Diffusion on Apple Silicon (M1/M2)](./optimization/mps)と[ONNX Runtime](./optimization/onnx)を参照してください。
diffusers/docs/source/ja/quicktour.md/0
{ "file_path": "diffusers/docs/source/ja/quicktour.md", "repo_id": "diffusers", "token_count": 7835 }
93
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
-->

# Token Merging

Token Merging (introduced in [Token Merging: Your ViT But Faster](https://arxiv.org/abs/2210.09461)) works by progressively merging redundant tokens/patches in the forward pass of a Transformer-based network, which reduces the inference latency of the underlying network.

After Token Merging (ToMe) was released, the authors published [Token Merging for Fast Stable Diffusion](https://arxiv.org/abs/2303.17604), which introduced a version of ToMe that is more compatible with Stable Diffusion. With ToMe you can smoothly reduce the inference latency of a [`DiffusionPipeline`]. This document covers how to apply ToMe to the [`StableDiffusionPipeline`], the expected speedups, and the qualitative aspects of using ToMe with the [`StableDiffusionPipeline`].

## Using ToMe

The authors of ToMe released a convenient Python library called [`tomesd`](https://github.com/dbolya/tomesd) that lets you apply ToMe to a [`DiffusionPipeline`] like so:

```diff
from diffusers import StableDiffusionPipeline
import tomesd

pipeline = StableDiffusionPipeline.from_pretrained(
      "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
+ tomesd.apply_patch(pipeline, ratio=0.5)

image = pipeline("a photo of an astronaut riding a horse on mars").images[0]
```

And that's it! `tomesd.apply_patch()` exposes [a number of arguments](https://github.com/dbolya/tomesd#usage) that let you balance pipeline inference speed against the quality of the generated tokens. The most important of these arguments is `ratio`, which controls the number of tokens that are merged during the forward pass. For more details on `tomesd`, refer to the repository (https://github.com/dbolya/tomesd) and [the paper](https://arxiv.org/abs/2303.17604).

## Benchmarking `tomesd` with `StableDiffusionPipeline`

We benchmarked the impact of using `tomesd` on [`StableDiffusionPipeline`] along with [xformers](https://huggingface.co/docs/diffusers/optimization/xformers) across different image resolutions. We used A100 and V100 as our test GPU devices with the following development environment (with Python 3.8.5):

```bash
- `diffusers` version: 0.15.1
- Python version: 3.8.16
- PyTorch version (GPU?): 1.13.1+cu116 (True)
- Huggingface_hub version: 0.13.2
- Transformers version: 4.27.2
- Accelerate version: 0.18.0
- xFormers version: 0.0.16
- tomesd version: 0.1.2
```

We used this script for benchmarking: [https://gist.github.com/sayakpaul/27aec6bca7eb7b0e0aa4112205850335](https://gist.github.com/sayakpaul/27aec6bca7eb7b0e0aa4112205850335).
The results are as follows:

### A100

| Resolution | Batch size | Vanilla | ToMe | ToMe + xFormers | ToMe speedup (%) | ToMe + xFormers speedup (%) |
| --- | --- | --- | --- | --- | --- | --- |
| 512 | 10 | 6.88 | 5.26 | 4.69 | 23.54651163 | 31.83139535 |
| | | | | | | |
| 768 | 10 | OOM | 14.71 | 11 | | |
| | 8 | OOM | 11.56 | 8.84 | | |
| | 4 | OOM | 5.98 | 4.66 | | |
| | 2 | 4.99 | 3.24 | 3.1 | 35.07014028 | 37.8757515 |
| | 1 | 3.29 | 2.24 | 2.03 | 31.91489362 | 38.29787234 |
| | | | | | | |
| 1024 | 10 | OOM | OOM | OOM | | |
| | 8 | OOM | OOM | OOM | | |
| | 4 | OOM | 12.51 | 9.09 | | |
| | 2 | OOM | 6.52 | 4.96 | | |
| | 1 | 6.4 | 3.61 | 2.81 | 43.59375 | 56.09375 |

***The results are in seconds. Speedups are calculated with respect to `Vanilla`.***

### V100

| Resolution | Batch size | Vanilla | ToMe | ToMe + xFormers | ToMe speedup (%) | ToMe + xFormers speedup (%) |
| --- | --- | --- | --- | --- | --- | --- |
| 512 | 10 | OOM | 10.03 | 9.29 | | |
| | 8 | OOM | 8.05 | 7.47 | | |
| | 4 | 5.7 | 4.3 | 3.98 | 24.56140351 | 30.1754386 |
| | 2 | 3.14 | 2.43 | 2.27 | 22.61146497 | 27.70700637 |
| | 1 | 1.88 | 1.57 | 1.57 | 16.4893617 | 16.4893617 |
| | | | | | | |
| 768 | 10 | OOM | OOM | 23.67 | | |
| | 8 | OOM | OOM | 18.81 | | |
| | 4 | OOM | 11.81 | 9.7 | | |
| | 2 | OOM | 6.27 | 5.2 | | |
| | 1 | 5.43 | 3.38 | 2.82 | 37.75322284 | 48.06629834 |
| | | | | | | |
| 1024 | 10 | OOM | OOM | OOM | | |
| | 8 | OOM | OOM | OOM | | |
| | 4 | OOM | OOM | 19.35 | | |
| | 2 | OOM | 13 | 10.78 | | |
| | 1 | OOM | 6.66 | 5.54 | | |

As the tables above show, the speedup from `tomesd` becomes more pronounced at higher image resolutions. It is also interesting that `tomesd` makes it possible to run the pipeline at higher resolutions such as 1024x1024. You may be able to speed up inference even further with [`torch.compile()`](https://huggingface.co/docs/diffusers/optimization/torch2.0).

## Quality

As reported in [the paper](https://arxiv.org/abs/2303.17604), ToMe can preserve the quality of the generated images to a great extent while speeding up inference. By increasing the `ratio`, it is possible to further speed up inference, but that might come at the cost of a deterioration in the image quality.

To test the quality of the generated samples using our setup, we sampled a few prompts from the "Parti Prompts" (introduced in [Parti](https://parti.research.google/)) and performed inference with the [`StableDiffusionPipeline`] in the following settings:

- Vanilla [`StableDiffusionPipeline`]
- [`StableDiffusionPipeline`] + ToMe
- [`StableDiffusionPipeline`] + ToMe + xformers

We didn't notice any significant decrease in the quality of the generated samples. Here are some samples:

![tome-samples](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/tome/tome_samples.png)

You can check out the generated samples [here](https://wandb.ai/sayakpaul/tomesd-results/runs/23j4bj3i?workspace=). We used [this script](https://gist.github.com/sayakpaul/8cac98d7f22399085a060992f411ecbd) to run this experiment.
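If you want a rough sense of the speedup on your own hardware before running the full benchmarking script above, a minimal timing sketch like the one below can help. The prompt, `ratio`, step count, and number of timed runs are arbitrary choices, and the numbers you get will vary with your GPU and software versions.

```py
import time

import torch
import tomesd
from diffusers import StableDiffusionPipeline

pipeline = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

def time_pipeline(pipe, prompt, runs=3):
    # warmup run so one-time CUDA initialization doesn't skew the measurement
    pipe(prompt, num_inference_steps=25)
    torch.cuda.synchronize()
    start = time.perf_counter()
    for _ in range(runs):
        pipe(prompt, num_inference_steps=25)
    torch.cuda.synchronize()
    return (time.perf_counter() - start) / runs

prompt = "a photo of an astronaut riding a horse on mars"
baseline = time_pipeline(pipeline, prompt)

# patch the pipeline in place and measure again
tomesd.apply_patch(pipeline, ratio=0.5)
patched = time_pipeline(pipeline, prompt)

print(f"vanilla: {baseline:.2f}s per image, ToMe: {patched:.2f}s per image")
```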
diffusers/docs/source/ko/optimization/tome.md/0
{ "file_path": "diffusers/docs/source/ko/optimization/tome.md", "repo_id": "diffusers", "token_count": 4366 }
94
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Unconditional 이미지 생성 unconditional 이미지 생성은 text-to-image 또는 image-to-image 모델과 달리 텍스트나 이미지에 대한 조건이 없이 학습 데이터 분포와 유사한 이미지만을 생성합니다. <iframe src="https://stevhliu-ddpm-butterflies-128.hf.space" frameborder="0" width="850" height="550" ></iframe> 이 가이드에서는 기존에 존재하던 데이터셋과 자신만의 커스텀 데이터셋에 대해 unconditional image generation 모델을 훈련하는 방법을 설명합니다. 훈련 세부 사항에 대해 더 자세히 알고 싶다면 unconditional image generation을 위한 모든 학습 스크립트를 [여기](https://github.com/huggingface/diffusers/tree/main/examples/unconditional_image_generation)에서 확인할 수 있습니다. 스크립트를 실행하기 전, 먼저 의존성 라이브러리들을 설치해야 합니다. ```bash pip install diffusers[training] accelerate datasets ``` 그 다음 🤗 [Accelerate](https://github.com/huggingface/accelerate/) 환경을 초기화합니다. ```bash accelerate config ``` 별도의 설정 없이 기본 설정으로 🤗 [Accelerate](https://github.com/huggingface/accelerate/) 환경을 초기화해봅시다. ```bash accelerate config default ``` 노트북과 같은 대화형 쉘을 지원하지 않는 환경의 경우, 다음과 같이 사용해볼 수도 있습니다. ```py from accelerate.utils import write_basic_config write_basic_config() ``` ## 모델을 허브에 업로드하기 학습 스크립트에 다음 인자를 추가하여 허브에 모델을 업로드할 수 있습니다. ```bash --push_to_hub ``` ## 체크포인트 저장하고 불러오기 훈련 중 문제가 발생할 경우를 대비하여 체크포인트를 정기적으로 저장하는 것이 좋습니다. 체크포인트를 저장하려면 학습 스크립트에 다음 인자를 전달합니다: ```bash --checkpointing_steps=500 ``` 전체 훈련 상태는 500스텝마다 `output_dir`의 하위 폴더에 저장되며, 학습 스크립트에 `--resume_from_checkpoint` 인자를 전달함으로써 체크포인트를 불러오고 훈련을 재개할 수 있습니다. ```bash --resume_from_checkpoint="checkpoint-1500" ``` ## 파인튜닝 이제 학습 스크립트를 시작할 준비가 되었습니다! `--dataset_name` 인자에 파인튜닝할 데이터셋 이름을 지정한 다음, `--output_dir` 인자에 지정된 경로로 저장합니다. 본인만의 데이터셋를 사용하려면, [학습용 데이터셋 만들기](create_dataset) 가이드를 참조하세요. 학습 스크립트는 `diffusion_pytorch_model.bin` 파일을 생성하고, 그것을 당신의 리포지토리에 저장합니다. <Tip> 💡 전체 학습은 V100 GPU 4개를 사용할 경우, 2시간이 소요됩니다. </Tip> 예를 들어, [Oxford Flowers](https://huggingface.co/datasets/huggan/flowers-102-categories) 데이터셋을 사용해 파인튜닝할 경우: ```bash accelerate launch train_unconditional.py \ --dataset_name="huggan/flowers-102-categories" \ --resolution=64 \ --output_dir="ddpm-ema-flowers-64" \ --train_batch_size=16 \ --num_epochs=100 \ --gradient_accumulation_steps=1 \ --learning_rate=1e-4 \ --lr_warmup_steps=500 \ --mixed_precision=no \ --push_to_hub ``` <div class="flex justify-center"> <img src="https://user-images.githubusercontent.com/26864830/180248660-a0b143d0-b89a-42c5-8656-2ebf6ece7e52.png"/> </div> [Pokemon](https://huggingface.co/datasets/huggan/pokemon) 데이터셋을 사용할 경우: ```bash accelerate launch train_unconditional.py \ --dataset_name="huggan/pokemon" \ --resolution=64 \ --output_dir="ddpm-ema-pokemon-64" \ --train_batch_size=16 \ --num_epochs=100 \ --gradient_accumulation_steps=1 \ --learning_rate=1e-4 \ --lr_warmup_steps=500 \ --mixed_precision=no \ --push_to_hub ``` <div class="flex justify-center"> <img src="https://user-images.githubusercontent.com/26864830/180248200-928953b4-db38-48db-b0c6-8b740fe6786f.png"/> </div> ### 여러개의 GPU로 훈련하기 `accelerate`을 사용하면 원활한 다중 GPU 훈련이 가능합니다. 
`accelerate`을 사용하여 분산 훈련을 실행하려면 [여기](https://huggingface.co/docs/accelerate/basic_tutorials/launch) 지침을 따르세요. 다음은 명령어 예제입니다. ```bash accelerate launch --mixed_precision="fp16" --multi_gpu train_unconditional.py \ --dataset_name="huggan/pokemon" \ --resolution=64 --center_crop --random_flip \ --output_dir="ddpm-ema-pokemon-64" \ --train_batch_size=16 \ --num_epochs=100 \ --gradient_accumulation_steps=1 \ --use_ema \ --learning_rate=1e-4 \ --lr_warmup_steps=500 \ --mixed_precision="fp16" \ --logger="wandb" \ --push_to_hub ```
diffusers/docs/source/ko/training/unconditional_training.md/0
{ "file_path": "diffusers/docs/source/ko/training/unconditional_training.md", "repo_id": "diffusers", "token_count": 3095 }
95
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # 재현 가능한 파이프라인 생성하기 [[open-in-colab]] 재현성은 테스트, 결과 재현, 그리고 [이미지 퀄리티 높이기](resuing_seeds)에서 중요합니다. 그러나 diffusion 모델의 무작위성은 매번 모델이 돌아갈 때마다 파이프라인이 다른 이미지를 생성할 수 있도록 하는 이유로 필요합니다. 플랫폼 간에 정확하게 동일한 결과를 얻을 수는 없지만, 특정 허용 범위 내에서 릴리스 및 플랫폼 간에 결과를 재현할 수는 있습니다. 그럼에도 diffusion 파이프라인과 체크포인트에 따라 허용 오차가 달라집니다. diffusion 모델에서 무작위성의 원천을 제어하거나 결정론적 알고리즘을 사용하는 방법을 이해하는 것이 중요한 이유입니다. <Tip> 💡 Pytorch의 [재현성에 대한 선언](https://pytorch.org/docs/stable/notes/randomness.html)를 꼭 읽어보길 추천합니다: > 완전하게 재현가능한 결과는 Pytorch 배포, 개별적인 커밋, 혹은 다른 플랫폼들에서 보장되지 않습니다. > 또한, 결과는 CPU와 GPU 실행간에 심지어 같은 seed를 사용할 때도 재현 가능하지 않을 수 있습니다. </Tip> ## 무작위성 제어하기 추론에서, 파이프라인은 노이즈를 줄이기 위해 가우시안 노이즈를 생성하거나 스케줄링 단계에 노이즈를 더하는 등의 랜덤 샘플링 실행에 크게 의존합니다, [DDIMPipeline](https://huggingface.co/docs/diffusers/v0.18.0/en/api/pipelines/ddim#diffusers.DDIMPipeline)에서 두 추론 단계 이후의 텐서 값을 살펴보세요: ```python from diffusers import DDIMPipeline import numpy as np model_id = "google/ddpm-cifar10-32" # 모델과 스케줄러를 불러오기 ddim = DDIMPipeline.from_pretrained(model_id) # 두 개의 단계에 대해서 파이프라인을 실행하고 numpy tensor로 값을 반환하기 image = ddim(num_inference_steps=2, output_type="np").images print(np.abs(image).sum()) ``` 위의 코드를 실행하면 하나의 값이 나오지만, 다시 실행하면 다른 값이 나옵니다. 무슨 일이 일어나고 있는 걸까요? 파이프라인이 실행될 때마다, [torch.randn](https://pytorch.org/docs/stable/generated/torch.randn.html)은 단계적으로 노이즈 제거되는 가우시안 노이즈가 생성하기 위한 다른 랜덤 seed를 사용합니다. 그러나 동일한 이미지를 안정적으로 생성해야 하는 경우에는 CPU에서 파이프라인을 실행하는지 GPU에서 실행하는지에 따라 달라집니다. ### CPU CPU에서 재현 가능한 결과를 생성하려면, PyTorch [Generator](https://pytorch.org/docs/stable/generated/torch.randn.html)로 seed를 고정합니다: ```python import torch from diffusers import DDIMPipeline import numpy as np model_id = "google/ddpm-cifar10-32" # 모델과 스케줄러 불러오기 ddim = DDIMPipeline.from_pretrained(model_id) # 재현성을 위해 generator 만들기 generator = torch.Generator(device="cpu").manual_seed(0) # 두 개의 단계에 대해서 파이프라인을 실행하고 numpy tensor로 값을 반환하기 image = ddim(num_inference_steps=2, output_type="np", generator=generator).images print(np.abs(image).sum()) ``` 이제 위의 코드를 실행하면 seed를 가진 `Generator` 객체가 파이프라인의 모든 랜덤 함수에 전달되므로 항상 `1491.1711` 값이 출력됩니다. 특정 하드웨어 및 PyTorch 버전에서 이 코드 예제를 실행하면 동일하지는 않더라도 유사한 결과를 얻을 수 있습니다. <Tip> 💡 처음에는 시드를 나타내는 정수값 대신에 `Generator` 개체를 파이프라인에 전달하는 것이 약간 비직관적일 수 있지만, `Generator`는 순차적으로 여러 파이프라인에 전달될 수 있는 \랜덤상태\이기 때문에 PyTorch에서 확률론적 모델을 다룰 때 권장되는 설계입니다. </Tip> ### GPU 예를 들면, GPU 상에서 같은 코드 예시를 실행하면: ```python import torch from diffusers import DDIMPipeline import numpy as np model_id = "google/ddpm-cifar10-32" # 모델과 스케줄러 불러오기 ddim = DDIMPipeline.from_pretrained(model_id) ddim.to("cuda") # 재현성을 위한 generator 만들기 generator = torch.Generator(device="cuda").manual_seed(0) # 두 개의 단계에 대해서 파이프라인을 실행하고 numpy tensor로 값을 반환하기 image = ddim(num_inference_steps=2, output_type="np", generator=generator).images print(np.abs(image).sum()) ``` GPU가 CPU와 다른 난수 생성기를 사용하기 때문에 동일한 시드를 사용하더라도 결과가 같지 않습니다. 
이 문제를 피하기 위해 🧨 Diffusers는 CPU에 임의의 노이즈를 생성한 다음 필요에 따라 텐서를 GPU로 이동시키는 [randn_tensor()](https://huggingface.co/docs/diffusers/v0.18.0/en/api/utilities#diffusers.utils.randn_tensor)기능을 가지고 있습니다. `randn_tensor` 기능은 파이프라인 내부 어디에서나 사용되므로 파이프라인이 GPU에서 실행되더라도 **항상** CPU `Generator`를 통과할 수 있습니다. 이제 결과에 훨씬 더 다가왔습니다! ```python import torch from diffusers import DDIMPipeline import numpy as np model_id = "google/ddpm-cifar10-32" # 모델과 스케줄러 불러오기 ddim = DDIMPipeline.from_pretrained(model_id) ddim.to("cuda") #재현성을 위한 generator 만들기 (GPU에 올리지 않도록 조심한다!) generator = torch.manual_seed(0) # 두 개의 단계에 대해서 파이프라인을 실행하고 numpy tensor로 값을 반환하기 image = ddim(num_inference_steps=2, output_type="np", generator=generator).images print(np.abs(image).sum()) ``` <Tip> 💡 재현성이 중요한 경우에는 항상 CPU generator를 전달하는 것이 좋습니다. 성능 손실은 무시할 수 없는 경우가 많으며 파이프라인이 GPU에서 실행되었을 때보다 훨씬 더 비슷한 값을 생성할 수 있습니다. </Tip> 마지막으로 [UnCLIPPipeline](https://huggingface.co/docs/diffusers/v0.18.0/en/api/pipelines/unclip#diffusers.UnCLIPPipeline)과 같은 더 복잡한 파이프라인의 경우, 이들은 종종 정밀 오차 전파에 극도로 취약합니다. 다른 GPU 하드웨어 또는 PyTorch 버전에서 유사한 결과를 기대하지 마세요. 이 경우 완전한 재현성을 위해 완전히 동일한 하드웨어 및 PyTorch 버전을 실행해야 합니다. ## 결정론적 알고리즘 결정론적 알고리즘을 사용하여 재현 가능한 파이프라인을 생성하도록 PyTorch를 구성할 수도 있습니다. 그러나 결정론적 알고리즘은 비결정론적 알고리즘보다 느리고 성능이 저하될 수 있습니다. 하지만 재현성이 중요하다면, 이것이 최선의 방법입니다! 둘 이상의 CUDA 스트림에서 작업이 시작될 때 비결정론적 동작이 발생합니다. 이 문제를 방지하려면 환경 변수 [CUBLAS_WORKSPACE_CONFIG](https://docs.nvidia.com/cuda/cublas/index.html#results-reproducibility)를 `:16:8`로 설정해서 런타임 중에 오직 하나의 버퍼 크리만 사용하도록 설정합니다. PyTorch는 일반적으로 가장 빠른 알고리즘을 선택하기 위해 여러 알고리즘을 벤치마킹합니다. 하지만 재현성을 원하는 경우, 벤치마크가 매 순간 다른 알고리즘을 선택할 수 있기 때문에 이 기능을 사용하지 않도록 설정해야 합니다. 마지막으로, [torch.use_deterministic_algorithms](https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html)에 `True`를 통과시켜 결정론적 알고리즘이 활성화 되도록 합니다. ```py import os os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8" torch.backends.cudnn.benchmark = False torch.use_deterministic_algorithms(True) ``` 이제 동일한 파이프라인을 두번 실행하면 동일한 결과를 얻을 수 있습니다. ```py import torch from diffusers import DDIMScheduler, StableDiffusionPipeline import numpy as np model_id = "runwayml/stable-diffusion-v1-5" pipe = StableDiffusionPipeline.from_pretrained(model_id).to("cuda") pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) g = torch.Generator(device="cuda") prompt = "A bear is playing a guitar on Times Square" g.manual_seed(0) result1 = pipe(prompt=prompt, num_inference_steps=50, generator=g, output_type="latent").images g.manual_seed(0) result2 = pipe(prompt=prompt, num_inference_steps=50, generator=g, output_type="latent").images print("L_inf dist = ", abs(result1 - result2).max()) "L_inf dist = tensor(0., device='cuda:0')" ```
diffusers/docs/source/ko/using-diffusers/reproducibility.md/0
{ "file_path": "diffusers/docs/source/ko/using-diffusers/reproducibility.md", "repo_id": "diffusers", "token_count": 6163 }
96
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> [[open-in-colab]] # 快速上手 训练扩散模型,是为了对随机高斯噪声进行逐步去噪,以生成令人感兴趣的样本,比如图像或者语音。 扩散模型的发展引起了人们对生成式人工智能的极大兴趣,你可能已经在网上见过扩散生成的图像了。🧨 Diffusers库的目的是让大家更易上手扩散模型。 无论你是开发人员还是普通用户,本文将向你介绍🧨 Diffusers 并帮助你快速开始生成内容! 🧨 Diffusers 库的三个主要组件: 无论你是开发者还是普通用户,这个快速指南将向你介绍🧨 Diffusers,并帮助你快速使用和生成!该库三个主要部分如下: * [`DiffusionPipeline`]是一个高级的端到端类,旨在通过预训练的扩散模型快速生成样本进行推理。 * 作为创建扩散系统做组件的流行的预训练[模型](./api/models)框架和模块。 * 许多不同的[调度器](./api/schedulers/overview):控制如何在训练过程中添加噪声的算法,以及如何在推理过程中生成去噪图像的算法。 快速入门将告诉你如何使用[`DiffusionPipeline`]进行推理,然后指导你如何结合模型和调度器以复现[`DiffusionPipeline`]内部发生的事情。 <Tip> 快速入门是🧨[Diffusers入门](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/diffusers_intro.ipynb)的简化版,可以帮助你快速上手。如果你想了解更多关于🧨 Diffusers的目标、设计理念以及关于它的核心API的更多细节,可以点击🧨[Diffusers入门](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/diffusers_intro.ipynb)查看。 </Tip> 在开始之前,确认一下你已经安装好了所需要的库: ```bash pip install --upgrade diffusers accelerate transformers ``` - [🤗 Accelerate](https://huggingface.co/docs/accelerate/index) 在推理和训练过程中加速模型加载。 - [🤗 Transformers](https://huggingface.co/docs/transformers/index) 是运行最流行的扩散模型所必须的库,比如[Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/overview). 
## 扩散模型管道 [`DiffusionPipeline`]是用预训练的扩散系统进行推理的最简单方法。它是一个包含模型和调度器的端到端系统。你可以直接使用[`DiffusionPipeline`]完成许多任务。请查看下面的表格以了解一些支持的任务,要获取完整的支持任务列表,请查看[🧨 Diffusers 总结](./api/pipelines/overview#diffusers-summary) 。 | **任务** | **描述** | **管道** |------------------------------|--------------------------------------------------------------------------------------------------------------|-----------------| | Unconditional Image Generation | 从高斯噪声中生成图片 | [unconditional_image_generation](./using-diffusers/unconditional_image_generation) | | Text-Guided Image Generation | 给定文本提示生成图像 | [conditional_image_generation](./using-diffusers/conditional_image_generation) | | Text-Guided Image-to-Image Translation | 在文本提示的指导下调整图像 | [img2img](./using-diffusers/img2img) | | Text-Guided Image-Inpainting | 给出图像、遮罩和文本提示,填充图像的遮罩部分 | [inpaint](./using-diffusers/inpaint) | | Text-Guided Depth-to-Image Translation | 在文本提示的指导下调整图像的部分内容,同时通过深度估计保留其结构 | [depth2img](./using-diffusers/depth2img) | 首先创建一个[`DiffusionPipeline`]的实例,并指定要下载的pipeline检查点。 你可以使用存储在Hugging Face Hub上的任何[`DiffusionPipeline`][检查点](https://huggingface.co/models?library=diffusers&sort=downloads)。 在教程中,你将加载[`stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5)检查点,用于文本到图像的生成。 首先创建一个[DiffusionPipeline]实例,并指定要下载的管道检查点。 您可以在Hugging Face Hub上使用[DiffusionPipeline]的任何检查点。 在本快速入门中,您将加载stable-diffusion-v1-5检查点,用于文本到图像生成。 <Tip warning={true}>。 对于[Stable Diffusion](https://huggingface.co/CompVis/stable-diffusion)模型,在运行该模型之前,请先仔细阅读[许可证](https://huggingface.co/spaces/CompVis/stable-diffusion-license)。🧨 Diffusers实现了一个[`safety_checker`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py),以防止有攻击性的或有害的内容,但Stable Diffusion模型改进图像的生成能力仍有可能产生潜在的有害内容。 </Tip> 用[`~DiffusionPipeline.from_pretrained`]方法加载模型。 ```python >>> from diffusers import DiffusionPipeline >>> pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") ``` [`DiffusionPipeline`]会下载并缓存所有的建模、标记化和调度组件。你可以看到Stable Diffusion的pipeline是由[`UNet2DConditionModel`]和[`PNDMScheduler`]等组件组成的: ```py >>> pipeline StableDiffusionPipeline { "_class_name": "StableDiffusionPipeline", "_diffusers_version": "0.13.1", ..., "scheduler": [ "diffusers", "PNDMScheduler" ], ..., "unet": [ "diffusers", "UNet2DConditionModel" ], "vae": [ "diffusers", "AutoencoderKL" ] } ``` 我们强烈建议你在GPU上运行这个pipeline,因为该模型由大约14亿个参数组成。 你可以像在Pytorch里那样把生成器对象移到GPU上: ```python >>> pipeline.to("cuda") ``` 现在你可以向`pipeline`传递一个文本提示来生成图像,然后获得去噪的图像。默认情况下,图像输出被放在一个[`PIL.Image`](https://pillow.readthedocs.io/en/stable/reference/Image.html?highlight=image#the-image-class)对象中。 ```python >>> image = pipeline("An image of a squirrel in Picasso style").images[0] >>> image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/image_of_squirrel_painting.png"/> </div> 调用`save`保存图像: ```python >>> image.save("image_of_squirrel_painting.png") ``` ### 本地管道 你也可以在本地使用管道。唯一的区别是你需提前下载权重: ``` git lfs install git clone https://huggingface.co/runwayml/stable-diffusion-v1-5 ``` 将下载好的权重加载到管道中: ```python >>> pipeline = DiffusionPipeline.from_pretrained("./stable-diffusion-v1-5") ``` 现在你可以像上一节中那样运行管道了。 ### 更换调度器 不同的调度器对去噪速度和质量的权衡是不同的。要想知道哪种调度器最适合你,最好的办法就是试用一下。🧨 Diffusers的主要特点之一是允许你轻松切换不同的调度器。例如,要用[`EulerDiscreteScheduler`]替换默认的[`PNDMScheduler`],用[`~diffusers.ConfigMixin.from_config`]方法加载即可: ```py >>> from diffusers import EulerDiscreteScheduler >>> pipeline = 
StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") >>> pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config) ``` 试着用新的调度器生成一个图像,看看你能否发现不同之处。 在下一节中,你将仔细观察组成[`DiffusionPipeline`]的组件——模型和调度器,并学习如何使用这些组件来生成猫咪的图像。 ## 模型 大多数模型取一个噪声样本,在每个时间点预测*噪声残差*(其他模型则直接学习预测前一个样本或速度或[`v-prediction`](https://github.com/huggingface/diffusers/blob/5e5ce13e2f89ac45a0066cb3f369462a3cf1d9ef/src/diffusers/schedulers/scheduling_ddim.py#L110)),即噪声较小的图像与输入图像的差异。你可以混搭模型创建其他扩散系统。 模型是用[`~ModelMixin.from_pretrained`]方法启动的,该方法还在本地缓存了模型权重,所以下次加载模型时更快。对于快速入门,你默认加载的是[`UNet2DModel`],这是一个基础的无条件图像生成模型,该模型有一个在猫咪图像上训练的检查点: ```py >>> from diffusers import UNet2DModel >>> repo_id = "google/ddpm-cat-256" >>> model = UNet2DModel.from_pretrained(repo_id) ``` 想知道模型的参数,调用 `model.config`: ```py >>> model.config ``` 模型配置是一个🧊冻结的🧊字典,意思是这些参数在模型创建后就不变了。这是特意设置的,确保在开始时用于定义模型架构的参数保持不变,其他参数仍然可以在推理过程中进行调整。 一些最重要的参数: * `sample_size`:输入样本的高度和宽度尺寸。 * `in_channels`:输入样本的输入通道数。 * `down_block_types`和`up_block_types`:用于创建U-Net架构的下采样和上采样块的类型。 * `block_out_channels`:下采样块的输出通道数;也以相反的顺序用于上采样块的输入通道数。 * `layers_per_block`:每个U-Net块中存在的ResNet块的数量。 为了使用该模型进行推理,用随机高斯噪声生成图像形状。它应该有一个`batch`轴,因为模型可以接收多个随机噪声,一个`channel`轴,对应于输入通道的数量,以及一个`sample_size`轴,对应图像的高度和宽度。 ```py >>> import torch >>> torch.manual_seed(0) >>> noisy_sample = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) >>> noisy_sample.shape torch.Size([1, 3, 256, 256]) ``` 对于推理,将噪声图像和一个`timestep`传递给模型。`timestep` 表示输入图像的噪声程度,开始时噪声更多,结束时噪声更少。这有助于模型确定其在扩散过程中的位置,是更接近开始还是结束。使用 `sample` 获得模型输出: ```py >>> with torch.no_grad(): ... noisy_residual = model(sample=noisy_sample, timestep=2).sample ``` 想生成实际的样本,你需要一个调度器指导去噪过程。在下一节中,你将学习如何把模型与调度器结合起来。 ## 调度器 调度器管理一个噪声样本到一个噪声较小的样本的处理过程,给出模型输出 —— 在这种情况下,它是`noisy_residual`。 <Tip> 🧨 Diffusers是一个用于构建扩散系统的工具箱。预定义好的扩散系统[`DiffusionPipeline`]能方便你快速试用,你也可以单独选择自己的模型和调度器组件来建立一个自定义的扩散系统。 </Tip> 在快速入门教程中,你将用它的[`~diffusers.ConfigMixin.from_config`]方法实例化[`DDPMScheduler`]: ```py >>> from diffusers import DDPMScheduler >>> scheduler = DDPMScheduler.from_config(repo_id) >>> scheduler DDPMScheduler { "_class_name": "DDPMScheduler", "_diffusers_version": "0.13.1", "beta_end": 0.02, "beta_schedule": "linear", "beta_start": 0.0001, "clip_sample": true, "clip_sample_range": 1.0, "num_train_timesteps": 1000, "prediction_type": "epsilon", "trained_betas": null, "variance_type": "fixed_small" } ``` <Tip> 💡 注意调度器是如何从配置中实例化的。与模型不同,调度器没有可训练的权重,而且是无参数的。 </Tip> * `num_train_timesteps`:去噪过程的长度,或者换句话说,将随机高斯噪声处理成数据样本所需的时间步数。 * `beta_schedule`:用于推理和训练的噪声表。 * `beta_start`和`beta_end`:噪声表的开始和结束噪声值。 要预测一个噪音稍小的图像,请将 模型输出、`timestep`和当前`sample` 传递给调度器的[`~diffusers.DDPMScheduler.step`]方法: ```py >>> less_noisy_sample = scheduler.step(model_output=noisy_residual, timestep=2, sample=noisy_sample).prev_sample >>> less_noisy_sample.shape ``` 这个 `less_noisy_sample` 去噪样本 可以被传递到下一个`timestep` ,处理后会将变得噪声更小。现在让我们把所有步骤合起来,可视化整个去噪过程。 首先,创建一个函数,对去噪后的图像进行后处理并显示为`PIL.Image`: ```py >>> import PIL.Image >>> import numpy as np >>> def display_sample(sample, i): ... image_processed = sample.cpu().permute(0, 2, 3, 1) ... image_processed = (image_processed + 1.0) * 127.5 ... image_processed = image_processed.numpy().astype(np.uint8) ... image_pil = PIL.Image.fromarray(image_processed[0]) ... display(f"Image at step {i}") ... 
display(image_pil) ``` 将输入和模型移到GPU上加速去噪过程: ```py >>> model.to("cuda") >>> noisy_sample = noisy_sample.to("cuda") ``` 现在创建一个去噪循环,该循环预测噪声较少样本的残差,并使用调度程序计算噪声较少的样本: ```py >>> import tqdm >>> sample = noisy_sample >>> for i, t in enumerate(tqdm.tqdm(scheduler.timesteps)): ... # 1. predict noise residual ... with torch.no_grad(): ... residual = model(sample, t).sample ... # 2. compute less noisy image and set x_t -> x_t-1 ... sample = scheduler.step(residual, t, sample).prev_sample ... # 3. optionally look at image ... if (i + 1) % 50 == 0: ... display_sample(sample, i + 1) ``` 看!这样就从噪声中生成出一只猫了!😻 <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/diffusion-quicktour.png"/> </div> ## 下一步 希望你在这次快速入门教程中用🧨Diffuser 生成了一些很酷的图像! 下一步你可以: * 在[训练](./tutorials/basic_training)教程中训练或微调一个模型来生成你自己的图像。 * 查看官方和社区的[训练或微调脚本](https://github.com/huggingface/diffusers/tree/main/examples#-diffusers-examples)的例子,了解更多使用情况。 * 在[使用不同的调度器](./using-diffusers/schedulers)指南中了解更多关于加载、访问、更改和比较调度器的信息。 * 在[Stable Diffusion](./stable_diffusion)教程中探索提示工程、速度和内存优化,以及生成更高质量图像的技巧。 * 通过[在GPU上优化PyTorch](./optimization/fp16)指南,以及运行[Apple (M1/M2)上的Stable Diffusion](./optimization/mps)和[ONNX Runtime](./optimization/onnx)的教程,更深入地了解如何加速🧨Diffuser。
diffusers/docs/source/zh/quicktour.md/0
{ "file_path": "diffusers/docs/source/zh/quicktour.md", "repo_id": "diffusers", "token_count": 8423 }
97
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import List, Optional, Tuple, Union import PIL.Image import torch from torchvision import transforms from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput from diffusers.schedulers import DDIMScheduler from diffusers.utils.torch_utils import randn_tensor trans = transforms.Compose( [ transforms.Resize((256, 256)), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def preprocess(image): if isinstance(image, torch.Tensor): return image elif isinstance(image, PIL.Image.Image): image = [image] image = [trans(img.convert("RGB")) for img in image] image = torch.stack(image) return image class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline): r""" This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Parameters: unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of [`DDPMScheduler`], or [`DDIMScheduler`]. """ def __init__(self, unet, scheduler): super().__init__() # make sure scheduler can always be converted to DDIM scheduler = DDIMScheduler.from_config(scheduler.config) self.register_modules(unet=unet, scheduler=scheduler) def check_inputs(self, strength): if strength < 0 or strength > 1: raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") def get_timesteps(self, num_inference_steps, strength, device): # get the original timestep using init_timestep init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) init_latents = image.to(device=device, dtype=dtype) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) # get latents print("add noise to latents at timestep", timestep) init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents @torch.no_grad() def __call__( self, image: Union[torch.FloatTensor, PIL.Image.Image] = None, strength: float = 0.8, batch_size: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, eta: float = 0.0, num_inference_steps: int = 50, use_clipped_model_output: Optional[bool] = None, output_type: Optional[str] = "pil", return_dict: bool = True, ) -> Union[ImagePipelineOutput, Tuple]: r""" Args: image (`torch.FloatTensor` or `PIL.Image.Image`): `Image`, or tensor representing an image batch, that will be used as the starting point for the process. strength (`float`, *optional*, defaults to 0.8): Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the `strength`. The number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will be maximum and the denoising process will run for the full number of iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. batch_size (`int`, *optional*, defaults to 1): The number of images to generate. generator (`torch.Generator`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. eta (`float`, *optional*, defaults to 0.0): The eta parameter which controls the scale of the variance (0 is DDIM and 1 is one type of DDPM). num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. use_clipped_model_output (`bool`, *optional*, defaults to `None`): if `True` or `False`, see documentation for `DDIMScheduler.step`. If `None`, nothing is passed downstream to the scheduler. So use `None` for schedulers which don't support this argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. Returns: [`~pipelines.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images. """ # 1. Check inputs. Raise error if not correct self.check_inputs(strength) # 2. Preprocess image image = preprocess(image) # 3. set timesteps self.scheduler.set_timesteps(num_inference_steps, device=self.device) timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device) latent_timestep = timesteps[:1].repeat(batch_size) # 4. Prepare latent variables latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator) image = latents # 5. Denoising loop for t in self.progress_bar(timesteps): # 1. predict noise model_output model_output = self.unet(image, t).sample # 2. 
predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 image = self.scheduler.step( model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator, ).prev_sample image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).numpy() if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image, latent_timestep.item()) return ImagePipelineOutput(images=image)
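

# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# community pipeline). The DDPM checkpoint, input image, and strength values
# below are assumptions chosen only for demonstration.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from diffusers import DDPMPipeline
    from diffusers.utils import load_image

    # reuse the unet/scheduler of an unconditional 256x256 DDPM checkpoint
    src = DDPMPipeline.from_pretrained("google/ddpm-celebahq-256")
    pipe = DDIMNoiseComparativeAnalysisPipeline(unet=src.unet, scheduler=src.scheduler).to("cuda")

    init_image = load_image(
        "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-init.png"
    )

    for strength in (0.2, 0.5, 0.8):
        # higher strength -> noise is added at a later timestep -> the result
        # drifts further away from the input image
        images, noising_timestep = pipe(
            image=init_image, strength=strength, num_inference_steps=50, return_dict=False
        )
        images[0].save(f"ddim_noise_analysis_strength_{strength}.png")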
diffusers/examples/community/ddim_noise_comparative_analysis.py/0
{ "file_path": "diffusers/examples/community/ddim_noise_comparative_analysis.py", "repo_id": "diffusers", "token_count": 3417 }
98
## ---------------------------------------------------------- # A SDXL pipeline can take unlimited weighted prompt # # Author: Andrew Zhu # Github: https://github.com/xhinker # Medium: https://medium.com/@xhinker ## ----------------------------------------------------------- import inspect import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from PIL import Image from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection, ) from diffusers import DiffusionPipeline, StableDiffusionXLPipeline from diffusers.image_processor import PipelineImageInput, VaeImageProcessor from diffusers.loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel from diffusers.models.attention_processor import ( AttnProcessor2_0, LoRAAttnProcessor2_0, LoRAXFormersAttnProcessor, XFormersAttnProcessor, ) from diffusers.pipelines.pipeline_utils import StableDiffusionMixin from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import ( deprecate, is_accelerate_available, is_accelerate_version, is_invisible_watermark_available, logging, replace_example_docstring, ) from diffusers.utils.torch_utils import randn_tensor if is_invisible_watermark_available(): from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker def parse_prompt_attention(text): """ Parses a string with attention tokens and returns a list of pairs: text and its associated weight. Accepted tokens are: (abc) - increases attention to abc by a multiplier of 1.1 (abc:3.12) - increases attention to abc by a multiplier of 3.12 [abc] - decreases attention to abc by a multiplier of 1.1 \\( - literal character '(' \\[ - literal character '[' \\) - literal character ')' \\] - literal character ']' \\ - literal character '\' anything else - just text >>> parse_prompt_attention('normal text') [['normal text', 1.0]] >>> parse_prompt_attention('an (important) word') [['an ', 1.0], ['important', 1.1], [' word', 1.0]] >>> parse_prompt_attention('(unbalanced') [['unbalanced', 1.1]] >>> parse_prompt_attention('\\(literal\\]') [['(literal]', 1.0]] >>> parse_prompt_attention('(unnecessary)(parens)') [['unnecessaryparens', 1.1]] >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).') [['a ', 1.0], ['house', 1.5730000000000004], [' ', 1.1], ['on', 1.0], [' a ', 1.1], ['hill', 0.55], [', sun, ', 1.1], ['sky', 1.4641000000000006], ['.', 1.1]] """ import re re_attention = re.compile( r""" \\\(|\\\)|\\\[|\\]|\\\\|\\|\(|\[|:([+-]?[.\d]+)\)| \)|]|[^\\()\[\]:]+|: """, re.X, ) re_break = re.compile(r"\s*\bBREAK\b\s*", re.S) res = [] round_brackets = [] square_brackets = [] round_bracket_multiplier = 1.1 square_bracket_multiplier = 1 / 1.1 def multiply_range(start_position, multiplier): for p in range(start_position, len(res)): res[p][1] *= multiplier for m in re_attention.finditer(text): text = m.group(0) weight = m.group(1) if text.startswith("\\"): res.append([text[1:], 1.0]) elif text == "(": round_brackets.append(len(res)) elif text == "[": square_brackets.append(len(res)) elif weight is not None and len(round_brackets) > 0: multiply_range(round_brackets.pop(), float(weight)) elif text == ")" and len(round_brackets) > 0: multiply_range(round_brackets.pop(), 
round_bracket_multiplier)
        elif text == "]" and len(square_brackets) > 0:
            multiply_range(square_brackets.pop(), square_bracket_multiplier)
        else:
            parts = re.split(re_break, text)
            for i, part in enumerate(parts):
                if i > 0:
                    res.append(["BREAK", -1])
                res.append([part, 1.0])

    for pos in round_brackets:
        multiply_range(pos, round_bracket_multiplier)

    for pos in square_brackets:
        multiply_range(pos, square_bracket_multiplier)

    if len(res) == 0:
        res = [["", 1.0]]

    # merge runs of identical weights
    i = 0
    while i + 1 < len(res):
        if res[i][1] == res[i + 1][1]:
            res[i][0] += res[i + 1][0]
            res.pop(i + 1)
        else:
            i += 1

    return res


def get_prompts_tokens_with_weights(clip_tokenizer: CLIPTokenizer, prompt: str):
    """
    Get prompt token ids and weights; this function works for both the prompt and the negative prompt.

    Args:
        clip_tokenizer (CLIPTokenizer)
            A CLIPTokenizer
        prompt (str)
            A prompt string with weights

    Returns:
        text_tokens (list)
            A list containing the token ids
        text_weights (list)
            A list containing the corresponding weight of each token id

    Example:
        import torch
        from transformers import CLIPTokenizer

        clip_tokenizer = CLIPTokenizer.from_pretrained(
            "stablediffusionapi/deliberate-v2"
            , subfolder = "tokenizer"
            , dtype = torch.float16
        )

        token_id_list, token_weight_list = get_prompts_tokens_with_weights(
            clip_tokenizer = clip_tokenizer
            ,prompt = "a (red:1.5) cat"*70
        )
    """
    texts_and_weights = parse_prompt_attention(prompt)
    text_tokens, text_weights = [], []
    for word, weight in texts_and_weights:
        # tokenize and discard the starting and the ending token
        token = clip_tokenizer(word, truncation=False).input_ids[1:-1]  # so that we can tokenize a prompt of any length
        # the returned token is a 1d list: [320, 1125, 539, 320]

        # merge the new tokens to the all tokens holder: text_tokens
        text_tokens = [*text_tokens, *token]

        # each token chunk will come with one weight, like ['red cat', 2.0]
        # need to expand the weight to every token in the chunk.
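        # e.g. if the chunk ['red cat', 2.0] tokenizes into two ids, the weight 2.0 is repeated once per id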
chunk_weights = [weight] * len(token) # append the weight back to the weight holder: text_weights text_weights = [*text_weights, *chunk_weights] return text_tokens, text_weights def group_tokens_and_weights(token_ids: list, weights: list, pad_last_block=False): """ Produce tokens and weights in groups and pad the missing tokens Args: token_ids (list) The token ids from tokenizer weights (list) The weights list from function get_prompts_tokens_with_weights pad_last_block (bool) Control if fill the last token list to 75 tokens with eos Returns: new_token_ids (2d list) new_weights (2d list) Example: token_groups,weight_groups = group_tokens_and_weights( token_ids = token_id_list , weights = token_weight_list ) """ bos, eos = 49406, 49407 # this will be a 2d list new_token_ids = [] new_weights = [] while len(token_ids) >= 75: # get the first 75 tokens head_75_tokens = [token_ids.pop(0) for _ in range(75)] head_75_weights = [weights.pop(0) for _ in range(75)] # extract token ids and weights temp_77_token_ids = [bos] + head_75_tokens + [eos] temp_77_weights = [1.0] + head_75_weights + [1.0] # add 77 token and weights chunk to the holder list new_token_ids.append(temp_77_token_ids) new_weights.append(temp_77_weights) # padding the left if len(token_ids) > 0: padding_len = 75 - len(token_ids) if pad_last_block else 0 temp_77_token_ids = [bos] + token_ids + [eos] * padding_len + [eos] new_token_ids.append(temp_77_token_ids) temp_77_weights = [1.0] + weights + [1.0] * padding_len + [1.0] new_weights.append(temp_77_weights) return new_token_ids, new_weights def get_weighted_text_embeddings_sdxl( pipe: StableDiffusionXLPipeline, prompt: str = "", prompt_2: str = None, neg_prompt: str = "", neg_prompt_2: str = None, num_images_per_prompt: int = 1, device: Optional[torch.device] = None, clip_skip: Optional[int] = None, ): """ This function can process long prompt with weights, no length limitation for Stable Diffusion XL Args: pipe (StableDiffusionPipeline) prompt (str) prompt_2 (str) neg_prompt (str) neg_prompt_2 (str) num_images_per_prompt (int) device (torch.device) clip_skip (int) Returns: prompt_embeds (torch.Tensor) neg_prompt_embeds (torch.Tensor) """ device = device or pipe._execution_device if prompt_2: prompt = f"{prompt} {prompt_2}" if neg_prompt_2: neg_prompt = f"{neg_prompt} {neg_prompt_2}" prompt_t1 = prompt_t2 = prompt neg_prompt_t1 = neg_prompt_t2 = neg_prompt if isinstance(pipe, TextualInversionLoaderMixin): prompt_t1 = pipe.maybe_convert_prompt(prompt_t1, pipe.tokenizer) neg_prompt_t1 = pipe.maybe_convert_prompt(neg_prompt_t1, pipe.tokenizer) prompt_t2 = pipe.maybe_convert_prompt(prompt_t2, pipe.tokenizer_2) neg_prompt_t2 = pipe.maybe_convert_prompt(neg_prompt_t2, pipe.tokenizer_2) eos = pipe.tokenizer.eos_token_id # tokenizer 1 prompt_tokens, prompt_weights = get_prompts_tokens_with_weights(pipe.tokenizer, prompt_t1) neg_prompt_tokens, neg_prompt_weights = get_prompts_tokens_with_weights(pipe.tokenizer, neg_prompt_t1) # tokenizer 2 prompt_tokens_2, prompt_weights_2 = get_prompts_tokens_with_weights(pipe.tokenizer_2, prompt_t2) neg_prompt_tokens_2, neg_prompt_weights_2 = get_prompts_tokens_with_weights(pipe.tokenizer_2, neg_prompt_t2) # padding the shorter one for prompt set 1 prompt_token_len = len(prompt_tokens) neg_prompt_token_len = len(neg_prompt_tokens) if prompt_token_len > neg_prompt_token_len: # padding the neg_prompt with eos token neg_prompt_tokens = neg_prompt_tokens + [eos] * abs(prompt_token_len - neg_prompt_token_len) neg_prompt_weights = neg_prompt_weights + [1.0] * 
abs(prompt_token_len - neg_prompt_token_len)
    else:
        # padding the prompt
        prompt_tokens = prompt_tokens + [eos] * abs(prompt_token_len - neg_prompt_token_len)
        prompt_weights = prompt_weights + [1.0] * abs(prompt_token_len - neg_prompt_token_len)

    # padding the shorter one for token set 2
    prompt_token_len_2 = len(prompt_tokens_2)
    neg_prompt_token_len_2 = len(neg_prompt_tokens_2)

    if prompt_token_len_2 > neg_prompt_token_len_2:
        # padding the neg_prompt with eos token
        neg_prompt_tokens_2 = neg_prompt_tokens_2 + [eos] * abs(prompt_token_len_2 - neg_prompt_token_len_2)
        neg_prompt_weights_2 = neg_prompt_weights_2 + [1.0] * abs(prompt_token_len_2 - neg_prompt_token_len_2)
    else:
        # padding the prompt
        prompt_tokens_2 = prompt_tokens_2 + [eos] * abs(prompt_token_len_2 - neg_prompt_token_len_2)
        prompt_weights_2 = prompt_weights_2 + [1.0] * abs(prompt_token_len_2 - neg_prompt_token_len_2)

    embeds = []
    neg_embeds = []

    prompt_token_groups, prompt_weight_groups = group_tokens_and_weights(prompt_tokens.copy(), prompt_weights.copy())

    neg_prompt_token_groups, neg_prompt_weight_groups = group_tokens_and_weights(
        neg_prompt_tokens.copy(), neg_prompt_weights.copy()
    )

    prompt_token_groups_2, prompt_weight_groups_2 = group_tokens_and_weights(
        prompt_tokens_2.copy(), prompt_weights_2.copy()
    )

    neg_prompt_token_groups_2, neg_prompt_weight_groups_2 = group_tokens_and_weights(
        neg_prompt_tokens_2.copy(), neg_prompt_weights_2.copy()
    )

    # get the prompt embeddings chunk by chunk; the CLIP text encoders can only process 77 tokens at a time
    for i in range(len(prompt_token_groups)):
        # get positive prompt embeddings with weights
        token_tensor = torch.tensor([prompt_token_groups[i]], dtype=torch.long, device=device)
        weight_tensor = torch.tensor(prompt_weight_groups[i], dtype=torch.float16, device=device)

        token_tensor_2 = torch.tensor([prompt_token_groups_2[i]], dtype=torch.long, device=device)

        # use first text encoder
        prompt_embeds_1 = pipe.text_encoder(token_tensor.to(device), output_hidden_states=True)

        # use second text encoder
        prompt_embeds_2 = pipe.text_encoder_2(token_tensor_2.to(device), output_hidden_states=True)
        pooled_prompt_embeds = prompt_embeds_2[0]

        if clip_skip is None:
            prompt_embeds_1_hidden_states = prompt_embeds_1.hidden_states[-2]
            prompt_embeds_2_hidden_states = prompt_embeds_2.hidden_states[-2]
        else:
            # "2" because SDXL always indexes from the penultimate layer.
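            # e.g. clip_skip=1 selects hidden_states[-3], one layer earlier than the default hidden_states[-2]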
prompt_embeds_1_hidden_states = prompt_embeds_1.hidden_states[-(clip_skip + 2)] prompt_embeds_2_hidden_states = prompt_embeds_2.hidden_states[-(clip_skip + 2)] prompt_embeds_list = [prompt_embeds_1_hidden_states, prompt_embeds_2_hidden_states] token_embedding = torch.concat(prompt_embeds_list, dim=-1).squeeze(0) for j in range(len(weight_tensor)): if weight_tensor[j] != 1.0: token_embedding[j] = ( token_embedding[-1] + (token_embedding[j] - token_embedding[-1]) * weight_tensor[j] ) token_embedding = token_embedding.unsqueeze(0) embeds.append(token_embedding) # get negative prompt embeddings with weights neg_token_tensor = torch.tensor([neg_prompt_token_groups[i]], dtype=torch.long, device=device) neg_token_tensor_2 = torch.tensor([neg_prompt_token_groups_2[i]], dtype=torch.long, device=device) neg_weight_tensor = torch.tensor(neg_prompt_weight_groups[i], dtype=torch.float16, device=device) # use first text encoder neg_prompt_embeds_1 = pipe.text_encoder(neg_token_tensor.to(device), output_hidden_states=True) neg_prompt_embeds_1_hidden_states = neg_prompt_embeds_1.hidden_states[-2] # use second text encoder neg_prompt_embeds_2 = pipe.text_encoder_2(neg_token_tensor_2.to(device), output_hidden_states=True) neg_prompt_embeds_2_hidden_states = neg_prompt_embeds_2.hidden_states[-2] negative_pooled_prompt_embeds = neg_prompt_embeds_2[0] neg_prompt_embeds_list = [neg_prompt_embeds_1_hidden_states, neg_prompt_embeds_2_hidden_states] neg_token_embedding = torch.concat(neg_prompt_embeds_list, dim=-1).squeeze(0) for z in range(len(neg_weight_tensor)): if neg_weight_tensor[z] != 1.0: neg_token_embedding[z] = ( neg_token_embedding[-1] + (neg_token_embedding[z] - neg_token_embedding[-1]) * neg_weight_tensor[z] ) neg_token_embedding = neg_token_embedding.unsqueeze(0) neg_embeds.append(neg_token_embedding) prompt_embeds = torch.cat(embeds, dim=1) negative_prompt_embeds = torch.cat(neg_embeds, dim=1) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt, 1).view( bs_embed * num_images_per_prompt, -1 ) negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt, 1).view( bs_embed * num_images_per_prompt, -1 ) return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds # ------------------------------------------------------------------------------------------------------------------------------- # reuse the backbone code from StableDiffusionXLPipeline # ------------------------------------------------------------------------------------------------------------------------------- logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py from diffusers import DiffusionPipeline import torch pipe = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0" , torch_dtype = torch.float16 , use_safetensors = True , variant = "fp16" , custom_pipeline = "lpw_stable_diffusion_xl", ) prompt = "a white cat running on the grass"*20 prompt2 = "play a football"*20 
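        # the example prompts are repeated 20x on purpose: the combined prompt is far longer than CLIP's 77-token window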
prompt = f"{prompt},{prompt2}" neg_prompt = "blur, low quality" pipe.to("cuda") images = pipe( prompt = prompt , negative_prompt = neg_prompt ).images[0] pipe.to("cpu") torch.cuda.empty_cache() images ``` """ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): """ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 """ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: Optional[int] = None, device: Optional[Union[str, torch.device]] = None, timesteps: Optional[List[int]] = None, **kwargs, ): """ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. """ if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." 
) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class SDXLLongPromptWeightingPipeline( DiffusionPipeline, StableDiffusionMixin, FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin, ): r""" Pipeline for text-to-image generation using Stable Diffusion XL. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). The pipeline also inherits the following loading methods: - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. Stable Diffusion XL uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. text_encoder_2 ([` CLIPTextModelWithProjection`]): Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), specifically the [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). tokenizer_2 (`CLIPTokenizer`): Second Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. 
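
    Note:
        Prompts passed to this pipeline may use the `(word:weight)` / `[word]` emphasis syntax and are not
        truncated at 77 tokens: they are split into 77-token chunks and encoded chunk by chunk via
        `get_weighted_text_embeddings_sdxl`. Passing `image` (and optionally `mask_image`) switches the same
        pipeline to image-to-image or inpainting.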
""" model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae" _optional_components = [ "tokenizer", "tokenizer_2", "text_encoder", "text_encoder_2", "image_encoder", "feature_extractor", ] _callback_tensor_inputs = [ "latents", "prompt_embeds", "negative_prompt_embeds", "add_text_embeds", "add_time_ids", "negative_pooled_prompt_embeds", "negative_add_time_ids", ] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, text_encoder_2: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, feature_extractor: Optional[CLIPImageProcessor] = None, image_encoder: Optional[CLIPVisionModelWithProjection] = None, force_zeros_for_empty_prompt: bool = True, add_watermarker: Optional[bool] = None, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, image_encoder=image_encoder, ) self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.mask_processor = VaeImageProcessor( vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True ) self.default_sample_size = self.unet.config.sample_size add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() if add_watermarker: self.watermark = StableDiffusionXLWatermarker() else: self.watermark = None def enable_model_cpu_offload(self, gpu_id=0): r""" Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. """ if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.") device = torch.device(f"cuda:{gpu_id}") if self.device.type != "cpu": self.to("cpu", silence_dtype_warnings=True) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) model_sequence = ( [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] ) model_sequence.extend([self.unet, self.vae]) hook = None for cpu_offloaded_model in model_sequence: _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook) # We'll offload the last model manually. 
self.final_offload_hook = hook # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt def encode_prompt( self, prompt: str, prompt_2: Optional[str] = None, device: Optional[torch.device] = None, num_images_per_prompt: int = 1, do_classifier_free_guidance: bool = True, negative_prompt: Optional[str] = None, negative_prompt_2: Optional[str] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, pooled_prompt_embeds: Optional[torch.FloatTensor] = None, negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used in both text-encoders device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. 
""" device = device or self._execution_device # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, LoraLoaderMixin): self._lora_scale = lora_scale if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] # Define tokenizers and text encoders tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = ( [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] ) if prompt_embeds is None: prompt_2 = prompt_2 or prompt # textual inversion: process multi-vector tokens if necessary prompt_embeds_list = [] prompts = [prompt, prompt_2] for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, tokenizer) text_inputs = tokenizer( prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {tokenizer.model_max_length} tokens: {removed_text}" ) prompt_embeds = text_encoder( text_input_ids.to(device), output_hidden_states=True, ) # We are only ALWAYS interested in the pooled output of the final text encoder pooled_prompt_embeds = prompt_embeds[0] prompt_embeds = prompt_embeds.hidden_states[-2] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) # get unconditional embeddings for classifier free guidance zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or "" negative_prompt_2 = negative_prompt_2 or negative_prompt uncond_tokens: List[str] if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt, negative_prompt_2] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." 
) else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) max_length = prompt_embeds.shape[1] uncond_input = tokenizer( negative_prompt, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) negative_prompt_embeds = text_encoder( uncond_input.input_ids.to(device), output_hidden_states=True, ) # We are only ALWAYS interested in the pooled output of the final text encoder negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors="pt").pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder( torch.zeros_like(image), output_hidden_states=True ).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( num_images_per_prompt, dim=0 ) return image_enc_hidden_states, uncond_image_enc_hidden_states else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return image_embeds, uncond_image_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same 
signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, prompt_2, height, width, strength, callback_steps, negative_prompt=None, negative_prompt_2=None, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if strength < 0 or strength > 1: raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt_2 is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." 
) if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError( "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." ) if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError( "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." ) def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None): # get the original timestep using init_timestep if denoising_start is None: init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) else: t_start = 0 timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] # Strength is irrelevant if we directly request a timestep to start at; # that is, strength is determined by the denoising_start instead. if denoising_start is not None: discrete_timestep_cutoff = int( round( self.scheduler.config.num_train_timesteps - (denoising_start * self.scheduler.config.num_train_timesteps) ) ) num_inference_steps = (timesteps < discrete_timestep_cutoff).sum().item() if self.scheduler.order == 2 and num_inference_steps % 2 == 0: # if the scheduler is a 2nd order scheduler we might have to do +1 # because `num_inference_steps` might be even given that every timestep # (except the highest one) is duplicated. If `num_inference_steps` is even it would # mean that we cut the timesteps in the middle of the denoising step # (between 1st and 2nd devirative) which leads to incorrect results. By adding 1 # we ensure that the denoising process always ends after the 2nd derivate step of the scheduler num_inference_steps = num_inference_steps + 1 # because t_n+1 >= t_n, we slice the timesteps starting from the end timesteps = timesteps[-num_inference_steps:] return timesteps, num_inference_steps return timesteps, num_inference_steps - t_start def prepare_latents( self, image, mask, width, height, num_channels_latents, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True, latents=None, is_strength_max=True, return_noise=False, return_image_latents=False, ): batch_size *= num_images_per_prompt if image is None: shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents elif mask is None: if not isinstance(image, (torch.Tensor, Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) # Offload text encoder if `enable_model_cpu_offload` was enabled if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.text_encoder_2.to("cpu") torch.cuda.empty_cache() image = image.to(device=device, dtype=dtype) if image.shape[1] == 4: init_latents = image else: # make sure the VAE is in float32 mode, as it overflows in float16 if self.vae.config.force_upcast: image = image.float() self.vae.to(dtype=torch.float32) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) elif isinstance(generator, list): init_latents = [ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) for i in range(batch_size) ] init_latents = torch.cat(init_latents, dim=0) else: init_latents = retrieve_latents(self.vae.encode(image), generator=generator) if self.vae.config.force_upcast: self.vae.to(dtype) init_latents = init_latents.to(dtype) init_latents = self.vae.config.scaling_factor * init_latents if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: # expand init_latents for batch_size additional_image_per_prompt = batch_size // init_latents.shape[0] init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: raise ValueError( f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." ) else: init_latents = torch.cat([init_latents], dim=0) if add_noise: shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) # get latents init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents else: shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if (image is None or timestep is None) and not is_strength_max: raise ValueError( "Since strength < 1. initial latents are to be initialised as a combination of Image + Noise." "However, either the image or the noise timestep has not been provided." 
) if image.shape[1] == 4: image_latents = image.to(device=device, dtype=dtype) image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1) elif return_image_latents or (latents is None and not is_strength_max): image = image.to(device=device, dtype=dtype) image_latents = self._encode_vae_image(image=image, generator=generator) image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1) if latents is None and add_noise: noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) # if strength is 1. then initialise the latents to noise, else initial to image + noise latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep) # if pure noise then scale the initial latents by the Scheduler's init sigma latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents elif add_noise: noise = latents.to(device) latents = noise * self.scheduler.init_noise_sigma else: noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = image_latents.to(device) outputs = (latents,) if return_noise: outputs += (noise,) if return_image_latents: outputs += (image_latents,) return outputs def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): dtype = image.dtype if self.vae.config.force_upcast: image = image.float() self.vae.to(dtype=torch.float32) if isinstance(generator, list): image_latents = [ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) for i in range(image.shape[0]) ] image_latents = torch.cat(image_latents, dim=0) else: image_latents = retrieve_latents(self.vae.encode(image), generator=generator) if self.vae.config.force_upcast: self.vae.to(dtype) image_latents = image_latents.to(dtype) image_latents = self.vae.config.scaling_factor * image_latents return image_latents def prepare_mask_latents( self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance ): # resize the mask to latents shape as we concatenate the mask to the latents # we do that before converting to dtype to avoid breaking in case we're using cpu_offload # and half precision mask = torch.nn.functional.interpolate( mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor) ) mask = mask.to(device=device, dtype=dtype) # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method if mask.shape[0] < batch_size: if not batch_size % mask.shape[0] == 0: raise ValueError( "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to" f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number" " of masks that you pass is divisible by the total requested batch size." ) mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask if masked_image is not None and masked_image.shape[1] == 4: masked_image_latents = masked_image else: masked_image_latents = None if masked_image is not None: if masked_image_latents is None: masked_image = masked_image.to(device=device, dtype=dtype) masked_image_latents = self._encode_vae_image(masked_image, generator=generator) if masked_image_latents.shape[0] < batch_size: if not batch_size % masked_image_latents.shape[0] == 0: raise ValueError( "The passed images and the required batch size don't match. 
Images are supposed to be duplicated" f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed." " Make sure the number of images that you pass is divisible by the total requested batch size." ) masked_image_latents = masked_image_latents.repeat( batch_size // masked_image_latents.shape[0], 1, 1, 1 ) masked_image_latents = ( torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents ) # aligning device to prevent device errors when concating it with the latent model input masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) return mask, masked_image_latents def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype): add_time_ids = list(original_size + crops_coords_top_left + target_size) passed_add_embed_dim = ( self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim ) expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim != passed_add_embed_dim: raise ValueError( f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." ) add_time_ids = torch.tensor([add_time_ids], dtype=dtype) return add_time_ids # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance( self.vae.decoder.mid_block.attentions[0].processor, ( AttnProcessor2_0, XFormersAttnProcessor, LoRAXFormersAttnProcessor, LoRAAttnProcessor2_0, ), ) # if xformers or torch_2_0 is used attention block does not need # to be in float32 which can save lots of memory if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32): """ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 Args: timesteps (`torch.Tensor`): generate embedding vectors at these timesteps embedding_dim (`int`, *optional*, defaults to 512): dimension of the embeddings to generate dtype: data type of the generated embeddings Returns: `torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)` """ assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: # zero pad emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def guidance_rescale(self): return self._guidance_rescale @property def clip_skip(self): return self._clip_skip # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . 
`guidance_scale = 1` # corresponds to doing no classifier free guidance. @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def denoising_end(self): return self._denoising_end @property def denoising_start(self): return self._denoising_start @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: str = None, prompt_2: Optional[str] = None, image: Optional[PipelineImageInput] = None, mask_image: Optional[PipelineImageInput] = None, masked_image_latents: Optional[torch.FloatTensor] = None, height: Optional[int] = None, width: Optional[int] = None, strength: float = 0.8, num_inference_steps: int = 50, timesteps: List[int] = None, denoising_start: Optional[float] = None, denoising_end: Optional[float] = None, guidance_scale: float = 5.0, negative_prompt: Optional[str] = None, negative_prompt_2: Optional[str] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, ip_adapter_image: Optional[PipelineImageInput] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, pooled_prompt_embeds: Optional[torch.FloatTensor] = None, negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guidance_rescale: float = 0.0, original_size: Optional[Tuple[int, int]] = None, crops_coords_top_left: Tuple[int, int] = (0, 0), target_size: Optional[Tuple[int, int]] = None, clip_skip: Optional[int] = None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], **kwargs, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str`): The prompt to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. prompt_2 (`str`): The prompt to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used in both text-encoders image (`PipelineImageInput`, *optional*): `Image`, or tensor representing an image batch, that will be used as the starting point for the process. mask_image (`PipelineImageInput`, *optional*): `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The width in pixels of the generated image. strength (`float`, *optional*, defaults to 0.8): Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the `strength`. The number of denoising steps depends on the amount of noise initially added. 
When `strength` is 1, added noise will be maximum and the denoising process will run for the full number of iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. timesteps (`List[int]`, *optional*): Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. Must be in descending order. denoising_start (`float`, *optional*): When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be bypassed before it is initiated. Consequently, the initial part of the denoising process is skipped and it is assumed that the passed `image` is a partly denoised image. Note that when this is specified, strength will be ignored. The `denoising_start` parameter is particularly beneficial when this pipeline is integrated into a "Mixture of Denoisers" multi-pipeline setup, as detailed in [**Refine Image Quality**](https://huggingface.co/docs/diffusers/using-diffusers/sdxl#refine-image-quality). denoising_end (`float`, *optional*): When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be completed before it is intentionally prematurely terminated. As a result, the returned sample will still retain a substantial amount of noise (ca. final 20% of timesteps still needed) and should be denoised by a successor pipeline that has `denoising_start` set to 0.8 so that it only denoises the final 20% of the scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refine Image Quality**](https://huggingface.co/docs/diffusers/using-diffusers/sdxl#refine-image-quality). guidance_scale (`float`, *optional*, defaults to 5.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str`): The prompt not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). negative_prompt_2 (`str`): The prompt not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. 
latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead of a plain tuple. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). guidance_rescale (`float`, *optional*, defaults to 0.0): Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `φ` in equation 16. of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when using zero terminal SNR. original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): For most cases, `target_size` should be set to the desired height and width of the generated image. 
If not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeine class. Examples: Returns: [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images. """ callback = kwargs.pop("callback", None) callback_steps = kwargs.pop("callback_steps", None) if callback is not None: deprecate( "callback", "1.0.0", "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", ) if callback_steps is not None: deprecate( "callback_steps", "1.0.0", "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", ) # 0. Default height and width to unet height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, prompt_2, height, width, strength, callback_steps, negative_prompt, negative_prompt_2, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, callback_on_step_end_tensor_inputs, ) self._guidance_scale = guidance_scale self._guidance_rescale = guidance_rescale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._denoising_end = denoising_end self._denoising_start = denoising_start # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device if ip_adapter_image is not None: output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True image_embeds, negative_image_embeds = self.encode_image( ip_adapter_image, device, num_images_per_prompt, output_hidden_state ) if self.do_classifier_free_guidance: image_embeds = torch.cat([negative_image_embeds, image_embeds]) # 3. 
Encode input prompt (self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None) negative_prompt = negative_prompt if negative_prompt is not None else "" ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = get_weighted_text_embeddings_sdxl( pipe=self, prompt=prompt, neg_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, clip_skip=clip_skip, ) dtype = prompt_embeds.dtype if isinstance(image, Image.Image): image = self.image_processor.preprocess(image, height=height, width=width) if image is not None: image = image.to(device=self.device, dtype=dtype) if isinstance(mask_image, Image.Image): mask = self.mask_processor.preprocess(mask_image, height=height, width=width) else: mask = mask_image if mask_image is not None: mask = mask.to(device=self.device, dtype=dtype) if masked_image_latents is not None: masked_image = masked_image_latents elif image.shape[1] == 4: # if image is in latent space, we can't mask it masked_image = None else: masked_image = image * (mask < 0.5) else: mask = None # 4. Prepare timesteps def denoising_value_valid(dnv): return isinstance(dnv, float) and 0 < dnv < 1 timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) if image is not None: timesteps, num_inference_steps = self.get_timesteps( num_inference_steps, strength, device, denoising_start=self.denoising_start if denoising_value_valid(self.denoising_start) else None, ) # check that number of inference steps is not < 1 - as this doesn't make sense if num_inference_steps < 1: raise ValueError( f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline" f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline." ) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) is_strength_max = strength == 1.0 add_noise = True if self.denoising_start is None else False # 5. Prepare latent variables num_channels_latents = self.vae.config.latent_channels num_channels_unet = self.unet.config.in_channels return_image_latents = num_channels_unet == 4 latents = self.prepare_latents( image=image, mask=mask, width=width, height=height, num_channels_latents=num_channels_unet, timestep=latent_timestep, batch_size=batch_size, num_images_per_prompt=num_images_per_prompt, dtype=prompt_embeds.dtype, device=device, generator=generator, add_noise=add_noise, latents=latents, is_strength_max=is_strength_max, return_noise=True, return_image_latents=return_image_latents, ) if mask is not None: if return_image_latents: latents, noise, image_latents = latents else: latents, noise = latents # 5.1 Prepare mask latent variables if mask is not None: mask, masked_image_latents = self.prepare_mask_latents( mask=mask, masked_image=masked_image, batch_size=batch_size * num_images_per_prompt, height=height, width=width, dtype=prompt_embeds.dtype, device=device, generator=generator, do_classifier_free_guidance=self.do_classifier_free_guidance, ) # Check that sizes of mask, masked image and latents match if num_channels_unet == 9: # default case for runwayml/stable-diffusion-inpainting num_channels_mask = mask.shape[1] num_channels_masked_image = masked_image_latents.shape[1] if num_channels_latents + num_channels_mask + num_channels_masked_image != num_channels_unet: raise ValueError( f"Incorrect configuration settings! 
The config of `pipeline.unet`: {self.unet.config} expects" f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}" f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of" " `pipeline.unet` or your `mask_image` or `image` input." ) elif num_channels_unet != 4: raise ValueError( f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}." ) # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 6.1 Add image embeds for IP-Adapter added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else {} height, width = latents.shape[-2:] height = height * self.vae_scale_factor width = width * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) # 7. Prepare added time ids & embeddings add_text_embeds = pooled_prompt_embeds add_time_ids = self._get_add_time_ids( original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype ) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) # 7.1 Apply denoising_end if ( self.denoising_end is not None and self.denoising_start is not None and denoising_value_valid(self.denoising_end) and denoising_value_valid(self.denoising_start) and self.denoising_start >= self.denoising_end ): raise ValueError( f"`denoising_start`: {self.denoising_start} cannot be larger than or equal to `denoising_end`: " + f" {self.denoising_end} when using type float." ) elif self.denoising_end is not None and denoising_value_valid(self.denoising_end): discrete_timestep_cutoff = int( round( self.scheduler.config.num_train_timesteps - (self.denoising_end * self.scheduler.config.num_train_timesteps) ) ) num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) timesteps = timesteps[:num_inference_steps] # 8. Optionally get Guidance Scale Embedding timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding( guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim ).to(device=device, dtype=latents.dtype) self._num_timesteps = len(timesteps) # 9. 
Denoising loop with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) if mask is not None and num_channels_unet == 9: latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1) # predict the noise residual added_cond_kwargs.update({"text_embeds": add_text_embeds, "time_ids": add_time_ids}) noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, cross_attention_kwargs=self.cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0] # perform guidance if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) if self.do_classifier_free_guidance and guidance_rescale > 0.0: # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if mask is not None and num_channels_unet == 4: init_latents_proper = image_latents if self.do_classifier_free_guidance: init_mask, _ = mask.chunk(2) else: init_mask = mask if i < len(timesteps) - 1: noise_timestep = timesteps[i + 1] init_latents_proper = self.scheduler.add_noise( init_latents_proper, noise, torch.tensor([noise_timestep]) ) latents = (1 - init_mask) * init_latents_proper + init_mask * latents if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) negative_pooled_prompt_embeds = callback_outputs.pop( "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds ) add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if not output_type == "latent": # make sure the VAE is in float32 mode, as it overflows in float16 needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] # cast back to fp16 if needed if needs_upcasting: self.vae.to(dtype=torch.float16) else: image = latents return StableDiffusionXLPipelineOutput(images=image) # apply watermark if available if self.watermark is not None: image = self.watermark.apply_watermark(image) image = self.image_processor.postprocess(image, output_type=output_type) # Offload last model to CPU if hasattr(self, 
"final_offload_hook") and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (image,) return StableDiffusionXLPipelineOutput(images=image) def text2img( self, prompt: str = None, prompt_2: Optional[str] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, timesteps: List[int] = None, denoising_start: Optional[float] = None, denoising_end: Optional[float] = None, guidance_scale: float = 5.0, negative_prompt: Optional[str] = None, negative_prompt_2: Optional[str] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, ip_adapter_image: Optional[PipelineImageInput] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, pooled_prompt_embeds: Optional[torch.FloatTensor] = None, negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guidance_rescale: float = 0.0, original_size: Optional[Tuple[int, int]] = None, crops_coords_top_left: Tuple[int, int] = (0, 0), target_size: Optional[Tuple[int, int]] = None, clip_skip: Optional[int] = None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], **kwargs, ): r""" Function invoked when calling pipeline for text-to-image. Refer to the documentation of the `__call__` method for parameter descriptions. """ return self.__call__( prompt=prompt, prompt_2=prompt_2, height=height, width=width, num_inference_steps=num_inference_steps, timesteps=timesteps, denoising_start=denoising_start, denoising_end=denoising_end, guidance_scale=guidance_scale, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, ip_adapter_image=ip_adapter_image, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, output_type=output_type, return_dict=return_dict, cross_attention_kwargs=cross_attention_kwargs, guidance_rescale=guidance_rescale, original_size=original_size, crops_coords_top_left=crops_coords_top_left, target_size=target_size, clip_skip=clip_skip, callback_on_step_end=callback_on_step_end, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, **kwargs, ) def img2img( self, prompt: str = None, prompt_2: Optional[str] = None, image: Optional[PipelineImageInput] = None, height: Optional[int] = None, width: Optional[int] = None, strength: float = 0.8, num_inference_steps: int = 50, timesteps: List[int] = None, denoising_start: Optional[float] = None, denoising_end: Optional[float] = None, guidance_scale: float = 5.0, negative_prompt: Optional[str] = None, negative_prompt_2: Optional[str] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, ip_adapter_image: Optional[PipelineImageInput] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, pooled_prompt_embeds: Optional[torch.FloatTensor] = None, negative_pooled_prompt_embeds: 
Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guidance_rescale: float = 0.0, original_size: Optional[Tuple[int, int]] = None, crops_coords_top_left: Tuple[int, int] = (0, 0), target_size: Optional[Tuple[int, int]] = None, clip_skip: Optional[int] = None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], **kwargs, ): r""" Function invoked when calling pipeline for image-to-image. Refer to the documentation of the `__call__` method for parameter descriptions. """ return self.__call__( prompt=prompt, prompt_2=prompt_2, image=image, height=height, width=width, strength=strength, num_inference_steps=num_inference_steps, timesteps=timesteps, denoising_start=denoising_start, denoising_end=denoising_end, guidance_scale=guidance_scale, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, ip_adapter_image=ip_adapter_image, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, output_type=output_type, return_dict=return_dict, cross_attention_kwargs=cross_attention_kwargs, guidance_rescale=guidance_rescale, original_size=original_size, crops_coords_top_left=crops_coords_top_left, target_size=target_size, clip_skip=clip_skip, callback_on_step_end=callback_on_step_end, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, **kwargs, ) def inpaint( self, prompt: str = None, prompt_2: Optional[str] = None, image: Optional[PipelineImageInput] = None, mask_image: Optional[PipelineImageInput] = None, masked_image_latents: Optional[torch.FloatTensor] = None, height: Optional[int] = None, width: Optional[int] = None, strength: float = 0.8, num_inference_steps: int = 50, timesteps: List[int] = None, denoising_start: Optional[float] = None, denoising_end: Optional[float] = None, guidance_scale: float = 5.0, negative_prompt: Optional[str] = None, negative_prompt_2: Optional[str] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, ip_adapter_image: Optional[PipelineImageInput] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, pooled_prompt_embeds: Optional[torch.FloatTensor] = None, negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guidance_rescale: float = 0.0, original_size: Optional[Tuple[int, int]] = None, crops_coords_top_left: Tuple[int, int] = (0, 0), target_size: Optional[Tuple[int, int]] = None, clip_skip: Optional[int] = None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], **kwargs, ): r""" Function invoked when calling pipeline for inpainting. Refer to the documentation of the `__call__` method for parameter descriptions. 
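Example (a minimal sketch; the checkpoint id, loading via `custom_pipeline`, and the strength/step values are illustrative assumptions rather than requirements of this method; the image URLs are the same ones used elsewhere in these community examples): ```py
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import load_image
>>> pipe = DiffusionPipeline.from_pretrained(
...     "stabilityai/stable-diffusion-xl-base-1.0",
...     custom_pipeline="lpw_stable_diffusion_xl",
...     torch_dtype=torch.float16,
... ).to("cuda")
>>> init_image = load_image(
...     "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
... ).resize((1024, 1024))
>>> mask_image = load_image(
...     "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
... ).resize((1024, 1024))
>>> # white mask pixels are the ones that get repainted
>>> image = pipe.inpaint(
...     prompt="a white cat sitting on a park bench",
...     image=init_image,
...     mask_image=mask_image,
...     strength=0.85,
...     num_inference_steps=30,
... ).images[0]
```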
""" return self.__call__( prompt=prompt, prompt_2=prompt_2, image=image, mask_image=mask_image, masked_image_latents=masked_image_latents, height=height, width=width, strength=strength, num_inference_steps=num_inference_steps, timesteps=timesteps, denoising_start=denoising_start, denoising_end=denoising_end, guidance_scale=guidance_scale, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, ip_adapter_image=ip_adapter_image, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, output_type=output_type, return_dict=return_dict, cross_attention_kwargs=cross_attention_kwargs, guidance_rescale=guidance_rescale, original_size=original_size, crops_coords_top_left=crops_coords_top_left, target_size=target_size, clip_skip=clip_skip, callback_on_step_end=callback_on_step_end, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, **kwargs, ) # Overrride to properly handle the loading and unloading of the additional text encoder. def load_lora_weights(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs): # We could have accessed the unet config from `lora_state_dict()` too. We pass # it here explicitly to be able to tell that it's coming from an SDXL # pipeline. state_dict, network_alphas = self.lora_state_dict( pretrained_model_name_or_path_or_dict, unet_config=self.unet.config, **kwargs, ) self.load_lora_into_unet(state_dict, network_alphas=network_alphas, unet=self.unet) text_encoder_state_dict = {k: v for k, v in state_dict.items() if "text_encoder." in k} if len(text_encoder_state_dict) > 0: self.load_lora_into_text_encoder( text_encoder_state_dict, network_alphas=network_alphas, text_encoder=self.text_encoder, prefix="text_encoder", lora_scale=self.lora_scale, ) text_encoder_2_state_dict = {k: v for k, v in state_dict.items() if "text_encoder_2." 
in k} if len(text_encoder_2_state_dict) > 0: self.load_lora_into_text_encoder( text_encoder_2_state_dict, network_alphas=network_alphas, text_encoder=self.text_encoder_2, prefix="text_encoder_2", lora_scale=self.lora_scale, ) @classmethod def save_lora_weights( self, save_directory: Union[str, os.PathLike], unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, text_encoder_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, text_encoder_2_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, is_main_process: bool = True, weight_name: str = None, save_function: Callable = None, safe_serialization: bool = False, ): state_dict = {} def pack_weights(layers, prefix): layers_weights = layers.state_dict() if isinstance(layers, torch.nn.Module) else layers layers_state_dict = {f"{prefix}.{module_name}": param for module_name, param in layers_weights.items()} return layers_state_dict state_dict.update(pack_weights(unet_lora_layers, "unet")) if text_encoder_lora_layers and text_encoder_2_lora_layers: state_dict.update(pack_weights(text_encoder_lora_layers, "text_encoder")) state_dict.update(pack_weights(text_encoder_2_lora_layers, "text_encoder_2")) self.write_lora_layers( state_dict=state_dict, save_directory=save_directory, is_main_process=is_main_process, weight_name=weight_name, save_function=save_function, safe_serialization=safe_serialization, ) def _remove_text_encoder_monkey_patch(self): self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder) self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder_2)
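# Usage note (a sketch, not executed here; the base checkpoint is real but the LoRA repo id is a
# hypothetical placeholder): because `load_lora_weights` above routes the "unet", "text_encoder."
# and "text_encoder_2." keys to their matching sub-models, a standard SDXL LoRA can be attached
# the same way as with the official SDXL pipelines, e.g.:
#
#   import torch
#   from diffusers import DiffusionPipeline
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "stabilityai/stable-diffusion-xl-base-1.0",
#       custom_pipeline="lpw_stable_diffusion_xl",
#       torch_dtype=torch.float16,
#   ).to("cuda")
#   pipe.load_lora_weights("some-user/some-sdxl-lora")  # hypothetical LoRA repo id
#   image = pipe.text2img("a cozy cabin in the woods, watercolor").images[0]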
diffusers/examples/community/lpw_stable_diffusion_xl.py/0
{ "file_path": "diffusers/examples/community/lpw_stable_diffusion_xl.py", "repo_id": "diffusers", "token_count": 47271 }
99
# Copyright 2024 TencentARC and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import PIL.Image import torch import torch.nn.functional as F from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers.image_processor import PipelineImageInput, VaeImageProcessor from diffusers.loaders import FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin from diffusers.models import AutoencoderKL, ControlNetModel, MultiAdapter, T2IAdapter, UNet2DConditionModel from diffusers.models.attention_processor import ( AttnProcessor2_0, LoRAAttnProcessor2_0, LoRAXFormersAttnProcessor, XFormersAttnProcessor, ) from diffusers.models.lora import adjust_lora_scale_text_encoder from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import ( PIL_INTERPOLATION, USE_PEFT_BACKEND, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from diffusers.utils.torch_utils import is_compiled_module, randn_tensor logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import T2IAdapter, StableDiffusionXLAdapterPipeline, DDPMScheduler >>> from diffusers.utils import load_image >>> from controlnet_aux.midas import MidasDetector >>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" >>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" >>> image = load_image(img_url).resize((1024, 1024)) >>> mask_image = load_image(mask_url).resize((1024, 1024)) >>> midas_depth = MidasDetector.from_pretrained( ... "valhalla/t2iadapter-aux-models", filename="dpt_large_384.pt", model_type="dpt_large" ... ).to("cuda") >>> depth_image = midas_depth( ... image, detect_resolution=512, image_resolution=1024 ... ) >>> model_id = "stabilityai/stable-diffusion-xl-base-1.0" >>> adapter = T2IAdapter.from_pretrained( ... "Adapter/t2iadapter", ... subfolder="sketch_sdxl_1.0", ... torch_dtype=torch.float16, ... adapter_type="full_adapter_xl", ... ) >>> controlnet = ControlNetModel.from_pretrained( ... "diffusers/controlnet-depth-sdxl-1.0", ... torch_dtype=torch.float16, ... variant="fp16", ... use_safetensors=True ... ).to("cuda") >>> scheduler = DDPMScheduler.from_pretrained(model_id, subfolder="scheduler") >>> pipe = StableDiffusionXLAdapterPipeline.from_pretrained( ... model_id, ... adapter=adapter, ... controlnet=controlnet, ... torch_dtype=torch.float16, ... variant="fp16", ... 
scheduler=scheduler ... ).to("cuda") >>> strength = 0.5 >>> generator = torch.manual_seed(42) >>> sketch_image_out = pipe( ... prompt="a photo of a tiger sitting on a park bench", ... negative_prompt="extra digit, fewer digits, cropped, worst quality, low quality", ... adapter_image=depth_image, ... control_image=mask_image, ... adapter_conditioning_scale=strength, ... controlnet_conditioning_scale=strength, ... generator=generator, ... guidance_scale=7.5, ... ).images[0] ``` """ def _preprocess_adapter_image(image, height, width): if isinstance(image, torch.Tensor): return image elif isinstance(image, PIL.Image.Image): image = [image] if isinstance(image[0], PIL.Image.Image): image = [np.array(i.resize((width, height), resample=PIL_INTERPOLATION["lanczos"])) for i in image] image = [ i[None, ..., None] if i.ndim == 2 else i[None, ...] for i in image ] # expand [h, w] or [h, w, c] to [b, h, w, c] image = np.concatenate(image, axis=0) image = np.array(image).astype(np.float32) / 255.0 image = image.transpose(0, 3, 1, 2) image = torch.from_numpy(image) elif isinstance(image[0], torch.Tensor): if image[0].ndim == 3: image = torch.stack(image, dim=0) elif image[0].ndim == 4: image = torch.cat(image, dim=0) else: raise ValueError( f"Invalid image tensor! Expecting image tensor with 3 or 4 dimension, but recive: {image[0].ndim}" ) return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): """ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 """ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg class StableDiffusionXLControlNetAdapterPipeline( DiffusionPipeline, StableDiffusionMixin, FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin, ): r""" Pipeline for text-to-image generation using Stable Diffusion augmented with T2I-Adapter https://arxiv.org/abs/2302.08453 This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: adapter ([`T2IAdapter`] or [`MultiAdapter`] or `List[T2IAdapter]`): Provides additional conditioning to the unet during the denoising process. If you set multiple Adapter as a list, the outputs from each Adapter are added together to create one combined additional conditioning. adapter_weights (`List[float]`, *optional*, defaults to None): List of floats representing the weight which will be multiply to each adapter's output before adding them together. vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. 
Stable Diffusion uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. feature_extractor ([`CLIPFeatureExtractor`]): Model that extracts features from generated images to be used as inputs for the `safety_checker`. """ model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae" _optional_components = ["tokenizer", "tokenizer_2", "text_encoder", "text_encoder_2"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, text_encoder_2: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: UNet2DConditionModel, adapter: Union[T2IAdapter, MultiAdapter, List[T2IAdapter]], controlnet: Union[ControlNetModel, MultiControlNetModel], scheduler: KarrasDiffusionSchedulers, force_zeros_for_empty_prompt: bool = True, ): super().__init__() if isinstance(controlnet, (list, tuple)): controlnet = MultiControlNetModel(controlnet) self.register_modules( vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, adapter=adapter, controlnet=controlnet, scheduler=scheduler, ) self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.control_image_processor = VaeImageProcessor( vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False ) self.default_sample_size = self.unet.config.sample_size # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt def encode_prompt( self, prompt: str, prompt_2: Optional[str] = None, device: Optional[torch.device] = None, num_images_per_prompt: int = 1, do_classifier_free_guidance: bool = True, negative_prompt: Optional[str] = None, negative_prompt_2: Optional[str] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, pooled_prompt_embeds: Optional[torch.FloatTensor] = None, negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. 
If not defined, `prompt` is used in both text-encoders device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. 
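Example (a sketch; `pipe` is assumed to be an already-instantiated `StableDiffusionXLControlNetAdapterPipeline`): ```py
>>> (
...     prompt_embeds,
...     negative_prompt_embeds,
...     pooled_prompt_embeds,
...     negative_pooled_prompt_embeds,
... ) = pipe.encode_prompt(
...     prompt="a photo of an astronaut riding a horse",
...     negative_prompt="low quality, blurry",
...     num_images_per_prompt=1,
...     do_classifier_free_guidance=True,
... )
``` The four returned tensors can be passed back through the corresponding `prompt_embeds` / `pooled_prompt_embeds` arguments of `__call__` to avoid re-encoding the same text.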
""" device = device or self._execution_device # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if self.text_encoder is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) else: scale_lora_layers(self.text_encoder_2, lora_scale) prompt = [prompt] if isinstance(prompt, str) else prompt if prompt is not None: batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] # Define tokenizers and text encoders tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = ( [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] ) if prompt_embeds is None: prompt_2 = prompt_2 or prompt prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 # textual inversion: process multi-vector tokens if necessary prompt_embeds_list = [] prompts = [prompt, prompt_2] for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, tokenizer) text_inputs = tokenizer( prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {tokenizer.model_max_length} tokens: {removed_text}" ) prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True) # We are only ALWAYS interested in the pooled output of the final text encoder pooled_prompt_embeds = prompt_embeds[0] if clip_skip is None: prompt_embeds = prompt_embeds.hidden_states[-2] else: # "2" because SDXL always indexes from the penultimate layer. 
prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) # get unconditional embeddings for classifier free guidance zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or "" negative_prompt_2 = negative_prompt_2 or negative_prompt # normalize str to list negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt negative_prompt_2 = ( batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2 ) uncond_tokens: List[str] if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) max_length = prompt_embeds.shape[1] uncond_input = tokenizer( negative_prompt, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) negative_prompt_embeds = text_encoder( uncond_input.input_ids.to(device), output_hidden_states=True, ) # We are only ALWAYS interested in the pooled output of the final text encoder negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) if self.text_encoder_2 is not None: prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] if self.text_encoder_2 is not None: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) else: negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) 
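# Shape note: `prompt_embeds` now has shape (batch_size * num_images_per_prompt, seq_len, embed_dim) and `pooled_prompt_embeds` has shape (batch_size * num_images_per_prompt, pooled_dim); `negative_pooled_prompt_embeds` is expanded the same way below so the unconditional and conditional batches can later be concatenated for classifier-free guidance.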
if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) if self.text_encoder is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) if self.text_encoder_2 is not None: if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder_2, lora_scale) return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image def check_image(self, image, prompt, prompt_embeds): image_is_pil = isinstance(image, PIL.Image.Image) image_is_tensor = isinstance(image, torch.Tensor) image_is_np = isinstance(image, np.ndarray) image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) if ( not image_is_pil and not image_is_tensor and not image_is_np and not image_is_pil_list and not image_is_tensor_list and not image_is_np_list ): raise TypeError( f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" ) if image_is_pil: image_batch_size = 1 else: image_batch_size = len(image) if prompt is not None and isinstance(prompt, str): prompt_batch_size = 1 elif prompt is not None and isinstance(prompt, list): prompt_batch_size = len(prompt) elif prompt_embeds is not None: prompt_batch_size = prompt_embeds.shape[0] if image_batch_size != 1 and image_batch_size != prompt_batch_size: raise ValueError( f"If image batch size is not 1, image batch size must be same as prompt batch size. 
image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" ) # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.check_inputs def check_inputs( self, prompt, prompt_2, height, width, callback_steps, negative_prompt=None, negative_prompt_2=None, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt_2 is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError( "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." ) if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError( "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. 
Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." ) def check_conditions( self, prompt, prompt_embeds, adapter_image, control_image, adapter_conditioning_scale, controlnet_conditioning_scale, control_guidance_start, control_guidance_end, ): # controlnet checks if not isinstance(control_guidance_start, (tuple, list)): control_guidance_start = [control_guidance_start] if not isinstance(control_guidance_end, (tuple, list)): control_guidance_end = [control_guidance_end] if len(control_guidance_start) != len(control_guidance_end): raise ValueError( f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." ) if isinstance(self.controlnet, MultiControlNetModel): if len(control_guidance_start) != len(self.controlnet.nets): raise ValueError( f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}." ) for start, end in zip(control_guidance_start, control_guidance_end): if start >= end: raise ValueError( f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." ) if start < 0.0: raise ValueError(f"control guidance start: {start} can't be smaller than 0.") if end > 1.0: raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") # Check controlnet `image` is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( self.controlnet, torch._dynamo.eval_frame.OptimizedModule ) if ( isinstance(self.controlnet, ControlNetModel) or is_compiled and isinstance(self.controlnet._orig_mod, ControlNetModel) ): self.check_image(control_image, prompt, prompt_embeds) elif ( isinstance(self.controlnet, MultiControlNetModel) or is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel) ): if not isinstance(control_image, list): raise TypeError("For multiple controlnets: `control_image` must be type `list`") # When `image` is a nested list: # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]]) elif any(isinstance(i, list) for i in control_image): raise ValueError("A single batch of multiple conditionings are supported at the moment.") elif len(control_image) != len(self.controlnet.nets): raise ValueError( f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(control_image)} images and {len(self.controlnet.nets)} ControlNets." 
) for image_ in control_image: self.check_image(image_, prompt, prompt_embeds) else: assert False # Check `controlnet_conditioning_scale` if ( isinstance(self.controlnet, ControlNetModel) or is_compiled and isinstance(self.controlnet._orig_mod, ControlNetModel) ): if not isinstance(controlnet_conditioning_scale, float): raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") elif ( isinstance(self.controlnet, MultiControlNetModel) or is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel) ): if isinstance(controlnet_conditioning_scale, list): if any(isinstance(i, list) for i in controlnet_conditioning_scale): raise ValueError("A single batch of multiple conditionings are supported at the moment.") elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( self.controlnet.nets ): raise ValueError( "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" " the same length as the number of controlnets" ) else: assert False # adapter checks if isinstance(self.adapter, T2IAdapter) or is_compiled and isinstance(self.adapter._orig_mod, T2IAdapter): self.check_image(adapter_image, prompt, prompt_embeds) elif ( isinstance(self.adapter, MultiAdapter) or is_compiled and isinstance(self.adapter._orig_mod, MultiAdapter) ): if not isinstance(adapter_image, list): raise TypeError("For multiple adapters: `adapter_image` must be type `list`") # When `image` is a nested list: # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]]) elif any(isinstance(i, list) for i in adapter_image): raise ValueError("A single batch of multiple conditionings are supported at the moment.") elif len(adapter_image) != len(self.adapter.adapters): raise ValueError( f"For multiple adapters: `image` must have the same length as the number of adapters, but got {len(adapter_image)} images and {len(self.adapters.nets)} Adapters." ) for image_ in adapter_image: self.check_image(image_, prompt, prompt_embeds) else: assert False # Check `adapter_conditioning_scale` if isinstance(self.adapter, T2IAdapter) or is_compiled and isinstance(self.adapter._orig_mod, T2IAdapter): if not isinstance(adapter_conditioning_scale, float): raise TypeError("For single adapter: `adapter_conditioning_scale` must be type `float`.") elif ( isinstance(self.adapter, MultiAdapter) or is_compiled and isinstance(self.adapter._orig_mod, MultiAdapter) ): if isinstance(adapter_conditioning_scale, list): if any(isinstance(i, list) for i in adapter_conditioning_scale): raise ValueError("A single batch of multiple conditionings are supported at the moment.") elif isinstance(adapter_conditioning_scale, list) and len(adapter_conditioning_scale) != len( self.adapter.adapters ): raise ValueError( "For multiple adapters: When `adapter_conditioning_scale` is specified as `list`, it must have" " the same length as the number of adapters" ) else: assert False # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. 
Make sure the batch size matches the length of the generators." ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids def _get_add_time_ids( self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None ): add_time_ids = list(original_size + crops_coords_top_left + target_size) passed_add_embed_dim = ( self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim ) expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim != passed_add_embed_dim: raise ValueError( f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." ) add_time_ids = torch.tensor([add_time_ids], dtype=dtype) return add_time_ids # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance( self.vae.decoder.mid_block.attentions[0].processor, ( AttnProcessor2_0, XFormersAttnProcessor, LoRAXFormersAttnProcessor, LoRAAttnProcessor2_0, ), ) # if xformers or torch_2_0 is used attention block does not need # to be in float32 which can save lots of memory if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) # Copied from diffusers.pipelines.t2i_adapter.pipeline_stable_diffusion_adapter.StableDiffusionAdapterPipeline._default_height_width def _default_height_width(self, height, width, image): # NOTE: It is possible that a list of images have different # dimensions for each image, so just checking the first image # is not _exactly_ correct, but it is simple. 
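# The loop below unwraps (possibly nested) lists until a single image remains, then infers `height`/`width` from it and rounds each down to a multiple of `self.adapter.downscale_factor` (e.g. a 1000 px side with a downscale factor of 16 becomes 992 px) so the conditioning size is compatible with the adapter's downsampling.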
while isinstance(image, list): image = image[0] if height is None: if isinstance(image, PIL.Image.Image): height = image.height elif isinstance(image, torch.Tensor): height = image.shape[-2] # round down to nearest multiple of `self.adapter.downscale_factor` height = (height // self.adapter.downscale_factor) * self.adapter.downscale_factor if width is None: if isinstance(image, PIL.Image.Image): width = image.width elif isinstance(image, torch.Tensor): width = image.shape[-1] # round down to nearest multiple of `self.adapter.downscale_factor` width = (width // self.adapter.downscale_factor) * self.adapter.downscale_factor return height, width def prepare_control_image( self, image, width, height, batch_size, num_images_per_prompt, device, dtype, do_classifier_free_guidance=False, guess_mode=False, ): image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) image_batch_size = image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: # image batch size is the same as prompt batch size repeat_by = num_images_per_prompt image = image.repeat_interleave(repeat_by, dim=0) image = image.to(device=device, dtype=dtype) if do_classifier_free_guidance and not guess_mode: image = torch.cat([image] * 2) return image @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, prompt_2: Optional[Union[str, List[str]]] = None, adapter_image: PipelineImageInput = None, control_image: PipelineImageInput = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, denoising_end: Optional[float] = None, guidance_scale: float = 5.0, negative_prompt: Optional[Union[str, List[str]]] = None, negative_prompt_2: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, pooled_prompt_embeds: Optional[torch.FloatTensor] = None, negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guidance_rescale: float = 0.0, original_size: Optional[Tuple[int, int]] = None, crops_coords_top_left: Tuple[int, int] = (0, 0), target_size: Optional[Tuple[int, int]] = None, negative_original_size: Optional[Tuple[int, int]] = None, negative_crops_coords_top_left: Tuple[int, int] = (0, 0), negative_target_size: Optional[Tuple[int, int]] = None, adapter_conditioning_scale: Union[float, List[float]] = 1.0, adapter_conditioning_factor: float = 1.0, clip_skip: Optional[int] = None, controlnet_conditioning_scale=1.0, guess_mode: bool = False, control_guidance_start: float = 0.0, control_guidance_end: float = 1.0, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. 
                If not defined, `prompt` is used in both text-encoders
            adapter_image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]` or `List[PIL.Image.Image]` or
                `List[List[PIL.Image.Image]]`):
                The Adapter input condition. Adapter uses this input condition to generate guidance to the UNet. If
                the type is specified as `torch.FloatTensor`, it is passed to Adapter as is. `PIL.Image.Image` can
                also be accepted as an image. The control image is automatically resized to fit the output image.
            control_image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,
                `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
                The ControlNet input condition to provide guidance to the `unet` for generation. If the type is
                specified as `torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be
                accepted as an image. The dimensions of the output image default to `image`'s dimensions. If height
                and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in
                `init`, images must be passed as a list such that each element of the list can be correctly batched
                for input to a single ControlNet.
            height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
                The height in pixels of the generated image. Anything below 512 pixels won't work well for
                [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
                and checkpoints that are not specifically fine-tuned on low resolutions.
            width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
                The width in pixels of the generated image. Anything below 512 pixels won't work well for
                [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
                and checkpoints that are not specifically fine-tuned on low resolutions.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            denoising_end (`float`, *optional*):
                When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
                completed before it is intentionally prematurely terminated. As a result, the returned sample will
                still retain a substantial amount of noise as determined by the discrete timesteps selected by the
                scheduler. The `denoising_end` parameter should ideally be utilized when this pipeline forms a part
                of a "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
                Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
            guidance_scale (`float`, *optional*, defaults to 5.0):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages generating images that are closely linked to the text
                `prompt`, usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
                is less than `1`).
            negative_prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
                `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
                not provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument.
            negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
                input argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionAdapterPipelineOutput`]
                instead of a plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            guidance_rescale (`float`, *optional*, defaults to 0.0):
                Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
                Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `φ` in equation 16. of
                [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
                Guidance rescale factor should fix overexposure when using zero terminal SNR.
            original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
                If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
                `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
                explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
            crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
                `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the
                position `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by
                setting `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section
                2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
            target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
                For most cases, `target_size` should be set to the desired height and width of the generated image.
                If not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained
                in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
            negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
                To negatively condition the generation process based on a specific image resolution. Part of SDXL's
                micro-conditioning as explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
                information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
            negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
                To negatively condition the generation process based on specific crop coordinates. Part of SDXL's
                micro-conditioning as explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
                information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
            negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
                To negatively condition the generation process based on a target image resolution. It should be the
                same as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section
                2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
                information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
            controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
                The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
                to the residual in the original unet. If multiple ControlNets are specified in init, you can set the
                corresponding scale as a list.
            adapter_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
                The outputs of the adapter are multiplied by `adapter_conditioning_scale` before they are added to
                the residual in the original unet. If multiple adapters are specified in init, you can set the
                corresponding scale as a list.
            adapter_conditioning_factor (`float`, *optional*, defaults to 1.0):
                The fraction of timesteps for which adapter should be applied. If `adapter_conditioning_factor` is
                `0.0`, adapter is not applied at all.
If `adapter_conditioning_factor` is `1.0`, adapter is applied for all timesteps. If `adapter_conditioning_factor` is `0.5`, adapter is applied for half of the timesteps. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. Examples: Returns: [`~pipelines.stable_diffusion.StableDiffusionAdapterPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionAdapterPipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images. """ controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet adapter = self.adapter._orig_mod if is_compiled_module(self.adapter) else self.adapter # 0. Default height and width to unet height, width = self._default_height_width(height, width, adapter_image) device = self._execution_device if isinstance(adapter, MultiAdapter): adapter_input = [] for one_image in adapter_image: one_image = _preprocess_adapter_image(one_image, height, width) one_image = one_image.to(device=device, dtype=adapter.dtype) adapter_input.append(one_image) else: adapter_input = _preprocess_adapter_image(adapter_image, height, width) adapter_input = adapter_input.to(device=device, dtype=adapter.dtype) original_size = original_size or (height, width) target_size = target_size or (height, width) # 0.1 align format for control guidance if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): control_guidance_start = len(control_guidance_end) * [control_guidance_start] elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): control_guidance_end = len(control_guidance_start) * [control_guidance_end] elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 control_guidance_start, control_guidance_end = ( mult * [control_guidance_start], mult * [control_guidance_end], ) if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) if isinstance(adapter, MultiAdapter) and isinstance(adapter_conditioning_scale, float): adapter_conditioning_scale = [adapter_conditioning_scale] * len(adapter.adapters) # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, prompt_2, height, width, callback_steps, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, ) self.check_conditions( prompt, prompt_embeds, adapter_image, control_image, adapter_conditioning_scale, controlnet_conditioning_scale, control_guidance_start, control_guidance_end, ) # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
do_classifier_free_guidance = guidance_scale > 1.0 # 3. Encode input prompt ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = self.encode_prompt( prompt=prompt, prompt_2=prompt_2, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, clip_skip=clip_skip, ) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7. Prepare added time ids & embeddings & adapter features if isinstance(adapter, MultiAdapter): adapter_state = adapter(adapter_input, adapter_conditioning_scale) for k, v in enumerate(adapter_state): adapter_state[k] = v else: adapter_state = adapter(adapter_input) for k, v in enumerate(adapter_state): adapter_state[k] = v * adapter_conditioning_scale if num_images_per_prompt > 1: for k, v in enumerate(adapter_state): adapter_state[k] = v.repeat(num_images_per_prompt, 1, 1, 1) if do_classifier_free_guidance: for k, v in enumerate(adapter_state): adapter_state[k] = torch.cat([v] * 2, dim=0) # 7.2 Prepare control images if isinstance(controlnet, ControlNetModel): control_image = self.prepare_control_image( image=control_image, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=do_classifier_free_guidance, guess_mode=guess_mode, ) elif isinstance(controlnet, MultiControlNetModel): control_images = [] for control_image_ in control_image: control_image_ = self.prepare_control_image( image=control_image_, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=do_classifier_free_guidance, guess_mode=guess_mode, ) control_images.append(control_image_) control_image = control_images else: raise ValueError(f"{controlnet.__class__} is not supported.") # 8.2 Create tensor stating which controlnets to keep controlnet_keep = [] for i in range(len(timesteps)): keeps = [ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) for s, e in zip(control_guidance_start, control_guidance_end) ] if isinstance(self.controlnet, MultiControlNetModel): controlnet_keep.append(keeps) else: controlnet_keep.append(keeps[0]) add_text_embeds = pooled_prompt_embeds if self.text_encoder_2 is None: text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim add_time_ids = self._get_add_time_ids( original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim, ) if negative_original_size is not None and negative_target_size is not None: negative_add_time_ids = 
self._get_add_time_ids( negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim, ) else: negative_add_time_ids = add_time_ids if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) # 8. Denoising loop num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) # 7.1 Apply denoising_end if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1: discrete_timestep_cutoff = int( round( self.scheduler.config.num_train_timesteps - (denoising_end * self.scheduler.config.num_train_timesteps) ) ) num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) timesteps = timesteps[:num_inference_steps] with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} if i < int(num_inference_steps * adapter_conditioning_factor): down_intrablock_additional_residuals = [state.clone() for state in adapter_state] else: down_intrablock_additional_residuals = None # ----------- ControlNet # expand the latents if we are doing classifier free guidance latent_model_input_controlnet = torch.cat([latents] * 2) if do_classifier_free_guidance else latents # concat latents, mask, masked_image_latents in the channel dimension latent_model_input_controlnet = self.scheduler.scale_model_input(latent_model_input_controlnet, t) # controlnet(s) inference if guess_mode and do_classifier_free_guidance: # Infer ControlNet only for the conditional batch. 
control_model_input = latents control_model_input = self.scheduler.scale_model_input(control_model_input, t) controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] controlnet_added_cond_kwargs = { "text_embeds": add_text_embeds.chunk(2)[1], "time_ids": add_time_ids.chunk(2)[1], } else: control_model_input = latent_model_input_controlnet controlnet_prompt_embeds = prompt_embeds controlnet_added_cond_kwargs = added_cond_kwargs if isinstance(controlnet_keep[i], list): cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] down_block_res_samples, mid_block_res_sample = self.controlnet( control_model_input, t, encoder_hidden_states=controlnet_prompt_embeds, controlnet_cond=control_image, conditioning_scale=cond_scale, guess_mode=guess_mode, added_cond_kwargs=controlnet_added_cond_kwargs, return_dict=False, ) noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False, down_intrablock_additional_residuals=down_intrablock_additional_residuals, # t2iadapter down_block_additional_residuals=down_block_res_samples, # controlnet mid_block_additional_residual=mid_block_res_sample, # controlnet )[0] # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) if do_classifier_free_guidance and guidance_rescale > 0.0: # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if not output_type == "latent": # make sure the VAE is in float32 mode, as it overflows in float16 needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] # cast back to fp16 if needed if needs_upcasting: self.vae.to(dtype=torch.float16) else: image = latents return StableDiffusionXLPipelineOutput(images=image) image = self.image_processor.postprocess(image, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return StableDiffusionXLPipelineOutput(images=image)
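

# --------------------------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original pipeline module). It shows how the
# combined T2I-Adapter + ControlNet SDXL pipeline above might be driven. The class name
# `StableDiffusionXLControlNetAdapterPipeline` (defined earlier in this file, outside this
# excerpt), the model repo ids, the conditioning-image URL and the availability of a CUDA
# device are all assumptions; only the call arguments (`adapter_image`, `control_image`,
# `adapter_conditioning_scale`, `controlnet_conditioning_scale`, `num_inference_steps`) are
# taken from the `__call__` signature documented above.
if __name__ == "__main__":
    import torch

    from diffusers import ControlNetModel, T2IAdapter
    from diffusers.utils import load_image

    controlnet = ControlNetModel.from_pretrained(
        "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16  # assumed repo id
    )
    adapter = T2IAdapter.from_pretrained(
        "TencentARC/t2i-adapter-canny-sdxl-1.0", torch_dtype=torch.float16  # assumed repo id
    )
    pipe = StableDiffusionXLControlNetAdapterPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0",
        controlnet=controlnet,
        adapter=adapter,
        torch_dtype=torch.float16,
    ).to("cuda")

    # Hypothetical canny-edge conditioning image; both the adapter and the controlnet are fed
    # the same map here purely for illustration.
    canny_edges = load_image("https://example.com/canny_edges.png")
    image = pipe(
        prompt="a photo of a futuristic city at dusk",
        adapter_image=canny_edges,
        control_image=canny_edges,
        adapter_conditioning_scale=0.8,
        controlnet_conditioning_scale=0.5,
        num_inference_steps=30,
    ).images[0]
    image.save("controlnet_adapter_sdxl.png")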
diffusers/examples/community/pipeline_stable_diffusion_xl_controlnet_adapter.py/0
{ "file_path": "diffusers/examples/community/pipeline_stable_diffusion_xl_controlnet_adapter.py", "repo_id": "diffusers", "token_count": 32940 }
100
import inspect import os import random import re from dataclasses import dataclass from typing import Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL, UNet2DConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import deprecate, logging logger = logging.get_logger(__name__) # pylint: disable=invalid-name global_re_wildcard = re.compile(r"__([^_]*)__") def get_filename(path: str): # this doesn't work on Windows return os.path.basename(path).split(".txt")[0] def read_wildcard_values(path: str): with open(path, encoding="utf8") as f: return f.read().splitlines() def grab_wildcard_values(wildcard_option_dict: Dict[str, List[str]] = {}, wildcard_files: List[str] = []): for wildcard_file in wildcard_files: filename = get_filename(wildcard_file) read_values = read_wildcard_values(wildcard_file) if filename not in wildcard_option_dict: wildcard_option_dict[filename] = [] wildcard_option_dict[filename].extend(read_values) return wildcard_option_dict def replace_prompt_with_wildcards( prompt: str, wildcard_option_dict: Dict[str, List[str]] = {}, wildcard_files: List[str] = [] ): new_prompt = prompt # get wildcard options wildcard_option_dict = grab_wildcard_values(wildcard_option_dict, wildcard_files) for m in global_re_wildcard.finditer(new_prompt): wildcard_value = m.group() replace_value = random.choice(wildcard_option_dict[wildcard_value.strip("__")]) new_prompt = new_prompt.replace(wildcard_value, replace_value, 1) return new_prompt @dataclass class WildcardStableDiffusionOutput(StableDiffusionPipelineOutput): prompts: List[str] class WildcardStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin): r""" Example Usage: pipe = WildcardStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16, ) prompt = "__animal__ sitting on a __object__ wearing a __clothing__" out = pipe( prompt, wildcard_option_dict={ "clothing":["hat", "shirt", "scarf", "beret"] }, wildcard_files=["object.txt", "animal.txt"], num_prompt_samples=1 ) Pipeline for text-to-image generation with wild cards using Stable Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. Stable Diffusion uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. 
scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details. feature_extractor ([`CLIPImageProcessor`]): Model that extracts features from generated images to be used as inputs for the `safety_checker`. """ def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, wildcard_option_dict: Dict[str, List[str]] = {}, wildcard_files: List[str] = [], num_prompt_samples: Optional[int] = 1, **kwargs, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. height (`int`, *optional*, defaults to 512): The height in pixels of the generated image. width (`int`, *optional*, defaults to 512): The width in pixels of the generated image. 
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages generating images that are closely linked to the text
                `prompt`, usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e.,
                ignored if `guidance_scale` is less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator`, *optional*):
                A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor will be generated by sampling using the supplied random `generator`.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.
            wildcard_option_dict (Dict[str, List[str]]):
                dict with key as `wildcard` and values as a list of possible replacements. For example, for the
                prompt "A __animal__ sitting on a chair", a `wildcard_option_dict` can provide possible values for
                "animal" like this: {"animal": ["dog", "cat", "fox"]}
            wildcard_files: (List[str])
                List of filenames of txt files for wildcard replacements. For example, for the prompt
                "A __animal__ sitting on a chair", a file can be provided: ["animal.txt"]
            num_prompt_samples: int
                Number of times to sample wildcards for each prompt provided
        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images, and the second
            element is a list of `bool`s denoting whether the corresponding generated image likely represents
            "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
""" if isinstance(prompt, str): prompt = [ replace_prompt_with_wildcards(prompt, wildcard_option_dict, wildcard_files) for i in range(num_prompt_samples) ] batch_size = len(prompt) elif isinstance(prompt, list): prompt_list = [] for p in prompt: for i in range(num_prompt_samples): prompt_list.append(replace_prompt_with_wildcards(p, wildcard_option_dict, wildcard_files)) prompt = prompt_list batch_size = len(prompt) else: raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) # get prompt text embeddings text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", ) text_input_ids = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0] # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = text_embeddings.shape text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1) text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1) uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) latents_dtype = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to( self.device ) else: latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype) else: if latents.shape != latents_shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") latents = latents.to(self.device) # set timesteps self.scheduler.set_timesteps(num_inference_steps) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand timesteps_tensor = self.scheduler.timesteps.to(self.device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta for i, t in enumerate(self.progress_bar(timesteps_tensor)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) latents = 1 / 0.18215 * latents image = self.vae.decode(latents).sample image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() if self.safety_checker is not None: safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to( self.device ) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype) ) else: has_nsfw_concept = None if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image, has_nsfw_concept) return WildcardStableDiffusionOutput(images=image, 
nsfw_content_detected=has_nsfw_concept, prompts=prompt)
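

# --------------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): exercises only the wildcard replacement
# helpers defined above, without loading any model. The wildcard values below are made up for
# the demo; in real use they would come from `wildcard_files` or the caller's own dictionary.
if __name__ == "__main__":
    demo_options = {"animal": ["dog", "cat", "fox"], "object": ["chair", "bench"]}
    demo_prompt = "a __animal__ sitting on a __object__"
    # Each call picks a random replacement for every `__wildcard__` token in the prompt.
    for _ in range(3):
        print(replace_prompt_with_wildcards(demo_prompt, wildcard_option_dict=demo_options))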
diffusers/examples/community/wildcard_stable_diffusion.py/0
{ "file_path": "diffusers/examples/community/wildcard_stable_diffusion.py", "repo_id": "diffusers", "token_count": 9011 }
101
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import sys import tempfile sys.path.append("..") from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402 logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class ControlNet(ExamplesTestsAccelerate): def test_controlnet_checkpointing_checkpoints_total_limit(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/controlnet/train_controlnet.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --dataset_name=hf-internal-testing/fill10 --output_dir={tmpdir} --resolution=64 --train_batch_size=1 --gradient_accumulation_steps=1 --max_train_steps=6 --checkpoints_total_limit=2 --checkpointing_steps=2 --controlnet_model_name_or_path=hf-internal-testing/tiny-controlnet """.split() run_command(self._launch_args + test_args) self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"}, ) def test_controlnet_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/controlnet/train_controlnet.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --dataset_name=hf-internal-testing/fill10 --output_dir={tmpdir} --resolution=64 --train_batch_size=1 --gradient_accumulation_steps=1 --controlnet_model_name_or_path=hf-internal-testing/tiny-controlnet --max_train_steps=6 --checkpointing_steps=2 """.split() run_command(self._launch_args + test_args) self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-4", "checkpoint-6"}, ) resume_run_args = f""" examples/controlnet/train_controlnet.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --dataset_name=hf-internal-testing/fill10 --output_dir={tmpdir} --resolution=64 --train_batch_size=1 --gradient_accumulation_steps=1 --controlnet_model_name_or_path=hf-internal-testing/tiny-controlnet --max_train_steps=8 --checkpointing_steps=2 --resume_from_checkpoint=checkpoint-6 --checkpoints_total_limit=2 """.split() run_command(self._launch_args + resume_run_args) self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-6", "checkpoint-8"}) class ControlNetSDXL(ExamplesTestsAccelerate): def test_controlnet_sdxl(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/controlnet/train_controlnet_sdxl.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-xl-pipe --dataset_name=hf-internal-testing/fill10 --output_dir={tmpdir} --resolution=64 --train_batch_size=1 --gradient_accumulation_steps=1 --controlnet_model_name_or_path=hf-internal-testing/tiny-controlnet-sdxl --max_train_steps=4 --checkpointing_steps=2 """.split() run_command(self._launch_args + test_args) 
self.assertTrue(os.path.isfile(os.path.join(tmpdir, "diffusion_pytorch_model.safetensors")))
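

# --------------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original test file): these tests are normally collected
# by pytest, e.g. `pytest examples/controlnet/test_controlnet.py -k checkpoints_total_limit`.
# Assuming ExamplesTestsAccelerate is a unittest.TestCase subclass (it provides assertEqual /
# assertTrue above), they can also be run directly:
if __name__ == "__main__":
    import unittest

    unittest.main()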
diffusers/examples/controlnet/test_controlnet.py/0
{ "file_path": "diffusers/examples/controlnet/test_controlnet.py", "repo_id": "diffusers", "token_count": 2010 }
102
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import sys import tempfile import safetensors sys.path.append("..") from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402 logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class DreamBoothLoRASDXLWithEDM(ExamplesTestsAccelerate): def test_dreambooth_lora_sdxl_with_edm(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth_lora_sdxl.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-xl-pipe --do_edm_style_training --instance_data_dir docs/source/en/imgs --instance_prompt photo --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))) # make sure the state_dict has the correct naming in the parameters. lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")) is_lora = all("lora" in k for k in lora_state_dict.keys()) self.assertTrue(is_lora) # when not training the text encoder, all the parameters in the state dict should start # with `"unet"` in their names. starts_with_unet = all(key.startswith("unet") for key in lora_state_dict.keys()) self.assertTrue(starts_with_unet) def test_dreambooth_lora_playground(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/dreambooth/train_dreambooth_lora_sdxl.py --pretrained_model_name_or_path hf-internal-testing/tiny-playground-v2-5-pipe --instance_data_dir docs/source/en/imgs --instance_prompt photo --resolution 64 --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))) # make sure the state_dict has the correct naming in the parameters. lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")) is_lora = all("lora" in k for k in lora_state_dict.keys()) self.assertTrue(is_lora) # when not training the text encoder, all the parameters in the state dict should start # with `"unet"` in their names. starts_with_unet = all(key.startswith("unet") for key in lora_state_dict.keys()) self.assertTrue(starts_with_unet)
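

# --------------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original test file): the same key-naming checks the
# tests perform can be run by hand on any LoRA file written by the training script. The path
# below is hypothetical; point it at a real `pytorch_lora_weights.safetensors`.
if __name__ == "__main__":
    lora_path = "output/pytorch_lora_weights.safetensors"  # hypothetical output location
    state_dict = safetensors.torch.load_file(lora_path)
    print("all keys contain 'lora':", all("lora" in k for k in state_dict))
    print("all keys start with 'unet':", all(k.startswith("unet") for k in state_dict))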
diffusers/examples/dreambooth/test_dreambooth_lora_edm.py/0
{ "file_path": "diffusers/examples/dreambooth/test_dreambooth_lora_edm.py", "repo_id": "diffusers", "token_count": 1864 }
103
#!/usr/bin/env python # coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and import argparse import logging import math import os import shutil from pathlib import Path import accelerate import datasets import numpy as np import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.state import AcceleratorState from accelerate.utils import ProjectConfiguration, set_seed from datasets import load_dataset from huggingface_hub import create_repo, upload_folder from packaging import version from PIL import Image from tqdm import tqdm from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from transformers.utils import ContextManagers import diffusers from diffusers import AutoPipelineForText2Image, DDPMScheduler, UNet2DConditionModel, VQModel from diffusers.optimization import get_scheduler from diffusers.training_utils import EMAModel, compute_snr from diffusers.utils import check_min_version, is_wandb_available, make_image_grid from diffusers.utils.import_utils import is_xformers_available if is_wandb_available(): import wandb # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.28.0.dev0") logger = get_logger(__name__, log_level="INFO") def save_model_card( args, repo_id: str, images=None, repo_folder=None, ): img_str = "" if len(images) > 0: image_grid = make_image_grid(images, 1, len(args.validation_prompts)) image_grid.save(os.path.join(repo_folder, "val_imgs_grid.png")) img_str += "![val_imgs_grid](./val_imgs_grid.png)\n" yaml = f""" --- license: creativeml-openrail-m base_model: {args.pretrained_decoder_model_name_or_path} datasets: - {args.dataset_name} prior: - {args.pretrained_prior_model_name_or_path} tags: - kandinsky - text-to-image - diffusers - diffusers-training inference: true --- """ model_card = f""" # Finetuning - {repo_id} This pipeline was finetuned from **{args.pretrained_decoder_model_name_or_path}** on the **{args.dataset_name}** dataset. 
Below are some example images generated with the finetuned pipeline using the following prompts: {args.validation_prompts}: \n {img_str} ## Pipeline usage You can use the pipeline like so: ```python from diffusers import DiffusionPipeline import torch pipeline = AutoPipelineForText2Image.from_pretrained("{repo_id}", torch_dtype=torch.float16) prompt = "{args.validation_prompts[0]}" image = pipeline(prompt).images[0] image.save("my_image.png") ``` ## Training info These are the key hyperparameters used during training: * Epochs: {args.num_train_epochs} * Learning rate: {args.learning_rate} * Batch size: {args.train_batch_size} * Gradient accumulation steps: {args.gradient_accumulation_steps} * Image resolution: {args.resolution} * Mixed-precision: {args.mixed_precision} """ wandb_info = "" if is_wandb_available(): wandb_run_url = None if wandb.run is not None: wandb_run_url = wandb.run.url if wandb_run_url is not None: wandb_info = f""" More information on all the CLI arguments and the environment are available on your [`wandb` run page]({wandb_run_url}). """ model_card += wandb_info with open(os.path.join(repo_folder, "README.md"), "w") as f: f.write(yaml + model_card) def log_validation(vae, image_encoder, image_processor, unet, args, accelerator, weight_dtype, epoch): logger.info("Running validation... ") pipeline = AutoPipelineForText2Image.from_pretrained( args.pretrained_decoder_model_name_or_path, vae=accelerator.unwrap_model(vae), prior_image_encoder=accelerator.unwrap_model(image_encoder), prior_image_processor=image_processor, unet=accelerator.unwrap_model(unet), torch_dtype=weight_dtype, ) pipeline = pipeline.to(accelerator.device) pipeline.set_progress_bar_config(disable=True) if args.enable_xformers_memory_efficient_attention: pipeline.enable_xformers_memory_efficient_attention() if args.seed is None: generator = None else: generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) images = [] for i in range(len(args.validation_prompts)): with torch.autocast("cuda"): image = pipeline(args.validation_prompts[i], num_inference_steps=20, generator=generator).images[0] images.append(image) for tracker in accelerator.trackers: if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in images]) tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC") elif tracker.name == "wandb": tracker.log( { "validation": [ wandb.Image(image, caption=f"{i}: {args.validation_prompts[i]}") for i, image in enumerate(images) ] } ) else: logger.warning(f"image logging not implemented for {tracker.name}") del pipeline torch.cuda.empty_cache() return images def parse_args(): parser = argparse.ArgumentParser(description="Simple example of finetuning Kandinsky 2.2.") parser.add_argument( "--pretrained_decoder_model_name_or_path", type=str, default="kandinsky-community/kandinsky-2-2-decoder", required=False, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--pretrained_prior_model_name_or_path", type=str, default="kandinsky-community/kandinsky-2-2-prior", required=False, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--dataset_name", type=str, default=None, help=( "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," " or to a folder containing files that 🤗 Datasets can understand." 
), ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The config of the Dataset, leave as None if there's only one config.", ) parser.add_argument( "--train_data_dir", type=str, default=None, help=( "A folder containing the training data. Folder contents must follow the structure described in" " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file" " must exist to provide the captions for the images. Ignored if `dataset_name` is specified." ), ) parser.add_argument( "--image_column", type=str, default="image", help="The column of the dataset containing an image." ) parser.add_argument( "--max_train_samples", type=int, default=None, help=( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ), ) parser.add_argument( "--validation_prompts", type=str, default=None, nargs="+", help=("A set of prompts evaluated every `--validation_epochs` and logged to `--report_to`."), ) parser.add_argument( "--output_dir", type=str, default="kandi_2_2-model-finetuned", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--cache_dir", type=str, default=None, help="The directory where the downloaded models and datasets will be stored.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--train_batch_size", type=int, default=1, help="Batch size (per device) for the training dataloader." ) parser.add_argument("--num_train_epochs", type=int, default=100) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=1e-4, help="learning rate", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--snr_gamma", type=float, default=None, help="SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. " "More details here: https://arxiv.org/abs/2303.09556.", ) parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. 
For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.") parser.add_argument( "--dataloader_num_workers", type=int, default=0, help=( "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." ), ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument( "--adam_weight_decay", type=float, default=0.0, required=False, help="weight decay_to_use", ) parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--checkpoints_total_limit", type=int, default=None, help=("Max number of checkpoints to store."), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." 
) parser.add_argument( "--validation_epochs", type=int, default=5, help="Run validation every X epochs.", ) parser.add_argument( "--tracker_project_name", type=str, default="text2image-fine-tune", help=( "The `project_name` argument passed to Accelerator.init_trackers for" " more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator" ), ) args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank # Sanity checks if args.dataset_name is None and args.train_data_dir is None: raise ValueError("Need either a dataset name or a training folder.") return args def main(): args = parse_args() if args.report_to == "wandb" and args.hub_token is not None: raise ValueError( "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token." " Please use `huggingface-cli login` to authenticate with the Hub." ) logging_dir = os.path.join(args.output_dir, args.logging_dir) accelerator_project_config = ProjectConfiguration( total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir ) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config, ) # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. 
if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_decoder_model_name_or_path, subfolder="scheduler") image_processor = CLIPImageProcessor.from_pretrained( args.pretrained_prior_model_name_or_path, subfolder="image_processor" ) def deepspeed_zero_init_disabled_context_manager(): """ returns either a context list that includes one that will disable zero.Init or an empty context list """ deepspeed_plugin = AcceleratorState().deepspeed_plugin if accelerate.state.is_initialized() else None if deepspeed_plugin is None: return [] return [deepspeed_plugin.zero3_init_context_manager(enable=False)] weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 with ContextManagers(deepspeed_zero_init_disabled_context_manager()): vae = VQModel.from_pretrained( args.pretrained_decoder_model_name_or_path, subfolder="movq", torch_dtype=weight_dtype ).eval() image_encoder = CLIPVisionModelWithProjection.from_pretrained( args.pretrained_prior_model_name_or_path, subfolder="image_encoder", torch_dtype=weight_dtype ).eval() unet = UNet2DConditionModel.from_pretrained(args.pretrained_decoder_model_name_or_path, subfolder="unet") # Freeze vae and image_encoder vae.requires_grad_(False) image_encoder.requires_grad_(False) # Set unet to trainable. unet.train() # Create EMA for the unet. if args.use_ema: ema_unet = UNet2DConditionModel.from_pretrained(args.pretrained_decoder_model_name_or_path, subfolder="unet") ema_unet = EMAModel(ema_unet.parameters(), model_cls=UNet2DConditionModel, model_config=ema_unet.config) ema_unet.to(accelerator.device) if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): import xformers xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. 
Make sure it is installed correctly") # `accelerate` 0.16.0 will have better support for customized saving if version.parse(accelerate.__version__) >= version.parse("0.16.0"): # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format def save_model_hook(models, weights, output_dir): if args.use_ema: ema_unet.save_pretrained(os.path.join(output_dir, "unet_ema")) for i, model in enumerate(models): model.save_pretrained(os.path.join(output_dir, "unet")) # make sure to pop weight so that corresponding model is not saved again weights.pop() def load_model_hook(models, input_dir): if args.use_ema: load_model = EMAModel.from_pretrained(os.path.join(input_dir, "unet_ema"), UNet2DConditionModel) ema_unet.load_state_dict(load_model.state_dict()) ema_unet.to(accelerator.device) del load_model for i in range(len(models)): # pop models so that they are not loaded again model = models.pop() # load diffusers style into model load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet") model.register_to_config(**load_model.config) model.load_state_dict(load_model.state_dict()) del load_model accelerator.register_save_state_pre_hook(save_model_hook) accelerator.register_load_state_pre_hook(load_model_hook) if args.gradient_checkpointing: unet.enable_gradient_checkpointing() # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`" ) optimizer_cls = bnb.optim.AdamW8bit else: optimizer_cls = torch.optim.AdamW optimizer = optimizer_cls( unet.parameters(), lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) # Get the datasets: you can either provide your own training and evaluation files (see below) # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub). # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. if args.dataset_name is not None: # Downloading and loading a dataset from the hub. dataset = load_dataset( args.dataset_name, args.dataset_config_name, cache_dir=args.cache_dir, ) else: data_files = {} if args.train_data_dir is not None: data_files["train"] = os.path.join(args.train_data_dir, "**") dataset = load_dataset( "imagefolder", data_files=data_files, cache_dir=args.cache_dir, ) # See more about loading custom images at # https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder # Preprocessing the datasets. # We need to tokenize inputs and targets. 
column_names = dataset["train"].column_names image_column = args.image_column if image_column not in column_names: raise ValueError(f"--image_column' value '{args.image_column}' needs to be one of: {', '.join(column_names)}") def center_crop(image): width, height = image.size new_size = min(width, height) left = (width - new_size) / 2 top = (height - new_size) / 2 right = (width + new_size) / 2 bottom = (height + new_size) / 2 return image.crop((left, top, right, bottom)) def train_transforms(img): img = center_crop(img) img = img.resize((args.resolution, args.resolution), resample=Image.BICUBIC, reducing_gap=1) img = np.array(img).astype(np.float32) / 127.5 - 1 img = torch.from_numpy(np.transpose(img, [2, 0, 1])) return img def preprocess_train(examples): images = [image.convert("RGB") for image in examples[image_column]] examples["pixel_values"] = [train_transforms(image) for image in images] examples["clip_pixel_values"] = image_processor(images, return_tensors="pt").pixel_values return examples with accelerator.main_process_first(): if args.max_train_samples is not None: dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples)) # Set the training transforms train_dataset = dataset["train"].with_transform(preprocess_train) def collate_fn(examples): pixel_values = torch.stack([example["pixel_values"] for example in examples]) pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() clip_pixel_values = torch.stack([example["clip_pixel_values"] for example in examples]) clip_pixel_values = clip_pixel_values.to(memory_format=torch.contiguous_format).float() return {"pixel_values": pixel_values, "clip_pixel_values": clip_pixel_values} train_dataloader = torch.utils.data.DataLoader( train_dataset, shuffle=True, collate_fn=collate_fn, batch_size=args.train_batch_size, num_workers=args.dataloader_num_workers, ) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, ) unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, optimizer, train_dataloader, lr_scheduler ) # Move image_encode and vae to gpu and cast to weight_dtype image_encoder.to(accelerator.device, dtype=weight_dtype) vae.to(accelerator.device, dtype=weight_dtype) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: tracker_config = dict(vars(args)) tracker_config.pop("validation_prompts") accelerator.init_trackers(args.tracker_project_name, tracker_config) # Train! 
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." ) args.resume_from_checkpoint = None initial_global_step = 0 else: accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) initial_global_step = global_step first_epoch = global_step // num_update_steps_per_epoch else: initial_global_step = 0 progress_bar = tqdm( range(0, args.max_train_steps), initial=initial_global_step, desc="Steps", # Only show the progress bar once on each machine. disable=not accelerator.is_local_main_process, ) for epoch in range(first_epoch, args.num_train_epochs): train_loss = 0.0 for step, batch in enumerate(train_dataloader): with accelerator.accumulate(unet): # Convert images to latent space images = batch["pixel_values"].to(weight_dtype) clip_images = batch["clip_pixel_values"].to(weight_dtype) latents = vae.encode(images).latents image_embeds = image_encoder(clip_images).image_embeds # Sample noise that we'll add to the latents noise = torch.randn_like(latents) bsz = latents.shape[0] # Sample a random timestep for each image timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device) timesteps = timesteps.long() noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) target = noise # Predict the noise residual and compute loss added_cond_kwargs = {"image_embeds": image_embeds} model_pred = unet(noisy_latents, timesteps, None, added_cond_kwargs=added_cond_kwargs).sample[:, :4] if args.snr_gamma is None: loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") else: # Compute loss-weights as per Section 3.4 of https://arxiv.org/abs/2303.09556. # Since we predict the noise instead of x_0, the original formulation is slightly changed. # This is discussed in Section 4.2 of the same paper. snr = compute_snr(noise_scheduler, timesteps) mse_loss_weights = torch.stack([snr, args.snr_gamma * torch.ones_like(timesteps)], dim=1).min( dim=1 )[0] if noise_scheduler.config.prediction_type == "epsilon": mse_loss_weights = mse_loss_weights / snr elif noise_scheduler.config.prediction_type == "v_prediction": mse_loss_weights = mse_loss_weights / (snr + 1) loss = F.mse_loss(model_pred.float(), target.float(), reduction="none") loss = loss.mean(dim=list(range(1, len(loss.shape)))) * mse_loss_weights loss = loss.mean() # Gather the losses across all processes for logging (if we use distributed training). 
avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean() train_loss += avg_loss.item() / args.gradient_accumulation_steps # Backpropagate accelerator.backward(loss) if accelerator.sync_gradients: accelerator.clip_grad_norm_(unet.parameters(), args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: if args.use_ema: ema_unet.step(unet.parameters()) progress_bar.update(1) global_step += 1 accelerator.log({"train_loss": train_loss}, step=global_step) train_loss = 0.0 if global_step % args.checkpointing_steps == 0: if accelerator.is_main_process: # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` if args.checkpoints_total_limit is not None: checkpoints = os.listdir(args.output_dir) checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints if len(checkpoints) >= args.checkpoints_total_limit: num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 removing_checkpoints = checkpoints[0:num_to_remove] logger.info( f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" ) logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") for removing_checkpoint in removing_checkpoints: removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) shutil.rmtree(removing_checkpoint) save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) if global_step >= args.max_train_steps: break if accelerator.is_main_process: if args.validation_prompts is not None and epoch % args.validation_epochs == 0: if args.use_ema: # Store the UNet parameters temporarily and load the EMA parameters to perform inference. ema_unet.store(unet.parameters()) ema_unet.copy_to(unet.parameters()) log_validation( vae, image_encoder, image_processor, unet, args, accelerator, weight_dtype, global_step, ) if args.use_ema: # Switch back to the original UNet parameters. ema_unet.restore(unet.parameters()) # Create the pipeline using the trained modules and save it. accelerator.wait_for_everyone() if accelerator.is_main_process: unet = accelerator.unwrap_model(unet) if args.use_ema: ema_unet.copy_to(unet.parameters()) pipeline = AutoPipelineForText2Image.from_pretrained( args.pretrained_decoder_model_name_or_path, vae=vae, unet=unet, ) pipeline.decoder_pipe.save_pretrained(args.output_dir) # Run a final round of inference. 
images = [] if args.validation_prompts is not None: logger.info("Running inference for collecting generated images...") pipeline = pipeline.to(accelerator.device) pipeline.torch_dtype = weight_dtype pipeline.set_progress_bar_config(disable=True) pipeline.enable_model_cpu_offload() if args.enable_xformers_memory_efficient_attention: pipeline.enable_xformers_memory_efficient_attention() if args.seed is None: generator = None else: generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) for i in range(len(args.validation_prompts)): with torch.autocast("cuda"): image = pipeline(args.validation_prompts[i], num_inference_steps=20, generator=generator).images[0] images.append(image) if args.push_to_hub: save_model_card(args, repo_id, images, repo_folder=args.output_dir) upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) accelerator.end_training() if __name__ == "__main__": main()
diffusers/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py/0
{ "file_path": "diffusers/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py", "repo_id": "diffusers", "token_count": 16368 }
104
## Diffusers examples with Intel optimizations

**This research project is not actively maintained by the diffusers team. For any questions or comments, please make sure to tag @hshen14.**

This project provides diffusers examples with Intel optimizations, such as Bfloat16 for training/fine-tuning acceleration and 8-bit integer (INT8) for inference acceleration, on Intel platforms.

## Accelerating the fine-tuning for textual inversion

We accelerate the fine-tuning for textual inversion with Intel Extension for PyTorch. The [examples](textual_inversion) enable both single-node and multi-node distributed training with Bfloat16 support on Intel Xeon Scalable Processors.

## Accelerating the inference for Stable Diffusion using Bfloat16

We accelerate Stable Diffusion inference with Bfloat16 using Intel Extension for PyTorch. The [script](inference_bf16.py) supports standard Stable Diffusion models running in Bfloat16.

```bash
pip install diffusers transformers accelerate scipy safetensors

export KMP_BLOCKTIME=1
export KMP_SETTINGS=1
export KMP_AFFINITY=granularity=fine,compact,1,0

# Intel OpenMP
export OMP_NUM_THREADS=< Cores to use >
export LD_PRELOAD=${LD_PRELOAD}:/path/to/lib/libiomp5.so

# Jemalloc is a recommended malloc implementation that emphasizes fragmentation avoidance and scalable concurrency support.
export LD_PRELOAD=${LD_PRELOAD}:/path/to/lib/libjemalloc.so
export MALLOC_CONF="oversize_threshold:1,background_thread:true,metadata_thp:auto,dirty_decay_ms:-1,muzzy_decay_ms:9000000000"

# Launch with default DDIM
numactl --membind <node N> -C <cpu list> python inference_bf16.py
# Launch with DPMSolverMultistepScheduler
numactl --membind <node N> -C <cpu list> python inference_bf16.py --dpm
```

## Accelerating the inference for Stable Diffusion using INT8

Coming soon ...
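As a point of reference for the Bfloat16 path above: it generally amounts to wrapping the pipeline's heavy submodules with `ipex.optimize` and running the denoising loop under a CPU bf16 autocast. The snippet below is a minimal, hypothetical sketch of that pattern — it is not the contents of `inference_bf16.py`, and the model id, prompt, and step count are placeholders.

```python
# Illustrative sketch only: bf16 Stable Diffusion inference on CPU with
# Intel Extension for PyTorch (IPEX). Model id, prompt, and steps are placeholders.
import torch
import intel_extension_for_pytorch as ipex
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline

model_id = "runwayml/stable-diffusion-v1-5"  # placeholder checkpoint
pipe = StableDiffusionPipeline.from_pretrained(model_id)

# Optional: swap in DPM-Solver++ (the `--dpm` launch variant above).
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)

# Let IPEX optimize the compute-heavy submodules for bf16 execution on CPU.
pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)

prompt = "a photo of an astronaut riding a horse on mars"
with torch.no_grad(), torch.autocast("cpu", dtype=torch.bfloat16):
    image = pipe(prompt, num_inference_steps=20).images[0]
image.save("astronaut_bf16.png")
```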
diffusers/examples/research_projects/intel_opts/README.md/0
{ "file_path": "diffusers/examples/research_projects/intel_opts/README.md", "repo_id": "diffusers", "token_count": 528 }
105
#!/usr/bin/env python3 import argparse import math import os from copy import deepcopy import requests import torch from audio_diffusion.models import DiffusionAttnUnet1D from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel MODELS_MAP = { "gwf-440k": { "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt", "sample_rate": 48000, "sample_size": 65536, }, "jmann-small-190k": { "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt", "sample_rate": 48000, "sample_size": 65536, }, "jmann-large-580k": { "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt", "sample_rate": 48000, "sample_size": 131072, }, "maestro-uncond-150k": { "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt", "sample_rate": 16000, "sample_size": 65536, }, "unlocked-uncond-250k": { "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt", "sample_rate": 16000, "sample_size": 65536, }, "honk-140k": { "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt", "sample_rate": 16000, "sample_size": 65536, }, } def alpha_sigma_to_t(alpha, sigma): """Returns a timestep, given the scaling factors for the clean image and for the noise.""" return torch.atan2(sigma, alpha) / math.pi * 2 def get_crash_schedule(t): sigma = torch.sin(t * math.pi / 2) ** 2 alpha = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(alpha, sigma) class Object(object): pass class DiffusionUncond(nn.Module): def __init__(self, global_args): super().__init__() self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4) self.diffusion_ema = deepcopy(self.diffusion) self.rng = torch.quasirandom.SobolEngine(1, scramble=True) def download(model_name): url = MODELS_MAP[model_name]["url"] r = requests.get(url, stream=True) local_filename = f"./{model_name}.ckpt" with open(local_filename, "wb") as fp: for chunk in r.iter_content(chunk_size=8192): fp.write(chunk) return local_filename DOWN_NUM_TO_LAYER = { "1": "resnets.0", "2": "attentions.0", "3": "resnets.1", "4": "attentions.1", "5": "resnets.2", "6": "attentions.2", } UP_NUM_TO_LAYER = { "8": "resnets.0", "9": "attentions.0", "10": "resnets.1", "11": "attentions.1", "12": "resnets.2", "13": "attentions.2", } MID_NUM_TO_LAYER = { "1": "resnets.0", "2": "attentions.0", "3": "resnets.1", "4": "attentions.1", "5": "resnets.2", "6": "attentions.2", "8": "resnets.3", "9": "attentions.3", "10": "resnets.4", "11": "attentions.4", "12": "resnets.5", "13": "attentions.5", } DEPTH_0_TO_LAYER = { "0": "resnets.0", "1": "resnets.1", "2": "resnets.2", "4": "resnets.0", "5": "resnets.1", "6": "resnets.2", } RES_CONV_MAP = { "skip": "conv_skip", "main.0": "conv_1", "main.1": "group_norm_1", "main.3": "conv_2", "main.4": "group_norm_2", } ATTN_MAP = { "norm": "group_norm", "qkv_proj": ["query", "key", "value"], "out_proj": ["proj_attn"], } def convert_resconv_naming(name): if name.startswith("skip"): return name.replace("skip", RES_CONV_MAP["skip"]) # name has to be of format main.{digit} if not name.startswith("main."): raise ValueError(f"ResConvBlock error with {name}") return name.replace(name[:6], RES_CONV_MAP[name[:6]]) def convert_attn_naming(name): for key, value in ATTN_MAP.items(): if name.startswith(key) and not isinstance(value, list): return name.replace(key, value) elif name.startswith(key): return [name.replace(key, v) for v in value] raise ValueError(f"Attn error with {name}") def rename(input_string, max_depth=13): string = input_string if 
string.split(".")[0] == "timestep_embed": return string.replace("timestep_embed", "time_proj") depth = 0 if string.startswith("net.3."): depth += 1 string = string[6:] elif string.startswith("net."): string = string[4:] while string.startswith("main.7."): depth += 1 string = string[7:] if string.startswith("main."): string = string[5:] # mid block if string[:2].isdigit(): layer_num = string[:2] string_left = string[2:] else: layer_num = string[0] string_left = string[1:] if depth == max_depth: new_layer = MID_NUM_TO_LAYER[layer_num] prefix = "mid_block" elif depth > 0 and int(layer_num) < 7: new_layer = DOWN_NUM_TO_LAYER[layer_num] prefix = f"down_blocks.{depth}" elif depth > 0 and int(layer_num) > 7: new_layer = UP_NUM_TO_LAYER[layer_num] prefix = f"up_blocks.{max_depth - depth - 1}" elif depth == 0: new_layer = DEPTH_0_TO_LAYER[layer_num] prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0" if not string_left.startswith("."): raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.") string_left = string_left[1:] if "resnets" in new_layer: string_left = convert_resconv_naming(string_left) elif "attentions" in new_layer: new_string_left = convert_attn_naming(string_left) string_left = new_string_left if not isinstance(string_left, list): new_string = prefix + "." + new_layer + "." + string_left else: new_string = [prefix + "." + new_layer + "." + s for s in string_left] return new_string def rename_orig_weights(state_dict): new_state_dict = {} for k, v in state_dict.items(): if k.endswith("kernel"): # up- and downsample layers, don't have trainable weights continue new_k = rename(k) # check if we need to transform from Conv => Linear for attention if isinstance(new_k, list): new_state_dict = transform_conv_attns(new_state_dict, new_k, v) else: new_state_dict[new_k] = v return new_state_dict def transform_conv_attns(new_state_dict, new_k, v): if len(new_k) == 1: if len(v.shape) == 3: # weight new_state_dict[new_k[0]] = v[:, :, 0] else: # bias new_state_dict[new_k[0]] = v else: # qkv matrices trippled_shape = v.shape[0] single_shape = trippled_shape // 3 for i in range(3): if len(v.shape) == 3: new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0] else: new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def main(args): device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model_name = args.model_path.split("/")[-1].split(".")[0] if not os.path.isfile(args.model_path): assert ( model_name == args.model_path ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}" args.model_path = download(model_name) sample_rate = MODELS_MAP[model_name]["sample_rate"] sample_size = MODELS_MAP[model_name]["sample_size"] config = Object() config.sample_size = sample_size config.sample_rate = sample_rate config.latent_dim = 0 diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate) diffusers_state_dict = diffusers_model.state_dict() orig_model = DiffusionUncond(config) orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"]) orig_model = orig_model.diffusion_ema.eval() orig_model_state_dict = orig_model.state_dict() renamed_state_dict = rename_orig_weights(orig_model_state_dict) renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys()) diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys()) assert len(renamed_minus_diffusers) == 
0, f"Problem with {renamed_minus_diffusers}" assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}" for key, value in renamed_state_dict.items(): assert ( diffusers_state_dict[key].squeeze().shape == value.squeeze().shape ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}" if key == "time_proj.weight": value = value.squeeze() diffusers_state_dict[key] = value diffusers_model.load_state_dict(diffusers_state_dict) steps = 100 seed = 33 diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps) generator = torch.manual_seed(seed) noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device) t = torch.linspace(1, 0, steps + 1, device=device)[:-1] step_list = get_crash_schedule(t) pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler) generator = torch.manual_seed(33) audio = pipe(num_inference_steps=steps, generator=generator).audios generated = sampling.iplms_sample(orig_model, noise, step_list, {}) generated = generated.clamp(-1, 1) diff_sum = (generated - audio).abs().sum() diff_max = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path) print("Diff sum", diff_sum) print("Diff max", diff_max) assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/" print(f"Conversion for {model_name} successful!") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.") parser.add_argument( "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not." ) parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.") args = parser.parse_args() main(args)
diffusers/scripts/convert_dance_diffusion_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_dance_diffusion_to_diffusers.py", "repo_id": "diffusers", "token_count": 4637 }
106
from diffusers.utils import is_accelerate_available, logging if is_accelerate_available(): pass logger = logging.get_logger(__name__) # pylint: disable=invalid-name def create_unet_diffusers_config(original_config, image_size: int, controlnet=False): """ Creates a config for the diffusers based on the config of the LDM model. """ if controlnet: unet_params = original_config.model.params.control_stage_config.params else: if "unet_config" in original_config.model.params and original_config.model.params.unet_config is not None: unet_params = original_config.model.params.unet_config.params else: unet_params = original_config.model.params.network_config.params vae_params = original_config.model.params.first_stage_config.params.encoder_config.params block_out_channels = [unet_params.model_channels * mult for mult in unet_params.channel_mult] down_block_types = [] resolution = 1 for i in range(len(block_out_channels)): block_type = ( "CrossAttnDownBlockSpatioTemporal" if resolution in unet_params.attention_resolutions else "DownBlockSpatioTemporal" ) down_block_types.append(block_type) if i != len(block_out_channels) - 1: resolution *= 2 up_block_types = [] for i in range(len(block_out_channels)): block_type = ( "CrossAttnUpBlockSpatioTemporal" if resolution in unet_params.attention_resolutions else "UpBlockSpatioTemporal" ) up_block_types.append(block_type) resolution //= 2 if unet_params.transformer_depth is not None: transformer_layers_per_block = ( unet_params.transformer_depth if isinstance(unet_params.transformer_depth, int) else list(unet_params.transformer_depth) ) else: transformer_layers_per_block = 1 vae_scale_factor = 2 ** (len(vae_params.ch_mult) - 1) head_dim = unet_params.num_heads if "num_heads" in unet_params else None use_linear_projection = ( unet_params.use_linear_in_transformer if "use_linear_in_transformer" in unet_params else False ) if use_linear_projection: # stable diffusion 2-base-512 and 2-768 if head_dim is None: head_dim_mult = unet_params.model_channels // unet_params.num_head_channels head_dim = [head_dim_mult * c for c in list(unet_params.channel_mult)] class_embed_type = None addition_embed_type = None addition_time_embed_dim = None projection_class_embeddings_input_dim = None context_dim = None if unet_params.context_dim is not None: context_dim = ( unet_params.context_dim if isinstance(unet_params.context_dim, int) else unet_params.context_dim[0] ) if "num_classes" in unet_params: if unet_params.num_classes == "sequential": addition_time_embed_dim = 256 assert "adm_in_channels" in unet_params projection_class_embeddings_input_dim = unet_params.adm_in_channels config = { "sample_size": image_size // vae_scale_factor, "in_channels": unet_params.in_channels, "down_block_types": tuple(down_block_types), "block_out_channels": tuple(block_out_channels), "layers_per_block": unet_params.num_res_blocks, "cross_attention_dim": context_dim, "attention_head_dim": head_dim, "use_linear_projection": use_linear_projection, "class_embed_type": class_embed_type, "addition_embed_type": addition_embed_type, "addition_time_embed_dim": addition_time_embed_dim, "projection_class_embeddings_input_dim": projection_class_embeddings_input_dim, "transformer_layers_per_block": transformer_layers_per_block, } if "disable_self_attentions" in unet_params: config["only_cross_attention"] = unet_params.disable_self_attentions if "num_classes" in unet_params and isinstance(unet_params.num_classes, int): config["num_class_embeds"] = unet_params.num_classes if controlnet: 
config["conditioning_channels"] = unet_params.hint_channels else: config["out_channels"] = unet_params.out_channels config["up_block_types"] = tuple(up_block_types) return config def assign_to_checkpoint( paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None, mid_block_suffix="", ): """ This does the final conversion step: take locally converted weights and apply a global renaming to them. It splits attention layers, and takes into account additional replacements that may arise. Assigns the weights to the new checkpoint. """ assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): old_tensor = old_checkpoint[path] channels = old_tensor.shape[0] // 3 target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1) num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3 old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:]) query, key, value = old_tensor.split(channels // num_heads, dim=1) checkpoint[path_map["query"]] = query.reshape(target_shape) checkpoint[path_map["key"]] = key.reshape(target_shape) checkpoint[path_map["value"]] = value.reshape(target_shape) if mid_block_suffix is not None: mid_block_suffix = f".{mid_block_suffix}" else: mid_block_suffix = "" for path in paths: new_path = path["new"] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here new_path = new_path.replace("middle_block.0", f"mid_block.resnets.0{mid_block_suffix}") new_path = new_path.replace("middle_block.1", "mid_block.attentions.0") new_path = new_path.replace("middle_block.2", f"mid_block.resnets.1{mid_block_suffix}") if additional_replacements is not None: for replacement in additional_replacements: new_path = new_path.replace(replacement["old"], replacement["new"]) if new_path == "mid_block.resnets.0.spatial_res_block.norm1.weight": print("yeyy") # proj_attn.weight has to be converted from conv 1D to linear is_attn_weight = "proj_attn.weight" in new_path or ("attentions" in new_path and "to_" in new_path) shape = old_checkpoint[path["old"]].shape if is_attn_weight and len(shape) == 3: checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0] elif is_attn_weight and len(shape) == 4: checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0, 0] else: checkpoint[new_path] = old_checkpoint[path["old"]] def renew_attention_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside attentions to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item # new_item = new_item.replace('norm.weight', 'group_norm.weight') # new_item = new_item.replace('norm.bias', 'group_norm.bias') # new_item = new_item.replace('proj_out.weight', 'proj_attn.weight') # new_item = new_item.replace('proj_out.bias', 'proj_attn.bias') # new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) new_item = new_item.replace("time_stack", "temporal_transformer_blocks") new_item = new_item.replace("time_pos_embed.0.bias", "time_pos_embed.linear_1.bias") new_item = new_item.replace("time_pos_embed.0.weight", "time_pos_embed.linear_1.weight") new_item = new_item.replace("time_pos_embed.2.bias", "time_pos_embed.linear_2.bias") new_item = 
new_item.replace("time_pos_embed.2.weight", "time_pos_embed.linear_2.weight") mapping.append({"old": old_item, "new": new_item}) return mapping def shave_segments(path, n_shave_prefix_segments=1): """ Removes segments. Positive values shave the first segments, negative shave the last segments. """ if n_shave_prefix_segments >= 0: return ".".join(path.split(".")[n_shave_prefix_segments:]) else: return ".".join(path.split(".")[:n_shave_prefix_segments]) def renew_resnet_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside resnets to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item.replace("in_layers.0", "norm1") new_item = new_item.replace("in_layers.2", "conv1") new_item = new_item.replace("out_layers.0", "norm2") new_item = new_item.replace("out_layers.3", "conv2") new_item = new_item.replace("emb_layers.1", "time_emb_proj") new_item = new_item.replace("skip_connection", "conv_shortcut") new_item = new_item.replace("time_stack.", "") new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({"old": old_item, "new": new_item}) return mapping def convert_ldm_unet_checkpoint( checkpoint, config, path=None, extract_ema=False, controlnet=False, skip_extract_state_dict=False ): """ Takes a state dict and a config, and returns a converted checkpoint. """ if skip_extract_state_dict: unet_state_dict = checkpoint else: # extract state_dict for UNet unet_state_dict = {} keys = list(checkpoint.keys()) unet_key = "model.diffusion_model." # at least a 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA if sum(k.startswith("model_ema") for k in keys) > 100 and extract_ema: logger.warning(f"Checkpoint {path} has both EMA and non-EMA weights.") logger.warning( "In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA" " weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag." ) for key in keys: if key.startswith("model.diffusion_model"): flat_ema_key = "model_ema." + "".join(key.split(".")[1:]) unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(flat_ema_key) else: if sum(k.startswith("model_ema") for k in keys) > 100: logger.warning( "In this conversion only the non-EMA weights are extracted. If you want to instead extract the EMA" " weights (usually better for inference), please make sure to add the `--extract_ema` flag." ) for key in keys: if key.startswith(unet_key): unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(key) new_checkpoint = {} new_checkpoint["time_embedding.linear_1.weight"] = unet_state_dict["time_embed.0.weight"] new_checkpoint["time_embedding.linear_1.bias"] = unet_state_dict["time_embed.0.bias"] new_checkpoint["time_embedding.linear_2.weight"] = unet_state_dict["time_embed.2.weight"] new_checkpoint["time_embedding.linear_2.bias"] = unet_state_dict["time_embed.2.bias"] if config["class_embed_type"] is None: # No parameters to port ... 
elif config["class_embed_type"] == "timestep" or config["class_embed_type"] == "projection": new_checkpoint["class_embedding.linear_1.weight"] = unet_state_dict["label_emb.0.0.weight"] new_checkpoint["class_embedding.linear_1.bias"] = unet_state_dict["label_emb.0.0.bias"] new_checkpoint["class_embedding.linear_2.weight"] = unet_state_dict["label_emb.0.2.weight"] new_checkpoint["class_embedding.linear_2.bias"] = unet_state_dict["label_emb.0.2.bias"] else: raise NotImplementedError(f"Not implemented `class_embed_type`: {config['class_embed_type']}") # if config["addition_embed_type"] == "text_time": new_checkpoint["add_embedding.linear_1.weight"] = unet_state_dict["label_emb.0.0.weight"] new_checkpoint["add_embedding.linear_1.bias"] = unet_state_dict["label_emb.0.0.bias"] new_checkpoint["add_embedding.linear_2.weight"] = unet_state_dict["label_emb.0.2.weight"] new_checkpoint["add_embedding.linear_2.bias"] = unet_state_dict["label_emb.0.2.bias"] new_checkpoint["conv_in.weight"] = unet_state_dict["input_blocks.0.0.weight"] new_checkpoint["conv_in.bias"] = unet_state_dict["input_blocks.0.0.bias"] new_checkpoint["conv_norm_out.weight"] = unet_state_dict["out.0.weight"] new_checkpoint["conv_norm_out.bias"] = unet_state_dict["out.0.bias"] new_checkpoint["conv_out.weight"] = unet_state_dict["out.2.weight"] new_checkpoint["conv_out.bias"] = unet_state_dict["out.2.bias"] # Retrieves the keys for the input blocks only num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "input_blocks" in layer}) input_blocks = { layer_id: [key for key in unet_state_dict if f"input_blocks.{layer_id}" in key] for layer_id in range(num_input_blocks) } # Retrieves the keys for the middle blocks only num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "middle_block" in layer}) middle_blocks = { layer_id: [key for key in unet_state_dict if f"middle_block.{layer_id}" in key] for layer_id in range(num_middle_blocks) } # Retrieves the keys for the output blocks only num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "output_blocks" in layer}) output_blocks = { layer_id: [key for key in unet_state_dict if f"output_blocks.{layer_id}" in key] for layer_id in range(num_output_blocks) } for i in range(1, num_input_blocks): block_id = (i - 1) // (config["layers_per_block"] + 1) layer_in_block_id = (i - 1) % (config["layers_per_block"] + 1) spatial_resnets = [ key for key in input_blocks[i] if f"input_blocks.{i}.0" in key and ( f"input_blocks.{i}.0.op" not in key and f"input_blocks.{i}.0.time_stack" not in key and f"input_blocks.{i}.0.time_mixer" not in key ) ] temporal_resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0.time_stack" in key] # import ipdb; ipdb.set_trace() attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key] if f"input_blocks.{i}.0.op.weight" in unet_state_dict: new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = unet_state_dict.pop( f"input_blocks.{i}.0.op.weight" ) new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = unet_state_dict.pop( f"input_blocks.{i}.0.op.bias" ) paths = renew_resnet_paths(spatial_resnets) meta_path = { "old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}.spatial_res_block", } assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) paths = renew_resnet_paths(temporal_resnets) meta_path = { "old": f"input_blocks.{i}.0", 
"new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}.temporal_res_block", } assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) # TODO resnet time_mixer.mix_factor if f"input_blocks.{i}.0.time_mixer.mix_factor" in unet_state_dict: new_checkpoint[ f"down_blocks.{block_id}.resnets.{layer_in_block_id}.time_mixer.mix_factor" ] = unet_state_dict[f"input_blocks.{i}.0.time_mixer.mix_factor"] if len(attentions): paths = renew_attention_paths(attentions) meta_path = {"old": f"input_blocks.{i}.1", "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}"} # import ipdb; ipdb.set_trace() assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) resnet_0 = middle_blocks[0] attentions = middle_blocks[1] resnet_1 = middle_blocks[2] resnet_0_spatial = [key for key in resnet_0 if "time_stack" not in key and "time_mixer" not in key] resnet_0_paths = renew_resnet_paths(resnet_0_spatial) # import ipdb; ipdb.set_trace() assign_to_checkpoint( resnet_0_paths, new_checkpoint, unet_state_dict, config=config, mid_block_suffix="spatial_res_block" ) resnet_0_temporal = [key for key in resnet_0 if "time_stack" in key and "time_mixer" not in key] resnet_0_paths = renew_resnet_paths(resnet_0_temporal) assign_to_checkpoint( resnet_0_paths, new_checkpoint, unet_state_dict, config=config, mid_block_suffix="temporal_res_block" ) resnet_1_spatial = [key for key in resnet_1 if "time_stack" not in key and "time_mixer" not in key] resnet_1_paths = renew_resnet_paths(resnet_1_spatial) assign_to_checkpoint( resnet_1_paths, new_checkpoint, unet_state_dict, config=config, mid_block_suffix="spatial_res_block" ) resnet_1_temporal = [key for key in resnet_1 if "time_stack" in key and "time_mixer" not in key] resnet_1_paths = renew_resnet_paths(resnet_1_temporal) assign_to_checkpoint( resnet_1_paths, new_checkpoint, unet_state_dict, config=config, mid_block_suffix="temporal_res_block" ) new_checkpoint["mid_block.resnets.0.time_mixer.mix_factor"] = unet_state_dict[ "middle_block.0.time_mixer.mix_factor" ] new_checkpoint["mid_block.resnets.1.time_mixer.mix_factor"] = unet_state_dict[ "middle_block.2.time_mixer.mix_factor" ] attentions_paths = renew_attention_paths(attentions) meta_path = {"old": "middle_block.1", "new": "mid_block.attentions.0"} assign_to_checkpoint( attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) for i in range(num_output_blocks): block_id = i // (config["layers_per_block"] + 1) layer_in_block_id = i % (config["layers_per_block"] + 1) output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]] output_block_list = {} for layer in output_block_layers: layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1) if layer_id in output_block_list: output_block_list[layer_id].append(layer_name) else: output_block_list[layer_id] = [layer_name] if len(output_block_list) > 1: spatial_resnets = [ key for key in output_blocks[i] if f"output_blocks.{i}.0" in key and (f"output_blocks.{i}.0.time_stack" not in key and "time_mixer" not in key) ] # import ipdb; ipdb.set_trace() temporal_resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0.time_stack" in key] paths = renew_resnet_paths(spatial_resnets) meta_path = { "old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}.spatial_res_block", } assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, 
additional_replacements=[meta_path], config=config ) paths = renew_resnet_paths(temporal_resnets) meta_path = { "old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}.temporal_res_block", } assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) if f"output_blocks.{i}.0.time_mixer.mix_factor" in unet_state_dict: new_checkpoint[ f"up_blocks.{block_id}.resnets.{layer_in_block_id}.time_mixer.mix_factor" ] = unet_state_dict[f"output_blocks.{i}.0.time_mixer.mix_factor"] output_block_list = {k: sorted(v) for k, v in output_block_list.items()} if ["conv.bias", "conv.weight"] in output_block_list.values(): index = list(output_block_list.values()).index(["conv.bias", "conv.weight"]) new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = unet_state_dict[ f"output_blocks.{i}.{index}.conv.weight" ] new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = unet_state_dict[ f"output_blocks.{i}.{index}.conv.bias" ] # Clear attentions as they have been attributed above. if len(attentions) == 2: attentions = [] attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key and "conv" not in key] if len(attentions): paths = renew_attention_paths(attentions) # import ipdb; ipdb.set_trace() meta_path = { "old": f"output_blocks.{i}.1", "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}", } assign_to_checkpoint( paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config ) else: spatial_layers = [ layer for layer in output_block_layers if "time_stack" not in layer and "time_mixer" not in layer ] resnet_0_paths = renew_resnet_paths(spatial_layers, n_shave_prefix_segments=1) # import ipdb; ipdb.set_trace() for path in resnet_0_paths: old_path = ".".join(["output_blocks", str(i), path["old"]]) new_path = ".".join( ["up_blocks", str(block_id), "resnets", str(layer_in_block_id), "spatial_res_block", path["new"]] ) new_checkpoint[new_path] = unet_state_dict[old_path] temporal_layers = [ layer for layer in output_block_layers if "time_stack" in layer and "time_mixer" not in key ] resnet_0_paths = renew_resnet_paths(temporal_layers, n_shave_prefix_segments=1) # import ipdb; ipdb.set_trace() for path in resnet_0_paths: old_path = ".".join(["output_blocks", str(i), path["old"]]) new_path = ".".join( ["up_blocks", str(block_id), "resnets", str(layer_in_block_id), "temporal_res_block", path["new"]] ) new_checkpoint[new_path] = unet_state_dict[old_path] new_checkpoint["up_blocks.0.resnets.0.time_mixer.mix_factor"] = unet_state_dict[ f"output_blocks.{str(i)}.0.time_mixer.mix_factor" ] return new_checkpoint def conv_attn_to_linear(checkpoint): keys = list(checkpoint.keys()) attn_keys = ["to_q.weight", "to_k.weight", "to_v.weight"] for key in keys: if ".".join(key.split(".")[-2:]) in attn_keys: if checkpoint[key].ndim > 2: checkpoint[key] = checkpoint[key][:, :, 0, 0] elif "proj_attn.weight" in key: if checkpoint[key].ndim > 2: checkpoint[key] = checkpoint[key][:, :, 0] def renew_vae_resnet_paths(old_list, n_shave_prefix_segments=0, is_temporal=False): """ Updates paths inside resnets to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item # Temporal resnet new_item = old_item.replace("in_layers.0", "norm1") new_item = new_item.replace("in_layers.2", "conv1") new_item = new_item.replace("out_layers.0", "norm2") new_item = new_item.replace("out_layers.3", "conv2") new_item = new_item.replace("skip_connection", 
"conv_shortcut") new_item = new_item.replace("time_stack.", "temporal_res_block.") # Spatial resnet new_item = new_item.replace("conv1", "spatial_res_block.conv1") new_item = new_item.replace("norm1", "spatial_res_block.norm1") new_item = new_item.replace("conv2", "spatial_res_block.conv2") new_item = new_item.replace("norm2", "spatial_res_block.norm2") new_item = new_item.replace("nin_shortcut", "spatial_res_block.conv_shortcut") new_item = new_item.replace("mix_factor", "spatial_res_block.time_mixer.mix_factor") new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({"old": old_item, "new": new_item}) return mapping def renew_vae_attention_paths(old_list, n_shave_prefix_segments=0): """ Updates paths inside attentions to the new naming scheme (local renaming) """ mapping = [] for old_item in old_list: new_item = old_item new_item = new_item.replace("norm.weight", "group_norm.weight") new_item = new_item.replace("norm.bias", "group_norm.bias") new_item = new_item.replace("q.weight", "to_q.weight") new_item = new_item.replace("q.bias", "to_q.bias") new_item = new_item.replace("k.weight", "to_k.weight") new_item = new_item.replace("k.bias", "to_k.bias") new_item = new_item.replace("v.weight", "to_v.weight") new_item = new_item.replace("v.bias", "to_v.bias") new_item = new_item.replace("proj_out.weight", "to_out.0.weight") new_item = new_item.replace("proj_out.bias", "to_out.0.bias") new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) mapping.append({"old": old_item, "new": new_item}) return mapping def convert_ldm_vae_checkpoint(checkpoint, config): # extract state dict for VAE vae_state_dict = {} keys = list(checkpoint.keys()) vae_key = "first_stage_model." if any(k.startswith("first_stage_model.") for k in keys) else "" for key in keys: if key.startswith(vae_key): vae_state_dict[key.replace(vae_key, "")] = checkpoint.get(key) new_checkpoint = {} new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"] new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"] new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"] new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"] new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"] new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"] new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"] new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"] new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"] new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"] new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"] new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"] new_checkpoint["decoder.time_conv_out.weight"] = vae_state_dict["decoder.time_mix_conv.weight"] new_checkpoint["decoder.time_conv_out.bias"] = vae_state_dict["decoder.time_mix_conv.bias"] # new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"] # new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"] # new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"] # new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"] # Retrieves the keys for the encoder down blocks only num_down_blocks = 
len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer}) down_blocks = { layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks) } # Retrieves the keys for the decoder up blocks only num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer}) up_blocks = { layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks) } for i in range(num_down_blocks): resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key] if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict: new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop( f"encoder.down.{i}.downsample.conv.weight" ) new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop( f"encoder.down.{i}.downsample.conv.bias" ) paths = renew_vae_resnet_paths(resnets) meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key] num_mid_res_blocks = 2 for i in range(1, num_mid_res_blocks + 1): resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key] paths = renew_vae_resnet_paths(resnets) meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key] paths = renew_vae_attention_paths(mid_attentions) meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) conv_attn_to_linear(new_checkpoint) for i in range(num_up_blocks): block_id = num_up_blocks - 1 - i resnets = [ key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key ] if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict: new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[ f"decoder.up.{block_id}.upsample.conv.weight" ] new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[ f"decoder.up.{block_id}.upsample.conv.bias" ] paths = renew_vae_resnet_paths(resnets) meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key] num_mid_res_blocks = 2 for i in range(1, num_mid_res_blocks + 1): resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key] paths = renew_vae_resnet_paths(resnets) meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key] paths = renew_vae_attention_paths(mid_attentions) meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) conv_attn_to_linear(new_checkpoint) return new_checkpoint
diffusers/scripts/convert_svd_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_svd_to_diffusers.py", "repo_id": "diffusers", "token_count": 14781 }
107
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available from . import BaseDiffusersCLICommand def info_command_factory(_): return EnvironmentCommand() class EnvironmentCommand(BaseDiffusersCLICommand): @staticmethod def register_subcommand(parser: ArgumentParser): download_parser = parser.add_parser("env") download_parser.set_defaults(func=info_command_factory) def run(self): hub_version = huggingface_hub.__version__ pt_version = "not installed" pt_cuda_available = "NA" if is_torch_available(): import torch pt_version = torch.__version__ pt_cuda_available = torch.cuda.is_available() transformers_version = "not installed" if is_transformers_available(): import transformers transformers_version = transformers.__version__ accelerate_version = "not installed" if is_accelerate_available(): import accelerate accelerate_version = accelerate.__version__ xformers_version = "not installed" if is_xformers_available(): import xformers xformers_version = xformers.__version__ info = { "`diffusers` version": version, "Platform": platform.platform(), "Python version": platform.python_version(), "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})", "Huggingface_hub version": hub_version, "Transformers version": transformers_version, "Accelerate version": accelerate_version, "xFormers version": xformers_version, "Using GPU in script?": "<fill in>", "Using distributed or parallel set-up in script?": "<fill in>", } print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n") print(self.format_dict(info)) return info @staticmethod def format_dict(d): return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
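# --- Illustrative helper (not part of the original module) ---
# A minimal sketch, mirroring what the `diffusers-cli env` entry point does:
# instantiate the command the same way `info_command_factory` does and return
# the collected report. The helper name is an illustrative addition, not an
# official API.
def _collect_env_info() -> dict:
    # `run()` prints the copy-and-paste report and returns it as a plain dict.
    return EnvironmentCommand().run()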
diffusers/src/diffusers/commands/env.py/0
{ "file_path": "diffusers/src/diffusers/commands/env.py", "repo_id": "diffusers", "token_count": 1069 }
108
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import List, Union from ..utils import MIN_PEFT_VERSION, check_peft_version, is_peft_available class PeftAdapterMixin: """ A class containing all functions for loading and using adapters weights that are supported in PEFT library. For more details about adapters and injecting them in a transformer-based model, check out the PEFT [documentation](https://huggingface.co/docs/peft/index). Install the latest version of PEFT, and use this mixin to: - Attach new adapters in the model. - Attach multiple adapters and iteratively activate/deactivate them. - Activate/deactivate all adapters from the model. - Get a list of the active adapters. """ _hf_peft_config_loaded = False def add_adapter(self, adapter_config, adapter_name: str = "default") -> None: r""" Adds a new adapter to the current model for training. If no adapter name is passed, a default name is assigned to the adapter to follow the convention of the PEFT library. If you are not familiar with adapters and PEFT methods, we invite you to read more about them in the PEFT [documentation](https://huggingface.co/docs/peft). Args: adapter_config (`[~peft.PeftConfig]`): The configuration of the adapter to add; supported adapters are non-prefix tuning and adaption prompt methods. adapter_name (`str`, *optional*, defaults to `"default"`): The name of the adapter to add. If no name is passed, a default name is assigned to the adapter. """ check_peft_version(min_version=MIN_PEFT_VERSION) if not is_peft_available(): raise ImportError("PEFT is not available. Please install PEFT to use this function: `pip install peft`.") from peft import PeftConfig, inject_adapter_in_model if not self._hf_peft_config_loaded: self._hf_peft_config_loaded = True elif adapter_name in self.peft_config: raise ValueError(f"Adapter with name {adapter_name} already exists. Please use a different name.") if not isinstance(adapter_config, PeftConfig): raise ValueError( f"adapter_config should be an instance of PeftConfig. Got {type(adapter_config)} instead." ) # Unlike transformers, here we don't need to retrieve the name_or_path of the unet as the loading logic is # handled by the `load_lora_layers` or `LoraLoaderMixin`. Therefore we set it to `None` here. adapter_config.base_model_name_or_path = None inject_adapter_in_model(adapter_config, self, adapter_name) self.set_adapter(adapter_name) def set_adapter(self, adapter_name: Union[str, List[str]]) -> None: """ Sets a specific adapter by forcing the model to only use that adapter and disables the other adapters. If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT [documentation](https://huggingface.co/docs/peft). Args: adapter_name (Union[str, List[str]])): The list of adapters to set or the adapter name in the case of a single adapter. """ check_peft_version(min_version=MIN_PEFT_VERSION) if not self._hf_peft_config_loaded: raise ValueError("No adapter loaded. 
Please load an adapter first.") if isinstance(adapter_name, str): adapter_name = [adapter_name] missing = set(adapter_name) - set(self.peft_config) if len(missing) > 0: raise ValueError( f"Following adapter(s) could not be found: {', '.join(missing)}. Make sure you are passing the correct adapter name(s)." f" current loaded adapters are: {list(self.peft_config.keys())}" ) from peft.tuners.tuners_utils import BaseTunerLayer _adapters_has_been_set = False for _, module in self.named_modules(): if isinstance(module, BaseTunerLayer): if hasattr(module, "set_adapter"): module.set_adapter(adapter_name) # Previous versions of PEFT does not support multi-adapter inference elif not hasattr(module, "set_adapter") and len(adapter_name) != 1: raise ValueError( "You are trying to set multiple adapters and you have a PEFT version that does not support multi-adapter inference. Please upgrade to the latest version of PEFT." " `pip install -U peft` or `pip install -U git+https://github.com/huggingface/peft.git`" ) else: module.active_adapter = adapter_name _adapters_has_been_set = True if not _adapters_has_been_set: raise ValueError( "Did not succeeded in setting the adapter. Please make sure you are using a model that supports adapters." ) def disable_adapters(self) -> None: r""" Disable all adapters attached to the model and fallback to inference with the base model only. If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT [documentation](https://huggingface.co/docs/peft). """ check_peft_version(min_version=MIN_PEFT_VERSION) if not self._hf_peft_config_loaded: raise ValueError("No adapter loaded. Please load an adapter first.") from peft.tuners.tuners_utils import BaseTunerLayer for _, module in self.named_modules(): if isinstance(module, BaseTunerLayer): if hasattr(module, "enable_adapters"): module.enable_adapters(enabled=False) else: # support for older PEFT versions module.disable_adapters = True def enable_adapters(self) -> None: """ Enable adapters that are attached to the model. The model uses `self.active_adapters()` to retrieve the list of adapters to enable. If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT [documentation](https://huggingface.co/docs/peft). """ check_peft_version(min_version=MIN_PEFT_VERSION) if not self._hf_peft_config_loaded: raise ValueError("No adapter loaded. Please load an adapter first.") from peft.tuners.tuners_utils import BaseTunerLayer for _, module in self.named_modules(): if isinstance(module, BaseTunerLayer): if hasattr(module, "enable_adapters"): module.enable_adapters(enabled=True) else: # support for older PEFT versions module.disable_adapters = False def active_adapters(self) -> List[str]: """ Gets the current list of active adapters of the model. If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT [documentation](https://huggingface.co/docs/peft). """ check_peft_version(min_version=MIN_PEFT_VERSION) if not is_peft_available(): raise ImportError("PEFT is not available. Please install PEFT to use this function: `pip install peft`.") if not self._hf_peft_config_loaded: raise ValueError("No adapter loaded. Please load an adapter first.") from peft.tuners.tuners_utils import BaseTunerLayer for _, module in self.named_modules(): if isinstance(module, BaseTunerLayer): return module.active_adapter
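# --- Illustrative usage (not part of the original module) ---
# A minimal sketch, assuming a recent `peft` is installed and `model` is any
# diffusers model inheriting from PeftAdapterMixin (e.g. a UNet). The LoRA
# hyper-parameters and adapter names below are illustrative, not recommended
# defaults.
def _example_attach_two_lora_adapters(model) -> List[str]:
    from peft import LoraConfig

    for name in ("adapter_a", "adapter_b"):
        config = LoraConfig(r=4, lora_alpha=4, target_modules=["to_q", "to_k", "to_v", "to_out.0"])
        model.add_adapter(config, adapter_name=name)

    # `add_adapter` activates only the most recently added adapter; enable both.
    model.set_adapter(["adapter_a", "adapter_b"])
    return model.active_adapters()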
diffusers/src/diffusers/loaders/peft.py/0
{ "file_path": "diffusers/src/diffusers/loaders/peft.py", "repo_id": "diffusers", "token_count": 3289 }
109
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...utils import is_torch_version from ...utils.accelerate_utils import apply_forward_hook from ..attention_processor import CROSS_ATTENTION_PROCESSORS, AttentionProcessor, AttnProcessor from ..modeling_outputs import AutoencoderKLOutput from ..modeling_utils import ModelMixin from ..unets.unet_3d_blocks import MidBlockTemporalDecoder, UpBlockTemporalDecoder from .vae import DecoderOutput, DiagonalGaussianDistribution, Encoder class TemporalDecoder(nn.Module): def __init__( self, in_channels: int = 4, out_channels: int = 3, block_out_channels: Tuple[int] = (128, 256, 512, 512), layers_per_block: int = 2, ): super().__init__() self.layers_per_block = layers_per_block self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1) self.mid_block = MidBlockTemporalDecoder( num_layers=self.layers_per_block, in_channels=block_out_channels[-1], out_channels=block_out_channels[-1], attention_head_dim=block_out_channels[-1], ) # up self.up_blocks = nn.ModuleList([]) reversed_block_out_channels = list(reversed(block_out_channels)) output_channel = reversed_block_out_channels[0] for i in range(len(block_out_channels)): prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 up_block = UpBlockTemporalDecoder( num_layers=self.layers_per_block + 1, in_channels=prev_output_channel, out_channels=output_channel, add_upsample=not is_final_block, ) self.up_blocks.append(up_block) prev_output_channel = output_channel self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=32, eps=1e-6) self.conv_act = nn.SiLU() self.conv_out = torch.nn.Conv2d( in_channels=block_out_channels[0], out_channels=out_channels, kernel_size=3, padding=1, ) conv_out_kernel_size = (3, 1, 1) padding = [int(k // 2) for k in conv_out_kernel_size] self.time_conv_out = torch.nn.Conv3d( in_channels=out_channels, out_channels=out_channels, kernel_size=conv_out_kernel_size, padding=padding, ) self.gradient_checkpointing = False def forward( self, sample: torch.FloatTensor, image_only_indicator: torch.FloatTensor, num_frames: int = 1, ) -> torch.FloatTensor: r"""The forward method of the `Decoder` class.""" sample = self.conv_in(sample) upscale_dtype = next(iter(self.up_blocks.parameters())).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward if is_torch_version(">=", "1.11.0"): # middle sample = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block), sample, image_only_indicator, use_reentrant=False, ) sample = sample.to(upscale_dtype) # up for up_block in self.up_blocks: sample = torch.utils.checkpoint.checkpoint( 
create_custom_forward(up_block), sample, image_only_indicator, use_reentrant=False, ) else: # middle sample = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block), sample, image_only_indicator, ) sample = sample.to(upscale_dtype) # up for up_block in self.up_blocks: sample = torch.utils.checkpoint.checkpoint( create_custom_forward(up_block), sample, image_only_indicator, ) else: # middle sample = self.mid_block(sample, image_only_indicator=image_only_indicator) sample = sample.to(upscale_dtype) # up for up_block in self.up_blocks: sample = up_block(sample, image_only_indicator=image_only_indicator) # post-process sample = self.conv_norm_out(sample) sample = self.conv_act(sample) sample = self.conv_out(sample) batch_frames, channels, height, width = sample.shape batch_size = batch_frames // num_frames sample = sample[None, :].reshape(batch_size, num_frames, channels, height, width).permute(0, 2, 1, 3, 4) sample = self.time_conv_out(sample) sample = sample.permute(0, 2, 1, 3, 4).reshape(batch_frames, channels, height, width) return sample class AutoencoderKLTemporalDecoder(ModelMixin, ConfigMixin): r""" A VAE model with KL loss for encoding images into latents and decoding latent representations into images. This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented for all models (such as downloading or saving). Parameters: in_channels (int, *optional*, defaults to 3): Number of channels in the input image. out_channels (int, *optional*, defaults to 3): Number of channels in the output. down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`): Tuple of downsample block types. block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`): Tuple of block output channels. layers_per_block: (`int`, *optional*, defaults to 1): Number of layers per block. latent_channels (`int`, *optional*, defaults to 4): Number of channels in the latent space. sample_size (`int`, *optional*, defaults to `32`): Sample input size. scaling_factor (`float`, *optional*, defaults to 0.18215): The component-wise standard deviation of the trained latent space computed using the first batch of the training set. This is used to scale the latent space to have unit variance when training the diffusion model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1 / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper. force_upcast (`bool`, *optional*, default to `True`): If enabled it will force the VAE to run in float32 for high image resolution pipelines, such as SD-XL. 
VAE can be fine-tuned / trained to a lower range without loosing too much precision in which case `force_upcast` can be set to `False` - see: https://huggingface.co/madebyollin/sdxl-vae-fp16-fix """ _supports_gradient_checkpointing = True @register_to_config def __init__( self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str] = ("DownEncoderBlock2D",), block_out_channels: Tuple[int] = (64,), layers_per_block: int = 1, latent_channels: int = 4, sample_size: int = 32, scaling_factor: float = 0.18215, force_upcast: float = True, ): super().__init__() # pass init params to Encoder self.encoder = Encoder( in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, double_z=True, ) # pass init params to Decoder self.decoder = TemporalDecoder( in_channels=latent_channels, out_channels=out_channels, block_out_channels=block_out_channels, layers_per_block=layers_per_block, ) self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1) sample_size = ( self.config.sample_size[0] if isinstance(self.config.sample_size, (list, tuple)) else self.config.sample_size ) self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1))) self.tile_overlap_factor = 0.25 def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, (Encoder, TemporalDecoder)): module.gradient_checkpointing = value @property # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. """ # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True) for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." 
) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ if all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnProcessor() else: raise ValueError( f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" ) self.set_attn_processor(processor) @apply_forward_hook def encode( self, x: torch.FloatTensor, return_dict: bool = True ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]: """ Encode a batch of images into latents. Args: x (`torch.FloatTensor`): Input batch of images. return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple. Returns: The latent representations of the encoded images. If `return_dict` is True, a [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned. """ h = self.encoder(x) moments = self.quant_conv(h) posterior = DiagonalGaussianDistribution(moments) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=posterior) @apply_forward_hook def decode( self, z: torch.FloatTensor, num_frames: int, return_dict: bool = True, ) -> Union[DecoderOutput, torch.FloatTensor]: """ Decode a batch of images. Args: z (`torch.FloatTensor`): Input batch of latent vectors. return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple. Returns: [`~models.vae.DecoderOutput`] or `tuple`: If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is returned. """ batch_size = z.shape[0] // num_frames image_only_indicator = torch.zeros(batch_size, num_frames, dtype=z.dtype, device=z.device) decoded = self.decoder(z, num_frames=num_frames, image_only_indicator=image_only_indicator) if not return_dict: return (decoded,) return DecoderOutput(sample=decoded) def forward( self, sample: torch.FloatTensor, sample_posterior: bool = False, return_dict: bool = True, generator: Optional[torch.Generator] = None, num_frames: int = 1, ) -> Union[DecoderOutput, torch.FloatTensor]: r""" Args: sample (`torch.FloatTensor`): Input sample. sample_posterior (`bool`, *optional*, defaults to `False`): Whether to sample from the posterior. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`DecoderOutput`] instead of a plain tuple. """ x = sample posterior = self.encode(x).latent_dist if sample_posterior: z = posterior.sample(generator=generator) else: z = posterior.mode() dec = self.decode(z, num_frames=num_frames).sample if not return_dict: return (dec,) return DecoderOutput(sample=dec)
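# --- Illustrative usage (not part of the original module) ---
# A minimal smoke-test sketch: build the default (small) configuration and
# round-trip a tiny two-frame batch. The shapes and frame count are
# illustrative, and the helper name is an illustrative addition.
def _example_roundtrip() -> torch.Size:
    vae = AutoencoderKLTemporalDecoder()
    num_frames = 2
    # Frames are stacked along the batch dimension: (batch * frames, channels, H, W).
    frames = torch.randn(num_frames, 3, 32, 32)

    posterior = vae.encode(frames).latent_dist
    latents = posterior.mode()
    decoded = vae.decode(latents, num_frames=num_frames).sample
    return decoded.shape  # expected: torch.Size([2, 3, 32, 32])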
diffusers/src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py/0
{ "file_path": "diffusers/src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py", "repo_id": "diffusers", "token_count": 7179 }
110
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numbers from typing import Dict, Optional, Tuple import torch import torch.nn as nn import torch.nn.functional as F from ..utils import is_torch_version from .activations import get_activation from .embeddings import CombinedTimestepLabelEmbeddings, PixArtAlphaCombinedTimestepSizeEmbeddings class AdaLayerNorm(nn.Module): r""" Norm layer modified to incorporate timestep embeddings. Parameters: embedding_dim (`int`): The size of each embedding vector. num_embeddings (`int`): The size of the embeddings dictionary. """ def __init__(self, embedding_dim: int, num_embeddings: int): super().__init__() self.emb = nn.Embedding(num_embeddings, embedding_dim) self.silu = nn.SiLU() self.linear = nn.Linear(embedding_dim, embedding_dim * 2) self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False) def forward(self, x: torch.Tensor, timestep: torch.Tensor) -> torch.Tensor: emb = self.linear(self.silu(self.emb(timestep))) scale, shift = torch.chunk(emb, 2) x = self.norm(x) * (1 + scale) + shift return x class AdaLayerNormZero(nn.Module): r""" Norm layer adaptive layer norm zero (adaLN-Zero). Parameters: embedding_dim (`int`): The size of each embedding vector. num_embeddings (`int`): The size of the embeddings dictionary. """ def __init__(self, embedding_dim: int, num_embeddings: int): super().__init__() self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim) self.silu = nn.SiLU() self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True) self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6) def forward( self, x: torch.Tensor, timestep: torch.Tensor, class_labels: torch.LongTensor, hidden_dtype: Optional[torch.dtype] = None, ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype))) shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1) x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] return x, gate_msa, shift_mlp, scale_mlp, gate_mlp class AdaLayerNormSingle(nn.Module): r""" Norm layer adaptive layer norm single (adaLN-single). As proposed in PixArt-Alpha (see: https://arxiv.org/abs/2310.00426; Section 2.3). Parameters: embedding_dim (`int`): The size of each embedding vector. use_additional_conditions (`bool`): To use additional conditions for normalization or not. 
""" def __init__(self, embedding_dim: int, use_additional_conditions: bool = False): super().__init__() self.emb = PixArtAlphaCombinedTimestepSizeEmbeddings( embedding_dim, size_emb_dim=embedding_dim // 3, use_additional_conditions=use_additional_conditions ) self.silu = nn.SiLU() self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True) def forward( self, timestep: torch.Tensor, added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None, batch_size: Optional[int] = None, hidden_dtype: Optional[torch.dtype] = None, ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: # No modulation happening here. embedded_timestep = self.emb(timestep, **added_cond_kwargs, batch_size=batch_size, hidden_dtype=hidden_dtype) return self.linear(self.silu(embedded_timestep)), embedded_timestep class AdaGroupNorm(nn.Module): r""" GroupNorm layer modified to incorporate timestep embeddings. Parameters: embedding_dim (`int`): The size of each embedding vector. num_embeddings (`int`): The size of the embeddings dictionary. num_groups (`int`): The number of groups to separate the channels into. act_fn (`str`, *optional*, defaults to `None`): The activation function to use. eps (`float`, *optional*, defaults to `1e-5`): The epsilon value to use for numerical stability. """ def __init__( self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5 ): super().__init__() self.num_groups = num_groups self.eps = eps if act_fn is None: self.act = None else: self.act = get_activation(act_fn) self.linear = nn.Linear(embedding_dim, out_dim * 2) def forward(self, x: torch.Tensor, emb: torch.Tensor) -> torch.Tensor: if self.act: emb = self.act(emb) emb = self.linear(emb) emb = emb[:, :, None, None] scale, shift = emb.chunk(2, dim=1) x = F.group_norm(x, self.num_groups, eps=self.eps) x = x * (1 + scale) + shift return x class AdaLayerNormContinuous(nn.Module): def __init__( self, embedding_dim: int, conditioning_embedding_dim: int, # NOTE: It is a bit weird that the norm layer can be configured to have scale and shift parameters # because the output is immediately scaled and shifted by the projected conditioning embeddings. # Note that AdaLayerNorm does not let the norm layer have scale and shift parameters. # However, this is how it was implemented in the original code, and it's rather likely you should # set `elementwise_affine` to False. 
elementwise_affine=True, eps=1e-5, bias=True, norm_type="layer_norm", ): super().__init__() self.silu = nn.SiLU() self.linear = nn.Linear(conditioning_embedding_dim, embedding_dim * 2, bias=bias) if norm_type == "layer_norm": self.norm = LayerNorm(embedding_dim, eps, elementwise_affine, bias) elif norm_type == "rms_norm": self.norm = RMSNorm(embedding_dim, eps, elementwise_affine) else: raise ValueError(f"unknown norm_type {norm_type}") def forward(self, x: torch.Tensor, conditioning_embedding: torch.Tensor) -> torch.Tensor: emb = self.linear(self.silu(conditioning_embedding)) scale, shift = torch.chunk(emb, 2, dim=1) x = self.norm(x) * (1 + scale)[:, None, :] + shift[:, None, :] return x if is_torch_version(">=", "2.1.0"): LayerNorm = nn.LayerNorm else: # Has optional bias parameter compared to torch layer norm # TODO: replace with torch layernorm once min required torch version >= 2.1 class LayerNorm(nn.Module): def __init__(self, dim, eps: float = 1e-5, elementwise_affine: bool = True, bias: bool = True): super().__init__() self.eps = eps if isinstance(dim, numbers.Integral): dim = (dim,) self.dim = torch.Size(dim) if elementwise_affine: self.weight = nn.Parameter(torch.ones(dim)) self.bias = nn.Parameter(torch.zeros(dim)) if bias else None else: self.weight = None self.bias = None def forward(self, input): return F.layer_norm(input, self.dim, self.weight, self.bias, self.eps) class RMSNorm(nn.Module): def __init__(self, dim, eps: float, elementwise_affine: bool = True): super().__init__() self.eps = eps if isinstance(dim, numbers.Integral): dim = (dim,) self.dim = torch.Size(dim) if elementwise_affine: self.weight = nn.Parameter(torch.ones(dim)) else: self.weight = None def forward(self, hidden_states): input_dtype = hidden_states.dtype variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.eps) if self.weight is not None: # convert into half-precision if necessary if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) hidden_states = hidden_states * self.weight else: hidden_states = hidden_states.to(input_dtype) return hidden_states class GlobalResponseNorm(nn.Module): # Taken from https://github.com/facebookresearch/ConvNeXt-V2/blob/3608f67cc1dae164790c5d0aead7bf2d73d9719b/models/utils.py#L105 def __init__(self, dim): super().__init__() self.gamma = nn.Parameter(torch.zeros(1, 1, 1, dim)) self.beta = nn.Parameter(torch.zeros(1, 1, 1, dim)) def forward(self, x): gx = torch.norm(x, p=2, dim=(1, 2), keepdim=True) nx = gx / (gx.mean(dim=-1, keepdim=True) + 1e-6) return self.gamma * (x * nx) + self.beta + x
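# --- Illustrative usage (not part of the original module) ---
# A minimal sketch exercising two of the layers above on dummy tensors. The
# dimensions are illustrative. AdaLayerNorm is used with a single (0-d)
# timestep index, matching how `torch.chunk(emb, 2)` splits the projected
# embedding along its only dimension.
def _example_norms() -> Tuple[torch.Size, torch.Size]:
    hidden = torch.randn(2, 16, 64)  # (batch, sequence, dim)

    rms = RMSNorm(dim=64, eps=1e-6)
    ada = AdaLayerNorm(embedding_dim=64, num_embeddings=10)

    normed = rms(hidden)
    modulated = ada(hidden, timestep=torch.tensor(3))
    return normed.shape, modulated.shape  # both expected: (2, 16, 64)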
diffusers/src/diffusers/models/normalization.py/0
{ "file_path": "diffusers/src/diffusers/models/normalization.py", "repo_id": "diffusers", "token_count": 4029 }
111
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional from ..utils import deprecate from .unets.unet_2d_blocks import ( AttnDownBlock2D, AttnDownEncoderBlock2D, AttnSkipDownBlock2D, AttnSkipUpBlock2D, AttnUpBlock2D, AttnUpDecoderBlock2D, AutoencoderTinyBlock, CrossAttnDownBlock2D, CrossAttnUpBlock2D, DownBlock2D, KAttentionBlock, KCrossAttnDownBlock2D, KCrossAttnUpBlock2D, KDownBlock2D, KUpBlock2D, ResnetDownsampleBlock2D, ResnetUpsampleBlock2D, SimpleCrossAttnDownBlock2D, SimpleCrossAttnUpBlock2D, SkipDownBlock2D, SkipUpBlock2D, UNetMidBlock2D, UNetMidBlock2DCrossAttn, UNetMidBlock2DSimpleCrossAttn, UpBlock2D, UpDecoderBlock2D, ) def get_down_block( down_block_type: str, num_layers: int, in_channels: int, out_channels: int, temb_channels: int, add_downsample: bool, resnet_eps: float, resnet_act_fn: str, transformer_layers_per_block: int = 1, num_attention_heads: Optional[int] = None, resnet_groups: Optional[int] = None, cross_attention_dim: Optional[int] = None, downsample_padding: Optional[int] = None, dual_cross_attention: bool = False, use_linear_projection: bool = False, only_cross_attention: bool = False, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", attention_type: str = "default", resnet_skip_time_act: bool = False, resnet_out_scale_factor: float = 1.0, cross_attention_norm: Optional[str] = None, attention_head_dim: Optional[int] = None, downsample_type: Optional[str] = None, dropout: float = 0.0, ): deprecation_message = "Importing `get_down_block` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import get_down_block`, instead." 
deprecate("get_down_block", "0.29", deprecation_message) from .unets.unet_2d_blocks import get_down_block return get_down_block( down_block_type=down_block_type, num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, transformer_layers_per_block=transformer_layers_per_block, num_attention_heads=num_attention_heads, resnet_groups=resnet_groups, cross_attention_dim=cross_attention_dim, downsample_padding=downsample_padding, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, attention_type=attention_type, resnet_skip_time_act=resnet_skip_time_act, resnet_out_scale_factor=resnet_out_scale_factor, cross_attention_norm=cross_attention_norm, attention_head_dim=attention_head_dim, downsample_type=downsample_type, dropout=dropout, ) def get_mid_block( mid_block_type: str, temb_channels: int, in_channels: int, resnet_eps: float, resnet_act_fn: str, resnet_groups: int, output_scale_factor: float = 1.0, transformer_layers_per_block: int = 1, num_attention_heads: Optional[int] = None, cross_attention_dim: Optional[int] = None, dual_cross_attention: bool = False, use_linear_projection: bool = False, mid_block_only_cross_attention: bool = False, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", attention_type: str = "default", resnet_skip_time_act: bool = False, cross_attention_norm: Optional[str] = None, attention_head_dim: Optional[int] = 1, dropout: float = 0.0, ): if mid_block_type == "UNetMidBlock2DCrossAttn": return UNetMidBlock2DCrossAttn( transformer_layers_per_block=transformer_layers_per_block, in_channels=in_channels, temb_channels=temb_channels, dropout=dropout, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, output_scale_factor=output_scale_factor, resnet_time_scale_shift=resnet_time_scale_shift, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, resnet_groups=resnet_groups, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, attention_type=attention_type, ) elif mid_block_type == "UNetMidBlock2DSimpleCrossAttn": return UNetMidBlock2DSimpleCrossAttn( in_channels=in_channels, temb_channels=temb_channels, dropout=dropout, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, output_scale_factor=output_scale_factor, cross_attention_dim=cross_attention_dim, attention_head_dim=attention_head_dim, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift, skip_time_act=resnet_skip_time_act, only_cross_attention=mid_block_only_cross_attention, cross_attention_norm=cross_attention_norm, ) elif mid_block_type == "UNetMidBlock2D": return UNetMidBlock2D( in_channels=in_channels, temb_channels=temb_channels, dropout=dropout, num_layers=0, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, output_scale_factor=output_scale_factor, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift, add_attention=False, ) elif mid_block_type is None: return None else: raise ValueError(f"unknown mid_block_type : {mid_block_type}") def get_up_block( up_block_type: str, num_layers: int, in_channels: int, out_channels: int, prev_output_channel: int, temb_channels: int, add_upsample: bool, resnet_eps: float, resnet_act_fn: str, resolution_idx: Optional[int] = None, 
transformer_layers_per_block: int = 1, num_attention_heads: Optional[int] = None, resnet_groups: Optional[int] = None, cross_attention_dim: Optional[int] = None, dual_cross_attention: bool = False, use_linear_projection: bool = False, only_cross_attention: bool = False, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", attention_type: str = "default", resnet_skip_time_act: bool = False, resnet_out_scale_factor: float = 1.0, cross_attention_norm: Optional[str] = None, attention_head_dim: Optional[int] = None, upsample_type: Optional[str] = None, dropout: float = 0.0, ): deprecation_message = "Importing `get_up_block` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import get_up_block`, instead." deprecate("get_up_block", "0.29", deprecation_message) from .unets.unet_2d_blocks import get_up_block return get_up_block( up_block_type=up_block_type, num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resolution_idx=resolution_idx, transformer_layers_per_block=transformer_layers_per_block, num_attention_heads=num_attention_heads, resnet_groups=resnet_groups, cross_attention_dim=cross_attention_dim, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, attention_type=attention_type, resnet_skip_time_act=resnet_skip_time_act, resnet_out_scale_factor=resnet_out_scale_factor, cross_attention_norm=cross_attention_norm, attention_head_dim=attention_head_dim, upsample_type=upsample_type, dropout=dropout, ) class AutoencoderTinyBlock(AutoencoderTinyBlock): deprecation_message = "Importing `AutoencoderTinyBlock` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import AutoencoderTinyBlock`, instead." deprecate("AutoencoderTinyBlock", "0.29", deprecation_message) class UNetMidBlock2D(UNetMidBlock2D): deprecation_message = "Importing `UNetMidBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import UNetMidBlock2D`, instead." deprecate("UNetMidBlock2D", "0.29", deprecation_message) class UNetMidBlock2DCrossAttn(UNetMidBlock2DCrossAttn): deprecation_message = "Importing `UNetMidBlock2DCrossAttn` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import UNetMidBlock2DCrossAttn`, instead." deprecate("UNetMidBlock2DCrossAttn", "0.29", deprecation_message) class UNetMidBlock2DSimpleCrossAttn(UNetMidBlock2DSimpleCrossAttn): deprecation_message = "Importing `UNetMidBlock2DSimpleCrossAttn` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import UNetMidBlock2DSimpleCrossAttn`, instead." deprecate("UNetMidBlock2DSimpleCrossAttn", "0.29", deprecation_message) class AttnDownBlock2D(AttnDownBlock2D): deprecation_message = "Importing `AttnDownBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. 
Please use `from diffusers.models.unets.unet_2d_blocks import AttnDownBlock2D`, instead." deprecate("AttnDownBlock2D", "0.29", deprecation_message) class CrossAttnDownBlock2D(CrossAttnDownBlock2D): deprecation_message = "Importing `AttnDownBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import CrossAttnDownBlock2D`, instead." deprecate("CrossAttnDownBlock2D", "0.29", deprecation_message) class DownBlock2D(DownBlock2D): deprecation_message = "Importing `DownBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import DownBlock2D`, instead." deprecate("DownBlock2D", "0.29", deprecation_message) class AttnDownEncoderBlock2D(AttnDownEncoderBlock2D): deprecation_message = "Importing `AttnDownEncoderBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import AttnDownEncoderBlock2D`, instead." deprecate("AttnDownEncoderBlock2D", "0.29", deprecation_message) class AttnSkipDownBlock2D(AttnSkipDownBlock2D): deprecation_message = "Importing `AttnSkipDownBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import AttnSkipDownBlock2D`, instead." deprecate("AttnSkipDownBlock2D", "0.29", deprecation_message) class SkipDownBlock2D(SkipDownBlock2D): deprecation_message = "Importing `SkipDownBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import SkipDownBlock2D`, instead." deprecate("SkipDownBlock2D", "0.29", deprecation_message) class ResnetDownsampleBlock2D(ResnetDownsampleBlock2D): deprecation_message = "Importing `ResnetDownsampleBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import ResnetDownsampleBlock2D`, instead." deprecate("ResnetDownsampleBlock2D", "0.29", deprecation_message) class SimpleCrossAttnDownBlock2D(SimpleCrossAttnDownBlock2D): deprecation_message = "Importing `SimpleCrossAttnDownBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import SimpleCrossAttnDownBlock2D`, instead." deprecate("SimpleCrossAttnDownBlock2D", "0.29", deprecation_message) class KDownBlock2D(KDownBlock2D): deprecation_message = "Importing `KDownBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import KDownBlock2D`, instead." deprecate("KDownBlock2D", "0.29", deprecation_message) class KCrossAttnDownBlock2D(KCrossAttnDownBlock2D): deprecation_message = "Importing `KCrossAttnDownBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import KCrossAttnDownBlock2D`, instead." deprecate("KCrossAttnDownBlock2D", "0.29", deprecation_message) class AttnUpBlock2D(AttnUpBlock2D): deprecation_message = "Importing `AttnUpBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. 
Please use `from diffusers.models.unets.unet_2d_blocks import AttnUpBlock2D`, instead." deprecate("AttnUpBlock2D", "0.29", deprecation_message) class CrossAttnUpBlock2D(CrossAttnUpBlock2D): deprecation_message = "Importing `CrossAttnUpBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import CrossAttnUpBlock2D`, instead." deprecate("CrossAttnUpBlock2D", "0.29", deprecation_message) class UpBlock2D(UpBlock2D): deprecation_message = "Importing `UpBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import UpBlock2D`, instead." deprecate("UpBlock2D", "0.29", deprecation_message) class UpDecoderBlock2D(UpDecoderBlock2D): deprecation_message = "Importing `UpDecoderBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import UpDecoderBlock2D`, instead." deprecate("UpDecoderBlock2D", "0.29", deprecation_message) class AttnUpDecoderBlock2D(AttnUpDecoderBlock2D): deprecation_message = "Importing `AttnUpDecoderBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import AttnUpDecoderBlock2D`, instead." deprecate("AttnUpDecoderBlock2D", "0.29", deprecation_message) class AttnSkipUpBlock2D(AttnSkipUpBlock2D): deprecation_message = "Importing `AttnSkipUpBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import AttnSkipUpBlock2D`, instead." deprecate("AttnSkipUpBlock2D", "0.29", deprecation_message) class SkipUpBlock2D(SkipUpBlock2D): deprecation_message = "Importing `SkipUpBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import SkipUpBlock2D`, instead." deprecate("SkipUpBlock2D", "0.29", deprecation_message) class ResnetUpsampleBlock2D(ResnetUpsampleBlock2D): deprecation_message = "Importing `ResnetUpsampleBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import ResnetUpsampleBlock2D`, instead." deprecate("ResnetUpsampleBlock2D", "0.29", deprecation_message) class SimpleCrossAttnUpBlock2D(SimpleCrossAttnUpBlock2D): deprecation_message = "Importing `SimpleCrossAttnUpBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import SimpleCrossAttnUpBlock2D`, instead." deprecate("SimpleCrossAttnUpBlock2D", "0.29", deprecation_message) class KUpBlock2D(KUpBlock2D): deprecation_message = "Importing `KUpBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import KUpBlock2D`, instead." deprecate("KUpBlock2D", "0.29", deprecation_message) class KCrossAttnUpBlock2D(KCrossAttnUpBlock2D): deprecation_message = "Importing `KCrossAttnUpBlock2D` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import KCrossAttnUpBlock2D`, instead." 
deprecate("KCrossAttnUpBlock2D", "0.29", deprecation_message) # can potentially later be renamed to `No-feed-forward` attention class KAttentionBlock(KAttentionBlock): deprecation_message = "Importing `KAttentionBlock` from `diffusers.models.unet_2d_blocks` is deprecated and this will be removed in a future version. Please use `from diffusers.models.unets.unet_2d_blocks import KAttentionBlock`, instead." deprecate("KAttentionBlock", "0.29", deprecation_message)
diffusers/src/diffusers/models/unet_2d_blocks.py/0
{ "file_path": "diffusers/src/diffusers/models/unet_2d_blocks.py", "repo_id": "diffusers", "token_count": 7248 }
112
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...loaders.unet import FromOriginalUNetMixin from ...utils import BaseOutput from ..attention_processor import Attention from ..modeling_utils import ModelMixin # Copied from diffusers.pipelines.wuerstchen.modeling_wuerstchen_common.WuerstchenLayerNorm with WuerstchenLayerNorm -> SDCascadeLayerNorm class SDCascadeLayerNorm(nn.LayerNorm): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def forward(self, x): x = x.permute(0, 2, 3, 1) x = super().forward(x) return x.permute(0, 3, 1, 2) class SDCascadeTimestepBlock(nn.Module): def __init__(self, c, c_timestep, conds=[]): super().__init__() linear_cls = nn.Linear self.mapper = linear_cls(c_timestep, c * 2) self.conds = conds for cname in conds: setattr(self, f"mapper_{cname}", linear_cls(c_timestep, c * 2)) def forward(self, x, t): t = t.chunk(len(self.conds) + 1, dim=1) a, b = self.mapper(t[0])[:, :, None, None].chunk(2, dim=1) for i, c in enumerate(self.conds): ac, bc = getattr(self, f"mapper_{c}")(t[i + 1])[:, :, None, None].chunk(2, dim=1) a, b = a + ac, b + bc return x * (1 + a) + b class SDCascadeResBlock(nn.Module): def __init__(self, c, c_skip=0, kernel_size=3, dropout=0.0): super().__init__() self.depthwise = nn.Conv2d(c, c, kernel_size=kernel_size, padding=kernel_size // 2, groups=c) self.norm = SDCascadeLayerNorm(c, elementwise_affine=False, eps=1e-6) self.channelwise = nn.Sequential( nn.Linear(c + c_skip, c * 4), nn.GELU(), GlobalResponseNorm(c * 4), nn.Dropout(dropout), nn.Linear(c * 4, c), ) def forward(self, x, x_skip=None): x_res = x x = self.norm(self.depthwise(x)) if x_skip is not None: x = torch.cat([x, x_skip], dim=1) x = self.channelwise(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) return x + x_res # from https://github.com/facebookresearch/ConvNeXt-V2/blob/3608f67cc1dae164790c5d0aead7bf2d73d9719b/models/utils.py#L105 class GlobalResponseNorm(nn.Module): def __init__(self, dim): super().__init__() self.gamma = nn.Parameter(torch.zeros(1, 1, 1, dim)) self.beta = nn.Parameter(torch.zeros(1, 1, 1, dim)) def forward(self, x): agg_norm = torch.norm(x, p=2, dim=(1, 2), keepdim=True) stand_div_norm = agg_norm / (agg_norm.mean(dim=-1, keepdim=True) + 1e-6) return self.gamma * (x * stand_div_norm) + self.beta + x class SDCascadeAttnBlock(nn.Module): def __init__(self, c, c_cond, nhead, self_attn=True, dropout=0.0): super().__init__() linear_cls = nn.Linear self.self_attn = self_attn self.norm = SDCascadeLayerNorm(c, elementwise_affine=False, eps=1e-6) self.attention = Attention(query_dim=c, heads=nhead, dim_head=c // nhead, dropout=dropout, bias=True) self.kv_mapper = nn.Sequential(nn.SiLU(), linear_cls(c_cond, c)) def forward(self, x, kv): kv = self.kv_mapper(kv) norm_x = self.norm(x) if self.self_attn: 
batch_size, channel, _, _ = x.shape kv = torch.cat([norm_x.view(batch_size, channel, -1).transpose(1, 2), kv], dim=1) x = x + self.attention(norm_x, encoder_hidden_states=kv) return x class UpDownBlock2d(nn.Module): def __init__(self, in_channels, out_channels, mode, enabled=True): super().__init__() if mode not in ["up", "down"]: raise ValueError(f"{mode} not supported") interpolation = ( nn.Upsample(scale_factor=2 if mode == "up" else 0.5, mode="bilinear", align_corners=True) if enabled else nn.Identity() ) mapping = nn.Conv2d(in_channels, out_channels, kernel_size=1) self.blocks = nn.ModuleList([interpolation, mapping] if mode == "up" else [mapping, interpolation]) def forward(self, x): for block in self.blocks: x = block(x) return x @dataclass class StableCascadeUNetOutput(BaseOutput): sample: torch.FloatTensor = None class StableCascadeUNet(ModelMixin, ConfigMixin, FromOriginalUNetMixin): _supports_gradient_checkpointing = True @register_to_config def __init__( self, in_channels: int = 16, out_channels: int = 16, timestep_ratio_embedding_dim: int = 64, patch_size: int = 1, conditioning_dim: int = 2048, block_out_channels: Tuple[int] = (2048, 2048), num_attention_heads: Tuple[int] = (32, 32), down_num_layers_per_block: Tuple[int] = (8, 24), up_num_layers_per_block: Tuple[int] = (24, 8), down_blocks_repeat_mappers: Optional[Tuple[int]] = ( 1, 1, ), up_blocks_repeat_mappers: Optional[Tuple[int]] = (1, 1), block_types_per_layer: Tuple[Tuple[str]] = ( ("SDCascadeResBlock", "SDCascadeTimestepBlock", "SDCascadeAttnBlock"), ("SDCascadeResBlock", "SDCascadeTimestepBlock", "SDCascadeAttnBlock"), ), clip_text_in_channels: Optional[int] = None, clip_text_pooled_in_channels=1280, clip_image_in_channels: Optional[int] = None, clip_seq=4, effnet_in_channels: Optional[int] = None, pixel_mapper_in_channels: Optional[int] = None, kernel_size=3, dropout: Union[float, Tuple[float]] = (0.1, 0.1), self_attn: Union[bool, Tuple[bool]] = True, timestep_conditioning_type: Tuple[str] = ("sca", "crp"), switch_level: Optional[Tuple[bool]] = None, ): """ Parameters: in_channels (`int`, defaults to 16): Number of channels in the input sample. out_channels (`int`, defaults to 16): Number of channels in the output sample. timestep_ratio_embedding_dim (`int`, defaults to 64): Dimension of the projected time embedding. patch_size (`int`, defaults to 1): Patch size to use for pixel unshuffling layer conditioning_dim (`int`, defaults to 2048): Dimension of the image and text conditional embedding. block_out_channels (Tuple[int], defaults to (2048, 2048)): Tuple of output channels for each block. num_attention_heads (Tuple[int], defaults to (32, 32)): Number of attention heads in each attention block. Set to -1 to if block types in a layer do not have attention. down_num_layers_per_block (Tuple[int], defaults to [8, 24]): Number of layers in each down block. up_num_layers_per_block (Tuple[int], defaults to [24, 8]): Number of layers in each up block. down_blocks_repeat_mappers (Tuple[int], optional, defaults to [1, 1]): Number of 1x1 Convolutional layers to repeat in each down block. up_blocks_repeat_mappers (Tuple[int], optional, defaults to [1, 1]): Number of 1x1 Convolutional layers to repeat in each up block. block_types_per_layer (Tuple[Tuple[str]], optional, defaults to ( ("SDCascadeResBlock", "SDCascadeTimestepBlock", "SDCascadeAttnBlock"), ("SDCascadeResBlock", "SDCascadeTimestepBlock", "SDCascadeAttnBlock") ): Block types used in each layer of the up/down blocks. 
clip_text_in_channels (`int`, *optional*, defaults to `None`): Number of input channels for CLIP based text conditioning. clip_text_pooled_in_channels (`int`, *optional*, defaults to 1280): Number of input channels for pooled CLIP text embeddings. clip_image_in_channels (`int`, *optional*): Number of input channels for CLIP based image conditioning. clip_seq (`int`, *optional*, defaults to 4): effnet_in_channels (`int`, *optional*, defaults to `None`): Number of input channels for effnet conditioning. pixel_mapper_in_channels (`int`, defaults to `None`): Number of input channels for pixel mapper conditioning. kernel_size (`int`, *optional*, defaults to 3): Kernel size to use in the block convolutional layers. dropout (Tuple[float], *optional*, defaults to (0.1, 0.1)): Dropout to use per block. self_attn (Union[bool, Tuple[bool]]): Tuple of booleans that determine whether to use self attention in a block or not. timestep_conditioning_type (Tuple[str], defaults to ("sca", "crp")): Timestep conditioning type. switch_level (Optional[Tuple[bool]], *optional*, defaults to `None`): Tuple that indicates whether upsampling or downsampling should be applied in a block """ super().__init__() if len(block_out_channels) != len(down_num_layers_per_block): raise ValueError( f"Number of elements in `down_num_layers_per_block` must match the length of `block_out_channels`: {len(block_out_channels)}" ) elif len(block_out_channels) != len(up_num_layers_per_block): raise ValueError( f"Number of elements in `up_num_layers_per_block` must match the length of `block_out_channels`: {len(block_out_channels)}" ) elif len(block_out_channels) != len(down_blocks_repeat_mappers): raise ValueError( f"Number of elements in `down_blocks_repeat_mappers` must match the length of `block_out_channels`: {len(block_out_channels)}" ) elif len(block_out_channels) != len(up_blocks_repeat_mappers): raise ValueError( f"Number of elements in `up_blocks_repeat_mappers` must match the length of `block_out_channels`: {len(block_out_channels)}" ) elif len(block_out_channels) != len(block_types_per_layer): raise ValueError( f"Number of elements in `block_types_per_layer` must match the length of `block_out_channels`: {len(block_out_channels)}" ) if isinstance(dropout, float): dropout = (dropout,) * len(block_out_channels) if isinstance(self_attn, bool): self_attn = (self_attn,) * len(block_out_channels) # CONDITIONING if effnet_in_channels is not None: self.effnet_mapper = nn.Sequential( nn.Conv2d(effnet_in_channels, block_out_channels[0] * 4, kernel_size=1), nn.GELU(), nn.Conv2d(block_out_channels[0] * 4, block_out_channels[0], kernel_size=1), SDCascadeLayerNorm(block_out_channels[0], elementwise_affine=False, eps=1e-6), ) if pixel_mapper_in_channels is not None: self.pixels_mapper = nn.Sequential( nn.Conv2d(pixel_mapper_in_channels, block_out_channels[0] * 4, kernel_size=1), nn.GELU(), nn.Conv2d(block_out_channels[0] * 4, block_out_channels[0], kernel_size=1), SDCascadeLayerNorm(block_out_channels[0], elementwise_affine=False, eps=1e-6), ) self.clip_txt_pooled_mapper = nn.Linear(clip_text_pooled_in_channels, conditioning_dim * clip_seq) if clip_text_in_channels is not None: self.clip_txt_mapper = nn.Linear(clip_text_in_channels, conditioning_dim) if clip_image_in_channels is not None: self.clip_img_mapper = nn.Linear(clip_image_in_channels, conditioning_dim * clip_seq) self.clip_norm = nn.LayerNorm(conditioning_dim, elementwise_affine=False, eps=1e-6) self.embedding = nn.Sequential( nn.PixelUnshuffle(patch_size), nn.Conv2d(in_channels * 
(patch_size**2), block_out_channels[0], kernel_size=1), SDCascadeLayerNorm(block_out_channels[0], elementwise_affine=False, eps=1e-6), ) def get_block(block_type, in_channels, nhead, c_skip=0, dropout=0, self_attn=True): if block_type == "SDCascadeResBlock": return SDCascadeResBlock(in_channels, c_skip, kernel_size=kernel_size, dropout=dropout) elif block_type == "SDCascadeAttnBlock": return SDCascadeAttnBlock(in_channels, conditioning_dim, nhead, self_attn=self_attn, dropout=dropout) elif block_type == "SDCascadeTimestepBlock": return SDCascadeTimestepBlock( in_channels, timestep_ratio_embedding_dim, conds=timestep_conditioning_type ) else: raise ValueError(f"Block type {block_type} not supported") # BLOCKS # -- down blocks self.down_blocks = nn.ModuleList() self.down_downscalers = nn.ModuleList() self.down_repeat_mappers = nn.ModuleList() for i in range(len(block_out_channels)): if i > 0: self.down_downscalers.append( nn.Sequential( SDCascadeLayerNorm(block_out_channels[i - 1], elementwise_affine=False, eps=1e-6), UpDownBlock2d( block_out_channels[i - 1], block_out_channels[i], mode="down", enabled=switch_level[i - 1] ) if switch_level is not None else nn.Conv2d(block_out_channels[i - 1], block_out_channels[i], kernel_size=2, stride=2), ) ) else: self.down_downscalers.append(nn.Identity()) down_block = nn.ModuleList() for _ in range(down_num_layers_per_block[i]): for block_type in block_types_per_layer[i]: block = get_block( block_type, block_out_channels[i], num_attention_heads[i], dropout=dropout[i], self_attn=self_attn[i], ) down_block.append(block) self.down_blocks.append(down_block) if down_blocks_repeat_mappers is not None: block_repeat_mappers = nn.ModuleList() for _ in range(down_blocks_repeat_mappers[i] - 1): block_repeat_mappers.append(nn.Conv2d(block_out_channels[i], block_out_channels[i], kernel_size=1)) self.down_repeat_mappers.append(block_repeat_mappers) # -- up blocks self.up_blocks = nn.ModuleList() self.up_upscalers = nn.ModuleList() self.up_repeat_mappers = nn.ModuleList() for i in reversed(range(len(block_out_channels))): if i > 0: self.up_upscalers.append( nn.Sequential( SDCascadeLayerNorm(block_out_channels[i], elementwise_affine=False, eps=1e-6), UpDownBlock2d( block_out_channels[i], block_out_channels[i - 1], mode="up", enabled=switch_level[i - 1] ) if switch_level is not None else nn.ConvTranspose2d( block_out_channels[i], block_out_channels[i - 1], kernel_size=2, stride=2 ), ) ) else: self.up_upscalers.append(nn.Identity()) up_block = nn.ModuleList() for j in range(up_num_layers_per_block[::-1][i]): for k, block_type in enumerate(block_types_per_layer[i]): c_skip = block_out_channels[i] if i < len(block_out_channels) - 1 and j == k == 0 else 0 block = get_block( block_type, block_out_channels[i], num_attention_heads[i], c_skip=c_skip, dropout=dropout[i], self_attn=self_attn[i], ) up_block.append(block) self.up_blocks.append(up_block) if up_blocks_repeat_mappers is not None: block_repeat_mappers = nn.ModuleList() for _ in range(up_blocks_repeat_mappers[::-1][i] - 1): block_repeat_mappers.append(nn.Conv2d(block_out_channels[i], block_out_channels[i], kernel_size=1)) self.up_repeat_mappers.append(block_repeat_mappers) # OUTPUT self.clf = nn.Sequential( SDCascadeLayerNorm(block_out_channels[0], elementwise_affine=False, eps=1e-6), nn.Conv2d(block_out_channels[0], out_channels * (patch_size**2), kernel_size=1), nn.PixelShuffle(patch_size), ) self.gradient_checkpointing = False def _set_gradient_checkpointing(self, value=False): self.gradient_checkpointing = value def 
_init_weights(self, m): if isinstance(m, (nn.Conv2d, nn.Linear)): torch.nn.init.xavier_uniform_(m.weight) if m.bias is not None: nn.init.constant_(m.bias, 0) nn.init.normal_(self.clip_txt_pooled_mapper.weight, std=0.02) nn.init.normal_(self.clip_txt_mapper.weight, std=0.02) if hasattr(self, "clip_txt_mapper") else None nn.init.normal_(self.clip_img_mapper.weight, std=0.02) if hasattr(self, "clip_img_mapper") else None if hasattr(self, "effnet_mapper"): nn.init.normal_(self.effnet_mapper[0].weight, std=0.02) # conditionings nn.init.normal_(self.effnet_mapper[2].weight, std=0.02) # conditionings if hasattr(self, "pixels_mapper"): nn.init.normal_(self.pixels_mapper[0].weight, std=0.02) # conditionings nn.init.normal_(self.pixels_mapper[2].weight, std=0.02) # conditionings torch.nn.init.xavier_uniform_(self.embedding[1].weight, 0.02) # inputs nn.init.constant_(self.clf[1].weight, 0) # outputs # blocks for level_block in self.down_blocks + self.up_blocks: for block in level_block: if isinstance(block, SDCascadeResBlock): block.channelwise[-1].weight.data *= np.sqrt(1 / sum(self.config.blocks[0])) elif isinstance(block, SDCascadeTimestepBlock): nn.init.constant_(block.mapper.weight, 0) def get_timestep_ratio_embedding(self, timestep_ratio, max_positions=10000): r = timestep_ratio * max_positions half_dim = self.config.timestep_ratio_embedding_dim // 2 emb = math.log(max_positions) / (half_dim - 1) emb = torch.arange(half_dim, device=r.device).float().mul(-emb).exp() emb = r[:, None] * emb[None, :] emb = torch.cat([emb.sin(), emb.cos()], dim=1) if self.config.timestep_ratio_embedding_dim % 2 == 1: # zero pad emb = nn.functional.pad(emb, (0, 1), mode="constant") return emb.to(dtype=r.dtype) def get_clip_embeddings(self, clip_txt_pooled, clip_txt=None, clip_img=None): if len(clip_txt_pooled.shape) == 2: clip_txt_pool = clip_txt_pooled.unsqueeze(1) clip_txt_pool = self.clip_txt_pooled_mapper(clip_txt_pooled).view( clip_txt_pooled.size(0), clip_txt_pooled.size(1) * self.config.clip_seq, -1 ) if clip_txt is not None and clip_img is not None: clip_txt = self.clip_txt_mapper(clip_txt) if len(clip_img.shape) == 2: clip_img = clip_img.unsqueeze(1) clip_img = self.clip_img_mapper(clip_img).view( clip_img.size(0), clip_img.size(1) * self.config.clip_seq, -1 ) clip = torch.cat([clip_txt, clip_txt_pool, clip_img], dim=1) else: clip = clip_txt_pool return self.clip_norm(clip) def _down_encode(self, x, r_embed, clip): level_outputs = [] block_group = zip(self.down_blocks, self.down_downscalers, self.down_repeat_mappers) if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward for down_block, downscaler, repmap in block_group: x = downscaler(x) for i in range(len(repmap) + 1): for block in down_block: if isinstance(block, SDCascadeResBlock): x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, use_reentrant=False) elif isinstance(block, SDCascadeAttnBlock): x = torch.utils.checkpoint.checkpoint( create_custom_forward(block), x, clip, use_reentrant=False ) elif isinstance(block, SDCascadeTimestepBlock): x = torch.utils.checkpoint.checkpoint( create_custom_forward(block), x, r_embed, use_reentrant=False ) else: x = x = torch.utils.checkpoint.checkpoint( create_custom_forward(block), use_reentrant=False ) if i < len(repmap): x = repmap[i](x) level_outputs.insert(0, x) else: for down_block, downscaler, repmap in block_group: x = downscaler(x) for i in range(len(repmap) + 1): for block in 
down_block: if isinstance(block, SDCascadeResBlock): x = block(x) elif isinstance(block, SDCascadeAttnBlock): x = block(x, clip) elif isinstance(block, SDCascadeTimestepBlock): x = block(x, r_embed) else: x = block(x) if i < len(repmap): x = repmap[i](x) level_outputs.insert(0, x) return level_outputs def _up_decode(self, level_outputs, r_embed, clip): x = level_outputs[0] block_group = zip(self.up_blocks, self.up_upscalers, self.up_repeat_mappers) if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward for i, (up_block, upscaler, repmap) in enumerate(block_group): for j in range(len(repmap) + 1): for k, block in enumerate(up_block): if isinstance(block, SDCascadeResBlock): skip = level_outputs[i] if k == 0 and i > 0 else None if skip is not None and (x.size(-1) != skip.size(-1) or x.size(-2) != skip.size(-2)): orig_type = x.dtype x = torch.nn.functional.interpolate( x.float(), skip.shape[-2:], mode="bilinear", align_corners=True ) x = x.to(orig_type) x = torch.utils.checkpoint.checkpoint( create_custom_forward(block), x, skip, use_reentrant=False ) elif isinstance(block, SDCascadeAttnBlock): x = torch.utils.checkpoint.checkpoint( create_custom_forward(block), x, clip, use_reentrant=False ) elif isinstance(block, SDCascadeTimestepBlock): x = torch.utils.checkpoint.checkpoint( create_custom_forward(block), x, r_embed, use_reentrant=False ) else: x = torch.utils.checkpoint.checkpoint(create_custom_forward(block), x, use_reentrant=False) if j < len(repmap): x = repmap[j](x) x = upscaler(x) else: for i, (up_block, upscaler, repmap) in enumerate(block_group): for j in range(len(repmap) + 1): for k, block in enumerate(up_block): if isinstance(block, SDCascadeResBlock): skip = level_outputs[i] if k == 0 and i > 0 else None if skip is not None and (x.size(-1) != skip.size(-1) or x.size(-2) != skip.size(-2)): orig_type = x.dtype x = torch.nn.functional.interpolate( x.float(), skip.shape[-2:], mode="bilinear", align_corners=True ) x = x.to(orig_type) x = block(x, skip) elif isinstance(block, SDCascadeAttnBlock): x = block(x, clip) elif isinstance(block, SDCascadeTimestepBlock): x = block(x, r_embed) else: x = block(x) if j < len(repmap): x = repmap[j](x) x = upscaler(x) return x def forward( self, sample, timestep_ratio, clip_text_pooled, clip_text=None, clip_img=None, effnet=None, pixels=None, sca=None, crp=None, return_dict=True, ): if pixels is None: pixels = sample.new_zeros(sample.size(0), 3, 8, 8) # Process the conditioning embeddings timestep_ratio_embed = self.get_timestep_ratio_embedding(timestep_ratio) for c in self.config.timestep_conditioning_type: if c == "sca": cond = sca elif c == "crp": cond = crp else: cond = None t_cond = cond or torch.zeros_like(timestep_ratio) timestep_ratio_embed = torch.cat([timestep_ratio_embed, self.get_timestep_ratio_embedding(t_cond)], dim=1) clip = self.get_clip_embeddings(clip_txt_pooled=clip_text_pooled, clip_txt=clip_text, clip_img=clip_img) # Model Blocks x = self.embedding(sample) if hasattr(self, "effnet_mapper") and effnet is not None: x = x + self.effnet_mapper( nn.functional.interpolate(effnet, size=x.shape[-2:], mode="bilinear", align_corners=True) ) if hasattr(self, "pixels_mapper"): x = x + nn.functional.interpolate( self.pixels_mapper(pixels), size=x.shape[-2:], mode="bilinear", align_corners=True ) level_outputs = self._down_encode(x, timestep_ratio_embed, clip) x = self._up_decode(level_outputs, timestep_ratio_embed, clip) sample = 
self.clf(x) if not return_dict: return (sample,) return StableCascadeUNetOutput(sample=sample)
diffusers/src/diffusers/models/unets/unet_stable_cascade.py/0
{ "file_path": "diffusers/src/diffusers/models/unets/unet_stable_cascade.py", "repo_id": "diffusers", "token_count": 14708 }
113
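The `get_timestep_ratio_embedding` method in the UNet above applies the standard sinusoidal embedding to the timestep ratio. A minimal, self-contained sketch of the same formula, assuming the class default embedding dimension of 64 (the free-standing function name is illustrative, not part of diffusers):

```py
import math

import torch


def timestep_ratio_embedding(timestep_ratio: torch.Tensor, dim: int = 64, max_positions: int = 10000) -> torch.Tensor:
    # scale the [0, 1] ratio into the usual positional-encoding range
    r = timestep_ratio * max_positions
    half_dim = dim // 2
    emb = math.log(max_positions) / (half_dim - 1)
    emb = torch.arange(half_dim, device=r.device).float().mul(-emb).exp()
    emb = r[:, None] * emb[None, :]
    emb = torch.cat([emb.sin(), emb.cos()], dim=1)
    if dim % 2 == 1:
        # zero pad so odd dimensions still return `dim` features
        emb = torch.nn.functional.pad(emb, (0, 1), mode="constant")
    return emb.to(dtype=timestep_ratio.dtype)


ratios = torch.rand(4)  # one timestep ratio in [0, 1] per sample
print(timestep_ratio_embedding(ratios).shape)  # torch.Size([4, 64])
```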
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_transformers_available, is_transformers_version, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( AudioLDMPipeline, ) _dummy_objects.update({"AudioLDMPipeline": AudioLDMPipeline}) else: _import_structure["pipeline_audioldm"] = ["AudioLDMPipeline"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.27.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( AudioLDMPipeline, ) else: from .pipeline_audioldm import AudioLDMPipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
diffusers/src/diffusers/pipelines/audioldm/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/audioldm/__init__.py", "repo_id": "diffusers", "token_count": 581 }
114
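The `__init__` above hides the pipeline import behind availability checks so that missing optional dependencies surface as dummy objects rather than hard import errors. A rough standard-library-only sketch of that idea (the package names and printed messages are placeholders, not diffusers internals):

```py
import importlib.util


def is_available(package: str) -> bool:
    # find_spec returns None when the package cannot be imported
    return importlib.util.find_spec(package) is not None


if is_available("torch") and is_available("transformers"):
    print("the real pipeline class would be imported (lazily) here")
else:
    print("a dummy object that raises a helpful error on use would be exported instead")
```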
# Copyright 2024 Salesforce.com, inc. # Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import List, Optional, Union import PIL.Image import torch from transformers import CLIPTokenizer from ...models import AutoencoderKL, ControlNetModel, UNet2DConditionModel from ...schedulers import PNDMScheduler from ...utils import ( logging, replace_example_docstring, ) from ...utils.torch_utils import randn_tensor from ..blip_diffusion.blip_image_processing import BlipImageProcessor from ..blip_diffusion.modeling_blip2 import Blip2QFormerModel from ..blip_diffusion.modeling_ctx_clip import ContextCLIPTextModel from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> from diffusers.pipelines import BlipDiffusionControlNetPipeline >>> from diffusers.utils import load_image >>> from controlnet_aux import CannyDetector >>> import torch >>> blip_diffusion_pipe = BlipDiffusionControlNetPipeline.from_pretrained( ... "Salesforce/blipdiffusion-controlnet", torch_dtype=torch.float16 ... ).to("cuda") >>> style_subject = "flower" >>> tgt_subject = "teapot" >>> text_prompt = "on a marble table" >>> cldm_cond_image = load_image( ... "https://huggingface.co/datasets/ayushtues/blipdiffusion_images/resolve/main/kettle.jpg" ... ).resize((512, 512)) >>> canny = CannyDetector() >>> cldm_cond_image = canny(cldm_cond_image, 30, 70, output_type="pil") >>> style_image = load_image( ... "https://huggingface.co/datasets/ayushtues/blipdiffusion_images/resolve/main/flower.jpg" ... ) >>> guidance_scale = 7.5 >>> num_inference_steps = 50 >>> negative_prompt = "over-exposure, under-exposure, saturated, duplicate, out of frame, lowres, cropped, worst quality, low quality, jpeg artifacts, morbid, mutilated, out of frame, ugly, bad anatomy, bad proportions, deformed, blurry, duplicate" >>> output = blip_diffusion_pipe( ... text_prompt, ... style_image, ... cldm_cond_image, ... style_subject, ... tgt_subject, ... guidance_scale=guidance_scale, ... num_inference_steps=num_inference_steps, ... neg_prompt=negative_prompt, ... height=512, ... width=512, ... ).images >>> output[0].save("image.png") ``` """ class BlipDiffusionControlNetPipeline(DiffusionPipeline): """ Pipeline for Canny Edge based Controlled subject-driven generation using Blip Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: tokenizer ([`CLIPTokenizer`]): Tokenizer for the text encoder text_encoder ([`ContextCLIPTextModel`]): Text encoder to encode the text prompt vae ([`AutoencoderKL`]): VAE model to map the latents to the image unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the image embedding. 
scheduler ([`PNDMScheduler`]): A scheduler to be used in combination with `unet` to generate image latents. qformer ([`Blip2QFormerModel`]): QFormer model to get multi-modal embeddings from the text and image. controlnet ([`ControlNetModel`]): ControlNet model to get the conditioning image embedding. image_processor ([`BlipImageProcessor`]): Image Processor to preprocess and postprocess the image. ctx_begin_pos (int, `optional`, defaults to 2): Position of the context token in the text encoder. """ model_cpu_offload_seq = "qformer->text_encoder->unet->vae" def __init__( self, tokenizer: CLIPTokenizer, text_encoder: ContextCLIPTextModel, vae: AutoencoderKL, unet: UNet2DConditionModel, scheduler: PNDMScheduler, qformer: Blip2QFormerModel, controlnet: ControlNetModel, image_processor: BlipImageProcessor, ctx_begin_pos: int = 2, mean: List[float] = None, std: List[float] = None, ): super().__init__() self.register_modules( tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, unet=unet, scheduler=scheduler, qformer=qformer, controlnet=controlnet, image_processor=image_processor, ) self.register_to_config(ctx_begin_pos=ctx_begin_pos, mean=mean, std=std) def get_query_embeddings(self, input_image, src_subject): return self.qformer(image_input=input_image, text_input=src_subject, return_dict=False) # from the original Blip Diffusion code, speciefies the target subject and augments the prompt by repeating it def _build_prompt(self, prompts, tgt_subjects, prompt_strength=1.0, prompt_reps=20): rv = [] for prompt, tgt_subject in zip(prompts, tgt_subjects): prompt = f"a {tgt_subject} {prompt.strip()}" # a trick to amplify the prompt rv.append(", ".join([prompt] * int(prompt_strength * prompt_reps))) return rv # Copied from diffusers.pipelines.consistency_models.pipeline_consistency_models.ConsistencyModelPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels, height, width) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device=device, dtype=dtype) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def encode_prompt(self, query_embeds, prompt, device=None): device = device or self._execution_device # embeddings for prompt, with query_embeds as context max_len = self.text_encoder.text_model.config.max_position_embeddings max_len -= self.qformer.config.num_query_tokens tokenized_prompt = self.tokenizer( prompt, padding="max_length", truncation=True, max_length=max_len, return_tensors="pt", ).to(device) batch_size = query_embeds.shape[0] ctx_begin_pos = [self.config.ctx_begin_pos] * batch_size text_embeddings = self.text_encoder( input_ids=tokenized_prompt.input_ids, ctx_embeddings=query_embeds, ctx_begin_pos=ctx_begin_pos, )[0] return text_embeddings # Adapted from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image def prepare_control_image( self, image, width, height, batch_size, num_images_per_prompt, device, dtype, do_classifier_free_guidance=False, ): image = self.image_processor.preprocess( image, size={"width": width, "height": height}, do_rescale=True, do_center_crop=False, do_normalize=False, return_tensors="pt", )["pixel_values"].to(device) image_batch_size = image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: # image batch size is the same as prompt batch size repeat_by = num_images_per_prompt image = image.repeat_interleave(repeat_by, dim=0) image = image.to(device=device, dtype=dtype) if do_classifier_free_guidance: image = torch.cat([image] * 2) return image @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: List[str], reference_image: PIL.Image.Image, condtioning_image: PIL.Image.Image, source_subject_category: List[str], target_subject_category: List[str], latents: Optional[torch.FloatTensor] = None, guidance_scale: float = 7.5, height: int = 512, width: int = 512, num_inference_steps: int = 50, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, neg_prompt: Optional[str] = "", prompt_strength: float = 1.0, prompt_reps: int = 20, output_type: Optional[str] = "pil", return_dict: bool = True, ): """ Function invoked when calling the pipeline for generation. Args: prompt (`List[str]`): The prompt or prompts to guide the image generation. reference_image (`PIL.Image.Image`): The reference image to condition the generation on. condtioning_image (`PIL.Image.Image`): The conditioning canny edge image to condition the generation on. source_subject_category (`List[str]`): The source subject category. target_subject_category (`List[str]`): The target subject category. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by random sampling. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. 
Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. height (`int`, *optional*, defaults to 512): The height of the generated image. width (`int`, *optional*, defaults to 512): The width of the generated image. seed (`int`, *optional*, defaults to 42): The seed to use for random generation. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. neg_prompt (`str`, *optional*, defaults to ""): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_strength (`float`, *optional*, defaults to 1.0): The strength of the prompt. Specifies the number of times the prompt is repeated along with prompt_reps to amplify the prompt. prompt_reps (`int`, *optional*, defaults to 20): The number of times the prompt is repeated along with prompt_strength to amplify the prompt. Examples: Returns: [`~pipelines.ImagePipelineOutput`] or `tuple` """ device = self._execution_device reference_image = self.image_processor.preprocess( reference_image, image_mean=self.config.mean, image_std=self.config.std, return_tensors="pt" )["pixel_values"] reference_image = reference_image.to(device) if isinstance(prompt, str): prompt = [prompt] if isinstance(source_subject_category, str): source_subject_category = [source_subject_category] if isinstance(target_subject_category, str): target_subject_category = [target_subject_category] batch_size = len(prompt) prompt = self._build_prompt( prompts=prompt, tgt_subjects=target_subject_category, prompt_strength=prompt_strength, prompt_reps=prompt_reps, ) query_embeds = self.get_query_embeddings(reference_image, source_subject_category) text_embeddings = self.encode_prompt(query_embeds, prompt, device) # 3. unconditional embedding do_classifier_free_guidance = guidance_scale > 1.0 if do_classifier_free_guidance: max_length = self.text_encoder.text_model.config.max_position_embeddings uncond_input = self.tokenizer( [neg_prompt] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt", ) uncond_embeddings = self.text_encoder( input_ids=uncond_input.input_ids.to(device), ctx_embeddings=None, )[0] # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) scale_down_factor = 2 ** (len(self.unet.config.block_out_channels) - 1) latents = self.prepare_latents( batch_size=batch_size, num_channels=self.unet.config.in_channels, height=height // scale_down_factor, width=width // scale_down_factor, generator=generator, latents=latents, dtype=self.unet.dtype, device=device, ) # set timesteps extra_set_kwargs = {} self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs) cond_image = self.prepare_control_image( image=condtioning_image, width=width, height=height, batch_size=batch_size, num_images_per_prompt=1, device=device, dtype=self.controlnet.dtype, do_classifier_free_guidance=do_classifier_free_guidance, ) for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)): # expand the latents if we are doing classifier free guidance do_classifier_free_guidance = guidance_scale > 1.0 latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents down_block_res_samples, mid_block_res_sample = self.controlnet( latent_model_input, t, encoder_hidden_states=text_embeddings, controlnet_cond=cond_image, return_dict=False, ) noise_pred = self.unet( latent_model_input, timestep=t, encoder_hidden_states=text_embeddings, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, )["sample"] # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step( noise_pred, t, latents, )["prev_sample"] image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] image = self.image_processor.postprocess(image, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return ImagePipelineOutput(images=image)
diffusers/src/diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/controlnet/pipeline_controlnet_blip_diffusion.py", "repo_id": "diffusers", "token_count": 7632 }
115
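The guidance step inside the denoising loop above combines the unconditional and text-conditioned noise predictions. A tiny numerical sketch of just that arithmetic, with made-up tensor shapes:

```py
import torch

guidance_scale = 7.5
# [uncond, text] predictions stacked along the batch dimension, as in the loop above
noise_pred = torch.randn(2, 4, 8, 8)
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(guided.shape)  # torch.Size([1, 4, 8, 8])
```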
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np # noqa: E402 from ....configuration_utils import ConfigMixin, register_to_config from ....schedulers.scheduling_utils import SchedulerMixin try: import librosa # noqa: E402 _librosa_can_be_imported = True _import_error = "" except Exception as e: _librosa_can_be_imported = False _import_error = ( f"Cannot import librosa because {e}. Make sure to correctly install librosa to be able to install it." ) from PIL import Image # noqa: E402 class Mel(ConfigMixin, SchedulerMixin): """ Parameters: x_res (`int`): x resolution of spectrogram (time). y_res (`int`): y resolution of spectrogram (frequency bins). sample_rate (`int`): Sample rate of audio. n_fft (`int`): Number of Fast Fourier Transforms. hop_length (`int`): Hop length (a higher number is recommended if `y_res` < 256). top_db (`int`): Loudest decibel value. n_iter (`int`): Number of iterations for Griffin-Lim Mel inversion. """ config_name = "mel_config.json" @register_to_config def __init__( self, x_res: int = 256, y_res: int = 256, sample_rate: int = 22050, n_fft: int = 2048, hop_length: int = 512, top_db: int = 80, n_iter: int = 32, ): self.hop_length = hop_length self.sr = sample_rate self.n_fft = n_fft self.top_db = top_db self.n_iter = n_iter self.set_resolution(x_res, y_res) self.audio = None if not _librosa_can_be_imported: raise ValueError(_import_error) def set_resolution(self, x_res: int, y_res: int): """Set resolution. Args: x_res (`int`): x resolution of spectrogram (time). y_res (`int`): y resolution of spectrogram (frequency bins). """ self.x_res = x_res self.y_res = y_res self.n_mels = self.y_res self.slice_size = self.x_res * self.hop_length - 1 def load_audio(self, audio_file: str = None, raw_audio: np.ndarray = None): """Load audio. Args: audio_file (`str`): An audio file that must be on disk due to [Librosa](https://librosa.org/) limitation. raw_audio (`np.ndarray`): The raw audio file as a NumPy array. """ if audio_file is not None: self.audio, _ = librosa.load(audio_file, mono=True, sr=self.sr) else: self.audio = raw_audio # Pad with silence if necessary. if len(self.audio) < self.x_res * self.hop_length: self.audio = np.concatenate([self.audio, np.zeros((self.x_res * self.hop_length - len(self.audio),))]) def get_number_of_slices(self) -> int: """Get number of slices in audio. Returns: `int`: Number of spectograms audio can be sliced into. """ return len(self.audio) // self.slice_size def get_audio_slice(self, slice: int = 0) -> np.ndarray: """Get slice of audio. Args: slice (`int`): Slice number of audio (out of `get_number_of_slices()`). Returns: `np.ndarray`: The audio slice as a NumPy array. """ return self.audio[self.slice_size * slice : self.slice_size * (slice + 1)] def get_sample_rate(self) -> int: """Get sample rate. Returns: `int`: Sample rate of audio. """ return self.sr def audio_slice_to_image(self, slice: int) -> Image.Image: """Convert slice of audio to spectrogram. 
Args: slice (`int`): Slice number of audio to convert (out of `get_number_of_slices()`). Returns: `PIL Image`: A grayscale image of `x_res x y_res`. """ S = librosa.feature.melspectrogram( y=self.get_audio_slice(slice), sr=self.sr, n_fft=self.n_fft, hop_length=self.hop_length, n_mels=self.n_mels ) log_S = librosa.power_to_db(S, ref=np.max, top_db=self.top_db) bytedata = (((log_S + self.top_db) * 255 / self.top_db).clip(0, 255) + 0.5).astype(np.uint8) image = Image.fromarray(bytedata) return image def image_to_audio(self, image: Image.Image) -> np.ndarray: """Converts spectrogram to audio. Args: image (`PIL Image`): An grayscale image of `x_res x y_res`. Returns: audio (`np.ndarray`): The audio as a NumPy array. """ bytedata = np.frombuffer(image.tobytes(), dtype="uint8").reshape((image.height, image.width)) log_S = bytedata.astype("float") * self.top_db / 255 - self.top_db S = librosa.db_to_power(log_S) audio = librosa.feature.inverse.mel_to_audio( S, sr=self.sr, n_fft=self.n_fft, hop_length=self.hop_length, n_iter=self.n_iter ) return audio
diffusers/src/diffusers/pipelines/deprecated/audio_diffusion/mel.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/audio_diffusion/mel.py", "repo_id": "diffusers", "token_count": 2698 }
116
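`audio_slice_to_image` and `image_to_audio` above round-trip the log-mel spectrogram through an 8-bit grayscale image. A short sketch of just that quantization step, with random dB values standing in for `librosa.power_to_db` output, showing that the reconstruction error is bounded by the byte resolution:

```py
import numpy as np

top_db = 80
log_S = np.random.uniform(-top_db, 0.0, size=(256, 256))  # stand-in for power_to_db output
bytedata = (((log_S + top_db) * 255 / top_db).clip(0, 255) + 0.5).astype(np.uint8)
log_S_recovered = bytedata.astype("float") * top_db / 255 - top_db
print(np.abs(log_S - log_S_recovered).max())  # at most about top_db / 255
```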
from typing import TYPE_CHECKING from ...utils import DIFFUSERS_SLOW_IMPORT, _LazyModule _import_structure = {"pipeline_dit": ["DiTPipeline"]} if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: from .pipeline_dit import DiTPipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, )
diffusers/src/diffusers/pipelines/dit/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/dit/__init__.py", "repo_id": "diffusers", "token_count": 177 }
117
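Thanks to the lazy module above, `DiTPipeline` can be imported straight from `diffusers` while the heavy submodule only loads on first access. A usage sketch (downloads weights from the Hub and assumes a CUDA device; the checkpoint name and class label are illustrative choices, not the only valid ones):

```py
import torch

from diffusers import DiTPipeline

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

# class-conditional ImageNet generation; 207 is one of the 1000 ImageNet class ids
generator = torch.manual_seed(33)
image = pipe(class_labels=[207], num_inference_steps=25, generator=generator).images[0]
image.save("dit_sample.png")
```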
import inspect import math from itertools import repeat from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch import torch.nn.functional as F from packaging import version from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from ...configuration_utils import FrozenDict from ...image_processor import PipelineImageInput, VaeImageProcessor from ...loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, UNet2DConditionModel from ...models.attention_processor import Attention, AttnProcessor from ...models.lora import adjust_lora_scale_text_encoder from ...pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from ...schedulers import DDIMScheduler, DPMSolverMultistepScheduler from ...utils import ( USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline from .pipeline_output import LEditsPPDiffusionPipelineOutput, LEditsPPInversionPipelineOutput logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import PIL >>> import requests >>> import torch >>> from io import BytesIO >>> from diffusers import LEditsPPPipelineStableDiffusion >>> pipe = LEditsPPPipelineStableDiffusion.from_pretrained( ... "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16 ... ) >>> pipe = pipe.to("cuda") >>> def download_image(url): ... response = requests.get(url) ... return PIL.Image.open(BytesIO(response.content)).convert("RGB") >>> img_url = "https://www.aiml.informatik.tu-darmstadt.de/people/mbrack/cherry_blossom.png" >>> image = download_image(img_url) >>> _ = pipe.invert( ... image = image, ... num_inversion_steps=50, ... skip=0.1 ... ) >>> edited_image = pipe( ... editing_prompt=["cherry blossom"], ... edit_guidance_scale=10.0, ... 
edit_threshold=0.75, ).images[0] ``` """ # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionAttendAndExcitePipeline.AttentionStore class LeditsAttentionStore: @staticmethod def get_empty_store(): return {"down_cross": [], "mid_cross": [], "up_cross": [], "down_self": [], "mid_self": [], "up_self": []} def __call__(self, attn, is_cross: bool, place_in_unet: str, editing_prompts, PnP=False): # attn.shape = batch_size * head_size, seq_len query, seq_len_key if attn.shape[1] <= self.max_size: bs = 1 + int(PnP) + editing_prompts skip = 2 if PnP else 1 # skip PnP & unconditional attn = torch.stack(attn.split(self.batch_size)).permute(1, 0, 2, 3) source_batch_size = int(attn.shape[1] // bs) self.forward(attn[:, skip * source_batch_size :], is_cross, place_in_unet) def forward(self, attn, is_cross: bool, place_in_unet: str): key = f"{place_in_unet}_{'cross' if is_cross else 'self'}" self.step_store[key].append(attn) def between_steps(self, store_step=True): if store_step: if self.average: if len(self.attention_store) == 0: self.attention_store = self.step_store else: for key in self.attention_store: for i in range(len(self.attention_store[key])): self.attention_store[key][i] += self.step_store[key][i] else: if len(self.attention_store) == 0: self.attention_store = [self.step_store] else: self.attention_store.append(self.step_store) self.cur_step += 1 self.step_store = self.get_empty_store() def get_attention(self, step: int): if self.average: attention = { key: [item / self.cur_step for item in self.attention_store[key]] for key in self.attention_store } else: assert step is not None attention = self.attention_store[step] return attention def aggregate_attention( self, attention_maps, prompts, res: Union[int, Tuple[int]], from_where: List[str], is_cross: bool, select: int ): out = [[] for x in range(self.batch_size)] if isinstance(res, int): num_pixels = res**2 resolution = (res, res) else: num_pixels = res[0] * res[1] resolution = res[:2] for location in from_where: for bs_item in attention_maps[f"{location}_{'cross' if is_cross else 'self'}"]: for batch, item in enumerate(bs_item): if item.shape[1] == num_pixels: cross_maps = item.reshape(len(prompts), -1, *resolution, item.shape[-1])[select] out[batch].append(cross_maps) out = torch.stack([torch.cat(x, dim=0) for x in out]) # average over heads out = out.sum(1) / out.shape[1] return out def __init__(self, average: bool, batch_size=1, max_resolution=16, max_size: int = None): self.step_store = self.get_empty_store() self.attention_store = [] self.cur_step = 0 self.average = average self.batch_size = batch_size if max_size is None: self.max_size = max_resolution**2 elif max_size is not None and max_resolution is None: self.max_size = max_size else: raise ValueError("Only allowed to set one of max_resolution or max_size") # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionAttendAndExcitePipeline.GaussianSmoothing class LeditsGaussianSmoothing: def __init__(self, device): kernel_size = [3, 3] sigma = [0.5, 0.5] # The gaussian kernel is the product of the gaussian function of each dimension. kernel = 1 meshgrids = torch.meshgrid([torch.arange(size, dtype=torch.float32) for size in kernel_size]) for size, std, mgrid in zip(kernel_size, sigma, meshgrids): mean = (size - 1) / 2 kernel *= 1 / (std * math.sqrt(2 * math.pi)) * torch.exp(-(((mgrid - mean) / (2 * std)) ** 2)) # Make sure sum of values in gaussian kernel equals 1. 
kernel = kernel / torch.sum(kernel) # Reshape to depthwise convolutional weight kernel = kernel.view(1, 1, *kernel.size()) kernel = kernel.repeat(1, *[1] * (kernel.dim() - 1)) self.weight = kernel.to(device) def __call__(self, input): """ Arguments: Apply gaussian filter to input. input (torch.Tensor): Input to apply gaussian filter on. Returns: filtered (torch.Tensor): Filtered output. """ return F.conv2d(input, weight=self.weight.to(input.dtype)) class LEDITSCrossAttnProcessor: def __init__(self, attention_store, place_in_unet, pnp, editing_prompts): self.attnstore = attention_store self.place_in_unet = place_in_unet self.editing_prompts = editing_prompts self.pnp = pnp def __call__( self, attn: Attention, hidden_states, encoder_hidden_states, attention_mask=None, temb=None, ): batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) self.attnstore( attention_probs, is_cross=True, place_in_unet=self.place_in_unet, editing_prompts=self.editing_prompts, PnP=self.pnp, ) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) hidden_states = hidden_states / attn.rescale_output_factor return hidden_states # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): """ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 """ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg class LEditsPPPipelineStableDiffusion( DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin ): """ Pipeline for textual image editing using LEDits++ with Stable Diffusion. This model inherits from [`DiffusionPipeline`] and builds on the [`StableDiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`~transformers.CLIPTextModel`]): Frozen text-encoder. 
Stable Diffusion uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. tokenizer ([`~transformers.CLIPTokenizer`]): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`DPMSolverMultistepScheduler`] or [`DDIMScheduler`]): A scheduler to be used in combination with `unet` to denoise the encoded image latens. Can be one of [`DPMSolverMultistepScheduler`] or [`DDIMScheduler`]. If any other scheduler is passed it will automatically be set to [`DPMSolverMultistepScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details. feature_extractor ([`~transformers.CLIPImageProcessor`]): Model that extracts features from generated images to be used as inputs for the `safety_checker`. """ model_cpu_offload_seq = "text_encoder->unet->vae" _exclude_from_cpu_offload = ["safety_checker"] _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, DPMSolverMultistepScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, ): super().__init__() if not isinstance(scheduler, DDIMScheduler) and not isinstance(scheduler, DPMSolverMultistepScheduler): scheduler = DPMSolverMultistepScheduler.from_config( scheduler.config, algorithm_type="sde-dpmsolver++", solver_order=2 ) logger.warning( "This pipeline only supports DDIMScheduler and DPMSolverMultistepScheduler. " "The scheduler has been changed to DPMSolverMultistepScheduler." ) if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) if safety_checker is not None and feature_extractor is None: raise ValueError( "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." ) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. 
If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) self.inversion_steps = None # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) return image, has_nsfw_concept # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents def decode_latents(self, latents): deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, eta, generator=None): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs def check_inputs( self, negative_prompt=None, editing_prompt_embeddings=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if editing_prompt_embeddings is not None and negative_prompt_embeds is not None: if editing_prompt_embeddings.shape != negative_prompt_embeds.shape: raise ValueError( "`editing_prompt_embeddings` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `editing_prompt_embeddings` {editing_prompt_embeddings.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, latents): # shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) # if latents.shape != shape: # raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def prepare_unet(self, attention_store, PnP: bool = False): attn_procs = {} for name in self.unet.attn_processors.keys(): if name.startswith("mid_block"): place_in_unet = "mid" elif name.startswith("up_blocks"): place_in_unet = "up" elif name.startswith("down_blocks"): place_in_unet = "down" else: continue if "attn2" in name and place_in_unet != "mid": attn_procs[name] = LEDITSCrossAttnProcessor( attention_store=attention_store, place_in_unet=place_in_unet, pnp=PnP, editing_prompts=self.enabled_editing_prompts, ) else: attn_procs[name] = AttnProcessor() self.unet.set_attn_processor(attn_procs) def encode_prompt( self, device, num_images_per_prompt, enable_edit_guidance, negative_prompt=None, editing_prompt=None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, editing_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. 
Args: device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt enable_edit_guidance (`bool`): whether to perform any editing or reconstruct the input image instead negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). editing_prompt (`str` or `List[str]`, *optional*): Editing prompt(s) to be encoded. If not defined, one has to pass `editing_prompt_embeds` instead. editing_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. """ # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, LoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) batch_size = self.batch_size num_edit_tokens = None if negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but exoected" f"{batch_size} based on the input images. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." 
) else: uncond_tokens = negative_prompt # textual inversion: procecss multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = negative_prompt_embeds.dtype negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) if enable_edit_guidance: if editing_prompt_embeds is None: # textual inversion: procecss multi-vector tokens if necessary # if isinstance(self, TextualInversionLoaderMixin): # prompt = self.maybe_convert_prompt(prompt, self.tokenizer) if isinstance(editing_prompt, str): editing_prompt = [editing_prompt] max_length = negative_prompt_embeds.shape[1] text_inputs = self.tokenizer( [x for item in editing_prompt for x in repeat(item, batch_size)], padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", return_length=True, ) num_edit_tokens = text_inputs.length - 2 # not counting startoftext and endoftext text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer( [x for item in editing_prompt for x in repeat(item, batch_size)], padding="longest", return_tensors="pt", ).input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if ( hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask ): attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: editing_prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) editing_prompt_embeds = editing_prompt_embeds[0] else: editing_prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. editing_prompt_embeds = editing_prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. 
editing_prompt_embeds = self.text_encoder.text_model.final_layer_norm(editing_prompt_embeds) editing_prompt_embeds = editing_prompt_embeds.to(dtype=negative_prompt_embeds.dtype, device=device) bs_embed_edit, seq_len, _ = editing_prompt_embeds.shape editing_prompt_embeds = editing_prompt_embeds.to(dtype=negative_prompt_embeds.dtype, device=device) editing_prompt_embeds = editing_prompt_embeds.repeat(1, num_images_per_prompt, 1) editing_prompt_embeds = editing_prompt_embeds.view(bs_embed_edit * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return editing_prompt_embeds, negative_prompt_embeds, num_edit_tokens @property def guidance_rescale(self): return self._guidance_rescale @property def clip_skip(self): return self._clip_skip @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, negative_prompt: Optional[Union[str, List[str]]] = None, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", return_dict: bool = True, editing_prompt: Optional[Union[str, List[str]]] = None, editing_prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, reverse_editing_direction: Optional[Union[bool, List[bool]]] = False, edit_guidance_scale: Optional[Union[float, List[float]]] = 5, edit_warmup_steps: Optional[Union[int, List[int]]] = 0, edit_cooldown_steps: Optional[Union[int, List[int]]] = None, edit_threshold: Optional[Union[float, List[float]]] = 0.9, user_mask: Optional[torch.FloatTensor] = None, sem_guidance: Optional[List[torch.Tensor]] = None, use_cross_attn_mask: bool = False, use_intersect_mask: bool = True, attn_store_steps: Optional[List[int]] = [], store_averaged_over_steps: bool = True, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guidance_rescale: float = 0.0, clip_skip: Optional[int] = None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], **kwargs, ): r""" The call function to the pipeline for editing. The [`~pipelines.ledits_pp.LEditsPPPipelineStableDiffusion.invert`] method has to be called beforehand. Edits will always be performed for the last inverted image(s). Args: negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). generator (`torch.Generator`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. 
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.ledits_pp.LEditsPPDiffusionPipelineOutput`] instead of a plain
                tuple.
            editing_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the image generation. The image is reconstructed by setting
                `editing_prompt = None`. Guidance direction of prompt should be specified via
                `reverse_editing_direction`.
            editing_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-computed embeddings to use for guiding the image generation. Guidance direction of embedding should
                be specified via `reverse_editing_direction`.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
                not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
            reverse_editing_direction (`bool` or `List[bool]`, *optional*, defaults to `False`):
                Whether the corresponding prompt in `editing_prompt` should be increased or decreased.
            edit_guidance_scale (`float` or `List[float]`, *optional*, defaults to 5):
                Guidance scale for guiding the image generation. If provided as a list, values should correspond to
                `editing_prompt`. `edit_guidance_scale` is defined as `s_e` of equation 12 of
                [LEDITS++ Paper](https://arxiv.org/abs/2301.12247).
            edit_warmup_steps (`int` or `List[int]`, *optional*, defaults to 0):
                Number of diffusion steps (for each prompt) for which guidance will not be applied.
            edit_cooldown_steps (`int` or `List[int]`, *optional*, defaults to `None`):
                Number of diffusion steps (for each prompt) after which guidance will no longer be applied.
            edit_threshold (`float` or `List[float]`, *optional*, defaults to 0.9):
                Masking threshold of guidance. Threshold should be proportional to the image region that is modified.
                'edit_threshold' is defined as 'λ' of equation 12 of
                [LEDITS++ Paper](https://arxiv.org/abs/2301.12247).
            user_mask (`torch.FloatTensor`, *optional*):
                User-provided mask for even better control over the editing process. This is helpful when LEDITS++'s
                implicit masks do not meet user preferences.
            sem_guidance (`List[torch.Tensor]`, *optional*):
                List of pre-generated guidance vectors to be applied at generation. Length of the list has to
                correspond to `num_inference_steps`.
            use_cross_attn_mask (`bool`, defaults to `False`):
                Whether cross-attention masks are used. Cross-attention masks are always used when `use_intersect_mask`
                is set to `True`. Cross-attention masks are defined as 'M^1' of equation 12 of
                [LEDITS++ paper](https://arxiv.org/pdf/2311.16711.pdf).
            use_intersect_mask (`bool`, defaults to `True`):
                Whether the masking term is calculated as intersection of cross-attention masks and masks derived from
                the noise estimate. Cross-attention masks are defined as 'M^1' and masks derived from the noise
                estimate are defined as 'M^2' of equation 12 of
                [LEDITS++ paper](https://arxiv.org/pdf/2311.16711.pdf).
            attn_store_steps (`List[int]`, *optional*):
                Steps for which the attention maps are stored in the AttentionStore. Just for visualization purposes.
            store_averaged_over_steps (`bool`, defaults to `True`):
                Whether the attention maps for the 'attn_store_steps' are stored averaged over the diffusion steps. If
                False, attention maps for each step are stored separately. Just for visualization purposes.
cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). guidance_rescale (`float`, *optional*, defaults to 0.0): Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when using zero terminal SNR. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. Examples: Returns: [`~pipelines.ledits_pp.LEditsPPDiffusionPipelineOutput`] or `tuple`: [`~pipelines.ledits_pp.LEditsPPDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. """ if self.inversion_steps is None: raise ValueError( "You need to invert an input image first before calling the pipeline. The `invert` method has to be called beforehand. Edits will always be performed for the last inverted image(s)." ) eta = self.eta num_images_per_prompt = 1 latents = self.init_latents zs = self.zs self.scheduler.set_timesteps(len(self.scheduler.timesteps)) if use_intersect_mask: use_cross_attn_mask = True if use_cross_attn_mask: self.smoothing = LeditsGaussianSmoothing(self.device) if user_mask is not None: user_mask = user_mask.to(self.device) org_prompt = "" # 1. Check inputs. Raise error if not correct self.check_inputs( negative_prompt, editing_prompt_embeds, negative_prompt_embeds, callback_on_step_end_tensor_inputs, ) self._guidance_rescale = guidance_rescale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs # 2. Define call parameters batch_size = self.batch_size if editing_prompt: enable_edit_guidance = True if isinstance(editing_prompt, str): editing_prompt = [editing_prompt] self.enabled_editing_prompts = len(editing_prompt) elif editing_prompt_embeds is not None: enable_edit_guidance = True self.enabled_editing_prompts = editing_prompt_embeds.shape[0] else: self.enabled_editing_prompts = 0 enable_edit_guidance = False # 3. 
Encode input prompt lora_scale = ( self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None ) edit_concepts, uncond_embeddings, num_edit_tokens = self.encode_prompt( editing_prompt=editing_prompt, device=self.device, num_images_per_prompt=num_images_per_prompt, enable_edit_guidance=enable_edit_guidance, negative_prompt=negative_prompt, editing_prompt_embeds=editing_prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, clip_skip=self.clip_skip, ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes if enable_edit_guidance: text_embeddings = torch.cat([uncond_embeddings, edit_concepts]) self.text_cross_attention_maps = [editing_prompt] if isinstance(editing_prompt, str) else editing_prompt else: text_embeddings = torch.cat([uncond_embeddings]) # 4. Prepare timesteps # self.scheduler.set_timesteps(num_inference_steps, device=self.device) timesteps = self.inversion_steps t_to_idx = {int(v): k for k, v in enumerate(timesteps[-zs.shape[0] :])} if use_cross_attn_mask: self.attention_store = LeditsAttentionStore( average=store_averaged_over_steps, batch_size=batch_size, max_size=(latents.shape[-2] / 4.0) * (latents.shape[-1] / 4.0), max_resolution=None, ) self.prepare_unet(self.attention_store, PnP=False) resolution = latents.shape[-2:] att_res = (int(resolution[0] / 4), int(resolution[1] / 4)) # 5. Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, None, None, text_embeddings.dtype, self.device, latents, ) # 6. Prepare extra step kwargs. extra_step_kwargs = self.prepare_extra_step_kwargs(eta) self.sem_guidance = None self.activation_mask = None # 7. 
Denoising loop num_warmup_steps = 0 with self.progress_bar(total=len(timesteps)) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance if enable_edit_guidance: latent_model_input = torch.cat([latents] * (1 + self.enabled_editing_prompts)) else: latent_model_input = latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) text_embed_input = text_embeddings # predict the noise residual noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embed_input).sample noise_pred_out = noise_pred.chunk(1 + self.enabled_editing_prompts) # [b,4, 64, 64] noise_pred_uncond = noise_pred_out[0] noise_pred_edit_concepts = noise_pred_out[1:] noise_guidance_edit = torch.zeros( noise_pred_uncond.shape, device=self.device, dtype=noise_pred_uncond.dtype, ) if sem_guidance is not None and len(sem_guidance) > i: noise_guidance_edit += sem_guidance[i].to(self.device) elif enable_edit_guidance: if self.activation_mask is None: self.activation_mask = torch.zeros( (len(timesteps), len(noise_pred_edit_concepts), *noise_pred_edit_concepts[0].shape) ) if self.sem_guidance is None: self.sem_guidance = torch.zeros((len(timesteps), *noise_pred_uncond.shape)) for c, noise_pred_edit_concept in enumerate(noise_pred_edit_concepts): if isinstance(edit_warmup_steps, list): edit_warmup_steps_c = edit_warmup_steps[c] else: edit_warmup_steps_c = edit_warmup_steps if i < edit_warmup_steps_c: continue if isinstance(edit_guidance_scale, list): edit_guidance_scale_c = edit_guidance_scale[c] else: edit_guidance_scale_c = edit_guidance_scale if isinstance(edit_threshold, list): edit_threshold_c = edit_threshold[c] else: edit_threshold_c = edit_threshold if isinstance(reverse_editing_direction, list): reverse_editing_direction_c = reverse_editing_direction[c] else: reverse_editing_direction_c = reverse_editing_direction if isinstance(edit_cooldown_steps, list): edit_cooldown_steps_c = edit_cooldown_steps[c] elif edit_cooldown_steps is None: edit_cooldown_steps_c = i + 1 else: edit_cooldown_steps_c = edit_cooldown_steps if i >= edit_cooldown_steps_c: continue noise_guidance_edit_tmp = noise_pred_edit_concept - noise_pred_uncond if reverse_editing_direction_c: noise_guidance_edit_tmp = noise_guidance_edit_tmp * -1 noise_guidance_edit_tmp = noise_guidance_edit_tmp * edit_guidance_scale_c if user_mask is not None: noise_guidance_edit_tmp = noise_guidance_edit_tmp * user_mask if use_cross_attn_mask: out = self.attention_store.aggregate_attention( attention_maps=self.attention_store.step_store, prompts=self.text_cross_attention_maps, res=att_res, from_where=["up", "down"], is_cross=True, select=self.text_cross_attention_maps.index(editing_prompt[c]), ) attn_map = out[:, :, :, 1 : 1 + num_edit_tokens[c]] # 0 -> startoftext # average over all tokens if attn_map.shape[3] != num_edit_tokens[c]: raise ValueError( f"Incorrect shape of attention_map. Expected size {num_edit_tokens[c]}, but found {attn_map.shape[3]}!" 
) attn_map = torch.sum(attn_map, dim=3) # gaussian_smoothing attn_map = F.pad(attn_map.unsqueeze(1), (1, 1, 1, 1), mode="reflect") attn_map = self.smoothing(attn_map).squeeze(1) # torch.quantile function expects float32 if attn_map.dtype == torch.float32: tmp = torch.quantile(attn_map.flatten(start_dim=1), edit_threshold_c, dim=1) else: tmp = torch.quantile( attn_map.flatten(start_dim=1).to(torch.float32), edit_threshold_c, dim=1 ).to(attn_map.dtype) attn_mask = torch.where( attn_map >= tmp.unsqueeze(1).unsqueeze(1).repeat(1, *att_res), 1.0, 0.0 ) # resolution must match latent space dimension attn_mask = F.interpolate( attn_mask.unsqueeze(1), noise_guidance_edit_tmp.shape[-2:], # 64,64 ).repeat(1, 4, 1, 1) self.activation_mask[i, c] = attn_mask.detach().cpu() if not use_intersect_mask: noise_guidance_edit_tmp = noise_guidance_edit_tmp * attn_mask if use_intersect_mask: if t <= 800: noise_guidance_edit_tmp_quantile = torch.abs(noise_guidance_edit_tmp) noise_guidance_edit_tmp_quantile = torch.sum( noise_guidance_edit_tmp_quantile, dim=1, keepdim=True ) noise_guidance_edit_tmp_quantile = noise_guidance_edit_tmp_quantile.repeat( 1, self.unet.config.in_channels, 1, 1 ) # torch.quantile function expects float32 if noise_guidance_edit_tmp_quantile.dtype == torch.float32: tmp = torch.quantile( noise_guidance_edit_tmp_quantile.flatten(start_dim=2), edit_threshold_c, dim=2, keepdim=False, ) else: tmp = torch.quantile( noise_guidance_edit_tmp_quantile.flatten(start_dim=2).to(torch.float32), edit_threshold_c, dim=2, keepdim=False, ).to(noise_guidance_edit_tmp_quantile.dtype) intersect_mask = ( torch.where( noise_guidance_edit_tmp_quantile >= tmp[:, :, None, None], torch.ones_like(noise_guidance_edit_tmp), torch.zeros_like(noise_guidance_edit_tmp), ) * attn_mask ) self.activation_mask[i, c] = intersect_mask.detach().cpu() noise_guidance_edit_tmp = noise_guidance_edit_tmp * intersect_mask else: # print(f"only attention mask for step {i}") noise_guidance_edit_tmp = noise_guidance_edit_tmp * attn_mask elif not use_cross_attn_mask: # calculate quantile noise_guidance_edit_tmp_quantile = torch.abs(noise_guidance_edit_tmp) noise_guidance_edit_tmp_quantile = torch.sum( noise_guidance_edit_tmp_quantile, dim=1, keepdim=True ) noise_guidance_edit_tmp_quantile = noise_guidance_edit_tmp_quantile.repeat(1, 4, 1, 1) # torch.quantile function expects float32 if noise_guidance_edit_tmp_quantile.dtype == torch.float32: tmp = torch.quantile( noise_guidance_edit_tmp_quantile.flatten(start_dim=2), edit_threshold_c, dim=2, keepdim=False, ) else: tmp = torch.quantile( noise_guidance_edit_tmp_quantile.flatten(start_dim=2).to(torch.float32), edit_threshold_c, dim=2, keepdim=False, ).to(noise_guidance_edit_tmp_quantile.dtype) self.activation_mask[i, c] = ( torch.where( noise_guidance_edit_tmp_quantile >= tmp[:, :, None, None], torch.ones_like(noise_guidance_edit_tmp), torch.zeros_like(noise_guidance_edit_tmp), ) .detach() .cpu() ) noise_guidance_edit_tmp = torch.where( noise_guidance_edit_tmp_quantile >= tmp[:, :, None, None], noise_guidance_edit_tmp, torch.zeros_like(noise_guidance_edit_tmp), ) noise_guidance_edit += noise_guidance_edit_tmp self.sem_guidance[i] = noise_guidance_edit.detach().cpu() noise_pred = noise_pred_uncond + noise_guidance_edit if enable_edit_guidance and self.guidance_rescale > 0.0: # Based on 3.4. 
in https://arxiv.org/pdf/2305.08891.pdf noise_pred = rescale_noise_cfg( noise_pred, noise_pred_edit_concepts.mean(dim=0, keepdim=False), guidance_rescale=self.guidance_rescale, ) idx = t_to_idx[int(t)] latents = self.scheduler.step( noise_pred, t, latents, variance_noise=zs[idx], **extra_step_kwargs ).prev_sample # step callback if use_cross_attn_mask: store_step = i in attn_store_steps self.attention_store.between_steps(store_step) if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) # prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() # 8. Post-processing if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[ 0 ] image, has_nsfw_concept = self.run_safety_checker(image, self.device, text_embeddings.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return LEditsPPDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) @torch.no_grad() def invert( self, image: PipelineImageInput, source_prompt: str = "", source_guidance_scale: float = 3.5, num_inversion_steps: int = 30, skip: float = 0.15, generator: Optional[torch.Generator] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, clip_skip: Optional[int] = None, height: Optional[int] = None, width: Optional[int] = None, resize_mode: Optional[str] = "default", crops_coords: Optional[Tuple[int, int, int, int]] = None, ): r""" The function to the pipeline for image inversion as described by the [LEDITS++ Paper](https://arxiv.org/abs/2301.12247). If the scheduler is set to [`~schedulers.DDIMScheduler`] the inversion proposed by [edit-friendly DPDM](https://arxiv.org/abs/2304.06140) will be performed instead. Args: image (`PipelineImageInput`): Input for the image(s) that are to be edited. Multiple input images have to default to the same aspect ratio. source_prompt (`str`, defaults to `""`): Prompt describing the input image that will be used for guidance during inversion. Guidance is disabled if the `source_prompt` is `""`. source_guidance_scale (`float`, defaults to `3.5`): Strength of guidance during inversion. num_inversion_steps (`int`, defaults to `30`): Number of total performed inversion steps after discarding the initial `skip` steps. skip (`float`, defaults to `0.15`): Portion of initial steps that will be ignored for inversion and subsequent generation. Lower values will lead to stronger changes to the input image. `skip` has to be between `0` and `1`. generator (`torch.Generator`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make inversion deterministic. 
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
                [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            clip_skip (`int`, *optional*):
                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
                the output of the pre-final layer will be used for computing the prompt embeddings.
            height (`int`, *optional*, defaults to `None`):
                The height of the preprocessed image. If `None`, will use `get_default_height_width()` to get the
                default height.
            width (`int`, *optional*, defaults to `None`):
                The width of the preprocessed image. If `None`, will use `get_default_height_width()` to get the
                default width.
            resize_mode (`str`, *optional*, defaults to `default`):
                The resize mode, can be one of `default` or `fill`. If `default`, will resize the image to fit within
                the specified width and height, and it may not maintain the original aspect ratio. If `fill`, will
                resize the image to fit within the specified width and height, maintaining the aspect ratio, and then
                center the image within the dimensions, filling empty space with data from the image. If `crop`, will
                resize the image to fit within the specified width and height, maintaining the aspect ratio, and then
                center the image within the dimensions, cropping the excess. Note that resize_mode `fill` and `crop`
                are only supported for PIL image input.
            crops_coords (`List[Tuple[int, int, int, int]]`, *optional*, defaults to `None`):
                The crop coordinates for each image in the batch. If `None`, will not crop the image.

        Returns:
            [`~pipelines.ledits_pp.LEditsPPInversionPipelineOutput`]: Output will contain the resized input image(s)
            and respective VAE reconstruction(s).
        """
        # Reset attn processor, we do not want to store attn maps during inversion
        self.unet.set_attn_processor(AttnProcessor())

        self.eta = 1.0

        self.scheduler.config.timestep_spacing = "leading"
        self.scheduler.set_timesteps(int(num_inversion_steps * (1 + skip)))
        self.inversion_steps = self.scheduler.timesteps[-num_inversion_steps:]
        timesteps = self.inversion_steps

        # 1. encode image
        x0, resized = self.encode_image(
            image,
            dtype=self.text_encoder.dtype,
            height=height,
            width=width,
            resize_mode=resize_mode,
            crops_coords=crops_coords,
        )
        self.batch_size = x0.shape[0]

        # autoencoder reconstruction
        image_rec = self.vae.decode(x0 / self.vae.config.scaling_factor, return_dict=False, generator=generator)[0]
        image_rec = self.image_processor.postprocess(image_rec, output_type="pil")

        # 2. get embeddings
        do_classifier_free_guidance = source_guidance_scale > 1.0

        lora_scale = cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None

        uncond_embedding, text_embeddings, _ = self.encode_prompt(
            num_images_per_prompt=1,
            device=self.device,
            negative_prompt=None,
            enable_edit_guidance=do_classifier_free_guidance,
            editing_prompt=source_prompt,
            lora_scale=lora_scale,
            clip_skip=clip_skip,
        )

        # 3.
find zs and xts variance_noise_shape = (num_inversion_steps, *x0.shape) # intermediate latents t_to_idx = {int(v): k for k, v in enumerate(timesteps)} xts = torch.zeros(size=variance_noise_shape, device=self.device, dtype=uncond_embedding.dtype) for t in reversed(timesteps): idx = num_inversion_steps - t_to_idx[int(t)] - 1 noise = randn_tensor(shape=x0.shape, generator=generator, device=self.device, dtype=x0.dtype) xts[idx] = self.scheduler.add_noise(x0, noise, torch.Tensor([t])) xts = torch.cat([x0.unsqueeze(0), xts], dim=0) self.scheduler.set_timesteps(len(self.scheduler.timesteps)) # noise maps zs = torch.zeros(size=variance_noise_shape, device=self.device, dtype=uncond_embedding.dtype) with self.progress_bar(total=len(timesteps)) as progress_bar: for t in timesteps: idx = num_inversion_steps - t_to_idx[int(t)] - 1 # 1. predict noise residual xt = xts[idx + 1] noise_pred = self.unet(xt, timestep=t, encoder_hidden_states=uncond_embedding).sample if not source_prompt == "": noise_pred_cond = self.unet(xt, timestep=t, encoder_hidden_states=text_embeddings).sample noise_pred = noise_pred + source_guidance_scale * (noise_pred_cond - noise_pred) xtm1 = xts[idx] z, xtm1_corrected = compute_noise(self.scheduler, xtm1, xt, t, noise_pred, self.eta) zs[idx] = z # correction to avoid error accumulation xts[idx] = xtm1_corrected progress_bar.update() self.init_latents = xts[-1].expand(self.batch_size, -1, -1, -1) zs = zs.flip(0) self.zs = zs return LEditsPPInversionPipelineOutput(images=resized, vae_reconstruction_images=image_rec) @torch.no_grad() def encode_image(self, image, dtype=None, height=None, width=None, resize_mode="default", crops_coords=None): image = self.image_processor.preprocess( image=image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords ) resized = self.image_processor.postprocess(image=image, output_type="pil") if max(image.shape[-2:]) > self.vae.config["sample_size"] * 1.5: logger.warning( "Your input images far exceed the default resolution of the underlying diffusion model. " "The output images may contain severe artifacts! " "Consider down-sampling the input using the `height` and `width` parameters" ) image = image.to(dtype) x0 = self.vae.encode(image.to(self.device)).latent_dist.mode() x0 = x0.to(dtype) x0 = self.vae.config.scaling_factor * x0 return x0, resized def compute_noise_ddim(scheduler, prev_latents, latents, timestep, noise_pred, eta): # 1. get previous step value (=t-1) prev_timestep = timestep - scheduler.config.num_train_timesteps // scheduler.num_inference_steps # 2. compute alphas, betas alpha_prod_t = scheduler.alphas_cumprod[timestep] alpha_prod_t_prev = ( scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else scheduler.final_alpha_cumprod ) beta_prod_t = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5) # 4. Clip "predicted x_0" if scheduler.config.clip_sample: pred_original_sample = torch.clamp(pred_original_sample, -1, 1) # 5. compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) variance = scheduler._get_variance(timestep, prev_timestep) std_dev_t = eta * variance ** (0.5) # 6. 
compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * noise_pred # modifed so that updated xtm1 is returned as well (to avoid error accumulation) mu_xt = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction if variance > 0.0: noise = (prev_latents - mu_xt) / (variance ** (0.5) * eta) else: noise = torch.tensor([0.0]).to(latents.device) return noise, mu_xt + (eta * variance**0.5) * noise def compute_noise_sde_dpm_pp_2nd(scheduler, prev_latents, latents, timestep, noise_pred, eta): def first_order_update(model_output, sample): # timestep, prev_timestep, sample): sigma_t, sigma_s = scheduler.sigmas[scheduler.step_index + 1], scheduler.sigmas[scheduler.step_index] alpha_t, sigma_t = scheduler._sigma_to_alpha_sigma_t(sigma_t) alpha_s, sigma_s = scheduler._sigma_to_alpha_sigma_t(sigma_s) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s = torch.log(alpha_s) - torch.log(sigma_s) h = lambda_t - lambda_s mu_xt = (sigma_t / sigma_s * torch.exp(-h)) * sample + (alpha_t * (1 - torch.exp(-2.0 * h))) * model_output mu_xt = scheduler.dpm_solver_first_order_update( model_output=model_output, sample=sample, noise=torch.zeros_like(sample) ) sigma = sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) if sigma > 0.0: noise = (prev_latents - mu_xt) / sigma else: noise = torch.tensor([0.0]).to(sample.device) prev_sample = mu_xt + sigma * noise return noise, prev_sample def second_order_update(model_output_list, sample): # timestep_list, prev_timestep, sample): sigma_t, sigma_s0, sigma_s1 = ( scheduler.sigmas[scheduler.step_index + 1], scheduler.sigmas[scheduler.step_index], scheduler.sigmas[scheduler.step_index - 1], ) alpha_t, sigma_t = scheduler._sigma_to_alpha_sigma_t(sigma_t) alpha_s0, sigma_s0 = scheduler._sigma_to_alpha_sigma_t(sigma_s0) alpha_s1, sigma_s1 = scheduler._sigma_to_alpha_sigma_t(sigma_s1) lambda_t = torch.log(alpha_t) - torch.log(sigma_t) lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0) lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1) m0, m1 = model_output_list[-1], model_output_list[-2] h, h_0 = lambda_t - lambda_s0, lambda_s0 - lambda_s1 r0 = h_0 / h D0, D1 = m0, (1.0 / r0) * (m0 - m1) mu_xt = ( (sigma_t / sigma_s0 * torch.exp(-h)) * sample + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0 + 0.5 * (alpha_t * (1 - torch.exp(-2.0 * h))) * D1 ) sigma = sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h)) if sigma > 0.0: noise = (prev_latents - mu_xt) / sigma else: noise = torch.tensor([0.0]).to(sample.device) prev_sample = mu_xt + sigma * noise return noise, prev_sample if scheduler.step_index is None: scheduler._init_step_index(timestep) model_output = scheduler.convert_model_output(model_output=noise_pred, sample=latents) for i in range(scheduler.config.solver_order - 1): scheduler.model_outputs[i] = scheduler.model_outputs[i + 1] scheduler.model_outputs[-1] = model_output if scheduler.lower_order_nums < 1: noise, prev_sample = first_order_update(model_output, latents) else: noise, prev_sample = second_order_update(scheduler.model_outputs, latents) if scheduler.lower_order_nums < scheduler.config.solver_order: scheduler.lower_order_nums += 1 # upon completion increase step index by one scheduler._step_index += 1 return noise, prev_sample def compute_noise(scheduler, *args): if isinstance(scheduler, DDIMScheduler): return compute_noise_ddim(scheduler, *args) elif ( isinstance(scheduler, DPMSolverMultistepScheduler) and scheduler.config.algorithm_type == 
"sde-dpmsolver++" and scheduler.config.solver_order == 2 ): return compute_noise_sde_dpm_pp_2nd(scheduler, *args) else: raise NotImplementedError
diffusers/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/ledits_pp/pipeline_leditspp_stable_diffusion.py", "repo_id": "diffusers", "token_count": 35679 }
118
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from transformers.models.clip.modeling_clip import CLIPTextModelOutput from ...image_processor import VaeImageProcessor from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, PriorTransformer, UNet2DConditionModel from ...models.embeddings import get_timestep_embedding from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import ( USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput, StableDiffusionMixin from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import StableUnCLIPPipeline >>> pipe = StableUnCLIPPipeline.from_pretrained( ... "fusing/stable-unclip-2-1-l", torch_dtype=torch.float16 ... ) # TODO update model path >>> pipe = pipe.to("cuda") >>> prompt = "a photo of an astronaut riding a horse on mars" >>> images = pipe(prompt).images >>> images[0].save("astronaut_horse.png") ``` """ class StableUnCLIPPipeline(DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin): """ Pipeline for text-to-image generation using stable unCLIP. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). The pipeline also inherits the following loading methods: - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights Args: prior_tokenizer ([`CLIPTokenizer`]): A [`CLIPTokenizer`]. prior_text_encoder ([`CLIPTextModelWithProjection`]): Frozen [`CLIPTextModelWithProjection`] text-encoder. prior ([`PriorTransformer`]): The canonical unCLIP prior to approximate the image embedding from the text embedding. prior_scheduler ([`KarrasDiffusionSchedulers`]): Scheduler used in the prior denoising process. image_normalizer ([`StableUnCLIPImageNormalizer`]): Used to normalize the predicted image embeddings before the noise is applied and un-normalize the image embeddings after the noise has been applied. image_noising_scheduler ([`KarrasDiffusionSchedulers`]): Noise schedule for adding noise to the predicted image embeddings. The amount of noise to add is determined by the `noise_level`. tokenizer ([`CLIPTokenizer`]): A [`CLIPTokenizer`]. 
text_encoder ([`CLIPTextModel`]): Frozen [`CLIPTextModel`] text-encoder. unet ([`UNet2DConditionModel`]): A [`UNet2DConditionModel`] to denoise the encoded image latents. scheduler ([`KarrasDiffusionSchedulers`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. """ _exclude_from_cpu_offload = ["prior", "image_normalizer"] model_cpu_offload_seq = "text_encoder->prior_text_encoder->unet->vae" # prior components prior_tokenizer: CLIPTokenizer prior_text_encoder: CLIPTextModelWithProjection prior: PriorTransformer prior_scheduler: KarrasDiffusionSchedulers # image noising components image_normalizer: StableUnCLIPImageNormalizer image_noising_scheduler: KarrasDiffusionSchedulers # regular denoising components tokenizer: CLIPTokenizer text_encoder: CLIPTextModel unet: UNet2DConditionModel scheduler: KarrasDiffusionSchedulers vae: AutoencoderKL def __init__( self, # prior components prior_tokenizer: CLIPTokenizer, prior_text_encoder: CLIPTextModelWithProjection, prior: PriorTransformer, prior_scheduler: KarrasDiffusionSchedulers, # image noising components image_normalizer: StableUnCLIPImageNormalizer, image_noising_scheduler: KarrasDiffusionSchedulers, # regular denoising components tokenizer: CLIPTokenizer, text_encoder: CLIPTextModelWithProjection, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, # vae vae: AutoencoderKL, ): super().__init__() self.register_modules( prior_tokenizer=prior_tokenizer, prior_text_encoder=prior_text_encoder, prior=prior, prior_scheduler=prior_scheduler, image_normalizer=image_normalizer, image_noising_scheduler=image_noising_scheduler, tokenizer=tokenizer, text_encoder=text_encoder, unet=unet, scheduler=scheduler, vae=vae, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline._encode_prompt with _encode_prompt->_encode_prior_prompt, tokenizer->prior_tokenizer, text_encoder->prior_text_encoder def _encode_prior_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]] = None, text_attention_mask: Optional[torch.Tensor] = None, ): if text_model_output is None: batch_size = len(prompt) if isinstance(prompt, list) else 1 # get prompt text embeddings text_inputs = self.prior_tokenizer( prompt, padding="max_length", max_length=self.prior_tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids text_mask = text_inputs.attention_mask.bool().to(device) untruncated_ids = self.prior_tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.prior_tokenizer.batch_decode( untruncated_ids[:, self.prior_tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.prior_tokenizer.model_max_length} tokens: {removed_text}" ) text_input_ids = text_input_ids[:, : self.prior_tokenizer.model_max_length] prior_text_encoder_output = self.prior_text_encoder(text_input_ids.to(device)) prompt_embeds = prior_text_encoder_output.text_embeds text_enc_hid_states = 
prior_text_encoder_output.last_hidden_state else: batch_size = text_model_output[0].shape[0] prompt_embeds, text_enc_hid_states = text_model_output[0], text_model_output[1] text_mask = text_attention_mask prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) text_enc_hid_states = text_enc_hid_states.repeat_interleave(num_images_per_prompt, dim=0) text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) if do_classifier_free_guidance: uncond_tokens = [""] * batch_size uncond_input = self.prior_tokenizer( uncond_tokens, padding="max_length", max_length=self.prior_tokenizer.model_max_length, truncation=True, return_tensors="pt", ) uncond_text_mask = uncond_input.attention_mask.bool().to(device) negative_prompt_embeds_prior_text_encoder_output = self.prior_text_encoder( uncond_input.input_ids.to(device) ) negative_prompt_embeds = negative_prompt_embeds_prior_text_encoder_output.text_embeds uncond_text_enc_hid_states = negative_prompt_embeds_prior_text_encoder_output.last_hidden_state # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) seq_len = uncond_text_enc_hid_states.shape[1] uncond_text_enc_hid_states = uncond_text_enc_hid_states.repeat(1, num_images_per_prompt, 1) uncond_text_enc_hid_states = uncond_text_enc_hid_states.view( batch_size * num_images_per_prompt, seq_len, -1 ) uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) # done duplicates # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) text_enc_hid_states = torch.cat([uncond_text_enc_hid_states, text_enc_hid_states]) text_mask = torch.cat([uncond_text_mask, text_mask]) return prompt_embeds, text_enc_hid_states, text_mask # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, **kwargs, ): deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." 
deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt( prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs, ) # concatenate for backwards comp prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. 
""" # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, LoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." 
) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, negative_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents def decode_latents(self, latents): deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs with prepare_extra_step_kwargs->prepare_prior_extra_step_kwargs, scheduler->prior_scheduler def prepare_prior_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the prior_scheduler step, since not all prior_schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other prior_schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.prior_scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the prior_scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.prior_scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, height, width, callback_steps, noise_level, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if prompt is not None and prompt_embeds is not None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Please make sure to define only one of the two." ) if prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) if prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( "Provide either `negative_prompt` or `negative_prompt_embeds`. Cannot leave both `negative_prompt` and `negative_prompt_embeds` undefined." ) if prompt is not None and negative_prompt is not None: if type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if noise_level < 0 or noise_level >= self.image_noising_scheduler.config.num_train_timesteps: raise ValueError( f"`noise_level` must be between 0 and {self.image_noising_scheduler.config.num_train_timesteps - 1}, inclusive." 
) # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents def noise_image_embeddings( self, image_embeds: torch.Tensor, noise_level: int, noise: Optional[torch.FloatTensor] = None, generator: Optional[torch.Generator] = None, ): """ Add noise to the image embeddings. The amount of noise is controlled by a `noise_level` input. A higher `noise_level` increases the variance in the final un-noised images. The noise is applied in two ways: 1. A noise schedule is applied directly to the embeddings. 2. A vector of sinusoidal time embeddings are appended to the output. In both cases, the amount of noise is controlled by the same `noise_level`. The embeddings are normalized before the noise is applied and un-normalized after the noise is applied. """ if noise is None: noise = randn_tensor( image_embeds.shape, generator=generator, device=image_embeds.device, dtype=image_embeds.dtype ) noise_level = torch.tensor([noise_level] * image_embeds.shape[0], device=image_embeds.device) self.image_normalizer.to(image_embeds.device) image_embeds = self.image_normalizer.scale(image_embeds) image_embeds = self.image_noising_scheduler.add_noise(image_embeds, timesteps=noise_level, noise=noise) image_embeds = self.image_normalizer.unscale(image_embeds) noise_level = get_timestep_embedding( timesteps=noise_level, embedding_dim=image_embeds.shape[-1], flip_sin_to_cos=True, downscale_freq_shift=0 ) # `get_timestep_embeddings` does not contain any weights and will always return f32 tensors, # but we might actually be running in fp16. so we need to cast here. # there might be better ways to encapsulate this. noise_level = noise_level.to(image_embeds.dtype) image_embeds = torch.cat((image_embeds, noise_level), 1) return image_embeds @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, # regular denoising process args prompt: Optional[Union[str, List[str]]] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 20, guidance_scale: float = 10.0, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, noise_level: int = 0, # prior args prior_num_inference_steps: int = 25, prior_guidance_scale: float = 4.0, prior_latents: Optional[torch.FloatTensor] = None, clip_skip: Optional[int] = None, ): """ The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. 
width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 20): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 10.0): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). noise_level (`int`, *optional*, defaults to `0`): The amount of noise to add to the image embeddings. A higher `noise_level` increases the variance in the final un-noised images. See [`StableUnCLIPPipeline.noise_image_embeddings`] for more details. prior_num_inference_steps (`int`, *optional*, defaults to 25): The number of denoising steps in the prior denoising process. More denoising steps usually lead to a higher quality image at the expense of slower inference. 
prior_guidance_scale (`float`, *optional*, defaults to 4.0): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. prior_latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image embedding generation in the prior denoising process. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. Examples: Returns: [`~pipelines.ImagePipelineOutput`] or `tuple`: [`~ pipeline_utils.ImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images. """ # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs( prompt=prompt, height=height, width=width, callback_steps=callback_steps, noise_level=noise_level, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, ) # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] batch_size = batch_size * num_images_per_prompt device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. prior_do_classifier_free_guidance = prior_guidance_scale > 1.0 # 3. Encode input prompt prior_prompt_embeds, prior_text_encoder_hidden_states, prior_text_mask = self._encode_prior_prompt( prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=prior_do_classifier_free_guidance, ) # 4. Prepare prior timesteps self.prior_scheduler.set_timesteps(prior_num_inference_steps, device=device) prior_timesteps_tensor = self.prior_scheduler.timesteps # 5. Prepare prior latent variables embedding_dim = self.prior.config.embedding_dim prior_latents = self.prepare_latents( (batch_size, embedding_dim), prior_prompt_embeds.dtype, device, generator, prior_latents, self.prior_scheduler, ) # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline prior_extra_step_kwargs = self.prepare_prior_extra_step_kwargs(generator, eta) # 7. 
Prior denoising loop for i, t in enumerate(self.progress_bar(prior_timesteps_tensor)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([prior_latents] * 2) if prior_do_classifier_free_guidance else prior_latents latent_model_input = self.prior_scheduler.scale_model_input(latent_model_input, t) predicted_image_embedding = self.prior( latent_model_input, timestep=t, proj_embedding=prior_prompt_embeds, encoder_hidden_states=prior_text_encoder_hidden_states, attention_mask=prior_text_mask, ).predicted_image_embedding if prior_do_classifier_free_guidance: predicted_image_embedding_uncond, predicted_image_embedding_text = predicted_image_embedding.chunk(2) predicted_image_embedding = predicted_image_embedding_uncond + prior_guidance_scale * ( predicted_image_embedding_text - predicted_image_embedding_uncond ) prior_latents = self.prior_scheduler.step( predicted_image_embedding, timestep=t, sample=prior_latents, **prior_extra_step_kwargs, return_dict=False, )[0] if callback is not None and i % callback_steps == 0: callback(i, t, prior_latents) prior_latents = self.prior.post_process_latents(prior_latents) image_embeds = prior_latents # done prior # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 8. Encode input prompt text_encoder_lora_scale = ( cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None ) prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=clip_skip, ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) # 9. Prepare image embeddings image_embeds = self.noise_image_embeddings( image_embeds=image_embeds, noise_level=noise_level, generator=generator, ) if do_classifier_free_guidance: negative_prompt_embeds = torch.zeros_like(image_embeds) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes image_embeds = torch.cat([negative_prompt_embeds, image_embeds]) # 10. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 11. Prepare latent variables num_channels_latents = self.unet.config.in_channels shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) latents = self.prepare_latents( shape=shape, dtype=prompt_embeds.dtype, device=device, generator=generator, latents=latents, scheduler=self.scheduler, ) # 12. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 13. 
Denoising loop for i, t in enumerate(self.progress_bar(timesteps)): latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, class_labels=image_embeds, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0] # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] else: image = latents image = self.image_processor.postprocess(image, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return ImagePipelineOutput(images=image)
diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py", "repo_id": "diffusers", "token_count": 19691 }
119
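The record above is the StableUnCLIP text-to-image pipeline, which chains a prior denoising loop over CLIP image embeddings with a regular latent denoising loop. A minimal, hedged usage sketch follows; the checkpoint id is a placeholder assumption (not a verified repository), and the argument values simply mirror the defaults documented in `__call__`, so treat this as illustrative rather than canonical.

# Hedged usage sketch for StableUnCLIPPipeline; the repo id below is a placeholder.
import torch
from diffusers import StableUnCLIPPipeline

pipe = StableUnCLIPPipeline.from_pretrained(
    "some-org/stable-unclip-checkpoint",  # placeholder checkpoint id
    torch_dtype=torch.float16,
)
pipe = pipe.to("cuda")  # assumes a CUDA device is available

# The `prior_*` arguments drive the prior loop that produces image embeddings;
# `noise_level` controls how much noise is added to those embeddings before
# they condition the UNet (higher values -> more variance in the result).
image = pipe(
    prompt="a photo of an astronaut riding a horse",
    num_inference_steps=20,
    guidance_scale=10.0,
    prior_num_inference_steps=25,
    prior_guidance_scale=4.0,
    noise_level=0,
).images[0]
image.save("stable_unclip.png")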
import numpy as np import torch from ...utils import is_invisible_watermark_available if is_invisible_watermark_available(): from imwatermark import WatermarkEncoder # Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66 WATERMARK_MESSAGE = 0b101100111110110010010000011110111011000110011110 # bin(x)[2:] gives bits of x as str, use int to convert them to 0/1 WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]] class StableDiffusionXLWatermarker: def __init__(self): self.watermark = WATERMARK_BITS self.encoder = WatermarkEncoder() self.encoder.set_watermark("bits", self.watermark) def apply_watermark(self, images: torch.FloatTensor): # can't encode images that are smaller than 256 if images.shape[-1] < 256: return images images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy() images = [self.encoder.encode(image, "dwtDct") for image in images] images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2) images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0) return images
diffusers/src/diffusers/pipelines/stable_diffusion_xl/watermark.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/stable_diffusion_xl/watermark.py", "repo_id": "diffusers", "token_count": 509 }
120
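A small sketch of how the watermarker above can be exercised on its own. The tensor is random data standing in for decoded VAE output, and the import path simply mirrors the file path of this record; the optional `invisible-watermark` package must be installed for the encoder to be available.

import torch
from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker

watermarker = StableDiffusionXLWatermarker()

# Stand-in for decoded images in [-1, 1], NCHW; images narrower than 256 px
# are returned unchanged by `apply_watermark`.
images = torch.rand(2, 3, 512, 512) * 2.0 - 1.0
watermarked = watermarker.apply_watermark(images)
print(watermarked.shape)  # torch.Size([2, 3, 512, 512])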
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( ImageTextPipelineOutput, UniDiffuserPipeline, ) _dummy_objects.update( {"ImageTextPipelineOutput": ImageTextPipelineOutput, "UniDiffuserPipeline": UniDiffuserPipeline} ) else: _import_structure["modeling_text_decoder"] = ["UniDiffuserTextDecoder"] _import_structure["modeling_uvit"] = ["UniDiffuserModel", "UTransformer2DModel"] _import_structure["pipeline_unidiffuser"] = ["ImageTextPipelineOutput", "UniDiffuserPipeline"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( ImageTextPipelineOutput, UniDiffuserPipeline, ) else: from .modeling_text_decoder import UniDiffuserTextDecoder from .modeling_uvit import UniDiffuserModel, UTransformer2DModel from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
diffusers/src/diffusers/pipelines/unidiffuser/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/unidiffuser/__init__.py", "repo_id": "diffusers", "token_count": 733 }
121
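The `__init__.py` above only wires up lazy imports: nothing heavy is loaded until an attribute is first accessed, and dummy objects are substituted when torch or transformers is missing. A short sketch of the observable behavior, assuming both backends are installed:

# Importing the subpackage is cheap; _LazyModule resolves submodules on first attribute access.
import diffusers.pipelines.unidiffuser as unidiffuser

# This attribute access is what actually triggers the import of `pipeline_unidiffuser`.
pipeline_cls = unidiffuser.UniDiffuserPipeline
print(pipeline_cls.__name__)  # "UniDiffuserPipeline"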
# Copyright 2024 NVIDIA and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ...configuration_utils import ConfigMixin, register_to_config from ...utils import BaseOutput from ...utils.torch_utils import randn_tensor from ..scheduling_utils import SchedulerMixin @dataclass class KarrasVeOutput(BaseOutput): """ Output class for the scheduler's step function output. Args: prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the denoising loop. derivative (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): Derivative of predicted original image sample (x_0). pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): The predicted denoised sample (x_{0}) based on the model output from the current timestep. `pred_original_sample` can be used to preview progress or for guidance. """ prev_sample: torch.FloatTensor derivative: torch.FloatTensor pred_original_sample: Optional[torch.FloatTensor] = None class KarrasVeScheduler(SchedulerMixin, ConfigMixin): """ A stochastic scheduler tailored to variance-expanding models. This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic methods the library implements for all schedulers such as loading and saving. <Tip> For more details on the parameters, see [Appendix E](https://arxiv.org/abs/2206.00364). The grid search values used to find the optimal `{s_noise, s_churn, s_min, s_max}` for a specific model are described in Table 5 of the paper. </Tip> Args: sigma_min (`float`, defaults to 0.02): The minimum noise magnitude. sigma_max (`float`, defaults to 100): The maximum noise magnitude. s_noise (`float`, defaults to 1.007): The amount of additional noise to counteract loss of detail during sampling. A reasonable range is [1.000, 1.011]. s_churn (`float`, defaults to 80): The parameter controlling the overall amount of stochasticity. A reasonable range is [0, 100]. s_min (`float`, defaults to 0.05): The start value of the sigma range to add noise (enable stochasticity). A reasonable range is [0, 10]. s_max (`float`, defaults to 50): The end value of the sigma range to add noise. A reasonable range is [0.2, 80]. 
""" order = 2 @register_to_config def __init__( self, sigma_min: float = 0.02, sigma_max: float = 100, s_noise: float = 1.007, s_churn: float = 80, s_min: float = 0.05, s_max: float = 50, ): # standard deviation of the initial noise distribution self.init_noise_sigma = sigma_max # setable values self.num_inference_steps: int = None self.timesteps: np.IntTensor = None self.schedule: torch.FloatTensor = None # sigma(t_i) def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor: """ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep. Args: sample (`torch.FloatTensor`): The input sample. timestep (`int`, *optional*): The current timestep in the diffusion chain. Returns: `torch.FloatTensor`: A scaled input sample. """ return sample def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): """ Sets the discrete timesteps used for the diffusion chain (to be run before inference). Args: num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. """ self.num_inference_steps = num_inference_steps timesteps = np.arange(0, self.num_inference_steps)[::-1].copy() self.timesteps = torch.from_numpy(timesteps).to(device) schedule = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device) def add_noise_to_input( self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None ) -> Tuple[torch.FloatTensor, float]: """ Explicit Langevin-like "churn" step of adding noise to the sample according to a `gamma_i ≥ 0` to reach a higher noise level `sigma_hat = sigma_i + gamma_i*sigma_i`. Args: sample (`torch.FloatTensor`): The input sample. sigma (`float`): generator (`torch.Generator`, *optional*): A random number generator. """ if self.config.s_min <= sigma <= self.config.s_max: gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1) else: gamma = 0 # sample eps ~ N(0, S_noise^2 * I) eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device) sigma_hat = sigma + gamma * sigma sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def step( self, model_output: torch.FloatTensor, sigma_hat: float, sigma_prev: float, sample_hat: torch.FloatTensor, return_dict: bool = True, ) -> Union[KarrasVeOutput, Tuple]: """ Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion process from the learned model outputs (most often the predicted noise). Args: model_output (`torch.FloatTensor`): The direct output from learned diffusion model. sigma_hat (`float`): sigma_prev (`float`): sample_hat (`torch.FloatTensor`): return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~schedulers.scheduling_karras_ve.KarrasVESchedulerOutput`] or `tuple`. Returns: [`~schedulers.scheduling_karras_ve.KarrasVESchedulerOutput`] or `tuple`: If return_dict is `True`, [`~schedulers.scheduling_karras_ve.KarrasVESchedulerOutput`] is returned, otherwise a tuple is returned where the first element is the sample tensor. 
""" pred_original_sample = sample_hat + sigma_hat * model_output derivative = (sample_hat - pred_original_sample) / sigma_hat sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample ) def step_correct( self, model_output: torch.FloatTensor, sigma_hat: float, sigma_prev: float, sample_hat: torch.FloatTensor, sample_prev: torch.FloatTensor, derivative: torch.FloatTensor, return_dict: bool = True, ) -> Union[KarrasVeOutput, Tuple]: """ Corrects the predicted sample based on the `model_output` of the network. Args: model_output (`torch.FloatTensor`): The direct output from learned diffusion model. sigma_hat (`float`): TODO sigma_prev (`float`): TODO sample_hat (`torch.FloatTensor`): TODO sample_prev (`torch.FloatTensor`): TODO derivative (`torch.FloatTensor`): TODO return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~schedulers.scheduling_ddpm.DDPMSchedulerOutput`] or `tuple`. Returns: prev_sample (TODO): updated sample in the diffusion chain. derivative (TODO): TODO """ pred_original_sample = sample_prev + sigma_prev * model_output derivative_corr = (sample_prev - pred_original_sample) / sigma_prev sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample ) def add_noise(self, original_samples, noise, timesteps): raise NotImplementedError()
diffusers/src/diffusers/schedulers/deprecated/scheduling_karras_ve.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/deprecated/scheduling_karras_ve.py", "repo_id": "diffusers", "token_count": 4089 }
122
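The scheduler above is meant to be driven by an outer sampling loop that alternates `add_noise_to_input`, `step`, and (for all but the last sigma) `step_correct`. The sketch below mirrors the loop of the deprecated `KarrasVePipeline`; the denoiser is a zero-returning stand-in for a trained `UNet2DModel`, so the output is meaningless but the control flow is the intended one.

import torch
from diffusers.schedulers.deprecated.scheduling_karras_ve import KarrasVeScheduler

scheduler = KarrasVeScheduler()
scheduler.set_timesteps(50)

generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 64, 64, generator=generator) * scheduler.init_noise_sigma

def toy_denoiser(x, sigma):
    # Placeholder for a trained model; must return a tensor shaped like `x`.
    return torch.zeros_like(x)

for t in scheduler.timesteps:
    sigma = scheduler.schedule[t]
    sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
    # Stochastic "churn": extra noise is added only while sigma lies in [s_min, s_max].
    sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma, generator=generator)
    output = scheduler.step(toy_denoiser(sample_hat, sigma_hat), sigma_hat, sigma_prev, sample_hat)
    if sigma_prev != 0:
        # Second-order correction re-evaluates the model at the predicted previous sample.
        output = scheduler.step_correct(
            toy_denoiser(output.prev_sample, sigma_prev),
            sigma_hat,
            sigma_prev,
            sample_hat,
            output.prev_sample,
            output.derivative,
        )
    sample = output.prev_sample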
# Copyright 2024 Zhejiang University Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar def betas_for_alpha_bar( num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ): """ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up to that part of the diffusion process. Args: num_diffusion_timesteps (`int`): the number of betas to produce. max_beta (`float`): the maximum beta to use; use values lower than 1 to prevent singularities. alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. Choose from `cosine` or `exp` Returns: betas (`np.ndarray`): the betas used by the scheduler to step the model outputs """ if alpha_transform_type == "cosine": def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) class PNDMScheduler(SchedulerMixin, ConfigMixin): """ `PNDMScheduler` uses pseudo numerical methods for diffusion models such as the Runge-Kutta and linear multi-step method. This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic methods the library implements for all schedulers such as loading and saving. Args: num_train_timesteps (`int`, defaults to 1000): The number of diffusion steps to train the model. beta_start (`float`, defaults to 0.0001): The starting `beta` value of inference. beta_end (`float`, defaults to 0.02): The final `beta` value. beta_schedule (`str`, defaults to `"linear"`): The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from `linear`, `scaled_linear`, or `squaredcos_cap_v2`. trained_betas (`np.ndarray`, *optional*): Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. skip_prk_steps (`bool`, defaults to `False`): Allows the scheduler to skip the Runge-Kutta steps defined in the original paper as being required before PLMS steps. 
        set_alpha_to_one (`bool`, defaults to `False`):
            Each diffusion step uses the alphas product value at that step and at the previous one. For the final
            step there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,
            otherwise it uses the alpha value at step 0.
        prediction_type (`str`, defaults to `epsilon`, *optional*):
            Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion
            process) or `v_prediction` (see section 2.4 of [Imagen Video](https://imagen.research.google/video/paper.pdf)
            paper).
        timestep_spacing (`str`, defaults to `"leading"`):
            The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and
            Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
        steps_offset (`int`, defaults to 0):
            An offset added to the inference steps, as required by some model families.
    """

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        skip_prk_steps: bool = False,
        set_alpha_to_one: bool = False,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "leading",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.cur_model_output = 0
        self.counter = 0
        self.cur_sample = None
        self.ets = []

        # setable values
        self.num_inference_steps = None
        self._timesteps = np.arange(0, num_train_timesteps)[::-1].copy()
        self.prk_timesteps = None
        self.plms_timesteps = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """
        Sets the discrete timesteps used for the diffusion chain (to be run before inference).

        Args:
            num_inference_steps (`int`):
                The number of diffusion steps used when generating samples with a pre-trained model.
            device (`str` or `torch.device`, *optional*):
                The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
        """

        self.num_inference_steps = num_inference_steps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2.
of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": self._timesteps = ( np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps).round().astype(np.int64) ) elif self.config.timestep_spacing == "leading": step_ratio = self.config.num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 self._timesteps = (np.arange(0, num_inference_steps) * step_ratio).round() self._timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": step_ratio = self.config.num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 self._timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, -step_ratio))[::-1].astype( np.int64 ) self._timesteps -= 1 else: raise ValueError( f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." ) if self.config.skip_prk_steps: # for some models like stable diffusion the prk steps can/should be skipped to # produce better results. When using PNDM with `self.config.skip_prk_steps` the implementation # is based on crowsonkb's PLMS sampler implementation: https://github.com/CompVis/latent-diffusion/pull/51 self.prk_timesteps = np.array([]) self.plms_timesteps = np.concatenate([self._timesteps[:-1], self._timesteps[-2:-1], self._timesteps[-1:]])[ ::-1 ].copy() else: prk_timesteps = np.array(self._timesteps[-self.pndm_order :]).repeat(2) + np.tile( np.array([0, self.config.num_train_timesteps // num_inference_steps // 2]), self.pndm_order ) self.prk_timesteps = (prk_timesteps[:-1].repeat(2)[1:-1])[::-1].copy() self.plms_timesteps = self._timesteps[:-3][ ::-1 ].copy() # we copy to avoid having negative strides which are not supported by torch.from_numpy timesteps = np.concatenate([self.prk_timesteps, self.plms_timesteps]).astype(np.int64) self.timesteps = torch.from_numpy(timesteps).to(device) self.ets = [] self.counter = 0 self.cur_model_output = 0 def step( self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, return_dict: bool = True, ) -> Union[SchedulerOutput, Tuple]: """ Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion process from the learned model outputs (most often the predicted noise), and calls [`~PNDMScheduler.step_prk`] or [`~PNDMScheduler.step_plms`] depending on the internal variable `counter`. Args: model_output (`torch.FloatTensor`): The direct output from learned diffusion model. timestep (`int`): The current discrete timestep in the diffusion chain. sample (`torch.FloatTensor`): A current instance of a sample created by the diffusion process. return_dict (`bool`): Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`. Returns: [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a tuple is returned where the first element is the sample tensor. 
""" if self.counter < len(self.prk_timesteps) and not self.config.skip_prk_steps: return self.step_prk(model_output=model_output, timestep=timestep, sample=sample, return_dict=return_dict) else: return self.step_plms(model_output=model_output, timestep=timestep, sample=sample, return_dict=return_dict) def step_prk( self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, return_dict: bool = True, ) -> Union[SchedulerOutput, Tuple]: """ Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with the Runge-Kutta method. It performs four forward passes to approximate the solution to the differential equation. Args: model_output (`torch.FloatTensor`): The direct output from learned diffusion model. timestep (`int`): The current discrete timestep in the diffusion chain. sample (`torch.FloatTensor`): A current instance of a sample created by the diffusion process. return_dict (`bool`): Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or tuple. Returns: [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a tuple is returned where the first element is the sample tensor. """ if self.num_inference_steps is None: raise ValueError( "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" ) diff_to_prev = 0 if self.counter % 2 else self.config.num_train_timesteps // self.num_inference_steps // 2 prev_timestep = timestep - diff_to_prev timestep = self.prk_timesteps[self.counter // 4 * 4] if self.counter % 4 == 0: self.cur_model_output += 1 / 6 * model_output self.ets.append(model_output) self.cur_sample = sample elif (self.counter - 1) % 4 == 0: self.cur_model_output += 1 / 3 * model_output elif (self.counter - 2) % 4 == 0: self.cur_model_output += 1 / 3 * model_output elif (self.counter - 3) % 4 == 0: model_output = self.cur_model_output + 1 / 6 * model_output self.cur_model_output = 0 # cur_sample should not be `None` cur_sample = self.cur_sample if self.cur_sample is not None else sample prev_sample = self._get_prev_sample(cur_sample, timestep, prev_timestep, model_output) self.counter += 1 if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=prev_sample) def step_plms( self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, return_dict: bool = True, ) -> Union[SchedulerOutput, Tuple]: """ Predict the sample from the previous timestep by reversing the SDE. This function propagates the sample with the linear multistep method. It performs one forward pass multiple times to approximate the solution. Args: model_output (`torch.FloatTensor`): The direct output from learned diffusion model. timestep (`int`): The current discrete timestep in the diffusion chain. sample (`torch.FloatTensor`): A current instance of a sample created by the diffusion process. return_dict (`bool`): Whether or not to return a [`~schedulers.scheduling_utils.SchedulerOutput`] or tuple. Returns: [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: If return_dict is `True`, [`~schedulers.scheduling_utils.SchedulerOutput`] is returned, otherwise a tuple is returned where the first element is the sample tensor. 
""" if self.num_inference_steps is None: raise ValueError( "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" ) if not self.config.skip_prk_steps and len(self.ets) < 3: raise ValueError( f"{self.__class__} can only be run AFTER scheduler has been run " "in 'prk' mode for at least 12 iterations " "See: https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/pipeline_pndm.py " "for more information." ) prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps if self.counter != 1: self.ets = self.ets[-3:] self.ets.append(model_output) else: prev_timestep = timestep timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps if len(self.ets) == 1 and self.counter == 0: model_output = model_output self.cur_sample = sample elif len(self.ets) == 1 and self.counter == 1: model_output = (model_output + self.ets[-1]) / 2 sample = self.cur_sample self.cur_sample = None elif len(self.ets) == 2: model_output = (3 * self.ets[-1] - self.ets[-2]) / 2 elif len(self.ets) == 3: model_output = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12 else: model_output = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4]) prev_sample = self._get_prev_sample(sample, timestep, prev_timestep, model_output) self.counter += 1 if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=prev_sample) def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor: """ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep. Args: sample (`torch.FloatTensor`): The input sample. Returns: `torch.FloatTensor`: A scaled input sample. 
""" return sample def _get_prev_sample(self, sample, timestep, prev_timestep, model_output): # See formula (9) of PNDM paper https://arxiv.org/pdf/2202.09778.pdf # this function computes x_(t−δ) using the formula of (9) # Note that x_t needs to be added to both sides of the equation # Notation (<variable name> -> <name in paper> # alpha_prod_t -> α_t # alpha_prod_t_prev -> α_(t−δ) # beta_prod_t -> (1 - α_t) # beta_prod_t_prev -> (1 - α_(t−δ)) # sample -> x_t # model_output -> e_θ(x_t, t) # prev_sample -> x_(t−δ) alpha_prod_t = self.alphas_cumprod[timestep] alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev if self.config.prediction_type == "v_prediction": model_output = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample elif self.config.prediction_type != "epsilon": raise ValueError( f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `v_prediction`" ) # corresponds to (α_(t−δ) - α_t) divided by # denominator of x_t in formula (9) and plus 1 # Note: (α_(t−δ) - α_t) / (sqrt(α_t) * (sqrt(α_(t−δ)) + sqr(α_t))) = # sqrt(α_(t−δ)) / sqrt(α_t)) sample_coeff = (alpha_prod_t_prev / alpha_prod_t) ** (0.5) # corresponds to denominator of e_θ(x_t, t) in formula (9) model_output_denom_coeff = alpha_prod_t * beta_prod_t_prev ** (0.5) + ( alpha_prod_t * beta_prod_t * alpha_prod_t_prev ) ** (0.5) # full formula (9) prev_sample = ( sample_coeff * sample - (alpha_prod_t_prev - alpha_prod_t) * model_output / model_output_denom_coeff ) return prev_sample # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise def add_noise( self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor, ) -> torch.FloatTensor: # Make sure alphas_cumprod and timestep have same device and dtype as original_samples # Move the self.alphas_cumprod to device to avoid redundant CPU to GPU data movement # for the subsequent add_noise calls self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device) alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype) timesteps = timesteps.to(original_samples.device) sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape) < len(original_samples.shape): sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def __len__(self): return self.config.num_train_timesteps
diffusers/src/diffusers/schedulers/scheduling_pndm.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_pndm.py", "repo_id": "diffusers", "token_count": 9457 }
123
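A hedged sketch of the PLMS-only path through the scheduler above (the `skip_prk_steps=True` configuration used by Stable Diffusion). The epsilon model is a zero-returning placeholder, so only the timestep bookkeeping and the `step`/`prev_sample` plumbing are meaningful here.

import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(skip_prk_steps=True)  # skip the Runge-Kutta warm-up, use PLMS only
scheduler.set_timesteps(50)

sample = torch.randn(1, 4, 64, 64)  # stand-in for latents produced by a real model/VAE

def toy_eps_model(x, t):
    # Placeholder epsilon prediction; a real pipeline would call a UNet here.
    return torch.zeros_like(x)

for t in scheduler.timesteps:
    model_output = toy_eps_model(sample, t)
    # `step` dispatches to `step_plms`, which keeps the last few predictions
    # in `self.ets` to build the linear multistep update.
    sample = scheduler.step(model_output, t, sample).prev_sample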
import inspect import warnings from typing import Any, Dict, Optional, Union from packaging import version def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2): from .. import __version__ deprecated_kwargs = take_from values = () if not isinstance(args[0], tuple): args = (args,) for attribute, version_name, message in args: if version.parse(version.parse(__version__).base_version) >= version.parse(version_name): raise ValueError( f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'" f" version {__version__} is >= {version_name}" ) warning = None if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs: values += (deprecated_kwargs.pop(attribute),) warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}." elif hasattr(deprecated_kwargs, attribute): values += (getattr(deprecated_kwargs, attribute),) warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}." elif deprecated_kwargs is None: warning = f"`{attribute}` is deprecated and will be removed in version {version_name}." if warning is not None: warning = warning + " " if standard_warn else "" warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel) if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0: call_frame = inspect.getouterframes(inspect.currentframe())[1] filename = call_frame.filename line_number = call_frame.lineno function = call_frame.function key, value = next(iter(deprecated_kwargs.items())) raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`") if len(values) == 0: return elif len(values) == 1: return values[0] return values
diffusers/src/diffusers/utils/deprecation_utils.py/0
{ "file_path": "diffusers/src/diffusers/utils/deprecation_utils.py", "repo_id": "diffusers", "token_count": 793 }
124
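A sketch of the typical calling pattern for `deprecate` above. The function name, keyword, and far-future removal version are made up for illustration; the important behavior is that a deprecated keyword is popped out of `kwargs`, a `FutureWarning` is emitted, and any other unexpected keyword raises a `TypeError`.

import warnings
from diffusers.utils.deprecation_utils import deprecate

def run(new_width=None, **kwargs):
    # Pop the renamed keyword (if present), warn, and fall back to its value.
    old_width = deprecate("width", "999.0.0", "Please use `new_width` instead.", take_from=kwargs)
    if new_width is None and old_width is not None:
        new_width = old_width
    return new_width

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    print(run(width=512))      # -> 512
    print(caught[0].category)  # -> <class 'FutureWarning'>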
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import re import sys import tempfile import traceback import warnings from pathlib import Path from typing import Dict, List, Optional, Union from uuid import uuid4 from huggingface_hub import ( ModelCard, ModelCardData, create_repo, hf_hub_download, upload_folder, ) from huggingface_hub.constants import HF_HUB_CACHE, HF_HUB_DISABLE_TELEMETRY, HF_HUB_OFFLINE from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, validate_hf_hub_args, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger logger = get_logger(__name__) MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md" SESSION_ID = uuid4().hex def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str: """ Formats a user-agent string with basic info about a request. """ ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}" if HF_HUB_DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f"; torch/{_torch_version}" if is_flax_available(): ua += f"; jax/{_jax_version}" ua += f"; flax/{_flax_version}" if is_onnx_available(): ua += f"; onnxruntime/{_onnxruntime_version}" # CI will set this value to True if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(user_agent, dict): ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items()) elif isinstance(user_agent, str): ua += "; " + user_agent return ua def load_or_create_model_card( repo_id_or_path: str = None, token: Optional[str] = None, is_pipeline: bool = False, from_training: bool = False, model_description: Optional[str] = None, base_model: str = None, prompt: Optional[str] = None, license: Optional[str] = None, widget: Optional[List[dict]] = None, inference: Optional[bool] = None, ) -> ModelCard: """ Loads or creates a model card. Args: repo_id_or_path (`str`): The repo id (e.g., "runwayml/stable-diffusion-v1-5") or local path where to look for the model card. token (`str`, *optional*): Authentication token. Will default to the stored token. See https://huggingface.co/settings/token for more details. is_pipeline (`bool`): Boolean to indicate if we're adding tag to a [`DiffusionPipeline`]. from_training: (`bool`): Boolean flag to denote if the model card is being created from a training script. model_description (`str`, *optional*): Model description to add to the model card. 
Helpful when using `load_or_create_model_card` from a training script. base_model (`str`): Base model identifier (e.g., "stabilityai/stable-diffusion-xl-base-1.0"). Useful for DreamBooth-like training. prompt (`str`, *optional*): Prompt used for training. Useful for DreamBooth-like training. license: (`str`, *optional*): License of the output artifact. Helpful when using `load_or_create_model_card` from a training script. widget (`List[dict]`, *optional*): Widget to accompany a gallery template. inference: (`bool`, optional): Whether to turn on inference widget. Helpful when using `load_or_create_model_card` from a training script. """ if not is_jinja_available(): raise ValueError( "Modelcard rendering is based on Jinja templates." " Please make sure to have `jinja` installed before using `load_or_create_model_card`." " To install it, please run `pip install Jinja2`." ) try: # Check if the model card is present on the remote repo model_card = ModelCard.load(repo_id_or_path, token=token) except (EntryNotFoundError, RepositoryNotFoundError): # Otherwise create a model card from template if from_training: model_card = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block license=license, library_name="diffusers", inference=inference, base_model=base_model, instance_prompt=prompt, widget=widget, ), template_path=MODEL_CARD_TEMPLATE_PATH, model_description=model_description, ) else: card_data = ModelCardData() component = "pipeline" if is_pipeline else "model" if model_description is None: model_description = f"This is the model card of a 🧨 diffusers {component} that has been pushed on the Hub. This model card has been automatically generated." model_card = ModelCard.from_template(card_data, model_description=model_description) return model_card def populate_model_card(model_card: ModelCard, tags: Union[str, List[str]] = None) -> ModelCard: """Populates the `model_card` with library name and optional tags.""" if model_card.data.library_name is None: model_card.data.library_name = "diffusers" if tags is not None: if isinstance(tags, str): tags = [tags] if model_card.data.tags is None: model_card.data.tags = [] for tag in tags: model_card.data.tags.append(tag) return model_card def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None): """ Extracts the commit hash from a resolved filename toward a cache file. """ if resolved_file is None or commit_hash is not None: return commit_hash resolved_file = str(Path(resolved_file).as_posix()) search = re.search(r"snapshots/([^/]+)/", resolved_file) if search is None: return None commit_hash = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. 
hf_cache_home = os.path.expanduser( os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface")) ) old_diffusers_cache = os.path.join(hf_cache_home, "diffusers") def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None: if new_cache_dir is None: new_cache_dir = HF_HUB_CACHE if old_cache_dir is None: old_cache_dir = old_diffusers_cache old_cache_dir = Path(old_cache_dir).expanduser() new_cache_dir = Path(new_cache_dir).expanduser() for old_blob_path in old_cache_dir.glob("**/blobs/*"): if old_blob_path.is_file() and not old_blob_path.is_symlink(): new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir) new_blob_path.parent.mkdir(parents=True, exist_ok=True) os.replace(old_blob_path, new_blob_path) try: os.symlink(new_blob_path, old_blob_path) except OSError: logger.warning( "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). cache_version_file = os.path.join(HF_HUB_CACHE, "version_diffusers_cache.txt") if not os.path.isfile(cache_version_file): cache_version = 0 else: with open(cache_version_file) as f: try: cache_version = int(f.read()) except ValueError: cache_version = 0 if cache_version < 1: old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your " "existing cached models. This is a one-time operation, you can interrupt it or run it " "later by calling `diffusers.utils.hub_utils.move_cache()`." ) try: move_cache() except Exception as e: trace = "\n".join(traceback.format_tb(e.__traceback__)) logger.error( f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease " "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole " "message and we will do our best to help." ) if cache_version < 1: try: os.makedirs(HF_HUB_CACHE, exist_ok=True) with open(cache_version_file, "w") as f: f.write("1") except Exception: logger.warning( f"There was a problem when trying to write in your cache folder ({HF_HUB_CACHE}). Please, ensure " "the directory exists and can be written to." 
) def _add_variant(weights_name: str, variant: Optional[str] = None) -> str: if variant is not None: splits = weights_name.split(".") splits = splits[:-1] + [variant] + splits[-1:] weights_name = ".".join(splits) return weights_name @validate_hf_hub_args def _get_model_file( pretrained_model_name_or_path: Union[str, Path], *, weights_name: str, subfolder: Optional[str] = None, cache_dir: Optional[str] = None, force_download: bool = False, proxies: Optional[Dict] = None, resume_download: bool = False, local_files_only: bool = False, token: Optional[str] = None, user_agent: Optional[Union[Dict, str]] = None, revision: Optional[str] = None, commit_hash: Optional[str] = None, ): pretrained_model_name_or_path = str(pretrained_model_name_or_path) if os.path.isfile(pretrained_model_name_or_path): return pretrained_model_name_or_path elif os.path.isdir(pretrained_model_name_or_path): if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)): # Load from a PyTorch checkpoint model_file = os.path.join(pretrained_model_name_or_path, weights_name) return model_file elif subfolder is not None and os.path.isfile( os.path.join(pretrained_model_name_or_path, subfolder, weights_name) ): model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name) return model_file else: raise EnvironmentError( f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}." ) else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(__version__).base_version) >= version.parse("0.22.0") ): try: model_file = hf_hub_download( pretrained_model_name_or_path, filename=_add_variant(weights_name, revision), cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, token=token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, ) warnings.warn( f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.", FutureWarning, ) return model_file except: # noqa: E722 warnings.warn( f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.", FutureWarning, ) try: # 2. 
Load model file as usual model_file = hf_hub_download( pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, token=token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, ) return model_file except RepositoryNotFoundError: raise EnvironmentError( f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier " "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " "token having permission to this repo with `token` or log in with `huggingface-cli " "login`." ) except RevisionNotFoundError: raise EnvironmentError( f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for " "this model name. Check the model page at " f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." ) except EntryNotFoundError: raise EnvironmentError( f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." ) except HTTPError as err: raise EnvironmentError( f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" ) except ValueError: raise EnvironmentError( f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it" f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a" f" directory containing a file named {weights_name} or" " \nCheckout your internet connection or see how to run the library in" " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." ) except EnvironmentError: raise EnvironmentError( f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from " "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory " f"containing a file named {weights_name}" ) class PushToHubMixin: """ A Mixin to push a model, scheduler, or pipeline to the Hugging Face Hub. """ def _upload_folder( self, working_dir: Union[str, os.PathLike], repo_id: str, token: Optional[str] = None, commit_message: Optional[str] = None, create_pr: bool = False, ): """ Uploads all files in `working_dir` to `repo_id`. """ if commit_message is None: if "Model" in self.__class__.__name__: commit_message = "Upload model" elif "Scheduler" in self.__class__.__name__: commit_message = "Upload scheduler" else: commit_message = f"Upload {self.__class__.__name__}" logger.info(f"Uploading the files of {working_dir} to {repo_id}.") return upload_folder( repo_id=repo_id, folder_path=working_dir, token=token, commit_message=commit_message, create_pr=create_pr ) def push_to_hub( self, repo_id: str, commit_message: Optional[str] = None, private: Optional[bool] = None, token: Optional[str] = None, create_pr: bool = False, safe_serialization: bool = True, variant: Optional[str] = None, ) -> str: """ Upload model, scheduler, or pipeline files to the 🤗 Hugging Face Hub. Parameters: repo_id (`str`): The name of the repository you want to push your model, scheduler, or pipeline files to. It should contain your organization name when pushing to an organization. `repo_id` can also be a path to a local directory. commit_message (`str`, *optional*): Message to commit while pushing. Default to `"Upload {object}"`. 
private (`bool`, *optional*): Whether or not the repository created should be private. token (`str`, *optional*): The token to use as HTTP bearer authorization for remote files. The token generated when running `huggingface-cli login` (stored in `~/.huggingface`). create_pr (`bool`, *optional*, defaults to `False`): Whether or not to create a PR with the uploaded files or directly commit. safe_serialization (`bool`, *optional*, defaults to `True`): Whether or not to convert the model weights to the `safetensors` format. variant (`str`, *optional*): If specified, weights are saved in the format `pytorch_model.<variant>.bin`. Examples: ```python from diffusers import UNet2DConditionModel unet = UNet2DConditionModel.from_pretrained("stabilityai/stable-diffusion-2", subfolder="unet") # Push the `unet` to your namespace with the name "my-finetuned-unet". unet.push_to_hub("my-finetuned-unet") # Push the `unet` to an organization with the name "my-finetuned-unet". unet.push_to_hub("your-org/my-finetuned-unet") ``` """ repo_id = create_repo(repo_id, private=private, token=token, exist_ok=True).repo_id # Create a new empty model card and eventually tag it model_card = load_or_create_model_card(repo_id, token=token) model_card = populate_model_card(model_card) # Save all files. save_kwargs = {"safe_serialization": safe_serialization} if "Scheduler" not in self.__class__.__name__: save_kwargs.update({"variant": variant}) with tempfile.TemporaryDirectory() as tmpdir: self.save_pretrained(tmpdir, **save_kwargs) # Update model card if needed: model_card.save(os.path.join(tmpdir, "README.md")) return self._upload_folder( tmpdir, repo_id, token=token, commit_message=commit_message, create_pr=create_pr, )
diffusers/src/diffusers/utils/hub_utils.py/0
{ "file_path": "diffusers/src/diffusers/utils/hub_utils.py", "repo_id": "diffusers", "token_count": 9131 }
125
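For context on the `_add_variant` helper in the hub utilities above: it splits the weights filename on "." and inserts the variant just before the extension. A minimal standalone sketch of that naming rule (the filenames are illustrative only):

```python
from typing import Optional


def add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    # Same splitting rule as the _add_variant helper shown above: the variant
    # is inserted between the base name and the file extension.
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name


print(add_variant("diffusion_pytorch_model.safetensors", "fp16"))
# -> diffusion_pytorch_model.fp16.safetensors
print(add_variant("diffusion_pytorch_model.safetensors"))
# -> diffusion_pytorch_model.safetensors (unchanged when no variant is given)
```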
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import ( ClapAudioConfig, ClapConfig, ClapFeatureExtractor, ClapModel, ClapTextConfig, GPT2Config, GPT2Model, RobertaTokenizer, SpeechT5HifiGan, SpeechT5HifiGanConfig, T5Config, T5EncoderModel, T5Tokenizer, ) from diffusers import ( AudioLDM2Pipeline, AudioLDM2ProjectionModel, AudioLDM2UNet2DConditionModel, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from diffusers.utils.testing_utils import enable_full_determinism, nightly, torch_device from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class AudioLDM2PipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = AudioLDM2Pipeline params = TEXT_TO_AUDIO_PARAMS batch_params = TEXT_TO_AUDIO_BATCH_PARAMS required_optional_params = frozenset( [ "num_inference_steps", "num_waveforms_per_prompt", "generator", "latents", "output_type", "return_dict", "callback", "callback_steps", ] ) def get_dummy_components(self): torch.manual_seed(0) unet = AudioLDM2UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=([None, 16, 32], [None, 16, 32]), ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_branch_config = ClapTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=16, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=2, num_hidden_layers=2, pad_token_id=1, vocab_size=1000, projection_dim=16, ) audio_branch_config = ClapAudioConfig( spec_size=64, window_size=4, num_mel_bins=64, intermediate_size=37, layer_norm_eps=1e-05, depths=[2, 2], num_attention_heads=[2, 2], num_hidden_layers=2, hidden_size=192, projection_dim=16, patch_size=2, patch_stride=2, patch_embed_input_channels=4, ) text_encoder_config = ClapConfig.from_text_audio_configs( text_config=text_branch_config, audio_config=audio_branch_config, projection_dim=16 ) text_encoder = ClapModel(text_encoder_config) tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77) feature_extractor = ClapFeatureExtractor.from_pretrained( "hf-internal-testing/tiny-random-ClapModel", hop_length=7900 ) torch.manual_seed(0) text_encoder_2_config = T5Config( vocab_size=32100, d_model=32, d_ff=37, d_kv=8, num_heads=2, num_layers=2, ) text_encoder_2 = T5EncoderModel(text_encoder_2_config) tokenizer_2 = 
T5Tokenizer.from_pretrained("hf-internal-testing/tiny-random-T5Model", model_max_length=77) torch.manual_seed(0) language_model_config = GPT2Config( n_embd=16, n_head=2, n_layer=2, vocab_size=1000, n_ctx=99, n_positions=99, ) language_model = GPT2Model(language_model_config) language_model.config.max_new_tokens = 8 torch.manual_seed(0) projection_model = AudioLDM2ProjectionModel(text_encoder_dim=16, text_encoder_1_dim=32, langauge_model_dim=16) vocoder_config = SpeechT5HifiGanConfig( model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False, ) vocoder = SpeechT5HifiGan(vocoder_config) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "text_encoder_2": text_encoder_2, "tokenizer": tokenizer, "tokenizer_2": tokenizer_2, "feature_extractor": feature_extractor, "language_model": language_model, "projection_model": projection_model, "vocoder": vocoder, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A hammer hitting a wooden surface", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, } return inputs def test_audioldm2_ddim(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() audioldm_pipe = AudioLDM2Pipeline(**components) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = audioldm_pipe(**inputs) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) == 256 audio_slice = audio[:10] expected_slice = np.array( [0.0025, 0.0018, 0.0018, -0.0023, -0.0026, -0.0020, -0.0026, -0.0021, -0.0027, -0.0020] ) assert np.abs(audio_slice - expected_slice).max() < 1e-4 def test_audioldm2_prompt_embeds(self): components = self.get_dummy_components() audioldm_pipe = AudioLDM2Pipeline(**components) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = audioldm_pipe(**inputs) audio_1 = output.audios[0] inputs = self.get_dummy_inputs(torch_device) prompt = 3 * [inputs.pop("prompt")] text_inputs = audioldm_pipe.tokenizer( prompt, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) clap_prompt_embeds = audioldm_pipe.text_encoder.get_text_features(text_inputs) clap_prompt_embeds = clap_prompt_embeds[:, None, :] text_inputs = audioldm_pipe.tokenizer_2( prompt, padding="max_length", max_length=True, truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) t5_prompt_embeds = audioldm_pipe.text_encoder_2( text_inputs, ) t5_prompt_embeds = t5_prompt_embeds[0] projection_embeds = audioldm_pipe.projection_model(clap_prompt_embeds, t5_prompt_embeds)[0] generated_prompt_embeds = audioldm_pipe.generate_language_model(projection_embeds, max_new_tokens=8) inputs["prompt_embeds"] = t5_prompt_embeds inputs["generated_prompt_embeds"] = generated_prompt_embeds # forward output = audioldm_pipe(**inputs) audio_2 = 
output.audios[0] assert np.abs(audio_1 - audio_2).max() < 1e-2 def test_audioldm2_negative_prompt_embeds(self): components = self.get_dummy_components() audioldm_pipe = AudioLDM2Pipeline(**components) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) negative_prompt = 3 * ["this is a negative prompt"] inputs["negative_prompt"] = negative_prompt inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = audioldm_pipe(**inputs) audio_1 = output.audios[0] inputs = self.get_dummy_inputs(torch_device) prompt = 3 * [inputs.pop("prompt")] embeds = [] generated_embeds = [] for p in [prompt, negative_prompt]: text_inputs = audioldm_pipe.tokenizer( p, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) clap_prompt_embeds = audioldm_pipe.text_encoder.get_text_features(text_inputs) clap_prompt_embeds = clap_prompt_embeds[:, None, :] text_inputs = audioldm_pipe.tokenizer_2( prompt, padding="max_length", max_length=True if len(embeds) == 0 else embeds[0].shape[1], truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) t5_prompt_embeds = audioldm_pipe.text_encoder_2( text_inputs, ) t5_prompt_embeds = t5_prompt_embeds[0] projection_embeds = audioldm_pipe.projection_model(clap_prompt_embeds, t5_prompt_embeds)[0] generated_prompt_embeds = audioldm_pipe.generate_language_model(projection_embeds, max_new_tokens=8) embeds.append(t5_prompt_embeds) generated_embeds.append(generated_prompt_embeds) inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds inputs["generated_prompt_embeds"], inputs["negative_generated_prompt_embeds"] = generated_embeds # forward output = audioldm_pipe(**inputs) audio_2 = output.audios[0] assert np.abs(audio_1 - audio_2).max() < 1e-2 def test_audioldm2_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = PNDMScheduler(skip_prk_steps=True) audioldm_pipe = AudioLDM2Pipeline(**components) audioldm_pipe = audioldm_pipe.to(device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "egg cracking" output = audioldm_pipe(**inputs, negative_prompt=negative_prompt) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) == 256 audio_slice = audio[:10] expected_slice = np.array( [0.0025, 0.0018, 0.0018, -0.0023, -0.0026, -0.0020, -0.0026, -0.0021, -0.0027, -0.0020] ) assert np.abs(audio_slice - expected_slice).max() < 1e-4 def test_audioldm2_num_waveforms_per_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = PNDMScheduler(skip_prk_steps=True) audioldm_pipe = AudioLDM2Pipeline(**components) audioldm_pipe = audioldm_pipe.to(device) audioldm_pipe.set_progress_bar_config(disable=None) prompt = "A hammer hitting a wooden surface" # test num_waveforms_per_prompt=1 (default) audios = audioldm_pipe(prompt, num_inference_steps=2).audios assert audios.shape == (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts batch_size = 2 audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single prompt 
num_waveforms_per_prompt = 2 audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts batch_size = 2 audios = audioldm_pipe( [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def test_audioldm2_audio_length_in_s(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() audioldm_pipe = AudioLDM2Pipeline(**components) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate inputs = self.get_dummy_inputs(device) output = audioldm_pipe(audio_length_in_s=0.016, **inputs) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) / vocoder_sampling_rate == 0.016 output = audioldm_pipe(audio_length_in_s=0.032, **inputs) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) / vocoder_sampling_rate == 0.032 def test_audioldm2_vocoder_model_in_dim(self): components = self.get_dummy_components() audioldm_pipe = AudioLDM2Pipeline(**components) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) prompt = ["hey"] output = audioldm_pipe(prompt, num_inference_steps=1) audio_shape = output.audios.shape assert audio_shape == (1, 256) config = audioldm_pipe.vocoder.config config.model_in_dim *= 2 audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device) output = audioldm_pipe(prompt, num_inference_steps=1) audio_shape = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def test_attention_slicing_forward_pass(self): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False) @unittest.skip("Raises a not implemented error in AudioLDM2") def test_xformers_attention_forwardGenerator_pass(self): pass def test_dict_tuple_outputs_equivalent(self): # increase tolerance from 1e-4 -> 2e-4 to account for large composite model super().test_dict_tuple_outputs_equivalent(expected_max_difference=2e-4) def test_inference_batch_single_identical(self): # increase tolerance from 1e-4 -> 2e-4 to account for large composite model self._test_inference_batch_single_identical(expected_max_diff=2e-4) def test_save_load_local(self): # increase tolerance from 1e-4 -> 2e-4 to account for large composite model super().test_save_load_local(expected_max_difference=2e-4) def test_save_load_optional_components(self): # increase tolerance from 1e-4 -> 2e-4 to account for large composite model super().test_save_load_optional_components(expected_max_difference=2e-4) def test_to_dtype(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) # The method component.dtype returns the dtype of the first parameter registered in the model, not the # dtype of the entire model. 
In the case of CLAP, the first parameter is a float64 constant (logit scale) model_dtypes = {key: component.dtype for key, component in components.items() if hasattr(component, "dtype")} # Without the logit scale parameters, everything is float32 model_dtypes.pop("text_encoder") self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes.values())) # the CLAP sub-models are float32 model_dtypes["clap_text_branch"] = components["text_encoder"].text_model.dtype self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes.values())) # Once we send to fp16, all params are in half-precision, including the logit scale pipe.to(dtype=torch.float16) model_dtypes = {key: component.dtype for key, component in components.items() if hasattr(component, "dtype")} self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes.values())) def test_sequential_cpu_offload_forward_pass(self): pass @nightly class AudioLDM2PipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "A hammer hitting a wooden surface", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 2.5, } return inputs def test_audioldm2(self): audioldm_pipe = AudioLDM2Pipeline.from_pretrained("cvssp/audioldm2") audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 25 audio = audioldm_pipe(**inputs).audios[0] assert audio.ndim == 1 assert len(audio) == 81952 # check the portion of the generated audio with the largest dynamic range (reduces flakiness) audio_slice = audio[17275:17285] expected_slice = np.array([0.0791, 0.0666, 0.1158, 0.1227, 0.1171, -0.2880, -0.1940, -0.0283, -0.0126, 0.1127]) max_diff = np.abs(expected_slice - audio_slice).max() assert max_diff < 1e-3 def test_audioldm2_lms(self): audioldm_pipe = AudioLDM2Pipeline.from_pretrained("cvssp/audioldm2") audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) audio = audioldm_pipe(**inputs).audios[0] assert audio.ndim == 1 assert len(audio) == 81952 # check the portion of the generated audio with the largest dynamic range (reduces flakiness) audio_slice = audio[31390:31400] expected_slice = np.array( [-0.1318, -0.0577, 0.0446, -0.0573, 0.0659, 0.1074, -0.2600, 0.0080, -0.2190, -0.4301] ) max_diff = np.abs(expected_slice - audio_slice).max() assert max_diff < 1e-3 def test_audioldm2_large(self): audioldm_pipe = AudioLDM2Pipeline.from_pretrained("cvssp/audioldm2-large") audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) audio = audioldm_pipe(**inputs).audios[0] assert audio.ndim == 1 assert len(audio) == 81952 # check the portion of the generated audio with the largest dynamic range (reduces flakiness) audio_slice = audio[8825:8835] expected_slice = np.array( [-0.1829, -0.1461, 0.0759, -0.1493, -0.1396, 0.5783, 0.3001, -0.3038, -0.0639, -0.2244] ) max_diff = np.abs(expected_slice - 
audio_slice).max() assert max_diff < 1e-3
diffusers/tests/pipelines/audioldm2/test_audioldm2.py/0
{ "file_path": "diffusers/tests/pipelines/audioldm2/test_audioldm2.py", "repo_id": "diffusers", "token_count": 10088 }
126
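Two assertion patterns recur in the AudioLDM2 fast tests above: the waveform length must equal `audio_length_in_s * sampling_rate`, and a ten-sample slice of the output must stay within a small tolerance of a reference slice. A tiny numpy sketch of both checks with placeholder values (not real pipeline output):

```python
import numpy as np

# Placeholder stand-ins; the dummy vocoder in the tests is configured at 16 kHz.
sampling_rate = 16000
audio_length_in_s = 0.016
audio = np.zeros(int(audio_length_in_s * sampling_rate))  # 256 samples

assert audio.ndim == 1
assert len(audio) / sampling_rate == audio_length_in_s

# Slice-tolerance check, as used for the expected_slice arrays in the tests.
audio_slice = audio[:10]
expected_slice = np.zeros(10)  # placeholder reference values
assert np.abs(audio_slice - expected_slice).max() < 1e-4
```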
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from PIL import Image from transformers import AutoTokenizer, T5EncoderModel from diffusers import ( AutoPipelineForImage2Image, AutoPipelineForText2Image, Kandinsky3Pipeline, Kandinsky3UNet, VQModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.schedulers.scheduling_ddpm import DDPMScheduler from diffusers.utils.testing_utils import ( enable_full_determinism, load_image, require_torch_gpu, slow, ) from ..pipeline_params import ( TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class Kandinsky3PipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = Kandinsky3Pipeline params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS test_xformers_attention = False @property def dummy_movq_kwargs(self): return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def dummy_movq(self): torch.manual_seed(0) model = VQModel(**self.dummy_movq_kwargs) return model def get_dummy_components(self, time_cond_proj_dim=None): torch.manual_seed(0) unet = Kandinsky3UNet( in_channels=4, time_embedding_dim=4, groups=2, attention_head_dim=4, layers_per_block=3, block_out_channels=(32, 64), cross_attention_dim=4, encoder_hid_dim=32, ) scheduler = DDPMScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="squaredcos_cap_v2", clip_sample=True, thresholding=False, ) torch.manual_seed(0) movq = self.dummy_movq torch.manual_seed(0) text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") torch.manual_seed(0) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") components = { "unet": unet, "scheduler": scheduler, "movq": movq, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", "width": 16, "height": 16, } return inputs def test_kandinsky3(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) 
pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 16, 16, 3) expected_slice = np.array([0.3768, 0.4373, 0.4865, 0.4890, 0.4299, 0.5122, 0.4921, 0.4924, 0.5599]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" def test_float16_inference(self): super().test_float16_inference(expected_max_diff=1e-1) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=1e-2) @slow @require_torch_gpu class Kandinsky3PipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_kandinskyV3(self): pipe = AutoPipelineForText2Image.from_pretrained( "kandinsky-community/kandinsky-3", variant="fp16", torch_dtype=torch.float16 ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) prompt = "A photograph of the inside of a subway train. There are raccoons sitting on the seats. One of them is reading a newspaper. The window shows the city in the background." generator = torch.Generator(device="cpu").manual_seed(0) image = pipe(prompt, num_inference_steps=25, generator=generator).images[0] assert image.size == (1024, 1024) expected_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky3/t2i.png" ) image_processor = VaeImageProcessor() image_np = image_processor.pil_to_numpy(image) expected_image_np = image_processor.pil_to_numpy(expected_image) self.assertTrue(np.allclose(image_np, expected_image_np, atol=5e-2)) def test_kandinskyV3_img2img(self): pipe = AutoPipelineForImage2Image.from_pretrained( "kandinsky-community/kandinsky-3", variant="fp16", torch_dtype=torch.float16 ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky3/t2i.png" ) w, h = 512, 512 image = image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1) prompt = "A painting of the inside of a subway train with tiny raccoons." image = pipe(prompt, image=image, strength=0.75, num_inference_steps=25, generator=generator).images[0] assert image.size == (512, 512) expected_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky3/i2i.png" ) image_processor = VaeImageProcessor() image_np = image_processor.pil_to_numpy(image) expected_image_np = image_processor.pil_to_numpy(expected_image) self.assertTrue(np.allclose(image_np, expected_image_np, atol=5e-2))
diffusers/tests/pipelines/kandinsky3/test_kandinsky3.py/0
{ "file_path": "diffusers/tests/pipelines/kandinsky3/test_kandinsky3.py", "repo_id": "diffusers", "token_count": 3516 }
127
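The Kandinsky 3 integration tests above compare generated and reference images by converting both to numpy with `VaeImageProcessor.pil_to_numpy` and checking `np.allclose` with `atol=5e-2`. A self-contained sketch of that comparison, using synthetic images in place of the downloaded references:

```python
import numpy as np
from PIL import Image

from diffusers.image_processor import VaeImageProcessor

# Synthetic stand-ins for the generated image and the reference image.
image = Image.new("RGB", (64, 64), color=(128, 128, 128))
expected_image = Image.new("RGB", (64, 64), color=(130, 127, 126))

image_processor = VaeImageProcessor()
image_np = image_processor.pil_to_numpy(image)  # float array in [0, 1] with a batch axis
expected_image_np = image_processor.pil_to_numpy(expected_image)

# atol=5e-2 tolerates small per-pixel drift across hardware and library versions.
assert np.allclose(image_np, expected_image_np, atol=5e-2)
```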
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import numpy as np from diffusers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionPipeline, PNDMScheduler, ) from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase): hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline" def get_dummy_inputs(self, seed=0): generator = np.random.RandomState(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 7.5, "output_type": "np", } return inputs def test_pipeline_default_ddim(self): pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_pipeline_pndm(self): pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_pipeline_lms(self): pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_pipeline_euler(self): pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.53755, 
0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_pipeline_euler_ancestral(self): pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_pipeline_dpm_multistep(self): pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_prompt_embeds(self): pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] inputs = self.get_dummy_inputs() prompt = 3 * [inputs.pop("prompt")] text_inputs = pipe.tokenizer( prompt, padding="max_length", max_length=pipe.tokenizer.model_max_length, truncation=True, return_tensors="np", ) text_inputs = text_inputs["input_ids"] prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0] inputs["prompt_embeds"] = prompt_embeds # forward output = pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 def test_stable_diffusion_negative_prompt_embeds(self): pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() negative_prompt = 3 * ["this is a negative prompt"] inputs["negative_prompt"] = negative_prompt inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] inputs = self.get_dummy_inputs() prompt = 3 * [inputs.pop("prompt")] embeds = [] for p in [prompt, negative_prompt]: text_inputs = pipe.tokenizer( p, padding="max_length", max_length=pipe.tokenizer.model_max_length, truncation=True, return_tensors="np", ) text_inputs = text_inputs["input_ids"] embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]) inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds # forward output = pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 @nightly @require_onnxruntime @require_torch_gpu class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase): @property def gpu_provider(self): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def gpu_options(self): 
options = ort.SessionOptions() options.enable_mem_pattern = False return options def test_inference_default_pndm(self): # using the PNDM scheduler by default sd_pipe = OnnxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, ) sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" np.random.seed(0) output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np") image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_inference_ddim(self): ddim_scheduler = DDIMScheduler.from_pretrained( "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx" ) sd_pipe = OnnxStableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=ddim_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, ) sd_pipe.set_progress_bar_config(disable=None) prompt = "open neural network exchange" generator = np.random.RandomState(0) output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np") image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_inference_k_lms(self): lms_scheduler = LMSDiscreteScheduler.from_pretrained( "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx" ) sd_pipe = OnnxStableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, ) sd_pipe.set_progress_bar_config(disable=None) prompt = "open neural network exchange" generator = np.random.RandomState(0) output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np") image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_intermediate_state(self): number_of_steps = 0 def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None: test_callback_fn.has_been_called = True nonlocal number_of_steps number_of_steps += 1 if step == 0: assert latents.shape == (1, 4, 64, 64) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array( [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167] ) assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3 elif step == 5: assert latents.shape == (1, 4, 64, 64) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array( [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875] ) assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3 test_callback_fn.has_been_called = False pipe = OnnxStableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", revision="onnx", safety_checker=None, 
feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=None) prompt = "Andromeda galaxy in a bottle" generator = np.random.RandomState(0) pipe( prompt=prompt, num_inference_steps=5, guidance_scale=7.5, generator=generator, callback=test_callback_fn, callback_steps=1, ) assert test_callback_fn.has_been_called assert number_of_steps == 6 def test_stable_diffusion_no_safety_checker(self): pipe = OnnxStableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, ) assert isinstance(pipe, OnnxStableDiffusionPipeline) assert pipe.safety_checker is None image = pipe("example prompt", num_inference_steps=2).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(tmpdirname) pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname) # sanity check that the pipeline still works assert pipe.safety_checker is None image = pipe("example prompt", num_inference_steps=2).images[0] assert image is not None
diffusers/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion.py", "repo_id": "diffusers", "token_count": 6720 }
128
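The nightly ONNX tests above configure onnxruntime explicitly: an execution-provider tuple with CUDA-specific options, plus `ort.SessionOptions` with memory-pattern optimization disabled. A short sketch of just that configuration step (whether the 15 GB arena limit suits your GPU is an assumption):

```python
import onnxruntime as ort

# Provider tuple: provider name plus provider-specific options, as in the tests.
gpu_provider = (
    "CUDAExecutionProvider",
    {
        "gpu_mem_limit": "15000000000",  # 15 GB memory arena limit
        "arena_extend_strategy": "kSameAsRequested",
    },
)

# Session options with the memory-pattern optimization turned off.
gpu_options = ort.SessionOptions()
gpu_options.enable_mem_pattern = False

# Both objects are then forwarded to the pipeline constructor, e.g.:
# OnnxStableDiffusionPipeline.from_pretrained(..., provider=gpu_provider, sess_options=gpu_options)
```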
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AutoencoderKL, EulerDiscreteScheduler, StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline, UNet2DConditionModel, ) from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() def check_same_shape(tensor_list): shapes = [tensor.shape for tensor in tensor_list] return all(shape == shapes[0] for shape in shapes[1:]) class StableDiffusionLatentUpscalePipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionLatentUpscalePipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { "height", "width", "cross_attention_kwargs", "negative_prompt_embeds", "prompt_embeds", } required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess image_latents_params = frozenset([]) @property def dummy_image(self): batch_size = 1 num_channels = 4 sizes = (16, 16) image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device) return image def get_dummy_components(self): torch.manual_seed(0) model = UNet2DConditionModel( act_fn="gelu", attention_head_dim=8, norm_num_groups=None, block_out_channels=[32, 32, 64, 64], time_cond_proj_dim=160, conv_in_kernel=1, conv_out_kernel=1, cross_attention_dim=32, down_block_types=( "KDownBlock2D", "KCrossAttnDownBlock2D", "KCrossAttnDownBlock2D", "KCrossAttnDownBlock2D", ), in_channels=8, mid_block_type=None, only_cross_attention=False, out_channels=5, resnet_time_scale_shift="scale_shift", time_embedding_type="fourier", timestep_post_act="gelu", up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"), ) vae = AutoencoderKL( block_out_channels=[32, 32, 64, 64], in_channels=3, out_channels=3, down_block_types=[ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", ], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) scheduler = EulerDiscreteScheduler(prediction_type="sample") text_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, 
num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="quick_gelu", projection_dim=512, ) text_encoder = CLIPTextModel(text_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": model.eval(), "vae": vae.eval(), "scheduler": scheduler, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": self.dummy_image.cpu(), "generator": generator, "num_inference_steps": 2, "output_type": "np", } return inputs def test_inference(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] self.assertEqual(image.shape, (1, 256, 256, 3)) expected_slice = np.array( [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] ) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=7e-3) def test_sequential_cpu_offload_forward_pass(self): super().test_sequential_cpu_offload_forward_pass(expected_max_diff=3e-3) def test_dict_tuple_outputs_equivalent(self): super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=7e-3) def test_pt_np_pil_outputs_equivalent(self): super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3) def test_save_load_local(self): super().test_save_load_local(expected_max_difference=3e-3) def test_save_load_optional_components(self): super().test_save_load_optional_components(expected_max_difference=3e-3) def test_karras_schedulers_shape(self): skip_schedulers = [ "DDIMScheduler", "DDPMScheduler", "PNDMScheduler", "HeunDiscreteScheduler", "EulerAncestralDiscreteScheduler", "KDPM2DiscreteScheduler", "KDPM2AncestralDiscreteScheduler", "DPMSolverSDEScheduler", "EDMEulerScheduler", ] components = self.get_dummy_components() pipe = self.pipeline_class(**components) # make sure that PNDM does not need warm-up pipe.scheduler.register_to_config(skip_prk_steps=True) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = 2 outputs = [] for scheduler_enum in KarrasDiffusionSchedulers: if scheduler_enum.name in skip_schedulers: # no sigma schedulers are not supported # no schedulers continue scheduler_cls = getattr(diffusers, scheduler_enum.name) pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config) output = pipe(**inputs)[0] outputs.append(output) assert check_same_shape(outputs) def test_float16_inference(self): super().test_float16_inference(expected_max_diff=5e-1) @require_torch_gpu @slow class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_latent_upscaler_fp16(self): generator = torch.manual_seed(33) pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16) 
pipe.to("cuda") upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained( "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16 ) upscaler.to("cuda") prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic" low_res_latents = pipe(prompt, generator=generator, output_type="latent").images image = upscaler( prompt=prompt, image=low_res_latents, num_inference_steps=20, guidance_scale=0, generator=generator, output_type="np", ).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" ) assert np.abs((expected_image - image).mean()) < 5e-2 def test_latent_upscaler_fp16_image(self): generator = torch.manual_seed(33) upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained( "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16 ) upscaler.to("cuda") prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas" low_res_img = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" ) image = upscaler( prompt=prompt, image=low_res_img, num_inference_steps=20, guidance_scale=0, generator=generator, output_type="np", ).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" ) assert np.abs((expected_image - image).max()) < 5e-2
diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_latent_upscale.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_latent_upscale.py", "repo_id": "diffusers", "token_count": 5008 }
129
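The slow test above chains two pipelines: the base Stable Diffusion pipeline is asked for latents (`output_type="latent"`), and the x2 latent upscaler then turns those latents into a higher-resolution image. A condensed sketch of that flow, mirroring the test (it assumes a CUDA GPU and downloads both checkpoints):

```python
import torch

from diffusers import StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")
upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
    "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
).to("cuda")

prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
generator = torch.manual_seed(33)

# Stage 1: keep the base pipeline's output in latent space.
low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

# Stage 2: the upscaler consumes the latents and decodes a 1024x1024 image.
image = upscaler(
    prompt=prompt,
    image=low_res_latents,
    num_inference_steps=20,
    guidance_scale=0,
    generator=generator,
).images[0]
image.save("astronaut_1024.png")
```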
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPanoramaPipeline, UNet2DConditionModel, ) from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, skip_mps, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase): pipeline_class = StableDiffusionPanoramaPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = DDIMScheduler() torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): generator = torch.manual_seed(seed) inputs = { "prompt": "a photo of the dolomites", "generator": generator, # Setting height and width to None to prevent OOMs on CPU. 
"height": None, "width": None, "num_inference_steps": 1, "guidance_scale": 6.0, "output_type": "np", } return inputs def test_stable_diffusion_panorama_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPanoramaPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_panorama_circular_padding_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPanoramaPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs, circular_padding=True).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6127, 0.6299, 0.4595, 0.4051, 0.4543, 0.3925, 0.5510, 0.5693, 0.5031]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 # override to speed the overall test timing up. def test_inference_batch_consistent(self): super().test_inference_batch_consistent(batch_sizes=[1, 2]) # override to speed the overall test timing up. def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=5.0e-3) def test_float16_inference(self): super().test_float16_inference(expected_max_diff=1e-1) def test_stable_diffusion_panorama_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPanoramaPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "french fries" output = sd_pipe(**inputs, negative_prompt=negative_prompt) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_panorama_views_batch(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPanoramaPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs, view_batch_size=2) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_panorama_views_batch_circular_padding(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPanoramaPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs, 
circular_padding=True, view_batch_size=2) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6127, 0.6299, 0.4595, 0.4051, 0.4543, 0.3925, 0.5510, 0.5693, 0.5031]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_panorama_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = EulerAncestralDiscreteScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear" ) sd_pipe = StableDiffusionPanoramaPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_panorama_pndm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = PNDMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True ) sd_pipe = StableDiffusionPanoramaPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @nightly @require_torch_gpu class StableDiffusionPanoramaNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, seed=0): generator = torch.manual_seed(seed) inputs = { "prompt": "a photo of the dolomites", "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "np", } return inputs def test_stable_diffusion_panorama_default(self): model_ckpt = "stabilityai/stable-diffusion-2-base" scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler") pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) expected_slice = np.array( [ 0.36968392, 0.27025372, 0.32446766, 0.28379387, 0.36363274, 0.30733347, 0.27100027, 0.27054125, 0.25536096, ] ) assert np.abs(expected_slice - image_slice).max() < 1e-2 def test_stable_diffusion_panorama_k_lms(self): pipe = StableDiffusionPanoramaPipeline.from_pretrained( "stabilityai/stable-diffusion-2-base", safety_checker=None ) pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.unet.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) expected_slice = np.array( [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ] ] ) assert np.abs(expected_slice - image_slice).max() < 
1e-2 def test_stable_diffusion_panorama_intermediate_state(self): number_of_steps = 0 def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None: callback_fn.has_been_called = True nonlocal number_of_steps number_of_steps += 1 if step == 1: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array( [ 0.18681869, 0.33907816, 0.5361276, 0.14432865, -0.02856611, -0.73941123, 0.23397987, 0.47322682, -0.37823164, ] ) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 elif step == 2: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array( [ 0.18539645, 0.33987248, 0.5378559, 0.14437142, -0.02455261, -0.7338317, 0.23990755, 0.47356272, -0.3786505, ] ) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 callback_fn.has_been_called = False model_ckpt = "stabilityai/stable-diffusion-2-base" scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler") pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() pipe(**inputs, callback=callback_fn, callback_steps=1) assert callback_fn.has_been_called assert number_of_steps == 3 def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() model_ckpt = "stabilityai/stable-diffusion-2-base" scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler") pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() inputs = self.get_inputs() _ = pipe(**inputs) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 5.2 GB is allocated assert mem_bytes < 5.5 * 10**9
diffusers/tests/pipelines/stable_diffusion_panorama/test_stable_diffusion_panorama.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_panorama/test_stable_diffusion_panorama.py", "repo_id": "diffusers", "token_count": 7594 }
130
import gc import random import tempfile import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModelWithProjection, ) import diffusers from diffusers import ( AutoencoderKLTemporalDecoder, EulerDiscreteScheduler, StableVideoDiffusionPipeline, UNetSpatioTemporalConditionModel, ) from diffusers.utils import is_accelerate_available, is_accelerate_version, load_image, logging from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( CaptureLogger, enable_full_determinism, floats_tensor, numpy_cosine_similarity_distance, require_torch_gpu, slow, torch_device, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() def to_np(tensor): if isinstance(tensor, torch.Tensor): tensor = tensor.detach().cpu().numpy() return tensor class StableVideoDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = StableVideoDiffusionPipeline params = frozenset(["image"]) batch_params = frozenset(["image", "generator"]) required_optional_params = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", ] ) def get_dummy_components(self): torch.manual_seed(0) unet = UNetSpatioTemporalConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=( "CrossAttnDownBlockSpatioTemporal", "DownBlockSpatioTemporal", ), up_block_types=("UpBlockSpatioTemporal", "CrossAttnUpBlockSpatioTemporal"), cross_attention_dim=32, num_attention_heads=8, projection_class_embeddings_input_dim=96, addition_time_embed_dim=32, ) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", interpolation_type="linear", num_train_timesteps=1000, prediction_type="v_prediction", sigma_max=700.0, sigma_min=0.002, steps_offset=1, timestep_spacing="leading", timestep_type="continuous", trained_betas=None, use_karras_sigmas=True, ) torch.manual_seed(0) vae = AutoencoderKLTemporalDecoder( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) config = CLIPVisionConfig( hidden_size=32, projection_dim=32, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ) image_encoder = CLIPVisionModelWithProjection(config) torch.manual_seed(0) feature_extractor = CLIPImageProcessor(crop_size=32, size=32) components = { "unet": unet, "image_encoder": image_encoder, "scheduler": scheduler, "vae": vae, "feature_extractor": feature_extractor, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device="cpu").manual_seed(seed) image = floats_tensor((1, 3, 32, 32), rng=random.Random(0)).to(device) inputs = { "generator": generator, "image": image, "num_inference_steps": 2, "output_type": "pt", "min_guidance_scale": 1.0, "max_guidance_scale": 2.5, "num_frames": 2, "height": 32, "width": 32, } return inputs @unittest.skip("Deprecated functionality") def test_attention_slicing_forward_pass(self): pass @unittest.skip("Batched inference works and outputs look correct, but the test is failing") def test_inference_batch_single_identical( self, batch_size=2, expected_max_diff=1e-4, ): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for components in pipe.components.values(): if 
hasattr(components, "set_default_attn_processor"): components.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) # Reset generator in case it is has been used in self.get_dummy_inputs inputs["generator"] = torch.Generator("cpu").manual_seed(0) logger = logging.get_logger(pipe.__module__) logger.setLevel(level=diffusers.logging.FATAL) # batchify inputs batched_inputs = {} batched_inputs.update(inputs) batched_inputs["generator"] = [torch.Generator("cpu").manual_seed(0) for i in range(batch_size)] batched_inputs["image"] = torch.cat([inputs["image"]] * batch_size, dim=0) output = pipe(**inputs).frames output_batch = pipe(**batched_inputs).frames assert len(output_batch) == batch_size max_diff = np.abs(to_np(output_batch[0]) - to_np(output[0])).max() assert max_diff < expected_max_diff @unittest.skip("Test is similar to test_inference_batch_single_identical") def test_inference_batch_consistent(self): pass def test_np_output_type(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) inputs["output_type"] = "np" output = pipe(**inputs).frames self.assertTrue(isinstance(output, np.ndarray)) self.assertEqual(len(output.shape), 5) def test_dict_tuple_outputs_equivalent(self, expected_max_difference=1e-4): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator_device = "cpu" output = pipe(**self.get_dummy_inputs(generator_device)).frames[0] output_tuple = pipe(**self.get_dummy_inputs(generator_device), return_dict=False)[0] max_diff = np.abs(to_np(output) - to_np(output_tuple)).max() self.assertLess(max_diff, expected_max_difference) @unittest.skip("Test is currently failing") def test_float16_inference(self, expected_max_diff=5e-2): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) components = self.get_dummy_components() pipe_fp16 = self.pipeline_class(**components) for component in pipe_fp16.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe_fp16.to(torch_device, torch.float16) pipe_fp16.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs).frames[0] fp16_inputs = self.get_dummy_inputs(torch_device) output_fp16 = pipe_fp16(**fp16_inputs).frames[0] max_diff = np.abs(to_np(output) - to_np(output_fp16)).max() self.assertLess(max_diff, expected_max_diff, "The outputs of the fp16 and fp32 pipelines are too different.") @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA") def test_save_load_float16(self, expected_max_diff=1e-2): components = self.get_dummy_components() for name, module in components.items(): if hasattr(module, "half"): components[name] = module.to(torch_device).half() pipe = 
self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs).frames[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, torch_dtype=torch.float16) for component in pipe_loaded.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for name, component in pipe_loaded.components.items(): if hasattr(component, "dtype"): self.assertTrue( component.dtype == torch.float16, f"`{name}.dtype` switched from `float16` to {component.dtype} after loading.", ) inputs = self.get_dummy_inputs(torch_device) output_loaded = pipe_loaded(**inputs).frames[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess( max_diff, expected_max_diff, "The output of the fp16 pipeline changed after saving and loading." ) def test_save_load_optional_components(self, expected_max_difference=1e-4): if not hasattr(self.pipeline_class, "_optional_components"): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) # set all optional components to None for optional_component in pipe._optional_components: setattr(pipe, optional_component, None) generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) output = pipe(**inputs).frames[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir, safe_serialization=False) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) for component in pipe_loaded.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for optional_component in pipe._optional_components: self.assertTrue( getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", ) inputs = self.get_dummy_inputs(generator_device) output_loaded = pipe_loaded(**inputs).frames[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, expected_max_difference) def test_save_load_local(self, expected_max_difference=9e-4): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs).frames[0] logger = logging.get_logger("diffusers.pipelines.pipeline_utils") logger.setLevel(diffusers.logging.INFO) with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir, safe_serialization=False) with CaptureLogger(logger) as cap_logger: pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) for name in pipe_loaded.components.keys(): if name not in pipe_loaded._optional_components: assert name in str(cap_logger) pipe_loaded.to(torch_device) 
pipe_loaded.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output_loaded = pipe_loaded(**inputs).frames[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, expected_max_difference) @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices") def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to("cpu") model_devices = [ component.device.type for component in pipe.components.values() if hasattr(component, "device") ] self.assertTrue(all(device == "cpu" for device in model_devices)) output_cpu = pipe(**self.get_dummy_inputs("cpu")).frames[0] self.assertTrue(np.isnan(output_cpu).sum() == 0) pipe.to("cuda") model_devices = [ component.device.type for component in pipe.components.values() if hasattr(component, "device") ] self.assertTrue(all(device == "cuda" for device in model_devices)) output_cuda = pipe(**self.get_dummy_inputs("cuda")).frames[0] self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0) def test_to_dtype(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes)) pipe.to(dtype=torch.float16) model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes)) @unittest.skipIf( torch_device != "cuda" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"), reason="CPU offload is only available with CUDA and `accelerate v0.14.0` or higher", ) def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) output_without_offload = pipe(**inputs).frames[0] pipe.enable_sequential_cpu_offload() inputs = self.get_dummy_inputs(generator_device) output_with_offload = pipe(**inputs).frames[0] max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max() self.assertLess(max_diff, expected_max_diff, "CPU offloading should not affect the inference results") @unittest.skipIf( torch_device != "cuda" or not is_accelerate_available() or is_accelerate_version("<", "0.17.0"), reason="CPU offload is only available with CUDA and `accelerate v0.17.0` or higher", ) def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4): generator_device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(generator_device) output_without_offload = pipe(**inputs).frames[0] pipe.enable_model_cpu_offload() inputs = self.get_dummy_inputs(generator_device) output_with_offload = pipe(**inputs).frames[0] max_diff = np.abs(to_np(output_with_offload) - 
to_np(output_without_offload)).max() self.assertLess(max_diff, expected_max_diff, "CPU offloading should not affect the inference results") offloaded_modules = [ v for k, v in pipe.components.items() if isinstance(v, torch.nn.Module) and k not in pipe._exclude_from_cpu_offload ] ( self.assertTrue(all(v.device.type == "cpu" for v in offloaded_modules)), f"Not offloaded: {[v for v in offloaded_modules if v.device.type != 'cpu']}", ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): expected_max_diff = 9e-4 if not self.test_xformers_attention: return components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output_without_offload = pipe(**inputs).frames[0] output_without_offload = ( output_without_offload.cpu() if torch.is_tensor(output_without_offload) else output_without_offload ) pipe.enable_xformers_memory_efficient_attention() inputs = self.get_dummy_inputs(torch_device) output_with_offload = pipe(**inputs).frames[0] output_with_offload = ( output_with_offload.cpu() if torch.is_tensor(output_with_offload) else output_without_offload ) max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max() self.assertLess(max_diff, expected_max_diff, "XFormers attention should not affect the inference results") def test_disable_cfg(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) inputs["max_guidance_scale"] = 1.0 output = pipe(**inputs).frames self.assertEqual(len(output.shape), 5) @slow @require_torch_gpu class StableVideoDiffusionPipelineSlowTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_sd_video(self): pipe = StableVideoDiffusionPipeline.from_pretrained( "stabilityai/stable-video-diffusion-img2vid", variant="fp16", torch_dtype=torch.float16, ) pipe = pipe.to(torch_device) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/cat_6.png?download=true" ) generator = torch.Generator("cpu").manual_seed(0) num_frames = 3 output = pipe( image=image, num_frames=num_frames, generator=generator, num_inference_steps=3, output_type="np", ) image = output.frames[0] assert image.shape == (num_frames, 576, 1024, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.8592, 0.8645, 0.8499, 0.8722, 0.8769, 0.8421, 0.8557, 0.8528, 0.8285]) assert numpy_cosine_similarity_distance(image_slice.flatten(), expected_slice.flatten()) < 1e-3
diffusers/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py/0
{ "file_path": "diffusers/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py", "repo_id": "diffusers", "token_count": 9657 }
131
import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils.testing_utils import require_torchsde, torch_device from .test_schedulers import SchedulerCommonTest @require_torchsde class DPMSolverSDESchedulerTest(SchedulerCommonTest): scheduler_classes = (DPMSolverSDEScheduler,) num_inference_steps = 10 def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 1100, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "noise_sampler_seed": 0, } config.update(**kwargs) return config def test_timesteps(self): for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=timesteps) def test_betas(self): for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]): self.check_over_configs(beta_start=beta_start, beta_end=beta_end) def test_schedules(self): for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=schedule) def test_prediction_type(self): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=prediction_type) def test_full_loop_no_noise(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(self.num_inference_steps) model = self.dummy_model() sample = self.dummy_sample_deter * scheduler.init_noise_sigma sample = sample.to(torch_device) for i, t in enumerate(scheduler.timesteps): sample = scheduler.scale_model_input(sample, t) model_output = model(sample, t) output = scheduler.step(model_output, t, sample) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.47821044921875) < 1e-2 assert abs(result_mean.item() - 0.2178705964565277) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59352111816406) < 1e-2 assert abs(result_mean.item() - 0.22342906892299652) < 1e-3 else: assert abs(result_sum.item() - 162.52383422851562) < 1e-2 assert abs(result_mean.item() - 0.211619570851326) < 1e-3 def test_full_loop_with_v_prediction(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(prediction_type="v_prediction") scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(self.num_inference_steps) model = self.dummy_model() sample = self.dummy_sample_deter * scheduler.init_noise_sigma sample = sample.to(torch_device) for i, t in enumerate(scheduler.timesteps): sample = scheduler.scale_model_input(sample, t) model_output = model(sample, t) output = scheduler.step(model_output, t, sample) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) if torch_device in ["mps"]: assert abs(result_sum.item() - 124.77149200439453) < 1e-2 assert abs(result_mean.item() - 0.16226289014816284) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 128.1663360595703) < 1e-2 assert abs(result_mean.item() - 0.16688326001167297) < 1e-3 else: assert abs(result_sum.item() - 119.8487548828125) < 1e-2 assert abs(result_mean.item() - 0.1560530662536621) < 1e-3 def test_full_loop_device(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(self.num_inference_steps, device=torch_device) model = self.dummy_model() sample = self.dummy_sample_deter.to(torch_device) * 
scheduler.init_noise_sigma for t in scheduler.timesteps: sample = scheduler.scale_model_input(sample, t) model_output = model(sample, t) output = scheduler.step(model_output, t, sample) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.46957397460938) < 1e-2 assert abs(result_mean.item() - 0.21805934607982635) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59353637695312) < 1e-2 assert abs(result_mean.item() - 0.22342908382415771) < 1e-3 else: assert abs(result_sum.item() - 162.52383422851562) < 1e-2 assert abs(result_mean.item() - 0.211619570851326) < 1e-3 def test_full_loop_device_karras_sigmas(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True) scheduler.set_timesteps(self.num_inference_steps, device=torch_device) model = self.dummy_model() sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma sample = sample.to(torch_device) for t in scheduler.timesteps: sample = scheduler.scale_model_input(sample, t) model_output = model(sample, t) output = scheduler.step(model_output, t, sample) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) if torch_device in ["mps"]: assert abs(result_sum.item() - 176.66974135742188) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811) < 1e-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 177.63653564453125) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811) < 1e-2 else: assert abs(result_sum.item() - 170.3135223388672) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
diffusers/tests/schedulers/test_scheduler_dpm_sde.py/0
{ "file_path": "diffusers/tests/schedulers/test_scheduler_dpm_sde.py", "repo_id": "diffusers", "token_count": 3050 }
132
import torch from diffusers import TCDScheduler from .test_schedulers import SchedulerCommonTest class TCDSchedulerTest(SchedulerCommonTest): scheduler_classes = (TCDScheduler,) forward_default_kwargs = (("num_inference_steps", 10),) def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 1000, "beta_start": 0.00085, "beta_end": 0.0120, "beta_schedule": "scaled_linear", "prediction_type": "epsilon", } config.update(**kwargs) return config @property def default_num_inference_steps(self): return 10 @property def default_valid_timestep(self): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) scheduler_config = self.get_scheduler_config() scheduler = self.scheduler_classes[0](**scheduler_config) scheduler.set_timesteps(num_inference_steps) timestep = scheduler.timesteps[-1] return timestep def test_timesteps(self): for timesteps in [100, 500, 1000]: # 0 is not guaranteed to be in the timestep schedule, but timesteps - 1 is self.check_over_configs(time_step=timesteps - 1, num_train_timesteps=timesteps) def test_betas(self): for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]): self.check_over_configs(time_step=self.default_valid_timestep, beta_start=beta_start, beta_end=beta_end) def test_schedules(self): for schedule in ["linear", "scaled_linear", "squaredcos_cap_v2"]: self.check_over_configs(time_step=self.default_valid_timestep, beta_schedule=schedule) def test_prediction_type(self): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(time_step=self.default_valid_timestep, prediction_type=prediction_type) def test_clip_sample(self): for clip_sample in [True, False]: self.check_over_configs(time_step=self.default_valid_timestep, clip_sample=clip_sample) def test_thresholding(self): self.check_over_configs(time_step=self.default_valid_timestep, thresholding=False) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( time_step=self.default_valid_timestep, thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, ) def test_time_indices(self): # Get default timestep schedule. 
kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) scheduler_config = self.get_scheduler_config() scheduler = self.scheduler_classes[0](**scheduler_config) scheduler.set_timesteps(num_inference_steps) timesteps = scheduler.timesteps for t in timesteps: self.check_over_forward(time_step=t) def test_inference_steps(self): # Hardcoded for now for t, num_inference_steps in zip([99, 39, 39, 19], [10, 25, 26, 50]): self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps) def full_loop(self, num_inference_steps=10, seed=0, **config): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) eta = 0.0 # refer to gamma in the paper model = self.dummy_model() sample = self.dummy_sample_deter generator = torch.manual_seed(seed) scheduler.set_timesteps(num_inference_steps) for t in scheduler.timesteps: residual = model(sample, t) sample = scheduler.step(residual, t, sample, eta, generator).prev_sample return sample def test_full_loop_onestep_deter(self): sample = self.full_loop(num_inference_steps=1) result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 29.8715) < 1e-3 # 0.0778918 assert abs(result_mean.item() - 0.0389) < 1e-3 def test_full_loop_multistep_deter(self): sample = self.full_loop(num_inference_steps=10) result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 181.2040) < 1e-3 assert abs(result_mean.item() - 0.2359) < 1e-3 def test_custom_timesteps(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) timesteps = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=timesteps) scheduler_timesteps = scheduler.timesteps for i, timestep in enumerate(scheduler_timesteps): if i == len(timesteps) - 1: expected_prev_t = -1 else: expected_prev_t = timesteps[i + 1] prev_t = scheduler.previous_timestep(timestep) prev_t = prev_t.item() self.assertEqual(prev_t, expected_prev_t) def test_custom_timesteps_increasing_order(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) timesteps = [100, 87, 50, 51, 0] with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."): scheduler.set_timesteps(timesteps=timesteps) def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) timesteps = [100, 87, 50, 1, 0] num_inference_steps = len(timesteps) with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."): scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps) def test_custom_timesteps_too_large(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) timesteps = [scheduler.config.num_train_timesteps] with self.assertRaises( ValueError, msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}", ): scheduler.set_timesteps(timesteps=timesteps)
diffusers/tests/schedulers/test_scheduler_tcd.py/0
{ "file_path": "diffusers/tests/schedulers/test_scheduler_tcd.py", "repo_id": "diffusers", "token_count": 3098 }
133
# coding=utf-8 # Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import requests # Configuration LIBRARY_NAME = "diffusers" GITHUB_REPO = "huggingface/diffusers" SLACK_WEBHOOK_URL = os.getenv("SLACK_WEBHOOK_URL") def check_pypi_for_latest_release(library_name): """Check PyPI for the latest release of the library.""" response = requests.get(f"https://pypi.org/pypi/{library_name}/json") if response.status_code == 200: data = response.json() return data["info"]["version"] else: print("Failed to fetch library details from PyPI.") return None def get_github_release_info(github_repo): """Fetch the latest release info from GitHub.""" url = f"https://api.github.com/repos/{github_repo}/releases/latest" response = requests.get(url) if response.status_code == 200: data = response.json() return {"tag_name": data["tag_name"], "url": data["html_url"], "release_time": data["published_at"]} else: print("Failed to fetch release info from GitHub.") return None def notify_slack(webhook_url, library_name, version, release_info): """Send a notification to a Slack channel.""" message = ( f"🚀 New release for {library_name} available: version **{version}** 🎉\n" f"📜 Release Notes: {release_info['url']}\n" f"⏱️ Release time: {release_info['release_time']}" ) payload = {"text": message} response = requests.post(webhook_url, json=payload) if response.status_code == 200: print("Notification sent to Slack successfully.") else: print("Failed to send notification to Slack.") def main(): latest_version = check_pypi_for_latest_release(LIBRARY_NAME) release_info = get_github_release_info(GITHUB_REPO) parsed_version = release_info["tag_name"].replace("v", "") if latest_version and release_info and latest_version == parsed_version: notify_slack(SLACK_WEBHOOK_URL, LIBRARY_NAME, latest_version, release_info) else: raise ValueError("There were some problems.") if __name__ == "__main__": main()
diffusers/utils/notify_slack_about_release.py/0
{ "file_path": "diffusers/utils/notify_slack_about_release.py", "repo_id": "diffusers", "token_count": 952 }
134
# Introduction to 🤗 Diffusers <CourseFloatingBanner unit={1} classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Introduction to Diffusers", value: "https://colab.research.google.com/github/huggingface/diffusion-models-class/blob/main/units/en/unit1/introduction_to_diffusers.ipynb"}, {label: "Introduction to Diffusers", value: "https://studiolab.sagemaker.aws/import/github/huggingface/diffusion-models-class/blob/main/units/en/unit1/introduction_to_diffusers.ipynb"}, ]} /> In this notebook, you'll train your first diffusion model to generate images of cute butterflies 🦋. Along the way, you'll learn about the core components of the 🤗 Diffusers library, which will provide a good foundation for the more advanced applications that we'll cover later in the course. Let's dive in! ## What You Will Learn In this notebook you will: - See a powerful custom diffusion model pipeline in action (with information on how to make your own version) - Create your own mini pipeline by: - Recapping the core ideas behind diffusion models - Loading in data from the Hub for training - Exploring how we add noise to this data with a scheduler - Creating and training the UNet model - Putting the pieces together into a working pipeline - Edit and run a script for initializing longer training runs, that will handle - Multi-GPU training via 🤗 Accelerate - Experiment logging to track critical stats - Uploading the final model to the Hugging Face Hub ## Setup Run the following cell to install the diffusers library as well as a few other requirements: ```py %pip install -qq -U diffusers datasets transformers accelerate ftfy pyarrow==9.0.0 ``` Next, head over to https://huggingface.co/settings/tokens and create an access token with write permission if you don't already have one: <div class="flex justify-center"> <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary.svg" alt="Bref aperçu du contenu du cours."> <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary-dark.svg" alt="Bref aperçu des différents chapitres du cours."> </div> You can login with this token using the command line (`huggingface-cli login`) or by running the following cell: ```py from huggingface_hub import notebook_login notebook_login() ``` Then you need to install Git-LFS to upload your model checkpoints: ```py %%capture !sudo apt -qq install git-lfs !git config --global credential.helper store ``` Finally, let's import the libraries we'll be using and define a few convenience functions which we'll use later in the notebook: ```py import numpy as np import torch import torch.nn.functional as F from matplotlib import pyplot as plt from PIL import Image def show_images(x): """Given a batch of images x, make a grid and convert to PIL""" x = x * 0.5 + 0.5 # Map from (-1, 1) back to (0, 1) grid = torchvision.utils.make_grid(x) grid_im = grid.detach().cpu().permute(1, 2, 0).clip(0, 1) * 255 grid_im = Image.fromarray(np.array(grid_im).astype(np.uint8)) return grid_im def make_grid(images, size=64): """Given a list of PIL images, stack them together into a line for easy viewing""" output_im = Image.new("RGB", (size * len(images), size)) for i, im in enumerate(images): output_im.paste(im.resize((size, size)), (i * size, 0)) return output_im # Mac users may need device = 'mps' (untested) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") ``` OK, we're all set! 
## DreamBooth: A Sneak Peek at What's to Come If you've looked at AI-related social media at all in the past few months, you've heard about Stable Diffusion. It's a powerful text-conditioned latent diffusion model (don't worry, we'll learn what all that means). But it has a flaw: it doesn't know what you or I look like unless we're famous enough to have our images plastered around the internet. DreamBooth lets us create our own model variant with some extra knowledge of a specific face, object or style. The Corridor Crew made an excellent video using this to tell stories with consistent characters, which is a great example of what this technique can do: ``` from IPython.display import YouTubeVideo YouTubeVideo("W4Mcuh38wyM") ``` Here's an example using a model trained on 5 photos of a popular children's toy called "Mr Potato Head". First, we load the pipeline. This will download model weights etc. from the Hub. Since this will download several gigabytes of data for a one-line demo, you are welcome to skip this cell and simply admire the example output! ``` from diffusers import StableDiffusionPipeline # Check out https://huggingface.co/sd-dreambooth-library for loads of models from the community model_id = "sd-dreambooth-library/mr-potato-head" # Load the pipeline pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to( device ) ``` Once the pipeline has finished loading, we can generate images with: ``` prompt = "an abstract oil painting of sks mr potato head by picasso" image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0] image ``` Exercise: Try it yourself with different prompts. The `sks` token represents a unique identifier for the novel concept in this case - what happens if you leave that out? You can also experiment with changing the number of sampling steps (how low can you go?) and the `guidance_scale`, which determines how much the model will try to match the prompt. There's a lot going on in that magical pipeline! By the end of the course you'll know how it all works. For now, let's take a look at how we can train a diffusion model from scratch. ## MVP (Minimum Viable Pipeline) ### Butterfly Inference Example The core API of 🤗 Diffusers is divided into three main components: Pipelines: high-level classes designed to rapidly generate samples from popular trained diffusion models in a user-friendly fashion. Models: popular architectures for training new diffusion models, e.g. UNet. Schedulers: various techniques for generating images from noise during inference as well as to generate noisy images for training. Pipelines are great for end-users, but if you're here for this course we assume you want to know what is going on under the hood! So, over the rest of this notebook we're going to build our own pipeline capable of generating small butterfly pictures. Here's the final result in action: ``` from diffusers import DDPMPipeline # Load the butterfly pipeline butterfly_pipeline = DDPMPipeline.from_pretrained( "johnowhitaker/ddpm-butterflies-32px" ).to(device) # Create 8 images images = butterfly_pipeline(batch_size=8).images # View the result make_grid(images) ``` Not as impressive as the DreamBooth example perhaps, but then we're training from scratch with ~0.0001% of the data used to train Stable Diffusion. Speaking of training, recall from the introduction to this unit that training a diffusion model looks something like this: Load in some images from the training data Add noise, in different amounts.
Feed the noisy versions of the inputs into the model Evaluate how well the model does at denoising these inputs Use this information to update the model weights, and repeat We'll explore these steps one by one in the next few sections until we have a complete training loop working, and then we'll explore how to sample from the trained model and how to package everything up into a pipeline for easy sharing. Let's begin with the data... ### Download the training dataset For this example, we'll use a dataset of images from the Hugging Face Hub. Specifically, this collection of 1000 butterfly pictures. This is a very small dataset, so we've also included commented-out lines for a few larger options. If you'd prefer to use your own collection of images, you can also use the commented-out code example to load in pictures from a folder instead. ``` import torchvision from datasets import load_dataset from torchvision import transforms dataset = load_dataset("huggan/smithsonian_butterflies_subset", split="train") # Or load images from a local folder # dataset = load_dataset("imagefolder", data_dir="path/to/folder") # We'll train on 32-pixel square images, but you can try larger sizes too image_size = 32 # You can lower your batch size if you're running out of GPU memory batch_size = 64 # Define data augmentations preprocess = transforms.Compose( [ transforms.Resize((image_size, image_size)), # Resize transforms.RandomHorizontalFlip(), # Randomly flip (data augmentation) transforms.ToTensor(), # Convert to tensor (0, 1) transforms.Normalize([0.5], [0.5]), # Map to (-1, 1) ] ) def transform(examples): images = [preprocess(image.convert("RGB")) for image in examples["image"]] return {"images": images} dataset.set_transform(transform) # Create a dataloader from the dataset to serve up the transformed images in batches train_dataloader = torch.utils.data.DataLoader( dataset, batch_size=batch_size, shuffle=True ) ``` We can grab a batch of images and view some of them like so: ``` xb = next(iter(train_dataloader))["images"].to(device)[:8] print("X shape:", xb.shape) show_images(xb).resize((8 * 64, 64), resample=Image.NEAREST) ``` We're sticking to a small dataset with 32-pixel images to keep training times manageable in this notebook. ## Define the Scheduler Our plan for training is to take these input images and add noise to them, then feed the noisy images to the model. And during inference, we will use the model predictions to iteratively remove noise. In diffusers, these processes are both handled by the scheduler. The noise schedule determines how much noise is added at different timesteps. Here's how we might create a scheduler using the default settings for 'DDPM' training and sampling (based on the paper "Denoising Diffusion Probabilistic Models"): ``` from diffusers import DDPMScheduler noise_scheduler = DDPMScheduler(num_train_timesteps=1000) ``` The DDPM paper describes a corruption process that adds a small amount of noise for every 'timestep'. Given $x_{t-1}$ for some timestep, we can get the next (slightly more noisy) version $x_t$ with:<br><br> $q(\mathbf{x}_t \vert \mathbf{x}_{t-1}) = \mathcal{N}(\mathbf{x}_t; \sqrt{1 - \beta_t} \mathbf{x}_{t-1}, \beta_t\mathbf{I}) \quad q(\mathbf{x}_{1:T} \vert \mathbf{x}_0) = \prod^T_{t=1} q(\mathbf{x}_t \vert \mathbf{x}_{t-1})$<br><br> That is, we take $x_{t-1}$, scale it by $\sqrt{1 - \beta_t}$ and add noise scaled by $\sqrt{\beta_t}$. This $\beta$ is defined for every t according to some schedule, and determines how much noise is added per timestep.
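To make that single-step formula concrete, here is a minimal hand-written sketch of one corruption step (the `beta_t` value below is just an illustrative constant, not one taken from the scheduler - in the real schedule it varies per timestep):

```
# One manual corruption step: x_t = sqrt(1 - beta_t) * x_{t-1} + sqrt(beta_t) * noise
beta_t = 0.001  # illustrative value only; the scheduler defines one beta per timestep
x_prev = xb  # re-using the batch of training images from above as x_{t-1}
noise = torch.randn_like(x_prev)
x_t = (1 - beta_t) ** 0.5 * x_prev + beta_t**0.5 * noise
show_images(x_t).resize((8 * 64, 64), resample=Image.NEAREST)
```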
Now, we don't necessarily want to do this operation 500 times to get $x_{500}$ so we have another formula to get $x_t$ for any t given $x_0$: <br><br> $\begin{aligned} q(\mathbf{x}_t \vert \mathbf{x}_0) &= \mathcal{N}(\mathbf{x}_t; \sqrt{\bar{\alpha}_t} \mathbf{x}_0, {(1 - \bar{\alpha}_t)} \mathbf{I}) \end{aligned}$ where $\bar{\alpha}_t = \prod_{i=1}^T \alpha_i$ and $\alpha_i = 1-\beta_i$<br><br> The maths notation always looks scary! Luckily the scheduler handles all that for us. We can plot $\sqrt{\bar{\alpha}_t}$ (labelled as `sqrt_alpha_prod`) and $\sqrt{(1 - \bar{\alpha}_t)}$ (labelled as `sqrt_one_minus_alpha_prod`) to view how the input (x) and the noise are scaled and mixed across different timesteps: ``` plt.plot(noise_scheduler.alphas_cumprod.cpu() ** 0.5, label=r"${\sqrt{\bar{\alpha}_t}}$") plt.plot((1 - noise_scheduler.alphas_cumprod.cpu()) ** 0.5, label=r"$\sqrt{(1 - \bar{\alpha}_t)}$") plt.legend(fontsize="x-large"); ``` Exercise: You can explore how this plot changes with different settings for beta_start, beta_end and beta_schedule by swapping in one of the commented-out options here: ``` # One with too little noise added: # noise_scheduler = DDPMScheduler(num_train_timesteps=1000, beta_start=0.001, beta_end=0.004) # The 'cosine' schedule, which may be better for small image sizes: # noise_scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule='squaredcos_cap_v2') ``` Whichever scheduler you've chosen, we can now use it to add noise in different amounts using the `noise_scheduler.add_noise` function like so: ``` timesteps = torch.linspace(0, 999, 8).long().to(device) noise = torch.randn_like(xb) noisy_xb = noise_scheduler.add_noise(xb, noise, timesteps) print("Noisy X shape", noisy_xb.shape) show_images(noisy_xb).resize((8 * 64, 64), resample=Image.NEAREST) ``` Again, explore the effect of using different noise schedules and parameters here. This video does a great job explaining some of the maths above in more detail, and is a great introduction to some of these concepts. ### Define the Model Now we come to the core component: the model itself. Most diffusion models use architectures that are some variant of a U-net and that's what we'll use here. In a nutshell: the model has the input image go through several blocks of ResNet layers, each of which halves the image size by 2 then through the same number of blocks that upsample it again. there are skip connections linking the features on the downsample path to the corresponding layers in the upsample path. A key feature of this model is that it predicts images of the same size as the input, which is exactly what we need here. Diffusers provides us a handy `UNet2DModel` class which creates the desired architecture in PyTorch. Let's create a U-net for our desired image size. 
Note that `down_block_types` correspond to the downsampling blocks (green on the diagram above), and `up_block_types` are the upsampling blocks (red on the diagram): ``` from diffusers import UNet2DModel # Create a model model = UNet2DModel( sample_size=image_size, # the target image resolution in_channels=3, # the number of input channels, 3 for RGB images out_channels=3, # the number of output channels layers_per_block=2, # how many ResNet layers to use per UNet block block_out_channels=(64, 128, 128, 256), # More channels -> more parameters down_block_types=( "DownBlock2D", # a regular ResNet downsampling block "DownBlock2D", "AttnDownBlock2D", # a ResNet downsampling block with spatial self-attention "AttnDownBlock2D", ), up_block_types=( "AttnUpBlock2D", "AttnUpBlock2D", # a ResNet upsampling block with spatial self-attention "UpBlock2D", "UpBlock2D", # a regular ResNet upsampling block ), ) model.to(device); ``` When dealing with higher-resolution inputs you may want to use more down and up-blocks, and keep the attention layers only at the lowest resolution (bottom) layers to reduce memory usage. We'll talk later about how you might experiment to find the best settings for your use-case. We can check that passing in a batch of data and some random timesteps produces an output the same shape as the input data: ``` with torch.no_grad(): model_prediction = model(noisy_xb, timesteps).sample model_prediction.shape ``` In the next section we'll see how to train this model. ### Create a Training Loop Time to train! Below is a typical optimization loop in PyTorch, where we run through the data batch by batch and update the parameters of our model each step using an optimizer - in this case the AdamW optimizer with a learning rate of 0.0004. For each batch of data, we Sample some random timesteps Noise the data accordingly Feed the noisy data through the model Compare the model predictions with the target (i.e. the noise in this case) using mean squared error as our loss function Update the model parameters via `loss.backward()` and `optimizer.step()` During this process we also log the losses over time for later plotting. NB: This code takes nearly 10 minutes to run - feel free to skip these two cells and use the pretrained model if you are in a hurry. Alternatively, you can explore how reducing the number of channels in each layer via the model definition above can speed things up. 
The official diffusers training example trains a larger model on this dataset at higher resolution, and is a good reference for what a less minimal training loop looks like: ``` # Set the noise scheduler noise_scheduler = DDPMScheduler( num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2" ) # Training loop optimizer = torch.optim.AdamW(model.parameters(), lr=4e-4) losses = [] for epoch in range(30): for step, batch in enumerate(train_dataloader): clean_images = batch["images"].to(device) # Sample noise to add to the images noise = torch.randn(clean_images.shape).to(clean_images.device) bs = clean_images.shape[0] # Sample a random timestep for each image timesteps = torch.randint( 0, noise_scheduler.num_train_timesteps, (bs,), device=clean_images.device ).long() # Add noise to the clean images according to the noise magnitude at each timestep noisy_images = noise_scheduler.add_noise(clean_images, noise, timesteps) # Get the model prediction noise_pred = model(noisy_images, timesteps, return_dict=False)[0] # Calculate the loss loss = F.mse_loss(noise_pred, noise) loss.backward() losses.append(loss.item()) # Update the model parameters with the optimizer optimizer.step() optimizer.zero_grad() if (epoch + 1) % 5 == 0: loss_last_epoch = sum(losses[-len(train_dataloader) :]) / len(train_dataloader) print(f"Epoch:{epoch+1}, loss: {loss_last_epoch}") ``` Plotting the loss, we see that the model rapidly improves initially and then continues to get better at a slower rate (which is more obvious if we use a log scale as shown on the right): ``` fig, axs = plt.subplots(1, 2, figsize=(12, 4)) axs[0].plot(losses) axs[1].plot(np.log(losses)) plt.show() ``` As an alternative to running the training code above, you can use the model from the pipeline like so: ``` # Uncomment to instead load the model I trained earlier: # model = butterfly_pipeline.unet ``` ### Generate Images How do we get images with this model? Option 1: Creating a pipeline: ``` from diffusers import DDPMPipeline image_pipe = DDPMPipeline(unet=model, scheduler=noise_scheduler) pipeline_output = image_pipe() pipeline_output.images[0] ``` We can save a pipeline to a local folder like so: ``` image_pipe.save_pretrained("my_pipeline") ``` Inspecting the folder contents: ``` !ls my_pipeline/ ``` ``` model_index.json scheduler unet ``` The `scheduler` and `unet` subfolders contain everything needed to re-create those components. For example, inside the `unet` folder you'll find the model weights (`diffusion_pytorch_model.bin`) alongside a config file which specifies the UNet architecture. ``` !ls my_pipeline/unet/ ``` ``` config.json diffusion_pytorch_model.bin ``` Together, these files contain everything needed to recreate the pipeline. You can manually upload them to the hub to share the pipeline with others, or check out the code to do this via the API in the next section.
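If you want to double-check that this folder really is self-contained, one option is to load the pipeline straight back from it - `from_pretrained()` accepts a local directory as well as a Hub ID (the `reloaded_pipe` name below is just for illustration):

```
from diffusers import DDPMPipeline

# Reload the pipeline from the local folder we just saved
reloaded_pipe = DDPMPipeline.from_pretrained("my_pipeline").to(device)
reloaded_pipe().images[0]
```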
Option 2: Writing a Sampling Loop If you inspect the forward method of the pipeline you'll be able to see what is happening when we run `image_pipe()`: ``` # ??image_pipe.forward ``` We begin with random noise, and run through the scheduler timesteps from most to least noisy, removing a small amount of noise each step based on the model prediction: ``` # Random starting point (8 random images): sample = torch.randn(8, 3, 32, 32).to(device) for i, t in enumerate(noise_scheduler.timesteps): # Get model pred with torch.no_grad(): residual = model(sample, t).sample # Update sample with step sample = noise_scheduler.step(residual, t, sample).prev_sample show_images(sample) ``` The `noise_scheduler.step()` function does the maths required to update `sample` appropriately. There are a number of sampling methods - in the next unit we'll see how we can swap in a different sampler to speed up image generation with existing models, and talk more about the theory behind sampling from diffusion models. ### Push your model to the Hub In the example above we saved our pipeline to a local folder. To push our model to the Hub, we will need a model repository to push our files to. We'll determine the repository name from the model ID we want to give our model (feel free to replace the `model_name` with your own choice; it just needs to contain your username, which is what the function `get_full_repo_name()` does): ``` from huggingface_hub import get_full_repo_name model_name = "sd-class-butterflies-32" hub_model_id = get_full_repo_name(model_name) hub_model_id ``` Next, create a model repository on the 🤗 Hub and push our model: ``` from huggingface_hub import HfApi, create_repo create_repo(hub_model_id) api = HfApi() api.upload_folder( folder_path="my_pipeline/scheduler", path_in_repo="", repo_id=hub_model_id ) api.upload_folder(folder_path="my_pipeline/unet", path_in_repo="", repo_id=hub_model_id) api.upload_file( path_or_fileobj="my_pipeline/model_index.json", path_in_repo="model_index.json", repo_id=hub_model_id, ) ``` The last thing to do is create a nice model card so that our butterfly generator can easily be found on the Hub (feel free to expand and edit the description!): ``` from huggingface_hub import ModelCard content = f""" --- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional image generation of cute 🦋. ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('{hub_model_id}') image = pipeline().images[0] image ``` """ ``` card = ModelCard(content) card.push_to_hub(hub_model_id) ``` Now that the model is on the Hub, you can download it from anywhere by using the `from_pretrained()` method of the `DDPMPipeline` as follows: ``` from diffusers import DDPMPipeline image_pipe = DDPMPipeline.from_pretrained(hub_model_id) pipeline_output = image_pipe() pipeline_output.images[0] ``` Great, it works! ## Scaling up with 🤗 Accelerate This notebook was made for learning purposes, and as such I tried to keep the code as minimal and clean as possible. Because of this, we omitted some of the things you might want if you were to try training a larger model on much more data, such as multi-GPU support, logging of progress and example images, gradient checkpointing to support larger batch sizes, automatic uploading of models and so on.
Thankfully most of these features are available in the example training script here. You can download the file like so:

```
!wget https://github.com/huggingface/diffusers/raw/main/examples/unconditional_image_generation/train_unconditional.py
```

Open up the file and you'll see where the model is defined and what settings are available. I ran the script with the following command:

```
# Let's give our new model a name for the Hub
model_name = "sd-class-butterflies-64"
hub_model_id = get_full_repo_name(model_name)
```

```
!accelerate launch train_unconditional.py \
  --dataset_name="huggan/smithsonian_butterflies_subset" \
  --resolution=64 \
  --output_dir={model_name} \
  --train_batch_size=32 \
  --num_epochs=50 \
  --gradient_accumulation_steps=1 \
  --learning_rate=1e-4 \
  --lr_warmup_steps=500 \
  --mixed_precision="no"
```

As before, let's push the model to the Hub and create a nice model card (and feel free to edit it as you wish!):

```
create_repo(hub_model_id)
api = HfApi()
api.upload_folder(
    folder_path=f"{model_name}/scheduler", path_in_repo="", repo_id=hub_model_id
)
api.upload_folder(
    folder_path=f"{model_name}/unet", path_in_repo="", repo_id=hub_model_id
)
api.upload_file(
    path_or_fileobj=f"{model_name}/model_index.json",
    path_in_repo="model_index.json",
    repo_id=hub_model_id,
)
content = f"""
---
license: mit
tags:
- pytorch
- diffusers
- unconditional-image-generation
- diffusion-models-class
---

# Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class)

This model is a diffusion model for unconditional image generation of cute 🦋.

## Usage

```python
from diffusers import DDPMPipeline

pipeline = DDPMPipeline.from_pretrained('{hub_model_id}')
image = pipeline().images[0]
image
```
"""

card = ModelCard(content)
card.push_to_hub(hub_model_id)
```

About 45 minutes later, this is the result:

```
pipeline = DDPMPipeline.from_pretrained(hub_model_id).to(device)
images = pipeline(batch_size=8).images
make_grid(images)
```

Exercise: See if you can find training/model settings that give good results in as little time as possible, and share your findings with the community. Dig around in the script to see if you can understand the code, and ask for clarification on anything that looks confusing.

## Avenues for Further Exploration

Hopefully this has given you a taste of what you can do with the 🤗 Diffusers library! Some possible next steps:

- Try training an unconditional diffusion model on a new dataset - bonus points if you create one yourself. You can find some great image datasets for this task in the HugGan organization on the Hub. Just make sure you downsample them if you don't want to wait a very long time for the model to train!
- Try out DreamBooth to create your own customized Stable Diffusion pipeline using either this Space or this notebook
- Modify the training script to explore different UNet hyperparameters (number of layers, channels etc), different noise schedules etc. (a small illustrative sketch follows this list)
- Check out the Diffusion Models from Scratch notebook for a different take on the core ideas we've covered in this unit
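As a concrete starting point for that hyperparameter exploration, here is a minimal sketch of how a UNet with different capacity could be defined with `UNet2DModel`. The channel counts and block types below are illustrative choices for this exercise, not the settings used by the official training script:

```
from diffusers import UNet2DModel

# Illustrative example only: a wider UNet for 64px images
model = UNet2DModel(
    sample_size=64,  # target image resolution
    in_channels=3,
    out_channels=3,
    layers_per_block=2,  # try 1-3 layers per block and compare the training curves
    block_out_channels=(64, 128, 256, 512),  # more/wider blocks = more capacity (and memory)
    down_block_types=("DownBlock2D", "DownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "AttnUpBlock2D", "UpBlock2D", "UpBlock2D"),
)
```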
diffusion-models-class/units/en/unit1/2.mdx/0
{ "file_path": "diffusion-models-class/units/en/unit1/2.mdx", "repo_id": "diffusion-models-class", "token_count": 8144 }
135
<jupyter_start><jupyter_text>Hackathon DreamBooth 🏆 Bienvenue au Hackathon DreamBooth ! Dans cette compétition, vous allez **personnaliser un modèle de Stable Diffusion en le *finetunant* sur une poignée de vos propres images**. Pour cela, nous allons utiliser une technique appelée [_DreamBooth_](https://arxiv.org/abs/2208.12242), qui permet d'implanter un sujet (par exemple votre animal de compagnie ou votre plat préféré) dans le domaine de sortie du modèle de telle sorte qu'il puisse être synthétisé avec un _identifiant unique_ dans le prompt.Plongeons dans l'aventure ! PrérequisAvant de vous plonger dans ce *notebook*, vous devriez lire :* La vision d'ensemble de l'[unitét 3](https://github.com/huggingface/diffusion-models-class/blob/main/unit3/README.md) qui contient une plongée dans Stable Diffusion.* L'[article de blog](https://dreambooth.github.io/) des auteurs de DreamBooth pour avoir une idée de ce qui est possible avec cette technique* L'[article de blog](https://huggingface.co/blog/dreambooth) d'Hugging Face sur les meilleures pratiques pour *finetuner* Stable Diffusion avec DreamBooth. 🚨 **Note :** le code de **ce *notebook* nécessite au moins 14GB de vRAM GPU** et est une version simplifiée du [script d'entraînement officiel](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth) fourni dans 🤗 *Diffusers*. Il produit des modèles décents pour la plupart des applications, mais nous recommandons d'expérimenter les fonctionnalités avancées telles que la préservation de la perte de classe & le *finetuning* de l'encodeur textuel que si vous disposez d'au moins 24 Go de vRAM. Consultez la [documentation](https://huggingface.co/docs/diffusers/training/dreambooth) de 🤗 *Diffusers* pour plus de détails. Qu'est-ce que DreamBooth? DreamBooth est une technique permettant d'enseigner de nouveaux concepts à Stable Diffusion en utilisant une forme spécialisée de *finetuning*. Si vous êtes sur Twitter ou Reddit, vous avez peut-être vu des gens utiliser cette technique pour créer des avatars (souvent hilarants) d'eux-mêmes. Par exemple, voici à quoi ressemblerait [Andrej Karpathy](https://karpathy.ai/) en tant que cow-boy (vous devrez peut-être exécuter la cellule pour voir le résultat) :<jupyter_code>%%html <blockquote class="twitter-tweet"><p lang="en" dir="ltr">Stableboost auto-suggests a few hundred prompts by default but you can generate additional variations for any one prompt that seems to be giving fun/interesting results, or adjust it in any way: <a href="https://t.co/qWmadiXftP">pic.twitter.com/qWmadiXftP</a></p>&mdash; Andrej Karpathy (@karpathy) <a href="https://twitter.com/karpathy/status/1600578187141840896?ref_src=twsrc%5Etfw">December 7, 2022</a></blockquote> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script><jupyter_output><empty_output><jupyter_text>Le fonctionnement de DreamBooth est le suivant :* Recueillir environ 10-20 images d'un sujet (par exemple votre chien) et définir un identifiant unique [V] qui se réfère au sujet. 
Cet identifiant est généralement un mot inventé comme `flffydog` qui est implanté dans différentes prompts au moment de l'inférence pour placer le sujet dans différents contextes.* *Finetuner* le modèle de diffusion en fournissant les images accompagnées d'un prompt comme "Une photo d'un chien [V]" qui contient l'identifiant unique et le nom de la classe (c'est-à-dire "chien" dans cet exemple).* (Optionnellement) Appliquer un _a priori de perte de préservation spécifique à la classe_, qui exploite l`a priori sémantique que le modèle a sur la classe et l`encourage à générer des instances diverses appartenant à la classe du sujet en injectant le nom de la classe dans le prompt. En pratique, cette étape n'est vraiment nécessaire que pour les visages humains et peut être ignorée pour les thèmes que nous explorerons dans ce hackathon.Une vue d'ensemble de DreamBooth est présentée dans l'image ci-dessous : Que peut faire DreamBooth ?En plus de placer votre sujet dans des endroits intéressants, DreamBooth peut être utilisé pour la _**synthèse de vues guidée par texte**_, où le sujet est vu de différents points de vue comme le montre l'exemple ci-dessous : DreamBooth peut également être utilisé pour modifier les propriétés du sujet, comme la couleur ou le mélange des espèces animales ! Maintenant que nous avons vu quelques unes des possibilités de DreamBooth, commençons à entraîner nos propres modèles ! Étape 1 : Configuration Si vous exécutez ce *notebook* sur Google Colab ou Kaggle, exécutez la cellule ci-dessous pour installer les bibliothèques requises :<jupyter_code>%pip install -qqU diffusers transformers bitsandbytes accelerate ftfy datasets<jupyter_output><empty_output><jupyter_text>Si vous travaillez sur Kaggle, vous devrez installer la dernière version de PyTorch pour fonctionner avec 🤗 Accelerate :<jupyter_code># Décommenter et exécuter si vous utilisez les *notebooks* de Kaggle. Il se peut que vous deviez redémarrer le notebook par la suite # %pip install -U torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu116<jupyter_output><empty_output><jupyter_text>Pour pouvoir pousser votre modèle vers le *Hub* et le faire apparaître sur le [Classement de l'Hackathon](https://huggingface.co/spaces/dreambooth-hackathon/leaderboard), il y a encore quelques étapes à suivre. 
Tout d'abord, vous devez créer un [*token* d'accès](https://huggingface.co/docs/hub/security-tokens) avec un accès d'écriture à partir de votre compte Hugging Face, puis exécuter la cellule suivante et entrer votre *token* :<jupyter_code>from huggingface_hub import notebook_login notebook_login()<jupyter_output><empty_output><jupyter_text>La dernière étape consiste à installer Git LFS :<jupyter_code>%%capture !sudo apt -qq install git-lfs !git config --global credential.helper store<jupyter_output><empty_output><jupyter_text>Étape 2 : Choisir un thème Cette compétition est composée de 5 _thèmes_, où chacun rassemblera des modèles appartenant aux catégories suivantes :* **Animal 🐨 :** Utilisez ce thème pour générer des images de votre animal de compagnie ou de votre animal favori en train de se promener dans l'Acropole, de nager ou de voler dans l'espace.* **Science 🔬 :** Utilisez ce thème pour générer des images synthétiques de galaxies, de protéines ou de tout autre domaine des sciences naturelles et médicales.* **Nourriture 🍔 :** Utilisez ce thème pour finetuner un modèle sur votre plat ou cuisine préféré.* **Paysage 🏔 :** Utilisez ce thème pour créer de magnifiques paysages de votre montagne, lac ou jardin préféré.* **Carte blanche 🔥 :** Utilisez ce thème pour créer des modèles dans la catégorie de votre choix !Nous **attribuerons des prix aux 3 modèles les plus appréciés par thème**, et vous êtes encouragés à soumettre autant de modèles que vous le souhaitez ! Exécutez la cellule ci-dessous pour créer un widget déroulant dans lequel vous pouvez sélectionner le thème auquel vous souhaitez vous soumettre :<jupyter_code>import ipywidgets as widgets theme = "animal" drop_down = widgets.Dropdown( options=["animal", "science", "food", "landscape", "wildcard"], description="Choisir un thème", disabled=False, ) def dropdown_handler(change): global theme theme = change.new drop_down.observe(dropdown_handler, names="value") display(drop_down) print(f"Vous avez sélectionné le thème {theme} !")<jupyter_output>You've selected the animal theme!<jupyter_text>Étape 3 : Créer un jeu de données d'images et le télécharger vers le *Hub* Une fois que vous avez choisi un thème, l'étape suivante consiste à **créer un jeu de données d'images pour ce thème** et à le télécharger sur le *Hub* d'Hugging Face :* Vous aurez besoin d'environ **10-20 images du sujet** que vous souhaitez implanter dans le modèle. Il peut s'agir de photos que vous avez prises ou téléchargées à partir de plateformes comme [Unsplash](https://unsplash.com/). Vous pouvez également vous inspirer des [jeux de données d'images](https://huggingface.co/datasets?task_categories=task_categories:image-classification&sort=downloads) trouvables sur le *Hub*.* Pour de meilleurs résultats, nous vous recommandons d'utiliser des images de votre sujet sous **différents angles et perspectives**. Une fois que vous avez rassemblé vos images dans un dossier, vous pouvez les télécharger vers le *Hub* en utilisant l'interface utilisateur pour glisser-déposer vos images. 
Consultez [ce guide](https://huggingface.co/docs/datasets/upload_datasetupload-with-the-hub-ui) pour plus de détails, ou regardez la vidéo ci-dessous :<jupyter_code>from IPython.display import YouTubeVideo YouTubeVideo("HaN6qCr_Afc")<jupyter_output><empty_output><jupyter_text>Alternativement, vous pouvez charger votre jeu de données localement en utilisant la fonctionnalité `imagefolder` de 🤗 *Datasets* et ensuite le pousser vers le *Hub* :```pythonfrom datasets import load_datasetdataset = load_dataset("imagefolder", data_dir="votre_dossier_d'images") Supprimer la colonne d'étiquettes facticesdataset = dataset.remove_columns("label") Pousser vers le Hubdataset.push_to_hub("dreambooth-hackathon-images")``` Une fois que vous avez créé votre jeu de données, vous pouvez le télécharger en utilisant la fonction `load_dataset()` comme suit :<jupyter_code>from datasets import load_dataset dataset_id = "lewtun/corgi" # CMODIFIEZ CELA EN VOTRE {hub_username}/{dataset_id} dataset = load_dataset(dataset_id, split="train") dataset<jupyter_output>Using custom data configuration lewtun--corgi-387a0d84d49c152d Found cached dataset parquet (/home/lewis_huggingface_co/.cache/huggingface/datasets/lewtun___parquet/lewtun--corgi-387a0d84d49c152d/0.0.0/2a3b91fbd88a2c90d1dbbb32b460cf621d31bd5b05b934492fdef7d8d6f236ec)<jupyter_text>Maintenant que nous avons notre jeu de données, définissons une fonction d'aide pour visualiser quelques images :<jupyter_code>from PIL import Image def image_grid(imgs, rows, cols): assert len(imgs) == rows * cols w, h = imgs[0].size grid = Image.new("RGB", size=(cols * w, rows * h)) grid_w, grid_h = grid.size for i, img in enumerate(imgs): grid.paste(img, box=(i % cols * w, i // cols * h)) return grid num_samples = 4 image_grid(dataset["image"][:num_samples], rows=1, cols=num_samples)<jupyter_output><empty_output><jupyter_text>Si cela semble correct, vous pouvez passer à l'étape suivante : créer un jeu de données PyTorch pour entraîner avec DreamBooth. Étape 3 : Créer un jeu de données d'entraînement Pour créer un ensemble d'entraînement pour nos images, nous avons besoin de quelques éléments :* Une _instance prompt_ qui est utilisée pour amorcer le modèle au début de l'entraînement. Dans la plupart des cas, l'utilisation de " a photo of [identifier] [class noun] " fonctionne très bien, par exemple " a photo of ccorgi dog " pour nos jolies images de Corgi. * **Note :** il est recommandé de choisir un mot unique / inventé comme `ccorgi` pour décrire votre sujet. 
Cela permettra d'éviter qu'un mot courant du vocabulaire du modèle ne soit écrasé.* Un _tokenizer_ pour convertir le prompt de l'instance en ID d'entrée qui peuvent être fournis à l'encodeur textuel de Stable Diffusion.* Un ensemble de _transformations d'images_, notamment le redimensionnement des images selon une forme commune et la normalisation des valeurs des pixels selon une moyenne et une distribution standard communes.En gardant cela à l'esprit, commençons par définir le prompt de l'instance :<jupyter_code>name_of_your_concept = "ccorgi" # À MODIFIER EN FONCTION DE VOTRE SUJET type_of_thing = "dog" # À MODIFIER EN FONCTION DE VOTRE SUJET instance_prompt = f"a photo of {name_of_your_concept} {type_of_thing}" print(f"Instance prompt: {instance_prompt}")<jupyter_output>Instance prompt: a photo of ccorgi dog<jupyter_text>Ensuite, nous devons créer un objet PyTorch `Dataset` qui implémente les méthodes `__len__` et `__getitem__` :<jupyter_code>from torch.utils.data import Dataset from torchvision import transforms class DreamBoothDataset(Dataset): def __init__(self, dataset, instance_prompt, tokenizer, size=512): self.dataset = dataset self.instance_prompt = instance_prompt self.tokenizer = tokenizer self.size = size self.transforms = transforms.Compose( [ transforms.Resize(size), transforms.CenterCrop(size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def __len__(self): return len(self.dataset) def __getitem__(self, index): example = {} image = self.dataset[index]["image"] example["instance_images"] = self.transforms(image) example["instance_prompt_ids"] = self.tokenizer( self.instance_prompt, padding="do_not_pad", truncation=True, max_length=self.tokenizer.model_max_length, ).input_ids return example<jupyter_output><empty_output><jupyter_text>Très bien, vérifions maintenant que cela fonctionne en chargeant le *tokenizer* CLIP associé à l'encodeur textuel du modèle Stable Diffusion original, puis en créant le jeu de données d'entraînement :<jupyter_code>from transformers import CLIPTokenizer # Le point de contrôle de Stable Diffusion que nous allons finetuner model_id = "CompVis/stable-diffusion-v1-4" tokenizer = CLIPTokenizer.from_pretrained( model_id, subfolder="tokenizer", ) train_dataset = DreamBoothDataset(dataset, instance_prompt, tokenizer) train_dataset[0]<jupyter_output><empty_output><jupyter_text>Étape 4 : Définir un collecteur de données Maintenant que nous disposons d'un jeu de données d'entraînement, la prochaine chose à faire est de définir un _collecteur de données_. Un collecteur de données est une fonction qui collecte les éléments d'un batch de données et applique une certaine logique pour former un tenseur unique que nous pouvons fournir au modèle. Si vous souhaitez en savoir plus, vous pouvez consulter cette vidéo du [Cours de NLP d'Hugging Face](https://huggingface.co/learn/nlp-course/fr/chapter1/1):<jupyter_code>YouTubeVideo("-RPeakdlHYo")<jupyter_output><empty_output><jupyter_text>Pour DreamBooth, notre collecteur de données doit fournir au modèle les ID d'entrée du *tokenizer* et les valeurs des pixels des images sous forme de tenseur empilé. 
La fonction ci-dessous fait l'affaire :<jupyter_code>import torch def collate_fn(examples): input_ids = [example["instance_prompt_ids"] for example in examples] pixel_values = [example["instance_images"] for example in examples] pixel_values = torch.stack(pixel_values) pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() input_ids = tokenizer.pad( {"input_ids": input_ids}, padding=True, return_tensors="pt" ).input_ids batch = { "input_ids": input_ids, "pixel_values": pixel_values, } return batch<jupyter_output><empty_output><jupyter_text>Étape 5 : Charger les composants du pipeline Stable Diffusion Nous avons presque toutes les pièces prêtes pour entraîner ! Comme vous l'avez vu dans le *notebook* de l'unité 3 sur Stable Diffusion, le pipeline est composé de plusieurs modèles :* Un encodeur textuel qui convertit les prompts en enchâssements. Nous utilisons ici CLIP car c'est l'encodeur utilisé pour entraîner Stable Diffusion v1-4.* Un VAE ou autoencodeur variationnel qui convertit les images en représentations compressées (c.-à-d. latentes) et les décompresse au moment de l'inférence.* Un UNet qui applique l'opération de débruitage sur le latent du VAE.Nous pouvons charger tous ces composants en utilisant les bibliothèques 🤗 *Diffusers* et 🤗 *Transformers* comme suit :<jupyter_code>from diffusers import AutoencoderKL, UNet2DConditionModel from transformers import CLIPFeatureExtractor, CLIPTextModel text_encoder = CLIPTextModel.from_pretrained(model_id, subfolder="text_encoder") vae = AutoencoderKL.from_pretrained(model_id, subfolder="vae") unet = UNet2DConditionModel.from_pretrained(model_id, subfolder="unet") feature_extractor = CLIPFeatureExtractor.from_pretrained("openai/clip-vit-base-patch32")<jupyter_output><empty_output><jupyter_text>Étape 6 : *Finetuner* le modèle Maintenant vient la partie amusante : entraîner notre modèle avec DreamBooth ! Comme le montre l'[article de blog d'Hugging Face](https://huggingface.co/blog/dreambooth), les hyperparamètres les plus importants à ajuster sont le taux d'apprentissage et le nombre d'étapes d'entraînement.En général, vous obtiendrez de meilleurs résultats avec un taux d'apprentissage plus faible, mais vous devrez augmenter le nombre d'étapes d'entraînement. 
Les valeurs ci-dessous constituent un bon point de départ, mais vous devrez peut-être les ajuster en fonction de votre jeu de données :<jupyter_code>learning_rate = 2e-06 max_train_steps = 400<jupyter_output><empty_output><jupyter_text>Ensuite, enveloppons les autres hyperparamètres dont nous avons besoin dans un objet `Namespace` pour faciliter la configuration de l'entraînement :<jupyter_code>from argparse import Namespace args = Namespace( pretrained_model_name_or_path=model_id, resolution=512, # Réduisez cette valeur si vous souhaitez économiser de la mémoire train_dataset=train_dataset, instance_prompt=instance_prompt, learning_rate=learning_rate, max_train_steps=max_train_steps, train_batch_size=1, gradient_accumulation_steps=1, # Augmentez cette valeur si vous souhaitez réduire l'utilisation de la mémoire max_grad_norm=1.0, gradient_checkpointing=True, # mettez cette valeur à True pour réduire l'utilisation de la mémoire use_8bit_adam=True, # utiliser l'optimiseur 8bit de bitsandbytes seed=3434554, sample_batch_size=2, output_dir="my-dreambooth", # où sauvegarder le pipeline )<jupyter_output><empty_output><jupyter_text>La dernière étape consiste à définir une fonction `training_function()` qui enveloppe la logique d'entraînement et peut être passée à 🤗 *Accelerate* pour gérer l'entraînement sur 1 ou plusieurs GPU. Si c'est la première fois que vous utilisez 🤗 *Accelerate*, regardez cette vidéo pour avoir un aperçu rapide de ce qu'il peut faire :<jupyter_code>YouTubeVideo("s7dy8QRgjJ0")<jupyter_output><empty_output><jupyter_text>Les détails devraient ressembler à ce que nous avons vu dans les unités 1 et 2 lorsque nous avons entraîné nos propres modèles de diffusion à partir de zéro :<jupyter_code>import math import torch.nn.functional as F from accelerate import Accelerator from accelerate.utils import set_seed from diffusers import DDPMScheduler, PNDMScheduler, StableDiffusionPipeline from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker from torch.utils.data import DataLoader from tqdm.auto import tqdm def training_function(text_encoder, vae, unet): accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, ) set_seed(args.seed) if args.gradient_checkpointing: unet.enable_gradient_checkpointing() # Utiliser Adam 8 bits pour réduire l'utilisation de la mémoire ou pour finetuner le modèle sur les GPU de 16 Go if args.use_8bit_adam: import bitsandbytes as bnb optimizer_class = bnb.optim.AdamW8bit else: optimizer_class = torch.optim.AdamW optimizer = optimizer_class( unet.parameters(), # n'optimisent que l'unet lr=args.learning_rate, ) noise_scheduler = DDPMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, ) train_dataloader = DataLoader( args.train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn, ) unet, optimizer, train_dataloader = accelerator.prepare( unet, optimizer, train_dataloader ) # Déplacer text_encode et vae vers le gpu text_encoder.to(accelerator.device) vae.to(accelerator.device) # Nous devons recalculer le nombre total d'étapes d'entraînement car la taille du chargeur de données d'entraînement peut avoir changé num_update_steps_per_epoch = math.ceil( len(train_dataloader) / args.gradient_accumulation_steps ) num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # Entraîner ! 
total_batch_size = ( args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps ) # N'afficher la barre de progression qu'une seule fois par machine progress_bar = tqdm( range(args.max_train_steps), disable=not accelerator.is_local_main_process ) progress_bar.set_description("Steps") global_step = 0 for epoch in range(num_train_epochs): unet.train() for step, batch in enumerate(train_dataloader): with accelerator.accumulate(unet): # Convertir les images en espace latent with torch.no_grad(): latents = vae.encode(batch["pixel_values"]).latent_dist.sample() latents = latents * 0.18215 # Échantillon de bruit que nous ajouterons aux latents noise = torch.randn(latents.shape).to(latents.device) bsz = latents.shape[0] # Échantillonner un pas de temps aléatoire pour chaque image timesteps = torch.randint( 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device, ).long() # Ajouter du bruit aux latents en fonction de la magnitude du bruit à chaque pas de temps # (il s'agit du processus de diffusion vers l'avant) noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) # Obtenir l'enchâssement du texte pour le conditionnement with torch.no_grad(): encoder_hidden_states = text_encoder(batch["input_ids"])[0] # Prédire le bruit résiduel noise_pred = unet( noisy_latents, timesteps, encoder_hidden_states ).sample loss = ( F.mse_loss(noise_pred, noise, reduction="none") .mean([1, 2, 3]) .mean() ) accelerator.backward(loss) if accelerator.sync_gradients: accelerator.clip_grad_norm_(unet.parameters(), args.max_grad_norm) optimizer.step() optimizer.zero_grad() # Vérifie si l'accélérateur a effectué une étape d'optimisation en coulisses if accelerator.sync_gradients: progress_bar.update(1) global_step += 1 logs = {"loss": loss.detach().item()} progress_bar.set_postfix(**logs) if global_step >= args.max_train_steps: break accelerator.wait_for_everyone() # Créé le pipeline en utilisant les modules entraînés et le sauvegarde if accelerator.is_main_process: print(f"Loading pipeline and saving to {args.output_dir}...") scheduler = PNDMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True, steps_offset=1, ) pipeline = StableDiffusionPipeline( text_encoder=text_encoder, vae=vae, unet=accelerator.unwrap_model(unet), tokenizer=tokenizer, scheduler=scheduler, safety_checker=StableDiffusionSafetyChecker.from_pretrained( "CompVis/stable-diffusion-safety-checker" ), feature_extractor=feature_extractor, ) pipeline.save_pretrained(args.output_dir)<jupyter_output><empty_output><jupyter_text>Maintenant que nous avons défini la fonction, entraînons-la ! Selon la taille de votre jeu de données et le type de GPU, cette opération peut prendre entre 5 minutes et 1 heure :<jupyter_code>from accelerate import notebook_launcher num_of_gpus = 1 # MODIFIEZ POUR QUE CELA CORRESPONDE AU NOMBRE DE GPUS DONT VOUS DISPOSEZ notebook_launcher( training_function, args=(text_encoder, vae, unet), num_processes=num_of_gpus )<jupyter_output>Launching training on one GPU.<jupyter_text>Si vous travaillez sur un seul GPU, vous pouvez libérer de la mémoire pour la section suivante en copiant le code ci-dessous dans une nouvelle cellule et en l'exécutant. 
Pour les machines multi-GPU, 🤗 *Accelerate* ne permet à _aucune_ cellule d'accéder directement au GPU avec `torch.cuda`, donc nous ne recommandons pas d'utiliser cette astuce dans ces cas :```pythonavec torch.no_grad() : torch.cuda.empty_cache()``` Étape 7 : Exécution de l'inférence et inspection des générations Now that we've trained the model, let's generate some images with it to see how it fares! First we'll load the pipeline from the output directory we save the model to:<jupyter_code>pipe = StableDiffusionPipeline.from_pretrained( args.output_dir, torch_dtype=torch.float16, ).to("cuda")<jupyter_output><empty_output><jupyter_text>Ensuite, générons quelques images. La variable `prompt` sera utilisée plus tard pour définir la valeur par défaut du widget du *Hub* d'*Hugging Face*, alors expérimentez un peu pour en trouver une bonne. Vous pouvez également essayer de créer des prompts élaborés avec [CLIP Interrogator](https://huggingface.co/spaces/pharma/CLIP-Interrogator) :<jupyter_code># Choisissez un prompt amusant ici et il sera utilisé comme valeur par défaut du widget lorsque nous pousserons vers le *Hub* dans la section suivante prompt = f"a photo of {name_of_your_concept} {type_of_thing} in the Acropolis" # Ajustez l'orientation pour contrôler à quel point les générations suivent le prompt # Les valeurs comprises entre 7 et 11 sont généralement les plus efficaces guidance_scale = 7 num_cols = 2 all_images = [] for _ in range(num_cols): images = pipe(prompt, guidance_scale=guidance_scale).images all_images.extend(images) image_grid(all_images, 1, num_cols)<jupyter_output><empty_output><jupyter_text>Étape 8 : Pousser le modèle vers le *Hub* If you're happy with you model, the final step is to push it to the Hub and view it on the [DreamBooth Leaderboard](https://huggingface.co/spaces/dreambooth-hackathon/leaderboard)!First, you'll need to define a name for your model repo. By default, we use the unique identifier and class name, but feel free to change this if you want:<jupyter_code># Créez un nom pour votre modèle sur le Hub. Aucun espace n'est autorisé. model_name = f"{name_of_your_concept}-{type_of_thing}"<jupyter_output><empty_output><jupyter_text>Ensuite, ajoutez une brève description du type de modèle que vous avez entraîné ou toute autre information que vous souhaitez partager :<jupyter_code># Décrivez le thème et le modèle que vous avez entraînés description = f""" This is a Stable Diffusion model fine-tuned on `{type_of_thing}` images for the {theme} theme. """<jupyter_output><empty_output><jupyter_text>Enfin, exécutez la cellule ci-dessous pour créer un repo sur le Hub et pousser tous nos fichiers avec une belle carte de modèle :<jupyter_code># Code pour télécharger un pipeline sauvegardé localement vers le Hub from huggingface_hub import HfApi, ModelCard, create_repo, get_full_repo_name # Mise en place du repo et téléchargement des fichiers hub_model_id = get_full_repo_name(model_name) create_repo(hub_model_id) api = HfApi() api.upload_folder(folder_path=args.output_dir, path_in_repo="", repo_id=hub_model_id) content = f""" --- license: creativeml-openrail-m tags: - pytorch - diffusers - stable-diffusion - text-to-image - diffusion-models-class - dreambooth-hackathon - {theme} widget: - text: {prompt} --- # DreamBooth model for the {name_of_your_concept} concept trained by {api.whoami()["name"]} on the {dataset_id} dataset. This is a Stable Diffusion model fine-tuned on the {name_of_your_concept} concept with DreamBooth. 
It can be used by modifying the `instance_prompt`: **{instance_prompt}** This model was created as part of the DreamBooth Hackathon 🔥. Visit the [organisation page](https://huggingface.co/dreambooth-hackathon) for instructions on how to take part! ## Description {description} ## Usage ```python from diffusers import StableDiffusionPipeline pipeline = StableDiffusionPipeline.from_pretrained('{hub_model_id}') image = pipeline().images[0] image ``` """ card = ModelCard(content) hub_url = card.push_to_hub(hub_model_id) print(f"Téléchargement réussi ! Le modèle peut être trouvé ici : {hub_url}") print( f"Consultez votre soumission sur le classement public ici: https://huggingface.co/spaces/dreambooth-hackathon/leaderboard" )<jupyter_output>Upload successful! Model can be found here: https://huggingface.co/lewtun/test-dogs/blob/main/README.md View your submission on the public leaderboard here: https://huggingface.co/spaces/dreambooth-hackathon/leaderboard
diffusion-models-class/units/fr/events/dreambooth.ipynb/0
{ "file_path": "diffusion-models-class/units/fr/events/dreambooth.ipynb", "repo_id": "diffusion-models-class", "token_count": 11430 }
136
<jupyter_start><jupyter_text>Traduction (TensorFlow) Installez les bibliothèques 🤗 *Datasets* et 🤗 *Transformers* pour exécuter ce *notebook*.<jupyter_code>!pip install datasets transformers[sentencepiece] !apt install git-lfs<jupyter_output>Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/ Collecting datasets Downloading datasets-2.6.1-py3-none-any.whl (441 kB)  |████████████████████████████████| 441 kB 5.2 MB/s [?25hCollecting transformers[sentencepiece] Downloading transformers-4.23.1-py3-none-any.whl (5.3 MB)  |████████████████████████████████| 5.3 MB 38.1 MB/s [?25hRequirement already satisfied: dill<0.3.6 in /usr/local/lib/python3.7/dist-packages (from datasets) (0.3.5.1) Collecting responses<0.19 Downloading responses-0.18.0-py3-none-any.whl (38 kB) Requirement already satisfied: pyarrow>=6.0.0 in /usr/local/lib/python3.7/dist-packages (from datasets) (6.0.1) Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.7/dist-packages (from datasets) (1.21.6) Requirement already satisfied: fsspec[http]>=2021.11.1 in /usr/local/lib/python3.7/dist-packages (from datasets) (2022.8.2) Requirement already satisfied: aiohttp in /usr/local/lib[...]<jupyter_text>Vous aurez besoin de configurer git, adaptez votre email et votre nom dans la cellule suivante.<jupyter_code>!git config --global user.email "[email protected]" !git config --global user.name "Your Name"<jupyter_output><empty_output><jupyter_text>Vous devrez également être connecté au Hub d'Hugging Face. Exécutez ce qui suit et entrez vos informations d'identification.<jupyter_code>from huggingface_hub import notebook_login notebook_login() from datasets import load_dataset, load_metric raw_datasets = load_dataset("kde4", lang1="en", lang2="fr") raw_datasets split_datasets = raw_datasets["train"].train_test_split(train_size=0.9, seed=20) split_datasets split_datasets["validation"] = split_datasets.pop("test") split_datasets["train"][1]["translation"] from transformers import pipeline model_checkpoint = "Helsinki-NLP/opus-mt-en-fr" translator = pipeline("translation", model=model_checkpoint) translator("Default to expanded threads") split_datasets["train"][172]["translation"] translator( "Unable to import %1 using the OFX importer plugin. This file is not the correct format." 
) from transformers import AutoTokenizer model_checkpoint = "Helsinki-NLP/opus-mt-en-fr" tokenizer = AutoTokenizer.from_pretrained(model_checkpoint, return_tensors="tf") en_sentence = split_datasets["train"][1]["translation"]["en"] fr_sentence = split_datasets["train"][1]["translation"]["fr"] inputs = tokenizer(en_sentence) with tokenizer.as_target_tokenizer(): targets = tokenizer(fr_sentence) wrong_targets = tokenizer(fr_sentence) print(tokenizer.convert_ids_to_tokens(wrong_targets["input_ids"])) print(tokenizer.convert_ids_to_tokens(targets["input_ids"])) max_input_length = 128 max_target_length = 128 def preprocess_function(examples): inputs = [ex["en"] for ex in examples["translation"]] targets = [ex["fr"] for ex in examples["translation"]] model_inputs = tokenizer(inputs, max_length=max_input_length, truncation=True) # Configurer le tokenizer pour les cibles with tokenizer.as_target_tokenizer(): labels = tokenizer(targets, max_length=max_target_length, truncation=True) model_inputs["labels"] = labels["input_ids"] return model_inputs tokenized_datasets = split_datasets.map( preprocess_function, batched=True, remove_columns=split_datasets["train"].column_names, ) from transformers import TFAutoModelForSeq2SeqLM model = TFAutoModelForSeq2SeqLM.from_pretrained(model_checkpoint, from_pt=True) from transformers import DataCollatorForSeq2Seq data_collator = DataCollatorForSeq2Seq(tokenizer, model=model, return_tensors="tf") batch = data_collator([tokenized_datasets["train"][i] for i in range(1, 3)]) batch.keys() batch["labels"] batch["decoder_input_ids"] for i in range(1, 3): print(tokenized_datasets["train"][i]["labels"]) tf_train_dataset = model.prepare_tf_dataset( tokenized_datasets["train"], collate_fn=data_collator, shuffle=True, batch_size=32, ) tf_eval_dataset = model.prepare_tf_dataset( tokenized_datasets["validation"], collate_fn=data_collator, shuffle=False, batch_size=16, ) !pip install sacrebleu from datasets import load_metric metric = load_metric("sacrebleu") predictions = [ "This plugin lets you translate web pages between several languages automatically." ] references = [ [ "This plugin allows you to automatically translate web pages between several languages." ] ] metric.compute(predictions=predictions, references=references) predictions = ["This This This This"] references = [ [ "This plugin allows you to automatically translate web pages between several languages." ] ] metric.compute(predictions=predictions, references=references) predictions = ["This plugin"] references = [ [ "This plugin allows you to automatically translate web pages between several languages." 
] ] metric.compute(predictions=predictions, references=references) import numpy as np import tensorflow as tf from tqdm import tqdm generation_data_collator = DataCollatorForSeq2Seq( tokenizer, model=model, return_tensors="tf", pad_to_multiple_of=128 ) tf_generate_dataset = model.prepare_tf_dataset( tokenized_datasets["validation"], collate_fn=generation_data_collator, shuffle=False, batch_size=8, ) @tf.function(jit_compile=True) def generate_with_xla(batch): return model.generate( input_ids=batch["input_ids"], attention_mask=batch["attention_mask"], max_new_tokens=128, ) def compute_metrics(): all_preds = [] all_labels = [] for batch, labels in tqdm(tf_generate_dataset): predictions = generate_with_xla(batch) for batch in tf_generate_dataset: predictions = model.generate( input_ids=batch["input_ids"], attention_mask=batch["attention_mask"] ) labels = labels.numpy() labels = np.where(labels != -100, labels, tokenizer.pad_token_id) decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) decoded_preds = [pred.strip() for pred in decoded_preds] decoded_labels = [[label.strip()] for label in decoded_labels] all_preds.extend(decoded_preds) all_labels.extend(decoded_labels) result = metric.compute(predictions=all_preds, references=all_labels) return {"bleu": result["score"]} from huggingface_hub import notebook_login notebook_login() print(compute_metrics()) from transformers import create_optimizer from transformers.keras_callbacks import PushToHubCallback import tensorflow as tf # Le nombre d'étapes d'entraînement est le nombre d'échantillons dans le jeu de données, divisé par la taille du batch puis multiplié # par le nombre total d'époques. Notez que le jeu de données tf_train_dataset est ici un lot de données tf.data.Dataset, # pas le jeu de données original Hugging Face, donc son len() est déjà num_samples // batch_size. num_epochs = 3 num_train_steps = len(tf_train_dataset) * num_epochs optimizer, schedule = create_optimizer( init_lr=5e-5, num_warmup_steps=0, num_train_steps=num_train_steps, weight_decay_rate=0.01, ) model.compile(optimizer=optimizer) # Entraîner en mixed-precision float16 tf.keras.mixed_precision.set_global_policy("mixed_float16") from transformers.keras_callbacks import PushToHubCallback callback = PushToHubCallback( output_dir="marian-finetuned-kde4-en-to-fr", tokenizer=tokenizer ) model.fit( tf_train_dataset, validation_data=tf_eval_dataset, callbacks=[callback], epochs=num_epochs, ) print(compute_metrics()) from transformers import pipeline # Remplacer par votre propre checkpoint model_checkpoint = "huggingface-course/marian-finetuned-kde4-en-to-fr" translator = pipeline("translation", model=model_checkpoint) translator("Default to expanded threads") translator( "Unable to import %1 using the OFX importer plugin. This file is not the correct format." )<jupyter_output><empty_output>
notebooks/course/fr/chapter7/section4_tf.ipynb/0
{ "file_path": "notebooks/course/fr/chapter7/section4_tf.ipynb", "repo_id": "notebooks", "token_count": 3062 }
137